diff --git a/.gitignore b/.gitignore
index 04c7fd320b19a3da2344057a2fd78ef420e71499..24174aef30f4d80865755184ff44cc29458bbfaf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
 *node_modules
 **venv/
 doc.zih.tu-dresden.de/public/
+*mermaid.min.js
diff --git a/Dockerfile b/Dockerfile
index b272bf553212534167e23e083d4a0c088700a025..0999141870c48b88e0f983a51a9e5eec43b38d09 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,7 +12,7 @@ RUN pip install mkdocs>=1.1.2 mkdocs-material>=7.1.0
 # Linter #
 ##########
 
-RUN apt update && apt install -y nodejs npm aspell git
+RUN apt-get update && apt-get install -y nodejs npm aspell git
 
 RUN npm install -g markdownlint-cli markdown-link-check
 
@@ -29,6 +29,17 @@ RUN echo $'# gitlab.hrz.tu-chemnitz.de:22 SSH-2.0-OpenSSH_7.4\n\
 gitlab.hrz.tu-chemnitz.de ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNixJ1syD506jOtiLPxGhAXsNnVfweFfzseh9/WrNxbTgIhi09fLb5aZI2CfOOWIi4fQz07S+qGugChBs4lJenLYAu4b0IAnEv/n/Xnf7wITf/Wlba2VSKiXdDqbSmNbOQtbdBLNu1NSt+inFgrreaUxnIqvWX4pBDEEGBAgG9e2cteXjT/dHp4+vPExKEjM6Nsxw516Cqv5H1ZU7XUTHFUYQr0DoulykDoXU1i3odJqZFZQzcJQv/RrEzya/2bwaatzKfbgoZLlb18T2LjkP74b71DeFIQWV2e6e3vsNwl1NsvlInEcsSZB1TZP+mKke7JWiI6HW2IrlSaGqM8n4h\n\
 gitlab.hrz.tu-chemnitz.de ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ/cSNsKRPrfXCMjl+HsKrnrI3HgbCyKWiRa715S99BR\n' > ~/.ssh/known_hosts
 
+RUN git clone https://gitlab.hrz.tu-chemnitz.de/mago411c--tu-dresden.de/mkdocs_table_plugin.git ~/mkdocs_table_plugin
+RUN cd ~/mkdocs_table_plugin && python setup.py install
+
+# Make sure that mermaid is integrated...
+RUN echo '#!/bin/bash' > /entrypoint.sh
+RUN echo 'test \! -e /docs/tud_theme/javascripts/mermaid.min.js && test -x /docs/util/download-newest-mermaid.js.sh && /docs/util/download-newest-mermaid.js.sh' >> /entrypoint.sh
+RUN echo 'exec "$@"' >> /entrypoint.sh
+RUN chmod u+x /entrypoint.sh
+
 WORKDIR /docs
 
 CMD ["mkdocs", "build", "--verbose", "--strict"]
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/README.md b/README.md
index d3482f3ae680798e81cdd2ea7814eeadb4abe57d..f23e803c9f0d0c8a32b0361bfb8d50bfc6a1ffb3 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ issues.
 ## Contributing
 
 Contributions from user-side are highly welcome. Please refer to
-[Contribution guide](doc.zih.tu-dresden.de/README.md) to get started.
+[Contribution guide](doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md) to get started.
 
 ## Licenses
 
diff --git a/doc.zih.tu-dresden.de/README.md b/doc.zih.tu-dresden.de/README.md
deleted file mode 100644
index bf1b82f52a145f959068fa063d9dbdf31fb2eae3..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/README.md
+++ /dev/null
@@ -1,388 +0,0 @@
-# Contribution Guide
-
-In the following, it is outlined how to contribute to the
-[HPC documentation](https://doc.zih.tu-dresden.de/) of
-[TU Dresden/ZIH](https://tu-dresden.de/zih/) and
-which rules should be followed when adding to this project. Although, this document might seem very
-long describing complex steps, contributing is quite easy - trust us.
-
-## Contribute via Issue
-
-Users can contribute to the documentation via the
-[issue tracking system](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/issues).
-For that, open an issue to report typos and missing documentation or request for more precise
-wording etc. ZIH staff will get in touch with you to resolve the issue and improve the
-documentation.
-
-**Reminder:** Non-documentation issues and requests need to be send as ticket to
-[hpcsupport@zih.tu-dresden.de](mailto:hpcsupport@zih.tu-dresden.de).
-
-## Preparation
-
-Contributions can be done via editing the repository through GitLab's web interface or following
-the git-based workflow. Both ways are described in the following.
-
-### Fork and Clone Repository
-
-All contributing starts with forking the repository to either
-[gitlab.hrz.tu-chemnitz.de](https://gitlab.hrz.tu-chemnitz.de) or any other
-git service, e.g., [gitlab.com](https://www.gitlab.com), [github.com](https://www.github.com) or
-your personal preference.
-Now, create a local clone of your fork
-
-```Shell Session
-# SSH based method
-~ git@gitlab.hrz.tu-chemnitz.de:LOGIN/hpc-compendium.git
-
-# HTTP based method
-~ https://gitlab.hrz.tu-chemnitz.de/LOGIN/hpc-compendium.git
-```
-
-#### Install Dependencies
-
-See [Installation with Docker](#preview-using-mkdocs-with-dockerfile).
-
-<!--- All branches are protected, i.e., only ZIH staff can create branches and push to them --->
-
-## Contribute via Web IDE
-
-GitLab offers a rich and versatile web interface to work with repositories. To fix typos and edit
-source files, just select the file of interest and click the `Edit` button. A text and commit
-editor are invoked: Do your changes, add a meaningful commit message and commit the changes.
-
-The more sophisticated integrated Web IDE is reached from the top level menu of the repository or
-by selecting any source file.
-
-Other git services might have an equivalent web interface to interact with the repository. Please
-refer to the corresponding documentation for further information.
-
-<!--This option of contributing is only available for users of-->
-<!--[gitlab.hrz.tu-chemnitz.de](https://gitlab.hrz.tu-chemnitz.de). Furthermore, -->
-
-## Contribute via Local Clone
-
-### mkdocs Rocks
-
-As mentioned, this documentation bases on markdown files which are translated into static html files
-using the tool [mkdocs](https://www.mkdocs.org/). All markdown source files are located in the
-`docs` subfolder. The file `mkdocs.yml` is the single configuration file for the project from layout
-up to document structure and extensions.
-The navigation section `nav` in `mkdocs.yml` specifies the order, titles and nesting of the
-documentation pages.
-
-In principle, `mkdocs` is not mandatory on the local system to contribute to the project. But it
-also provides a builtin development server that allows to serve the documentation, i.e. it can
-preview the updated documentation locally before committing the changes to the repository.
-
-To make use of `mkdocs`, it is necessary to have two commands in mind
-
-```
-~ mkdocs serve - Start the live-reloading docs server.
-~ mkdocs build - Build the documentation site.
-```
-
-#### Preview Using mkdocs
-
-Invoke`mkdocs serve`to build and preview the documentation. The documentation is automatically
-rerendered and reloaded if the system detects updates (great!). By default, the builtin web server
-hosts the documentation at `http://127.0.0.1:8000`.
-
-```Shell Session
-~ cd /PATH/TO/hpc-compendium/doc.zih.tu-dresden.de
-~ mkdocs serve
-INFO    -  Building documentation...
-INFO    -  Cleaning site directory
-INFO    -  Documentation built in 0.08 seconds
-[I 210127 13:33:30 server:335] Serving on http://127.0.0.1:8000
-INFO    -  Serving on http://127.0.0.1:8000
-[I 210127 13:33:30 handlers:62] Start watching changes
-INFO    -  Start watching changes
-```
-
-Open `http://127.0.0.1:8000` with a web browser to preview the local copy of the documentation.
-
-#### Preview Using mkdocs With Dockerfile
-
-You can also use `docker` to build a container from the `Dockerfile`, if you are familiar with it.
-This may take a while, as mkdocs and other necessary software needs to be downloaded.
-Building a container could be done with the following steps:
-
-```Bash
-cd /PATH/TO/hpc-compendium
-docker build -t hpc-compendium .
-```
-
-To avoid a lot of retyping, use the following in your shell:
-
-```bash
-alias wiki="docker run --name=hpc-compendium --rm -it -w /docs --mount src=$PWD/doc.zih.tu-dresden.de,target=/docs,type=bind hpc-compendium bash -c"
-```
-
-If you want to see how it looks in your browser, you can use shell commands to serve
-the documentation:
-
-```Bash
-wiki "mkdocs build --verbose && mkdocs serve -a 0.0.0.0:8000"
-```
-
-You can view the documentation via `http://localhost:8000` in your browser, now.
-
-If that does not work, check if you can get the URL for your browser's address
-bar from a different terminal window:
-
-```Bash
-echo http://$(docker inspect -f "{{.NetworkSettings.IPAddress}}" $(docker ps -qf "name=hpc-compendium")):8000
-```
-
-The running container automatically takes care of file changes and rebuilds the
-documentation. If you want to check whether the markdown files are formatted
-properly, use the following command:
-
-```Bash
-wiki 'markdownlint docs'
-```
-
-To check whether there are links that point to a wrong target, use
-(this may take a while and gives a lot of output because it runs over all files):
-
-```Bash
-wiki "find docs -type f -name '*.md' | xargs -L1 markdown-link-check"
-```
-
-To check a single file, e. g. `doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md`, use:
-
-```Bash
-wiki 'markdown-link-check docs/software/big_data_frameworks_spark.md'
-```
-
-For spell-checking a single file, use:
-
-```Bash
-wiki 'util/check-spelling.sh <file>'
-```
-
-For spell-checking all files, use:
-
-```Bash
-docker run --name=hpc-compendium --rm -it -w /docs --mount src="$(pwd)",target=/docs,type=bind hpc-compendium ./doc.zih.tu-dresden.de/util/check-spelling.sh
-```
-
-This outputs all words of all files that are unknown to the spell checker.
-To let the spell checker "know" a word, append it to
-`doc.zih.tu-dresden.de/wordlist.aspell`.
-
-#### Build Static Documentation
-
-To build the documentation, invoke `mkdocs build`. This will create a new directory named `public`
-which holds the generated static html/jss/css files. This command is used to build the documentation
-within the CI/CD pipeline. Thus, it should exit without error.
-
-```Shell Session
-~ cd /PATH/TO/hpc-compendium/doc.zih.tu-dresden.de
-~ mkdocs build
-INFO    -  Cleaning site directory
-INFO    -  Building documentation to directory: /PATH/to/hpc-compendium.git/doc.zih.tu-dresden.de/public
-INFO    -  Documentation built in 0.09 seconds
-```
-
-### Git Workflow
-
-It is crucial to keep your branch synchronized with the upstream repository when you are working
-locally on the documentation. At first, you should add a remote pointing to the official
-documentation.
-
-```Shell Session
-~ git remote add upstream-zih git@gitlab.hrz.tu-chemnitz.de:zih/hpcsupport/hpc-compendium.git
-```
-
-Now, you have two remotes, namely *origin* and *upstream-zih*. The remote *origin* points to your fork,
-whereas *upstream-zih* points to the original documentation repository at GitLab Chemnitz.
-
-```Shell Session
-$ git remote -v
-origin  git@gitlab.hrz.tu-chemnitz.de:LOGIN/hpc-compendium.git (fetch)
-origin  git@gitlab.hrz.tu-chemnitz.de:LOGIN/hpc-compendium.git (push)
-upstream-zih  git@gitlab.hrz.tu-chemnitz.de:zih/hpcsupport/hpc-compendium.git (fetch)
-upstream-zih  git@gitlab.hrz.tu-chemnitz.de:zih/hpcsupport/hpc-compendium.git (push)
-```
-
-Next, you should synchronize your `main` branch with the upstream.
-
-```Shell Session
-~ git checkout main
-~ git pull upstream main
-```
-
-At this point, your `main` branch is up-to-date with the original documentation of HPC compendium.
-
-<!--To push your sync changes to your fork you can do the following:-->
-<!--git push origin main-->
-
-#### Making Changes and Merge Requests
-
-It is good git-practise to only use the `main` branch for synchronization with the upstream, not for
-changes, as outlined in the previous subsection. In order to commit to this documentation, create a
-new branch (a so-called feature branch) basing on the `main` branch and commit your changes to it.
-
-```Shell Session
-~ git checkout main
-~ git checkout -b <FEATUREBRANCH>
-# Edit file1, file2 etc.
-~ git add <file1> <file2>
-~ git commit -m <COMMIT MESSAGE>
-~ git push origin <FEATUREBRANCH>
-```
-
-The last command pushes the changes to your remote at branch `FEATUREBRANCH`. Now, it is time to
-incorporate the changes and improvements into the HPC Compendium. For this, create a
-[merge request](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/merge_requests/new)
-to the `main` branch.
-
-### Important Branches
-
-There are two important branches in this repository:
-
-- Preview:
-  - Branch containing recent changes which will be soon merged to main branch (protected
-    branch)
-  - Served at [https://doc.zih.tu-dresden.de/preview](https://doc.zih.tu-dresden.de/preview) from
-    TUD-ZIH VPN
-- Main: Branch which is deployed at [https://doc.zih.tu-dresden.de](https://doc.zih.tu-dresden.de)
-    holding the current documentation (protected branch)
-
-If you are totally sure about your commit (e.g., fix a typo), it is only the following steps:
-
-  1. Synchronize branches
-  1. Edit the markdown file of interest
-  1. Commit change
-  1. Push commit to your fork and probably new branch
-  1. Pose Merge Request
-
-## Checks
-
-We have several checks on the markdown sources to ensure for a consistent and high quality of the
-documentation. These checks are run within the CI/CD pipeline and changes are only deployed to the
-HPC compendium, if the checks are passed. Thus, we **highly recommend** running the checks locally
-before committing and posing a merge request.
-
-* Markdown linter
-* Check internal and external links
-* Check code and command examples
-
-### Markdown Linter
-
-The [markdown linter client](https://github.com/igorshubovych/markdownlint-cli) helps to keep the
-markdown source code clean and consistent.
-
-Installation
-
-```Shell Session
-~ [sudo] npm [-g]  install markdownlint-cli
-```
-
-The configuration is stored in `.markdownlintrc`.
-The tool `markdownlint` can be run in dry or fix mode.
-The *dry* mode (default) only outputs findings, whereas in *fix* mode it resolves basic
-errors directly in the markdown files.
-
-```Shell Session
-~ cd doc.zih.tu-dresden.de/
-~ markdownlint [--fix] docs/index.md
-docs/index.md:8:131 MD013/line-length Line length [Expected: 130; Actual: 138]
-```
-
-Before committing, invoke the script `util/lint-changes.sh` which calls the markdownlint tool for all
-(git-)changed markdown files.
-
-```Shell Session
-~ sh util/lint-changes.sh
-hpc-compendium-2.0-gitlab-tudzih git:(master) ✗ sh util/lint-changes.sh
-README.md:6 MD012/no-multiple-blanks Multiple consecutive blank lines [Expected: 1; Actual: 2]
-README.md:7 MD012/no-multiple-blanks Multiple consecutive blank lines [Expected: 1; Actual: 3]
-README.md:21 MD012/no-multiple-blanks Multiple consecutive blank lines [Expected: 1; Actual: 2]
-README.md:22 MD022/blanks-around-headings/blanks-around-headers Headings should be surrounded by blank lines [Expected: 1; Actual: 0; Below] [Context: "### Why is this not at gitlab.hrz.tu-chemnitz.de?"]
-[8< 8<]
-```
-
-### Check Links
-
-No one likes dead links. Therefore, we check the internal and external links within the markdown
-source files. For that, the script `util/check-links.sh` and the tool
-[markdown-link-check](https://github.com/tcort/markdown-link-check) can be used.
-
-The tool `markdown-link-check` checks links within a certain file (or using some shell magic for all
-markdown files, as depicted below). On the other hand, the script `util/check-links.sh` checks only
-links for files in the repository, which are gifferent (gifferent is a word composition from *git*
-and *different to main branch*).
-
-#### Markdown-link-check
-
-Installation (see [official documentation](https://github.com/tcort/markdown-link-check#installation))
-
-```Shell Session
-~ [sudo] npm [-g] install markdown-link-check
-```
-
-Run check
-
-```Shell Session
-~ cd doc.zih.tu-dresden.de/
-~ markdown-link-check docs/jobs/Slurm.md
-
-FILE: docs/jobs/Slurm.md
-[✖] Slurmgenerator
-[✖] Compendium.RunningNxGpuAppsInOneJob
-[✓] https://slurm.schedmd.com/sbatch.html
-[✖] BindingAndDistributionOfTasks
-[✓] http://slurm.schedmd.com/hdf5_profile_user_guide.html
-[✓] http://slurm.schedmd.com/sh5util.html
-[✓] mailto:hpcsupport@zih.tu-dresden.de
-[✓] http://slurm.schedmd.com/
-[✓] http://www.schedmd.com/slurmdocs/rosetta.html
-
-9 links checked.
-
-ERROR: 3 dead links found!
-[✖] Slurmgenerator → Status: 400
-[✖] Compendium.RunningNxGpuAppsInOneJob → Status: 400
-[✖] BindingAndDistributionOfTasks → Status: 400
-```
-
-In this example, all external links are fine, but three links to internal documents need to be
-fixed.
-
-To check the links within all markdown files in one sweep, some shell magic is necessary
-
-```Shell Session
-~ cd doc.zih.tu-dresden.de/
-~ find . -name \*.md -exec markdown-link-check {} \;
-```
-
-#### Check-links.sh
-
-The script `util/check-links.sh` checks links for all gifferent files, i.e., markdown files which
-are part of the repository and different to the `main` branch. Use this script before committing your
-changes to make sure your commit passes the CI/CD pipeline.
-
-### Check Code and Commands
-
-The script `xyz.sh` checks if the code chunks are runnable on a login node.
-It is invoked as follows ...
-
-**TODO:** Implement [Issue #9](#9)
-
-### Check Pages Structure
-
-The script `util/check-no-floating.sh` first checks the hierarchy depth of the pages structure and
-the second check tests if every markdown file is included in the navigation section of the
-`mkdocs.yaml` file.
-
-The script is invoked and reports as follows
-
-```Shell Session
-~ sh doc.zih.tu-dresden.de/util/check-no-floating.sh doc.zih.tu-dresden.de
-HardwareTaurus.md is not included in nav
-BigDataFrameworksApacheSparkApacheFlinkApacheHadoop.md is not included in nav
-pika.md is not included in nav
-specific_software.md is not included in nav
-```
diff --git a/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md b/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md
index c652738dc859beecf3dc9669fdde684dc49d04f3..85d54d21c4e9db614f5beb4a62c34b4217943077 100644
--- a/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md
+++ b/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md
@@ -60,15 +60,20 @@ Direct access to the compute nodes is not allowed. Therefore, you have to create
 laptop or workstation to the specific compute node and port as follows.
 
 ```bash
-marie@local$ ssh -NL <local port>:<compute node>:<remote port> <zih login>@tauruslogin.hrsk.tu-dresden.de
+marie@local$ ssh -NL <local port>:<compute node>:<remote port> taurus
 ```
 
 e.g.
 
 ```console
-marie@local$ ssh -NL 5901:172.24.146.46:5901 marie@tauruslogin.hrsk.tu-dresden.de
+marie@local$ ssh -NL 5901:172.24.146.46:5901 taurus
 ```
 
+!!! important "SSH command"
+
+    The previous SSH command requires that you have already set up your
+    [SSH configuration](../access/ssh_login.md#configuring-default-parameters-for-ssh).
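+
+    A minimal sketch of such a host entry in your local `~/.ssh/config` might look as follows.
+    The alias `taurus` and the login `marie` are placeholders, adjust them to your setup:
+
+    ```
+    Host taurus
+      HostName tauruslogin.hrsk.tu-dresden.de
+      User marie
+    ```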
+
 ### Step 3
 
 Open your local web-browser and connect to the following URL, replacing `<local port>` with the
diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
index f9a916195ecbf814cf426beb4d26885500b3b3de..193a563b9687fa8485a9d04446409b83615ced0a 100644
--- a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
@@ -1,8 +1,8 @@
 # JupyterHub
 
-With our JupyterHub service we offer you a quick and easy way to work with Jupyter notebooks on ZIH
-systems. This page covers starting and stopping JupyterHub sessions, error handling and customizing
-the environment.
+With our JupyterHub service, we offer you a quick and easy way to work with
+Jupyter notebooks on ZIH systems. This page covers starting and stopping
+JupyterHub sessions, error handling and customizing the environment.
 
 We also provide a comprehensive documentation on how to use
 [JupyterHub for Teaching (git-pull feature, quickstart links, direct links to notebook files)](jupyterhub_for_teaching.md).
@@ -13,16 +13,17 @@ We also provide a comprehensive documentation on how to use
 
     The JupyterHub service is provided *as-is*, use at your own discretion.
 
-Please understand that JupyterHub is a complex software system of which we are not the developers
-and don't have any downstream support contracts for, so we merely offer an installation of it but
-cannot give extensive support in every case.
+Please understand that JupyterHub is a complex software system of which we are
+not the developers and don't have any downstream support contracts for, so we
+merely offer an installation of it but cannot give extensive support in every
+case.
 
 ## Access
 
 !!! note
     This service is only available for users with an active HPC project.
-    See [Application for Login and Resources](../application/overview.md), if you need to apply for
-    an HPC project.
+    See [Application for Login and Resources](../application/overview.md), if
+    you need to apply for an HPC project.
 
 JupyterHub is available at
 [https://taurus.hrsk.tu-dresden.de/jupyter](https://taurus.hrsk.tu-dresden.de/jupyter).
@@ -37,8 +38,7 @@ offers you the most important settings to start quickly.
 ![Simple form](misc/simple_form.png)
 {: align="center"}
 
-For advanced users we have an extended form where you can change many
-settings. You can:
+For advanced users, we have an extended form where you can change many settings. You can:
 
 - modify batch system parameters to your needs ([more about batch system Slurm](../jobs_and_resources/slurm.md))
 - assign your session to a project or reservation
@@ -80,76 +80,101 @@ several views:
 
 ### Classic Jupyter Notebook
 
-Initially your `home` directory is listed. You can open existing notebooks or files by navigating to
-the corresponding path and clicking on them.
+Initially, your home directory is listed. You can open existing notebooks or
+files by navigating to the corresponding path and clicking on them.
 
 ![Jupyter notebook file browser](misc/jupyter_notebook_file_browser.png)
 {: align="center"}
 
-Above the table on the right side is the button `New` which lets you create new notebooks, files,
-directories or terminals.
+Above the table on the right side, there is the button `New` which lets you create new
+notebooks, files, directories or terminals.
 
-![Jupyter notebook example matplotlib](misc/jupyter_notebook_example_matplotlib.png)
+![Jupyter notebook example Matplotlib](misc/jupyter_notebook_example_matplotlib.png)
 {: align="center"}
 
 ## Jupyter Notebooks in General
 
-In JupyterHub you can create scripts in notebooks. Notebooks are programs which are split into
-multiple logical code blocks.  In between those code blocks you can insert text blocks for
-documentation and each block can be executed individually. Each notebook is paired with a kernel
-running the code. We currently offer one for Python, C++, MATLAB and R.
+In JupyterHub, you can create scripts in notebooks. Notebooks are programs which are split into
+multiple logical code blocks. Each block can be executed individually. In between those code
+blocks, you can insert text blocks for documentation. Each notebook is paired with a kernel running
+the code. We currently offer one for Python, C++, MATLAB and R.
+
+### Version Control of Jupyter Notebooks with Git
+
+Since Jupyter notebooks are files containing multiple blocks for input code,
+documentation, output and further information, it is difficult to use them with
+Git. Version tracking of the `.ipynb` notebook files can be improved with the
+[Jupytext plugin](https://jupytext.readthedocs.io/en/latest/). Jupytext will
+provide Markdown (`.md`) and Python (`.py`) conversions of notebooks on the fly,
+next to `.ipynb`. Tracking these files will then provide a cleaner Git history.
+A further advantage is that Python notebook versions can be imported, allowing
+you to split larger notebooks into smaller ones based on chained imports.
+
+!!! note
+    The Jupytext plugin is not installed on the ZIH system at the moment.
+    Currently, it can be [installed](https://jupytext.readthedocs.io/en/latest/install.html)
+    by the users with the parameter `--user`.
+    Therefore, `ipynb` files need to be made available in a repository for shared
+    usage within the ZIH system.
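+
+A minimal sketch of such a workflow, assuming Jupytext has been installed via
+`pip install --user jupytext` and using a hypothetical notebook `notebook.ipynb`:
+
+```console
+marie@compute$ jupytext --set-formats ipynb,py:percent notebook.ipynb  # pair the notebook with a .py version
+marie@compute$ jupytext --sync notebook.ipynb  # keep the paired files in sync after editing
+```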
 
 ## Stop a Session
 
-It is good practice to stop your session once your work is done. This releases resources for other
-users and your quota is less charged. If you just log out or close the window, your server continues
-running and **will not stop** until the Slurm job runtime hits the limit (usually 8 hours).
+It is good practice to stop your session once your work is done. This releases
+resources for other users and your quota is less charged. If you just log out or
+close the window, your server continues running and **will not stop** until the
+Slurm job runtime hits the limit (usually 8 hours).
 
-At first you have to open the JupyterHub control panel.
+At first, you have to open the JupyterHub control panel.
 
 **JupyterLab**: Open the file menu and then click on `Logout`. You can
-also click on `Hub Control Panel` which opens the control panel in a new
-tab instead.
+also click on `Hub Control Panel`, which opens the control panel in a new tab instead.
 
 ![JupyterLab logout](misc/jupyterlab_logout.png)
 {: align="center"}
 
-**Classic Jupyter notebook**: Click on the control panel button on the top right
-of your screen.
+**Classic Jupyter notebook**: Click on the control panel button on the top right of your screen.
 
 ![Jupyter notebook control panel button](misc/jupyter_notebook_control_panel_button.png)
 {: align="center"}
 
-Now you are back on the JupyterHub page and you can stop your server by clicking on
+Now, you are back on the JupyterHub page and you can stop your server by clicking on
 ![Stop my server](misc/stop_my_server.png)
 {: align="center"}
 
 ## Error Handling
 
 We want to explain some errors that you might face sooner or later.
-If you need help open a ticket at [HPC support](mailto:hpcsupport@zih.tu-dresden.de).
+If you need help, open a ticket and ask for support as described in
+[How to Ask for Support](../support/support.md).
 
 ### Error at Session Start
 
 ![Error batch job submission failed](misc/error_batch_job_submission_failed.png)
 {: align="center"}
 
-This message appears instantly if your batch system parameters are not valid.
+This message appears instantly if your batch system parameters are not valid.
 Please check those settings against the available hardware.
 Useful pages for valid batch system parameters:
 
 - [General information how to use Slurm](../jobs_and_resources/slurm.md)
 - [Partitions and limits](../jobs_and_resources/partitions_and_limits.md)
 
+!!! hint
+    This message might also appear for other Slurm related problems, e.g. quota issues.
+    That might be the case when the error appears for you but not for others while using the same
+    system parameters. In this case, please ask for support as described in
+    [How to Ask for Support](../support/support.md).
+
 ### Error Message in JupyterLab
 
 ![JupyterLab error directory not found](misc/jupyterlab_error_directory_not_found.png)
 {: align="center"}
 
-If the connection to your notebook server unexpectedly breaks, you will get this error message.
-Sometimes your notebook server might hit a batch system or hardware limit and gets killed. Then
-usually the log file of the corresponding batch job might contain useful information. These log
-files are located in your `home` directory and have the name `jupyter-session-<jobid>.log`.
+If the connection to your notebook server unexpectedly breaks, you will get this
+error message. Sometimes your notebook server might hit a batch system or
+hardware limit and gets killed. Then, the log file of the corresponding
+batch job usually contains useful information. These log files are located in your
+home directory and have the name `jupyter-session-<jobid>.log`.
 
 ## Advanced Tips
 
@@ -163,8 +188,9 @@ exact standard environment through the spawner form:
 ![Environment package list](misc/environment_package_list.png)
 {: align="center"}
 
-This list shows all packages of the currently selected conda environment. This depends on your
-settings for partition (CPU architecture) and standard environment.
+This list shows all packages of the currently selected conda environment. This
+depends on your settings for partition (CPU architecture) and standard
+environment.
 
 There are three standard environments:
 
@@ -172,8 +198,9 @@ There are three standard environments:
 - test
 - python-env-python3.8.6
 
-**Python-env-python3.8.6** virtual environment can be used for all x86 partitions(`gpu2`, `alpha`,
-etc). It gives the opportunity to create a user kernel with the help of a Python environment.
+**Python-env-python3.8.6** virtual environment can be used for all x86
+partitions (`gpu2`, `alpha`, etc). It gives the opportunity to create a user
+kernel with the help of a Python environment.
 
 Here is a short list of some included software:
 
@@ -185,8 +212,8 @@ Here is a short list of some included software:
 | PyTorch    | 1.3.1     | 1.3.1  |
 | TensorFlow | 2.1.1     | 2.1.1  |
 | Keras      | 2.3.1     | 2.3.1  |
-| numpy      | 1.17.5    | 1.17.4 |
-| matplotlib | 3.3.1     | 3.0.3  |
+| NumPy      | 1.17.5    | 1.17.4 |
+| Matplotlib | 3.3.1     | 3.0.3  |
 
 \* generic = all partitions except ml
 
@@ -196,80 +223,85 @@ Here is a short list of some included software:
 
 !!! info
 
-    Interactive code interpreters which are used by Jupyter notebooks are called *kernels*. Creating
-    and using your own kernel has the benefit that you can install your own preferred Python
-    packages and use them in your notebooks.
+    Interactive code interpreters which are used by Jupyter notebooks are called
+    *kernels*. Creating and using your own kernel has the benefit that you can
+    install your own preferred Python packages and use them in your notebooks.
 
 We currently have two different architectures at ZIH systems.
 Build your kernel environment on the **same architecture** that you want to use
-later on with the kernel. In the examples below we use the name
+later on with the kernel. In the examples below, we use the name
 "my-kernel" for our user kernel. We recommend to prefix your kernels
-with keywords like `haswell`, `ml`, `romeo`, `venv`, `conda`. This way you
-can later recognize easier how you built the kernel and on which hardware it will work.
-
-**Intel nodes** (e.g. partition `haswell`, `gpu2`):
-
-```console
-maria@login$ srun --pty --ntasks=1 --cpus-per-task=2 --mem-per-cpu=2541 --time=08:00:00 bash -l
-```
-
-**Power nodes** (partition `ml`):
-
-```console
-maria@login$ srun --pty --partition=ml --ntasks=1 --cpus-per-task=2 --mem-per-cpu=1443 --time=08:00:00 bash -l
-```
-
-Create a virtual environment in your `home` directory. You can decide between Python virtualenvs or
-conda environments.
+with keywords like `haswell`, `ml`, `romeo`, `venv`, `conda`. This way, you
+can later more easily recognize how you built the kernel and on which hardware
+it will work. Depending on that hardware, allocate resources:
+
+=== "x86 nodes (e.g. partition `haswell`, `gpu2`)"
+    ```console
+    maria@login$ srun --pty --ntasks=1 --cpus-per-task=2 --mem-per-cpu=2541 --time=08:00:00 bash -l
+    ```
+=== "PowerPC nodes (partition `ml`)"
+    ```console
+    maria@login$ srun --pty --partition=ml --ntasks=1 --cpus-per-task=2 --mem-per-cpu=1443 \
+     --time=08:00:00 bash -l
+    ```
+
+Create a virtual environment in your home directory. You can decide between
+Python virtualenv or conda environment.
 
 !!! note
-    Please take in mind that Python venv is the preferred way to create a Python virtual environment.
+    Please keep in mind that Python virtualenv is the preferred way to create a Python
+    virtual environment.
+    For working with conda virtual environments, it may be necessary to configure your shell via
+    `conda init` as described in [Python virtual environments](../software/python_virtual_environments.md#conda-virtual-environment).
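+
+    A minimal sketch of this one-time setup, assuming a bash shell (restart the shell afterwards):
+
+    ```console
+    marie@compute$ conda init bash
+    ```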
 
 #### Python Virtualenv
 
 ```console
 marie@compute$ module load Python/3.8.6-GCCcore-10.2.0
+Module Python/3.8.6-GCCcore-10.2.0 and 11 dependencies loaded.
 marie@compute$ mkdir user-kernel # please use workspaces!
 marie@compute$ cd user-kernel
 marie@compute$ virtualenv --system-site-packages my-kernel
-Using base prefix '/sw/installed/Python/3.6.6-fosscuda-2018b'
-New python executable in .../user-kernel/my-kernel/bin/python
-Installing setuptools, pip, wheel...done.
+created virtual environment CPython3.8.6.final.0-64 in 5985ms
+  creator CPython3Posix(dest=[...]/my-kernel, clear=False, global=True)
+  seeder FromAppData(download=False, pip=bundle, setuptools=bundle, wheel=bundle, via=copy, app_data_dir=[...])
+    added seed packages: pip==20.2.3, setuptools==50.3.0, wheel==0.35.1
+  activators BashActivator,CShellActivator,FishActivator,PowerShellActivator,PythonActivator,XonshActivator
 marie@compute$ source my-kernel/bin/activate
-marie@compute$ pip install ipykernel
+(my-kernel) marie@compute$ pip install ipykernel
 Collecting ipykernel
 [...]
-Successfully installed ... ipykernel-5.1.0 ipython-7.5.0 ...
-marie@compute$ pip install --upgrade pip
-marie@compute$ python -m ipykernel install --user --name my-kernel --display-name="my kernel"
+Successfully installed [...] ipykernel-6.9.1 ipython-8.0.1 [...]
+(my-kernel) marie@compute$ pip install --upgrade pip
+(my-kernel) marie@compute$ python -m ipykernel install --user --name my-kernel --display-name="my kernel"
 Installed kernelspec my-kernel in .../.local/share/jupyter/kernels/my-kernel
-marie@compute$ pip install [...] # now install additional packages for your notebooks
-marie@compute$ deactivate
+(my-kernel) marie@compute$ pip install [...] # now install additional packages for your notebooks
+(my-kernel) marie@compute$ deactivate
 ```
 
 #### Conda Environment
 
-Load the needed module for Intel nodes
-
-```console
-marie@compute$ module load Anaconda3
-```
-
-... or for IBM nodes (partition `ml`):
+Load the needed module depending on partition architecture:
 
-```console
-marie@ml$ module load PythonAnaconda
-```
+=== "x86 nodes (e.g. partition `haswell`, `gpu2`)"
+    ```console
+    marie@compute$ module load Anaconda3
+    ```
+=== "PowerPC nodes (partition `ml`)"
+    ```console
+    marie@ml$ module load PythonAnaconda
+    ```
 
-Continue with environment creation, package installation and kernel registration:
+Continue with environment creation, package installation and kernel
+registration:
 
 ```console
 marie@compute$ mkdir user-kernel # please use workspaces!
-marie@compute$ conda create --prefix /home/<USER>/user-kernel/my-kernel python=3.6
+marie@compute$ conda create --prefix $HOME/user-kernel/my-kernel python=3.8.6
 Collecting package metadata: done
 Solving environment: done
 [...]
-marie@compute$ conda activate /home/<USER>/user-kernel/my-kernel
+marie@compute$ conda activate $HOME/user-kernel/my-kernel
 marie@compute$ conda install ipykernel
 Collecting package metadata: done
 Solving environment: done
@@ -303,11 +335,13 @@ You can switch kernels of existing notebooks in the kernel menu:
 {: align="center"}
 
 !!! note
-    Both python venv and conda virtual environments will be mention in the same list.
+    Both Python virtualenv and conda virtual environments will be mentioned in
+    the same list.
 
 ### Loading Modules
 
 You have now the option to preload modules from the [module system](../software/modules.md).
-Select multiple modules that will be preloaded before your notebook server starts. The list of
-available modules depends on the module environment you want to start the session in (`scs5` or
-`ml`). The right module environment will be chosen by your selected partition.
+Select multiple modules that will be preloaded before your notebook server
+starts. The list of available modules depends on the module environment you want
+to start the session in (`scs5` or `ml`). The right module environment will be
+chosen by your selected partition.
diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
index 797d9fc8e455b14e40a5ec7f3737874b2ac500ae..2d9bbb477dd9b07e66b92b70f13f235d8a7d5a77 100644
--- a/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
@@ -1,7 +1,7 @@
 # JupyterHub for Teaching
 
-On this page, we want to introduce to you some useful features if you want to use JupyterHub for
-teaching.
+On this page, we want to introduce to you some useful features if you want to
+use JupyterHub for teaching.
 
 !!! note
 
@@ -9,31 +9,46 @@ teaching.
 
 Please be aware of the following notes:
 
-- ZIH systems operate at a lower availability level than your usual Enterprise Cloud VM. There can
-  always be downtimes, e.g. of the filesystems or the batch system.
-- Scheduled downtimes are announced by email. Please plan your courses accordingly.
-- Access to HPC resources is handled through projects. See your course as a project. Projects need
-  to be registered beforehand (more info on the page [Access](../application/overview.md)).
+- ZIH systems operate at a lower availability level than your usual Enterprise
+  Cloud VM. There can always be downtimes, e.g. of the filesystems or the batch
+  system.
+- Scheduled downtimes are announced by email. Please plan your courses
+  accordingly.
+- Access to HPC resources is handled through projects. See your course as a
+  project. Projects need to be registered beforehand (more info on the page
+  [Access](../application/overview.md)).
 - Don't forget to [add your users](../application/project_management.md#manage-project-members-dis-enable)
   (e.g. students or tutors) to your project.
 - It might be a good idea to [request a reservation](../jobs_and_resources/overview.md#exclusive-reservation-of-hardware)
-  of part of the compute resources for your project/course to avoid unnecessary waiting times in
-  the batch system queue.
+  of part of the compute resources for your project/course to avoid unnecessary
+  waiting times in the batch system queue.
 
 ## Clone a Repository With a Link
 
-This feature bases on [nbgitpuller](https://github.com/jupyterhub/nbgitpuller). Further information
-can be found in the [external documentation about nbgitpuller](https://jupyterhub.github.io/nbgitpuller/).
+This feature bases on [nbgitpuller](https://github.com/jupyterhub/nbgitpuller).
+Further information can be found in the [external documentation about nbgitpuller](https://jupyterhub.github.io/nbgitpuller/).
 
-This extension for Jupyter notebooks can clone every public git repository into the users work
-directory. It's offering a quick way to distribute notebooks and other material to your students.
+This extension for Jupyter notebooks can clone every public git repository into
+the user's work directory. It offers a quick way to distribute notebooks and
+other material to your students.
 
 ![Git pull progress screen](misc/gitpull_progress.png)
 {: align="center"}
 
-A shareable link for this feature looks like this:
+To create a shareable link, we recommend using [URL encoding](https://en.wikipedia.org/wiki/Percent-encoding)
+instead of plain text for the link in order to avoid broken links. The
+[nbgitpuller link generator](https://jupyterhub.github.io/nbgitpuller/link?hub=https://taurus.hrsk.tu-dresden.de/jupyter/)
+supports you in generating valid links for sharing.
 
-<https://taurus.hrsk.tu-dresden.de/jupyter/hub/user-redirect/git-pull?repo=https://github.com/jdwittenauer/ipython-notebooks&urlpath=/tree/ipython-notebooks/notebooks/language/Intro.ipynb>
+??? example
+    A shareable link for this feature looks like this:
+    ```
+    https://taurus.hrsk.tu-dresden.de/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fjdwittenauer%2Fipython-notebooks&urlpath=tree%2Fipython-notebooks%2Fnotebooks%2Flanguage%2FIntro.ipynb
+    ```
+
+!!! warning
+    For illustration purposes, we use plain text links in the following parts. In practice, we
+    highly recommend using URL-encoded links instead.
 
 ![URL with git-pull parameters](misc/url-git-pull.png)
 {: align="center"}
@@ -51,18 +66,14 @@ The following parameters are available:
 |`urlpath` | URL to redirect the user to a certain file, [more info about parameter urlpath](https://jupyterhub.github.io/nbgitpuller/topic/url-options.html#urlpath)|
 |`depth`   | clone only a certain amount of latest commits not recommended|
 
-This [link
-generator](https://jupyterhub.github.io/nbgitpuller/link?hub=https://taurus.hrsk.tu-dresden.de/jupyter/)
-might help creating those links
-
 ## Spawn Options Pass-through with URL Parameters
 
 The spawn form now offers a quick start mode by passing URL parameters.
 
 !!! example
 
-    The following link would create a jupyter notebook session on the `interactive` partition with the `test`
-    environment being loaded:
+    The following link would create a jupyter notebook session on the
+    `interactive` partition with the `test` environment being loaded:
 
     ```
     https://taurus.hrsk.tu-dresden.de/jupyter/hub/spawn#/~(partition~'interactive~environment~'test)
@@ -71,8 +82,8 @@ The spawn form now offers a quick start mode by passing URL parameters.
 ![URL with quickstart parameters](misc/url-quick-start.png)
 {: align="center"}
 
-Every parameter of the advanced form can be set with this parameter. If the parameter is not
-mentioned, the default value will be loaded.
+Every parameter of the advanced form can be set with this parameter. If the
+parameter is not mentioned, the default value will be loaded.
 
 | Parameter       | Default Value                            |
 |:----------------|:-----------------------------------------|
@@ -90,8 +101,8 @@ mentioned, the default value will be loaded.
 | `launch`          | JupyterLab                               |
 | `workspace_scope` | *empty* (home directory)                 |
 
-You can use the advanced form to generate a URL for the settings you want. The address bar contains
-the encoded parameters starting with `#/`.
+You can use the advanced form to generate a URL for the settings you want. The
+address bar contains the encoded parameters starting with `#/`.
 
 ### Combination of Quickstart and Git-Pull Feature
 
@@ -109,8 +120,7 @@ https://taurus.hrsk.tu-dresden.de/jupyter/hub/user-redirect/git-pull?repo=https:
 With the following link you will be redirected to a certain file in your
 home directory.
 
-[https://taurus.hrsk.tu-dresden.de/jupyter/user-redirect/notebooks/demo.ipynb]
-(https://taurus.hrsk.tu-dresden.de/jupyter/user-redirect/notebooks/demo.ipynb)
+[https://taurus.hrsk.tu-dresden.de/jupyter/user-redirect/notebooks/demo.ipynb](https://taurus.hrsk.tu-dresden.de/jupyter/user-redirect/notebooks/demo.ipynb)
 
 The file needs to exist, otherwise a 404 error will be thrown.
 
@@ -119,3 +129,106 @@ The file needs to exist, otherwise a 404 error will be thrown.
 
 This link would redirect to
 `https://taurus.hrsk.tu-dresden.de/jupyter/user/{login}/notebooks/demo.ipynb`.
+
+## Create a Shared Python Environment
+
+To provide a consistent Python environment, you can create a shared [workspace](../data_lifecycle/workspaces.md)
+and prepare a [Python virtual environment](../software/python_virtual_environments.md)
+in it. Then use a custom Jupyter Kernel to use this environment in JupyterHub.
+Please note the following:
+
+- Set the correct permissions for the workspace and all relevant subdirectories
+  and files via `chmod`.
+
+- Install all relevant Python packages in the shared Python virtual environment
+  (using either pip or conda). Note that the standard environments (such as
+  *production* or *test*) are not available in that case.
+
+- Modules can also be loaded in the Jupyter spawner via preload modules
+  (considering the Python version of your virtual environment).
+
+Set up your shared Python virtual environment for JupyterHub.
+
+!!! hint
+    For working with conda virtual environments, it may be necessary to configure your shell via
+    `conda init` as described in [Python virtual environments](../software/python_virtual_environments.md#conda-virtual-environment).
+
+=== "virtualenv"
+
+    ```console
+    marie@compute$ module load Python #Load default Python
+    [...]
+    marie@compute$ ws_allocate -F scratch python_virtual_environment_teaching 1
+    Info: creating workspace.
+    /scratch/ws/1/python_virtual_environment_teaching
+    [...]
+    marie@compute$ virtualenv --system-site-packages /scratch/ws/1/python_virtual_environment_teaching/env #Create virtual environment
+    [...]
+    marie@compute$ source /scratch/ws/1/python_virtual_environment_teaching/env/bin/activate    #Activate virtual environment. Example output: (envtest) bash-4.2$
+    marie@compute$ pip install ipykernel
+    Collecting ipykernel
+    [...]
+    Successfully installed ... ipykernel-5.1.0 ipython-7.5.0 ...
+    marie@compute$ pip install --upgrade pip
+    marie@compute$ python -m ipykernel install --user --name my-teaching-kernel --display-name="my teaching kernel"
+    Installed kernelspec my-teaching-kernel in .../.local/share/jupyter/kernels/my-teaching-kernel
+    marie@compute$ pip install [...] #Now install additional packages for your notebooks
+    marie@compute$ deactivate
+    marie@compute$ chmod g+rx /scratch/ws/1/python_virtual_environment_teaching -R #Make the environment accessible for others
+
+    ```
+
+=== "conda"
+
+    ```console
+    marie@compute$ module load Anaconda3 #Load Anaconda
+    [...]
+    marie@compute$ ws_allocate -F scratch conda_virtual_environment_teaching 1
+    Info: creating workspace.
+    /scratch/ws/1/conda_virtual_environment_teaching
+    [...]
+    marie@compute$ conda create --prefix /scratch/ws/1/conda_virtual_environment_teaching/conda-env python=3.8 #create virtual environment with Python version 3.8
+    [...]
+    marie@compute$ conda activate /scratch/ws/1/conda_virtual_environment_teaching/conda-env #activate conda-env virtual environment
+    marie@compute$ conda install ipykernel
+    [...]
+    marie@compute$ python -m ipykernel install --user --name my-teaching-kernel --display-name="my teaching kernel"
+    Installed kernelspec my-teaching-kernel in .../.local/share/jupyter/kernels/my-teaching-kernel
+    marie@compute$ conda install [...] # now install additional packages for your notebooks
+    marie@compute$ conda deactivate
+    marie@compute$ chmod g+rx /scratch/ws/1/conda_virtual_environment_teaching -R #Make the environment accessible for others
+
+    ```
+
+Now, users have to install the kernel in order to use the shared Python virtual
+environment in JupyterHub:
+=== "virtualenv"
+
+    ```console
+    marie@compute$ module load Python #Load default Python
+    [...]
+    marie@compute$ source /scratch/ws/1/python_virtual_environment_teaching/env/bin/activate #Activate virtual environment. Example output: (envtest) bash-4.2$
+    marie@compute$ python -m ipykernel install --user --name my-teaching-kernel --display-name="my teaching kernel"
+    Installed kernelspec my-teaching-kernel in .../.local/share/jupyter/kernels/my-teaching-kernel
+    marie@compute$ deactivate
+
+    ```
+
+=== "conda"
+
+    ```console
+    marie@compute$ module load Anaconda3 #Load Anaconda
+    [...]
+    marie@compute$ conda activate /scratch/ws/1/conda_virtual_environment_teaching
+    marie@compute$ python -m ipykernel install --user --name my-teaching-kernel --display-name="my teaching kernel"
+    Installed kernelspec my-teaching-kernel in .../.local/share/jupyter/kernels/my-teaching-kernel
+    marie@compute$ conda deactivate
+
+    ```
+
+After spawning the Notebook, you can select the kernel with the created Python
+virtual environment.
+
+!!! hint
+    You can also execute the commands for installing the kernel from the Jupyter
+    as described in [JupyterHub Teaching Example](jupyterhub_teaching_example.md). Then users do not
+    have to use the command line interface after the preparation.
diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub_teaching_example.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub_teaching_example.md
new file mode 100644
index 0000000000000000000000000000000000000000..639bc25ebac1b1082a2ec4692526589610341801
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub_teaching_example.md
@@ -0,0 +1,176 @@
+# JupyterHub Teaching Example
+
+Setting up a Jupyter Lab course involves additional steps beyond JupyterHub, such as creating
+course-specific environments and allowing participants to link and activate these environments
+during the course. This page includes a walk-through of these additional steps, with best practice
+examples for each part.
+
+## Context
+
+- The common situation described here is that one or several Jupyter Lab notebooks
+  (`ipynb` files) are available and prepared. Students are supposed to open these notebooks
+  through the [ZIH JupyterHub](../access/jupyterhub.md) and work through them during a course.
+
+- These notebooks are typically prepared for specific dependencies (Python packages)
+  that need to be activated by participants when opening the notebooks during the course.
+
+- These environments can either be chosen from the pre-configured ZIH virtualenv/conda
+  environments, or built in advance. We will focus on the custom environment approach here.
+
+## Prerequisites
+
+- A public git repository with the notebook files (`ipynb`) and all other starting files required
+  by participants. One option to host the repository is the [GitLab of TU Chemnitz](https://gitlab.hrz.tu-chemnitz.de/).
+- An [HPC project](../application/project_management.md) for teaching,
+  with students as registered participants.
+- For the tutor, shell access to the HPC resources and the project folder.
+
+## Preparation on the Lecturer's Side
+
+The following part describes several steps for the preparation of a course with the JupyterHub at
+ZIH.
+
+### 1. Create a custom Python environment
+
+Prepare a Python virtual environment (`virtualenv`) or conda virtual environment as described in
+[Python virtual environments](../software/python_virtual_environments.md). Note that when preparing
+a custom environment for a Jupyter Lab course, all participants will need read access to this
+environment. This is best done by storing the environment in either a [workspace](../data_lifecycle/workspaces.md)
+with a limited lifetime or in a projects folder (e.g. `/projects/p_lv_jupyter_course/`) without a
+limited lifetime.
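+
+A minimal sketch for the virtualenv case, assuming the environment is stored in the project folder
+mentioned above under the hypothetical name `workshop_env`:
+
+```console
+marie@compute$ module load Python
+[...]
+marie@compute$ virtualenv --system-site-packages /projects/p_lv_jupyter_course/workshop_env
+[...]
+marie@compute$ source /projects/p_lv_jupyter_course/workshop_env/bin/activate
+(workshop_env) marie@compute$ pip install ipykernel  # plus the packages needed for the course
+```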
+
+### 2. Clone the repository and store environment setup
+
+First prepare the `requirements.txt` or the `environment.yml` to persist the environment as
+described in [Python virtual environments](../software/python_virtual_environments.md).
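+
+A minimal sketch of persisting the environment, assuming the environment from step 1 is still
+activated:
+
+=== "virtualenv"
+    ```console
+    marie@compute$ pip freeze > requirements.txt
+    ```
+=== "conda"
+    ```console
+    marie@compute$ conda env export > environment.yml
+    ```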
+
+Then clone the repository of your course to your home directory or into a directory in the projects
+folder and add the file to the repository.
+
+=== "virtualenv"
+    ```console
+    marie@compute$ git clone git@gitlab.hrz.tu-chemnitz.de:zih/projects/p_lv_jupyter_course/clone_marie/jupyterlab_course.git
+    [...]
+    marie@compute$ cp requirements.txt /projects/p_lv_jupyter_course/clone_marie/jupyterlab_course
+    marie@compute$ cd /projects/p_lv_jupyter_course/clone_marie/jupyterlab_course
+    marie@compute$ git add requirements.txt
+    marie@compute$ git commit
+    marie@compute$ git push
+
+    ```
+=== "conda"
+    ```console
+    marie@compute$ git clone git@gitlab.hrz.tu-chemnitz.de:zih/projects/p_lv_jupyter_course/clone_marie/jupyterlab_course.git
+    [...]
+    marie@compute$ cp environment.yml /projects/p_lv_jupyter_course/clone_marie/jupyterlab_course
+    marie@compute$ cd /projects/p_lv_jupyter_course/clone_marie/jupyterlab_course
+    marie@compute$ git add environment.yml
+    marie@compute$ git commit
+    marie@compute$ git push
+
+    ```
+
+Now, you can re-create the environment and the whole course from the git repository in the future.
+
+To test the activation of the environment, use:
+
+=== "virtualenv"
+
+    ```console
+    marie@compute$ source /scratch/ws/1/python_virtual_environment_teaching/env/bin/activate #Activate virtual environment. Example output: (envtest) bash-4.2$
+
+    ```
+=== "conda"
+
+    ```console
+    marie@compute$ conda activate /scratch/ws/1/conda_virtual_environment_teaching
+
+    ```
+
+### 3. Prepare an activation file
+
+Create a file that installs the `ipykernel` into the user folder, linking the central `workshop_env`
+to the ZIH JupyterLab. A file `activate_workshop_env.sh` should have the following content:
+
+```bash
+/projects/jupyterlab_course/workshop_env/bin/python -m ipykernel install --user --name workshop_env --display-name="workshop_env"
+```
+
+!!! note
+    The file for installing the kernel should also be added to the git repository.
+
+### 4. Prepare the spawn link
+
+Have a look at the instructions to prepare
+[a custom spawn link in combination with the git-pull feature](jupyterhub_for_teaching.md#combination-of-quickstart-and-git-pull-feature).
+
+## Usage on the Student's Side
+
+### Preparing activation of the custom environment in notebooks
+
+When students open the notebooks (e.g. through a Spawn Link that pulls the Git files
+and notebooks from our repository), the Python environment must be activated first by installing a
+Jupyter kernel. This can be done inside the first notebook by running the prepared shell script.
+
+Therefore, the students will need to run the `activate_workshop_env.sh` file, which can be done
+in the first cell of the first notebook (e.g. inside `01_intro.ipynb`).
+
+In a code cell in `01_intro.ipynb`, add:
+
+```console
+!cd .. && sh activate_workshop_env.sh
+```
+
+When students run this file, the following output signals a successful setup.
+
+![Installed kernelspec](misc/kernelspec.png)
+{: align="center"}
+
+Afterwards, the `workshop_env` Jupyter kernel can be selected in the top-right corner of Jupyter
+Lab.
+
+!!! note
+    A few seconds may be needed until the environment becomes available in the list.
+
+## Test spawn link and environment activation
+
+During testing, it may be necessary to reset the workspace to the initial state. There are two steps
+involved:
+
+First, remove the cloned git repository from the user's home directory.
+
+!!! warning
+    Carefully check the syntax below to avoid removing the wrong files.
+
+```console
+cd ~
+rm -rf ./jupyterlab_course.git
+```
+
+Second, the IPython kernel `workshop_env` must be uninstalled for the user.
+
+```console
+jupyter kernelspec uninstall workshop_env
+```
+
+## Summary
+
+The following video shows an example of the process of opening the
+spawn link and activating the environment, from the students' perspective.
+Note that this video shows the case for a conda virtual environment.
+
+<div align="center">
+<video width="446" height="240" controls muted>
+  <source src="../misc/startup_hub.webm" type="video/webm">
+Your browser does not support the video tag.
+</video>
+</div>
+
+!!! note
+    - The spawn link may not work the first time a user logs in.
+
+    - Students must be advised to _not_ click "Start My Server" or edit the form,
+    if the server does not start automatically.
+
+    - If the server does not start automatically, click (or copy & paste) the spawn link again.
diff --git a/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md b/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md
index 6be427f53bd2247ab94a7abfdad25abfa01742d4..cec0634d6676a914a3694def88ff4298a8f84920 100644
--- a/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md
+++ b/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md
@@ -1,4 +1,4 @@
-# SSH Key Fingerprints
+# Key Fingerprints
 
 !!! hint
 
diff --git a/doc.zih.tu-dresden.de/docs/access/misc/kernelspec.png b/doc.zih.tu-dresden.de/docs/access/misc/kernelspec.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff58282d7c06029162907e8ca8df950d949358a3
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/access/misc/kernelspec.png differ
diff --git a/doc.zih.tu-dresden.de/docs/access/misc/startup_hub.webm b/doc.zih.tu-dresden.de/docs/access/misc/startup_hub.webm
new file mode 100644
index 0000000000000000000000000000000000000000..49f8a420ed372b7b7b4aec7e7bf0d8b4dc2861a1
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/access/misc/startup_hub.webm differ
diff --git a/doc.zih.tu-dresden.de/docs/access/ssh_login.md b/doc.zih.tu-dresden.de/docs/access/ssh_login.md
index 60e24a0f3fdcc479a34f477864944025193b0f57..a0fef440151984abbe662fe8f096de166eae6dad 100644
--- a/doc.zih.tu-dresden.de/docs/access/ssh_login.md
+++ b/doc.zih.tu-dresden.de/docs/access/ssh_login.md
@@ -148,7 +148,11 @@ We recommend one of the following applications:
 
   * [MobaXTerm](https://mobaxterm.mobatek.net): [ZIH documentation](misc/basic_usage_of_MobaXterm.pdf)
   * [PuTTY](https://www.putty.org): [ZIH documentation](misc/basic_usage_of_PuTTY.pdf)
-  * OpenSSH Server: [docs](https://docs.microsoft.com/de-de/windows-server/administration/openssh/openssh_install_firstuse)
+  * For Windows 10 (1809 and higher):
+    * [Windows Terminal](https://www.microsoft.com/store/productId/9N0DX20HK701)
+      together with the built-in [OpenSSH Client](https://docs.microsoft.com/de-de/windows-server/administration/openssh/openssh_overview)
+      (see the example below)
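+
+A connection can then be established directly from the terminal, for example (a sketch, assuming
+the login node address `tauruslogin.hrsk.tu-dresden.de` used elsewhere in this documentation):
+
+```console
+marie@local$ ssh marie@tauruslogin.hrsk.tu-dresden.de
+```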
+
+## SSH Key Fingerprints
 
 The page [key fingerprints](key_fingerprints.md) holds the up-to-date fingerprints for the login
 nodes. Make sure they match.
diff --git a/doc.zih.tu-dresden.de/docs/accessibility.md b/doc.zih.tu-dresden.de/docs/accessibility.md
index 418d8a11c98be59a121a47f0d497dfce1a79aa05..ba40340fe0d9995c27b4013d06a01400dc279e87 100644
--- a/doc.zih.tu-dresden.de/docs/accessibility.md
+++ b/doc.zih.tu-dresden.de/docs/accessibility.md
@@ -39,4 +39,4 @@ Postanschrift: Archivstraße 1, 01097 Dresden
 E-Mail: <info.behindertenbeauftragter@sk.sachsen.de>  
 Telefon: +49 351 564-12161  
 Fax: +49 351 564-12169  
-Webseite: [https://www.inklusion.sachsen.de](https://www.inklusion.sachsen.de)
+Webseite: [https://www.inklusion.sachsen.de/](https://www.inklusion.sachsen.de/)
diff --git a/doc.zih.tu-dresden.de/docs/application/acknowledgement.md b/doc.zih.tu-dresden.de/docs/application/acknowledgement.md
new file mode 100644
index 0000000000000000000000000000000000000000..2cbb89c2b88afe5933da677f53676e600387ce1c
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/application/acknowledgement.md
@@ -0,0 +1,25 @@
+# Acknowledgment
+
+To provide you with modern and powerful HPC systems in the future as well, we have to show that
+these systems help to advance research. For that purpose, we rely on your help. In most cases, the
+results of your computations are used for presentations and publications, especially in
+peer-reviewed magazines, journals, and conference proceedings. We kindly ask you to mention the
+HPC resource usage in the acknowledgment section of all publications that are based on granted HPC
+resources of TU Dresden. Examples:
+
+!!! example
+
+    The authors gratefully acknowledge the GWK support for funding this project by providing
+    computing time through the Center for Information Services and HPC (ZIH) at TU Dresden.
+
+!!! example
+
+    The authors are grateful to the Center for Information Services and High Performance Computing
+    [Zentrum für Informationsdienste und Hochleistungsrechnen (ZIH)] at TU Dresden for providing its
+    facilities for high throughput calculations.
+
+!!! example
+
+    Die Autoren danken Bund und Land Sachsen für die Unterstützung bei der Finanzierung dieses
+    Projektes mit der Bereitstellung von Rechenzeit durch das Zentrum für Informationsdienste und
+    Hochleistungsrechnen (ZIH) an der TU Dresden.
diff --git a/doc.zih.tu-dresden.de/docs/application/overview.md b/doc.zih.tu-dresden.de/docs/application/overview.md
index 59e6e6e78833b63dd358ecaeda361135aba7ef30..ac8a94a3c78eadbd1e18fa5b104169ceb05f5816 100644
--- a/doc.zih.tu-dresden.de/docs/application/overview.md
+++ b/doc.zih.tu-dresden.de/docs/application/overview.md
@@ -5,9 +5,33 @@ The HPC project manager should hold a professorship (university) or head a resea
 also apply for a "Schnupperaccount" (trial account) for one year to find out if the machine is
 useful for your application.
 
-An other able use case is to request resources for a courses.
+Another possible use case is to request resources for a course.
 
-To learn more about applying for a project or a course,
-check the following page: [https://tu-dresden.de/zih/hochleistungsrechnen/zugang][1]
+To learn more about applying for a project or a course, check the following page:
+[https://tu-dresden.de/zih/hochleistungsrechnen/zugang][1]
+
+## HPC-Login
+
+### Get Access without an existing ZIH Login
+
+To use the resources, you need a [valid ZIH login][2]. To get a ZIH login and the access to HPC,
+please use the [HPC login application form][3].
+
+### Get Access with a valid ZIH Login
+
+When you have a valid ZIH login, there are two possibilities for you: either the manager or
+administrator of an HPC project needs to add you as a member of this project via the
+[manager view](project_management.md#manage-project-members-dis-enable), or you have to
+[apply for a project](project_request_form.md).
+
+## Acknowledgment in Publications
+
+We kindly ask you to mention the HPC resource usage in the acknowledgment section of all
+publications that are based on granted HPC resources of the TU Dresden.
+
+!!! info "Acknowledgment Examples"
+    We provide some [acknowledgment examples](acknowledgement.md) that show you how to do that.
 
 [1]: https://tu-dresden.de/zih/hochleistungsrechnen/zugang
+[2]: https://tu-dresden.de/zih/dienste/service-katalog/zugangsvoraussetzung
+[3]: https://selfservice.zih.tu-dresden.de/l/index.php/hpclogin
diff --git a/doc.zih.tu-dresden.de/docs/application/project_management.md b/doc.zih.tu-dresden.de/docs/application/project_management.md
index 79e457cb2590d4109a160a8296b676c3384490d5..1180357d44027102020d4a76f51dfb46b2571472 100644
--- a/doc.zih.tu-dresden.de/docs/application/project_management.md
+++ b/doc.zih.tu-dresden.de/docs/application/project_management.md
@@ -22,7 +22,6 @@ to
 
 ## Access
 
-![Login Screen>](misc/external_login.png "Login Screen"){loading=lazy width=300 style="float:right"}
 [Entry point to the project management system](https://hpcprojekte.zih.tu-dresden.de/managers)
 The project leaders of an ongoing project and their accredited admins
 are allowed to login to the system. In general each of these persons
@@ -32,40 +31,36 @@ happen that a project leader of a foreign organization do not have a ZIH
 login. For this purpose, it is possible to set a local password:
 "[Missing Password](https://hpcprojekte.zih.tu-dresden.de/managers/members/missingPassword)".
 
-&nbsp;
-{: style="clear:right;"}
+![Login Screen](misc/external_login.png "Login Screen")
+{: align="center"}
 
-![Password Reset>](misc/password.png "Password Reset"){loading=lazy width=300 style="float:right"}
 On the 'Missing Password' page, it is possible to reset the passwords of a 'non-ZIH-login'. For this
 you write your login, which usually corresponds to your email address, in the field and click on
 'reset. Within 10 minutes the system sends a signed e-mail from <hpcprojekte@zih.tu-dresden.de> to
 the registered e-mail address. this e-mail contains a link to reset the password.
 
-&nbsp;
-{: style="clear:right;"}
+![Password Reset](misc/password.png "Password Reset")
+{: align="center"}
 
 ## Projects
 
-![Project Overview>](misc/overview.png "Project Overview"){loading=lazy width=300 style="float:right"}
 After login you reach an overview that displays all available projects. In each of these projects
 are listed, you are either project leader or an assigned project administrator. From this list, you
 have the option to view the details of a project or make a following project request. The latter is
 only possible if a project has been approved and is active or was. In the upper right area you will
 find a red button to log out from the system.
 
-&nbsp;
-{: style="clear:right;"}
+![Project Overview](misc/overview.png "Project Overview")
+{: align="center"}
 
-![Project Details>](misc/project_details.png "Project Details"){loading=lazy width=300 style="float:right"}
 The project details provide information about the requested and allocated resources. The other tabs
 show the employee and the statistics about the project.
 
-&nbsp;
-{: style="clear:right;"}
+![Project Details](misc/project_details.png "Project Details")
+{: align="center"}
 
 ### Manage Project Members (dis-/enable)
 
-![Project Members>](misc/members.png "Project Members"){loading=lazy width=300 style="float:right"}
 The project members can be managed under the tab 'employee' in the project details. This page gives
 an overview of all ZIH logins that are a member of a project and its status. If a project member
 marked in green, it can work on all authorized HPC machines when the project has been approved. If
@@ -82,23 +77,21 @@ and has a time delay of 5 minutes. An user can add or reactivate itself, with it
 project via the link on the end of the page. To prevent misuse this link is valid for 2 weeks and
 will then be renewed automatically.
 
-&nbsp;
-{: style="clear:right;"}
+![Project Members](misc/members.png "Project Members")
+{: align="center"}
 
-![Add Member>](misc/add_member.png "Add Member"){loading=lazy width=300 style="float:right"}
 The link leads to a page where you can sign in to a project by accepting the term of use. You need
 also an valid ZIH-Login. After this step it can take 1-1,5 h to transfer the login to all cluster
 nodes.
 
-&nbsp;
-{: style="clear:right;"}
+![Add Member](misc/add_member.png "Add Member")
+{: align="center"}
 
 ### Statistic
 
-![Project Statistic>](misc/stats.png "Project Statistic"){loading=lazy width=300 style="float:right"}
 The statistic is located under the tab 'Statistic' in the project details. The data will updated
 once a day an shows used CPU-time and used disk space of an project. Following projects shows also
 the data of the predecessor.
 
-&nbsp;
-{: style="clear:right;"}
+![Project Statistic](misc/stats.png "Project Statistic")
+{: align="center"}
diff --git a/doc.zih.tu-dresden.de/docs/application/project_request_form.md b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
index e829f316cb26f11b9b9048a889c8b5e918b2e870..6f22ff4b01e34c5e33ac9231906fc9c2d5dfb13e 100644
--- a/doc.zih.tu-dresden.de/docs/application/project_request_form.md
+++ b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
@@ -1,19 +1,20 @@
 # Project Request Form
 
+This page describes the steps to fill in the form on
+[https://hpcprojekte.zih.tu-dresden.de/](https://hpcprojekte.zih.tu-dresden.de/).
+
 ## First Step: Requester
 
-![picture 1: Login Screen >](misc/request_step1_b.png "Login Screen"){loading=lazy width=300 style="float:right"}
 The first step is asking for the personal information of the requester.
 **That's you**, not the leader of this project!
 If you have an ZIH-Login, you can use it.
 If not, you have to fill in the whole information.
 
-&nbsp;
-{: style="clear:right;"}
+![Picture 1: Login Screen](misc/request_step1_b.png "Login Screen")
+{: align="center"}
 
 ## Second Step: Project Details
 
-![picture 3: Project Details >][1]{loading=lazy width=300 style="float:right"}
 This Step is asking for general project Details.
 
 Any project have:
@@ -25,17 +26,16 @@ Any project have:
     * The approval is for a maximum of one year. Be careful: a duration from "May, 2013" till
       "May 2014" has 13 month.
 * a selected science, according to the DFG:
-  http://www.dfg.de/dfg_profil/gremien/fachkollegien/faecher/index.jsp
+  [http://www.dfg.de/dfg_profil/gremien/fachkollegien/faecher/index.jsp](http://www.dfg.de/dfg_profil/gremien/fachkollegien/faecher/index.jsp)
 * a sponsorship a kind of request a project leader/manager The leader of this project should hold a
   professorship (university) or is the head of the research group.
     * If you are this person, leave this fields free.
 
-&nbsp;
-{: style="clear:right;"}
+![Picture 2: Project Details][1]
+{: align="center"}
 
 ## Third step: Hardware
 
-![picture 4: Hardware >](misc/request_step3_machines.png "Hardware"){loading=lazy width=300 style="float:right"}
 This step inquire the required hardware. The
 [hardware specifications](../jobs_and_resources/hardware_overview.md) might help you to estimate,
 e. g. the compute time.
@@ -49,35 +49,32 @@ The project home is a shared storage in your project. Here you exchange data or
 for your project group in userspace. The directory is not intended for active calculations, for this
 the scratch is available.
 
-&nbsp;
-{: style="clear:right;"}
+![Picture 3: Hardware](misc/request_step3_machines.png "Hardware")
+{: align="center"}
 
 ## Fourth Step: Software
 
-![Picture 5: Software >](misc/request_step4_software.png "Software"){loading=lazy width=300 style="float:right"}
 Any information you will give us in this step, helps us to make a rough estimate, if you are able
 to realize your project. For example, some software requires its own licenses.
 
-&nbsp;
-{: style="clear:right;"}
+![Picture 4: Software](misc/request_step4_software.png "Software")
+{: align="center"}
 
 ## Fifth Step: Project Description
 
-![picture 6: Project Description >][2]{loading=lazy width=300 style="float:right"} Please enter a
-short project description here. This is especially important for trial accounts and courses. For
-normal HPC projects a detailed project description is additionally required, which you can upload
-here.
+Please enter a short project description here. This is especially important for trial accounts and
+courses. For normal HPC projects, a detailed project description is additionally required, which
+you can upload here.
 
-&nbsp;
-{: style="clear:right;"}
+![Picture 5: Project Description][2]
+{: align="center"}
 
 ## Sixth Step: Summary
 
-![picture 6: summary >](misc/request_step6.png "Summary"){loading=lazy width=300 style="float:right"}
 Check your entries and confirm the terms of use.
 
-&nbsp;
-{: style="clear:right;"}
+![Picture 6: Summary](misc/request_step6.png "Summary")
+{: align="center"}
 
 [1]: misc/request_step2_details.png "Project Details"
 [2]: misc/request_step5_description.png "Project Description"
diff --git a/doc.zih.tu-dresden.de/docs/archive/beegfs_on_demand.md b/doc.zih.tu-dresden.de/docs/archive/beegfs_on_demand.md
index e221188dcd1c33ef66815d38bffd4a8c5866f48e..84a400f4906832a6b96deae582d0604b9e63a3fc 100644
--- a/doc.zih.tu-dresden.de/docs/archive/beegfs_on_demand.md
+++ b/doc.zih.tu-dresden.de/docs/archive/beegfs_on_demand.md
@@ -105,7 +105,7 @@ Show contents of the previously created file, for example,
 cat .beegfs_11054579
 ```
 
-Note: don't forget to go over to your `home` directory where the file located
+Note: don't forget to go over to your home directory where the file is located
 
 Example output:
 
diff --git a/doc.zih.tu-dresden.de/docs/archive/cxfs_end_of_support.md b/doc.zih.tu-dresden.de/docs/archive/cxfs_end_of_support.md
index 2854bb2aeccb7d016e91dda4d9de6d717521bf46..72db43fe2ee8b99103cb89dff4022b23bad5e258 100644
--- a/doc.zih.tu-dresden.de/docs/archive/cxfs_end_of_support.md
+++ b/doc.zih.tu-dresden.de/docs/archive/cxfs_end_of_support.md
@@ -18,7 +18,7 @@ Files worth keeping can be moved
 * to the new [Intermediate Archive](../data_lifecycle/intermediate_archive.md) (max storage
     duration: 3 years) - see
     [MigrationHints](#migration-from-cxfs-to-the-intermediate-archive) below,
-* or to the [Log-term Archive](../data_lifecycle/preservation_research_data.md) (tagged with
+* or to the [Long-term Archive](../data_lifecycle/longterm_preservation.md) (tagged with
     metadata).
 
 To run the filesystem without support comes with the risk of losing data. So, please store away
diff --git a/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md b/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md
index 3d59d1cc7cf9e93e9a7f3ca78d22100978a72b8f..a1d2966509244d71d32c7bfa22d74c18b45be628 100644
--- a/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md
+++ b/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md
@@ -60,6 +60,10 @@ marie@compute$ ./Anaconda3-2019.03-Linux-x86_64.sh
 
 (during installation you have to confirm the license agreement)
 
+!!! hint
+    For working with conda virtual environments, it may be necessary to configure your shell via
+    `conda init` as described in [Python virtual environments](../software/python_virtual_environments.md#conda-virtual-environment).
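+
+    For example, for the bash shell (a minimal sketch; restart your shell afterwards):
+
+    ```console
+    marie@compute$ conda init bash
+    ```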
+
 Next step will install the anaconda environment into the home
 directory (`/home/userxx/anaconda3`). Create a new anaconda environment with the name `jnb`.
 
@@ -144,7 +148,7 @@ c.NotebookApp.allow_remote_access = True
 #SBATCH --time=02:30:00
 #SBATCH --mem=4000M
 #SBATCH -J "jupyter-notebook" # job-name
-#SBATCH -A p_marie
+#SBATCH -A p_number_crunch
 
 unset XDG_RUNTIME_DIR   # might be required when interactive instead of sbatch to avoid 'Permission denied error'
 srun jupyter notebook
diff --git a/doc.zih.tu-dresden.de/docs/archive/load_leveler.md b/doc.zih.tu-dresden.de/docs/archive/load_leveler.md
index 07daea3dbcef9d375a57f47dbec1d0d8a27d0491..7a96f4945d08650ca8c20592e140ab0f43bcfe16 100644
--- a/doc.zih.tu-dresden.de/docs/archive/load_leveler.md
+++ b/doc.zih.tu-dresden.de/docs/archive/load_leveler.md
@@ -287,7 +287,7 @@ This command will give you detailed job information.
 
 ### Job Status States
 
-|                  |     |                |
+| State            | Short | Description                |
 |------------------|-----|----------------|
 | Canceled         | CA  | The job has been canceled as by the `llcancel` command. |
 | Completed        | C   | The job has completed.                                  |
diff --git a/doc.zih.tu-dresden.de/docs/archive/migrate_to_atlas.md b/doc.zih.tu-dresden.de/docs/archive/migrate_to_atlas.md
index 051b65694ad03ebc248a813b3e46b400d4af286e..905f59a880cfafefa6633cdfdca3320feccd5b8f 100644
--- a/doc.zih.tu-dresden.de/docs/archive/migrate_to_atlas.md
+++ b/doc.zih.tu-dresden.de/docs/archive/migrate_to_atlas.md
@@ -7,13 +7,12 @@
 [Atlas](system_atlas.md) is a different machine than [Deimos](system_deimos.md), please have a look
 at the table:
 
-|                                                   |            |           |
-|---------------------------------------------------|------------|-----------|
-|                                                   | **Deimos** | **Atlas** |
-| **number of hosts**                               | 584        | 92        |
-| **cores per host**                                | 2...8      | 64        |
-| **memory \[GB\] per host**                        | 8...64     | 64..512   |
-| **example benchmark: SciMark (higher is better)** | 655        | 584       |
+|                                                   | Deimos     | Atlas     |
+|---------------------------------------------------|-----------:|----------:|
+| number of hosts                                   | 584        | 92        |
+| cores per host                                    | 2...8      | 64        |
+| memory \[GB\] per host                            | 8...64     | 64..512   |
+| example benchmark: SciMark (higher is better)     | 655        | 584       |
 
 A single thread on Atlas runs with a very poor performance in comparison
 with the 6 year old Deimos. The reason for this is that the AMD CPU
@@ -37,13 +36,12 @@ The most important changes are:
     `-M <memory per process in MByte>`, the default is 300 MB, e.g.
     `-M 2000`.
 
-|                       |        |                                                      |
-|-----------------------|--------|------------------------------------------------------|
-| Hosts on Atlas        | number | per process/core user memory limit in MB (-M option) |
-| nodes with 64 GB RAM  | 48     | 940                                                  |
-| nodes with 128 GB RAM | 24     | 1950                                                 |
-| nodes with 256 GB RAM | 12     | 4000                                                 |
-| nodes with 512 GB RAM | 8      | 8050                                                 |
+| Hosts on Atlas        | Count  | Per Process/Core User Memory Limit in MB (`-M` option) |
+|-----------------------|-------:|-------------------------------------------------------:|
+| nodes with 64 GB RAM  | 48     | 940                                                    |
+| nodes with 128 GB RAM | 24     | 1950                                                   |
+| nodes with 256 GB RAM | 12     | 4000                                                   |
+| nodes with 512 GB RAM | 8      | 8050                                                   |
 
 - Jobs with a job runtime greater than 72 hours (jobs that will run in
   the queue `long`) will be collected over the day and scheduled in a
@@ -98,7 +96,7 @@ compiler comes from the Open64 suite. For convenience, other compilers are insta
 shows good results as well. Please check the best compiler flags at
 [this overview] developer.amd.com/Assets/CompilerOptQuickRef-62004200.pdf.
 
-### MPI parallel applications
+### MPI Parallel Applications
 
 Please note the more convenient syntax on Atlas. Therefore, please use a
 command like
diff --git a/doc.zih.tu-dresden.de/docs/archive/overview.md b/doc.zih.tu-dresden.de/docs/archive/overview.md
index 7600ef01e81d7f623f616d28d70abbf73cb07ed2..dfcb393a253c916a86ab21649aa75eb509ee2862 100644
--- a/doc.zih.tu-dresden.de/docs/archive/overview.md
+++ b/doc.zih.tu-dresden.de/docs/archive/overview.md
@@ -3,4 +3,4 @@
 A warm welcome to the **archive**. You probably got here by following a link from within the compendium
 or by purpose.
 The archive holds outdated documentation for future reference.
-Hence, documentation in the archive, is not further updated.
+Hence, documentation in the archive is not further updated.
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_altix.md b/doc.zih.tu-dresden.de/docs/archive/system_altix.md
index aa61353f4bec0c143b7c86892d8f3cb0a3c41d00..08cc3a3e8f739780205ef7de75bcd618ee111dd9 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_altix.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_altix.md
@@ -72,16 +72,16 @@ The current SGI Altix is based on the dual core Intel Itanium 2
 processor (code name "Montecito"). One core has the following basic
 properties:
 
-|                                     |                            |
+| Property                            | Value                      |
 |-------------------------------------|----------------------------|
-| clock rate                          | 1.6 GHz                    |
-| integer units                       | 6                          |
-| floating point units (multiply-add) | 2                          |
-| peak performance                    | 6.4 GFLOPS                 |
+| Clock rate                          | 1.6 GHz                    |
+| Integer units                       | 6                          |
+| Floating point units (multiply-add) | 2                          |
+| Peak performance                    | 6.4 GFLOPS                 |
 | L1 cache                            | 2 x 16 kB, 1 clock latency |
 | L2 cache                            | 256 kB, 5 clock latency    |
 | L3 cache                            | 9 MB, 12 clock latency     |
-| front side bus                      | 128 bit x 200 MHz          |
+| Front side bus                      | 128 bit x 200 MHz          |
 
 The theoretical peak performance of all Altix partitions is hence about 13.1 TFLOPS.
 
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_venus.md b/doc.zih.tu-dresden.de/docs/archive/system_venus.md
index 56acf9b47081726c9662150f638ff430e099020c..d641e3d0380dfe93f00bc4e5e6d67bc2cacf18f1 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_venus.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_venus.md
@@ -21,7 +21,7 @@ hyperthreads.
 
 ### Filesystems
 
-Venus uses the same `home` filesystem as all our other HPC installations.
+Venus uses the same home filesystem as all our other HPC installations.
 For computations, please use `/scratch`.
 
 ## Usage
diff --git a/doc.zih.tu-dresden.de/docs/archive/systems_switched_off.md b/doc.zih.tu-dresden.de/docs/archive/systems_switched_off.md
index c4f9890ac3ad36580c617b6fb5292cb0b1ceffcb..0a9a50bb72c25b6920d41a66d2828ec57cd4c7b1 100644
--- a/doc.zih.tu-dresden.de/docs/archive/systems_switched_off.md
+++ b/doc.zih.tu-dresden.de/docs/archive/systems_switched_off.md
@@ -10,3 +10,29 @@ Documentation on former systems for future reference can be found on the followi
 - [Windows-HPC-Server Titan](system_titan.md)
 - [PC-Cluster Triton](system_triton.md)
 - [Shared-Memory-System Venus](system_venus.md)
+
+## Historical Overview
+
+| Year | System |
+|------|--------|
+| 1968 | Zeiss-Rechenautomat Nr. 1 (ZRA1) Performance: 150 to 200 instructions/s, 4096 storage cells with 48 bit each (magnetic drum) |
+| 1970 | Commissioning of large stand-alone computers, coupled stand-alone computers and terminals (BESM, ESER) |
+| 1976 | Computer network DELTA and graphic workstations |
+| 1981 | Deployment of the first microcomputers; experimental testing of local area networks (LAN) |
+| 1986 | Workstation computers replace mechanical devices; the first PC pools for teaching and studying are set up |
+| 1991 | Short-term operation of used mainframe computers |
+| 1993 | VP200-EX (857 MFlop/s Peak) |
+| 1996 | Development of infrastructure on the TU campus |
+| 1997 | SGI Origin2000 (21.8 GFlop/s, 56 CPUs, 17 GB RAM, 350 GB disk capacity) |
+| 1998 | Cray T3E (38.4 GFlop/s, 64 CPUs, 8 GB RAM, 100 GB disk capacity) |
+| 2001/02 | SGI Origin3800 (51.2 + 102.4 GFlop/s, 64 + 128 CPUs, 64 + 64 GB RAM, 790 GB disk capacity) |
+| 2004 | Itanium-Cluster Castillo |
+| 2005/06 | Hochleistungsrechner/Speicherkomplex: <br/> SGI Altix 4700: 13 TFlop/s, 6.5 TB RAM  <br/> PC-Farm: 13 TFlop/s, 5.5 TB RAM  <br/> SAN capacity: 136 TB  <br/> Tape archive: 1 PB, 2500 tapes |
+| 2007 | Setup PC-SAN <br/> NEC SX6: 72 GFlop/s |
+| 2008 | Microsoft HPC-System |
+| 2010 | IBM-Cluster iDataPlex |
+| 2012 | GPU-Cluster <br/>  HPC-Cluster Atlas: 50 TFlop/s Peak, 13 TB RAM |
+| 2012/13 | SGI UV 2000: 10.6 TFlop/s Peak, 8 TB RAM |
+| 2013 | HPC-Cluster Taurus (HRSK-II): 135 TFlop/s Peak, 18 TB RAM |
+| 2015 | HRSK-II Extension: 1.64 PFlop/s and 139 TB RAM |
+| 2017/18 | HPC-DA (HRSK-II Extension) |
diff --git a/doc.zih.tu-dresden.de/docs/archive/vampirtrace.md b/doc.zih.tu-dresden.de/docs/archive/vampirtrace.md
index 15746b60035e4ec7999159693dcaa56ca5f54f9f..edfa2a80f1b70902a5622f84d33efded97ee4dc3 100644
--- a/doc.zih.tu-dresden.de/docs/archive/vampirtrace.md
+++ b/doc.zih.tu-dresden.de/docs/archive/vampirtrace.md
@@ -47,7 +47,7 @@ The following sections show some examples depending on the parallelization type
 Compiling serial code is the default behavior of the wrappers. Simply replace the compiler by
 VampirTrace's wrapper:
 
-|                      |                               |
+|                      | Compile Command               |
 |----------------------|-------------------------------|
 | original             | `ifort a.f90 b.f90 -o myprog` |
 | with instrumentation | `vtf90 a.f90 b.f90 -o myprog` |
@@ -59,7 +59,7 @@ This will instrument user functions (if supported by compiler) and link the Vamp
 If your MPI implementation uses MPI compilers (this is the case on [Deimos](system_deimos.md)), you
 need to tell VampirTrace's wrapper to use this compiler instead of the serial one:
 
-|                      |                                      |
+|                      | Compile Command                      |
 |----------------------|--------------------------------------|
 | original             | `mpicc hello.c -o hello`             |
 | with instrumentation | `vtcc -vt:cc mpicc hello.c -o hello` |
@@ -68,7 +68,7 @@ MPI implementations without own compilers (as on the [Altix](system_altix.md) re
 link the MPI library manually. In this case, you simply replace the compiler by VampirTrace's
 compiler wrapper:
 
-|                      |                               |
+|                      | Compile Command               |
 |----------------------|-------------------------------|
 | original             | `icc hello.c -o hello -lmpi`  |
 | with instrumentation | `vtcc hello.c -o hello -lmpi` |
@@ -81,7 +81,7 @@ option `-vt:inst manual` to disable automatic instrumentation of user functions.
 When VampirTrace detects OpenMP flags on the command line, OPARI is invoked for automatic source
 code instrumentation of OpenMP events:
 
-|                      |                            |
+|                      | Compile Command            |
 |----------------------|----------------------------|
 | original             | `ifort -openmp pi.f -o pi` |
 | with instrumentation | `vtf77 -openmp pi.f -o pi` |
@@ -90,7 +90,7 @@ code instrumentation of OpenMP events:
 
 With a combination of the above mentioned approaches, hybrid applications can be instrumented:
 
-|                      |                                                     |
+|                      | Compile Command                                     |
 |----------------------|-----------------------------------------------------|
 | original             | `mpif90 -openmp hybrid.F90 -o hybrid`               |
 | with instrumentation | `vtf90 -vt:f90 mpif90 -openmp hybrid.F90 -o hybrid` |
diff --git a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
index ac9d6ca330650d91cd14854cecc7a67c6f12b5d2..1553ef4ac02dd2c38acaed54f198719756b1dbc2 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
@@ -14,6 +14,24 @@ documentation that is pure joy to read and use. It shall help to find answers an
 instead of being the bottleneck and a great annoyance. Therefore, it need some guide lines which are
 outlined in the following.
 
+## Responsibility and License
+
+This documentation and the repository have two licenses (cf. [Legal Notice](../legal_notice.md)):
+
+* All documentation is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
+* All software components are licensed under [MIT license](../license_mit.txt).
+
+These licenses also apply to your contributions.
+
+!!! note
+
+    Each user is fully and solely responsible for the content he/she creates and has to ensure that
+    he/she has the right to create it under the laws which apply.
+
+If you are in doubt, please contact us either via
+[GitLab Issue](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/issues)
+or via [Email](mailto:hpcsupport@zih.tu-dresden.de).
+
 ## Pages Structure and New Page
 
 The pages structure is defined in the configuration file `mkdocs.yaml`:
@@ -51,7 +69,6 @@ markdown dialects.
 * [Cheat Sheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
 * [Style Guide](https://github.com/google/styleguide/blob/gh-pages/docguide/style.md)
 
-
 ### Graphics and Attachments
 
 Images and graphics are an important part of technical documentation. This also holds for this HPC
@@ -127,6 +144,7 @@ further decoration by providing the desired title as quoted string after the typ
 
 ## Writing Style
 
+* Avoid using tabs both in markdown files and in `mkdocs.yaml`. Type spaces instead.
 * Capitalize headings, e.g. *Exclusive Reservation of Hardware*
 * Give keywords in link texts, e.g. [Code Blocks](#code-blocks-and-syntax-highlighting) is way more
   descriptive than [this subsection](#code-blocks-and-syntax-highlighting).
@@ -154,8 +172,6 @@ there is a list of conventions w.r.t. spelling and technical wording.
 |       | HPC-DA |
 | partition `ml` | ML partition, ml partition, `ml` partition, "ml" partition, etc. |
 
-
-
 ## Code Blocks and Command Prompts
 
 Showing commands and sample output is an important part of all technical documentation. To make
@@ -193,6 +209,7 @@ This should help to avoid errors.
 | `alpha` partition      | `marie@alpha$`   |
 | `romeo` partition      | `marie@romeo$`   |
 | `julia` partition      | `marie@julia$`   |
+| `dcv` partition        | `marie@dcv$`     |
 | Localhost              | `marie@local$`   |
 
 * **Always use a prompt**, even there is no output provided for the shown command.
@@ -201,7 +218,7 @@ This should help to avoid errors.
   an example invocation, perhaps with output, should be given with the normal `console` code block.
   See also [Code Block description below](#code-blocks-and-syntax-highlighting).
 * Using some magic, the prompt as well as the output is identified and will not be copied!
-* Stick to the [generic user name](#data-privacy-and-generic-user-name) `marie`.
+* Stick to the [generic user name](#data-privacy-and-generic-names) `marie`.
 
 ### Code Blocks and Syntax Highlighting
 
@@ -230,11 +247,11 @@ templates should give a general idea of invocation and thus, do not contain any
 
 ````markdown
 ```bash
-marie@local$ ssh -NL <local port>:<compute node>:<remote port> <zih login>@tauruslogin.hrsk.tu-dresden.de
+marie@local$ ssh -NL <local port>:<compute node>:<remote port> taurus
 ```
 
 ```console
-marie@local$ ssh -NL 5901:172.24.146.46:5901 marie@tauruslogin.hrsk.tu-dresden.de
+marie@local$ ssh -NL 5901:172.24.146.46:5901 taurus
 ```
 ````
 
@@ -253,7 +270,6 @@ srun a.out
 ```
 ````
 
-
 `python` for Python source code:
 
 ````markdown
@@ -279,9 +295,9 @@ Line numbers can be added via
 ```bash linenums="1"
 #!/bin/bash
 
-#SBATCH -N 1
-#SBATCH -n 23
-#SBATCH -t 02:10:00
+#SBATCH --nodes=1
+#SBATCH --ntasks=23
+#SBATCH --time=02:10:00
 
 srun a.out
 ```
@@ -297,9 +313,9 @@ Specific Lines can be highlighted by using
 ```bash hl_lines="2 3"
 #!/bin/bash
 
-#SBATCH -N 1
-#SBATCH -n 23
-#SBATCH -t 02:10:00
+#SBATCH --nodes=1
+#SBATCH --ntasks=23
+#SBATCH --time=02:10:00
 
 srun a.out
 ```
@@ -309,16 +325,17 @@ _Result_:
 
 ![lines](misc/highlight_lines.png)
 
-### Data Privacy and Generic User Name
+### Data Privacy and Generic Names
 
-Where possible, replace login, project name and other private data with clearly arbitrary placeholders.
-E.g., use the generic login `marie` and the corresponding project name `p_marie`.
+Where possible, replace logins, project names, and other private data with clearly arbitrary
+placeholders. In particular, use the generic login `marie` and the project title `p_number_crunch`
+as placeholders.
 
 ```console
 marie@login$ ls -l
-drwxr-xr-x   3 marie p_marie      4096 Jan 24  2020 code
-drwxr-xr-x   3 marie p_marie      4096 Feb 12  2020 data
--rw-rw----   1 marie p_marie      4096 Jan 24  2020 readme.md
+drwxr-xr-x   3 marie p_number_crunch      4096 Jan 24  2020 code
+drwxr-xr-x   3 marie p_number_crunch      4096 Feb 12  2020 data
+-rw-rw----   1 marie p_number_crunch      4096 Jan 24  2020 readme.md
 ```
 
 ## Mark Omissions
@@ -333,6 +350,23 @@ Stick to the Unix rules on optional and required arguments, and selection of ite
 * `[optional argument or value]`
 * `{choice1|choice2|choice3}`
 
-## Random things
+## Random Things
 
 **Remark:** Avoid using tabs both in markdown files and in `mkdocs.yaml`. Type spaces instead.
+
+All graphics and attachments are saved within the `misc` directory of the respective subdirectory
+in `docs`.
+
+The syntax to insert a graphic or attachment into a page is
+
+```markdown
+![PuTTY: Switch on X11](misc/putty2.jpg)
+{: align="center"}
+```
+
+The attribute `align` is optional. By default, graphics are left aligned. **Note:** It is crucial to
+have `{: align="center"}` on a new line.
+
+It is possible to add captions for tables and figures using `{: summary="This is a table caption"}`.
+The `summary` and `align` parameters can be combined as well:
+`{: summary="This is a table caption" align="top"}`.
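+
+A minimal sketch of a captioned table could look like this (placeholder content):
+
+```markdown
+| Header A | Header B |
+|----------|----------|
+| Value 1  | Value 2  |
+{: summary="This is a table caption" align="top"}
+```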
diff --git a/doc.zih.tu-dresden.de/docs/contrib/contribute_browser.md b/doc.zih.tu-dresden.de/docs/contrib/contribute_browser.md
index 45e8018d263300c03101f1374b6350ce58a131dd..0cca02d9dd40bd43f195f1ba78e1848f112bdc43 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/contribute_browser.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/contribute_browser.md
@@ -1,4 +1,4 @@
-# Contribution Guide for Browser-based Editing
+# Contribute via Browser
 
 In the following, it is outlined how to contribute to the
 [HPC documentation](https://doc.zih.tu-dresden.de/) of
@@ -96,6 +96,8 @@ into the `preview` branch of the repository.
 
 As stated earlier, all changes undergo a review process.
 This covers automated checks contained in the CI/CD pipeline and the review by a maintainer.
+This is to ensure the quality of all contributions, e. g. by checking our
+[content rules](content_rules.md).
 You can follow this process under
 [Merge requests](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/merge_requests)
 (where you initiated your merge request).
diff --git a/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md b/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
index dd44fafa136d63ae80267226f70dc00563507ba3..6e6ac44cf53b56d3b7a96959314117642d3ed06b 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
@@ -1,48 +1,101 @@
-# Contributing Using a Local Clone and a Docker Container
+# Contribute via Local Clone
 
-## Git Procedure
+In the following, it is outlined how to contribute to the
+[HPC documentation](https://doc.zih.tu-dresden.de/) of
+[TU Dresden/ZIH](https://tu-dresden.de/zih/) via a local clone of the Git repository. Although this
+document might seem very long and describes complex steps, contributing is quite easy - trust us.
+
+## Initial Setup of Your Local Clone
 
 Please follow this standard Git procedure for working with a local clone:
 
+1. Fork the project on
+[https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium)
+or request access to the project.
 1. Change to a local (unencrypted) filesystem. (We have seen problems running the container on an
 ecryptfs filesystem. So you might want to use e.g. `/tmp` as the start directory.)
 1. Create a new directory, e.g. with `mkdir hpc-wiki`
 1. Change into the new directory, e.g. `cd hpc-wiki`
 1. Clone the Git repository:
-`git clone git@gitlab.hrz.tu-chemnitz.de:zih/hpcsupport/hpc-compendium.git .` (don't forget the
+    1. `git clone git@gitlab.hrz.tu-chemnitz.de:zih/hpcsupport/hpc-compendium.git .` (don't forget the
 dot)
+    1. If you forked the repository, use
+`git clone git@gitlab.hrz.tu-chemnitz.de:<YOUR_LOGIN>/hpc-compendium.git .` (don't forget the dot).
+Add the original repository as a so-called remote:
+`git remote add upstream-zih git@gitlab.hrz.tu-chemnitz.de:zih/hpcsupport/hpc-compendium.git`
+
+## Working with Your Local Clone
+
+1. Whenever you start working on an issue, first make sure that your local data is up to date:
+    1. `git checkout preview`
+    1. `git pull origin preview`
+    1. `git pull upstream-zih preview` (only required when you forked the project)
 1. Create a new feature branch for you to work in. Ideally, name it like the file you want to
-modify or the issue you want to work on, e.g.: `git checkout -b issue-174`. (If you are uncertain
-about the name of a file, please look into `mkdocs.yaml`.)
+modify or the issue you want to work on, e.g.:
+`git checkout -b 174-check-contribution-documentation` for issue 174 with title "Check contribution
+documentation". (If you are uncertain about the name of a file, please look into `mkdocs.yaml`.)
 1. Improve the documentation with your preferred editor, i.e. add new files and correct mistakes.
-automatically by our CI pipeline.
 1. Use `git add <FILE>` to select your improvements for the next commit.
 1. Commit the changes with `git commit -m "<DESCRIPTION>"`. The description should be a meaningful
 description of your changes. If you work on an issue, please also add "Closes 174" (for issue 174).
-1. Push the local changes to the GitLab server, e.g. with `git push origin issue-174`.
+1. Push the local changes to the GitLab server, e.g. with
+`git push origin 174-check-contribution-documentation`.
 1. As an output you get a link to create a merge request against the preview branch.
 1. When the merge request is created, a continuous integration (CI) pipeline automatically checks
-your contributions.
+your contributions. If you forked the repository, these automatic checks are not available, but you
+can [run checks locally](#run-the-proposed-checks-inside-container).
+
+!!! tip
+
+    When you contribute, please follow our [content rules](content_rules.md) to make incorporating
+    your changes easy. We also check these rules via continuous integration checks and/or reviews.
+    You can find the details and commands to [preview your changes](#start-the-local-web-server) and
+    [apply checks](#run-the-proposed-checks-inside-container) below.
+
+## Merging of Forked Repositories
+
+When you have forked the repository as mentioned above, the process for merging is a bit different
+from internal merge requests. Because branches of forks are not automatically checked by CI,
+someone with at least developer access needs to do some more steps to incorporate the changes of
+your MR:
+
+1. The developer informs you about the start of the merging process.
+1. The developer needs to review your changes to make sure that your changes are specific and do
+not introduce problems, as changes to the Dockerfile or any script could.
+1. The developer needs to create a branch in our repository. Let's call this "internal MR branch".
+1. The developer needs to change the target branch of your MR from "preview" to "internal MR branch".
+1. The developer needs to merge it.
+1. The developer needs to open another MR from "internal MR branch" to "preview" to check whether
+   the changes pass the CI checks.
+1. The developer needs to fix things that were found by CI.
+1. The developer informs you about the MR or asks for your support while fixing the CI.
 
-You can find the details and commands to preview your changes and apply checks in the next section.
+When you follow our [content rules](content_rules.md) and
+[run checks locally](#run-the-proposed-checks-inside-container), you make this process
+faster.
 
-## Preparation
+## Tools to Ensure Quality
 
 Assuming you already have a working Docker installation and have cloned the repository as mentioned
 above, a few more steps are necessary.
 
-* a working Docker installation
-* all necessary access/execution rights
-* a local clone of the repository in the directory `./hpc-wiki`
-
-Build the docker image. This might take a bit longer, but you have to
-run it only once in a while.
+Build the Docker image. This might take a bit longer, as `mkdocs` and other necessary software
+needs to be downloaded, but you have to run it only once in a while.
+The image can be built with the following commands:
 
 ```bash
 cd hpc-wiki
+doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh
 docker build -t hpc-compendium .
 ```
 
+To avoid a lot of retyping, use the following in your shell:
+
+```bash
+alias wikiscript="docker run --name=hpc-compendium --rm -w /docs --mount src=$PWD,target=/docs,type=bind hpc-compendium"
+alias wiki="docker run --name=hpc-compendium -p 8000:8000 --rm -w /docs --mount src=$PWD/doc.zih.tu-dresden.de,target=/docs,type=bind hpc-compendium"
+```
+
 ## Working with the Docker Container
 
 Here is a suggestion of a workflow which might be suitable for you.
@@ -52,7 +105,7 @@ Here is a suggestion of a workflow which might be suitable for you.
 The command(s) to start the dockerized web server is this:
 
 ```bash
-docker run --name=hpc-compendium -p 8000:8000 --rm -w /docs --mount src="$(pwd)"/doc.zih.tu-dresden.de,target=/docs,type=bind hpc-compendium bash -c "mkdocs build && mkdocs serve -a 0.0.0.0:8000"
+wiki mkdocs serve -a 0.0.0.0:8000
 ```
 
 You can view the documentation via `http://localhost:8000` in your browser, now.
@@ -62,6 +115,12 @@ You can view the documentation via `http://localhost:8000` in your browser, now.
     You can keep the local web server running in this shell to always have the opportunity to see
     the result of your changes in the browser. Simply open another terminal window for other
     commands.
+    If you cannot see the page in your browser, check if you can get the URL for your browser's
+    address bar from a different terminal window:
+
+    ```bash
+    echo http://$(docker inspect -f "{{.NetworkSettings.IPAddress}}" $(docker ps -qf "name=hpc-compendium")):8000
+    ```
 
 You can now update the contents in you preferred editor. The running container automatically takes
 care of file changes and rebuilds the documentation whenever you save a file.
@@ -77,23 +136,19 @@ In our continuous integration (CI) pipeline, a merge request triggers the automa
 * correct spelling,
 * correct text format.
 
-If one of them fails, the merge request will not be accepted. To prevent this, you can run these
-checks locally and adapt your files accordingly.
-
-To avoid a lot of retyping, use the following in your shell:
-
-```bash
-alias wiki="docker run --name=hpc-compendium --rm -it -w /docs --mount src=$PWD/doc.zih.tu-dresden.de,target=/docs,type=bind hpc-compendium bash -c"
-```
+These checks ensure a high quality and consistency of the content and follow our
+[content rules](content_rules.md). If one of them fails, the merge request will not be accepted. To
+prevent this, you can run these checks locally and adapt your files accordingly.
 
 You are now ready to use the different checks, however we suggest to try the pre-commit hook.
 
 #### Pre-commit Git Hook
 
-We recommend to automatically run checks whenever you try to commit a change. In this case, failing
-checks prevent commits (unless you use option `--no-verify`). This can be accomplished by adding a
-pre-commit hook to your local clone of the repository. The following code snippet shows how to do
-that:
+We have several checks on the markdown sources to ensure a consistent and high quality of the
+documentation. We recommend to automatically run checks whenever you try to commit a change. In this
+case, failing checks prevent commits (unless you use option `--no-verify`). This can be accomplished
+by adding a pre-commit hook to your local clone of the repository. The following code snippet shows
+how to do that:
 
 ```bash
 cp doc.zih.tu-dresden.de/util/pre-commit .git/hooks/
@@ -111,40 +166,58 @@ Read on if you want to run a specific check.
 If you want to check whether the markdown files are formatted properly, use the following command:
 
 ```bash
-wiki 'markdownlint docs'
+wiki markdownlint docs
 ```
 
 #### Spell Checker
 
-For spell-checking a single file, , e.g.
-`doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md`, use:
+For spell-checking a single file, e.g.
+`doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md`, use:
 
 ```bash
-wiki 'util/check-spelling.sh docs/software/big_data_frameworks_spark.md'
+wikiscript doc.zih.tu-dresden.de/util/check-spelling.sh doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md
 ```
 
 For spell-checking all files, use:
 
 ```bash
-wiki 'find docs -type f -name "*.md" | xargs -L1 util/check-spelling.sh'
+wikiscript doc.zih.tu-dresden.de/util/check-spelling.sh -a
 ```
 
 This outputs all words of all files that are unknown to the spell checker.
 To let the spell checker "know" a word, append it to
 `doc.zih.tu-dresden.de/wordlist.aspell`.
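+
+For example (`mynewword` is a placeholder):
+
+```console
+marie@local$ echo "mynewword" >> doc.zih.tu-dresden.de/wordlist.aspell
+```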
 
+#### Check Pages Structure
+
+The script `util/check-no-floating.sh` first checks the hierarchy depth of the pages structure and
+then tests whether every markdown file is included in the navigation section of the
+`mkdocs.yaml` file. Invoke it as follows:
+
+```bash
+wikiscript doc.zih.tu-dresden.de/util/check-no-floating.sh doc.zih.tu-dresden.de
+```
+
 #### Link Checker
 
-To check a single file, e.g.
-`doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md`, use:
+No one likes dead links. Therefore, we check the internal and external links within the markdown
+source files. To check a single file, e.g.
+`doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md`, use:
+
+```bash
+wikiscript doc.zih.tu-dresden.de/util/check-links.sh docs/software/big_data_frameworks.md
+```
+
+The script can also check all modified files, i.e., markdown files which are part of the repository
+and different from the `main` branch. Use this script before committing your changes to make sure
+your commit passes the CI/CD pipeline:
 
 ```bash
-wiki 'markdown-link-check docs/software/big_data_frameworks_spark.md'
+wikiscript doc.zih.tu-dresden.de/util/check-links.sh
 ```
 
-To check whether there are links that point to a wrong target, use
-(this may take a while and gives a lot of output because it runs over all files):
+To check all markdown files, which may take a while and give a lot of output, use:
 
 ```bash
-wiki 'find docs -type f -name "*.md" | xargs -L1 markdown-link-check'
+wikiscript doc.zih.tu-dresden.de/util/check-links.sh -a
 ```
diff --git a/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md b/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
index e2e11cbdd5631d9875f03d437fa1c1efeb84044f..2afbf7fd2a0aaacce05fd69e0789079c394f5792 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
@@ -15,6 +15,22 @@ three possible ways how to contribute to this documentation. These ways are outl
 Please refer to the [content guide lines page](content_guide_lines.md) regarding markdown syntax
 and writing style.
 
+Even though we try to cover all aspects of working with the ZIH systems and keep the documentation
+up to date, you might miss something. In principle, there are three possible ways you can
+contribute to this documentation, as outlined below.
+
+## Content Rules
+
+To ensure a high-quality and consistent documentation and to make it easier for readers to
+understand all content, we set some [content rules](content_rules.md). Please follow these rules
+when contributing! Furthermore, reviewing your changes takes less time and your improvements appear
+faster in the official documentation.
+
+!!! note
+
+    Each user is fully and solely responsible for the content he/she creates and has to ensure that
+    he/she has the right to create it under the laws which apply.
+
 ## Contribute via Issue
 
 Users can contribute to the documentation via the
@@ -35,26 +51,15 @@ documentation.
 
 ## Contribute via Web IDE
 
-GitLab offers a rich and versatile web interface to work with repositories. To fix typos and edit
-source files, follow these steps:
-
-1. Navigate to the repository at
-[https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium)
-and log in.
-1. Select the right branch.
-1. Select the file of interest in `doc.zih.tu-dresden.de/docs/...` and click the `Edit` button.
-1. A text and commit editor are invoked: Do your changes, add a meaningful commit message and commit
-   the changes.
-
-The more sophisticated integrated Web IDE is reached from the top level menu of the repository or
-by selecting any source file.
-
-Other git services might have an equivalent web interface to interact with the repository. Please
-refer to the corresponding documentation for further information.
+If you have a web browser (most probably you are using it to read this page) and want to contribute
+to the documentation, you are good to go. GitLab offers a rich and versatile web interface to work
+with repositories. To start fixing typos and edit source files, please find more information on
+[Contributing via web browser](contribute_browser.md).
 
-## Contribute Using Git Locally
+## Contribute via Local Clone
 
 For experienced Git users, we provide a Docker container that includes all checks of the CI engine
 used in the back-end. Using them should ensure that merge requests will not be blocked
 due to automatic checking.
-For details, refer to the page [Work Locally Using Containers](contribute_container.md).
+The page on [Contributing via local clone](contribute_container.md) provides you with the details
+about how to set up and use your local clone.
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/experiments.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/experiments.md
deleted file mode 100644
index 22f704bdb3932bc61f78039cba6d10ca858cbbf7..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/experiments.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Structuring Experiments
-
-* Input data
-* Calculation results
-* Log files
-* Submission scripts (examples / code for survival)
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/file_systems.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/file_systems.md
index 4174e2b46c0ff69b3fd6d9a12b0cf626e296bd88..c738b4bc4a849a06956888fd616362452a8fe4d6 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/file_systems.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/file_systems.md
@@ -1,4 +1,4 @@
-# Overview
+# Filesystems
 
 As soon as you have access to ZIH systems, you have to manage your data. Several filesystems are
 available. Each filesystem serves for special purpose according to their respective capacity,
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
index bcfc86b6b35f01bc0a5a1eebffdf65ee6319d171..894626208947186e48ba7d08b439cf6aace48655 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
@@ -1,12 +1,12 @@
 # Intermediate Archive
 
 With the "Intermediate Archive", ZIH is closing the gap between a normal disk-based filesystem and
-[Long-term Archive](preservation_research_data.md). The Intermediate Archive is a hierarchical
+[Long-term Archive](longterm_preservation.md). The Intermediate Archive is a hierarchical
 filesystem with disks for buffering and tapes for storing research data.
 
 Its intended use is the storage of research data for a maximal duration of 3 years. For storing the
 data after exceeding this time, the user has to supply essential metadata and migrate the files to
-the [Long-term Archive](preservation_research_data.md). Until then, she/he has to keep track of her/his
+the [Long-term Archive](longterm_preservation.md). Until then, she/he has to keep track of her/his
 files.
 
 Some more information:
@@ -20,13 +20,16 @@ Some more information:
 ## Access the Intermediate Archive
 
 For storing and restoring your data in/from the "Intermediate Archive" you can use the tool
-[Datamover](../data_transfer/datamover.md). To use the DataMover you have to login to ZIH systems.
+[Datamover](../data_transfer/datamover.md). To use the Datamover, you have to log in to ZIH systems.
 
 ### Store Data
 
 ```console
 marie@login$ dtcp -r /<directory> /archiv/<project or user>/<directory> # or
 marie@login$ dtrsync -av /<directory> /archiv/<project or user>/<directory>
+# example:
+marie@login$ dtcp -r /scratch/marie/results /archiv/marie/ # or
+marie@login$ dtrsync -av /scratch/marie/results /archiv/marie/results
 ```
 
 ### Restore Data
@@ -34,11 +37,16 @@ marie@login$ dtrsync -av /<directory> /archiv/<project or user>/<directory>
 ```console
 marie@login$ dtcp -r /archiv/<project or user>/<directory> /<directory> # or
 marie@login$ dtrsync -av /archiv/<project or user>/<directory> /<directory>
+# example:
+marie@login$ dtcp -r /archiv/marie/results /scratch/marie/ # or
+marie@login$ dtrsync -av /archiv/marie/results /scratch/marie/results
 ```
 
-### Examples
+!!! note "Listing files in archive"
 
-```console
-marie@login$ dtcp -r /scratch/rotscher/results /archiv/rotscher/ # or
-marie@login$ dtrsync -av /scratch/rotscher/results /archiv/rotscher/results
-```
+    The intermediate archive is not mounted on the login nodes, but only on the [export nodes](../data_transfer/export_nodes.md).
+
+    In order to list the user's files in the archive, use the `dtls` command:
+
+    ```console
+    marie@login$ dtls /archiv/$USER/
+    ```
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/preservation_research_data.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/longterm_preservation.md
similarity index 50%
rename from doc.zih.tu-dresden.de/docs/data_lifecycle/preservation_research_data.md
rename to doc.zih.tu-dresden.de/docs/data_lifecycle/longterm_preservation.md
index 79ae1cf00b45f8bf46bc054e1502fc9404417b75..3b1ad0c9c595fa4d09c0e113b65c82a71b274a35 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/preservation_research_data.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/longterm_preservation.md
@@ -1,4 +1,4 @@
-# Long-term Preservation for Research Data
+# Long-Term Preservation of Research Data
 
 ## Why should research data be preserved?
 
@@ -74,11 +74,65 @@ Below are some examples:
 
 ## Where can I get more information about management of research data?
 
-Go to [http://www.forschungsdaten.org/en/](http://www.forschungsdaten.org/en/) to find more
-information about managing research data.
+Please visit the wiki [forschungsdaten.org](https://www.forschungsdaten.org/en/) to learn more about
+all of the different aspects of research data management.
 
-## I want to store my research data at ZIH. How can I do that?
+For questions or individual consultations regarding research data management in general or any of
+its specific aspects, you can contact the
+[Service Center Research Data](https://tu-dresden.de/forschung-transfer/services-fuer-forschende/kontaktstelle-forschungsdaten?set_language=en)
+(Kontaktstelle Forschungsdaten) of TU Dresden.
 
-Long-term preservation of research data is under construction at ZIH and in a testing phase.
-Nevertheless you can already use the archiving service. If you would like to become a test
-user, please write an E-Mail to [Dr. Klaus Köhler](mailto:klaus.koehler@tu-dresden.de).
+## I want to archive my research data at ZIH safely. How can I do that?
+
+ZIH offers two different services for archiving research data at TU Dresden. Both of them ensure
+high data safety by duplicating data internally at two separate locations and require some data
+preparation (e.g., packaging), but they serve different use cases:
+
+### Storing very infrequently used data during the course of the project
+
+The intermediate archive is a tape storage that is easily accessible as a directory
+(`/archiv/<HRSK-project>/` or `/archiv/<login>/`) via the
+[export nodes](../data_transfer/export_nodes.md)
+and the
+[Datamover tools](https://doc.zih.tu-dresden.de/data_transfer/datamover/), which you use to move
+your data there. For detailed information, please visit the
+[ZIH intermediate archive documentation](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/backup_archiv/archivierung_am_zih#section-2-1).
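+
+For example, a minimal sketch of copying a result directory into the project-related archive via
+the Datamover and listing its contents afterwards (the paths and the project name
+`p_number_crunch` are placeholders) could look like:
+
+```console
+marie@login$ dtcp -r /scratch/ws/1/marie-results /archiv/p_number_crunch/.
+marie@login$ dtls /archiv/p_number_crunch/
+```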
+
+!!! note
+
+    Using the HRSK-project-related archive is preferable to the login-related archive, as it
+    allows access rights and responsibility to be shared across multiple researchers, which is
+    important given the common staff turnover in research.
+
+The use of the intermediate archive is usually limited to the duration of the corresponding
+research project. Afterwards, the data has to be tidied up, removed, and submitted to a
+long-term repository (see next section).
+
+The intermediate archive is the preferred service when you need to keep large, mostly unused data
+volumes during the course of your research project, e.g., if you want or need to free storage
+capacity but are not yet able to identify final or relevant datasets for long-term archival.
+
+If you are able to identify complete and final datasets, which you probably won't use actively
+anymore, then the repositories described in the next section may be the more appropriate choice.
+
+### Archiving data beyond the project lifetime, for 10 years and above
+
+According to good scientific practice (cf.
+[DFG guidelines, #17](https://www.dfg.de/download/pdf/foerderung/rechtliche_rahmenbedingungen/gute_wissenschaftliche_praxis/kodex_gwp.pdf))
+and
+[TU Dresden research data guidelines](https://tu-dresden.de/tu-dresden/qualitaetsmanagement/ressourcen/dateien/wisprax/Leitlinien-fuer-den-Umgang-mit-Forschungsdaten-an-der-TU-Dresden.pdf),
+relevant research data needs to be archived for at least 10 years. The
+[OpARA service](https://opara.zih.tu-dresden.de/xmlui/) (Open Access Repository and Archive) is the
+joint research data repository service for Saxon universities to address this requirement.
+
+Data can be uploaded and, to ensure that the data remains understandable in the long term,
+additional metadata and descriptions must be added. Optionally, large datasets can be imported
+beforehand. In this case, please contact the
+[TU Dresden Service Desk](mailto:servicedesk@tu-dresden.de?subject=OpARA:%20Data%20Import).
+Optionally, data can also be **published** by OpARA. To ensure data quality, data submissions
+undergo a review process.
+
+Beyond OpARA, it is also recommended to use discipline-specific data repositories for data
+publications. Usually, those are well known within a scientific community and offer better-fitting
+options for data description and classification. Please visit [re3data.org](https://re3data.org)
+to look up a suitable one for your discipline.
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/lustre.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/lustre.md
index d08a5d5f59490a8236fb6710b28d24d9a01fcfe6..df713cbf19f723575a0c39f40cccc160e16f8d4b 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/lustre.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/lustre.md
@@ -1,4 +1,4 @@
-# Lustre Filesystems
+# Lustre
 
 ## Large Files in /scratch
 
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md
index a0152832601f63ef68ecb2191414265896237434..1b5561db6c93052bd4ee68fc119ac796e0a3c4c9 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md
@@ -46,7 +46,7 @@ filesystems.
 
 Keep in mind that every workspace has a storage duration. Thus, be careful with the expire date
 otherwise it could vanish. The core data of your project should be [backed up](#backup) and the most
-important data should be [archived](preservation_research_data.md).
+important data should be [archived (long-term preservation)](longterm_preservation.md).
 
 ### Backup
 
@@ -105,7 +105,7 @@ expected? (software and version)
 ### Metadata
 
 Another important aspect is the Metadata. It is sufficient to use
-[Metadata](preservation_research_data.md#what-are-meta-data) for your HPC project. Metadata
+[Metadata](longterm_preservation.md#what-are-meta-data) for your HPC project. Metadata
 standards, i.e.,
 [Dublin core](http://dublincore.org/resources/metadata-basics/),
 [OME](https://www.openmicroscopy.org/),
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/permanent.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/permanent.md
index 14d7fc3e5e74819d568410340825934cb55d9960..3c75c37faf6650adbbad9a87bcfd228c9b55473d 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/permanent.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/permanent.md
@@ -93,4 +93,4 @@ In case a quota is above its limits:
     - For later use (weeks...months) at the ZIH systems, build and zip tar
       archives with meaningful names or IDs and store them, e.g., in a workspace in the
       [warm archive](warm_archive.md) or an [archive](intermediate_archive.md)
-    - Refer to the hints for [long term preservation for research data](preservation_research_data.md)
+    - Refer to the hints for [long-term preservation of research data](longterm_preservation.md)
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
index cad27c4df4070206644612d85d7fadc7658e15f4..6180e5db831c8faf69a8752247ad5b8ee5ef6313 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
@@ -130,6 +130,14 @@ marie@login$ ws_extend -F scratch my-workspace 40
 
 it will now expire in 40 days **not** 130 days.
 
+### Send Reminder for Workspace Expiry Date
+
+Send a calendar invitation by email to ensure that the expiration date of a workspace is not forgotten:
+
+```console
+marie@login$ ws_send_ical -F scratch my-workspace -m marie.testuser@tu-dresden.de
+```
+
 ### Deletion of a Workspace
 
 To delete a workspace use the `ws_release` command. It is mandatory to specify the name of the
@@ -176,7 +184,7 @@ well as a workspace that already contains data.
 ## Linking Workspaces in HOME
 
 It might be valuable to have links to personal workspaces within a certain directory, e.g., your
-`home` directory. The command `ws_register DIR` will create and manage links to all personal
+home directory. The command `ws_register DIR` will create and manage links to all personal
 workspaces within in the directory `DIR`. Calling this command will do the following:
 
 - The directory `DIR` will be created if necessary.
@@ -276,7 +284,7 @@ marie@login$ qinfo quota /warm_archive/ws/
 Note that the workspaces reside under the mountpoint `/warm_archive/ws/` and not `/warm_archive`
 anymore.
 
-## F.A.Q
+## FAQ
 
 **Q**: I am getting the error `Error: could not create workspace directory!`
 
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md b/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
index 41333949cb352630294ccb3a2ffac7ea65d980e6..28aba7bbfdcec8411f6510061d509c949d128f34 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
@@ -1,7 +1,7 @@
-# Transferring Files Between ZIH Systems
+# Datamover - Data Transfer Inside ZIH Systems
 
-With the **datamover**, we provide a special data transfer machine for transferring data with best
-transfer speed between the filesystems of ZIH systems. The datamover machine is not accessible
+With the **Datamover**, we provide a special data transfer machine for transferring data with best
+transfer speed between the filesystems of ZIH systems. The Datamover machine is not accessible
 through SSH as it is dedicated to data transfers. To move or copy files from one filesystem to
 another filesystem, you have to use the following commands:
 
@@ -23,7 +23,7 @@ There are the commands `dtinfo`, `dtqueue`, `dtq`, and `dtcancel` to manage your
 and jobs.
 
 * `dtinfo` shows information about the nodes of the data transfer machine (like `sinfo`).
-* `dtqueue` and `dtq` show all your data transfer jobs (like `squeue -u $USER`).
+* `dtqueue` and `dtq` show all your data transfer jobs (like `squeue --me`).
 * `dtcancel` signals data transfer jobs (like `scancel`).
 
 To identify the mount points of the different filesystems on the data transfer machine, use
@@ -37,7 +37,7 @@ To identify the mount points of the different filesystems on the data transfer m
 |                    | `/warm_archive/ws`   | `/warm_archive/ws`                 |
 |                    | `/home`              | `/home`                            |
 |                    | `/projects`          | `/projects`                        |
-| **Archive**        |                      | `/archive`                         |
+| **Archive**        |                      | `/archiv`                          |
 | **Group storage**  |                      | `/grp/<group storage>`             |
 
 ## Usage of Datamover
@@ -45,7 +45,7 @@ To identify the mount points of the different filesystems on the data transfer m
 !!! example "Copying data from `/beegfs/global0` to `/projects` filesystem."
 
     ``` console
-    marie@login$ dtcp -r /beegfs/global0/ws/marie-workdata/results /projects/p_marie/.
+    marie@login$ dtcp -r /beegfs/global0/ws/marie-workdata/results /projects/p_number_crunch/.
     ```
 
 !!! example "Moving data from `/beegfs/global0` to `/warm_archive` filesystem."
@@ -57,7 +57,7 @@ To identify the mount points of the different filesystems on the data transfer m
 !!! example "Archive data from `/beegfs/global0` to `/archiv` filesystem."
 
     ``` console
-    marie@login$ dttar -czf /archiv/p_marie/results.tgz /beegfs/global0/ws/marie-workdata/results
+    marie@login$ dttar -czf /archiv/p_number_crunch/results.tgz /beegfs/global0/ws/marie-workdata/results
     ```
 
 !!! warning
@@ -66,4 +66,34 @@ To identify the mount points of the different filesystems on the data transfer m
 !!! note
     The [warm archive](../data_lifecycle/warm_archive.md) and the `projects` filesystem are not
     writable from within batch jobs.
-    However, you can store the data in the `warm_archive` using the datamover.
+    However, you can store the data in the `warm_archive` using the Datamover.
+
+## Transferring Files Between ZIH Systems and Group Drive
+
+1. Copy your public SSH key from the ZIH system to `login1.zih.tu-dresden.de`.
+
+   ``` console
+   marie@login$ ssh-copy-id -i ~/.ssh/id_rsa.pub login1.zih.tu-dresden.de
+   ```
+
+1. Now you can access your group drive with the Datamover commands.
+
+!!! example "Export the name of your group drive."
+
+    ``` console
+    marie@login$ export GROUP_DRIVE_NAME=???
+    ```
+
+!!! note
+    Please replace `???` with the name of your group drive.
+
+!!! example "Copying data from your group drive to `/beegfs/global0` filesystem."
+
+    ``` console
+    marie@login$ dtrsync -av dgw.zih.tu-dresden.de:/glw/${GROUP_DRIVE_NAME}/inputfile /beegfs/global0/ws/marie-workdata/.
+    ```
+
+!!! example "Copying data from `/beegfs/global0` filesystem to your group drive."
+
+    ``` console
+    marie@login$ dtrsync -av /beegfs/global0/ws/marie-workdata/resultfile dgw.zih.tu-dresden.de:/glw/${GROUP_DRIVE_NAME}/.
+    ```
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
index 71ca41949f44d558c2ca3384d7651e9e85b19125..5e4abf29b8b90f560fcd6dbbebbb2541bb76ae57 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
@@ -1,4 +1,4 @@
-# Export Nodes: Transfer Data to/from ZIH's Filesystems
+# Export Nodes - Data Transfer to/from ZIH Systems
 
 To copy large data to/from ZIH systems, the so-called **export nodes** should be used. While it is
 possible to transfer small files directly via the login nodes, they are not intended to be used that
@@ -19,8 +19,8 @@ There are at least three tools to exchange data between your local workstation a
 are explained in the following section in more detail.
 
 !!! important
-    The following explanations require that you have already set up your [SSH configuration
-    ](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+    The following explanations require that you have already set up your
+    [SSH configuration](../access/ssh_login.md#configuring-default-parameters-for-ssh).
 
 ### SCP
 
@@ -133,6 +133,14 @@ the local machine.
 
 ## Access From Windows
 
+### Command Line
+
+Windows 10 (1809 and higher) comes with
+[built-in OpenSSH support](https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_overview)
+including the above-described [SCP](#scp) and [SFTP](#sftp).
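+
+For example, a file could be copied from a Windows machine to a workspace on ZIH systems via the
+export nodes like this (a sketch; the file name and target path are placeholders):
+
+```console
+C:\Users\marie> scp my_file.dat marie@taurusexport.hrsk.tu-dresden.de:/scratch/ws/1/marie-data/
+```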
+
+### GUI - Using WinSCP
+
 First you have to install [WinSCP](http://winscp.net/eng/download.php).
 
 Then you have to execute the WinSCP application and configure some
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/overview.md b/doc.zih.tu-dresden.de/docs/data_transfer/overview.md
index c2f4fe1e669b17b4f0cdf21c39e4072d4b80fa5d..6e8a1bf1cc12e36e4aa15bd46b9eaf84e24171bc 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/overview.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/overview.md
@@ -1,6 +1,6 @@
-# Transfer of Data
+# Data Transfer
 
-## Moving Data to/from ZIH Systems
+## Data Transfer to/from ZIH Systems: Export Nodes
 
 There are at least three tools for exchanging data between your local workstation and ZIH systems:
 `scp`, `rsync`, and `sftp`. Please refer to the offline or online man pages of
@@ -12,11 +12,11 @@ No matter what tool you prefer, it is crucial that the **export nodes** are used
 copy data to/from ZIH systems. Please follow the link to the documentation on
 [export nodes](export_nodes.md) for further reference and examples.
 
-## Moving Data Inside ZIH Systems: Datamover
+## Data Transfer Inside ZIH Systems: Datamover
 
-The recommended way for data transfer inside ZIH Systems is the **datamover**. It is a special
+The recommended way for data transfer inside ZIH Systems is the **Datamover**. It is a special
 data transfer machine that provides the best transfer speed. To load, move, copy etc. files from one
 filesystem to another filesystem, you have to use commands prefixed with `dt`: `dtcp`, `dtwget`,
 `dtmv`, `dtrm`, `dtrsync`, `dttar`, `dtls`. These commands submit a job to the data transfer
 machines that execute the selected command.  Please refer to the detailed documentation regarding the
-[datamover](datamover.md).
+[Datamover](datamover.md).
diff --git a/doc.zih.tu-dresden.de/docs/index.md b/doc.zih.tu-dresden.de/docs/index.md
index 60f6f081cf4a1c2ea76663bccd65e9ff866597fb..4281ecabc941c1005e36957c45bf58584c210ee6 100644
--- a/doc.zih.tu-dresden.de/docs/index.md
+++ b/doc.zih.tu-dresden.de/docs/index.md
@@ -15,15 +15,20 @@ issues.
 
 Contributions from user-side are highly welcome. Please find out more in our [guidelines how to contribute](contrib/howto_contribute.md).
 
-**Reminder:** Non-documentation issues and requests need to be send as ticket to
-[hpcsupport@zih.tu-dresden.de](mailto:hpcsupport@zih.tu-dresden.de).
+!!! tip "Reminder"
 
----
-
----
+    Non-documentation issues and requests need to be sent as a ticket to
+    [hpcsupport@zih.tu-dresden.de](mailto:hpcsupport@zih.tu-dresden.de).
 
 ## News
 
-**2021-10-05** Offline-maintenance (black building test)
+**2022-01-13** [Supercomputing extension for TU Dresden](https://tu-dresden.de/zih/die-einrichtung/news/supercomputing-cluster-2022)
 
 **2021-09-29** Introduction to HPC at ZIH ([HPC introduction slides](misc/HPC-Introduction.pdf))
+
+## Training and Courses
+
+We offer a broad range of courses, from a classical *HPC introduction* to various
+*Performance Analysis* and *Machine Learning* trainings. Please refer to the page
+[Training Offers](https://tu-dresden.de/zih/hochleistungsrechnen/nhr-training)
+for a detailed overview of the courses and the respective dates at ZIH.
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
index 4ab5ca41a5a8c11d4a52c03661b5810d4d09a65d..dadc94855ecc71a229e0ab19b15b6837f2bbf872 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
@@ -1,11 +1,12 @@
-# Alpha Centauri - Multi-GPU Sub-Cluster
+# Alpha Centauri
 
-The sub-cluster "Alpha Centauri" had been installed for AI-related computations (ScaDS.AI).
+The multi-GPU sub-cluster "Alpha Centauri" has been installed for AI-related computations (ScaDS.AI).
 It has 34 nodes, each with:
 
 * 8 x NVIDIA A100-SXM4 (40 GB RAM)
 * 2 x AMD EPYC CPU 7352 (24 cores) @ 2.3 GHz with multi-threading enabled
-* 1 TB RAM 3.5 TB `/tmp` local NVMe device
+* 1 TB RAM
+* 3.5 TB `/tmp` local NVMe device
 * Hostnames: `taurusi[8001-8034]`
 * Slurm partition `alpha` for batch jobs and `alpha-interactive` for interactive jobs
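+
+For a first test on this sub-cluster, an interactive allocation could look like the following
+sketch (the requested resources are placeholders):
+
+```console
+marie@login$ srun --partition=alpha-interactive --ntasks=1 --cpus-per-task=6 --gres=gpu:1 --time=01:00:00 --pty bash -l
+```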
 
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
index 38d6686d7a655c1c5d7161d6607be9d6f55d8b5c..180ed1d62febd311fd5cddd739d4f086825bc5b7 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
@@ -63,9 +63,13 @@ To use it, first add a `dmtcp_launch` before your application call in your batch
 of MPI applications, you have to add the parameters `--ib --rm` and put it between `srun` and your
 application call, e.g.:
 
-```bash
-srun dmtcp_launch --ib --rm ./my-mpi-application
-```
+???+ example "my_script.sbatch"
+
+    ```bash
+    [...]
+
+    srun dmtcp_launch --ib --rm ./my-mpi-application
+    ```
 
 !!! note
 
@@ -79,7 +83,7 @@ Then just substitute your usual `sbatch` call with `dmtcp_sbatch` and be sure to
 and `-i` parameters (don't forget you need to have loaded the `dmtcp` module).
 
 ```console
-marie@login$ dmtcp_sbatch --time 2-00:00:00 --interval 28000,800 my_batchfile.sh
+marie@login$ dmtcp_sbatch --time 2-00:00:00 --interval 28000,800 my_script.sbatch
 ```
 
 With `-t, --time` you set the total runtime of your calculations. This will be replaced in the batch
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
index 218bd3d4b186efcd583c3fb6c092b4e0dbad3180..673d83bd7f38336e9b2a5091ea85f5bad6553d33 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
@@ -1,6 +1,6 @@
-# ZIH Systems
+# HPC Resources
 
-ZIH systems comprises the *High Performance Computing and Storage Complex* and its
+HPC resources in ZIH systems comprise the *High Performance Computing and Storage Complex* and its
 extension *High Performance Computing – Data Analytics*. In total it offers scientists
 about 60,000 CPU cores and a peak performance of more than 1.5 quadrillion floating point
 operations per second. The architecture specifically tailored to data-intensive computing, Big Data
@@ -14,13 +14,17 @@ users and the ZIH.
   - each with 2x Intel(R) Xeon(R) CPU E5-2680 v3 each with 12 cores
     @ 2.50GHz, Multithreading Disabled, 64 GB RAM, 128 GB SSD local disk
   - IPs: 141.30.73.\[102-105\]
-- Transfer-Nodes (`taurusexport3/4.hrsk.tu-dresden.de`, DNS Alias
+- Transfer-Nodes (`taurusexport[3-4].hrsk.tu-dresden.de`, DNS Alias
   `taurusexport.hrsk.tu-dresden.de`)
   - 2 Servers without interactive login, only available via file transfer protocols (`rsync`, `ftp`)
   - IPs: 141.30.73.82/83
 - Direct access to these nodes is granted via IP whitelisting (contact
   hpcsupport@zih.tu-dresden.de) - otherwise use TU Dresden VPN.
 
+!!! warning "Run time limit"
+
+    Any process on login nodes is stopped after 5 minutes.
+
 ## AMD Rome CPUs + NVIDIA A100
 
 - 32 nodes, each with
@@ -67,8 +71,8 @@ For machine learning, we have 32 IBM AC922 nodes installed with this configurati
 
 - 1456 nodes, each with 2x Intel(R) Xeon(R) CPU E5-2680 v3 (12 cores)
   @ 2.50GHz, Multithreading disabled, 128 GB SSD local disk
-- Hostname: `taurusi4[001-232]`, `taurusi5[001-612]`,
-  `taurusi6[001-612]`
+- Hostname: `taurusi[4001-4232]`, `taurusi[5001-5612]`,
+  `taurusi[6001-6612]`
 - Varying amounts of main memory (selected automatically by the batch
   system for you according to your job requirements)
   - 1328 nodes with 2.67 GB RAM per core (64 GB total):
@@ -97,8 +101,8 @@ For machine learning, we have 32 IBM AC922 nodes installed with this configurati
 * 64 nodes, each with 2x Intel(R) Xeon(R) CPU E5-E5-2680 v3 (12 cores)
   @ 2.50GHz, Multithreading Disabled, 64 GB RAM (2.67 GB per core),
   128 GB SSD local disk, 4x NVIDIA Tesla K80 (12 GB GDDR RAM) GPUs
-* Hostname: `taurusi2[045-108]`
-* Slurm Partition `gpu`
+* Hostname: `taurusi[2045-2108]`
+* Slurm Partition `gpu2`
 * Node topology, same as [island 4 - 6](#island-4-to-6-intel-haswell-cpus)
 
 ## SMP Nodes - up to 2 TB RAM
@@ -112,16 +116,3 @@ For machine learning, we have 32 IBM AC922 nodes installed with this configurati
 
     ![Node topology](misc/smp2.png)
     {: align=center}
-
-## Island 2 Phase 1 - Intel Sandybridge CPUs + NVIDIA K20x GPUs
-
-- 44 nodes, each with 2x Intel(R) Xeon(R) CPU E5-2450 (8 cores) @
-  2.10GHz, Multithreading Disabled, 48 GB RAM (3 GB per core), 128 GB
-  SSD local disk, 2x NVIDIA Tesla K20x (6 GB GDDR RAM) GPUs
-- Hostname: `taurusi2[001-044]`
-- Slurm partition `gpu1`
-
-??? hint "Node topology"
-
-    ![Node topology](misc/i2000.png)
-    {: align=center}
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
index 5240db14cb506d8719b9e46fe3feb89aede4a95f..1a96fc97145d5ee5587f2b9b216ffba004f4f9d8 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
@@ -12,7 +12,7 @@ specialized for different application scenarios.
 
 When log in to ZIH systems, you are placed on a login node where you can
 [manage data life cycle](../data_lifecycle/overview.md),
-[setup experiments](../data_lifecycle/experiments.md),
+set up experiments,
 execute short tests and compile moderate projects. The login nodes cannot be used for real
 experiments and computations. Long and extensive computational work and experiments have to be
 encapsulated into so called **jobs** and scheduled to the compute nodes.
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md
index edf5bae8582cff37ba5dca68d70c70a35438f341..23614e33c1d2bab66855c25cad7434e0f5093bf7 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md
@@ -1,6 +1,6 @@
-# Partitions, Memory and Run Time Limits
+# Partitions and Limits
 
-There is no such thing as free lunch at ZIH systems. Since, compute nodes are operated in multi-user
+There is no such thing as free lunch at ZIH systems. Since compute nodes are operated in multi-user
 node by default, jobs of several users can run at the same time at the very same node sharing
 resources, like memory (but not CPU). On the other hand, a higher throughput can be achieved by
 smaller jobs. Thus, restrictions w.r.t. [memory](#memory-limits) and
@@ -8,10 +8,21 @@ smaller jobs. Thus, restrictions w.r.t. [memory](#memory-limits) and
 
 ## Runtime Limits
 
+!!! warning "Runtime limits on login nodes"
+
+    There is a time limit set for processes on login nodes. If you run an application outside of a
+    compute job, it will be stopped automatically after 5 minutes with the message
+
+    ```
+    CPU time limit exceeded
+    ```
+
+    Please start a job using the [batch system](slurm.md).
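+
+    As an alternative, a minimal sketch of an interactive allocation could look like this
+    (the requested resources are placeholders):
+
+    ```console
+    marie@login$ srun --ntasks=1 --cpus-per-task=4 --time=01:00:00 --pty bash -l
+    ```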
+
 !!! note "Runtime limits are enforced."
 
-    This means, a job will be canceled as soon as it exceeds its requested limit. Currently, the
-    maximum run time is 7 days.
+    A job is canceled as soon as it exceeds its requested limit. Currently, the maximum run time is
+    7 days.
 
 Shorter jobs come with multiple advantages:
 
@@ -37,14 +48,13 @@ not capable of checkpoint/restart can be adapted. Please refer to the section
 [Checkpoint/Restart](../jobs_and_resources/checkpoint_restart.md) for further documentation.
 
 ![Partitions](misc/part.png)
-{: align="center"}
+{: align="center" summary="Partitions image"}
 
 ## Memory Limits
 
 !!! note "Memory limits are enforced."
 
-    This means that jobs which exceed their per-node memory limit will be killed automatically by
-    the batch system.
+    Jobs which exceed their per-node memory limit are killed automatically by the batch system.
 
 Memory requirements for your job can be specified via the `sbatch/srun` parameters:
 
@@ -55,24 +65,34 @@ ZIH systems comprises different sets of nodes with different amount of installed
 where your job may be run. To achieve the shortest possible waiting time for your jobs, you should
 be aware of the limits shown in the following table.
 
-??? hint "Partitions and memory limits"
+???+ hint "Partitions and memory limits"
 
     | Partition          | Nodes                                    | # Nodes | Cores per Node  | MB per Core | MB per Node | GPUs per Node     |
     |:-------------------|:-----------------------------------------|:--------|:----------------|:------------|:------------|:------------------|
-    | `haswell64`        | `taurusi[4001-4104,5001-5612,6001-6612]` | `1328`  | `24`            | `2541`       | `61000`    | `-`               |
+    | `interactive`      | `taurusi[6605-6612]`                     | `8`     | `24`            | `2541`       | `61000`    | `-`               |
+    | `haswell64`        | `taurusi[4037-4104,5001-5612,6001-6604]` | `1284`  | `24`            | `2541`       | `61000`    | `-`               |
+    | `haswell64ht`      | `taurusi[4018-4036]`                     | `18`    | `24 (HT: 48)`   | `1270*`       | `61000`    | `-`               |
     | `haswell128`       | `taurusi[4105-4188]`                     | `84`    | `24`            | `5250`       | `126000`   | `-`               |
     | `haswell256`       | `taurusi[4189-4232]`                     | `44`    | `24`            | `10583`      | `254000`   | `-`               |
     | `broadwell`        | `taurusi[4233-4264]`                     | `32`    | `28`            | `2214`       | `62000`    | `-`               |
     | `smp2`             | `taurussmp[3-7]`                         | `5`     | `56`            | `36500`      | `2044000`  | `-`               |
-    | `gpu2`             | `taurusi[2045-2106]`                     | `62`    | `24`            | `2583`       | `62000`    | `4 (2 dual GPUs)` |
-    | `gpu2-interactive` | `taurusi[2045-2108]`                     | `64`    | `24`            | `2583`       | `62000`    | `4 (2 dual GPUs)` |
+    | `gpu2`**           | `taurusi[2045-2103]`                     | `59`    | `24`            | `2583`       | `62000`    | `4 (2 dual GPUs)` |
     | `hpdlf`            | `taurusa[3-16]`                          | `14`    | `12`            | `7916`       | `95000`    | `3`               |
-    | `ml`               | `taurusml[1-32]`                         | `32`    | `44 (HT: 176)`  | `1443*`      | `254000`   | `6`               |
-    | `romeo`            | `taurusi[7001-7192]`                     | `192`   | `128 (HT: 256)` | `1972*`      | `505000`   | `-`               |
-    | `julia`            | `taurussmp8`                             | `1`     | `896`           | `27343*`     | `49000000` | `-`               |
+    | `ml`**             | `taurusml[1-32]`                         | `32`    | `44 (HT: 176)`  | `1443*`      | `254000`   | `6`               |
+    | `romeo`**          | `taurusi[7001-7192]`                     | `192`   | `128 (HT: 256)` | `1972*`      | `505000`   | `-`               |
+    | `julia`            | `taurussmp8`                             | `1`     | `896`           | `27343`     | `49000000` | `-`               |
+    | `alpha`**          | `taurusi[8001-8034]`                     | `34`    | `48 (HT: 96)`   | `10312*`     | `990000`   | `8`               |
+    {: summary="Partitions and limits table" align="bottom"}
 
 !!! note
 
-    The ML nodes have 4way-SMT, so for every physical core allocated (,e.g., with
-    `SLURM_HINT=nomultithread`), you will always get 4*1443 MB because the memory of the other
-    threads is allocated implicitly, too.
+    Some nodes have multithreading (SMT) enabled, so for every physical core allocated
+    (e.g., with `SLURM_HINT=nomultithread`), you will always get `MB per Core`*`number of threads`,
+    because the memory of the other threads is allocated implicitly, too.
+    Those nodes are marked with an asterisk.
+    Some of the partitions, denoted with a double asterisk, have a counterpart for interactive
+    jobs. These partitions have a `-interactive` suffix (e.g. `ml-interactive`) and have the same
+    configuration.
+    There is also a meta partition `haswell`, which contains the partitions `haswell64`,
+    `haswell128`, `haswell256`, and `smp2`. It is also the default partition. If you specify no
+    partition or the partition `haswell`, a Slurm plugin will choose the partition that fits your
+    memory requirements (see the sketch below). There are further partitions not listed in the
+    table above, but those should not be used directly.
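+
+For example, the following sketch relies on this automatic partition selection by requesting more
+memory per core than the `haswell64` nodes offer (the values are placeholders; the actual
+placement is done by the Slurm plugin):
+
+```console
+marie@login$ srun --partition=haswell --ntasks=1 --mem-per-cpu=5000 --time=00:30:00 --pty bash -l
+```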
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/power9.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/power9.md
deleted file mode 100644
index 40c47702446000ecd7403eda1a84c241fa0d00e7..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/power9.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# IBM Power9 Nodes for Machine Learning
-
-For machine learning, we have 32 IBM AC922 nodes installed with this
-configuration:
-
--   2 x IBM Power9 CPU (2.80 GHz, 3.10 GHz boost, 22 cores)
--   256 GB RAM DDR4 2666MHz
--   6x NVIDIA VOLTA V100 with 32GB HBM2
--   NVLINK bandwidth 150 GB/s between GPUs and host
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/sd_flex.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/sd_flex.md
index c09260cf8d814a6a6835f981a25d1e8700c71df2..34505f93de1673aea883574459157b41c9f56357 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/sd_flex.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/sd_flex.md
@@ -1,4 +1,10 @@
-# Large Shared-Memory Node - HPE Superdome Flex
+# HPE Superdome Flex
+
+The HPE Superdome Flex is a large shared memory node. It is especially well suited for data
+intensive application scenarios, for example to process extremely large data sets completely in main
+memory or in very fast NVMe memory.
+
+## Configuration Details
 
 - Hostname: `taurussmp8`
 - Access to all shared filesystems
@@ -10,29 +16,19 @@
 ## Local Temporary NVMe Storage
 
 There are 370 TB of NVMe devices installed. For immediate access for all projects, a volume of 87 TB
-of fast NVMe storage is available at `/nvme/1/<projectname>`. For testing, we have set a quota of
-100 GB per project on this NVMe storage.
+of fast NVMe storage is available at `/nvme/1/<projectname>`. A quota of
+100 GB per project on this NVMe storage is set.
 
-With a more detailed proposal on how this unique system (large shared memory + NVMe storage) can
-speed up their computations, a project's quota can be increased or dedicated volumes of up to the
-full capacity can be set up.
+With a more detailed proposal to [hpcsupport@zih.tu-dresden.de](mailto:hpcsupport@zih.tu-dresden.de)
+on how this unique system (large shared memory + NVMe storage) can speed up their computations, a
+project's quota can be increased or dedicated volumes of up to the full capacity can be set up.
 
 ## Hints for Usage
 
-- granularity should be a socket (28 cores)
-- can be used for OpenMP applications with large memory demands
+- Granularity should be a socket (28 cores)
+- Can be used for OpenMP applications with large memory demands (see the sketch after this list)
 - To use OpenMPI it is necessary to export the following environment
   variables, so that OpenMPI uses shared memory instead of Infiniband
   for message transport. `export OMPI_MCA_pml=ob1;   export  OMPI_MCA_mtl=^mxm`
 - Use `I_MPI_FABRICS=shm` so that Intel MPI doesn't even consider
   using Infiniband devices itself, but only shared-memory instead
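+
+A minimal job file sketch for an OpenMP application on this node (partition `julia`, one socket,
+i.e., 28 cores; time limit and binary path are placeholders):
+
+```bash
+#!/bin/bash
+#SBATCH --partition=julia
+#SBATCH --nodes=1
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=28
+#SBATCH --time=01:00:00
+
+export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
+./path/to/binary
+```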
-
-## Open for Testing
-
-- At the moment we have set a quota of 100 GB per project on this NVMe
-  storage. As soon as the first projects come up with proposals how
-  this unique system (large shared memory + NVMe storage) can speed up
-  their computations, we will gladly increase this limit, for selected
-  projects.
-- Test users might have to clean-up their `/nvme` storage within 4 weeks
-  to make room for large projects.
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
index a5bb1980e342b8f1c19ecb6b610a5d481cd98268..f7456662f5ca54887b20b075e58dd25517aa3c96 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
@@ -2,10 +2,32 @@
 
 When logging in to ZIH systems, you are placed on a login node. There, you can manage your
 [data life cycle](../data_lifecycle/overview.md),
-[setup experiments](../data_lifecycle/experiments.md), and
+set up experiments, and
 edit and prepare jobs. The login nodes are not suited for computational work! From the login nodes,
 you can interact with the batch system, e.g., submit and monitor your jobs.
 
+A typical workflow would look like this:
+
+```mermaid
+sequenceDiagram
+    user ->>+ login node: run program
+    login node ->> login node: kill after 5 min
+    login node ->>- user: Killed!
+    user ->> login node: salloc [...]
+    login node ->> Slurm: Request resources
+    Slurm ->> user: resources
+    user ->>+ allocated resources: srun [options] [command]
+    allocated resources ->> allocated resources: run command (on allocated nodes)
+    allocated resources ->>- user: program finished
+    user ->>+ allocated resources: srun [options] [further_command]
+    allocated resources ->> allocated resources: run further command
+    allocated resources ->>- user: program finished
+    user ->>+ allocated resources: srun [options] [further_command]
+    allocated resources ->> allocated resources: run further command
+    Slurm ->> allocated resources: Job limit reached/exceeded
+    allocated resources ->>- user: Job limit reached
+```
+
 ??? note "Batch System"
 
     The batch system is the central organ of every HPC system users interact with its compute
@@ -261,10 +283,15 @@ provide a comprehensive collection of job examples.
 
 ### Job and Slurm Monitoring
 
-On the command line, use `squeue` to watch the scheduling queue. This command will tell the reason,
-why a job is not running (job status in the last column of the output). More information about job
-parameters can also be determined with `scontrol -d show job <jobid>`. The following table holds
-detailed descriptions of the possible job states:
+On the command line, use `squeue` to watch the scheduling queue.
+
+!!! tip "Show your jobs"
+
+    Invoke `squeue --me` to list only your jobs.
+
+The command `squeue` will tell the reason why a job is not running (job status in the last column
+of the output). More information about job parameters can also be determined with
+`scontrol -d show job <jobid>`. The following table holds detailed descriptions of the possible
+job states:
 
 ??? tip "Reason Table"
 
@@ -376,13 +403,13 @@ If you want to use your reservation, you have to add the parameter
 
 ## Node Features for Selective Job Submission
 
-The nodes in our HPC system are becoming more diverse in multiple aspects: hardware, mounted
-storage, software. The system administrators can describe the set of properties and it is up to the
-user to specify her/his requirements. These features should be thought of as changing over time
+The nodes in our HPC system are becoming more diverse in multiple aspects, e.g., hardware, mounted
+storage, software. The system administrators can describe the set of properties and it is up to you
+as a user to specify your requirements. These features should be thought of as changing over time
 (e.g., a filesystem get stuck on a certain node).
 
-A feature can be used with the Slurm option `--constrain` or `-C` like
-`srun -C fs_lustre_scratch2 ...` with `srun` or `sbatch`. Combinations like
+A feature can be used with the Slurm option `-C, --constraint=<ARG>` like
+`srun --constraint=fs_lustre_scratch2 ...` with `srun` or `sbatch`. Combinations like
 `--constraint="fs_beegfs_global0`are allowed. For a detailed description of the possible
 constraints, please refer to the [Slurm documentation](https://slurm.schedmd.com/srun.html).
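+
+A minimal job file sketch requesting such a feature (reusing the feature name
+`fs_lustre_scratch2` from the example above; all other values are placeholders) could look like:
+
+```bash
+#!/bin/bash
+#SBATCH --ntasks=1
+#SBATCH --time=00:10:00
+#SBATCH --constraint=fs_lustre_scratch2
+
+srun ./path/to/binary
+```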
 
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
index 65e445f354d08a3473e226cc97c45ff6c01e8c48..ebfd52972ac785b851a0c02758904a68dd09af8f 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
@@ -7,12 +7,12 @@ depend on the type of parallelization and architecture.
 
 ### OpenMP Jobs
 
-An SMP-parallel job can only run within a node, so it is necessary to include the options `-N 1` and
-`-n 1`. The maximum number of processors for an SMP-parallel program is 896 and 56 on partition
-`taurussmp8` and  `smp2`, respectively.  Please refer to the
+An SMP-parallel job can only run within a node, so it is necessary to include the options
+`--nodes=1` and `--ntasks=1`. The maximum number of processors for an SMP-parallel program is 896
+and 56 on partition `taurussmp8` and `smp2`, respectively. Please refer to the
 [partitions section](partitions_and_limits.md#memory-limits) for up-to-date information. Using the
 option `--cpus-per-task=<N>` Slurm will start one task and you will have `N` CPUs available for your
-job.  An example job file would look like:
+job. An example job file would look like:
 
 !!! example "Job file for OpenMP application"
 
@@ -22,9 +22,9 @@ job.  An example job file would look like:
     #SBATCH --tasks-per-node=1
     #SBATCH --cpus-per-task=8
     #SBATCH --time=08:00:00
-    #SBATCH -J Science1
+    #SBATCH --job-name=Science1
     #SBATCH --mail-type=end
-    #SBATCH --mail-user=your.name@tu-dresden.de
+    #SBATCH --mail-user=<your.email>@tu-dresden.de
 
     export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
     ./path/to/binary
@@ -48,9 +48,9 @@ For MPI-parallel jobs one typically allocates one core per task that has to be s
     #!/bin/bash
     #SBATCH --ntasks=864
     #SBATCH --time=08:00:00
-    #SBATCH -J Science1
+    #SBATCH --job-name=Science1
     #SBATCH --mail-type=end
-    #SBATCH --mail-user=your.name@tu-dresden.de
+    #SBATCH --mail-user=<your.email>@tu-dresden.de
 
     srun ./path/to/binary
     ```
@@ -70,9 +70,9 @@ below.
     #SBATCH --ntasks=4
     #SBATCH --cpus-per-task=1
     #SBATCH --time=01:00:00
-    #SBATCH -J PseudoParallelJobs
+    #SBATCH --job-name=PseudoParallelJobs
     #SBATCH --mail-type=end
-    #SBATCH --mail-user=your.name@tu-dresden.de
+    #SBATCH --mail-user=<your.email>@tu-dresden.de
 
     # The following sleep command was reported to fix warnings/errors with srun by users (feel free to uncomment).
     #sleep 5
@@ -109,7 +109,7 @@ for `sbatch/srun` in this case is `--gres=gpu:[NUM_PER_NODE]` (where `NUM_PER_NO
     #SBATCH --cpus-per-task=6      # use 6 threads per task
     #SBATCH --gres=gpu:1           # use 1 GPU per node (i.e. use one GPU per task)
     #SBATCH --time=01:00:00        # run for 1 hour
-    #SBATCH -A Project1            # account CPU time to Project1
+    #SBATCH --account=p_number_crunch      # account CPU time to project p_number_crunch
 
     srun ./your/cuda/application   # start you application (probably requires MPI to use both nodes)
     ```
@@ -247,7 +247,7 @@ two you might want to use. Since we use cgroups for separation of jobs, your job
 use more resources than requested.*
 
 If you just want to use all available cores in a node, you have to specify how Slurm should organize
-them, like with `-p haswell -c 24` or `-p haswell --ntasks-per-node=24`.
+them, like with `--partition=haswell --cpus-per-task=24` or `--partition=haswell --ntasks-per-node=24`.
 
 Here is a short example to ensure that a benchmark is not spoiled by other jobs, even if it doesn't
 use up all resources in the nodes:
@@ -256,13 +256,13 @@ use up all resources in the nodes:
 
     ```Bash
     #!/bin/bash
-    #SBATCH -p haswell
+    #SBATCH --partition=haswell
     #SBATCH --nodes=2
     #SBATCH --ntasks-per-node=2
     #SBATCH --cpus-per-task=8
     #SBATCH --exclusive    # ensure that nobody spoils my measurement on 2 x 2 x 8 cores
     #SBATCH --time=00:10:00
-    #SBATCH -J Benchmark
+    #SBATCH --job-name=Benchmark
     #SBATCH --mail-user=your.name@tu-dresden.de
 
     srun ./my_benchmark
@@ -299,11 +299,11 @@ name specific to the job:
     ```Bash
     #!/bin/bash
     #SBATCH --array 0-9
-    #SBATCH -o arraytest-%A_%a.out
-    #SBATCH -e arraytest-%A_%a.err
+    #SBATCH --output=arraytest-%A_%a.out
+    #SBATCH --error=arraytest-%A_%a.err
     #SBATCH --ntasks=864
     #SBATCH --time=08:00:00
-    #SBATCH -J Science1
+    #SBATCH --job-name=Science1
     #SBATCH --mail-type=end
     #SBATCH --mail-user=your.name@tu-dresden.de
 
@@ -355,4 +355,22 @@ file) that will be executed one after each other with different CPU numbers:
 
 ## Array-Job with Afterok-Dependency and Datamover Usage
 
-This part is under construction.
+In this example scenario, imagine you need to move data before starting the main job.
+For this, you may use a data transfer job and tell Slurm to start the main job immediately after
+the data transfer job has successfully finished.
+
+First, you have to start your data transfer job, which, for example, transfers your input data
+from one workspace to another.
+
+```console
+marie@login$ export DATAMOVER_JOB=$(dtcp /scratch/ws/1/marie-source/input.txt /beegfs/ws/1/marie-target/. | awk '{print $4}')
+```
+
+Now you can refer to the job ID of the Datamover job in your workload jobs.
+
+```console
+marie@login$ srun --dependency afterok:${DATAMOVER_JOB} ls /beegfs/ws/1/marie-target
+srun: job 23872871 queued and waiting for resources
+srun: job 23872871 has been allocated resources
+input.txt
+```
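+
+The same dependency can also be used when submitting a batch job instead of calling `srun`
+directly, e.g. (a minimal sketch; `my_script.sbatch` is a placeholder job file):
+
+```console
+marie@login$ sbatch --dependency=afterok:${DATAMOVER_JOB} my_script.sbatch
+```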
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_profiling.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_profiling.md
index 273a87710602b62feb97c342335b4c44f30ad09e..175333cb01be36b74ab8eb906eae05430345ecb1 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_profiling.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_profiling.md
@@ -60,3 +60,38 @@ More information about profiling with Slurm:
 
 - [Slurm Profiling](http://slurm.schedmd.com/hdf5_profile_user_guide.html)
 - [`sh5util`](http://slurm.schedmd.com/sh5util.html)
+
+## Memory Consumption of a Job
+
+If you are only interested in the maximal memory consumption of your job, you don't need profiling
+at all. This information can be retrieved from within [job files](slurm.md#batch-jobs) as follows:
+
+```bash
+#!/bin/bash
+
+#SBATCH [...]
+
+module purge
+module load [...]
+
+srun a.exe
+
+# Retrieve max. memory for this job for all nodes
+srun max_mem.sh
+```
+
+The script `max_mem.sh` is:
+
+```bash
+#!/bin/bash
+
+echo -n "$(hostname): "
+cat /sys/fs/cgroup/memory/slurm/uid_${SLURM_JOB_UID}/job_${SLURM_JOB_ID}/memory.max_usage_in_bytes
+```
+
+!!! note
+
+    * Make sure that the script `max_mem.sh` is executable (e.g., `chmod +x max_mem.sh`) and add
+      the path to this script if it is not within the same directory.
+    * The `srun` command is necessary to gather the max. memory from all nodes within this job.
+      Otherwise, you would only get the data from one node.
diff --git a/doc.zih.tu-dresden.de/docs/legal_notice.md b/doc.zih.tu-dresden.de/docs/legal_notice.md
index a5e187ee3f5eb9937e8eb01c33eed182fb2c423d..e5029584f538f8d909d4bd6f0cf786b73e9872df 100644
--- a/doc.zih.tu-dresden.de/docs/legal_notice.md
+++ b/doc.zih.tu-dresden.de/docs/legal_notice.md
@@ -4,7 +4,7 @@
 
 Es gilt das [Impressum der TU Dresden](https://tu-dresden.de/impressum) mit folgenden Änderungen:
 
-### Ansprechpartner/Betreiber:
+### Ansprechpartner/Betreiber
 
 Technische Universität Dresden
 Zentrum für Informationsdienste und Hochleistungsrechnen
@@ -14,7 +14,7 @@ Tel.: +49 351 463-40000
 Fax: +49 351 463-42328
 E-Mail: servicedesk@tu-dresden.de
 
-### Konzeption, Technische Umsetzung, Anbieter:
+### Konzeption, Technische Umsetzung, Anbieter
 
 Technische Universität Dresden
 Zentrum für Informationsdienste und Hochleistungsrechnen
@@ -30,4 +30,4 @@ E-Mail: zih@tu-dresden.de
 This documentation and the repository have two licenses:
 
 * All documentation is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
-* All software components are licensed under MIT license.
+* All software components are licensed under [MIT license](license_mit.txt).
diff --git a/doc.zih.tu-dresden.de/docs/license_mit.txt b/doc.zih.tu-dresden.de/docs/license_mit.txt
new file mode 100644
index 0000000000000000000000000000000000000000..02c33cdf3c7a4f4ce54a670efc885018868f3f26
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/license_mit.txt
@@ -0,0 +1,16 @@
+Copyright 2021, 2022 TU Dresden / ZIH
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
+OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md
index 3375f599f68e04344f4497fcf1fe3f43f45fbbd4..4bd9634db24b8ba81a02368a4f51c0b46004885f 100644
--- a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md
+++ b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md
@@ -32,7 +32,6 @@ The steps are:
 
 Apache Spark can be used in [interactive](#interactive-jobs) and [batch](#batch-jobs) jobs as well
 as via [Jupyter notebooks](#jupyter-notebook). All three ways are outlined in the following.
-The usage of Flink with Jupyter notebooks is currently under examination.
 
 ## Interactive Jobs
 
@@ -74,8 +73,8 @@ Spark or `$FLINK_ROOT_DIR/conf` for Flink:
     marie@compute$ source framework-configure.sh flink $FLINK_ROOT_DIR/conf
     ```
 
-This places the configuration in a directory called `cluster-conf-<JOB_ID>` in your `home`
-directory, where `<JOB_ID>` stands for the id of the Slurm job. After that, you can start in
+This places the configuration in a directory called `cluster-conf-<JOB_ID>` in your home directory,
+where `<JOB_ID>` stands for the id of the Slurm job. After that, you can start in
 the usual way:
 
 === "Spark"
@@ -238,50 +237,34 @@ example below:
 
 ## Jupyter Notebook
 
-You can run Jupyter notebooks with Spark on the ZIH systems in a similar way as described on the
-[JupyterHub](../access/jupyterhub.md) page. Interaction of Flink with JupyterHub is currently
-under examination and will be posted here upon availability.
+You can run Jupyter notebooks with Spark and Flink on the ZIH systems in a similar way as described
+on the [JupyterHub](../access/jupyterhub.md) page.
 
-### Preparation
-
-If you want to run Spark in Jupyter notebooks, you have to prepare it first. This is comparable
-to [normal Python virtual environments](../software/python_virtual_environments.md#python-virtual-environment).
-You start with an allocation:
-
-```console
-marie@login$ srun --pty --ntasks=1 --cpus-per-task=2 --mem-per-cpu=2500 --time=01:00:00 bash -l
-```
+### Spawning a Notebook
 
-When a node is allocated, install the required packages:
+Go to [https://taurus.hrsk.tu-dresden.de/jupyter](https://taurus.hrsk.tu-dresden.de/jupyter).
+In the tab "Advanced", go to the field "Preload modules" and select the following Spark or Flink
+module:
 
-```console
-marie@compute$ cd $HOME
-marie@compute$ mkdir jupyter-kernel
-marie@compute$ module load Python
-marie@compute$ virtualenv --system-site-packages jupyter-kernel/env  #Create virtual environment
-[...]
-marie@compute$ source jupyter-kernel/env/bin/activate    #Activate virtual environment.
-(env) marie@compute$ pip install ipykernel
-[...]
-(env) marie@compute$ python -m ipykernel install --user --name haswell-py3.7-spark --display-name="haswell-py3.7-spark"
-Installed kernelspec haswell-py3.7-spark in [...]
-
-(env) marie@compute$ pip install findspark
-(env) marie@compute$ deactivate
-```
+=== "Spark"
+    ```
+    Spark/3.0.1-Hadoop-2.7-Java-1.8-Python-3.7.4-GCCcore-8.3.0
+    ```
+=== "Flink"
+    ```
+    Flink/1.12.3-Java-1.8.0_161-OpenJDK-Python-3.7.4-GCCcore-8.3.0
+    ```
 
-You are now ready to spawn a notebook with Spark.
+When your Jupyter instance is started, you can set up Spark/Flink. Since the setup in the notebook
+requires more steps than in an interactive session, we have created example notebooks that you can
+use as a starting point for convenience: [SparkExample.ipynb](misc/SparkExample.ipynb),
+[FlinkExample.ipynb](misc/FlinkExample.ipynb).
 
-### Spawning a Notebook
+!!! warning
 
-Assuming that you have prepared everything as described above, you can go to
-[https://taurus.hrsk.tu-dresden.de/jupyter](https://taurus.hrsk.tu-dresden.de/jupyter).
-In the tab "Advanced", go to the field "Preload modules" and select one of the Spark modules. When
-your Jupyter instance is started, check whether the kernel that you created in the preparation
-phase (see above) is shown in the top right corner of the notebook. If it is not already selected,
-select the kernel `haswell-py3.7-spark`. Then, you can set up Spark. Since the setup in the
-notebook requires more steps than in an interactive session, we have created an example notebook
-that you can use as a starting point for convenience: [SparkExample.ipynb](misc/SparkExample.ipynb)
+    The notebooks only work with the Spark or Flink module mentioned above. When using other
+    Spark/Flink modules, it is possible that you have to do additional or different steps in order
+    to get Spark/Flink running.
 
 !!! note
 
@@ -292,15 +275,12 @@ that you can use as a starting point for convenience: [SparkExample.ipynb](misc/
 
 ## FAQ
 
-Q: Command `source framework-configure.sh hadoop
-$HADOOP_ROOT_DIR/etc/hadoop` gives the output:
+Q: Command `source framework-configure.sh hadoop $HADOOP_ROOT_DIR/etc/hadoop` gives the output:
 `bash: framework-configure.sh: No such file or directory`. How can this be resolved?
 
-A: Please try to re-submit or re-run the job and if that doesn't help
-re-login to the ZIH system.
+A: Please try to re-submit or re-run the job and if that doesn't help re-login to the ZIH system.
 
-Q: There are a lot of errors and warnings during the set up of the
-session
+Q: There are a lot of errors and warnings during the setup of the session
 
 A: Please check the work capability on a simple example as shown in this documentation.
 
diff --git a/doc.zih.tu-dresden.de/docs/software/building_software.md b/doc.zih.tu-dresden.de/docs/software/building_software.md
index c83932a16c1c0227cb160d4853cd1815626fc404..73952b06efde809b7e91e936be0fbf9b240f88a8 100644
--- a/doc.zih.tu-dresden.de/docs/software/building_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/building_software.md
@@ -17,16 +17,16 @@ For instance, when using CMake and keeping your source in `/projects`, you could
 
 ```console
 # save path to your source directory:
-marie@login$ export SRCDIR=/projects/p_marie/mysource
+marie@login$ export SRCDIR=/projects/p_number_crunch/mysource
 
 # create a build directory in /scratch:
-marie@login$ mkdir /scratch/p_marie/mysoftware_build
+marie@login$ mkdir /scratch/p_number_crunch/mysoftware_build
 
 # change to build directory within /scratch:
-marie@login$ cd /scratch/p_marie/mysoftware_build
+marie@login$ cd /scratch/p_number_crunch/mysoftware_build
 
 # create Makefiles:
-marie@login$ cmake -DCMAKE_INSTALL_PREFIX=/projects/p_marie/mysoftware $SRCDIR
+marie@login$ cmake -DCMAKE_INSTALL_PREFIX=/projects/p_number_crunch/mysoftware $SRCDIR
 
 # build in a job:
 marie@login$ srun --mem-per-cpu=1500 --cpus-per-task=12 --pty make -j 12
diff --git a/doc.zih.tu-dresden.de/docs/software/containers.md b/doc.zih.tu-dresden.de/docs/software/containers.md
index d15535933ef7f2b9e0330d07e35168f10fc22ded..be74caec03c6ffcf098eade46f4c3adb313f8754 100644
--- a/doc.zih.tu-dresden.de/docs/software/containers.md
+++ b/doc.zih.tu-dresden.de/docs/software/containers.md
@@ -1,4 +1,4 @@
-# Use of Containers
+# Singularity
 
 [Containerization](https://www.ibm.com/cloud/learn/containerization) encapsulating or packaging up
 software code and all its dependencies to run uniformly and consistently on any infrastructure. On
@@ -15,11 +15,11 @@ systems cannot be granted root permissions. A solution is a Virtual Machine (VM)
 `ml` which allows users to gain root permissions in an isolated environment. There are two main
 options on how to work with Virtual Machines on ZIH systems:
 
-1. [VM tools](virtual_machines_tools.md): Automated algorithms for using virtual machines;
+1. [VM tools](singularity_power9.md): Automated algorithms for using virtual machines;
 1. [Manual method](virtual_machines.md): It requires more operations but gives you more flexibility
    and reliability.
 
-## Singularity
+## Usage of Singularity
 
 If you wish to containerize your workflow and/or applications, you can use Singularity containers on
 ZIH systems. As opposed to Docker, this solution is much more suited to being used in an HPC
@@ -46,7 +46,7 @@ instructions from the official documentation to install Singularity.
 1. Check if `go` is installed by executing `go version`.  If it is **not**:
 
     ```console
-    marie@local$ wget <https://storage.googleapis.com/golang/getgo/installer_linux> && chmod +x
+    marie@local$ wget 'https://storage.googleapis.com/golang/getgo/installer_linux' && chmod +x \
     installer_linux && ./installer_linux && source $HOME/.bash_profile
     ```
 
@@ -88,7 +88,9 @@ instructions from the official documentation to install Singularity.
 There are two possibilities:
 
 1. Create a new container on your local workstation (where you have the necessary privileges), and
-   then copy the container file to ZIH systems for execution.
+   then copy the container file to ZIH systems for execution. For this, you also have to install
+   [Singularity](https://sylabs.io/guides/3.0/user-guide/quick_start.html#quick-installation-steps)
+   on your local workstation.
 1. You can, however, import an existing container from, e.g., Docker.
 
 Both methods are outlined in the following.
@@ -101,12 +103,13 @@ You can create a new custom container on your workstation, if you have root righ
 
     You cannot create containers for the partition `ml`, as it bases on Power9 micro-architecture
     which is different to the x86 architecture in common computers/laptops. For that you can use
-    the [VM Tools](virtual_machines_tools.md).
+    the [VM Tools](singularity_power9.md).
 
-Creating a container is done by writing a **definition file** and passing it to
+Creating a container is done by writing a definition file, such as `myDefinition.def`, and passing
+it to `singularity` via
 
 ```console
-marie@local$ singularity build myContainer.sif <myDefinition.def>
+marie@local$ singularity build myContainer.sif myDefinition.def
 ```
 
 A definition file contains a bootstrap
@@ -167,7 +170,7 @@ https://github.com/singularityware/singularity/tree/master/examples.
 You can import an image directly from the Docker repository (Docker Hub):
 
 ```console
-marie@local$ singularity build my-container.sif docker://ubuntu:latest
+marie@login$ singularity build my-container.sif docker://ubuntu:latest
 ```
 
 Creating a singularity container directly from a local docker image is possible but not
@@ -175,20 +178,20 @@ recommended. The steps are:
 
 ```console
 # Start a docker registry
-$ docker run -d -p 5000:5000 --restart=always --name registry registry:2
+marie@local$ docker run -d -p 5000:5000 --restart=always --name registry registry:2
 
 # Push local docker container to it
-$ docker tag alpine localhost:5000/alpine
-$ docker push localhost:5000/alpine
+marie@local$ docker tag alpine localhost:5000/alpine
+marie@local$ docker push localhost:5000/alpine
 
 # Create def file for singularity like this...
-$ cat example.def
+marie@local$ cat example.def
 Bootstrap: docker
 Registry: http://localhost:5000
 From: alpine
 
 # Build singularity container
-$ singularity build --nohttps alpine.sif example.def
+marie@local$ singularity build --nohttps alpine.sif example.def
 ```
 
 #### Start from a Dockerfile
@@ -284,7 +287,7 @@ While the `shell` command can be useful for tests and setup, you can also launch
 inside the container directly using "exec":
 
 ```console
-marie@login$ singularity exec my-container.img /opt/myapplication/bin/run_myapp
+marie@login$ singularity exec my-container.sif /opt/myapplication/bin/run_myapp
 ```
 
 This can be useful if you wish to create a wrapper script that transparently calls a containerized
@@ -299,7 +302,7 @@ if [ "z$X" = "z" ] ; then
   exit 1
 fi
 
-singularity exec /scratch/p_myproject/my-container.sif /opt/myapplication/run_myapp "$@"
+singularity exec /projects/p_number_crunch/my-container.sif /opt/myapplication/run_myapp "$@"
 ```
 
 The better approach is to use `singularity run`, which executes whatever was set in the `%runscript`
@@ -325,20 +328,20 @@ singularity build my-container.sif example.def
 Then you can run your application via
 
 ```console
-singularity run my-container.sif first_arg 2nd_arg
+marie@login$ singularity run my-container.sif first_arg 2nd_arg
 ```
 
 Alternatively you can execute the container directly which is equivalent:
 
 ```console
-./my-container.sif first_arg 2nd_arg
+marie@login$ ./my-container.sif first_arg 2nd_arg
 ```
 
 With this you can even masquerade an application with a singularity container as if it was an actual
 program by naming the container just like the binary:
 
 ```console
-mv my-container.sif myCoolAp
+marie@login$ mv my-container.sif myCoolAp
 ```
 
 ### Use-Cases
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics.md b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
index b4a5f7f8b9f86c9a47fec20b875970efd4d787b2..c3cb4afe1be3d613a915e42f1db1020919ecfa3c 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
@@ -29,8 +29,8 @@ can be installed individually by each user. If possible, the use of
 recommended (e.g. for Python). Likewise, software can be used within [containers](containers.md).
 
 For the transfer of larger amounts of data into and within the system, the
-[export nodes and datamover](../data_transfer/overview.md) should be used.
+[export nodes and Datamover](../data_transfer/overview.md) should be used.
 Data is stored in the [workspaces](../data_lifecycle/workspaces.md).
 Software modules or virtual environments can also be installed in workspaces to enable
-collaborative work even within larger groups. General recommendations for setting up workflows
-can be found in the [experiments](../data_lifecycle/experiments.md) section.
+collaborative work even within larger groups.
+<!--General recommendations for setting up workflows can be found in the experiments section.-->
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
index 00ce0c5c4c3ddbd3654161bab69ee0a493cb4350..cf8c1b559f4f496a729388a1e1f4353cdcd14733 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
@@ -1,4 +1,4 @@
-# Python for Data Analytics
+# Data Analytics with Python
 
 Python is a high-level interpreted language widely used in research and science. Using ZIH system
 allows you to work with Python quicker and more effective. Here, a general introduction to working
@@ -11,6 +11,10 @@ Often, it is useful to create an isolated development environment, which can be
 a research group and/or teaching class. For this purpose,
 [Python virtual environments](python_virtual_environments.md) can be used.
 
+!!! hint
+    For working with conda virtual environments, it may be necessary to configure your shell via
+    `conda init` as described in [Python virtual environments](python_virtual_environments.md#conda-virtual-environment).
+
 The interactive Python interpreter can also be used on ZIH systems via an interactive job:
 
 ```console
@@ -215,7 +219,7 @@ from dask_jobqueue import SLURMCluster
 cluster = SLURMCluster(queue='alpha',
   cores=8,
   processes=2,
-  project='p_marie',
+  project='p_number_crunch',
   memory="8GB",
   walltime="00:30:00")
 
@@ -238,7 +242,7 @@ from dask import delayed
 cluster = SLURMCluster(queue='alpha',
   cores=8,
   processes=2,
-  project='p_marie',
+  project='p_number_crunch',
   memory="80GB",
   walltime="00:30:00",
   extra=['--resources gpu=1'])
@@ -290,7 +294,7 @@ for the Monte-Carlo estimation of Pi.
 
     #create a Slurm cluster, please specify your project
 
-    cluster = SLURMCluster(queue='alpha', cores=2, project='p_marie', memory="8GB", walltime="00:30:00", extra=['--resources gpu=1'], scheduler_options={"dashboard_address": f":{portdash}"})
+    cluster = SLURMCluster(queue='alpha', cores=2, project='p_number_crunch', memory="8GB", walltime="00:30:00", extra=['--resources gpu=1'], scheduler_options={"dashboard_address": f":{portdash}"})
 
     #submit the job to the scheduler with the number of nodes (here 2) requested:
 
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
index 72224113fdf8a9c6f4727d47771283dc1d0c1baa..1f6be0614fc728f88212a5192fb1b11277ac8454 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
@@ -1,4 +1,4 @@
-# R for Data Analytics
+# Data Analytics with R
 
 [R](https://www.r-project.org/about.html) is a programming language and environment for statistical
 computing and graphics. It provides a wide variety of statistical (linear and nonlinear modeling,
@@ -63,7 +63,8 @@ marie@compute$ R -e 'install.packages("ggplot2")'
 ## Deep Learning with R
 
 The deep learning frameworks perform extremely fast when run on accelerators such as GPU.
-Therefore, using nodes with built-in GPUs, e.g., partitions [ml](../jobs_and_resources/power9.md)
+Therefore, using nodes with built-in GPUs, e.g., partitions
+[ml](../jobs_and_resources/hardware_overview.md)
 and [alpha](../jobs_and_resources/alpha_centauri.md), is beneficial for the examples here.
 
 ### R Interface to TensorFlow
@@ -268,8 +269,8 @@ since both are running multicore jobs on a **single** node. Below is an example:
 #SBATCH --tasks-per-node=1
 #SBATCH --cpus-per-task=16
 #SBATCH --time=00:10:00
-#SBATCH -o test_Rmpi.out
-#SBATCH -e test_Rmpi.err
+#SBATCH --output=test_Rmpi.out
+#SBATCH --error=test_Rmpi.err
 
 module purge
 module load modenv/scs5
diff --git a/doc.zih.tu-dresden.de/docs/software/debuggers.md b/doc.zih.tu-dresden.de/docs/software/debuggers.md
index 0d4bda97f61fe6453d6027406ff88145c4204cfb..d57ceab704a534302ff24407e2c20bdce3dbd833 100644
--- a/doc.zih.tu-dresden.de/docs/software/debuggers.md
+++ b/doc.zih.tu-dresden.de/docs/software/debuggers.md
@@ -22,7 +22,7 @@ errors.
 | Licenses at ZIH    | Free           | 1024 (max. number of processes/threads) |
 | Official documentation | [GDB website](https://www.gnu.org/software/gdb/) | [Arm DDT website](https://developer.arm.com/tools-and-software/server-and-hpc/debug-and-profile/arm-forge/arm-ddt) |
 
-## General Advices
+## General Advice
 
 - You need to compile your code with the flag `-g` to enable
   debugging. This tells the compiler to include information about
diff --git a/doc.zih.tu-dresden.de/docs/software/distributed_training.md b/doc.zih.tu-dresden.de/docs/software/distributed_training.md
index b3c6733bc0c7150eeee561ec450d33a7db27d54a..4e8fc427e71bd28ad1a3b663aba82d11bad088e6 100644
--- a/doc.zih.tu-dresden.de/docs/software/distributed_training.md
+++ b/doc.zih.tu-dresden.de/docs/software/distributed_training.md
@@ -128,10 +128,10 @@ Each worker runs the training loop independently.
     module load modenv/hiera GCC/10.2.0 CUDA/11.1.1 OpenMPI/4.0.5 TensorFlow/2.4.1
 
     # On the first node
-    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 0, "type": "worker"}}' srun -w ${NODE_1} -N 1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
+    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 0, "type": "worker"}}' srun --nodelist=${NODE_1} --nodes=1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
 
     # On the second node
-    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 1, "type": "worker"}}' srun -w ${NODE_2} -N 1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
+    TF_CONFIG='{"cluster": {"worker": ["'"${NODE_1}"':33562", "'"${NODE_2}"':33561"]}, "task": {"index": 1, "type": "worker"}}' srun --nodelist=${NODE_2} --nodes=1 --ntasks=1 --gres=gpu:1 python main_ddl.py &
 
     wait
     ```
@@ -177,8 +177,8 @@ It is recommended to use
 [DistributedDataParallel](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html),
 instead of this class, to do multi-GPU training, even if there is only a single node.
 See: Use `nn.parallel.DistributedDataParallel` instead of multiprocessing or `nn.DataParallel`.
-Check the [page](https://pytorch.org/docs/stable/notes/cuda.html#cuda-nn-ddp-instead) and
-[Distributed Data Parallel](https://pytorch.org/docs/stable/notes/ddp.html#ddp).
+Check the [PyTorch CUDA page](https://pytorch.org/docs/stable/notes/cuda.html#cuda-nn-ddp-instead)
+and [Distributed Data Parallel](https://pytorch.org/docs/stable/notes/ddp.html#ddp).
 
 ??? example "Parallel Model"
 
diff --git a/doc.zih.tu-dresden.de/docs/software/energy_measurement.md b/doc.zih.tu-dresden.de/docs/software/energy_measurement.md
index 607263056fd593b5f0ae62474a1c63961c8b31aa..3c0e5b2f61634f086c485a0c0defff5cbc0cd43e 100644
--- a/doc.zih.tu-dresden.de/docs/software/energy_measurement.md
+++ b/doc.zih.tu-dresden.de/docs/software/energy_measurement.md
@@ -1,310 +1,217 @@
 # Energy Measurement Infrastructure
 
-All nodes of the HPC machine Taurus are equipped with power
-instrumentation that allow the recording and accounting of power
-dissipation and energy consumption data. The data is made available
-through several different interfaces, which will be described below.
+The Intel Haswell nodes of the ZIH systems are equipped with power instrumentation that allows the
+recording and accounting of power dissipation and energy consumption data. The data is made
+available through several different interfaces, which are described below.
 
-## System Description
-
-The Taurus system is split into two phases. While both phases are
-equipped with energy-instrumented nodes, the instrumentation
-significantly differs in the number of instrumented nodes and their
-spatial and temporal granularity.
-
-### Phase 1
-
-In phase one, the 270 Sandy Bridge nodes are equipped with node-level
-power instrumentation that is stored in the Dataheap infrastructure at a
-rate of 1Sa/s and further the energy consumption of a job is available
-in SLURM (see below).
-
-### Phase 2
-
-In phase two, all of the 1456 Haswell DLC nodes are equipped with power
-instrumentation. In addition to the access methods of phase one, users
-will also be able to access the measurements through a C API to get the
-full temporal and spatial resolution, as outlined below:
+## Summary of Measurement Interfaces
 
--   ** Blade:**1 kSa/s for the whole node, includes both sockets, DRAM,
-    SSD, and other on-board consumers. Since the system is directly
-    water cooled, no cooling components are included in the blade
-    consumption.
--   **Voltage regulators (VR):** 100 Sa/s for each of the six VR
-    measurement points, one for each socket and four for eight DRAM
-    lanes (two lanes bundled).
+| Interface                                  | Sensors         | Rate                                        |
+|:-------------------------------------------|:----------------|:--------------------------------------------|
+| Dataheap (C, Python, VampirTrace, Score-P) | Blade, (CPU)    | 1 sample/s                                  |
+| HDEEM\* (C, Score-P)                       | Blade, CPU, DDR | 1000 samples/s (Blade), 100 samples/s (VR)  |
+| HDEEM Command Line Interface               | Blade, CPU, DDR | 1000 samples/s (Blade), 100 samples/s (VR)  |
+| Slurm Accounting (`sacct`)                 | Blade           | Per Job Energy                              |
+| Slurm Profiling (HDF5)                     | Blade           | Up to 1 sample/s                            |
 
-The GPU blades of each Phase as well as the Phase I Westmere partition
-also have 1 Sa/s power instrumentation but have a lower accuracy.
+!!! note
 
-HDEEM is now generally available on all nodes in the "haswell"
-partition.
+    Please specify `--partition=haswell --exclusive` along with your job request if you wish to use
+    HDEEM.
 
-## Summary of Measurement Interfaces
+### Accuracy, Temporal and Spatial Resolution
 
-| Interface                                  | Sensors         | Rate                            | Phase I | Phase II Haswell |
-|:-------------------------------------------|:----------------|:--------------------------------|:--------|:-----------------|
-| Dataheap (C, Python, VampirTrace, Score-P) | Blade, (CPU)    | 1 Sa/s                          | yes     | yes              |
-| HDEEM\* (C, Score-P)                       | Blade, CPU, DDR | 1 kSa/s (Blade), 100 Sa/s (VRs) | no      | yes              |
-| HDEEM Command Line Interface               | Blade, CPU, DDR | 1 kSa/s (Blade), 100 Sa/s (VR)  | no      | yes              |
-| SLURM Accounting (sacct)                   | Blade           | Per Job Energy                  | yes     | yes              |
-| SLURM Profiling (hdf5)                     | Blade           | up to 1 Sa/s                    | yes     | yes              |
+In addition to the above-mentioned interfaces, you can access the measurements through a
+[C API](#using-the-hdeem-c-api) to get the full temporal and spatial resolution:
 
-Note: Please specify `-p haswell --exclusive` along with your job
-request if you wish to use hdeem.
+- **Blade:** 1000 samples/s for the whole node, includes both sockets, DRAM,
+  SSD, and other on-board consumers. Since the system is directly
+  water cooled, no cooling components are included in the blade
+  consumption.
+- **Voltage regulators (VR):** 100 samples/s for each of the six VR
+  measurement points, one for each socket and four for eight DRAM
+  lanes (two lanes bundled).
 
-## Accuracy
+The GPU blades also have 1 sample/s power instrumentation but have a lower accuracy.
 
-HDEEM measurements have an accuracy of 2 % for Blade (node)
-measurements, and 5 % for voltage regulator (CPU, DDR) measurements.
+HDEEM measurements have an accuracy of 2 % for Blade (node) measurements, and 5 % for voltage
+regulator (CPU, DDR) measurements.
 
 ## Command Line Interface
 
-The HDEEM infrastructure can be controlled through command line tools
-that are made available by loading the **hdeem** module. They are
-commonly used on the node under test to start, stop, and query the
-measurement device.
-
--   **startHdeem**: Start a measurement. After the command succeeds, the
-    measurement data with the 1000 / 100 Sa/s described above will be
-    recorded on the Board Management Controller (BMC), which is capable
-    of storing up to 8h of measurement data.
--   **stopHdeem**: Stop a measurement. No further data is recorded and
-    the previously recorded data remains available on the BMC.
--   **printHdeem**: Read the data from the BMC. By default, the data is
-    written into a CSV file, whose name can be controlled using the
-    **-o** argument.
--   **checkHdeem**: Print the status of the measurement device.
--   **clearHdeem**: Reset and clear the measurement device. No further
-    data can be read from the device after this command is executed
-    before a new measurement is started.
+The HDEEM infrastructure can be controlled through command line tools that are made available by
+loading the `hdeem` module. They are commonly used on the node under test to start, stop, and
+query the measurement device.
+
+- `startHdeem`: Start a measurement. After the command succeeds, the
+  measurement data with the 1000 / 100 samples/s described above will be
+  recorded on the Board Management Controller (BMC), which is capable
+  of storing up to 8h of measurement data.
+- `stopHdeem`: Stop a measurement. No further data is recorded and
+  the previously recorded data remains available on the BMC.
+- `printHdeem`: Read the data from the BMC. By default, the data is
+  written into a CSV file, whose name can be controlled using the
+  `-o` argument.
+- `checkHdeem`: Print the status of the measurement device.
+- `clearHdeem`: Reset and clear the measurement device. No further
+  data can be read from the device after this command is executed
+  before a new measurement is started.
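+
+For example, a typical measurement sequence on the node under test might look like the following
+sketch. It assumes that the `hdeem` module is loaded as described above; the output file name
+`my_measurement.csv` is only illustrative.
+
+```console
+marie@haswell$ module load hdeem
+marie@haswell$ startHdeem                         # start recording on the BMC
+marie@haswell$ srun ./application                 # run your workload
+marie@haswell$ stopHdeem                          # stop recording
+marie@haswell$ printHdeem -o my_measurement.csv   # write the recorded data to a CSV file
+marie@haswell$ clearHdeem                         # reset and clear the measurement device
+```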
 
 ## Integration in Application Performance Traces
 
-The per-node power consumption data can be included as metrics in
-application traces by using the provided metric plugins for Score-P (and
-VampirTrace). The plugins are provided as modules and set all necessary
-environment variables that are required to record data for all nodes
-that are part of the current job.
-
-For 1 Sa/s Blade values (Dataheap):
-
--   [Score-P](ScoreP): use the module **`scorep-dataheap`**
--   [VampirTrace](VampirTrace): use the module
-    **vampirtrace-plugins/power-1.1**
-
-For 1000 Sa/s (Blade) and 100 Sa/s (CPU{0,1}, DDR{AB,CD,EF,GH}):
-
--   [Score-P](ScoreP): use the module **\<span
-    class="WYSIWYG_TT">scorep-hdeem\</span>**\<br />Note: %ENDCOLOR%This
-    module requires a recent version of "scorep/sync-...". Please use
-    the latest that fits your compiler & MPI version.**\<br />**
--   [VampirTrace](VampirTrace): not supported
-
-By default, the modules are set up to record the power data for the
-nodes they are used on. For further information on how to change this
-behavior, please use module show on the respective module.
-
-    # Example usage with gcc
-    % module load scorep/trunk-2016-03-17-gcc-xmpi-cuda7.5
-    % module load scorep-dataheap
-    % scorep gcc application.c -o application
-    % srun ./application
-
-Once the application is finished, a trace will be available that allows
-you to correlate application functions with the component power
-consumption of the parallel application. Note: For energy measurements,
-only tracing is supported in Score-P/VampirTrace. The modules therefore
-disables profiling and enables tracing, please use [Vampir](Vampir) to
-view the trace.
-
-\<img alt="demoHdeem_high_low_vampir_3.png" height="262"
-src="%ATTACHURL%/demoHdeem_high_low_vampir_3.png" width="695" />
-
-%RED%Note<span class="twiki-macro ENDCOLOR"></span>: the power
-measurement modules **`scorep-dataheap`** and **`scorep-hdeem`** are
-dynamic and only need to be loaded during execution. However,
-**`scorep-hdeem`** does require the application to be linked with a
-certain version of Score-P.
-
-By default,** `scorep-dataheap`**records all sensors that are available.
-Currently this is the total node consumption and for Phase II the CPUs.
-**`scorep-hdeem`** also records all available sensors (node, 2x CPU, 4x
-DDR) by default. You can change the selected sensors by setting the
-environment variables:
-
-    # For HDEEM
-    % export SCOREP_METRIC_HDEEM_PLUGIN=Blade,CPU*
-    # For Dataheap
-    % export SCOREP_METRIC_DATAHEAP_PLUGIN=localhost/watts
-
-For more information on how to use Score-P, please refer to the
-[respective documentation](ScoreP).
+The per-node power consumption data can be included as metrics in application traces by using the
+provided metric plugins for Score-P (and VampirTrace). The plugins are provided as modules and set
+all necessary environment variables that are required to record data for all nodes that are part of
+the current job.
 
-## Access Using Slurm Tools
+For 1 sample/s Blade values (Dataheap):
 
-[Slurm](Slurm) maintains its own database of job information, including
-energy data. There are two main ways of accessing this data, which are
-described below.
+- [Score-P](scorep.md): use the module `scorep-dataheap`
+- [VampirTrace](../archive/vampirtrace.md): use the module `vampirtrace-plugins/power-1.1`
+  (**Remark:** VampirTrace is outdated!)
 
-### Post-Mortem Per-Job Accounting
+For 1000 samples/s (Blade) and 100 samples/s (CPU{0,1}, DDR{AB,CD,EF,GH}):
 
-This is the easiest way of accessing information about the energy
-consumed by a job and its job steps. The Slurm tool `sacct` allows users
-to query post-mortem energy data for any past job or job step by adding
-the field `ConsumedEnergy` to the `--format` parameter:
+- [Score-P](scorep.md): use the module `scorep-hdeem`. This
+  module requires a recent version of `scorep/sync-...`. Please use
+  the latest that fits your compiler and MPI version.
 
-    $&gt; sacct --format="jobid,jobname,ntasks,submit,start,end,ConsumedEnergy,nodelist,state" -j 3967027
-           JobID    JobName   NTasks              Submit               Start                 End ConsumedEnergy        NodeList      State 
-    ------------ ---------- -------- ------------------- ------------------- ------------------- -------------- --------------- ---------- 
-    3967027            bash          2014-01-07T12:25:42 2014-01-07T12:25:52 2014-01-07T12:41:20                    taurusi1159  COMPLETED 
-    3967027.0         sleep        1 2014-01-07T12:26:07 2014-01-07T12:26:07 2014-01-07T12:26:18              0     taurusi1159  COMPLETED 
-    3967027.1         sleep        1 2014-01-07T12:29:06 2014-01-07T12:29:06 2014-01-07T12:29:16          1.67K     taurusi1159  COMPLETED 
-    3967027.2         sleep        1 2014-01-07T12:33:25 2014-01-07T12:33:25 2014-01-07T12:33:36          1.84K     taurusi1159  COMPLETED 
-    3967027.3         sleep        1 2014-01-07T12:34:06 2014-01-07T12:34:06 2014-01-07T12:34:11          1.09K     taurusi1159  COMPLETED 
-    3967027.4         sleep        1 2014-01-07T12:38:03 2014-01-07T12:38:03 2014-01-07T12:39:44         18.93K     taurusi1159  COMPLETED  
+By default, the modules are set up to record the power data for the nodes they are used on. For
+further information on how to change this behavior, please use `module show` on the respective module.
 
-The job consisted of 5 job steps, each executing a sleep of a different
-length. Note that the ConsumedEnergy metric is only applicable to
-exclusive jobs.
+!!! example "Example usage with `gcc`"
 
-### 
+    ```console
+    marie@haswell$ module load scorep/trunk-2016-03-17-gcc-xmpi-cuda7.5
+    marie@haswell$ module load scorep-dataheap
+    marie@haswell$ scorep gcc application.c -o application
+    marie@haswell$ srun ./application
+    ```
 
-### Slurm Energy Profiling
+Once the application is finished, a trace will be available that allows you to correlate application
+functions with the component power consumption of the parallel application.
 
-The `srun` tool offers several options for profiling job steps by adding
-the `--profile` parameter. Possible profiling options are `All`,
-`Energy`, `Task`, `Lustre`, and `Network`. In all cases, the profiling
-information is stored in an hdf5 file that can be inspected using
-available hdf5 tools, e.g., `h5dump`. The files are stored under
-`/scratch/profiling/` for each job, job step, and node. A description of
-the data fields in the file can be found
-[here](http://slurm.schedmd.com/hdf5_profile_user_guide.html#HDF5). In
-general, the data files contain samples of the current **power**
-consumption on a per-second basis:
-
-    $&gt; srun -p sandy --acctg-freq=2,energy=1 --profile=energy  sleep 10 
-    srun: job 3967674 queued and waiting for resources
-    srun: job 3967674 has been allocated resources
-    $&gt; h5dump /scratch/profiling/jschuch/3967674_0_taurusi1073.h5
-    [...]
-                   DATASET "Energy_0000000002 Data" {
-                      DATATYPE  H5T_COMPOUND {
-                         H5T_STRING {
-                            STRSIZE 24;
-                            STRPAD H5T_STR_NULLTERM;
-                            CSET H5T_CSET_ASCII;
-                            CTYPE H5T_C_S1;
-                         } "Date_Time";
-                         H5T_STD_U64LE "Time";
-                         H5T_STD_U64LE "Power";
-                         H5T_STD_U64LE "CPU_Frequency";
-                      }
-                      DATASPACE  SIMPLE { ( 1 ) / ( 1 ) }
-                      DATA {
-                      (0): {
-                            "",
-                            1389097545,  # timestamp
-                            174,         # power value
-                            1
-                         }
-                      }
-                   }
-
-## 
+!!! note
 
-## Using the HDEEM C API
+    For energy measurements, only tracing is supported in Score-P/VampirTrace.
+    The modules therefore disable profiling and enable tracing;
+    please use [Vampir](vampir.md) to view the trace.
+
+![Energy measurements in Vampir](misc/energy_measurements-vampir.png)
+{: align="center"}
+
+!!! note
 
-Note: Please specify -p haswell --exclusive along with your job request
-if you wish to use hdeem.
+    The power measurement modules `scorep-dataheap` and `scorep-hdeem` are dynamic and only
+    need to be loaded during execution. However, `scorep-hdeem` does require the application to
+    be linked with a certain version of Score-P.
 
-Please download the offical documentation at \<font face="Calibri"
-size="2"> [\<font
-color="#0563C1">\<u>http://www.bull.com/download-hdeem-library-reference-guide\</u>\</font>](http://www.bull.com/download-hdeem-library-reference-guide)\</font>
+By default, `scorep-dataheap` records all sensors that are available. Currently this is the total
+node consumption and the CPUs. `scorep-hdeem` also records all available sensors
+(node, 2x CPU, 4x DDR) by default. You can change the selected sensors by setting the environment
+variables:
 
-The HDEEM headers and sample code are made available via the hdeem
-module. To find the location of the hdeem installation use
 
-    % module show hdeem
-    ------------------------------------------------------------------- 
-    /sw/modules/taurus/libraries/hdeem/2.1.9ms: 
 
-    conflict         hdeem  
-    module-whatis    Load hdeem version 2.1.9ms  
-    prepend-path     PATH /sw/taurus/libraries/hdeem/2.1.9ms/include  
-    setenv           HDEEM_ROOT /sw/taurus/libraries/hdeem/2.1.9ms 
-    -------------------------------------------------------------------
+??? hint "For HDEEM"
+    `export SCOREP_METRIC_HDEEM_PLUGIN=Blade,CPU*`
 
-You can find an example of how to use the API under
-\<span>$HDEEM_ROOT/sample.\</span>
+??? hint "For Dataheap"
+    `export SCOREP_METRIC_DATAHEAP_PLUGIN=localhost/watts`
 
-## Access Using the Dataheap Infrastructure
+For more information on how to use Score-P, please refer to the [respective documentation](scorep.md).
 
-In addition to the energy accounting data that is stored by Slurm, this
-information is also written into our local data storage and analysis
-infrastructure called
-[Dataheap](http://tu-dresden.de/die_tu_dresden/zentrale_einrichtungen/zih/forschung/projekte/dataheap/).
-From there, the data can be used in various ways, such as including it
-into application performance trace data or querying through a Python
-interface.
+## Access Using Slurm Tools
 
-The Dataheap infrastructure is designed to store various types of
-time-based samples from different data sources. In the case of the
-energy measurements on Taurus, the data is stored as a timeline of power
-values which allows the reconstruction of the power and energy
-consumption over time. The timestamps are stored as UNIX timestamps with
-a millisecond granularity. The data is stored for each node in the form
-of `nodename/watts`, e.g., `taurusi1073/watts`. Further metrics might
-already be available or might be added in the future for which
-information is available upon request.
+[Slurm](../jobs_and_resources/slurm.md) maintains its own database of job information, including
+energy data. There are two main ways of accessing this data, which are described below.
 
-**Note**: The dataheap infrastructure can only be accessed from inside
-the university campus network.
+### Post-Mortem Per-Job Accounting
 
-### Using the Python Interface
+This is the easiest way of accessing information about the energy consumed by a job and its job
+steps. The Slurm tool `sacct` allows users to query post-mortem energy data for any past job or job
+step by adding the field `ConsumedEnergy` to the `--format` parameter:
+
+```console
+marie@login$ sacct --format="jobid,jobname,ntasks,submit,start,end,ConsumedEnergy,nodelist,state" -j 3967027
+       JobID    JobName   NTasks              Submit               Start                 End ConsumedEnergy        NodeList      State
+------------ ---------- -------- ------------------- ------------------- ------------------- -------------- --------------- ----------
+3967027            bash          2014-01-07T12:25:42 2014-01-07T12:25:52 2014-01-07T12:41:20                    taurusi1159  COMPLETED
+3967027.0         sleep        1 2014-01-07T12:26:07 2014-01-07T12:26:07 2014-01-07T12:26:18              0     taurusi1159  COMPLETED
+3967027.1         sleep        1 2014-01-07T12:29:06 2014-01-07T12:29:06 2014-01-07T12:29:16          1.67K     taurusi1159  COMPLETED
+3967027.2         sleep        1 2014-01-07T12:33:25 2014-01-07T12:33:25 2014-01-07T12:33:36          1.84K     taurusi1159  COMPLETED
+3967027.3         sleep        1 2014-01-07T12:34:06 2014-01-07T12:34:06 2014-01-07T12:34:11          1.09K     taurusi1159  COMPLETED
+3967027.4         sleep        1 2014-01-07T12:38:03 2014-01-07T12:38:03 2014-01-07T12:39:44         18.93K     taurusi1159  COMPLETED
+```
+
+This example job consisted of 5 job steps, each executing a sleep of a different length. Note that the
+`ConsumedEnergy` metric is only applicable to exclusive jobs.
 
-The module `dataheap/1.0` provides a Python module that can be used to
-query the data in the Dataheap for personalized data analysis. The
-following is an example of how to use the interface:
+### Slurm Energy Profiling
 
-    import time
-    import os
-    from dhRequest import dhClient
+The `srun` tool offers several options for profiling job steps by adding the `--profile` parameter.
+Possible profiling options are `All`, `Energy`, `Task`, `Lustre`, and `Network`. In all cases, the
+profiling information is stored in an HDF5 file that can be inspected using available HDF5 tools,
+e.g., `h5dump`. The files are stored under `/scratch/profiling/` for each job, job step, and node. A
+description of the data fields in the file can be found
+[in the official documentation](http://slurm.schedmd.com/hdf5_profile_user_guide.html#HDF5).
+In general, the data files
+contain samples of the current **power** consumption on a per-second basis:
+
+```console
+marie@login$ srun --partition=haswell64 --acctg-freq=2,energy=1 --profile=energy sleep 10
+srun: job 3967674 queued and waiting for resources
+srun: job 3967674 has been allocated resources
+marie@login$ h5dump /scratch/profiling/marie/3967674_0_taurusi1073.h5
+[...]
+  DATASET "Energy_0000000002 Data" {
+    DATATYPE  H5T_COMPOUND {
+      H5T_STRING {
+        STRSIZE 24;
+        STRPAD H5T_STR_NULLTERM;
+        CSET H5T_CSET_ASCII;
+        CTYPE H5T_C_S1;
+      } "Date_Time";
+      H5T_STD_U64LE "Time";
+      H5T_STD_U64LE "Power";
+      H5T_STD_U64LE "CPU_Frequency";
+    }
+    DATASPACE  SIMPLE { ( 1 ) / ( 1 ) }
+    DATA {
+    (0): {
+        "",
+        1389097545,  # timestamp
+        174,         # power value
+        1
+      }
+    }
+  }
+```
 
-    # Connect to the dataheap manager
-    dhc = dhClient()
-    dhc.connect(os.environ['DATAHEAP_MANAGER_ADDR'], int(os.environ['DATAHEAP_MANAGER_PORT']))
+## Using the HDEEM C API
 
-    # take timestamps
-    tbegin = dhc.getTimeStamp() 
-    # workload
-    os.system("srun -n 6 a.out")
-    tend   = dhc.getTimeStamp()
+Please specify `--partition=haswell --exclusive` along with your job request if you wish to use HDEEM.
 
-    # wait for the data to get to the
-    # dataheap
-    time.sleep(5)
+Please download the official documentation at
+[http://www.bull.com/download-hdeem-library-reference-guide](http://www.bull.com/download-hdeem-library-reference-guide).
 
-    # replace this with name of the node the job ran on
-    # Note: use multiple requests if the job used multiple nodes
-    countername = "taurusi1159/watts"
+The HDEEM header and sample code are locally installed on the nodes.
 
-    # query the dataheap
-    integral = dhc.storageRequest("INTEGRAL(%d,%d,\"%s\", 0)"%(tbegin, tend, countername))
-    # Remember: timestamps are stored in millisecond UNIX timestamps
-    energy   = integral/1000
+??? hint "HDEEM header location"
 
-    print energy
+    `/usr/include/hdeem.h`
 
-    timeline = dhc.storageRequest("TIMELINE(%d,%d,\"%s\", 0)"%(tbegin, tend, countername))
+??? hint "HDEEM sample location"
 
-    # output a list of all timestamp/power-value pairs
-    print timeline
+    `/usr/share/hdeem/sample/`
 
-## More information and Citing
+## Further Information and Citing
 
-More information can be found in the paper \<a
-href="<http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7016382>"
-title="HDEEM Paper E2SC 2014">HDEEM: high definition energy efficiency
-monitoring\</a> by Hackenberg et al. Please cite this paper if you are
-using HDEEM for your scientific work.
+More information can be found in the paper
+[HDEEM: high definition energy efficiency monitoring](http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7016382)
+by Daniel Hackenberg et al. Please cite this paper if you are using HDEEM for your scientific work.
diff --git a/doc.zih.tu-dresden.de/docs/software/fem_software.md b/doc.zih.tu-dresden.de/docs/software/fem_software.md
index 160aeded633f50e9abfdfae6d74a7627257ca565..af6b9fb80986e2bc727ae88e97b2cca614ffd629 100644
--- a/doc.zih.tu-dresden.de/docs/software/fem_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/fem_software.md
@@ -8,7 +8,9 @@
     ```console
     marie@login$ module avail ANSYS
     [...]
-    marie@login$ module load ANSYS/<version>
+    marie@login$ # module load ANSYS/<version>
+    marie@login$ # e.g.
+    marie@login$ module load ANSYS/2020R2
     ```
 
     The section [runtime environment](modules.md) provides a comprehensive overview
@@ -59,7 +61,7 @@ Slurm or [writing job files](../jobs_and_resources/slurm.md#job-files).
     #SBATCH --job-name=yyyy         # give a name, what ever you want
     #SBATCH --mail-type=END,FAIL    # send email when the job finished or failed
     #SBATCH --mail-user=<name>@mailbox.tu-dresden.de  # set your email
-    #SBATCH -A p_xxxxxxx            # charge compute time to your project
+    #SBATCH --account=p_number_crunch       # charge compute time to project p_number_crunch
 
 
     # Abaqus has its own MPI
@@ -75,7 +77,7 @@ Slurm or [writing job files](../jobs_and_resources/slurm.md#job-files).
     ```
     4. Control the status of the job
     ```
-    marie@login squeue -u your_login     # in column "ST" (Status) you will find a R=Running or P=Pending (waiting for resources)
+    marie@login$ squeue --me     # in column "ST" (Status) you will find R=Running or P=Pending (waiting for resources)
     ```
 
 ## Ansys
@@ -105,7 +107,9 @@ all data via `-C`.
 
 ```console
 # SSH connection established using -CX
-marie@login$ module load ANSYS/<version>
+marie@login$ # module load ANSYS/<version>
+marie@login$ # e.g.
+marie@login$ module load ANSYS/2020R2
 marie@login$ runwb2
 ```
 
@@ -113,8 +117,10 @@ If more time is needed, a CPU has to be allocated like this (see
 [batch systems Slurm](../jobs_and_resources/slurm.md) for further information):
 
 ```console
-marie@login$ module load ANSYS/<version>
-marie@login$ srun -t 00:30:00 --x11=first [SLURM_OPTIONS] --pty bash
+marie@login$ # module load ANSYS/<version>
+marie@login$ # e.g.
+marie@login$ module load ANSYS/2020R2
+marie@login$ srun --time=00:30:00 --x11=first [SLURM_OPTIONS] --pty bash
 [...]
 marie@login$ runwb2
 ```
@@ -153,7 +159,9 @@ parameter (for batch mode), `-F` for your project file, and can then either add
 
     unset SLURM_GTIDS              # Odd, but necessary!
 
-    module load ANSYS/<version>
+    # module load ANSYS/<version>
+    # e.g.
+    module load ANSYS/2020R2
 
     runwb2 -B -F Workbench_Taurus.wbpj -E 'Project.Update' -E 'Save(Overwrite=True)'
     #or, if you wish to use a workbench replay file, replace the -E parameters with: -R mysteps.wbjn
@@ -162,7 +170,7 @@ parameter (for batch mode), `-F` for your project file, and can then either add
 ### Running Workbench in Parallel
 
 Unfortunately, the number of CPU cores you wish to use cannot simply be given as a command line
-parameter to your `runwb2` call. Instead, you have to enter it into an XML file in your `home`
+parameter to your `runwb2` call. Instead, you have to enter it into an XML file in your home
 directory. This setting will then be **used for all** your `runwb2` jobs. While it is also possible
 to edit this setting via the Mechanical GUI, experience shows that this can be problematic via
 X11-forwarding and we only managed to use the GUI properly via [DCV](virtual_desktops.md), so we
@@ -208,7 +216,7 @@ firewall of ZIH. For further information, please refer to the COMSOL manual.
 
     ```console
     marie@login$ module load COMSOL
-    marie@login$ srun -n 1 -c 4 --mem-per-cpu=2500 -t 8:00 comsol -np 4 server
+    marie@login$ srun --ntasks=1 --cpus-per-task=4 --mem-per-cpu=2500 --time=08:00:00 comsol -np 4 server
     ```
 
 ??? example "Interactive Job"
@@ -218,7 +226,7 @@ firewall of ZIH. For further information, please refer to the COMSOL manual.
 
     ```console
     marie@login$ module load COMSOL
-    marie@login$ srun -n 1 -c 4 --mem-per-cpu=2500 -t 8:00 --pty --x11=first comsol -np 4
+    marie@login$ srun --ntasks=1 --cpus-per-task=4 --mem-per-cpu=2500 --time=08:00:00 --pty --x11=first comsol -np 4
     ```
 
     Please make sure, that the option *Preferences* --> Graphics --> *Renedering* is set to *software
@@ -264,10 +272,10 @@ You need a job file (aka. batch script) to run the MPI version.
     srun mpp-dyna i=neon_refined01_30ms.k memory=120000000
     ```
 
-    Submit the job file to the batch system via
+    Submit the job file named `job.sh` to the batch system via
 
     ```console
-    marie@login$ sbatch <filename>
+    marie@login$ sbatch job.sh
     ```
 
     Please refer to the section [Slurm](../jobs_and_resources/slurm.md) for further details and
diff --git a/doc.zih.tu-dresden.de/docs/software/gpu_programming.md b/doc.zih.tu-dresden.de/docs/software/gpu_programming.md
index 070176efcb2ab0f463da30675841ade0e0a585a3..3911c94f8f8b65d6ef9ce6090867da132d22414d 100644
--- a/doc.zih.tu-dresden.de/docs/software/gpu_programming.md
+++ b/doc.zih.tu-dresden.de/docs/software/gpu_programming.md
@@ -1,5 +1,89 @@
 # GPU Programming
 
+## Available GPUs
+
+The full hardware specifications of the GPU-compute nodes may be found in the
+[HPC Resources](../jobs_and_resources/hardware_overview.md#hpc-resources) page.
+Each node uses a different [module environment](modules.md#module-environments):
+
+* [NVIDIA Tesla K80 GPU nodes](../jobs_and_resources/hardware_overview.md#island-2-phase-2-intel-haswell-cpus-nvidia-k80-gpus)
+(partition `gpu2`): use the default `scs5` module environment (`module switch modenv/scs5`).
+* [NVIDIA Tesla V100 nodes](../jobs_and_resources/hardware_overview.md#ibm-power9-nodes-for-machine-learning)
+(partition `ml`): use the `ml` module environment (`module switch modenv/ml`).
+* [NVIDIA A100 nodes](../jobs_and_resources/hardware_overview.md#amd-rome-cpus-nvidia-a100)
+(partition `alpha`): use the `hiera` module environment (`module switch modenv/hiera`).
+
+## Using GPUs with Slurm
+
+For general information on how to use Slurm, read the respective [page in this compendium](../jobs_and_resources/slurm.md).
+When allocating resources on a GPU-node, you must specify the number of requested GPUs by using the
+`--gres=gpu:<N>` option, like this:
+
+=== "partition `gpu2`"
+    ```bash
+    #!/bin/bash                           # Batch script starts with shebang line
+
+    #SBATCH --ntasks=1                    # All #SBATCH lines have to follow uninterrupted
+    #SBATCH --time=01:00:00               # after the shebang line
+    #SBATCH --account=<KTR>               # Comments start with # and do not count as interruptions
+    #SBATCH --job-name=fancyExp
+    #SBATCH --output=simulation-%j.out
+    #SBATCH --error=simulation-%j.err
+    #SBATCH --partition=gpu2
+    #SBATCH --gres=gpu:1                  # request GPU(s) from Slurm
+
+    module purge                          # Set up environment, e.g., clean modules environment
+    module switch modenv/scs5             # switch module environment
+    module load <modules>                 # and load necessary modules
+
+    srun ./application [options]          # Execute parallel application with srun
+    ```
+=== "partition `ml`"
+    ```bash
+    #!/bin/bash                           # Batch script starts with shebang line
+
+    #SBATCH --ntasks=1                    # All #SBATCH lines have to follow uninterrupted
+    #SBATCH --time=01:00:00               # after the shebang line
+    #SBATCH --account=<KTR>               # Comments start with # and do not count as interruptions
+    #SBATCH --job-name=fancyExp
+    #SBATCH --output=simulation-%j.out
+    #SBATCH --error=simulation-%j.err
+    #SBATCH --partition=ml
+    #SBATCH --gres=gpu:1                  # request GPU(s) from Slurm
+
+    module purge                          # Set up environment, e.g., clean modules environment
+    module switch modenv/ml               # switch module environment
+    module load <modules>                 # and load necessary modules
+
+    srun ./application [options]          # Execute parallel application with srun
+    ```
+=== "partition `alpha`"
+    ```bash
+    #!/bin/bash                           # Batch script starts with shebang line
+
+    #SBATCH --ntasks=1                    # All #SBATCH lines have to follow uninterrupted
+    #SBATCH --time=01:00:00               # after the shebang line
+    #SBATCH --account=<KTR>               # Comments start with # and do not count as interruptions
+    #SBATCH --job-name=fancyExp
+    #SBATCH --output=simulation-%j.out
+    #SBATCH --error=simulation-%j.err
+    #SBATCH --partition=alpha
+    #SBATCH --gres=gpu:1                  # request GPU(s) from Slurm
+
+    module purge                          # Set up environment, e.g., clean modules environment
+    module switch modenv/hiera            # switch module environment
+    module load <modules>                 # and load necessary modules
+
+    srun ./application [options]          # Execute parallel application with srun
+    ```
+
+Alternatively, you can work on the partitions interactively:
+
+```bash
+marie@login$ srun --partition=<partition>-interactive --gres=gpu:<N> --pty bash
+marie@compute$ module purge; module switch modenv/<env>
+```
+
 ## Directive Based GPU Programming
 
 Directives are special compiler commands in your C/C++ or Fortran source code. They tell the
@@ -8,36 +92,281 @@ technique.
 
 ### OpenACC
 
-[OpenACC](http://www.openacc-standard.org) is a directive based GPU programming model. It currently
+[OpenACC](https://www.openacc.org) is a directive based GPU programming model. It currently
 only supports NVIDIA GPUs as a target.
 
 Please use the following information as a start on OpenACC:
 
-Introduction
+#### Introduction
 
-OpenACC can be used with the PGI and CAPS compilers. For PGI please be sure to load version 13.4 or
-newer for full support for the NVIDIA Tesla K20x GPUs at ZIH.
+OpenACC can be used with the PGI and NVIDIA HPC compilers. The NVIDIA HPC compiler, as part of the
+[NVIDIA HPC SDK](https://docs.nvidia.com/hpc-sdk/index.html), supersedes the PGI compiler.
+
+Various versions of the PGI compiler are available on the
+[NVIDIA Tesla K80 GPU nodes](../jobs_and_resources/hardware_overview.md#island-2-phase-2-intel-haswell-cpus-nvidia-k80-gpus)
+(partition `gpu2`).
+
+The `nvc` compiler (NOT the `nvcc` compiler, which is used for CUDA) is available for the NVIDIA
+Tesla V100 and NVIDIA A100 nodes.
 
 #### Using OpenACC with PGI compilers
 
+* Load the latest version via `module load PGI` or search for available versions with
+`module search PGI`
 * For compilation, please add the compiler flag `-acc` to enable OpenACC interpreting by the
-  compiler;
-* `-Minfo` tells you what the compiler is actually doing to your code;
-* If you only want to use the created binary at ZIH resources, please also add `-ta=nvidia:keple`;
-* OpenACC Tutorial: intro1.pdf, intro2.pdf.
+  compiler
+* `-Minfo` tells you what the compiler is actually doing to your code
+* Add `-ta=nvidia:kepler` to enable optimizations for the K80 GPUs
+* You may find further information on the PGI compiler in the
+[user guide](https://docs.nvidia.com/hpc-sdk/pgi-compilers/20.4/x86/pgi-user-guide/index.htm)
+and in the [reference guide](https://docs.nvidia.com/hpc-sdk/pgi-compilers/20.4/x86/pgi-ref-guide/index.htm),
+which includes descriptions of available
+[command line options](https://docs.nvidia.com/hpc-sdk/pgi-compilers/20.4/x86/pgi-ref-guide/index.htm#cmdln-options-ref)
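+
+Putting these options together, a minimal compile-and-run sketch on the partition `gpu2` using the
+PGI C compiler `pgcc` could look like this (the source file name `saxpy.c` is only illustrative):
+
+```bash
+marie@compute$ module load PGI                  # load the PGI compiler
+marie@compute$ pgcc -acc -Minfo saxpy.c -o saxpy  # compile the OpenACC code
+marie@compute$ ./saxpy                          # run on the allocated GPU node
+```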
+
+#### Using OpenACC with NVIDIA HPC compilers
+
+* Switch into the correct module environment for your selected compute nodes
+(see [list of available GPUs](#available-gpus))
+* Load the `NVHPC` module for the correct module environment.
+Either load the default (`module load NVHPC`) or search for a specific version.
+* Use the correct compiler for your code: `nvc` for C, `nvc++` for C++ and `nvfortran` for Fortran
+* Use the `-acc` and `-Minfo` flags as with the PGI compiler
+* To create optimized code for either the V100 or A100, use `-gpu=cc70` or `-gpu=cc80`, respectively
+* Further information on this compiler is provided in the
+[user guide](https://docs.nvidia.com/hpc-sdk/compilers/hpc-compilers-user-guide/index.html) and the
+[reference guide](https://docs.nvidia.com/hpc-sdk/compilers/hpc-compilers-ref-guide/index.html),
+which includes descriptions of available
+[command line options](https://docs.nvidia.com/hpc-sdk/compilers/hpc-compilers-ref-guide/index.html#cmdln-options-ref)
+* Information specific to the use of OpenACC with the NVIDIA HPC compiler is compiled in a
+[guide](https://docs.nvidia.com/hpc-sdk/compilers/openacc-gs/index.html)
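+
+For instance, a minimal sketch for the partition `alpha` (A100), again with an illustrative source
+file name:
+
+```bash
+marie@compute$ module switch modenv/hiera         # switch to the hiera module environment
+marie@compute$ module load NVHPC                  # load the NVIDIA HPC SDK
+marie@compute$ nvc -acc -gpu=cc80 -Minfo saxpy.c -o saxpy
+marie@compute$ ./saxpy
+```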
+
+### OpenMP target offloading
+
+[OpenMP](https://www.openmp.org/) supports target offloading as of version 4.0. A dedicated set of
+compiler directives can be used to annotate code-sections that are intended for execution on the
+GPU (i.e., target offloading). Not all compilers with OpenMP support offer target offloading; refer
+to the [official list](https://www.openmp.org/resources/openmp-compilers-tools/) for details.
+Furthermore, some compilers, such as GCC, have basic support for target offloading, but do not
+enable these features by default and/or achieve poor performance.
+
+On the ZIH system, compilers with OpenMP target offloading support are provided on the partitions
+`ml` and `alpha`. Two compilers with good performance can be used: the NVIDIA HPC compiler and the
+IBM XL compiler.
+
+#### Using OpenMP target offloading with NVIDIA HPC compilers
+
+* Load the module environments and the NVIDIA HPC SDK as described in the
+[OpenACC](#using-openacc-with-nvidia-hpc-compilers) section
+* Use the `-mp=gpu` flag to enable OpenMP with offloading
+* `-Minfo` tells you what the compiler is actually doing to your code
+* The same compiler options as mentioned [above](#using-openacc-with-nvidia-hpc-compilers) are
+available for OpenMP, including the `-gpu=ccXY` flag.
+* OpenMP-specific advice may be found in the
+[respective section in the user guide](https://docs.nvidia.com/hpc-sdk/compilers/hpc-compilers-user-guide/#openmp-use)
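+
+A minimal sketch, analogous to the OpenACC example above and again with an illustrative source file
+name:
+
+```bash
+marie@compute$ module load NVHPC
+marie@compute$ nvc -mp=gpu -gpu=cc80 -Minfo my_offload_code.c -o my_offload_code
+```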
 
-### HMPP
+#### Using OpenMP target offloading with the IBM XL compilers
 
-HMPP is available from the CAPS compilers.
+The IBM XL compilers (`xlc` for C, `xlc++` for C++ and `xlf` for Fortran (with sub-version for
+different versions of Fortran)) are only available on the partition `ml` with NVIDIA Tesla V100 GPUs.
+They are available by default when switching to `modenv/ml`.
+
+* The `-qsmp -qoffload` combination of flags enables OpenMP target offloading support
+* Optimizations specific to the V100 GPUs can be enabled by using the
+[`-qtgtarch=sm_70`](https://www.ibm.com/docs/en/xl-c-and-cpp-linux/16.1.1?topic=descriptions-qtgtarch)
+flag.
+* IBM provides a [XL compiler documentation](https://www.ibm.com/docs/en/xl-c-and-cpp-linux/16.1.1)
+with a
+[list of supported OpenMP directives](https://www.ibm.com/docs/en/xl-c-and-cpp-linux/16.1.1?topic=reference-pragma-directives-openmp-parallelization)
+and information on
+[target-offloading specifics](https://www.ibm.com/docs/en/xl-c-and-cpp-linux/16.1.1?topic=gpus-programming-openmp-device-constructs)
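+
+A corresponding sketch for the partition `ml` (V100), with an illustrative source file name:
+
+```bash
+marie@ml$ xlc -qsmp -qoffload -qtgtarch=sm_70 my_offload_code.c -o my_offload_code
+```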
 
 ## Native GPU Programming
 
 ### CUDA
 
-Native [CUDA](http://www.nvidia.com/cuda) programs can sometimes offer a better performance. Please
-use the following slides as an introduction:
+Native [CUDA](http://www.nvidia.com/cuda) programs can sometimes offer better performance.
+NVIDIA provides some [introductory material and links](https://developer.nvidia.com/how-to-cuda-c-cpp).
+An [introduction to CUDA](https://developer.nvidia.com/blog/even-easier-introduction-cuda/) is
+provided as well. The [toolkit documentation page](https://docs.nvidia.com/cuda/index.html) links to
+the [programming guide](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html) and the
+[best practice guide](https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html).
+Optimization guides for supported NVIDIA architectures are available, including for
+[Kepler (K80)](https://docs.nvidia.com/cuda/kepler-tuning-guide/index.html),
+[Volta (V100)](https://docs.nvidia.com/cuda/volta-tuning-guide/index.html) and
+[Ampere (A100)](https://docs.nvidia.com/cuda/ampere-tuning-guide/index.html).
+
+In order to compile an application with CUDA, use the `nvcc` compiler command, which is described in
+detail in the [nvcc documentation](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html).
+This compiler is available via several `CUDA` packages; a default version can be loaded via
+`module load CUDA`. Additionally, the `NVHPC` modules provide CUDA tools as well.
+
+#### Usage of the CUDA compiler
+
+The simple invocation `nvcc <code.cu>` will compile a valid CUDA program. `nvcc` differentiates
+between the device and the host code, which will be compiled in separate phases. Therefore, compiler
+options can be defined specifically for the device as well as for the host code. By default, the GCC
+is used as the host compiler. The following flags may be useful:
+
+* `--generate-code` (`-gencode`): generate optimized code for a target GPU (caution: these binaries
+cannot be used with GPUs of other generations).
+    * For Kepler (K80): `--generate-code arch=compute_37,code=sm_37`,
+    * For Volta (V100): `--generate-code arch=compute_70,code=sm_70`,
+    * For Ampere (A100): `--generate-code arch=compute_80,code=sm_80`
+* `-Xcompiler`: pass flags to the host compiler. E.g., generate OpenMP-parallel host code:
+`-Xcompiler -fopenmp`.
+The `-Xcompiler` flag has to be invoked for each host-flag
+
+## Performance Analysis
+
+Consult NVIDIA's [Best Practices Guide](https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html)
+and the [performance guidelines](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#performance-guidelines)
+for possible steps to take for the performance analysis and optimization.
+
+Multiple tools can be used for the performance analysis.
+For the analysis of applications on the older K80 GPUs, we recommend two
+[profiler tools](https://docs.nvidia.com/cuda/profiler-users-guide/index.html):
+the NVIDIA [nvprof](https://docs.nvidia.com/cuda/profiler-users-guide/index.html#nvprof-overview)
+command line profiler and the
+[NVIDIA Visual Profiler](https://docs.nvidia.com/cuda/profiler-users-guide/index.html#visual)
+as the accompanying graphical profiler. These tools will be deprecated in future CUDA releases but
+are still available in CUDA <= 11. On the newer GPUs (V100 and A100), we recommend the use of the
+newer NVIDIA Nsight tools, [Nsight Systems](https://developer.nvidia.com/nsight-systems) for
+system-wide sampling and tracing and [Nsight Compute](https://developer.nvidia.com/nsight-compute)
+for a detailed analysis of individual kernels.
+
+### NVIDIA nvprof & Visual Profiler
+
+The `nvprof` command line tool and the Visual Profiler are available once a CUDA module has been
+loaded. For a simple analysis, you can call `nvprof` without any options, like this:
+
+```bash
+marie@compute$ nvprof ./application [options]
+```
+
+For a more in-depth analysis, we recommend you use the command line tool first to generate a report
+file, which you can later analyze in the Visual Profiler. In order to collect a set of general
+metrics for the analysis in the Visual Profiler, use the `--analysis-metrics` flag to collect
+metrics and `--export-profile` to generate a report file, like this:
+
+```bash
+marie@compute$ nvprof --analysis-metrics --export-profile <output>.nvvp ./application [options]
+```
+
+[Transfer the report file to your local system](../data_transfer/export_nodes.md) and analyze it in
+the Visual Profiler (`nvvp`) locally. This will give the smoothest user experience. Alternatively,
+you can use [X11-forwarding](../access/ssh_login.md). Refer to the documentation for details about
+the individual
+[features and views of the Visual Profiler](https://docs.nvidia.com/cuda/profiler-users-guide/index.html#visual-views).
+
+Besides these generic analysis methods, you can profile specific aspects of your GPU kernels.
+`nvprof` can profile specific events. For this, use
+
+```bash
+marie@compute$ nvprof --query-events
+```
+
+to get a list of available events.
+Analyze one or more events by specifying them, separated by commas:
+
+```bash
+marie@compute$ nvprof --events <event_1>[,<event_2>[,...]] ./application [options]
+```
+
+Additionally, you can analyze specific metrics.
+Similar to the profiling of events, you can get a list of available metrics:
+
+```bash
+marie@compute$ nvprof --query-metrics
+```
+
+One or more metrics can be profiled at the same time:
+
+```bash
+marie@compute$ nvprof --metrics <metric_1>[,<metric_2>[,...]] ./application [options]
+```
+
+If you want to limit the profiler's scope to one or more kernels, you can use the
+`--kernels <kernel_1>[,<kernel_2>]` flag. For further command line options, refer to the
+[documentation on command line options](https://docs.nvidia.com/cuda/profiler-users-guide/index.html#nvprof-command-line-options).
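+
+For instance, the following sketch restricts the collection of one metric to one kernel (kernel and
+metric names are placeholders):
+
+```console
+marie@compute$ nvprof --kernels <kernel_1> --metrics <metric_1> ./application [options]
+```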
+
+### NVIDIA Nsight Systems
+
+Use [NVIDIA Nsight Systems](https://developer.nvidia.com/nsight-systems) for a system-wide sampling
+of your code. Refer to the
+[NVIDIA Nsight Systems User Guide](https://docs.nvidia.com/nsight-systems/UserGuide/index.html) for
+details. With this, you can identify parts of your code that take a long time to run and are
+suitable optimization candidates.
+
+Use the command-line version to sample your code and create a report file for later analysis:
+
+```bash
+marie@compute$ nsys profile [--stats=true] ./application [options]
+```
+
+The `--stats=true` flag is optional and will create a summary on the command line. Depending on your
+needs, this analysis may be sufficient to identify optimization targets.
+
+The graphical user interface version can be used for a thorough analysis of your previously
+generated report file. For an optimal user experience, we recommend a local installation of NVIDIA
+Nsight Systems. In this case, you can
+[transfer the report file to your local system](../data_transfer/export_nodes.md).
+Alternatively, you can use [X11-forwarding](../access/ssh_login.md). The graphical user interface is
+usually available as `nsys-ui`.
+
+Furthermore, you can use the command line interface for further analyses. Refer to the
+documentation for a
+[list of available command line options](https://docs.nvidia.com/nsight-systems/UserGuide/index.html#cli-options).
+
+### NVIDIA Nsight Compute
+
+Nsight Compute is used for the analysis of individual GPU-kernels. It supports GPUs from the Volta
+architecture onward (on the ZIH system: V100 and A100). Therefore, you cannot use Nsight Compute on
+the partition `gpu2`. If you are familiar with nvprof, you may want to consult the
+[Nvprof Transition Guide](https://docs.nvidia.com/nsight-compute/NsightComputeCli/index.html#nvprof-guide),
+as Nsight Compute uses a new scheme for metrics.
+We recommend choosing those kernels as optimization targets that account for a large portion of your
+runtime, according to Nsight Systems. Nsight Compute is particularly useful for CUDA code, as you
+have much greater control over your code compared to the directive-based approaches.
+
+Nsight Compute comes in a
+[command line](https://docs.nvidia.com/nsight-compute/NsightComputeCli/index.html)
+and a [graphical version](https://docs.nvidia.com/nsight-compute/NsightCompute/index.html).
+Refer to the
+[Kernel Profiling Guide](https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html)
+to get an overview of the functionality of these tools.
+
+You can call the command line version (`ncu`) without further options to get a broad overview of
+your kernel's performance:
+
+```bash
+marie@compute$ ncu ./application [options]
+```
+
+As with the other profiling tools, the Nsight Compute profiler can generate report files like this:
+
+```bash
+marie@compute$ ncu --export <report> ./application [options]
+```
+
+The report file will automatically get the file ending `.ncu-rep`; you do not need to specify this
+manually.
+
+This report file can be analyzed in the graphical user interface profiler. Again, we recommend you
+generate a report file on a compute node and
+[transfer the report file to your local system](../data_transfer/export_nodes.md).
+Alternatively, you can use [X11-forwarding](../access/ssh_login.md). The graphical user interface is
+usually available as `ncu-ui` or `nv-nsight-cu`.
+
+Similar to the `nvprof` profiler, you can analyze specific metrics. NVIDIA provides a
+[Metrics Guide](https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-guide). Use
+`--query-metrics` to get a list of available metrics, listing them by base name. Individual metrics
+can be collected by using
+
+```bash
+marie@compute$ ncu --metrics <metric_1>[,<metric_2>,...] ./application [options]
+```
 
-* Introduction to CUDA;
-* Advanced Tuning for NVIDIA Kepler GPUs.
+Collection of events is no longer possible with Nsight Compute. Instead, many nvprof events can be
+[measured with metrics](https://docs.nvidia.com/nsight-compute/NsightComputeCli/index.html#nvprof-event-comparison).
 
-In order to compile an application with CUDA use the `nvcc` compiler command.
+You can collect metrics for individual kernels by specifying the `--kernel-name` flag.
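+
+A minimal sketch, with kernel and metric names as placeholders:
+
+```console
+marie@compute$ ncu --kernel-name <kernel_1> --metrics <metric_1> ./application [options]
+```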
diff --git a/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md b/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
index 8f61fe49fd56642aaded82cf711ca92d0035b99f..688ada0e2aabf973f545d54b1c15168de98aa912 100644
--- a/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
+++ b/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
@@ -354,11 +354,13 @@ In order to look into the results, there are the following basic approaches.
         top of the graphic (see red arrow on the image above).
 
     After creating a 2D scatter plot or a parallel plot, OmniOpt will try to display the
-    corresponding file (`html`, `png`) directly on the ZIH system. Therefore, it is necessary to
-    login via ssh with the option `-X` (X11 forwarding), e.g., `ssh -X taurus.hrsk.tu-dresden.de`.
-    Nevertheless, because of latency using x11 forwarding, it is recommended to download the created
-    files and explore them on the local machine (esp. for the parallel plot). The created files are
-    saved at `projects/<name_of_optimization_run>/{2d-scatterplots,parallel-plot}`.
+    corresponding file (`html`, `png`) directly on the ZIH system. Therefore, X11 forwarding must be
+    enabled, either by [SSH configuration
+    ](../access/ssh_login.md#configuring-default-parameters-for-ssh) or by using `ssh -X taurus`
+    while logging in. Nevertheless, because of latency using X11 forwarding, it is recommended to
+    download the created files and explore them on the local machine (esp. for the parallel plot).
+    The created files are saved at
+    `projects/<name_of_optimization_run>/{2d-scatterplots,parallel-plot}`.
 
 1. **Getting the raw data:**
     As a second approach, the raw data of the optimization process can be exported as a CSV file.
diff --git a/doc.zih.tu-dresden.de/docs/software/licenses.md b/doc.zih.tu-dresden.de/docs/software/licenses.md
index 3173cf98a1b9987c87a74e5175fc7746236613d9..5eface00968891d163982e3ed836a87a56b927c8 100644
--- a/doc.zih.tu-dresden.de/docs/software/licenses.md
+++ b/doc.zih.tu-dresden.de/docs/software/licenses.md
@@ -1,7 +1,7 @@
-# Use of External Licenses
+# External Licenses
 
 It is possible (please [contact the support team](../support/support.md) first) for users to install
-their own software and use their own license servers, e.g.  FlexLM. The outbound IP addresses from
+their own software and use their own license servers, e.g. FlexLM. The outbound IP addresses from
 ZIH systems are:
 
 - compute nodes: NAT via 141.76.3.193
@@ -17,5 +17,33 @@ by environment variable or file.
 
     If you are using software we have installed, but bring your own license key (e.g.
     commercial ANSYS), make sure that to substitute the environment variables we are using as default!
-    (To verify this, run `printenv|grep licserv` and make sure that you dont' see entries refering to
+    (To verify this, run `printenv|grep licenses` and make sure that you don't see entries referring to
     our ZIH license server.)
+
+## How to Adjust the License Setting
+
+Most programs that work with the FlexLM license manager can be instructed to use another license
+server by overwriting the environment variable `LM_LICENSE_FILE`.
+Note that not all proprietary software honors this environment variable.
+
+!!! example "Changing the license server"
+    ```console
+    marie@compute$ export LM_LICENSE_FILE=12345@example.com
+    ```
+    Here "12345" is the port on which the license server is listening,
+    while "example.com" is the network addresss of the license server.
+
+Some licensed software comes with a license file instead. In that case, point `LM_LICENSE_FILE` to
+the path of the license file:
+
+!!! example "Changing license"
+    ```bash
+    export LM_LICENSE_FILE=<SOME_PATH>
+    ```
+
+    Example:
+
+    ```console
+    export LM_LICENSE_FILE=$HOME/mylicense.dat
+    ```
diff --git a/doc.zih.tu-dresden.de/docs/software/machine_learning.md b/doc.zih.tu-dresden.de/docs/software/machine_learning.md
index f2e5f24aa9f4f8e5f8fb516310b842584d30a614..e293b007a9c07fbaf41ba3ec7ce25f29024f44d7 100644
--- a/doc.zih.tu-dresden.de/docs/software/machine_learning.md
+++ b/doc.zih.tu-dresden.de/docs/software/machine_learning.md
@@ -13,7 +13,7 @@ The main feature of the nodes is the ability to work with the
 [NVIDIA Tesla V100](https://www.nvidia.com/en-gb/data-center/tesla-v100/) GPU with **NV-Link**
 support that allows a total bandwidth with up to 300 GB/s. Each node on the
 partition ML has 6x Tesla V-100 GPUs. You can find a detailed specification of the partition in our
-[Power9 documentation](../jobs_and_resources/power9.md).
+[Power9 documentation](../jobs_and_resources/hardware_overview.md).
 
 !!! note
 
@@ -155,7 +155,7 @@ The following HPC related software is installed on all nodes:
 There are many different datasets designed for research purposes. If you would like to download some
 of them, keep in mind that many machine learning libraries have direct access to public datasets
 without downloading it, e.g. [TensorFlow Datasets](https://www.tensorflow.org/datasets). If you
-still need to download some datasets use [datamover](../data_transfer/datamover.md) machine.
+still need to download some datasets, use the [Datamover](../data_transfer/datamover.md) machine.
 
 ### The ImageNet Dataset
 
diff --git a/doc.zih.tu-dresden.de/docs/software/math_libraries.md b/doc.zih.tu-dresden.de/docs/software/math_libraries.md
index 9801fc83866c0f12025b67c0f2b6e7e3434df640..2bbb25a70d7a2455e4dc86d18c7ab810c0294ab0 100644
--- a/doc.zih.tu-dresden.de/docs/software/math_libraries.md
+++ b/doc.zih.tu-dresden.de/docs/software/math_libraries.md
@@ -1,4 +1,4 @@
-# Math Libraries
+# Mathematics Libraries
 
 Many software heavily relies on math libraries, e.g., for linear algebra or FFTW calculations.
 Writing portable and fast math functions is a really challenging task. You can try it for fun, but you
@@ -59,7 +59,7 @@ libraries tuned specifically for AMD EPYC processor family. AOCL offers linear a
  as well as
  [AMD Random Number Generator Library](https://developer.amd.com/amd-cpu-libraries/rng-library/)
  and
- [AMD Secure RNG Libraary](https://developer.amd.com/amd-cpu-libraries/rng-library/#securerng).
+ [AMD Secure RNG Library](https://developer.amd.com/amd-cpu-libraries/rng-library/#securerng).
 
 ## Math Kernel Library (MKL)
 
diff --git a/doc.zih.tu-dresden.de/docs/software/mathematics.md b/doc.zih.tu-dresden.de/docs/software/mathematics.md
index 5b8e23b2fd3ed373bdf7bf6394ae3b2faf98ce74..66fdc7050b050b77b899c83133d73758ac2dced6 100644
--- a/doc.zih.tu-dresden.de/docs/software/mathematics.md
+++ b/doc.zih.tu-dresden.de/docs/software/mathematics.md
@@ -22,10 +22,15 @@ font manager.
 You need to copy the fonts from ZIH systems to your local system and expand the font path
 
 ```console
-marie@local$ scp -r taurus.hrsk.tu-dresden.de:/sw/global/applications/mathematica/10.0/SystemFiles/Fonts/Type1/ ~/.fonts
+marie@local$ scp -r taurusexport:/sw/global/applications/mathematica/10.0/SystemFiles/Fonts/Type1/ ~/.fonts
 marie@local$ xset fp+ ~/.fonts/Type1
 ```
 
+!!! important "SCP command"
+
+    The previous SCP command requires that you have already set up your [SSH configuration
+    ](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+
 #### Windows Workstation
 
 You have to add additional Mathematica fonts at your local PC
@@ -100,7 +105,7 @@ marie@compute$ matlab
 ```
 
 With following command you can see a list of installed software - also
-the different versions of matlab.
+the different versions of MATLAB.
 
 ```console
 marie@login$ module avail
@@ -118,7 +123,7 @@ Or use:
 marie@login$ module load MATLAB
 ```
 
-(then you will get the most recent Matlab version.
+(then you will get the most recent MATLAB version.
 [Refer to the modules section for details.](../software/modules.md#modules))
 
 ### Interactive
@@ -130,7 +135,7 @@ with command
 marie@login$ srun --pty --x11=first bash
 ```
 
-- now you can call "matlab" (you have 8h time to work with the matlab-GUI)
+- now you can call "matlab" (you have 8h time to work with the MATLAB-GUI)
 
 ### Non-interactive
 
@@ -213,9 +218,347 @@ marie@login$ srun ./run_compiled_executable.sh $EBROOTMATLAB
 
 Please refer to the documentation `help parfor` for further information.
 
-## Octave
+### MATLAB Parallel Computing Toolbox
+
+In the following, the steps to configure MATLAB to submit jobs to a cluster, retrieve results, and
+debug errors are outlined.
+
+#### Configuration - MATLAB Client on the Cluster
+
+After logging into the HPC system, you configure MATLAB to run parallel jobs on the HPC system by
+calling the shell script `configCluster.sh`.  This only needs to be called once per version of
+MATLAB.
+
+```console
+marie@login$ module load MATLAB
+marie@login$ configCluster.sh
+```
+
+Jobs will now default to the HPC system rather than submit to the local machine.
+
+#### Installation and Configuration - MATLAB Client off the Cluster
+
+The MATLAB support package for ZIH Systems can be found as follows:
+
+* Windows:
+    * [tud.nonshared.R2021b.zip](misc/tud.nonshared.R2021b.zip)
+    * [tud.nonshared.R2022a.zip](misc/tud.nonshared.R2022a.zip)
+* Linux/macOS:
+    * [tud.nonshared.R2021b.tar.gz](misc/tud.nonshared.R2021b.tar.gz)
+    * [tud.nonshared.R2022a.tar.gz](misc/tud.nonshared.R2022a.tar.gz)
+
+Download the appropriate archive file and start MATLAB. The archive file should be extracted
+in the location returned by calling
+
+```matlabsession
+>> userpath
+```
+
+Configure MATLAB to run parallel jobs on ZIH Systems by calling `configCluster`. `configCluster`
+only needs to be called once per version of MATLAB.
+
+```matlabsession
+>> configCluster
+```
+
+Submission to the remote cluster requires SSH credentials. You will be prompted for your SSH
+username and password or identity file (private key). The username and location of the private key
+will be stored in MATLAB for future sessions. Jobs will now default to the cluster rather than
+submit to the local machine.
+
+!!! note
+
+    If you would like to submit to the local machine then run the following command:
+
+    ```matlab
+    >> % Get a handle to the local resources
+    >> c = parcluster('local');
+    ```
+
+#### Configuring Jobs
+
+Prior to submitting the job, you can specify various parameters to pass to your jobs, such as queue,
+e-mail, walltime, etc. *Only `MemPerCpu` and `WallTime` are required*.
+
+```matlabsession
+>> % Get a handle to the cluster
+>> c = parcluster;
+
+[REQUIRED]
+
+>> % Specify memory to use, per core (default: 2gb)
+>> c.AdditionalProperties.MemPerCpu = '4gb';
+
+>> % Specify the walltime (e.g., 5 hours)
+>> c.AdditionalProperties.WallTime = '05:00:00';
+
+[OPTIONAL]
+
+>> % Specify the account to use
+>> c.AdditionalProperties.Account = 'account-name';
+
+>> % Request constraint
+>> c.AdditionalProperties.Constraint = 'a-constraint';
+
+>> % Request job to run on exclusive node(s) (default: false)
+>> c.AdditionalProperties.EnableExclusive = true;
+
+>> % Request email notification of job status
+>> c.AdditionalProperties.EmailAddress = 'user-id@tu-dresden.de';
+
+>> % Specify number of GPUs to use (GpuType is optional)
+>> c.AdditionalProperties.GpusPerNode = 1;
+>> c.AdditionalProperties.GpuType = 'gpu-card';
+
+>> % Specify the queue to use
+>> c.AdditionalProperties.Partition = 'queue-name';
+
+>> % Specify a reservation to use
+>> c.AdditionalProperties.Reservation = 'a-reservation';
+```
+
+Save changes after modifying `AdditionalProperties` for the above changes to persist between MATLAB
+sessions.
+
+```matlabsession
+>> c.saveProfile
+```
+
+To see the values of the current configuration options, display `AdditionalProperties`.
+
+```matlabsession
+>> % To view current properties
+>> c.AdditionalProperties
+```
+
+You can unset a value when no longer needed.
+
+```matlabsession
+>> % Turn off email notifications
+>> c.AdditionalProperties.EmailAddress = '';
+>> c.saveProfile
+```
+
+#### Interactive Jobs - MATLAB Client on the Cluster
+
+To run an interactive pool job on the ZIH systems, continue to use `parpool` as you’ve done before.
+
+```matlabsession
+>> % Get a handle to the cluster
+>> c = parcluster;
+
+>> % Open a pool of 64 workers on the cluster
+>> pool = c.parpool(64);
+```
+
+Rather than running locally on your machine, the pool can now run across multiple nodes on the
+cluster.
+
+```matlabsession
+>> % Run a parfor over 1000 iterations
+>> parfor idx = 1:1000
+      a(idx) = …
+   end
+```
+
+Once you are done with the pool, delete it.
+
+```matlabsession
+>> % Delete the pool
+>> pool.delete
+```
+
+#### Independent Batch Job
+
+Use the `batch` command to submit asynchronous jobs to the HPC system. The `batch` command will return
+a job object which is used to access the output of the submitted job. See the MATLAB documentation
+for more help on `batch`.
+
+```matlabsession
+>> % Get a handle to the cluster
+>> c = parcluster;
+
+>> % Submit job to query where MATLAB is running on the cluster
+>> job = c.batch(@pwd, 1, {},  ...
+       'CurrentFolder','.', 'AutoAddClientPath',false);
+
+>> % Query job for state
+>> job.State
+
+>> % If state is finished, fetch the results
+>> job.fetchOutputs{:}
+
+>> % Delete the job after results are no longer needed
+>> job.delete
+```
+
+To retrieve a list of currently running or completed jobs, call `parcluster` to retrieve the cluster
+object. The cluster object stores an array of jobs that were run, are running, or are queued to
+run. This allows us to fetch the results of completed jobs. Retrieve and view the list of jobs as
+shown below.
+
+```matlabsession
+>> c = parcluster;
+>> jobs = c.Jobs;
+```
+
+Once you have identified the job you want, you can retrieve the results as done previously.
+
+`fetchOutputs` is used to retrieve function output arguments; if calling `batch` with a script, use
+`load` instead. Data that has been written to files on the cluster needs to be retrieved directly
+from the filesystem (e.g. via ftp). To view results of a previously completed job:
+
+```matlabsession
+>> % Get a handle to the job with ID 2
+>> job2 = c.Jobs(2);
+```
+
+!!! note
+
+    You can view a list of your jobs, as well as their IDs, using the above `c.Jobs` command.
+
+    ```matlabsession
+    >> % Fetch results for job with ID 2
+    >> job2.fetchOutputs{:}
+    ```
+
+#### Parallel Batch Job
+
+You can also submit parallel workflows with the `batch` command. Let’s use the following example
+for a parallel job, which is saved as `parallel_example.m`.
+
+```matlab
+function [t, A] = parallel_example(iter)
+
+if nargin==0
+    iter = 8;
+end
+
+disp('Start sim')
+
+t0 = tic;
+parfor idx = 1:iter
+    A(idx) = idx;
+    pause(2)
+    idx
+end
+t = toc(t0);
+
+disp('Sim completed')
+
+save RESULTS A
+
+end
+```
+
+This time, when you use the `batch` command to run a parallel job, you will also specify a MATLAB
+Pool.
+
+```matlabsession
+>> % Get a handle to the cluster
+>> c = parcluster;
+
+>> % Submit a batch pool job using 4 workers for 16 simulations
+>> job = c.batch(@parallel_example, 1, {16}, 'Pool',4, ...
+       'CurrentFolder','.', 'AutoAddClientPath',false);
+
+>> % View current job status
+>> job.State
+
+>> % Fetch the results after a finished state is retrieved
+>> job.fetchOutputs{:}
+ans =
+  8.8872
+```
+
+The job ran in 8.89 seconds using four workers. Note that these jobs will always request N+1 CPU
+cores, since one worker is required to manage the batch job and pool of workers. For example, a
+job that needs eight workers will consume nine CPU cores.
+
+You might run the same simulation but increase the Pool size. This time, to retrieve the results later,
+you will keep track of the job ID.
+
+!!! note
+
+    For some applications, there will be a diminishing return when allocating too many workers, as
+    the overhead may exceed computation time.
+
+    ```matlabsession
+    >> % Get a handle to the cluster
+    >> c = parcluster;
+
+    >> % Submit a batch pool job using 8 workers for 16 simulations
+    >> job = c.batch(@parallel_example, 1, {16}, 'Pool', 8, ...
+           'CurrentFolder','.', 'AutoAddClientPath',false);
+
+    >> % Get the job ID
+    >> id = job.ID
+    id =
+      4
+    >> % Clear job from workspace (as though you quit MATLAB)
+    >> clear job
+    ```
+
+Once you have a handle to the cluster, you can call the `findJob` method to search for the job with
+the specified job ID.
+
+```matlabsession
+>> % Get a handle to the cluster
+>> c = parcluster;
+
+>> % Find the old job
+>> job = c.findJob('ID', 4);
+
+>> % Retrieve the state of the job
+>> job.State
+ans =
+  finished
+>> % Fetch the results
+>> job.fetchOutputs{:};
+ans =
+  4.7270
+```
+
+The job now runs in 4.73 seconds using eight workers. Run the code with different numbers of workers
+to determine the ideal number to use. Alternatively, to retrieve job results via a graphical user
+interface, use the Job Monitor (Parallel > Monitor Jobs).
+
+![Job monitor](misc/matlab_monitor_jobs.png)
+{: summary="Retrieve job results via GUI using the Job Monitor." align="center"}
+
+#### Debugging
+
+If a serial job produces an error, call the `getDebugLog` method to view the error log file. When
+submitting independent jobs with multiple tasks, specify the task number.
+
+```matlabsession
+>> c.getDebugLog(job.Tasks(3))
+```
+
+For Pool jobs, only specify the job object.
+
+```matlabsession
+>> c.getDebugLog(job)
+```
+
+When troubleshooting a job, the cluster admin may request the scheduler ID of the job. This can be
+obtained by calling `schedID`.
+
+```matlabsession
+>> schedID(job)
+ans =
+  25539
+```
+
+#### Further Reading
+
+To learn more about the MATLAB Parallel Computing Toolbox, check out these resources:
 
-GNU [Octave](https://www.gnu.org/software/octave/index) is a high-level language, primarily intended
-for numerical computations. It provides a convenient command line interface for solving linear and
-nonlinear problems numerically, and for performing other numerical experiments using a language that
-is mostly compatible with Matlab. It may also be used as a batch-oriented language.
+* [Parallel Computing Coding
+    Examples](https://www.mathworks.com/help/parallel-computing/examples.html)
+* [Parallel Computing Documentation](http://www.mathworks.com/help/distcomp/index.html)
+* [Parallel Computing Overview](http://www.mathworks.com/products/parallel-computing/index.html)
+* [Parallel Computing
+    Tutorials](http://www.mathworks.com/products/parallel-computing/tutorials.html)
+* [Parallel Computing Videos](http://www.mathworks.com/products/parallel-computing/videos.html)
+* [Parallel Computing Webinars](http://www.mathworks.com/products/parallel-computing/webinars.html)
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/FlinkExample.ipynb b/doc.zih.tu-dresden.de/docs/software/misc/FlinkExample.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..5a867b8750704ea92a318087d82bb0ca3355018d
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/software/misc/FlinkExample.ipynb
@@ -0,0 +1,159 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import sys\n",
+    "!{sys.executable} -m pip install apache-flink --user"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash\n",
+    "echo $FLINK_ROOT_DIR\n",
+    "echo $JAVA_HOME\n",
+    "hostname\n",
+    "if [ ! -d $HOME/jupyter-flink-conf ]\n",
+    "then\n",
+    "cp -r $FLINK_ROOT_DIR/conf $HOME/jupyter-flink-conf\n",
+    "chmod -R u+w $HOME/jupyter-flink-conf\n",
+    "fi"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import sys\n",
+    "import os\n",
+    "os.environ['FLINK_CONF_DIR'] = os.environ['HOME'] + '/cluster-conf-' + os.environ['SLURM_JOBID'] + '/flink'\n",
+    "os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + ':' + os.environ['HOME'] + '/.local/lib/python3.6/site-packages'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!SHELL=/bin/bash bash framework-configure.sh flink $HOME/jupyter-flink-conf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "exitcode = os.system('start-cluster.sh')\n",
+    "if not exitcode:\n",
+    "    print(\"started Flink cluster successful\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash\n",
+    "echo \"This is a short story for you. In this story nothing is happening. Have a nice day!\" > myFlinkTestFile"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pyflink.datastream import StreamExecutionEnvironment\n",
+    "from pyflink.datastream.connectors import FileSource\n",
+    "from pyflink.datastream.connectors import StreamFormat\n",
+    "from pyflink.common.watermark_strategy import WatermarkStrategy\n",
+    "from pyflink.common.typeinfo import Types\n",
+    "\n",
+    "env = StreamExecutionEnvironment.get_execution_environment()\n",
+    "env.set_parallelism(2)\n",
+    "#set the Python executable for the workers\n",
+    "env.set_python_executable(sys.executable)\n",
+    "# define the source\n",
+    "ds = env.from_source(source=FileSource.for_record_stream_format(StreamFormat.text_line_format(),\n",
+    "                                               \"myFlinkTestFile\").process_static_file_set().build(),\n",
+    "                     watermark_strategy=WatermarkStrategy.for_monotonous_timestamps(),\n",
+    "                     source_name=\"file_source\")\n",
+    "\n",
+    "def split(line):\n",
+    "    yield from line.split()\n",
+    "\n",
+    "    \n",
+    "# compute word count\n",
+    "ds = ds.flat_map(split) \\\n",
+    "    .map(lambda i: (i, 1), output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \\\n",
+    "    .key_by(lambda i: i[0]) \\\n",
+    "    .reduce(lambda i, j: (i[0], i[1] + j[1])) \\\n",
+    "    .map(lambda i: print(i))\n",
+    "\n",
+    "# submit for execution\n",
+    "env.execute()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash\n",
+    "stop-cluster.sh"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!ps -ef | grep -i java"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pkill -f \"java\""
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/Pytorch_jupyter_module.png b/doc.zih.tu-dresden.de/docs/software/misc/Pytorch_jupyter_module.png
index 5f3e324da2114dc24382f57dfeb14c10554d60f5..dfb61c25d0221f90a21acaccecfe470da1d2dbf4 100644
Binary files a/doc.zih.tu-dresden.de/docs/software/misc/Pytorch_jupyter_module.png and b/doc.zih.tu-dresden.de/docs/software/misc/Pytorch_jupyter_module.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/SparkExample.ipynb b/doc.zih.tu-dresden.de/docs/software/misc/SparkExample.ipynb
index 67eb37e898667946a0a6dbdf60bc104908e12601..959b536b85dd3d5d01c79217b697506a7517d4f3 100644
--- a/doc.zih.tu-dresden.de/docs/software/misc/SparkExample.ipynb
+++ b/doc.zih.tu-dresden.de/docs/software/misc/SparkExample.ipynb
@@ -1,5 +1,24 @@
 {
  "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import sys\n",
+    "!{sys.executable} -m pip install findspark --user"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!which python"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -27,7 +46,8 @@
     "import sys\n",
     "import os\n",
     "os.environ['PYSPARK_PYTHON'] = sys.executable\n",
-    "os.environ['SPARK_CONF_DIR'] = os.environ['HOME'] + '/cluster-conf-' + os.environ['SLURM_JOBID'] + '/spark'"
+    "os.environ['SPARK_CONF_DIR'] = os.environ['HOME'] + '/cluster-conf-' + os.environ['SLURM_JOBID'] + '/spark'\n",
+    "os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + ':' + os.environ['HOME'] + '/.local/lib/python3.6/site-packages'"
    ]
   },
   {
@@ -48,6 +68,16 @@
     "!start-all.sh"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import findspark\n",
+    "findspark.init(os.environ['SPARK_HOME'])"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -116,20 +146,13 @@
    "source": [
     "!pkill -f \"pyspark-shell\""
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "haswell-py3.7-spark",
+   "display_name": "Python 3",
    "language": "python",
-   "name": "haswell-py3.7-spark"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -141,7 +164,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.4"
+   "version": "3.6.10"
   }
  },
  "nbformat": 4,
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/energy_measurements-vampir.png b/doc.zih.tu-dresden.de/docs/software/misc/energy_measurements-vampir.png
new file mode 100644
index 0000000000000000000000000000000000000000..68bdbe318fc451ebb25a1938b70bb21905ad4358
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/energy_measurements-vampir.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/matlab_monitor_jobs.png b/doc.zih.tu-dresden.de/docs/software/misc/matlab_monitor_jobs.png
new file mode 100644
index 0000000000000000000000000000000000000000..c91906e819495e345da69f80192ea3b8fee0a248
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/matlab_monitor_jobs.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/start-virtual-desktop-dcv.mp4 b/doc.zih.tu-dresden.de/docs/software/misc/start-virtual-desktop-dcv.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..0df2dd469ef4ab479652862d71bf156a7a6fc98d
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/start-virtual-desktop-dcv.mp4 differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/start-virtual-desktop-dcv.webm b/doc.zih.tu-dresden.de/docs/software/misc/start-virtual-desktop-dcv.webm
new file mode 100644
index 0000000000000000000000000000000000000000..7e728464f18518a3f62e6b5eac0134dfcb2609b2
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/start-virtual-desktop-dcv.webm differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/terminate-virtual-desktop-dcv.mp4 b/doc.zih.tu-dresden.de/docs/software/misc/terminate-virtual-desktop-dcv.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..a5e10e440e2e615dc0d822a4cf5dc6c907559636
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/terminate-virtual-desktop-dcv.mp4 differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/terminate-virtual-desktop-dcv.webm b/doc.zih.tu-dresden.de/docs/software/misc/terminate-virtual-desktop-dcv.webm
new file mode 100644
index 0000000000000000000000000000000000000000..38d74c3e9f43f33c9f9207a623203b96a13c4469
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/terminate-virtual-desktop-dcv.webm differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2021b.tar.gz b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2021b.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..a4a30943f36ee4ebb5ad94c635be49a016f1eadd
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2021b.tar.gz differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2021b.zip b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2021b.zip
new file mode 100644
index 0000000000000000000000000000000000000000..02118ef5354a1f972321bde558a3e2bb08a5b6af
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2021b.zip differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2022a.tar.gz b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2022a.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..a4160a5f33f094f340eb20c3e140687170864609
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2022a.tar.gz differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2022a.zip b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2022a.zip
new file mode 100644
index 0000000000000000000000000000000000000000..481ab9a1d1a18515abcda82fbdb7d1ab3b580a5e
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/tud.nonshared.R2022a.zip differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/zsh_autocd.png b/doc.zih.tu-dresden.de/docs/software/misc/zsh_autocd.png
new file mode 100644
index 0000000000000000000000000000000000000000..1d30a13f2dcc3af6e706fe8849aff6ee0739a76c
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/zsh_autocd.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/zsh_autocomplete_parameters.png b/doc.zih.tu-dresden.de/docs/software/misc/zsh_autocomplete_parameters.png
new file mode 100644
index 0000000000000000000000000000000000000000..374e34a84ee88d6c0c9d47c47af609d01fc2c63c
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/zsh_autocomplete_parameters.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/zsh_autosuggestion.png b/doc.zih.tu-dresden.de/docs/software/misc/zsh_autosuggestion.png
new file mode 100644
index 0000000000000000000000000000000000000000..872ed226a3f66e78063ad610e5edd8c0463a2922
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/zsh_autosuggestion.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/zsh_syntax_highlighting.png b/doc.zih.tu-dresden.de/docs/software/misc/zsh_syntax_highlighting.png
new file mode 100644
index 0000000000000000000000000000000000000000..0e1e888c2bab317d1309289c07582dc08cdd1858
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/zsh_syntax_highlighting.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/misc/zsh_typo.png b/doc.zih.tu-dresden.de/docs/software/misc/zsh_typo.png
new file mode 100644
index 0000000000000000000000000000000000000000..de04ba3d061cfb3c402e8b6d02bd7f60698e69c8
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/software/misc/zsh_typo.png differ
diff --git a/doc.zih.tu-dresden.de/docs/software/modules.md b/doc.zih.tu-dresden.de/docs/software/modules.md
index fb9107b5d362ca348987e848a663de7586fb6a72..b4aa437d270b4dda1a64f655d3c8a9db9238df2c 100644
--- a/doc.zih.tu-dresden.de/docs/software/modules.md
+++ b/doc.zih.tu-dresden.de/docs/software/modules.md
@@ -156,7 +156,7 @@ The command `module spider <modname>` allows searching for a specific software a
 environments. It will also display information on how to load a particular module when giving a precise
 module (with version) as the parameter.
 
-??? example
+??? example "Spider command"
 
     ```console
     marie@login$ module spider p7zip
@@ -179,6 +179,54 @@ module (with version) as the parameter.
     ----------------------------------------------------------------------------------------------------------------------------------------------------------
     ```
 
+In some cases, a desired software package is available as an extension of a module.
+
+??? example "Extension module"
+    ```console  hl_lines="9"
+    marie@login$ module spider tensorboard
+
+    --------------------------------------------------------------------------------------------------------------------------------
+    tensorboard:
+    --------------------------------------------------------------------------------------------------------------------------------
+    Versions:
+        tensorboard/2.4.1 (E)
+
+    Names marked by a trailing (E) are extensions provided by another module.
+    [...]
+    ```
+
+    You can retrieve further information using the `spider` command.
+
+    ```console
+    marie@login$  module spider tensorboard/2.4.1
+
+    --------------------------------------------------------------------------------------------------------------------------------
+    tensorboard: tensorboard/2.4.1 (E)
+    --------------------------------------------------------------------------------------------------------------------------------
+    This extension is provided by the following modules. To access the extension you must load one of the following modules. Note that any module names in parentheses show the module location in the software hierarchy.
+
+        TensorFlow/2.4.1 (modenv/hiera GCC/10.2.0 CUDA/11.1.1 OpenMPI/4.0.5)
+        TensorFlow/2.4.1-fosscuda-2019b-Python-3.7.4 (modenv/ml)
+        TensorFlow/2.4.1-foss-2020b (modenv/scs5)
+
+    Names marked by a trailing (E) are extensions provided by another module.
+    ```
+
+    Finally, you can load the dependencies and the module providing `tensorboard/2.4.1`, and check
+    the version.
+
+    ```console
+    marie@login$ module load modenv/hiera GCC/10.2.0 CUDA/11.1.1 OpenMPI/4.0.5
+
+    The following have been reloaded with a version change:
+        1) modenv/scs5 => modenv/hiera
+
+    Module GCC/10.2.0, CUDA/11.1.1, OpenMPI/4.0.5 and 15 dependencies loaded.
+    marie@login$ module load TensorFlow/2.4.1
+    Module TensorFlow/2.4.1 and 34 dependencies loaded.
+    marie@login$ tensorboard --version
+    2.4.1
+    ```
+
 ## Per-Architecture Builds
 
 Since we have a heterogeneous cluster, we do individual builds of some of the software for each
diff --git a/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md b/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md
index b083e80cf9962a01a6580f8b5393912ebd2c3f40..a26a8c6ee9595129b32ee56db2040e7cbb11ca7a 100644
--- a/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md
+++ b/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md
@@ -1,4 +1,4 @@
-# Correctness Checking and Usage Error Detection for MPI Parallel Applications
+# MPI Error Detection
 
 MPI as the de-facto standard for parallel applications of the message passing paradigm offers
 more than one hundred different API calls with complex restrictions. As a result, developing
diff --git a/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md b/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md
index 7b40b9e480755b099c866f0dec2d9a707ea684af..9727d2d35f03fa334a79385ffd960625fc958348 100644
--- a/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md
+++ b/doc.zih.tu-dresden.de/docs/software/nanoscale_simulations.md
@@ -1,4 +1,4 @@
-# Nanoscale Modeling Tools
+# Nanoscale Simulations
 
 ## ABINIT
 
@@ -81,8 +81,8 @@ For runs with [Slurm](../jobs_and_resources/slurm.md), please use a script like
 
 ```Bash
 #!/bin/bash
-#SBATCH -t 120
-#SBATCH -n 8
+#SBATCH --time=120
+#SBATCH --ntasks=8
 #SBATCH --ntasks-per-node=2
 ## you have to make sure that an even number of tasks runs on each node !!
 #SBATCH --mem-per-cpu=1900
diff --git a/doc.zih.tu-dresden.de/docs/software/papi.md b/doc.zih.tu-dresden.de/docs/software/papi.md
index 2de80b4e8a0f420a6b42cd01a3de027b5fb89be2..d8108bba3048da33661e0dd320a2807a0dd001aa 100644
--- a/doc.zih.tu-dresden.de/docs/software/papi.md
+++ b/doc.zih.tu-dresden.de/docs/software/papi.md
@@ -105,13 +105,14 @@ multiple events, please check which events can be measured concurrently using th
     The PAPI tools must be run on the compute node, using an interactive shell or job.
 
 !!! example "Example: Determine the events on the partition `romeo` from a login node"
+    Let us assume that you are in the project `p_number_crunch`. Then, use the following commands:
 
     ```console
     marie@login$ module load PAPI
-    marie@login$ salloc -A <project> --partition=romeo
+    marie@login$ salloc --account=p_number_crunch --partition=romeo
     [...]
-    marie@login$ srun papi_avail
-    marie@login$ srun papi_native_avail
+    marie@compute$ srun papi_avail
+    marie@compute$ srun papi_native_avail
     [...]
     # Exit with Ctrl+D
     ```
@@ -120,12 +121,13 @@ Instrument your application with either the high-level or low-level API. Load th
 compile your application against the  PAPI library.
 
 !!! example
+    Assuming that you are in project `p_number_crunch`, use the following commands:
 
     ```console
     marie@login$ module load PAPI
     marie@login$ gcc app.c -o app -lpapi
-    marie@login$ salloc -A <project> --partition=romeo
-    marie@login$ srun ./app
+    marie@login$ salloc --account=p_number_crunch --partition=romeo
+    marie@compute$ srun ./app
     [...]
     # Exit with Ctrl+D
     ```
diff --git a/doc.zih.tu-dresden.de/docs/software/perf_tools.md b/doc.zih.tu-dresden.de/docs/software/perf_tools.md
index 83398f49cb68a3255e051ae866a3679124559bef..2db805a12f96e3daad253ea43e5030ad275cfb12 100644
--- a/doc.zih.tu-dresden.de/docs/software/perf_tools.md
+++ b/doc.zih.tu-dresden.de/docs/software/perf_tools.md
@@ -1,20 +1,23 @@
-# Introduction
+# Perf Tools
 
-`perf` consists of two parts: the kernel space implementation and the userland tools. This wiki
-entry focusses on the latter. These tools are installed on ZIH systems, and others and provides
-support for sampling applications and reading performance counters.
+The Linux `perf` command provides support for sampling applications and reading performance
+counters. `perf` consists of two parts: the kernel space implementation and the userland tools.
+This compendium page focusses on the latter.
+
+For detailed information, please refer to the [perf
+documentation](https://perf.wiki.kernel.org/index.php/Main_Page) and the comprehensive
+[perf examples page](https://www.brendangregg.com/perf.html) by Brendan Gregg.
 
 ## Configuration
 
 Admins can change the behaviour of the perf tools kernel part via the
 following interfaces
 
-|                                             |                                                                                                                                   |
-|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|
 | File Name                                   | Description                                                                                                                       |
-| `/proc/sys/kernel/perf_event_max_sample_rate` | describes the maximal sample rate for perf record and native access. This is used to limit the performance influence of sampling. |
-| `/proc/sys/kernel/perf_event_mlock_kb`        | defines the number of pages that can be used for sampling via perf record or the native interface                                 |
-| `/proc/sys/kernel/perf_event_paranoid`        | defines access rights:                                                                                                            |
+|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|
+| `/proc/sys/kernel/perf_event_max_sample_rate` | Describes the maximal sample rate for perf record and native access. This is used to limit the performance influence of sampling. |
+| `/proc/sys/kernel/perf_event_mlock_kb`        | Defines the number of pages that can be used for sampling via perf record or the native interface                                 |
+| `/proc/sys/kernel/perf_event_paranoid`        | Defines access rights:                                                                                                            |
 |                                             | -1 - Not paranoid at all                                                                                                          |
 |                                             | 0 - Disallow raw tracepoint access for unpriv                                                                                     |
 |                                             | 1 - Disallow cpu events for unpriv                                                                                                |
@@ -31,9 +34,12 @@ performance data can provide hints on the internals of the application.
 ### For Users
 
 Run `perf stat <Your application>`. This will provide you with a general
-overview on some counters.
+overview on some counters. The following listing shows an exemplary output for the `ls` command.
 
-```Bash
+```console
+marie@compute$ perf stat ls
+[...]
 Performance counter stats for 'ls':=
           2,524235 task-clock                #    0,352 CPUs utilized
                 15 context-switches          #    0,006 M/sec
@@ -51,7 +57,7 @@ Performance counter stats for 'ls':=
 
 - Generally speaking **task clock** tells you how parallel your job
   has been/how many cpus were used.
-- **[Context switches](http://en.wikipedia.org/wiki/Context_switch)**
+- [Context switches](http://en.wikipedia.org/wiki/Context_switch)
   are an information about how the scheduler treated the application.  Also interrupts cause context
   switches. Lower is better.
 - **CPU migrations** are an information on whether the scheduler moved
@@ -90,23 +96,26 @@ measures the performance counters for the whole computing node over one second.
 ## Perf Record
 
 `perf record` provides the possibility to sample an application or a system. You can find
-performance issues and hot parts of your code. By default perf record samples your program at a 4000
+performance issues and hot parts of your code. By default `perf record` samples your program at 4000
 Hz. It records CPU, Instruction Pointer and, if you specify it, the call chain. If your code runs
-long (or often) enough, you can find hot spots in your application and external libraries. Use
-**perf report** to evaluate the result. You should have debug symbols available, otherwise you won't
-be able to see the name of the functions that are responsible for your load. You can pass one or
-multiple events to define the **sampling event**.
-
-**What is a sampling event?** Sampling reads values at a specific sampling frequency. This
-frequency is usually static and given in Hz, so you have for example 4000 events per second and a
-sampling frequency of 4000 Hz and a sampling rate of 250 microseconds. With the sampling event, the
-concept of a static sampling frequency in time is somewhat redefined. Instead of a constant factor
-in time (sampling rate) you define a constant factor in events. So instead of a sampling rate of 250
-microseconds, you have a sampling rate of 10,000 floating point operations.
-
-**Why would you need sampling events?** Passing an event allows you to find the functions
-that produce cache misses, floating point operations, ... Again, you can use events defined in `perf
-list` and raw events.
+long (or often) enough, you can find hot spots in your application and external libraries.
+Use [perf report](#perf-report) to evaluate the result. You should have debug symbols available;
+otherwise, you won't be able to see the names of the functions that are responsible for your load.
+You can pass one or multiple events to define the **sampling event**.
+
+!!! note "What is a sampling event?"
+
+    Sampling reads values at a specific sampling frequency. This frequency is usually static and
+    given in Hz; for example, a sampling frequency of 4000 Hz means 4000 samples per second, i.e., a
+    sampling interval of 250 microseconds. With a sampling event, the concept of a static sampling
+    frequency in time is somewhat redefined: instead of a constant factor in time (the sampling
+    interval), you define a constant factor in events. So instead of taking a sample every 250
+    microseconds, you take, for example, a sample every 10,000 floating point operations.
+
+!!! note "Why would you need sampling events?"
+
+    Passing an event allows you to find the functions that produce cache misses, floating point
+    operations, ... Again, you can use events defined in `perf list` and raw events.
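+
+A minimal sketch of sampling with a specific event (the event name is a placeholder; see `perf list`
+for valid names):
+
+```console
+marie@compute$ perf record -e <event> ./application
+```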
 
 Use the `-g` flag to receive a call graph.
 
@@ -126,7 +135,7 @@ perf record -o perf.data.$SLURM_JOB_ID.$SLURM_PROCID $@
 ```
 
 To start the MPI program type `srun ./perfwrapper ./myapp` on your command line. The result will be
-n independent perf.data files that can be analyzed individually with perf report.
+n independent `perf.data` files that can be analyzed individually using `perf report`.
 
 ### For Admins
 
@@ -138,14 +147,18 @@ record -a -g` to monitor the whole node.
 
 `perf report` is a command line UI for evaluating the results from perf record. It creates something
 like a profile from the recorded samplings.  These profiles show you what the most used have been.
-If you added a callchain, it also gives you a callchain profile.\<br /> \*Disclaimer: Sampling is
-not an appropriate way to gain exact numbers. So this is merely a rough overview and not guaranteed
-to be absolutely correct.\*\<span style="font-size: 1em;"> \</span>
+If you added a callchain, it also gives you a callchain profile.
+
+!!! note "Disclaimer"
+
+    Sampling is not an appropriate way to gain exact numbers. So this is merely a rough overview and
+    not guaranteed to be absolutely correct.
 
-### On ZIH systems
+### On ZIH Systems
 
 On ZIH systems, users are not allowed to see the kernel functions. If you have multiple events
-defined, then the first thing you select in `perf report` is the type of event. Press right
+defined, then the first thing you select in `perf report` is the type of event. Press the right
+arrow key:
 
 ```Bash
 Available samples
@@ -153,12 +166,12 @@ Available samples
 11 cache-misse
 ```
 
-**Hints:**
+!!! hint
 
-* The more samples you have, the more exact is the profile. 96 or
-11 samples is not enough by far.
-* Repeat the measurement and set `-F 50000` to increase the sampling frequency.
-* The higher the frequency, the higher the influence on the measurement.
+    * The more samples you have, the more exact the profile is. 96 or 11 samples are far from
+    enough.
+    * Repeat the measurement and set `-F 50000` to increase the sampling frequency.
+    * The higher the frequency, the higher the influence on the measurement.
 
 If you'd select cycles, you would get such a screen:
 
@@ -172,7 +185,7 @@ Events: 96  cycles
 +   2,02%  test_gcc_perf  [kernel.kallsyms]  [k] 0xffffffff8102e9ea
 ```
 
-Increased sample frequency:
+With increased sample frequency, it might look like this:
 
 ```Bash
 Events: 7K cycles
@@ -198,16 +211,16 @@ Events: 7K cycles
 +   0,00%  test_gcc_perf  libc-2.12.so       [.] __execvpe
 ```
 
-Now you select the most often sampled function and zoom into it by pressing right. If debug symbols
-are not available, perf report will show which assembly instruction is hit most often when sampling.
-If debug symbols are available, it will also show you the source code lines for these assembly
-instructions. You can also go back and check which instruction caused the cache misses or whatever
-event you were passing to perf record.
+Now you select the most often sampled function and zoom into it by pressing the right arrow key. If
+debug symbols are not available, `perf report` will show which assembly instruction is hit most often
+when sampling. If debug symbols are available, it will also show you the source code lines for
+these assembly instructions. You can also go back and check which instruction caused the cache
+misses or whatever event you were passing to `perf record`.
 
 ## Perf Script
 
 If you need a trace of the sampled data, you can use `perf script` command, which by default prints
-all samples to stdout. You can use various interfaces (e.g., python) to process such a trace.
+all samples to stdout. You can use various interfaces (e.g., Python) to process such a trace.
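+
+A minimal sketch, assuming a `perf.data` file recorded with `perf record` in the current directory:
+
+```console
+marie@compute$ perf script | head -n 20
+```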
 
 ## Perf Top
 
diff --git a/doc.zih.tu-dresden.de/docs/software/pika.md b/doc.zih.tu-dresden.de/docs/software/pika.md
index d9616e900e258909267fc9870db6ddfa24fee0de..3b9cd3fd7ff821f3dc5d76241b46b2645b9fc01b 100644
--- a/doc.zih.tu-dresden.de/docs/software/pika.md
+++ b/doc.zih.tu-dresden.de/docs/software/pika.md
@@ -1,20 +1,20 @@
-# Performance Analysis of HPC Applications with PIKA
+# PIKA
 
 PIKA is a hardware performance monitoring stack to identify inefficient HPC jobs. Users of ZIH
 systems have the possibility to visualize and analyze the efficiency of their jobs via the
-[PIKA web interface](https://selfservice.zih.tu-dresden.de/l/index.php/hpcportal/jobmonitoring/z../jobs_and_resources).
+[PIKA web interface](https://selfservice.zih.tu-dresden.de/l/index.php/hpcportal/jobmonitoring/zih/jobs).
 
 !!! hint
 
     To understand this small guide, it is recommended to open the
-    [web interface](https://selfservice.zih.tu-dresden.de/l/index.php/hpcportal/jobmonitoring/z../jobs_and_resources)
+    [web interface](https://selfservice.zih.tu-dresden.de/l/index.php/hpcportal/jobmonitoring/zih/jobs)
     in a separate window. Furthermore, at least one real HPC job should have been submitted.
 
 ## Overview
 
 PIKA consists of several components and tools. It uses the collection daemon collectd, InfluxDB to
 store time-series data and MariaDB to store job metadata. Furthermore, it provides a powerful
-[web interface](https://selfservice.zih.tu-dresden.de/l/index.php/hpcportal/jobmonitoring/z../jobs_and_resources)
+[web interface](https://selfservice.zih.tu-dresden.de/l/index.php/hpcportal/jobmonitoring/zih/jobs)
 for the visualization and analysis of job performance data.
 
 ## Table View and Job Search
diff --git a/doc.zih.tu-dresden.de/docs/software/power_ai.md b/doc.zih.tu-dresden.de/docs/software/power_ai.md
index b4beda5cec2b8b2e1ede4729df7434b6e8c8e7d5..5d1c397ab00d66fe61fc41fb4cee1efaeb25801b 100644
--- a/doc.zih.tu-dresden.de/docs/software/power_ai.md
+++ b/doc.zih.tu-dresden.de/docs/software/power_ai.md
@@ -1,4 +1,4 @@
-# PowerAI Documentation Links
+# Machine Learning with PowerAI
 
 There are different documentation sources for users to learn more about
 the PowerAI Framework for Machine Learning. In the following the links
diff --git a/doc.zih.tu-dresden.de/docs/software/private_modules.md b/doc.zih.tu-dresden.de/docs/software/private_modules.md
index 4b79463f05988afd689b5fa18bddc758c16dfaa7..00982700ec5bc35fe757660897cc1631453a820f 100644
--- a/doc.zih.tu-dresden.de/docs/software/private_modules.md
+++ b/doc.zih.tu-dresden.de/docs/software/private_modules.md
@@ -1,105 +1,134 @@
-# Project and User Private Modules
+# Private Modules
 
 Private module files allow you to load your own installed software packages into your environment
 and to handle different versions without getting into conflicts. Private modules can be setup for a
-single user as well as all users of project group. The workflow and settings for user private module
-files is described in the following. The [settings for project private
-modules](#project-private-modules) differ only in details.
+**single user** as well as **all users of a project group**. The workflow and settings for user
+private as well as project private module files are described in the following.
 
-In order to use your own module files please use the command
-`module use <path_to_module_files>`. It will add the path to the list of module directories
-that are searched by lmod (i.e. the `module` command). You may use a directory `privatemodules`
-within your home or project directory to setup your own module files.
+## Setup
 
-Please see the [Environment Modules open source project's web page](http://modules.sourceforge.net/)
-for further information on writing module files.
+### 0. Build and Install Software
 
-## 1. Create Directories
+The first step is to build and install the software you'd like to use. Please follow the
+instructions and tips provided on the page [Building Software](building_software.md).
+For consistency, we use the placeholder `<sw_name>` in this documentation. When following these
+instructions, please substitute it with the actual software name within the commands.
+
+### 1. Create Directory
+
+Now, create the directory `privatemodules` to store all your private module files and the directory
+`<sw_name>` therein. All module files for different versions or build options of `<sw_name>` should
+be located in this directory.
 
 ```console
 marie@compute$ cd $HOME
-marie@compute$ mkdir --verbose --parents privatemodules/testsoftware
-marie@compute$ cd privatemodules/testsoftware
+marie@compute$ mkdir --verbose --parents privatemodules/<sw_name>
+marie@compute$ cd privatemodules/<sw_name>
 ```
 
-(create a directory in your home directory)
-
-## 2. Notify lmod
+Project private module files for software that can be used by all members of your group should be
+located in your global projects directory, e.g., `/projects/p_number_crunch/privatemodules`. Thus, create
+this directory:
 
 ```console
-marie@compute$ module use $HOME/privatemodules
+marie@compute$ mkdir --verbose --parents /projects/p_number_crunch/privatemodules/<sw_name>
+marie@compute$ cd /projects/p_number_crunch/privatemodules/<sw_name>
 ```
 
-(add the directory in the list of module directories)
+!!! note
 
-## 3. Create Modulefile
+    Make sure that the directory is group-readable.
 
-Create a file with the name `1.0` with a
-test software in the `testsoftware` directory you created earlier
-(using your favorite editor) and paste the following text into it:
+### 2. Create Modulefile
 
-```
-#%Module######################################################################
-##
-##     testsoftware modulefile
-##
-proc ModulesHelp { } {
-        puts stderr "Loads testsoftware"
-}
-
-set version 1.0
-set arch    x86_64
-set path    /home/<user>/opt/testsoftware/$version/$arch/
-
-prepend-path PATH            $path/bin
-prepend-path LD_LIBRARY_PATH $path/lib
-
-if [ module-info mode load ] {
-        puts stderr "Load testsoftware version $version"
-}
-```
+Within the directory `<sw_name>`, create the module file. The file can be written either in TCL or
+in Lua; we recommend using Lua. The module file name should reflect the particular version of the
+software, e.g., `1.4.lua`.
 
-## 4. Check lmod
+!!! note
 
-Check the availability of the module with `ml av`, the output should look like this:
+    If you create a group private module file, make sure it is group-readable.
 
-```
---------------------- /home/masterman/privatemodules ---------------------
-   testsoftware/1.0
-```
+A template module file is:
 
-## 5. Load Module
+```lua linenums="1"
+help([[
 
-Load the test module with `module load testsoftware`, the output should look like this:
+Description
+===========
+<sw_name> is ...
 
-```console
-Load testsoftware version 1.0
-Module testsoftware/1.0 loaded.
+More Information
+================
+For detailed instructions, go to:
+   https://...
+
+]])
+
+whatis("Version: 1.4")
+whatis("Keywords: [System, Utility, ...]")
+whatis("URL: <...>")
+whatis("Description: <...>")
+
+conflict("<sw_name>")
+
+local root = "</path/to/installation>"
+prepend_path( "PATH",            pathJoin(root, "bin"))
+prepend_path( "LD_LIBRARY_PATH", pathJoin(root, "lib"))
+prepend_path( "LIBRARY_PATH", pathJoin(root, "lib"))
+
+setenv(       "<SOME_ENV>",        "<value>")
 ```
 
-## Project Private Modules
+The most important functions to adjust the environment are listed and described in the following
+table.
+
+| Function | Description |
+|----------|-------------|
+| `help([[ help string ]])` | Message shown when the help command is called. |
+| `conflict("name1", "name2")` | The current module file will only load if none of the listed modules is loaded. |
+| `depends_on("pkgA", "pkgB", "pkgC")` | Loads all listed modules; on unload, only dependent modules are unloaded. |
+| `load("pkgA", "pkgB", "pkgC")` | Loads all listed modules and reports an error if any of them cannot be loaded. |
+| `prepend_path("PATH", "/path/to/pkg/bin")` | Prepend the value to a path-like variable. |
+| `setenv("NAME", "value")` | Assign the value to the environment variable `NAME`. |
 
-Private module files allow you to load project- or group-wide installed software into your
-environment and to handle different versions without getting into conflicts.
+Please refer to the official documentation of Lmod on
+[writing modules](https://lmod.readthedocs.io/en/latest/015_writing_modules.html) and
+[Lua ModulefileFunctions](https://lmod.readthedocs.io/en/latest/050_lua_modulefiles.html)
+for detailed information.
+You can also have a look at existing module files on the system.
 
-The module files have to be stored in your global projects directory
-`/projects/p_projectname/privatemodules`. An example of a module file can be found in the section
-above. To use a project-wide module file you have to add the path to the module file to the module
-environment with the command
+## Usage
+
+In order to use private module files and the corresponding software, you need to expand the module
+search path. This is done by invoking the command
 
 ```console
-marie@compute$ module use /projects/p_projectname/privatemodules
+marie@login$ module use $HOME/privatemodules
 ```
 
-After that, the modules are available in your module environment and you can load the modules with
-the `module load` command.
+for your private module files and
 
-## Using Private Modules and Programs in the $HOME Directory
+```console
+marie@login$ module use /projects/p_number_crunch/privatemodules
+```
 
-An automated backup system provides security for the HOME-directories on the cluster on a daily
+for group private module files, respectively.
+
+Afterwards, you can use the [module commands](modules.md) to, e.g., load and unload your private modules
+as usual.
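+
+For example, assuming you created the module file `1.4.lua` for `<sw_name>` as described above, you
+can now load (and unload) it like any other module:
+
+```console
+marie@login$ module load <sw_name>/1.4
+marie@login$ module unload <sw_name>/1.4
+```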
+
+## Caveats
+
+An automated backup system provides security for the home directories on the cluster on a daily
 basis. This is the reason why we urge users to store (large) temporary data (like checkpoint files)
-on the /scratch filesystem or at local scratch disks.
+on the `/scratch` filesystem or on local scratch disks.
+
+This is also why we have set `ulimit -c 0` as a default setting to prevent users from filling the
+home directories with dumps of crashed programs. In particular, `ulimit -c 0` sets the core file
+size (blocks) to 0, which disables the creation of core dumps when an application crashes.
+
+!!! note "Enable core files for debugging"
 
-**Please note**: We have set `ulimit -c 0` as a default to prevent users from filling the disk with
-the dump of crashed programs. `bash` users can use `ulimit -Sc unlimited` to enable the debugging
-via analyzing the core file.
+    If you use `bash` as shell and you need these core files for analysis, set `ulimit -c
+    unlimited`.
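+
+    A minimal sketch for an interactive `bash` session:
+
+    ```console
+    marie@login$ ulimit -c unlimited    #Enable core dumps in the current shell
+    marie@login$ ulimit -c              #Verify the setting
+    unlimited
+    ```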
diff --git a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
index 67b10817c738b414a3302388b5cca3392ff96bb1..13b623174f21016084917fb2cd424b500727e5f3 100644
--- a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
+++ b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
@@ -1,16 +1,17 @@
 # Python Virtual Environments
 
-Virtual environments allow users to install additional Python packages and create an isolated
-run-time environment. We recommend using `virtualenv` for this purpose. In your virtual environment,
-you can use packages from the [modules list](modules.md) or if you didn't find what you need you can
-install required packages with the command: `pip install`. With the command `pip freeze`, you can
-see a list of all installed packages and their versions.
+Virtual environments allow users to install additional Python packages and
+create an isolated run-time environment. We recommend using `virtualenv` for
+this purpose. In your virtual environment, you can use packages from the
+[modules list](modules.md) or, if you did not find what you need, you can
+install required packages with the command `pip install`. With the command
+`pip freeze`, you can see a list of all installed packages and their versions.
 
 There are two methods of how to work with virtual environments on ZIH systems:
 
-1. **virtualenv** is a standard Python tool to create isolated Python environments.
-   It is the preferred interface for
-   managing installations and virtual environments on ZIH system and part of the Python modules.
+1. **virtualenv** is a standard Python tool to create isolated Python
+environments. It is the preferred interface for managing installations and
+virtual environments on ZIH system and part of the Python modules.
 
 2. **conda** is an alternative method for managing installations and
 virtual environments on ZIH system. conda is an open-source package
@@ -25,13 +26,13 @@ conda manager is included in all versions of Anaconda and Miniconda.
 
 ## Python Virtual Environment
 
-This example shows how to start working with **virtualenv** and Python virtual environment (using
-the module system).
+This example shows how to start working with **virtualenv** and a Python virtual
+environment (using the module system).
 
 !!! hint
 
-    We recommend to use [workspaces](../data_lifecycle/workspaces.md) for your virtual
-    environments.
+    We recommend using [workspaces](../data_lifecycle/workspaces.md) for your
+    virtual environments.
 
 At first, we check available Python modules and load the preferred version:
 
@@ -53,22 +54,60 @@ Info: creating workspace.
 [...]
 marie@compute$ virtualenv --system-site-packages /scratch/ws/1/python_virtual_environment/env  #Create virtual environment
 [...]
-marie@compute$ source /scratch/ws/1/python_virtual_environment/env/bin/activate    #Activate virtual environment. Example output: (envtest) bash-4.2$
+marie@compute$ source /scratch/ws/1/python_virtual_environment/env/bin/activate    #Activate virtual environment. Example output: (env) bash-4.2$
 ```
 
-Now you can work in this isolated environment, without interfering with other tasks running on the
-system. Note that the inscription (env) at the beginning of each line represents that you are in
-the virtual environment. You can deactivate the environment as follows:
+Now you can work in this isolated environment without interfering with other
+tasks running on the system. Note that the prefix `(env)` at the beginning of
+each line indicates that you are inside the virtual environment. You can
+deactivate the environment as follows:
 
 ```console
 (env) marie@compute$ deactivate    #Leave the virtual environment
 ```
 
+### Persistence of Python Virtual Environment
+
+To persist a virtualenv, you can store the names and versions of installed
+packages in a file. Then you can restore this virtualenv by installing the
+packages from this file. Use the `pip freeze` command for storing:
+
+```console
+(env) marie@compute$ pip freeze > requirements.txt    #Store the currently installed packages
+```
+
+In order to recreate the Python virtual environment, use the `pip install` command to install the
+packages from the file:
+
+```console
+marie@compute$ module load Python    #Load default Python
+[...]
+marie@compute$ virtualenv --system-site-packages /scratch/ws/1/python_virtual_environment/env_post  #Create virtual environment
+[...]
+marie@compute$ source /scratch/ws/1/python_virtual_environment/env_post/bin/activate    #Activate virtual environment. Example output: (env_post) bash-4.2$
+(env_post) marie@compute$ pip install -r requirements.txt    #Install packages from the created requirements.txt file
+```
+
 ## Conda Virtual Environment
 
-This example shows how to start working with **conda** and virtual environment (with using module
-system). At first, we use an interactive job and create a directory for the conda virtual
-environment:
+**Prerequisite:** Before working with conda, your shell needs to be configured
+once. Therefore, log in to the ZIH system, load the Anaconda module and run
+`conda init`. For more information, use `conda init --help`. Note that the
+changes take effect after closing and re-opening your shell.
+
+??? example
+
+    ```console
+    marie@compute$ module load Anaconda3    #load Anaconda module
+    Module Anaconda3/2019.03 loaded.
+    marie@compute$ conda init    #configure shell
+    [...]
+    modified      /home/marie/.bashrc
+    ```
+
+This example shows how to start working with **conda** and a virtual environment
+(using the module system). At first, we use an interactive job and create a
+directory for the conda virtual environment:
 
 ```console
 marie@compute$ ws_allocate -F scratch conda_virtual_environment 1
@@ -77,7 +116,8 @@ Info: creating workspace.
 [...]
 ```
 
-Then, we load Anaconda, create an environment in our directory and activate the environment:
+Then, we load Anaconda, create an environment in our directory and activate the
+environment:
 
 ```console
 marie@compute$ module load Anaconda3    #load Anaconda module
@@ -85,9 +125,10 @@ marie@compute$ conda create --prefix /scratch/ws/1/conda_virtual_environment/con
 marie@compute$ conda activate /scratch/ws/1/conda_virtual_environment/conda-env    #activate conda-env virtual environment
 ```
 
-Now you can work in this isolated environment, without interfering with other tasks running on the
-system. Note that the inscription (conda-env) at the beginning of each line represents that you
-are in the virtual environment. You can deactivate the conda environment as follows:
+Now you can work in this isolated environment without interfering with other
+tasks running on the system. Note that the prefix `(conda-env)` at the
+beginning of each line indicates that you are inside the virtual environment.
+You can deactivate the conda environment as follows:
 
 ```console
 (conda-env) marie@compute$ conda deactivate    #Leave the virtual environment
@@ -98,7 +139,7 @@ are in the virtual environment. You can deactivate the conda environment as foll
     This is an example on partition Alpha. The example creates a virtual environment, and installs
     the package `torchvision` with pip.
     ```console
-    marie@login$ srun --partition=alpha-interactive -N=1 --gres=gpu:1 --time=01:00:00 --pty bash
+    marie@login$ srun --partition=alpha-interactive --nodes=1 --gres=gpu:1 --time=01:00:00 --pty bash
     marie@alpha$ mkdir python-environments                               # please use workspaces
     marie@alpha$ module load modenv/hiera GCC/10.2.0 CUDA/11.1.1 OpenMPI/4.0.5 PyTorch
     Module GCC/10.2.0, CUDA/11.1.1, OpenMPI/4.0.5, PyTorch/1.9.0 and 54 dependencies loaded.
@@ -122,3 +163,53 @@ are in the virtual environment. You can deactivate the conda environment as foll
     0.10.0+cu102
     (my-torch-env) marie@alpha$ deactivate
     ```
+
+### Persistence of Conda Virtual Environment
+
+To persist a conda virtual environment, you can define an `environment.yml`
+file. Have a look at the [conda docs](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html?highlight=environment.yml#create-env-file-manually)
+for a description of the syntax. See an example for the `environment.yml` file
+below.
+
+??? example
+    ```yml
+    name: workshop_env
+    channels:
+    - conda-forge
+    - defaults
+    dependencies:
+    - python>=3.7
+    - pip
+    - colorcet
+    - 'geoviews-core=1.8.1'
+    - 'ipywidgets=7.6.*'
+    - geopandas
+    - hvplot
+    - pyepsg
+    - python-dotenv
+    - 'shapely=1.7.1'
+    - pip:
+        - python-hll
+    ```
+
+After specifying the `name`, the conda [channel priority](https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/channels.html)
+is defined. In the example above, packages will first be installed from the
+`conda-forge` channel and, if not found there, from the `defaults` Anaconda channel.
+
+Below, dependencies can be specified. Optionally, <abbr title="Pinning is a
+process that allows you to remain on a stable release while grabbing packages
+from a more recent version.">pinning</abbr> can be used to restrict the
+installed packages to compatible versions.
+
+Finally, packages that are not available on conda can be specified (indented) below
+`- pip:`.
+
+Recreate the conda virtual environment with the packages from the created
+`environment.yml` file:
+
+```console
+marie@compute$ mkdir workshop_env    #Create directory for environment
+marie@compute$ module load Anaconda3    #Load Anaconda
+marie@compute$ conda config --set channel_priority strict
+marie@compute$ conda env create --prefix workshop_env --file environment.yml    #Create conda env in directory with packages from environment.yml file
+```
diff --git a/doc.zih.tu-dresden.de/docs/software/scs5_software.md b/doc.zih.tu-dresden.de/docs/software/scs5_software.md
index b5a1bef60d20cdc9989c8db82f766d31a96d3cdc..73311c7fcc78001ad2dc201c19c0eb657397b33a 100644
--- a/doc.zih.tu-dresden.de/docs/software/scs5_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/scs5_software.md
@@ -3,7 +3,7 @@
 Bull's new cluster software is called SCS 5 (*Super Computing Suite*).
 Here are the major changes from the user's perspective:
 
-| software                        | old    | new      |
+| Software                        | Old    | New      |
 |:--------------------------------|:-------|:---------|
 | Red Hat Enterprise Linux (RHEL) | 6.x    | 7.x      |
 | Linux kernel                    | 2.26   | 3.10     |
@@ -20,9 +20,9 @@ remove it and accept the new one after comparing its fingerprint with those list
 
 ## Using Software Modules
 
-Starting with SCS5, we only provide
-[Lmod](../software/modules.md#lmod-an-alternative-module-implementation) as the
-environment module tool of choice.
+Starting with SCS5, we do not support [C environment modules](http://modules.sourceforge.net/)
+anymore. We only provide [Lmod](http://lmod.readthedocs.io/en/latest/index.html) as the environment
+module tool of choice. Refer to the [module page](modules.md) for further information on modules.
 
 As usual, you can get a list of the available software modules via:
 
@@ -35,11 +35,11 @@ ml av
 There is a special module that is always loaded (sticky) called
 **modenv**. It determines the module environment you can see.
 
-|                |                                                 |         |
-|----------------|-------------------------------------------------|---------|
-| modenv/scs5    | SCS5 software                                   | default |
-| modenv/ml      | software for data analytics (partition ml)      |         |
-| modenv/classic | Manually built pre-SCS5 (AE4.0) software        | hidden  |
+| Module Environment | Description                                 | Status  |
+|--------------------|---------------------------------------------|---------|
+| `modenv/scs5`      | SCS5 software                               | default |
+| `modenv/ml`        | Software for data analytics (partition ml)  |         |
+| `modenv/classic`   | Manually built pre-SCS5 (AE4.0) software    | hidden  |
 
 The old modules (pre-SCS5) are still available after loading the
 corresponding **modenv** version (**classic**), however, due to changes
@@ -49,7 +49,7 @@ still work under SCS5. That's why those modenv versions are hidden.
 Example:
 
 ```Bash
-$ ml modenv/classic ansys/19.0
+marie@compute$ ml modenv/classic ansys/19.0
 
 The following have been reloaded with a version change:
   1) modenv/scs5 => modenv/classic
@@ -90,31 +90,28 @@ than you will be used to, coming from modenv/classic. A full toolchain, like "in
 
 For instance, the "intel" toolchain has the following structure:
 
-|              |            |
+| Toolchain    | `intel`    |
 |--------------|------------|
-| toolchain    | intel      |
-| compilers    | icc, ifort |
-| mpi library  | impi       |
-| math library | imkl       |
+| Compilers    | icc, ifort |
+| MPI library  | impi       |
+| Math library | imkl       |
 
 On the other hand, the "foss" toolchain looks like this:
 
-|                |                     |
+| Toolchain      | `foss`              |
 |----------------|---------------------|
-| toolchain      | foss                |
-| compilers      | GCC (gcc, gfortran) |
-| mpi library    | OpenMPI             |
-| math libraries | OpenBLAS, FFTW      |
+| Compilers      | GCC (gcc, gfortran) |
+| MPI library    | OpenMPI             |
+| Math libraries | OpenBLAS, FFTW      |
 
 If you want to combine the Intel compilers and MKL with OpenMPI, you'd have to use the "iomkl"
 toolchain:
 
-|              |            |
+| Toolchain    | `iomkl`    |
 |--------------|------------|
-| toolchain    | iomkl      |
-| compilers    | icc, ifort |
-| mpi library  | OpenMPI    |
-| math library | imkl       |
+| Compilers    | icc, ifort |
+| MPI library  | OpenMPI    |
+| Math library | imkl       |
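+
+To check whether and in which versions such a toolchain is provided, you can, for example, filter
+the module list (the actual output depends on the currently loaded module environment):
+
+```Bash
+marie@compute$ ml av iomkl
+```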
 
 There are also subtoolchains that skip a layer or two, e.g. "iccifort" only consists of the
 respective compilers, same as "GCC". Then there is "iompi" that includes Intel compilers+OpenMPI but
@@ -145,7 +142,7 @@ Since "intel" is only a toolchain module now, it does not include the entire Par
 anymore. Tools like the Intel Advisor, Inspector, Trace Analyzer or VTune Amplifier are available as
 separate modules now:
 
-| product               | module    |
+| Product               | Module    |
 |:----------------------|:----------|
 | Intel Advisor         | Advisor   |
 | Intel Inspector       | Inspector |
diff --git a/doc.zih.tu-dresden.de/docs/software/virtual_machines_tools.md b/doc.zih.tu-dresden.de/docs/software/singularity_power9.md
similarity index 97%
rename from doc.zih.tu-dresden.de/docs/software/virtual_machines_tools.md
rename to doc.zih.tu-dresden.de/docs/software/singularity_power9.md
index fbec2e51bc453cc17e2d131d7229c50ff90aa23f..5daf70465d006799bc3df921dcb4698a8d648eab 100644
--- a/doc.zih.tu-dresden.de/docs/software/virtual_machines_tools.md
+++ b/doc.zih.tu-dresden.de/docs/software/singularity_power9.md
@@ -1,4 +1,4 @@
-# Singularity on Partition `ml`
+# Singularity for Power 9 Architecture
 
 !!! note "Root privileges"
 
@@ -35,7 +35,7 @@ in which case you'd need to change that when moving to ZIH systems.
 
 ## Build a Singularity Container in a Job
 
-To build a Singularity container for the power9-architecture on ZIH systems simply run:
+To build a Singularity container for the Power9 architecture on ZIH systems simply run:
 
 ```console
 marie@login$ buildSingularityImage --arch=power9 myContainer.sif myDefinition.def
@@ -105,7 +105,7 @@ needs to be re-generated on every script run.
 ## Start a Job in a VM
 
 Especially when developing a Singularity definition file, it might be useful to get a shell directly
-on a VM. To do so on the power9-architecture, simply run:
+on a VM. To do so on the Power9 architecture, simply run:
 
 ```console
 startInVM --arch=power9
diff --git a/doc.zih.tu-dresden.de/docs/software/tensorboard.md b/doc.zih.tu-dresden.de/docs/software/tensorboard.md
index d2c838d3961d8f48794e544ce1ca7846d24e7325..f7d3448079e8973218db99f0da7cf2a7acb432a7 100644
--- a/doc.zih.tu-dresden.de/docs/software/tensorboard.md
+++ b/doc.zih.tu-dresden.de/docs/software/tensorboard.md
@@ -76,9 +76,14 @@ For accessing TensorBoard now, you have to set up some port forwarding via ssh t
 machine:
 
 ```console
-marie@local$ ssh -N -f -L 6006:taurusi8034.taurus.hrsk.tu-dresden.de:6006 <zih-login>@taurus.hrsk.tu-dresden.de
+marie@local$ ssh -N -f -L 6006:taurusi8034:6006 taurus
 ```
 
+!!! important "SSH command"
+
+    The previous SSH command requires that you have already set up your [SSH configuration
+    ](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+
 Now, you can see the TensorBoard in your browser at `http://localhost:6006/`.
 
 Note that you can also use TensorBoard in an [sbatch file](../jobs_and_resources/slurm.md).
diff --git a/doc.zih.tu-dresden.de/docs/software/tensorflow.md b/doc.zih.tu-dresden.de/docs/software/tensorflow.md
index 09a8352a32648178f3634a4099eee52ad6c0ccd0..db6b97596a153b852dabec4d5d9a1e73aa1aa19b 100644
--- a/doc.zih.tu-dresden.de/docs/software/tensorflow.md
+++ b/doc.zih.tu-dresden.de/docs/software/tensorflow.md
@@ -114,6 +114,10 @@ Basic test of tensorflow - A Hello World!!!...
 [...]
 ```
 
+!!! hint
+
+    In the above example, we create a conda virtual environment. To use conda, it is necessary to
+    configure your shell in advance via `conda init` as described in
+    [Python virtual environments](python_virtual_environments.md#conda-virtual-environment).
+
 ## TensorFlow with Python or R
 
 For further information on TensorFlow in combination with Python see
diff --git a/doc.zih.tu-dresden.de/docs/software/vampir.md b/doc.zih.tu-dresden.de/docs/software/vampir.md
index 9df5eb62a0d461da97fcb2ce28f461d9042e93a2..ebaa368e73f445422644b6159c1ab677fc50fecf 100644
--- a/doc.zih.tu-dresden.de/docs/software/vampir.md
+++ b/doc.zih.tu-dresden.de/docs/software/vampir.md
@@ -8,9 +8,9 @@ graphical displays, including state diagrams, statistics, and timelines, can be
 to obtain a better understanding of their parallel program inner working and to subsequently
 optimize it. Vampir allows to focus on appropriate levels of detail, which allows the detection and
 explanation of various performance bottlenecks such as load imbalances and communication
-deficiencies. Follow this
-[link](http://tu-dresden.de/die_tu_dresden/zentrale_einrichtungen/zih/forschung/projekte/vampir)
-for further information.
+deficiencies. [ZIH's Vampir overview page
+](http://tu-dresden.de/die_tu_dresden/zentrale_einrichtungen/zih/forschung/projekte/vampir) gives
+further information.
 
 [Score-P](scorep.md) is the primary code instrumentation and run-time measurement framework for
 Vampir and supports various instrumentation methods, including instrumentation at source level and
@@ -33,7 +33,7 @@ For members of TU Dresden the Vampir tool is also available as
 [download](http://tu-dresden.de/die_tu_dresden/zentrale_einrichtungen/zih/forschung/projekte/vampir/vampir_download_tu)
 for installation on your personal computer.
 
-Make sure, that compressed display forwarding (e.g., `ssh -YC taurus.hrsk.tu-dresden.de`) is
+Make sure, that compressed display forwarding (e.g., `ssh -YC taurus`) is
 enabled. Start the GUI by typing
 
 ```console
@@ -142,9 +142,14 @@ marie@login$ vampirserver list
 Open a second console on your local computer and establish an SSH tunnel to the compute node with:
 
 ```console
-marie@local$ ssh -L 30000:taurusi1253:30055 taurus.hrsk.tu-dresden.de
+marie@local$ ssh -L 30000:taurusi1253:30055 taurus
 ```
 
+!!! important "SSH command"
+
+    The previous SSH command requires that you have already set up your [SSH configuration
+    ](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+
 Now, the port 30000 on your desktop is connected to the VampirServer port 30055 at the compute node
 `taurusi1253` of the ZIH system. Finally, start your local Vampir client and establish a remote
 connection to `localhost`, port 30000 as described in the manual.
diff --git a/doc.zih.tu-dresden.de/docs/software/virtual_desktops.md b/doc.zih.tu-dresden.de/docs/software/virtual_desktops.md
index cb809c3a99022c8ec5c5d3e9a98b96b8533baa0b..963e4f8a9680abbd773b4af8b14d11f7c6a71080 100644
--- a/doc.zih.tu-dresden.de/docs/software/virtual_desktops.md
+++ b/doc.zih.tu-dresden.de/docs/software/virtual_desktops.md
@@ -1,3 +1,83 @@
 # Virtual Desktops
 
-coming soon
+Use WebVNC or DCV to run GUI applications on HPC resources.
+
+|                | WebVNC                                                | DCV                                        |
+|----------------|-------------------------------------------------------|--------------------------------------------|
+| **use case**   | all GUI applications that do **not need** OpenGL      | only GUI applications that **need** OpenGL |
+| **partitions** | all\* (except partitions with GPUs: gpu2, hpdlf, ml)  | dcv                                        |
+
+## Launch a Virtual Desktop
+
+| Step | WebVNC         | DCV                    |
+|------|:--------------:|:----------------------:|
+| 1  <td colspan=2 align="center"> Navigate to [https://taurus.hrsk.tu-dresden.de](https://taurus.hrsk.tu-dresden.de). There you find our [JupyterHub](../access/jupyterhub.md) instance.
+| 2  <td colspan=2 align="center"> Click on the "advanced" tab and choose a preset.
+| 3  <td colspan=2 align="center"> Optional: Fine-tune your session with the available Slurm job parameters or assign a certain project or reservation. Then save your settings in a new preset for future use.
+| 4  <td colspan=2 align="center"> Click on `Spawn`. JupyterHub now starts a Slurm job for you. As soon as everything is ready, the JupyterLab interface will appear.
+| 5    | Click on `WebVNC` to start a virtual desktop. | Click on `DCV` to start a virtual desktop. |
+| 6  <td colspan=2 align="center"> The virtual desktop starts in a new tab or window.
+
+### Demonstration
+
+<video controls="" width="320" style="border: 1px solid black">
+<source src="https://doc.zih.tu-dresden.de/software/misc/start-virtual-desktop-dcv.mp4"
+        type="video/mp4" />
+<source src="https://doc.zih.tu-dresden.de/software/misc/start-virtual-desktop-dcv.webm"
+        type="video/webm" />
+</video>
+
+<!--A video is embedded here.-->
+<!--However, the mkdocs-video plugin still needs to be added to mkdocs.yaml-->
+<!--![type:video](misc/start-virtual-desktop-dcv.mp4)-->
+
+### Using the Quickstart Feature
+
+JupyterHub can start a job automatically if the URL contains certain
+parameters.
+
+|              | WebVNC       | DCV          |
+|--------------|:------------:|:------------:|
+| Examples     | [WebVNC](https://taurus.hrsk.tu-dresden.de/jupyter/hub/spawn#/>\~(partition\~'interactive\~cpuspertask\~'2\~mempercpu\~'2583)) | [DCV](https://taurus.hrsk.tu-dresden.de/jupyter/hub/spawn#/>\~(partition\~'dcv\~cpuspertask\~'6\~gres\~'gpu\*3a1\~mempercpu\~'2583)) |
+| Description  | partition `interactive`, 2 CPUs with 2583 MB RAM per core, no GPU | partition `dcv`, 6 CPUs with 2583 MB RAM per core, 1 GPU |
+| Link creator <td colspan=2 align="center"> Use the spawn form to set your preferred options. The browser URL will be updated with the corresponding parameters.
+
+If you close the browser tabs or windows or log out from your local
+machine, you can reopen the virtual desktop later, as long as the
+session is still running. But please remember that a Slurm job with a
+certain time limit is running in the background.
+
+## Reconnecting to a Session
+
+In order to reconnect to an active instance of WebVNC, simply repeat the
+steps required to start a session, beginning, if required, with the
+login, then clicking `My Server`, and then pressing the `+` sign in the
+upper left corner. Provided your server is still running and you simply
+closed the window or logged out without stopping your server, you will
+find your WebVNC desktop the way you left it.
+
+## Terminate a Remote Session
+
+| Step | Description |
+|------|-------------|
+| 1    | Close the VNC viewer tab or window. |
+| 2    | Click on File \> Log Out in the JupyterLab main menu. You will then be redirected to the JupyterLab control panel. If you don't have your JupyterLab tab or window anymore, navigate directly to [https://taurus.hrsk.tu-dresden.de/jupyter/hub/home](https://taurus.hrsk.tu-dresden.de/jupyter/hub/home). |
+| 3    | Click on `Stop My Server`. This cancels the Slurm job and terminates your session. |
+
+### Demonstration
+
+<video controls="" width="320" style="border: 1px solid black">
+<source src="https://doc.zih.tu-dresden.de/software/misc/terminate-virtual-desktop-dcv.mp4"
+        type="video/mp4" />
+<source src="https://doc.zih.tu-dresden.de/software/misc/terminate-virtual-desktop-dcv.webm"
+        type="video/webm" />
+</video>
+
+!!! note
+
+    This does not work if you click on the button `Logout` in your
+    virtual desktop. Instead, this will just close your DCV session or cause
+    a black screen in your WebVNC window without a possibility to recover the
+    virtual desktop in the same Jupyter session. For now, the solution is to
+    terminate the whole Jupyter session and start a new one as described
+    above.
diff --git a/doc.zih.tu-dresden.de/docs/software/virtual_machines.md b/doc.zih.tu-dresden.de/docs/software/virtual_machines.md
index 2527bbe91cbb735824598cc90311b88df2eab808..69b5c3798b0d4f28309ddec24fbea486cfaf2460 100644
--- a/doc.zih.tu-dresden.de/docs/software/virtual_machines.md
+++ b/doc.zih.tu-dresden.de/docs/software/virtual_machines.md
@@ -61,7 +61,7 @@ Last login: Fri Jul 24 13:53:48 2020 from gateway
 
 ## Automation
 
-We provide [tools](virtual_machines_tools.md) to automate these steps. You may just type `startInVM
+We provide [tools](singularity_power9.md) to automate these steps. You may just type `startInVM
 --arch=power9` on a login node and you will be inside the VM with everything mounted.
 
 ## Known Issues
@@ -79,7 +79,7 @@ rm -rf /tmp/sbuild-*
 ```
 
 If that does not help, e.g., because one build alone needs more than the available disk memory, then
-it will be necessary to use the tmp folder on scratch. In order to ensure that the files in the
+it will be necessary to use the `tmp` folder on `scratch`. In order to ensure that the files in the
 temporary folder will be owned by root, it is necessary to set up an image inside `/scratch/tmp`
 instead of using it directly. E.g., to create a 25 GB of temporary memory image:
 
diff --git a/doc.zih.tu-dresden.de/docs/software/visualization.md b/doc.zih.tu-dresden.de/docs/software/visualization.md
index f1e551c968cb4478069c98e691eef11bce7ccb01..b1a103a0c1ab1c999a002c2584eefa1a1813916b 100644
--- a/doc.zih.tu-dresden.de/docs/software/visualization.md
+++ b/doc.zih.tu-dresden.de/docs/software/visualization.md
@@ -38,9 +38,10 @@ parallel, if it was built using MPI.
     ```
 
 The resources for the MPI processes have to be allocated via the
-[batch system](../jobs_and_resources/slurm.md) option `-c NUM` (not `-n`, as it would be usually for
-MPI processes). It might be valuable in terms of runtime to bind/pin the MPI processes to hardware.
-A convenient option is `-bind-to core`. All other options can be obtained by
+[batch system](../jobs_and_resources/slurm.md) option `--cpus-per-task=<NUM>` (not `--ntasks=<NUM>`,
+as would be usual for MPI processes). It might be valuable in terms of runtime to bind/pin the
+MPI processes to hardware. A convenient option is `-bind-to core`. All other options can be
+obtained by
 
 ```console
 marie@login$ mpiexec -bind-to -help`
@@ -57,8 +58,8 @@ interactive allocation.
     ```Bash
     #!/bin/bash
 
-    #SBATCH -N 1
-    #SBATCH -c 12
+    #SBATCH --nodes=1
+    #SBATCH --cpus-per-task=12
     #SBATCH --time=01:00:00
 
     # Make sure to only use ParaView
@@ -71,7 +72,7 @@ interactive allocation.
 ??? example "Example of interactive allocation using `salloc`"
 
     ```console
-    marie@login$ salloc -N 1 -c 16 --time=01:00:00 bash
+    marie@login$ salloc --nodes=1 --cpus-per-task=16 --time=01:00:00 bash
     salloc: Pending job allocation 336202
     salloc: job 336202 queued and waiting for resources
     salloc: job 336202 has been allocated resources
@@ -102,8 +103,8 @@ cards (GPUs) specified by the device index. For that, make sure to use the modul
     ```Bash
     #!/bin/bash
 
-    #SBATCH -N 1
-    #SBATCH -c 12
+    #SBATCH --nodes=1
+    #SBATCH --cpus-per-task=12
     #SBATCH --gres=gpu:2
     #SBATCH --partition=gpu2
     #SBATCH --time=01:00:00
@@ -133,7 +134,7 @@ handling. First, you need to open a DCV session, so please follow the instructio
 virtual desktop session, then load the ParaView module as usual and start the GUI:
 
 ```console
-marie@dcv module load ParaView/5.7.0
+marie@dcv$ module load ParaView/5.7.0
 paraview
 ```
 
@@ -156,7 +157,7 @@ processes.
 
     ```console
     marie@login$ module ParaView/5.7.0-osmesa
-    marie@login$ srun -N1 -n8 --mem-per-cpu=2500 -p interactive --pty pvserver --force-offscreen-rendering
+    marie@login$ srun --nodes=1 --ntasks=8 --mem-per-cpu=2500 --partition=interactive --pty pvserver --force-offscreen-rendering
     srun: job 2744818 queued and waiting for resources
     srun: job 2744818 has been allocated resources
     Waiting for client...
@@ -166,7 +167,7 @@ processes.
 
 If the default port 11111 is already in use, an alternative port can be specified via `-sp=port`.
 *Once the resources are allocated, the pvserver is started in parallel and connection information
-are outputed.*
+is output.*
 
 This contains the node name which your job and server runs on. However, since the node names of the
 cluster are not present in the public domain name system (only cluster-internally), you cannot just
@@ -183,9 +184,14 @@ The SSH tunnel has to be created from the user's localhost. The following exampl
 forward SSH tunnel to localhost on port 22222 (or what ever port is preferred):
 
 ```console
-marie@local$ ssh -L 22222:172.24.140.229:11111 <zihlogin>@taurus.hrsk.tu-dresden.de
+marie@local$ ssh -L 22222:172.24.140.229:11111 taurus
 ```
 
+!!! important "SSH command"
+
+    The previous SSH command requires that you have already set up your
+    [SSH configuration](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+
 The final step is to start ParaView locally on your own machine and add the connection
 
 - File -> Connect...
@@ -206,7 +212,7 @@ filesystems.
 
 #### Caveats
 
-Connecting to the compute nodes will only work when you are **inside the TUD campus network**,
+Connecting to the compute nodes will only work when you are **inside the TU Dresden campus network**,
 because otherwise, the private networks 172.24.\* will not be routed. That's why you either need to
 use [VPN](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/zugang_datennetz/vpn),
 or, when coming via the ZIH login gateway (`login1.zih.tu-dresden.de`), use an SSH tunnel. For the
@@ -234,15 +240,18 @@ it into thinking your provided GL rendering version is higher than what it actua
 
 ??? example
 
+    The following lines require that you have already set up your
+    [SSH configuration](../access/ssh_login.md#configuring-default-parameters-for-ssh).
+
     ```console
     # 1st, connect to ZIH systems using X forwarding (-X).
     # It is a good idea to also enable compression for such connections (-C):
-    marie@local$ ssh -XC taurus.hrsk.tu-dresden.de
+    marie@local$ ssh -XC taurus
 
     # 2nd, load the ParaView module and override the GL version (if necessary):
     marie@login$ module Paraview/5.7.0
     marie@login$ export MESA_GL_VERSION_OVERRIDE=3.2
 
     # 3rd, start the ParaView GUI inside an interactive job. Don't forget the --x11 parameter for X forwarding:
-    marie@login$ srun -n1 -c1 -p interactive --mem-per-cpu=2500 --pty --x11=first paraview
+    marie@login$ srun --ntasks=1 --cpus-per-task=1 --partition=interactive --mem-per-cpu=2500 --pty --x11=first paraview
     ```
diff --git a/doc.zih.tu-dresden.de/docs/software/zsh.md b/doc.zih.tu-dresden.de/docs/software/zsh.md
new file mode 100644
index 0000000000000000000000000000000000000000..147758a6a66dd84aeb040c80d0000110f4af882c
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/software/zsh.md
@@ -0,0 +1,238 @@
+# ZSH
+
+!!! warning
+    Though all efforts have been made to ensure the accuracy and
+    currency of the content on this website, please be advised that
+    some content might be out of date and there is no continuous
+    website support available. In case of any ambiguity or doubts,
+    users are advised to do their own research on the content's
+    accuracy and currency.
+
+The [ZSH](https://www.zsh.org), short for `z-shell`, is an alternative shell for Linux that offers
+many convenience features for productive use which `bash`, the default shell, does not provide.
+
+This page gives a short introduction to `zsh` and offers some examples that are especially useful
+on ZIH systems.
+
+## `oh-my-zsh`
+
+`oh-my-zsh` is a framework that adds many features to the `zsh` with a very simple install.
+Simply run:
+
+```console
+marie@login$ sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
+```
+
+and then, if it is not already your login shell, run `zsh` or re-login.
+
+The rest of this document assumes that you have `oh-my-zsh` installed and running.
+
+## Features
+
+### Themes
+
+There are many different themes for the `zsh`. See the
+[GitHub-page of `oh-my-zsh`](https://github.com/ohmyzsh/ohmyzsh) for more details.
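+
+With `oh-my-zsh`, the theme is selected via the `ZSH_THEME` variable in your `~/.zshrc`, for
+example (the theme name below is just an illustration):
+
+```bash
+ZSH_THEME="agnoster"
+```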
+
+### Auto-completion
+
+`zsh` offers more auto-completion features than `bash`. You can auto-complete programs, filenames,
+parameters, `man`-pages and a lot more, and you can cycle through the suggestions with the `TAB` key.
+
+![Cycling through auto-completion for parameter names](misc/zsh_autocomplete_parameters.png)
+
+### Syntax-highlighting
+
+When you add this line to your `~/.zshrc` with `oh-my-zsh` and the
+[`zsh-syntax-highlighting`](https://github.com/zsh-users/zsh-syntax-highlighting) plugin installed
+(see below), you get syntax highlighting directly in the shell:
+
+```bash
+plugins+=(
+  zsh-syntax-highlighting
+)
+```
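+
+The plugin itself is not bundled with `oh-my-zsh`. One way to install it is to clone it into the
+custom plugin directory of `oh-my-zsh` (the path below follows the plugin's upstream instructions
+and might need adjustment on your system):
+
+```console
+marie@login$ git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting
+```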
+
+![Syntax-highlighting directly in the shell](misc/zsh_syntax_highlighting.png)
+
+### Typo-correction
+
+With
+
+```bash
+setopt correct_all
+ENABLE_CORRECTION="true"
+```
+
+in `~/.zshrc` you get correction suggestions when the shell suspects a typo,
+e.g. when a command is expected to be handed an existing file.
+
+![Correction suggestion](misc/zsh_typo.png)
+
+### Automatic `cd`
+
+Setting the `AUTO_CD` option in your `~/.zshrc` file allows you to leave out the `cd` when a
+directory name is provided.
+
+```bash
+setopt AUTO_CD
+```
+
+![Automatic cd](misc/zsh_autocd.png)
+
+### `fish`-like auto-suggestions
+
+Install [`zsh-autosuggestions`](https://github.com/zsh-users/zsh-autosuggestions) to get `fish`-shell-like
+auto-suggestions of previous commands that start with the same letters and that you can complete with
+the right arrow key.
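+
+With `oh-my-zsh`, one possible way to install it is analogous to the syntax highlighting plugin
+above: clone it into the custom plugin directory and add `zsh-autosuggestions` to the `plugins`
+list in your `~/.zshrc`:
+
+```console
+marie@login$ git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
+```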
+
+![Auto-suggestion](misc/zsh_autosuggestion.png)
+
+??? example "Addons for your shell"
+    === "`bash`"
+        ```bash
+        # Create a new directory and directly `cd` into it
+        mcd () {
+            mkdir -p $1
+            cd $1
+        }
+
+        # Find the largest files in the current directory easily
+        function treesizethis {
+            du -k --max-depth=1 | sort -nr | awk '
+             BEGIN {
+            split("KB,MB,GB,TB", Units, ",");
+             }
+             {
+            u = 1;
+            while ($1 >= 1024) {
+               $1 = $1 / 1024;
+               u += 1
+            }
+            $1 = sprintf("%.1f %s", $1, Units[u]);
+            print $0;
+             }
+            '
+        }
+
+        #This allows you to run `slurmlogpath $SLURM_ID` and get the log-path directly in stdout:
+        function slurmlogpath {
+            scontrol show job $1 | sed -n -e 's/^\s*StdOut=//p'
+        }
+
+        # `ftails` follow-tails a slurm-log. Call it without parameters to tail the only running job or
+        # get a list of running jobs or use `ftails $JOBID` to tail a specific job
+        function ftails {
+            JOBID=$1
+            if [[ -z $JOBID ]]; then
+                 JOBS=$(squeue --format="%i \\'%j\\' " --me | grep -v JOBID)
+                 NUMBER_OF_JOBS=$(echo "$JOBS" | wc -l)
+                 JOBID=
+                 if [[ "$NUMBER_OF_JOBS" -eq 1 ]]; then
+                     JOBID=$(echo $JOBS | sed -e "s/'//g" | sed -e 's/ .*//')
+                 else
+                     JOBS=$(echo $JOBS | tr -d '\n')
+                     JOBID=$(eval "whiptail --title 'Choose jobs to tail' --menu 'Choose Job to tail' 25 78 16 $JOBS" 3>&1 1>&2 2>&3)
+                 fi
+            fi
+            SLURMLOGPATH=$(slurmlogpath $JOBID)
+            if [[ -e $SLURMLOGPATH ]]; then
+                tail -n100 -f $SLURMLOGPATH
+            else
+                echo "No slurm-log-file found"
+            fi
+        }
+
+        #With this, you only need to type `sq` instead of `squeue -u $USER`.
+        alias sq="squeue --me"
+        ```
+    === "`zsh`"
+        ```bash
+        # Create a new directory and directly `cd` into it
+        mcd () {
+            mkdir -p $1
+            cd $1
+        }
+
+        # Find the largest files in the current directory easily
+        function treesizethis {
+            du -k --max-depth=1 | sort -nr | awk '
+             BEGIN {
+            split("KB,MB,GB,TB", Units, ",");
+             }
+             {
+            u = 1;
+            while ($1 >= 1024) {
+               $1 = $1 / 1024;
+               u += 1
+            }
+            $1 = sprintf("%.1f %s", $1, Units[u]);
+            print $0;
+             }
+            '
+        }
+
+        #This allows you to run `slurmlogpath $SLURM_ID` and get the log-path directly in stdout:
+        function slurmlogpath {
+            scontrol show job $1 | sed -n -e 's/^\s*StdOut=//p'
+        }
+
+        # `ftails` follow-tails a slurm-log. Call it without parameters to tail the only running job or
+        # get a list of running jobs or use `ftails $JOBID` to tail a specific job
+        function ftails {
+            JOBID=$1
+            if [[ -z $JOBID ]]; then
+                 JOBS=$(squeue --format="%i \\'%j\\' " --me | grep -v JOBID)
+                 NUMBER_OF_JOBS=$(echo "$JOBS" | wc -l)
+                 JOBID=
+                 if [[ "$NUMBER_OF_JOBS" -eq 1 ]]; then
+                     JOBID=$(echo $JOBS | sed -e "s/'//g" | sed -e 's/ .*//')
+                 else
+                     JOBS=$(echo $JOBS | tr -d '\n')
+                     JOBID=$(eval "whiptail --title 'Choose jobs to tail' --menu 'Choose Job to tail' 25 78 16 $JOBS" 3>&1 1>&2 2>&3)
+                 fi
+            fi
+            SLURMLOGPATH=$(slurmlogpath $JOBID)
+            if [[ -e $SLURMLOGPATH ]]; then
+                tail -n100 -f $SLURMLOGPATH
+            else
+                echo "No slurm-log-file found"
+            fi
+        }
+
+        #With this, you only need to type `sq` instead of `squeue -u $USER`.
+        alias sq="squeue --me"
+
+        #This will automatically replace `...` with `../..` and `....` with `../../..`
+        # and so on (each additional `.` adding another `/..`) when typing commands:
+        rationalise-dot() {
+            if [[ $LBUFFER = *.. ]]; then
+                LBUFFER+=/..
+            else
+                LBUFFER+=.
+            fi
+        }
+        zle -N rationalise-dot
+        bindkey . rationalise-dot
+
+        # This allows auto-completion for `module load`:
+        function _module {
+            MODULE_COMMANDS=(
+                '-t:Show computer parsable output'
+                'load:Load a module'
+                'unload:Unload a module'
+                'spider:Search for a module'
+                'avail:Show available modules'
+                'list:List loaded modules'
+            )
+
+            MODULE_COMMANDS_STR=$(printf "\n'%s'" "${MODULE_COMMANDS[@]}")
+
+            eval "_describe 'command' \"($MODULE_COMMANDS_STR)\""
+            _values -s ' ' 'flags' $(ml -t avail | sed -e 's#/$##' | tr '\n' ' ')
+        }
+
+        compdef _module "module"
+        ```
+
+## Setting `zsh` as default-shell
+
+Please ask HPC support if you want to set the `zsh` as your default login shell.
diff --git a/doc.zih.tu-dresden.de/mkdocs.yml b/doc.zih.tu-dresden.de/mkdocs.yml
index e897f83f9fca5362646806309cc79fb679994110..00f0b09db3e4f7470b65e203e517f38268739e78 100644
--- a/doc.zih.tu-dresden.de/mkdocs.yml
+++ b/doc.zih.tu-dresden.de/mkdocs.yml
@@ -2,21 +2,23 @@ nav:
   - Home: index.md
   - Application for Login and Resources:
     - Overview: application/overview.md
-    - Terms: application/terms_of_use.md
+    - Terms of Use: application/terms_of_use.md
     - Request for Resources: application/request_for_resources.md
     - Project Request Form: application/project_request_form.md
     - Project Management: application/project_management.md
+    - Acknowledgement: application/acknowledgement.md
   - Access to ZIH Systems:
     - Overview: access/overview.md
     - Connecting with SSH: access/ssh_login.md
-    - Desktop Visualization: access/desktop_cloud_visualization.md
+    - Desktop Cloud Visualization (DCV): access/desktop_cloud_visualization.md
     - Graphical Applications with WebVNC: access/graphical_applications_with_webvnc.md
     - JupyterHub:
       - JupyterHub: access/jupyterhub.md
       - JupyterHub for Teaching: access/jupyterhub_for_teaching.md
+      - JupyterHub Teaching Example: access/jupyterhub_teaching_example.md
     - Key Fingerprints: access/key_fingerprints.md  
     - Security Restrictions: access/security_restrictions.md
-  - Transfer of Data:
+  - Data Transfer:
     - Overview: data_transfer/overview.md
     - Datamover: data_transfer/datamover.md
     - Export Nodes: data_transfer/export_nodes.md
@@ -24,19 +26,20 @@ nav:
     - Overview: software/overview.md
     - Environment:
       - Modules: software/modules.md
-      - Private Modulefiles: software/private_modules.md
-      - Custom EasyBuild Modules: software/custom_easy_build_environment.md
+      - Private Modules: software/private_modules.md
+      - EasyBuild: software/custom_easy_build_environment.md
       - Python Virtual Environments: software/python_virtual_environments.md
+      - ZSH: software/zsh.md
     - Containers:
       - Singularity: software/containers.md
       - Singularity Recipes and Hints: software/singularity_recipe_hints.md
-      - Virtual Machines Tools: software/virtual_machines_tools.md
+      - Singularity for Power9: software/singularity_power9.md
       - Virtual Machines: software/virtual_machines.md
-      - NGC Containers: software/ngc_containers.md
+      - GPU-accelerated Containers for Deep Learning (NGC Containers): software/ngc_containers.md
     - Applications:
-      - Licenses: software/licenses.md
+      - External Licenses: software/licenses.md
       - Computational Fluid Dynamics (CFD): software/cfd.md
-      - Mathematics: software/mathematics.md
+      - Mathematics Applications: software/mathematics.md
       - Nanoscale Simulations: software/nanoscale_simulations.md
       - FEM Software: software/fem_software.md
     - Visualization: software/visualization.md
@@ -53,7 +56,7 @@ nav:
       - PyTorch: software/pytorch.md
       - Distributed Training: software/distributed_training.md
       - Hyperparameter Optimization (OmniOpt): software/hyperparameter_optimization.md
-      - PowerAI: software/power_ai.md
+      - Machine Learning with PowerAI: software/power_ai.md
     - SCS5 Migration Hints: software/scs5_software.md
     - Virtual Desktops: software/virtual_desktops.md
     - Software Development and Tools:
@@ -61,13 +64,13 @@ nav:
       - Building Software: software/building_software.md
       - Compilers: software/compilers.md
       - GPU Programming: software/gpu_programming.md
-      - Libraries: software/math_libraries.md
+      - Mathematics Libraries: software/math_libraries.md
       - Debugging: software/debuggers.md
       - MPI Error Detection: software/mpi_usage_error_detection.md
       - Score-P: software/scorep.md
       - lo2s: software/lo2s.md
       - PAPI Library: software/papi.md
-      - Pika: software/pika.md
+      - PIKA: software/pika.md
       - Perf Tools: software/perf_tools.md
       - Vampir: software/vampir.md
       - Energy Measurement: software/energy_measurement.md
@@ -75,20 +78,18 @@ nav:
     - Overview: data_lifecycle/overview.md
     - Filesystems:
       - Overview: data_lifecycle/file_systems.md
-      - Permanent File Systems: data_lifecycle/permanent.md
+      - Permanent Filesystems: data_lifecycle/permanent.md
       - Lustre: data_lifecycle/lustre.md
       - BeeGFS: data_lifecycle/beegfs.md
       - Warm Archive: data_lifecycle/warm_archive.md
       - Intermediate Archive: data_lifecycle/intermediate_archive.md
     - Workspaces: data_lifecycle/workspaces.md
-    - Preservation of Research Data: data_lifecycle/preservation_research_data.md
-    - Structuring Experiments: data_lifecycle/experiments.md
+    - Long-Term Preservation: data_lifecycle/longterm_preservation.md
   - HPC Resources and Jobs:
     - Overview: jobs_and_resources/overview.md
     - HPC Resources:
       - Overview: jobs_and_resources/hardware_overview.md
       - AMD Rome Nodes: jobs_and_resources/rome_nodes.md
-      - IBM Power9 Nodes: jobs_and_resources/power9.md
       - NVMe Storage: jobs_and_resources/nvme_storage.md
       - Alpha Centauri: jobs_and_resources/alpha_centauri.md
       - HPE Superdome Flex: jobs_and_resources/sd_flex.md
@@ -101,7 +102,7 @@ nav:
       - Binding And Distribution Of Tasks: jobs_and_resources/binding_and_distribution_of_tasks.md
   - Support:
     - How to Ask for Support: support/support.md
-  - Archive of the Old Wiki:
+  - Archive:
     - Overview: archive/overview.md
     - Bio Informatics: archive/bioinformatics.md
     - CXFS End of Support: archive/cxfs_end_of_support.md
@@ -126,10 +127,10 @@ nav:
     - VampirTrace: archive/vampirtrace.md
     - Windows Batchjobs: archive/windows_batch.md
   - Contribute:
-    - How-To: contrib/howto_contribute.md
+    - How-To Contribute: contrib/howto_contribute.md
     - Content Rules: contrib/content_rules.md
-    - Browser-based Editing: contrib/contribute_browser.md
-    - Work Locally Using Containers: contrib/contribute_container.md
+    - Contribute via Browser: contrib/contribute_browser.md
+    - Contribute via Local Clone: contrib/contribute_container.md
     
 # Project Information
 
@@ -137,7 +138,7 @@ site_name: ZIH HPC Compendium
 site_description: ZIH HPC Compendium
 site_author: ZIH Team
 site_dir: public
-site_url: https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium
+site_url: https://doc.zih.tu-dresden.de/
 
 # uncomment next 3 lines if link to repo should not be displayed in the navbar
 
@@ -169,17 +170,26 @@ theme:
 
   logo: assets/images/TUD_Logo_weiss_57.png
   second_logo: assets/images/zih_weiss.png
-
-# extends base css
-
+  features:
+    - navigation.instant
+
+# extends base css/js
 extra_css:
 
   - stylesheets/extra.css
 
+extra_javascript:
+  - javascripts/extra.js
+  - javascripts/mermaid.min.js
+
 markdown_extensions:
     - admonition
     - pymdownx.details
-    - pymdownx.superfences
+    - pymdownx.superfences:
+        custom_fences:
+          - name: mermaid
+            class: mermaid
+            format: !!python/name:pymdownx.superfences.fence_code_format
     - pymdownx.highlight
     - toc:
         permalink: True
@@ -188,9 +198,14 @@ markdown_extensions:
     - pymdownx.tabbed:
         alternate_style: True
 
+          #  - mkdocs-video
+
 extra:
-  homepage: https://tu-dresden.de
+  tud_homepage: https://tu-dresden.de
+  tud_name: "TU Dresden"
+  # second logo
   zih_homepage: https://tu-dresden.de/zih
+  zih_name: "Center for Information Services and High Performance Computing (ZIH)"
   hpcsupport_mail: hpcsupport@zih.tu-dresden.de
 
   # links in footer
@@ -204,3 +219,7 @@ extra:
       name: "Data Protection Declaration / Datenschutzerklärung"
     - link: https://tu-dresden.de/zertifikate
       name: "Certificates"
+
+plugins:
+  - search
+  - markdown-caption
diff --git a/doc.zih.tu-dresden.de/tud_theme/javascripts/extra.js b/doc.zih.tu-dresden.de/tud_theme/javascripts/extra.js
new file mode 100644
index 0000000000000000000000000000000000000000..c9b73dd717902dd9b8c30fc7810422cde2ac7e41
--- /dev/null
+++ b/doc.zih.tu-dresden.de/tud_theme/javascripts/extra.js
@@ -0,0 +1,12 @@
+// keyboard navigation
+// allow expanding navigation items with nested items via keyboard (Enter, Space, arrow keys)
+let nav_links = document.querySelectorAll('.md-nav__link');
+
+Array.from(nav_links).forEach(label => {
+    label.addEventListener('keydown', e => {
+      if (e.key === 'Enter' || e.key === ' ' || e.key === 'ArrowLeft' || e.key === 'ArrowRight') {
+        e.preventDefault();
+        label.click();
+      }
+    });
+  });
\ No newline at end of file
diff --git a/doc.zih.tu-dresden.de/tud_theme/partials/header.html b/doc.zih.tu-dresden.de/tud_theme/partials/header.html
index 486ee1c0075bda3c98b07107c9f6158858efc766..cb32d3abf5ee74f9aaef3cdbb9af86709bca934d 100644
--- a/doc.zih.tu-dresden.de/tud_theme/partials/header.html
+++ b/doc.zih.tu-dresden.de/tud_theme/partials/header.html
@@ -24,24 +24,25 @@
   >
 
     <!-- Link to home -->
+    <!-- TUD Logo -->
     <a
-      href="{{ config.extra.homepage | d(nav.homepage.url, true) | url }}"
-      title="{{ config.site_name | e }}"
+      href="{{ config.extra.tud_homepage | d(nav.homepage.url, true) | url }}"
+      title="{{ config.extra.tud_name | e }}"
       class="md-header__button md-logo"
-      aria-label="{{ config.site_name }}"
+      aria-label="link to {{ config.extra.tud_name }}"
       data-md-component="logo"
     >
-      {% include "partials/logo.html" %}
+      <img src="{{ config.theme.logo | url }}" alt="tu dresden logo" />
     </a>
-
+    <!-- ZIH Logo -->
     <a
     href="{{ config.extra.zih_homepage | url }}"
-    title="{{ config.zih_name | e }}"
+    title="{{ config.extra.zih_name | e }}"
     class="md-header__button zih-logo"
-    aria-label="{{ config.zih_name }}"
+    aria-label="link to{{ config.extra.zih_name }}"
     data-md-component="logo"
     >
-      <img src="{{ config.theme.second_logo | url }}" alt="logo" />
+      <img src="{{ config.theme.second_logo | url }}" alt="zih logo" />
     </a>
 
     <!-- Button to open drawer -->
diff --git a/doc.zih.tu-dresden.de/tud_theme/partials/nav-item.html b/doc.zih.tu-dresden.de/tud_theme/partials/nav-item.html
new file mode 100644
index 0000000000000000000000000000000000000000..b3afc522e8852bd46fa09262585d3cee21e179d9
--- /dev/null
+++ b/doc.zih.tu-dresden.de/tud_theme/partials/nav-item.html
@@ -0,0 +1,140 @@
+<!--
+  Copyright (c) 2016-2021 Martin Donath <martin.donath@squidfunk.com>
+  Permission is hereby granted, free of charge, to any person obtaining a copy
+  of this software and associated documentation files (the "Software"), to
+  deal in the Software without restriction, including without limitation the
+  rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+  sell copies of the Software, and to permit persons to whom the Software is
+  furnished to do so, subject to the following conditions:
+  The above copyright notice and this permission notice shall be included in
+  all copies or substantial portions of the Software.
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+  IN THE SOFTWARE.
+-->
+
+<!-- Wrap everything with a macro to reduce file roundtrips (see #2213) -->
+{% macro render(nav_item, path, level) %}
+
+  <!-- Determine class according to state -->
+  {% set class = "md-nav__item" %}
+  {% if nav_item.active %}
+    {% set class = class ~ " md-nav__item--active" %}
+  {% endif %}
+
+  <!-- Main navigation item with nested items -->
+  {% if nav_item.children %}
+
+    <!-- Determine whether to render item as a section -->
+    {% if "navigation.sections" in features and level == 1 + (
+      "navigation.tabs" in features
+    ) %}
+      {% set class = class ~ " md-nav__item--section" %}
+    {% endif %}
+
+    <!-- Render item with nested items -->
+    <li class="{{ class }} md-nav__item--nested">
+
+      <!-- Active checkbox expands items contained within nested section -->
+      {% set checked = "checked" if nav_item.active %}
+      {% if "navigation.expand" in features and not checked %}
+        <input
+          class="md-nav__toggle md-toggle"
+          data-md-toggle="{{ path }}"
+          data-md-state="indeterminate"
+          type="checkbox"
+          id="{{ path }}"
+          checked
+        />
+      {% else %}
+        <input
+          class="md-nav__toggle md-toggle"
+          data-md-toggle="{{ path }}"
+          type="checkbox"
+          id="{{ path }}"
+          {{ checked }}
+        />
+      {% endif %}
+
+      <!-- Expand active pages -->
+      <label class="md-nav__link" for="{{ path }}" tabindex="0" aria-haspopup="true">
+        {{ nav_item.title }}
+        <span class="md-nav__icon md-icon"></span>
+      </label>
+      <nav
+        class="md-nav"
+        data-md-level="{{ level }}"
+      >
+        <label class="md-nav__title" for="{{ path }}">
+          <span class="md-nav__icon md-icon"></span>
+          {{ nav_item.title }}
+        </label>
+        <ul 
+          class="md-nav__list"
+          data-md-scrollfix 
+          aria-hidden="true" 
+          aria-expanded="false"
+        >
+          <!-- Render nested item list -->
+          {% for nav_item in nav_item.children %}
+            {{ render(nav_item, path ~ "_" ~ loop.index, level + 1) }}
+          {% endfor %}
+        </ul>
+      </nav>
+    </li>
+
+  <!-- Currently active page -->
+  {% elif nav_item == page %}
+    <li class="{{ class }}">
+      {% set toc = page.toc %}
+
+      <!-- Active checkbox expands items contained within nested section -->
+      <input
+        class="md-nav__toggle md-toggle"
+        data-md-toggle="toc"
+        type="checkbox"
+        id="__toc"
+      />
+
+      <!-- Hack: see partials/toc.html for more information -->
+      {% set first = toc | first %}
+      {% if first and first.level == 1 %}
+        {% set toc = first.children %}
+      {% endif %}
+
+      <!-- Render table of contents, if not empty -->
+      {% if toc %}
+        <label class="md-nav__link md-nav__link--active" for="__toc">
+          {{ nav_item.title }}
+          <span class="md-nav__icon md-icon"></span>
+        </label>
+      {% endif %}
+      <a
+        href="{{ nav_item.url | url }}"
+        class="md-nav__link md-nav__link--active"
+      >
+        {{ nav_item.title }}
+      </a>
+
+      <!-- Show table of contents -->
+      {% if toc %}
+        {% include "partials/toc.html" %}
+      {% endif %}
+    </li>
+
+  <!-- Main navigation item -->
+  {% else %}
+    <li class="{{ class }}">
+      <a href="{{ nav_item.url | url }}" class="md-nav__link">
+        {{ nav_item.title }}
+      </a>
+    </li>
+  {% endif %}
+{% endmacro %}
+
+<!-- Render current and nested navigation items -->
+{{ render(nav_item, path, level) }}
\ No newline at end of file
diff --git a/doc.zih.tu-dresden.de/tud_theme/partials/toc.html b/doc.zih.tu-dresden.de/tud_theme/partials/toc.html
index aa0d25faf7db6bf0cd1478daf6106edaf8b5dd8f..341dccc3bbfb70ee83b89c7606ae1dcf3e19a8a6 100644
--- a/doc.zih.tu-dresden.de/tud_theme/partials/toc.html
+++ b/doc.zih.tu-dresden.de/tud_theme/partials/toc.html
@@ -40,7 +40,7 @@
       HPC Support
     </label>
     <p>
-      <img class="operation-status-logo" alt="operation status" src="https://bs.zih.tu-dresden.de/bs.php?action=status_icon&dienst=6">
+      <img class="operation-status-logo" alt="operation status" src="https://doc.zih.tu-dresden.de/misc/status/bs.php?action=status_icon&dienst=6">
       <a href="http://tu-dresden.de/die_tu_dresden/zentrale_einrichtungen/zih/aktuelles/betriebsstatus/index_html?action=dienstinfo&dienst=6">
       Operation Status 
       </a>
diff --git a/doc.zih.tu-dresden.de/tud_theme/stylesheets/extra.css b/doc.zih.tu-dresden.de/tud_theme/stylesheets/extra.css
index 5505ff954a79532a27f55f2b0ad0d82eecd095de..98c54afa6b64cd8b38b576f52ff6f8117a75cf16 100644
--- a/doc.zih.tu-dresden.de/tud_theme/stylesheets/extra.css
+++ b/doc.zih.tu-dresden.de/tud_theme/stylesheets/extra.css
@@ -1,30 +1,47 @@
 /* general */
 @font-face {
     font-family: "Open Sans Regular";
-    src: 
+    src:
         local('Open Sans Regular'),
         url('./Open_Sans/OpenSans-Regular-webfont.woff') format('woff')
 }
 @font-face {
     font-family: "Open Sans Semibold";
-    src: 
+    src:
         local('Open Sans Semibold'),
         url('./Open_Sans/OpenSans-Semibold-webfont.woff') format('woff')
 }
+/*
+    change colors via "css color variables"
+    https://squidfunk.github.io/mkdocs-material/setup/changing-the-colors/#custom-colors
+*/
 :root {
     --md-text-font-family: 'Open Sans Regular', sans-serif;
+    /* TUD CD colors */
+    /* primary colors */
     --md-primary-fg-color:        rgb(0, 37, 87);
     --md-primary-fg-color--light: rgb(39, 66, 117);
     --md-footer-bg-color:         rgb(0, 37, 87);
+
+    --tud-blue-20:                rgb(191, 201, 215);
+    --tud-blue-10:                rgb(207, 214, 225);
+    --ms-blue:                    rgb(233, 237, 242);
+    /* secondary colors */
     --tud-grey-100:               rgb(51, 51, 51);
+    --tud-grey-95:                rgb(102, 102, 102);
+    --tud-grey-90:                rgb(153, 153, 153);
+    --tud-grey-80:                rgb(199, 199, 199);
+    --grey-75:                    rgb(222, 222, 222);
+    --tud-grey-70:                rgb(245, 245, 245);
+    /* interaction color */
     --tud-red-90:                 rgb(221, 29, 29);
-
+    --fg-color--light:              rgba(0, 0, 0, 0.6);
 }
 
-.md-typeset h1, 
-.md-typeset h2, 
-.md-typeset h3, 
-.md-typeset h4, 
+.md-typeset h1,
+.md-typeset h2,
+.md-typeset h3,
+.md-typeset h4,
 .md-typeset h5 {
     font-family: 'Open Sans Semibold';
     line-height: 130%;
@@ -70,14 +87,31 @@ strong {
 .md-grid {
     max-width: 1600px;
 }
+/* accessibility */
 
+/* hide toggles */
+.md-toggle {
+    left: -100vw;
+    position: absolute;
+}
+/* admonitions */
 .md-typeset code {
     word-break: normal;
+    background-color: var(--ms-blue);
+}
+
+* {
+    --md-code-hl-comment-color:   var(--fg-color--light);
+}
+
+.md-clipboard {
+    color: var(--fg-color--light);
 }
 
 .md-typeset .admonition,
 .md-typeset details,
-.md-typeset code {
+.md-typeset code,
+.highlighttable .linenos {
     font-size: 0.8rem;
 }
 
@@ -87,11 +121,11 @@ strong {
 }
 @media screen and (min-width: 76.25rem) {
     .md-header,
-    .md-header__title, 
+    .md-header__title,
     .md-header__button,
     .md-header__topic {
         height: 80px;
-    }  
+    }
 
     .md-header__button,
     .md-header__button.md-logo,
@@ -110,7 +144,7 @@ strong {
 
 .md-header__topic{
     padding: 0 30px;
-    background-color: var(--md-primary-fg-color--light); 
+    background-color: var(--md-primary-fg-color--light);
 }
 
 .md-header__topic *,
@@ -136,20 +170,26 @@ strong {
     height: 36px;
     width: 125px;
 }
-
+/* navbar */
+.md-nav__link--active{
+    background-color: var(--tud-blue-10);
+    color: inherit;
+    border-radius: 5px;
+    padding: 0.2em 0.5em;
+}
 /* toc */
 /* operation-status */
 .operation-status-logo {
     width: 0.5rem;
 }
-    
+
 hr.solid {
     border-top: 2px solid #bbb;
 }
 
 p {
     padding: 0 0.6rem;
-	margin: 0.2em;	
+	margin: 0.2em;
 }
 /* main */
 
diff --git a/doc.zih.tu-dresden.de/util/check-spelling.sh b/doc.zih.tu-dresden.de/util/check-spelling.sh
index 0d574c1e6adeadacb895f31209b16a9d7f25a123..d97f93e20df73b9ea47e501e7196f605f0cacd48 100755
--- a/doc.zih.tu-dresden.de/util/check-spelling.sh
+++ b/doc.zih.tu-dresden.de/util/check-spelling.sh
@@ -7,7 +7,7 @@ basedir=`dirname "$scriptpath"`
 basedir=`dirname "$basedir"`
 wordlistfile=$(realpath $basedir/wordlist.aspell)
 branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}"
-files_to_skip=(doc.zih.tu-dresden.de/docs/accessibility.md doc.zih.tu-dresden.de/docs/data_protection_declaration.md data_protection_declaration.md)
+files_to_skip=(doc.zih.tu-dresden.de/docs/accessibility.md doc.zih.tu-dresden.de/docs/data_protection_declaration.md doc.zih.tu-dresden.de/docs/legal_notice.md doc.zih.tu-dresden.de/docs/access/key_fingerprints.md)
 aspellmode=
 if aspell dump modes | grep -q markdown; then
   aspellmode="--mode=markdown"
diff --git a/doc.zih.tu-dresden.de/util/check-templated-code-snippets.py b/doc.zih.tu-dresden.de/util/check-templated-code-snippets.py
new file mode 100755
index 0000000000000000000000000000000000000000..51829691c01c3b214adb784e6fd5b033391e0e40
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/check-templated-code-snippets.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+#-*- coding: utf-8 -*-
+import re
+import sys
+
+def escapeSomeSigns(someString):
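+    # Escape regex metacharacters ($, parentheses, *) so that a documentation line can later be used as a regular expression.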
+    return someString.replace("$", "\\$").replace("(", "\\(").replace(")", "\\)").replace("*", "\\*")
+
+fileName = sys.argv[1]
+print("FILE: " + fileName)
+lines = []
+NORMAL_MODE = 0
+CODE_MODE = 1
+readMode = NORMAL_MODE
+# We need to avoid matches for "#include <iostream>", "<Ctrl+D>", "<-" (typically in R) and "<594>" (VampirServer)
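+# Conversely, template placeholders such as "<FILENAME>" should be matched so that a concrete example can be required for them.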
+pattern = re.compile(r"(?<!#include )<(?!Ctrl\+)[^0-9 -][^<>']*>")
+with open(fileName) as f:
+    lineNumber = 1
+    for line in f:
+        if "```" in line:
+            # toggle read mode if we find a line with ```, so that we know that we are in a code block or not
+            readMode = CODE_MODE if readMode == NORMAL_MODE else NORMAL_MODE
+        strippedLine = line.strip()
+        # We want tuples with lineNumber, the line itself, whether it is a code line, whether it contains a template (e.g. <FILENAME>), and the line again with all templates replaced by '\\S*'
+        lines.append((lineNumber, strippedLine, readMode, pattern.search(strippedLine) != None, pattern.sub(r"\\S*", escapeSomeSigns(strippedLine))))
+        lineNumber += 1
+# those tuples with the CODE_MODE as field 2 represent code lines
+codeLines = list(filter(lambda line: line[2] == CODE_MODE, lines))
+# We take the line number, the line, and a regular expression from those code lines which contain a template and call them templatedLines
+templatedLines = list(map(lambda line: (line[0], line[1], re.compile(line[4])), filter(lambda line: line[3], codeLines)))
+allPatternsFound = True
+for templateLine in templatedLines:
+    # find code lines which have a higher line number than the templateLine, contain no template themselves and match the pattern of the templateLine
+    matchingCodeLines = list(filter(lambda line: (line[0] > templateLine[0]) and (not line[3]) and (templateLine[2].match(line[1]) != None), codeLines))
+    if len(matchingCodeLines) == 0:
+        allPatternsFound = False
+        print("  Example for \"" + templateLine[1] + "\" (Line " + str(templateLine[0]) + ") missing")
+
+if not allPatternsFound:
+    sys.exit(1)
diff --git a/doc.zih.tu-dresden.de/util/check-templated-code-snippets.sh b/doc.zih.tu-dresden.de/util/check-templated-code-snippets.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2eecc039b7a3611eeef9b4b264bc0185c6fcad39
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/check-templated-code-snippets.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+set -eo pipefail
+
+scriptpath=${BASH_SOURCE[0]}
+basedir=`dirname "$scriptpath"`
+pythonscript="$basedir/check-templated-code-snippets.py"
+basedir=`dirname "$basedir"`
+
+function usage() {
+  cat <<-EOF
+usage: $0 [file | -a]
+Search for code snippets that use templates but do not give examples.
+If file is given, outputs all lines where no example could be found.
+If parameter -a (or --all) is given instead of the file, checks all markdown files.
+Otherwise, checks whether any changed file contains code snippets with templates without examples.
+EOF
+}
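+# e.g. "./doc.zih.tu-dresden.de/util/check-templated-code-snippets.sh -a" checks all markdown files at once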
+
+branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}"
+
+# Options
+if [ $# -eq 1 ]; then
+  case $1 in
+  help | -help | --help)
+    usage
+    exit
+  ;;
+  -a | --all)
+    echo "Search in all markdown files."
+    files=$(git ls-tree --full-tree -r --name-only HEAD $basedir/ | grep .md)
+  ;;
+  *)
+    files="$1"
+  ;;
+  esac
+elif [ $# -eq 0 ]; then
+  echo "Search in git-changed files."
+  files=`git diff --name-only "$(git merge-base HEAD "$branch")" | grep .md || true`
+else
+  usage
+  exit 1
+fi
+
+all_ok=''
+for f in $files; do
+  if ! $pythonscript $f; then
+    all_ok='no'
+  fi
+done
+
+if [ -z "$all_ok" ]; then
+  echo "Success!"
+else
+  echo "Fail!"
+  exit 1
+fi
diff --git a/doc.zih.tu-dresden.de/util/create-issues-all-pages.py b/doc.zih.tu-dresden.de/util/create-issues-all-pages.py
new file mode 100755
index 0000000000000000000000000000000000000000..87975e2c1f89f6c7373004ddace0a265f908dc9d
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/create-issues-all-pages.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+#-*- coding: utf-8 -*-
+
+
+import yaml
+
+
+"""
+    Create issue for every page of the compendium.
+
+    Import issues from csv file in GitLab:
+    It must have a header row and at least two columns:
+    the first column is the issue title and the second column is the issue description.
+    The separator is automatically detected. The maximum file size allowed is 100 MB.
+"""
+
+def extract_filename(d):
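+    # Flatten the nested nav structure of mkdocs.yml, e.g.
+    # [{'HPC Resources and Jobs': [{'Overview': 'jobs_and_resources/overview.md'}]}]
+    # yields ['jobs_and_resources/overview.md'].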
+    l = []
+    for j in d:
+        for key, value in j.items():
+            if isinstance(value, list):
+                l += extract_filename(value)
+            else:
+                l.append(value)
+    return l
+
+# Read in
+config = yaml.safe_load(open('../mkdocs.yml'))
+
+# Process
+l = extract_filename(config['nav'])
+
+# Output
+of = "issues.csv"
+with open(of, "w") as f:
+    f.write("Title, Description\n")
+    for i in l:
+        if "archive" in i:
+            continue
+        j = i.split("/")
+        t1 = j[0].upper() + " " + j[-1][0:-3]
+        f.write(f"{t1}, {i}\n")
+
+print(f"{of}" written)
diff --git a/doc.zih.tu-dresden.de/util/create-issues-all-pages.sh b/doc.zih.tu-dresden.de/util/create-issues-all-pages.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0d41c7bd1c8446c252dd53818a02a1c6976dae7a
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/create-issues-all-pages.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -eo pipefail
+
+scriptpath=${BASH_SOURCE[0]}
+basedir=`dirname "$scriptpath"`
+basedir=`dirname "$basedir"`
+
+files=$(git ls-tree --full-tree -r --name-only HEAD $basedir/ | grep '\.md$' | grep -v '/archive/' || true)
+
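+# Every markdown file outside the archive becomes one task-list item ("- [ ] <file>") in the issue description below.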
+description=""
+for f in $files; do
+description="$description- [ ] $f
+"
+done
+
+curl --request POST --header "PRIVATE-TOKEN: $SCHEDULED_PAGE_CHECK_PAT" --form 'title="Regular check of all pages"' --form "description=\"$description\"" --form "labels=Bot" https://gitlab.hrz.tu-chemnitz.de/api/v4/projects/${CI_PROJECT_ID}/issues
diff --git a/doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh b/doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9986ad6f49e2e739f8a53d7911f4e346196d21a4
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/download-newest-mermaid.js.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -euo pipefail
+
+scriptpath=${BASH_SOURCE[0]}
+basedir=`dirname "$scriptpath"`
+basedir=`dirname "$basedir"`
+cd $basedir/tud_theme/javascripts
+wget https://unpkg.com/mermaid/dist/mermaid.min.js
diff --git a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
index f3cfa673ce063a674cb2f850d7f7da252a6ab093..cacde0d9ee84f903a55d3109dcd330d3e43184ad 100755
--- a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
+++ b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
@@ -40,15 +40,27 @@ i	\<todo\>	<!--.*todo.*-->
 Replace variations of \"Coming soon\" with real content.
 
 i	\(\<coming soon\>\|This .* under construction\|posted here\)
+Add table column headers.
+
+i	^[ |]*|$
 Avoid spaces at end of lines.
 doc.zih.tu-dresden.de/docs/accessibility.md
 i	[[:space:]]$
+When referencing projects, please use p_number_crunch for consistency.
+
+i	\<p_	p_number_crunch
+Avoid \`home\`. Use home without backticks instead.
+
+i	\`home\`
+Internal links should not contain \"/#\".
+
+i	(.*/#.*)	(http
 When referencing partitions, put keyword \"partition\" in front of partition name, e. g. \"partition ml\", not \"ml partition\".
 doc.zih.tu-dresden.de/docs/contrib/content_rules.md
-i	\(alpha\|ml\|haswell\|romeo\|gpu\|smp\|julia\|hpdlf\|scs5\|dcv\)-\?\(interactive\)\?[^a-z]*partition
+i	\(alpha\|ml\|haswell\|romeo\|gpu\|smp\|julia\|hpdlf\|scs5\|dcv\)-\?\(interactive\)\?[^a-z|]*partition
 Give hints in the link text. Words such as \"here\" or \"this link\" are meaningless.
 doc.zih.tu-dresden.de/docs/contrib/content_rules.md
-i	\[\s\?\(documentation\|here\|more info\|this \(link\|page\|subsection\)\|slides\?\|manpage\)\s\?\]
+i	\[\s\?\(documentation\|here\|more info\|\(this \)\?\(link\|page\|subsection\)\|slides\?\|manpage\)\s\?\]
 Use \"workspace\" instead of \"work space\" or \"work-space\".
 doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	work[ -]\+space"
@@ -74,7 +86,8 @@ function checkFile(){
     fi
     IFS=$'\t' read -r flags pattern exceptionPatterns
     while IFS=$'\t' read -r -a exceptionPatternsArray; do
-      if [ $silent = false ]; then
+      # Prevent patterns from being printed when the script is invoked with default arguments.
+      if [ $verbose = true ]; then
         echo "  Pattern: $pattern$skipping"
       fi
       if [ -z "$skipping" ]; then
@@ -87,6 +100,7 @@ function checkFile(){
         if grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" ; then
           number_of_matches=`grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" | wc -l`
           ((cnt=cnt+$number_of_matches))
+          # Prevent messages when silent=true; only files, pattern matches, and the summary are printed.
           if [ $silent = false ]; then
             echo "    $message"
           fi
@@ -97,23 +111,29 @@ function checkFile(){
 }
 
 function usage () {
-  echo "$0 [options]"
-  echo "Search forbidden patterns in markdown files."
-  echo ""
-  echo "Options:"
-  echo "  -a     Search in all markdown files (default: git-changed files)" 
-  echo "  -f     Search in a specific markdown file" 
-  echo "  -s     Silent mode"
-  echo "  -h     Show help message"
-  echo "  -c     Show git matches in color"
+cat <<EOF
+$0 [options]
+Search forbidden patterns in markdown files.
+
+Options:
+  -a    Search in all markdown files (default: git-changed files)
+  -f    Search in a specific markdown file
+  -s    Silent mode
+  -h    Show help message
+  -c    Show git matches in color
+  -v    Verbose mode
+EOF
 }
 
 # Options
 all_files=false
+# If silent=true: avoid printing of messages
 silent=false
+# If verbose=true: print the files first and then each pattern that is checked
+verbose=false
 file=""
 color=""
-while getopts ":ahsf:c" option; do
+while getopts ":ahsf:cv" option; do
  case $option in
    a)
      all_files=true
@@ -128,6 +148,9 @@ while getopts ":ahsf:c" option; do
    c)
      color=" --color=always "
      ;;
+   v)
+     verbose=true
+     ;;
    h)
      usage
      exit;;
@@ -150,7 +173,10 @@ else
   files=`git diff --name-only "$(git merge-base HEAD "$branch")"`
 fi
 
+# Prevent files from being printed when the script is invoked with default arguments.
+if [ $verbose = true ]; then
 echo "... $files ..."
+fi
 cnt=0
 if [[ ! -z $file ]]; then
   checkFile $file
diff --git a/doc.zih.tu-dresden.de/util/pre-commit b/doc.zih.tu-dresden.de/util/pre-commit
index 1cc901e00efbece94209bfa6c4bbbc54aad682e9..cd91996b696ba94f095cec86b0be9b45fdc560a0 100755
--- a/doc.zih.tu-dresden.de/util/pre-commit
+++ b/doc.zih.tu-dresden.de/util/pre-commit
@@ -82,6 +82,13 @@ then
   exit_ok=no
 fi
 
+echo "Looking for files with templates but without examples..."
+docker run --name=hpc-compendium --rm -w /docs --mount src="$(pwd)",target=/docs,type=bind hpc-compendium ./doc.zih.tu-dresden.de/util/check-templated-code-snippets.sh
+if [ $? -ne 0 ]
+then
+  exit_ok=no
+fi
+
 if [ $exit_ok == yes ]
 then
   exit 0
diff --git a/doc.zih.tu-dresden.de/wordlist.aspell b/doc.zih.tu-dresden.de/wordlist.aspell
index 8bcd6a7c24872843e665bc7fc1ed91241284c780..e158b9da24ea5b378352f9aec4c96d681c2cf5c8 100644
--- a/doc.zih.tu-dresden.de/wordlist.aspell
+++ b/doc.zih.tu-dresden.de/wordlist.aspell
@@ -1,11 +1,13 @@
-personal_ws-1.1 en 203
+personal_ws-1.1 en 429
 Abaqus
+Addon
+Addons
 ALLREDUCE
 Altix
 Amber
 Amdahl's
-analytics
 Analytics
+analytics
 anonymized
 Ansys
 APIs
@@ -13,7 +15,9 @@ AVX
 awk
 BeeGFS
 benchmarking
+BESM
 BLAS
+BMC
 broadwell
 bsub
 bullx
@@ -30,26 +34,30 @@ CLI
 CMake
 COMSOL
 conda
-config
 CONFIG
-cpu
+config
 CPU
+cpu
 CPUID
-cpus
 CPUs
+cpus
 crossentropy
 css
 CSV
 CUDA
 cuDNN
 CXFS
-dask
 Dask
-dataframes
+dask
 DataFrames
-datamover
+dataframes
+Dataheap
+Datamover
 DataParallel
 dataset
+Dataset
+datasets
+Datasets
 DCV
 ddl
 DDP
@@ -63,6 +71,7 @@ Dockerfile
 Dockerfiles
 DockerHub
 dockerized
+DOI
 dotfile
 dotfiles
 downtime
@@ -75,6 +84,7 @@ engl
 english
 env
 EPYC
+ESER
 Espresso
 ESSL
 facto
@@ -83,8 +93,9 @@ FFT
 FFTW
 filesystem
 filesystems
-flink
 Flink
+flink
+FlinkExample
 FMA
 foreach
 Fortran
@@ -94,6 +105,7 @@ Gaussian
 GBit
 GDB
 GDDR
+GFlop
 GFLOPS
 gfortran
 GiB
@@ -104,29 +116,33 @@ GitLab's
 glibc
 Gloo
 gnuplot
-gpu
 GPU
+gpu
 GPUs
 gres
 GROMACS
 GUIs
+Hackenberg
 hadoop
 haswell
 HBM
+HDEEM
 HDF
 HDFS
 HDFView
 hiera
-horovod
+Hochleistungsrechner
 Horovod
+horovod
 horovodrun
 hostname
 Hostnames
-hpc
 HPC
+hpc
 hpcsupport
 HPE
 HPL
+HRSK
 html
 hvd
 hyperparameter
@@ -134,6 +150,7 @@ hyperparameters
 hyperthreading
 icc
 icpc
+iDataPlex
 ifort
 ImageNet
 img
@@ -143,16 +160,21 @@ inode
 Instrumenter
 IOPS
 IPs
+ipynb
+ipython
+IPython
 ISA
 Itanium
 jobqueue
 jpg
 jss
-jupyter
 Jupyter
+jupyter
 JupyterHub
 JupyterLab
+Jupytext
 Keras
+kernelspec
 KNL
 Kunststofftechnik
 LAMMPS
@@ -167,14 +189,17 @@ LoadLeveler
 localhost
 lsf
 lustre
+macOS
 markdownlint
 Mathematica
 MathKernel
 MathWorks
-matlab
+MATLAB
+Matplotlib
 MEGWARE
 mem
 Memcheck
+MFlop
 MiB
 Microarchitecture
 MIMD
@@ -187,9 +212,11 @@ modenv
 modenvs
 modulefile
 Montecito
+mortem
+Mortem
 mountpoint
-mpi
 Mpi
+mpi
 mpicc
 mpiCC
 mpicxx
@@ -197,8 +224,8 @@ mpif
 mpifort
 mpirun
 multicore
-multiphysics
 Multiphysics
+multiphysics
 multithreaded
 Multithreading
 NAMD
@@ -206,24 +233,31 @@ natively
 nbgitpuller
 nbsp
 NCCL
+NEC
 Neptun
 NFS
 NGC
-nodelist
 NODELIST
+nodelist
 NRINGS
+Nsight
 ntasks
 NUM
 NUMA
 NUMAlink
 NumPy
 Nutzungsbedingungen
+nvcc
 Nvidia
+NVIDIA
 NVLINK
 NVMe
+nvprof
+Nvprof
 NWChem
 OME
 OmniOpt
+OpARA
 OPARI
 OpenACC
 OpenBLAS
@@ -234,6 +268,7 @@ OpenMPI
 openmpi
 OpenSSH
 Opteron
+OST
 OTF
 overfitting
 Pandarallel
@@ -244,10 +279,11 @@ parallelize
 parallelized
 parfor
 pdf
-perf
 Perf
+perf
 performant
 PESSL
+PFlop
 PGI
 PiB
 Pika
@@ -258,11 +294,15 @@ PowerAI
 ppc
 Pre
 pre
+preload
 Preload
 preloaded
 preloading
 prepend
 preprocessing
+profiler
+Profiler
+profiler's
 PSOCK
 Pthread
 Pthreads
@@ -270,19 +310,20 @@ pty
 PuTTY
 pymdownx
 PythonAnaconda
-pytorch
 PyTorch
+pytorch
 Quantum
 queue
-quickstart
 Quickstart
+quickstart
 randint
 reachability
 README
+Rechenautomat
 reproducibility
 requeueing
-resnet
 ResNet
+resnet
 RHEL
 Rmpi
 rome
@@ -290,6 +331,7 @@ romeo
 RSA
 RSS
 RStudio
+rsync
 Rsync
 runnable
 Runtime
@@ -305,25 +347,34 @@ scalable
 ScaLAPACK
 Scalasca
 scancel
+Schnupperaccount
 Scikit
 SciPy
 scontrol
 scp
 scs
+SDK
+sftp
 SFTP
 SGEMM
 SGI
 SHA
 SHMEM
+situ
 SLES
 Slurm
 SLURMCluster
 SMP
 SMT
+SparkExample
+spawner
+Speicherkomplex
 spython
 squeue
 srun
 ssd
+ssh
+SSH
 SSHFS
 STAR
 stderr
@@ -337,8 +388,9 @@ SXM
 TBB
 TCP
 TensorBoard
-tensorflow
 TensorFlow
+tensorflow
+TFlop
 TFLOPS
 Theano
 tmp
@@ -346,16 +398,18 @@ ToDo
 todo
 toolchain
 toolchains
-torchvision
 Torchvision
+torchvision
 tracefile
 tracefiles
 tracepoints
 transferability
 Trition
+und
 undistinguishable
 unencrypted
 uplink
+urlpath
 userspace
 Valgrind
 Vampir
@@ -369,9 +423,13 @@ virtualenv
 VirtualGL
 VMs
 VMSize
+VNC
 VPN
+VRs
+walltime
 WebVNC
 WinSCP
+WML
 Workdir
 workspace
 workspaces
@@ -382,6 +440,8 @@ XLC
 XLF
 Xming
 yaml
+Zeiss
 ZIH
-zih
 ZIH's
+ZRA
+ZSH