diff --git a/Dockerfile b/Dockerfile
index 57490c2509a22302ba13ed4bd05d32f0d7b0fb51..b272bf553212534167e23e083d4a0c088700a025 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,7 @@
 FROM python:3.8-bullseye
 
+SHELL ["/bin/bash", "-c"]
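+# bash (instead of the default sh) is needed, e.g., for the ANSI-C quoted string ($'...') below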
+
 ########
 # Base #
 ########
@@ -14,6 +16,19 @@ RUN apt update && apt install -y nodejs npm aspell git
 
 RUN npm install -g markdownlint-cli markdown-link-check
 
+###########################################
+# prepare git for automatic merging in CI #
+###########################################
+RUN git config --global user.name 'Gitlab Bot'
+RUN git config --global user.email 'hpcsupport@zih.tu-dresden.de'
+
+RUN mkdir -p ~/.ssh
+
+#see output of `ssh-keyscan gitlab.hrz.tu-chemnitz.de`
+RUN echo $'# gitlab.hrz.tu-chemnitz.de:22 SSH-2.0-OpenSSH_7.4\n\
+gitlab.hrz.tu-chemnitz.de ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNixJ1syD506jOtiLPxGhAXsNnVfweFfzseh9/WrNxbTgIhi09fLb5aZI2CfOOWIi4fQz07S+qGugChBs4lJenLYAu4b0IAnEv/n/Xnf7wITf/Wlba2VSKiXdDqbSmNbOQtbdBLNu1NSt+inFgrreaUxnIqvWX4pBDEEGBAgG9e2cteXjT/dHp4+vPExKEjM6Nsxw516Cqv5H1ZU7XUTHFUYQr0DoulykDoXU1i3odJqZFZQzcJQv/RrEzya/2bwaatzKfbgoZLlb18T2LjkP74b71DeFIQWV2e6e3vsNwl1NsvlInEcsSZB1TZP+mKke7JWiI6HW2IrlSaGqM8n4h\n\
+gitlab.hrz.tu-chemnitz.de ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ/cSNsKRPrfXCMjl+HsKrnrI3HgbCyKWiRa715S99BR\n' > ~/.ssh/known_hosts
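+# the pinned key fingerprints can be double-checked with: ssh-keygen -lf ~/.ssh/known_hosts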
+
 WORKDIR /docs
 
 CMD ["mkdocs", "build", "--verbose", "--strict"]
diff --git a/doc.zih.tu-dresden.de/README.md b/doc.zih.tu-dresden.de/README.md
index 57cb9a23f94bc38d004049816873cd3105d618d6..bf1b82f52a145f959068fa063d9dbdf31fb2eae3 100644
--- a/doc.zih.tu-dresden.de/README.md
+++ b/doc.zih.tu-dresden.de/README.md
@@ -11,7 +11,7 @@ long describing complex steps, contributing is quite easy - trust us.
 Users can contribute to the documentation via the
 [issue tracking system](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/issues).
 For that, open an issue to report typos and missing documentation or request for more precise
-wording etc.  ZIH staff will get in touch with you to resolve the issue and improve the
+wording etc. ZIH staff will get in touch with you to resolve the issue and improve the
 documentation.
 
 **Reminder:** Non-documentation issues and requests need to be send as ticket to
@@ -41,8 +41,6 @@ Now, create a local clone of your fork
 #### Install Dependencies
 
 See [Installation with Docker](#preview-using-mkdocs-with-dockerfile).
-**TODO:** virtual environment
-**TODO:** What we need for markdownlinter and checks?
 
 <!--- All branches are protected, i.e., only ZIH staff can create branches and push to them --->
 
@@ -107,7 +105,7 @@ Open `http://127.0.0.1:8000` with a web browser to preview the local copy of the
 
 You can also use `docker` to build a container from the `Dockerfile`, if you are familiar with it.
 This may take a while, as mkdocs and other necessary software needs to be downloaded.
-Building a container with the documentation inside could be done with the following steps:
+Building a container could be done with the following steps:
 
 ```Bash
 cd /PATH/TO/hpc-compendium
@@ -137,7 +135,7 @@ echo http://$(docker inspect -f "{{.NetworkSettings.IPAddress}}" $(docker ps -qf
 ```
 
 The running container automatically takes care of file changes and rebuilds the
-documentation.  If you want to check whether the markdown files are formatted
+documentation. If you want to check whether the markdown files are formatted
 properly, use the following command:
 
 ```Bash
@@ -247,7 +245,8 @@ There are two important branches in this repository:
 - Preview:
   - Branch containing recent changes which will be soon merged to main branch (protected
     branch)
-  - Served at [todo url](todo url) from TUD VPN
+  - Served at [https://doc.zih.tu-dresden.de/preview](https://doc.zih.tu-dresden.de/preview) from
+    TUD-ZIH VPN
 - Main: Branch which is deployed at [https://doc.zih.tu-dresden.de](https://doc.zih.tu-dresden.de)
     holding the current documentation (protected branch)
 
diff --git a/doc.zih.tu-dresden.de/docs/access/desktop_cloud_visualization.md b/doc.zih.tu-dresden.de/docs/access/desktop_cloud_visualization.md
index b9c0d1cd8f894c6944b52daa07fa09c772c73dc0..7395aad287f5c197ae8ba639491c493e87f2ffe9 100644
--- a/doc.zih.tu-dresden.de/docs/access/desktop_cloud_visualization.md
+++ b/doc.zih.tu-dresden.de/docs/access/desktop_cloud_visualization.md
@@ -11,7 +11,7 @@ if you want to know whether your browser is supported by DCV.
 
 **Check out our new documentation about** [Virtual Desktops](../software/virtual_desktops.md).
 
-To start a JupyterHub session on the dcv partition (taurusi210\[4-8\]) with one GPU, six CPU cores
+To start a JupyterHub session on the partition `dcv` (`taurusi210[4-8]`) with one GPU, six CPU cores
 and 2583 MB memory per core, click on:
 [https://taurus.hrsk.tu-dresden.de/jupyter/hub/spawn#/~(partition~'dcv~cpuspertask~'6~gres~'gpu*3a1~mempercpu~'2583~environment~'production)](https://taurus.hrsk.tu-dresden.de/jupyter/hub/spawn#/~(partition~'dcv~cpuspertask~'6~gres~'gpu*3a1~mempercpu~'2583~environment~'production))
 Optionally, you can modify many different Slurm parameters. For this
diff --git a/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md b/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md
index 6837ace6473f9532e608778ec96049394b4c4494..c652738dc859beecf3dc9669fdde684dc49d04f3 100644
--- a/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md
+++ b/doc.zih.tu-dresden.de/docs/access/graphical_applications_with_webvnc.md
@@ -38,7 +38,7 @@ marie@login$ srun --pty --partition=interactive --mem-per-cpu=2500 --cpus-per-ta
 [...]
 ```
 
-Of course, you can adjust the batch job parameters to your liking. Note that the default timelimit
+Of course, you can adjust the batch job parameters to your liking. Note that the default time limit
 in partition `interactive` is only 30 minutes, so you should specify a longer one with `--time` (or `-t`).
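+For example, adding `--time=02:00:00` to the `srun` command above requests a two-hour session.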
 
 The script will automatically generate a self-signed SSL certificate and place it in your home
diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
index b6b0f25d3963da0529f26274a3daf4bdfcb0bbe0..f9a916195ecbf814cf426beb4d26885500b3b3de 100644
--- a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
@@ -1,7 +1,7 @@
 # JupyterHub
 
 With our JupyterHub service we offer you a quick and easy way to work with Jupyter notebooks on ZIH
-systems. This page covers starting and stopping JuperterHub sessions, error handling and customizing
+systems. This page covers starting and stopping JupyterHub sessions, error handling and customizing
 the environment.
 
 We also provide a comprehensive documentation on how to use
@@ -21,7 +21,8 @@ cannot give extensive support in every case.
 
 !!! note
     This service is only available for users with an active HPC project.
-    See [here](../access/overview.md) how to apply for an HPC project.
+    See [Application for Login and Resources](../application/overview.md), if you need to apply for
+    an HPC project.
 
 JupyterHub is available at
 [https://taurus.hrsk.tu-dresden.de/jupyter](https://taurus.hrsk.tu-dresden.de/jupyter).
@@ -100,7 +101,7 @@ running the code. We currently offer one for Python, C++, MATLAB and R.
 
 ## Stop a Session
 
-It is good practise to stop your session once your work is done. This releases resources for other
+It is good practice to stop your session once your work is done. This releases resources for other
 users and your quota is less charged. If you just log out or close the window, your server continues
 running and **will not stop** until the Slurm job runtime hits the limit (usually 8 hours).
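+
+If needed, you can also check from a shell whether such a job is still running and cancel it
+manually. This is only a sketch; the job ID below is hypothetical and has to be replaced with the
+one shown by `squeue`:
+
+```console
+marie@login$ squeue --user=marie     # find the job that runs your notebook server
+marie@login$ scancel 12345678        # hypothetical job ID, replace with yours
+```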
 
@@ -147,8 +148,8 @@ Useful pages for valid batch system parameters:
 
 If the connection to your notebook server unexpectedly breaks, you will get this error message.
 Sometimes your notebook server might hit a batch system or hardware limit and gets killed. Then
-usually the logfile of the corresponding batch job might contain useful information. These logfiles
-are located in your `home` directory and have the name `jupyter-session-<jobid>.log`.
+usually the log file of the corresponding batch job might contain useful information. These log
+files are located in your `home` directory and have the name `jupyter-session-<jobid>.log`.
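+
+For example, to look at the end of such a log file, you could run the following (the job ID is
+hypothetical and has to be replaced with the ID of your session's batch job):
+
+```console
+marie@login$ tail -n 50 ~/jupyter-session-12345678.log
+```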
 
 ## Advanced Tips
 
@@ -309,4 +310,4 @@ You can switch kernels of existing notebooks in the kernel menu:
 You have now the option to preload modules from the [module system](../software/modules.md).
 Select multiple modules that will be preloaded before your notebook server starts. The list of
 available modules depends on the module environment you want to start the session in (`scs5` or
-`ml`).  The right module environment will be chosen by your selected partition.
+`ml`). The right module environment will be chosen by your selected partition.
diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
index 92ad16d1325173c384c7472658239baca3e26157..797d9fc8e455b14e40a5ec7f3737874b2ac500ae 100644
--- a/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
@@ -1,7 +1,7 @@
 # JupyterHub for Teaching
 
-On this page we want to introduce to you some useful features if you
-want to use JupyterHub for teaching.
+On this page, we want to introduce to you some useful features if you want to use JupyterHub for
+teaching.
 
 !!! note
 
@@ -9,23 +9,21 @@ want to use JupyterHub for teaching.
 
 Please be aware of the following notes:
 
-- ZIH systems operate at a lower availability level than your usual Enterprise Cloud VM. There
-  can always be downtimes, e.g. of the filesystems or the batch system.
+- ZIH systems operate at a lower availability level than your usual Enterprise Cloud VM. There can
+  always be downtimes, e.g. of the filesystems or the batch system.
 - Scheduled downtimes are announced by email. Please plan your courses accordingly.
 - Access to HPC resources is handled through projects. See your course as a project. Projects need
   to be registered beforehand (more info on the page [Access](../application/overview.md)).
 - Don't forget to [add your users](../application/project_management.md#manage-project-members-dis-enable)
-  (eg. students or tutors) to your project.
+  (e.g. students or tutors) to your project.
 - It might be a good idea to [request a reservation](../jobs_and_resources/overview.md#exclusive-reservation-of-hardware)
-  of part of the compute resources for your project/course to
-  avoid unnecessary waiting times in the batch system queue.
+  of part of the compute resources for your project/course to avoid unnecessary waiting times in
+  the batch system queue.
 
 ## Clone a Repository With a Link
 
-This feature bases on
-[nbgitpuller](https://github.com/jupyterhub/nbgitpuller).
-Documentation can be found at
-[this page](https://jupyterhub.github.io/nbgitpuller/).
+This feature is based on [nbgitpuller](https://github.com/jupyterhub/nbgitpuller). Further information
+can be found in the [external documentation about nbgitpuller](https://jupyterhub.github.io/nbgitpuller/).
 
 This extension for Jupyter notebooks can clone every public git repository into the users work
 directory. It's offering a quick way to distribute notebooks and other material to your students.
@@ -50,14 +48,14 @@ The following parameters are available:
 |---|---|
 |`repo`    | path to git repository|
 |`branch`  | branch in the repository to pull from default: `master`|
-|`urlpath` | URL to redirect the user to a certain file [more info](https://jupyterhub.github.io/nbgitpuller/topic/url-options.html#urlpath)|
+|`urlpath` | URL to redirect the user to a certain file, [more info about parameter urlpath](https://jupyterhub.github.io/nbgitpuller/topic/url-options.html#urlpath)|
 |`depth`   | clone only a certain amount of latest commits not recommended|
 
 This [link
 generator](https://jupyterhub.github.io/nbgitpuller/link?hub=https://taurus.hrsk.tu-dresden.de/jupyter/)
 might help creating those links
 
-## Spawner Options Passthrough with URL Parameters
+## Spawn Options Pass-through with URL Parameters
 
 The spawn form now offers a quick start mode by passing URL parameters.
 
diff --git a/doc.zih.tu-dresden.de/docs/access/ssh_login.md b/doc.zih.tu-dresden.de/docs/access/ssh_login.md
index 69dc79576910d37b001aaaff4cfc43c8ab583b18..60e24a0f3fdcc479a34f477864944025193b0f57 100644
--- a/doc.zih.tu-dresden.de/docs/access/ssh_login.md
+++ b/doc.zih.tu-dresden.de/docs/access/ssh_login.md
@@ -9,7 +9,7 @@ connection to enter the campus network. While active, it allows the user to conn
 HPC login nodes.
 
 For more information on our VPN and how to set it up, please visit the corresponding
-[ZIH service catalogue page](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/zugang_datennetz/vpn).
+[ZIH service catalog page](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/zugang_datennetz/vpn).
 
 ## Connecting from Linux
 
diff --git a/doc.zih.tu-dresden.de/docs/application/project_request_form.md b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
index b5b9e348a94c4178d382e5ca27d67047c06f1481..e829f316cb26f11b9b9048a889c8b5e918b2e870 100644
--- a/doc.zih.tu-dresden.de/docs/application/project_request_form.md
+++ b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
@@ -36,15 +36,16 @@ Any project have:
 ## Third step: Hardware
 
 ![picture 4: Hardware >](misc/request_step3_machines.png "Hardware"){loading=lazy width=300 style="float:right"}
-This step inquire the required hardware. You can find the specifications
-[here](../jobs_and_resources/hardware_overview.md).
+This step asks for the required hardware. The
+[hardware specifications](../jobs_and_resources/hardware_overview.md) might help you to estimate,
+e.g., the compute time.
 
-Please fill in the total computing time you expect in the project runtime.  The compute time is
+Please fill in the total computing time you expect in the project runtime. The compute time is
 given in cores per hour (CPU/h), this refers to the 'virtual' cores for nodes with hyperthreading.
-If they require GPUs, then this is given as GPU units per hour (GPU/h).  Please add 6 CPU hours per
+If you require GPUs, then this is given as GPU units per hour (GPU/h). Please add 6 CPU hours per
 GPU hour in your application.
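+For example, for 1000 GPU hours you would request an additional 6000 CPU hours.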
 
-The project home is a shared storage in your project.  Here you exchange data or install software
+The project home is a shared storage in your project. Here you exchange data or install software
 for your project group in userspace. The directory is not intended for active calculations, for this
 the scratch is available.
 
diff --git a/doc.zih.tu-dresden.de/docs/archive/no_ib_jobs.md b/doc.zih.tu-dresden.de/docs/archive/no_ib_jobs.md
index 9ccce6361bcaa0bc024644f348708354d269a04f..49007a12354190a0fdde97a14a1a6bda922ea38d 100644
--- a/doc.zih.tu-dresden.de/docs/archive/no_ib_jobs.md
+++ b/doc.zih.tu-dresden.de/docs/archive/no_ib_jobs.md
@@ -25,8 +25,8 @@ Infiniband access if (and only if) they have set the `--tmp`-option as well:
 >units can be specified using the suffix \[K\|M\|G\|T\]. This option
 >applies to job allocations.
 
-Keep in mind: Since the scratch file system are not available and the
-project file system is read-only mounted at the compute nodes you have
+Keep in mind: Since the scratch filesystem is not available and the
+project filesystem is read-only mounted at the compute nodes, you have
 to work in /tmp.
 
 A simple job script should do this:
@@ -34,7 +34,7 @@ A simple job script should do this:
 - create a temporary directory on the compute node in `/tmp` and go
   there
 - start the application (under /sw/ or /projects/)using input data
-  from somewhere in the project file system
+  from somewhere in the project filesystem
 - archive and transfer the results to some global location
 
 ```Bash
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_altix.md b/doc.zih.tu-dresden.de/docs/archive/system_altix.md
index 951b06137a599fc95239e5d50144fd2fa205e096..aa61353f4bec0c143b7c86892d8f3cb0a3c41d00 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_altix.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_altix.md
@@ -22,9 +22,9 @@ The jobs for these partitions (except Neptun) are scheduled by the [Platform LSF
 batch system running on `mars.hrsk.tu-dresden.de`. The actual placement of a submitted job may
 depend on factors like memory size, number of processors, time limit.
 
-### File Systems
+### Filesystems
 
-All partitions share the same CXFS file systems `/work` and `/fastfs`.
+All partitions share the same CXFS filesystems `/work` and `/fastfs`.
 
 ### ccNUMA Architecture
 
@@ -123,8 +123,8 @@ nodes with dedicated resources for the user's job. Normally a job can be submitt
 
 #### LSF
 
-The batch system on Atlas is LSF. For general information on LSF, please follow
-[this link](platform_lsf.md).
+The batch system on Atlas is LSF, see also the
+[general information on LSF](platform_lsf.md).
 
 #### Submission of Parallel Jobs
 
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_atlas.md b/doc.zih.tu-dresden.de/docs/archive/system_atlas.md
index 0e744c4ab702afac9d3ac413ccfb5abd58fef817..2bebd5511e69f98370aea0c721cee272f940fbc6 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_atlas.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_atlas.md
@@ -22,7 +22,7 @@ kernel. Currently, the following hardware is installed:
 
 Mars and Deimos users: Please read the [migration hints](migrate_to_atlas.md).
 
-All nodes share the `/home` and `/fastfs` file system with our other HPC systems. Each
+All nodes share the `/home` and `/fastfs` filesystem with our other HPC systems. Each
 node has 180 GB local disk space for scratch mounted on `/tmp`. The jobs for the compute nodes are
 scheduled by the [Platform LSF](platform_lsf.md) batch system from the login nodes
 `atlas.hrsk.tu-dresden.de` .
@@ -86,8 +86,8 @@ user's job. Normally a job can be submitted with these data:
 
 #### LSF
 
-The batch system on Atlas is LSF. For general information on LSF, please follow
-[this link](platform_lsf.md).
+The batch system on Atlas is LSF, see also the
+[general information on LSF](platform_lsf.md).
 
 #### Submission of Parallel Jobs
 
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_venus.md b/doc.zih.tu-dresden.de/docs/archive/system_venus.md
index 2c0a1fe2b83b1c4e7d09f5e2f6495db8658cb7f9..56acf9b47081726c9662150f638ff430e099020c 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_venus.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_venus.md
@@ -19,9 +19,9 @@ the Linux operating system SLES 11 SP 3 with a kernel version 3.x.
 From our experience, most parallel applications benefit from using the additional hardware
 hyperthreads.
 
-### File Systems
+### Filesystems
 
-Venus uses the same `home` file system as all our other HPC installations.
+Venus uses the same `home` filesystem as all our other HPC installations.
 For computations, please use `/scratch`.
 
 ## Usage
@@ -77,8 +77,8 @@ nodes with dedicated resources for the user's job. Normally a job can be submitt
 - files for redirection of output and error messages,
 - executable and command line parameters.
 
-The batch system on Venus is Slurm. For general information on Slurm, please follow
-[this link](../jobs_and_resources/slurm.md).
+The batch system on Venus is Slurm. Please see
+[general information on Slurm](../jobs_and_resources/slurm.md).
 
 #### Submission of Parallel Jobs
 
@@ -92,10 +92,10 @@ On Venus, you can only submit jobs with a core number which is a multiple of 8 (
 srun -n 16 a.out
 ```
 
-**Please note:** There are different MPI libraries on Taurus and Venus,
+**Please note:** The MPI libraries on Venus differ from those on other ZIH systems,
 so you have to compile the binaries specifically for their target.
 
-#### File Systems
+#### Filesystems
 
 - The large main memory on the system allows users to create RAM disks
   within their own jobs.
diff --git a/doc.zih.tu-dresden.de/docs/contrib/contribute_browser.md b/doc.zih.tu-dresden.de/docs/contrib/contribute_browser.md
new file mode 100644
index 0000000000000000000000000000000000000000..45e8018d263300c03101f1374b6350ce58a131dd
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/contrib/contribute_browser.md
@@ -0,0 +1,105 @@
+# Contribution Guide for Browser-based Editing
+
+This page outlines how to contribute to the
+[HPC documentation](https://doc.zih.tu-dresden.de/) of
+[TU Dresden/ZIH](https://tu-dresden.de/zih/) by means of GitLab's web interface, using only a
+standard web browser.
+
+## Preparation
+
+First of all, you need an account on [gitlab.hrz.tu-chemnitz.de](https://gitlab.hrz.tu-chemnitz.de).
+Secondly, you need access to the project
+[ZIH/hpcsupport/hpc-compendium](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium).
+
+The project is publicly visible, i.e., it is open to the world and any signed-in user has the
+[Guest role](https://gitlab.hrz.tu-chemnitz.de/help/user/permissions.md) on this repository. Guests
+have only very
+[limited permissions](https://gitlab.hrz.tu-chemnitz.de/help/user/permissions.md#project-members-permissions).
+In particular, as a guest, you can contribute to the documentation by
+[creating issues](howto_contribute.md#contribute-via-issue), but you cannot edit files and create
+new branches.
+
+To be granted the role **Developer**, please request access by clicking the corresponding button.
+
+![Request access to the repository](misc/request_access.png)
+
+Once you are granted the developer role, choose "ZIH/hpcsupport/hpc-compendium" in your project list.
+
+!!! hint "Git basics"
+
+    If you are not familiar with the basics of git-based document revision control yet, please have
+    a look at [GitLab tutorials](https://gitlab.hrz.tu-chemnitz.de/help/gitlab-basics/index.md).
+
+## Create a Branch
+
+Your contribution starts by creating your own branch of the repository that will hold your edits and
+additions. Create your branch by clicking on "+" near "preview->hpc-compendium/" as depicted in
+the figure below, and then click "New branch".
+
+![create new branch](misc/cb_create_new_branch.png)
+
+By default, the new branch should be created from the `preview` branch, as pre-selected.
+
+Define a branch name that briefly describes what you plan to change, e.g., `edits-in-document-xyz`.
+Then, click on "Create branch" as depicted in this figure:
+
+![set branch name](misc/cb_set_branch_name.png)
+
+As a result, you should now see your branch's name on top of your list of repository files as
+depicted here:
+
+![branch indicator](misc/cb_branch_indicator.png)
+
+## Editing Existing Articles
+
+Navigate the depicted document hierarchy under `doc.zih.tu-dresden.de/docs` until you find the
+article to be edited. A click on the article's name opens a textual representation of the article.
+In the top right corner of it, you find the "Edit" button; click it to make your changes.
+Once you have completed your changes, click on "Commit changes". Please add a meaningful comment
+about the changes you made under "Commit message". Feel free to make as many changes and commits
+as you wish in your branch of the repository.
+
+## Adding New Article
+
+Navigate the depicted document hierarchy under `doc.zih.tu-dresden.de/docs` to find the topic that
+best fits your article. To start a completely new article, click on "+ New file" as depicted
+here:
+
+![create new file](misc/cb_create_new_file.png)
+
+Set a file name that corresponds well to your article like `application_xyz.md`.
+(The file name should follow the pattern `fancy_title_and_more.md`.)
+Once you have completed your initial edits, click on "commit".
+
+![commit new file](misc/cb_commit_file.png)
+
+Finally, the new article needs to be added to the navigation section of the configuration file
+`doc.zih.tu-dresden.de/mkdocs.yaml`.
+
+## Submitting Articles for Publication
+
+Once you are satisfied with your edits, you are ready for publication.
+To this end, your edits need to undergo an internal review process and pass the CI/CD pipeline
+tests. This process is triggered by creating a "merge request", which serves the purpose of
+merging your edits into the `preview` branch of the repository.
+
+* Click on "Merge requests" (in the menu to the left) as depicted below.
+* Then, click on the button "New merge request".
+* Select your source branch (for example `edits-in-document-xyz`) and click on "Compare branches and
+  continue". (The target branch is always `preview`. This is pre-selected - do not change!)
+* The next screen will give you an overview of your changes. Please provide a meaningful
+  description of the contributions. Once you checked them, click on "Create merge request".
+
+![new merge request](misc/cb_new_merge_request.png)
+
+## Revision of Articles
+
+As stated earlier, all changes undergo a review process.
+This covers automated checks contained in the CI/CD pipeline and the review by a maintainer.
+You can follow this process under
+[Merge requests](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/merge_requests)
+(where you initiated your merge request).
+If you are asked to make corrections or changes, follow the directions as indicated.
+Once your merge request has been accepted, it will be closed and the branch will be deleted.
+At this point, there is nothing else for you to do, except perhaps waiting a little while until
+your changes become visible on the official website.
diff --git a/doc.zih.tu-dresden.de/docs/contrib/misc/cb_branch_indicator.png b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_branch_indicator.png
new file mode 100644
index 0000000000000000000000000000000000000000..1c024c55142a12d390d4eaf8306632ed80e0eb9a
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_branch_indicator.png differ
diff --git a/doc.zih.tu-dresden.de/docs/contrib/misc/cb_commit_file.png b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_commit_file.png
new file mode 100644
index 0000000000000000000000000000000000000000..3df543cb2940c808a24bc7be023691aba40ff9c7
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_commit_file.png differ
diff --git a/doc.zih.tu-dresden.de/docs/contrib/misc/cb_create_new_branch.png b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_create_new_branch.png
new file mode 100644
index 0000000000000000000000000000000000000000..8e9bca4e7fcc8014f725c1c1d024037e23a64204
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_create_new_branch.png differ
diff --git a/doc.zih.tu-dresden.de/docs/contrib/misc/cb_create_new_file.png b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_create_new_file.png
new file mode 100644
index 0000000000000000000000000000000000000000..30fed32f3c5a12b91dc0c7cd2250978653ea84f6
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_create_new_file.png differ
diff --git a/doc.zih.tu-dresden.de/docs/contrib/misc/cb_new_merge_request.png b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_new_merge_request.png
new file mode 100644
index 0000000000000000000000000000000000000000..e74b1ec4d43c6017fa7d1e6326996c30795c71a6
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_new_merge_request.png differ
diff --git a/doc.zih.tu-dresden.de/docs/contrib/misc/cb_set_branch_name.png b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_set_branch_name.png
new file mode 100644
index 0000000000000000000000000000000000000000..4da02249faeea31495c792bc045d593d9b989a04
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/contrib/misc/cb_set_branch_name.png differ
diff --git a/doc.zih.tu-dresden.de/docs/contrib/misc/request_access.png b/doc.zih.tu-dresden.de/docs/contrib/misc/request_access.png
new file mode 100644
index 0000000000000000000000000000000000000000..c051e93b6a149ed69e95e5d9b653a80110836266
Binary files /dev/null and b/doc.zih.tu-dresden.de/docs/contrib/misc/request_access.png differ
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
index 6aee19dd87cf1f9bcf589c2950ca11e5b99b1b65..bcfc86b6b35f01bc0a5a1eebffdf65ee6319d171 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
@@ -1,12 +1,12 @@
 # Intermediate Archive
 
 With the "Intermediate Archive", ZIH is closing the gap between a normal disk-based filesystem and
-[Longterm Archive](preservation_research_data.md). The Intermediate Archive is a hierarchical
+[Long-term Archive](preservation_research_data.md). The Intermediate Archive is a hierarchical
 filesystem with disks for buffering and tapes for storing research data.
 
 Its intended use is the storage of research data for a maximal duration of 3 years. For storing the
 data after exceeding this time, the user has to supply essential metadata and migrate the files to
-the [Longterm Archive](preservation_research_data.md). Until then, she/he has to keep track of her/his
+the [Long-term Archive](preservation_research_data.md). Until then, she/he has to keep track of her/his
 files.
 
 Some more information:
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/preservation_research_data.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/preservation_research_data.md
index 5c035e56d8a3fa647f9d847a08ed5be9ef903f93..79ae1cf00b45f8bf46bc054e1502fc9404417b75 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/preservation_research_data.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/preservation_research_data.md
@@ -1,4 +1,4 @@
-# Longterm Preservation for Research Data
+# Long-term Preservation for Research Data
 
 ## Why should research data be preserved?
 
@@ -55,7 +55,7 @@ Below are some examples:
     - ISBN
 - possible meta-data for an electronically saved image would be:
     - resolution of the image
-    - information about the colour depth of the picture
+    - information about the color depth of the picture
     - file format (jpg or tiff or ...)
     - file size how was this image created (digital camera, scanner, ...)
     - description of what the image shows
@@ -79,6 +79,6 @@ information about managing research data.
 
 ## I want to store my research data at ZIH. How can I do that?
 
-Longterm preservation of research data is under construction at ZIH and in a testing phase.
+Long-term preservation of research data is under construction at ZIH and in a testing phase.
 Nevertheless you can already use the archiving service. If you would like to become a test
 user, please write an E-Mail to [Dr. Klaus Köhler](mailto:klaus.koehler@tu-dresden.de).
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
index 80ea758c57b09601cadd001aa018c56a2f219a3f..71ca41949f44d558c2ca3384d7651e9e85b19125 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
@@ -9,9 +9,13 @@ that you cannot log in via SSH to the export nodes, but only use `scp`, `rsync`
 The export nodes are reachable under the hostname `taurusexport.hrsk.tu-dresden.de` (or
 `taurusexport3.hrsk.tu-dresden.de` and `taurusexport4.hrsk.tu-dresden.de`).
 
+Please keep in mind that there are different
+[filesystems](../data_lifecycle/file_systems.md#recommendations-for-filesystem-usage). Choose the
+one that matches your needs.
+
 ## Access From Linux
 
-There are at least three tool to exchange data between your local workstation and ZIH systems. All
+There are at least three tools to exchange data between your local workstation and ZIH systems. They
 are explained in the following section in more detail.
 
 !!! important
@@ -33,13 +37,27 @@ in a directory, the option `-r` has to be specified.
     marie@local$ scp -r <directory> taurusexport:<target-location>
     ```
 
-??? example "Example: Copy a file from ZIH systems to your workstation"
+    For example, if you want to copy your data file `mydata.csv` to the directory `input` in your
+    home directory, you would use the following:
 
     ```console
-    marie@login$ scp taurusexport:<file> <target-location>
+    marie@local$ scp mydata.csv taurusexport:input/
+    ```
+
+??? example "Example: Copy a file from ZIH systems to your workstation"
+
+    ```console
+    marie@local$ scp taurusexport:<file> <target-location>
 
     # Add -r to copy whole directory
-    marie@login$ scp -r taurusexport:<directory> <target-location>
+    marie@local$ scp -r taurusexport:<directory> <target-location>
+    ```
+
+    For example, if you have a directory named `output` in your home directory on ZIH systems and
+    you want to copy it to the directory `/tmp` on your workstation, you would use the following:
+
+    ```console
+    marie@local$ scp -r taurusexport:output /tmp
     ```
 
 ### SFTP
diff --git a/doc.zih.tu-dresden.de/docs/index.md b/doc.zih.tu-dresden.de/docs/index.md
index 60d43b4e73f285901931f652c55aedabc393c451..60f6f081cf4a1c2ea76663bccd65e9ff866597fb 100644
--- a/doc.zih.tu-dresden.de/docs/index.md
+++ b/doc.zih.tu-dresden.de/docs/index.md
@@ -26,4 +26,4 @@ Contributions from user-side are highly welcome. Please find out more in our [gu
 
 **2021-10-05** Offline-maintenance (black building test)
 
-**2021-09-29** Introduction to HPC at ZIH ([slides](misc/HPC-Introduction.pdf))
+**2021-09-29** Introduction to HPC at ZIH ([HPC introduction slides](misc/HPC-Introduction.pdf))
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
index 3d342f628fc7abfeb851500d3cc6fc785d1a03e2..4ab5ca41a5a8c11d4a52c03661b5810d4d09a65d 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
@@ -64,7 +64,8 @@ True
 
 ### Python Virtual Environments
 
-Virtual environments allow users to install additional python packages and create an isolated
+[Virtual environments](../software/python_virtual_environments.md) allow users to install
+additional Python packages and create an isolated
 runtime environment. We recommend using `virtualenv` for this purpose.
 
 ```console
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
index 2af016d0188ae4f926b45e7b8fdc14b039e8baa3..65e445f354d08a3473e226cc97c45ff6c01e8c48 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
@@ -58,10 +58,10 @@ For MPI-parallel jobs one typically allocates one core per task that has to be s
 ### Multiple Programs Running Simultaneously in a Job
 
 In this short example, our goal is to run four instances of a program concurrently in a **single**
-batch script. Of course we could also start a batch script four times with `sbatch` but this is not
-what we want to do here. Please have a look at
-[this subsection](#multiple-programs-running-simultaneously-in-a-job)
-in case you intend to run GPU programs simultaneously in a **single** job.
+batch script. Of course, we could also start a batch script four times with `sbatch`, but this is
+not what we want to do here. However, you can also find an example of
+[how to run GPU programs simultaneously in a single job](#running-multiple-gpu-applications-simultaneously-in-a-batch-job)
+below.
 
 !!! example " "
 
@@ -355,4 +355,4 @@ file) that will be executed one after each other with different CPU numbers:
 
 ## Array-Job with Afterok-Dependency and Datamover Usage
 
-This is a *todo*
+This part is under construction.
diff --git a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md
similarity index 55%
rename from doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
rename to doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md
index 84f5935a168e7d06020b90be011ac314e99f4755..9600fe81d30531b2fc85bda91a67ab730414d97b 100644
--- a/doc.zih.tu-dresden.de/docs/software/big_data_frameworks_spark.md
+++ b/doc.zih.tu-dresden.de/docs/software/big_data_frameworks.md
@@ -1,13 +1,18 @@
-# Big Data Frameworks: Apache Spark
+# Big Data Frameworks
 
 [Apache Spark](https://spark.apache.org/), [Apache Flink](https://flink.apache.org/)
 and [Apache Hadoop](https://hadoop.apache.org/) are frameworks for processing and integrating
 Big Data. These frameworks are also offered as software [modules](modules.md) in both `ml` and
 `scs5` software environments. You can check module versions and availability with the command
 
-```console
-marie@login$ module avail Spark
-```
+=== "Spark"
+    ```console
+    marie@login$ module avail Spark
+    ```
+=== "Flink"
+    ```console
+    marie@login$ module avail Flink
+    ```
 
 **Prerequisites:** To work with the frameworks, you need [access](../access/ssh_login.md) to ZIH
 systems and basic knowledge about data analysis and the batch system
@@ -15,7 +20,8 @@ systems and basic knowledge about data analysis and the batch system
 
 The usage of Big Data frameworks is different from other modules due to their master-worker
 approach. That means, before an application can be started, one has to do additional steps.
-In the following, we assume that a Spark application should be started.
+In the following, we assume that a Spark application should be started and give alternative
+commands for Flink where applicable.
 
 The steps are:
 
@@ -26,49 +32,72 @@ The steps are:
 
 Apache Spark can be used in [interactive](#interactive-jobs) and [batch](#batch-jobs) jobs as well
 as via [Jupyter notebooks](#jupyter-notebook). All three ways are outlined in the following.
+The usage of Flink with Jupyter notebooks is currently under examination.
 
 ## Interactive Jobs
 
 ### Default Configuration
 
-The Spark module is available in both `scs5` and `ml` environments.
-Thus, Spark can be executed using different CPU architectures, e.g., Haswell and Power9.
+The Spark and Flink modules are available in both `scs5` and `ml` environments.
+Thus, Spark and Flink can be executed using different CPU architectures, e.g., Haswell and Power9.
 
 Let us assume that two nodes should be used for the computation. Use a `srun` command similar to
 the following to start an interactive session using the partition haswell. The following code
-snippet shows a job submission to haswell nodes with an allocation of two nodes with 60 GB main
+snippet shows a job submission to haswell nodes with an allocation of two nodes with 60000 MB main
 memory exclusively for one hour:
 
 ```console
-marie@login$ srun --partition=haswell --nodes=2 --mem=60g --exclusive --time=01:00:00 --pty bash -l
+marie@login$ srun --partition=haswell --nodes=2 --mem=60000M --exclusive --time=01:00:00 --pty bash -l
 ```
 
-Once you have the shell, load Spark using the command
+Once you have the shell, load the desired Big Data framework using the command
 
-```console
-marie@compute$ module load Spark
-```
+=== "Spark"
+    ```console
+    marie@compute$ module load Spark
+    ```
+=== "Flink"
+    ```console
+    marie@compute$ module load Flink
+    ```
 
-Before the application can be started, the Spark cluster needs to be set up. To do this, configure
-Spark first using configuration template at `$SPARK_HOME/conf`:
+Before the application can be started, the cluster with the allocated nodes needs to be set up. To
+do this, configure the cluster first using the configuration template at `$SPARK_HOME/conf` for
+Spark or `$FLINK_ROOT_DIR/conf` for Flink:
 
-```console
-marie@compute$ source framework-configure.sh spark $SPARK_HOME/conf
-```
+=== "Spark"
+    ```console
+    marie@compute$ source framework-configure.sh spark $SPARK_HOME/conf
+    ```
+=== "Flink"
+    ```console
+    marie@compute$ source framework-configure.sh flink $FLINK_ROOT_DIR/conf
+    ```
 
 This places the configuration in a directory called `cluster-conf-<JOB_ID>` in your `home`
-directory, where `<JOB_ID>` stands for the id of the Slurm job. After that, you can start Spark in
+directory, where `<JOB_ID>` stands for the Slurm job id. After that, you can start the cluster in
 the usual way:
 
-```console
-marie@compute$ start-all.sh
-```
+=== "Spark"
+    ```console
+    marie@compute$ start-all.sh
+    ```
+=== "Flink"
+    ```console
+    marie@compute$ start-cluster.sh
+    ```
 
-The Spark processes should now be set up and you can start your application, e. g.:
+The necessary background processes should now be set up and you can start your application, e.g.:
 
-```console
-marie@compute$ spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME/examples/jars/spark-examples_2.12-3.0.1.jar 1000
-```
+=== "Spark"
+    ```console
+    marie@compute$ spark-submit --class org.apache.spark.examples.SparkPi \
+    $SPARK_HOME/examples/jars/spark-examples_2.12-3.0.1.jar 1000
+    ```
+=== "Flink"
+    ```console
+    marie@compute$ flink run $FLINK_ROOT_DIR/examples/batch/KMeans.jar
+    ```
 
 !!! warning
 
@@ -80,37 +109,57 @@ marie@compute$ spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOM
 The script `framework-configure.sh` is used to derive a configuration from a template. It takes two
 parameters:
 
-- The framework to set up (Spark, Flink, Hadoop)
+- The framework to set up (parameter `spark` for Spark, `flink` for Flink, and `hadoop` for Hadoop)
 - A configuration template
 
 Thus, you can modify the configuration by replacing the default configuration template with a
 customized one. This way, your custom configuration template is reusable for different jobs. You
 can start with a copy of the default configuration ahead of your interactive session:
 
-```console
-marie@login$ cp -r $SPARK_HOME/conf my-config-template
-```
+=== "Spark"
+    ```console
+    marie@login$ cp -r $SPARK_HOME/conf my-config-template
+    ```
+=== "Flink"
+    ```console
+    marie@login$ cp -r $FLINK_ROOT_DIR/conf my-config-template
+    ```
 
 After you have changed `my-config-template`, you can use your new template in an interactive job
 with:
 
-```console
-marie@compute$ source framework-configure.sh spark my-config-template
-```
+=== "Spark"
+    ```console
+    marie@compute$ source framework-configure.sh spark my-config-template
+    ```
+=== "Flink"
+    ```console
+    marie@compute$ source framework-configure.sh flink my-config-template
+    ```
 
 ### Using Hadoop Distributed Filesystem (HDFS)
 
 If you want to use Spark and HDFS together (or in general more than one framework), a scheme
 similar to the following can be used:
 
-```console
-marie@compute$ module load Hadoop
-marie@compute$ module load Spark
-marie@compute$ source framework-configure.sh hadoop $HADOOP_ROOT_DIR/etc/hadoop
-marie@compute$ source framework-configure.sh spark $SPARK_HOME/conf
-marie@compute$ start-dfs.sh
-marie@compute$ start-all.sh
-```
+=== "Spark"
+    ```console
+    marie@compute$ module load Hadoop
+    marie@compute$ module load Spark
+    marie@compute$ source framework-configure.sh hadoop $HADOOP_ROOT_DIR/etc/hadoop
+    marie@compute$ source framework-configure.sh spark $SPARK_HOME/conf
+    marie@compute$ start-dfs.sh
+    marie@compute$ start-all.sh
+    ```
+=== "Flink"
+    ```console
+    marie@compute$ module load Hadoop
+    marie@compute$ module load Flink
+    marie@compute$ source framework-configure.sh hadoop $HADOOP_ROOT_DIR/etc/hadoop
+    marie@compute$ source framework-configure.sh flink $FLINK_ROOT_DIR/conf
+    marie@compute$ start-dfs.sh
+    marie@compute$ start-cluster.sh
+    ```
 
 ## Batch Jobs
 
@@ -122,41 +171,76 @@ that, you can conveniently put the parameters directly into the job file and sub
 Please use a [batch job](../jobs_and_resources/slurm.md) with a configuration, similar to the
 example below:
 
-??? example "spark.sbatch"
-    ```bash
-    #!/bin/bash -l
-    #SBATCH --time=00:05:00
-    #SBATCH --partition=haswell
-    #SBATCH --nodes=2
-    #SBATCH --exclusive
-    #SBATCH --mem=60G
-    #SBATCH --job-name="example-spark"
+??? example "example-starting-script.sbatch"
+    === "Spark"
+        ```bash
+        #!/bin/bash -l
+        #SBATCH --time=01:00:00
+        #SBATCH --partition=haswell
+        #SBATCH --nodes=2
+        #SBATCH --exclusive
+        #SBATCH --mem=60000M
+        #SBATCH --job-name="example-spark"
+
+        module load Spark/3.0.1-Hadoop-2.7-Java-1.8-Python-3.7.4-GCCcore-8.3.0
+
+        function myExitHandler () {
+            stop-all.sh
+        }
+
+        #configuration
+        . framework-configure.sh spark $SPARK_HOME/conf
+
+        #register cleanup hook in case something goes wrong
+        trap myExitHandler EXIT
 
-    ml Spark/3.0.1-Hadoop-2.7-Java-1.8-Python-3.7.4-GCCcore-8.3.0
+        start-all.sh
+
+        spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME/examples/jars/spark-examples_2.12-3.0.1.jar 1000
 
-    function myExitHandler () {
         stop-all.sh
-    }
 
-    #configuration
-    . framework-configure.sh spark $SPARK_HOME/conf
+        exit 0
+        ```
+    === "Flink"
+        ```bash
+        #!/bin/bash -l
+        #SBATCH --time=01:00:00
+        #SBATCH --partition=haswell
+        #SBATCH --nodes=2
+        #SBATCH --exclusive
+        #SBATCH --mem=60000M
+        #SBATCH --job-name="example-flink"
 
-    #register cleanup hook in case something goes wrong
-    trap myExitHandler EXIT
+        module load Flink/1.12.3-Java-1.8.0_161-OpenJDK-Python-3.7.4-GCCcore-8.3.0
 
-    start-all.sh
+        function myExitHandler () {
+            stop-cluster.sh
+        }
 
-    spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME/examples/jars/spark-examples_2.12-3.0.1.jar 1000
+        #configuration
+        . framework-configure.sh flink $FLINK_ROOT_DIR/conf
 
-    stop-all.sh
+        #register cleanup hook in case something goes wrong
+        trap myExitHandler EXIT
 
-    exit 0
-    ```
+        #start the cluster
+        start-cluster.sh
+
+        #run your application
+        flink run $FLINK_ROOT_DIR/examples/batch/KMeans.jar
+
+        #stop the cluster
+        stop-cluster.sh
+
+        exit 0
+        ```
 
 ## Jupyter Notebook
 
 You can run Jupyter notebooks with Spark on the ZIH systems in a similar way as described on the
-[JupyterHub](../access/jupyterhub.md) page.
+[JupyterHub](../access/jupyterhub.md) page. Interaction of Flink with JupyterHub is currently
+under examination and will be posted here upon availability.
 
 ### Preparation
 
diff --git a/doc.zih.tu-dresden.de/docs/software/cfd.md b/doc.zih.tu-dresden.de/docs/software/cfd.md
index 186d7b3a5a97a2daf06d8618c7c91dc91d7ab971..62ed65116e51ae8bbb593664f4bc48a3373d3a41 100644
--- a/doc.zih.tu-dresden.de/docs/software/cfd.md
+++ b/doc.zih.tu-dresden.de/docs/software/cfd.md
@@ -16,7 +16,7 @@ The OpenFOAM (Open Field Operation and Manipulation) CFD Toolbox can simulate an
 fluid flows involving chemical reactions, turbulence and heat transfer, to solid dynamics,
 electromagnetics and the pricing of financial options. OpenFOAM is developed primarily by
 [OpenCFD Ltd](https://www.openfoam.com) and is freely available and open-source,
-licensed under the GNU General Public Licence.
+licensed under the GNU General Public License.
 
 The command `module spider OpenFOAM` provides the list of installed OpenFOAM versions. In order to
 use OpenFOAM, it is mandatory to set the environment by sourcing the `bashrc` (for users running
diff --git a/doc.zih.tu-dresden.de/docs/software/containers.md b/doc.zih.tu-dresden.de/docs/software/containers.md
index bbb3e80772f3fcc71480e4555fb146f602806804..d15535933ef7f2b9e0330d07e35168f10fc22ded 100644
--- a/doc.zih.tu-dresden.de/docs/software/containers.md
+++ b/doc.zih.tu-dresden.de/docs/software/containers.md
@@ -12,10 +12,10 @@ Singularity. Information about the use of Singularity on ZIH systems can be foun
 In some cases using Singularity requires a Linux machine with root privileges (e.g. using the
 partition `ml`), the same architecture and a compatible kernel. For many reasons, users on ZIH
 systems cannot be granted root permissions. A solution is a Virtual Machine (VM) on the partition
-`ml` which allows users to gain root permissions in an isolated environment.  There are two main
+`ml` which allows users to gain root permissions in an isolated environment. There are two main
 options on how to work with Virtual Machines on ZIH systems:
 
-1. [VM tools](virtual_machines_tools.md): Automative algorithms for using virtual machines;
+1. [VM tools](virtual_machines_tools.md): Automated algorithms for using virtual machines;
 1. [Manual method](virtual_machines.md): It requires more operations but gives you more flexibility
    and reliability.
 
@@ -35,7 +35,7 @@ execution. Follow the instructions for [locally installing Singularity](#local-i
 [container creation](#container-creation). Moreover, existing Docker container can easily be
 converted, see [Import a docker container](#importing-a-docker-container).
 
-If you are already familar with Singularity, you might be more intressted in our [singularity
+If you are already familiar with Singularity, you might be more interested in our [singularity
 recipes and hints](singularity_recipe_hints.md).
 
 ### Local Installation
diff --git a/doc.zih.tu-dresden.de/docs/software/custom_easy_build_environment.md b/doc.zih.tu-dresden.de/docs/software/custom_easy_build_environment.md
index d482d89a45a3849054af19a75ccaf64daeb6e9eb..231ce447b0fa8157ebb9b4a8ea6dd9bb1542fa7b 100644
--- a/doc.zih.tu-dresden.de/docs/software/custom_easy_build_environment.md
+++ b/doc.zih.tu-dresden.de/docs/software/custom_easy_build_environment.md
@@ -1,133 +1,155 @@
 # EasyBuild
 
-Sometimes the \<a href="SoftwareModulesList" target="\_blank"
-title="List of Modules">modules installed in the cluster\</a> are not
-enough for your purposes and you need some other software or a different
-version of a software.
-
-\<br />For most commonly used software, chances are high that there is
-already a *recipe* that EasyBuild provides, which you can use. But what
-is Easybuild?
-
-\<a href="<https://easybuilders.github.io/easybuild/>"
-target="\_blank">EasyBuild\</a>\<span style="font-size: 1em;"> is the
-software used to build and install software on, and create modules for,
-Taurus.\</span>
-
-\<span style="font-size: 1em;">The aim of this page is to introduce
-users to working with EasyBuild and to utilizing it to create
-modules**.**\</span>
-
-**Prerequisites:** \<a href="Login" target="\_blank">access\</a> to the
-Taurus system and basic knowledge about Linux, \<a href="SystemTaurus"
-target="\_blank" title="SystemTaurus">Taurus\</a> and the \<a
-href="RuntimeEnvironment" target="\_blank"
-title="RuntimeEnvironment">modules system \</a>on Taurus.
-
-\<span style="font-size: 1em;">EasyBuild uses a configuration file
-called recipe or "EasyConfig", which contains all the information about
-how to obtain and build the software:\</span>
+Sometimes the [modules](modules.md) installed in the cluster are not enough for your purposes and
+you need some other software or a different version of a software package.
+
+For most commonly used software, chances are high that there is already a *recipe* that EasyBuild
+provides, which you can use. But what is EasyBuild?
+
+[EasyBuild](https://easybuild.io/) is the software used to build and install
+software on ZIH systems.
+
+The aim of this page is to introduce users to working with EasyBuild and to using it to create
+modules.
+
+## Prerequisites
+
+1. [Shell access](../access/ssh_login.md) to ZIH systems
+1. basic knowledge about:
+   - [the ZIH system](../jobs_and_resources/hardware_overview.md)
+   - [the module system](modules.md) on ZIH systems
+
+EasyBuild uses a configuration file called recipe or "EasyConfig", which contains all the
+information about how to obtain and build the software:
 
 -   Name
 -   Version
 -   Toolchain (think: Compiler + some more)
 -   Download URL
--   Buildsystem (e.g. configure && make or cmake && make)
+-   Build system (e.g. `configure && make` or `cmake && make`)
 -   Config parameters
 -   Tests to ensure a successful build
 
-The "Buildsystem" part is implemented in so-called "EasyBlocks" and
-contains the common workflow. Sometimes those are specialized to
-encapsulate behaviour specific to multiple/all versions of the software.
-\<span style="font-size: 1em;">Everything is written in Python, which
-gives authors a great deal of flexibility.\</span>
+The build system part is implemented in so-called "EasyBlocks" and contains the common workflow.
+Sometimes, those are specialized to encapsulate behavior specific to multiple/all versions of the
+software. Everything is written in Python, which gives authors a great deal of flexibility.
 
 ## Set up a custom module environment and build your own modules
 
-Installation of the new software (or version) does not require any
-specific credentials.
+Installation of the new software (or version) does not require any specific credentials.
 
-\<br />Prerequisites: 1 An existing EasyConfig 1 a place to put your
-modules. \<span style="font-size: 1em;">Step by step guide:\</span>
+### Prerequisites
 
-1\. Create a \<a href="WorkSpaces" target="\_blank">workspace\</a> where
-you'll install your modules. You need a place where your modules will be
-placed. This needs to be done only once :
+1. An existing EasyConfig
+1. A place to put your modules
 
-    ws_allocate -F scratch EasyBuild 50                 #
+### Step by step guide
 
-2\. Allocate nodes. You can do this with interactive jobs (see the
-example below) and/or put commands in a batch file and source it. The
-latter is recommended for non-interactive jobs, using the command sbatch
-in place of srun. For the sake of illustration, we use an interactive
-job as an example. The node parameters depend, to some extent, on the
-architecture you want to use. ML nodes for the Power9 and others for the
-x86. We will use Haswell nodes.
+**Step 1:** Create a [workspace](../data_lifecycle/workspaces.md#allocate-a-workspace) where you
+will install your modules. This needs to be done only once:
 
-    srun -p haswell -N 1 -c 4 --time=08:00:00 --pty /bin/bash
+```console
+marie@login$ ws_allocate -F scratch EasyBuild 50
+marie@login$ ws_list | grep 'directory.*EasyBuild'
+     workspace directory  : /scratch/ws/1/marie-EasyBuild
+```
 
-\*Using EasyBuild on the login nodes is not allowed\*
+**Step 2:** Allocate nodes. You can do this with interactive jobs (see the example below) and/or
+put commands in a batch file and source it. The latter is recommended for non-interactive jobs,
+using the command `sbatch` instead of `srun`. For the sake of illustration, we use an
+interactive job as an example. Depending on the partitions that you want the module to be usable on
+later, you need to select nodes with the same architecture. Thus, use nodes from partition `ml`
+for building, if you want to use the module on nodes of that partition. In this example, we assume
+that we want to use the module on nodes with x86 architecture and thus, we use Haswell nodes.
 
-3\. Load EasyBuild module.
+```console
+marie@login$ srun --partition=haswell --nodes=1 --cpus-per-task=4 --time=08:00:00 --pty /bin/bash -l
+```
 
-    module load EasyBuild
+!!! warning
 
-\<br />4. Specify Workspace. The rest of the guide is based on it.
-Please create an environment variable called \`WORKSPACE\` with the
-location of your Workspace:
+    Using EasyBuild on the login nodes is not allowed.
 
-    WORKSPACE=<location_of_your_workspace>         # For example: WORKSPACE=/scratch/ws/anpo879a-EasyBuild
+**Step 3:** Specify the workspace. The rest of the guide is based on it. Please create an
+environment variable called `WORKSPACE` with the path to your workspace:
 
-5\. Load the correct modenv according to your current or target
-architecture: \`ml modenv/scs5\` for x86 (default) or \`modenv/ml\` for
-Power9 (ml partition). Load EasyBuild module
+```console
+marie@compute$ export WORKSPACE=/scratch/ws/1/marie-EasyBuild    #see output of ws_list above
+```
 
-    ml modenv/scs5
-    module load EasyBuild
+**Step 4:** Load the correct module environment `modenv` according to your current or target
+architecture:
 
-6\. Set up your environment:
+=== "x86 (default, e. g. partition haswell)"
+    ```console
+    marie@compute$ module load modenv/scs5
+    ```
+=== "Power9 (partition ml)"
+    ```console
+    marie@ml$ module load modenv/ml
+    ```
 
-    export EASYBUILD_ALLOW_LOADED_MODULES=EasyBuild,modenv/scs5
-    export EASYBUILD_DETECT_LOADED_MODULES=unload
-    export EASYBUILD_BUILDPATH="/tmp/${USER}-EasyBuild${SLURM_JOB_ID:-}"
-    export EASYBUILD_SOURCEPATH="${WORKSPACE}/sources"
-    export EASYBUILD_INSTALLPATH="${WORKSPACE}/easybuild-$(basename $(readlink -f /sw/installed))"
-    export EASYBUILD_INSTALLPATH_MODULES="${EASYBUILD_INSTALLPATH}/modules"
-    module use "${EASYBUILD_INSTALLPATH_MODULES}/all"
-    export LMOD_IGNORE_CACHE=1
+**Step 5:** Load the module `EasyBuild`:
 
-7\. \<span style="font-size: 13px;">Now search for an existing
-EasyConfig: \</span>
+```console
+marie@compute$ module load EasyBuild
+```
 
-    eb --search TensorFlow
+**Step 6:** Set up your environment:
 
-\<span style="font-size: 13px;">8. Build the EasyConfig and its
-dependencies\</span>
+```console
+marie@compute$ export EASYBUILD_ALLOW_LOADED_MODULES=EasyBuild,modenv/scs5
+marie@compute$ export EASYBUILD_DETECT_LOADED_MODULES=unload
+marie@compute$ export EASYBUILD_BUILDPATH="/tmp/${USER}-EasyBuild${SLURM_JOB_ID:-}"
+marie@compute$ export EASYBUILD_SOURCEPATH="${WORKSPACE}/sources"
+marie@compute$ export EASYBUILD_INSTALLPATH="${WORKSPACE}/easybuild-$(basename $(readlink -f /sw/installed))"
+marie@compute$ export EASYBUILD_INSTALLPATH_MODULES="${EASYBUILD_INSTALLPATH}/modules"
+marie@compute$ module use "${EASYBUILD_INSTALLPATH_MODULES}/all"
+marie@compute$ export LMOD_IGNORE_CACHE=1
+```
 
-    eb TensorFlow-1.8.0-fosscuda-2018a-Python-3.6.4.eb -r
+**Step 7:** Now search for an existing EasyConfig:
 
-\<span style="font-size: 13px;">After this is done (may take A LONG
-time), you can load it just like any other module.\</span>
+```console
+marie@compute$ eb --search TensorFlow
+```
 
-9\. To use your custom build modules you only need to rerun step 4, 5, 6
-and execute the usual:
+**Step 8:** Build the EasyConfig and its dependencies (option `-r`):
 
-    module load <name_of_your_module>            # For example module load TensorFlow-1.8.0-fosscuda-2018a-Python-3.6.4
+```console
+marie@compute$ eb TensorFlow-1.8.0-fosscuda-2018a-Python-3.6.4.eb -r
+```
 
-The key is the \`module use\` command which brings your modules into
-scope so \`module load\` can find them and the LMOD_IGNORE_CACHE line
-which makes LMod pick up the custom modules instead of searching the
+This may take a long time. After this is done, you can load it just like any other module.
+
+**Step 9:** To use your custom-built modules, you only need to rerun steps 3, 4, 5, 6 and execute
+the usual:
+
+```console
+marie@compute$ module load TensorFlow-1.8.0-fosscuda-2018a-Python-3.6.4  #replace with the name of your module
+```
+
+The key is the `module use` command, which brings your modules into scope, so `module load` can find
+them. The `LMOD_IGNORE_CACHE` line makes `LMod` pick up the custom modules instead of searching the
 system cache.
 
 ## Troubleshooting
 
-When building your EasyConfig fails, you can first check the log
-mentioned and scroll to the bottom to see what went wrong.
+When building your EasyConfig fails, you can first check the log file mentioned in the `eb` output
+and scroll to the bottom to see what went wrong.
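+
+For example (the path below is only an illustration, use the path printed by `eb`):
+
+```console
+marie@compute$ less /tmp/eb-1a2b3c/easybuild-myEC.log  #example path, use the one printed by eb
+```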
+
+It might also be helpful to inspect the build environment EasyBuild uses. For that you can run:
+
+```console
+marie@compute$ eb myEC.eb --dump-env-script
+```
+
+This command creates a sourceable `.env` file with `module load` and `export` commands that show
+what EasyBuild does before running, e.g., the configuration step.
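+
+For example, you could source the generated script to reproduce that build environment in your
+shell (the file name is reported by `eb` and may differ; `myEC.env` is only an assumption):
+
+```console
+marie@compute$ source ./myEC.env
+```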
 
-It might also be helpful to inspect the build environment EB uses. For
-that you can run \`eb myEC.eb --dump-env-script\` which creates a
-sourceable .env file with \`module load\` and \`export\` commands that
-show what EB does before running, e.g., the configure step.
+It might also be helpful to let `LMod` use its cache again:
 
-It might also be helpful to use '\<span style="font-size: 1em;">export
-LMOD_IGNORE_CACHE=0'\</span>
+```console
+marie@compute$ export LMOD_IGNORE_CACHE=0
+```
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics.md b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
index 245bd5ae1a8ea0f246bd578d4365b3d23aaaba64..b4a5f7f8b9f86c9a47fec20b875970efd4d787b2 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
@@ -10,7 +10,7 @@ The following tools are available on ZIH systems, among others:
 * [Python](data_analytics_with_python.md)
 * [R](data_analytics_with_r.md)
 * [RStudio](data_analytics_with_rstudio.md)
-* [Big Data framework Spark](big_data_frameworks_spark.md)
+* [Big Data framework Spark](big_data_frameworks.md)
 * [MATLAB and Mathematica](mathematics.md)
 
 Detailed information about frameworks for machine learning, such as [TensorFlow](tensorflow.md)
@@ -24,7 +24,8 @@ marie@compute$ module spider <software_name>
 
 Refer to the section covering [modules](modules.md) for further information on the modules system.
 Additional software or special versions of [individual modules](custom_easy_build_environment.md)
-can be installed individually by each user. If possible, the use of virtual environments is
+can be installed individually by each user. If possible, the use of
+[virtual environments](python_virtual_environments.md) is
 recommended (e.g. for Python). Likewise, software can be used within [containers](containers.md).
 
 For the transfer of larger amounts of data into and within the system, the
diff --git a/doc.zih.tu-dresden.de/docs/software/debuggers.md b/doc.zih.tu-dresden.de/docs/software/debuggers.md
index d88ca5f068f0145e8acc46407feca93a14968522..0d4bda97f61fe6453d6027406ff88145c4204cfb 100644
--- a/doc.zih.tu-dresden.de/docs/software/debuggers.md
+++ b/doc.zih.tu-dresden.de/docs/software/debuggers.md
@@ -73,8 +73,8 @@ modified by DDT available, which has better support for Fortran 90 (e.g.  derive
 ![DDT Main Window](misc/ddt-main-window.png)
 
 - Intuitive graphical user interface and great support for parallel applications
-- We have 1024 licences, so many user can use this tool for parallel debugging
-- Don't expect that debugging an MPI program with 100ths of process will always work without
+- We have 1024 licenses, so many users can use this tool for parallel debugging
+- Don't expect that debugging an MPI program with hundreds of processes will always work without
   problems
   - The more processes and nodes involved, the higher is the probability for timeouts or other
     problems
@@ -159,7 +159,7 @@ marie@login$ srun -n 1 valgrind ./myprog
 
 - Not recommended for MPI parallel programs, since usually the MPI library will throw
   a lot of errors. But you may use Valgrind the following way such that every rank
-  writes its own Valgrind logfile:
+  writes its own Valgrind log file:
 
 ```console
 marie@login$ module load Valgrind
diff --git a/doc.zih.tu-dresden.de/docs/software/fem_software.md b/doc.zih.tu-dresden.de/docs/software/fem_software.md
index 3be2314889bfe45f9554fb499c4d757337bef33d..160aeded633f50e9abfdfae6d74a7627257ca565 100644
--- a/doc.zih.tu-dresden.de/docs/software/fem_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/fem_software.md
@@ -176,7 +176,7 @@ under:
 
 `<MaxNumberProcessors>2</MaxNumberProcessors>`
 
-that you can simply change to something like 16 oder 24. For now, you should stay within single-node
+that you can simply change to something like 16 or 24. For now, you should stay within single-node
 boundaries, because multi-node calculations require additional parameters. The number you choose
 should match your used `--cpus-per-task` parameter in your job file.
 
diff --git a/doc.zih.tu-dresden.de/docs/software/gpu_programming.md b/doc.zih.tu-dresden.de/docs/software/gpu_programming.md
index 9847cc9dbfec4137eada70dbc23285c7825effc7..070176efcb2ab0f463da30675841ade0e0a585a3 100644
--- a/doc.zih.tu-dresden.de/docs/software/gpu_programming.md
+++ b/doc.zih.tu-dresden.de/docs/software/gpu_programming.md
@@ -2,8 +2,9 @@
 
 ## Directive Based GPU Programming
 
-Directives are special compiler commands in your C/C++ or Fortran source code. The tell the compiler
-how to parallelize and offload work to a GPU. This section explains how to use this technique.
+Directives are special compiler commands in your C/C++ or Fortran source code. They tell the
+compiler how to parallelize and offload work to a GPU. This section explains how to use this
+technique.
 
 ### OpenACC
 
@@ -19,10 +20,11 @@ newer for full support for the NVIDIA Tesla K20x GPUs at ZIH.
 
 #### Using OpenACC with PGI compilers
 
-* For compilaton please add the compiler flag `-acc`, to enable OpenACC interpreting by the compiler;
-* `-Minfo` will tell you what the compiler is actually doing to your code;
+* For compilation, please add the compiler flag `-acc` to enable OpenACC interpreting by the
+  compiler;
+* `-Minfo` tells you what the compiler is actually doing to your code;
 * If you only want to use the created binary at ZIH resources, please also add `-ta=nvidia:keple`;
-* OpenACC Turorial: intro1.pdf, intro2.pdf.
+* OpenACC Tutorial: intro1.pdf, intro2.pdf.
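+
+Putting the flags above together, a compile line could look like this (a sketch; compiler driver
+and file names are assumptions, add the `-ta=...` flag as described above if needed):
+
+```console
+marie@login$ pgcc -acc -Minfo simulation.c -o simulation
+```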
 
 ### HMPP
 
@@ -38,4 +40,4 @@ use the following slides as an introduction:
 * Introduction to CUDA;
 * Advanced Tuning for NVIDIA Kepler GPUs.
 
-In order to compiler an application with CUDA use the `nvcc` compiler command.
+In order to compile an application with CUDA use the `nvcc` compiler command.
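+
+A minimal invocation could look like this (a sketch; file names are placeholders and a CUDA module
+needs to be loaded beforehand):
+
+```console
+marie@login$ nvcc my_kernel.cu -o my_kernel
+```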
diff --git a/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md b/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
index 38190764e6c9efedb275ec9ff4324d916c851566..8f61fe49fd56642aaded82cf711ca92d0035b99f 100644
--- a/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
+++ b/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
@@ -270,9 +270,9 @@ This GUI guides through the configuration process and as result a configuration
 automatically according to the GUI input. If you are more familiar with using OmniOpt later on,
 this configuration file can be modified directly without using the GUI.
 
-A screenshot of the GUI, including a properly configuration for the MNIST fashion example is shown
-below. The GUI, in which the below displayed values are already entered, can be reached
-[here](https://imageseg.scads.ai/omnioptgui/?maxevalserror=5&mem_per_worker=1000&number_of_parameters=3&param_0_values=10%2C50%2C100&param_1_values=8%2C16%2C32&param_2_values=10%2C15%2C30&param_0_name=out-layer1&param_1_name=batchsize&param_2_name=batchsize&account=&projectname=mnist_fashion_optimization_set_1&partition=alpha&searchtype=tpe.suggest&param_0_type=hp.choice&param_1_type=hp.choice&param_2_type=hp.choice&max_evals=1000&objective_program=bash%20%3C%2Fpath%2Fto%2Fwrapper-script%2Frun-mnist-fashion.sh%3E%20--out-layer1%3D%28%24x_0%29%20--batchsize%3D%28%24x_1%29%20--epochs%3D%28%24x_2%29&workdir=%3C%2Fscratch%2Fws%2Fomniopt-workdir%2F%3E).
+A screenshot of
+[the GUI](https://imageseg.scads.ai/omnioptgui/?maxevalserror=5&mem_per_worker=1000&number_of_parameters=3&param_0_values=10%2C50%2C100&param_1_values=8%2C16%2C32&param_2_values=10%2C15%2C30&param_0_name=out-layer1&param_1_name=batchsize&param_2_name=batchsize&account=&projectname=mnist_fashion_optimization_set_1&partition=alpha&searchtype=tpe.suggest&param_0_type=hp.choice&param_1_type=hp.choice&param_2_type=hp.choice&max_evals=1000&objective_program=bash%20%3C%2Fpath%2Fto%2Fwrapper-script%2Frun-mnist-fashion.sh%3E%20--out-layer1%3D%28%24x_0%29%20--batchsize%3D%28%24x_1%29%20--epochs%3D%28%24x_2%29&workdir=%3C%2Fscratch%2Fws%2Fomniopt-workdir%2F%3E),
+including a proper configuration for the MNIST fashion example, is shown below.
 
 Please modify the paths for `objective program` and `workdir` according to your needs.
 
diff --git a/doc.zih.tu-dresden.de/docs/software/mathematics.md b/doc.zih.tu-dresden.de/docs/software/mathematics.md
index 21aab2856a7b9582c3f6b8d5453d7ea2f8b6895b..5b8e23b2fd3ed373bdf7bf6394ae3b2faf98ce74 100644
--- a/doc.zih.tu-dresden.de/docs/software/mathematics.md
+++ b/doc.zih.tu-dresden.de/docs/software/mathematics.md
@@ -21,9 +21,9 @@ font manager.
 
 You need to copy the fonts from ZIH systems to your local system and expand the font path
 
-```bash
-localhost$ scp -r taurus.hrsk.tu-dresden.de:/sw/global/applications/mathematica/10.0/SystemFiles/Fonts/Type1/ ~/.fonts
-localhost$ xset fp+ ~/.fonts/Type1
+```console
+marie@local$ scp -r taurus.hrsk.tu-dresden.de:/sw/global/applications/mathematica/10.0/SystemFiles/Fonts/Type1/ ~/.fonts
+marie@local$ xset fp+ ~/.fonts/Type1
 ```
 
 #### Windows Workstation
@@ -93,29 +93,29 @@ interfaces with the Maple symbolic engine, allowing it to be part of a full comp
 Running MATLAB via the batch system could look like this (for 456 MB RAM per core and 12 cores
 reserved). Please adapt this to your needs!
 
-```bash
-zih$ module load MATLAB
-zih$ srun -t 8:00 -c 12 --mem-per-cpu=456 --pty --x11=first bash
-zih$ matlab
+```console
+marie@login$ module load MATLAB
+marie@login$ srun --time=8:00 --cpus-per-task=12 --mem-per-cpu=456 --pty --x11=first bash
+marie@compute$ matlab
 ```
 
 With following command you can see a list of installed software - also
 the different versions of matlab.
 
-```bash
-zih$ module avail
+```console
+marie@login$ module avail
 ```
 
 Please choose one of these, then load the chosen software with the command:
 
 ```bash
-zih$ module load MATLAB/version
+marie@login$ module load MATLAB/<version>
 ```
 
 Or use:
 
-```bash
-zih$ module load MATLAB
+```console
+marie@login$ module load MATLAB
 ```
 
 (then you will get the most recent Matlab version.
@@ -126,8 +126,8 @@ zih$ module load MATLAB
 If X-server is running and you logged in at ZIH systems, you should allocate a CPU for your work
 with command
 
-```bash
-zih$ srun --pty --x11=first bash
+```console
+marie@login$ srun --pty --x11=first bash
 ```
 
 - now you can call "matlab" (you have 8h time to work with the matlab-GUI)
@@ -138,8 +138,9 @@ Using Scripts
 
 You have to start matlab-calculation as a Batch-Job via command
 
-```bash
-srun --pty matlab -nodisplay -r basename_of_your_matlab_script #NOTE: you must omit the file extension ".m" here, because -r expects a matlab command or function call, not a file-name.
+```console
+marie@login$ srun --pty matlab -nodisplay -r basename_of_your_matlab_script
+# NOTE: you must omit the file extension ".m" here, because -r expects a matlab command or function call, not a file-name.
 ```
 
 !!! info "License occupying"
@@ -160,7 +161,7 @@ You can find detailed documentation on the Matlab compiler at
 Compile your `.m` script into a binary:
 
 ```bash
-mcc -m name_of_your_matlab_script.m -o compiled_executable -R -nodisplay -R -nosplash
+marie@login$ mcc -m name_of_your_matlab_script.m -o compiled_executable -R -nodisplay -R -nosplash
 ```
 
 This will also generate a wrapper script called `run_compiled_executable.sh` which sets the required
@@ -172,41 +173,35 @@ Then run the binary via the wrapper script in a job (just a simple example, you
 [sbatch script](../jobs_and_resources/slurm.md#job-submission) for that)
 
 ```bash
-zih$ srun ./run_compiled_executable.sh $EBROOTMATLAB
+marie@login$ srun ./run_compiled_executable.sh $EBROOTMATLAB
 ```
 
 ### Parallel MATLAB
 
 #### With 'local' Configuration
 
--   If you want to run your code in parallel, please request as many
-    cores as you need!
--   start a batch job with the number N of processes
--   example for N= 4: `srun -c 4 --pty --x11=first bash`
--   run Matlab with the GUI or the CLI or with a script
--   inside use `matlabpool open 4` to start parallel
-    processing
+- If you want to run your code in parallel, please request as many cores as you need!
+- Start a batch job with the number `N` of processes, e.g., `srun --cpus-per-task=4 --pty
+  --x11=first bash -l`
+- Run Matlab with the GUI or the CLI or with a script
+- Inside Matlab use `matlabpool open 4` to start parallel processing
 
--   example for 1000*1000 matrix multiplication
-
-!!! example
+!!! example "Example for 1000*1000 matrix-matrix multiplication"
 
     ```bash
     R = distributed.rand(1000);
     D = R * R
     ```
 
--   to close parallel task:
-`matlabpool close`
+- Close parallel task using `matlabpool close`
 
 #### With parfor
 
-- start a batch job with the number N of processes (e.g. N=12)
-- inside use `matlabpool open N` or
-  `matlabpool(N)` to start parallel processing. It will use
+- Start a batch job with the number `N` of processes (e.g., `N=12`)
+- Inside use `matlabpool open N` or `matlabpool(N)` to start parallel processing. It will use
   the 'local' configuration by default.
-- Use `parfor` for a parallel loop, where the **independent** loop
-  iterations are processed by N threads
+- Use `parfor` for a parallel loop, where the **independent** loop iterations are processed by `N`
+  threads
 
 !!! example
 
diff --git a/doc.zih.tu-dresden.de/docs/software/modules.md b/doc.zih.tu-dresden.de/docs/software/modules.md
index 58f200d25f01d52385626776b53c93f38e999397..fb9107b5d362ca348987e848a663de7586fb6a72 100644
--- a/doc.zih.tu-dresden.de/docs/software/modules.md
+++ b/doc.zih.tu-dresden.de/docs/software/modules.md
@@ -206,7 +206,8 @@ Note that this will not work for meta-modules that do not have an installation d
 
 ## Advanced Usage
 
-For writing your own Modulefiles please have a look at the [Guide for writing project and private Modulefiles](private_modules.md).
+For writing your own module files please have a look at the
+[Guide for writing project and private module files](private_modules.md).
 
 ## Troubleshooting
 
diff --git a/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md b/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md
index 8d1d7e17a02c3dd2ab572216899cd37f7a9aee3a..b083e80cf9962a01a6580f8b5393912ebd2c3f40 100644
--- a/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md
+++ b/doc.zih.tu-dresden.de/docs/software/mpi_usage_error_detection.md
@@ -40,7 +40,7 @@ Besides loading a MUST module, no further changes are needed during compilation
 
 ### Running your Application with MUST
 
-In order to run your application with MUST you need to replace the srun command with mustrun:
+In order to run your application with MUST you need to replace the `srun` command with `mustrun`:
 
 ```console
 marie@login$ mustrun -np <number of MPI processes> ./<your binary>
@@ -65,14 +65,14 @@ marie@login$ mustrun -np 4 ./fancy-program
 [MUST] Execution finished, inspect "/home/marie/MUST_Output.html"!
 ```
 
-Besides replacing the srun command you need to be aware that **MUST always allocates an extra
+Besides replacing the `srun` command you need to be aware that **MUST always allocates an extra
 process**, i.e. if you issue a `mustrun -np 4 ./a.out` then MUST will start 5 processes instead.
 This is usually not critical, however in batch jobs **make sure to allocate an extra CPU for this
 task**.
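+
+For example, a job file for the `mustrun -np 4` case above could request one extra task (a sketch;
+module name and resources are assumptions, adjust them to your needs):
+
+```bash
+#!/bin/bash
+#SBATCH --ntasks=5    # 4 MPI processes for the application + 1 extra process for MUST
+#SBATCH --time=00:30:00
+
+module load MUST      # module name may differ, check with module avail
+mustrun -np 4 ./fancy-program
+```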
 
 Finally, MUST assumes that your application may crash at any time. To still gather correctness
 results under this assumption is extremely expensive in terms of performance overheads. Thus, if
-your application does not crash, you should add an "--must:nocrash" to the mustrun command to make
+your application does not crash, you should add `--must:nocrash` to the `mustrun` command to make
 MUST aware of this knowledge. Overhead is drastically reduced with this switch.
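+
+For the example above, this could look like the following (a sketch; the placement of the
+`--must:nocrash` option is an assumption):
+
+```console
+marie@login$ mustrun --must:nocrash -np 4 ./fancy-program
+```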
 
 ### Result Files
diff --git a/doc.zih.tu-dresden.de/docs/software/papi.md b/doc.zih.tu-dresden.de/docs/software/papi.md
index 9d96cc58f4453692ad7b57abe3e56abda1539290..2de80b4e8a0f420a6b42cd01a3de027b5fb89be2 100644
--- a/doc.zih.tu-dresden.de/docs/software/papi.md
+++ b/doc.zih.tu-dresden.de/docs/software/papi.md
@@ -20,8 +20,8 @@ To collect performance events, PAPI provides two APIs, the *high-level* and *low
 
 The high-level API provides the ability to record performance events inside instrumented regions of
 serial, multi-processing (MPI, SHMEM) and thread (OpenMP, Pthreads) parallel applications. It is
-designed for simplicity, not flexibility. For more details click
-[here](https://bitbucket.org/icl/papi/wiki/PAPI-HL.md).
+designed for simplicity, not flexibility. More details can be found in the
+[PAPI wiki High-Level API description](https://bitbucket.org/icl/papi/wiki/PAPI-HL.md).
 
 The following code example shows the use of the high-level API by marking a code section.
 
@@ -86,19 +86,19 @@ more output files in JSON format.
 
 ### Low-Level API
 
-The low-level API manages hardware events in user-defined groups
-called Event Sets. It is meant for experienced application programmers and tool developers wanting
-fine-grained measurement and control of the PAPI interface. It provides access to both PAPI preset
-and native events, and supports all installed components. For more details on the low-level API,
-click [here](https://bitbucket.org/icl/papi/wiki/PAPI-LL.md).
+The low-level API manages hardware events in user-defined groups called Event Sets. It is meant for
+experienced application programmers and tool developers wanting fine-grained measurement and
+control of the PAPI interface. It provides access to both PAPI preset and native events, and
+supports all installed components. The PAPI wiki contains also a page with more details on the
+[low-level API](https://bitbucket.org/icl/papi/wiki/PAPI-LL.md).
 
 ## Usage on ZIH Systems
 
 Before you start a PAPI measurement, check which events are available on the desired architecture.
-For this purpose PAPI offers the tools `papi_avail` and `papi_native_avail`. If you want to measure
+For this purpose, PAPI offers the tools `papi_avail` and `papi_native_avail`. If you want to measure
 multiple events, please check which events can be measured concurrently using the tool
-`papi_event_chooser`. For more details on the PAPI tools click
-[here](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-papi-utilities).
+`papi_event_chooser`. The PAPI wiki contains more details on
+[the PAPI tools](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-papi-utilities).
 
 !!! hint
 
@@ -133,8 +133,7 @@ compile your application against the  PAPI library.
 !!! hint
 
     The PAPI modules on ZIH systems are only installed with the default `perf_event` component. If you
-    want to measure, e.g., GPU events, you have to install your own PAPI. Instructions on how to
-    download and install PAPI can be found
-    [here](https://bitbucket.org/icl/papi/wiki/Downloading-and-Installing-PAPI.md). To install PAPI
-    with additional components, you have to specify them during configure, for details click
-    [here](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-components).
+    want to measure, e.g., GPU events, you have to install your own PAPI. Please see the
+    [external instructions on how to download and install PAPI](https://bitbucket.org/icl/papi/wiki/Downloading-and-Installing-PAPI.md).
+    To install PAPI with additional components, you have to specify them during configure as
+    described for the [Installation of Components](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-components).
diff --git a/doc.zih.tu-dresden.de/docs/software/pika.md b/doc.zih.tu-dresden.de/docs/software/pika.md
index 36aab905dbf33602c64333e2a695070ffc0ad9db..d9616e900e258909267fc9870db6ddfa24fee0de 100644
--- a/doc.zih.tu-dresden.de/docs/software/pika.md
+++ b/doc.zih.tu-dresden.de/docs/software/pika.md
@@ -90,7 +90,7 @@ reason for further investigation, since not all HUs are equally utilized.
 
 To identify imbalances between HUs over time, the visualization modes *Best* and *Lowest* are a
 first indicator how much the HUs differ in terms of resource usage. The timelines *Best* and
-*Lowest* show the recoded performance data of the best/lowest average HU over time.
+*Lowest* show the recorded performance data of the best/lowest average HU over time.
 
 ## Footprint Visualization
 
@@ -111,7 +111,7 @@ investigating their correlation.
 ## Hints
 
 If users wish to perform their own measurement of performance counters using performance tools other
-than PIKA, it is recommended to disable PIKA monitoring. This can be done using the following slurm
+than PIKA, it is recommended to disable PIKA monitoring. This can be done using the following Slurm
 flags in the job script:
 
 ```Bash
diff --git a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
index e19daeeb6731aa32eb993f2495e6ec443bebe2dd..67b10817c738b414a3302388b5cca3392ff96bb1 100644
--- a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
+++ b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
@@ -93,8 +93,6 @@ are in the virtual environment. You can deactivate the conda environment as foll
 (conda-env) marie@compute$ conda deactivate    #Leave the virtual environment
 ```
 
-TODO: Link to this page from other DA/ML topics. insert link in alpha centauri
-
 ??? example
 
     This is an example on partition Alpha. The example creates a virtual environment, and installs
diff --git a/doc.zih.tu-dresden.de/docs/software/vampir.md b/doc.zih.tu-dresden.de/docs/software/vampir.md
index 24a22c35acda9afcfa6e1e56bdd553da716ec245..9df5eb62a0d461da97fcb2ce28f461d9042e93a2 100644
--- a/doc.zih.tu-dresden.de/docs/software/vampir.md
+++ b/doc.zih.tu-dresden.de/docs/software/vampir.md
@@ -146,7 +146,7 @@ marie@local$ ssh -L 30000:taurusi1253:30055 taurus.hrsk.tu-dresden.de
 ```
 
 Now, the port 30000 on your desktop is connected to the VampirServer port 30055 at the compute node
-taurusi1253 of the ZIH system. Finally, start your local Vampir client and establish a remote
+`taurusi1253` of the ZIH system. Finally, start your local Vampir client and establish a remote
 connection to `localhost`, port 30000 as described in the manual.
 
 ```console
diff --git a/doc.zih.tu-dresden.de/docs/software/visualization.md b/doc.zih.tu-dresden.de/docs/software/visualization.md
index 328acc490f5fa5c65e687d50bf9f43ceae44c541..f1e551c968cb4478069c98e691eef11bce7ccb01 100644
--- a/doc.zih.tu-dresden.de/docs/software/visualization.md
+++ b/doc.zih.tu-dresden.de/docs/software/visualization.md
@@ -49,10 +49,10 @@ marie@login$ mpiexec -bind-to -help`
 or from
 [mpich wiki](https://wiki.mpich.org/mpich/index.php/Using_the_Hydra_Process_Manager#Process-core_Binding%7Cwiki.mpich.org).
 
-In the following, we provide two examples on how to use `pvbatch` from within a jobfile and an
+In the following, we provide two examples on how to use `pvbatch` from within a job file and an
 interactive allocation.
 
-??? example "Example jobfile"
+??? example "Example job file"
 
     ```Bash
     #!/bin/bash
@@ -97,7 +97,7 @@ cards (GPUs) specified by the device index. For that, make sure to use the modul
 *-egl*, e.g., `ParaView/5.9.0-RC1-egl-mpi-Python-3.8`, and pass the option
 `--egl-device-index=$CUDA_VISIBLE_DEVICES`.
 
-??? example "Example jobfile"
+??? example "Example job file"
 
     ```Bash
     #!/bin/bash
@@ -171,7 +171,7 @@ are outputed.*
 This contains the node name which your job and server runs on. However, since the node names of the
 cluster are not present in the public domain name system (only cluster-internally), you cannot just
 use this line as-is for connection with your client. **You first have to resolve** the name to an IP
-address on ZIH systems: Suffix the nodename with `-mn` to get the management network (ethernet)
+address on ZIH systems: Suffix the node name with `-mn` to get the management network (ethernet)
 address, and pass it to a lookup-tool like `host` in another SSH session:
 
 ```console
diff --git a/doc.zih.tu-dresden.de/mkdocs.yml b/doc.zih.tu-dresden.de/mkdocs.yml
index aaf72fb2810f0cfefe78df2d3f9ee55e651905b7..8a974edf6630e8002199d2831c179a4fb1899720 100644
--- a/doc.zih.tu-dresden.de/mkdocs.yml
+++ b/doc.zih.tu-dresden.de/mkdocs.yml
@@ -46,7 +46,7 @@ nav:
       - Data Analytics with R: software/data_analytics_with_r.md
       - Data Analytics with RStudio: software/data_analytics_with_rstudio.md
       - Data Analytics with Python: software/data_analytics_with_python.md
-      - Apache Spark: software/big_data_frameworks_spark.md
+      - Big Data Analytics: software/big_data_frameworks.md
     - Machine Learning:
       - Overview: software/machine_learning.md
       - TensorFlow: software/tensorflow.md
@@ -70,7 +70,6 @@ nav:
       - PAPI Library: software/papi.md
       - Pika: software/pika.md
       - Perf Tools: software/perf_tools.md
-      - Score-P: software/scorep.md
       - Vampir: software/vampir.md
   - Data Life Cycle Management:
     - Overview: data_lifecycle/overview.md
@@ -131,6 +130,7 @@ nav:
   - Contribute:
     - How-To: contrib/howto_contribute.md
     - Content Rules: contrib/content_rules.md
+    - Browser-based Editing: contrib/contribute_browser.md
     - Work Locally Using Containers: contrib/contribute_container.md
     
 # Project Information
@@ -187,6 +187,8 @@ markdown_extensions:
         permalink: True
     - attr_list
     - footnotes
+    - pymdownx.tabbed:
+        alternate_style: true
 
 extra:
   homepage: https://tu-dresden.de
diff --git a/doc.zih.tu-dresden.de/util/check-bash-syntax.sh b/doc.zih.tu-dresden.de/util/check-bash-syntax.sh
index 9f31effee3ebc3380af5ca892047aca6a9357139..ac0fcd4621741d7f094e29aaf772f283b64c284d 100755
--- a/doc.zih.tu-dresden.de/util/check-bash-syntax.sh
+++ b/doc.zih.tu-dresden.de/util/check-bash-syntax.sh
@@ -47,12 +47,12 @@ branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}"
 
 if [ $all_files = true ]; then
   echo "Search in all bash files."
-  files=`git ls-tree --full-tree -r --name-only HEAD $basedir/docs/ | grep .sh || true`
+  files=`git ls-tree --full-tree -r --name-only HEAD $basedir/docs/ | grep '\.sh$' || true`
 elif [[ ! -z $file ]]; then
   files=$file
 else
   echo "Search in git-changed files."
-  files=`git diff --name-only "$(git merge-base HEAD "$branch")" | grep .sh || true`
+  files=`git diff --name-only "$(git merge-base HEAD "$branch")" | grep '\.sh$' || true`
 fi
 
 
diff --git a/doc.zih.tu-dresden.de/util/check-empty-page.sh b/doc.zih.tu-dresden.de/util/check-empty-page.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7c4fdc2cd07b167b39b0b0ece58e199df0df6d84
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/check-empty-page.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -euo pipefail
+
+scriptpath=${BASH_SOURCE[0]}
+basedir=`dirname "$scriptpath"`
+basedir=`dirname "$basedir"`
+
+# Fail if any markdown file is empty (i.e. has zero lines)
+if find $basedir -name \*.md -exec wc -l {} \; | grep '^0 '; then
+  exit 1
+fi
diff --git a/doc.zih.tu-dresden.de/util/check-filesize.sh b/doc.zih.tu-dresden.de/util/check-filesize.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9b11b09c742a387513a265da28aca57d5533516b
--- /dev/null
+++ b/doc.zih.tu-dresden.de/util/check-filesize.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# BSD 3-Clause License
+# 
+# Copyright (c) 2017, The Regents of the University of California, through
+# Lawrence Berkeley National Laboratory (subject to receipt of any required
+# approvals from the U.S. Dept. of Energy). All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# 
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+# 
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# 
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+# 
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+large_files_present=false
+branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}"
+source_hash=`git merge-base HEAD "$branch"`
+
+for f in $(git diff $source_hash --name-only); do
+    fs=$(wc -c $f | awk '{print $1}')
+    if [ $fs -gt 1048576 ]; then
+        echo $f 'is over 1M ('$fs' bytes)'
+        large_files_present=true
+    fi
+done
+
+if [ "$large_files_present" == true ]; then
+    exit 1
+fi
diff --git a/doc.zih.tu-dresden.de/util/check-no-floating.sh b/doc.zih.tu-dresden.de/util/check-no-floating.sh
index 6f94039f3125f87502b1583e699140e15e0e5f5f..4fbc5affe7c670c9dc2d998447c29e3a1e99fe55 100755
--- a/doc.zih.tu-dresden.de/util/check-no-floating.sh
+++ b/doc.zih.tu-dresden.de/util/check-no-floating.sh
@@ -4,30 +4,41 @@ if [ ${#} -ne 1 ]; then
   echo "Usage: ${0} <path>"
 fi
 
-DOCUMENT_ROOT=${1}
+basedir=${1}
+DOCUMENT_ROOT=${basedir}/docs
+maxDepth=4
+expectedFooter="$DOCUMENT_ROOT/legal_notice.md $DOCUMENT_ROOT/accessibility.md $DOCUMENT_ROOT/data_protection_declaration.md"
 
-check_md() {
-  awk -F'/' '{print $0,NF,$NF}' <<< "${1}" | while IFS=' ' read string depth md; do
-    #echo "string=${string} depth=${depth} md=${md}"
+MSG=$(find ${DOCUMENT_ROOT} -name "*.md" | awk -F'/' '{print $0,NF}' | while IFS=' ' read string depth
+  do
+    #echo "string=${string} depth=${depth}"
 
     # max depth check 
-    if [ "${depth}" -gt "5" ]; then
-      echo "max depth (4) exceeded for ${string}"
-      exit -1
+    if [ "${depth}" -gt $maxDepth ]; then
+      echo "max depth ($maxDepth) exceeded for ${string}"
     fi
 
+    md=${string#${DOCUMENT_ROOT}/}
+
     # md included in nav 
-    if ! sed -n '/nav:/,/^$/p' ${2}/mkdocs.yml | grep --quiet ${md}; then
-      echo "${md} is not included in nav"
-      exit -1
+    numberOfReferences=`sed -n '/nav:/,/^$/p' ${basedir}/mkdocs.yml | grep -c ${md}`
+    if [ $numberOfReferences -eq 0 ]; then
+      # fallback: md included in footer 
+      if [[ "${expectedFooter}" =~ ${string} ]]; then
+        numberOfReferencesInFooter=`sed -n '/footer:/,/^$/p' ${basedir}/mkdocs.yml | grep -c /${md%.md}`
+        if [ $numberOfReferencesInFooter -eq 0 ]; then
+          echo "${md} is not included in footer"
+        elif [ $numberOfReferencesInFooter -ne 1 ]; then
+          echo "${md} is included $numberOfReferencesInFooter times in footer"
+        fi
+      else
+        echo "${md} is not included in nav"
+      fi
+    elif [ $numberOfReferences -ne 1 ]; then
+      echo "${md} is included $numberOfReferences times in nav"
     fi
   done
-}
-
-export -f check_md
-
-#find ${DOCUMENT_ROOT}/docs -name "*.md" -exec bash -c 'check_md "${0#${1}}" "${1}"' {} ${DOCUMENT_ROOT} \; 
-MSG=$(find ${DOCUMENT_ROOT}/docs -name "*.md" -exec bash -c 'check_md "${0#${1}}" "${1}"' {} ${DOCUMENT_ROOT} \;)
+)
 if [ ! -z "${MSG}" ]; then
   echo "${MSG}"
   exit -1
diff --git a/doc.zih.tu-dresden.de/util/check-spelling.sh b/doc.zih.tu-dresden.de/util/check-spelling.sh
index 8448d0bbffe534b0fd676dbd00ca82e17e7d167d..0d574c1e6adeadacb895f31209b16a9d7f25a123 100755
--- a/doc.zih.tu-dresden.de/util/check-spelling.sh
+++ b/doc.zih.tu-dresden.de/util/check-spelling.sh
@@ -7,6 +7,7 @@ basedir=`dirname "$scriptpath"`
 basedir=`dirname "$basedir"`
 wordlistfile=$(realpath $basedir/wordlist.aspell)
 branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}"
+files_to_skip=(doc.zih.tu-dresden.de/docs/accessibility.md doc.zih.tu-dresden.de/docs/data_protection_declaration.md data_protection_declaration.md)
 aspellmode=
 if aspell dump modes | grep -q markdown; then
   aspellmode="--mode=markdown"
@@ -14,9 +15,10 @@ fi
 
 function usage() {
   cat <<-EOF
-usage: $0 [file]
+usage: $0 [file | -a]
 If file is given, outputs all words of the file, that the spell checker cannot recognize.
-If file is omitted, checks whether any changed file contains more unrecognizable words than before the change.
+If parameter -a (or --all) is given instead of the file, checks all markdown files.
+Otherwise, checks whether any changed file contains more unrecognizable words than before the change.
 If you are sure a word is correct, you can put it in $wordlistfile.
 EOF
 }
@@ -29,12 +31,52 @@ function getNumberOfAspellOutputLines(){
   getAspellOutput | wc -l
 }
 
+function isWordlistUnsorted(){
+  #Unfortunately, sort depends on locale and docker does not provide much.
+  #Therefore, it uses bytewise comparison. We avoid problems with the command tr.
+  if sed 1d "$wordlistfile" | tr [:upper:] [:lower:] | sort -C; then
+    return 1
+  fi
+  return 0
+}
+
+function shouldSkipFile(){
+  printf '%s\n' "${files_to_skip[@]}" | grep -xq $1
+}
+
+function checkAllFiles(){
+  any_fails=false
+
+  if isWordlistUnsorted; then
+    echo "Unsorted wordlist in $wordlistfile"
+    any_fails=true
+  fi
+
+  files=$(git ls-tree --full-tree -r --name-only HEAD $basedir/ | grep '\.md$')
+  while read file; do
+    if [ "${file: -3}" == ".md" ]; then
+      if shouldSkipFile ${file}; then
+        echo "Skip $file"
+      else
+        echo "Check $file"
+        echo "-- File $file"
+        if { cat "$file" | getAspellOutput | tee /dev/fd/3 | grep -xq '.*'; } 3>&1; then
+          any_fails=true
+        fi
+      fi
+    fi
+  done <<< "$files"
+
+  if [ "$any_fails" == true ]; then
+    return 1
+  fi
+  return 0
+}
+
 function isMistakeCountIncreasedByChanges(){
   any_fails=false
 
-  #Unfortunately, sort depends on locale and docker does not provide much.
-  #Therefore, it uses bytewise comparison. We avoid problems with the command tr.
-  if ! sed 1d "$wordlistfile" | tr [:upper:] [:lower:] | sort -C; then
+  if isWordlistUnsorted; then
     echo "Unsorted wordlist in $wordlistfile"
     any_fails=true
   fi
@@ -48,9 +90,7 @@ function isMistakeCountIncreasedByChanges(){
   while read oldfile; do
     read newfile
     if [ "${newfile: -3}" == ".md" ]; then
-      if [[ $newfile == *"accessibility.md"* ||
-            $newfile == *"data_protection_declaration.md"* ||
-            $newfile == *"legal_notice.md"* ]]; then
+      if shouldSkipFile ${newfile:2}; then
         echo "Skip $newfile"
       else
         echo "Check $newfile"
@@ -90,6 +130,9 @@ if [ $# -eq 1 ]; then
     usage
     exit
   ;;
+  -a | --all)
+    checkAllFiles
+  ;;
   *)
     cat "$1" | getAspellOutput
   ;;
diff --git a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
index 280e4003dc951164c86b44560d6c81e3a5dc640c..f3cfa673ce063a674cb2f850d7f7da252a6ab093 100755
--- a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
+++ b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
@@ -6,41 +6,53 @@ scriptpath=${BASH_SOURCE[0]}
 basedir=`dirname "$scriptpath"`
 basedir=`dirname "$basedir"`
 
-#This is the ruleset. Each line represents a rule of tab-separated fields.
+#This is the ruleset. Each rule consists of a message (first line), a tab-separated list of files to skip (second line) and a pattern specification (third line).
+#A pattern specification is a tab-separated list of fields:
 #The first field represents whether the match should be case-sensitive (s) or insensitive (i).
 #The second field represents the pattern that should not be contained in any file that is checked.
 #Further fields represent patterns with exceptions.
 #For example, the first rule says:
 # The pattern \<io\> should not be present in any file (case-insensitive match), except when it appears as ".io".
 ruleset="The word \"IO\" should not be used, use \"I/O\" instead.
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	\<io\>	\.io
 \"SLURM\" (only capital letters) should not be used, use \"Slurm\" instead.
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 s	\<SLURM\>
 \"File system\" should be written as \"filesystem\", except when used as part of a proper name.
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	file \+system	HDFS
 Use \"ZIH systems\" or \"ZIH system\" instead of \"Taurus\". \"taurus\" is only allowed when used in ssh commands and other very specific situations.
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md	doc.zih.tu-dresden.de/docs/archive/phase2_migration.md
 i	\<taurus\>	taurus\.hrsk	/taurus	/TAURUS	ssh	^[0-9]\+:Host taurus$
 \"HRSKII\" should be avoided, use \"ZIH system\" instead.
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	\<hrskii\>
 The term \"HPC-DA\" should be avoided. Depending on the situation, use \"data analytics\" or similar.
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	hpc[ -]\+da\>
 \"ATTACHURL\" was a keyword in the old wiki, don't use it.
+
 i	attachurl
 Replace \"todo\" with real content.
+doc.zih.tu-dresden.de/docs/archive/system_triton.md
 i	\<todo\>	<!--.*todo.*-->
+Replace variations of \"Coming soon\" with real content.
+
+i	\(\<coming soon\>\|This .* under construction\|posted here\)
 Avoid spaces at end of lines.
+doc.zih.tu-dresden.de/docs/accessibility.md
 i	[[:space:]]$
 When referencing partitions, put keyword \"partition\" in front of partition name, e. g. \"partition ml\", not \"ml partition\".
-i	\(alpha\|ml\|haswell\|romeo\|gpu\|smp\|julia\|hpdlf\|scs5\)-\?\(interactive\)\?[^a-z]*partition
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
+i	\(alpha\|ml\|haswell\|romeo\|gpu\|smp\|julia\|hpdlf\|scs5\|dcv\)-\?\(interactive\)\?[^a-z]*partition
 Give hints in the link text. Words such as \"here\" or \"this link\" are meaningless.
-i	\[\s\?\(documentation\|here\|this \(link\|page\|subsection\)\|slides\?\|manpage\)\s\?\]
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
+i	\[\s\?\(documentation\|here\|more info\|this \(link\|page\|subsection\)\|slides\?\|manpage\)\s\?\]
 Use \"workspace\" instead of \"work space\" or \"work-space\".
+doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	work[ -]\+space"
 
-# Whitelisted files will be ignored
-# Whitespace separated list with full path
-whitelist=(doc.zih.tu-dresden.de/README.md doc.zih.tu-dresden.de/docs/contrib/content_rules.md)
-
 function grepExceptions () {
   if [ $# -gt 0 ]; then
     firstPattern=$1
@@ -55,22 +67,29 @@ function checkFile(){
   f=$1
   echo "Check wording in file $f"
   while read message; do
+    IFS=$'\t' read -r -a files_to_skip
+    skipping=""
+    if (printf '%s\n' "${files_to_skip[@]}" | grep -xq $f); then
+      skipping=" -- skipping"
+    fi
     IFS=$'\t' read -r flags pattern exceptionPatterns
     while IFS=$'\t' read -r -a exceptionPatternsArray; do
       if [ $silent = false ]; then
-        echo "  Pattern: $pattern"
+        echo "  Pattern: $pattern$skipping"
       fi
-      grepflag=
-      case "$flags" in
-        "i")
-          grepflag=-i
-        ;;
-      esac
-      if grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" ; then
-        number_of_matches=`grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" | wc -l`
-        ((cnt=cnt+$number_of_matches))
-        if [ $silent = false ]; then
-          echo "    $message"
+      if [ -z "$skipping" ]; then
+        grepflag=
+        case "$flags" in
+          "i")
+            grepflag=-i
+          ;;
+        esac
+        if grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" ; then
+          number_of_matches=`grep -n $grepflag $color "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" | wc -l`
+          ((cnt=cnt+$number_of_matches))
+          if [ $silent = false ]; then
+            echo "    $message"
+          fi
         fi
       fi
     done <<< $exceptionPatterns
@@ -123,7 +142,7 @@ branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}"
 
 if [ $all_files = true ]; then
   echo "Search in all markdown files."
-  files=$(git ls-tree --full-tree -r --name-only HEAD $basedir/docs/ | grep .md)
+  files=$(git ls-tree --full-tree -r --name-only HEAD $basedir/ | grep '\.md$')
 elif [[ ! -z $file ]]; then
   files=$file
 else
@@ -138,10 +157,6 @@ if [[ ! -z $file ]]; then
 else
   for f in $files; do
     if [ "${f: -3}" == ".md" -a -f "$f" ]; then
-      if (printf '%s\n' "${whitelist[@]}" | grep -xq $f); then
-        echo "Skip whitelisted file $f"
-        continue
-      fi
       checkFile $f
     fi
   done
diff --git a/doc.zih.tu-dresden.de/util/pre-commit b/doc.zih.tu-dresden.de/util/pre-commit
index eb63bbea24052eb1dff4ec16a17b8b5aba275e18..1cc901e00efbece94209bfa6c4bbbc54aad682e9 100755
--- a/doc.zih.tu-dresden.de/util/pre-commit
+++ b/doc.zih.tu-dresden.de/util/pre-commit
@@ -75,6 +75,13 @@ then
   exit_ok=no
 fi
 
+echo "Looking for empty files..."
+docker run --name=hpc-compendium --rm -w /docs --mount src="$(pwd)",target=/docs,type=bind hpc-compendium ./doc.zih.tu-dresden.de/util/check-empty-page.sh
+if [ $? -ne 0 ]
+then
+  exit_ok=no
+fi
+
 if [ $exit_ok == yes ]
 then
   exit 0
diff --git a/doc.zih.tu-dresden.de/wordlist.aspell b/doc.zih.tu-dresden.de/wordlist.aspell
index 52ab5e8401892789922402dcf4a9d186ca67ca9b..8bcd6a7c24872843e665bc7fc1ed91241284c780 100644
--- a/doc.zih.tu-dresden.de/wordlist.aspell
+++ b/doc.zih.tu-dresden.de/wordlist.aspell
@@ -65,7 +65,11 @@ DockerHub
 dockerized
 dotfile
 dotfiles
+downtime
+downtimes
+EasyBlocks
 EasyBuild
+EasyConfig
 ecryptfs
 engl
 english
@@ -79,6 +83,7 @@ FFT
 FFTW
 filesystem
 filesystems
+flink
 Flink
 FMA
 foreach
@@ -87,6 +92,7 @@ Galilei
 Gauss
 Gaussian
 GBit
+GDB
 GDDR
 GFLOPS
 gfortran
@@ -116,7 +122,9 @@ Horovod
 horovodrun
 hostname
 Hostnames
+hpc
 HPC
+hpcsupport
 HPE
 HPL
 html
@@ -132,6 +140,7 @@ img
 Infiniband
 init
 inode
+Instrumenter
 IOPS
 IPs
 ISA
@@ -139,6 +148,7 @@ Itanium
 jobqueue
 jpg
 jss
+jupyter
 Jupyter
 JupyterHub
 JupyterLab
@@ -164,6 +174,7 @@ MathWorks
 matlab
 MEGWARE
 mem
+Memcheck
 MiB
 Microarchitecture
 MIMD
@@ -171,6 +182,7 @@ Miniconda
 mkdocs
 MKL
 MNIST
+MobaXTerm
 modenv
 modenvs
 modulefile
@@ -191,6 +203,7 @@ multithreaded
 Multithreading
 NAMD
 natively
+nbgitpuller
 nbsp
 NCCL
 Neptun
@@ -228,6 +241,7 @@ pandarallel
 PAPI
 parallelization
 parallelize
+parallelized
 parfor
 pdf
 perf
@@ -247,16 +261,21 @@ pre
 Preload
 preloaded
 preloading
+prepend
 preprocessing
 PSOCK
+Pthread
 Pthreads
 pty
+PuTTY
 pymdownx
 PythonAnaconda
 pytorch
 PyTorch
 Quantum
 queue
+quickstart
+Quickstart
 randint
 reachability
 README
@@ -301,6 +320,7 @@ Slurm
 SLURMCluster
 SMP
 SMT
+spython
 squeue
 srun
 ssd
@@ -337,7 +357,9 @@ undistinguishable
 unencrypted
 uplink
 userspace
+Valgrind
 Vampir
+VampirServer
 VampirTrace
 VampirTrace's
 VASP