diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
deleted file mode 100644
index f1875b3481da6d11053e5ad8aed49ae53033e5c4..0000000000000000000000000000000000000000
--- a/.gitlab-ci.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-variables:
-    GIT_STRATEGY: none
-    DOCKER_IMAGE: webpage:$CI_PIPELINE_ID
-
-workflow:
-  rules:
-    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
-    - if: '$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS'
-      when: never
-    - if: '$CI_COMMIT_BRANCH'
-
-stages:
-    - build
-    - test
-    - release
-    - cleanup
-
-
-Build Linter:
-    stage: build
-    variables:
-        GIT_STRATEGY: clone
-        GIT_DEPTH: 0
-    script: docker build -t "${DOCKER_IMAGE}" .
-
-Test mkdocs:
-    stage: test
-    script: docker run ${DOCKER_IMAGE}
-
-Check wording of changed md-files:
-    stage: test
-    script:
-        - docker run --rm -w /src -e CI_MERGE_REQUEST_TARGET_BRANCH_NAME "${DOCKER_IMAGE}"
-          doc.zih.tu-dresden.de/util/grep-forbidden-words.sh
-    only: [ merge_requests ]
-
-Lint changed md-files:
-    stage: test
-    script:
-        - docker run --rm -w /src -e CI_MERGE_REQUEST_TARGET_BRANCH_NAME "${DOCKER_IMAGE}"
-          doc.zih.tu-dresden.de/util/lint-changes.sh
-    only: [ merge_requests ]
-
-Check spelling for changed md-files:
-    stage: test
-    script:
-        - docker run --rm -w /src -e CI_MERGE_REQUEST_TARGET_BRANCH_NAME "${DOCKER_IMAGE}"
-          doc.zih.tu-dresden.de/util/check-spelling.sh
-    only: [ merge_requests ]
-
-Check links for changed md-files:
-    stage: test
-    script:
-        - docker run --rm -w /src -e CI_MERGE_REQUEST_TARGET_BRANCH_NAME "${DOCKER_IMAGE}"
-          doc.zih.tu-dresden.de/util/check-links.sh
-    only: [ merge_requests ]
-
-Lint md-files:
-    stage: test
-    script: docker run --rm "${DOCKER_IMAGE}" markdownlint docs
-    only: [ main, preview ]
-
-Check links for md-files:
-    stage: test
-    script:
-        - docker run --rm "${DOCKER_IMAGE}"
-          bash -c "find docs -type f -name '*.md' | xargs -L1 markdown-link-check --quiet"
-    only: [ main, preview ]
-
-Release preview branch:
-    stage: release
-    script:
-        - docker run --rm -v /var/www/html/preview:/mnt "${DOCKER_IMAGE}" mkdocs build --strict --site-dir /mnt
-    only: [ preview ]
-
-Release:
-    stage: release
-    script:
-        - docker run --rm -v /var/www/html/hpc-wiki:/mnt "${DOCKER_IMAGE}" mkdocs build --strict --site-dir /mnt
-    only: [ main ]
-
-Cleanup docker:
-    stage: cleanup
-    script:
-        - docker rmi -f "${DOCKER_IMAGE}"
-        - docker system prune --force
-    when: always
diff --git a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
index 1b1d5b460d78b65f5f8516b827e06e7782480fe8..eb8beb7b97cd3b10d24c2df10f77b6e38096691c 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
@@ -327,7 +327,8 @@ highlighting. There is a complete list of supported
 
 Where possible, replace login, project name, and other private data with clearly recognizable
 placeholders. In particular, use the generic login `marie` and the project title `p_number_crunch`
-as placeholders.
+as placeholders at first. If you need a second login and a second project, stick to `martin` and
+`p_long_computations`.
 
 ```console
 marie@login$ ls -l
@@ -344,6 +345,8 @@ drwxr-xr-x   3 marie p_number_crunch      4096 Feb 12  2020 data
     and [Marianne](https://en.wikipedia.org/wiki/Marianne), symbol of France standing for liberty,
     equality and fraternity.
 
+    The very same holds for the generic login *martin*.
+
 #### Placeholders
 
 Placeholders represent arguments or code parts that can be adapted to the user's needs. Use them to
diff --git a/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md b/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
index 3d3082e8969d486934449020fd7118f37a036e5d..065f40b75db35cd0ec4ac5c3e1a8fa51347c546e 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
@@ -4,30 +4,104 @@
 
     Ink is better than the best memory.
 
-This documentation is written in markdown and translated into static html pages using
-[mkdocs](https://www.mkdocs.org/). A single configuration file holds the pages structure
-as well as specification of the theme and extensions. This file is `mkdocs.yaml`.
+In this section you will find information about the technical setup of this documentation, the
+content rules that apply, the Git workflow, and specific ways to contribute.
 
-We manage all essential files (markdown pages, graphics, configuration, theme, etc.) within a Git
-repository, which makes it quite easy to contribute to this documentation. In principle, there are
-three possible ways how to contribute to this documentation. These ways are outlined below.
+Your contributions are highly welcome. This can range from fixing typos, improving the phrasing and
+wording to adopting examples, command lines and adding new content. Our goal is to provide a
+general, consistent and current documentation. Thus, it is by no means a static documentation.
+Moreover, it is constantly reviewed and updated.
 
-## Content Guide Lines
+## Technical Setup
 
-To ensure a high-quality and consistent documentation and to make it easier for readers to
-understand all content, we set some [Content rules](content_rules.md). Please follow
+This documentation is written in markdown and translated into static html pages using
+[mkdocs](https://www.mkdocs.org/). The single configuration file `mkdocs.yml` contains the page
+structure as well as the specification of the theme and extensions.
+
+We manage all essential files (markdown pages, graphics, configuration, theme, etc.) within a
+[public Git repository](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium),
+allowing for collaborative working and revision control. GitLab's features offer different
+possibilities of contribution and ensure current and consistent content by including a review
+process. There are three possible ways how you can contribute to this documentation.
+These are described below.
+
+!!! tip "Before you start"
+
+    Before you start your very first commit, please make sure that you are familiar with our
+    [Git workflow](#git-workflow) and that you have at least skimmed through the
+    [Content Rules](content_rules.md).
+
+## Git Workflow
+
+We employ a so-called Git feature workflow with a development branch. In our case, the working branch
+is called `preview` and is kept in parallel to the `main` branch.
+
+All contributions, e.g., new content, improved wording, fixed typos, etc., are added to separate
+feature branches which base on `preview`. If the contribution is ready, you will have to create a
+merge request back to the `preview` branch. A member of the ZIH team will review the changes
+(four-eyes principle) and finally merge your changes to `preview`. All contributions need to pass
+through the CI pipeline consisting of several checks to ensure compliance with the content rules.
+Please, don't worry too much about the checks. The ZIH staff will help you with that. You can find
+more information about the [CI/CD pipeline](#cicd-pipeline) in the eponymous subsection.
+
+In order to publish the updates and make them visible in the compendium,
+the changes on `preview` branch are either automatically merged into the `main` branch on every
+Monday via a pipeline schedule, or manually by admin staff. Moreover, the `main` branch is deployed
+to [https://compendium.hpc.tu-dresden.de](https://compendium.hpc.tu-dresden.de) and always reflects
+a production-ready state. Manual interventions are only necessary in case of merge conflicts.
+This process is handled by the admins.
+
+???+ note "Graphic on Git workflow"
+
+    The applied Git workflow is depicted in the following graphic. Here, two feature branches `foo`
+    and `bar` are created basing on `preview`. Three individual commits are added to branch `foo`
+    before it is ready and merged back to `preview`. The contributions on `bar` consist of only one
+    commit. In the end, all contributions are merged to the `main` branch.
+
+    ```mermaid
+    %% Issues:
+    %% - showCommitLabel: false does not work; workaround is to use `commit id: " "`%%
+    %% - Changing the theme does not effect the rendered output. %%
+    %%{init: { 'logLevel': 'debug', 'theme': 'base', 'gitGraph': {'showCommitLabel': false} }}%%
+    gitGraph
+        commit
+        branch preview
+        checkout preview
+        commit
+        branch foo
+        checkout foo
+        commit
+        commit
+        checkout preview
+        branch bar
+        checkout bar
+        commit
+        checkout preview
+        merge bar
+        checkout foo
+        commit
+        checkout preview
+        merge foo
+        checkout main
+        merge preview
+    ```
+
+## Content Rules
+
+To ensure a high-quality and consistent documentation, and to make it easier for readers to
+understand all content, we have established [Content rules](content_rules.md). Please follow
 these rules regarding markdown syntax and writing style when contributing! Furthermore, reviewing
 your changes takes less time and your improvements appear faster on the official documentation.
 
 !!! note
 
     If you contribute, you are fully and solely responsible for the content you create and have to
-    ensure that you have the right to create it under the laws which apply.
+    ensure that you have the right to create it under applicable laws.
 
 ## Contribute via Issue
 
-You can contribute to the documentation via the
-[GitLab issue tracking system](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/issues).
+You can contribute to the documentation using
+[GitLab's issue tracking system](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/issues).
 For that, open an issue to report typos and missing documentation or request for more precise
 wording etc. ZIH staff will get in touch with you to resolve the issue and improve the
 documentation.
@@ -45,14 +119,21 @@ documentation.
 ## Contribute via Web IDE
 
 If you have a web browser (most probably you are using it to read this page) and want to contribute
-to the documentation, you are good to go. GitLab offers a rich and versatile web interface to work
-with repositories. To start fixing typos and edit source files, please find more information on
-[Contributing via web browser](contribute_browser.md).
+to the documentation, you are good to go. GitLab offers a rich and versatile web interface for
+working with repositories. To start fixing typos and edit source files, you can find more
+information on the page [Contributing via web browser](contribute_browser.md).
 
 ## Contribute via Local Clone
 
-For experienced Git users, we provide a Docker container that includes all checks of the CI engine
-used in the back-end. Using them should ensure that merge requests will not be blocked
-due to automatic checking.
-The page on [Contributing via local clone](contribute_container.md) provides you with the details
-about how to setup and use your local clone of the repository.
+For experienced Git users, we provide a Docker container that includes all the checks of the CI
+engine used in the backend. Using them should ensure that merge requests are not blocked due to
+automatic checks. The page [Contributing via local clone](contribute_container.md) provides you
+with the details about how to set up and use your local clone of the repository.
+
+## CI/CD Pipeline
+
+All contributions need to pass through the CI pipeline which consists of various checks to ensure
+that the [content rules](content_rules.md) have been followed.
+
+The stages of the CI/CD pipeline are defined in a `.gitlab-ci.yml` file. For security reasons, this
+file is maintained in a second, private repository.
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
index a450eb22b3ea7cadb756d78bb15bcb5c04773de1..515628fb9be4aeb3ce5d01ef928026b521be5832 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
@@ -3,9 +3,8 @@
 Storage systems differ in terms of capacity, streaming bandwidth, IOPS rate, etc. Price and
 efficiency don't allow to have it all in one. That is why fast parallel filesystems at ZIH have
 restrictions with regards to **age of files** and [quota](permanent.md#quotas). The mechanism of
-workspaces enables users to better manage their HPC data.
-
-The concept of workspaces is common and used at a large number of HPC centers.
+workspaces enables you to better manage your HPC data. It is common and used at a large number
+of HPC centers.
 
 !!! note
 
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md b/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
index 28aba7bbfdcec8411f6510061d509c949d128f34..80631a56987f8b5f67fca331d65d558740ec80e2 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
@@ -1,4 +1,4 @@
-# Datamover - Data Transfer Inside ZIH Systems
+# Transfer Data Inside ZIH Systems with Datamover
 
 With the **Datamover**, we provide a special data transfer machine for transferring data with best
 transfer speed between the filesystems of ZIH systems. The Datamover machine is not accessible
@@ -70,7 +70,7 @@ To identify the mount points of the different filesystems on the data transfer m
 
 ## Transferring Files Between ZIH Systems and Group Drive
 
-1. Copy your private SSH key from ZIH system to `login1.zih.tu-dresden.de`.
+1. Copy your public SSH key from ZIH system to `login1.zih.tu-dresden.de`.
 
    ``` console
    marie@login$ ssh-copy-id -i ~/.ssh/id_rsa.pub login1.zih.tu-dresden.de
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
index b4a22c95e3193bc9ff1a7c43b107fe5f7f74f953..2b3a3da9e005352b1c2165afa3ce184486b89e30 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
@@ -1,10 +1,11 @@
-# Export Nodes - Data Transfer to/from ZIH Systems
+# Transfer Data to/from ZIH Systems via Export Nodes
 
 To copy large data to/from ZIH systems, the so-called **export nodes** should be used. While it is
 possible to transfer small files directly via the login nodes, they are not intended to be used that
 way. Furthermore, longer transfers will hit the CPU time limit on the login nodes, i.e. the process
 get killed. The **export nodes** have a better uplink (10 GBit/s) allowing for higher bandwidth. Note
-that you cannot log in via SSH to the export nodes, but only use `scp`, `rsync` or `sftp` on them.
+that you cannot log in via SSH to the export nodes, but only use `scp`, `rsync` or `sftp`
+(including FTP clients such as [FileZilla](https://filezilla-project.org/)) on them.
 
 The export nodes are reachable under the hostname `taurusexport.hrsk.tu-dresden.de` (or
 `taurusexport3.hrsk.tu-dresden.de` and `taurusexport4.hrsk.tu-dresden.de`).
diff --git a/doc.zih.tu-dresden.de/docs/index.md b/doc.zih.tu-dresden.de/docs/index.md
index 6c0f7fb7fc104f60629293c870009759dff01a5a..c22ef202a4408ab09d938219fa9be8b896cd7ae1 100644
--- a/doc.zih.tu-dresden.de/docs/index.md
+++ b/doc.zih.tu-dresden.de/docs/index.md
@@ -1,20 +1,28 @@
 # ZIH HPC Documentation
 
 This is the documentation of the HPC systems and services provided at
-[TU Dresden/ZIH](https://tu-dresden.de/zih/). This documentation is work in progress, since we try
-to incorporate more information with increasing experience and with every question you ask us. The
-HPC team invites you to take part in the improvement of these pages by correcting or adding useful
-information.
+[TU Dresden/ZIH](https://tu-dresden.de/zih/).
+
+This documentation will be continuously updated, since we try
+to incorporate more information with increasing experience and with every question you ask us.
+
+If the provided HPC systems and services helped to advance your research, please cite us. Why this
+is important and acknowledgment examples can be found in the section
+[Acknowledgement](https://doc.zih.tu-dresden.de/application/acknowledgement/).
 
 ## Contribution
 
-Your contributions are highly welcome. The easiest way for you to contribute is to report issues via
+The HPC team invites you to take part in the improvement of these pages by correcting or adding
+useful information. Your contributions are highly welcome!
+
+The easiest way for you to contribute is to report issues via
 the GitLab
 [issue tracking system](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/issues).
 Please check for any already existing issue before submitting your issue in order to avoid duplicate
 issues.
 
-Please also find out the other ways you could contribute in our [guidelines how to contribute](contrib/howto_contribute.md).
+Please also find out the other ways you could contribute in our
+[guidelines how to contribute](contrib/howto_contribute.md).
 
 !!! tip "Reminder"
 
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
index 5a0e15613665ececfaa2d0915fd7022c742a9288..b5b09281dd9ab9fdde89c7ae4ffe9ad4ec48c089 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
@@ -12,6 +12,14 @@ The hardware specification is documented on the page
     The NVIDIA A100 GPUs may only be used with **CUDA 11** or later. Earlier versions do not
     recognize the new hardware properly. Make sure the software you are using is built with CUDA11.
 
+There is a total of 48 physical cores in each node. SMT is also active, so in total, 96 logical
+cores are available per node.
+
+!!! note
+
+    Multithreading is disabled by default in a job.
+    See the [Slurm page](slurm.md) on how to enable it.
+
 ### Modules
 
 The easiest way is using the [module system](../software/modules.md).
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/binding_and_distribution_of_tasks.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/binding_and_distribution_of_tasks.md
index 800af9adce4a9ffec7644ab92413f80449a4ad7c..3b84e8564e7bb29f9d442ad37876363587e23efc 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/binding_and_distribution_of_tasks.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/binding_and_distribution_of_tasks.md
@@ -270,3 +270,9 @@ node and the second node while filling the sockets linearly.
     export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
     srun --ntasks 8 --cpus-per-task $OMP_NUM_THREADS --cpu_bind=cores --distribution=cyclic:block ./application
     ```
+
+## GPU
+
+Currently, with the Slurm version (20.11.9) used on ZIH systems,
+it **is not possible** to bind tasks to GPUs. It will be possible as soon as Slurm is updated at
+least to version 21.08.0 (see [GRES/MIG documentation in Slurm 21.08.0](https://slurm.schedmd.com/archive/slurm-21.08.0/gres.html#MIG_Management)).
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
index c4bd1c7909fda4fa27703c00c68e284be07a4cb0..538296b4ea52aee6c99f132811af8112803adcf9 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
@@ -26,7 +26,7 @@ users and the ZIH.
 
 - 34 nodes, each with
     - 8 x NVIDIA A100-SXM4 Tensor Core-GPUs
-    - 2 x AMD EPYC CPU 7352 (24 cores) @ 2.3 GHz, Multithreading disabled
+    - 2 x AMD EPYC CPU 7352 (24 cores) @ 2.3 GHz, Multithreading available
     - 1 TB RAM
     - 3.5 TB local memory on NVMe device at `/tmp`
 - Hostnames: `taurusi[8001-8034]`
@@ -36,7 +36,7 @@ users and the ZIH.
 ## Island 7 - AMD Rome CPUs
 
 - 192 nodes, each with
-    - 2 x AMD EPYC CPU 7702 (64 cores) @ 2.0 GHz, Multithreading enabled,
+    - 2 x AMD EPYC CPU 7702 (64 cores) @ 2.0 GHz, Multithreading available
     - 512 GB RAM
     - 200 GB local memory on SSD at `/tmp`
 - Hostnames: `taurusi[7001-7192]`
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
index d0f140d3bddf56816a6e0f41c5e063afed91a77d..98063cb50337c5396ee3125e245d9d929abe0679 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm.md
@@ -321,6 +321,32 @@ provide a comprehensive collection of job examples.
     * Submisson: `marie@login$ sbatch batch_script.sh`
     * Run with fewer MPI tasks: `marie@login$ sbatch --ntasks=14 batch_script.sh`
 
+## Using Simultaneous Multithreading (SMT)
+
+Most modern architectures offer simultaneous multithreading (SMT), where physical cores of a CPU are
+split into virtual cores (aka. threads). This technique allows to run two instruction streams per
+physical core in parallel.
+
+At ZIH systems, SMT is available at the partitions `rome` and `alpha`. It is deactivated by
+default, because the environment variable `SLURM_HINT` is set to `nomultithread`.
+If you wish to make use of the SMT cores, you need to explicitly activate it.
+In principle, there are two different ways:
+
+1. Change the value of the environment variable via `export SLURM_HINT=multithread` in your current
+   shell and submit your job file, or invoke your `srun` or `salloc` command line.
+
+1. Clear the environment variable via `unset SLURM_HINT` and provide the option `--hint=multithread`
+   to `sbatch`, `srun` or `salloc` command line.
+
+??? warning
+
+     If you like to activate SMT via the directive
+     ```
+     #SBATCH --hint=multithread
+     ```
+     within your job file, you also have to clear the environment variable `SLURM_HINT` before
+     submitting the job file. Otherwise, the environment variable `SLURM_HINT` takes precedence.
+
 ## Heterogeneous Jobs
 
 A heterogeneous job consists of several job components, all of which can have individual job
diff --git a/doc.zih.tu-dresden.de/docs/software/singularity_recipe_hints.md b/doc.zih.tu-dresden.de/docs/software/singularity_recipe_hints.md
index b8304b57de0f1ae5da98341c92f6d9067b838ecd..6f60c340c53b97da31529a4e21a4cf9b20761063 100644
--- a/doc.zih.tu-dresden.de/docs/software/singularity_recipe_hints.md
+++ b/doc.zih.tu-dresden.de/docs/software/singularity_recipe_hints.md
@@ -32,8 +32,9 @@ From: alpine
   cd ..
   rm -r fmt-5.3.0*
 
-  cat hello.cpp
-#include <fmt/format.h>
+  cat <<'EOF' >>  hello.cpp
+
+#include <fmt/format.h>  // literal
 
 int main(int argc, char** argv){
   if(argc == 1) fmt::print("No arguments passed!\n");
@@ -58,6 +59,70 @@ EOF
     ./hello
 ```
 
+### Distributed memory
+
+#### MPICH
+
+Ubuntu+MPICH definition file:
+
+```bash
+Bootstrap: docker
+From: ubuntu:20.04
+
+%files
+    mpitest.c /opt
+
+%environment
+    export MPICH_DIR=/opt/mpich
+    export SINGULARITY_MPICH_DIR=$MPICH_DIR
+    export SINGULARITYENV_APPEND_PATH=$MPICH_DIR/bin
+    export SINGULARITYENV_APPEND_LD_LIBRARY_PATH=$MPICH_DIR/lib
+
+%post
+    echo "Installing required packages..."
+    apt-get update && apt-get install -y wget git bash gcc gfortran g++ make file
+
+    # required for F90 bindings
+    apt-get install -y python3
+
+    echo "Installing MPICH"
+    export MPICH_DIR=/opt/mpich
+    export MPICH_VERSION=4.1
+    export MPICH_URL="https://www.mpich.org/static/downloads/$MPICH_VERSION/mpich-$MPICH_VERSION.tar.gz"
+    mkdir -p /tmp/mpich
+    mkdir -p /opt
+    # Download
+    cd /tmp/mpich && wget -O mpich-$MPICH_VERSION.tar.gz $MPICH_URL && tar -xf mpich-$MPICH_VERSION.tar.gz
+    
+    # Configure and compile/install
+    cd /tmp/mpich/mpich-$MPICH_VERSION
+    ./configure --prefix=$MPICH_DIR && make install
+    
+    
+    # Set env variables so we can compile our application
+    export PATH=$MPICH_DIR/bin:$PATH
+    export LD_LIBRARY_PATH=$MPICH_DIR/lib:$LD_LIBRARY_PATH
+    export MANPATH=$MPICH_DIR/share/man:$MANPATH
+    
+    
+    echo "Compiling the MPI application..."
+    cd /opt && mpicc -o mpitest mpitest.c
+```
+
+At your local machine:
+
+```console
+marie@local$ sudo singularity build ubuntu_mpich.sif ubuntu_mpich.def
+```
+
+This will create the `ubuntu_mpich.sif` file that you have to copy to HPC system.
+
+At the HPC system run as following:
+
+```console
+marie@login$ srun -n 4 --ntasks-per-node 2 --time=00:10:00 singularity exec ubuntu_mpich.sif /opt/mpitest
+```
+
 ### CUDA + CuDNN + OpenMPI
 
 * Chosen CUDA version depends on installed driver of host
diff --git a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
index a6741b324d161380c440a5e13d81688046bca6ee..e8cd43a27ecd78a230bb8f2890a6ae68b7c71dba 100755
--- a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
+++ b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
@@ -38,7 +38,7 @@ Do not use \"up-to-date\", because this should be an inherent property of this g
 
 i	\(\<up-to-date\>\|up to date\)
 Replace \"todo\" with real content.
-doc.zih.tu-dresden.de/docs/archive/system_triton.md
+doc.zih.tu-dresden.de/docs/archive/system_triton.md	doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
 i	\<todo\>	<!--.*todo.*-->
 Replace variations of \"Coming soon\" with real content.
 
diff --git a/doc.zih.tu-dresden.de/wordlist.aspell b/doc.zih.tu-dresden.de/wordlist.aspell
index 54c0092c1c8cc3eda9c37f9780936ffe7ecc8b29..f523a59839eb5e03c6f6496515e62a87e3016d20 100644
--- a/doc.zih.tu-dresden.de/wordlist.aspell
+++ b/doc.zih.tu-dresden.de/wordlist.aspell
@@ -1,6 +1,7 @@
 personal_ws-1.1 en 475
 aarch
 Abaqus
+Acknowledgement
 ACL
 ACLs
 Addon
@@ -105,6 +106,7 @@ FFT
 FFTW
 filesystem
 filesystems
+FileZilla
 flink
 Flink
 FlinkExample