diff --git a/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md b/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md
index 02b78701bd8fa5b5eb3fbb7ed2de2cae9639042e..a1d2966509244d71d32c7bfa22d74c18b45be628 100644
--- a/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md
+++ b/doc.zih.tu-dresden.de/docs/archive/install_jupyter.md
@@ -148,7 +148,7 @@ c.NotebookApp.allow_remote_access = True
 #SBATCH --time=02:30:00
 #SBATCH --mem=4000M
 #SBATCH -J "jupyter-notebook" # job-name
-#SBATCH -A p_marie
+#SBATCH -A p_number_crunch
 
 unset XDG_RUNTIME_DIR   # might be required when interactive instead of sbatch to avoid 'Permission denied error'
 srun jupyter notebook
diff --git a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
index 17322f635a304c8b47a79923d84e1a3c900914ee..358cd153282e3bdf4240f144bbafa618151f383d 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
@@ -1,6 +1,22 @@
 # Content Rules
 
-**Remark:** Avoid using tabs both in markdown files and in `mkdocs.yaml`. Type spaces instead.
+## Responsibility And License
+
+This documentation and the repository have two licenses (cf. [Legal Notice](../legal_notice.md)):
+
+* All documentation is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
+* All software components are licensed under [MIT license](../license_mit.txt).
+
+These licenses also apply to your contributions.
+
+!!! note
+
+    Each user is fully and solely responsible for the content he/she creates and must ensure
+    that he/she has the right to create it under applicable law.
+
+If you are in doubt, please contact us either via
+[GitLab Issue](https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium/-/issues)
+or via [Email](mailto:hpcsupport@zih.tu-dresden.de).
 
 ## New Page and Pages Structure
 
@@ -50,6 +66,7 @@ should be highlighted, etc. Code examples, longer than half screen height should
 
 ## Writing Style
 
+* Avoid tabs in both markdown files and `mkdocs.yaml`. Use spaces instead.
 * Capitalize headings, e.g. *Exclusive Reservation of Hardware*
 * Give keywords in link texts, e.g. [Code Blocks](#code-blocks-and-syntax-highlighting) is more
   descriptive than [this subsection](#code-blocks-and-syntax-highlighting)
@@ -117,7 +134,7 @@ We follow this rules regarding prompts:
   an example invocation, perhaps with output, should be given with the normal `console` code block.
   See also [Code Block description below](#code-blocks-and-syntax-highlighting).
 * Using some magic, the prompt as well as the output is identified and will not be copied!
-* Stick to the [generic user name](#data-privacy-and-generic-user-name) `marie`.
+* Stick to the [generic user name](#data-privacy-and-generic-names) `marie`.
 
 ### Code Blocks and Syntax Highlighting
 
@@ -228,16 +245,17 @@ _Result_:
 
 ![lines](misc/highlight_lines.png)
 
-### Data Privacy and Generic User Name
+### Data Privacy and Generic Names
 
-Where possible, replace login, project name and other private data with clearly arbitrary placeholders.
-E.g., use the generic login `marie` and the corresponding project name `p_marie`.
+Where possible, replace login names, project names, and other private data with clearly arbitrary
+placeholders. In particular, use the generic login `marie` and the generic project name
+`p_number_crunch` as placeholders.
 
 ```console
 marie@login$ ls -l
-drwxr-xr-x   3 marie p_marie      4096 Jan 24  2020 code
-drwxr-xr-x   3 marie p_marie      4096 Feb 12  2020 data
--rw-rw----   1 marie p_marie      4096 Jan 24  2020 readme.md
+drwxr-xr-x   3 marie p_number_crunch      4096 Jan 24  2020 code
+drwxr-xr-x   3 marie p_number_crunch      4096 Feb 12  2020 data
+-rw-rw----   1 marie p_number_crunch      4096 Jan 24  2020 readme.md
 ```
 
 ## Mark Omissions
diff --git a/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md b/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
index 15382289e3b0b7abcfb9621fcbb33cde302dc1fc..7b7fa019095ebf9b721751d12e6b73fa717c4334 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/contribute_container.md
@@ -42,11 +42,37 @@ description of your changes. If you work on an issue, please also add "Closes 17
 `git push origin 174-check-contribution-documentation`.
 1. As an output you get a link to create a merge request against the preview branch.
 1. When the merge request is created, a continuous integration (CI) pipeline automatically checks
-your contributions.
-
-When you contribute, please follow our [content rules](content_rules.md) to make incorporating your
-changes easy. We also check these rules via continuous integration checks and/or reviews.
-You can find the details and commands to preview your changes and apply checks in the next sections.
+your contributions. If you forked the repository, these automatic checks are not available, but you
+can [run checks locally](#run-the-proposed-checks-inside-container).
+
+!!! tip
+
+    When you contribute, please follow our [content rules](content_rules.md) to make incorporating
+    your changes easy. We also check these rules via continuous integration checks and/or reviews.
+    You can find the details and commands to [preview your changes](#start-the-local-web-server) and
+    [apply checks](#run-the-proposed-checks-inside-container).
+
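+For example, assuming you have a local clone, you might invoke two of the provided check scripts
+directly (see the following sections for the full set of checks and their exact usage):
+
+```console
+marie@local$ doc.zih.tu-dresden.de/util/check-spelling.sh
+marie@local$ doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
+```
+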
+## Merging of Forked Repositories
+
+When you have forked the repository as mentioned above, the merging process differs a bit from
+internal merge requests. Because branches of forks are not automatically checked by CI, someone
+with at least developer access needs to perform some additional steps to incorporate the changes
+of your MR (see the sketch after this list):
+
+1. She informs you about the start of the merging process.
+1. She needs to review your changes to make sure that they are specific and do not introduce
+problems, as changes to the Dockerfile or any script could.
+1. She needs to create a branch in our repository. Let's call this "internal MR branch".
+1. She needs to change the target branch of your MR from "preview" to "internal MR branch".
+1. She needs to merge it.
+1. She needs to open another MR from "internal MR branch" to "preview" to check whether the changes
+pass the CI checks.
+1. She needs to fix any issues found by CI.
+1. She informs you about the MR or asks for your support while fixing the CI.
+
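+A rough sketch of the branch handling on the maintainer's side, with the "internal MR branch"
+named `internal-mr-branch` purely for illustration:
+
+```console
+marie@local$ git fetch origin
+marie@local$ git checkout -b internal-mr-branch origin/preview
+marie@local$ git push origin internal-mr-branch
+# now: retarget the fork's MR to internal-mr-branch, merge it, and open a new MR from
+# internal-mr-branch to preview so that the CI checks run
+```
+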
+When you follow our [content rules](content_rules.md) and
+[run checks locally](#run-the-proposed-checks-inside-container), you make this process
+faster.
 
 ## Tools to Ensure Quality
 
diff --git a/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md b/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
index 693229344e0eeee1d263899e906996b54cba207f..4a14f2d245a481e9a1a3cdfd2abccdd5a63efa4a 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/howto_contribute.md
@@ -4,7 +4,21 @@
 
     Ink is better than the best memory.
 
-In principle, there are three possible ways how to contribute to this documentation.
+Even though we try to cover all aspects of working with the ZIH systems and keep the documentation
+up to date, you might miss something. In principle, there are three ways you can contribute to
+this documentation, as outlined below.
+
+## Content Rules
+
+To ensure high-quality, consistent documentation and to make it easier for readers to
+understand all content, we set some [content rules](content_rules.md). Please follow these rules
+when contributing! Then, reviewing your changes takes less time and your improvements appear
+faster in the official documentation.
+
+!!! note
+
+    Each user is fully and solely responsible for the content he/she creates and must ensure
+    that he/she has the right to create it under applicable law.
 
 ## Contribute via Issue
 
@@ -38,9 +52,3 @@ used in the back-end. Using them should ensure that merge requests will not be b
 due to automatic checking.
 The page on [Contributing via local clone](contribute_container.md) provides you with the details
 about how to setup and use your local clone.
-
-## Content rules
-
-To ensure quality and to make it easier for readers to understand all content, we follow some
-[content rules](content_rules.md). If you follow these rules, you can be sure, that reviews of
-your changes take less time and your improvements appear faster on the official web site.
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
index 73322cf3031a2550ddec1223546b3b393579b8b5..894626208947186e48ba7d08b439cf6aace48655 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/intermediate_archive.md
@@ -20,13 +20,16 @@ Some more information:
 ## Access the Intermediate Archive
 
 For storing and restoring your data in/from the "Intermediate Archive" you can use the tool
-[Datamover](../data_transfer/datamover.md). To use the DataMover you have to login to ZIH systems.
+[Datamover](../data_transfer/datamover.md). To use the Datamover you have to login to ZIH systems.
 
 ### Store Data
 
 ```console
 marie@login$ dtcp -r /<directory> /archiv/<project or user>/<directory> # or
 marie@login$ dtrsync -av /<directory> /archiv/<project or user>/<directory>
+# example:
+marie@login$ dtcp -r /scratch/marie/results /archiv/marie/ # or
+marie@login$ dtrsync -av /scratch/marie/results /archiv/marie/results
 ```
 
 ### Restore Data
@@ -34,11 +37,16 @@ marie@login$ dtrsync -av /<directory> /archiv/<project or user>/<directory>
 ```console
 marie@login$ dtcp -r /archiv/<project or user>/<directory> /<directory> # or
 marie@login$ dtrsync -av /archiv/<project or user>/<directory> /<directory>
+# example:
+marie@login$ dtcp -r /archiv/marie/results /scratch/marie/ # or
+marie@login$ dtrsync -av /archiv/marie/results /scratch/marie/results
 ```
 
-### Examples
+!!! note "Listing files in archive"
 
-```console
-marie@login$ dtcp -r /scratch/rotscher/results /archiv/rotscher/ # or
-marie@login$ dtrsync -av /scratch/rotscher/results /archiv/rotscher/results
-```
+    The intermediate archive is not mounted on the login nodes, but only on the
+    [export nodes](../data_transfer/export_nodes.md).
+
+    In order to list your files in the archive, use the `dtls` command:
+
+    ```console
+    marie@login$ dtls /archiv/$USER/
+    ```
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/longterm_preservation.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/longterm_preservation.md
index 9a4c7e760282269792fbcb844935d37fd88f4bb3..3b1ad0c9c595fa4d09c0e113b65c82a71b274a35 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/longterm_preservation.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/longterm_preservation.md
@@ -74,13 +74,65 @@ Below are some examples:
 
 ## Where can I get more information about management of research data?
 
-Go to [http://www.forschungsdaten.org/en/](http://www.forschungsdaten.org/en/) to find more
-information about managing research data.
-
-## I want to store my research data at ZIH. How can I do that?
-
-You can use the following services for long-term preservation of research data:
-
- - [Long-term archive](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/backup_archiv/archivierung_am_zih)
- - [Long-term Archiving and Publication with OpARA (Open Access Repository and Archive)](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/backup_archiv/archivierung_am_zih#section-2-2)
- - [intermediate archive](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/backup_archiv/archivierung_am_zih#section-2-1)
+Please visit the wiki [forschungsdaten.org](https://www.forschungsdaten.org/en/) to learn more about
+all of the different aspects of research data management.
+
+For questions or individual consultations regarding research data management in general or any of
+its specific aspects, you can contact the
+[Service Center Research Data](https://tu-dresden.de/forschung-transfer/services-fuer-forschende/kontaktstelle-forschungsdaten?set_language=en)
+(Kontaktstelle Forschungsdaten) of TU Dresden.
+
+## I want to archive my research data at ZIH safely. How can I do that?
+
+ZIH offers two different services for archiving research data at TU Dresden. Both of them ensure
+high data safety by duplicating data internally at two separate locations and require some data
+preparation (e.g. packaging), but they serve different use cases:
+
+### Storing Very Infrequently Used Data During the Course of the Project
+
+The intermediate archive is a tape storage that is easily accessible as a directory
+(`/archiv/<HRSK-project>/` or `/archiv/<login>/`) via the
+[export nodes](../data_transfer/export_nodes.md) and the
+[Datamover tools](https://doc.zih.tu-dresden.de/data_transfer/datamover/), which you use to move
+your data there.
+For detailed information please visit the
+[ZIH intermediate archive documentation](https://tu-dresden.de/zih/dienste/service-katalog/arbeitsumgebung/backup_archiv/archivierung_am_zih#section-2-1).
+
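+For example, moving a directory of results into the project-related archive via the Datamover
+could look like this (paths and project name are placeholders):
+
+```console
+marie@login$ dtcp -r /scratch/marie/results /archiv/p_number_crunch/
+```
+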
+!!! note
+
+    Prefer the HRSK-project-related archive over the login-related archive, as it allows access
+    rights and responsibility to be shared across multiple researchers, which matters given the
+    common staff turnover in research.
+
+The use of the intermediate archive is usually limited to the duration of the corresponding
+research project. Afterwards, the data must be tidied up, removed, and submitted to a
+long-term repository (see next section).
+
+The intermediate archive is the preferred service when you keep large, mostly unused data volumes
+during the course of your research project, e.g. if you want or need to free storage capacity but
+cannot yet identify the datasets relevant for long-term archival.
+
+If you are able to identify complete and final datasets, which you probably won't use actively
+anymore, then the repositories described in the next section may be the more appropriate choice.
+
+### Archiving Data Beyond the Project Lifetime, for 10 Years and Above
+
+According to good scientific practice (cf.
+[DFG guidelines, #17](https://www.dfg.de/download/pdf/foerderung/rechtliche_rahmenbedingungen/gute_wissenschaftliche_praxis/kodex_gwp.pdf))
+and
+[TU Dresden research data guidelines](https://tu-dresden.de/tu-dresden/qualitaetsmanagement/ressourcen/dateien/wisprax/Leitlinien-fuer-den-Umgang-mit-Forschungsdaten-an-der-TU-Dresden.pdf),
+relevant research data needs to be archived for at least 10 years. The
+[OpARA service](https://opara.zih.tu-dresden.de/xmlui/) (Open Access Repository and Archive) is the
+joint research data repository service for Saxon universities to address this requirement.
+
+Data can be uploaded and, to keep the data understandable in the long term, additional
+metadata and a description must be added. Large datasets may optionally be imported beforehand. In
+this case, please contact the
+[TU Dresden Service Desk](mailto:servicedesk@tu-dresden.de?subject=OpARA:%20Data%20Import).
+Optionally, data can also be **published** by OpARA. To ensure data quality, data submissions
+undergo a review process.
+
+Beyond OpARA, it is also recommended to use discipline-specific data repositories for data
+publications. Usually, these are well known within a scientific community and offer better-fitting
+options for data description and classification. Please visit [re3data.org](https://re3data.org)
+to look up a suitable one for your discipline.
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md b/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
index 0bd3fbe88e6a7957232a04a98c2c5eeb33a245ad..28aba7bbfdcec8411f6510061d509c949d128f34 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/datamover.md
@@ -1,7 +1,7 @@
 # Datamover - Data Transfer Inside ZIH Systems
 
-With the **datamover**, we provide a special data transfer machine for transferring data with best
-transfer speed between the filesystems of ZIH systems. The datamover machine is not accessible
+With the **Datamover**, we provide a special data transfer machine for transferring data with best
+transfer speed between the filesystems of ZIH systems. The Datamover machine is not accessible
 through SSH as it is dedicated to data transfers. To move or copy files from one filesystem to
 another filesystem, you have to use the following commands:
 
@@ -37,7 +37,7 @@ To identify the mount points of the different filesystems on the data transfer m
 |                    | `/warm_archive/ws`   | `/warm_archive/ws`                 |
 |                    | `/home`              | `/home`                            |
 |                    | `/projects`          | `/projects`                        |
-| **Archive**        |                      | `/archive`                         |
+| **Archive**        |                      | `/archiv`                          |
 | **Group storage**  |                      | `/grp/<group storage>`             |
 
 ## Usage of Datamover
@@ -45,7 +45,7 @@ To identify the mount points of the different filesystems on the data transfer m
 !!! example "Copying data from `/beegfs/global0` to `/projects` filesystem."
 
     ``` console
-    marie@login$ dtcp -r /beegfs/global0/ws/marie-workdata/results /projects/p_marie/.
+    marie@login$ dtcp -r /beegfs/global0/ws/marie-workdata/results /projects/p_number_crunch/.
     ```
 
 !!! example "Moving data from `/beegfs/global0` to `/warm_archive` filesystem."
@@ -57,7 +57,7 @@ To identify the mount points of the different filesystems on the data transfer m
 !!! example "Archive data from `/beegfs/global0` to `/archiv` filesystem."
 
     ``` console
-    marie@login$ dttar -czf /archiv/p_marie/results.tgz /beegfs/global0/ws/marie-workdata/results
+    marie@login$ dttar -czf /archiv/p_number_crunch/results.tgz /beegfs/global0/ws/marie-workdata/results
     ```
 
 !!! warning
@@ -66,7 +66,7 @@ To identify the mount points of the different filesystems on the data transfer m
 !!! note
     The [warm archive](../data_lifecycle/warm_archive.md) and the `projects` filesystem are not
     writable from within batch jobs.
-    However, you can store the data in the `warm_archive` using the datamover.
+    However, you can store the data in the `warm_archive` using the Datamover.
 
 ## Transferring Files Between ZIH Systems and Group Drive
 
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/overview.md b/doc.zih.tu-dresden.de/docs/data_transfer/overview.md
index a8af87cc55814ca0afe5b30193589cf1905ce356..6e8a1bf1cc12e36e4aa15bd46b9eaf84e24171bc 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/overview.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/overview.md
@@ -14,9 +14,9 @@ copy data to/from ZIH systems. Please follow the link to the documentation on
 
 ## Data Transfer Inside ZIH Systems: Datamover
 
-The recommended way for data transfer inside ZIH Systems is the **datamover**. It is a special
+The recommended way for data transfer inside ZIH Systems is the **Datamover**. It is a special
 data transfer machine that provides the best transfer speed. To load, move, copy etc. files from one
 filesystem to another filesystem, you have to use commands prefixed with `dt`: `dtcp`, `dtwget`,
 `dtmv`, `dtrm`, `dtrsync`, `dttar`, `dtls`. These commands submit a job to the data transfer
 machines that execute the selected command.  Please refer to the detailed documentation regarding the
-[datamover](datamover.md).
+[Datamover](datamover.md).
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
index 38d6686d7a655c1c5d7161d6607be9d6f55d8b5c..180ed1d62febd311fd5cddd739d4f086825bc5b7 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
@@ -63,9 +63,13 @@ To use it, first add a `dmtcp_launch` before your application call in your batch
 of MPI applications, you have to add the parameters `--ib --rm` and put it between `srun` and your
 application call, e.g.:
 
-```bash
-srun dmtcp_launch --ib --rm ./my-mpi-application
-```
+???+ example "my_script.sbatch"
+
+    ```bash
+    [...]
+
+    srun dmtcp_launch --ib --rm ./my-mpi-application
+    ```
 
 !!! note
 
@@ -79,7 +83,7 @@ Then just substitute your usual `sbatch` call with `dmtcp_sbatch` and be sure to
 and `-i` parameters (don't forget you need to have loaded the `dmtcp` module).
 
 ```console
-marie@login$ dmtcp_sbatch --time 2-00:00:00 --interval 28000,800 my_batchfile.sh
+marie@login$ dmtcp_sbatch --time 2-00:00:00 --interval 28000,800 my_script.sbatch
 ```
 
 With `-t, --time` you set the total runtime of your calculations. This will be replaced in the batch
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
index b6ec206cf1950f416e81318daab0c9e0e88ba45a..ebfd52972ac785b851a0c02758904a68dd09af8f 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
@@ -109,7 +109,7 @@ for `sbatch/srun` in this case is `--gres=gpu:[NUM_PER_NODE]` (where `NUM_PER_NO
     #SBATCH --cpus-per-task=6      # use 6 threads per task
     #SBATCH --gres=gpu:1           # use 1 GPU per node (i.e. use one GPU per task)
     #SBATCH --time=01:00:00        # run for 1 hour
-    #SBATCH --account=p_marie      # account CPU time to project p_marie
+    #SBATCH --account=p_number_crunch      # account CPU time to project p_number_crunch
 
     srun ./your/cuda/application   # start you application (probably requires MPI to use both nodes)
     ```
diff --git a/doc.zih.tu-dresden.de/docs/legal_notice.md b/doc.zih.tu-dresden.de/docs/legal_notice.md
index 3c9432ecb16eaa0a2fae1a40da4217f92da8a454..e5029584f538f8d909d4bd6f0cf786b73e9872df 100644
--- a/doc.zih.tu-dresden.de/docs/legal_notice.md
+++ b/doc.zih.tu-dresden.de/docs/legal_notice.md
@@ -30,4 +30,4 @@ E-Mail: zih@tu-dresden.de
 This documentation and the repository have two licenses:
 
 * All documentation is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/).
-* All software components are licensed under [MIT license](https://opensource.org/licenses/MIT).
+* All software components are licensed under [MIT license](license_mit.txt).
diff --git a/doc.zih.tu-dresden.de/docs/license_mit.txt b/doc.zih.tu-dresden.de/docs/license_mit.txt
new file mode 100644
index 0000000000000000000000000000000000000000..02c33cdf3c7a4f4ce54a670efc885018868f3f26
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/license_mit.txt
@@ -0,0 +1,16 @@
+Copyright 2021, 2022 TU Dresden / ZIH
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
+OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/doc.zih.tu-dresden.de/docs/software/building_software.md b/doc.zih.tu-dresden.de/docs/software/building_software.md
index c83932a16c1c0227cb160d4853cd1815626fc404..73952b06efde809b7e91e936be0fbf9b240f88a8 100644
--- a/doc.zih.tu-dresden.de/docs/software/building_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/building_software.md
@@ -17,16 +17,16 @@ For instance, when using CMake and keeping your source in `/projects`, you could
 
 ```console
 # save path to your source directory:
-marie@login$ export SRCDIR=/projects/p_marie/mysource
+marie@login$ export SRCDIR=/projects/p_number_crunch/mysource
 
 # create a build directory in /scratch:
-marie@login$ mkdir /scratch/p_marie/mysoftware_build
+marie@login$ mkdir /scratch/p_number_crunch/mysoftware_build
 
 # change to build directory within /scratch:
-marie@login$ cd /scratch/p_marie/mysoftware_build
+marie@login$ cd /scratch/p_number_crunch/mysoftware_build
 
 # create Makefiles:
-marie@login$ cmake -DCMAKE_INSTALL_PREFIX=/projects/p_marie/mysoftware $SRCDIR
+marie@login$ cmake -DCMAKE_INSTALL_PREFIX=/projects/p_number_crunch/mysoftware $SRCDIR
 
 # build in a job:
 marie@login$ srun --mem-per-cpu=1500 --cpus-per-task=12 --pty make -j 12
diff --git a/doc.zih.tu-dresden.de/docs/software/containers.md b/doc.zih.tu-dresden.de/docs/software/containers.md
index 0d96479e335b61d8e004710cec12e32d366091ab..be74caec03c6ffcf098eade46f4c3adb313f8754 100644
--- a/doc.zih.tu-dresden.de/docs/software/containers.md
+++ b/doc.zih.tu-dresden.de/docs/software/containers.md
@@ -46,7 +46,7 @@ instructions from the official documentation to install Singularity.
 1. Check if `go` is installed by executing `go version`.  If it is **not**:
 
     ```console
-    marie@local$ wget <https://storage.googleapis.com/golang/getgo/installer_linux> && chmod +x
+    marie@local$ wget 'https://storage.googleapis.com/golang/getgo/installer_linux' && chmod +x
     installer_linux && ./installer_linux && source $HOME/.bash_profile
     ```
 
@@ -88,7 +88,9 @@ instructions from the official documentation to install Singularity.
 There are two possibilities:
 
 1. Create a new container on your local workstation (where you have the necessary privileges), and
-   then copy the container file to ZIH systems for execution.
+   then copy the container file to ZIH systems for execution. For this, you also have to install
+   [Singularity](https://sylabs.io/guides/3.0/user-guide/quick_start.html#quick-installation-steps)
+   on your local workstation.
 1. You can, however, import an existing container from, e.g., Docker.
 
 Both methods are outlined in the following.
@@ -103,10 +105,11 @@ You can create a new custom container on your workstation, if you have root righ
     which is different to the x86 architecture in common computers/laptops. For that you can use
     the [VM Tools](singularity_power9.md).
 
-Creating a container is done by writing a **definition file** and passing it to
+Creating a container is done by writing a definition file, such as `myDefinition.def`, and passing
+it to `singularity` via
 
 ```console
-marie@local$ singularity build myContainer.sif <myDefinition.def>
+marie@local$ singularity build myContainer.sif myDefinition.def
 ```
 
 A definition file contains a bootstrap
@@ -167,7 +170,7 @@ https://github.com/singularityware/singularity/tree/master/examples.
 You can import an image directly from the Docker repository (Docker Hub):
 
 ```console
-marie@local$ singularity build my-container.sif docker://ubuntu:latest
+marie@login$ singularity build my-container.sif docker://ubuntu:latest
 ```
 
 Creating a singularity container directly from a local docker image is possible but not
@@ -175,20 +178,20 @@ recommended. The steps are:
 
 ```console
 # Start a docker registry
-$ docker run -d -p 5000:5000 --restart=always --name registry registry:2
+marie@local$ docker run -d -p 5000:5000 --restart=always --name registry registry:2
 
 # Push local docker container to it
-$ docker tag alpine localhost:5000/alpine
-$ docker push localhost:5000/alpine
+marie@local$ docker tag alpine localhost:5000/alpine
+marie@local$ docker push localhost:5000/alpine
 
 # Create def file for singularity like this...
-$ cat example.def
+marie@local$ cat example.def
 Bootstrap: docker
 Registry: http://localhost:5000
 From: alpine
 
 # Build singularity container
-$ singularity build --nohttps alpine.sif example.def
+marie@local$ singularity build --nohttps alpine.sif example.def
 ```
 
 #### Start from a Dockerfile
@@ -284,7 +287,7 @@ While the `shell` command can be useful for tests and setup, you can also launch
 inside the container directly using "exec":
 
 ```console
-marie@login$ singularity exec my-container.img /opt/myapplication/bin/run_myapp
+marie@login$ singularity exec my-container.sif /opt/myapplication/bin/run_myapp
 ```
 
 This can be useful if you wish to create a wrapper script that transparently calls a containerized
@@ -299,7 +302,7 @@ if [ "z$X" = "z" ] ; then
   exit 1
 fi
 
-singularity exec /scratch/p_myproject/my-container.sif /opt/myapplication/run_myapp "$@"
+singularity exec /projects/p_number_crunch/my-container.sif /opt/myapplication/run_myapp "$@"
 ```
 
 The better approach is to use `singularity run`, which executes whatever was set in the `%runscript`
@@ -325,20 +328,20 @@ singularity build my-container.sif example.def
 Then you can run your application via
 
 ```console
-singularity run my-container.sif first_arg 2nd_arg
+marie@login$ singularity run my-container.sif first_arg 2nd_arg
 ```
 
 Alternatively you can execute the container directly which is equivalent:
 
 ```console
-./my-container.sif first_arg 2nd_arg
+marie@login$ ./my-container.sif first_arg 2nd_arg
 ```
 
 With this you can even masquerade an application with a singularity container as if it was an actual
 program by naming the container just like the binary:
 
 ```console
-mv my-container.sif myCoolAp
+marie@login$ mv my-container.sif myCoolAp
 ```
 
 ### Use-Cases
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics.md b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
index 036a1c7a454faf84b8352f5fb79dbfc09343cb89..c3cb4afe1be3d613a915e42f1db1020919ecfa3c 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
@@ -29,7 +29,7 @@ can be installed individually by each user. If possible, the use of
 recommended (e.g. for Python). Likewise, software can be used within [containers](containers.md).
 
 For the transfer of larger amounts of data into and within the system, the
-[export nodes and datamover](../data_transfer/overview.md) should be used.
+[export nodes and Datamover](../data_transfer/overview.md) should be used.
 Data is stored in the [workspaces](../data_lifecycle/workspaces.md).
 Software modules or virtual environments can also be installed in workspaces to enable
 collaborative work even within larger groups.
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
index a7d2781669fc909e0628c6518825542cf8f7ced8..cf8c1b559f4f496a729388a1e1f4353cdcd14733 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
@@ -219,7 +219,7 @@ from dask_jobqueue import SLURMCluster
 cluster = SLURMCluster(queue='alpha',
   cores=8,
   processes=2,
-  project='p_marie',
+  project='p_number_crunch',
   memory="8GB",
   walltime="00:30:00")
 
@@ -242,7 +242,7 @@ from dask import delayed
 cluster = SLURMCluster(queue='alpha',
   cores=8,
   processes=2,
-  project='p_marie',
+  project='p_number_crunch',
   memory="80GB",
   walltime="00:30:00",
   extra=['--resources gpu=1'])
@@ -294,7 +294,7 @@ for the Monte-Carlo estimation of Pi.
 
     #create a Slurm cluster, please specify your project
 
-    cluster = SLURMCluster(queue='alpha', cores=2, project='p_marie', memory="8GB", walltime="00:30:00", extra=['--resources gpu=1'], scheduler_options={"dashboard_address": f":{portdash}"})
+    cluster = SLURMCluster(queue='alpha', cores=2, project='p_number_crunch', memory="8GB", walltime="00:30:00", extra=['--resources gpu=1'], scheduler_options={"dashboard_address": f":{portdash}"})
 
     #submit the job to the scheduler with the number of nodes (here 2) requested:
 
diff --git a/doc.zih.tu-dresden.de/docs/software/fem_software.md b/doc.zih.tu-dresden.de/docs/software/fem_software.md
index 3f9bf79d54d36711560054101536c82dfbbfe000..8b8eb4cfe10c4476e48c4b30ac7f16b83589a38d 100644
--- a/doc.zih.tu-dresden.de/docs/software/fem_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/fem_software.md
@@ -59,7 +59,7 @@ Slurm or [writing job files](../jobs_and_resources/slurm.md#job-files).
     #SBATCH --job-name=yyyy         # give a name, what ever you want
     #SBATCH --mail-type=END,FAIL    # send email when the job finished or failed
     #SBATCH --mail-user=<name>@mailbox.tu-dresden.de  # set your email
-    #SBATCH --account=p_marie       # charge compute time to project p_marie
+    #SBATCH --account=p_number_crunch       # charge compute time to project p_number_crunch
 
 
     # Abaqus has its own MPI
diff --git a/doc.zih.tu-dresden.de/docs/software/machine_learning.md b/doc.zih.tu-dresden.de/docs/software/machine_learning.md
index 1f40e6199e88f6aa4fd68037a0f4b32113001913..e293b007a9c07fbaf41ba3ec7ce25f29024f44d7 100644
--- a/doc.zih.tu-dresden.de/docs/software/machine_learning.md
+++ b/doc.zih.tu-dresden.de/docs/software/machine_learning.md
@@ -155,7 +155,7 @@ The following HPC related software is installed on all nodes:
 There are many different datasets designed for research purposes. If you would like to download some
 of them, keep in mind that many machine learning libraries have direct access to public datasets
 without downloading it, e.g. [TensorFlow Datasets](https://www.tensorflow.org/datasets). If you
-still need to download some datasets use [datamover](../data_transfer/datamover.md) machine.
+still need to download some datasets, use the [Datamover](../data_transfer/datamover.md) machine.
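+
+For example, a download via the Datamover might look like this (URL, target path, and `wget`
+options are illustrative only):
+
+```console
+marie@login$ dtwget -P /warm_archive/ws/marie-datasets https://example.org/dataset.tar.gz
+```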
 
 ### The ImageNet Dataset
 
diff --git a/doc.zih.tu-dresden.de/docs/software/papi.md b/doc.zih.tu-dresden.de/docs/software/papi.md
index 7460e3deef48bdf991e1b6fda36332cf0fc149b0..d8108bba3048da33661e0dd320a2807a0dd001aa 100644
--- a/doc.zih.tu-dresden.de/docs/software/papi.md
+++ b/doc.zih.tu-dresden.de/docs/software/papi.md
@@ -105,11 +105,11 @@ multiple events, please check which events can be measured concurrently using th
     The PAPI tools must be run on the compute node, using an interactive shell or job.
 
 !!! example "Example: Determine the events on the partition `romeo` from a login node"
-    Let us assume, that you are in project `p_marie`. Then, use the following commands:
+    Let us assume that you are in project `p_number_crunch`. Then, use the following commands:
 
     ```console
     marie@login$ module load PAPI
-    marie@login$ salloc --account=p_marie --partition=romeo
+    marie@login$ salloc --account=p_number_crunch --partition=romeo
     [...]
     marie@compute$ srun papi_avail
     marie@compute$ srun papi_native_avail
@@ -121,12 +121,12 @@ Instrument your application with either the high-level or low-level API. Load th
 compile your application against the  PAPI library.
 
 !!! example
-    Assuming that you are in project `p_marie`, use the following commands:
+    Assuming that you are in project `p_number_crunch`, use the following commands:
 
     ```console
     marie@login$ module load PAPI
     marie@login$ gcc app.c -o app -lpapi
-    marie@login$ salloc --account=p_marie --partition=romeo
+    marie@login$ salloc --account=p_number_crunch --partition=romeo
     marie@compute$ srun ./app
     [...]
     # Exit with Ctrl+D
diff --git a/doc.zih.tu-dresden.de/docs/software/private_modules.md b/doc.zih.tu-dresden.de/docs/software/private_modules.md
index 6dd2d3d0498d78ca188c9af1af272fa3e6e6537d..00982700ec5bc35fe757660897cc1631453a820f 100644
--- a/doc.zih.tu-dresden.de/docs/software/private_modules.md
+++ b/doc.zih.tu-dresden.de/docs/software/private_modules.md
@@ -27,12 +27,12 @@ marie@compute$ cd privatemodules/<sw_name>
 ```
 
 Project private module files for software that can be used by all members of your group should be
-located in your global projects directory, e.g., `/projects/p_marie/privatemodules`. Thus, create
+located in your global projects directory, e.g.,
+`/projects/p_number_crunch/privatemodules`. Thus, create
 this directory:
 
 ```console
-marie@compute$ mkdir --verbose --parents /projects/p_marie/privatemodules/<sw_name>
-marie@compute$ cd /projects/p_marie/privatemodules/<sw_name>
+marie@compute$ mkdir --verbose --parents /projects/p_number_crunch/privatemodules/<sw_name>
+marie@compute$ cd /projects/p_number_crunch/privatemodules/<sw_name>
 ```
 
 !!! note
@@ -110,7 +110,7 @@ marie@login$ module use $HOME/privatemodules
 for your private module files and
 
 ```console
-marie@login$ module use /projects/p_marie/privatemodules
+marie@login$ module use /projects/p_number_crunch/privatemodules
 ```
 
 for group private module files, respectively.
diff --git a/doc.zih.tu-dresden.de/docs/software/scs5_software.md b/doc.zih.tu-dresden.de/docs/software/scs5_software.md
index 2907bb9f35937ce7ca5573b0c1752989c7fa3d95..73311c7fcc78001ad2dc201c19c0eb657397b33a 100644
--- a/doc.zih.tu-dresden.de/docs/software/scs5_software.md
+++ b/doc.zih.tu-dresden.de/docs/software/scs5_software.md
@@ -49,7 +49,7 @@ still work under SCS5. That's why those modenv versions are hidden.
 Example:
 
 ```Bash
-$ ml modenv/classic ansys/19.0
+marie@compute$ ml modenv/classic ansys/19.0
 
 The following have been reloaded with a version change:
   1) modenv/scs5 => modenv/classic
diff --git a/doc.zih.tu-dresden.de/util/check-spelling.sh b/doc.zih.tu-dresden.de/util/check-spelling.sh
index f6b3fca83d71283a6430f260f5a75bdbca3a7e2a..d97f93e20df73b9ea47e501e7196f605f0cacd48 100755
--- a/doc.zih.tu-dresden.de/util/check-spelling.sh
+++ b/doc.zih.tu-dresden.de/util/check-spelling.sh
@@ -7,7 +7,7 @@ basedir=`dirname "$scriptpath"`
 basedir=`dirname "$basedir"`
 wordlistfile=$(realpath $basedir/wordlist.aspell)
 branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}"
-files_to_skip=(doc.zih.tu-dresden.de/docs/accessibility.md doc.zih.tu-dresden.de/docs/data_protection_declaration.md doc.zih.tu-dresden.de/docs/legal_notice.md)
+files_to_skip=(doc.zih.tu-dresden.de/docs/accessibility.md doc.zih.tu-dresden.de/docs/data_protection_declaration.md doc.zih.tu-dresden.de/docs/legal_notice.md doc.zih.tu-dresden.de/docs/access/key_fingerprints.md)
 aspellmode=
 if aspell dump modes | grep -q markdown; then
   aspellmode="--mode=markdown"
diff --git a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
index b2f8b3478d7d8aaa2247b392c97dc09d09348743..cacde0d9ee84f903a55d3109dcd330d3e43184ad 100755
--- a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
+++ b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
@@ -46,9 +46,9 @@ i	^[ |]*|$
 Avoid spaces at end of lines.
 doc.zih.tu-dresden.de/docs/accessibility.md
 i	[[:space:]]$
-When referencing projects, please use p_marie for consistency.
+When referencing projects, please use p_number_crunch for consistency.
 
-i	\<p_	p_marie
+i	\<p_	p_number_crunch
 Avoid \`home\`. Use home without backticks instead.
 
 i	\`home\`
diff --git a/doc.zih.tu-dresden.de/wordlist.aspell b/doc.zih.tu-dresden.de/wordlist.aspell
index 8a5013fe00988ccdc5b1500520d4151b71af6527..a808318d64a38981956ed1ac5fa5a7d1c05e703d 100644
--- a/doc.zih.tu-dresden.de/wordlist.aspell
+++ b/doc.zih.tu-dresden.de/wordlist.aspell
@@ -51,7 +51,7 @@ Dask
 dataframes
 DataFrames
 Dataheap
-datamover
+Datamover
 DataParallel
 dataset
 Dataset