diff --git a/Dockerfile b/Dockerfile
index a001d027e7680ec4bdaf71693aa091b28be70748..731e831c9b2fc1ff1068ae2b2a80c04bbf0039c7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM python:3.8-buster
+FROM python:3.8-bullseye
 
 ########
 # Base #
diff --git a/doc.zih.tu-dresden.de/docs/application/project_request_form.md b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
index 07ed2eeb7d86c1041c55ae541a6a175f9df45d24..7a50b2274b2167e5d2efd89c7a4b1725074e8990 100644
--- a/doc.zih.tu-dresden.de/docs/application/project_request_form.md
+++ b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
@@ -9,10 +9,10 @@ type="frame" align="right" caption="picture 1: login screen" width="170"
 zoom="on
 ">%ATTACHURL%/request_step1_b.png</span>
 
-The first step is asking for the personal informations of the requester.
+The first step asks for the personal information of the requester.
 **That's you**, not the leader of this project! \<br />If you have an
 ZIH-Login, you can use it \<sup>\[Pic 1\]\</sup>. If not, you have to
-fill in the whole informations \<sup>\[Pic.:2\]\</sup>. <span
+fill in all the information \<sup>\[Pic.:2\]\</sup>. <span
 class="twiki-macro IMAGE">clear</span>
 
 ## second step (project details)
@@ -27,8 +27,8 @@ general project Details.\<br />Any project have:
     -   Projects starts at the first of a month and ends on the last day
         of a month. So you are not able to send on the second of a month
         a project request which start in this month.
-    -   The approval is for a maximum of one year. Be carfull: a
-        duratoin from "May, 2013" till "May 2014" has 13 month.
+    -   The approval is for a maximum of one year. Be careful: a
+        duration from "May, 2013" till "May 2014" has 13 month.
 -   a selected science, according to the DFG:
     <http://www.dfg.de/dfg_profil/gremien/fachkollegien/faecher/index.jsp>
 -   a sponsorship
@@ -45,7 +45,7 @@ general project Details.\<br />Any project have:
 <span class="twiki-macro IMAGE" type="frame" align="right"
 caption="picture 4: hardware" width="170" zoom="on
 ">%ATTACHURL%/request_step3_machines.png</span> This step inquire the
-required hardware. You can find the specifications [here](../archive/hardware.md).
+required hardware. You can find the specifications [here]**todo fix link**
 \<br />For your guidance:
 
 -   gpu => taurus
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware.md b/doc.zih.tu-dresden.de/docs/archive/hardware.md
deleted file mode 100644
index 624b9b745fcd6adb67bb8984f8d0f648c8224faf..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/docs/archive/hardware.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Hardware
-
-Here, you can find basic information about the hardware installed at ZIH. We try to keep this list
-up-to-date.
-
-- [BULL HPC-Cluster Taurus](taurus_ii.md)
-- [SGI Ultraviolet (UV)](hardware_venus.md)
-
-Hardware hosted by ZIH:
-
-Former systems
-
-- [PC-Farm Deimos](hardware_deimos.md)
-- [SGI Altix](hardware_altix.md)
-- [PC-Farm Atlas](hardware_atlas.md)
-- [PC-Cluster Triton](hardware_triton.md)
-- [HPC-Windows-Cluster Titan](hardware_titan.md)
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware_altix.md b/doc.zih.tu-dresden.de/docs/archive/hardware_altix.md
deleted file mode 100644
index 202ab10bda1d8829ede7a1fc52da9bf6db292a78..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/docs/archive/hardware_altix.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# HPC Component SGI Altix
-
-The SGI Altix 4700 is a shared memory system with dual core Intel
-Itanium 2 CPUs (Montecito) operated by the Linux operating system SuSE
-SLES 10 with a 2.6 kernel. Currently, the following Altix partitions are
-installed at ZIH:
-
-|Name|Total Cores|Compute Cores|Memory per Core|
-|:----|:----|:----|:----|
-| Mars |384 |348 |1 GB|
-|Jupiter |512 |506 |4 GB|
-|Saturn |512 |506 |4 GB|
-|Uranus |512 |506|4 GB|
-|Neptun |128 |128 |1 GB|
-
-The jobs for these partitions (except Neptun) are scheduled by the [Platform LSF](platform_lsf.md)
-batch system running on `mars.hrsk.tu-dresden.de`. The actual placement of a submitted job may
-depend on factors like memory size, number of processors, time limit.
-
-## Filesystems
-
-All partitions share the same CXFS filesystems `/work` and `/fastfs`.
-
-## ccNuma Architecture
-
-The SGI Altix has a ccNUMA architecture, which stands for Cache Coherent Non-Uniform Memory Access.
-It can be considered as a SM-MIMD (*shared memory - multiple instruction multiple data*) machine.
-The SGI ccNuma system has the following properties:
-
-- Memory is physically distributed but logically shared
-- Memory is kept coherent automatically by hardware.
-- Coherent memory: memory is always valid (caches hold copies)
-- Granularity is L3 cacheline (128 B)
-- Bandwidth of NumaLink4 is 6.4 GB/s
-
-The ccNuma is a compromise between a distributed memory system and a flat symmetric multi processing
-machine (SMP). Altough the memory is shared, the access properties are not the same.
-
-## Compute Module
-
-The basic compute module of an Altix system is shown below.
-
-|                                                                                                                                                               |
-|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| \<img src="%ATTACHURLPATH%/altix_brick_web.png" alt="altix_brick_web.png" width='312' height='192' />\<CAPTION ALIGN="BOTTOM">Altix compute blade \</CAPTION> |
-
-It consists of one dual core Intel Itanium 2 "Montecito" processor, the
-local memory of 4 GB (2 GB on `Mars`), and the communication component,
-the so-called SHUB. All resources are shared by both cores. They have a
-common front side bus, so that accumulated memory bandwidth for both is
-not higher than for just one core.
-
-The SHUB connects local and remote ressources. Via the SHUB and NUMAlink
-all CPUs can access remote memory in the whole system. Naturally, the
-fastest access provides local memory. There are some hints and commands
-that may help you to get optimal memory allocation and process placement
-). Four of these blades are grouped together with a NUMA router in a
-compute brick. All bricks are connected with NUMAlink4 in a
-"fat-tree"-topology.
-
-|                                                                                                                                                                              |
-|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| \<img src="%ATTACHURLPATH%/memory_access_web.png" alt="memory_access_web.png" width='450' />\<CAPTION align="bottom">Remote memory access via SHUBs and NUMAlink \</CAPTION> |
-
-## CPU
-
-The current SGI Altix is based on the dual core Intel Itanium 2
-processor (codename "Montecito"). One core has the following basic
-properties:
-
-|                                     |                            |
-|-------------------------------------|----------------------------|
-| clock rate                          | 1.6 GHz                    |
-| integer units                       | 6                          |
-| floating point units (multiply-add) | 2                          |
-| peak performance                    | 6.4 GFLOPS                 |
-| L1 cache                            | 2 x 16 kB, 1 clock latency |
-| L2 cache                            | 256 kB, 5 clock latency    |
-| L3 cache                            | 9 MB, 12 clock latency     |
-| front side bus                      | 128 bit x 200 MHz          |
-
-The theoretical peak performance of all Altix partitions is hence about 13.1 TFLOPS.
-
-The processor has hardware support for efficient software pipelining.  For many scientific
-applications it provides a high sustained performance exceeding the performance of RISC CPUs with
-similar peak performance. On the down side is the fact that the compiler has to explicitely discover
-and exploit the parallelism in the application.
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware_atlas.md b/doc.zih.tu-dresden.de/docs/archive/hardware_atlas.md
deleted file mode 100644
index 62a81ae538fcc40a1664483e1d5353b57ac3e6d1..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/docs/archive/hardware_atlas.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# MEGWARE PC-Farm Atlas
-
-The PC farm `Atlas` is a heterogenous cluster based on multicore chips
-AMD Opteron 6274 ("Bulldozer"). The nodes are operated by the Linux
-operating system SuSE SLES 11 with a 2.6 kernel. Currently, the
-following hardware is installed:
-
-| CPUs |AMD Opteron 6274 |
-| number of cores | 5120 |
-|th. peak performance | 45 TFlops |
-|compute nodes | 4-way nodes *Saxonid* with 64 cores |
-|nodes with 64 GB RAM | 48 |
-|nodes with 128 GB RAM | 12 |
-|nodes with 512 GB RAM | 8 |
-
-Mars and Deimos users: Please read the [migration hints](migrate_to_atlas.md).
-
-All nodes share the `/home` and `/fastfs` file system with our other HPC systems. Each
-node has 180 GB local disk space for scratch mounted on `/tmp` . The jobs for the compute nodes are
-scheduled by the [Platform LSF](platform_lsf.md) batch system from the login nodes
-`atlas.hrsk.tu-dresden.de` .
-
-A QDR Infiniband interconnect provides the communication and I/O infrastructure for low latency /
-high throughput data traffic.
-
-Users with a login on the [SGI Altix](hardware_altix.md) can access their home directory via NFS
-below the mount point `/hpc_work`.
-
-## CPU AMD Opteron 6274
-
-| Clock rate | 2.2 GHz |
-| cores | 16 |
-| L1 data cache | 16 KB per core |
-| L1 instruction cache | 64 KB shared in a *module* (i.e. 2 cores) |
-| L2 cache | 2 MB per module |
-| L3 cache | 12 MB total, 6 MB shared between 4 modules = 8 cores |
-| FP units | 1 per module (supports fused multiply-add) |
-| th. peak performance | 8.8 GFlops per core (w/o turbo) |
-
-The CPU belongs to the x86_64 family. Since it is fully capable of
-running x86-code, one should compare the performances of the 32 and 64
-bit versions of the same code.
-
-For more architectural details, see the
-[AMD Bulldozer block diagram](http://upload.wikimedia.org/wikipedia/commons/e/ec/AMD_Bulldozer_block_diagram_%288_core_CPU%29.PNG)
-and [topology of Atlas compute nodes] **todo** %ATTACHURL%/Atlas_Knoten.pdf.
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware_venus.md b/doc.zih.tu-dresden.de/docs/archive/hardware_venus.md
deleted file mode 100644
index be90985eace893cbf28753d5fbd2463402338e67..0000000000000000000000000000000000000000
--- a/doc.zih.tu-dresden.de/docs/archive/hardware_venus.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# SGI UV2000 (venus)
-
-The SGI UV2000 is a shared memory system based on Intel Sandy Bridge
-processors. It is operated by the Linux operating system SLES 11 SP 3
-with a kernel version 3.x.
-
-|                            |       |
-|----------------------------|-------|
-| Number of CPU sockets      | 64    |
-| Physical cores per sockets | 8     |
-| Total number of cores      | 512   |
-| Total memory               | 8 TiB |
-
-From our experience, most parallel applications benefit from using the
-additional hardware hyperthreads.
-
-## Filesystems
-
-Venus uses the same HOME file system as all our other HPC installations.
-For computations, please use `/scratch`.
diff --git a/Compendium_attachments/HardwareAtlas/Atlas_Knoten.pdf b/doc.zih.tu-dresden.de/docs/archive/misc/Atlas_Knoten.pdf
similarity index 100%
rename from Compendium_attachments/HardwareAtlas/Atlas_Knoten.pdf
rename to doc.zih.tu-dresden.de/docs/archive/misc/Atlas_Knoten.pdf
diff --git a/Compendium_attachments/HardwareAltix/altix_brick_web.png b/doc.zih.tu-dresden.de/docs/archive/misc/altix_brick_web.png
similarity index 100%
rename from Compendium_attachments/HardwareAltix/altix_brick_web.png
rename to doc.zih.tu-dresden.de/docs/archive/misc/altix_brick_web.png
diff --git a/Compendium_attachments/HardwareAltix/memory_access_web.png b/doc.zih.tu-dresden.de/docs/archive/misc/memory_access_web.png
similarity index 100%
rename from Compendium_attachments/HardwareAltix/memory_access_web.png
rename to doc.zih.tu-dresden.de/docs/archive/misc/memory_access_web.png
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_altix.md b/doc.zih.tu-dresden.de/docs/archive/system_altix.md
index d3ebdbbe554d5aa3f7dcda460d4831974a589744..951b06137a599fc95239e5d50144fd2fa205e096 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_altix.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_altix.md
@@ -1,12 +1,98 @@
 # SGI Altix
 
-**This page is deprecated! The SGI Atlix is a former system!**
+!!! warning
 
-The SGI Altix is shared memory system for large parallel jobs using up to 2000 cores in parallel (
-[information on the hardware](hardware_altix.md)). It's partitions are Mars (login), Jupiter, Saturn,
-Uranus, and Neptun (interactive).
+    **This page is deprecated! The SGI Altix is a former system!**
 
-## Compiling Parallel Applications
+## System
+
+The SGI Altix 4700 is a shared memory system with dual core Intel Itanium 2 CPUs (Montecito)
+operated by the Linux operating system SUSE SLES 10 with a 2.6 kernel. Currently, the following
+Altix partitions are installed at ZIH:
+
+|Name|Total Cores|Compute Cores|Memory per Core|
+|:----|:----|:----|:----|
+| Mars |384 |348 |1 GB|
+|Jupiter |512 |506 |4 GB|
+|Saturn |512 |506 |4 GB|
+|Uranus |512 |506|4 GB|
+|Neptun |128 |128 |1 GB|
+
+The jobs for these partitions (except Neptun) are scheduled by the [Platform LSF](platform_lsf.md)
+batch system running on `mars.hrsk.tu-dresden.de`. The actual placement of a submitted job may
+depend on factors like memory size, number of processors, time limit.
+
+### File Systems
+
+All partitions share the same CXFS file systems `/work` and `/fastfs`.
+
+### ccNUMA Architecture
+
+The SGI Altix has a ccNUMA architecture, which stands for *Cache Coherent Non-Uniform Memory Access*.
+It can be considered as a SM-MIMD (*shared memory - multiple instruction multiple data*) machine.
+The SGI ccNUMA system has the following properties:
+
+- Memory is physically distributed but logically shared
+- Memory is kept coherent automatically by hardware.
+- Coherent memory: memory is always valid (caches hold copies)
+- Granularity is L3 cache line (128 B)
+- Bandwidth of NUMAlink4 is 6.4 GB/s
+
+The ccNUMA is a compromise between a distributed memory system and a flat symmetric multi processing
+machine (SMP). Although the memory is shared, the access properties are not the same.
+
+### Compute Module
+
+The basic compute module of an Altix system is shown below.
+
+![Altix compute blade](misc/altix_brick_web.png)
+{: align="center"}
+
+It consists of one dual core Intel Itanium 2 "Montecito" processor, the
+local memory of 4 GB (2 GB on `Mars`), and the communication component,
+the so-called SHUB. All resources are shared by both cores. They have a
+common front side bus, so that accumulated memory bandwidth for both is
+not higher than for just one core.
+
+The SHUB connects local and remote resources. Via the SHUB and NUMAlink
+all CPUs can access remote memory in the whole system. Naturally, local
+memory provides the fastest access. There are some hints and commands
+that may help you to get optimal memory allocation and process placement
+(see the sketch below). Four of these blades are grouped together with a
+NUMA router in a compute brick. All bricks are connected with NUMAlink4
+in a "fat-tree" topology.
+
+![Remote memory access via SHUBs and NUMAlink](misc/memory_access_web.png)
+{: align="center"}
+
+### CPU
+
+The current SGI Altix is based on the dual core Intel Itanium 2
+processor (code name "Montecito"). One core has the following basic
+properties:
+
+|                                     |                            |
+|-------------------------------------|----------------------------|
+| clock rate                          | 1.6 GHz                    |
+| integer units                       | 6                          |
+| floating point units (multiply-add) | 2                          |
+| peak performance                    | 6.4 GFLOPS                 |
+| L1 cache                            | 2 x 16 kB, 1 clock latency |
+| L2 cache                            | 256 kB, 5 clock latency    |
+| L3 cache                            | 9 MB, 12 clock latency     |
+| front side bus                      | 128 bit x 200 MHz          |
+
+The theoretical peak performance of all Altix partitions is hence about 13.1 TFLOPS.
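+This corresponds to the 2048 cores in total (384 + 512 + 512 + 512 + 128) times the 6.4 GFLOPS peak
+performance per core.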
+
+The processor has hardware support for efficient software pipelining. For many scientific
+applications it provides a high sustained performance exceeding the performance of RISC CPUs with
+similar peak performance. On the downside, the compiler has to explicitly discover and exploit the
+parallelism in the application.
+
+## Usage
+
+### Compiling Parallel Applications
 
 This installation of the Message Passing Interface supports the MPI 1.2 standard with a few MPI-2
 features (see `man mpi` ). There is no command like `mpicc`, instead you just have to use the normal
@@ -16,32 +102,31 @@ additional library- or include-paths.
 
 - Note for C++ programmers: You need to link with `-lmpi++abi1002 -lmpi` instead of `-lmpi`.
 - Note for Fortran programmers: The MPI module is only provided for the Intel compiler and does not
-  work with gfortran.
+  work with `gfortran`.
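+
+For illustration, a hypothetical compile and link line for a C program (using the Intel compiler;
+`prog.c` is only a placeholder name) could look like this:
+
+```console
+# prog.c is a placeholder file name
+icc -O2 -o prog prog.c -lmpi
+```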
 
 Please follow these following guidelines to run your parallel program using the batch system on
 Mars.
 
-## Batch system
+### Batch System
 
-Applications on an HPC system can not be run on the login node. They
-have to be submitted to compute nodes with dedicated resources for the
-user's job. Normally a job can be submitted with these data:
+Applications on an HPC system cannot be run on the login node. They have to be submitted to compute
+nodes with dedicated resources for the user's job. Normally a job can be submitted with these data:
 
--   number of CPU cores,
--   requested CPU cores have to belong on one node (OpenMP programs) or
-    can distributed (MPI),
--   memory per process,
--   maximum wall clock time (after reaching this limit the process is
-    killed automatically),
--   files for redirection of output and error messages,
--   executable and command line parameters.
+- number of CPU cores,
+- requested CPU cores have to belong to one node (OpenMP programs) or
+  can be distributed (MPI),
+- memory per process,
+- maximum wall clock time (after reaching this limit the process is
+  killed automatically),
+- files for redirection of output and error messages,
+- executable and command line parameters.
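+
+A hypothetical submission that sets these values explicitly (the figures are placeholders, not
+recommendations) could look like this:
+
+```console
+# placeholder resource values: 16 cores, 2 h wall time, 900 MB per process
+bsub -n 16 -W 02:00 -M 900 -o job.out -e job.err mpirun -np 16 a.out
+```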
 
-### LSF
+#### LSF
 
-The batch sytem on Atlas is LSF. For general information on LSF, please follow
+The batch system on the Altix is LSF. For general information on LSF, please follow
 [this link](platform_lsf.md).
 
-### Submission of Parallel Jobs
+#### Submission of Parallel Jobs
 
 The MPI library running on the Altix is provided by SGI and highly optimized for the ccNUMA
 architecture of this machine. However, communication within a partition is faster than across
@@ -49,18 +134,18 @@ partitions. Take this into consideration when you submit your job.
 
 Single-partition jobs can be started like this:
 
-```Bash
+```console
 bsub -R "span[hosts=1]" -n 16 mpirun -np 16 a.out<
 ```
 
-Really large jobs with over 256 CPUs might run over multiple partitions.
-Cross-partition jobs can be submitted via PAM like this
+Really large jobs with over 256 CPUs might run over multiple partitions. Cross-partition jobs can
+be submitted via PAM like this:
 
-```Bash
+```console
 bsub -n 1024 pamrun a.out
 ```
 
-### Batch Queues
+#### Batch Queues
 
 | Batch Queue    | Admitted Users   | Available CPUs      | Default Runtime | Max. Runtime |
 |:---------------|:-----------------|:--------------------|:----------------|:-------------|
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_atlas.md b/doc.zih.tu-dresden.de/docs/archive/system_atlas.md
index c31a9b5dc536cbd6c76e772b317739171c83ab11..0e744c4ab702afac9d3ac413ccfb5abd58fef817 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_atlas.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_atlas.md
@@ -1,11 +1,62 @@
-# Atlas
+# MEGWARE PC-Farm Atlas
 
-**This page is deprecated! Atlas is a former system!**
+!!! warning
 
-Atlas is a general purpose HPC cluster for jobs using 1 to 128 cores in parallel
-([Information on the hardware](hardware_atlas.md)).
+    **This page is deprecated! Atlas is a former system!**
 
-## Compiling Parallel Applications
+## System
+
+The PC farm `Atlas` is a heterogeneous, general purpose cluster based on multicore AMD Opteron
+6274 ("Bulldozer") chips. The nodes are operated by the Linux operating system SUSE SLES 11 with a
+2.6 kernel. Currently, the following hardware is installed:
+
+| Component | Value |
+|-----------|--------|
+| CPUs |AMD Opteron 6274 |
+| number of cores | 5120 |
+|th. peak performance | 45 TFLOPS |
+|compute nodes | 4-way nodes *Saxonid* with 64 cores |
+|nodes with 64 GB RAM | 48 |
+|nodes with 128 GB RAM | 12 |
+|nodes with 512 GB RAM | 8 |
+
+Mars and Deimos users: Please read the [migration hints](migrate_to_atlas.md).
+
+All nodes share the `/home` and `/fastfs` file systems with our other HPC systems. Each
+node has 180 GB local disk space for scratch mounted on `/tmp`. The jobs for the compute nodes are
+scheduled by the [Platform LSF](platform_lsf.md) batch system from the login nodes
+`atlas.hrsk.tu-dresden.de`.
+
+A QDR Infiniband interconnect provides the communication and I/O infrastructure for low latency /
+high throughput data traffic.
+
+Users with a login on the [SGI Altix](system_altix.md) can access their home directory via NFS
+below the mount point `/hpc_work`.
+
+### CPU AMD Opteron 6274
+
+| Component | Value |
+|-----------|--------|
+| Clock rate | 2.2 GHz |
+| cores | 16 |
+| L1 data cache | 16 KB per core |
+| L1 instruction cache | 64 KB shared in a *module* (i.e. 2 cores) |
+| L2 cache | 2 MB per module |
+| L3 cache | 12 MB total, 6 MB shared between 4 modules = 8 cores |
+| FP units | 1 per module (supports fused multiply-add) |
+| th. peak performance | 8.8 GFLOPS per core (w/o turbo) |
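+
+The 45 TFLOPS theoretical peak performance quoted above follows from the 5120 cores times 8.8
+GFLOPS per core.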
+
+The CPU belongs to the x86_64 family. Since it is fully capable of
+running x86-code, one should compare the performances of the 32 and 64
+bit versions of the same code.
+
+For more architectural details, see the
+[AMD Bulldozer block diagram](http://upload.wikimedia.org/wikipedia/commons/e/ec/AMD_Bulldozer_block_diagram_%288_core_CPU%29.PNG)
+and [topology of Atlas compute nodes](misc/Atlas_Knoten.pdf).
+
+## Usage
+
+### Compiling Parallel Applications
 
 When loading a compiler module on Atlas, the module for the MPI implementation OpenMPI is also
 loaded in most cases. If not, you should explicitly load the OpenMPI module with `module load
@@ -16,9 +67,9 @@ use the currently loaded compiler. To reveal the command lines behind the wrappe
 `-show`.
 
 For running your code, you have to load the same compiler and MPI module as for compiling the
-program. Please follow te following guiedlines to run your parallel program using the batch system.
+program. Please follow the outlined guidelines to run your parallel program using the batch system.
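+
+A minimal sketch of that workflow (module and file names are placeholders; check `module avail` for
+the versions actually installed):
+
+```console
+# module and file names are placeholders
+module load openmpi
+mpicc -show
+mpicc -O2 -o prog prog.c
+```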
 
-## Batch System
+### Batch System
 
 Applications on an HPC system can not be run on the login node. They
 have to be submitted to compute nodes with dedicated resources for the
@@ -33,12 +84,12 @@ user's job. Normally a job can be submitted with these data:
 - files for redirection of output and error messages,
 - executable and command line parameters.
 
-### LSF
+#### LSF
 
-The batch sytem on Atlas is LSF. For general information on LSF, please follow
+The batch system on Atlas is LSF. For general information on LSF, please follow
 [this link](platform_lsf.md).
 
-### Submission of Parallel Jobs
+#### Submission of Parallel Jobs
 
 To run MPI jobs ensure that the same MPI module is loaded as during compile-time. In doubt, check
 you loaded modules with `module list`. If you code has been compiled with the standard OpenMPI
@@ -47,11 +98,11 @@ installation, you can load the OpenMPI module via `module load openmpi`.
 Please pay attention to the messages you get loading the module. They are more up-to-date than this
 manual. To submit a job the user has to use a script or a command-line like this:
 
-```Bash
+```console
 bsub -n <N> mpirun <program name>
 ```
 
-### Memory Limits
+#### Memory Limits
 
 **Memory limits are enforced.** This means that jobs which exceed their per-node memory limit **may
 be killed** automatically by the batch system.
@@ -79,7 +130,7 @@ or less** may be scheduled to smaller memory nodes.
 
 Have a look at the **examples below**.
 
-#### Monitoring memory usage
+#### Monitoring Memory Usage
 
 At the end of the job completion mail there will be a link to a website
 which shows the memory usage over time per node. This will only be
@@ -87,8 +138,8 @@ available for longer running jobs (>10 min).
 
 #### Examples
 
-| Job Spec.                                                                             | Nodes Allowed                                                                                     | Remark                                                                                                          |
-|:--------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------|
+| Job Spec. | Nodes Allowed | Remark |
+|:----------|:--------------|:-------|
 | `bsub -n 1 -M 500`     | All nodes      | <= 940 Fits everywhere                                                                                          |
 | `bsub -n 64 -M 700`    | All nodes      | <= 940 Fits everywhere                                                                                          |
 | `bsub -n 4 -M 1800`    | All nodes      | Is allowed to oversubscribe on small nodes n\[001-047\]                                                         |
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware_deimos.md b/doc.zih.tu-dresden.de/docs/archive/system_deimos.md
similarity index 65%
rename from doc.zih.tu-dresden.de/docs/archive/hardware_deimos.md
rename to doc.zih.tu-dresden.de/docs/archive/system_deimos.md
index a426381651f2807fb9c339e104ac4b2413aaec8f..a80890f070a92d5bcf6dc35205f72072e9ddd89a 100644
--- a/doc.zih.tu-dresden.de/docs/archive/hardware_deimos.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_deimos.md
@@ -1,10 +1,15 @@
 # Linux Networx PC-Farm Deimos
 
-The PC farm `Deimos` is a heterogenous cluster based on dual core AMD
-Opteron CPUs. The nodes are operated by the Linux operating system SuSE
-SLES 10 with a 2.6 kernel. Currently, the following hardware is
-installed:
+!!! warning
 
+    **This page is deprecated! Deimos is a former system!**
+
+The PC farm `Deimos` is a heterogeneous cluster based on dual core AMD Opteron CPUs. The nodes are
+operated by the Linux operating system SuSE SLES 10 with a 2.6 kernel. Currently, the following
+hardware is installed:
+
+| Component | Value |
+|-----------|-------|
 |CPUs |AMD Opteron X85 dual core |
 |RAM per core |2 GB |
 |Number of cores |2584 |
@@ -15,7 +20,7 @@ installed:
 |quad nodes (32 GB RAM) |24 |
 
 All nodes share a 68 TB on DDN hardware. Each node has per core 40 GB local disk space for scratch
-mounted on `/tmp` . The jobs for the compute nodes are scheduled by the
+mounted on `/tmp`. The jobs for the compute nodes are scheduled by the
 [Platform LSF](platform_lsf.md)
 batch system from the login nodes `deimos.hrsk.tu-dresden.de` .
 
@@ -23,14 +28,16 @@ Two separate Infiniband networks (10 Gb/s) with low cascading switches provide t
 I/O infrastructure for low latency / high throughput data traffic. An additional gigabit Ethernet
 network is used for control and service purposes.
 
-Users with a login on the [SGI Altix](hardware_altix.md) can access their home directory via NFS
+Users with a login on the [SGI Altix](system_altix.md) can access their home directory via NFS
 below the mount point `/hpc_work`.
 
 ## CPU
 
-The cluster is based on dual-core AMD Opteron X85 processor. One core
-has the following basic properties:
+The cluster is based on the dual-core AMD Opteron X85 processor. One core has the following basic
+properties:
 
+| Component | Value |
+|-----------|-------|
 |clock rate |2.6 GHz |
 |floating point units |2 |
 |peak performance |5.2 GFLOPS |
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware_phobos.md b/doc.zih.tu-dresden.de/docs/archive/system_phobos.md
similarity index 76%
rename from doc.zih.tu-dresden.de/docs/archive/hardware_phobos.md
rename to doc.zih.tu-dresden.de/docs/archive/system_phobos.md
index 9f70d45161fac7363e9e0828af4b788d817fc1c9..bcd0d9cd88d758a643722669fcd50a6cbeaf99c5 100644
--- a/doc.zih.tu-dresden.de/docs/archive/hardware_phobos.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_phobos.md
@@ -1,12 +1,16 @@
 # Linux Networx PC-Cluster Phobos
 
-**Phobos was shut down on 1 November 2010.**
+!!! warning
+
+    **This page is deprecated! Phobos is a former system which was shut down on 1 November 2010.**
 
 `Phobos` is a cluster based on AMD Opteron CPUs. The nodes are operated
 by the Linux operating system SuSE SLES 9 with a 2.6 kernel. Currently,
 the following hardware is installed:
 
-|CPUs \|AMD Opteron 248 (single core) |
+| Component | Value |
+|-----------|-------|
+|CPUs |AMD Opteron 248 (single core) |
 |total peak performance |563.2 GFLOPS |
 |Number of nodes |64 compute + 1 master |
 |CPUs per node |2 |
@@ -25,6 +29,8 @@ and service purposes.
 `Phobos` is based on single-core AMD Opteron 248 processor. It has the
 following basic properties:
 
+| Component | Value |
+|-----------|-------|
 |clock rate |2.2 GHz |
 |floating point units |2 |
 |peak performance |4.4 GFLOPS |
@@ -32,6 +38,5 @@ following basic properties:
 |L2 cache |1 MB |
 |memory bus |128 bit x 200 MHz |
 
-The CPU belongs to the x86_64 family. Although it is fully capable of
-running x86-code, one should always try to use 64-bit programs due to
-their potentially higher performance.
+The CPU belongs to the x86_64 family. Although it is fully capable of running x86-code, one should
+always try to use 64-bit programs due to their potentially higher performance.
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware_titan.md b/doc.zih.tu-dresden.de/docs/archive/system_titan.md
similarity index 50%
rename from doc.zih.tu-dresden.de/docs/archive/hardware_titan.md
rename to doc.zih.tu-dresden.de/docs/archive/system_titan.md
index 6c383c94feafa9628f234b00a0f28f31c9f4902d..d22c774c3a52e55794a398e5533962f62df231e4 100644
--- a/doc.zih.tu-dresden.de/docs/archive/hardware_titan.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_titan.md
@@ -1,4 +1,8 @@
-# Windows HPC Server 2008 - Cluster Titan
+# Windows HPC Server 2008 Cluster Titan
+
+!!! warning
+
+    **This page is deprecated! Titan is a former system!**
 
 The Dell Blade Server `Titan` is a homogenous cluster based on quad core
 Intel Xeon CPUs. The cluster consists of one management and 8 compute
@@ -10,16 +14,24 @@ protocol.
 The nodes are operated by the Windows operating system Microsoft HPC
 Server 2008. Currently, the following hardware is installed:
 
-\* Compute Node: \|CPUs \|Intel Xeon E5440 Quad-Core \| \|RAM per core
-\|2 GB \| \|Number of cores \|64 \| \|total peak performance \|724,48
-GFLOPS \|
+* Compute Node:
+
+| Component | Value |
+|-----------|-------|
+| CPUs | Intel Xeon E5440 Quad-Core |
+| RAM per core |2 GB |
+| Number of cores | 64 |
+| total peak performance | 724.48 GFLOPS |
 
-\* Management Node:
+* Management Node:
 
-\|CPUs \|Intel Xeon E5410 Quad-Core \| \|RAM per core \|2 GB \| \|Number
-of cores \|8 \|
+| Component | Value |
+|-----------|-------|
+| CPUs |Intel Xeon E5410 Quad-Core |
+| RAM per core | 2 GB |
+| Number of cores | 8 |
 
-\<P> The management node shares 1.2 TB disk space via NTFS over all
+The management node shares 1.2 TB disk space via NTFS over all
 nodes. Each node has a local disk of 120 GB. The jobs for the compute
 nodes are scheduled by the Microsoft scheduler, which is a part of the
 Microsoft HPC Pack, from the management node. The job submission can be
@@ -33,19 +45,26 @@ and I/O infrastructure.
 The cluster is based on quad core Intel Xeon E5440 processor. One core
 has the following basic properties:
 
-\|clock rate \|2.83 GHz \| \|floating point units \|2 \| \|peak
-performance \|11.26 GFLOPS \| \|L1 cache \|32 KB I + 32KB on chip per
-core \| \|L2 cache \|12 MB I+D on chip per chip, 6MB shared/ 2 cores \|
-\|FSB \|1333 MHz \|
+| Component | Value |
+|-----------|-------|
+|clock rate | 2.83 GHz |
+|floating point units | 2 |
+|peak performance | 11.26 GFLOPS |
+|L1 cache |32 KB I + 32KB on chip per core |
+|L2 cache |12 MB I+D on chip per chip, 6MB shared/ 2 cores |
+|FSB |1333 MHz |
 
 The management node is based on a quad core Intel Xeon E5410 processor.
 One core has the following basic properties:
 
-\|clock rate \|2.33 GHz \| \|floating point units \|2 \| \|peak
-performance \|9.32 GFLOPS \| \|L1 cache \|32 KB I + 32KB on chip per
-core \| \|L2 cache \|12 MB I+D on chip per chip, 6MB shared/ 2 cores \|
-\|FSB \|1333 MHz \|
+| Component | Value |
+|-----------|-------|
+|clock rate |2.33 GHz |
+|floating point units |2 |
+|peak performance |9.32 GFLOPS |
+|L1 cache |32 KB I + 32KB on chip per core |
+|L2 cache | 12 MB I+D on chip per chip, 6MB shared/ 2 cores |
+|FSB |1333 MHz |
 
-The CPU belongs to the x86_64 family. Since it is fully capable of
-running x86-code, one should compare the performances of the 32 and 64
-bit versions of the same code.
+The CPU belongs to the x86_64 family. Since it is fully capable of running x86-code, one should
+compare the performances of the 32 and 64 bit versions of the same code.
diff --git a/doc.zih.tu-dresden.de/docs/archive/hardware_triton.md b/doc.zih.tu-dresden.de/docs/archive/system_triton.md
similarity index 82%
rename from doc.zih.tu-dresden.de/docs/archive/hardware_triton.md
rename to doc.zih.tu-dresden.de/docs/archive/system_triton.md
index 646972202c2679849ce2d7c5ac866123b55e617e..ada59bfc14208f476752c023ed9fb4f2558e15e1 100644
--- a/doc.zih.tu-dresden.de/docs/archive/hardware_triton.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_triton.md
@@ -1,11 +1,15 @@
-# Hardware
+# IBM-iDataPlex Cluster Triton
 
-## IBM-iDataPlex
+!!! warning
 
-is a cluster based on quadcore Intel Xeon CPUs. The nodes are operated
-by the Linux operating system SuSE SLES 11. Currently, the following
+    **This page is deprecated! Triton is a former system!**
+
+Triton is a cluster based on quadcore Intel Xeon CPUs. The nodes are operated
+by the Linux operating system SUSE SLES 11. Currently, the following
 hardware is installed:
 
+| Component | Value |
+|-----------|-------|
 |CPUs |Intel quadcore E5530 |
 |RAM per core |6 GB |
 |Number of cores |512 |
@@ -20,6 +24,9 @@ the login node triton.hrsk.tu-dresden.de .
 The cluster is based on dual-core Intel Xeon E5530 processor. One core
 has the following basic properties:
 
+| Component | Value |
+|-----------|-------|
+|CPUs |Intel quadcore E5530 |
 |clock rate |2.4 GHz |
 |Cores |4 |
 |Threads |8 |
diff --git a/doc.zih.tu-dresden.de/docs/archive/system_venus.md b/doc.zih.tu-dresden.de/docs/archive/system_venus.md
index 86990407a5d6e782f52444ef91ca6be5b5eec3d5..a08dc26394d56eb7e485b86d7587537f9f63953f 100644
--- a/doc.zih.tu-dresden.de/docs/archive/system_venus.md
+++ b/doc.zih.tu-dresden.de/docs/archive/system_venus.md
@@ -1,11 +1,32 @@
-# Venus
+# SGI UV2000 / Venus
 
-## Information about the hardware
+!!! warning
 
-Detailed information on the currect HPC hardware can be found
-[here](hardware_venus.md).
+    **This page is deprecated! The SGI UV2000 (Venus) is a former system!**
 
-## Login to the System
+## System
+
+The SGI UV2000 is a shared memory system based on Intel Sandy Bridge processors. It is operated by
+the Linux operating system SLES 11 SP 3 with a kernel version 3.x.
+
+| Component                  | Value |
+|----------------------------|-------|
+| Number of CPU sockets      | 64    |
+| Physical cores per sockets | 8     |
+| Total number of cores      | 512   |
+| Total memory               | 8 TiB |
+
+From our experience, most parallel applications benefit from using the additional hardware
+hyperthreads.
+
+### File Systems
+
+Venus uses the same `home` file system as all our other HPC installations.
+For computations, please use `/scratch`.
+
+## Usage
+
+### Login to the System
 
 Login to the system is available via ssh at `venus.hrsk.tu-dresden.de`.
 
@@ -21,7 +42,7 @@ and
 SHA256:Qq1OrgSCTzgziKoop3a/pyVcypxRfPcZT7oUQ3V7E0E
 ```
 
-## MPI
+### MPI
 
 The installation of the Message Passing Interface on Venus (SGI MPT) supports the MPI 2.2 standard
 (see `man mpi` ). There is no command like `mpicc`, instead you just have to use the "serial"
@@ -29,7 +50,7 @@ compiler (e.g. `icc`, `icpc`, or `ifort`) and append `-lmpi` to the linker comma
 
 Example:
 
-```Bash
+```console
 % icc -o myprog -g -O2 -xHost myprog.c -lmpi
 ```
 
@@ -38,11 +59,11 @@ Notes:
 - C++ programmers: You need to link with both libraries:
   `-lmpi++ -lmpi`.
 - Fortran programmers: The MPI module is only provided for the Intel
-  compiler and does not work with gfortran.
+  compiler and does not work with `gfortran`.
 
 Please follow the following guidelines to run your parallel program using the batch system on Venus.
 
-## Batch system
+### Batch System
 
 Applications on an HPC system can not be run on the login node. They have to be submitted to compute
 nodes with dedicated resources for the user's job. Normally a job can be submitted with these data:
@@ -56,10 +77,10 @@ nodes with dedicated resources for the user's job. Normally a job can be submitt
 - files for redirection of output and error messages,
 - executable and command line parameters.
 
-The batch sytem on Venus is Slurm. For general information on Slurm, please follow
+The batch system on Venus is Slurm. For general information on Slurm, please follow
 [this link](../jobs_and_resources/slurm.md).
 
-### Submission of Parallel Jobs
+#### Submission of Parallel Jobs
 
 The MPI library running on the UV is provided by SGI and highly optimized for the ccNUMA
 architecture of this machine.
@@ -74,7 +95,7 @@ srun -n 16 a.out
 **Please note:** There are different MPI libraries on Taurus and Venus,
 so you have to compile the binaries specifically for their target.
 
-### File Systems
+#### File Systems
 
 -   The large main memory on the system allows users to create ramdisks
     within their own jobs.
diff --git a/doc.zih.tu-dresden.de/docs/archive/systems_switched_off.md b/doc.zih.tu-dresden.de/docs/archive/systems_switched_off.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4f9890ac3ad36580c617b6fb5292cb0b1ceffcb
--- /dev/null
+++ b/doc.zih.tu-dresden.de/docs/archive/systems_switched_off.md
@@ -0,0 +1,12 @@
+# Switched-Off Systems
+
+HPC at ZIH has quite a long history, and several systems have been installed and operated.
+Documentation on former systems for future reference can be found on the following pages:
+
+- [SGI Altix](system_altix.md)
+- [PC-Farm Atlas](system_atlas.md)
+- [PC-Farm Deimos](system_deimos.md)
+- [PC-Cluster Phobos](system_phobos.md)
+- [Windows-HPC-Server Titan](system_titan.md)
+- [PC-Cluster Triton](system_triton.md)
+- [Shared-Memory-System Venus](system_venus.md)
diff --git a/doc.zih.tu-dresden.de/mkdocs.yml b/doc.zih.tu-dresden.de/mkdocs.yml
index d0c0392aad49156c28f4b3d4842c839b0bf1117e..0f1292799c3a74b634602dc49cd9bfe9650cf1eb 100644
--- a/doc.zih.tu-dresden.de/mkdocs.yml
+++ b/doc.zih.tu-dresden.de/mkdocs.yml
@@ -114,23 +114,21 @@ nav:
     - Bio Informatics: archive/bioinformatics.md
     - CXFS End of Support: archive/cxfs_end_of_support.md
     - Debugging Tools: archive/debugging_tools.md
-    - Hardware: archive/hardware.md
-    - Hardware Altix: archive/hardware_altix.md
-    - Hardware Atlas: archive/hardware_atlas.md
-    - Hardware Deimos: archive/hardware_deimos.md
-    - Hardware Phobos: archive/hardware_phobos.md
-    - Hardware Titan: archive/hardware_titan.md
-    - Hardware Triton: archive/hardware_triton.md
-    - Hardware Venus: archive/hardware_venus.md
     - KNL Nodes: archive/knl_nodes.md
     - Load Leveler: archive/load_leveler.md
     - Migrate to Atlas: archive/migrate_to_atlas.md
     - No IB Jobs: archive/no_ib_jobs.md
     - Phase2 Migration: archive/phase2_migration.md
     - Platform LSF: archive/platform_lsf.md
-    - System Altix: archive/system_altix.md
-    - System Atlas: archive/system_atlas.md
-    - System Venus: archive/system_venus.md
+    - Switched-Off Systems:
+      - Overview: archive/systems_switched_off.md
+      - System Altix: archive/system_altix.md
+      - System Atlas: archive/system_atlas.md
+      - System Deimos: archive/system_deimos.md
+      - System Phobos: archive/system_phobos.md
+      - System Titan: archive/system_titan.md
+      - System Triton: archive/system_triton.md
+      - System Venus: archive/system_venus.md
     - Taurus II: archive/taurus_ii.md
     - UNICORE Rest API: archive/unicore_rest_api.md
     - Vampir Trace: archive/vampir_trace.md
diff --git a/doc.zih.tu-dresden.de/util/check-spelling-changes.sh b/doc.zih.tu-dresden.de/util/check-spelling-changes.sh
index d0849ac80a97a40ad6ffc99fee6e1cfd68155ae8..b44a3fd6afb17d0585f77f52c142fffcd509f7a1 100755
--- a/doc.zih.tu-dresden.de/util/check-spelling-changes.sh
+++ b/doc.zih.tu-dresden.de/util/check-spelling-changes.sh
@@ -5,10 +5,10 @@ set -euo pipefail
 scriptpath=${BASH_SOURCE[0]}
 basedir=`dirname "$scriptpath"`
 basedir=`dirname "$basedir"`
-wordlistfile=$basedir/wordlist.aspell
+wordlistfile=$(realpath "$basedir/wordlist.aspell")
 
 function getNumberOfAspellOutputLines(){
-  cat - | aspell -p "$wordlistfile" --ignore 2 -l en_US list | sort -u | wc -l
+  cat - | aspell -p "$wordlistfile" --ignore 2 -l en_US list --mode=markdown | sort -u | wc -l
 }
 
 branch="preview"
@@ -19,18 +19,35 @@ fi
 any_fails=false
 
 source_hash=`git merge-base HEAD "$branch"`
-files=$(git diff --name-only "$source_hash")
-for f in $files; do
-    if [ "${f: -3}" == ".md" ]; then
-        previous_count=`git show "$source_hash:$f" | getNumberOfAspellOutputLines`
-        current_count=`cat "$f" | getNumberOfAspellOutputLines`
+#Remove everything except lines beginning with --- or +++
+files=`git diff $source_hash | sed -n 's/^[-+]\{3,3\} //p'`
+#echo "$files"
+#echo "-------------------------"
+#Assume that we have pairs of lines (starting with --- and +++).
+while read oldfile; do
+    read newfile
+    if [ "${newfile: -3}" == ".md" ]; then
+        if [ "$oldfile" == "/dev/null" ]; then
+            #Added files should not introduce new spelling mistakes
+            previous_count=0
+        else
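+            #Remove the prefix "a/"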
+            previous_count=`git show "$source_hash:${oldfile:2}" | getNumberOfAspellOutputLines`
+        fi
+        if [ "$newfile" == "/dev/null" ]; then
+            #Deleted files do not contain any spelling mistakes
+            current_count=0
+        else
+            #Remove the prefix "b/"
+            newfile=${newfile:2}
+            current_count=`cat "$newfile" | getNumberOfAspellOutputLines`
+        fi
         if [ $current_count -gt $previous_count ]; then
-            echo "-- File $f"
+            echo "-- File $newfile"
             echo "Change increases spelling mistake count (from $previous_count to $current_count)"
             any_fails=true
         fi
     fi
-done
+done <<< "$files"
 
 if [ "$any_fails" == true ]; then
     exit 1
diff --git a/doc.zih.tu-dresden.de/util/check-spelling.sh b/doc.zih.tu-dresden.de/util/check-spelling.sh
index 327b29ec1a80d1a361b8be4bdde2e1a93bf0e981..8b7da3217c3f086a44eeb042cc645fcd63d49364 100755
--- a/doc.zih.tu-dresden.de/util/check-spelling.sh
+++ b/doc.zih.tu-dresden.de/util/check-spelling.sh
@@ -4,7 +4,7 @@ scriptpath=${BASH_SOURCE[0]}
 basedir=`dirname "$scriptpath"`
 basedir=`dirname "$basedir"`
 wordlistfile=$basedir/wordlist.aspell
-acmd="aspell -p $wordlistfile --ignore 2 -l en_US list"
+acmd="aspell -p $wordlistfile --ignore 2 -l en_US list --mode=markdown"
 
 function spell_check () {
   file_to_check=$1
diff --git a/doc.zih.tu-dresden.de/wordlist.aspell b/doc.zih.tu-dresden.de/wordlist.aspell
index 6d23d29110d57c85ecb248e0ac012652935c8022..01487c58c6f71f1bbcf1695f62038c91a4422ec9 100644
--- a/doc.zih.tu-dresden.de/wordlist.aspell
+++ b/doc.zih.tu-dresden.de/wordlist.aspell
@@ -40,3 +40,55 @@ TensorFlow
 Theano
 Vampir
 ZIH
+DFG
+NUMAlink
+ccNUMA
+NUMA
+Montecito
+Opteron
+Saxonid
+MIMD
+LSF
+lsf
+Itanium
+mpif
+mpicc
+mpiCC
+mpicxx
+mpirun
+mpifort
+ifort
+icc
+icpc
+gfortran
+Altix
+Neptun
Triton
+SUSE
+SLES
+Fortran
+SMP
+MEGWARE
+SGI
+CXFS
+NFS
+CPUs
+GFLOPS
+TFLOPS
+png
+jpg
+pdf
+bsub
+OpenMPI
+openmpi
+multicore
+fastfs
+tmp
+MKL
+TBB
+LoadLeveler
+Gnuplot
+gnuplot
+RSA
+SHA
+pipelining