diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
index 790e8b92c6a24ebfc08d8096ea14cbe68142b708..bc54e752cd79d334f03a3b5572acf7df2fc12c43 100644
--- a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
@@ -109,7 +109,7 @@ At the following table it's possible to see what is available at each cluster.
 |----------------------|--------|----|---------|--------|------------|--------|-----|
 | Alpha                |   -    | OK |    OK   |   OK   |     OK*    |   OK*  |  -  |
 | Barnard              |   OK   | OK |    OK   |   OK   |     OK*    |   OK*  |  -  |
-|Capella               |   OK   | OK |    OK   |   OK   |     -      |   -    |  -  |
+| Capella              |   OK   | OK |    OK   |   OK   |     -      |   -    |  -  |
 | Romeo                |   -    | OK |    OK   |   OK   |     OK*    |   OK*  |  -  |
 | VIS                  |   OK   | OK |    OK   |   OK   |     OK*    |   OK*  |  OK |
 
diff --git a/doc.zih.tu-dresden.de/docs/access/ssh_login.md b/doc.zih.tu-dresden.de/docs/access/ssh_login.md
index 73ac764ebd5f2917fce1797f7e95b75e53b5e09d..00cf3b7cb7e784777ea326a94d542a28158a13be 100644
--- a/doc.zih.tu-dresden.de/docs/access/ssh_login.md
+++ b/doc.zih.tu-dresden.de/docs/access/ssh_login.md
@@ -119,7 +119,7 @@ for more information on Dataport nodes.
 
     In the above `.ssh/config` file, the HPC system `Barnard` is chosen as an example.
-    The very same settings can be made for individuall or all ZIH systems, e.g. `Capella`, `Alpha`,
-     `Julia`,`Romeo` etc.
+    The very same settings can be made for individual or all ZIH systems, e.g. `Capella`, `Alpha`,
+    `Julia`, `Romeo` etc.
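+
+    For example, an equivalent entry for another cluster might look like the following sketch
+    (the hostname below follows the usual `login[N].<cluster>.hpc.tu-dresden.de` naming pattern;
+    please check the respective cluster page for the exact login node names):
+
+    ```
+    Host capella
+      HostName login1.capella.hpc.tu-dresden.de
+      User marie
+      IdentityFile ~/.ssh/id_ed25519
+    ```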
 
 ## X11-Forwarding
 
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/working.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/working.md
index a00a400e1786bfbc9f3821bd2baa3cdbb657ef89..d7c5d9a2e5b55caea4095928ee1ef169579ce5a9 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/working.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/working.md
@@ -10,7 +10,7 @@ performance and permanence.
 | `Lustre`        | `/data/walrus`    | 20 PB    | global             | Only accessible via [Workspaces](workspaces.md). For moderately low bandwidth, low IOPS. Mounted read-only on compute nodes. |
 | `WEKAio`        | `/data/weasel`    | 1 PB     | global (w/o Power) | *Coming 2024!* For high IOPS                              |
 | `ext4`          | `/tmp`            | 95 GB    | node local         | Systems: tbd. Is cleaned up after the job automatically.  |
-| `WEKAio`        | `/data/cat`       | 1 PB     | shared on Capella  | For high IOPS. Only available on [Capella](../jobs_and_resources/capella.md).                 |
+| `WEKAio`        | `/data/cat`       | 1 PB     | only on Capella    | For high IOPS. Only available on [Capella](../jobs_and_resources/capella.md).                 |
 
 ## Recommendations for Filesystem Usage
 
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/capella.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/capella.md
index 54e620fe74a19a03e0b0b74009b3e3f83a0365cd..6b82a999c6658727c213cd6aa138e3e73ae300f2 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/capella.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/capella.md
@@ -27,7 +27,7 @@ Although all other [filesystems](../data_lifecycle/file_systems.md)
 ### Modules
 
 The easiest way is using the [module system](../software/modules.md).
-All software available from the module system has been specifically build for the cluster `Alpha`
+All software available from the module system has been specifically built for the cluster `Capella`,
 i.e., with optimization for Zen4 (Genoa) microarchitecture and CUDA-support enabled.
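+
+For example, loading a compiler together with a CUDA module might look like the following sketch
+(the module names are placeholders only; the command below shows what is actually installed):
+
+```console
+marie@capella$ module load GCC CUDA
+```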
 
 To check the available modules for `Capella`, use the command
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/power9.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/power9.md
index 4f821717523935b8556a8fb34d3dd42d6a87cd46..600174f79c0bc3f69154b0daebc3726499322150 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/power9.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/power9.md
@@ -12,7 +12,7 @@ partition `power` within the now decommissioned `Taurus` system. With the decomm
 - 32 nodes, each with
     - 2 x IBM Power9 CPU (2.80 GHz, 3.10 GHz boost, 22 cores)
     - 256 GB RAM (8 x 16 GB DDR4-2666 MT/s per socket)
-    - 6 x NVIDIA VOLTA V100 with 32 GB HBM2
+    - 6 x NVIDIA Tesla V100 with 32 GB HBM2
     - NVLINK bandwidth 150 GB/s between GPUs and host
 - Login nodes: `login[1-2].power9.hpc.tu-dresden.de`
 - Hostnames: `ml[1-29].power9.hpc.tu-dresden.de`
@@ -25,14 +25,14 @@ partition `power` within the now decommissioned `Taurus` system. With the decomm
 If you want to use containers on `Power9`, please refer to the page
-[Singularity for Power9 Architecuture](../software/singularity_power9.md).
+[Singularity for Power9 Architecture](../software/singularity_power9.md).
 
-The compute nodes of the cluster `power9` are built on the base of
+The compute nodes of the cluster `Power9` are based on the
 [Power9 architecture](https://www.ibm.com/it-infrastructure/power/power9) from IBM. The system was created
 for AI challenges, analytics and working with data-intensive workloads and accelerated databases.
 
 The main feature of the nodes is the ability to work with the
 [NVIDIA Tesla V100](https://www.nvidia.com/en-gb/data-center/tesla-v100/) GPU with **NV-Link**
-support that allows a total bandwidth with up to 300 GB/s. Each node on the
-cluster `power9` has 6x Tesla V-100 GPUs. You can find a detailed specification of the cluster in our
+support that allows a total bandwidth of up to 300 GB/s. Each node of the
+cluster `Power9` has six Tesla V100 GPUs. You can find a detailed specification of the cluster in our
 [Power9 documentation](../jobs_and_resources/hardware_overview.md).
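+
+To actually use these GPUs, they have to be requested from Slurm. A minimal interactive allocation
+might look like the following sketch (time limit and resource numbers are placeholders and may
+need to be adapted):
+
+```console
+marie@login.power9$ srun --nodes=1 --ntasks=1 --cpus-per-task=4 --gres=gpu:1 --time=01:00:00 --pty bash
+```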
 
 
@@ -43,5 +43,5 @@ cluster `power9` has 6x Tesla V-100 GPUs. You can find a detailed specification
 
 ### Power AI
 
-There are tools provided by IBM, that work on cluster `power9` and are related to AI tasks.
+There are tools provided by IBM that work on the cluster `Power9` and are related to AI tasks.
 For more information see our [Power AI documentation](power_ai.md).