diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
index 65d000c9b1f022637f111ad0a1a17c334aac4b37..7ec5a636463c23724948fee1d7d52748db21e34f 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
@@ -1,7 +1,6 @@
 # HPC Resources
 
-TODO: Update this introduction
-
+<!--TODO: Update this introduction-->
 HPC resources in ZIH systems comprise the *High Performance Computing and Storage Complex* and its
 extension *High Performance Computing – Data Analytics*. In total it offers scientists
 about 60,000 CPU cores and a peak performance of more than 1.5 quadrillion floating point
@@ -18,18 +17,26 @@ users and the ZIH.
-    will have five homogeneous clusters with their own Slurm instances and with cluster specific
-    login nodes running on the same CPU.
+    will have five homogeneous clusters with their own Slurm instances and with cluster-specific
+    login nodes running on the same CPU architecture.
 
-With the installation and start of operation of the [new HPC system Barnard(#barnard),
+With the installation and start of operation of the [new HPC system Barnard](#barnard),
-quite significant changes w.r.t. HPC system landscape at ZIH follow. The former HPC system Taurus is
-partly switched-off and partly split up into separate clusters. In the end, from the users'
-perspective, there will be **five separate clusters**:
+significant changes to the HPC system landscape at ZIH follow. The former HPC system Taurus is
+partly switched off and partly split up into separate clusters. In the end, from the users'
+perspective, there will be **five separate clusters**:
 
-| Name | Description | Year| DNS |
-| --- | --- | --- | --- |
-| **Barnard** | CPU cluster |2023| `n[1001-1630].barnard.hpc.tu-dresden.de` |
-| **Romeo** | CPU cluster |2020| `i[8001-8190].romeo.hpc.tu-dresden.de` |
-| **Alpha Centauri** | GPU cluster | 2021| `i[8001-8037].alpha.hpc.tu-dresden.de` |
-| **Julia** | single SMP system |2021| `smp8.julia.hpc.tu-dresden.de` |
-| **Power** | IBM Power/GPU system |2018| `ml[1-29].power9.hpc.tu-dresden.de` |
+| Name                                | Description           | Year of Installation | DNS |
+| ----------------------------------- | --------------------- | -------------------- | --- |
+| [`Barnard`](#barnard)               | CPU cluster           | 2023                 | `n[1001-1630].barnard.hpc.tu-dresden.de` |
+| [`Alpha Centauri`](#alpha-centauri) | GPU cluster           | 2021                 | `i[8001-8037].alpha.hpc.tu-dresden.de` |
+| [`Julia`](#julia)                   | Single SMP system     | 2021                 | `smp8.julia.hpc.tu-dresden.de` |
+| [`Romeo`](#romeo)                   | CPU cluster           | 2020                 | `i[8001-8190].romeo.hpc.tu-dresden.de` |
+| [`Power`](#power9)                  | IBM Power/GPU cluster | 2018                 | `ml[1-29].power9.hpc.tu-dresden.de` |
 
-All clusters will run with their own [Slurm batch system](slurm.md) and job submission is possible
-only from their respective login nodes.
+Each cluster will run its own [Slurm batch system](slurm.md), and job submission is possible
+only from its respective login nodes.
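+
+For example, once Barnard is in operation, a job could be submitted by first logging in to one of
+its login nodes (a minimal sketch; the login hostname and job script name are illustrative):
+
+```console
+marie@local$ ssh login1.barnard.hpc.tu-dresden.de
+marie@login$ sbatch my_job_script.sh
+```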
@@ -145,7 +144,7 @@ CPUs.
 ## Alpha Centauri
 
 The cluster **Alpha Centauri** (short: **Alpha**) by NEC provides AMD Rome CPUs and NVIDIA A100 GPUs
-and designed for AI and ML tasks.
+and is designed for AI and ML tasks.
 
 - 34 nodes, each with
-    - 8 x NVIDIA A100-SXM4 Tensor Core-GPUs
+    - 8 x NVIDIA A100-SXM4 Tensor Core GPUs