From 0576a75a5e3213eab9c0e4c622a3c91faed3a2da Mon Sep 17 00:00:00 2001
From: Ulf Markwardt <ulf.markwardt@tu-dresden.de>
Date: Mon, 7 Oct 2024 13:48:38 +0200
Subject: [PATCH] Update overview.md

---
 .../docs/jobs_and_resources/overview.md               | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
index 9582ab5ed..58e0b8f9e 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
@@ -1,6 +1,6 @@
 # Introduction HPC Resources and Jobs
 
-ZIH operates high performance computing (HPC) systems with more than 100.000 cores, 1000 GPUs, and a
+ZIH operates high performance computing (HPC) systems with about 100,000 cores, 900 GPUs, and a
 flexible storage hierarchy with about 40 PB total capacity. The HPC system provides an optimal
-research environment especially in the area of data analytics, artificial intelligence methods and
-machine learning as well as for processing extremely large data sets. Moreover it is also a perfect
+research environment, especially in the areas of data analytics, artificial intelligence methods,
+and machine learning, as well as for processing extremely large data sets. Moreover, it is a perfect
@@ -8,10 +8,18 @@ platform for highly scalable, data-intensive and compute-intensive applications
-capabilities for energy measurement and performance monitoring. Therefore provides ideal conditions
+capabilities for energy measurement and performance monitoring. It therefore provides ideal conditions
 to achieve the ambitious research goals of the users and the ZIH.
 
-The HPC system, redesigned in December 2023, consists of five homogeneous clusters with their own
-[Slurm](slurm.md) instances and cluster specific
-login nodes. The clusters share one
-[filesystem](../data_lifecycle/file_systems.md) which enables users to easily switch between the
-components.
+The HPC system consists of six clusters with their own [Slurm](slurm.md) instances and
+cluster-specific login nodes. The clusters share several
+[filesystems](../data_lifecycle/file_systems.md), which enable users to switch between the
+components.
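+
+Each cluster is reached through its own login nodes, and jobs are submitted to that cluster's
+Slurm instance from there. A minimal sketch, assuming a placeholder user `marie` and Barnard's
+login node `login1`:
+
+```console
+marie@local$ ssh login1.barnard.hpc.tu-dresden.de
+marie@login1$ srun --ntasks=1 --time=00:05:00 echo "Hello from Barnard"
+```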
 
 ## Selection of Suitable Hardware
@@ -53,7 +53,8 @@ The following questions may help to decide which cluster to use
 <!-- cluster_overview_table -->
 |Name|Description| DNS | Nodes | # Nodes | Cores per Node | Threads per Core | Memory per Node [in MB] | Memory per Core [in MB] | GPUs per Node
 |---|---|----|:---|---:|---:|---:|---:|---:|---:|
+|**Capella**<br>_2024_| GPU|`c[node].capella.hpc.tu-dresden.de` |c[1-144] | 144 |64| 1 |768,000 |12,000 | 4 |
 |**Barnard**<br>_2023_| CPU|`n[node].barnard.hpc.tu-dresden.de` |n[1001-1630] | 630 |104| 2 |515,000 |2,475 | 0 |
 |**Alpha**<br>_2021_| GPU |`i[node].alpha.hpc.tu-dresden.de`|taurusi[8001-8034] |  34 | 48 | 2 | 990,000 | 10,312|  8  |
 |**Romeo**<br>_2020_| CPU |`i[node].romeo.hpc.tu-dresden.de`|taurusi[7001-7192] | 192|128 | 2 | 505,000| 1,972 |  0  |
 |**Julia**<br>_2021_| single SMP system |`julia.hpc.tu-dresden.de`| julia | 1 | 896 | 1 | 48,390,000 | 54,006 | - |
-- 
GitLab