From 3f4c1ea3c0754551cce7e343645516e2592a6c2d Mon Sep 17 00:00:00 2001
From: Martin Schroschk <martin.schroschk@tu-dresden.de>
Date: Fri, 3 Nov 2023 13:33:25 +0100
Subject: [PATCH] Switch off nodes on partition smp2: Remove partition smp2 and
 hostnames

... from active documentation. Will remain in the archive.
---
 .../docs/jobs_and_resources/hardware_overview.md    | 13 -------------
 .../jobs_and_resources/partitions_and_limits.md     |  7 +++----
 .../docs/jobs_and_resources/slurm_examples.md       |  4 ++--
 3 files changed, 5 insertions(+), 19 deletions(-)

diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
index bf5f25146..3cd727be0 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview.md
@@ -94,16 +94,3 @@ For machine learning, we have IBM AC922 nodes installed with this configuration:
 - Hostnames: `taurusi[2045-2108]`
 - Slurm Partition: `gpu2`
 - Node topology, same as [island 4 - 6](#island-6-intel-haswell-cpus)
-
-## SMP Nodes - up to 2 TB RAM
-
-- 5 Nodes, each with
-    - 4 x Intel(R) Xeon(R) CPU E7-4850 v3 (14 cores) @ 2.20 GHz, Multithreading disabled
-    - 2 TB RAM
-- Hostnames: `taurussmp[3-7]`
-- Slurm partition: `smp2`
-
-??? hint "Node topology"
-
-    ![Node topology](../archive/misc/smp2.png)
-    {: align=center}
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md
index f887a7662..9cff014ac 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/partitions_and_limits.md
@@ -81,8 +81,8 @@ the memory of the other threads is allocated implicitly, too, and you will alway
 Some partitions have an *interactive* counterpart for interactive jobs. The corresponding partitions
 are suffixed with `-interactive` (e.g. `ml-interactive`) and have the same configuration.
 
-There is also a meta partition `haswell`, which contains the partitions `haswell64`,
-`haswell256` and `smp2`. `haswell` is also the default partition. If you specify no partition or
+There is also a meta partition `haswell`, which contains the partitions `haswell64` and
+`haswell256`. `haswell` is also the default partition. If you specify no partition or
 partition `haswell`, a Slurm plugin will choose the partition that fits your memory requirements.
 There are some other partitions, which are not specified in the table above, but those partitions
 should not be used directly.
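
For illustration, a batch script relying on this memory-based routing might look like the following sketch; the per-core limits are taken from the table below, while the requested memory, core count, runtime, and program name are placeholder assumptions:

```bash
#!/bin/bash
#SBATCH --partition=haswell      # meta partition; a Slurm plugin selects the concrete partition
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --mem-per-cpu=5000       # 5,000 MB per core exceeds the haswell64 limit (2,541 MB),
                                 # so the job should be routed to haswell256
#SBATCH --time=01:00:00

srun ./my_application            # placeholder for the actual program
```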
@@ -92,11 +92,10 @@ should not be used directly.
 |:--------|:------|--------:|---------------:|------------:|------------:|--------------:|--------------:|
 | gpu2 | taurusi[2045-2103] | 59 | 24 | 1 | 62,000 | 2,583 | 4 |
 | gpu2-interactive | taurusi[2045-2103] | 59 | 24 | 1 | 62,000 | 2,583 | 4 |
-| haswell | taurusi[6001-6604],taurussmp[3-7] | 609 |   |   |   |   |   |
+| haswell | taurusi[6001-6604] | 604 |   |   |   |   |   |
 | haswell64 | taurusi[6001-6540,6559-6604] | 586 | 24 | 1 | 61,000 | 2,541 |    |
 | haswell256 | taurusi[6541-6558] | 18 | 24 | 1 | 254,000 | 10,583 |    |
 | interactive | taurusi[6605-6612] | 8 | 24 | 1 | 61,000 | 2,541 |    |
-| smp2 | taurussmp[3-7] | 5 | 56 | 1 | 2,044,000 | 36,500 |    |
 | hpdlf | taurusa[3-16] | 14 | 12 | 1 | 95,000 | 7,916 | 3 |
 | ml | taurusml[3-32] | 30 | 44 | 4 | 254,000 | 1,443 | 6 |
 | ml-interactive | taurusml[1-2] | 2 | 44 | 4 | 254,000 | 1,443 | 6 |
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
index e35bf836d..5bc040217 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
@@ -8,8 +8,8 @@ depend on the type of parallelization and architecture.
 ### OpenMP Jobs
 
 An SMP-parallel job can only run within a node, so it is necessary to include the options `--nodes=1`
-and `--ntasks=1`. The maximum number of processors for an SMP-parallel program is 896 and 56 on
-partition `taurussmp8` and  `smp2`, respectively, as described in the
+and `--ntasks=1`. The maximum number of processors for an SMP-parallel program is 896 on
+partition `taurussmp8`, as described in the
 [section on memory limits](partitions_and_limits.md#memory-limits). Using the option
 `--cpus-per-task=<N>`, Slurm will start one task and you will have `N` CPUs available for your job.
 An example job file would look like:
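
A minimal sketch of such an OpenMP job file, assuming placeholder values for the CPU count, memory, runtime, and program name (the full example in the documentation may differ):

```bash
#!/bin/bash
#SBATCH --nodes=1                              # SMP-parallel: a single node only
#SBATCH --ntasks=1                             # one task ...
#SBATCH --cpus-per-task=8                      # ... with 8 CPUs available to it
#SBATCH --mem-per-cpu=2000
#SBATCH --time=00:30:00

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK    # let OpenMP use all allocated CPUs
srun ./my_openmp_program                       # placeholder binary
```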
-- 
GitLab