diff --git a/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md b/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md
index 98e7bb8a6dd37fdb5525b37a61888d04086bc9c9..56daf41c37faa0f446228b6afb1938b69ad92444 100644
--- a/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md
+++ b/doc.zih.tu-dresden.de/docs/access/key_fingerprints.md
@@ -62,7 +62,7 @@ shown matches one of the table.
 | ECDSA    | MD5:63:9a:67:68:37:85:31:77:a4:e6:0b:da:8c:d9:2f:96 |
 | ED25519  | SHA256:Ws/vbrp5e/Ay+fcVzhsL0jupjGkDdn1cJ+SX6gQB6Bs  |
 | ED25519  | MD5:7f:5c:e6:2b:6f:94:24:9b:0f:2f:1d:bc:40:6b:59:c7 |
-{: summary="List of valid fingerprints for barnard login1 node "}
+{: summary="List of valid fingerprints for Barnard login1 node"}
 
 #### login2.barnard.hpc.tu-dresden.de
 
@@ -74,7 +74,31 @@ shown matches one of the table.
 | ECDSA    | MD5:d9:33:27:26:00:c9:81:cf:bb:45:43:dc:05:e8:1f:43 |
 | ED25519  | SHA256:BewwkydtP2riZPShvNzAOWm+dQtdOq535j7Vow1HbRQ  |
 | ED25519  | MD5:18:8b:cd:1e:2e:9a:6c:8c:ee:b5:c9:3e:68:a3:4a:3f |
-{: summary="List of valid fingerprints for barnard login2 node "}
+{: summary="List of valid fingerprints for Barnard login2 node"}
+
+#### login3.barnard.hpc.tu-dresden.de
+
+| Key type | Fingerprint                                         |
+|:---------|:----------------------------------------------------|
+| RSA      | SHA256:+VSqQp+6LZrZXOHPuDhxd2ti9mam/gDLSbn5kH0S2UI  |
+| RSA      | MD5:19:16:ce:34:0e:2c:5f:37:42:06:f7:55:7d:19:cf:1a |
+| ECDSA    | SHA256:qZbC5BDKrTvE3J6qgGJLQwxtjfYy6pmrI7teEjFnHiE  |
+| ECDSA    | MD5:b1:19:a6:bf:9e:95:ce:ee:fd:ab:b3:ee:5e:d7:e0:a7 |
+| ED25519  | SHA256:ATNHOAZNjWHAXMwTWgxMvB9DIZ5bZurneN4sBKGSsz8  |
+| ED25519  | MD5:ee:cb:cc:ff:be:15:f2:e8:8e:ac:ef:da:a1:f9:48:33 |
+{: summary="List of valid fingerprints for Barnard login3 node"}
+
+#### login4.barnard.hpc.tu-dresden.de
+
+| Key type | Fingerprint                                         |
+|:---------|:----------------------------------------------------|
+| RSA      | SHA256:IYpo+qHKOIs4TEftlDp63QlQr85xlcgbapfMsbCeZDE  |
+| RSA      | MD5:a7:3d:c3:be:53:62:7f:fc:5a:b5:6b:ba:8c:83:6e:4c |
+| ECDSA    | SHA256:nnUzS1Zu9+yaXf8ayDIwmfXabPtyvdr5c3Hvp+/zXhs  |
+| ECDSA    | MD5:69:f9:54:60:24:79:22:cb:7f:ba:d0:90:f5:0f:4a:5d |
+| ED25519  | SHA256:1QXw+IC51iT55LiE/7JJEXL7Jm1GZjk+/7OjaYfWXUY  |
+| ED25519  | MD5:17:8c:ea:26:dc:f0:43:61:a8:4d:06:e3:8e:f7:27:29 |
+{: summary="List of valid fingerprints for Barnard login4 node"}
 
 ??? example "Connecting with SSH"
 
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
index 3db16f94c248bfb726f6f80a207446434a69b29d..46ff6483a19cb37e73f6403fc5d300bb6fb9fc95 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/workspaces.md
@@ -28,10 +28,10 @@ times.
 ### List Available Filesystems
 
 To list all available filesystems for using workspaces, you can either invoke `ws_list -l` or
-`ws_find -l`, e.g.,
+`ws_find --list`, e.g.,
 
 ```console
-marie@login$ ws_find -l
+marie@login$ ws_find --list
 available filesystems:
 scratch (default)
 warm_archive
@@ -44,7 +44,7 @@ beegfs
 
     The default filesystem is `scratch`. If you prefer another filesystem (cf. section
     [List Available Filesystems](#list-available-filesystems)), you have to explicitly
-    provide the option `-F <fs>` to the workspace commands.
+    provide the option `--filesystem=<fs>` to the workspace commands.
 
 ### List Current Workspaces
 
@@ -67,7 +67,7 @@ overview of some of these options. All available options can be queried by `ws_l
 === "Certain filesystem"
 
     ```
-    marie@login$ ws_list --filesystem scratch_fast
+    marie@login$ ws_list --filesystem=scratch_fast
     id: numbercrunch
          workspace directory  : /lustre/ssd/ws/marie-numbercrunch
          remaining time       : 2 days 23 hours
@@ -135,7 +135,7 @@ overview of some of these options. All available options can be queried by `ws_l
 ### Allocate a Workspace
 
 To allocate a workspace in one of the listed filesystems, use `ws_allocate`. It is necessary to
-specify a unique name and the duration of the workspace.
+specify a unique name and the duration (in days) of the workspace.
 
 ```console
 ws_allocate: [options] workspace_name duration
@@ -154,31 +154,54 @@ Options:
   -c [ --comment ] arg       comment
 ```
 
-!!! example
+!!! example "Simple workspace allocation"
+
+    The simplest way to allocate a workspace is to call the `ws_allocate` command with two
+    arguments, where the first specifies the workspace name and the second the duration in days.
+    This allocates a workspace on the default filesystem with no e-mail reminder.
 
     ```console
-    marie@login$ ws_allocate -F scratch -r 7 -m marie.testuser@tu-dresden.de test-workspace 90
+    marie@login$ ws_allocate test-workspace 90
     Info: creating workspace.
     /scratch/ws/marie-test-workspace
     remaining extensions  : 10
     remaining time in days: 90
     ```
 
-This will create a workspace with the name `test-workspace` on the `/scratch` filesystem for 90
-days with an email reminder for 7 days before the expiration.
+!!! example "Workspace allocation on specific filesystem"
+
+    In order to allocate a workspace on a non-default filesystem, the option
+    `--filesystem <filesystem>` is required.
+
+    ```console
+    marie@login$ ws_allocate --filesystem scratch_fast test-workspace 3
+    Info: creating workspace.
+    /lustre/ssd/ws/marie-test-workspace
+    remaining extensions  : 2
+    remaining time in days: 3
+    ```
+
+!!! example "Workspace allocation with e-mail reminder"
 
-!!! Note "Email reminder"
+    This command will create a workspace with the name `test-workspace` on the `/scratch` filesystem
+    with a duration of 90 days and send an e-mail reminder. The e-mail reminder will be sent every
+    day starting 7 days prior to expiration. We strongly recommend setting this e-mail reminder.
 
-    Setting the reminder to `7` means you will get a reminder email on every day starting `7` days
-    prior to expiration date. We strongly recommend to set this email reminder.
+    ```console
+    marie@login$ ws_allocate --reminder 7 --mailaddress marie.testuser@tu-dresden.de test-workspace 90
+    Info: creating workspace.
+    /scratch/ws/marie-test-workspace
+    remaining extensions  : 10
+    remaining time in days: 90
+    ```
 
 !!! Note "Name of a workspace"
 
-   The workspace name should help you to remember the experiment and data stored here. It has to
-   be unique on a certain filesystem. On the other hand it is possible to use the very same name
-   for workspaces on different filesystems.
+    The workspace name should help you to remember the experiment and data stored here. It has to
+    be unique on a certain filesystem. On the other hand it is possible to use the very same name
+    for workspaces on different filesystems.
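+
+    For example, these two workspaces can coexist because they live on different filesystems (a
+    sketch, assuming the `ssd` filesystem is available to you):
+
+    ```console
+    marie@login$ ws_allocate my-workspace 30
+    marie@login$ ws_allocate --filesystem ssd my-workspace 30
+    ```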
 
-Please refer to the section [section Cooperative Usage](#cooperative-usage-group-workspaces) for
+Please refer to the [section Cooperative Usage](#cooperative-usage-group-workspaces) for
 group workspaces.
 
 ### Extension of a Workspace
@@ -186,7 +209,7 @@ group workspaces.
 The lifetime of a workspace is finite and different filesystems (storage systems) have different
 maximum durations. A workspace can be extended multiple times, depending on the filesystem.
 
-| Filesystem (use with parameter `-F <fs>`) | Duration, days | Extensions | [Filesystem Feature](../jobs_and_resources/slurm.md#filesystem-features) | Remarks |
+| Filesystem (use with parameter `--filesystem=<fs>`) | Duration, days | Extensions | [Filesystem Feature](../jobs_and_resources/slurm.md#filesystem-features) | Remarks |
 |:-------------------------------------|---------------:|-----------:|:-------------------------------------------------------------------------|:--------|
 | `scratch` (default)                  | 100            | 10         | `fs_lustre_scratch2`                                                     | Scratch filesystem (`/lustre/scratch2`, symbolic link: `/scratch`) with high streaming bandwidth, based on spinning disks |
 | `ssd`                                | 30             | 2          | `fs_lustre_ssd`                                                          | High-IOPS filesystem (`/lustre/ssd`, symbolic link: `/ssd`) on SSDs. |
@@ -205,7 +228,7 @@ remaining extensions  : 1
 remaining time in days: 100
 ```
 
-Mail reminder settings are retained. I.e., previously set mail alerts apply to the extended
+E-mail reminder settings are retained, i.e., previously set e-mail alerts apply to the extended
 workspace, too.
 
 !!! attention
@@ -221,28 +244,57 @@ marie@login$ ws_extend -F scratch my-workspace 40
 
 it will now expire in 40 days **not** 130 days.
 
-### Send Reminder for Workspace Expiry Date
+### Send Reminder for Workspace Expiration Date
+
+We strongly recommend using one of the two provided ways to ensure that the expiration date of a
+workspace is not forgotten.
+
+#### Send Daily Reminder
 
-Send a calendar invitation by Email to ensure that the expiration date of a workspace is not
-forgotten
+An e-mail reminder can be set at workspace allocation using
 
 ```console
-marie@login$ ws_send_ical -F scratch my-workspace -m marie.testuser@tu-dresden.de
+ws_allocate --reminder=<N> --mailaddress=<mail> [...]
+```
+
+This will send an e-mail every day starting `N` days prior to the expiration date.
+See the [example above](#allocate-a-workspace) for reference.
+
+If you missed setting an e-mail reminder at workspace allocation, you can add a reminder later, e.g.
+
+```console
+marie@login$ ws_allocate --name=FancyExp --duration=17
+[...]
+marie@login$ ws_allocate --name=FancyExp --duration=17 --reminder=7 \
+             --mailaddress=marie.testuser@tu-dresden.de --extension
+```
 
+This will reallocate the workspace, which lowers your remaining number of extensions by one (note:
+no data is deleted, only the database entry is modified).
+
+#### Send Calendar Invitation
+
+The command `ws_send_ical` sends you a calendar invitation (iCalendar event) on the expiration date
+of a specified workspace. This calendar invitation can be further managed according to your
+personal preferences. The syntax is as follows:
+
+```console
+ws_send_ical --filesystem=<filesystem> --mail=<e-mail-address> --workspace=<workspace name>
+```
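+
+For example, a sketch using the placeholder values from this page (adjust them to your workspace):
+
+```console
+marie@login$ ws_send_ical --filesystem=scratch --mail=marie.testuser@tu-dresden.de --workspace=my-workspace
+```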
+
 ### Deletion of a Workspace
 
 To delete a workspace use the `ws_release` command. It is mandatory to specify the name of the
 workspace and the filesystem in which it is located:
 
 ```console
-marie@login$ ws_release -F scratch my-workspace
+marie@login$ ws_release --filesystem=scratch --name=my-workspace
 ```
 
-You can list your already released or expired workspaces using the `ws_restore -l` command.
+You can list your already released or expired workspaces using the `ws_restore --list` command.
 
 ```console
-marie@login$ ws_restore -l
+marie@login$ ws_restore --list
 warm_archive:
 scratch:
 marie-my-workspace-1665014486
@@ -257,9 +309,9 @@ beegfs:
 ```
 
 In this example, the user `marie` has three inactive, i.e., expired, workspaces namely
-`my-workspace` in `scratch`, as well as `foo` and `bar` in `ssd` filesystem. The command `ws_restore
--l` lists the name of the workspace and the expiration date. As you can see, the expiration date is
-added to the workspace name as Unix timestamp.
+`my-workspace` in `scratch`, as well as `foo` and `bar` in the `ssd` filesystem. The command
+`ws_restore --list` lists the name of the workspace and the expiration date. As you can see, the
+expiration date is added to the workspace name as a Unix timestamp.
 
 !!! hint "Deleting data in in an expired workspace"
 
@@ -275,7 +327,7 @@ It performs the following steps once per day and filesystem:
 
 - Check for the remaining lifetime of all workspaces.
   - If a workspace has expired, move it to a hidden directory so that it becomes inactive.
-- Send reminder Emails to users if the reminder functionality was configured for their particular
+- Send reminder e-mails to users if the reminder functionality was configured for their particular
   workspaces.
 - Scan through all workspaces in grace period.
   - If a workspace exceeded the grace period, the workspace and its data are deleted.
@@ -295,7 +347,7 @@ warm_archive: 2 months), you can still restore your data **into an existing work
 Use
 
 ```console
-marie@login$ ws_restore -l -F scratch
+marie@login$ ws_restore --list --filesystem=scratch
 scratch:
 marie-my-workspace-1665014486
   unavailable since Thu Oct  6 02:01:26 2022
@@ -305,12 +357,12 @@ to get a list of your expired workspaces, and then restore them like that into a
 workspace 'new_ws':
 
 ```console
-marie@login$ ws_restore -F scratch marie-my-workspace-1665014486 new_ws
+marie@login$ ws_restore --filesystem=scratch marie-my-workspace-1665014486 new_ws
 ```
 
-The expired workspace has to be specified by its full name as listed by `ws_restore -l`, including
-username prefix and timestamp suffix (otherwise, it cannot be uniquely identified). The target
-workspace, on the other hand, must be given with just its short name, as listed by `ws_list`,
+The expired workspace has to be specified by its full name as listed by `ws_restore --list`,
+including username prefix and timestamp suffix (otherwise, it cannot be uniquely identified). The
+target workspace, on the other hand, must be given with just its short name, as listed by `ws_list`,
 without the username prefix.
 
 Both workspaces must be on the same filesystem. The data from the old workspace will be moved into
@@ -555,9 +607,9 @@ wrong name. Use only the short name that is listed after `id:` when using `ws_li
 
 ----
 
-**Q**: Man, I've missed to specify mail alert when allocating my workspace. How can I add the mail
-alert functionality to an existing workspace?
+**Q**: I forgot to specify an e-mail alert when allocating my workspace. How can I add the
+e-mail alert functionality to an existing workspace?
 
-**A**: You can add the mail alert by "overwriting" the workspace settings via `ws_allocate -x -m
+**A**: You can add the e-mail alert by "overwriting" the workspace settings via `ws_allocate -x -m
 <mail address> -r <days> -n <ws-name> -d <duration> -F <fs>`. (This will lower the remaining
 extensions by one.)
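+
+For example, a sketch using the placeholder workspace from above (adjust name, duration, and
+filesystem to your own workspace):
+
+```console
+marie@login$ ws_allocate -x -m marie.testuser@tu-dresden.de -r 7 -n my-workspace -d 40 -F scratch
+```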
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/barnard_test.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/barnard_test.md
index 8a6be9f547383618b8a071c41487ff9dc731807b..92b9647f84473b848b486cb0c07c692984c5b7aa 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/barnard_test.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/barnard_test.md
@@ -17,32 +17,33 @@ Here, you can find few hints which might help you with the first steps.
 ## Login to Barnard
 
 * All users and projects from Taurus now can work on Barnard.
-
 * They can use `login[1-2].barnard.hpc.tu-dresden.de` to access the system
 from campus (or VPN). [Fingerprints](/access/key_fingerprints/#barnard)
 
 * All users have *new* home file systems, this means:
-
-    - Please create a new ssh keypair with ed25519 encryption, secured with
-    a passphrase.
-
+    - Please create a new SSH keypair of type ed25519, secured with a passphrase. Please refer to
+    the [instructions on SSH login](../../access/ssh_login#before-your-first-connection);
+    a minimal example is sketched below this list.
     - After login, add the public key to your `.ssh/authorized_keys` file
     on Barnard.
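+
+A minimal sketch for creating such a key on your local machine (the file name is just an example,
+adjust it to your liking; `ssh-keygen` will prompt you for the passphrase):
+
+```console
+marie@local$ ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_barnard
+```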
 
-## Data management
+## Data Management
 
 * The `/project` filesystem is the same on Taurus and Barnard
 (mounted read-only on the compute nodes).
+* The **new work filesystem** is `/data/horse`. The slower `/data/walrus` can be used
+to store, e.g., results. Both can be accessed via workspaces. Please refer to the
+[workspace page](../../data_lifecycle/workspaces/) if you are not familiar with workspaces. To list
+all available workspace filesystems, invoke the command `ws_list -l`.
 
-* The new work filesystem is `/data/horse`. The slower `/data/walrus` can be used
-to store e.g. results. Both can be accesed via workspaces (see `ws_list -l`).
+!!! Note
 
-**To work with your data from Taurus you might have to move/copy them to the new
-storages.**
+    **To work with your data from Taurus, you might have to move/copy them to the new storages.**
 
 For this, we have four new [datamover nodes](/data_transfer/datamover) that have mounted all storages
 of the old and new system. (Do not use the datamovers from Taurus!)
-Please use `dtinfo` to get the current mount points:
+
+Please use the command `dtinfo` to get the current mount points:
 
 ```
 marie@login1> dtinfo
@@ -66,7 +67,6 @@ directory on datamover      mounting clusters   directory on cluster
 ` /data/horse/lustre/scratch2/ws`. This replication took a **few weeks**. Ideally you
 can now just *move* their *content* to a newly created workspace. - Of course,
 everything newer than May is not there.
-
 * Please manually copy your needed data from your `beegfs` or `ssd` workspaces. These
 old storages will be purged, probably by the end of November.
 
@@ -80,7 +80,5 @@ on Taurus.
 ## Slurm
 
 * We are running the most recent Slurm version.
-
 * You must not use the old partition names.
-
 * Not all things are tested.
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview_2023.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview_2023.md
index c2f8b2b51951d0b218f7a43524fc3d1623155a39..c888857b47414e2c068cac78f9ca9804efb056b5 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview_2023.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/hardware_overview_2023.md
@@ -31,7 +31,7 @@ All clusters will have access to these shared parallel filesystems:
 ## Barnard - Intel Sapphire Rapids CPUs
 
 - 630 diskless nodes, each with
-    - 2 x XEON PLATINUM 8470 (52 cores) @ 2.50 GHz, Multithreading enabled
+    - 2 x Intel Xeon Platinum 8470 (52 cores) @ 2.00 GHz, Multithreading enabled
     - 512 GB RAM
 - Hostnames: `n[1001-1630].barnard.hpc.tu-dresden.de`
 - Login nodes: `login[1-4].barnard.hpc.tu-dresden.de`