From 3864f319d48d9c6503a57586dbae19f6977a7ff3 Mon Sep 17 00:00:00 2001
From: Martin Schroschk <martin.schroschk@tu-dresden.de>
Date: Thu, 24 Mar 2022 08:03:27 +0100
Subject: [PATCH] Use plugin to validate internal URLs

Further documentation can be found in mkdocs.yml.
Resolve #282
---
 .../docs/archive/bioinformatics.md            |  8 +++---
 .../docs/contrib/content_rules.md             |  8 ++++--
 .../docs/data_lifecycle/overview.md           |  2 +-
 .../docs/data_transfer/export_nodes.md        |  2 +-
 .../jobs_and_resources/checkpoint_restart.md  |  2 +-
 .../docs/jobs_and_resources/overview.md       |  7 ++---
 .../docs/software/containers.md               |  2 +-
 .../docs/software/data_analytics_with_r.md    |  4 +--
 .../docs/software/overview.md                 |  2 +-
 .../docs/software/tensorflow.md               |  2 +-
 .../docs/software/visualization.md            |  2 +-
 doc.zih.tu-dresden.de/mkdocs.yml              | 26 +++++++++++++++----
 12 files changed, 44 insertions(+), 23 deletions(-)

diff --git a/doc.zih.tu-dresden.de/docs/archive/bioinformatics.md b/doc.zih.tu-dresden.de/docs/archive/bioinformatics.md
index 0a8a33741..7d2335d93 100644
--- a/doc.zih.tu-dresden.de/docs/archive/bioinformatics.md
+++ b/doc.zih.tu-dresden.de/docs/archive/bioinformatics.md
@@ -6,10 +6,10 @@
 
 | Software                          | Module                                    |
 |-----------------------------------|-------------------------------------------|
-| **[Infernal](#Infernal)**         | infernal                                  |
-| **[OpenProspect](#OpenProspect)** | openprospect, openprospect/885-mpi        |
-| **[Phylip](#Phylip)**             | phylip                                    |
-| **[PhyML](#PhyML)**               | phyml/2.4.4, phyml/2.4.5-mpi, phyml/3.0.0 |
+| **[Infernal](#infernal)**         | infernal                                  |
+| **[OpenProspect](#openprospect)** | openprospect, openprospect/885-mpi        |
+| **[Phylip](#phylip)**             | phylip                                    |
+| **[PhyML](#phyml)**               | phyml/2.4.4, phyml/2.4.5-mpi, phyml/3.0.0 |
 
 ## Infernal
 
diff --git a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
index f054c5fa3..c4660744c 100644
--- a/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
+++ b/doc.zih.tu-dresden.de/docs/contrib/content_rules.md
@@ -130,7 +130,7 @@ We follow this rules regarding prompts:
 * **Always use a prompt**, even there is no output provided for the shown command.
 * All code blocks should use long parameter names (e.g. Slurm parameters), if available.
 * All code blocks which specify some general command templates, e.g. containing `<` and `>`
-  (see [Placeholders](#mark-placeholders)), should use `bash` for the code block. Additionally,
+  (see [Placeholders](#placeholders)), should use `bash` for the code block. Additionally,
   an example invocation, perhaps with output, should be given with the normal `console` code block.
   See also [Code Block description below](#code-blocks-and-syntax-highlighting).
 * Using some magic, the prompt as well as the output is identified and will not be copied!
@@ -157,7 +157,7 @@ bar
 
 Make sure that shell session and console code blocks are executable on the login nodes of HPC system.
 
-Command templates use [Placeholders](#mark-placeholders) to mark replaceable code parts. Command
+Command templates use [Placeholders](#placeholders) to mark replaceable code parts. Command
 templates should give a general idea of invocation and thus, do not contain any output. Use a
 `bash` code block followed by an invocation example (with `console`):
 
@@ -257,6 +257,10 @@ drwxr-xr-x   3 marie p_marie      4096 Feb 12  2020 data
 -rw-rw----   1 marie p_marie      4096 Jan 24  2020 readme.md
 ```
 
+### Placeholders
+
+TODO!
+
 ## Mark Omissions
 
 If showing only a snippet of a long output, omissions are marked with `[...]`.
diff --git a/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md b/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md
index 1b5561db6..6aca68435 100644
--- a/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md
+++ b/doc.zih.tu-dresden.de/docs/data_lifecycle/overview.md
@@ -35,7 +35,7 @@ filesystems.
 !!! hint "Recommendations to choose of storage system"
 
     * For data that seldom changes but consumes a lot of space, the
-      [warm_archive](file_systems.md#warm_archive) can be used.
+      [warm_archive](warm_archive.md) can be used.
       (Note that this is mounted **read-only** on the compute nodes).
     * For a series of calculations that works on the same data please use a `scratch` based
       [workspace](workspaces.md).
diff --git a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
index 5e4abf29b..b4a22c95e 100644
--- a/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
+++ b/doc.zih.tu-dresden.de/docs/data_transfer/export_nodes.md
@@ -137,7 +137,7 @@ the local machine.
 
 Windows 10 (1809 and higher) comes with a
 [built-in OpenSSH support](https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_overview)
-including the above described [SCP](#SCP) and [SFTP](#SFTP).
+including the above described [SCP](#scp) and [SFTP](#sftp).
 
 ### GUI - Using WinSCP
 
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
index 38d6686d7..f05e5a3dd 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/checkpoint_restart.md
@@ -49,7 +49,7 @@ make it easier to utilize DMTCP together with Slurm.
 ## Using w.r.t. Chain Jobs
 
 For long-running jobs that you wish to split into multiple shorter jobs
-([chain jobs](../jobs_and_resources/slurm.md#chain-jobs)), thereby enabling the job scheduler to
+([chain jobs](slurm_examples.md#chain-jobs)), thereby enabling the job scheduler to
 fill the cluster much more efficiently and also providing some level of fault-tolerance, we have
 written a script that automatically creates a number of jobs for your desired runtime and adds the
 checkpoint/restart bits transparently to your batch script. You just have to specify the targeted
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
index 1a96fc971..d3e9674ce 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/overview.md
@@ -6,9 +6,10 @@ research environment especially in the area of data analytics and machine learni
 processing extremely large data sets. Moreover it is also a perfect platform for highly scalable,
 data-intensive and compute-intensive applications.
 
-With shared [login nodes](#login-nodes) and [filesystems](../data_lifecycle/file_systems.md) our
-HPC system enables users to easily switch between [the components](hardware_overview.md), each
-specialized for different application scenarios.
+With shared [login nodes](hardware_overview.md#login-nodes) and
+[filesystems](../data_lifecycle/file_systems.md) our HPC system enables users to easily switch
+between [the components](hardware_overview.md), each specialized for different application
+scenarios.
 
 When log in to ZIH systems, you are placed on a login node where you can
 [manage data life cycle](../data_lifecycle/overview.md),
diff --git a/doc.zih.tu-dresden.de/docs/software/containers.md b/doc.zih.tu-dresden.de/docs/software/containers.md
index 5d96ea37f..ca9e3a14d 100644
--- a/doc.zih.tu-dresden.de/docs/software/containers.md
+++ b/doc.zih.tu-dresden.de/docs/software/containers.md
@@ -33,7 +33,7 @@ environment.
 However, new containers can be created on your local workstation and moved to ZIH systems for
 execution. Follow the instructions for [locally installing Singularity](#local-installation) and
 [container creation](#container-creation). Moreover, existing Docker container can easily be
-converted, see [Import a docker container](#importing-a-docker-container).
+converted, see [Import a docker container](#import-a-docker-container).
 
 If you are already familiar with Singularity, you might be more interested in our [singularity
 recipes and hints](singularity_recipe_hints.md).
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
index 1f6be0614..c7334d918 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_r.md
@@ -260,7 +260,7 @@ is limited to the number of cores on a single node. The maximum number of cores
 be found in our [hardware documentation](../jobs_and_resources/hardware_overview.md).
 
 Submitting a multicore R job to Slurm is very similar to submitting an
-[OpenMP Job](../jobs_and_resources/slurm.md#binding-and-distribution-of-tasks),
+[OpenMP Job](../jobs_and_resources/binding_and_distribution_of_tasks.md),
 since both are running multicore jobs on a **single** node. Below is an example:
 
 ```Bash
@@ -292,7 +292,7 @@ This way of the R parallelism uses the
 [Rmpi](http://cran.r-project.org/web/packages/Rmpi/index.html) package and the
 [MPI](https://en.wikipedia.org/wiki/Message_Passing_Interface) (Message Passing Interface) as a
 "back-end" for its parallel operations. The MPI-based job in R is very similar to submitting an
-[MPI Job](../jobs_and_resources/slurm.md#binding-and-distribution-of-tasks) since both are running
+[MPI Job](../jobs_and_resources/binding_and_distribution_of_tasks.md) since both are running
 multicore jobs on multiple nodes. Below is an example of running R script with the Rmpi on the ZIH
 system:
 
diff --git a/doc.zih.tu-dresden.de/docs/software/overview.md b/doc.zih.tu-dresden.de/docs/software/overview.md
index 9d2d86d7c..2c560c309 100644
--- a/doc.zih.tu-dresden.de/docs/software/overview.md
+++ b/doc.zih.tu-dresden.de/docs/software/overview.md
@@ -12,7 +12,7 @@ so called dotfiles in your home directory, e.g., `~/.bashrc` or `~/.bash_profile
 ## Software Environment
 
 There are different options to work with software on ZIH systems: [modules](#modules),
-[Jupyter Notebook](#jupyternotebook) and [containers](#containers). Brief descriptions and related
+[Jupyter Notebook](#jupyter-notebook) and [containers](#containers). Brief descriptions and related
 links on these options are provided below.
 
 !!! note
diff --git a/doc.zih.tu-dresden.de/docs/software/tensorflow.md b/doc.zih.tu-dresden.de/docs/software/tensorflow.md
index db6b97596..016d6aa25 100644
--- a/doc.zih.tu-dresden.de/docs/software/tensorflow.md
+++ b/doc.zih.tu-dresden.de/docs/software/tensorflow.md
@@ -96,7 +96,7 @@ the notebook by pre-loading a specific TensorFlow module:
 
     You can also define your own Jupyter kernel for more specific tasks. Please read about Jupyter
     kernels and virtual environments in our
-    [JupyterHub](../access/jupyterhub.md#creating-and-using-your-own-environment) documentation.
+    [JupyterHub](../access/jupyterhub.md#creating-and-using-a-custom-environment) documentation.
 
 ## TensorFlow in Containers
 
diff --git a/doc.zih.tu-dresden.de/docs/software/visualization.md b/doc.zih.tu-dresden.de/docs/software/visualization.md
index b1a103a0c..f9de3764e 100644
--- a/doc.zih.tu-dresden.de/docs/software/visualization.md
+++ b/doc.zih.tu-dresden.de/docs/software/visualization.md
@@ -6,7 +6,7 @@
 application. The ParaView package comprises different tools which are designed to meet interactive,
 batch and in-situ workflows.
 
-ParaView is available on ZIH systems from the [modules system](modules.md#modules-environment). The
+ParaView is available on ZIH systems from the [modules system](modules.md#module-environments). The
 following command lists the available versions
 
 ```console
diff --git a/doc.zih.tu-dresden.de/mkdocs.yml b/doc.zih.tu-dresden.de/mkdocs.yml
index 346a6ac73..4c02af0be 100644
--- a/doc.zih.tu-dresden.de/mkdocs.yml
+++ b/doc.zih.tu-dresden.de/mkdocs.yml
@@ -138,7 +138,7 @@ site_name: ZIH HPC Compendium
 site_description: ZIH HPC Compendium
 site_author: ZIH Team
 site_dir: public
-site_url: https://doc.zih.tu-dresden.de/
+site_url: https://gitlab.hrz.tu-chemnitz.de/zih/hpcsupport/hpc-compendium
 
 # uncomment next 3 lines if link to repo should not be displayed in the navbar
 
@@ -200,6 +200,26 @@ markdown_extensions:
 
           #  - mkdocs-video
 
+plugins:
+  - search
+  # https://github.com/manuzhang/mkdocs-htmlproofer-plugin
+  - htmlproofer:
+      # True by default
+      # Toggle via env. var. ENABLED_HTMLPROOFER=false
+      enabled: !ENV [ENABLED_HTMLPROOFER, true]
+      # Check URLs before serving. Build fails in case of bad URLs.
+      raise_error: true
+      # Ignore errors
+      #raise_error_excludes:
+      #  504: ['https://www.mkdocs.org/']
+      # Toggle validating external URLs
+      validate_external_urls: false
+      # Validate the entire rendered template, slow!
+      validate_rendered_template: true
+
+# Enable cross-page anchor validation
+use_directory_urls: false
+
 extra:
   tud_homepage: https://tu-dresden.de
   tud_name: "TU Dresden"
@@ -219,7 +239,3 @@ extra:
       name: "Data Protection Declaration / Datenschutzerklärung"
     - link: https://tu-dresden.de/zertifikate
       name: "Certificates"
-
-plugins:
-  - search
-  - markdown-caption
\ No newline at end of file
-- 
GitLab