diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
index b6b0f25d3963da0529f26274a3daf4bdfcb0bbe0..f9a916195ecbf814cf426beb4d26885500b3b3de 100644
--- a/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub.md
@@ -1,7 +1,7 @@
 # JupyterHub
 
 With our JupyterHub service we offer you a quick and easy way to work with Jupyter notebooks on ZIH
-systems. This page covers starting and stopping JuperterHub sessions, error handling and customizing
+systems. This page covers starting and stopping JupyterHub sessions, error handling and customizing
 the environment.
 
 We also provide a comprehensive documentation on how to use
@@ -21,7 +21,8 @@ cannot give extensive support in every case.
 
 !!! note
     This service is only available for users with an active HPC project.
-    See [here](../access/overview.md) how to apply for an HPC project.
+    See [Application for Login and Resources](../application/overview.md), if you need to apply for
+    an HPC project.
 
 JupyterHub is available at
 [https://taurus.hrsk.tu-dresden.de/jupyter](https://taurus.hrsk.tu-dresden.de/jupyter).
@@ -100,7 +101,7 @@ running the code. We currently offer one for Python, C++, MATLAB and R.
 
 ## Stop a Session
 
-It is good practise to stop your session once your work is done. This releases resources for other
+It is good practice to stop your session once your work is done. This releases resources for other
 users and your quota is less charged. If you just log out or close the window, your server continues
 running and **will not stop** until the Slurm job runtime hits the limit (usually 8 hours).
 
@@ -147,8 +148,8 @@ Useful pages for valid batch system parameters:
 
 If the connection to your notebook server unexpectedly breaks, you will get this error message.
 Sometimes your notebook server might hit a batch system or hardware limit and gets killed. Then
-usually the logfile of the corresponding batch job might contain useful information. These logfiles
-are located in your `home` directory and have the name `jupyter-session-<jobid>.log`.
+usually the log file of the corresponding batch job might contain useful information. These log
+files are located in your `home` directory and have the name `jupyter-session-<jobid>.log`.
 
 ## Advanced Tips
 
@@ -309,4 +310,4 @@ You can switch kernels of existing notebooks in the kernel menu:
 You have now the option to preload modules from the [module system](../software/modules.md).
 Select multiple modules that will be preloaded before your notebook server starts. The list of
 available modules depends on the module environment you want to start the session in (`scs5` or
-`ml`).  The right module environment will be chosen by your selected partition.
+`ml`). The right module environment will be chosen by your selected partition.
diff --git a/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md b/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
index 92ad16d1325173c384c7472658239baca3e26157..797d9fc8e455b14e40a5ec7f3737874b2ac500ae 100644
--- a/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
+++ b/doc.zih.tu-dresden.de/docs/access/jupyterhub_for_teaching.md
@@ -1,7 +1,7 @@
 # JupyterHub for Teaching
 
-On this page we want to introduce to you some useful features if you
-want to use JupyterHub for teaching.
+On this page, we want to introduce to you some useful features if you want to use JupyterHub for
+teaching.
 
 !!! note
 
@@ -9,23 +9,21 @@ want to use JupyterHub for teaching.
 
 Please be aware of the following notes:
 
-- ZIH systems operate at a lower availability level than your usual Enterprise Cloud VM. There
-  can always be downtimes, e.g. of the filesystems or the batch system.
+- ZIH systems operate at a lower availability level than your usual Enterprise Cloud VM. There can
+  always be downtimes, e.g. of the filesystems or the batch system.
 - Scheduled downtimes are announced by email. Please plan your courses accordingly.
 - Access to HPC resources is handled through projects. See your course as a project. Projects need
   to be registered beforehand (more info on the page [Access](../application/overview.md)).
 - Don't forget to [add your users](../application/project_management.md#manage-project-members-dis-enable)
-  (eg. students or tutors) to your project.
+  (e.g. students or tutors) to your project.
 - It might be a good idea to [request a reservation](../jobs_and_resources/overview.md#exclusive-reservation-of-hardware)
-  of part of the compute resources for your project/course to
-  avoid unnecessary waiting times in the batch system queue.
+  of part of the compute resources for your project/course to avoid unnecessary waiting times in
+  the batch system queue.
 
 ## Clone a Repository With a Link
 
-This feature bases on
-[nbgitpuller](https://github.com/jupyterhub/nbgitpuller).
-Documentation can be found at
-[this page](https://jupyterhub.github.io/nbgitpuller/).
+This feature is based on [nbgitpuller](https://github.com/jupyterhub/nbgitpuller). Further
+information can be found in the [external documentation about nbgitpuller](https://jupyterhub.github.io/nbgitpuller/).
 
 This extension for Jupyter notebooks can clone every public git repository into the users work
 directory. It's offering a quick way to distribute notebooks and other material to your students.
@@ -50,14 +48,14 @@ The following parameters are available:
 |---|---|
 |`repo`    | path to git repository|
 |`branch`  | branch in the repository to pull from default: `master`|
-|`urlpath` | URL to redirect the user to a certain file [more info](https://jupyterhub.github.io/nbgitpuller/topic/url-options.html#urlpath)|
+|`urlpath` | URL to redirect the user to a certain file, [more info about parameter urlpath](https://jupyterhub.github.io/nbgitpuller/topic/url-options.html#urlpath)|
 |`depth`   | clone only a certain amount of latest commits not recommended|
 
 This [link
 generator](https://jupyterhub.github.io/nbgitpuller/link?hub=https://taurus.hrsk.tu-dresden.de/jupyter/)
 might help creating those links
 
-## Spawner Options Passthrough with URL Parameters
+## Spawn Options Pass-through with URL Parameters
 
 The spawn form now offers a quick start mode by passing URL parameters.
 
diff --git a/doc.zih.tu-dresden.de/docs/application/project_request_form.md b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
index b5b9e348a94c4178d382e5ca27d67047c06f1481..e829f316cb26f11b9b9048a889c8b5e918b2e870 100644
--- a/doc.zih.tu-dresden.de/docs/application/project_request_form.md
+++ b/doc.zih.tu-dresden.de/docs/application/project_request_form.md
@@ -36,15 +36,16 @@ Any project have:
 ## Third step: Hardware
 
 ![picture 4: Hardware >](misc/request_step3_machines.png "Hardware"){loading=lazy width=300 style="float:right"}
-This step inquire the required hardware. You can find the specifications
-[here](../jobs_and_resources/hardware_overview.md).
+This step inquires the required hardware. The
+[hardware specifications](../jobs_and_resources/hardware_overview.md) might help you to estimate,
+e.g., the compute time.
 
-Please fill in the total computing time you expect in the project runtime.  The compute time is
+Please fill in the total computing time you expect in the project runtime. The compute time is
 given in cores per hour (CPU/h), this refers to the 'virtual' cores for nodes with hyperthreading.
-If they require GPUs, then this is given as GPU units per hour (GPU/h).  Please add 6 CPU hours per
+If you require GPUs, then this is given as GPU units per hour (GPU/h). Please add 6 CPU hours per
 GPU hour in your application.
 
-The project home is a shared storage in your project.  Here you exchange data or install software
+The project home is a shared storage in your project. Here you exchange data or install software
 for your project group in userspace. The directory is not intended for active calculations, for this
 the scratch is available.
 
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
index 3d342f628fc7abfeb851500d3cc6fc785d1a03e2..4ab5ca41a5a8c11d4a52c03661b5810d4d09a65d 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/alpha_centauri.md
@@ -64,7 +64,8 @@ True
 
 ### Python Virtual Environments
 
-Virtual environments allow users to install additional python packages and create an isolated
+[Virtual environments](../software/python_virtual_environments.md) allow users to install
+additional Python packages and create an isolated
 runtime environment. We recommend using `virtualenv` for this purpose.
 
 ```console
diff --git a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
index 2af016d0188ae4f926b45e7b8fdc14b039e8baa3..65e445f354d08a3473e226cc97c45ff6c01e8c48 100644
--- a/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
+++ b/doc.zih.tu-dresden.de/docs/jobs_and_resources/slurm_examples.md
@@ -58,10 +58,10 @@ For MPI-parallel jobs one typically allocates one core per task that has to be s
 ### Multiple Programs Running Simultaneously in a Job
 
 In this short example, our goal is to run four instances of a program concurrently in a **single**
-batch script. Of course we could also start a batch script four times with `sbatch` but this is not
-what we want to do here. Please have a look at
-[this subsection](#multiple-programs-running-simultaneously-in-a-job)
-in case you intend to run GPU programs simultaneously in a **single** job.
+batch script. Of course, we could also start a batch script four times with `sbatch` but this is not
+what we want to do here. However, you can also find an example about
+[how to run GPU programs simultaneously in a single job](#running-multiple-gpu-applications-simultaneously-in-a-batch-job)
+below.
 
 !!! example " "
 
@@ -355,4 +355,4 @@ file) that will be executed one after each other with different CPU numbers:
 
 ## Array-Job with Afterok-Dependency and Datamover Usage
 
-This is a *todo*
+This part is under construction.
diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics.md b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
index 44414493405bc36ffed74bb85fb805b331308af7..b4a5f7f8b9f86c9a47fec20b875970efd4d787b2 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics.md
@@ -24,7 +24,8 @@ marie@compute$ module spider <software_name>
 
 Refer to the section covering [modules](modules.md) for further information on the modules system.
 Additional software or special versions of [individual modules](custom_easy_build_environment.md)
-can be installed individually by each user. If possible, the use of virtual environments is
+can be installed individually by each user. If possible, the use of
+[virtual environments](python_virtual_environments.md) is
 recommended (e.g. for Python). Likewise, software can be used within [containers](containers.md).
 
 For the transfer of larger amounts of data into and within the system, the
diff --git a/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md b/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
index 38190764e6c9efedb275ec9ff4324d916c851566..8f61fe49fd56642aaded82cf711ca92d0035b99f 100644
--- a/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
+++ b/doc.zih.tu-dresden.de/docs/software/hyperparameter_optimization.md
@@ -270,9 +270,9 @@ This GUI guides through the configuration process and as result a configuration
 automatically according to the GUI input. If you are more familiar with using OmniOpt later on,
 this configuration file can be modified directly without using the GUI.
 
-A screenshot of the GUI, including a properly configuration for the MNIST fashion example is shown
-below. The GUI, in which the below displayed values are already entered, can be reached
-[here](https://imageseg.scads.ai/omnioptgui/?maxevalserror=5&mem_per_worker=1000&number_of_parameters=3&param_0_values=10%2C50%2C100&param_1_values=8%2C16%2C32&param_2_values=10%2C15%2C30&param_0_name=out-layer1&param_1_name=batchsize&param_2_name=batchsize&account=&projectname=mnist_fashion_optimization_set_1&partition=alpha&searchtype=tpe.suggest&param_0_type=hp.choice&param_1_type=hp.choice&param_2_type=hp.choice&max_evals=1000&objective_program=bash%20%3C%2Fpath%2Fto%2Fwrapper-script%2Frun-mnist-fashion.sh%3E%20--out-layer1%3D%28%24x_0%29%20--batchsize%3D%28%24x_1%29%20--epochs%3D%28%24x_2%29&workdir=%3C%2Fscratch%2Fws%2Fomniopt-workdir%2F%3E).
+A screenshot of
+[the GUI](https://imageseg.scads.ai/omnioptgui/?maxevalserror=5&mem_per_worker=1000&number_of_parameters=3&param_0_values=10%2C50%2C100&param_1_values=8%2C16%2C32&param_2_values=10%2C15%2C30&param_0_name=out-layer1&param_1_name=batchsize&param_2_name=batchsize&account=&projectname=mnist_fashion_optimization_set_1&partition=alpha&searchtype=tpe.suggest&param_0_type=hp.choice&param_1_type=hp.choice&param_2_type=hp.choice&max_evals=1000&objective_program=bash%20%3C%2Fpath%2Fto%2Fwrapper-script%2Frun-mnist-fashion.sh%3E%20--out-layer1%3D%28%24x_0%29%20--batchsize%3D%28%24x_1%29%20--epochs%3D%28%24x_2%29&workdir=%3C%2Fscratch%2Fws%2Fomniopt-workdir%2F%3E),
+including a proper configuration for the MNIST fashion example, is shown below.
 
 Please modify the paths for `objective program` and `workdir` according to your needs.
 
diff --git a/doc.zih.tu-dresden.de/docs/software/papi.md b/doc.zih.tu-dresden.de/docs/software/papi.md
index 9d96cc58f4453692ad7b57abe3e56abda1539290..2de80b4e8a0f420a6b42cd01a3de027b5fb89be2 100644
--- a/doc.zih.tu-dresden.de/docs/software/papi.md
+++ b/doc.zih.tu-dresden.de/docs/software/papi.md
@@ -20,8 +20,8 @@ To collect performance events, PAPI provides two APIs, the *high-level* and *low
 
 The high-level API provides the ability to record performance events inside instrumented regions of
 serial, multi-processing (MPI, SHMEM) and thread (OpenMP, Pthreads) parallel applications. It is
-designed for simplicity, not flexibility. For more details click
-[here](https://bitbucket.org/icl/papi/wiki/PAPI-HL.md).
+designed for simplicity, not flexibility. More details can be found in the
+[PAPI wiki High-Level API description](https://bitbucket.org/icl/papi/wiki/PAPI-HL.md).
 
 The following code example shows the use of the high-level API by marking a code section.
 
@@ -86,19 +86,19 @@ more output files in JSON format.
 
 ### Low-Level API
 
-The low-level API manages hardware events in user-defined groups
-called Event Sets. It is meant for experienced application programmers and tool developers wanting
-fine-grained measurement and control of the PAPI interface. It provides access to both PAPI preset
-and native events, and supports all installed components. For more details on the low-level API,
-click [here](https://bitbucket.org/icl/papi/wiki/PAPI-LL.md).
+The low-level API manages hardware events in user-defined groups called Event Sets. It is meant for
+experienced application programmers and tool developers wanting fine-grained measurement and
+control of the PAPI interface. It provides access to both PAPI preset and native events, and
+supports all installed components. The PAPI wiki also contains a page with more details on the
+[low-level API](https://bitbucket.org/icl/papi/wiki/PAPI-LL.md).
 
 ## Usage on ZIH Systems
 
 Before you start a PAPI measurement, check which events are available on the desired architecture.
-For this purpose PAPI offers the tools `papi_avail` and `papi_native_avail`. If you want to measure
+For this purpose, PAPI offers the tools `papi_avail` and `papi_native_avail`. If you want to measure
 multiple events, please check which events can be measured concurrently using the tool
-`papi_event_chooser`. For more details on the PAPI tools click
-[here](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-papi-utilities).
+`papi_event_chooser`. The PAPI wiki contains more details on
+[the PAPI tools](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-papi-utilities).
 
 !!! hint
 
@@ -133,8 +133,7 @@ compile your application against the  PAPI library.
 !!! hint
 
     The PAPI modules on ZIH systems are only installed with the default `perf_event` component. If you
-    want to measure, e.g., GPU events, you have to install your own PAPI. Instructions on how to
-    download and install PAPI can be found
-    [here](https://bitbucket.org/icl/papi/wiki/Downloading-and-Installing-PAPI.md). To install PAPI
-    with additional components, you have to specify them during configure, for details click
-    [here](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-components).
+    want to measure, e.g., GPU events, you have to install your own PAPI. Please see the
+    [external instructions on how to download and install PAPI](https://bitbucket.org/icl/papi/wiki/Downloading-and-Installing-PAPI.md).
+    To install PAPI with additional components, you have to specify them during configure as
+    described for the [Installation of Components](https://bitbucket.org/icl/papi/wiki/PAPI-Overview.md#markdown-header-components).
diff --git a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
index e19daeeb6731aa32eb993f2495e6ec443bebe2dd..67b10817c738b414a3302388b5cca3392ff96bb1 100644
--- a/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
+++ b/doc.zih.tu-dresden.de/docs/software/python_virtual_environments.md
@@ -93,8 +93,6 @@ are in the virtual environment. You can deactivate the conda environment as foll
 (conda-env) marie@compute$ conda deactivate    #Leave the virtual environment
 ```
 
-TODO: Link to this page from other DA/ML topics. insert link in alpha centauri
-
 ??? example
 
     This is an example on partition Alpha. The example creates a virtual environment, and installs
diff --git a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
index 7895f576e46e66caa9e14f3d77a74deb918fdab0..e4786c07e52177ba9a19bf7e5b571ac0d9057fb6 100755
--- a/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
+++ b/doc.zih.tu-dresden.de/util/grep-forbidden-patterns.sh
@@ -48,7 +48,7 @@ doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	\(alpha\|ml\|haswell\|romeo\|gpu\|smp\|julia\|hpdlf\|scs5\)-\?\(interactive\)\?[^a-z]*partition
 Give hints in the link text. Words such as \"here\" or \"this link\" are meaningless.
 doc.zih.tu-dresden.de/docs/contrib/content_rules.md
-i	\[\s\?\(documentation\|here\|this \(link\|page\|subsection\)\|slides\?\|manpage\)\s\?\]
+i	\[\s\?\(documentation\|here\|more info\|this \(link\|page\|subsection\)\|slides\?\|manpage\)\s\?\]
 Use \"workspace\" instead of \"work space\" or \"work-space\".
 doc.zih.tu-dresden.de/docs/contrib/content_rules.md
 i	work[ -]\+space"
diff --git a/doc.zih.tu-dresden.de/wordlist.aspell b/doc.zih.tu-dresden.de/wordlist.aspell
index 443647e74a9cc4a7e17e92f381c914de04e1b0f3..73af7da3010a0570c99b148f180440c20f8277cd 100644
--- a/doc.zih.tu-dresden.de/wordlist.aspell
+++ b/doc.zih.tu-dresden.de/wordlist.aspell
@@ -65,6 +65,8 @@ DockerHub
 dockerized
 dotfile
 dotfiles
+downtime
+downtimes
 EasyBuild
 ecryptfs
 engl
@@ -142,6 +144,7 @@ Itanium
 jobqueue
 jpg
 jss
+jupyter
 Jupyter
 JupyterHub
 JupyterLab
@@ -194,6 +197,7 @@ multithreaded
 Multithreading
 NAMD
 natively
+nbgitpuller
 nbsp
 NCCL
 Neptun
@@ -260,6 +264,8 @@ pytorch
 PyTorch
 Quantum
 queue
+quickstart
+Quickstart
 randint
 reachability
 README