diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..b09f511654852233da87ba9ccc35d29f0095004d --- /dev/null +++ b/.editorconfig @@ -0,0 +1,28 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true + +# Matches multiple files with brace expansion notation +# Set default charset +[*.{md,js,py}] +charset = utf-8 + +# 4 space indentation +[*.{md,py}] +indent_style = space +indent_size = 4 + +# Tab indentation (no size specified) +[Makefile] +indent_style = tab + +# Indentation override for all JS under lib directory +[lib/**.js] +indent_style = space +indent_size = 2 diff --git a/.gitignore b/.gitignore index ed9ec7dd5f3338e0cda169471c748dbdf5038a58..04c7fd320b19a3da2344057a2fd78ef420e71499 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *package-lock.json *package.json *node_modules -**venv/ \ No newline at end of file +**venv/ +doc.zih.tu-dresden.de/public/ diff --git a/doc.zih.tu-dresden.de/README.md b/doc.zih.tu-dresden.de/README.md index 6859e5ab8d39cbdf01bf77ba8438da49d1eb39da..31344cece97859451158faa45a172ebcacea1752 100644 --- a/doc.zih.tu-dresden.de/README.md +++ b/doc.zih.tu-dresden.de/README.md @@ -454,10 +454,8 @@ there is a list of conventions w.r.t. spelling and technical wording. * `Slurm` not `SLURM` * `Filesystem` not `file system` * `ZIH system` and `ZIH systems` not `Taurus`, `HRSKII`, `our HPC systems` etc. - -**TODO:** Put into file - -**TODO:** Implement checks [Issue #13](#13) +* `Workspace` not `work space` +* avoid term `HPC-DA` ### Code Blocks and Command Prompts diff --git a/doc.zih.tu-dresden.de/docs/accessibility.md b/doc.zih.tu-dresden.de/docs/accessibility.md new file mode 100644 index 0000000000000000000000000000000000000000..418d8a11c98be59a121a47f0d497dfce1a79aa05 --- /dev/null +++ b/doc.zih.tu-dresden.de/docs/accessibility.md @@ -0,0 +1,42 @@ +# Erklärung zur Barrierefreiheit + +Diese Erklärung zur Barrierefreiheit gilt für die unter +[https://doc.zih.tu-dresden.de](https://doc.zih.tu-dresden.de) und +[https://hpc-wiki.zih.tu-dresden.de](https://hpc-wiki.zih.tu-dresden.de) veröffentlichte Website +der Technischen Universität Dresden. +Als öffentliche Stelle im Sinne des Barrierefreie-Websites-Gesetz (BfWebG) ist die Technische +Universität Dresden bemüht, ihre Websites und mobilen Anwendungen im Einklang mit den Bestimmungen +des Barrierefreie-Websites-Gesetz (BfWebG) in Verbindung mit der +Barrierefreie-Informationstechnik-Verordnung (BITV 2.0) barrierefrei zugänglich zu machen. + +## Erstellung dieser Erklärung zur Barrierefreiheit + +Diese Erklärung wurde am 17.09.2020 erstellt und zuletzt am 17.09.2020 aktualisiert. Grundlage der +Erstellung dieser Erklärung zur Barrierefreiheit ist eine am 17.09.2020 von der TU Dresden +durchgeführte Selbstbewertung. + +## Stand der Barrierefreiheit + +Es wurde bisher noch kein BITV-Test für die Website durchgeführt. Dieser ist bis 30.11.2020 geplant. + +## Kontakt + +Sollten Ihnen Mängel in Bezug auf die barrierefreie Gestaltung auffallen, können Sie uns diese über +das Formular [Barriere melden](https://tu-dresden.de/barrierefreiheit/barriere-melden) mitteilen und +im zugänglichen Format anfordern. 
Alternativ können Sie sich direkt an die Meldestelle für Barrieren +wenden (Koordinatorin: Mandy Weickert, E-Mail: <barrieren@tu-dresden.de>, Telefon: +49 351 +463-42022, Fax: +49 351 463-42021, Besucheradresse: Nöthnitzer Straße 46, APB 1102, 01187 Dresden). + +## Durchsetzungsverfahren + +Wenn wir Ihre Rückmeldungen aus Ihrer Sicht nicht befriedigend bearbeiten, können Sie sich an die +Sächsische Durchsetzungsstelle wenden: + +Beauftragter der Sächsischen Staatsregierung für die Belange von Menschen mit Behinderungen +Albertstraße 10 +01097 Dresden +Postanschrift: Archivstraße 1, 01097 Dresden +E-Mail: <info.behindertenbeauftragter@sk.sachsen.de> +Telefon: +49 351 564-12161 +Fax: +49 351 564-12169 +Webseite: [https://www.inklusion.sachsen.de](https://www.inklusion.sachsen.de) diff --git a/doc.zih.tu-dresden.de/docs/archive/debugging_tools.md b/doc.zih.tu-dresden.de/docs/archive/debugging_tools.md deleted file mode 100644 index 0d902d2cfeb23f9ca1763df909d6746b16be81da..0000000000000000000000000000000000000000 --- a/doc.zih.tu-dresden.de/docs/archive/debugging_tools.md +++ /dev/null @@ -1,14 +0,0 @@ -# Debugging Tools - -Debugging is an essential but also rather time consuming step during application development. Tools -dramatically reduce the amount of time spent to detect errors. Besides the "classical" serial -programming errors, which may usually be easily detected with a regular debugger, there exist -programming errors that result from the usage of OpenMP, Pthreads, or MPI. These errors may also be -detected with debuggers (preferably debuggers with support for parallel applications), however, -specialized tools like MPI checking tools (e.g. Marmot) or thread checking tools (e.g. Intel Thread -Checker) can simplify this task. The following sections provide detailed information about the -different types of debugging tools: - -- [Debuggers] **todo** Debuggers -- debuggers (with and without support for parallel applications) -- [MPI Usage Error Detection] **todo** MPI Usage Error Detection -- tools to detect MPI usage errors -- [Thread Checking] **todo** Thread Checking -- tools to detect OpenMP/Pthread usage errors diff --git a/doc.zih.tu-dresden.de/docs/archive/load_leveler.md b/doc.zih.tu-dresden.de/docs/archive/load_leveler.md index fb85aaf079e6769005a461ee226f5329210feb69..07daea3dbcef9d375a57f47dbec1d0d8a27d0491 100644 --- a/doc.zih.tu-dresden.de/docs/archive/load_leveler.md +++ b/doc.zih.tu-dresden.de/docs/archive/load_leveler.md @@ -1,10 +1,17 @@ # LoadLeveler - IBM Tivoli Workload Scheduler +!!! warning + + This page is deprecated. + ## Job Submission First of all, to submit a job to LoadLeveler a job file needs to be created. This job file can be passed to the command: -`llsubmit [llsubmit_options] <job_file>` + +``` +llsubmit [llsubmit_options] <job_file> +``` ### Job File Examples @@ -29,7 +36,7 @@ An example job file may look like this: ``` This example requests a serial job with a runtime of 30 minutes and a -overall memory requirement of 1GByte. There are four groups available, +overall memory requirement of 1 GB. There are four groups available, don't forget to choose the one and only matching group. When the job completes, a mail will be sent which includes details about resource usage. 
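A minimal serial job command file matching this description might look roughly like the following sketch. The directive values (class `short`, group `triton-ww`, the `ConsumableMemory` resource syntax, the notification address, and the program name) are illustrative assumptions only; the authoritative example is the one referenced above, and values have to be adapted to the actual installation.

```
#!/bin/bash
#@ job_type         = serial
#@ class            = short
#@ group            = triton-ww
#@ wall_clock_limit = 00:30:00
#@ resources        = ConsumableMemory(1 gb)
#@ notification     = complete
#@ notify_user      = marie@tu-dresden.de
#@ output           = serial_job.out
#@ error            = serial_job.err
#@ queue

./my_serial_program
```

Such a file would then be handed to `llsubmit <job_file>` as described above.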
@@ -58,9 +65,9 @@ mpirun -x OMP_NUM_THREADS=1 -x LD_LIBRARY_PATH -np 16 ./my_mpi_program ``` This example requests a parallel job with 16 processes (2 nodes, 8 tasks -per node), a runtime of 30 minutes, 1GByte memory requirement per task -and therefore a overall memory requirement of 8GByte per node. Please -keep in mind that each node on Triton only provides 45GByte. The choice +per node), a runtime of 30 minutes, 1 GB memory requirement per task +and therefore a overall memory requirement of 8 GB per node. Please +keep in mind that each node on Triton only provides 45 GB. The choice of the correct group is also important and necessary. The `-x` option of `mpirun` exports the specified environment variables to all MPI processes. @@ -105,10 +112,10 @@ mpirun -x OMP_NUM_THREADS=8 -x LD_LIBRARY_PATH -np 4 --bynode ./my_hybrid_progra ``` This example requests a parallel job with 32 processes (4 nodes, 8 tasks -per node), a runtime of 30 minutes, 1GByte memory requirement per task -and therefore a overall memory requirement of 8GByte per node. Please -keep in mind that each node on Triton only provides 45GByte. The choice -of the correct group is also important and necessary. The mpirun command +per node), a runtime of 30 minutes, 1 GB memory requirement per task +and therefore a overall memory requirement of 8 GB per node. Please +keep in mind that each node on Triton only provides 45 GB. The choice +of the correct group is also important and necessary. The `mpirun` command starts 4 MPI processes (`--bynode` forces one process per node). `OMP_NUM_THREADS` is set to 8, so that 8 threads are started per MPI rank. When the job completes, a mail will be sent which includes details @@ -119,14 +126,14 @@ about resource usage. | Keyword | Valid values | Description | |:-------------------|:------------------------------------------------|:-------------------------------------------------------------------------------------| | `notification` | `always`, `error`, `start`, `never`, `complete` | When to write notification email. | -| `notify_user` | valid email adress | Notification email adress. | +| `notify_user` | valid email address | Notification email address. | | `output` | file name | File for stdout of the job. | | `error` | file name | File for stderr of the job. | | `job_type` | `parallel`, `serial` | Job type, default is `serial`. | | `node` | `1` - `64` | Number of nodes requested (parallel jobs only). | | `tasks_per_node` | `1` - `8` | Number of processors per node requested (parallel jobs only). | | `class` | see `llclass` | Job queue. | -| `group` | triton-ww, triton-ipf, triton-ism, triton-et | choose matching group | +| `group` | `triton-ww`, `triton-ipf`, `triton-ism`, `triton-et` | choose matching group | | `wall_clock_limit` | HH:MM:SS | Run time limit of the job. | | `resources` | `name(count)` ... `name(count)` | Specifies quantities of the consumable resources consumed by each task of a job step | @@ -139,43 +146,46 @@ description of keywords\]\]. Submission of a job without a job file can be done by the command: `llsub [llsub_options] <command>` -This command is not part of the IBM Loadleveler software but was -developed at ZIH. +This command is not part of the IBM LoadLeveler software but was developed at ZIH. -The job file will be created in background by means of the command line -options. Afterwards, the job file will be passed to the command -`llsubmit` which submit the job to LoadLeveler (see above). 
+The job file will be created in background by means of the command line options. Afterwards, the job +file will be passed to the command `llsubmit` which submit the job to LoadLeveler (see above). Important options are: -| Option | Default | Description | -|:----------------------|:---------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `-J <name>` | `llsub` | Specifies the name of the job. You can name the job using any combination of letters, numbers, or both. The job name only appears in the long reports of the llq, llstatus, and llsummary commands. | -| `-n` | `1` | Specifies the total number of tasks of a parallel job you want to run on all available nodes. | -| `-T` | not specified | Specifies the maximum number of OpenMP threads to use per process by setting the environment variable OMP_NUM_THREADS to number. | -| `--o, -oo <filename>` | `<jobname>.<hostname>.<jobid>.out` | Specifies the name of the file to use as standard output (stdout) when your job step runs. | -| `-e, -oe <filename>` | `<jobname>.<hostname>.<jobid>.err` | Specifies the name of the file to use as standard error (stderr) when your job step runs. | -| `-I` | not specified | Submits an interactive job and sends the job's standard output (or standard error) to the terminal. | -| `-q <name>` | non-interactive: `short` interactive(n`1): =interactive` interactive(n>1): `interactive_par` | Specifies the name of a job class defined locally in your cluster. You can use the llclass command to find out information on job classes. | -| `-x` | not specified | Puts the node running your job into exclusive execution mode. In exclusive execution mode, your job runs by itself on a node. It is dispatched only to a node with no other jobs running, and LoadLeveler does not send any other jobs to the node until the job completes. | -| `-hosts <number>` | automatically | Specifies the number of nodes requested by a job step. This option is equal to the bsub option -R "span\[hosts=number\]". | -| `-ptile <number>` | automatically | Specifies the number of nodes requested by a job step. This option is equal to the bsub option -R "span\[ptile=number\]". | -| `-mem <size>` | not specified | Specifies the requirement of memory which the job needs on a single node. The memory requirement is specified in MB. This option is equal to the bsub option -R "rusage\[mem=size\]". | +| Option | Default | Description | +|:----------------------|:-------------|:------------| +| `-J <name>` | `llsub` | Specifies the name of the job. You can name the job using any combination of letters, numbers, or both. The job name only appears in the long reports of the `llq`, `llstatus`, and `llsummary` commands. | +| `-n` | `1` | Specifies the total number of tasks of a parallel job you want to run on all available nodes. | +| `-T` | not specified | Specifies the maximum number of OpenMP threads to use per process by setting the environment variable `OMP_NUM_THREADS` to number. | +| `--o, -oo <filename>` | `<jobname>.<hostname>.<jobid>.out` | Specifies the name of the file to use as standard output (stdout) when your job step runs. 
| +| `-e, -oe <filename>` | `<jobname>.<hostname>.<jobid>.err` | Specifies the name of the file to use as standard error (stderr) when your job step runs. | +| `-I` | not specified | Submits an interactive job and sends the job's standard output (or standard error) to the terminal. | +| `-q <name>` | non-interactive: `short` interactive(n`1): =interactive` interactive(n>1): `interactive_par` | Specifies the name of a job class defined locally in your cluster. You can use the `llclass` command to find out information on job classes. | +| `-x` | not specified | Puts the node running your job into exclusive execution mode. In exclusive execution mode, your job runs by itself on a node. It is dispatched only to a node with no other jobs running, and LoadLeveler does not send any other jobs to the node until the job completes. | +| `-hosts <number>` | automatically | Specifies the number of nodes requested by a job step. This option is equal to the bsub option `-R "span\[hosts=number\]"`. | +| `-ptile <number>` | automatically | Specifies the number of nodes requested by a job step. This option is equal to the bsub option `-R "span\[ptile=number\]"`. | +| `-mem <size>` | not specified | Specifies the requirement of memory which the job needs on a single node. The memory requirement is specified in MB. This option is equal to the bsub option `-R "rusage\[mem=size\]"`. | The option `-H` prints the list of all available command line options. Here is an example for an MPI Job: - llsub -T 1 -n 16 -e err.txt -o out.txt mpirun -x LD_LIBRARY_PATH -np 16 ./my_program +```console +llsub -T 1 -n 16 -e err.txt -o out.txt mpirun -x LD_LIBRARY_PATH -np 16 ./my_program +``` ### Interactive Jobs Interactive Jobs can be submitted by the command: -`llsub -I -q <interactive> <command>` -### Loadleveler Runtime Environment Variables +```console +llsub -I -q <interactive> <command> +``` + +### LoadLeveler Runtime Environment Variables -Loadleveler Runtime Variables give you some information within the job +LoadLeveler runtime variables give you some information within the job script, for example: ```Bash @@ -209,8 +219,8 @@ The `llclass` command provides information about each queue. Example output: ```Bash -Name MaxJobCPU MaxProcCPU Free Max Description - d+hh:mm:ss d+hh:mm:ss Slots Slots +Name MaxJobCPU MaxProcCPU Free Max Description + d+hh:mm:ss d+hh:mm:ss Slots Slots --------------- -------------- -------------- ----- ----- --------------------- interactive undefined undefined 32 32 interactive, exclusive shared nodes, max. 12h runtime triton_ism undefined undefined 8 80 exclusive, serial + parallel queue, nodes shared, unlimited runtime @@ -226,13 +236,13 @@ short undefined undefined 272 384 serial + parallel queu ```Bash # llq -```Bash +``` #### All of One's Own Jobs ```Bash # llq -u username -```Bash +``` ### Details About Why A Job Has Not Yet Started @@ -262,14 +272,14 @@ Total number of available initiators of this class on all machines in the cluste Minimum number of initiators of this class required by job step: 32 The number of available initiators of this class is not sufficient for this job step. Not enough resources to start now. -This step is top-dog. +This step is top-dog. Considered at: Fri Jul 13 12:12:04 2007 Will start by: Tue Jul 17 18:10:32 2007 ``` ### Generate a long listing rather than the standard one -```Bash +```console # llq -l job-id ``` @@ -277,41 +287,41 @@ This command will give you detailed job information. 
### Job Status States -| | | | -|------------------|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Canceled | CA | The job has been canceled as by the llcancel command. | -| Completed | C | The job has completed. | -| Complete Pending | CP | The job is completed. Some tasks are finished. | +| | | | +|------------------|-----|----------------| +| Canceled | CA | The job has been canceled as by the `llcancel` command. | +| Completed | C | The job has completed. | +| Complete Pending | CP | The job is completed. Some tasks are finished. | | Deferred | D | The job will not be assigned until a specified date. The start date may have been specified by the user in the Job Command file or it may have been set by LoadLeveler because a parallel job could not obtain enough machines to run the job. | -| Idle | I | The job is being considered to run on a machine though no machine has been selected yet. | -| NotQueued | NQ | The job is not being considered to run. A job may enter this state due to an error in the command file or because LoadLeveler can not obtain information that it needs to act on the request. | -| Not Run | NR | The job will never run because a stated dependency in the Job Command file evaluated to be false. | -| Pending | P | The job is in the process of starting on one or more machines. The request to start the job has been sent but has not yet been acknowledged. | -| Rejected | X | The job did not start because there was a mismatch or requirements for your job and the resources on the target machine or because the user does not have a valid ID on the target machine. | -| Reject Pending | XP | The job is in the process of being rejected. | -| Removed | RM | The job was canceled by either LoadLeveler or the owner of the job. | -| Remove Pending | RP | The job is in the process of being removed. | -| Running | R | The job is running. | -| Starting | ST | The job is starting. | -| Submission Error | SX | The job can not start due to a submission error. Please notify the Bluedawg administration team if you encounter this error. | -| System Hold | S | The job has been put in hold by a system administrator. | -| System User Hold | HS | Both the user and a system administrator has put the job on hold. | -| Terminated | TX | The job was terminated, presumably by means beyond LoadLeveler's control. Please notify the Bluedawg administration team if you encounter this error. | -| User Hold | H | The job has been put on hold by the owner. | -| Vacated | V | The started job did not complete. The job will be scheduled again provided that the job may be rescheduled. | -| Vacate Pending | VP | The job is in the process of vacating. | +| Idle | I | The job is being considered to run on a machine though no machine has been selected yet. | +| NotQueued | NQ | The job is not being considered to run. A job may enter this state due to an error in the command file or because LoadLeveler can not obtain information that it needs to act on the request. | +| Not Run | NR | The job will never run because a stated dependency in the Job Command file evaluated to be false. | +| Pending | P | The job is in the process of starting on one or more machines. The request to start the job has been sent but has not yet been acknowledged. 
| +| Rejected | X | The job did not start because there was a mismatch or requirements for your job and the resources on the target machine or because the user does not have a valid ID on the target machine. | +| Reject Pending | XP | The job is in the process of being rejected. | +| Removed | RM | The job was canceled by either LoadLeveler or the owner of the job. | +| Remove Pending | RP | The job is in the process of being removed. | +| Running | R | The job is running. | +| Starting | ST | The job is starting. | +| Submission Error | SX | The job can not start due to a submission error. Please notify the Bluedawg administration team if you encounter this error. | +| System Hold | S | The job has been put in hold by a system administrator. | +| System User Hold | HS | Both the user and a system administrator has put the job on hold. | +| Terminated | TX | The job was terminated, presumably by means beyond LoadLeveler's control. Please notify the Bluedawg administration team if you encounter this error. | +| User Hold | H | The job has been put on hold by the owner. | +| Vacated | V | The started job did not complete. The job will be scheduled again provided that the job may be rescheduled. | +| Vacate Pending | VP | The job is in the process of vacating. | ## Cancel a Job ### A Particular Job -```Bash +```console # llcancel job-id ``` ### All of One's Jobs -```Bash +```console # llcancel -u username ``` @@ -319,18 +329,18 @@ This command will give you detailed job information. On each cluster, there exists a file that contains the history of all jobs run under LoadLeveler. This file is -**/var/loadl/archive/history.archive**, and may be queried using the -**llsummary** command. +`/var/loadl/archive/history.archive`, and may be queried using the +`llsummary` command. An example of usage would be as follows: -```Bash +```console # llsummary -u estrabd /var/loadl/archive/history.archive ``` And the output would look something like: -```Bash +```console Name Jobs Steps Job Cpu Starter Cpu Leverage estrabd 118 128 07:55:57 00:00:45 634.6 TOTAL 118 128 07:55:57 00:00:45 634.6 @@ -346,83 +356,85 @@ interactive 105 105 04:46:24 00:00:26 660.9 TOTAL 118 128 07:55:57 00:00:45 634.6 ``` -The **llsummary** tool has a lot of options, which are discussed in its +The `llsummary` tool has a lot of options, which are discussed in its man pages. 
## Check status of each node - # llstatus +```console +# llstatus +``` And the output would look something like: -```Bash +```console root@triton[0]:~# llstatus -Name Schedd InQ Act Startd Run LdAvg Idle Arch OpSys -n01 Avail 0 0 Idle 0 0.00 2403 AMD64 Linux2 -n02 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n03 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n04 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n05 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n06 Avail 0 0 Idle 0 0.71 9999 AMD64 Linux2 -n07 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n08 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n09 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n10 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n11 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n12 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n13 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n14 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n15 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n16 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n17 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n18 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n19 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n20 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n21 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n22 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n23 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n24 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n25 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n26 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n27 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n28 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n29 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n30 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n31 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n32 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n33 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n34 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n35 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n36 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n37 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n38 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n39 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n40 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n41 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n42 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n43 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n44 Avail 0 0 Idle 0 0.01 9999 AMD64 Linux2 -n45 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n46 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n47 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n48 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n49 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n50 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n51 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n52 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n53 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n54 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n55 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n56 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n57 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n58 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n59 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n60 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n61 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n62 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n63 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -n64 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 -triton Avail 0 0 Idle 0 0.00 585 AMD64 Linux2 +Name Schedd InQ Act Startd Run LdAvg Idle Arch OpSys +n01 Avail 0 0 Idle 0 0.00 2403 AMD64 Linux2 +n02 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n03 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n04 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n05 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n06 Avail 0 0 Idle 0 0.71 9999 AMD64 Linux2 +n07 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n08 Avail 0 0 Idle 0 0.00 9999 AMD64 
Linux2 +n09 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n10 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n11 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n12 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n13 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n14 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n15 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n16 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n17 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n18 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n19 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n20 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n21 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n22 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n23 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n24 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n25 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n26 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n27 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n28 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n29 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n30 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n31 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n32 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n33 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n34 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n35 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n36 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n37 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n38 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n39 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n40 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n41 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n42 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n43 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n44 Avail 0 0 Idle 0 0.01 9999 AMD64 Linux2 +n45 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n46 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n47 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n48 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n49 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n50 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n51 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n52 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n53 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n54 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n55 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n56 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n57 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n58 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n59 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n60 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n61 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n62 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n63 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +n64 Avail 0 0 Idle 0 0.00 9999 AMD64 Linux2 +triton Avail 0 0 Idle 0 0.00 585 AMD64 Linux2 AMD64/Linux2 65 machines 0 jobs 0 running tasks Total Machines 65 machines 0 jobs 0 running tasks @@ -436,7 +448,7 @@ All machines on the machine_list are present. Detailed status information for a specific node: -```Bash +```console # llstatus -l n54 ``` diff --git a/doc.zih.tu-dresden.de/docs/data_protection_declaration.md b/doc.zih.tu-dresden.de/docs/data_protection_declaration.md new file mode 100644 index 0000000000000000000000000000000000000000..a9b56833750cab80fdec3d9c7f838dfdcbeec840 --- /dev/null +++ b/doc.zih.tu-dresden.de/docs/data_protection_declaration.md @@ -0,0 +1,15 @@ +# Datenschutzerklärung + +Zur Bereitstellung des Dienstes werden folgende personenbeziehbaren Daten verarbeitet: IP Addresse. + +Eine Nutzung dieser Daten für andere Zwecke erfolgt nicht. Eine Speicherung dieser Daten erfolgt nur +zur Fehleranalyse. Eine Übermittlung dieser Daten an Dritte erfolgt nur, wenn dies gesetzlich +bestimmt ist. 
+ +Jeder Nutzer kann sich jederzeit an den [Datenschutzbeauftragten der TU +Dresden](https://tu-dresden.de/tu-dresden/organisation/gremien-und-beauftragte/beauftragte/datenschutzbeauftragter) +sowie an die [zuständige Aufsichtsbehörde für den Datenschutz](https://www.saechsdsb.de/) wenden. + +Weiterhin besteht die Möglichkeit jederzeit Auskunft über die zu seiner Person verarbeiteten Daten +zu verlangen und es steht eine Antwort mit der Frist von einem Monat nach Eingang des +Auskunftsersuchens zu. diff --git a/doc.zih.tu-dresden.de/docs/index.md b/doc.zih.tu-dresden.de/docs/index.md index caa05b3d9092529f86514885756dd2a5f73f7827..cc174e052a72bf6258ce4844749690ae28d7a46c 100644 --- a/doc.zih.tu-dresden.de/docs/index.md +++ b/doc.zih.tu-dresden.de/docs/index.md @@ -5,8 +5,7 @@ Dear HPC users, due to restrictions coming from data security and software incompatibilities the old "HPC Compendium" is now reachable only from inside TU Dresden campus (or via VPN). -Internal users should be redirected automatically to the -[internal IP address](http://141.76.17.11/hpc-wiki/bin/view/Compendium). +Internal users should be redirected automatically. We apologize for this severe action, but we are in the middle of the preparation for a wiki relaunch, so we do not want to redirect resources to fix technical/security issues for a system diff --git a/doc.zih.tu-dresden.de/docs/legal_notice.md b/doc.zih.tu-dresden.de/docs/legal_notice.md new file mode 100644 index 0000000000000000000000000000000000000000..3412a3a0a511d26d1a8bf8e730161622fb7930d9 --- /dev/null +++ b/doc.zih.tu-dresden.de/docs/legal_notice.md @@ -0,0 +1,24 @@ +# Legal Notice / Impressum + +Es gilt das [Impressum der TU Dresden](https://tu-dresden.de/impressum) mit folgenden Änderungen: + +## Ansprechpartner/Betreiber: + +Technische Universität Dresden +Zentrum für Informationsdienste und Hochleistungsrechnen +01062 Dresden + +Tel.: +49 351 463-40000 +Fax: +49 351 463-42328 +E-Mail: servicedesk@tu-dresden.de + +## Konzeption, Technische Umsetzung, Anbieter: + +Technische Universität Dresden +Zentrum für Informationsdienste und Hochleistungsrechnen +Prof. Dr. Wolfgang E. Nagel +01062 Dresden + +Tel.: +49 351 463-35450 +Fax: +49 351 463-37773 +E-Mail: zih@tu-dresden.de diff --git a/doc.zih.tu-dresden.de/docs/software/debuggers.md b/doc.zih.tu-dresden.de/docs/software/debuggers.md index fafb8c705f30a9e4b026d549b656aa7a0516540a..d88ca5f068f0145e8acc46407feca93a14968522 100644 --- a/doc.zih.tu-dresden.de/docs/software/debuggers.md +++ b/doc.zih.tu-dresden.de/docs/software/debuggers.md @@ -1,9 +1,16 @@ -# Debuggers +# Debugging -This section describes how to start the debuggers on the ZIH systems. +Debugging is an essential but also rather time consuming step during application development. Tools +dramatically reduce the amount of time spent to detect errors. Besides the "classical" serial +programming errors, which may usually be easily detected with a regular debugger, there exist +programming errors that result from the usage of OpenMP, Pthreads, or MPI. These errors may also be +detected with debuggers (preferably debuggers with support for parallel applications), however, +specialized tools like MPI checking tools (e.g. Marmot) or thread checking tools (e.g. Intel Thread +Checker) can simplify this task. -Detailed information about how to use the debuggers can be found on the -website of the debuggers (see below). +This page provides detailed information on classic debugging at ZIH systems. 
The more specific +topic [MPI Usage Error Detection](mpi_usage_error_detection.md) covers tools to detect MPI usage +errors. ## Overview of available Debuggers at ZIH @@ -17,30 +24,30 @@ website of the debuggers (see below). ## General Advices -- You need to compile your code with the flag `-g` to enable - debugging. This tells the compiler to include information about - variable and function names, source code lines etc. into the - executable. -- It is also recommendable to reduce or even disable optimizations - (`-O0` or gcc's `-Og`). At least inlining should be disabled (usually - `-fno-inline`). -- For parallel applications: try to reproduce the problem with less - processes or threads before using a parallel debugger. -- Use the compiler's check capabilites to find typical problems at - compile time or run time, read the manual (`man gcc`, `man ifort`, etc.) - - Intel C++ example: `icpc -g -std=c++14 -w3 -check=stack,uninit -check-pointers=rw -fp-trap=all` - - Intel Fortran example: `ifort -g -std03 -warn all -check all -fpe-all=0 -traceback` - - The flag `-traceback` of the Intel Fortran compiler causes to print - stack trace and source code location when the program terminates - abnormally. -- If your program crashes and you get an address of the failing - instruction, you can get the source code line with the command - `addr2line -e <executable> <address>` (if compiled with `-g`). -- Use [Memory Debuggers](#memory-debugging) to - verify the proper usage of memory. -- Core dumps are useful when your program crashes after a long - runtime. -- Slides from user training: [Introduction to Parallel Debugging](misc/debugging_intro.pdf) +- You need to compile your code with the flag `-g` to enable + debugging. This tells the compiler to include information about + variable and function names, source code lines etc. into the + executable. +- It is also recommendable to reduce or even disable optimizations + (`-O0` or gcc's `-Og`). At least inlining should be disabled (usually + `-fno-inline`). +- For parallel applications: try to reproduce the problem with less + processes or threads before using a parallel debugger. +- Use the compiler's check capabilities to find typical problems at + compile time or run time, read the manual (`man gcc`, `man ifort`, etc.) + - Intel C++ example: `icpc -g -std=c++14 -w3 -check=stack,uninit -check-pointers=rw -fp-trap=all` + - Intel Fortran example: `ifort -g -std03 -warn all -check all -fpe-all=0 -traceback` + - The flag `-traceback` of the Intel Fortran compiler causes to print + stack trace and source code location when the program terminates + abnormally. +- If your program crashes and you get an address of the failing + instruction, you can get the source code line with the command + `addr2line -e <executable> <address>` (if compiled with `-g`). +- Use [Memory Debuggers](#memory-debugging) to + verify the proper usage of memory. +- Core dumps are useful when your program crashes after a long + runtime. +- Slides from user training: [Introduction to Parallel Debugging](misc/debugging_intro.pdf) ## GNU Debugger (GDB) @@ -55,34 +62,28 @@ several ways: | Attach running program to GDB | `gdb --pid <process ID>` | | Open a core dump | `gdb <executable> <core file>` | -This [GDB Reference -Sheet](http://users.ece.utexas.edu/~adnan/gdb-refcard.pdf) makes life -easier when you often use GDB. +This [GDB Reference Sheet](http://users.ece.utexas.edu/~adnan/gdb-refcard.pdf) makes life easier +when you often use GDB. 
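As a quick orientation, a typical interactive session could look like the following sketch; the compiler invocation, the program name `myprog`, and the chosen breakpoint are placeholders only.

```console
marie@login$ gcc -g -Og -o myprog myprog.c
marie@login$ gdb ./myprog
(gdb) break main
(gdb) run
(gdb) backtrace
(gdb) quit
```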
-Fortran 90 programmers may issue an -`module load ddt` before their debug session. This makes the GDB -modified by DDT available, which has better support for Fortran 90 (e.g. -derived types). +Fortran 90 programmers may issue an `module load ddt` before their debug session. This makes the GDB +modified by DDT available, which has better support for Fortran 90 (e.g. derived types). ## Arm DDT  -- Intuitive graphical user interface and great support for parallel applications -- We have 1024 licences, so many user can use this tool for parallel - debugging -- Don't expect that debugging an MPI program with 100ths of process - will always work without problems - - The more processes and nodes involved, the higher is the - probability for timeouts or other problems - - Debug with as few processes as required to reproduce the bug you - want to find -- Module to load before using: `module load ddt` -- Start: `ddt <executable>` -- If the GUI runs too slow over your remote connection: - Use [WebVNC](../access/graphical_applications_with_webvnc.md) to start a remote desktop - session in a web browser. -- Slides from user training: [Parallel Debugging with DDT](misc/debugging_ddt.pdf) +- Intuitive graphical user interface and great support for parallel applications +- We have 1024 licences, so many user can use this tool for parallel debugging +- Don't expect that debugging an MPI program with 100ths of process will always work without + problems + - The more processes and nodes involved, the higher is the probability for timeouts or other + problems + - Debug with as few processes as required to reproduce the bug you want to find +- Module to load before using: `module load ddt` Start: `ddt <executable>` If the GUI runs too slow +- over your remote connection: + Use [WebVNC](../access/graphical_applications_with_webvnc.md) to start a remote desktop session in + a web browser. +- Slides from user training: [Parallel Debugging with DDT](misc/debugging_ddt.pdf) ### Serial Program Example @@ -95,9 +96,9 @@ srun: job 123456 has been allocated resources marie@compute$ ddt ./myprog ``` -- Run dialog window of DDT opens. -- Optionally: configure options like program arguments. -- Hit *Run*. +- Run dialog window of DDT opens. +- Optionally: configure options like program arguments. +- Hit *Run*. ### Multi-threaded Program Example @@ -110,10 +111,10 @@ srun: job 123457 has been allocated resources marie@compute$ ddt ./myprog ``` -- Run dialog window of DDT opens. -- Optionally: configure options like program arguments. -- If OpenMP: set number of threads. -- Hit *Run*. +- Run dialog window of DDT opens. +- Optionally: configure options like program arguments. +- If OpenMP: set number of threads. +- Hit *Run*. ### MPI-Parallel Program Example @@ -128,27 +129,27 @@ salloc: Granted job allocation 123458 marie@login$ ddt srun ./myprog ``` -- Run dialog window of DDT opens. -- If MPI-OpenMP-hybrid: set number of threads. -- Hit *Run* +- Run dialog window of DDT opens. +- If MPI-OpenMP-hybrid: set number of threads. +- Hit *Run* ## Memory Debugging -- Memory debuggers find memory management bugs, e.g. - - Use of non-initialized memory - - Access memory out of allocated bounds -- DDT has memory debugging included (needs to be enabled in the run dialog) +- Memory debuggers find memory management bugs, e.g. 
+ - Use of non-initialized memory + - Access memory out of allocated bounds +- DDT has memory debugging included (needs to be enabled in the run dialog) ### Valgrind (Memcheck) -- Simulation of the program run in a virtual machine which accurately observes memory operations. -- Extreme run time slow-down: use small program runs! -- Finds more memory errors than other debuggers. -- Further information: - - [Valgrind Website](http://www.valgrind.org) - - [Memcheck Manual](https://www.valgrind.org/docs/manual/mc-manual.html) - (explanation of output, command-line options) -- For serial or multi-threaded programs: +- Simulation of the program run in a virtual machine which accurately observes memory operations. +- Extreme run time slow-down: use small program runs! +- Finds more memory errors than other debuggers. +- Further information: + - [Valgrind Website](http://www.valgrind.org) + - [Memcheck Manual](https://www.valgrind.org/docs/manual/mc-manual.html) + (explanation of output, command-line options) +- For serial or multi-threaded programs: ```console marie@login$ module load Valgrind @@ -156,12 +157,12 @@ Module Valgrind/3.14.0-foss-2018b and 12 dependencies loaded. marie@login$ srun -n 1 valgrind ./myprog ``` -- Not recommended for MPI parallel programs, since usually the MPI library will throw - a lot of errors. But you may use valgrind the following way such that every rank - writes its own valgrind logfile: +- Not recommended for MPI parallel programs, since usually the MPI library will throw + a lot of errors. But you may use Valgrind the following way such that every rank + writes its own Valgrind logfile: ```console marie@login$ module load Valgrind Module Valgrind/3.14.0-foss-2018b and 12 dependencies loaded. -marie@login$ srun -n <number of processes> valgrind --log-file=valgrind-%p.out ./myprog +marie@login$ srun -n <number of processes> valgrind --log-file=valgrind-%p.out ./myprog ``` diff --git a/doc.zih.tu-dresden.de/docs/software/software_development_overview.md b/doc.zih.tu-dresden.de/docs/software/software_development_overview.md index 966647b4f6d7ee11f92255f3c5ceb619b2d1d647..d2dd73ed3a56bc49d31123cec65bc8694e7f0f10 100644 --- a/doc.zih.tu-dresden.de/docs/software/software_development_overview.md +++ b/doc.zih.tu-dresden.de/docs/software/software_development_overview.md @@ -37,9 +37,7 @@ Some questions you should ask yourself: Subsections: - [Compilers](compilers.md) -- [Debugging Tools](../archive/debugging_tools.md) - - [Debuggers](debuggers.md) (GDB, Allinea DDT, Totalview) - - [Tools to detect MPI usage errors](mpi_usage_error_detection.md) (MUST) +- [Debugging](debuggers.md) - PerformanceTools.md: [Score-P](scorep.md), [Vampir](vampir.md) - [Libraries](libraries.md) diff --git a/doc.zih.tu-dresden.de/mkdocs.yml b/doc.zih.tu-dresden.de/mkdocs.yml index 87e873ea84c1b381ab4dff287cbbd8c05c8e9c4d..12986242ffd6b75792de684ffd775c58759b1529 100644 --- a/doc.zih.tu-dresden.de/mkdocs.yml +++ b/doc.zih.tu-dresden.de/mkdocs.yml @@ -61,9 +61,10 @@ nav: - Building Software: software/building_software.md - GPU Programming: software/gpu_programming.md - Compilers: software/compilers.md - - Debuggers: software/debuggers.md + - Debugging: + - Overview: software/debuggers.md + - MPI Error Detection: software/mpi_usage_error_detection.md - Libraries: software/libraries.md - - MPI Error Detection: software/mpi_usage_error_detection.md - Score-P: software/scorep.md - Perf Tools: software/perf_tools.md - PIKA: software/pika.md @@ -112,7 +113,6 @@ nav: - Overview: archive/overview.md - 
Bio Informatics: archive/bioinformatics.md - CXFS End of Support: archive/cxfs_end_of_support.md - - Debugging Tools: archive/debugging_tools.md - KNL Nodes: archive/knl_nodes.md - Load Leveler: archive/load_leveler.md - Migrate to Atlas: archive/migrate_to_atlas.md @@ -179,9 +179,11 @@ extra: zih_homepage: https://tu-dresden.de/zih # links in footer footer: - - link: https://doc.zih.tu-dresden.de/hpc-wiki/bin/view/Compendium/Impressum - name: "Legal Notice" - - link: https://doc.zih.tu-dresden.de/hpc-wiki/bin/view/Compendium/Accessibility - name: "Accessibility" + - link: /legal_notice + name: "Legal Notice / Impressum" + - link: /accessibility + name: "Accessibility / Barrierefreiheit" + - link: /data_protection_declaration + name: "Data Protection Declaration / Datenschutzerklärung" - link: https://tu-dresden.de/zertifikate name: "Certificates" diff --git a/doc.zih.tu-dresden.de/util/check-spelling-changes.sh b/doc.zih.tu-dresden.de/util/check-spelling-changes.sh index b44a3fd6afb17d0585f77f52c142fffcd509f7a1..670e687ce898a613264219a6cc37bd20479da0c3 100755 --- a/doc.zih.tu-dresden.de/util/check-spelling-changes.sh +++ b/doc.zih.tu-dresden.de/util/check-spelling-changes.sh @@ -13,7 +13,7 @@ function getNumberOfAspellOutputLines(){ branch="preview" if [ -n "$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" ]; then - branch="origin/$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" + branch="origin/$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" fi any_fails=false @@ -25,30 +25,37 @@ files=`git diff $source_hash | sed -n 's/^[-+]\{3,3\} //p'` #echo "-------------------------" #Assume that we have pairs of lines (starting with --- and +++). while read oldfile; do - read newfile - if [ "${newfile: -3}" == ".md" ]; then - if [ "$oldfile" == "/dev/null" ]; then - #Added files should not introduce new spelling mistakes - previous_count=0 - else - previous_count=`git show "$source_hash:${oldfile:2}" | getNumberOfAspellOutputLines` - fi - if [ "$newfile" == "/dev/null" ]; then - #Deleted files do not contain any spelling mistakes - current_count=0 - else - #Remove the prefix "b/" - newfile=${newfile:2} - current_count=`cat "$newfile" | getNumberOfAspellOutputLines` - fi - if [ $current_count -gt $previous_count ]; then - echo "-- File $newfile" - echo "Change increases spelling mistake count (from $previous_count to $current_count)" - any_fails=true - fi + read newfile + if [ "${newfile: -3}" == ".md" ]; then + if [[ $newfile == *"accessibility.md"* || + $newfile == *"data_protection_declaration.md"* || + $newfile == *"legal_notice.md"* ]]; then + echo "Skip $newfile" + else + echo "Check $newfile" + if [ "$oldfile" == "/dev/null" ]; then + #Added files should not introduce new spelling mistakes + previous_count=0 + else + previous_count=`git show "$source_hash:${oldfile:2}" | getNumberOfAspellOutputLines` + fi + if [ "$newfile" == "/dev/null" ]; then + #Deleted files do not contain any spelling mistakes + current_count=0 + else + #Remove the prefix "b/" + newfile=${newfile:2} + current_count=`cat "$newfile" | getNumberOfAspellOutputLines` + fi + if [ $current_count -gt $previous_count ]; then + echo "-- File $newfile" + echo "Change increases spelling mistake count (from $previous_count to $current_count)" + any_fails=true + fi fi + fi done <<< "$files" if [ "$any_fails" == true ]; then - exit 1 + exit 1 fi diff --git a/doc.zih.tu-dresden.de/util/grep-forbidden-words.sh b/doc.zih.tu-dresden.de/util/grep-forbidden-words.sh index eff2fecea5c8e24f3d3d36581a9d25104d572a7e..b6d586220052a2bf362aec3c4736c876e4901da6 100755 --- 
a/doc.zih.tu-dresden.de/util/grep-forbidden-words.sh +++ b/doc.zih.tu-dresden.de/util/grep-forbidden-words.sh @@ -2,58 +2,108 @@ set -euo pipefail -branch="preview" -if [ -n "$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" ]; then - branch="origin/$CI_MERGE_REQUEST_TARGET_BRANCH_NAME" -fi +scriptpath=${BASH_SOURCE[0]} +basedir=`dirname "$scriptpath"` +basedir=`dirname "$basedir"` + +#This is the ruleset. Each line represents a rule of tab-separated fields. +#The first field represents whether the match should be case-sensitive (s) or insensitive (i). +#The second field represents the pattern that should not be contained in any file that is checked. +#Further fields represent patterns with exceptions. +#For example, the first rule says: +# The pattern \<io\> should not be present in any file (case-insensitive match), except when it appears as ".io". +ruleset="i \<io\> \.io +s \<SLURM\> +i file \+system +i \<taurus\> taurus\.hrsk /taurus +i \<hrskii\> +i hpc \+system +i hpc[ -]\+da\> +i work[ -]\+space" -any_fails=false +function grepExceptions () { + if [ $# -gt 0 ]; then + firstPattern=$1 + shift + grep -v "$firstPattern" | grepExceptions "$@" + else + cat - + fi +} -files=$(git diff --name-only "$(git merge-base HEAD "$branch")") +function usage () { + echo "$0 [options]" + echo "Search forbidden patterns in markdown files." + echo "" + echo "Options:" + echo " -a Search in all markdown files (default: git-changed files)" + echo " -s Silent mode" + echo " -h Show help message" +} + +# Options +all_files=false +silent=false +while getopts ":ahs" option; do + case $option in + a) + all_files=true + ;; + s) + silent=true + ;; + h) + usage + exit;; + \?) # Invalid option + echo "Error: Invalid option." + usage + exit;; + esac +done + +branch="origin/${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:-preview}" + +if [ $all_files = true ]; then + echo "Search in all markdown files." + files=$(git ls-tree --full-tree -r --name-only HEAD $basedir/docs/ | grep .md) +else + echo "Search in git-changed files." + files=`git diff --name-only "$(git merge-base HEAD "$branch")"` +fi + +cnt=0 for f in $files; do - if [ "$f" != doc.zih.tu-dresden.de/README.md -a "${f: -3}" == ".md" ]; then - #The following checks assume that grep signals success when it finds something, - #while it signals failure if it doesn't find something. - #We assume that we are successful if we DON'T find the pattern, - #which is the other way around, hence the "!". - - echo "Checking wording of $f: IO" - #io must be the whole word - if ! grep -n -i '\<io\>' "$f" | grep -v '\.io'; then - any_fails=true - fi - echo "Checking wording of $f: SLURM" - #SLURM must be the whole word, otherwise it might match script variables - #such as SLURM_JOB_ID - if ! grep -n '\<SLURM\>' "$f"; then - any_fails=true - fi - echo "Checking wording of $f: file system" - #arbitrary white space in between - if ! grep -n -i 'file \+system' "$f"; then - any_fails=true - fi - #check for word taurus, except when used in conjunction with .hrsk or /taurus, - #which might appear in code snippets - echo "Checking wording of $f: taurus" - if ! grep -n -i '\<taurus\>' "$f" | grep -v 'taurus\.hrsk' | grep -v '/taurus'; then - any_fails=true - fi - echo "Checking wording of $f: hrskii" - if ! grep -n -i '\<hrskii\>' "$f"; then - any_fails=true - fi - echo "Checking wording of $f: hpc system" - if ! 
grep -n -i 'hpc \+system' "$f"; then - any_fails=true + if [ "$f" != doc.zih.tu-dresden.de/README.md -a "${f: -3}" == ".md" -a -f "$f" ]; then + echo "Check wording in file $f" + while IFS=$'\t' read -r flags pattern exceptionPatterns; do + while IFS=$'\t' read -r -a exceptionPatternsArray; do + if [ $silent = false ]; then + echo " Pattern: $pattern" fi - echo "Checking wording of $f: hpc-da" - if ! grep -n -i 'hpc[ -]\+da\>' "$f"; then - any_fails=true + grepflag= + case "$flags" in + "i") + grepflag=-i + ;; + esac + if grep -n $grepflag "$pattern" "$f" | grepExceptions "${exceptionPatternsArray[@]}" ; then + ((cnt=cnt+1)) fi - fi + done <<< $exceptionPatterns + done <<< $ruleset + fi done -if [ "$any_fails" == true ]; then - exit 1 +echo "" +case $cnt in + 1) + echo "Forbidden Patterns: 1 match found" + ;; + *) + echo "Forbidden Patterns: $cnt matches found" + ;; +esac +if [ $cnt -gt 0 ]; then + exit 1 fi diff --git a/doc.zih.tu-dresden.de/wordlist.aspell b/doc.zih.tu-dresden.de/wordlist.aspell index 1c69562219d37611063561a901f0c55fb7f7056f..2ec271c6ca86b7573cfe825e6a4ba4ead4534b15 100644 --- a/doc.zih.tu-dresden.de/wordlist.aspell +++ b/doc.zih.tu-dresden.de/wordlist.aspell @@ -153,3 +153,6 @@ venv virtualenv workspace workspaces +stdout +stderr +multithreaded
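As an aside on the rewritten `grep-forbidden-words.sh` above: the exception fields of a rule are applied as chained `grep -v` filters after the forbidden pattern has matched. Replaying the `taurus` rule by hand (file name and contents invented purely for illustration) would behave like this:

```console
$ cat example.md
Connect via ssh to taurus.hrsk.tu-dresden.de
Log in to Taurus first
$ grep -n -i '\<taurus\>' example.md | grep -v 'taurus\.hrsk' | grep -v '/taurus'
2:Log in to Taurus first
```

Only the second line counts as a forbidden-word hit, because the first one is covered by the `taurus\.hrsk` exception.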