diff --git a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
index 38d198969801a913287d92ffc300b0447bfacddb..4c17864f7ca44f861f2150a31987cfbc85fefa2c 100644
--- a/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
+++ b/doc.zih.tu-dresden.de/docs/software/data_analytics_with_python.md
@@ -18,9 +18,9 @@ a research group and/or teaching class. For this purpose,
 The interactive Python interpreter can also be used on ZIH systems via an interactive job:
 
 ```console
-marie@login$ srun --partition=haswell --gres=gpu:1 --ntasks=1 --cpus-per-task=7 --pty --mem-per-cpu=8000 bash
-marie@haswell$ module load Python
-marie@haswell$ python
+marie@login$ srun --gres=gpu:1 --ntasks=1 --cpus-per-task=7 --pty --mem-per-cpu=8000 bash
+marie@compute$ module load Python
+marie@compute$ python
 Python 3.8.6 (default, Feb 17 2021, 11:48:51)
 [GCC 10.2.0] on linux
 Type "help", "copyright", "credits" or "license" for more information.
@@ -50,7 +50,7 @@ threads that can be used in parallel depends on the number of cores (parameter `
 within the Slurm request, e.g.
 
 ```console
-marie@login$ srun --partition=haswell --cpus-per-task=4 --mem=2G --hint=nomultithread --pty --time=8:00:00 bash
+marie@login$ srun --cpus-per-task=4 --mem=2G --hint=nomultithread --pty --time=8:00:00 bash
 ```
 
 The above request allows to use 4 parallel threads.
@@ -239,7 +239,7 @@ from distributed import Client
 from dask_jobqueue import SLURMCluster
 from dask import delayed
 
-cluster = SLURMCluster(queue='alpha',
+cluster = SLURMCluster(
     cores=8,
     processes=2,
     project='p_number_crunch',
@@ -294,7 +294,7 @@ for the Monte-Carlo estimation of Pi.
 
     #create a Slurm cluster, please specify your project
 
-    cluster = SLURMCluster(queue='alpha', cores=2, project='p_number_crunch', memory="8GB", walltime="00:30:00", extra=['--resources gpu=1'], scheduler_options={"dashboard_address": f":{portdash}"})
+    cluster = SLURMCluster(cores=2, project='p_number_crunch', memory="8GB", walltime="00:30:00", extra=['--resources gpu=1'], scheduler_options={"dashboard_address": f":{portdash}"})
 
     #submit the job to the scheduler with the number of nodes (here 2) requested:
 
@@ -439,7 +439,6 @@ For the multi-node case, use a script similar to this:
 ```bash
 #!/bin/bash
 #SBATCH --nodes=2
-#SBATCH --partition=ml
 #SBATCH --tasks-per-node=2
 #SBATCH --cpus-per-task=1
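
For orientation only (not part of the patch): a minimal sketch of the partition-agnostic `dask_jobqueue` setup that the changed examples now describe. The project name `p_number_crunch`, the resource sizes, and the walltime are placeholders taken from the examples above and must be replaced with your own values.

```python
# Sketch only, assuming dask-jobqueue and dask.distributed are available.
# No queue/partition is passed, matching the examples after this change,
# so Slurm falls back to its default partition for the worker jobs.
from distributed import Client
from dask_jobqueue import SLURMCluster

cluster = SLURMCluster(
    cores=8,                    # cores per Slurm job
    processes=2,                # Dask worker processes per job
    project='p_number_crunch',  # placeholder project, use your own
    memory="8GB",
    walltime="00:30:00",
)

cluster.scale(2)                # request two worker jobs from Slurm
client = Client(cluster)        # connect a client to the scheduler
```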