diff --git a/doc/html/heterogeneous_jobs.shtml b/doc/html/heterogeneous_jobs.shtml
index 20bfb51b0ced4cd0951f9d5eceed9c580b03ad0b..99cc61f61eb88883af5954121357c5351024a7b1 100644
--- a/doc/html/heterogeneous_jobs.shtml
+++ b/doc/html/heterogeneous_jobs.shtml
@@ -95,9 +95,10 @@ containing the line "#SBATCH packjob" as shown below.</p>
 <pre>
 $ cat new.bash
 #!/bin/bash
-#SBATCH --cpus-per-task=4 --mem-per-cpu=1 --ntasks=128
+#SBATCH --cpus-per-task=4 --mem-per-cpu=16g --ntasks=1
 #SBATCH packjob
-#SBATCH --cpus-per-task=1 --mem-per-cpu=16 --ntasks=1
+#SBATCH --cpus-per-task=2 --mem-per-cpu=1g --ntasks=8
+
 srun run.app
 
 $ sbatch new.bash
@@ -110,10 +111,14 @@ $ cat my.bash
 #!/bin/bash
 srun run.app
 
-$ sbatch --cpus-per-task=4 --mem-per-cpu=1 --ntasks=128 : \
-         --cpus-per-task=1 --mem-per-cpu=16 --ntasks=1 my.bash
+$ sbatch --cpus-per-task=4 --mem-per-cpu=16g --ntasks=1 : \
+         --cpus-per-task=2 --mem-per-cpu=1g --ntasks=8 my.bash
 </pre>
 
+<p>The batch script will be executed on the first node of the first component
+of the heterogeneous job. For the above example, that is the job component
+with 1 task, 4 CPUs and 64 GB of memory (16 GB for each of the 4 CPUs).</p>
+
 <h2><a name="managing">Managing Jobs</a></h2>
 
 <p>Information maintained in Slurm for a heterogeneous job includes:</p>
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 3b5d9a1c47719f51dba3c49e4ead718be221de32..4e0045aa4616ed4683a53b0d96ba9080eda4911c 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -3896,13 +3896,18 @@ static void _slurm_rpc_submit_batch_pack_job(slurm_msg_t *msg)
 	if (error_code != SLURM_SUCCESS)
 		goto send_msg;
 
+	/* Create new job allocations */
 	submit_job_list = list_create(NULL);
 	_throttle_start(&active_rpc_cnt);
 	lock_slurmctld(job_write_lock);
 	START_TIMER;	/* Restart after we have locks */
 	iter = list_iterator_create(job_req_list);
 	while ((job_desc_msg = (job_desc_msg_t *) list_next(iter))) {
-		/* Create new job allocation */
+		if (alloc_only && job_desc_msg->script) {
+			info("%s: Pack job %u offset %u has script, being ignored",
+			     __func__, pack_job_id, pack_job_offset);
+			xfree(job_desc_msg->script);
+		}
 		error_code = job_allocate(job_desc_msg,
 					  job_desc_msg->immediate,
 					  false, NULL, alloc_only, uid, &job_ptr,
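
A note on the proc_req.c hunk: when the request only creates an allocation (alloc_only), any pack-job component that still carries a batch script has the script logged and freed before job_allocate() is called. Below is a minimal, self-contained sketch of that guard pattern; struct component_req, its fields, and drop_unexpected_scripts() are hypothetical stand-ins for Slurm's internal job_desc_msg_t and RPC handler, and plain printf()/free() stand in for Slurm's info() and xfree().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for one pack-job component request; Slurm's real
 * job_desc_msg_t carries many more fields. */
struct component_req {
	unsigned int pack_job_id;
	unsigned int pack_job_offset;
	char *script;		/* batch script text, or NULL */
};

/*
 * Sketch of the guard added in the hunk above: when only an allocation is
 * being created (alloc_only), a component should not carry a batch script,
 * so log it and discard it.
 */
static void drop_unexpected_scripts(struct component_req *reqs, size_t count,
				    int alloc_only)
{
	for (size_t i = 0; i < count; i++) {
		if (alloc_only && reqs[i].script) {
			printf("Pack job %u offset %u has script, being ignored\n",
			       reqs[i].pack_job_id, reqs[i].pack_job_offset);
			free(reqs[i].script);
			reqs[i].script = NULL;
		}
	}
}

int main(void)
{
	/* Two components of one pack job; only the first carries a script. */
	struct component_req reqs[] = {
		{ 1234, 0, strdup("#!/bin/bash\nsrun run.app\n") },
		{ 1234, 1, NULL },
	};

	drop_unexpected_scripts(reqs, 2, /* alloc_only = */ 1);
	return 0;
}

In the real handler the check sits inside the while loop over job_req_list, so each component is examined as its allocation is created.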