Commit 29fdc31b authored by Morris Jette

Clarify when batch script runs on pack job

parent 93589405
@@ -95,9 +95,10 @@ containing the line "#SBATCH packjob" as shown below.</p>
<pre>
$ cat new.bash
#!/bin/bash
-#SBATCH --cpus-per-task=4 --mem-per-cpu=1 --ntasks=128
+#SBATCH --cpus-per-task=4 --mem-per-cpu=16g --ntasks=1
#SBATCH packjob
-#SBATCH --cpus-per-task=1 --mem-per-cpu=16 --ntasks=1
+#SBATCH --cpus-per-task=2 --mem-per-cpu=1g --ntasks=8
srun run.app
$ sbatch new.bash
@@ -110,10 +111,14 @@ $ cat my.bash
#!/bin/bash
srun run.app
-$ sbatch --cpus-per-task=4 --mem-per-cpu=1 --ntasks=128 : \
-         --cpus-per-task=1 --mem-per-cpu=16 --ntasks=1 my.bash
+$ sbatch --cpus-per-task=4 --mem-per-cpu=16g --ntasks=1 : \
+         --cpus-per-task=2 --mem-per-cpu=1g --ntasks=8 my.bash
</pre>
+<p>The batch script will be executed on the first node in the first component
+of the heterogeneous job. For the above example, that will be the job component
+with 1 task, 4 CPUs and 64 GB of memory (16 GB for each of the 4 CPUs).</p>
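The script itself runs on a node of that first component, but job steps launched from it can still target either component. A minimal sketch, assuming srun's --pack-group option is available for selecting a pack job component:

<pre>
$ cat new.bash
#!/bin/bash
#SBATCH --cpus-per-task=4 --mem-per-cpu=16g --ntasks=1
#SBATCH packjob
#SBATCH --cpus-per-task=2 --mem-per-cpu=1g --ntasks=8
# The script body executes on the first node of the 1-task, 4-CPU, 64 GB
# component; --pack-group (assumed available) picks the component per step.
srun --pack-group=0 run.app    # step on the first component
srun --pack-group=1 run.app    # step on the second (8-task) component
</pre>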
<h2><a name="managing">Managing Jobs</a></h2>
<p>Information maintained in Slurm for a heterogeneous job includes:</p>
@@ -3896,13 +3896,18 @@ static void _slurm_rpc_submit_batch_pack_job(slurm_msg_t *msg)
	if (error_code != SLURM_SUCCESS)
		goto send_msg;
	/* Create new job allocations */
	submit_job_list = list_create(NULL);
	_throttle_start(&active_rpc_cnt);
	lock_slurmctld(job_write_lock);
	START_TIMER;	/* Restart after we have locks */
	iter = list_iterator_create(job_req_list);
	while ((job_desc_msg = (job_desc_msg_t *) list_next(iter))) {
		/* Create new job allocation */
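		/*
		 * A batch script runs only as part of the first pack job
		 * component; if this component is allocation-only, log and
		 * discard any script supplied with it.
		 */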
		if (alloc_only && job_desc_msg->script) {
			info("%s: Pack job %u offset %u has script, being ignored",
			     __func__, pack_job_id, pack_job_offset);
			xfree(job_desc_msg->script);
		}
		error_code = job_allocate(job_desc_msg,
					  job_desc_msg->immediate, false,
					  NULL, alloc_only, uid, &job_ptr,