Commit 2ed36ed6 authored by Danny Auble

clean up whitespace

parent b18bd04f
@@ -7,32 +7,32 @@
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Mark Grondona <mgrondona@llnl.gov>.
* CODE-OCEC-09-009. All rights reserved.
*
* This file is part of SLURM, a resource management program.
* For details, see <https://computing.llnl.gov/linux/slurm/>.
* Please also read the included file: DISCLAIMER.
*
* SLURM is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* In addition, as a special exception, the copyright holders give permission
* to link the code of portions of this program with the OpenSSL library under
* certain conditions as described in each individual source file, and
* distribute linked combinations including the two. You must obey the GNU
* General Public License in all respects for all of the code used other than
* OpenSSL. If you modify file(s) with this exception, you may extend this
* exception to your version of the file(s), but you are not obligated to do
* so. If you do not wish to do so, delete this exception statement from your
* version. If you delete this exception statement from all source files in
* the program, then also delete it here.
*
* SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with SLURM; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
@@ -114,7 +114,7 @@ _valid_gid(struct passwd *pwd, gid_t *gid)
{
struct group *grp;
int i;
if (!pwd)
return 0;
if (pwd->pw_gid == *gid)
@@ -137,8 +137,8 @@ _valid_gid(struct passwd *pwd, gid_t *gid)
return 1;
}
}
/* root user may have launched this job for this user, but
* root did not explicitly set the gid. This would set the
* gid to 0. In this case we should set the appropriate
* default gid for the user (from the passwd struct).
@@ -153,7 +153,7 @@ _valid_gid(struct passwd *pwd, gid_t *gid)
}
/* create a slurmd job structure from a launch tasks message */
slurmd_job_t *
job_create(launch_tasks_request_msg_t *msg)
{
struct passwd *pwd = NULL;
@@ -162,7 +162,7 @@ job_create(launch_tasks_request_msg_t *msg)
slurm_addr resp_addr;
slurm_addr io_addr;
int nodeid = NO_VAL;
xassert(msg != NULL);
xassert(msg->complete_nodelist != NULL);
debug3("entering job_create");
@@ -185,12 +185,12 @@ job_create(launch_tasks_request_msg_t *msg)
job->node_name = xstrdup(msg->complete_nodelist);
#endif
if(nodeid < 0) {
error("couldn't find node %s in %s",
error("couldn't find node %s in %s",
job->node_name, msg->complete_nodelist);
job_destroy(job);
return NULL;
}
job->state = SLURMSTEPD_STEP_STARTING;
job->pwd = pwd;
job->ntasks = msg->tasks_to_launch[nodeid];
@@ -201,12 +201,12 @@ job_create(launch_tasks_request_msg_t *msg)
job->job_mem = msg->job_mem;
if (job->job_mem)
jobacct_common_set_mem_limit(job->jobid, job->job_mem);
job->uid = (uid_t) msg->uid;
job->gid = (gid_t) msg->gid;
job->cwd = xstrdup(msg->cwd);
job->task_dist = msg->task_dist;
job->cpu_bind_type = msg->cpu_bind_type;
job->cpu_bind = xstrdup(msg->cpu_bind);
job->mem_bind_type = msg->mem_bind_type;
@@ -240,7 +240,7 @@ job_create(launch_tasks_request_msg_t *msg)
job->envtp->mem_bind_type = 0;
job->envtp->mem_bind = NULL;
job->envtp->ckpt_dir = NULL;
memcpy(&resp_addr, &msg->orig_addr, sizeof(slurm_addr));
slurm_set_addr(&resp_addr,
msg->resp_port[nodeid % msg->num_resp_port],
@@ -252,7 +252,7 @@ job_create(launch_tasks_request_msg_t *msg)
msg->io_port[nodeid % msg->num_io_port],
NULL);
}
srun = srun_info_create(msg->cred, &resp_addr, &io_addr);
job->buffered_stdio = msg->buffered_stdio;
@@ -278,7 +278,7 @@ job_create(launch_tasks_request_msg_t *msg)
job->open_mode = msg->open_mode;
job->options = msg->options;
job->alloc_cores = format_core_allocs(msg->cred, conf->node_name);
list_append(job->sruns, (void *) srun);
_job_init_task_info(job, msg->global_task_ids[nodeid],
@@ -293,13 +293,13 @@ job_create(launch_tasks_request_msg_t *msg)
static char *
_batchfilename(slurmd_job_t *job, const char *name)
{
if (name == NULL)
return fname_create(job, "slurm-%J.out", 0);
else
return fname_create(job, name, 0);
}
slurmd_job_t *
job_batch_job_create(batch_job_launch_msg_t *msg)
{
struct passwd *pwd;
@@ -308,7 +308,7 @@ job_batch_job_create(batch_job_launch_msg_t *msg)
char *in_name;
xassert(msg != NULL);
debug3("entering batch_job_create");
if ((pwd = _pwd_create((uid_t)msg->uid)) == NULL) {
@@ -321,13 +321,13 @@ job_batch_job_create(batch_job_launch_msg_t *msg)
_pwd_destroy(pwd);
return NULL;
}
job = xmalloc(sizeof(slurmd_job_t));
job->state = SLURMSTEPD_STEP_STARTING;
job->pwd = pwd;
job->cpus = msg->cpus_per_node[0];
job->ntasks = 1;
job->nprocs = msg->nprocs;
job->jobid = msg->job_id;
job->stepid = msg->step_id;
@@ -394,7 +394,7 @@ job_batch_job_create(batch_job_launch_msg_t *msg)
if (msg->std_in == NULL)
in_name = xstrdup("/dev/null");
else
in_name = fname_create(job, msg->std_in, 0);
job->task[0] = task_info_create(0, 0,
@@ -459,7 +459,7 @@ _job_init_task_info(slurmd_job_t *job, uint32_t *gtid,
return;
}
job->task = (slurmd_task_info_t **)
xmalloc(job->ntasks * sizeof(slurmd_task_info_t *));
for (i = 0; i < job->ntasks; i++){
@@ -488,14 +488,14 @@ job_signal_tasks(slurmd_job_t *job, int signal)
if ((job->task[n]->pid > (pid_t) 0)
&& (kill(job->task[n]->pid, signal) < 0)) {
if (errno != ESRCH) {
error("job %d.%d: kill task %d: %m",
error("job %d.%d: kill task %d: %m",
job->jobid, job->stepid, n);
}
}
}
}
void
job_destroy(slurmd_job_t *job)
{
int i;
@@ -534,7 +534,7 @@ static void
_array_free(char ***array)
{
int i = 0;
while ((*array)[i] != NULL)
xfree((*array)[i++]);
xfree(*array);
*array = NULL;
@@ -566,8 +566,8 @@ srun_info_create(slurm_cred_t *cred, slurm_addr *resp_addr, slurm_addr *ioaddr)
memcpy((void *) key->data, data, len);
if (len < SLURM_IO_KEY_SIZE)
memset( (void *) (key->data + len), 0,
SLURM_IO_KEY_SIZE - len);
}
if (ioaddr != NULL)
@@ -627,7 +627,7 @@ task_info_create(int taskid, int gtaskid,
}
static void
_task_info_destroy(slurmd_task_info_t *t, uint16_t multi_prog)
{
slurm_mutex_lock(&t->mutex);
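The comment in the `_valid_gid` hunk above explains the one non-obvious case in the excerpt: root may launch a job on a user's behalf without explicitly setting a group, so the gid arrives as 0 and should be replaced with the user's default gid from the passwd entry. Below is a minimal standalone sketch of that validation flow, reconstructed only from the fragments visible in the diff; the name `check_gid_sketch` and the `getgrgid()` supplementary-group loop are illustrative assumptions, not SLURM's exact code.

#include <grp.h>
#include <pwd.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Return 1 if *gid is acceptable for this user, possibly rewriting it.
 * Sketch only: mirrors the logic described in the _valid_gid comment. */
static int
check_gid_sketch(struct passwd *pwd, gid_t *gid)
{
	struct group *grp;
	int i;

	if (!pwd)
		return 0;
	if (pwd->pw_gid == *gid)		/* primary group matches */
		return 1;

	if ((grp = getgrgid(*gid))) {		/* assumed: membership check */
		for (i = 0; grp->gr_mem[i]; i++) {
			if (strcmp(pwd->pw_name, grp->gr_mem[i]) == 0)
				return 1;
		}
	}

	/* Root launched the job without explicitly setting the gid, so it
	 * came through as 0: fall back to the user's default gid. */
	if (*gid == 0) {
		*gid = pwd->pw_gid;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct passwd *pwd = getpwuid(getuid());
	gid_t gid = 0;	/* as if root launched without setting a group */

	if (pwd && check_gid_sketch(pwd, &gid))
		printf("gid resolved to %d\n", (int) gid);
	return 0;
}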
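Similarly, the `srun_info_create` hunk copies the credential signature into a fixed-size I/O key and zero-fills whatever remains when the signature is shorter than the key. A self-contained sketch of that copy-and-pad pattern follows; `IO_KEY_SIZE` and `fill_io_key` are stand-ins for SLURM's `SLURM_IO_KEY_SIZE` and its internal key handling, not the library's API.

#include <stdio.h>
#include <string.h>

#define IO_KEY_SIZE 32		/* stand-in for SLURM_IO_KEY_SIZE */

/* Copy up to IO_KEY_SIZE bytes of signature data into key[] and zero-pad
 * the tail so a short signature still yields a full-length key. */
static void
fill_io_key(unsigned char key[IO_KEY_SIZE], const void *data, size_t len)
{
	if (len > IO_KEY_SIZE)
		len = IO_KEY_SIZE;
	memcpy(key, data, len);
	if (len < IO_KEY_SIZE)
		memset(key + len, 0, IO_KEY_SIZE - len);
}

int main(void)
{
	unsigned char key[IO_KEY_SIZE];
	const char sig[] = "short-signature";

	fill_io_key(key, sig, sizeof(sig) - 1);
	printf("last byte is zero-padded: %02x\n", key[IO_KEY_SIZE - 1]);
	return 0;
}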