From d48e0f4fca050d90e93b34da1dcdd2eff54b919a Mon Sep 17 00:00:00 2001
From: Moe Jette <jette1@llnl.gov>
Date: Tue, 19 Jul 2005 16:57:59 +0000
Subject: [PATCH] Fix bugs in job accounting: info collection for single-task
 jobs and memory size computation (Andy Riebs/HP).
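
With a single task per node, _process_mynode_msg_taskdata used to skip
aggregation and forward the record straight to node 0; that shortcut is
removed so one-task steps follow the same aggregation path as everything
else.  In the precTable tally, a process that has already finished can sum
to a vsize of 0, which looks the same as the process never having been
found, so the total is bumped to 1 as a "found, but already finished"
flag.  On the controller side, job_allocate now writes the accounting
start record for jobs left queued on ESLURM_NODES_BUSY, and no longer for
will_run test requests.

A rough sketch of the tally logic, for illustration only (prec_t, tally()
and the ppid field are stand-ins, not the plugin's real declarations):

  /* stand-in record type, loosely modeled on the precTable entries */
  typedef struct {
          int  ppid;   /* parent pid used to match our tasks */
          long psize;  /* physical memory size */
          long vsize;  /* virtual memory size */
  } prec_t;

  static void tally(const prec_t *tab, int nprecs, int pid,
                    long *psize, long *vsize)
  {
          *psize = *vsize = 0;
          for (int i = 0; i < nprecs; i++) {
                  if (tab[i].ppid != pid)
                          continue;
                  *psize += tab[i].psize;
                  *vsize += tab[i].vsize;
                  if (*vsize == 0)
                          *vsize = 1;  /* flag: found, already finished */
          }
  }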

---
 src/plugins/jobacct/log/jobacct_log.c | 10 +++-------
 src/slurmctld/job_mgr.c               |  9 +++++----
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/src/plugins/jobacct/log/jobacct_log.c b/src/plugins/jobacct/log/jobacct_log.c
index bce331b585a..e6dc5d16912 100644
--- a/src/plugins/jobacct/log/jobacct_log.c
+++ b/src/plugins/jobacct/log/jobacct_log.c
@@ -1018,6 +1018,9 @@ NextSlashProcEntry: ;
 				/* tally their memory usage */
 			psize += precTable[i].psize;
 			vsize += precTable[i].vsize;
+			if (vsize == 0)
+				vsize = 1; /* Flag to let us know we found it,
+					      though it is already finished */
 		}
 	}
 	if (max_psize < psize)
@@ -1173,13 +1176,6 @@ static void _process_mynode_msg_taskdata(_jrec_t *inrec){
 	debug2("jobacct(%d): in _process_mynode_msg_taskdata for job %u.%u"
 			" ntasks=%d",
 			getpid(), inrec->jobid, inrec->stepid, inrec->ntasks); 
-	if (inrec->ntasks == 1) {    /* if only one task, skip aggregation */
-		debug3("jobacct(%d): _process_mynode_msg_taskdata "
-				"skipping aggregation for job %u.%u",
-				getpid(), inrec->jobid, inrec->stepid);
-		_send_data_to_node_0(inrec);
-		return;
-	}
 	slurm_mutex_lock(&jobsteps_active_lock);
 	jrec = _get_jrec_by_jobstep(jobsteps_active, inrec->jobid,
 			inrec->stepid);
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index cd6327eeff9..125998fde38 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -1334,8 +1334,10 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate, int will_run,
 			job_ptr->start_time = job_ptr->end_time = time(NULL);
 			job_completion_logger(job_ptr);
 		} else		/* job remains queued */
-			if (error_code == ESLURM_NODES_BUSY) 
+			if (error_code == ESLURM_NODES_BUSY) {
 				error_code = SLURM_SUCCESS;
+				g_slurmctld_jobacct_job_start(job_ptr);
+			}
 		return error_code;
 	}
 
@@ -1349,9 +1351,8 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate, int will_run,
 	if (will_run) {		/* job would run, flag job destruction */
 		job_ptr->job_state  = JOB_FAILED;
 		job_ptr->start_time = job_ptr->end_time = time(NULL);
-	}
-
-	g_slurmctld_jobacct_job_start(job_ptr);
+	} else
+		g_slurmctld_jobacct_job_start(job_ptr);
 	return SLURM_SUCCESS;
 }
 
-- 
GitLab