From 7e489546ab1e69dac877d644f9d3d1b5d3bc8a10 Mon Sep 17 00:00:00 2001
From: Don Lipari <lipari1@llnl.gov>
Date: Tue, 14 Jul 2009 22:31:09 +0000
Subject: [PATCH]  -- Created a spank_get_item() option (S_JOB_ALLOC_MEM) that
 conveys the memory that the select/cons_res plugin has allocated to a job.
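
For reference, a remote-context SPANK plugin could query the new item roughly
as follows. This is a minimal sketch, not part of this patch: the plugin name,
callback choice, and log messages are illustrative.

    #include <slurm/spank.h>

    SPANK_PLUGIN(alloc_mem_example, 1);

    int slurm_spank_task_init(spank_t sp, int ac, char **av)
    {
            uint32_t job_mem = 0;

            /* Memory (in MB) that the select/cons_res plugin allocated
             * to this job, as conveyed by the new S_JOB_ALLOC_MEM item. */
            if (spank_get_item(sp, S_JOB_ALLOC_MEM, &job_mem)
                != ESPANK_SUCCESS)
                    slurm_error("could not get S_JOB_ALLOC_MEM");
            else
                    slurm_info("job allocated %u MB of memory", job_mem);

            return 0;
    }

S_JOB_ALLOC_MEM is appended after S_JOB_ALLOC_CORES in the spank_item enum,
so the values of all existing items are unchanged.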

---
 NEWS                   | 6 ++++--
 slurm/spank.h          | 3 ++-
 src/api/job_info.c     | 6 +++---
 src/common/plugstack.c | 4 ++++
 4 files changed, 13 insertions(+), 6 deletions(-)
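
With the src/api/job_info.c change below, the per-node allocation lines built
by slurm_sprint_job_info() (e.g. in detailed "scontrol show job" output)
should look roughly like the following, with illustrative values:

  Nodes=tux[0-1] CPUs=0-3 Mem=1024

i.e. indented two spaces and labeled "Nodes=" rather than "Node=".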

diff --git a/NEWS b/NEWS
index e38e10b4890..825008e2f5e 100644
--- a/NEWS
+++ b/NEWS
@@ -15,6 +15,8 @@ documents those changes that are of interest to users and admins.
     allocation info on a node-by-node basis.
  -- Added logic to give correct request uid for individual steps that 
     were cancelled.
+ -- Created a spank_get_item() option (S_JOB_ALLOC_MEM) that conveys the memory
+    that the select/cons_res plugin has allocated to a job.
 
 * Changes in SLURM 2.1.0-pre2
 =============================
@@ -40,8 +42,8 @@ documents those changes that are of interest to users and admins.
 -- Modify job step cancel logic for scancel and srun (on receipt of SIGTERM 
     or three SIGINT) to immediately send SIGKILL to spawned tasks.  Previous 
     logic would send SIGCONT, SIGTERM, wait KillWait seconds, SIGKILL.
- -- Create a spank_get_item() option that the SPANK cpuset plugin can use to
-    discover the cpus that the select/cons_res plugin has allocated to a job.
+ -- Created a spank_get_item() option (S_JOB_ALLOC_CORES) that conveys the CPUs
+    that the select/cons_res plugin has allocated to a job.
  -- Improve sview performance (outrageously) on very large machines.
  -- Add support for licenses in resource reservation.
  -- BLUEGENE - Jobs waiting for a block to boot will now be in Configuring
diff --git a/slurm/spank.h b/slurm/spank.h
index f187ef13ebd..4cb046d92cb 100644
--- a/slurm/spank.h
+++ b/slurm/spank.h
@@ -150,7 +150,8 @@ enum spank_item {
     S_SLURM_VERSION_MICRO,   /* Current slurm version micro release (char **) */
     S_STEP_CPUS_PER_TASK,    /* CPUs allocated per task (=1 if --overcommit
                               * option is used, uint32_t *)                   */
-    S_JOB_ALLOC_CORES        /* Allocated cores in list format	      */
+    S_JOB_ALLOC_CORES,       /* Allocated cores in list format                */
+    S_JOB_ALLOC_MEM          /* Allocated memory in MB                        */
 };
 
 typedef enum spank_item spank_item_t;
diff --git a/src/api/job_info.c b/src/api/job_info.c
index cd31c36a65c..46acabff306 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -349,8 +349,8 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 				hostlist_ranged_string(hl_last,
 					       sizeof(last_hosts), last_hosts);
 				snprintf(tmp_line, sizeof(tmp_line),
-					 "Node=%s CPUs=%s Mem=%u", last_hosts,
-					 tmp2, last_mem_alloc_ptr ?
+					 "  Nodes=%s CPUs=%s Mem=%u",
+					 last_hosts, tmp2, last_mem_alloc_ptr ?
 					 last_mem_alloc : 0);
 				xstrcat(out, tmp_line);
 				if (one_liner)
@@ -386,7 +386,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 	if (hostlist_count(hl_last)) {
 		hostlist_ranged_string(hl_last, sizeof(last_hosts), last_hosts);
 		snprintf(tmp_line, sizeof(tmp_line),
-			 "Node=%s CPUs=%s Mem=%u", last_hosts, tmp2,
+			 "  Nodes=%s CPUs=%s Mem=%u", last_hosts, tmp2,
 			 last_mem_alloc_ptr ? last_mem_alloc : 0);
 		xstrcat(out, tmp_line);
 		if (one_liner)
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index e87813f5ffe..ba27736bd3d 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -1770,6 +1770,10 @@ spank_err_t spank_get_item(spank_t spank, spank_item_t item, ...)
 		p2str = va_arg(vargs, char **);
 		*p2str = slurmd_job->alloc_cores;
 		break;
+	case S_JOB_ALLOC_MEM:
+		p2uint32 = va_arg(vargs, uint32_t *);
+		*p2uint32 = slurmd_job->job_mem;
+		break;
 	case S_SLURM_VERSION:
 		p2vers = va_arg(vargs, char  **);
 		*p2vers = SLURM_VERSION;
-- 
GitLab