Commit b2072b90
Authored 12 years ago by Danny Auble

Merge remote-tracking branch 'origin/slurm-2.5'

Parents: caa974ae b50e2269
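This merge brings the BLUEGENE QOS/Association node-limit fix from the slurm-2.5 branch: on BlueGene builds, _adjust_limit_usage() in slurmctld now takes the job's node count from the select plugin (SELECT_JOBDATA_NODE_CNT) rather than job_ptr->node_cnt, and uses that count consistently when charging and releasing QOS and association node usage.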
Showing 2 changed files with 22 additions and 7 deletions:

  NEWS                          +1 −0
  src/slurmctld/acct_policy.c  +21 −7
NEWS  +1 −0

@@ -29,6 +29,7 @@ documents those changes that are of interest to users and admins.
 * Changes in SLURM 2.5.2
 ========================
  -- Fix advanced reservation recovery logic when upgrading from version 2.4.
+ -- BLUEGENE - fix for QOS/Association node limits.
 * Changes in SLURM 2.5.1
 ========================
src/slurmctld/acct_policy.c  +21 −7

@@ -47,6 +47,7 @@
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/acct_policy.h"
+#include "src/common/node_select.h"

 #define _DEBUG 0
@@ -131,10 +132,23 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 	uint64_t unused_cpu_run_secs = 0;
 	uint64_t used_cpu_run_secs = 0;
 	uint32_t job_memory = 0;
+	uint32_t node_cnt;

 	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)
 	    || !_valid_job_assoc(job_ptr))
 		return;
+#ifdef HAVE_BG
+	xassert(job_ptr->select_jobinfo);
+	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
+				    SELECT_JOBDATA_NODE_CNT, &node_cnt);
+	if (node_cnt == NO_VAL) {
+		/* This should never happen */
+		node_cnt = job_ptr->node_cnt;
+		error("node_cnt not available at %s:%d\n", __FILE__, __LINE__);
+	}
+#else
+	node_cnt = job_ptr->node_cnt;
+#endif

 	if (type == ACCT_POLICY_JOB_FINI)
 		unused_cpu_run_secs = _get_unused_cpu_run_secs(job_ptr);
@@ -152,7 +166,7 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 			   job_memory);
 	} else {
 		job_memory = (job_ptr->details->pn_min_memory)
-			* job_ptr->node_cnt;
+			* node_cnt;
 		debug2("_adjust_limit_usage: job %u: MPN: "
 		       "job_memory set to %u",
 		       job_ptr->job_id, job_memory);
@@ -202,12 +216,12 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 		qos_ptr->usage->grp_used_jobs++;
 		qos_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
 		qos_ptr->usage->grp_used_mem += job_memory;
-		qos_ptr->usage->grp_used_nodes += job_ptr->node_cnt;
+		qos_ptr->usage->grp_used_nodes += node_cnt;
 		qos_ptr->usage->grp_used_cpu_run_secs += used_cpu_run_secs;
 		used_limits->jobs++;
 		used_limits->cpus += job_ptr->total_cpus;
-		used_limits->nodes += job_ptr->node_cnt;
+		used_limits->nodes += node_cnt;
 		break;
 	case ACCT_POLICY_JOB_FINI:
@@ -232,7 +246,7 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 			       "underflow for qos %s",
 			       qos_ptr->name);
 		}
-		qos_ptr->usage->grp_used_nodes -= job_ptr->node_cnt;
+		qos_ptr->usage->grp_used_nodes -= node_cnt;
 		if ((int32_t)qos_ptr->usage->grp_used_nodes < 0) {
 			qos_ptr->usage->grp_used_nodes = 0;
 			debug2("acct_policy_job_fini: grp_used_nodes "
@@ -268,7 +282,7 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 				       qos_ptr->name, used_limits->uid);
 			}
-			used_limits->nodes -= job_ptr->node_cnt;
+			used_limits->nodes -= node_cnt;
 			if ((int32_t)used_limits->nodes < 0) {
 				used_limits->nodes = 0;
 				debug2("acct_policy_job_fini: "
@@ -303,7 +317,7 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 		assoc_ptr->usage->used_jobs++;
 		assoc_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
 		assoc_ptr->usage->grp_used_mem += job_memory;
-		assoc_ptr->usage->grp_used_nodes += job_ptr->node_cnt;
+		assoc_ptr->usage->grp_used_nodes += node_cnt;
 		assoc_ptr->usage->grp_used_cpu_run_secs += used_cpu_run_secs;
 		debug4("acct_policy_job_begin: after adding job %i, "
@@ -335,7 +349,7 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 			       assoc_ptr->acct);
 		}
-		assoc_ptr->usage->grp_used_nodes -= job_ptr->node_cnt;
+		assoc_ptr->usage->grp_used_nodes -= node_cnt;
 		if ((int32_t)assoc_ptr->usage->grp_used_nodes < 0) {
 			assoc_ptr->usage->grp_used_nodes = 0;
 			debug2("acct_policy_job_fini: grp_used_nodes "
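The substance of the change is visible across the hunks: _adjust_limit_usage() now resolves node_cnt once up front (from the select plugin on BlueGene builds, falling back to job_ptr->node_cnt) and uses that single value on both the job-begin increment paths and the job-fini decrement paths. Below is a minimal standalone C sketch of that pattern. It is not Slurm code: plugin_node_cnt(), resolve_node_cnt(), and usage_t are hypothetical stand-ins for the select-plugin query and the QOS/association usage structures, with NO_VAL mirroring Slurm's "value not set" sentinel.

	#include <stdint.h>
	#include <stdio.h>

	#define NO_VAL ((uint32_t)0xffffffff)	/* sentinel: value not set */

	typedef struct {
		uint32_t grp_used_nodes;	/* stand-in for qos/assoc usage */
	} usage_t;

	/* Stand-in for select_g_select_jobinfo_get(): on BlueGene-style
	 * systems the select plugin, not job_ptr->node_cnt, knows the
	 * node count to charge against limits. */
	static uint32_t plugin_node_cnt(int plugin_knows)
	{
		return plugin_knows ? 4 : NO_VAL;
	}

	/* Resolve the count once, with the same fallback the patch uses
	 * in its "should never happen" branch. */
	static uint32_t resolve_node_cnt(uint32_t job_node_cnt, int plugin_knows)
	{
		uint32_t node_cnt = plugin_node_cnt(plugin_knows);
		if (node_cnt == NO_VAL)
			node_cnt = job_node_cnt;
		return node_cnt;
	}

	int main(void)
	{
		usage_t usage = { 0 };
		uint32_t job_node_cnt = 512;	/* e.g. raw c-node count */

		/* Job begin and job fini must use the same resolved count. */
		uint32_t node_cnt = resolve_node_cnt(job_node_cnt, 1);
		usage.grp_used_nodes += node_cnt;	/* job begin */
		usage.grp_used_nodes -= node_cnt;	/* job fini  */

		/* Underflow guard, as in the patched decrement paths. */
		if ((int32_t)usage.grp_used_nodes < 0)
			usage.grp_used_nodes = 0;

		printf("grp_used_nodes after begin+fini: %u\n",
		       usage.grp_used_nodes);
		return 0;
	}

Charging and releasing with one resolved count is the point of the fix: if job begin added the plugin's count while job fini subtracted job_ptr->node_cnt (or vice versa), grp_used_nodes would drift across jobs, which is precisely the condition the (int32_t) underflow guards in the patched code clamp to zero.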