Commit 5a35fa26
authored 22 years ago by tewk
Added locking definitions for slurmd
parent 8ca491c1
Showing 3 changed files
src/slurmd/Makefile.am  +1  −0
src/slurmd/locks.c      +164  −0
src/slurmd/locks.h      +115  −0
with 280 additions and 0 deletions
src/slurmd/Makefile.am  +1 −0

@@ -26,6 +26,7 @@ slurmd_SOURCES = slurmd.c \
 	signature_utils.c	\
 	io.c			\
 	pipes.c			\
+	locks.c			\
 	$(interconnect_lib)

 libelan_interconnect_a_SOURCES = elan_interconnect.c
src/slurmd/locks.c  (new file, mode 100644)  +164 −0
/*****************************************************************************\
* locks.c - semaphore functions for slurmd
*****************************************************************************
* Copyright (C) 2002 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Moe Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov>
* UCRL-CODE-2002-040.
*
* This file is part of SLURM, a resource management program.
* For details, see <http://www.llnl.gov/linux/slurm/>.
*
* SLURM is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with SLURM; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
\*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <sys/types.h>

#include <src/slurmd/locks.h>
#include <src/common/log.h>

pthread_mutex_t locks_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t locks_cond = PTHREAD_COND_INITIALIZER;
slurmd_lock_flags_t slurmd_locks;

void wr_rdlock (lock_datatype_t datatype);
void wr_rdunlock (lock_datatype_t datatype);
void wr_wrlock (lock_datatype_t datatype);
void wr_wrunlock (lock_datatype_t datatype);
/* init_locks - create locks used for slurmd data structure access control */
void
init_locks (void)
{
    /* just clear all semaphores */
    memset ((void *) &slurmd_locks, 0, sizeof (slurmd_locks));
}
/* lock_slurmd - Issue the required lock requests in a well defined order */
void
lock_slurmd (slurmd_lock_t lock_levels)
{
    if (lock_levels.jobs == READ_LOCK)
        wr_rdlock (JOB_LIST_LOCK);
    else if (lock_levels.jobs == WRITE_LOCK)
        wr_wrlock (JOB_LIST_LOCK);

    if (lock_levels.tasks == READ_LOCK)
        wr_rdlock (TASK_LIST_LOCK);
    else if (lock_levels.tasks == WRITE_LOCK)
        wr_wrlock (TASK_LIST_LOCK);

    if (lock_levels.credentials == READ_LOCK)
        wr_rdlock (CREDENTIAL_STATE_LOCK);
    else if (lock_levels.credentials == WRITE_LOCK)
        wr_wrlock (CREDENTIAL_STATE_LOCK);
}

/* unlock_slurmd - Issue the required unlock requests in a well defined order */
void
unlock_slurmd (slurmd_lock_t lock_levels)
{
    if (lock_levels.credentials == READ_LOCK)
        wr_rdunlock (CREDENTIAL_STATE_LOCK);
    else if (lock_levels.credentials == WRITE_LOCK)
        wr_wrunlock (CREDENTIAL_STATE_LOCK);

    if (lock_levels.tasks == READ_LOCK)
        wr_rdunlock (TASK_LIST_LOCK);
    else if (lock_levels.tasks == WRITE_LOCK)
        wr_wrunlock (TASK_LIST_LOCK);

    if (lock_levels.jobs == READ_LOCK)
        wr_rdunlock (JOB_LIST_LOCK);
    else if (lock_levels.jobs == WRITE_LOCK)
        wr_wrunlock (JOB_LIST_LOCK);
}
/* wr_rdlock - Issue a read lock on the specified data type */
void
wr_rdlock (lock_datatype_t datatype)
{
    pthread_mutex_lock (&locks_mutex);
    while (1) {
        if ((slurmd_locks.entity[write_wait_lock (datatype)] == 0) &&
            (slurmd_locks.entity[write_lock (datatype)] == 0)) {
            slurmd_locks.entity[read_lock (datatype)]++;
            break;
        } else {    /* wait for state change and retry */
            pthread_cond_wait (&locks_cond, &locks_mutex);
        }
    }
    pthread_mutex_unlock (&locks_mutex);
}

/* wr_rdunlock - Issue a read unlock on the specified data type */
void
wr_rdunlock (lock_datatype_t datatype)
{
    pthread_mutex_lock (&locks_mutex);
    slurmd_locks.entity[read_lock (datatype)]--;
    pthread_mutex_unlock (&locks_mutex);
    pthread_cond_broadcast (&locks_cond);
}

/* wr_wrlock - Issue a write lock on the specified data type */
void
wr_wrlock (lock_datatype_t datatype)
{
    pthread_mutex_lock (&locks_mutex);
    slurmd_locks.entity[write_wait_lock (datatype)]++;

    while (1) {
        if ((slurmd_locks.entity[read_lock (datatype)] == 0) &&
            (slurmd_locks.entity[write_lock (datatype)] == 0)) {
            slurmd_locks.entity[write_lock (datatype)]++;
            slurmd_locks.entity[write_wait_lock (datatype)]--;
            break;
        } else {    /* wait for state change and retry */
            pthread_cond_wait (&locks_cond, &locks_mutex);
        }
    }
    pthread_mutex_unlock (&locks_mutex);
}

/* wr_wrunlock - Issue a write unlock on the specified data type */
void
wr_wrunlock (lock_datatype_t datatype)
{
    pthread_mutex_lock (&locks_mutex);
    slurmd_locks.entity[write_lock (datatype)]--;
    pthread_mutex_unlock (&locks_mutex);
    pthread_cond_broadcast (&locks_cond);
}

/* get_lock_values - Get the current value of all locks */
void
get_lock_values (slurmd_lock_flags_t *lock_flags)
{
    if (lock_flags == NULL)
        fatal ("get_lock_values passed null pointer");

    memcpy ((void *) lock_flags, (void *) &slurmd_locks,
            sizeof (slurmd_locks));
}
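
The entity[] counters manipulated above keep, for each lock_datatype_t, a reader count, a writer flag, and a writers-waiting count, addressed through the read_lock()/write_lock()/write_wait_lock() macros from locks.h. As a minimal sketch (not part of this commit), a caller could inspect those counters through get_lock_values(), e.g. for a debug dump; the dump_locks() helper and its output format below are hypothetical, and the include path assumes the SLURM source tree layout used in this diff.

/* Illustrative only (not part of this commit): dump the per-datatype
 * reader / writer / writers-waiting counters kept in slurmd_locks. */
#include <stdio.h>

#include <src/slurmd/locks.h>

static void dump_locks (void)
{
    slurmd_lock_flags_t flags;
    int dt;

    get_lock_values (&flags);    /* copy the current counter values */
    for (dt = 0; dt < ENTITY_COUNT; dt++)
        printf ("datatype %d: readers=%d writer=%d writers_waiting=%d\n",
                dt,
                flags.entity[read_lock (dt)],
                flags.entity[write_lock (dt)],
                flags.entity[write_wait_lock (dt)]);
}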
src/slurmd/locks.h  (new file, mode 100644)  +115 −0
/*****************************************************************************\
* locks.h - definitions for semaphore functions for slurmd (locks.c)
*****************************************************************************
* Copyright (C) 2002 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Moe Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov>
* UCRL-CODE-2002-040.
*
* This file is part of SLURM, a resource management program.
* For details, see <http://www.llnl.gov/linux/slurm/>.
*
* SLURM is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with SLURM; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
\*****************************************************************************/
/*****************************************************************************\
* Read/write locks are implemented by the routines in this directory by using
* a set of three (3) UNIX semaphores to lock a resource.
*
* The set of three (3) semaphores represent a reader semaphore,
* a writer semaphore and a writers waiting semaphore.
*
* The reader semaphore indicates the number of readers that currently have a
* read lock on the resource.
* The writers semaphore indicates that a writer has the resource locked.
* The writers waiting semaphore indicates the number of writers waiting to
* lock the resource.
*
* Readers cannot lock the resource until there are no writers waiting for the
* resource and the resource is not locked by a writer.
*
* Writers cannot lock the resource if the resource is locked by other writers
* or if any readers have the resource locked.
*
* Writers will have priority in locking the resource over readers because
* of the writers waiting semaphore. The writers waiting semaphore is incremented
* by a writer that is waiting to lock the resource. A reader cannot lock
* the resource until there are no writers waiting to lock the resource and
* the resource is not locked by a writer.
*
* So, if the resource is locked by an unspecified number of readers,
* and a writer tries to lock the resource, then the writer will be blocked
* until all of the previous readers have unlocked the resource. But,
* just before the writer checked to see if there were any readers locking
* the resource, the writer incremented the writers waiting semaphore,
* indicating that there is now a writer waiting to lock the resource.
* In the mean time, if an unspecified number of readers try to lock the
* resource after a writer (or writers) has tried to lock the resource,
* those readers will be blocked until all writers have obtained the lock on
* the resource, used the resource and unlocked the resource. The subsequent
* unspecified number of readers are blocked because they are waiting for the
* number of writers waiting semaphore to become 0, meaning that there are no
* writers waiting to lock the resource.
*
* Use init_locks() to initialize the locks, then
* lock_slurmd() and unlock_slurmd() to get the ordering so as to
* prevent deadlock. The arguments indicate the lock type required for
* each entity (jobs, tasks, credentials) in a well defined order.
* For example: a read lock on the job list, no lock on the task list,
* and a write lock on the credential state would look like this:
* "{ READ_LOCK, NO_LOCK, WRITE_LOCK }"
\*****************************************************************************/
/* levels of locking required for each data structure */
typedef enum {
    NO_LOCK,
    READ_LOCK,
    WRITE_LOCK
} lock_level_t;
/* slurmd specific data structures to lock via APIs */
typedef struct {
    lock_level_t jobs;
    lock_level_t tasks;
    lock_level_t credentials;
} slurmd_lock_t;
/* Internal lock structure
* we actually use three semaphores for each data type, see macros below
* (lock_datatype_t * 3 + 0) = read_lock
* (lock_datatype_t * 3 + 1) = write_lock
* (lock_datatype_t * 3 + 2) = write_wait_lock
*/
typedef enum {
    JOB_LIST_LOCK,
    TASK_LIST_LOCK,
    CREDENTIAL_STATE_LOCK,
    ENTITY_COUNT
} lock_datatype_t;
#define read_lock(data_type) (data_type * 3 + 0)
#define write_lock(data_type) (data_type * 3 + 1)
#define write_wait_lock(data_type) (data_type * 3 + 2)
typedef struct {
    int entity[ENTITY_COUNT * 3];
} slurmd_lock_flags_t;

extern void get_lock_values (slurmd_lock_flags_t *lock_flags);
extern void init_locks (void);
extern void lock_slurmd (slurmd_lock_t lock_levels);
extern void unlock_slurmd (slurmd_lock_t lock_levels);
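
Per the usage note in the header comment above, a caller initializes the locks once with init_locks() and then brackets each access with lock_slurmd()/unlock_slurmd(), passing one lock_level_t per entity in the fixed order jobs, tasks, credentials. A minimal caller sketch follows; the job_list_scan() function and its body are hypothetical, and only the lock calls and the slurmd_lock_t initializer reflect the API added in this commit.

/* Illustrative only (not part of this commit). Assumes init_locks() was
 * called once at slurmd startup before any lock_slurmd() call. */
#include <src/slurmd/locks.h>

void job_list_scan (void)    /* hypothetical caller */
{
    /* read lock on the job list, no lock on the task list,
     * write lock on the credential state */
    slurmd_lock_t levels = { READ_LOCK, NO_LOCK, WRITE_LOCK };

    lock_slurmd (levels);
    /* ... access the job list and credential state here ... */
    unlock_slurmd (levels);
}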