Skip to content
Snippets Groups Projects
Commit b5231790 authored by Moe Jette's avatar Moe Jette
Browse files

Initial draft of read/write lock functions for slurmctld.

parent 854423de
No related branches found
No related tags found
No related merge requests found
......@@ -24,10 +24,12 @@ noinst_PROGRAMS = $(test_modules)
LDADD = $(top_srcdir)/src/common/libcommon.la
slurmctld_SOURCES = \
locks.h \
slurmctld.h \
controller.c \
job_mgr.c \
job_scheduler.c \
locks.c \
node_mgr.c \
node_scheduler.c\
partition_mgr.c \
......
/*****************************************************************************\
* locks.c - semaphore functions for slurmctld
*****************************************************************************
* Copyright (C) 2002 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Moe Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov>
* UCRL-CODE-2002-040.
*
* This file is part of SLURM, a resource management program.
* For details, see <http://www.llnl.gov/linux/slurm/>.
*
* SLURM is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with SLURM; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
\*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <errno.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <src/slurmctld/locks.h>
#include <src/slurmctld/slurmctld.h>
/* available data structure locks
 * we actually use three semaphores for each, see macros below
 * (lock_datatype_t * 3 + 0) = read_lock
 * (lock_datatype_t * 3 + 1) = write_lock
 * (lock_datatype_t * 3 + 2) = write_wait_lock
 */
typedef enum {
	CONFIG_LOCK,
	JOB_LOCK,
	NODE_LOCK,
	PART_LOCK,
	COUNT_OF_LOCKS		/* must be last: gives the entry count */
} lock_datatype_t;

/* Map a datatype onto its three semaphore numbers within the set.
 * The argument is parenthesized so the macros remain correct if ever
 * invoked with an expression (e.g. read_lock (type + 1)). */
#define read_lock(lock_type)		((lock_type) * 3 + 0)
#define write_lock(lock_type)		((lock_type) * 3 + 1)
#define write_wait_lock(lock_type)	((lock_type) * 3 + 2)
/* ID of the System V semaphore set holding all locks; -1 until
 * init_locks() creates it */
int sem_id = -1; /* semaphore ID */

/* internal read/write lock primitives, one datatype at a time */
void wr_rdlock (lock_datatype_t datatype);
void wr_rdunlock (lock_datatype_t datatype);
void wr_wrlock (lock_datatype_t datatype);
void wr_wrunlock (lock_datatype_t datatype);
/* init_locks - create the semaphore set used for slurmctld data structure
 * access control. Idempotent: a second call is a no-op once the set exists.
 * Calls fatal() (no return) if the set cannot be created. */
void
init_locks ( )
{
	if (sem_id == -1)
		/* IPC_CREAT alone supplies mode 0000, which would deny even
		 * the owner read/alter access and make every semop fail
		 * with EACCES; owner read+write (0600) is required.
		 * NOTE(review): POSIX leaves the initial semaphore values
		 * unspecified (Linux zeroes them); an explicit
		 * semctl(SETALL, 0) would be more portable -- confirm
		 * target platforms. */
		sem_id = semget ( IPC_PRIVATE, (COUNT_OF_LOCKS * 3),
				  IPC_CREAT | 0600 );
	if (sem_id < 0)
		fatal ("semget errno %d", errno);
}
/* lock_slurmctld - acquire every lock requested in lock_levels.
 * Locks are always taken in the fixed order config, job, node, partition
 * so that all threads request them in the same sequence (deadlock
 * avoidance). Each entry may be NO_LOCK, READ_LOCK, or WRITE_LOCK. */
void
lock_slurmctld (slurmctld_lock_t lock_levels)
{
	if (sem_id < 0) {
		error ("init_locks was not called before lock use");
		init_locks ();
	}

	switch (lock_levels.config) {
		case READ_LOCK:
			wr_rdlock (CONFIG_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrlock (CONFIG_LOCK);
			break;
		default:
			break;
	}

	switch (lock_levels.job) {
		case READ_LOCK:
			wr_rdlock (JOB_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrlock (JOB_LOCK);
			break;
		default:
			break;
	}

	switch (lock_levels.node) {
		case READ_LOCK:
			wr_rdlock (NODE_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrlock (NODE_LOCK);
			break;
		default:
			break;
	}

	switch (lock_levels.partition) {
		case READ_LOCK:
			wr_rdlock (PART_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrlock (PART_LOCK);
			break;
		default:
			break;
	}
}
/* unlock_slurmctld - release every lock named in lock_levels.
 * Releases run in the reverse of the acquisition order used by
 * lock_slurmctld: partition, node, job, config. */
void
unlock_slurmctld (slurmctld_lock_t lock_levels)
{
	switch (lock_levels.partition) {
		case READ_LOCK:
			wr_rdunlock (PART_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrunlock (PART_LOCK);
			break;
		default:
			break;
	}

	switch (lock_levels.node) {
		case READ_LOCK:
			wr_rdunlock (NODE_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrunlock (NODE_LOCK);
			break;
		default:
			break;
	}

	switch (lock_levels.job) {
		case READ_LOCK:
			wr_rdunlock (JOB_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrunlock (JOB_LOCK);
			break;
		default:
			break;
	}

	switch (lock_levels.config) {
		case READ_LOCK:
			wr_rdunlock (CONFIG_LOCK);
			break;
		case WRITE_LOCK:
			wr_wrunlock (CONFIG_LOCK);
			break;
		default:
			break;
	}
}
/* wr_rdlock - obtain a read lock on the given datatype.
 * Blocks until no writer holds the lock AND no writer is waiting for it
 * (writer priority), then atomically increments the reader count.
 * SEM_UNDO lets the kernel release the lock if this process dies. */
void
wr_rdlock (lock_datatype_t datatype)
{
	struct sembuf rdlock_op[3];

	/* write-wait count must be zero */
	rdlock_op[0] . sem_num = write_wait_lock (datatype);
	rdlock_op[0] . sem_op  = 0;
	rdlock_op[0] . sem_flg = 0;

	/* write count must be zero */
	rdlock_op[1] . sem_num = write_lock (datatype);
	rdlock_op[1] . sem_op  = 0;
	rdlock_op[1] . sem_flg = 0;

	/* increment read count */
	rdlock_op[2] . sem_num = read_lock (datatype);
	rdlock_op[2] . sem_op  = +1;
	rdlock_op[2] . sem_flg = SEM_UNDO;

	if (semop (sem_id, rdlock_op, 3) == -1)
		fatal ("semop errno %d", errno);
}
/* wr_rdunlock - release a read lock on the given datatype by
 * decrementing its reader count. Never blocks (count is > 0 when
 * the caller holds a read lock). */
void
wr_rdunlock (lock_datatype_t datatype)
{
	struct sembuf rdunlock_op[] = {
		{ 0, -1, SEM_UNDO }	/* decrement read count */
	} ;

	rdunlock_op[0] . sem_num = read_lock (datatype);

	/* nsops must equal the element count (1); the previous value of 3
	 * made semop read two sembuf structs past the end of the array */
	if (semop (sem_id, rdunlock_op, 1) == -1)
		fatal ("semop errno %d", errno);
}
/* wr_wrlock - obtain a write lock on the given datatype.
 * Step 1 registers this thread as a waiting writer, which blocks any
 * NEW readers (writer priority). Step 2 waits until there are no
 * readers and no other writer, then atomically converts the wait
 * into a held write lock. */
void
wr_wrlock (lock_datatype_t datatype)
{
	struct sembuf waitlock_op[] = {
		{ 0, +1, SEM_UNDO }	/* increment write-wait count */
	} ;

	struct sembuf wrlock_op[] = {
		{ 0, 0, 0 },		/* read count must be zero */
		{ 0, 0, 0 },		/* write count must be zero */
		{ 0, -1, SEM_UNDO },	/* decrement write-wait count */
		{ 0, +1, SEM_UNDO }	/* increment write count */
	} ;

	waitlock_op[0] . sem_num = write_wait_lock (datatype);

	wrlock_op[0] . sem_num = read_lock (datatype);
	wrlock_op[1] . sem_num = write_lock (datatype);
	wrlock_op[2] . sem_num = write_wait_lock (datatype);
	wrlock_op[3] . sem_num = write_lock (datatype);

	/* nsops must match each array's element count: 1, then 4.
	 * The previous value of 3 read past waitlock_op and silently
	 * dropped the final wrlock_op entry (the write count was never
	 * incremented). */
	if (semop (sem_id, waitlock_op, 1) == -1)
		fatal ("semop errno %d", errno);

	if (semop (sem_id, wrlock_op, 4) == -1)
		fatal ("semop errno %d", errno);
}
/* wr_wrunlock - release a write lock on the given datatype by
 * decrementing its write count back to zero. Never blocks. */
void
wr_wrunlock (lock_datatype_t datatype)
{
	struct sembuf wrunlock_op[1];

	/* decrement write count */
	wrunlock_op[0] . sem_num = write_lock (datatype);
	wrunlock_op[0] . sem_op  = -1;
	wrunlock_op[0] . sem_flg = SEM_UNDO;

	if (semop (sem_id, wrunlock_op, 1) == -1)
		fatal ("semop errno %d", errno);
}
/*****************************************************************************\
* locks.h - definitions for semaphore functions for slurmctld (locks.c)
*****************************************************************************
* Copyright (C) 2002 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Moe Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov>
* UCRL-CODE-2002-040.
*
* This file is part of SLURM, a resource management program.
* For details, see <http://www.llnl.gov/linux/slurm/>.
*
* SLURM is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with SLURM; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
\*****************************************************************************/
/*****************************************************************************\
* Read/write locks implemented using UNIX semaphores.
*
* Read/write locks are implemented by the routines in this directory by using
* a set of three (3) UNIX semaphores to lock a resource.
*
* The set of three (3) semaphores represent a reader semaphore,
* a writer semaphore and a writers waiting semaphore.
*
* The reader semaphore indicates the number of readers that currently have a
* read lock on the resource.
* The writers semaphore indicates that a writer has the resource locked.
* The writers waiting semaphore indicates the number of writers waiting to
* lock the resource.
*
* Readers cannot lock the resource until there are no writers waiting for the
* resource and the resource is not locked by a writer.
*
* Writers cannot lock the resource if the resource is locked by other writers
* or if any readers have the resource locked.
*
* Writers will have priority in locking the resource over readers because
* of the writers waiting semaphore. The writers waiting semaphore is incremented
* by a writer that is waiting to lock the resource. A reader cannot lock
* the resource until there are no writers waiting to lock the resource and
* the resource is not locked by a writer.
*
* So, if the resource is locked by an unspecified number of readers,
 * and a writer tries to lock the resource, then the writer will be blocked
* until all of the previous readers have unlocked the resource. But,
* just before the writer checked to see if there were any readers locking
* the resource, the writer incremented the writers waiting semaphore,
* indicating that there is now a writer waiting to lock the resource.
* In the mean time, if an unspecified number of readers try to lock the
* resource after a writer (or writers) has tried to lock the resource,
* those readers will be blocked until all writers have obtained the lock on
* the resource, used the resource and unlocked the resource. The subsequent
* unspecified number of readers are blocked because they are waiting for the
 * writers waiting semaphore to become 0, meaning that there are no
* writers waiting to lock the resource.
\*****************************************************************************/
/* levels of locking required for each data structure */
typedef enum {
	NO_LOCK,	/* do not touch this lock */
	READ_LOCK,	/* shared: many readers may hold it at once */
	WRITE_LOCK	/* exclusive: blocks all readers and other writers */
} lock_level_t;

/* slurmctld specific data structures to lock;
 * one lock_level_t per independently lockable data structure */
typedef struct {
	lock_level_t config;	/* configuration data */
	lock_level_t job;	/* job records */
	lock_level_t node;	/* node records */
	lock_level_t partition;	/* partition records */
} slurmctld_lock_t;

/* create the semaphore set backing the locks; call once at startup */
extern void init_locks ( );
/* acquire the requested locks in a fixed order (deadlock avoidance) */
extern void lock_slurmctld (slurmctld_lock_t lock_levels);
/* release the requested locks in the reverse of the acquisition order */
extern void unlock_slurmctld (slurmctld_lock_t lock_levels);
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment