From 5481ea56f5842cdcb2bcff0fba129181575bd43e Mon Sep 17 00:00:00 2001 From: Moe Jette <jette1@llnl.gov> Date: Fri, 3 Feb 2006 19:38:12 +0000 Subject: [PATCH] Minor reformatting work for SIGOPS. --- doc/common/project.bib | 8 ++++---- doc/sigops/Makefile | 2 +- doc/sigops/bio.txt | 9 --------- doc/sigops/report.tex | 12 ++++++------ 4 files changed, 11 insertions(+), 20 deletions(-) delete mode 100644 doc/sigops/bio.txt diff --git a/doc/common/project.bib b/doc/common/project.bib index e8458a20fb1..f64a99aa9c5 100644 --- a/doc/common/project.bib +++ b/doc/common/project.bib @@ -78,10 +78,10 @@ AUTHOR = {D. Jackson and Q. Snell and M. Clement}, TITLE = {{Core Algorithms of the Maui Scheduler}}, BOOKTITLE = {Job Scheduling Stategies for Parallel Processing}, - PUBLISHER = {Springer-Verlag} + PUBLISHER = {Springer-Verlag}, VOLUME = {2221}, PAGES = {87-102}, - ADDRESS = {7th International Workshop, JSSP 2001, Cambridge, MA}, + ADDRESS = {{7th International Workshop, JSSP 2001, Cambridge, MA}}, YEAR = {2001}, } @@ -99,7 +99,7 @@ @CONFERENCE { Jones2003, - AUTHOR = {T. Jones and S. Dawson, and R. Neely, and W. Tuel and L. Brenner and J. Fier and R. Blackmore and P. Caffrey and B. Maskell and P. Tomlinson and M. Roberts}, + AUTHOR = {T. Jones and S. Dawson and R. Neely and W. Tuel and L. Brenner and J. Fier and R. Blackmore and P. Caffrey and B. Maskell and P. Tomlinson and M. 
Roberts}, TITLE = {{Improving scalability of parallel jobs by adding parallel awareness to the operating system}}, BOOKTITLE = {Proceedings of Supercomputing 2003}, ADDRESS = {{Phoenix, AZ}}, @@ -213,7 +213,7 @@ PUBLISHER = {Springer-Verlag}, VOLUME = {2862}, PAGES = {44-60}, - ADDRESS = {9th International Workshop, JSSP 2003, Seattle, WA}, + ADDRESS = {{9th International Workshop, JSSP 2003, Seattle, WA}}, YEAR = {2003}, } diff --git a/doc/sigops/Makefile b/doc/sigops/Makefile index 8a943dfeec7..a71d48628c8 100644 --- a/doc/sigops/Makefile +++ b/doc/sigops/Makefile @@ -49,5 +49,5 @@ view: $(REPORT).ps ghostview $(REPORT) & clean: - rm -f *~ *.dvi *.log *.aux $(REPORT).ps *.blg *.bbl #*.eps #*.gif *.ps + rm -f *~ *.dvi *.log *.aux *.ps *.pdf *.blg *.bbl #*.eps #*.gif diff --git a/doc/sigops/bio.txt b/doc/sigops/bio.txt deleted file mode 100644 index f8155e3cf66..00000000000 --- a/doc/sigops/bio.txt +++ /dev/null @@ -1,9 +0,0 @@ -Morris Jette is a computer scientist with the Integrated -Computational Resource Management Group at Lawrence Livermore -National Laboratory. His primary research interest computer -scheduling, from individual tasks and processors to distributed -applications running across a computational grid. - -Mark Grondona is a computer scientist with the Production -Linux Group at Lawrence Livermore National Laboratory, where -he works on cluster administration tools and resource management. diff --git a/doc/sigops/report.tex b/doc/sigops/report.tex index b930a91b1b0..553e3f57da6 100644 --- a/doc/sigops/report.tex +++ b/doc/sigops/report.tex @@ -128,7 +128,8 @@ workload prioritization. \item {\tt Open Source}: SLURM is available to everyone and will remain free. Its source code is distributed under the GNU General Public -License~\cite{GPL2002}. +License +~\cite{GPL2002}. \item {\tt Portability}: SLURM is written in the C language, with a GNU {\em autoconf} configuration engine. 
@@ -235,7 +236,7 @@ While allocation of entire nodes to jobs is still a recommended mode of operation for very large clusters, an alternate SLURM plugin provides resource management down the the resolution of individual processors. -The SLURM's {\tt srun} command and daemons are extensively +SLURM's {\tt srun} command and the daemons are extensively multi-threaded. {\tt slurmctld} also maintains independent read and write locks for critical data structures. @@ -266,8 +267,8 @@ This improves performance by distributing the communication workload. Note that every communication is authenticated and acknowleged for fault-tolerance. -A number of interesting papers -~\cite{Jones2003,Kerbyson2001,Petrini2003,Phillips2003,Tsafrir2005} +A number of interesting papers +~\cite{Jones2003,Kerbyson2001,Petrini2003,Phillips2003,Tsafrir2005} have recently been written about the impact of system daemons and other system overhead on parallel job performance. This {\tt system noise} can have a @@ -287,7 +288,7 @@ SLURM addresses this issue by: \item Making the {\tt slurmd} daemon resource requirements negligible \item Supporting configurations that let the {\tt slurmd} daemon sleep during the entire job execution period -\item If the {\tt slurmd} daemons are given work, it is done on a +\item If the {\tt slurmd} daemons do perform work, it is done in a highly synchronized fashion across all nodes \end{itemize} In addition, the default mode of operation is to allocate entire @@ -299,7 +300,6 @@ Allocation of resources to the resolution of individual processors on each node is supported by SLURM, but this comes at a higher cost in terms of the data managed. The selection of resource resolution is provided by different plugins. -These factors permit SLURM to effectively run highly parallel jobs. Resource management of large clusters entails the processing of large quantities of data, both for the software and the -- GitLab