From e4e11a78648bf49dd7a8aa4b334f862bfe00f33b7538178e34d851537d3e423f Mon Sep 17 00:00:00 2001
From: Egbert Eich
Date: Fri, 17 Feb 2017 12:37:31 +0000
Subject: [PATCH] Accepting request 458469 from home:jengelh:branches:network:cluster

- Trim redundant parts of description. Fixup RPM groups.
- Replace unnecessary %__ macro indirections;
  replace historic $RPM_* variables by macros.

OBS-URL: https://build.opensuse.org/request/show/458469
OBS-URL: https://build.opensuse.org/package/show/network:cluster/slurm?expand=0&rev=16
---
 slurm.changes |   7 +++
 slurm.spec    | 124 +++++++++++++++++++++++++-------------------------
 2 files changed, 69 insertions(+), 62 deletions(-)

diff --git a/slurm.changes b/slurm.changes
index 66d2ea4..10d7660 100644
--- a/slurm.changes
+++ b/slurm.changes
@@ -1,3 +1,10 @@
+-------------------------------------------------------------------
+Thu Feb 16 12:12:45 UTC 2017 - jengelh@inai.de
+
+- Trim redundant parts of description. Fixup RPM groups.
+- Replace unnecessary %__ macro indirections;
+  replace historic $RPM_* variables by macros.
+
 -------------------------------------------------------------------
 Wed Feb 15 18:55:28 UTC 2017 - eich@suse.com
 
diff --git a/slurm.spec b/slurm.spec
index 52add43..c0800dd 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -109,18 +109,17 @@ BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 Recommends:     %{name}-munge
 
 %description
-SLURM is an open source, fault-tolerant, and highly
-scalable cluster management and job scheduling system for Linux clusters
-containing up to 65,536 nodes. Components include machine status,
-partition management, job management, scheduling and accounting modules.
-
+SLURM is a cluster management and job scheduling system for Linux
+clusters containing up to 65,536 nodes. Components include machine
+status, partition management, job management, scheduling and
+accounting modules.
 
 %package doc
 Summary:        Documentation for SLURM
-Group:          Documentation/Clustering/Computing
+Group:          Documentation/HTML
 
 %description doc
-Documentation (html) for the SLURM cluster managment software
+Documentation for the SLURM cluster managment software.
 
 %package -n perl-slurm
 Summary:        Perl API to SLURM
@@ -227,21 +226,22 @@ Torque wrapper scripts used for helping migrate from Torque/PBS to SLURM.
 
 %package openlava
 Summary:        Openlava/LSF wrappers for transitition from OpenLava/LSF to Slurm
-Group:          Development/System
+Group:          Productivity/Clustering/Computing
 Requires:       slurm-perlapi
 
+%description openlava
+OpenLava wrapper scripts used for helping migrate from OpenLava/LSF to Slurm
+
 %package seff
 Summary:        Mail tool that includes job statistics in user notification email
-Group:          Development/System
+Group:          Productivity/Clustering/Computing
 Requires:       slurm-perlapi
+
 %description seff
 Mail program used directly by the Slurm daemons. On completion of a job,
 wait for it''s accounting information to be available and include that
 information in the email body.
 
-%description openlava
-OpenLava wrapper scripts used for helping migrate from OpenLava/LSF to Slurm
-
 %package slurmdb-direct
 Summary:        Wrappers to write directly to the slurmdb
@@ -284,7 +284,7 @@ or any user who has allocated resources on the node according to the SLURM
 
 %package lua
 Summary:        Lua API for SLURM
-Group:          Development/Libraries/Other
+Group:          Development/Languages/Other
 Requires:       slurm = %{version}
 BuildRequires:  lua-devel
@@ -328,14 +328,14 @@ ln -sf %{_initrddir}/slurm %{buildroot}%{_sbindir}/rcslurm
 ln -sf %{_initrddir}/slurmdbd %{buildroot}%{_sbindir}/rcslurmdbd
 %endif
 
-install -D -m644 etc/slurm.conf.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/slurm.conf%{?OHPC_BUILD:.example}
-install -D -m644 etc/slurmdbd.conf.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/slurmdbd.conf
-install -D -m644 etc/cgroup.conf.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/cgroup.conf
-install -D -m644 etc/cgroup_allowed_devices_file.conf.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/cgroup_allowed_devices_file.conf
-install -D -m755 etc/cgroup.release_common.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/cgroup/release_common.example
-install -D -m755 etc/cgroup.release_common.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/cgroup/release_freezer
-install -D -m755 etc/cgroup.release_common.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/cgroup/release_cpuset
-install -D -m755 etc/cgroup.release_common.example $RPM_BUILD_ROOT%{_sysconfdir}/%{name}/cgroup/release_memory
+install -D -m644 etc/slurm.conf.example %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf%{?OHPC_BUILD:.example}
+install -D -m644 etc/slurmdbd.conf.example %{buildroot}/%{_sysconfdir}/%{name}/slurmdbd.conf
+install -D -m644 etc/cgroup.conf.example %{buildroot}/%{_sysconfdir}/%{name}/cgroup.conf
+install -D -m644 etc/cgroup_allowed_devices_file.conf.example %{buildroot}/%{_sysconfdir}/%{name}/cgroup_allowed_devices_file.conf
+install -D -m755 etc/cgroup.release_common.example %{buildroot}/%{_sysconfdir}/%{name}/cgroup/release_common.example
+install -D -m755 etc/cgroup.release_common.example %{buildroot}/%{_sysconfdir}/%{name}/cgroup/release_freezer
+install -D -m755 etc/cgroup.release_common.example %{buildroot}/%{_sysconfdir}/%{name}/cgroup/release_cpuset
+install -D -m755 etc/cgroup.release_common.example %{buildroot}/%{_sysconfdir}/%{name}/cgroup/release_memory
 install -D -m644 etc/slurmdbd.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/slurmdbd.conf.example
 install -D -m755 etc/slurm.epilog.clean ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/slurm.epilog.clean
 install -D -m755 contribs/sgather/sgather ${RPM_BUILD_ROOT}%{_bindir}/sgather
@@ -343,67 +343,67 @@ install -D -m755 contribs/sjstat ${RPM_BUILD_ROOT}%{_bindir}/sjstat
 %if 0%{?OHPC_BUILD}
 # 6/16/15 karl.w.schulz@intel.com - do not package Slurm's version of libpmi with OpenHPC.
-## rm -f $RPM_BUILD_ROOT/%%{_libdir}/libpmi*
-## rm -f $RPM_BUILD_ROOT/%%{_libdir}/mpi_pmi2*
+## rm -f %{buildroot}/%%{_libdir}/libpmi*
+## rm -f %{buildroot}/%%{_libdir}/mpi_pmi2*
 # 9/8/14 karl.w.schulz@intel.com - provide starting config file
-head -n -2 $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf.example | grep -v ReturnToService > $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-sed -i 's#\(StateSaveLocation=\).*#\1%_localstatedir/lib/slurm#' $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-echo "# OpenHPC default configuration" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-echo "PropagateResourceLimitsExcept=MEMLOCK" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-echo "SlurmdLogFile=/var/log/slurm.log" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-echo "SlurmctldLogFile=/var/log/slurmctld.log" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-echo "Epilog=/etc/slurm/slurm.epilog.clean" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-echo "NodeName=c[1-4] Sockets=2 CoresPerSocket=8 ThreadsPerCore=2 State=UNKNOWN" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
-echo "PartitionName=normal Nodes=c[1-4] Default=YES MaxTime=24:00:00 State=UP" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
+head -n -2 %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf.example | grep -v ReturnToService > %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+sed -i 's#\(StateSaveLocation=\).*#\1%_localstatedir/lib/slurm#' %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+echo "# OpenHPC default configuration" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+echo "PropagateResourceLimitsExcept=MEMLOCK" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+echo "SlurmdLogFile=/var/log/slurm.log" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+echo "SlurmctldLogFile=/var/log/slurmctld.log" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+echo "Epilog=/etc/slurm/slurm.epilog.clean" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+echo "NodeName=c[1-4] Sockets=2 CoresPerSocket=8 ThreadsPerCore=2 State=UNKNOWN" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
+echo "PartitionName=normal Nodes=c[1-4] Default=YES MaxTime=24:00:00 State=UP" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
 # 6/3/16 nirmalasrjn@gmail.com - Adding ReturnToService Directive to starting config file (note removal of variable during above creation)
-echo "ReturnToService=1" >> $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.conf
+echo "ReturnToService=1" >> %{buildroot}/%{_sysconfdir}/%{name}/slurm.conf
 # 9/17/14 karl.w.schulz@intel.com - Add option to drop VM cache during epilog
-sed -i '/^# No other SLURM jobs,/i \\n# Drop clean caches (OpenHPC)\necho 3 > /proc/sys/vm/drop_caches\n\n#' $RPM_BUILD_ROOT/%{_sysconfdir}/%{name}/slurm.epilog.clean
-%{__mkdir_p} $RPM_BUILD_ROOT%_localstatedir/lib/slurm
+sed -i '/^# No other SLURM jobs,/i \\n# Drop clean caches (OpenHPC)\necho 3 > /proc/sys/vm/drop_caches\n\n#' %{buildroot}/%{_sysconfdir}/%{name}/slurm.epilog.clean
+mkdir -p %{buildroot}/%_localstatedir/lib/slurm
 %endif
 
 # Delete unpackaged files:
-rm -rf $RPM_BUILD_ROOT/%{_libdir}/slurm/*.{a,la} \
-       $RPM_BUILD_ROOT/%{_libdir}/*.la \
-       $RPM_BUILD_ROOT/%_lib/security/*.la \
-       $RPM_BUILD_ROOT/%{_mandir}/man5/bluegene*
+rm -rf %{buildroot}/%{_libdir}/slurm/*.{a,la} \
+       %{buildroot}/%{_libdir}/*.la \
+       %{buildroot}/%_lib/security/*.la \
+       %{buildroot}/%{_mandir}/man5/bluegene*
 
-rm -f $RPM_BUILD_ROOT%{_mandir}/man1/srun_cr* \
-      $RPM_BUILD_ROOT%{_bindir}/srun_cr \
-      $RPM_BUILD_ROOT%{_libexecdir}/slurm/cr_*
+rm -f %{buildroot}/%{_mandir}/man1/srun_cr* \
+      %{buildroot}/%{_bindir}/srun_cr \
+      %{buildroot}/%{_libexecdir}/slurm/cr_*
 
 # Delete unpackaged files:
-test -s $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/Slurm.bs ||
-rm -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/Slurm.bs
+test -s %{buildroot}/%{_perldir}/auto/Slurm/Slurm.bs ||
+rm -f %{buildroot}/%{_perldir}/auto/Slurm/Slurm.bs
 
-test -s $RPM_BUILD_ROOT/%{_perldir}/auto/Slurmdb/Slurmdb.bs ||
-rm -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurmdb/Slurmdb.bs
+test -s %{buildroot}/%{_perldir}/auto/Slurmdb/Slurmdb.bs ||
+rm -f %{buildroot}/%{_perldir}/auto/Slurmdb/Slurmdb.bs
 
 rm doc/html/shtml2html.py doc/html/Makefile*
 
-%{__rm} -f %{buildroot}/%{perl_archlib}/perllocal.pod
-%{__rm} -f %{buildroot}/%{perl_vendorarch}/auto/Slurm/.packlist
-%{__rm} -f %{buildroot}/%{perl_vendorarch}/auto/Slurmdb/.packlist
-%{__mv} %{buildroot}/%{perl_sitearch}/config.slurmdb.pl %{buildroot}/%{perl_vendorarch}
+rm -f %{buildroot}/%{perl_archlib}/perllocal.pod
+rm -f %{buildroot}/%{perl_vendorarch}/auto/Slurm/.packlist
+rm -f %{buildroot}/%{perl_vendorarch}/auto/Slurmdb/.packlist
+mv %{buildroot}/%{perl_sitearch}/config.slurmdb.pl %{buildroot}/%{perl_vendorarch}
 
 # Build man pages that are generated directly by the tools
-rm -f $RPM_BUILD_ROOT/%{_mandir}/man1/sjobexitmod.1
-${RPM_BUILD_ROOT}%{_bindir}/sjobexitmod --roff > $RPM_BUILD_ROOT/%{_mandir}/man1/sjobexitmod.1
-rm -f $RPM_BUILD_ROOT/%{_mandir}/man1/sjstat.1
-${RPM_BUILD_ROOT}%{_bindir}/sjstat --roff > $RPM_BUILD_ROOT/%{_mandir}/man1/sjstat.1
+rm -f %{buildroot}/%{_mandir}/man1/sjobexitmod.1
+${RPM_BUILD_ROOT}%{_bindir}/sjobexitmod --roff > %{buildroot}/%{_mandir}/man1/sjobexitmod.1
+rm -f %{buildroot}/%{_mandir}/man1/sjstat.1
+${RPM_BUILD_ROOT}%{_bindir}/sjstat --roff > %{buildroot}/%{_mandir}/man1/sjstat.1
 
 # rpmlint reports wrong end of line for those files
-sed -i 's/\r$//' $RPM_BUILD_ROOT%{_bindir}/qrerun
-sed -i 's/\r$//' $RPM_BUILD_ROOT%{_bindir}/qalter
+sed -i 's/\r$//' %{buildroot}/%{_bindir}/qrerun
+sed -i 's/\r$//' %{buildroot}/%{_bindir}/qalter
 
-mkdir -p $RPM_BUILD_ROOT/etc/ld.so.conf.d
+mkdir -p %{buildroot}/etc/ld.so.conf.d
 echo '%{_libdir}
-%{_libdir}/slurm' > $RPM_BUILD_ROOT/etc/ld.so.conf.d/slurm.conf
-chmod 644 $RPM_BUILD_ROOT/etc/ld.so.conf.d/slurm.conf
+%{_libdir}/slurm' > %{buildroot}/etc/ld.so.conf.d/slurm.conf
+chmod 644 %{buildroot}/etc/ld.so.conf.d/slurm.conf
 
 # Make pkg-config file
-mkdir -p $RPM_BUILD_ROOT/%{_libdir}/pkgconfig
-cat > $RPM_BUILD_ROOT/%{_libdir}/pkgconfig/slurm.pc <
+mkdir -p %{buildroot}/%{_libdir}/pkgconfig
+cat > %{buildroot}/%{_libdir}/pkgconfig/slurm.pc <
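
For illustration only (not part of the patch): the spec cleanup named in the changelog entry above amounts to rewrites of the following form, shown here as a minimal sketch with a placeholder file name "foo"; the actual substitutions are the ones visible in the slurm.spec hunks.

# before: %__ macro indirection and the historic $RPM_BUILD_ROOT variable
%{__rm} -f $RPM_BUILD_ROOT%{_bindir}/foo
%{__mkdir_p} $RPM_BUILD_ROOT%{_localstatedir}/lib/foo

# after: plain commands and the %{buildroot} macro
rm -f %{buildroot}%{_bindir}/foo
mkdir -p %{buildroot}%{_localstatedir}/lib/foo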