#
# Macros for memory constraints on build jobs.
# If you happen to have HUGE packages, you sometimes need to do
# crazy magic to make sure the job spends most of its time building
# rather than just running out of memory (OOM).
#
# m: memory limit in MB per core; default is 1000
%limit_build(m:) \
    _threads="`/usr/bin/getconf _NPROCESSORS_ONLN`" \
    _core_memory="%{-m:%{-m*}}%{!-m:1000}" \
    echo "Available memory:" \
    cat /proc/meminfo \
    echo "System limits:" \
    ulimit -a \
    echo "System jobs: $_threads" \
    if test "$_threads" -gt 1 ; then \
        mem_per_process="$_core_memory" \
        max_physmem=$(awk '/MemTotal/ { print $2 }' /proc/meminfo) \
        max_swapmem=$(awk '/SwapTotal/ { print $2 }' /proc/meminfo) \
        # /proc/meminfo reports kB; the -m limit is MB, so scale it by 1000 \
        max_jobs="$((($max_physmem + $max_swapmem) / ($mem_per_process * 1000)))" \
        test "$_threads" -gt "$max_jobs" && _threads="$max_jobs" && echo "Warning: Reducing number of jobs to $max_jobs because of memory limits" \
        test "$_threads" -le 0 && _threads=1 && echo "Warning: Do not use the parallel build at all because of memory limits" \
    fi \
    %global jobs $([[ -n $_threads ]] && echo $_threads || echo "`/usr/bin/getconf _NPROCESSORS_ONLN`") \
    %global _smp_mflags $([[ -n $_threads ]] && echo "-j$_threads" || echo "-j`/usr/bin/getconf _NPROCESSORS_ONLN`") \
%{nil}
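
# Usage sketch (an assumption, not defined above): call the macro at the
# start of the spec file's build section with the expected per-job memory
# need in MB, and let the build pick up the recomputed _smp_mflags. The
# 2000 MB figure and the make invocation are illustrative choices only.
#
#   %build
#   %limit_build -m 2000
#   make %{?_smp_mflags}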