Merge pull request #1814 from coolo/refactor_pkglistgen

pkglistgen: Big refactoring of how packages are resolved
Commit f8b4b43745 by Ludwig Nussel, 2019-01-17 15:53:29 +01:00 (committed by GitHub)
20 changed files with 1741 additions and 2178 deletions


@ -71,7 +71,7 @@ matrix:
# Needs python prefix to use the correct interpretor.
- python ./obs_clone.py --cache --debug --apiurl-target local
script:
-- nosetests --with-coverage --cover-package=. --cover-inclusive --exclude-dir=./oqamaint -c .noserc
+- nosetests --with-coverage --cover-package=. --cover-inclusive --exclude-dir=./oqamaint --exclude-dir=./pkglistgen -c .noserc
after_success:
- coveralls
- env: TEST_SUITE=nosetests-osc-python3
@ -99,7 +99,7 @@ matrix:
# Needs python prefix to use the correct interpretor.
- python ./obs_clone.py --cache --debug --apiurl-target local
script:
-- nosetests --with-coverage --cover-package=. --cover-inclusive --exclude-dir=./oqamaint -c .noserc
+- nosetests --with-coverage --cover-package=. --cover-inclusive --exclude-dir=./oqamaint --exclude-dir=./pkglistgen -c .noserc
after_success:
- coveralls
- env: TEST_SUITE=nosetests-osc-python3


@ -1,40 +0,0 @@
## common part
#ifdef __x86_64__
system x86_64 rpm
#endif
#ifdef __ppc64le__
system ppc64le rpm
#endif
namespace namespace:language(en_US) @SYSTEM
job install provides pattern() = enhanced_base
job install provides pattern() = sw_management
job install provides pattern() = x11
job install provides pattern() = yast2_basis
job install name aaa_base-extras
job install name branding-openSUSE
job install name openSUSE-release-dvd
job install name sudo
job install name xdelta
job install name screen
job install name alsa-utils
job install name yast2-nfs-client
job lock name bash-completion
job lock name bash-doc
job lock name cups
job lock name ghostscript
job lock name graphviz
job lock name gtk2-branding-upstream
job lock name icewm-default
job lock name kernel-xen
job lock name nss-mdns-32bit
job lock name patterns-openSUSE-enhanced_base_opt
job lock name patterns-openSUSE-fonts_opt
job lock name patterns-openSUSE-x11_opt
job lock name readline-doc
job lock name SuSEfirewall2
job lock name vim-data
job lock name libgcc_s1-32bit
job lock name gettext-runtime-mini


@ -1,59 +0,0 @@
## common part
#ifdef __x86_64__
system x86_64 rpm
#endif
#ifdef __ppc64le__
system ppc64le rpm
#endif
namespace namespace:language(en_US) @SYSTEM
job install provides pattern() = apparmor
job install provides pattern() = base
job install provides pattern() = enhanced_base
job install provides pattern() = gnome
job install provides pattern() = gnome_internet
job install provides pattern() = gnome_multimedia
job install provides pattern() = gnome_utilities
job install provides pattern() = imaging
job install provides pattern() = kde
job install provides pattern() = kde_plasma
job install provides pattern() = kde_multimedia
job install provides pattern() = kde_utilities
job install provides pattern() = multimedia
job install provides pattern() = minimal_base
job install provides pattern() = rest_core_dvd
job install name aaa_base-extras
job install name branding-openSUSE
job install name openSUSE-release-dvd
job install name sudo
job install name xdelta
job install name kate
job install name sddm
job lock name gtk2-branding-upstream
job lock name gdm-branding-upstream
job lock name kdebase4-workspace-branding-upstream
job lock name kdm-branding-upstream
job lock name kdebase4-runtime-branding-upstream
job lock name kdelibs4-branding-upstream
job lock name sddm-branding-upstream
job lock name plasma5-desktop-branding-upstream
job lock name plasma5-workspace-branding-upstream
job lock name readline-doc
#ifdef __x86_64__
job install provides pattern() = laptop
job install provides pattern() = office
job install name MozillaThunderbird
job install name libreoffice
job lock name libgcc_s1-32bit
job install provides virtualbox-guest-kmp
#endif
job lock name gtk3-branding-upstream
job lock name openvpn
job lock name systemtap
job lock name esound-daemon
job lock name gettext-runtime-mini


@ -1,102 +0,0 @@
## common part
#ifdef __x86_64__
system x86_64 rpm
#endif
#ifdef __ppc64le__
system ppc64le rpm
#endif
namespace namespace:language(en_US) @SYSTEM
job install provides pattern() = apparmor
job install provides pattern() = base
job install provides pattern() = console
job install provides pattern() = enhanced_base
job install provides pattern() = gnome
job install provides pattern() = gnome_games
job install provides pattern() = gnome_internet
job install provides pattern() = gnome_multimedia
job install provides pattern() = gnome_office
job install provides pattern() = gnome_utilities
job install provides pattern() = gnome_yast
job install provides pattern() = imaging
job install provides pattern() = kde
job install provides pattern() = kde_games
job install provides pattern() = kde_imaging
job install provides pattern() = kde_multimedia
job install provides pattern() = kde_office
job install provides pattern() = kde_plasma
job install provides pattern() = kde_utilities
job install provides pattern() = kde_yast
job install provides pattern() = multimedia
job install provides pattern() = minimal_base
job install provides pattern() = sw_management
job install provides pattern() = sw_management_gnome
job install provides pattern() = yast2_basis
job install provides pattern() = yast2_install_wf
job install provides pattern() = x86
job install provides pattern() = rest_core_dvd
job install name aaa_base-extras
job install name amarok
job install name branding-openSUSE
job install name openSUSE-release-livecd-x11
job install name sudo
job install name xdelta3
job install name kate
job install name inkscape
job install name sddm
job install name gnome-music
job install name kexec-tools
job install name firewalld
job install name xfsprogs
job lock name gtk2-branding-upstream
job lock name gdm-branding-upstream
job lock name kdebase4-workspace-branding-upstream
job lock name kdm-branding-upstream
job lock name kdebase4-runtime-branding-upstream
job lock name sddm-branding-upstream
job lock name plasma5-desktop-branding-upstream
job lock name plasma5-workspace-branding-upstream
job lock name readline-doc
#ifdef __x86_64__
job install provides pattern() = laptop
job install provides pattern() = office
job install name MozillaThunderbird
job install name libreoffice
job install name mokutil
job lock name libgcc_s1-32bit
job install provides virtualbox-guest-kmp
#endif
job lock name gtk3-branding-upstream
job lock name openvpn
job lock name systemtap
job lock name esound-daemon
job lock name gettext-runtime-mini
job lock name udev-mini
job lock name libudev1-mini
## for testing docker
job install name docker
## for textmode test
job install name yast2-nfs-server
job install name apache2
job install name php7
job install name apache2-mod_php7
job install name php7-mysql
job install name php7-pgsql
job install name postgresql96-server
job install name bind
## required for ppc64 test
job install name mariadb
## we newly want to start glxgears in Staging tests (sync up with SLE)
job install name Mesa-demo-x
## required in Staging tests
job install name salt-master
job install name salt-minion


@ -1,47 +0,0 @@
## common part
#ifdef __x86_64__
system x86_64 rpm
#endif
#ifdef __ppc64le__
system ppc64le rpm
#endif
namespace namespace:language(en_US) @SYSTEM
job install provides pattern() = enhanced_base
job install provides pattern() = sw_management
job install provides pattern() = x11
job install provides pattern() = yast2_basis
job install name aaa_base-extras
job install name branding-openSUSE
job install name openSUSE-release-dvd
job install name sudo
job install name xdelta
job install name screen
job install name alsa-utils
job install name yast2-nfs-client
job install name kernel-default
job install name kexec-tools
#ifdef __x86_64__
job install name mokutil
#endif
job lock name bash-completion
job lock name bash-doc
job lock name cups
job lock name ghostscript
job lock name graphviz
job lock name gtk2-branding-upstream
job lock name kernel-xen
job lock name nss-mdns-32bit
job lock name patterns-openSUSE-enhanced_base_opt
job lock name patterns-openSUSE-fonts_opt
job lock name patterns-openSUSE-x11_opt
job lock name readline-doc
job lock name SuSEfirewall2
job lock name vim-data
job lock name libgcc_s1-32bit
job lock name gettext-runtime-mini
job lock name udev-mini
job lock name libudev1-mini


@ -1,63 +0,0 @@
## common part
#ifdef __x86_64__
system x86_64 rpm
#endif
#ifdef __ppc64le__
system ppc64le rpm
#endif
namespace namespace:language(en_US) @SYSTEM
job install provides pattern() = apparmor
job install provides pattern() = base
job install provides pattern() = enhanced_base
job install provides pattern() = gnome
job install provides pattern() = gnome_internet
job install provides pattern() = gnome_multimedia
job install provides pattern() = gnome_utilities
job install provides pattern() = imaging
job install provides pattern() = kde
job install provides pattern() = kde_plasma
job install provides pattern() = kde_multimedia
job install provides pattern() = kde_utilities
job install provides pattern() = multimedia
job install provides pattern() = minimal_base
job install provides pattern() = rest_core_dvd
job install name aaa_base-extras
job install name branding-openSUSE
job install name openSUSE-release-dvd
job install name sudo
job install name xdelta
job install name kate
job install name sddm
job install name kernel-default
job install name kexec-tools
job lock name gtk2-branding-upstream
job lock name gdm-branding-upstream
job lock name kdebase4-workspace-branding-upstream
job lock name kdm-branding-upstream
job lock name kdebase4-runtime-branding-upstream
job lock name kdelibs4-branding-upstream
job lock name sddm-branding-upstream
job lock name plasma5-desktop-branding-upstream
job lock name plasma5-workspace-branding-upstream
job lock name readline-doc
#ifdef __x86_64__
job install provides pattern() = laptop
job install provides pattern() = office
job install name MozillaThunderbird
job install name libreoffice
job install name mokutil
job lock name libgcc_s1-32bit
job install provides virtualbox-guest-kmp
#endif
job lock name gtk3-branding-upstream
job lock name openvpn
job lock name systemtap
job lock name esound-daemon
job lock name gettext-runtime-mini
job lock name udev-mini
job lock name libudev1-mini


@ -1,51 +0,0 @@
## common part
#ifdef __x86_64__
system x86_64 rpm
#endif
#ifdef __ppc64le__
system ppc64le rpm
#endif
namespace namespace:language(en_US) @SYSTEM
job install provides pattern() = enhanced_base
job install provides pattern() = sw_management
job install provides pattern() = x11
job install provides pattern() = yast2_basis
job install name aaa_base-extras
job install name branding-openSUSE
job install name openSUSE-release-dvd
job install name sudo
job install name xdelta3
job install name screen
job install name alsa-utils
job install name yast2-nfs-client
job install name kernel-default
job install name kexec-tools
#ifdef __x86_64__
job install name mokutil
#endif
job lock name bash-completion
job lock name bash-doc
job lock name cups
job lock name ghostscript
job lock name graphviz
job lock name gtk2-branding-upstream
job lock name kernel-xen
job lock name nss-mdns-32bit
job lock name patterns-openSUSE-enhanced_base_opt
job lock name patterns-openSUSE-fonts_opt
job lock name patterns-openSUSE-x11_opt
job lock name readline-doc
job lock name SuSEfirewall2
job lock name vim-data
job lock name libgcc_s1-32bit
job lock name gettext-runtime-mini
job lock name udev-mini
job lock name libudev1-mini
// this one has no dependency on X but provides dbus-1-x11 for
// legacy reasons. It would break X session so block it for now
job lock name dbus-1-nox11


@ -1,78 +0,0 @@
## common part
#ifdef __x86_64__
system x86_64 rpm
#endif
#ifdef __ppc64le__
system ppc64le rpm
#endif
namespace namespace:language(en_US) @SYSTEM
job install provides pattern() = apparmor
job install provides pattern() = base
job install provides pattern() = enhanced_base
job install provides pattern() = gnome
job install provides pattern() = gnome_internet
job install provides pattern() = gnome_multimedia
job install provides pattern() = gnome_utilities
job install provides pattern() = imaging
job install provides pattern() = kde
job install provides pattern() = kde_plasma
job install provides pattern() = kde_multimedia
job install provides pattern() = kde_utilities
job install provides pattern() = multimedia
job install provides pattern() = minimal_base
job install provides pattern() = generic_server
job install provides pattern() = rest_core_dvd
job install name aaa_base-extras
job install name branding-openSUSE
job install name openSUSE-release-dvd
job install name sudo
job install name xdelta3
job install name kate
job install name sddm
job install name kernel-default
job install name kexec-tools
job lock name gtk2-branding-upstream
job lock name gdm-branding-upstream
job lock name kdebase4-workspace-branding-upstream
job lock name kdm-branding-upstream
job lock name kdebase4-runtime-branding-upstream
job lock name kdelibs4-branding-upstream
job lock name sddm-branding-upstream
job lock name plasma5-desktop-branding-upstream
job lock name plasma5-workspace-branding-upstream
job lock name readline-doc
#ifdef __x86_64__
job install provides pattern() = laptop
job install provides pattern() = office
job install name MozillaThunderbird
job install name libreoffice
job install name mokutil
job lock name libgcc_s1-32bit
job install provides virtualbox-guest-kmp
#endif
job lock name gtk3-branding-upstream
job lock name openvpn
job lock name systemtap
job lock name esound-daemon
job lock name gettext-runtime-mini
job lock name udev-mini
job lock name libudev1-mini
// this one has no dependency on X but provides dbus-1-x11 for
// legacy reasons. It would break X session so block it for now
job lock name dbus-1-nox11
## for textmode test
job install name yast2-nfs-server
job install name apache2
job install name php5
job install name apache2-mod_php5
job install name php5-mysql
job install name php5-pgsql
job install name postgresql94-server
job install name bind


@ -1,197 +0,0 @@
#! /bin/bash
set -e
shopt -s nullglob
if ! test -d co; then
echo "you need to call this in a directory with a co directory containting osc checkouts with the staging prjs"
exit 1
fi
dryrun=
# give it target Factory by default then will not breaks current operation
if [ $# -eq 0 ]; then
targets='Factory'
arch='x86_64'
has_ring_0='yes'
has_ring_1='yes'
has_ring_2='yes'
has_staging='yes'
else
for arg in $@;do
if [ "$arg" = "x86_64" -o "$arg" = "ppc64le" ]; then
arch="$arg"
elif [ "$arg" = "has_ring_all" ]; then
has_ring_0='yes'
has_ring_1='yes'
has_ring_2='yes'
elif [ "$arg" = "has_ring_0" ]; then
has_ring_0='yes'
elif [ "$arg" = "has_ring_1" ]; then
has_ring_0='yes'
has_ring_1='yes'
elif [ "$arg" = "has_ring_2" ]; then
has_ring_0='yes'
has_ring_1='yes'
has_ring_2='yes'
elif [ "$arg" = "has_staging" ]; then
has_staging='yes'
elif [ "$arg" = "dryrun" ]; then
dryrun='yes'
else
targets+="$arg"
fi
done
fi
CODIR=$PWD
SCRIPTDIR=`dirname "$0"`
function regenerate_pl() {
prj=$1
shift;
target=$1
shift;
suffix=$1
shift;
arch=${@: -1}
tcfile=tc.$target.$suffix.$1
: > $tcfile
for i in "$@"; do
if [ "$i" != "$arch" ];then
echo "repo $i 0 solv $i.solv" >> $tcfile
fi
done
cpp -E -U__ppc64__ -U__x86_64__ -D__$arch\__ $SCRIPTDIR/create_test_$target\_dvd-$suffix.testcase >> $tcfile
out=$(mktemp)
testsolv -r $tcfile > $out
ERRPKG=""
if grep ^problem $out ; then
# invalidate the kiwi file - ensuring it is not being built while we can't calculate it
ERRPKG="CREATE_TEST_DVD_PROBLEM"
fi
sed -i -e 's,^install \(.*\)-[^-]*-[^-]*\.[^-\.]*@.*,\1,' $out
p=$(mktemp)
tdir=$CODIR/co/$prj/Test-DVD-$arch
if [ ! -d "$tdir" ]; then
mkdir -p "$tdir"
osc co -o "$tdir" "$prj" Test-DVD-$arch
fi
pushd $tdir > /dev/null
osc up
popd > /dev/null
sed -n -e '1,/BEGIN-PACKAGELIST/p' $tdir/PRODUCT-$arch.kiwi > $p
for i in $(cat $out) $ERRPKG; do
echo "<repopackage name='$i'/>" >> $p
done
sed -n -e '/END-PACKAGELIST/,$p' $tdir/PRODUCT-$arch.kiwi >> $p
xmllint --format $p -o $tdir/PRODUCT-$arch.kiwi
rm $p $out
pushd $tdir > /dev/null
if ! cmp -s .osc/PRODUCT-$arch.kiwi PRODUCT-$arch.kiwi; then
if [ "$dryrun" = 'yes' ]; then
diff -u .osc/PRODUCT-$arch.kiwi PRODUCT-$arch.kiwi || :
else
osc ci -m "auto update"
fi
fi
popd > /dev/null
}
function sync_prj() {
prj=$1
dir=$2
arch=$3
mkdir -p $dir
perl $SCRIPTDIR/bs_mirrorfull --nodebug https://api.opensuse.org/public/build/$prj/$arch $dir
if [ "$dir" -nt "$dir.solv" ]; then
rpms=($dir/*.rpm)
if [ "${#rpms[@]}" -gt 0 ]; then
local start=$SECONDS
rpms2solv "${rpms[@]}" > $dir.solv
echo "creating ${dir}.solv took $((SECONDS-$start))s"
else
echo "cannot find any rpm file in ${dir}"
return
fi
fi
}
function start_creating() {
for target in "$targets"; do
echo "Start checking $target $arch"
# Rings part
if [ "$has_ring_0" = "yes" ]; then
sync_prj openSUSE:$target:Rings:0-Bootstrap/standard/ $target-bootstrap-$arch $arch
fi
if [ "$has_ring_1" = "yes" ]; then
sync_prj openSUSE:$target:Rings:1-MinimalX/standard $target-minimalx-$arch $arch
regenerate_pl openSUSE:$target:Rings:1-MinimalX $target 1 $target-bootstrap-$arch $target-minimalx-$arch $arch
fi
if [ "$has_ring_2" = "yes" ]; then
sync_prj openSUSE:$target:Rings:2-TestDVD/standard $target-testdvd-$arch $arch
regenerate_pl openSUSE:$target:Rings:2-TestDVD $target 2 $target-bootstrap-$arch $target-minimalx-$arch $target-testdvd-$arch $arch
if [ "$dryrun" != 'yes' ]; then
perl $SCRIPTDIR/rebuildpacs.pl openSUSE:$target:Rings:2-TestDVD standard $arch
fi
fi
# Staging Project part
if [ "$has_staging" = "yes" ]; then
projects=$(osc api "/search/project/id?match=starts-with(@name,\"openSUSE:$target:Staging\")" | grep name | cut -d\' -f2)
for prj in $projects; do
l=$(echo $prj | sed 's/^openSUSE.\+[:]Staging/Staging/g' | cut -d: -f2)
if [[ $prj =~ ^openSUSE.+:[A-Z]$ ]] || [[ $prj =~ ^openSUSE.+:[A-Z]:DVD$ ]]; then
# if the testdvd build is disabled, do not regenerate the pacakges list and go to next staging project
testdvd_disabled=$(osc api "/build/openSUSE:$target:Staging:$l/_result?view=summary&package=Test-DVD-$arch&repository=images" | grep 'statuscount code="disabled"' || true)
if [ -n "$testdvd_disabled" ]; then
echo "Skips openSUSE:$target:Staging:$l due to the testdvd build is disabled"
continue
fi
fi
if [[ $prj =~ ^openSUSE.+:[A-Z]$ ]] || [[ $prj =~ ^openSUSE.+:Gcc[0-9]$ ]]; then
echo "Checking $target:$l-$arch"
meta=$(mktemp)
use_bc="staging_$target:$l-bc-$arch"
osc meta prj $prj > $meta
if grep -q 0-Bootstrap $meta ; then
use_bc=
fi
if [ -n "$use_bc" ]; then
sync_prj openSUSE:$target:Staging:$l/bootstrap_copy "staging_$target:$l-bc-$arch" $arch
fi
sync_prj openSUSE:$target:Staging:$l/standard staging_$target:$l-$arch $arch
regenerate_pl "openSUSE:$target:Staging:$l" $target 1 $use_bc staging_$target:$l-$arch $arch
rm $meta
fi
if [[ $prj =~ :DVD ]]; then
echo "Rebuildpacs $prj"
if [ "$dryrun" != 'yes' ]; then
perl $SCRIPTDIR/rebuildpacs.pl $prj standard $arch
fi
fi
if [[ $prj =~ ^openSUSE.+:[A-Z]:DVD$ ]]; then
echo "Checking $target:$l:DVD-$arch"
sync_prj openSUSE:$target:Staging:$l:DVD/standard "staging_$target:$l-dvd-$arch" $arch
regenerate_pl "openSUSE:$target:Staging:$l:DVD" $target 2 $use_bc staging_$target:$l-$arch "staging_$target:$l-dvd-$arch" $arch
fi
done
fi
done
}
# call main function
start_creating $targets $arch
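For reference, regenerate_pl() in the deleted script preprocesses a .testcase file with cpp, feeds it to testsolv, and strips the version-release.arch@repo suffix from every resulting 'install' line before splicing the bare names into the Test-DVD kiwi file. A rough Python sketch of that name extraction (the helper name is hypothetical, and unlike the sed call it drops non-matching lines instead of passing them through):

from __future__ import print_function
import re

# "install <name>-<version>-<release>.<arch>@<repo>"  ->  "<name>"
INSTALL_LINE = re.compile(r'^install (.*)-[^-]*-[^-]*\.[^-.]*@.*$')

def packages_from_testsolv_output(lines):
    return [m.group(1) for m in map(INSTALL_LINE.match, lines) if m]

print(packages_from_testsolv_output(
    ['install bash-4.4-9.1.x86_64@Factory-bootstrap-x86_64']))
# ['bash']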


@ -441,7 +441,6 @@ exit 0
%{_bindir}/osrt-check_source_in_factory
%{_bindir}/osrt-check_tags_in_requests
%{_bindir}/osrt-compare_pkglist
-%{_bindir}/osrt-create_test_dvds
%{_bindir}/osrt-deptool
%{_bindir}/osrt-fcc_submitter
%{_bindir}/osrt-findfileconflicts


@ -24,7 +24,6 @@ DEFAULT = {
'staging-group': 'factory-staging',
'staging-archs': 'i586 x86_64',
'staging-dvd-archs': '',
-'nocleanup-packages': 'Test-DVD-x86_64 Test-DVD-ppc64le bootstrap-copy',
'rings': 'openSUSE:%(project)s:Rings',
'nonfree': 'openSUSE:%(project)s:NonFree',
'rebuild': 'openSUSE:%(project)s:Rebuild',
@ -45,6 +44,7 @@ DEFAULT = {
'repo_checker-no-filter': 'True',
'repo_checker-package-comment-devel': 'True',
'pkglistgen-product-family-include': 'openSUSE:Leap:N',
+'pkglistgen-locales-from': 'openSUSE.product.in',
'mail-list': 'opensuse-factory@opensuse.org',
'mail-maintainer': 'Dominique Leuenberger <dimstar@suse.de>',
'mail-noreply': 'noreply@opensuse.org',
@ -100,7 +100,6 @@ DEFAULT = {
'pkglistgen-archs': 'x86_64',
'pkglistgen-scopes': 'target rings staging',
'pkglistgen-locales-from': 'openSUSE.product',
-'pkglistgen-include-suggested': 'False',
'pkglistgen-delete-kiwis-rings': 'openSUSE-ftp-ftp-x86_64.kiwi openSUSE-cd-mini-x86_64.kiwi',
'pkglistgen-delete-kiwis-staging': 'openSUSE-ftp-ftp-x86_64.kiwi openSUSE-cd-mini-x86_64.kiwi',
'mail-list': 'opensuse-factory@opensuse.org',
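The new 'pkglistgen-locales-from': 'openSUSE.product.in' default is added in the Leap-family section of DEFAULT, next to 'pkglistgen-product-family-include'. A minimal sketch of how the pkglistgen code below picks such values up (the project name is only an example and configured osc credentials are assumed):

from __future__ import print_function
from osc import conf
from osclib.conf import Config

conf.get_config()                          # load the osc configuration
apiurl = conf.config['apiurl']
project = 'openSUSE:Leap:15.1'             # example target project
Config(apiurl, project)                    # apply per-project defaults and overrides
target_config = conf.config[project]
print(target_config.get('pkglistgen-locales-from'))
print(target_config.get('pkglistgen-scopes', 'target').split(' '))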


@ -35,6 +35,8 @@ You can also adapt the solving on a package level by putting a hash into the pac
* recommended
Evaluate also 'Recommends' in package to determine dependencies. Otherwise only 'required' are considered. Used mainly for patterns in SLE. It can not be combined with platforms, For architecture specific recommends, use patterns.
+* suggested
+Evaluate also 'Suggests' in package to determine dependencies. This implies recommended
* architecture (e.g. x86_64,s390x,ppc64le,aarch64)
Makes the entry specific to the listed architectures. Will get ignored if used in combination with 'recommended'.
* locked
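A hypothetical groups.yml fragment illustrating the new relationship (group and package names are made up), together with a minimal classification loop in the same spirit as Group.parse_yml() from pkglistgen/group.py below; note that 'suggested' also implies 'recommended':

from __future__ import print_function
import yaml

snippet = """
desktop_kde:
  - dolphin
  - plasma5-session:
      - suggested
  - sddm-branding-upstream:
      - locked
"""
locked, recommended, suggested = set(), set(), set()
for group, packages in yaml.safe_load(snippet).items():
    for entry in packages:
        if not isinstance(entry, dict):
            continue  # plain name: solved with default settings
        name, relations = list(entry.items())[0]
        for rel in relations:
            if rel == 'locked':
                locked.add(name)
            elif rel == 'recommended':
                recommended.add(name)
            elif rel == 'suggested':
                suggested.add(name)
                recommended.add(name)  # suggested implies recommended
print(locked, recommended, suggested)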

File diff suppressed because it is too large.

0
pkglistgen/__init__.py Normal file

139
pkglistgen/cli.py Executable file

@ -0,0 +1,139 @@
#!/usr/bin/python
# TODO: solve all devel packages to include
from __future__ import print_function
import cmdln
import os
import re
import ToolBase
import traceback
import logging
from osc import conf
from osclib.conf import Config
from osclib.stagingapi import StagingAPI
from pkglistgen import solv_utils
from pkglistgen.tool import PkgListGen
from pkglistgen.update_repo_handler import update_project
class CommandLineInterface(ToolBase.CommandLineInterface):
SCOPES = ['all', 'target', 'rings', 'staging']
def __init__(self, *args, **kwargs):
ToolBase.CommandLineInterface.__init__(self, args, kwargs)
def setup_tool(self):
tool = PkgListGen()
tool.dry_run = self.options.dry
if self.options.debug:
logging.basicConfig(level=logging.DEBUG)
elif self.options.verbose:
logging.basicConfig(level=logging.INFO)
return tool
def do_create_sle_weakremovers(self, subcmd, opts, target, *prjs):
"""${cmd_name}: generate list of obsolete packages for SLE
The globally specified repositories are taken as the current
package set. All solv files specified on the command line
are old versions of those repos.
The command outputs the weakremovers.inc to be used in
000package-groups
${cmd_usage}
${cmd_option_list}
"""
return self.tool.create_sle_weakremovers(target, prjs)
def do_handle_update_repos(self, subcmd, opts, project):
"""${cmd_name}: Update 00update-repos
Reads config.yml from 00update-repos and will create required solv files
${cmd_usage}
${cmd_option_list}
"""
return update_project(conf.config['apiurl'], project)
@cmdln.option('-f', '--force', action='store_true', help='continue even if build is in progress')
@cmdln.option('-p', '--project', help='target project')
@cmdln.option('-s', '--scope', action='append', help='scope on which to operate ({}, staging:$letter)'.format(', '.join(SCOPES)))
@cmdln.option('--no-checkout', action='store_true', help='reuse checkout in cache')
@cmdln.option('--stop-after-solve', action='store_true', help='only create group files')
@cmdln.option('--staging', help='Only solve that one staging')
@cmdln.option('--only-release-packages', action='store_true', help='Generate 000release-packages only')
def do_update_and_solve(self, subcmd, opts):
"""${cmd_name}: update and solve for given scope
${cmd_usage}
${cmd_option_list}
"""
if opts.staging:
match = re.match('(.*):Staging:(.*)', opts.staging)
opts.scope = ['staging:' + match.group(2)]
if opts.project:
raise ValueError('--staging and --project conflict')
opts.project = match.group(1)
elif not opts.project:
raise ValueError('project is required')
elif not opts.scope:
opts.scope = ['all']
apiurl = conf.config['apiurl']
Config(apiurl, opts.project)
target_config = conf.config[opts.project]
# Store target project as opts.project will contain subprojects.
target_project = opts.project
api = StagingAPI(apiurl, target_project)
main_repo = target_config['main-repo']
if apiurl.find('suse.de') > 0:
# used by product converter
os.environ['OBS_NAME'] = 'build.suse.de'
# special case for all
if opts.scope == ['all']:
opts.scope = target_config.get('pkglistgen-scopes', 'target').split(' ')
self.error_occured = False
def solve_project(project, scope):
try:
if self.tool.update_and_solve_target(api, target_project, target_config, main_repo,
project=project, scope=scope, force=opts.force,
no_checkout=opts.no_checkout,
only_release_packages=opts.only_release_packages,
stop_after_solve=opts.stop_after_solve, drop_list=(scope == 'target')):
self.error_occured = True
except Exception:
# Print exception, but continue to prevent problems effecting one
# project from killing the whole process. Downside being a common
# error will be duplicated for each project. Common exceptions could
# be excluded if a set list is determined, but that is likely not
# practical.
traceback.print_exc()
self.error_occured = True
for scope in opts.scope:
if scope.startswith('staging:'):
letter = re.match('staging:(.*)', scope).group(1)
solve_project(api.prj_from_short(letter.upper()), 'staging')
elif scope == 'target':
solve_project(target_project, scope)
elif scope == 'rings':
solve_project(api.rings[1], scope)
elif scope == 'staging':
letters = api.get_staging_projects_short()
for letter in letters:
solve_project(api.prj_from_short(letter), scope)
else:
raise ValueError('scope "{}" must be one of: {}'.format(scope, ', '.join(self.SCOPES)))
return self.error_occured
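A small sketch of the '--staging' shortcut handled in do_update_and_solve() above: a full staging project name collapses into the target project plus a single 'staging:<letter>' scope, so the rest of the scope machinery can be reused unchanged (the project name is only an example):

from __future__ import print_function
import re

def staging_to_project_and_scope(staging):
    match = re.match('(.*):Staging:(.*)', staging)
    if not match:
        raise ValueError('not a staging project: ' + staging)
    return match.group(1), ['staging:' + match.group(2)]

print(staging_to_project_and_scope('openSUSE:Factory:Staging:A'))
# ('openSUSE:Factory', ['staging:A'])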

55
pkglistgen/file_utils.py Normal file

@ -0,0 +1,55 @@
import glob
import os
import os.path
import shutil
from lxml import etree as ET
def copy_list(file_list, destination):
for name in file_list:
shutil.copy(name, os.path.join(destination, os.path.basename(name)))
def move_list(file_list, destination):
for name in file_list:
os.rename(name, os.path.join(destination, os.path.basename(name)))
def unlink_all_except(path, ignore_list=['_service'], ignore_hidden=True):
for name in os.listdir(path):
if name in ignore_list or (ignore_hidden and name.startswith('.')):
continue
name_path = os.path.join(path, name)
if os.path.isfile(name_path):
os.unlink(name_path)
def copy_directory_contents(source, destination, ignore_list=[]):
for name in os.listdir(source):
name_path = os.path.join(source, name)
if name in ignore_list or not os.path.isfile(name_path):
continue
shutil.copy(name_path, os.path.join(destination, name))
def change_extension(path, original, final):
for name in glob.glob(os.path.join(path, '*{}'.format(original))):
# Assumes the extension is only found at the end.
os.rename(name, name.replace(original, final))
def multibuild_from_glob(destination, pathname):
root = ET.Element('multibuild')
for name in sorted(glob.glob(os.path.join(destination, pathname))):
package = ET.SubElement(root, 'package')
package.text = os.path.splitext(os.path.basename(name))[0]
with open(os.path.join(destination, '_multibuild'), 'w+b') as f:
f.write(ET.tostring(root, pretty_print=True))
def unlink_list(path, names):
for name in names:
if path is None:
name_path = name
else:
name_path = os.path.join(path, name)
if os.path.isfile(name_path):
os.unlink(name_path)
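Usage sketch for multibuild_from_glob(): collect the kiwi files of a product checkout into a _multibuild file with one <package> flavor per kiwi (directory and file names are examples only):

from __future__ import print_function
import os
import tempfile

from pkglistgen import file_utils

checkout = tempfile.mkdtemp()
for name in ('openSUSE-cd-mini-x86_64.kiwi', 'openSUSE-dvd5-dvd-x86_64.kiwi'):
    open(os.path.join(checkout, name), 'w').close()
file_utils.multibuild_from_glob(checkout, '*.kiwi')
print(open(os.path.join(checkout, '_multibuild')).read())
# <multibuild>
#   <package>openSUSE-cd-mini-x86_64</package>
#   <package>openSUSE-dvd5-dvd-x86_64</package>
# </multibuild>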

363
pkglistgen/group.py Normal file

@ -0,0 +1,363 @@
from __future__ import print_function
import logging
import re
import time
from lxml import etree as ET
import solv
class Group(object):
def __init__(self, name, pkglist):
self.name = name
self.safe_name = re.sub(r'\W', '_', name.lower())
self.pkglist = pkglist
self.architectures = pkglist.all_architectures
self.conditional = None
self.packages = dict()
self.locked = set()
self.solved_packages = None
self.solved = False
self.not_found = dict()
self.unresolvable = dict()
self.default_support_status = None
for a in self.architectures:
self.packages[a] = []
self.unresolvable[a] = dict()
self.comment = ' ### AUTOMATICALLY GENERATED, DO NOT EDIT ### '
self.srcpkgs = None
self.develpkgs = dict()
self.silents = set()
self.ignored = set()
# special feature for SLE. Patterns are marked for expansion
# of recommended packages, all others aren't. Only works
# with recommends on actual package names, not virtual
# provides.
self.expand_recommended = set()
# special feature for Tumbleweed. Just like the above but for
# suggested (recommends are default)
self.expand_suggested = set()
pkglist.groups[self.safe_name] = self
self.logger = logging.getLogger(__name__)
def _add_to_packages(self, package, arch=None):
archs = self.architectures
if arch:
archs = [arch]
for a in archs:
# we use groups.yml for powerpc through a branch,
# so ignore inapplicable architectures
if not a in self.packages: continue
self.packages[a].append([package, self.name])
def parse_yml(self, packages):
# package less group is a rare exception
if packages is None:
return
for package in packages:
if not isinstance(package, dict):
self._add_to_packages(package)
continue
name = package.keys()[0]
for rel in package[name]:
arch = None
if rel == 'locked':
self.locked.add(name)
continue
elif rel == 'silent':
self.silents.add(name)
elif rel == 'recommended':
self.expand_recommended.add(name)
elif rel == 'suggested':
self.expand_suggested.add(name)
self.expand_recommended.add(name)
else:
arch = rel
self._add_to_packages(name, arch)
def _verify_solved(self):
if not self.solved:
raise Exception('group {} not solved'.format(self.name))
def inherit(self, group):
for arch in self.architectures:
self.packages[arch] += group.packages[arch]
self.locked.update(group.locked)
self.silents.update(group.silents)
self.expand_recommended.update(group.expand_recommended)
self.expand_suggested.update(group.expand_suggested)
# do not repeat packages
def ignore(self, without):
for arch in ['*'] + self.pkglist.filtered_architectures:
s = set(without.solved_packages[arch].keys())
s |= set(without.solved_packages['*'].keys())
for p in s:
self.solved_packages[arch].pop(p, None)
for p in without.not_found.keys():
if not p in self.not_found:
continue
self.not_found[p] -= without.not_found[p]
if not self.not_found[p]:
self.not_found.pop(p)
for g in without.ignored:
self.ignore(g)
self.ignored.add(without)
def solve(self, use_recommends=False):
""" base: list of base groups or None """
solved = dict()
for arch in self.pkglist.filtered_architectures:
solved[arch] = dict()
self.srcpkgs = dict()
self.recommends = dict()
self.suggested = dict()
for arch in self.pkglist.filtered_architectures:
pool = self.pkglist._prepare_pool(arch)
solver = pool.Solver()
solver.set_flag(solver.SOLVER_FLAG_IGNORE_RECOMMENDED, not use_recommends)
solver.set_flag(solver.SOLVER_FLAG_ADD_ALREADY_RECOMMENDED, use_recommends)
# pool.set_debuglevel(10)
suggested = dict()
# packages resulting from explicit recommended expansion
extra = []
def solve_one_package(n, group):
jobs = list(self.pkglist.lockjobs[arch])
sel = pool.select(str(n), solv.Selection.SELECTION_NAME)
if sel.isempty():
self.logger.debug('{}.{}: package {} not found'.format(self.name, arch, n))
self.not_found.setdefault(n, set()).add(arch)
return
else:
if n in self.expand_recommended:
for s in sel.solvables():
for dep in s.lookup_deparray(solv.SOLVABLE_RECOMMENDS):
# only add recommends that exist as packages
rec = pool.select(dep.str(), solv.Selection.SELECTION_NAME)
if not rec.isempty():
extra.append([dep.str(), group + ':recommended:' + n])
jobs += sel.jobs(solv.Job.SOLVER_INSTALL)
locked = self.locked | self.pkglist.unwanted
for l in locked:
sel = pool.select(str(l), solv.Selection.SELECTION_NAME)
# if we can't find it, it probably is not as important
if not sel.isempty():
jobs += sel.jobs(solv.Job.SOLVER_LOCK)
for s in self.silents:
sel = pool.select(str(s), solv.Selection.SELECTION_NAME | solv.Selection.SELECTION_FLAT)
if sel.isempty():
self.logger.warn('{}.{}: silent package {} not found'.format(self.name, arch, s))
else:
jobs += sel.jobs(solv.Job.SOLVER_INSTALL)
problems = solver.solve(jobs)
if problems:
for problem in problems:
msg = 'unresolvable: {}:{}.{}: {}'.format(self.name, n, arch, problem)
if self.pkglist.ignore_broken:
self.logger.debug(msg)
else:
self.logger.debug(msg)
self.unresolvable[arch][n] = str(problem)
return
for s in solver.get_recommended():
if s.name in locked:
continue
self.recommends.setdefault(s.name, group + ':' + n)
if n in self.expand_suggested:
for s in solver.get_suggested():
suggested[s.name] = group + ':suggested:' + n
self.suggested.setdefault(s.name, suggested[s.name])
trans = solver.transaction()
if trans.isempty():
self.logger.error('%s.%s: nothing to do', self.name, arch)
return
for s in trans.newsolvables():
solved[arch].setdefault(s.name, group + ':' + n)
if None:
reason, rule = solver.describe_decision(s)
print(self.name, s.name, reason, rule.info().problemstr())
# don't ask me why, but that's how it seems to work
if s.lookup_void(solv.SOLVABLE_SOURCENAME):
src = s.name
else:
src = s.lookup_str(solv.SOLVABLE_SOURCENAME)
self.srcpkgs[src] = group + ':' + s.name
start = time.time()
for n, group in self.packages[arch]:
solve_one_package(n, group)
jobs = list(self.pkglist.lockjobs[arch])
locked = self.locked | self.pkglist.unwanted
for l in locked:
sel = pool.select(str(l), solv.Selection.SELECTION_NAME)
# if we can't find it, it probably is not as important
if not sel.isempty():
jobs += sel.jobs(solv.Job.SOLVER_LOCK)
for n in solved[arch].keys() + suggested.keys():
if n in locked: continue
sel = pool.select(str(n), solv.Selection.SELECTION_NAME)
jobs += sel.jobs(solv.Job.SOLVER_INSTALL)
solver.solve(jobs)
trans = solver.transaction()
for s in trans.newsolvables():
solved[arch].setdefault(s.name, group + ':expansion')
end = time.time()
self.logger.info('%s - solving took %f', self.name, end - start)
common = None
# compute common packages across all architectures
for arch in self.pkglist.filtered_architectures:
if common is None:
common = set(solved[arch].keys())
continue
common &= set(solved[arch].keys())
if common is None:
common = set()
# reduce arch specific set by common ones
solved['*'] = dict()
for arch in self.pkglist.filtered_architectures:
for p in common:
solved['*'][p] = solved[arch].pop(p)
self.solved_packages = solved
self.solved = True
def check_dups(self, modules, overlap):
if not overlap:
return
packages = set(self.solved_packages['*'])
for arch in self.pkglist.filtered_architectures:
packages.update(self.solved_packages[arch])
for m in modules:
# do not check with ourselves and only once for the rest
if m.name <= self.name:
continue
if self.name in m.conflicts or m.name in self.conflicts:
continue
mp = set(m.solved_packages['*'])
for arch in self.pkglist.filtered_architectures:
mp.update(m.solved_packages[arch])
if len(packages & mp):
overlap.comment += '\n overlapping between ' + self.name + ' and ' + m.name + '\n'
for p in sorted(packages & mp):
for arch in m.solved_packages.keys():
if m.solved_packages[arch].get(p, None):
overlap.comment += ' # ' + m.name + '.' + arch + ': ' + m.solved_packages[arch][p] + '\n'
if self.solved_packages[arch].get(p, None):
overlap.comment += ' # ' + self.name + '.' + \
arch + ': ' + self.solved_packages[arch][p] + '\n'
overlap.comment += ' - ' + p + '\n'
overlap._add_to_packages(p)
def collect_devel_packages(self):
for arch in self.pkglist.filtered_architectures:
pool = self.pkglist._prepare_pool(arch)
pool.Selection()
for s in pool.solvables_iter():
if s.name.endswith('-devel'):
# don't ask me why, but that's how it seems to work
if s.lookup_void(solv.SOLVABLE_SOURCENAME):
src = s.name
else:
src = s.lookup_str(solv.SOLVABLE_SOURCENAME)
if src in self.srcpkgs.keys():
self.develpkgs[s.name] = self.srcpkgs[src]
def _filter_already_selected(self, modules, pkgdict):
# erase our own - so we don't filter our own
for p in pkgdict.keys():
already_present = False
for m in modules:
for arch in ['*'] + self.pkglist.filtered_architectures:
already_present = already_present or (p in m.solved_packages[arch])
if already_present:
del pkgdict[p]
def filter_already_selected(self, modules):
self._filter_already_selected(modules, self.recommends)
def toxml(self, arch, ignore_broken=False, comment=None):
packages = self.solved_packages.get(arch, dict())
name = self.name
if arch != '*':
name += '.' + arch
root = ET.Element('group', {'name': name})
if comment:
c = ET.Comment(comment)
root.append(c)
if arch != '*':
cond = ET.SubElement(root, 'conditional', {
'name': 'only_{}'.format(arch)})
packagelist = ET.SubElement(
root, 'packagelist', {'relationship': 'recommends'})
missing = dict()
if arch == '*':
missing = self.not_found
unresolvable = self.unresolvable.get(arch, dict())
for name in sorted(packages.keys() + missing.keys() + unresolvable.keys()):
if name in self.silents:
continue
if name in missing:
msg = ' {} not found on {}'.format(name, ','.join(sorted(missing[name])))
if ignore_broken:
c = ET.Comment(msg)
packagelist.append(c)
continue
name = msg
if name in unresolvable:
msg = ' {} uninstallable: {}'.format(name, unresolvable[name])
if ignore_broken:
c = ET.Comment(msg)
packagelist.append(c)
continue
else:
self.logger.error(msg)
name = msg
status = self.pkglist.supportstatus(name) or self.default_support_status
attrs = {'name': name}
if status is not None:
attrs['supportstatus'] = status
ET.SubElement(packagelist, 'package', attrs)
if name in packages and packages[name]:
c = ET.Comment(' reason: {} '.format(packages[name]))
packagelist.append(c)
return root
# just list all packages in it as an array - to be output as one yml
def summary(self):
ret = set()
for arch in ['*'] + self.pkglist.filtered_architectures:
ret |= set(self.solved_packages[arch].keys())
return ret
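Group.solve() drives libsolv through the pattern below: a pool per architecture is loaded from prebuilt .solv files, package names become SELECTION_NAME install jobs (locks are added for unwanted packages), and the solver transaction yields the resolved set. A stripped-down standalone sketch of that flow (the .solv path and package name are placeholders):

from __future__ import print_function
import solv

pool = solv.Pool()
pool.setarch('x86_64')
repo = pool.add_repo('factory')
# .solv file as produced by update_repos() in pkglistgen/tool.py below
repo.add_solv('repo-openSUSE:Factory-standard-x86_64.solv')
pool.addfileprovides()
pool.createwhatprovides()

solver = pool.Solver()
sel = pool.select('bash', solv.Selection.SELECTION_NAME)
problems = solver.solve(sel.jobs(solv.Job.SOLVER_INSTALL))
if problems:
    for problem in problems:
        print('unresolvable: {}'.format(problem))
else:
    for s in solver.transaction().newsolvables():
        print(s.name)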

234
pkglistgen/solv_utils.py Normal file

@ -0,0 +1,234 @@
from __future__ import print_function
import filecmp
import glob
import gzip
import hashlib
import io
import logging
import os.path
import random
import string
import subprocess
import sys
import shutil
import tempfile
from lxml import etree as ET
from osc import conf
from osclib.util import project_list_family
from osclib.util import project_list_family_prior
from osclib.conf import Config
from osclib.cache_manager import CacheManager
import requests
import solv
# share header cache with repochecker
CACHEDIR = CacheManager.directory('repository-meta')
try:
from urllib.parse import urljoin
except ImportError:
# python 2.x
from urlparse import urljoin
logger = logging.getLogger()
def dump_solv_build(baseurl):
"""Determine repo format and build string from remote repository."""
if not baseurl.endswith('/'):
baseurl += '/'
if 'update' in baseurl:
# Could look at .repo file or repomd.xml, but larger change.
return 'update-' + os.path.basename(os.path.normpath(baseurl)), 'update'
url = urljoin(baseurl, 'media.1/media')
with requests.get(url) as media:
for i, line in enumerate(media.iter_lines()):
if i != 1:
continue
name = line
if name is not None and '-Build' in name:
return name, 'media'
url = urljoin(baseurl, 'media.1/build')
with requests.get(url) as build:
name = build.content.strip()
if name is not None and '-Build' in name:
return name, 'build'
raise Exception(baseurl + 'media.1/{media,build} includes no build number')
def dump_solv(baseurl, output_dir, overwrite):
name = None
ofh = sys.stdout
if output_dir:
build, repo_style = dump_solv_build(baseurl)
name = os.path.join(output_dir, '{}.solv'.format(build))
# For update repo name never changes so always update.
if not overwrite and repo_style != 'update' and os.path.exists(name):
logger.info('%s exists', name)
return name
pool = solv.Pool()
pool.setarch()
repo = pool.add_repo(''.join(random.choice(string.letters) for _ in range(5)))
path_prefix = 'suse/' if name and repo_style == 'build' else ''
url = urljoin(baseurl, path_prefix + 'repodata/repomd.xml')
repomd = requests.get(url)
ns = {'r': 'http://linux.duke.edu/metadata/repo'}
root = ET.fromstring(repomd.content)
primary_element = root.find('.//r:data[@type="primary"]', ns)
location = primary_element.find('r:location', ns).get('href')
sha256_expected = primary_element.find('r:checksum[@type="sha256"]', ns).text
# No build information in update repo to use repomd checksum in name.
if repo_style == 'update':
name = os.path.join(output_dir, '{}::{}.solv'.format(build, sha256_expected))
if not overwrite and os.path.exists(name):
logger.info('%s exists', name)
return name
# Only consider latest update repo so remove old versions.
# Pre-release builds only make sense for non-update repos and once
# releases then only relevant for next product which does not
# consider pre-release from previous version.
for old_solv in glob.glob(os.path.join(output_dir, '{}::*.solv'.format(build))):
os.remove(old_solv)
f = tempfile.TemporaryFile()
f.write(repomd.content)
f.flush()
os.lseek(f.fileno(), 0, os.SEEK_SET)
repo.add_repomdxml(f, 0)
url = urljoin(baseurl, path_prefix + location)
with requests.get(url, stream=True) as primary:
sha256 = hashlib.sha256(primary.content).hexdigest()
if sha256 != sha256_expected:
raise Exception('checksums do not match {} != {}'.format(sha256, sha256_expected))
content = gzip.GzipFile(fileobj=io.BytesIO(primary.content))
os.lseek(f.fileno(), 0, os.SEEK_SET)
f.write(content.read())
f.flush()
os.lseek(f.fileno(), 0, os.SEEK_SET)
repo.add_rpmmd(f, None, 0)
repo.create_stubs()
ofh = open(name + '.new', 'w')
repo.write(ofh)
if name is not None:
# Only update file if overwrite or different.
ofh.flush() # Ensure entirely written before comparing.
if not overwrite and os.path.exists(name) and filecmp.cmp(name + '.new', name, shallow=False):
logger.debug('file identical, skip dumping')
os.remove(name + '.new')
else:
os.rename(name + '.new', name)
return name
def solv_merge(solv_merged, *solvs):
solvs = list(solvs) # From tuple.
if os.path.exists(solv_merged):
modified = map(os.path.getmtime, [solv_merged] + solvs)
if max(modified) <= modified[0]:
# The two inputs were modified before or at the same as merged.
logger.debug('merge skipped for {}'.format(solv_merged))
return
with open(solv_merged, 'w') as handle:
p = subprocess.Popen(['mergesolv'] + solvs, stdout=handle)
p.communicate()
if p.returncode:
raise Exception('failed to create merged solv file')
def solv_cache_update(apiurl, cache_dir_solv, target_project, family_last, family_include):
"""Dump solv files (do_dump_solv) for all products in family."""
prior = set()
project_family = project_list_family_prior(
apiurl, target_project, include_self=True, last=family_last)
if family_include:
# Include projects from a different family if desired.
project_family.extend(project_list_family(apiurl, family_include))
for project in project_family:
Config(apiurl, project)
project_config = conf.config[project]
baseurl = project_config.get('download-baseurl')
if not baseurl:
baseurl = project_config.get('download-baseurl-' + project.replace(':', '-'))
baseurl_update = project_config.get('download-baseurl-update')
if not baseurl:
logger.warning('no baseurl configured for {}'.format(project))
continue
urls = [urljoin(baseurl, 'repo/oss/')]
if baseurl_update:
urls.append(urljoin(baseurl_update, 'oss/'))
if project_config.get('nonfree'):
urls.append(urljoin(baseurl, 'repo/non-oss/'))
if baseurl_update:
urls.append(urljoin(baseurl_update, 'non-oss/'))
names = []
for url in urls:
project_display = project
if 'update' in url:
project_display += ':Update'
print('-> dump_solv for {}/{}'.format(
project_display, os.path.basename(os.path.normpath(url))))
logger.debug(url)
output_dir = os.path.join(cache_dir_solv, project)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
solv_name = dump_solv(baseurl=url, output_dir=output_dir, overwrite=False)
if solv_name:
names.append(solv_name)
if not len(names):
logger.warning('no solv files were dumped for {}'.format(project))
continue
# Merge nonfree solv with free solv or copy free solv as merged.
merged = names[0].replace('.solv', '.merged.solv')
if len(names) >= 2:
solv_merge(merged, *names)
else:
shutil.copyfile(names[0], merged)
prior.add(merged)
return prior
def update_merge(nonfree, repos, architectures):
"""Merge free and nonfree solv files or copy free to merged"""
for project, repo in repos:
for arch in architectures:
solv_file = os.path.join(
CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, repo, arch))
solv_file_merged = os.path.join(
CACHEDIR, 'repo-{}-{}-{}.merged.solv'.format(project, repo, arch))
if not nonfree:
shutil.copyfile(solv_file, solv_file_merged)
continue
solv_file_nonfree = os.path.join(
CACHEDIR, 'repo-{}-{}-{}.solv'.format(nonfree, repo, arch))
solv_merge(solv_file_merged, solv_file, solv_file_nonfree)
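solv_cache_update() above combines dump_solv() and solv_merge() for every released product in the family. A hedged usage sketch of those two helpers on their own (URLs and the cache path are examples; network access and the mergesolv tool from libsolv are required):

import os

from pkglistgen import solv_utils

cache = '/tmp/solv-cache'   # example location
if not os.path.exists(cache):
    os.makedirs(cache)
oss = solv_utils.dump_solv(baseurl='http://download.opensuse.org/distribution/leap/15.0/repo/oss/',
                           output_dir=cache, overwrite=False)
nonoss = solv_utils.dump_solv(baseurl='http://download.opensuse.org/distribution/leap/15.0/repo/non-oss/',
                              output_dir=cache, overwrite=False)
# merge non-oss into oss under the naming scheme solv_cache_update() uses
solv_utils.solv_merge(oss.replace('.solv', '.merged.solv'), oss, nonoss)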

734
pkglistgen/tool.py Normal file

@ -0,0 +1,734 @@
from __future__ import print_function
import ToolBase
import glob
import logging
import os
import re
import solv
import shutil
import subprocess
import yaml
import sys
from lxml import etree as ET
from osc.core import checkout_package
from osc.core import http_GET
from osc.core import HTTPError
from osc.core import show_results_meta
from osc.core import Package
from osc.core import undelete_package
from osclib.core import attribute_value_load
from osclib.core import target_archs
from osclib.conf import str2bool
from osclib.core import repository_path_expand
from osclib.core import repository_arch_state
from osclib.cache_manager import CacheManager
try:
from urllib.parse import urlparse
except ImportError:
# python 2.x
from urlparse import urlparse
from pkglistgen import file_utils, solv_utils
from pkglistgen.group import Group
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
PRODUCT_SERVICE = '/usr/lib/obs/service/create_single_product'
class PkgListGen(ToolBase.ToolBase):
def __init__(self):
ToolBase.ToolBase.__init__(self)
# package -> supportatus
self.packages = dict()
self.groups = dict()
self._supportstatus = None
self.input_dir = '.'
self.output_dir = '.'
self.lockjobs = dict()
self.ignore_broken = False
self.unwanted = set()
self.output = None
self.locales = set()
self.did_update = False
self.logger = logging.getLogger(__name__)
self.filtered_architectures = None
self.dry_run = False
self.all_architectures = None
def filter_architectures(self, architectures):
self.filtered_architectures = list(set(architectures) & set(self.all_architectures))
def _load_supportstatus(self):
# XXX
fn = os.path.join(self.input_dir, 'supportstatus.txt')
self._supportstatus = dict()
if os.path.exists(fn):
with open(fn, 'r') as fh:
for l in fh:
# pkg, status
a = l.rstrip().split(' ')
if len(a) > 1:
self._supportstatus[a[0]] = a[1]
def supportstatus(self, package):
if self._supportstatus is None:
self._load_supportstatus()
return self._supportstatus.get(package)
def _load_group_file(self, fn):
output = None
unwanted = None
with open(fn, 'r') as fh:
self.logger.debug('reading %s', fn)
for groupname, group in yaml.safe_load(fh).items():
if groupname == 'OUTPUT':
output = group
continue
if groupname == 'UNWANTED':
unwanted = set(group)
continue
g = Group(groupname, self)
g.parse_yml(group)
return output, unwanted
def load_all_groups(self):
for fn in glob.glob(os.path.join(self.input_dir, 'group*.yml')):
o, u = self._load_group_file(fn)
if o:
if self.output is not None:
raise Exception('OUTPUT defined multiple times')
self.output = o
if u:
self.unwanted |= u
# required to generate release spec files (only)
def write_group_stubs(self):
archs = ['*'] + self.all_architectures
for name in self.groups:
group = self.groups[name]
group.solved_packages = dict()
fn = '{}.group'.format(group.name)
with open(os.path.join(self.output_dir, fn), 'w') as fh:
for arch in archs:
x = group.toxml(arch, self.ignore_broken, None)
x = ET.tostring(x, pretty_print=True)
fh.write(x)
def write_all_groups(self):
self._check_supplements()
summary = dict()
archs = ['*'] + self.all_architectures
for name in self.groups:
group = self.groups[name]
if not group.solved:
continue
summary[name] = group.summary()
fn = '{}.group'.format(group.name)
with open(os.path.join(self.output_dir, fn), 'w') as fh:
comment = group.comment
for arch in archs:
x = group.toxml(arch, self.ignore_broken, comment)
# only comment first time
comment = None
x = ET.tostring(x, pretty_print=True)
x = re.sub(r'\s*<!-- reason:', ' <!-- reason:', x)
fh.write(x)
return summary
def solve_module(self, groupname, includes, excludes, use_recommends):
g = self.groups[groupname]
for i in includes:
g.inherit(self.groups[i])
g.solve(use_recommends)
for e in excludes:
g.ignore(self.groups[e])
def expand_repos(self, project, repo='standard'):
return repository_path_expand(self.apiurl, project, repo)
def _check_supplements(self):
tocheck = set()
tocheck_locales = set()
for arch in self.filtered_architectures:
pool = self._prepare_pool(arch)
sel = pool.Selection()
for s in pool.solvables_iter():
sel.add_raw(solv.Job.SOLVER_SOLVABLE, s.id)
for s in sel.solvables():
for dep in s.lookup_deparray(solv.SOLVABLE_SUPPLEMENTS):
for d in dep.str().split(' '):
if d.startswith('namespace:modalias') or d.startswith('namespace:filesystem'):
tocheck.add(s.name)
for l in self.locales:
i = pool.str2id('locale({})'.format(l))
for s in pool.whatprovides(i):
tocheck_locales.add(s.name)
all_grouped = set()
for g in self.groups.values():
if g.solved:
for arch in g.solved_packages.keys():
if g.solved_packages[arch]:
all_grouped.update(g.solved_packages[arch])
for p in tocheck - all_grouped:
self.logger.warn('package %s has supplements but is not grouped', p)
for p in tocheck_locales - all_grouped:
self.logger.warn('package %s provides supported locale but is not grouped', p)
def _prepare_pool(self, arch):
pool = solv.Pool()
pool.setarch(arch)
self.lockjobs[arch] = []
solvables = set()
for project, reponame in self.repos:
repo = pool.add_repo(project)
s = os.path.join(solv_utils.CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, reponame, arch))
r = repo.add_solv(s)
if not r:
if not self.did_update:
raise Exception(
'failed to add repo {}/{}/{}. Need to run update first?'.format(project, reponame, arch))
continue
for solvable in repo.solvables_iter():
solvable.unset(solv.SOLVABLE_CONFLICTS)
solvable.unset(solv.SOLVABLE_OBSOLETES)
# only take the first solvable in the repo chain
if solvable.name in solvables:
self.lockjobs[arch].append(pool.Job(solv.Job.SOLVER_SOLVABLE | solv.Job.SOLVER_LOCK, solvable.id))
solvables.add(solvable.name)
pool.addfileprovides()
pool.createwhatprovides()
for l in self.locales:
pool.set_namespaceproviders(solv.NAMESPACE_LANGUAGE, pool.Dep(l), True)
return pool
# parse file and merge all groups
def _parse_unneeded(self, filename):
filename = os.path.join(self.input_dir, filename)
if not os.path.isfile(filename):
return set()
fh = open(filename, 'r')
self.logger.debug('reading %s', filename)
result = set()
for groupname, group in yaml.safe_load(fh).items():
result.update(group)
return result
# the unsorted group is special and will contain all the rest for
# the FTP tree. We filter it with unneeded though to create a
# unsorted.yml file for release manager review
def _collect_unsorted_packages(self, modules, unsorted):
uneeded_regexps = [re.compile(r)
for r in self._parse_unneeded('unneeded.yml')]
packages = dict()
if unsorted:
unsorted.solved_packages = dict()
unsorted.solved_packages['*'] = dict()
for arch in self.filtered_architectures:
pool = self._prepare_pool(arch)
pool.Selection()
archpacks = [s.name for s in pool.solvables_iter()]
# copy
filtered = list(archpacks)
for r in uneeded_regexps:
filtered = [p for p in filtered if not r.match(p)]
# convert to set
filtered = set(filtered) - self.unwanted
for g in modules:
if unsorted and g == unsorted:
continue
for a in ('*', arch):
filtered -= set(g.solved_packages[a].keys())
for package in filtered:
packages.setdefault(package, []).append(arch)
if unsorted:
archpacks = set(archpacks)
unsorted.solved_packages[arch] = dict()
for g in modules:
archpacks -= set(g.solved_packages[arch].keys())
archpacks -= set(g.solved_packages['*'].keys())
unsorted.solved_packages[arch] = dict()
for p in archpacks:
unsorted.solved_packages[arch][p] = None
if unsorted:
common = None
for arch in self.filtered_architectures:
if common is None:
common = set(unsorted.solved_packages[arch].keys())
continue
common &= set(unsorted.solved_packages[arch].keys())
for p in common:
unsorted.solved_packages['*'][p] = None
for arch in self.filtered_architectures:
del unsorted.solved_packages[arch][p]
with open(os.path.join(self.output_dir, 'unsorted.yml'), 'w') as fh:
fh.write('unsorted:\n')
for p in sorted(packages.keys()):
fh.write(' - ')
fh.write(p)
if len(packages[p]) != len(self.filtered_architectures):
fh.write(': [')
fh.write(','.join(sorted(packages[p])))
fh.write(']')
reason = self._find_reason(p, modules)
if reason:
fh.write(' # ' + reason)
fh.write(' \n')
# give a hint if the package is related to a group
def _find_reason(self, package, modules):
# go through the modules multiple times to find the "best"
for g in modules:
if package in g.recommends:
return 'recommended by ' + g.recommends[package]
for g in modules:
if package in g.suggested:
return 'suggested by ' + g.suggested[package]
for g in modules:
if package in g.develpkgs:
return 'devel package of ' + g.develpkgs[package]
return None
def update_repos(self, architectures):
# only there to parse the repos
bs_mirrorfull = os.path.join(SCRIPT_PATH, '..', 'bs_mirrorfull')
global_update = False
for project, repo in self.repos:
for arch in architectures:
# TODO: refactor to common function with repo_checker.py
d = os.path.join(solv_utils.CACHEDIR, project, repo, arch)
if not os.path.exists(d):
os.makedirs(d)
try:
# Fetch state before mirroring in-case it changes during download.
state = repository_arch_state(self.apiurl, project, repo, arch)
except HTTPError:
continue
# Would be preferable to include hash in name, but cumbersome to handle without
# reworking a fair bit since the state needs to be tracked.
solv_file = os.path.join(solv_utils.CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, repo, arch))
solv_file_hash = '{}::{}'.format(solv_file, state)
if os.path.exists(solv_file) and os.path.exists(solv_file_hash):
# Solve file exists and hash unchanged, skip updating solv.
self.logger.debug('skipping solv generation for {} due to matching state {}'.format(
'/'.join([project, repo, arch]), state))
continue
# Either hash changed or new, so remove any old hash files.
file_utils.unlink_list(None, glob.glob(solv_file + '::*'))
global_update = True
self.logger.debug('updating %s', d)
args = [bs_mirrorfull]
args.append('--nodebug')
args.append('{}/public/build/{}/{}/{}'.format(self.apiurl, project, repo, arch))
args.append(d)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
for line in p.stdout:
self.logger.info(line.rstrip())
files = [os.path.join(d, f)
for f in os.listdir(d) if f.endswith('.rpm')]
fh = open(solv_file, 'w')
p = subprocess.Popen(
['rpms2solv', '-m', '-', '-0'], stdin=subprocess.PIPE, stdout=fh)
p.communicate('\0'.join(files))
p.wait()
fh.close()
# Create hash file now that solv creation is complete.
open(solv_file_hash, 'a').close()
self.did_update = True
return global_update
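# Emit 'Provides: weakremover(...)' lines for packages that exist in the old
# projects but are neither present nor obsoleted in the target's standard
# repository; entries are grouped per origin repository and wrapped in %ifarch
# when a package is only dropped on some architectures.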
def create_sle_weakremovers(self, target, oldprjs):
self.repos = []
for prj in list(oldprjs) + [target]:
self.repos += self.expand_repos(prj, 'standard')
self.update_repos(self.all_architectures)
drops = dict()
for arch in self.all_architectures:
pool = solv.Pool()
pool.setarch(arch)
sysrepo = None
for project, repo in self.repos:
self.logger.debug('processing %s/%s/%s', project, repo, arch)
fn = os.path.join(solv_utils.CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, repo, arch))
r = pool.add_repo('/'.join([project, repo]))
r.add_solv(fn)
if project == target and repo == 'standard':
sysrepo = r
pool.createwhatprovides()
for s in pool.solvables_iter():
# we only want the old repos
if s.repo == sysrepo: continue
# ignore imported solvables. too dangerous
if s.arch != 'noarch' and s.arch != arch:
continue
haveit = False
for s2 in pool.whatprovides(s.nameid):
if s2.repo == sysrepo and s.nameid == s2.nameid:
haveit = True
if haveit:
continue
haveit = False
# check for already obsoleted packages
nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
if s2.repo == sysrepo: continue
haveit = True
if haveit:
continue
drops.setdefault(s.name, {'repo': s.repo.name, 'archs': set()})
drops[s.name]['archs'].add(arch)
for project, repo in sorted(self.repos):
exclusives = dict()
print('#', project)
for name in sorted(drops.keys()):
if drops[name]['repo'] != '{}/{}'.format(project, repo):
continue
if len(drops[name]['archs']) == len(self.all_architectures):
print('Provides: weakremover({})'.format(name))
else:
jarch = ' '.join(sorted(drops[name]['archs']))
exclusives.setdefault(jarch, []).append(name)
for arch in sorted(exclusives.keys()):
print('%ifarch {}'.format(arch))
for name in sorted(exclusives[arch]):
print('Provides: weakremover({})'.format(name))
print('%endif')
# TODO: no longer used, needs to be migrated
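# Compare old .merged.solv snapshots with the current repositories and write
# <obsoletepackage> entries (obsoletepackages.inc) for packages that vanished
# without being provided or obsoleted by anything in the new repos.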
def create_droplist(self, output_dir, oldsolv):
drops = dict()
for arch in self.filtered_architectures:
for old in oldsolv:
self.logger.debug('%s: processing %s', arch, old)
pool = solv.Pool()
pool.setarch(arch)
for project, repo in self.repos:
fn = os.path.join(solv_utils.CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, repo, arch))
r = pool.add_repo(project)
r.add_solv(fn)
sysrepo = pool.add_repo(os.path.basename(old).replace('.merged.solv', ''))
sysrepo.add_solv(old)
pool.createwhatprovides()
for s in sysrepo.solvables:
haveit = False
for s2 in pool.whatprovides(s.nameid):
if s2.repo == sysrepo or s.nameid != s2.nameid:
continue
haveit = True
if haveit:
continue
nevr = pool.rel2id(s.nameid, s.evrid, solv.REL_EQ)
for s2 in pool.whatmatchesdep(solv.SOLVABLE_OBSOLETES, nevr):
if s2.repo == sysrepo:
continue
haveit = True
if haveit:
continue
if s.name not in drops:
drops[s.name] = sysrepo.name
# drop the reference explicitly to avoid keeping two pools alive until the GC runs
del pool
ofh = sys.stdout
if output_dir:
name = os.path.join(output_dir, 'obsoletepackages.inc')
ofh = open(name, 'w')
for reponame in sorted(set(drops.values())):
print('<!-- %s -->' % reponame, file=ofh)
for p in sorted(drops):
if drops[p] != reponame:
continue
print(' <obsoletepackage>%s</obsoletepackage>' % p, file=ofh)
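# Solve every group listed in the output definition for all filtered
# architectures, apply the optional 'overlap' group, collect the leftovers into
# unsorted.yml and write the generated group files.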
def solve_project(self, ignore_unresolvable=False, ignore_recommended=False, locale=None, locales_from=None):
self.load_all_groups()
if not self.output:
self.logger.error('OUTPUT not defined')
return
if ignore_unresolvable:
self.ignore_broken = True
global_use_recommends = not ignore_recommended
if locale:
for l in locale:
self.locales |= set(l.split(','))
if locales_from:
with open(os.path.join(self.input_dir, locales_from), 'r') as fh:
root = ET.parse(fh).getroot()
self.locales |= set([lang.text for lang in root.findall('.//linguas/language')])
modules = []
# the yml parser makes an array out of everything, so
# we loop a bit more than what we support
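# Each entry of self.output is a one-key dict mapping the group name to its
# settings, roughly like this (hypothetical snippet, not from a real groups.yml):
#   output:
#     - sw_management:
#         includes: [yast2_basis]
#         recommends: false
#     - unsorted:
# so the single key of each entry is the group name and its value the settings.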
for group in self.output:
groupname = list(group.keys())[0]
settings = group[groupname]
if not settings: # e.g. unsorted
settings = {}
includes = settings.get('includes', [])
excludes = settings.get('excludes', [])
use_recommends = settings.get('recommends', global_use_recommends)
self.solve_module(groupname, includes, excludes, use_recommends)
g = self.groups[groupname]
g.conflicts = settings.get('conflicts', [])
g.default_support_status = settings.get('default-support', 'unsupported')
modules.append(g)
# not defined for openSUSE
overlap = self.groups.get('overlap')
for module in modules:
module.check_dups(modules, overlap)
module.collect_devel_packages()
module.filter_already_selected(modules)
if overlap:
ignores = [x.name for x in overlap.ignored]
self.solve_module(overlap.name, [], ignores, use_recommends=False)
overlapped = set(overlap.solved_packages['*'])
for arch in self.filtered_architectures:
overlapped |= set(overlap.solved_packages[arch])
for module in modules:
if module.name == 'overlap' or module in overlap.ignored:
continue
for arch in ['*'] + self.filtered_architectures:
for p in overlapped:
module.solved_packages[arch].pop(p, None)
self._collect_unsorted_packages(modules, self.groups.get('unsorted'))
return self.write_all_groups()
def strip_medium_from_staging(self, path):
# staging projects don't need source and debug medium - and the glibc source
# rpm conflicts between standard and bootstrap_copy repository causing the
# product builder to fail
medium = re.compile('name="(DEBUG|SOURCE)MEDIUM"')
for name in glob.glob(os.path.join(path, '*.kiwi')):
lines = open(name).readlines()
lines = [l for l in lines if not medium.search(l)]
open(name, 'w').writelines(lines)
def build_stub(self, destination, extension):
with open(os.path.join(destination, '.'.join(['stub', extension])), 'w+') as f:
f.write('# prevent building single {} files twice\n'.format(extension))
f.write('Name: stub\n')
f.write('Version: 0.0\n')
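# Commit a checked-out package directory: in dry-run mode only print the diff,
# otherwise run 'osc addremove' and commit with a fixed message.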
def commit_package(self, path):
if self.dry_run:
package = Package(path)
for i in package.get_diff():
print(''.join(i))
else:
# No proper API function to perform the same operation.
print(subprocess.check_output(
' '.join(['cd', path, '&&', 'osc', 'addremove']), shell=True))
package = Package(path)
package.commit(msg='Automatic update', skip_local_service_run=True)
def replace_product_version(self, product_file, product_version):
product_version = '<version>{}</version>'.format(product_version)
lines = open(product_file).readlines()
new_lines = []
for line in lines:
new_lines.append(line.replace('<version></version>', product_version))
open(product_file, 'w').write(''.join(new_lines))
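# Top-level driver: check out the group, product and release-packages
# containers (pkglistgen-group/-product/-release), refresh the repo solv
# caches, solve all groups, run the product converter on the *.product files
# and commit the resulting release-package and product sources back to OBS.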
def update_and_solve_target(self, api, target_project, target_config, main_repo,
project, scope, force, no_checkout,
only_release_packages, stop_after_solve, drop_list=False):
self.all_architectures = target_config.get('pkglistgen-archs').split(' ')
self.repos = self.expand_repos(project, main_repo)
print('[{}] {}/{}: update and solve'.format(scope, project, main_repo))
group = target_config.get('pkglistgen-group', '000package-groups')
product = target_config.get('pkglistgen-product', '000product')
release = target_config.get('pkglistgen-release', '000release-packages')
url = api.makeurl(['source', project])
packages = ET.parse(http_GET(url)).getroot()
if packages.find('entry[@name="{}"]'.format(product)) is None:
if not self.dry_run:
undelete_package(api.apiurl, project, product, 'revive')
# TODO disable build.
print('{} undeleted, skip dvd until next cycle'.format(product))
return
elif not force:
root = ET.fromstringlist(show_results_meta(api.apiurl, project, product,
repository=[main_repo], multibuild=True))
if len(root.xpath('result[@state="building"]')) or len(root.xpath('result[@state="dirty"]')):
print('{}/{} build in progress'.format(project, product))
return
checkout_list = [group, product, release]
if packages.find('entry[@name="{}"]'.format(release)) is None:
if not self.dry_run:
undelete_package(api.apiurl, project, release, 'revive')
print('{} undeleted, skip dvd until next cycle'.format(release))
return
# Cache dir specific to hostname and project.
host = urlparse(api.apiurl).hostname
cache_dir = CacheManager.directory('pkglistgen', host, project)
if not no_checkout:
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
os.makedirs(cache_dir)
group_dir = os.path.join(cache_dir, group)
product_dir = os.path.join(cache_dir, product)
release_dir = os.path.join(cache_dir, release)
for package in checkout_list:
if no_checkout:
print('Skipping checkout of {}/{}'.format(project, package))
continue
checkout_package(api.apiurl, project, package, expand_link=True, prj_dir=cache_dir)
file_utils.unlink_all_except(release_dir)
if not only_release_packages:
file_utils.unlink_all_except(product_dir)
file_utils.copy_directory_contents(group_dir, product_dir,
['supportstatus.txt', 'groups.yml',
'reference-unsorted.yml', 'reference-summary.yml',
'package-groups.changes'])
file_utils.change_extension(product_dir, '.spec.in', '.spec')
file_utils.change_extension(product_dir, '.product.in', '.product')
self.input_dir = group_dir
self.output_dir = product_dir
print('-> do_update')
# make sure we only calculate existing architectures
self.filter_architectures(target_archs(api.apiurl, project, main_repo))
self.update_repos(self.filtered_architectures)
if only_release_packages:
self.load_all_groups()
self.write_group_stubs()
else:
summary = self.solve_project(ignore_unresolvable=str2bool(target_config.get('pkglistgen-ignore-unresolvable')),
ignore_recommended=str2bool(target_config.get('pkglistgen-ignore-recommended')),
locale = target_config.get('pkglistgen-local'),
locales_from = target_config.get('pkglistgen-locales-from'))
if stop_after_solve:
return
delete_products = target_config.get('pkglistgen-delete-products', '').split(' ')
file_utils.unlink_list(product_dir, delete_products)
print('-> product service')
product_version = attribute_value_load(api.apiurl, project, 'ProductVersion')
if not product_version:
# for stagings the product version doesn't matter (I hope)
product_version = '1'
for product_file in glob.glob(os.path.join(product_dir, '*.product')):
self.replace_product_version(product_file, product_version)
print(subprocess.check_output(
[PRODUCT_SERVICE, product_file, product_dir, project]))
for delete_kiwi in target_config.get('pkglistgen-delete-kiwis-{}'.format(scope), '').split(' '):
delete_kiwis = glob.glob(os.path.join(product_dir, delete_kiwi))
file_utils.unlink_list(product_dir, delete_kiwis)
if scope == 'staging':
self.strip_medium_from_staging(product_dir)
spec_files = glob.glob(os.path.join(product_dir, '*.spec'))
file_utils.move_list(spec_files, release_dir)
inc_files = glob.glob(os.path.join(group_dir, '*.inc'))
file_utils.move_list(inc_files, release_dir)
file_utils.multibuild_from_glob(release_dir, '*.spec')
self.build_stub(release_dir, 'spec')
self.commit_package(release_dir)
if only_release_packages:
return
file_utils.multibuild_from_glob(product_dir, '*.kiwi')
self.build_stub(product_dir, 'kiwi')
self.commit_package(product_dir)
error_output = ''
reference_summary = os.path.join(group_dir, 'reference-summary.yml')
if os.path.isfile(reference_summary):
summary_file = os.path.join(product_dir, 'summary.yml')
with open(summary_file, 'w') as f:
f.write('# Summary of packages in groups')
for group in sorted(summary.keys()):
# the unsorted group should appear filtered by
# unneeded.yml - so we need the content of unsorted.yml
# not unsorted.group (this grew a little unnaturally)
if group == 'unsorted':
continue
f.write('\n' + group + ':\n')
for package in sorted(summary[group]):
f.write(' - ' + package + '\n')
try:
error_output += subprocess.check_output(['diff', '-u', reference_summary, summary_file])
except subprocess.CalledProcessError as e:
error_output += e.output
reference_unsorted = os.path.join(group_dir, 'reference-unsorted.yml')
unsorted_file = os.path.join(product_dir, 'unsorted.yml')
try:
error_output += subprocess.check_output(['diff', '-u', reference_unsorted, unsorted_file])
except subprocess.CalledProcessError as e:
error_output += e.output
if len(error_output) > 0:
self.logger.error('Difference in yml:\n' + error_output)
return True

View File

@ -0,0 +1,208 @@
from __future__ import print_function
import filecmp
import glob
import gzip
import hashlib
import io
import logging
import os.path
import re
import random
import string
import subprocess
import sys
import shutil
import tempfile
from lxml import etree as ET
from osc import conf
import osc.core
from osclib.util import project_list_family
from osclib.util import project_list_family_prior
from osclib.conf import Config
from osclib.cache_manager import CacheManager
import requests
import solv
import yaml
# share header cache with repochecker
CACHEDIR = CacheManager.directory('repository-meta')
try:
from urllib.parse import urljoin
except ImportError:
# python 2.x
from urlparse import urljoin
logger = logging.getLogger()
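# Build number detection tries media.1/media first, then media.1/build, and
# finally falls back to the <revision> recorded in repodata/repomd.xml.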
def dump_solv_build(baseurl):
"""Determine repo format and build string from remote repository."""
if not baseurl.endswith('/'):
baseurl += '/'
buildre = re.compile('.*-Build(.*)')
url = urljoin(baseurl, 'media.1/media')
with requests.get(url) as media:
for i, line in enumerate(media.iter_lines()):
if i != 1:
continue
build = buildre.match(line)
if build:
return build.group(1)
url = urljoin(baseurl, 'media.1/build')
with requests.get(url) as build:
name = build.content.strip()
build = buildre.match(name)
if build:
return build.group(1)
url = urljoin(baseurl, 'repodata/repomd.xml')
with requests.get(url) as media:
root = ET.fromstring(media.content)
rev = root.find('.//{http://linux.duke.edu/metadata/repo}revision')
if rev is not None:
return rev.text
raise Exception(baseurl + ' includes no build number')
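# Fetch repomd.xml and the primary metadata of a repository, verify the sha256
# checksum and write the content as a libsolv .solv file named after the build
# string returned by dump_solv_build().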
def dump_solv(baseurl, output_dir):
name = None
ofh = sys.stdout
if output_dir:
build = dump_solv_build(baseurl)
name = os.path.join(output_dir, '{}.solv'.format(build))
pool = solv.Pool()
pool.setarch()
repo = pool.add_repo(''.join(random.choice(string.ascii_letters) for _ in range(5)))
url = urljoin(baseurl, 'repodata/repomd.xml')
repomd = requests.get(url)
ns = {'r': 'http://linux.duke.edu/metadata/repo'}
root = ET.fromstring(repomd.content)
print(url, root)
primary_element = root.find('.//r:data[@type="primary"]', ns)
location = primary_element.find('r:location', ns).get('href')
sha256_expected = primary_element.find('r:checksum[@type="sha256"]', ns).text
path_prefix = 'TODO'
f = tempfile.TemporaryFile()
f.write(repomd.content)
f.flush()
os.lseek(f.fileno(), 0, os.SEEK_SET)
repo.add_repomdxml(f, 0)
url = urljoin(baseurl, path_prefix + location)
with requests.get(url, stream=True) as primary:
sha256 = hashlib.sha256(primary.content).hexdigest()
if sha256 != sha256_expected:
raise Exception('checksums do not match {} != {}'.format(sha256, sha256_expected))
content = gzip.GzipFile(fileobj=io.BytesIO(primary.content))
os.lseek(f.fileno(), 0, os.SEEK_SET)
f.write(content.read())
f.flush()
os.lseek(f.fileno(), 0, os.SEEK_SET)
repo.add_rpmmd(f, None, 0)
repo.create_stubs()
if name is not None:
    ofh = open(name + '.new', 'w')
repo.write(ofh)
if name is not None:
    ofh.flush()  # ensure the file is entirely written before renaming it into place
    os.rename(name + '.new', name)
return name
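# Walk the project family (prior releases plus an optional extra family), look
# up the download baseurls from each project's config and dump solv files for
# the oss / non-oss (and update) repositories into the per-project cache dir.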
def solv_cache_update(apiurl, cache_dir_solv, target_project, family_last, family_include):
"""Dump solv files (do_dump_solv) for all products in family."""
prior = set()
project_family = project_list_family_prior(
apiurl, target_project, include_self=True, last=family_last)
if family_include:
# Include projects from a different family if desired.
project_family.extend(project_list_family(apiurl, family_include))
for project in project_family:
Config(apiurl, project)
project_config = conf.config[project]
baseurl = project_config.get('download-baseurl')
if not baseurl:
baseurl = project_config.get('download-baseurl-' + project.replace(':', '-'))
baseurl_update = project_config.get('download-baseurl-update')
print(project, baseurl, baseurl_update)
if not baseurl:
logger.warning('no baseurl configured for {}'.format(project))
continue
urls = [urljoin(baseurl, 'repo/oss/')]
if baseurl_update:
urls.append(urljoin(baseurl_update, 'oss/'))
if project_config.get('nonfree'):
urls.append(urljoin(baseurl, 'repo/non-oss/'))
if baseurl_update:
urls.append(urljoin(baseurl_update, 'non-oss/'))
names = []
for url in urls:
project_display = project
if 'update' in url:
project_display += ':Update'
print('-> dump_solv for {}/{}'.format(
project_display, os.path.basename(os.path.normpath(url))))
logger.debug(url)
output_dir = os.path.join(cache_dir_solv, project)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
solv_name = dump_solv(baseurl=url, output_dir=output_dir)
if solv_name:
names.append(solv_name)
if not len(names):
logger.warning('no solv files were dumped for {}'.format(project))
continue
print(prior)
return prior
def update_merge(nonfree, repos, architectures):
"""Merge free and nonfree solv files or copy free to merged"""
for project, repo in repos:
for arch in architectures:
solv_file = os.path.join(
CACHEDIR, 'repo-{}-{}-{}.solv'.format(project, repo, arch))
solv_file_merged = os.path.join(
CACHEDIR, 'repo-{}-{}-{}.merged.solv'.format(project, repo, arch))
if not nonfree:
shutil.copyfile(solv_file, solv_file_merged)
continue
solv_file_nonfree = os.path.join(
CACHEDIR, 'repo-{}-{}-{}.solv'.format(nonfree, repo, arch))
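# The nonfree solv computed above still needs to be combined with the free one
# into solv_file_merged; a minimal sketch, assuming libsolv's mergesolv tool is
# installed (not part of the original code):
#   with open(solv_file_merged, 'w') as handle:
#       subprocess.check_call(['mergesolv', solv_file, solv_file_nonfree], stdout=handle)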
def fetch_item(key, opts):
ret = dump_solv(opts['url'], '/tmp')
print(key, opts, ret)
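# Read 00update-repos/config.yml from the project and dump a solv file for each
# repository entry listed there.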
def update_project(apiurl, project):
url = osc.core.makeurl(apiurl, ['source', project, '00update-repos', 'config.yml'])
root = yaml.safe_load(osc.core.http_GET(url))
for item in root:
key = list(item.keys())[0]
fetch_item(key, item[key])