Merge pull request #1438 from Vogtinator/docker-release-properly

Introduce a new docker-publisher bot

commit 7e00d7d8cb
.noserc (+1 line)

@@ -1,2 +1,3 @@
 [nosetests]
 ignore-files=metrics_release\.py
+ignore-files=docker_.+\.py
dist/package/openSUSE-release-tools.spec (vendored, +29 lines)

@@ -113,6 +113,18 @@ BuildArch: noarch
 %description check-source
 Check source review bot that performs basic source analysis and assigns reviews.
 
+%package docker-publisher
+Summary: Docker image publishing bot
+Group: Development/Tools/Other
+BuildArch: noarch
+Requires: python3-requests
+Requires: python3-lxml
+Requires(pre): shadow
+
+%description docker-publisher
+A docker image publishing bot which regularly pushes built docker images from
+several sources (Repo, URL) to several destinations (git, Docker registries)
+
 %package maintenance
 Summary: Maintenance related services
 Group: Development/Tools/Other

@@ -301,6 +313,14 @@ exit 0
 %postun check-source
 %{systemd_postun}
 
+%pre docker-publisher
+getent passwd osrt-docker-publisher > /dev/null || \
+useradd -r -m -s /sbin/nologin -c "user for openSUSE-release-tools-docker-publisher" osrt-docker-publisher
+exit 0
+
+%postun docker-publisher
+%{systemd_postun}
+
 %pre maintenance
 getent passwd osrt-maintenance > /dev/null || \
 useradd -r -m -s /sbin/nologin -c "user for openSUSE-release-tools-maintenance" osrt-maintenance

@@ -372,6 +392,8 @@ exit 0
 %exclude %{_datadir}/%{source_dir}/check_maintenance_incidents.py
 %exclude %{_datadir}/%{source_dir}/check_source.py
 %exclude %{_datadir}/%{source_dir}/devel-project.py
+%exclude %{_datadir}/%{source_dir}/docker_publisher.py
+%exclude %{_datadir}/%{source_dir}/docker_registry.py
 %exclude %{_datadir}/%{source_dir}/metrics
 %exclude %{_datadir}/%{source_dir}/metrics.py
 %exclude %{_datadir}/%{source_dir}/metrics_release.py

@@ -409,6 +431,13 @@ exit 0
 %{_bindir}/osrt-check_source
 %{_datadir}/%{source_dir}/check_source.py
 
+%files docker-publisher
+%{_bindir}/osrt-docker_publisher
+%{_datadir}/%{source_dir}/docker_publisher.py
+%{_datadir}/%{source_dir}/docker_registry.py
+%{_unitdir}/osrt-docker-publisher.service
+%{_unitdir}/osrt-docker-publisher.timer
+
 %files maintenance
 %{_bindir}/osrt-check_maintenance_incidents
 %{_datadir}/%{source_dir}/check_maintenance_incidents.py
docker_publisher.py (new executable file, 475 lines)

@@ -0,0 +1,475 @@
#!/usr/bin/python3
#
# Copyright (c) 2022 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This script's job is to listen for new releases of products with docker images
# and publish those.

import argparse
import json
import os
import re
import requests
import subprocess
import sys
import tempfile
from lxml import etree as xml

import docker_registry

REPOMD_NAMESPACES = {'md': "http://linux.duke.edu/metadata/common",
                     'repo': "http://linux.duke.edu/metadata/repo",
                     'rpm': "http://linux.duke.edu/metadata/rpm"}

class DockerImagePublisher:
    """Base class for handling the publishing of docker images.
    This handles multiple architectures, which have different layers
    and therefore versions."""

    def releasedDockerImageVersion(self, arch):
        """This function returns an identifier for the released docker
        image's version."""
        raise Exception("pure virtual")

    def prepareReleasing(self):
        """Prepare the environment to allow calls to releaseDockerImage."""
        raise Exception("pure virtual")

    def addImage(self, version, arch, image_path):
        """This function adds the docker image with the image manifest, config layers
        in image_path."""
        raise Exception("pure virtual")

    def finishReleasing(self):
        """This function publishes the released layers."""
        raise Exception("pure virtual")


class DockerPublishException(Exception):
    pass


class DockerImageFetcher:
    """Base class for handling the acquiring of docker images."""

    def currentVersion(self):
        """This function returns the version of the latest available version
        of the image for the product."""
        raise Exception("pure virtual")

    def getDockerImage(self, callback):
        """This function downloads the root fs layer and calls callback
        with its path as argument."""
        raise Exception("pure virtual")


class DockerFetchException(Exception):
    pass

class DockerImagePublisherRegistry(DockerImagePublisher):
    """The DockerImagePublisherRegistry class works by using a manifest list to
    describe a tag. The list contains a manifest for each architecture.
    The manifest will be edited instead of replaced, which means if you don't
    call addImage for an architecture, the existing released image stays in place."""
    MAP_ARCH_RPM_DOCKER = {'i586': ("386", None),
                           'x86_64': ("amd64", None),
                           'armv6l': ("arm", "v6"),
                           'armv7l': ("arm", "v7"),
                           'aarch64': ("arm64", "v8"),
                           'ppc64le': ("ppc64le", None),
                           's390x': ("s390x", None)}

    def __init__(self, dhc, tag, aliases=[]):
        """Construct a DIPR by passing a DockerRegistryClient instance as dhc
        and a name for a tag as tag.
        Optionally, add tag aliases as aliases. Those will only be written to,
        never read."""
        self.dhc = dhc
        self.tag = tag
        self.aliases = aliases
        # The manifestlist for the tag is only downloaded if this cache is empty,
        # so needs to be set to None to force a redownload.
        self.cached_manifestlist = None
        # Construct a new manifestlist for the tag.
        self.new_manifestlist = None

    def getDockerArch(self, arch):
        if arch not in self.MAP_ARCH_RPM_DOCKER:
            raise DockerPublishException("Unknown arch %s" % arch)

        return self.MAP_ARCH_RPM_DOCKER[arch]

    def _getManifestlist(self):
        if self.cached_manifestlist is None:
            self.cached_manifestlist = self.dhc.getManifest(self.tag)

        return self.cached_manifestlist

    def releasedDockerImageVersion(self, arch):
        docker_arch, docker_variant = self.getDockerArch(arch)

        manifestlist = self._getManifestlist()

        if manifestlist is None:
            # No manifest -> force outdated version
            return "0"

        for manifest in manifestlist['manifests']:
            if docker_variant is not None:
                if 'variant' not in manifest['platform'] or manifest['platform']['variant'] != docker_variant:
                    continue

            if manifest['platform']['architecture'] == docker_arch:
                if 'vnd-opensuse-version' in manifest:
                    return manifest['vnd-opensuse-version']

        # Arch not in the manifest -> force outdated version
        return "0"

    def prepareReleasing(self):
        if self.new_manifestlist is not None:
            raise DockerPublishException("Did not finish publishing")

        self.new_manifestlist = self._getManifestlist()

        # Generate an empty manifestlist
        if not self.new_manifestlist:
            self.new_manifestlist = {'schemaVersion': 2,
                                     'tag': self.tag,
                                     'mediaType': "application/vnd.docker.distribution.manifest.list.v2+json",
                                     'manifests': []}

        return True

    def getV2ManifestEntry(self, path, filename, mediaType):
        """For V1 -> V2 schema conversion. filename has to contain the digest"""
        digest = filename

        if re.match(r"^[a-f0-9]{64}", digest):
            digest = "sha256:" + os.path.splitext(digest)[0]

        if not digest.startswith("sha256"):
            raise DockerPublishException("Invalid manifest contents")

        return {'mediaType': mediaType,
                'size': os.path.getsize(path + "/" + filename),
                'digest': digest,
                'x-osdp-filename': filename}

    def convertV1ToV2Manifest(self, path, manifest_v1):
        """Converts the v1 manifest in manifest_v1 to a V2 manifest and returns it"""

        layers = []
        # The order of layers changed in V1 -> V2
        for layer_filename in manifest_v1['Layers'][::-1]:
            layers += [self.getV2ManifestEntry(path, layer_filename,
                                               "application/vnd.docker.image.rootfs.diff.tar.gzip")]

        return {'schemaVersion': 2,
                'mediaType': "application/vnd.docker.distribution.manifest.v2+json",
                'config': self.getV2ManifestEntry(path, manifest_v1['Config'],
                                                  "application/vnd.docker.container.image.v1+json"),
                'layers': layers}

    def addImage(self, version, arch, image_path):
        docker_arch, docker_variant = self.getDockerArch(arch)

        manifest = None

        with open(image_path + "/manifest.json") as manifest_file:
            manifest = json.load(manifest_file)

        manifest_v2 = self.convertV1ToV2Manifest(image_path, manifest[0])
        # Upload blobs
        if not self.dhc.uploadBlob(image_path + "/" + manifest_v2['config']['x-osdp-filename'],
                                   manifest_v2['config']['digest']):
            raise DockerPublishException("Could not upload the image config")

        for layer in manifest_v2['layers']:
            if not self.dhc.uploadBlob(image_path + "/" + layer['x-osdp-filename'],
                                       layer['digest']):
                raise DockerPublishException("Could not upload an image layer")

        # Upload the manifest
        manifest_content = json.dumps(manifest_v2).encode("utf-8")
        manifest_digest = self.dhc.uploadManifest(manifest_content)

        if manifest_digest is False:
            raise DockerPublishException("Could not upload the manifest")

        # Register the manifest in the list
        replaced = False
        for manifest in self.new_manifestlist['manifests']:
            if 'variant' in manifest['platform'] and manifest['platform']['variant'] != docker_variant:
                continue

            if manifest['platform']['architecture'] == docker_arch:
                manifest['mediaType'] = manifest_v2['mediaType']
                manifest['size'] = len(manifest_content)
                manifest['digest'] = manifest_digest
                manifest['vnd-opensuse-version'] = version
                if docker_variant is not None:
                    manifest['platform']['variant'] = docker_variant

                replaced = True

        if not replaced:
            # Add it instead
            manifest = {'mediaType': manifest_v2['mediaType'],
                        'size': len(manifest_content),
                        'digest': manifest_digest,
                        'vnd-opensuse-version': version,
                        'platform': {
                            'architecture': docker_arch,
                            'os': "linux"}
                        }
            if docker_variant is not None:
                manifest['platform']['variant'] = docker_variant

            self.new_manifestlist['manifests'] += [manifest]

        return True

    def finishReleasing(self):
        # Generate the manifest content
        manifestlist_content = json.dumps(self.new_manifestlist).encode('utf-8')

        # Push the aliases
        for alias in self.aliases:
            if not self.dhc.uploadManifest(manifestlist_content, alias):
                raise DockerPublishException("Could not push a manifest list alias")

        # Push the new manifest list
        if not self.dhc.uploadManifest(manifestlist_content, self.tag):
            raise DockerPublishException("Could not upload the new manifest list")

        self.new_manifestlist = None
        self.cached_manifestlist = None  # force redownload

        return True

class DockerImageFetcherURL(DockerImageFetcher):
    """A trivial implementation. It downloads a (compressed) tar archive and passes
    the decompressed contents to the callback.
    The version number can't be determined automatically (it would need to extract
    the image and look at /etc/os-release each time - too expensive.) so it
    has to be passed manually."""
    def __init__(self, version, url):
        self.version = version
        self.url = url

    def currentVersion(self):
        return self.version

    def getDockerImage(self, callback):
        """Download the tar and extract it"""
        with tempfile.NamedTemporaryFile() as tar_file:
            tar_file.write(requests.get(self.url).content)
            with tempfile.TemporaryDirectory() as tar_dir:
                # Extract the .tar.xz into the dir
                subprocess.call("tar -xaf '%s' -C '%s'" % (tar_file.name, tar_dir), shell=True)
                return callback(tar_dir)


class DockerImageFetcherOBS(DockerImageFetcher):
    """Uses the OBS API to access the build artifacts.
    Url has to be https://build.opensuse.org/public/build/<project>/<repo>/<arch>/<pkgname>
    If maintenance_release is True, it picks the buildcontainer released last with that name.
    e.g. for "foo" it would pick "foo.2019" instead of "foo" or "foo.2018"."""
    def __init__(self, url, maintenance_release=False):
        self.url = url
        self.newest_release_url = None
        if not maintenance_release:
            self.newest_release_url = url

    def _isMaintenanceReleaseOf(self, release, source):
        """Returns whether release describes a maintenance release of source.
        E.g. "foo.2019", "foo" -> True, "foo-asdf", "foo" -> False"""
        sourcebuildflavor = source.split(":")[1] if ":" in source else None
        releasebuildflavor = release.split(":")[1] if ":" in release else None
        return sourcebuildflavor == releasebuildflavor and release.startswith(source.split(":")[0] + ".")

    def _getNewestReleaseUrl(self):
        if self.newest_release_url is None:
            buildcontainername = self.url.split("/")[-1]
            prjurl = self.url + "/.."
            buildcontainerlist_req = requests.get(prjurl)
            buildcontainerlist = xml.fromstring(buildcontainerlist_req.content)
            releases = [entry for entry in buildcontainerlist.xpath("entry/@name") if
                        self._isMaintenanceReleaseOf(entry, buildcontainername)]
            releases.sort()
            # Pick the first one with binaries
            for release in releases[::-1] + [buildcontainername]:
                self.newest_release_url = prjurl + "/" + release
                try:
                    self._getFilename()
                    break
                except DockerFetchException:
                    continue

        return self.newest_release_url

    def _getFilename(self):
        """Return the name of the binary at the URL with the filename ending in
        .docker.tar."""
        binarylist_req = requests.get(self._getNewestReleaseUrl())
        binarylist = xml.fromstring(binarylist_req.content)
        for binary in binarylist.xpath("binary/@filename"):
            if binary.endswith(".docker.tar"):
                return binary

        raise DockerFetchException("No docker image built in the repository")

    def currentVersion(self):
        """Return {version}-?({flavor}-)Build{build} of the docker file."""
        filename = self._getFilename()
        # Capture everything between arch and filename suffix
        return re.match(r'[^.]*\.[^.]+-(.*)\.docker\.tar$', filename).group(1)

    def getDockerImage(self, callback):
        """Download the tar and extract it"""
        filename = self._getFilename()
        with tempfile.NamedTemporaryFile() as tar_file:
            tar_file.write(requests.get(self.newest_release_url + "/" + filename).content)
            with tempfile.TemporaryDirectory() as tar_dir:
                # Extract the .tar into the dir
                subprocess.call("tar -xaf '%s' -C '%s'" % (tar_file.name, tar_dir), shell=True)
                return callback(tar_dir)

def run():
    drc_tw = docker_registry.DockerRegistryClient(os.environ['REGISTRY'], os.environ['REGISTRY_USER'], os.environ['REGISTRY_PASSWORD'],
                                                  os.environ['REGISTRY_REPO_TW'])
    drc_leap = docker_registry.DockerRegistryClient(os.environ['REGISTRY'], os.environ['REGISTRY_USER'], os.environ['REGISTRY_PASSWORD'],
                                                    os.environ['REGISTRY_REPO_LEAP'])

    config = {
        'tumbleweed': {
            'fetchers': {
                'i586': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/i586/opensuse-tumbleweed-image:docker", maintenance_release=True),  # noqa: E501
                'x86_64': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/x86_64/opensuse-tumbleweed-image:docker", maintenance_release=True),  # noqa: E501
                'aarch64': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/aarch64/opensuse-tumbleweed-image:docker", maintenance_release=True),  # noqa: E501
                'armv7l': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/armv7l/opensuse-tumbleweed-image:docker", maintenance_release=True),  # noqa: E501
                'armv6l': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/armv6l/opensuse-tumbleweed-image:docker", maintenance_release=True),  # noqa: E501
                'ppc64le': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/ppc64le/opensuse-tumbleweed-image:docker", maintenance_release=True),  # noqa: E501
                's390x': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/s390x/opensuse-tumbleweed-image:docker", maintenance_release=True),  # noqa: E501
            },
            'publisher': DockerImagePublisherRegistry(drc_tw, "latest"),
        },
        'leap-15.3': {
            'fetchers': {
                'x86_64': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.3/containers/x86_64/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
                'aarch64': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.3/containers/aarch64/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
                'armv7l': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.3/containers_armv7/armv7l/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
                'ppc64le': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.3/containers/ppc64le/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
                's390x': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.3/containers/s390x/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
            },
            'publisher': DockerImagePublisherRegistry(drc_leap, "latest", ["15.3", "15"]),
        },
        'leap-15.4': {
            'fetchers': {
                'x86_64': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.4/containers/x86_64/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
                'aarch64': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.4/containers/aarch64/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
                'ppc64le': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.4/containers/ppc64le/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
                's390x': DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Leap:15.4/containers/s390x/opensuse-leap-image:docker", maintenance_release=True),  # noqa: E501
            },
            'publisher': DockerImagePublisherRegistry(drc_leap, "15.4"),
        },
    }

    # Parse args after defining the config - the available distros are included
    # in the help output
    parser = argparse.ArgumentParser(description="Docker image publish script",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("distros", metavar="distro", type=str, nargs="*",
                        default=[key for key in config],
                        help="Which distros to check for images to publish.")

    args = parser.parse_args()

    success = True

    for distro in args.distros:
        print("Handling %s" % distro)

        archs_to_update = {}
        fetchers = config[distro]['fetchers']
        publisher = config[distro]['publisher']

        for arch in fetchers:
            print("\tArchitecture %s" % arch)
            try:
                current = fetchers[arch].currentVersion()
                print("\t\tAvailable version: %s" % current)

                released = publisher.releasedDockerImageVersion(arch)
                print("\t\tReleased version: %s" % released)

                if current != released:
                    archs_to_update[arch] = current
            except Exception as e:
                print("\t\tException during version fetching: %s" % e)

        if not archs_to_update:
            print("\tNothing to do.")
            continue

        if not publisher.prepareReleasing():
            print("\tCould not prepare the publishing")
            success = False
            continue

        need_to_upload = False

        for arch, version in archs_to_update.items():
            print("\tUpdating %s image to version %s" % (arch, version))
            try:
                fetchers[arch].getDockerImage(lambda image_path: publisher.addImage(version=version,
                                                                                    arch=arch,
                                                                                    image_path=image_path))
                need_to_upload = True

            except DockerFetchException as dfe:
                print("\t\tCould not fetch the image: %s" % dfe)
                success = False
                continue
            except DockerPublishException as dpe:
                print("\t\tCould not publish the image: %s" % dpe)
                success = False
                continue

        # If nothing got added to the publisher, don't try to upload it.
        # For docker hub it'll just update the "last pushed" time without any change
        if not need_to_upload:
            continue

        if not publisher.finishReleasing():
            print("\tCould not publish the image")
            continue

    return 0 if success else 1


if __name__ == "__main__":
    sys.exit(run())
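Note: the fetcher/publisher split above is the whole integration surface - a fetcher only has to provide currentVersion() and getDockerImage(callback), and a publisher consumes the extracted image directory via addImage(). A minimal sketch of how these pieces compose outside of run(), using only names and URLs that already appear in this file (the single-architecture flow shown here is illustrative, not part of the commit):

#!/usr/bin/python3
# Sketch only: check and publish one Tumbleweed architecture by hand.
import os

import docker_registry
from docker_publisher import DockerImageFetcherOBS, DockerImagePublisherRegistry

drc = docker_registry.DockerRegistryClient(os.environ['REGISTRY'], os.environ['REGISTRY_USER'],
                                           os.environ['REGISTRY_PASSWORD'], os.environ['REGISTRY_REPO_TW'])
fetcher = DockerImageFetcherOBS(url="https://build.opensuse.org/public/build/openSUSE:Containers:Tumbleweed/containers/x86_64/opensuse-tumbleweed-image:docker",  # noqa: E501
                                maintenance_release=True)
publisher = DockerImagePublisherRegistry(drc, "latest")

current = fetcher.currentVersion()
# The same update check run() performs per architecture
if current != publisher.releasedDockerImageVersion('x86_64'):
    publisher.prepareReleasing()
    fetcher.getDockerImage(lambda path: publisher.addImage(version=current, arch='x86_64', image_path=path))
    publisher.finishReleasing()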
docker_registry.py (new file, 214 lines)

@@ -0,0 +1,214 @@
#!/usr/bin/python3
#
# Copyright (c) 2018 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This is a very basic client for the Docker Registry V2 API.
# It exists for a single reason: All clients either:
# - Don't work
# - Don't support uploading
# - Don't support multi-arch images (manifest lists)
# and some even all three.

import hashlib
import json
import os
import urllib.parse
import requests

class DockerRegistryClient():
    def __init__(self, url, username, password, repository):
        self.url = url
        self.username = username
        self.password = password
        self.repository = repository
        self.scopes = ["repository:%s:pull,push,delete" % repository]
        self.token = None

    class DockerRegistryError(Exception):
        """Some nicer display of docker registry errors"""
        def __init__(self, errors):
            self.errors = errors

        def __str__(self):
            ret = "Docker Registry errors:"
            for error in self.errors:
                ret += "\n" + str(error)

            return ret

    def _updateToken(self, www_authenticate):
        bearer_parts = www_authenticate[len("Bearer "):].split(",")
        bearer_dict = {}
        for part in bearer_parts:
            assignment = part.split('=')
            bearer_dict[assignment[0]] = assignment[1].strip('"')

        scope_param = "&scope=".join([""] + [urllib.parse.quote(scope) for scope in self.scopes])
        response = requests.get("%s?service=%s%s" % (bearer_dict['realm'], bearer_dict['service'], scope_param),
                                auth=(self.username, self.password))
        self.token = response.json()['token']

    def doHttpCall(self, method, url, **kwargs):
        """This method wraps the requested method from the requests module to
        add the token for authorization."""
        try_update_token = True

        # Relative to the host
        if url.startswith("/"):
            url = self.url + url

        if "headers" not in kwargs:
            kwargs['headers'] = {}

        while True:
            resp = None
            if self.token is not None:
                kwargs['headers']['Authorization'] = "Bearer " + self.token

            methods = {'POST': requests.post,
                       'GET': requests.get,
                       'HEAD': requests.head,
                       'PUT': requests.put,
                       'DELETE': requests.delete}

            if method not in methods:
                return False

            resp = methods[method](url, **kwargs)

            if resp.status_code == 401 or resp.status_code == 403:
                if try_update_token:
                    try_update_token = False
                    self._updateToken(resp.headers['Www-Authenticate'])
                    continue

            if resp.status_code > 400 and resp.status_code < 404:
                try:
                    errors = resp.json()['errors']
                    raise self.DockerRegistryError(errors)
                except ValueError:
                    pass

            return resp

    def uploadManifest(self, content, reference=None):
        """Upload a manifest. Data is given as bytes in content, the digest/tag in reference.
        If reference is None, the digest is computed and used as reference.
        On success, the used reference is returned. False otherwise."""
        content_json = json.loads(content.decode('utf-8'))
        if "mediaType" not in content_json:
            raise Exception("Invalid manifest")

        if reference is None:
            alg = hashlib.sha256()
            alg.update(content)
            reference = "sha256:" + alg.hexdigest()

        resp = self.doHttpCall("PUT", "/v2/%s/manifests/%s" % (self.repository, reference),
                               headers={'Content-Type': content_json['mediaType']},
                               data=content)

        if resp.status_code != 201:
            return False

        return reference

    def uploadManifestFile(self, filename, reference=None):
        """Upload a manifest. If the filename doesn't equal the digest, it's computed.
        If reference is None, the digest is used. You can use the manifest's tag
        for example.
        On success, the used reference is returned. False otherwise."""
        with open(filename, "rb") as manifest:
            content = manifest.read()

        if reference is None:
            basename = os.path.basename(filename)
            if basename.startswith("sha256:"):
                reference = basename

        if reference is None:
            raise Exception("No reference determined")

        return self.uploadManifest(content, reference)

    def getManifest(self, reference):
        """Get a (json-parsed) manifest with the given reference (digest or tag).
        If the manifest does not exist, return None. For other errors, False."""
        resp = self.doHttpCall("GET", "/v2/%s/manifests/%s" % (self.repository, reference),
                               headers={'Accept': "application/vnd.docker.distribution.manifest.list.v2+json,application/vnd.docker.distribution.manifest.v2+json"})  # noqa: E501

        if resp.status_code == 404:
            return None

        if resp.status_code != 200:
            return False

        return resp.json()

    def getManifestDigest(self, reference):
        """Return the digest of the manifest with the given reference.
        If the manifest doesn't exist or the request fails, it returns False."""
        resp = self.doHttpCall("HEAD", "/v2/%s/manifests/%s" % (self.repository, reference),
                               headers={'Accept': "application/vnd.docker.distribution.manifest.list.v2+json,application/vnd.docker.distribution.manifest.v2+json"})  # noqa: E501

        if resp.status_code != 200:
            return False

        return resp.headers['Docker-Content-Digest']

    def deleteManifest(self, digest):
        """Delete the manifest with the given reference."""
        resp = self.doHttpCall("DELETE", "/v2/%s/manifests/%s" % (self.repository, digest))

        return resp.status_code == 202

    def uploadBlob(self, filename, digest=None):
        """Upload the blob with the given filename and digest. If digest is None,
        the basename has to equal the digest.
        Returns True if blob already exists or upload succeeded."""

        if digest is None:
            digest = os.path.basename(filename)

        if not digest.startswith("sha256:"):
            raise Exception("Invalid digest")

        # Check whether the blob already exists - don't upload it needlessly.
        stat_request = self.doHttpCall("HEAD", "/v2/%s/blobs/%s" % (self.repository, digest))
        if stat_request.status_code == 200 or stat_request.status_code == 307:
            return True

        # For now we can do a single upload call with everything inlined
        # (which also means completely in ram, but currently it's never > 50 MiB)
        content = None
        with open(filename, "rb") as blob:
            content = blob.read()

        # First request an upload "slot", we get an URL we can PUT to back
        upload_request = self.doHttpCall("POST", "/v2/%s/blobs/uploads/" % self.repository)
        if upload_request.status_code == 202:
            location = upload_request.headers['Location']
            upload = self.doHttpCall("PUT", location + "&digest=" + digest,
                                     data=content)
            return upload.status_code == 201

        return False
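Note: DockerRegistryClient only needs a registry URL, credentials and a repository name; token refresh via the Www-Authenticate challenge and scope handling happen inside doHttpCall. A small read-only sketch, assuming the same environment variables the publisher uses and querying the "latest" tag purely as an example:

#!/usr/bin/python3
# Sketch only: inspect what is currently published, without uploading anything.
import os

from docker_registry import DockerRegistryClient

client = DockerRegistryClient(os.environ['REGISTRY'], os.environ['REGISTRY_USER'],
                              os.environ['REGISTRY_PASSWORD'], os.environ['REGISTRY_REPO_TW'])

digest = client.getManifestDigest("latest")   # False if the tag can't be resolved
manifestlist = client.getManifest("latest")   # None if the tag does not exist
print(digest)
if manifestlist:
    for manifest in manifestlist.get('manifests', []):
        print(manifest['platform']['architecture'], manifest.get('vnd-opensuse-version'))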
gocd/dockerhub-publisher.yaml (new file, 26 lines)

@@ -0,0 +1,26 @@
format_version: 3
pipelines:
  openSUSE.DockerHub.Publish:
    group: openSUSE.Checkers
    lock_behavior: unlockWhenFinished
    environment_variables:
      REGISTRY: 'https://registry-1.docker.io'
      REGISTRY_USER: 'opensusereleasebot'
      REGISTRY_PASSWORD: '{{SECRET:[opensuse.secrets][REGISTRY_PASSWORD]}}'
      REGISTRY_REPO_TW: 'opensuse/tumbleweed'
      REGISTRY_REPO_LEAP: 'opensuse/leap'
    materials:
      git:
        git: https://github.com/Vogtinator/opensuse-release-tools.git
        branch: docker-release-gocd
    timer:
      spec: 0 */15 * ? * *
      only_on_changes: false
    stages:
    - Run:
        approval: manual
        resources:
        - staging-bot
        tasks:
        - script:
            ./docker_publisher.py
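Note: the pipeline simply exports the five REGISTRY_* variables and runs the script on a timer. A sketch of reproducing that locally (the credential values below are placeholders, and limiting the run to one distro uses the positional argument the script's argparse setup already accepts):

#!/usr/bin/python3
# Sketch only: run the publisher for Tumbleweed, mirroring the pipeline's environment.
import os
import sys

os.environ.setdefault('REGISTRY', 'https://registry-1.docker.io')
os.environ.setdefault('REGISTRY_USER', 'example-user')          # placeholder
os.environ.setdefault('REGISTRY_PASSWORD', 'example-password')  # placeholder
os.environ.setdefault('REGISTRY_REPO_TW', 'opensuse/tumbleweed')
os.environ.setdefault('REGISTRY_REPO_LEAP', 'opensuse/leap')

sys.argv = ['docker_publisher.py', 'tumbleweed']  # only check this distro
import docker_publisher
sys.exit(docker_publisher.run())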
systemd/osrt-docker-publisher.service (new file, 10 lines)

@@ -0,0 +1,10 @@
[Unit]
Description=openSUSE Release Tools: Docker image publisher

[Service]
User=osrt-docker-publisher
EnvironmentFile=/home/osrt-docker-publisher/.config/osrt-docker_publisher
ExecStart=/usr/bin/osrt-docker_publisher

[Install]
WantedBy=multi-user.target
systemd/osrt-docker-publisher.timer (new file, 10 lines)

@@ -0,0 +1,10 @@
[Unit]
Description=openSUSE Release Tools: Docker image publisher

[Timer]
OnBootSec=120
OnUnitInactiveSec=15 min
Unit=osrt-docker-publisher.service

[Install]
WantedBy=timers.target