- Update to Docker 28.1.1-ce. See upstream changelog online at
  <https://docs.docker.com/engine/release-notes/28/#2811> bsc#1242114
  Includes upstream fixes:
   - CVE-2025-22872 bsc#1241830
- Remove long-outdated build handling for deprecated and unsupported
  devicemapper and AUFS storage drivers. AUFS was removed in v24, and
  devicemapper was removed in v25.
  <https://docs.docker.com/engine/deprecated/#aufs-storage-driver>
- Rebase patches:
 * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
 * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
 * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
 * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
 * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
- Remove upstreamed patches:
 - 0006-CVE-2025-22868-vendor-jws-split-token-into-fixed-num.patch
 - 0007-CVE-2025-22869-vendor-ssh-limit-the-size-of-the-inte.patch
 - cli-0001-docs-include-required-tools-in-source-tree.patch
- Update to docker-buildx v0.23.0. Upstream changelog:
  <https://github.com/docker/buildx/releases/tag/v0.23.0>

OBS-URL: https://build.opensuse.org/package/show/Virtualization:containers/docker?expand=0&rev=431
2025-05-01 17:17:53 +00:00
committed by Git OBS Bridge
commit ba29e28bc2
35 changed files with 30143 additions and 0 deletions

.gitattributes

@@ -0,0 +1,23 @@
## Default LFS
*.7z filter=lfs diff=lfs merge=lfs -text
*.bsp filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.gem filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.lz filter=lfs diff=lfs merge=lfs -text
*.lzma filter=lfs diff=lfs merge=lfs -text
*.obscpio filter=lfs diff=lfs merge=lfs -text
*.oxt filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.rpm filter=lfs diff=lfs merge=lfs -text
*.tbz filter=lfs diff=lfs merge=lfs -text
*.tbz2 filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.ttf filter=lfs diff=lfs merge=lfs -text
*.txz filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text

.gitignore

@@ -0,0 +1 @@
.osc

0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch

@@ -0,0 +1,73 @@
From a37af5b7b0fa804b72b891fc995ffbec101a5975 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 12:41:54 +1100
Subject: [PATCH 1/5] SECRETS: daemon: allow directory creation in /run/secrets
Since FileMode can have the directory bit set, allow a SecretStore
implementation to return secrets that are actually directories. This is
useful for creating directories and subdirectories of secrets.
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/container_operations_unix.go | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index 4316cc4a1157..2309d589ae9e 100644
--- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go
@@ -3,6 +3,7 @@
package daemon // import "github.com/docker/docker/daemon"
import (
+ "bytes"
"context"
"fmt"
"os"
@@ -21,6 +22,7 @@ import (
"github.com/docker/docker/libnetwork/drivers/bridge"
"github.com/docker/docker/pkg/process"
"github.com/docker/docker/pkg/stringid"
+ "github.com/moby/go-archive"
"github.com/moby/sys/mount"
"github.com/moby/sys/user"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -325,9 +327,6 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
if err != nil {
return errors.Wrap(err, "unable to get secret from secret store")
}
- if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
- return errors.Wrap(err, "error injecting secret")
- }
uid, err := strconv.Atoi(s.File.UID)
if err != nil {
@@ -338,6 +337,24 @@ func (daemon *Daemon) setupSecretDir(ctr *container.Container) (setupErr error)
return err
}
+ if s.File.Mode.IsDir() {
+ if err := os.Mkdir(fPath, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error creating secretdir")
+ }
+ if secret.Spec.Data != nil {
+ // If the "file" is a directory, then s.File.Data is actually a tar
+ // archive of the directory. So we just do a tar extraction here.
+ if err := archive.UntarUncompressed(bytes.NewBuffer(secret.Spec.Data), fPath, &archive.TarOptions{
+ IDMap: daemon.idMapping,
+ }); err != nil {
+ return errors.Wrap(err, "error injecting secretdir")
+ }
+ }
+ } else {
+ if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error injecting secret")
+ }
+ }
if err := os.Chown(fPath, ruid+uid, rgid+gid); err != nil {
return errors.Wrap(err, "error setting ownership for secret")
}
--
2.49.0

0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch

@@ -0,0 +1,509 @@
From 7ffb02ee26030a51b05a8396b67642097fe93376 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 11:43:29 +1100
Subject: [PATCH 2/5] SECRETS: SUSE: implement SUSE container secrets
This allows for us to pass in host credentials to a container, allowing
for SUSEConnect to work with containers.
Users can disable this by setting DOCKER_SUSE_SECRETS_ENABLE=0 in
/etc/sysconfig/docker or by adding that setting to docker.service's
Environment using a drop-in file.
THIS PATCH IS NOT TO BE UPSTREAMED, DUE TO THE FACT THAT IT IS
SUSE-SPECIFIC, AND UPSTREAM DOES NOT APPROVE OF THIS CONCEPT BECAUSE IT
MAKES BUILDS NOT ENTIRELY REPRODUCIBLE.
SUSE-Bugs: bsc#1065609 bsc#1057743 bsc#1055676 bsc#1030702 bsc#1231348
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/start.go | 5 +
daemon/suse_secrets.go | 460 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 465 insertions(+)
create mode 100644 daemon/suse_secrets.go
diff --git a/daemon/start.go b/daemon/start.go
index fafb1ac2a342..e5468a27905d 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -146,6 +146,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
}
}()
+ // SUSE:secrets -- inject the SUSE secret store
+ if err := daemon.injectSuseSecretStore(container); err != nil {
+ return err
+ }
+
mnts, err := daemon.setupContainerDirs(container)
if err != nil {
return err
diff --git a/daemon/suse_secrets.go b/daemon/suse_secrets.go
new file mode 100644
index 000000000000..dc8639ba0a85
--- /dev/null
+++ b/daemon/suse_secrets.go
@@ -0,0 +1,460 @@
+/*
+ * suse-secrets: patch for Docker to implement SUSE secrets
+ * Copyright (C) 2017-2021 SUSE LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package daemon
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/container"
+
+ swarmtypes "github.com/docker/docker/api/types/swarm"
+ "github.com/moby/go-archive"
+ swarmexec "github.com/moby/swarmkit/v2/agent/exec"
+ swarmapi "github.com/moby/swarmkit/v2/api"
+ "github.com/moby/sys/user"
+
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const suseSecretsTogglePath = "/etc/docker/suse-secrets-enable"
+
+// parseEnableFile parses a file that can only contain "0" or "1" (with some
+// whitespace).
+func parseEnableFile(path string) (bool, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return false, err
+ }
+ data = bytes.TrimSpace(data)
+
+ switch value := string(data); value {
+ case "1":
+ return true, nil
+ case "0", "":
+ return false, nil
+ default:
+ return false, fmt.Errorf("invalid value %q (must be 0 to disable or 1 to enable)", value)
+ }
+}
+
+func isSuseSecretsEnabled() bool {
+ value, err := parseEnableFile(suseSecretsTogglePath)
+ if err != nil {
+ logrus.Warnf("SUSE:secrets :: error parsing %s: %v -- disabling SUSE secrets", suseSecretsTogglePath, err)
+ value = false
+ }
+ return value
+}
+
+var suseSecretsEnabled = true
+
+func init() {
+ // Make this entire feature toggle-able so that users can disable it if
+ // they run into issues like bsc#1231348.
+ suseSecretsEnabled = isSuseSecretsEnabled()
+ if suseSecretsEnabled {
+ logrus.Infof("SUSE:secrets :: SUSEConnect support enabled (set %s to 0 to disable)", suseSecretsTogglePath)
+ } else {
+ logrus.Infof("SUSE:secrets :: SUSEConnect support disabled by %s", suseSecretsTogglePath)
+ }
+}
+
+// Creating a fake file.
+type SuseFakeFile struct {
+ Path string
+ Uid int
+ Gid int
+ Mode os.FileMode
+ Data []byte
+}
+
+func (s SuseFakeFile) id() string {
+ // NOTE: It is _very_ important that this string always has a prefix of
+ // "suse". This is how we can ensure that we can operate on
+ // SecretReferences with a confidence that it was made by us.
+ return fmt.Sprintf("suse_%s_%s", digest.FromBytes(s.Data).Hex(), s.Path)
+}
+
+func (s SuseFakeFile) toSecret() *swarmapi.Secret {
+ return &swarmapi.Secret{
+ ID: s.id(),
+ Internal: true,
+ Spec: swarmapi.SecretSpec{
+ Data: s.Data,
+ },
+ }
+}
+
+func (s SuseFakeFile) toSecretReference(idMaps user.IdentityMapping) *swarmtypes.SecretReference {
+ // Figure out the host-facing {uid,gid} based on the provided maps. Fall
+ // back to root if the UID/GID don't match (we are guaranteed that root is
+ // mapped).
+ hostUID, hostGID := idMaps.RootPair()
+ if uid, gid, err := idMaps.ToHost(s.Uid, s.Gid); err == nil {
+ hostUID, hostGID = uid, gid
+ }
+
+ // Return the secret reference as a file target.
+ return &swarmtypes.SecretReference{
+ SecretID: s.id(),
+ SecretName: s.id(),
+ File: &swarmtypes.SecretReferenceFileTarget{
+ Name: s.Path,
+ UID: fmt.Sprintf("%d", hostUID),
+ GID: fmt.Sprintf("%d", hostGID),
+ Mode: s.Mode,
+ },
+ }
+}
+
+// readDir will recurse into a directory prefix/dir, and return the set of
+// secrets in that directory (as a tar archive that is packed inside the "data"
+// field). The Path attribute of each has the prefix stripped. Symlinks are
+// dereferenced.
+func readDir(prefix, dir string) ([]*SuseFakeFile, error) {
+ var suseFiles []*SuseFakeFile
+
+ path := filepath.Join(prefix, dir)
+ fi, err := os.Stat(path)
+ if err != nil {
+ // Ignore missing files.
+ if os.IsNotExist(err) {
+ // If the path itself exists it was a dangling symlink so give a
+ // warning about the symlink dangling.
+ _, err2 := os.Lstat(path)
+ if !os.IsNotExist(err2) {
+ logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path)
+ }
+ return nil, nil
+ }
+ return nil, err
+ } else if !fi.IsDir() {
+ // Just to be safe.
+ logrus.Infof("SUSE:secrets :: expected %q to be a directory, but was a file", path)
+ return readFile(prefix, dir)
+ }
+ path, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Construct a tar archive of the source directory. We tar up the prefix
+ // directory and add dir as an IncludeFiles specifically so that we
+ // preserve the name of the directory itself.
+ tarStream, err := archive.TarWithOptions(path, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to tar source directory %q: %v", path, err)
+ }
+ tarStreamBytes, err := ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read full tar archive: %v", err)
+ }
+
+ // Get a list of the symlinks in the tar archive.
+ var symlinks []string
+ tmpTr := tar.NewReader(bytes.NewBuffer(tarStreamBytes))
+ for {
+ hdr, err := tmpTr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read through tar reader: %v", err)
+ }
+ if hdr.Typeflag == tar.TypeSymlink {
+ symlinks = append(symlinks, hdr.Name)
+ }
+ }
+
+ // Symlinks aren't dereferenced in the above archive, so we explicitly do a
+ // rewrite of the tar archive to include all symlinks to files. We cannot
+ // do directories here, but lower-level directory symlinks aren't supported
+ // by zypper so this isn't an issue.
+ symlinkModifyMap := map[string]archive.TarModifierFunc{}
+ for _, sym := range symlinks {
+ logrus.Debugf("SUSE:secrets: archive(%q) %q is a need-to-rewrite symlink", path, sym)
+ symlinkModifyMap[sym] = func(tarPath string, hdr *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
+ logrus.Debugf("SUSE:secrets: archive(%q) mapping for symlink %q", path, tarPath)
+ tarFullPath := filepath.Join(path, tarPath)
+
+ // Get a copy of the original byte stream.
+ oldContent, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, nil, fmt.Errorf("suse_rewrite: failed to read archive entry %q: %v", tarPath, err)
+ }
+
+ // Check that the file actually exists.
+ fi, err := os.Stat(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to stat archive entry %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ // Read the actual contents.
+ content, err := ioutil.ReadFile(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to read %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ newHdr, err := tar.FileInfoHeader(fi, "")
+ if err != nil {
+ // Fake the header.
+ newHdr = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ }
+ }
+
+ // Update the key fields.
+ hdr.Typeflag = newHdr.Typeflag
+ hdr.Mode = newHdr.Mode
+ hdr.Linkname = ""
+ return hdr, content, nil
+ }
+ }
+
+ // Create the rewritten tar stream.
+ tarStream = archive.ReplaceFileTarWrapper(ioutil.NopCloser(bytes.NewBuffer(tarStreamBytes)), symlinkModifyMap)
+ tarStreamBytes, err = ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read rewritten archive: %v", err)
+ }
+
+ // Add the tar stream as a "file".
+ suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: dir,
+ Mode: fi.Mode(),
+ Data: tarStreamBytes,
+ })
+ return suseFiles, nil
+}
+
+// readFile returns a secret given a file under a given prefix.
+func readFile(prefix, file string) ([]*SuseFakeFile, error) {
+ path := filepath.Join(prefix, file)
+ fi, err := os.Stat(path)
+ if err != nil {
+ // Ignore missing files.
+ if os.IsNotExist(err) {
+ // If the path itself exists it was a dangling symlink so give a
+ // warning about the symlink dangling.
+ _, err2 := os.Lstat(path)
+ if !os.IsNotExist(err2) {
+ logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path)
+ }
+ return nil, nil
+ }
+ return nil, err
+ } else if fi.IsDir() {
+ // Just to be safe.
+ logrus.Infof("SUSE:secrets :: expected %q to be a file, but was a directory", path)
+ return readDir(prefix, file)
+ }
+
+ var uid, gid int
+ if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+ uid, gid = int(stat.Uid), int(stat.Gid)
+ } else {
+ logrus.Warnf("SUSE:secrets :: failed to cast file stat_t: defaulting to owned by root:root: %s", path)
+ uid, gid = 0, 0
+ }
+
+ bytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var suseFiles []*SuseFakeFile
+ suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: file,
+ Uid: uid,
+ Gid: gid,
+ Mode: fi.Mode(),
+ Data: bytes,
+ })
+ return suseFiles, nil
+}
+
+// getHostSuseSecretData returns the list of SuseFakeFiles the need to be added
+// as SUSE secrets.
+func getHostSuseSecretData() ([]*SuseFakeFile, error) {
+ secrets := []*SuseFakeFile{}
+
+ credentials, err := readDir("/etc/zypp", "credentials.d")
+ if err != nil {
+ if os.IsNotExist(err) {
+ credentials = []*SuseFakeFile{}
+ } else {
+ logrus.Errorf("SUSE:secrets :: error while reading zypp credentials: %s", err)
+ return nil, err
+ }
+ }
+ secrets = append(secrets, credentials...)
+
+ suseConnect, err := readFile("/etc", "SUSEConnect")
+ if err != nil {
+ if os.IsNotExist(err) {
+ suseConnect = []*SuseFakeFile{}
+ } else {
+ logrus.Errorf("SUSE:secrets :: error while reading /etc/SUSEConnect: %s", err)
+ return nil, err
+ }
+ }
+ secrets = append(secrets, suseConnect...)
+
+ return secrets, nil
+}
+
+// To fake an empty store, in the case where we are operating on a container
+// that was created pre-swarmkit. Otherwise segfaults and other fun things
+// happen. See bsc#1057743.
+type (
+ suseEmptyStore struct{}
+ suseEmptySecret struct{}
+ suseEmptyConfig struct{}
+ suseEmptyVolume struct{}
+)
+
+// In order to reduce the amount of code touched outside of this file, we
+// implement the swarm API for DependencyGetter. This asserts that this
+// requirement will always be matched. In addition, for the case of the *empty*
+// getters this reduces memory usage by having a global instance.
+var (
+ _ swarmexec.DependencyGetter = &suseDependencyStore{}
+ emptyStore swarmexec.DependencyGetter = suseEmptyStore{}
+ emptySecret swarmexec.SecretGetter = suseEmptySecret{}
+ emptyConfig swarmexec.ConfigGetter = suseEmptyConfig{}
+ emptyVolume swarmexec.VolumeGetter = suseEmptyVolume{}
+)
+
+var errSuseEmptyStore = fmt.Errorf("SUSE:secrets :: tried to get a resource from empty store [this is a bug]")
+
+func (_ suseEmptyConfig) Get(_ string) (*swarmapi.Config, error) { return nil, errSuseEmptyStore }
+func (_ suseEmptySecret) Get(_ string) (*swarmapi.Secret, error) { return nil, errSuseEmptyStore }
+func (_ suseEmptyVolume) Get(_ string) (string, error) { return "", errSuseEmptyStore }
+func (_ suseEmptyStore) Secrets() swarmexec.SecretGetter { return emptySecret }
+func (_ suseEmptyStore) Configs() swarmexec.ConfigGetter { return emptyConfig }
+func (_ suseEmptyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume }
+
+type suseDependencyStore struct {
+ dfl swarmexec.DependencyGetter
+ secrets map[string]*swarmapi.Secret
+}
+
+// The following are effectively dumb wrappers that return ourselves, or the
+// default.
+func (s *suseDependencyStore) Secrets() swarmexec.SecretGetter { return s }
+func (s *suseDependencyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume }
+func (s *suseDependencyStore) Configs() swarmexec.ConfigGetter { return s.dfl.Configs() }
+
+// Get overrides the underlying DependencyGetter with our own secrets (falling
+// through to the underlying DependencyGetter if the secret isn't present).
+func (s *suseDependencyStore) Get(id string) (*swarmapi.Secret, error) {
+ logrus.Debugf("SUSE:secrets :: id=%s requested from suseDependencyGetter", id)
+
+ secret, ok := s.secrets[id]
+ if !ok {
+ // fallthrough
+ return s.dfl.Secrets().Get(id)
+ }
+ return secret, nil
+}
+
+// removeSuseSecrets removes any SecretReferences which were added by us
+// explicitly (this is detected by checking that the prefix has a 'suse'
+// prefix). See bsc#1057743.
+func removeSuseSecrets(c *container.Container) {
+ var without []*swarmtypes.SecretReference
+ for _, secret := range c.SecretReferences {
+ if strings.HasPrefix(secret.SecretID, "suse") {
+ logrus.Debugf("SUSE:secrets :: removing 'old' suse secret %q from container %q", secret.SecretID, c.ID)
+ continue
+ }
+ without = append(without, secret)
+ }
+ c.SecretReferences = without
+}
+
+func (daemon *Daemon) injectSuseSecretStore(c *container.Container) error {
+ // We drop any "old" SUSE secrets, as it appears that old containers (when
+ // restarted) could still have references to old secrets. The .id() of all
+ // secrets have a prefix of "suse" so this is much easier. See bsc#1057743
+ // for details on why this could cause issues.
+ removeSuseSecrets(c)
+
+ // Don't inject anything if the administrator has disabled suse secrets.
+ // However, for previous existing containers we need to remove old secrets
+ // (see above), otherwise they will still have old secret data.
+ if !suseSecretsEnabled {
+ logrus.Debugf("SUSE:secrets :: skipping injection of secrets into container %q because of %s", c.ID, suseSecretsTogglePath)
+ return nil
+ }
+
+ newDependencyStore := &suseDependencyStore{
+ dfl: c.DependencyStore,
+ secrets: make(map[string]*swarmapi.Secret),
+ }
+ // Handle old containers. See bsc#1057743.
+ if newDependencyStore.dfl == nil {
+ newDependencyStore.dfl = emptyStore
+ }
+
+ secrets, err := getHostSuseSecretData()
+ if err != nil {
+ return err
+ }
+
+ idMaps := daemon.idMapping
+ for _, secret := range secrets {
+ newDependencyStore.secrets[secret.id()] = secret.toSecret()
+ c.SecretReferences = append(c.SecretReferences, secret.toSecretReference(idMaps))
+ }
+
+ c.DependencyStore = newDependencyStore
+
+ // bsc#1057743 -- In older versions of Docker we added volumes explicitly
+ // to the mount list. This causes clashes because of duplicate namespaces.
+ // If we see an existing mount that will clash with the in-built secrets
+ // mount we assume it's our fault.
+ intendedMounts, err := c.SecretMounts()
+ if err != nil {
+ logrus.Warnf("SUSE:secrets :: fetching old secret mounts: %v", err)
+ return err
+ }
+ for _, intendedMount := range intendedMounts {
+ mountPath := intendedMount.Destination
+ if volume, ok := c.MountPoints[mountPath]; ok {
+ logrus.Debugf("SUSE:secrets :: removing pre-existing %q mount: %#v", mountPath, volume)
+ delete(c.MountPoints, mountPath)
+ }
+ }
+ return nil
+}
--
2.49.0
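A small operational note (not part of the patch itself): the daemon reads /etc/docker/suse-secrets-enable once at startup via parseEnableFile(), and injects the SUSE secrets only when that file contains "1". A minimal sketch of toggling it, assuming a systemd-managed daemon:
```
# Disable SUSE secret injection (e.g. to work around issues like bsc#1231348) ...
echo 0 | sudo tee /etc/docker/suse-secrets-enable
sudo systemctl restart docker

# ... and re-enable it later.
echo 1 | sudo tee /etc/docker/suse-secrets-enable
sudo systemctl restart docker
```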

0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch

@@ -0,0 +1,46 @@
From 1e240291ac3c14cd43e2a88eba2bcf505ab6b499 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Mon, 22 May 2023 15:44:54 +1000
Subject: [PATCH 3/5] BUILD: SLE12: revert "graphdriver/btrfs: use kernel UAPI
headers"
This reverts commit 3208dcabdc8997340b255f5b880fef4e3f54580d.
On SLE 12, our UAPI headers are too old, resulting in us being unable to
build the btrfs driver with the new headers. This patch is only needed
for SLE-12.
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
daemon/graphdriver/btrfs/btrfs.go | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index 6f8ec970316b..4541abdfaf46 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -4,17 +4,12 @@ package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
/*
#include <stdlib.h>
-#include <stdio.h>
#include <dirent.h>
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
- #error "Headers from kernel >= 4.12 are required to build with Btrfs support."
- #error "HINT: Set 'DOCKER_BUILDTAGS=exclude_graphdriver_btrfs' to build without Btrfs."
-#endif
-
-#include <linux/btrfs.h>
-#include <linux/btrfs_tree.h>
+// keep struct field name compatible with btrfs-progs < 6.1.
+#define max_referenced max_rfer
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
--
2.49.0

0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch

@@ -0,0 +1,89 @@
From ea43e18e821bda80dd475ce0f16e1f617399dc20 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Fri, 29 Jun 2018 17:59:30 +1000
Subject: [PATCH 4/5] bsc1073877: apparmor: clobber docker-default profile on
start
In the process of making docker-default reloading far less expensive,
567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor
profiles") mistakenly made the initial profile load at dockerd start-up
lazy. As a result, if you have a running Docker daemon and upgrade it to
a new one with an updated AppArmor profile the new profile will not take
effect (because the old one is still loaded). The fix for this is quite
trivial, and just requires us to clobber the profile on start-up.
Fixes: 567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor profiles")
SUSE-Bugs: bsc#1099277
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/apparmor_default.go | 14 ++++++++++----
daemon/apparmor_default_unsupported.go | 4 ++++
daemon/daemon.go | 5 +++--
3 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go
index 5a3121d05b5a..d63799b4e113 100644
--- a/daemon/apparmor_default.go
+++ b/daemon/apparmor_default.go
@@ -23,6 +23,15 @@ func DefaultApparmorProfile() string {
return ""
}
+func clobberDefaultAppArmorProfile() error {
+ if apparmor.HostSupports() {
+ if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
+ return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
+ }
+ }
+ return nil
+}
+
func ensureDefaultAppArmorProfile() error {
if apparmor.HostSupports() {
loaded, err := aaprofile.IsLoaded(defaultAppArmorProfile)
@@ -36,10 +45,7 @@ func ensureDefaultAppArmorProfile() error {
}
// Load the profile.
- if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
- return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
- }
+ return clobberDefaultAppArmorProfile()
}
-
return nil
}
diff --git a/daemon/apparmor_default_unsupported.go b/daemon/apparmor_default_unsupported.go
index be4938f5b61a..2b326fea5829 100644
--- a/daemon/apparmor_default_unsupported.go
+++ b/daemon/apparmor_default_unsupported.go
@@ -2,6 +2,10 @@
package daemon // import "github.com/docker/docker/daemon"
+func clobberDefaultAppArmorProfile() error {
+ return nil
+}
+
func ensureDefaultAppArmorProfile() error {
return nil
}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index c38c34ab8348..6ad567279025 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -873,8 +873,9 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
log.G(ctx).Warnf("Failed to configure golang's threads limit: %v", err)
}
- // ensureDefaultAppArmorProfile does nothing if apparmor is disabled
- if err := ensureDefaultAppArmorProfile(); err != nil {
+ // Make sure we clobber any pre-existing docker-default profile to ensure
+ // that upgrades to the profile actually work smoothly.
+ if err := clobberDefaultAppArmorProfile(); err != nil {
log.G(ctx).WithError(err).Error("Failed to ensure default apparmor profile is loaded")
}
--
2.49.0
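A quick way to verify the effect of this change on a host is to check that the docker-default profile is present in the kernel after a daemon restart; a sketch, assuming AppArmor is enabled on the system:
```
sudo systemctl restart docker
# The clobbered/reloaded profile should appear among the loaded AppArmor profiles.
sudo grep docker-default /sys/kernel/security/apparmor/profiles
```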

0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch

@@ -0,0 +1,326 @@
From afefee2f8753fa82c7a719bd6118d832f61c82ac Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 11 Oct 2023 21:19:12 +1100
Subject: [PATCH 5/5] SLE12: revert "apparmor: remove version-conditionals from
template"
This reverts the following commits:
* 7008a514493a ("profiles/apparmor: remove version-conditional constraints (< 2.8.96)")
* 2e19a4d56bf2 ("contrib/apparmor: remove version-conditionals (< 2.9) from template")
* d169a5730649 ("contrib/apparmor: remove remaining version-conditionals (< 2.9) from template")
* ecaab085db4b ("profiles/apparmor: remove use of aaparser.GetVersion()")
* e3e715666f95 ("pkg/aaparser: deprecate GetVersion, as it's no longer used")
These version conditionals are still required on SLE 12, where our
apparmor_parser version is quite old.
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
contrib/apparmor/main.go | 16 ++++++-
contrib/apparmor/template.go | 16 +++++++
pkg/aaparser/aaparser.go | 86 +++++++++++++++++++++++++++++++++++
profiles/apparmor/apparmor.go | 16 ++++++-
profiles/apparmor/template.go | 4 ++
5 files changed, 134 insertions(+), 4 deletions(-)
create mode 100644 pkg/aaparser/aaparser.go
diff --git a/contrib/apparmor/main.go b/contrib/apparmor/main.go
index 899d8378edae..93f98cbd20e5 100644
--- a/contrib/apparmor/main.go
+++ b/contrib/apparmor/main.go
@@ -6,9 +6,13 @@ import (
"os"
"path"
"text/template"
+
+ "github.com/docker/docker/pkg/aaparser"
)
-type profileData struct{}
+type profileData struct {
+ Version int
+}
func main() {
if len(os.Args) < 2 {
@@ -18,6 +22,15 @@ func main() {
// parse the arg
apparmorProfilePath := os.Args[1]
+ version, err := aaparser.GetVersion()
+ if err != nil {
+ log.Fatal(err)
+ }
+ data := profileData{
+ Version: version,
+ }
+ fmt.Printf("apparmor_parser is of version %+v\n", data)
+
// parse the template
compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate)
if err != nil {
@@ -35,7 +48,6 @@ func main() {
}
defer f.Close()
- data := profileData{}
if err := compiled.Execute(f, data); err != nil {
log.Fatalf("executing template failed: %v", err)
}
diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go
index 58afcbe845ee..e6d0b6d37c58 100644
--- a/contrib/apparmor/template.go
+++ b/contrib/apparmor/template.go
@@ -20,9 +20,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
umount,
pivot_root,
+{{if ge .Version 209000}}
signal (receive) peer=@{profile_name},
signal (receive) peer=unconfined,
signal (send),
+{{end}}
network,
capability,
owner /** rw,
@@ -45,10 +47,12 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/etc/ld.so.cache r,
/etc/passwd r,
+{{if ge .Version 209000}}
ptrace peer=@{profile_name},
ptrace (read) peer=docker-default,
deny ptrace (trace) peer=docker-default,
deny ptrace peer=/usr/bin/docker///bin/ps,
+{{end}}
/usr/lib/** rm,
/lib/** rm,
@@ -69,9 +73,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/sbin/zfs rCx,
/sbin/apparmor_parser rCx,
+{{if ge .Version 209000}}
# Transitions
change_profile -> docker-*,
change_profile -> unconfined,
+{{end}}
profile /bin/cat (complain) {
/etc/ld.so.cache r,
@@ -93,8 +99,10 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/dev/null rw,
/bin/ps mr,
+{{if ge .Version 209000}}
# We don't need ptrace so we'll deny and ignore the error.
deny ptrace (read, trace),
+{{end}}
# Quiet dac_override denials
deny capability dac_override,
@@ -112,11 +120,15 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/proc/tty/drivers r,
}
profile /sbin/iptables (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability net_admin,
}
profile /sbin/auplink flags=(attach_disconnected, complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability sys_admin,
capability dac_override,
@@ -135,7 +147,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/proc/[0-9]*/mounts rw,
}
profile /sbin/modprobe /bin/kmod (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability sys_module,
/etc/ld.so.cache r,
/lib/** rm,
@@ -149,7 +163,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
}
# xz works via pipes, so we do not need access to the filesystem.
profile /usr/bin/xz (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
/etc/ld.so.cache r,
/lib/** rm,
/usr/bin/xz rm,
diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go
new file mode 100644
index 000000000000..89b48b2dba58
--- /dev/null
+++ b/pkg/aaparser/aaparser.go
@@ -0,0 +1,86 @@
+// Package aaparser is a convenience package interacting with `apparmor_parser`.
+package aaparser // import "github.com/docker/docker/pkg/aaparser"
+
+import (
+ "fmt"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+const (
+ binary = "apparmor_parser"
+)
+
+// GetVersion returns the major and minor version of apparmor_parser.
+func GetVersion() (int, error) {
+ output, err := cmd("", "--version")
+ if err != nil {
+ return -1, err
+ }
+
+ return parseVersion(output)
+}
+
+// cmd runs `apparmor_parser` with the passed arguments.
+func cmd(dir string, arg ...string) (string, error) {
+ c := exec.Command(binary, arg...)
+ c.Dir = dir
+
+ output, err := c.CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
+ }
+
+ return string(output), nil
+}
+
+// parseVersion takes the output from `apparmor_parser --version` and returns
+// a representation of the {major, minor, patch} version as a single number of
+// the form MMmmPPP {major, minor, patch}.
+func parseVersion(output string) (int, error) {
+ // output is in the form of the following:
+ // AppArmor parser version 2.9.1
+ // Copyright (C) 1999-2008 Novell Inc.
+ // Copyright 2009-2012 Canonical Ltd.
+
+ lines := strings.SplitN(output, "\n", 2)
+ words := strings.Split(lines[0], " ")
+ version := words[len(words)-1]
+
+ // trim "-beta1" suffix from version="3.0.0-beta1" if exists
+ version = strings.SplitN(version, "-", 2)[0]
+ // also trim "~..." suffix used historically (https://gitlab.com/apparmor/apparmor/-/commit/bca67d3d27d219d11ce8c9cc70612bd637f88c10)
+ version = strings.SplitN(version, "~", 2)[0]
+
+ // split by major minor version
+ v := strings.Split(version, ".")
+ if len(v) == 0 || len(v) > 3 {
+ return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
+ }
+
+ // Default the versions to 0.
+ var majorVersion, minorVersion, patchLevel int
+
+ majorVersion, err := strconv.Atoi(v[0])
+ if err != nil {
+ return -1, err
+ }
+
+ if len(v) > 1 {
+ minorVersion, err = strconv.Atoi(v[1])
+ if err != nil {
+ return -1, err
+ }
+ }
+ if len(v) > 2 {
+ patchLevel, err = strconv.Atoi(v[2])
+ if err != nil {
+ return -1, err
+ }
+ }
+
+ // major*10^5 + minor*10^3 + patch*10^0
+ numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
+ return numericVersion, nil
+}
diff --git a/profiles/apparmor/apparmor.go b/profiles/apparmor/apparmor.go
index 445eed64e979..871b1f7d63c2 100644
--- a/profiles/apparmor/apparmor.go
+++ b/profiles/apparmor/apparmor.go
@@ -11,10 +11,14 @@ import (
"path"
"strings"
"text/template"
+
+ "github.com/docker/docker/pkg/aaparser"
)
-// profileDirectory is the file store for apparmor profiles and macros.
-const profileDirectory = "/etc/apparmor.d"
+var (
+ // profileDirectory is the file store for apparmor profiles and macros.
+ profileDirectory = "/etc/apparmor.d"
+)
// profileData holds information about the given profile for generation.
type profileData struct {
@@ -26,6 +30,8 @@ type profileData struct {
Imports []string
// InnerImports defines the apparmor functions to import in the profile.
InnerImports []string
+ // Version is the {major, minor, patch} version of apparmor_parser as a single number.
+ Version int
}
// generateDefault creates an apparmor profile from ProfileData.
@@ -45,6 +51,12 @@ func (p *profileData) generateDefault(out io.Writer) error {
p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
}
+ ver, err := aaparser.GetVersion()
+ if err != nil {
+ return err
+ }
+ p.Version = ver
+
return compiled.Execute(out, p)
}
diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go
index 35c75300f8f0..b7a0299af2b8 100644
--- a/profiles/apparmor/template.go
+++ b/profiles/apparmor/template.go
@@ -23,6 +23,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
capability,
file,
umount,
+{{if ge .Version 208096}}
# Host (privileged) processes may send signals to container processes.
signal (receive) peer=unconfined,
# runc may send signals to container processes (for "docker stop").
@@ -33,6 +34,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
signal (receive) peer={{.DaemonProfile}},
# Container processes may send signals amongst themselves.
signal (send,receive) peer={{.Name}},
+{{end}}
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
@@ -53,7 +55,9 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
deny /sys/devices/virtual/powercap/** rwklx,
deny /sys/kernel/security/** rwklx,
+{{if ge .Version 208095}}
# suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
ptrace (trace,read,tracedby,readby) peer={{.Name}},
+{{end}}
}
`
--
2.49.0
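For reference, pkg/aaparser packs the parser version into a single integer as major*10^5 + minor*10^3 + patch, so apparmor_parser 2.9.1 becomes 209001 and passes the template's `ge .Version 209000` guards, while 2.8.95 encodes to exactly 208095. A rough shell equivalent of that encoding (assuming the usual "AppArmor parser version X.Y.Z" banner):
```
ver="$(apparmor_parser --version | head -n1 | awk '{print $NF}')"
ver="${ver%%-*}"; ver="${ver%%~*}"    # strip "-beta1" / "~..." suffixes, as parseVersion does
IFS=. read -r major minor patch <<<"$ver"
echo $(( major * 100000 + ${minor:-0} * 1000 + ${patch:-0} ))   # e.g. 2.9.1 -> 209001
```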

0006-CVE-2025-22868-vendor-jws-split-token-into-fixed-num.patch

@@ -0,0 +1,40 @@
From a4993bed18ac66035ef562e742f0144ffcbcbd05 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <cyphar@cyphar.com>
Date: Tue, 25 Mar 2025 12:02:42 +1100
Subject: [PATCH 6/7] CVE-2025-22868: vendor: jws: split token into fixed
number of parts
Thanks to 'jub0bs' for reporting this issue.
Fixes: CVE-2025-22868
Reviewed-on: https://go-review.googlesource.com/c/oauth2/+/652155
Reviewed-by: Damien Neil <dneil@google.com>
Reviewed-by: Roland Shoemaker <roland@golang.org>
(Cherry-picked from golang.org/x/oauth2@681b4d8edca1bcfea5bce685d77ea7b82ed3e7b3.)
SUSE-Bugs: https://bugzilla.suse.com/show_bug.cgi?id=1239185
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
vendor/golang.org/x/oauth2/jws/jws.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
index 95015648b43f..6f03a49d3120 100644
--- a/vendor/golang.org/x/oauth2/jws/jws.go
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -165,11 +165,11 @@ func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
// Verify tests whether the provided JWT token's signature was produced by the private key
// associated with the supplied public key.
func Verify(token string, key *rsa.PublicKey) error {
- parts := strings.Split(token, ".")
- if len(parts) != 3 {
+ if strings.Count(token, ".") != 2 {
return errors.New("jws: invalid token received, token must have 3 parts")
}
+ parts := strings.SplitN(token, ".", 3)
signedContent := parts[0] + "." + parts[1]
signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
if err != nil {
--
2.48.1

0007-CVE-2025-22869-vendor-ssh-limit-the-size-of-the-inte.patch

@@ -0,0 +1,137 @@
From 336decaec9a2086a8da7eb1b82c64f66fbdf57ad Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <cyphar@cyphar.com>
Date: Tue, 25 Mar 2025 12:05:38 +1100
Subject: [PATCH 7/7] CVE-2025-22869: vendor: ssh: limit the size of the
internal packet queue while waiting for KEX
In the SSH protocol, clients and servers execute the key exchange to
generate one-time session keys used for encryption and authentication.
The key exchange is performed initially after the connection is
established and then periodically after a configurable amount of data.
While a key exchange is in progress, we add the received packets to an
internal queue until we receive SSH_MSG_KEXINIT from the other side.
This can result in high memory usage if the other party is slow to
respond to the SSH_MSG_KEXINIT packet, or memory exhaustion if a
malicious client never responds to an SSH_MSG_KEXINIT packet during a
large file transfer.
We now limit the internal queue to 64 packets: this means 2MB with the
typical 32KB packet size.
When the internal queue is full we block further writes until the
pending key exchange is completed or there is a read or write error.
Thanks to Yuichi Watanabe for reporting this issue.
Fixes: CVE-2025-22869
Reviewed-on: https://go-review.googlesource.com/c/crypto/+/652135
Reviewed-by: Neal Patel <nealpatel@google.com>
Reviewed-by: Roland Shoemaker <roland@golang.org>
(Cherry-picked from golang.org/x/crypto@7292932d45d55c7199324ab0027cc86e8198aa22.)
SUSE-Bugs: https://bugzilla.suse.com/show_bug.cgi?id=1239322
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
vendor/golang.org/x/crypto/ssh/handshake.go | 47 ++++++++++++++++-----
1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index 56cdc7c21c3b..a68d20f7f396 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -25,6 +25,11 @@ const debugHandshake = false
// quickly.
const chanSize = 16
+// maxPendingPackets sets the maximum number of packets to queue while waiting
+// for KEX to complete. This limits the total pending data to maxPendingPackets
+// * maxPacket bytes, which is ~16.8MB.
+const maxPendingPackets = 64
+
// keyingTransport is a packet based transport that supports key
// changes. It need not be thread-safe. It should pass through
// msgNewKeys in both directions.
@@ -73,11 +78,19 @@ type handshakeTransport struct {
incoming chan []byte
readError error
- mu sync.Mutex
- writeError error
- sentInitPacket []byte
- sentInitMsg *kexInitMsg
- pendingPackets [][]byte // Used when a key exchange is in progress.
+ mu sync.Mutex
+ // Condition for the above mutex. It is used to notify a completed key
+ // exchange or a write failure. Writes can wait for this condition while a
+ // key exchange is in progress.
+ writeCond *sync.Cond
+ writeError error
+ sentInitPacket []byte
+ sentInitMsg *kexInitMsg
+ // Used to queue writes when a key exchange is in progress. The length is
+ // limited by pendingPacketsSize. Once full, writes will block until the key
+ // exchange is completed or an error occurs. If not empty, it is emptied
+ // all at once when the key exchange is completed in kexLoop.
+ pendingPackets [][]byte
writePacketsLeft uint32
writeBytesLeft int64
@@ -133,6 +146,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion,
config: config,
}
+ t.writeCond = sync.NewCond(&t.mu)
t.resetReadThresholds()
t.resetWriteThresholds()
@@ -259,6 +273,7 @@ func (t *handshakeTransport) recordWriteError(err error) {
defer t.mu.Unlock()
if t.writeError == nil && err != nil {
t.writeError = err
+ t.writeCond.Broadcast()
}
}
@@ -362,6 +377,8 @@ write:
}
}
t.pendingPackets = t.pendingPackets[:0]
+ // Unblock writePacket if waiting for KEX.
+ t.writeCond.Broadcast()
t.mu.Unlock()
}
@@ -567,11 +584,20 @@ func (t *handshakeTransport) writePacket(p []byte) error {
}
if t.sentInitMsg != nil {
- // Copy the packet so the writer can reuse the buffer.
- cp := make([]byte, len(p))
- copy(cp, p)
- t.pendingPackets = append(t.pendingPackets, cp)
- return nil
+ if len(t.pendingPackets) < maxPendingPackets {
+ // Copy the packet so the writer can reuse the buffer.
+ cp := make([]byte, len(p))
+ copy(cp, p)
+ t.pendingPackets = append(t.pendingPackets, cp)
+ return nil
+ }
+ for t.sentInitMsg != nil {
+ // Block and wait for KEX to complete or an error.
+ t.writeCond.Wait()
+ if t.writeError != nil {
+ return t.writeError
+ }
+ }
}
if t.writeBytesLeft > 0 {
@@ -588,6 +614,7 @@ func (t *handshakeTransport) writePacket(p []byte) error {
if err := t.pushPacket(p); err != nil {
t.writeError = err
+ t.writeCond.Broadcast()
}
return nil
--
2.48.1

80-docker.rules

@@ -0,0 +1,5 @@
# hide docker's loopback devices from udisks, and thus from user desktops
SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
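When this rules file is installed or changed on a live system, udev has to re-read its rules before the hiding takes effect; a minimal sketch (the target path follows the usual udev layout and is an assumption here):
```
sudo install -m 0644 80-docker.rules /usr/lib/udev/rules.d/80-docker.rules
sudo udevadm control --reload-rules
```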

README_SUSE.md

@@ -0,0 +1,230 @@
# Abstract
Docker is a lightweight "virtualization" method to run multiple virtual units
(containers, akin to “chroot”) simultaneously on a single control host.
Containers are isolated with Kernel Control Groups (cgroups) and Kernel Namespaces.
Docker provides an operating system-level virtualization where the Kernel
controls the isolated containers. With other full virtualization solutions
like Xen, KVM, or libvirt the processor simulates a complete hardware
environment and controls its virtual machines.
# Terminology
## chroot
A change root (chroot, or change root jail) is a section in the file system
which is isolated from the rest of the file system. For this purpose, the chroot
command is used to change the root of the file system. A program which is
executed in such a “chroot jail” cannot access files outside the designated
directory tree.
## cgroups
Kernel Control Groups (commonly referred to as just “cgroups”) are a Kernel
feature that allows aggregating or partitioning tasks (processes) and all their
children into hierarchical organized groups to isolate resources.
## Image
A "virtual machine" on the host server that can run any Linux system, for
example openSUSE, SUSE Linux Enterprise Desktop, or SUSE Linux Enterprise Server.
A Docker image is made by a series of layers built one over the other. Each layer
corresponds to a permanent change committed from a container to the image.
For more details, check out [Docker's official documentation](http://docs.docker.com/terms/image/).
## Image Name
A name that refers to an image. The name is used by the docker commands.
## Container
A running Docker Image.
## Container ID
An ID that refers to a container. The ID is used by the docker commands.
## TAG
A string associated with an Image. It is commonly used to identify a specific version
of an Image (like tags in version control systems). It is also possible to refer
to the same Image with different TAGs.
## Kernel Namespaces
A Kernel feature to isolate some resources like network, users, and others for
a group of processes.
## Docker Host Server
The system that runs the Docker daemon, provides the images, and the management
control capabilities through cgroups.
# Overview
Docker is a platform that allows developers and sysadmins to manage the complete
lifecycle of images.
Docker makes it incredibly easy to build, ship, and run images containing
applications.
Benefits of Docker:
* Isolating applications and operating systems through containers.
* Providing nearly native performance as Docker manages allocation of resources
in real-time.
* Controlling network interfaces and applying resources inside containers through cgroups.
* Versioning of images.
* Building images based on existing ones.
* Sharing/storing on [public](http://docs.docker.com/docker-hub/) or
[private](http://docs.docker.com/userguide/dockerrepos/#private-repositories)
repositories.
Limitations of Docker:
* All Docker containers run inside the host system's Kernel and not with
a different Kernel.
* Only allows Linux "guest" operating systems.
* Docker is not a full virtualization stack like Xen, KVM, or libvirt.
* Security depends on the host system. Refer to the [official documentation](http://docs.docker.com/articles/security/)
for more details.
## Container drivers
Docker has different backend drivers to handle the containers. The recommended
one is [libcontainer](https://github.com/docker/libcontainer), which is also the
default choice. This driver provides direct access to cgroups.
The Docker package also ships an LXC driver which handles containers using the
LXC tools.
At the time of writing, upstream is working on a `libvirt-lxc` driver.
## Storage drivers
Docker supports different storage drivers:
* `vfs`: this driver is automatically used when the Docker host filesystem
does not support copy-on-write. This is a simple driver which does not offer
some of the advantages of Docker (like sharing layers, more on that in the
next sections). It is highly reliable but also slow.
* `devicemapper`: this driver relies on the device-mapper thin provisioning
module. It supports copy-on-write, hence it offers all the advantages of
Docker.
* `btrfs`: this driver relies on Btrfs to provide all the features required
by Docker. To use this driver the `/var/lib/docker` directory must be on a
btrfs filesystem.
* `AUFS`: this driver relies on the AUFS union filesystem. Neither the upstream
kernel nor the SUSE one supports this filesystem. Hence the AUFS driver is
not built into the SUSE Docker package.
It is possible to specify which driver to use by changing the value of the
`DOCKER_OPTS` variable defined inside of the `/etc/sysconfig/docker` file.
This can be done either manually or using YaST by browsing to:
* System
* /etc/sysconfig Editor
* System
* Management
* DOCKER_OPTS
menu and entering the `-s storage_driver` string.
For example, to force the usage of the `devicemapper` driver
enter the following text:
```
DOCKER_OPTS="-s devicemapper
```
It is recommended to have `/var/lib/docker` mounted on a different filesystem
to not affect the Docker host OS in case of a filesystem corruption.
# Setting up a Docker host
Prepare the host:
1. Install the `docker` package.
2. Automatically start the Docker daemon at boot:
`sudo systemctl enable docker`
3. Start the Docker daemon:
`sudo systemctl start docker`
The Docker daemon listens on a local socket which is accessible only by the `root`
user and by the members of the `docker` group.
The `docker` group is automatically created at package installation time. To
allow a certain user to connect to the local Docker daemon use the following
command:
```
sudo /usr/sbin/usermod -aG docker <username>
```
The user will be able to communicate with the local Docker daemon upon their next
login.
## Networking
If you want your containers to be able to access the external network you must
enable the `net.ipv4.ip_forward` rule.
This can be done using YaST by browsing to the
`Network Devices -> Network Settings -> Routing` menu and ensuring that the
`Enable IPv4 Forwarding` box is checked.
This option cannot be changed when networking is handled by the Network Manager.
In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
hand to ensure the `FW_ROUTE` flag is set to `yes` like so:
```
FW_ROUTE="yes"
```
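Outside of YaST, the same setting can also be applied directly with sysctl; a minimal sketch (the drop-in file name below is only an example):
```
# Enable IPv4 forwarding immediately ...
sudo sysctl -w net.ipv4.ip_forward=1
# ... and persist it across reboots.
echo "net.ipv4.ip_forward = 1" | sudo tee /etc/sysctl.d/90-ip-forward.conf
```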
# Basic Docker operations
Images can be pulled from [Docker's central index](http://index.docker.io) using
the following command:
```
docker pull <image name>
```
Containers can be started using the `docker run` command.
Please refer to the [official documentation](http://docs.docker.com/)
for more details.
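For example, the following starts an interactive shell in a container and removes the container on exit (the image name is only illustrative):
```
docker run --rm -it opensuse/leap /bin/bash
```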
# Building Docker containers using KIWI
Starting from version 5.06.8 KIWI can be used to build Docker images.
Please refer to KIWI's [official documentation](https://doc.opensuse.org/projects/kiwi/doc/#chap.lxc).
The official `kiwi-doc` package contains examples of Docker images.
## Docker build system versus KIWI
Docker has an [internal build system](http://docs.docker.com/reference/builder/)
which makes it incredibly easy to create new images based on existing ones.
Some users might be confused about what to use. The right approach is to build
the [base images](http://docs.docker.com/terms/image/#base-image-def) using KIWI
and then use them as foundation blocks inside your Docker build system.
That has two advantages:
1. Be able to use Docker-specific directives (like `ENTRYPOINT`, `EXPOSE`, ...).
2. Be able to reuse already existing layers.
Sharing the common layers between different images makes it possible to:
* Use less disk space on the Docker hosts.
* Make the deployments faster: only the requested layers are sent over the
network (it is like upgrading installed packages using delta rpms).
* Take full advantage of caching while building Docker images: this will result
in faster executions of the `docker build` command.
To recap: KIWI is not intended as a replacement for Docker's build system.
Rather, it complements it.

_service

@@ -0,0 +1,30 @@
<services>
<service name="tar_scm" mode="manual">
<param name="url">https://github.com/moby/moby.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
<param name="versionformat">28.1.1_ce_%h</param>
<param name="revision">v28.1.1</param>
<param name="filename">docker</param>
</service>
<service name="tar_scm" mode="manual">
<param name="url">https://github.com/docker/cli.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
<param name="versionformat">28.1.1_ce</param>
<param name="revision">v28.1.1</param>
<param name="filename">docker-cli</param>
</service>
<service name="tar_scm" mode="manual">
<param name="url">https://github.com/docker/buildx.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
<param name="versionformat">0.23.0</param>
<param name="revision">v0.23.0</param>
<param name="filename">docker-buildx</param>
</service>
<service name="recompress" mode="manual">
<param name="file">docker-*.tar</param>
<param name="compression">xz</param>
</service>
</services>
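All of the services above use mode="manual", so OBS does not run them automatically on commit; a packager refreshes the tarballs locally, roughly like this (assuming a checked-out package and a reasonably recent osc):
```
# Re-run the tar_scm and recompress services defined in _service.
osc service manualrun
```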

File diff suppressed because it is too large.


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a866c020abe705657cb373e692db7f1ad4ad547b9e25c7a557a06f4549a63c9
size 9909596


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73a38f03a8fe7f3c96f976fe776dfed2a46331735f7c0d5e7871a4afedd59325
size 10076932

docker-27.5.1_ce_4c9b3b011ae4.tar.xz (Stored with Git LFS)
Binary file not shown.


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bd8a7b4eb65e76c19807022b0a2f8a87c015f52657fbdea2069b3566341d501b
size 10524888

docker-audit.rules

@@ -0,0 +1,27 @@
##
# Audit rules based on CIS Docker 1.6 Benchmark v1.0.0
# https://benchmarks.cisecurity.org/tools2/docker/CIS_Docker_1.6_Benchmark_v1.0.0.pdf
# Not all of these apply to SUSE.
# 1.8 Audit docker daemon
-w /usr/bin/docker -k docker
# 1.9 Audit Docker files and directories
-w /var/lib/docker -k docker
# 1.10 Audit /etc/docker
-w /etc/docker -k docker
# 1.11 Audit Docker files and directories - docker-registry.service
-w /usr/lib/systemd/system/docker-registry.service -k docker
# 1.12 Audit Docker files and directories - docker.service
-w /usr/lib/systemd/system/docker.service -k docker
# 1.13 Audit Docker files and directories - /var/run/docker.sock
-w /var/run/docker.sock -k docker
# 1.14 Audit Docker files and directories - /etc/sysconfig/docker
-w /etc/sysconfig/docker -k docker
# 1.15 Audit Docker files and directories - /etc/sysconfig/docker-network
-w /etc/sysconfig/docker-network -k docker
# 1.16 Audit Docker files and directories - /etc/sysconfig/docker-registry
-w /etc/sysconfig/docker-registry -k docker
# 1.17 Audit Docker files and directories - /etc/sysconfig/docker-storage
-w /etc/sysconfig/docker-storage -k docker
# 1.18 Audit Docker files and directories - /etc/default/docker
-w /etc/default/docker -k docker
## end docker audit rules
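To actually enforce these rules on a host they need to be handed to auditd; a minimal sketch using the usual audit tooling (the destination filename is an assumption):
```
sudo install -m 0640 docker-audit.rules /etc/audit/rules.d/docker.rules
sudo augenrules --load    # or: sudo auditctl -R /etc/audit/rules.d/docker.rules
```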


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd0f81752a02e20b611f95a35718bdc44eb1e203e0fd80d7afb87dfd8135c300
size 6445376


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6efae9f26c3d94c2f73aa22197aa1b7e97c1c81bc73ac5e38f165753373bf0a
size 6480184

docker-buildx-0.22.0.tar.xz (Stored with Git LFS)
Binary file not shown.


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:551705bb4635659954b18c6931bd5025d831d68fed4d0e7ae787d95ed2831533
size 8093360


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a2b7ab7e665e9469fdd71bca1dd28ead5dc58dc9886f285f1fa75978ef5c078
size 3971272


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe4d69790c4d21affd64d9b81ec5323f9f23153f7b0856e274ebd675058f6488
size 4079044

docker-cli-27.5.1_ce.tar.xz (Stored with Git LFS)
Binary file not shown.


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10ce48e46f86cd411027f67297aa702766da407eb874f7ed724dcce29497be6f
size 4234444

docker-daemon.json

@@ -0,0 +1,8 @@
{
"log-level": "warn",
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "5"
}
}
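One quick way to confirm that a restarted daemon picked up these logging defaults (a sketch, nothing package-specific):
```
docker info --format '{{.LoggingDriver}}'   # expected output: json-file
```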

docker-integration.sh

@@ -0,0 +1,291 @@
#!/bin/bash
# docker-integration: run Docker's integration tests
# Copyright (C) 2024 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -Eeuo pipefail
TESTDIR=/usr/src/docker-test
TEST_SRCDIR="$TESTDIR/src"
TEST_BINDIR="$TESTDIR/bin"
TMPROOT="$(mktemp --tmpdir -d docker-integration-tmpdir.XXXXXX)"
TMPDIR="$TMPROOT/tmp"
DEST="$TMPROOT/dest"
mkdir -p "$TMPDIR" "$TEST_BINDIR" "$DEST"
chmod 1777 "$TMPDIR"
chmod 777 "$TMPROOT"
function usage() {
cat >&2 <<-EOF
docker-integration.sh [-Av] [-r TestName] [-t timeout] [<test-suites>...]
Arguments:
-A
Run all tests (do not fail on first suite failure).
-v
Run tests in verbose mode (go test -v).
-r
Only run tests that match the given regular expression (go test -run).
-t <timeout=$timeout>
Set the per-suite timeout to <timeout> (go test -timeout).
<test-suites>...
Only run the given test suites in /usr/src/docker-test. The
default is to run all test suites.
Examples:
Run the build and network integration tests with a 60 minute timeout:
./docker-integration.sh -t 60m integration/build integration/network
Run all of the tests in verbose mode with a 6 hour timeout:
./docker-integration.sh -Av -t 360m
This script is maintained by openSUSE in the Virtualization:containers
project, and is only intended to be used by openSUSE developers.
EOF
exit "${1:-1}"
}
fail_fast=1
verbose=
filter=
timeout=20m
while getopts "Ahr:t:v" opt; do
case "$opt" in
A)
fail_fast=
;;
v)
verbose=1
;;
r)
filter="$OPTARG"
;;
t)
timeout="$OPTARG"
;;
h)
usage 0
;;
:)
echo "Missing argument: -$OPTARG" >&2
usage 1
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage 1
;;
esac
done
pushd "$TEST_SRCDIR"
if [ "$OPTIND" -le "$#" ]; then
SUITES=("${@:$OPTIND:$(($#+1))}")
else
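# Each suite directory contains a pre-built test.main binary (compiled with
# "go test -c" during the %build phase of docker.spec), so suites are
# discovered by locating those binaries.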
readarray -t SUITES <<<"$(find . -type f -name test.main -printf "%h\n")"
fi
echo "Planning to run suites {${SUITES[@]}}."
# Download the frozen images.
if ! [ -d /docker-frozen-images ]; then
# TODO: Get the hashes from /usr/src/docker-test/Dockerfile...
contrib/download-frozen-image-v2.sh "$TMPDIR/docker-frozen-images" \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bookworm-slim@sha256:2bc5c236e9b262645a323e9088dfa3bb1ecb16cc75811daf40a23a824d665be9 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
sudo cp -r "$TMPDIR/docker-frozen-images" /
fi
# Create binaries in $TEST_BINDIR.
if ! [ -e "$TEST_BINDIR/docker-basic-plugin" ]; then
(
pushd "$TEST_SRCDIR/testutil/fixtures/plugin/basic"
go mod init docker-basic-plugin
go build -o "$TEST_BINDIR/docker-basic-plugin" .
)
fi
if ! [ -e "$TEST_BINDIR/registry-v2" ]; then
# The v2.x tags of Docker registry don't use go.mod, and pre-date the move
# to github.com/distribution, so we need to create a fake GOPATH with the
# old github.com/docker/distribution import path.
(
export GOPATH="$(mktemp -d -p "$TMPROOT" distribution-build-gopath.XXXXXX)"
pushd "$GOPATH"
git clone \
--depth=1 --branch=v2.8.3 \
https://github.com/distribution/distribution.git \
src/github.com/docker/distribution
pushd src/github.com/docker/distribution
GO111MODULE=off go build -o "$TEST_BINDIR/registry-v2" ./cmd/registry
)
fi
if ! [ -e "$TEST_BINDIR/ctr" ]; then
containerd-ctr --help >/dev/null
ln -sf "$(which containerd-ctr)" "$TEST_BINDIR/ctr"
fi
if ! [ -e "$TEST_BINDIR/docker" ]; then
# The integration-cli tests require a Docker 17.06.2 client (from 2017).
# This is mainly because the tests are all based on the specific output the
# client gives, and some tests fail on modern client versions.
(
export GOPATH="$(mktemp -d -p "$TMPROOT" distribution-build-gopath.XXXXXX)"
pushd "$GOPATH"
# This tag also comes from the time when this was called
# github.com/docker/docker-ce-packaging, so we need to work around this
# by moving the cli component into the right path...
git clone \
--depth=1 --branch=v17.06.2-ce \
https://github.com/docker/cli.git \
src/github.com/docker/docker-ce-packaging
mv \
src/github.com/docker/docker-ce-packaging/components/cli \
src/github.com/docker/cli
pushd src/github.com/docker/cli
GO111MODULE=off go build -o "$TEST_BINDIR/docker" ./cmd/docker
)
fi
# Create an unprivilegeduser account for tests.
if ! ( grep unprivilegeduser /etc/passwd &>/dev/null ); then
useradd --create-home --gid docker unprivilegeduser
fi
# Disable SUSE secrets for tests, as some tests (TestDiff from
# integration/container) will fail if we have secrets injected.
[ -e /etc/docker/suse-secrets-enable ] && \
mv -nv /etc/docker/suse-secrets-enable{,-DISABLED}
sudo systemctl restart docker
# Make sure docker-buildx is disabled.
[ -e /usr/lib/docker/cli-plugins/docker-buildx ] && \
mv -nv /usr/lib/docker/cli-plugins/docker-buildx{,-DISABLED}
# Disable any daemon configurations.
[ -e /etc/docker/daemon.json ] && \
mv -nv /etc/docker/daemon.json{,.DISABLED}
set -x
# In order for <gotest.tools/v3/assert> to parse the source and give us useful
# error messages, we have to create a fake source directory that points at
# $TEST_SRCDIR. This path is replaced with %{docker_builddir} during the
# docker.spec build.
__DOCKER_BUILDIR="@@docker_builddir@@"
DOCKER_BUILDDIR="${DOCKER_BUILDDIR:-$__DOCKER_BUILDIR}"
sudo rm -rvf "$DOCKER_BUILDDIR"
sudo mkdir -p "$(dirname "$DOCKER_BUILDDIR")"
sudo ln -svf "$TEST_SRCDIR" "$DOCKER_BUILDDIR"
# Clean up any old containers/images/networks/volumes before running the tests.
# We need to do this *BEFORE* we set PATH, as the outdated $TEST_BINDIR/docker
# doesn't support some of these commands.
docker container prune -f
docker image prune -af
#docker buildx prune -af
docker network prune -f
docker volume prune -af
[ -z "$(docker plugin ls -q)" ] || docker plugin ls -q | xargs docker plugin rm -f
docker system prune -af
export DOCKERFILE="$TEST_SRCDIR/Dockerfile"
export TMPDIR="$TMPDIR"
export TEMP="$TMPDIR"
export HOME="$TMPDIR/fake-home"
export DEST="$TEST_SRCDIR/bundles"
export ABS_DEST="$DEST"
export PATH="$TEST_BINDIR:$PATH"
export TZ=UTC
export DOCKER_INTEGRATION_DAEMON_DEST="$ABS_DEST"
export DOCKER_HOST=unix:///run/docker.sock
export DOCKER_GRAPHDRIVER=overlay2
export DOCKER_USERLANDPROXY=true
export DOCKER_REMAP_ROOT="${DOCKER_REMAP_ROOT:-}"
export DOCKER_TMPDIR="$TMPDIR"
export DOCKER_SUSE_SECRETS_ENABLE=0
set +x
# Make sure that we have a dummy "destination" directory for tests.
rm -rf "$DOCKER_INTEGRATION_DAEMON_DEST"
mkdir -p "$DOCKER_INTEGRATION_DAEMON_DEST"
# Install the emptyfs images.
sh ./hack/make/.build-empty-images
ls -la "$TMPROOT"
success=0
failed_suites=()
for suite_name in "${SUITES[@]}"; do
suite_name="${suite_name#*./}"
pushd "$TEST_SRCDIR/$suite_name"
test_flags=()
[ -n "$verbose" ] && test_flags+=("-test.v")
[ -n "$filter" ] && test_flags+=("-test.run" "$filter")
if [[ "$suite_name" == "integration-cli" ]]; then
# The integration-cli suite is far larger than the others, so it needs a
# much longer timeout than the default.
timeout=360m
fi
test_flags+=("-test.timeout" "$timeout")
echo "Running suite $suite_name (${test_flags[@]}) [success=$success fail=${#failed_suites[@]}]"
set -x +e
sudo -E HOME="$HOME" TMPDIR="$TMPDIR" PATH="$PATH" \
./test.main "${test_flags[@]}"
err="$?"
if (( $err != 0 )); then
[ -z "$fail_fast" ] || exit "$err"
failed_suites+=("$suite_name")
else
(( success++ ))
fi
set +x -e
popd
done
[ -e /usr/lib/docker/cli-plugins/docker-buildx-DISABLED ] && \
mv -nv /usr/lib/docker/cli-plugins/docker-buildx{-DISABLED,}
[ -e /etc/docker/suse-secrets-enable-DISABLED ] && \
mv -nv /etc/docker/suse-secrets-enable{-DISABLED,}
[ -e /etc/docker/daemon.json.DISABLED ] && \
mv -nv /etc/docker/daemon.json{.DISABLED,}
echo "Suite results: $success success(es) ${#failed_suites[@]} failure(s)."
if (( ${#failed_suites[@]} > 0 )); then
echo "Failed suites:"
printf " - %s\n" "${failed_suites[@]}"
exit 1
fi

docker-rpmlintrc Normal file

@@ -0,0 +1,7 @@
addFilter("^docker-(stable-)?bash-completion.noarch: (E|W): non-executable-script /usr/share/bash-completion/completions/docker")
addFilter("^docker-(stable-)?zsh-completion.noarch: W: non-conffile-in-etc /etc/zsh_completion.d/_docker")
# The docker-integration-tests-devel package contains all of the source code of
# Docker, which causes a bunch of warnings. Note that
# docker-integration-tests-devel is used internally and isn't actually shipped.
addFilter("^docker-(stable-)?integration-tests-devel\..*: (E|W): .*")

docker.changes Normal file

File diff suppressed because it is too large.

docker.service Normal file

@@ -0,0 +1,45 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target lvm2-monitor.service firewalld.service
# We don't use the docker socket activation, but doing this ensures that the
# docker.socket unit is alive while Docker is (docker.socket has BindsTo, so we
# only need a weak requirement to make sure starting docker.service also
# "starts" the socket service). Forcefully stopping docker.socket will not
# cause docker to die, but there's no nice workaround for that.
Wants=docker.socket
[Service]
EnvironmentFile=/etc/sysconfig/docker
# While Docker has support for socket activation (-H fd://), this is not
# enabled by default because enabling socket activation means that on boot your
# containers won't start until someone tries to administer the Docker daemon.
Type=notify
ExecStart=/usr/bin/dockerd --add-runtime oci=/usr/sbin/runc $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
# Remove the default task limit for the docker service.
# Only systemd 226 and above support this property.
TasksMax=infinity
# Set delegate yes so that systemd does not reset the cgroups of docker containers
# Only systemd 218 and above support this property.
Delegate=yes
# Kill only the docker process, not all processes in the cgroup.
KillMode=process
# Restart the docker process if it exits prematurely.
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
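To pass extra daemon flags without editing this packaged unit, a systemd drop-in is the usual route; a minimal sketch (the --debug flag is only an illustration, and the packaged mechanism is DOCKER_OPTS in /etc/sysconfig/docker):

systemctl edit docker.service
# in the editor that opens, clear and redefine ExecStart:
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd --add-runtime oci=/usr/sbin/runc --debug $DOCKER_OPTS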

docker.socket Normal file

@@ -0,0 +1,18 @@
[Unit]
Description=Docker Socket for the API
# We use BindsTo in order to make sure that you cannot use socket-activation
# with Docker (Docker must always start at boot if enabled, otherwise
# containers will not run until some administrator interacts with Docker).
BindsTo=docker.service
[Socket]
# If /var/run is not implemented as a symlink to /run, you may need to
# specify ListenStream=/var/run/docker.sock instead.
ListenStream=/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
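Whether ListenStream needs adjusting can be checked directly; on current openSUSE /var/run is a symlink, so the default path is fine:

readlink /var/run   # typically prints "/run" (or "../run"); if it prints nothing, use ListenStream=/var/run/docker.sock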

docker.spec Normal file

@@ -0,0 +1,594 @@
#
# spec file for package docker
#
# Copyright (c) 2024 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
# nodebuginfo
%bcond_without apparmor
# This subpackage is only used for testing by developers, and shouldn't be
# built for actual users.
%bcond_with integration_tests
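# For a local scratch build the test subpackage can be enabled on the rpmbuild
# command line, e.g. "rpmbuild -ba --with integration_tests docker.spec"
# (a hypothetical local invocation; OBS builds keep it disabled).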
%if 0%{?is_opensuse} == 0 && 0%{?suse_version} < 1600
# SUSEConnect support ("SUSE secrets") only makes sense for SLES hosts.
%bcond_without suseconnect
# There is currently a known bug between buildx and SUSE secrets, so we don't
# package docker-buildx for SLES<16. bsc#1233819
%bcond_with buildx
%else
%bcond_with suseconnect
%bcond_without buildx
%endif
# The flavour is defined with a macro to try to keep docker and docker-stable
# as similar as possible, to make maintenance a little easier.
%define flavour %{nil}
# Where important update information will be stored, such that an administrator
# is guaranteed to see the relevant warning.
%define update_messages %{_localstatedir}/adm/update-messages/%{name}-%{version}-%{release}
# Test binaries.
%define testdir /usr/src/docker-test
# Compat macro for the _fillupdir macro introduced in Nov 2017
%if ! %{defined _fillupdir}
%define _fillupdir /var/adm/fillup-templates
%endif
# MANUAL: This needs to be updated with every docker update.
%define docker_real_version 28.1.1
%define docker_git_version 01f442b84
%define docker_version %{docker_real_version}_ce
# This "nice version" is so that docker --version gives a result that can be
# parsed by other people. boo#1182476
%define docker_nice_version %{docker_real_version}-ce
%if %{with buildx}
# MANUAL: This needs to be updated with every docker-buildx update.
%define buildx_version 0.23.0
%endif
# Used when generating the "build" information for Docker version. The value of
# git_commit_epoch is unused here (we use SOURCE_DATE_EPOCH, which rpm
# helpfully injects into our build environment from the changelog). If you want
# to generate a new git_commit_epoch, use this:
# $ date --date="$(git show --format=fuller --date=iso $COMMIT_ID | grep -oP '(?<=^CommitDate: ).*')" '+%s'
%define git_commit_epoch 1744950323
Name: docker%{flavour}
Version: %{docker_version}
Release: 0
Summary: The Moby-project Linux container runtime
License: Apache-2.0
Group: System/Management
URL: http://www.docker.io
Source: docker-%{docker_version}_%{docker_git_version}.tar.xz
Source1: docker-cli-%{docker_version}.tar.xz
Source3: docker-rpmlintrc
# TODO: Move these source files to somewhere nicer.
Source100: docker.service
Source101: docker.socket
Source110: 80-docker.rules
Source120: sysconfig.docker
Source130: README_SUSE.md
Source140: docker-audit.rules
Source150: docker-daemon.json
Source160: docker.sysusers
# docker-integration-tests-devel
Source900: docker-integration.sh
# NOTE: All of these patches are maintained in <https://github.com/suse/docker>
# in the suse-v<version> branch. Make sure you update the patches in that
# branch and then git-format-patch the patch here.
# SUSE-FEATURE: Adds the /run/secrets mountpoint inside all Docker containers
# which is not snapshotted when images are committed.
Patch100: 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
Patch101: 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
# UPSTREAM: Revert of upstream patch to keep SLE-12 build working.
Patch200: 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
# UPSTREAM: Backport of <https://github.com/moby/moby/pull/41954>.
Patch201: 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
# UPSTREAM: Revert of upstream patches to make apparmor work on SLE 12.
Patch202: 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
BuildRequires: audit
BuildRequires: bash-completion
BuildRequires: ca-certificates
BuildRequires: fdupes
%if %{with apparmor}
BuildRequires: libapparmor-devel
%endif
BuildRequires: fish
BuildRequires: go-go-md2man
BuildRequires: libbtrfs-devel >= 3.8
BuildRequires: libseccomp-devel >= 2.2
BuildRequires: libtool
BuildRequires: linux-glibc-devel
BuildRequires: procps
BuildRequires: sqlite3-devel
BuildRequires: sysuser-tools
BuildRequires: zsh
BuildRequires: golang(API) = 1.23
BuildRequires: pkgconfig(libsystemd)
%if %{with apparmor}
%if 0%{?suse_version} >= 1500
# This conditional only works on rpm>=4.13, which SLE 12 doesn't have. But we
# don't need to support Docker+selinux for SLE 12 anyway.
Requires: (apparmor-parser or container-selinux)
# This recommends is added to make sure that even if you have container-selinux
# installed you will still be prompted to install apparmor-parser which Docker
# requires to apply AppArmor profiles (for SELinux systems this doesn't matter
# but if you switch back to AppArmor on reboot this would result in insecure
# containers).
Recommends: apparmor-parser
%else
Requires: apparmor-parser
%endif
%else
%if 0%{?suse_version} >= 1500
# This conditional only works on rpm>=4.13, which SLE 12 doesn't have. But we
# don't need to support Docker+selinux for SLE 12 anyway.
Requires: (container-selinux if selinux-policy)
%else
Requires: container-selinux
%endif
%endif
Requires: ca-certificates-mozilla
# The docker-proxy binary used to be in a separate package. We obsolete it,
# since now docker-proxy is maintained as part of this package.
Obsoletes: docker-libnetwork < 0.7.0.2
Provides: docker-libnetwork = 0.7.0.2.%{docker_version}
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker = %{docker_version}
Obsoletes: docker < %{docker_version}
Conflicts: docker
%else
Conflicts: docker-stable
%endif
# Required to actually run containers. We require the minimum version that is
# pinned by Docker, but in order to avoid headaches we allow for updates.
Requires: runc >= 1.1.9
Requires: containerd >= 1.7.3
# Needed for --init support. We don't use "tini", we use our own implementation
# which handles edge-cases better.
Requires: catatonit
Requires: iproute2 >= 3.5
Requires: iptables >= 1.4
Requires: procps
Requires: tar >= 1.26
Requires: xz >= 4.9
%if %{with buildx}
# Standard docker-build is deprecated, so require docker-buildx to avoid users
# hitting bugs that have long since been fixed by docker-buildx. bsc#1230331
Requires: %{name}-buildx
%endif
%?sysusers_requires
Requires(post): %fillup_prereq
Requires(post): udev
Requires(post): shadow
Recommends: %{name}-rootless-extras
Recommends: git-core >= 1.7
ExcludeArch: s390 ppc
%description
Docker complements LXC with a high-level API which operates at the process
level. It runs unix processes with strong guarantees of isolation and
repeatability across servers.
Docker is a great building block for automating distributed systems: large-scale
web deployments, database clusters, continuous deployment systems, private PaaS,
service-oriented architectures, etc.
%if %{with buildx}
%package buildx
Version: %{buildx_version}
Summary: Docker CLI plugin for extended build capabilities with BuildKit
License: Apache-2.0
URL: https://github.com/docker/buildx
Source500: docker-buildx-%{buildx_version}.tar.xz
Group: System/Management
Requires: %{name} >= 19.03.0_ce
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-buildx = %{buildx_version}
Obsoletes: docker-buildx < %{buildx_version}
Conflicts: docker-buildx
%else
Conflicts: docker-stable-buildx
%endif
%description buildx
buildx is a Docker CLI plugin for extended build capabilities with BuildKit.
Key features:
- Familiar UI from docker build
- Full BuildKit capabilities with container driver
- Multiple builder instance support
- Multi-node builds for cross-platform images
- Compose build support
- High-level build constructs (bake)
- In-container driver support (both Docker and Kubernetes)
%endif
%package rootless-extras
Summary: Rootless support for Docker
Group: System/Management
Requires: %{name} = %{docker_version}
Requires: fuse-overlayfs >= 0.7
Requires: rootlesskit
Requires: slirp4netns >= 0.4
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-rootless-extras = %{docker_version}
Obsoletes: docker-rootless-extras < %{docker_version}
Conflicts: docker-rootless-extras
%else
Conflicts: docker-stable-rootless-extras
%endif
%description rootless-extras
Rootless support for Docker.
Use dockerd-rootless.sh to run the daemon.
Use dockerd-rootless-setuptool.sh to setup systemd for dockerd-rootless.sh.
%if %{with integration_tests}
%package integration-tests-devel
Summary: Integration testing binaries for Docker
Group: TestSuite
Requires: %{name} = %{docker_version}
Requires: containerd-ctr
Requires: curl
Requires: gcc
Requires: git
Requires: glibc-devel-static
Requires: go
Requires: jq
Requires: libcap-progs
%description integration-tests-devel
Integration testing binaries for Docker.
THIS PACKAGE SHOULD NOT BE INSTALLED BY END-USERS, IT IS ONLY INTENDED FOR
INTERNAL DEVELOPMENT OF THE DOCKER PACKAGE FOR (OPEN)SUSE.
%endif
%package bash-completion
Summary: Bash Completion for %{name}
Group: System/Shells
Requires: %{name} = %{docker_version}
Requires: bash-completion
Supplements: packageand(%{name}:bash-completion)
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-bash-completion = %{docker_version}
Obsoletes: docker-bash-completion < %{docker_version}
Conflicts: docker-bash-completion
%else
Conflicts: docker-stable-bash-completion
%endif
%description bash-completion
Bash command line completion support for %{name}.
%package zsh-completion
Summary: Zsh Completion for %{name}
Group: System/Shells
Requires: %{name} = %{docker_version}
Requires: zsh
Supplements: packageand(%{name}:zsh)
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-zsh-completion = %{docker_version}
Obsoletes: docker-zsh-completion < %{docker_version}
Conflicts: docker-zsh-completion
%else
Conflicts: docker-stable-zsh-completion
%endif
%description zsh-completion
Zsh command line completion support for %{name}.
%package fish-completion
Summary: Fish completion for %{name}
Group: System/Shells
Requires: %{name} = %{docker_version}
Requires: fish
Supplements: packageand(%{name}:fish)
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-fish-completion = %{docker_version}
Obsoletes: docker-fish-completion < %{docker_version}
Conflicts: docker-fish-completion
%else
Conflicts: docker-stable-fish-completion
%endif
%description fish-completion
Fish command line completion support for %{name}.
%prep
# docker-cli
%define cli_builddir %{_builddir}/docker-cli-%{docker_version}
%setup -q -T -b 1 -n docker-cli-%{docker_version}
[ "%{cli_builddir}" = "$PWD" ]
%if %{with buildx}
# docker-buildx
%define buildx_builddir %{_builddir}/docker-buildx-%{buildx_version}
%setup -q -T -b 500 -n docker-buildx-%{buildx_version}
[ "%{buildx_builddir}" = "$PWD" ]
%endif
# docker
%define docker_builddir %{_builddir}/docker-%{docker_version}_%{docker_git_version}
%setup -q -n docker-%{docker_version}_%{docker_git_version}
[ "%{docker_builddir}" = "$PWD" ]
# README_SUSE.md for documentation.
cp %{SOURCE130} .
%if %{with suseconnect}
# PATCH-SUSE: Secrets patches.
%patch -P100 -p1
%patch -P101 -p1
%endif
%if 0%{?sle_version} == 120000
# Patches to build on SLE-12.
%patch -P200 -p1
%endif
# bsc#1099277
%patch -P201 -p1
# Solves apparmor issues on SLE-12, but okay for newer SLE versions too.
%patch -P202 -p1
%build
%sysusers_generate_pre %{SOURCE160} %{name} docker.conf
BUILDTAGS="apparmor selinux seccomp pkcs11"
export AUTO_GOPATH=1
# Make sure we always build PIC code. bsc#1048046
export BUILDFLAGS="-buildmode=pie"
# Specify all of the versioning information. We use SOURCE_DATE_EPOCH if it's
# been injected by rpmbuild, otherwise we use the hardcoded git_commit_epoch
# generated above. boo#1064781
export VERSION="%{docker_nice_version}"
export DOCKER_GITCOMMIT="%{docker_git_version}"
export GITCOMMIT="%{docker_git_version}"
export SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-%{git_commit_epoch}}"
export BUILDTIME="$(date -u -d "@$SOURCE_DATE_EPOCH" --rfc-3339 ns 2>/dev/null | sed -e 's/ /T/')"
###################
## DOCKER ENGINE ##
###################
pushd "%{docker_builddir}"
# use go module for build
cp {vendor,go}.mod
cp {vendor,go}.sum
./hack/make.sh dynbinary
# dockerd man page
GO_MD2MAN=go-md2man make -C ./man/
%if %{with integration_tests}
# build test binaries for integration tests
readarray -t integration_dirs \
<<<"$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/... ./integration-cli/...)"
for dir in "${integration_dirs[@]}"
do
pushd "$dir"
go test -c -buildmode=pie -tags "$BUILDTAGS" -o test.main .
popd
done
# Update __DOCKER_BUILDIR in the integration testing script.
sed -i 's|^__DOCKER_BUILDIR=.*|__DOCKER_BUILDIR=%{docker_builddir}|g' "%{SOURCE900}"
%endif
popd
###################
## DOCKER CLIENT ##
###################
pushd "%{cli_builddir}"
# use go module for build
cp {vendor,go}.mod
cp {vendor,go}.sum
make DISABLE_WARN_OUTSIDE_CONTAINER=1 dynbinary manpages
popd
%if %{with buildx}
###################
## DOCKER BUILDX ##
###################
pushd "%{buildx_builddir}"
make \
CGO_ENABLED=1 \
VERSION="%{buildx_version}" \
REVISION="v%{buildx_version}" \
GO_EXTRA_FLAGS="-buildmode=pie" \
build
popd
%endif
%install
install -Dd -m0755 \
%{buildroot}%{_sysconfdir}/init.d \
%{buildroot}%{_bindir} \
%{buildroot}%{_sbindir}
# docker daemon
install -D -m0755 %{docker_builddir}/bundles/dynbinary-daemon/dockerd %{buildroot}/%{_bindir}/dockerd
# docker proxy
install -D -m0755 %{docker_builddir}/bundles/dynbinary-daemon/docker-proxy %{buildroot}/%{_bindir}/docker-proxy
# cli-plugins/
install -d %{buildroot}/usr/lib/docker/cli-plugins
%if %{with buildx}
# buildx plugin
install -D -m0755 %{buildx_builddir}/bin/build/docker-buildx %{buildroot}/usr/lib/docker/cli-plugins/docker-buildx
%endif
# /var/lib/docker
install -d %{buildroot}/%{_localstatedir}/lib/docker
# daemon.json config file
install -D -m0644 %{SOURCE150} %{buildroot}%{_sysconfdir}/docker/daemon.json
%if %{with suseconnect}
# SUSE-specific config file
echo 1 > %{buildroot}%{_sysconfdir}/docker/suse-secrets-enable
%endif
# docker cli
install -D -m0755 %{cli_builddir}/build/docker %{buildroot}/%{_bindir}/docker
install -D -m0644 %{cli_builddir}/contrib/completion/bash/docker "%{buildroot}%{_datarootdir}/bash-completion/completions/docker"
install -D -m0644 %{cli_builddir}/contrib/completion/zsh/_docker "%{buildroot}%{_sysconfdir}/zsh_completion.d/_docker"
install -D -m0644 %{cli_builddir}/contrib/completion/fish/docker.fish "%{buildroot}/%{_datadir}/fish/vendor_completions.d/docker.fish"
# systemd service
install -D -m0644 %{SOURCE100} %{buildroot}%{_unitdir}/docker.service
install -D -m0644 %{SOURCE101} %{buildroot}%{_unitdir}/docker.socket
ln -sf service %{buildroot}%{_sbindir}/rcdocker
# udev rule that prevents dolphin from showing all docker devices (which slows
# it down); upstream report https://bugs.kde.org/show_bug.cgi?id=329930
install -D -m0644 %{SOURCE110} %{buildroot}%{_udevrulesdir}/80-docker.rules
# audit rules
install -D -m0640 %{SOURCE140} %{buildroot}%{_sysconfdir}/audit/rules.d/docker.rules
# sysconfig file
install -D -m0644 %{SOURCE120} %{buildroot}%{_fillupdir}/sysconfig.docker
# install manpages (using the ones from the engine)
for mansrcdir in %{cli_builddir}/man/man[1-9] %{docker_builddir}/man/man[1-9]
do
section="$(basename $mansrcdir)"
install -d %{buildroot}%{_mandir}/$section
install -p -m0644 $mansrcdir/* %{buildroot}%{_mandir}/$section
done
# sysusers.d
install -D -m0644 %{SOURCE160} %{buildroot}%{_sysusersdir}/docker.conf
# rootless extras
install -D -p -m 0755 contrib/dockerd-rootless.sh %{buildroot}/%{_bindir}/dockerd-rootless.sh
install -D -p -m 0755 contrib/dockerd-rootless-setuptool.sh %{buildroot}/%{_bindir}/dockerd-rootless-setuptool.sh
%if %{with integration_tests}
# integration tests
install -d %{buildroot}%{testdir}
cp -ar %{docker_builddir} %{buildroot}%{testdir}/src
install -d %{buildroot}%{testdir}/bin
install -D -p -m 0755 %{SOURCE900} %{buildroot}%{testdir}/docker-integration.sh
# remove all of the non-test binaries in bundles/
rm -rfv %{buildroot}%{testdir}/src/bundles/
%endif
%fdupes %{buildroot}
%pre -f %{name}.pre
# /etc/sub[ug]id should exist already (it's part of shadow-utils), but older
# distros don't have it. Docker just parses it and doesn't need any special
# shadow-utils helpers.
touch /etc/subuid /etc/subgid ||:
# "useradd -r" doesn't add sub[ug]ids so we manually add some. Hopefully there
# aren't any conflicts here, because usermod doesn't provide the same "get
# unused range" feature that useradd does.
grep -q '^dockremap:' /etc/subuid || \
usermod -v 100000000-200000000 dockremap &>/dev/null || \
echo "dockremap:100000000:100000001" >>/etc/subuid ||:
grep -q '^dockremap:' /etc/subgid || \
usermod -w 100000000-200000000 dockremap &>/dev/null || \
echo "dockremap:100000000:100000001" >>/etc/subgid ||:
%service_add_pre docker.service docker.socket
%post
%service_add_post docker.service docker.socket
%{fillup_only -n docker}
%preun
%service_del_preun docker.service docker.socket
%postun
%service_del_postun docker.service docker.socket
%files
%defattr(-,root,root)
%doc README.md README_SUSE.md
%license LICENSE
%{_bindir}/docker
%{_bindir}/dockerd
%{_bindir}/docker-proxy
%{_sbindir}/rcdocker
%dir %{_localstatedir}/lib/docker/
%dir /usr/lib/docker
%dir /usr/lib/docker/cli-plugins
%{_unitdir}/docker.service
%{_unitdir}/docker.socket
%{_sysusersdir}/docker.conf
%dir %{_sysconfdir}/docker
%config(noreplace) %{_sysconfdir}/docker/daemon.json
%if %{with suseconnect}
%config(noreplace) %{_sysconfdir}/docker/suse-secrets-enable
%endif
%{_fillupdir}/sysconfig.docker
%dir %attr(750,root,root) %{_sysconfdir}/audit/rules.d
%config %{_sysconfdir}/audit/rules.d/docker.rules
%{_udevrulesdir}/80-docker.rules
%{_mandir}/man*/*%{ext_man}
%if %{with buildx}
%files buildx
%defattr(-,root,root)
/usr/lib/docker/cli-plugins/docker-buildx
%endif
%files rootless-extras
%defattr(-,root,root)
%{_bindir}/dockerd-rootless.sh
%{_bindir}/dockerd-rootless-setuptool.sh
%if %{with integration_tests}
%files integration-tests-devel
%defattr(-,root,root)
%{testdir}
%endif
%files bash-completion
%defattr(-,root,root)
%{_datarootdir}/bash-completion/completions/docker
%files zsh-completion
%defattr(-,root,root)
%{_sysconfdir}/zsh_completion.d/_docker
%files fish-completion
%defattr(-,root,root)
%{_datadir}/fish/vendor_completions.d/docker.fish
%changelog

docker.sysusers Normal file

@@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g docker - - - -
u dockremap - 'docker --userns-remap=default' - -
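systemd-sysusers applies this file at package installation time via the %pre scriptlet generated in docker.spec; the same step can be replayed by hand as a sanity check:

systemd-sysusers /usr/lib/sysusers.d/docker.conf
getent group docker && getent passwd dockremap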

sysconfig.docker Normal file

@@ -0,0 +1,8 @@
## Path : System/Management
## Description : Extra cli switches for docker daemon
## Type : string
## Default : ""
## ServiceRestart : docker
#
DOCKER_OPTS=""
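Flags set here reach dockerd through the EnvironmentFile= line in docker.service above; a minimal sketch (--debug is just an example flag):

DOCKER_OPTS="--debug"
# then restart the daemon to pick it up:
# systemctl restart docker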