Aleksa Sarai 2025-01-13 01:29:23 +00:00 committed by Git OBS Bridge
commit 1d00d6bb91
32 changed files with 39285 additions and 0 deletions

.gitattributes (vendored, new file)

@@ -0,0 +1,23 @@
## Default LFS
*.7z filter=lfs diff=lfs merge=lfs -text
*.bsp filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.gem filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.lz filter=lfs diff=lfs merge=lfs -text
*.lzma filter=lfs diff=lfs merge=lfs -text
*.obscpio filter=lfs diff=lfs merge=lfs -text
*.oxt filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.rpm filter=lfs diff=lfs merge=lfs -text
*.tbz filter=lfs diff=lfs merge=lfs -text
*.tbz2 filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.ttf filter=lfs diff=lfs merge=lfs -text
*.txz filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text

.gitignore (vendored, new file)

@@ -0,0 +1 @@
.osc

@@ -0,0 +1,74 @@
From a94378d92f7ef523b17aa399ce83b27f7986980f Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 12:41:54 +1100
Subject: [PATCH 01/11] SECRETS: daemon: allow directory creation in
/run/secrets
Since FileMode can have the directory bit set, allow a SecretStore
implementation to return secrets that are actually directories. This is
useful for creating directories and subdirectories of secrets.
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/container_operations_unix.go | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index 290ec59a34a7..b7013fb89c83 100644
--- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go
@@ -4,6 +4,7 @@
package daemon // import "github.com/docker/docker/daemon"
import (
+ "bytes"
"fmt"
"os"
"path/filepath"
@@ -14,6 +15,7 @@ import (
"github.com/docker/docker/daemon/links"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
+ "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/process"
"github.com/docker/docker/pkg/stringid"
@@ -206,9 +208,6 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if err != nil {
return errors.Wrap(err, "unable to get secret from secret store")
}
- if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
- return errors.Wrap(err, "error injecting secret")
- }
uid, err := strconv.Atoi(s.File.UID)
if err != nil {
@@ -219,6 +218,24 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
return err
}
+ if s.File.Mode.IsDir() {
+ if err := os.Mkdir(fPath, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error creating secretdir")
+ }
+ if secret.Spec.Data != nil {
+ // If the "file" is a directory, then s.File.Data is actually a tar
+ // archive of the directory. So we just do a tar extraction here.
+ if err := archive.UntarUncompressed(bytes.NewBuffer(secret.Spec.Data), fPath, &archive.TarOptions{
+ IDMap: daemon.idMapping,
+ }); err != nil {
+ return errors.Wrap(err, "error injecting secretdir")
+ }
+ }
+ } else {
+ if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error injecting secret")
+ }
+ }
if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
return errors.Wrap(err, "error setting ownership for secret")
}
--
2.47.1
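The mechanism above is easier to see outside the daemon: if the secret's FileMode has the directory bit set, the Data payload is an uncompressed tar archive to extract; otherwise it is raw file contents. Below is a minimal sketch of that branch using only the standard library. The untarSecretDir helper is a hypothetical, simplified stand-in for Docker's archive.UntarUncompressed (no ID remapping, only directories and regular files), not the daemon's actual code.

package secretsketch

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// injectSecret mirrors the branch the patch adds: directory-mode secrets
// carry a tar archive in data, file-mode secrets carry raw bytes.
func injectSecret(fPath string, data []byte, mode os.FileMode) error {
	if mode.IsDir() {
		if err := os.Mkdir(fPath, mode); err != nil {
			return fmt.Errorf("error creating secretdir: %w", err)
		}
		if data == nil {
			return nil
		}
		return untarSecretDir(bytes.NewReader(data), fPath)
	}
	return os.WriteFile(fPath, data, mode)
}

// untarSecretDir is a hypothetical stand-in for archive.UntarUncompressed.
func untarSecretDir(r io.Reader, dest string) error {
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// Clean the entry name against "/" so entries cannot escape dest.
		target := filepath.Join(dest, filepath.Clean("/"+hdr.Name))
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, hdr.FileInfo().Mode()); err != nil {
				return err
			}
		case tar.TypeReg:
			content, err := io.ReadAll(tr)
			if err != nil {
				return err
			}
			if err := os.WriteFile(target, content, hdr.FileInfo().Mode()); err != nil {
				return err
			}
		}
	}
}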

@@ -0,0 +1,510 @@
From 009cad241857541779baa2a9fae8291597dc85f8 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 11:43:29 +1100
Subject: [PATCH 02/11] SECRETS: SUSE: implement SUSE container secrets
This allows for us to pass in host credentials to a container, allowing
for SUSEConnect to work with containers.
Users can disable this by setting DOCKER_SUSE_SECRETS_ENABLE=0 in
/etc/sysconfig/docker or by adding that setting to docker.service's
Environment using a drop-in file.
THIS PATCH IS NOT TO BE UPSTREAMED, DUE TO THE FACT THAT IT IS
SUSE-SPECIFIC, AND UPSTREAM DOES NOT APPROVE OF THIS CONCEPT BECAUSE IT
MAKES BUILDS NOT ENTIRELY REPRODUCIBLE.
SUSE-Bugs: bsc#1065609 bsc#1057743 bsc#1055676 bsc#1030702 bsc#1231348
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/start.go | 5 +
daemon/suse_secrets.go | 461 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 466 insertions(+)
create mode 100644 daemon/suse_secrets.go
diff --git a/daemon/start.go b/daemon/start.go
index 2e0b9e6be847..dca04486888f 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -151,6 +151,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C
return err
}
+ // SUSE:secrets -- inject the SUSE secret store
+ if err := daemon.injectSuseSecretStore(container); err != nil {
+ return errdefs.System(err)
+ }
+
spec, err := daemon.createSpec(ctx, container)
if err != nil {
return errdefs.System(err)
diff --git a/daemon/suse_secrets.go b/daemon/suse_secrets.go
new file mode 100644
index 000000000000..85b37bf46544
--- /dev/null
+++ b/daemon/suse_secrets.go
@@ -0,0 +1,461 @@
+/*
+ * suse-secrets: patch for Docker to implement SUSE secrets
+ * Copyright (C) 2017-2021 SUSE LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package daemon
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/container"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/idtools"
+
+ swarmtypes "github.com/docker/docker/api/types/swarm"
+ swarmexec "github.com/moby/swarmkit/v2/agent/exec"
+ swarmapi "github.com/moby/swarmkit/v2/api"
+
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+const suseSecretsTogglePath = "/etc/docker/suse-secrets-enable"
+
+// parseEnableFile parses a file that can only contain "0" or "1" (with some
+// whitespace).
+func parseEnableFile(path string) (bool, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return false, err
+ }
+ data = bytes.TrimSpace(data)
+
+ switch value := string(data); value {
+ case "1":
+ return true, nil
+ case "0", "":
+ return false, nil
+ default:
+ return false, fmt.Errorf("invalid value %q (must be 0 to disable or 1 to enable)", value)
+ }
+}
+
+func isSuseSecretsEnabled() bool {
+ value, err := parseEnableFile(suseSecretsTogglePath)
+ if err != nil {
+ logrus.Warnf("SUSE:secrets :: error parsing %s: %v -- disabling SUSE secrets", suseSecretsTogglePath, err)
+ value = false
+ }
+ return value
+}
+
+var suseSecretsEnabled = true
+
+func init() {
+ // Make this entire feature toggle-able so that users can disable it if
+ // they run into issues like bsc#1231348.
+ suseSecretsEnabled = isSuseSecretsEnabled()
+ if suseSecretsEnabled {
+ logrus.Infof("SUSE:secrets :: SUSEConnect support enabled (set %s to 0 to disable)", suseSecretsTogglePath)
+ } else {
+ logrus.Infof("SUSE:secrets :: SUSEConnect support disabled by %s", suseSecretsTogglePath)
+ }
+}
+
+// SuseFakeFile represents a fake file (or directory) injected into the
+// container as a secret.
+type SuseFakeFile struct {
+ Path string
+ Uid int
+ Gid int
+ Mode os.FileMode
+ Data []byte
+}
+
+func (s SuseFakeFile) id() string {
+ // NOTE: It is _very_ important that this string always has a prefix of
+ // "suse". This is how we can ensure that we can operate on
+ // SecretReferences with a confidence that it was made by us.
+ return fmt.Sprintf("suse_%s_%s", digest.FromBytes(s.Data).Hex(), s.Path)
+}
+
+func (s SuseFakeFile) toSecret() *swarmapi.Secret {
+ return &swarmapi.Secret{
+ ID: s.id(),
+ Internal: true,
+ Spec: swarmapi.SecretSpec{
+ Data: s.Data,
+ },
+ }
+}
+
+func (s SuseFakeFile) toSecretReference(idMaps idtools.IdentityMapping) *swarmtypes.SecretReference {
+ // Figure out the host-facing {uid,gid} based on the provided maps. Fall
+ // back to root if the UID/GID don't match (we are guaranteed that root is
+ // mapped).
+ ctrUser := idtools.Identity{UID: s.Uid, GID: s.Gid}
+ hostUser := idMaps.RootPair()
+ if user, err := idMaps.ToHost(ctrUser); err == nil {
+ hostUser = user
+ }
+
+ // Return the secret reference as a file target.
+ return &swarmtypes.SecretReference{
+ SecretID: s.id(),
+ SecretName: s.id(),
+ File: &swarmtypes.SecretReferenceFileTarget{
+ Name: s.Path,
+ UID: fmt.Sprintf("%d", hostUser.UID),
+ GID: fmt.Sprintf("%d", hostUser.GID),
+ Mode: s.Mode,
+ },
+ }
+}
+
+// readDir will recurse into a directory prefix/dir, and return the set of
+// secrets in that directory (as a tar archive that is packed inside the "data"
+// field). The Path attribute of each has the prefix stripped. Symlinks are
+// dereferenced.
+func readDir(prefix, dir string) ([]*SuseFakeFile, error) {
+ var suseFiles []*SuseFakeFile
+
+ path := filepath.Join(prefix, dir)
+ fi, err := os.Stat(path)
+ if err != nil {
+ // Ignore missing files.
+ if os.IsNotExist(err) {
+ // If the path itself still exists (via Lstat), it was a dangling
+ // symlink, so warn about it.
+ _, err2 := os.Lstat(path)
+ if !os.IsNotExist(err2) {
+ logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path)
+ }
+ return nil, nil
+ }
+ return nil, err
+ } else if !fi.IsDir() {
+ // Just to be safe.
+ logrus.Infof("SUSE:secrets :: expected %q to be a directory, but was a file", path)
+ return readFile(prefix, dir)
+ }
+ path, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Construct a tar archive of the source directory. We tar up the prefix
+ // directory and add dir as an IncludeFiles specifically so that we
+ // preserve the name of the directory itself.
+ tarStream, err := archive.TarWithOptions(path, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to tar source directory %q: %v", path, err)
+ }
+ tarStreamBytes, err := ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read full tar archive: %v", err)
+ }
+
+ // Get a list of the symlinks in the tar archive.
+ var symlinks []string
+ tmpTr := tar.NewReader(bytes.NewBuffer(tarStreamBytes))
+ for {
+ hdr, err := tmpTr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read through tar reader: %v", err)
+ }
+ if hdr.Typeflag == tar.TypeSymlink {
+ symlinks = append(symlinks, hdr.Name)
+ }
+ }
+
+ // Symlinks aren't dereferenced in the above archive, so we explicitly do a
+ // rewrite of the tar archive to include all symlinks to files. We cannot
+ // do directories here, but lower-level directory symlinks aren't supported
+ // by zypper so this isn't an issue.
+ symlinkModifyMap := map[string]archive.TarModifierFunc{}
+ for _, sym := range symlinks {
+ logrus.Debugf("SUSE:secrets: archive(%q) %q is a need-to-rewrite symlink", path, sym)
+ symlinkModifyMap[sym] = func(tarPath string, hdr *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
+ logrus.Debugf("SUSE:secrets: archive(%q) mapping for symlink %q", path, tarPath)
+ tarFullPath := filepath.Join(path, tarPath)
+
+ // Get a copy of the original byte stream.
+ oldContent, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, nil, fmt.Errorf("suse_rewrite: failed to read archive entry %q: %v", tarPath, err)
+ }
+
+ // Check that the file actually exists.
+ fi, err := os.Stat(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to stat archive entry %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ // Read the actual contents.
+ content, err := ioutil.ReadFile(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to read %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ newHdr, err := tar.FileInfoHeader(fi, "")
+ if err != nil {
+ // Fake the header.
+ newHdr = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ }
+ }
+
+ // Update the key fields.
+ hdr.Typeflag = newHdr.Typeflag
+ hdr.Mode = newHdr.Mode
+ hdr.Linkname = ""
+ return hdr, content, nil
+ }
+ }
+
+ // Create the rewritten tar stream.
+ tarStream = archive.ReplaceFileTarWrapper(ioutil.NopCloser(bytes.NewBuffer(tarStreamBytes)), symlinkModifyMap)
+ tarStreamBytes, err = ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read rewritten archive: %v", err)
+ }
+
+ // Add the tar stream as a "file".
+ suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: dir,
+ Mode: fi.Mode(),
+ Data: tarStreamBytes,
+ })
+ return suseFiles, nil
+}
+
+// readFile returns a secret given a file under a given prefix.
+func readFile(prefix, file string) ([]*SuseFakeFile, error) {
+ path := filepath.Join(prefix, file)
+ fi, err := os.Stat(path)
+ if err != nil {
+ // Ignore missing files.
+ if os.IsNotExist(err) {
+ // If the path itself still exists (via Lstat), it was a dangling
+ // symlink, so warn about it.
+ _, err2 := os.Lstat(path)
+ if !os.IsNotExist(err2) {
+ logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path)
+ }
+ return nil, nil
+ }
+ return nil, err
+ } else if fi.IsDir() {
+ // Just to be safe.
+ logrus.Infof("SUSE:secrets :: expected %q to be a file, but was a directory", path)
+ return readDir(prefix, file)
+ }
+
+ var uid, gid int
+ if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+ uid, gid = int(stat.Uid), int(stat.Gid)
+ } else {
+ logrus.Warnf("SUSE:secrets :: failed to cast file stat_t: defaulting to owned by root:root: %s", path)
+ uid, gid = 0, 0
+ }
+
+ bytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var suseFiles []*SuseFakeFile
+ suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: file,
+ Uid: uid,
+ Gid: gid,
+ Mode: fi.Mode(),
+ Data: bytes,
+ })
+ return suseFiles, nil
+}
+
+// getHostSuseSecretData returns the list of SuseFakeFiles that need to be added
+// as SUSE secrets.
+func getHostSuseSecretData() ([]*SuseFakeFile, error) {
+ secrets := []*SuseFakeFile{}
+
+ credentials, err := readDir("/etc/zypp", "credentials.d")
+ if err != nil {
+ if os.IsNotExist(err) {
+ credentials = []*SuseFakeFile{}
+ } else {
+ logrus.Errorf("SUSE:secrets :: error while reading zypp credentials: %s", err)
+ return nil, err
+ }
+ }
+ secrets = append(secrets, credentials...)
+
+ suseConnect, err := readFile("/etc", "SUSEConnect")
+ if err != nil {
+ if os.IsNotExist(err) {
+ suseConnect = []*SuseFakeFile{}
+ } else {
+ logrus.Errorf("SUSE:secrets :: error while reading /etc/SUSEConnect: %s", err)
+ return nil, err
+ }
+ }
+ secrets = append(secrets, suseConnect...)
+
+ return secrets, nil
+}
+
+// To fake an empty store, in the case where we are operating on a container
+// that was created pre-swarmkit. Otherwise segfaults and other fun things
+// happen. See bsc#1057743.
+type (
+ suseEmptyStore struct{}
+ suseEmptySecret struct{}
+ suseEmptyConfig struct{}
+ suseEmptyVolume struct{}
+)
+
+// In order to reduce the amount of code touched outside of this file, we
+// implement the swarm API for DependencyGetter. This asserts that this
+// requirement will always be matched. In addition, for the case of the *empty*
+// getters this reduces memory usage by having a global instance.
+var (
+ _ swarmexec.DependencyGetter = &suseDependencyStore{}
+ emptyStore swarmexec.DependencyGetter = suseEmptyStore{}
+ emptySecret swarmexec.SecretGetter = suseEmptySecret{}
+ emptyConfig swarmexec.ConfigGetter = suseEmptyConfig{}
+ emptyVolume swarmexec.VolumeGetter = suseEmptyVolume{}
+)
+
+var errSuseEmptyStore = fmt.Errorf("SUSE:secrets :: tried to get a resource from empty store [this is a bug]")
+
+func (_ suseEmptyConfig) Get(_ string) (*swarmapi.Config, error) { return nil, errSuseEmptyStore }
+func (_ suseEmptySecret) Get(_ string) (*swarmapi.Secret, error) { return nil, errSuseEmptyStore }
+func (_ suseEmptyVolume) Get(_ string) (string, error) { return "", errSuseEmptyStore }
+func (_ suseEmptyStore) Secrets() swarmexec.SecretGetter { return emptySecret }
+func (_ suseEmptyStore) Configs() swarmexec.ConfigGetter { return emptyConfig }
+func (_ suseEmptyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume }
+
+type suseDependencyStore struct {
+ dfl swarmexec.DependencyGetter
+ secrets map[string]*swarmapi.Secret
+}
+
+// The following are effectively dumb wrappers that return ourselves, or the
+// default.
+func (s *suseDependencyStore) Secrets() swarmexec.SecretGetter { return s }
+func (s *suseDependencyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume }
+func (s *suseDependencyStore) Configs() swarmexec.ConfigGetter { return s.dfl.Configs() }
+
+// Get overrides the underlying DependencyGetter with our own secrets (falling
+// through to the underlying DependencyGetter if the secret isn't present).
+func (s *suseDependencyStore) Get(id string) (*swarmapi.Secret, error) {
+ logrus.Debugf("SUSE:secrets :: id=%s requested from suseDependencyGetter", id)
+
+ secret, ok := s.secrets[id]
+ if !ok {
+ // fallthrough
+ return s.dfl.Secrets().Get(id)
+ }
+ return secret, nil
+}
+
+// removeSuseSecrets removes any SecretReferences which were added by us
+// explicitly (this is detected by checking that the SecretID has a 'suse'
+// prefix). See bsc#1057743.
+func removeSuseSecrets(c *container.Container) {
+ var without []*swarmtypes.SecretReference
+ for _, secret := range c.SecretReferences {
+ if strings.HasPrefix(secret.SecretID, "suse") {
+ logrus.Debugf("SUSE:secrets :: removing 'old' suse secret %q from container %q", secret.SecretID, c.ID)
+ continue
+ }
+ without = append(without, secret)
+ }
+ c.SecretReferences = without
+}
+
+func (daemon *Daemon) injectSuseSecretStore(c *container.Container) error {
+ // We drop any "old" SUSE secrets, as it appears that old containers (when
+ // restarted) could still have references to old secrets. The .id() of all
+ // our secrets has a prefix of "suse", which makes this check easy. See bsc#1057743
+ // for details on why this could cause issues.
+ removeSuseSecrets(c)
+
+ // Don't inject anything if the administrator has disabled suse secrets.
+ // However, for previous existing containers we need to remove old secrets
+ // (see above), otherwise they will still have old secret data.
+ if !suseSecretsEnabled {
+ logrus.Debugf("SUSE:secrets :: skipping injection of secrets into container %q because of %s", c.ID, suseSecretsTogglePath)
+ return nil
+ }
+
+ newDependencyStore := &suseDependencyStore{
+ dfl: c.DependencyStore,
+ secrets: make(map[string]*swarmapi.Secret),
+ }
+ // Handle old containers. See bsc#1057743.
+ if newDependencyStore.dfl == nil {
+ newDependencyStore.dfl = emptyStore
+ }
+
+ secrets, err := getHostSuseSecretData()
+ if err != nil {
+ return err
+ }
+
+ idMaps := daemon.idMapping
+ for _, secret := range secrets {
+ newDependencyStore.secrets[secret.id()] = secret.toSecret()
+ c.SecretReferences = append(c.SecretReferences, secret.toSecretReference(idMaps))
+ }
+
+ c.DependencyStore = newDependencyStore
+
+ // bsc#1057743 -- In older versions of Docker we added volumes explicitly
+ // to the mount list. This causes clashes because of duplicate namespaces.
+ // If we see an existing mount that will clash with the in-built secrets
+ // mount we assume it's our fault.
+ intendedMounts, err := c.SecretMounts()
+ if err != nil {
+ logrus.Warnf("SUSE:secrets :: fetching old secret mounts: %v", err)
+ return err
+ }
+ for _, intendedMount := range intendedMounts {
+ mountPath := intendedMount.Destination
+ if volume, ok := c.MountPoints[mountPath]; ok {
+ logrus.Debugf("SUSE:secrets :: removing pre-existing %q mount: %#v", mountPath, volume)
+ delete(c.MountPoints, mountPath)
+ }
+ }
+ return nil
+}
--
2.47.1
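Two details of this patch carry most of the weight: every injected secret ID begins with "suse" (which is what removeSuseSecrets keys on when dropping stale references on restart), and the ID embeds a digest of the payload (so changed host credentials produce new IDs). A standalone sketch of that scheme, with crypto/sha256 standing in for the go-digest dependency (digest.FromBytes produces a SHA-256 digest by default); the payload contents here are made up.

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// secretID reproduces the shape of SuseFakeFile.id(): "suse_" + hex digest
// of the payload + "_" + the in-container path.
func secretID(path string, data []byte) string {
	return fmt.Sprintf("suse_%x_%s", sha256.Sum256(data), path)
}

// isSuseSecret mirrors the prefix check removeSuseSecrets relies on.
func isSuseSecret(id string) bool {
	return strings.HasPrefix(id, "suse")
}

func main() {
	id := secretID("SUSEConnect", []byte("url: https://scc.suse.com\n"))
	fmt.Println(id)               // suse_<64 hex chars>_SUSEConnect
	fmt.Println(isSuseSecret(id)) // true
}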

@@ -0,0 +1,46 @@
From 3f1bda82f345cc919a70cf747cc8c6f094c9451a Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Mon, 22 May 2023 15:44:54 +1000
Subject: [PATCH 03/11] BUILD: SLE12: revert "graphdriver/btrfs: use kernel
UAPI headers"
This reverts commit 3208dcabdc8997340b255f5b880fef4e3f54580d.
On SLE 12, our UAPI headers are too old, resulting in us being unable to
build the btrfs driver with the new headers. This patch is only needed
for SLE-12.
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
daemon/graphdriver/btrfs/btrfs.go | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index d88efc4be2bb..4e976aa689cd 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -5,17 +5,12 @@ package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
/*
#include <stdlib.h>
-#include <stdio.h>
#include <dirent.h>
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
- #error "Headers from kernel >= 4.12 are required to build with Btrfs support."
- #error "HINT: Set 'DOCKER_BUILDTAGS=exclude_graphdriver_btrfs' to build without Btrfs."
-#endif
-
-#include <linux/btrfs.h>
-#include <linux/btrfs_tree.h>
+// keep struct field name compatible with btrfs-progs < 6.1.
+#define max_referenced max_rfer
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
--
2.47.1

@@ -0,0 +1,89 @@
From ba4df1cb80fa7956c148230193037a2b112a40a5 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Fri, 29 Jun 2018 17:59:30 +1000
Subject: [PATCH 04/11] bsc1073877: apparmor: clobber docker-default profile on
start
In the process of making docker-default reloading far less expensive,
567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor
profiles") mistakenly made the initial profile load at dockerd start-up
lazy. As a result, if you have a running Docker daemon and upgrade it to
a new one with an updated AppArmor profile the new profile will not take
effect (because the old one is still loaded). The fix for this is quite
trivial, and just requires us to clobber the profile on start-up.
Fixes: 567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor profiles")
SUSE-Bugs: bsc#1099277
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/apparmor_default.go | 14 ++++++++++----
daemon/apparmor_default_unsupported.go | 4 ++++
daemon/daemon.go | 5 +++--
3 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go
index 6376001613f7..5fde21a4af8a 100644
--- a/daemon/apparmor_default.go
+++ b/daemon/apparmor_default.go
@@ -24,6 +24,15 @@ func DefaultApparmorProfile() string {
return ""
}
+func clobberDefaultAppArmorProfile() error {
+ if apparmor.HostSupports() {
+ if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
+ return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
+ }
+ }
+ return nil
+}
+
func ensureDefaultAppArmorProfile() error {
if apparmor.HostSupports() {
loaded, err := aaprofile.IsLoaded(defaultAppArmorProfile)
@@ -37,10 +46,7 @@ func ensureDefaultAppArmorProfile() error {
}
// Load the profile.
- if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
- return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
- }
+ return clobberDefaultAppArmorProfile()
}
-
return nil
}
diff --git a/daemon/apparmor_default_unsupported.go b/daemon/apparmor_default_unsupported.go
index e3dc18b32b5e..9c7723056268 100644
--- a/daemon/apparmor_default_unsupported.go
+++ b/daemon/apparmor_default_unsupported.go
@@ -3,6 +3,10 @@
package daemon // import "github.com/docker/docker/daemon"
+func clobberDefaultAppArmorProfile() error {
+ return nil
+}
+
func ensureDefaultAppArmorProfile() error {
return nil
}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 585d85086f8d..6e4c6ad1ac01 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -845,8 +845,9 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
- // ensureDefaultAppArmorProfile does nothing if apparmor is disabled
- if err := ensureDefaultAppArmorProfile(); err != nil {
+ // Make sure we clobber any pre-existing docker-default profile to ensure
+ // that upgrades to the profile actually work smoothly.
+ if err := clobberDefaultAppArmorProfile(); err != nil {
logrus.Errorf(err.Error())
}
--
2.47.1
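The behavioural difference this patch describes is easy to miss in the diff, so here is a reduced sketch of the two workflows, with hypothetical in-memory stand-ins for aaprofile.IsLoaded and aaprofile.InstallDefault (the real helpers talk to apparmor_parser and the kernel).

package main

import "fmt"

// loadedProfiles is a hypothetical stand-in for the kernel's loaded-profile
// state, kept in a map so the control-flow difference is visible.
var loadedProfiles = map[string]bool{}

func isLoaded(name string) bool        { return loadedProfiles[name] }
func installDefault(name string) error { loadedProfiles[name] = true; return nil }

// ensureProfile is the pre-patch start-up behaviour: if a profile with the
// right name is already loaded, do nothing, even when its contents are stale.
func ensureProfile(name string) error {
	if isLoaded(name) {
		return nil
	}
	return installDefault(name)
}

// clobberProfile is the patched start-up behaviour: always (re)install, so a
// daemon upgrade replaces any stale docker-default profile.
func clobberProfile(name string) error {
	return installDefault(name)
}

func main() {
	loadedProfiles["docker-default"] = true // left over from the old daemon
	_ = ensureProfile("docker-default")     // no-op: the stale profile survives
	_ = clobberProfile("docker-default")    // reloads unconditionally
	fmt.Println("docker-default reloaded")
}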

@@ -0,0 +1,241 @@
From 0ca28257e81eed36ff840bff822ff7add3e2efa2 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 11 Oct 2023 21:19:12 +1100
Subject: [PATCH 05/11] SLE12: revert "apparmor: remove version-conditionals
from template"
This reverts the following commits:
* 7008a514493a ("profiles/apparmor: remove version-conditional constraints (< 2.8.96)")
* 2e19a4d56bf2 ("contrib/apparmor: remove version-conditionals (< 2.9) from template")
* d169a5730649 ("contrib/apparmor: remove remaining version-conditionals (< 2.9) from template")
* ecaab085db4b ("profiles/apparmor: remove use of aaparser.GetVersion()")
* e3e715666f95 ("pkg/aaparser: deprecate GetVersion, as it's no longer used")
These version conditionals are still required on SLE 12, where our
apparmor_parser version is quite old.
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
contrib/apparmor/main.go | 16 ++++++++++++++--
contrib/apparmor/template.go | 16 ++++++++++++++++
pkg/aaparser/aaparser.go | 2 --
profiles/apparmor/apparmor.go | 14 ++++++++++++--
profiles/apparmor/template.go | 4 ++++
5 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/contrib/apparmor/main.go b/contrib/apparmor/main.go
index d67890d265de..f4a2978b86cb 100644
--- a/contrib/apparmor/main.go
+++ b/contrib/apparmor/main.go
@@ -6,9 +6,13 @@ import (
"os"
"path"
"text/template"
+
+ "github.com/docker/docker/pkg/aaparser"
)
-type profileData struct{}
+type profileData struct {
+ Version int
+}
func main() {
if len(os.Args) < 2 {
@@ -18,6 +22,15 @@ func main() {
// parse the arg
apparmorProfilePath := os.Args[1]
+ version, err := aaparser.GetVersion()
+ if err != nil {
+ log.Fatal(err)
+ }
+ data := profileData{
+ Version: version,
+ }
+ fmt.Printf("apparmor_parser is of version %+v\n", data)
+
// parse the template
compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate)
if err != nil {
@@ -35,7 +48,6 @@ func main() {
}
defer f.Close()
- data := profileData{}
if err := compiled.Execute(f, data); err != nil {
log.Fatalf("executing template failed: %v", err)
}
diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go
index 58afcbe845ee..e6d0b6d37c58 100644
--- a/contrib/apparmor/template.go
+++ b/contrib/apparmor/template.go
@@ -20,9 +20,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
umount,
pivot_root,
+{{if ge .Version 209000}}
signal (receive) peer=@{profile_name},
signal (receive) peer=unconfined,
signal (send),
+{{end}}
network,
capability,
owner /** rw,
@@ -45,10 +47,12 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/etc/ld.so.cache r,
/etc/passwd r,
+{{if ge .Version 209000}}
ptrace peer=@{profile_name},
ptrace (read) peer=docker-default,
deny ptrace (trace) peer=docker-default,
deny ptrace peer=/usr/bin/docker///bin/ps,
+{{end}}
/usr/lib/** rm,
/lib/** rm,
@@ -69,9 +73,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/sbin/zfs rCx,
/sbin/apparmor_parser rCx,
+{{if ge .Version 209000}}
# Transitions
change_profile -> docker-*,
change_profile -> unconfined,
+{{end}}
profile /bin/cat (complain) {
/etc/ld.so.cache r,
@@ -93,8 +99,10 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/dev/null rw,
/bin/ps mr,
+{{if ge .Version 209000}}
# We don't need ptrace so we'll deny and ignore the error.
deny ptrace (read, trace),
+{{end}}
# Quiet dac_override denials
deny capability dac_override,
@@ -112,11 +120,15 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/proc/tty/drivers r,
}
profile /sbin/iptables (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability net_admin,
}
profile /sbin/auplink flags=(attach_disconnected, complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability sys_admin,
capability dac_override,
@@ -135,7 +147,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/proc/[0-9]*/mounts rw,
}
profile /sbin/modprobe /bin/kmod (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability sys_module,
/etc/ld.so.cache r,
/lib/** rm,
@@ -149,7 +163,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
}
# xz works via pipes, so we do not need access to the filesystem.
profile /usr/bin/xz (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
/etc/ld.so.cache r,
/lib/** rm,
/usr/bin/xz rm,
diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go
index 3d7c2c5a97b3..2b5a2605f9c1 100644
--- a/pkg/aaparser/aaparser.go
+++ b/pkg/aaparser/aaparser.go
@@ -13,8 +13,6 @@ const (
)
// GetVersion returns the major and minor version of apparmor_parser.
-//
-// Deprecated: no longer used, and will be removed in the next release.
func GetVersion() (int, error) {
output, err := cmd("", "--version")
if err != nil {
diff --git a/profiles/apparmor/apparmor.go b/profiles/apparmor/apparmor.go
index d0f236160506..b3566b2f7354 100644
--- a/profiles/apparmor/apparmor.go
+++ b/profiles/apparmor/apparmor.go
@@ -14,8 +14,10 @@ import (
"github.com/docker/docker/pkg/aaparser"
)
-// profileDirectory is the file store for apparmor profiles and macros.
-const profileDirectory = "/etc/apparmor.d"
+var (
+ // profileDirectory is the file store for apparmor profiles and macros.
+ profileDirectory = "/etc/apparmor.d"
+)
// profileData holds information about the given profile for generation.
type profileData struct {
@@ -27,6 +29,8 @@ type profileData struct {
Imports []string
// InnerImports defines the apparmor functions to import in the profile.
InnerImports []string
+ // Version is the {major, minor, patch} version of apparmor_parser as a single number.
+ Version int
}
// generateDefault creates an apparmor profile from ProfileData.
@@ -46,6 +50,12 @@ func (p *profileData) generateDefault(out io.Writer) error {
p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
}
+ ver, err := aaparser.GetVersion()
+ if err != nil {
+ return err
+ }
+ p.Version = ver
+
return compiled.Execute(out, p)
}
diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go
index 9f207e2014a8..626e5f6789a3 100644
--- a/profiles/apparmor/template.go
+++ b/profiles/apparmor/template.go
@@ -24,12 +24,14 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
capability,
file,
umount,
+{{if ge .Version 208096}}
# Host (privileged) processes may send signals to container processes.
signal (receive) peer=unconfined,
# dockerd may send signals to container processes (for "docker kill").
signal (receive) peer={{.DaemonProfile}},
# Container processes may send signals amongst themselves.
signal (send,receive) peer={{.Name}},
+{{end}}
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
@@ -50,7 +52,9 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
deny /sys/devices/virtual/powercap/** rwklx,
deny /sys/kernel/security/** rwklx,
+{{if ge .Version 208095}}
# suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
ptrace (trace,read,tracedby,readby) peer={{.Name}},
+{{end}}
}
`
--
2.47.1
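The {{if ge .Version 209000}} guards work because GetVersion flattens apparmor_parser's major.minor.patch into a single integer the template can compare. Judging from the thresholds used in this patch (209000 for 2.9.0, 208095 and 208096 for 2.8.95 and 2.8.96), the encoding is major*100000 + minor*1000 + patch; the exact arithmetic in pkg/aaparser is an assumption inferred from those thresholds. A small sketch of the encoding and of how text/template evaluates the guard:

package main

import (
	"fmt"
	"os"
	"text/template"
)

// flattenVersion shows the assumed pkg/aaparser encoding: each
// apparmor_parser version major.minor.patch becomes one integer, so the
// template can compare versions with a plain "ge".
func flattenVersion(major, minor, patch int) int {
	return major*100000 + minor*1000 + patch
}

const snippet = `{{if ge .Version 209000}}  signal (receive) peer=unconfined,
{{end}}`

func main() {
	fmt.Println(flattenVersion(2, 8, 95)) // 208095: conditional block skipped
	fmt.Println(flattenVersion(2, 9, 0))  // 209000: conditional block emitted
	tmpl := template.Must(template.New("aa").Parse(snippet))
	_ = tmpl.Execute(os.Stdout, struct{ Version int }{Version: flattenVersion(2, 9, 0)})
}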

[File diff suppressed because it is too large]

@@ -0,0 +1,898 @@
From b760758157cd0d00f46f37f86a9cbee7810cb666 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <cyphar@cyphar.com>
Date: Thu, 2 May 2024 22:50:23 +1000
Subject: [PATCH 07/11] bsc1221916: update to patched buildkit version to fix
symlink resolution
SUSE-Bugs: https://bugzilla.suse.com/show_bug.cgi?id=1221916
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
builder/builder-next/worker/worker.go | 2 +-
vendor.mod | 2 +-
vendor.sum | 4 +-
.../buildkit/cache/contenthash/checksum.go | 393 ++++++++++--------
.../moby/buildkit/cache/contenthash/path.go | 161 +++----
vendor/modules.txt | 4 +-
6 files changed, 314 insertions(+), 252 deletions(-)
diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go
index 64d7b9131b16..7b40ac63ce7f 100644
--- a/builder/builder-next/worker/worker.go
+++ b/builder/builder-next/worker/worker.go
@@ -50,7 +50,7 @@ import (
)
func init() {
- version.Version = "v0.11.7+cd804dd86389"
+ version.Version = "v0.11.7+6b814972ef19"
}
const labelCreatedAt = "buildkit/createdat"
diff --git a/vendor.mod b/vendor.mod
index 2eb13746cacd..021d62b21d19 100644
--- a/vendor.mod
+++ b/vendor.mod
@@ -99,7 +99,7 @@ require (
)
// github.com/SUSE/buildkit suse-stable-v24.0.9
-replace github.com/moby/buildkit => github.com/SUSE/buildkit v0.0.0-20241218053907-cd804dd86389
+replace github.com/moby/buildkit => github.com/SUSE/buildkit v0.0.0-20241218053911-6b814972ef19
require (
cloud.google.com/go v0.102.1 // indirect
diff --git a/vendor.sum b/vendor.sum
index 716245c80413..4bdbbeb3f073 100644
--- a/vendor.sum
+++ b/vendor.sum
@@ -141,8 +141,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 h1:vX+gnvBc56EbWYrmlhYbFYRaeikAke1GL84N4BEYOFE=
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ=
-github.com/SUSE/buildkit v0.0.0-20241218053907-cd804dd86389 h1:EKne0CAOXpf1QuZ3+jj7PTpOtSn+q1Yz5H6pAwrOktY=
-github.com/SUSE/buildkit v0.0.0-20241218053907-cd804dd86389/go.mod h1:bMQDryngJKGvJ/ZuRFhrejurbvYSv3NkGCheQ59X4AM=
+github.com/SUSE/buildkit v0.0.0-20241218053911-6b814972ef19 h1:3gfqJcXxLASvlAfgd+TFPrrhNrM+O26HplOhi3BNT+A=
+github.com/SUSE/buildkit v0.0.0-20241218053911-6b814972ef19/go.mod h1:bMQDryngJKGvJ/ZuRFhrejurbvYSv3NkGCheQ59X4AM=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
index dcf424a6b4fc..13a74be24c4e 100644
--- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
+++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
"sync"
+ "sync/atomic"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru/simplelru"
@@ -288,7 +289,7 @@ func keyPath(p string) string {
// HandleChange notifies the source about a modification operation
func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
p = keyPath(p)
- k := convertPathToKey([]byte(p))
+ k := convertPathToKey(p)
deleteDir := func(cr *CacheRecord) {
if cr.Type == CacheRecordTypeDir {
@@ -367,7 +368,7 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
// note that the source may be called later because data writing is async
if fi.Mode()&os.ModeSymlink == 0 && stat.Linkname != "" {
ln := path.Join("/", filepath.ToSlash(stat.Linkname))
- v, ok := cc.txn.Get(convertPathToKey([]byte(ln)))
+ v, ok := cc.txn.Get(convertPathToKey(ln))
if ok {
cp := *v.(*CacheRecord)
cr = &cp
@@ -405,7 +406,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
defer m.clean()
if !opts.Wildcard && len(opts.IncludePatterns) == 0 && len(opts.ExcludePatterns) == 0 {
- return cc.checksumFollow(ctx, m, p, opts.FollowLinks)
+ return cc.lazyChecksum(ctx, m, p, opts.FollowLinks)
}
includedPaths, err := cc.includedPaths(ctx, m, p, opts)
@@ -416,7 +417,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
if opts.FollowLinks {
for i, w := range includedPaths {
if w.record.Type == CacheRecordTypeSymlink {
- dgst, err := cc.checksumFollow(ctx, m, w.path, opts.FollowLinks)
+ dgst, err := cc.lazyChecksum(ctx, m, w.path, opts.FollowLinks)
if err != nil {
return "", err
}
@@ -443,30 +444,6 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
return digester.Digest(), nil
}
-func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string, follow bool) (digest.Digest, error) {
- const maxSymlinkLimit = 255
- i := 0
- for {
- if i > maxSymlinkLimit {
- return "", errors.Errorf("too many symlinks: %s", p)
- }
- cr, err := cc.checksumNoFollow(ctx, m, p)
- if err != nil {
- return "", err
- }
- if cr.Type == CacheRecordTypeSymlink && follow {
- link := cr.Linkname
- if !path.IsAbs(cr.Linkname) {
- link = path.Join(path.Dir(p), link)
- }
- i++
- p = link
- } else {
- return cr.Digest, nil
- }
- }
-}
-
func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, opts ChecksumOpts) ([]*includedPath, error) {
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -476,12 +453,12 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
}
root := cc.tree.Root()
- scan, err := cc.needsScan(root, "")
+ scan, err := cc.needsScan(root, "", false)
if err != nil {
return nil, err
}
if scan {
- if err := cc.scanPath(ctx, m, ""); err != nil {
+ if err := cc.scanPath(ctx, m, "", false); err != nil {
return nil, err
}
}
@@ -534,13 +511,13 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
}
} else {
origPrefix = p
- k = convertPathToKey([]byte(origPrefix))
+ k = convertPathToKey(origPrefix)
// We need to resolve symlinks here, in case the base path
// involves a symlink. That will match fsutil behavior of
// calling functions such as stat and walk.
var cr *CacheRecord
- k, cr, err = getFollowLinks(root, k, true)
+ k, cr, err = getFollowLinks(root, k, false)
if err != nil {
return nil, err
}
@@ -552,7 +529,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
iter.SeekLowerBound(append(append([]byte{}, k...), 0))
}
- resolvedPrefix = string(convertKeyToPath(k))
+ resolvedPrefix = convertKeyToPath(k)
} else {
k, _, keyOk = iter.Next()
}
@@ -563,7 +540,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
)
for keyOk {
- fn := string(convertKeyToPath(k))
+ fn := convertKeyToPath(k)
// Convert the path prefix from what we found in the prefix
// tree to what the argument specified.
@@ -749,36 +726,12 @@ func wildcardPrefix(root *iradix.Node, p string) (string, []byte, bool, error) {
return "", nil, false, nil
}
- linksWalked := 0
- k, cr, err := getFollowLinksWalk(root, convertPathToKey([]byte(d1)), true, &linksWalked)
+ // Only resolve the final symlink component if there are components in the
+ // wildcard segment.
+ k, cr, err := getFollowLinks(root, convertPathToKey(d1), d2 != "")
if err != nil {
return "", k, false, err
}
-
- if d2 != "" && cr != nil && cr.Type == CacheRecordTypeSymlink {
- // getFollowLinks only handles symlinks in path
- // components before the last component, so
- // handle last component in d1 specially.
- resolved := string(convertKeyToPath(k))
- for {
- v, ok := root.Get(k)
-
- if !ok {
- return d1, k, false, nil
- }
- if v.(*CacheRecord).Type != CacheRecordTypeSymlink {
- break
- }
-
- linksWalked++
- if linksWalked > 255 {
- return "", k, false, errors.Errorf("too many links")
- }
-
- resolved := cleanLink(resolved, v.(*CacheRecord).Linkname)
- k = convertPathToKey([]byte(resolved))
- }
- }
return d1, k, cr != nil, nil
}
@@ -814,19 +767,22 @@ func containsWildcards(name string) bool {
return false
}
-func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
+func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string, followTrailing bool) (digest.Digest, error) {
p = keyPath(p)
+ k := convertPathToKey(p)
+ // Try to look up the path directly without doing a scan.
cc.mu.RLock()
if cc.txn == nil {
root := cc.tree.Root()
cc.mu.RUnlock()
- v, ok := root.Get(convertPathToKey([]byte(p)))
- if ok {
- cr := v.(*CacheRecord)
- if cr.Digest != "" {
- return cr, nil
- }
+
+ _, cr, err := getFollowLinks(root, k, followTrailing)
+ if err != nil {
+ return "", err
+ }
+ if cr != nil && cr.Digest != "" {
+ return cr.Digest, nil
}
} else {
cc.mu.RUnlock()
@@ -846,7 +802,11 @@ func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string
}
}()
- return cc.lazyChecksum(ctx, m, p)
+ cr, err := cc.scanChecksum(ctx, m, p, followTrailing)
+ if err != nil {
+ return "", err
+ }
+ return cr.Digest, nil
}
func (cc *cacheContext) commitActiveTransaction() {
@@ -854,7 +814,7 @@ func (cc *cacheContext) commitActiveTransaction() {
addParentToMap(d, cc.dirtyMap)
}
for d := range cc.dirtyMap {
- k := convertPathToKey([]byte(d))
+ k := convertPathToKey(d)
if _, ok := cc.txn.Get(k); ok {
cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir})
}
@@ -865,21 +825,21 @@ func (cc *cacheContext) commitActiveTransaction() {
cc.txn = nil
}
-func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
+func (cc *cacheContext) scanChecksum(ctx context.Context, m *mount, p string, followTrailing bool) (*CacheRecord, error) {
root := cc.tree.Root()
- scan, err := cc.needsScan(root, p)
+ scan, err := cc.needsScan(root, p, followTrailing)
if err != nil {
return nil, err
}
if scan {
- if err := cc.scanPath(ctx, m, p); err != nil {
+ if err := cc.scanPath(ctx, m, p, followTrailing); err != nil {
return nil, err
}
}
- k := convertPathToKey([]byte(p))
+ k := convertPathToKey(p)
txn := cc.tree.Txn()
root = txn.Root()
- cr, updated, err := cc.checksum(ctx, root, txn, m, k, true)
+ cr, updated, err := cc.checksum(ctx, root, txn, m, k, followTrailing)
if err != nil {
return nil, err
}
@@ -888,9 +848,9 @@ func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*
return cr, err
}
-func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, follow bool) (*CacheRecord, bool, error) {
+func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, followTrailing bool) (*CacheRecord, bool, error) {
origk := k
- k, cr, err := getFollowLinks(root, k, follow)
+ k, cr, err := getFollowLinks(root, k, followTrailing)
if err != nil {
return nil, false, err
}
@@ -916,7 +876,9 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
}
h.Write(bytes.TrimPrefix(subk, k))
- subcr, _, err := cc.checksum(ctx, root, txn, m, subk, true)
+ // We do not follow trailing links when checksumming a directory's
+ // contents.
+ subcr, _, err := cc.checksum(ctx, root, txn, m, subk, false)
if err != nil {
return nil, false, err
}
@@ -933,7 +895,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
dgst = digest.NewDigest(digest.SHA256, h)
default:
- p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))
+ p := convertKeyToPath(bytes.TrimSuffix(k, []byte{0}))
target, err := m.mount(ctx)
if err != nil {
@@ -965,42 +927,82 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
return cr2, true, nil
}
-// needsScan returns false if path is in the tree or a parent path is in tree
-// and subpath is missing
-func (cc *cacheContext) needsScan(root *iradix.Node, p string) (bool, error) {
- var linksWalked int
- return cc.needsScanFollow(root, p, &linksWalked)
+// pathSet is a set of path prefixes that can be used to see if a given path is
+// lexically a child of any path in the set. All paths provided to this set
+// MUST be absolute and use / as the separator.
+type pathSet struct {
+ // prefixes contains paths of the form "/a/b/", so that we correctly detect
+ // /a/b as being a parent of /a/b/c but not /a/bc.
+ prefixes []string
}
-func (cc *cacheContext) needsScanFollow(root *iradix.Node, p string, linksWalked *int) (bool, error) {
- if p == "/" {
- p = ""
- }
- v, ok := root.Get(convertPathToKey([]byte(p)))
- if !ok {
- if p == "" {
- return true, nil
+// add a path to the set. This is a no-op if includes(path) == true.
+func (s *pathSet) add(p string) {
+ // Ensure the path is absolute and clean.
+ p = path.Join("/", p)
+ if !s.includes(p) {
+ if p != "/" {
+ p += "/"
}
- return cc.needsScanFollow(root, path.Clean(path.Dir(p)), linksWalked)
+ s.prefixes = append(s.prefixes, p)
+ }
+}
+
+// includes returns true iff there is a path in the pathSet which is a lexical
+// parent of the given path. The provided path MUST be an absolute path and
+// MUST NOT contain any ".." components, as they will be path.Clean'd.
+func (s pathSet) includes(p string) bool {
+ // Ensure the path is absolute and clean.
+ p = path.Join("/", p)
+ if p != "/" {
+ p += "/"
}
- cr := v.(*CacheRecord)
- if cr.Type == CacheRecordTypeSymlink {
- if *linksWalked > 255 {
- return false, errTooManyLinks
+ for _, prefix := range s.prefixes {
+ if strings.HasPrefix(p, prefix) {
+ return true
}
- *linksWalked++
- link := path.Clean(cr.Linkname)
- if !path.IsAbs(cr.Linkname) {
- link = path.Join("/", path.Dir(p), link)
+ }
+ return false
+}
+
+// needsScan returns false if the path is in the tree, or if a parent path is
+// in the tree and the subpath is missing.
+func (cc *cacheContext) needsScan(root *iradix.Node, path string, followTrailing bool) (bool, error) {
+ var (
+ goodPaths pathSet
+ hasParentInTree bool
+ )
+ k := convertPathToKey(path)
+ _, cr, err := getFollowLinksCallback(root, k, followTrailing, func(subpath string, cr *CacheRecord) error {
+ // If we found a path that exists in the cache, add it to the set of
+ // known-scanned paths. Otherwise, verify whether the not-found subpath
+ // is inside a known-scanned path (we might have hit a "..", taking us
+ // out of the scanned paths, or we might hit a non-existent path inside
+ // a scanned path). getFollowLinksCallback iterates left-to-right, so
+ // we will always hit ancestors first.
+ if cr != nil {
+ hasParentInTree = cr.Type != CacheRecordTypeSymlink
+ goodPaths.add(subpath)
+ } else {
+ hasParentInTree = goodPaths.includes(subpath)
}
- return cc.needsScanFollow(root, link, linksWalked)
+ return nil
+ })
+ if err != nil {
+ return false, err
}
- return false, nil
+ return cr == nil && !hasParentInTree, nil
}
-func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) {
+// Only used by TestNeedScanChecksumRegression to make sure scanPath is not
+// called for paths we have already scanned.
+var (
+ scanCounterEnable bool
+ scanCounter atomic.Uint64
+)
+
+func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string, followTrailing bool) (retErr error) {
p = path.Join("/", p)
- d, _ := path.Split(p)
mp, err := m.mount(ctx)
if err != nil {
@@ -1010,33 +1012,42 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
n := cc.tree.Root()
txn := cc.tree.Txn()
- parentPath, err := rootPath(mp, filepath.FromSlash(d), func(p, link string) error {
+ resolvedPath, err := rootPath(mp, filepath.FromSlash(p), followTrailing, func(p, link string) error {
cr := &CacheRecord{
Type: CacheRecordTypeSymlink,
Linkname: filepath.ToSlash(link),
}
- k := []byte(path.Join("/", filepath.ToSlash(p)))
- k = convertPathToKey(k)
- txn.Insert(k, cr)
+ p = path.Join("/", filepath.ToSlash(p))
+ txn.Insert(convertPathToKey(p), cr)
return nil
})
if err != nil {
return err
}
- err = filepath.Walk(parentPath, func(itemPath string, fi os.FileInfo, err error) error {
+ // Scan the parent directory of the path we resolved, unless we're at the
+ // root (in which case we scan the root).
+ scanPath := filepath.Dir(resolvedPath)
+ if !strings.HasPrefix(filepath.ToSlash(scanPath)+"/", filepath.ToSlash(mp)+"/") {
+ scanPath = resolvedPath
+ }
+
+ err = filepath.Walk(scanPath, func(itemPath string, fi os.FileInfo, err error) error {
+ if scanCounterEnable {
+ scanCounter.Add(1)
+ }
if err != nil {
+ // If the root doesn't exist, ignore the error.
+ if itemPath == scanPath && errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
return errors.Wrapf(err, "failed to walk %s", itemPath)
}
rel, err := filepath.Rel(mp, itemPath)
if err != nil {
return err
}
- k := []byte(path.Join("/", filepath.ToSlash(rel)))
- if string(k) == "/" {
- k = []byte{}
- }
- k = convertPathToKey(k)
+ k := convertPathToKey(keyPath(rel))
if _, ok := n.Get(k); !ok {
cr := &CacheRecord{
Type: CacheRecordTypeFile,
@@ -1069,55 +1080,118 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
return nil
}
-func getFollowLinks(root *iradix.Node, k []byte, follow bool) ([]byte, *CacheRecord, error) {
- var linksWalked int
- return getFollowLinksWalk(root, k, follow, &linksWalked)
+// followLinksCallback is called after we try to resolve each element. If the
+// path was not found, cr is nil.
+type followLinksCallback func(path string, cr *CacheRecord) error
+
+// getFollowLinks is shorthand for getFollowLinksCallback(..., nil).
+func getFollowLinks(root *iradix.Node, k []byte, followTrailing bool) ([]byte, *CacheRecord, error) {
+ return getFollowLinksCallback(root, k, followTrailing, nil)
}
-func getFollowLinksWalk(root *iradix.Node, k []byte, follow bool, linksWalked *int) ([]byte, *CacheRecord, error) {
+// getFollowLinksCallback looks up the requested key, fully resolving any
+// symlink components encountered. The implementation is heavily based on
+// <https://github.com/cyphar/filepath-securejoin>.
+//
+// followTrailing indicates whether the *final component* of the path should be
+// resolved (effectively O_PATH|O_NOFOLLOW). Note that (in contrast to some
+// Linux APIs), followTrailing is obeyed even if the key has a trailing slash
+// (though paths like "foo/link/." will cause the link to be resolved).
+//
+// cb is a callback that is called for each path component encountered during
+// path resolution (after the path component is looked up in the cache). This
+// means for a path like /a/b/c, the callback will be called for at least
+//
+// {/, /a, /a/b, /a/b/c}
+//
+// Note that if any of the components are symlinks, the paths will depend on
+// the symlink contents and there will be more callbacks. If the requested key
+// has a trailing slash, the callback will also be called for the final
+// trailing-slash lookup (/a/b/c/ in the above example). Note that
+// getFollowLinksCallback will try to look up the original key directly first
+// and the callback is not called for this first lookup.
+func getFollowLinksCallback(root *iradix.Node, k []byte, followTrailing bool, cb followLinksCallback) ([]byte, *CacheRecord, error) {
v, ok := root.Get(k)
- if ok {
+ if ok && (!followTrailing || v.(*CacheRecord).Type != CacheRecordTypeSymlink) {
return k, v.(*CacheRecord), nil
}
- if !follow || len(k) == 0 {
+ if len(k) == 0 {
return k, nil, nil
}
- dir, file := splitKey(k)
+ var (
+ currentPath = "/"
+ remainingPath = convertKeyToPath(k)
+ linksWalked int
+ cr *CacheRecord
+ )
+ // Trailing slashes are significant for the cache, but path.Clean strips
+ // them. We only care about the slash for the final lookup.
+ remainingPath, hadTrailingSlash := strings.CutSuffix(remainingPath, "/")
+ for remainingPath != "" {
+ // Get next component.
+ var part string
+ if i := strings.IndexRune(remainingPath, '/'); i == -1 {
+ part, remainingPath = remainingPath, ""
+ } else {
+ part, remainingPath = remainingPath[:i], remainingPath[i+1:]
+ }
- k, parent, err := getFollowLinksWalk(root, dir, follow, linksWalked)
- if err != nil {
- return nil, nil, err
- }
- if parent != nil {
- if parent.Type == CacheRecordTypeSymlink {
- *linksWalked++
- if *linksWalked > 255 {
- return nil, nil, errors.Errorf("too many links")
+ // Apply the component to the path. Since it is a single component, and
+ // our current path contains no symlinks, we can just apply it
+ // lexically.
+ nextPath := keyPath(path.Join("/", currentPath, part))
+ // In contrast to rootPath, we don't skip lookups for no-op components
+ // or / because we need to call the callback for every path component
+ // we hit (including /) and we need to make sure that the CacheRecord
+ // we return is correct after every iteration.
+
+ cr = nil
+ v, ok := root.Get(convertPathToKey(nextPath))
+ if ok {
+ cr = v.(*CacheRecord)
+ }
+ if cb != nil {
+ if err := cb(nextPath, cr); err != nil {
+ return nil, nil, err
}
+ }
+ if !ok || cr.Type != CacheRecordTypeSymlink {
+ currentPath = nextPath
+ continue
+ }
+ if !followTrailing && remainingPath == "" {
+ currentPath = nextPath
+ break
+ }
- link := cleanLink(string(convertKeyToPath(dir)), parent.Linkname)
- return getFollowLinksWalk(root, append(convertPathToKey([]byte(link)), file...), follow, linksWalked)
+ linksWalked++
+ if linksWalked > maxSymlinkLimit {
+ return nil, nil, errTooManyLinks
}
- }
- k = append(k, file...)
- v, ok = root.Get(k)
- if ok {
- return k, v.(*CacheRecord), nil
- }
- return k, nil, nil
-}
-func cleanLink(dir, linkname string) string {
- dirPath := path.Clean(dir)
- if dirPath == "." || dirPath == "/" {
- dirPath = ""
+ remainingPath = cr.Linkname + "/" + remainingPath
+ if path.IsAbs(cr.Linkname) {
+ currentPath = "/"
+ }
}
- link := path.Clean(linkname)
- if !path.IsAbs(link) {
- return path.Join("/", path.Join(path.Dir(dirPath), link))
+ // We've already looked up the final component. However, if there was a
+ // trailing slash in the original path, we need to do the lookup again with
+ // the slash applied.
+ if hadTrailingSlash {
+ cr = nil
+ currentPath += "/"
+ v, ok := root.Get(convertPathToKey(currentPath))
+ if ok {
+ cr = v.(*CacheRecord)
+ }
+ if cb != nil {
+ if err := cb(currentPath, cr); err != nil {
+ return nil, nil, err
+ }
+ }
}
- return link
+ return convertPathToKey(currentPath), cr, nil
}
func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) {
@@ -1174,25 +1248,10 @@ func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) {
return
}
-func convertPathToKey(p []byte) []byte {
+func convertPathToKey(p string) []byte {
return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1)
}
-func convertKeyToPath(p []byte) []byte {
- return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1)
-}
-
-func splitKey(k []byte) ([]byte, []byte) {
- foundBytes := false
- i := len(k) - 1
- for {
- if i <= 0 || foundBytes && k[i] == 0 {
- break
- }
- if k[i] != 0 {
- foundBytes = true
- }
- i--
- }
- return append([]byte{}, k[:i]...), k[i:]
+func convertKeyToPath(p []byte) string {
+ return string(bytes.Replace(p, []byte{0}, []byte("/"), -1))
}
diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/path.go b/vendor/github.com/moby/buildkit/cache/contenthash/path.go
index 42b7fd8349c7..ae950f713241 100644
--- a/vendor/github.com/moby/buildkit/cache/contenthash/path.go
+++ b/vendor/github.com/moby/buildkit/cache/contenthash/path.go
@@ -1,108 +1,111 @@
+// This code mostly comes from <https://github.com/cyphar/filepath-securejoin>.
+
+// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package contenthash
import (
"os"
"path/filepath"
+ "strings"
"github.com/pkg/errors"
)
-var (
- errTooManyLinks = errors.New("too many links")
-)
+var errTooManyLinks = errors.New("too many links")
+
+const maxSymlinkLimit = 255
type onSymlinkFunc func(string, string) error
-// rootPath joins a path with a root, evaluating and bounding any
-// symlink to the root directory.
-// This is containerd/continuity/fs RootPath implementation with a callback on
-// resolving the symlink.
-func rootPath(root, path string, cb onSymlinkFunc) (string, error) {
- if path == "" {
+// rootPath joins a path with a root, evaluating and bounding any symlink to
+// the root directory. This is a slightly modified version of SecureJoin from
+// github.com/cyphar/filepath-securejoin, with a callback which we call after
+// each symlink resolution.
+func rootPath(root, unsafePath string, followTrailing bool, cb onSymlinkFunc) (string, error) {
+ if unsafePath == "" {
return root, nil
}
- var linksWalked int // to protect against cycles
- for {
- i := linksWalked
- newpath, err := walkLinks(root, path, &linksWalked, cb)
- if err != nil {
- return "", err
- }
- path = newpath
- if i == linksWalked {
- newpath = filepath.Join("/", newpath)
- if path == newpath {
- return filepath.Join(root, newpath), nil
- }
- path = newpath
- }
- }
-}
-func walkLink(root, path string, linksWalked *int, cb onSymlinkFunc) (newpath string, islink bool, err error) {
- if *linksWalked > 255 {
- return "", false, errTooManyLinks
- }
+ unsafePath = filepath.FromSlash(unsafePath)
+ var (
+ currentPath string
+ linksWalked int
+ )
+ for unsafePath != "" {
+ // Windows-specific: remove any drive letters from the path.
+ if v := filepath.VolumeName(unsafePath); v != "" {
+ unsafePath = unsafePath[len(v):]
+ }
- path = filepath.Join("/", path)
- if path == "/" {
- return path, false, nil
- }
- realPath := filepath.Join(root, path)
+ // Remove any unnecessary trailing slashes.
+ unsafePath = strings.TrimSuffix(unsafePath, string(filepath.Separator))
- fi, err := os.Lstat(realPath)
- if err != nil {
- // If path does not yet exist, treat as non-symlink
- if errors.Is(err, os.ErrNotExist) {
- return path, false, nil
+ // Get the next path component.
+ var part string
+ if i := strings.IndexRune(unsafePath, filepath.Separator); i == -1 {
+ part, unsafePath = unsafePath, ""
+ } else {
+ part, unsafePath = unsafePath[:i], unsafePath[i+1:]
}
- return "", false, err
- }
- if fi.Mode()&os.ModeSymlink == 0 {
- return path, false, nil
- }
- newpath, err = os.Readlink(realPath)
- if err != nil {
- return "", false, err
- }
- if cb != nil {
- if err := cb(path, newpath); err != nil {
- return "", false, err
- }
- }
- *linksWalked++
- return newpath, true, nil
-}
-func walkLinks(root, path string, linksWalked *int, cb onSymlinkFunc) (string, error) {
- switch dir, file := filepath.Split(path); {
- case dir == "":
- newpath, _, err := walkLink(root, file, linksWalked, cb)
- return newpath, err
- case file == "":
- if os.IsPathSeparator(dir[len(dir)-1]) {
- if dir == "/" {
- return dir, nil
- }
- return walkLinks(root, dir[:len(dir)-1], linksWalked, cb)
+ // Apply the component lexically to the path we are building. path does
+ // not contain any symlinks, and we are lexically dealing with a single
+ // component, so it's okay to do filepath.Clean here.
+ nextPath := filepath.Join(string(filepath.Separator), currentPath, part)
+ if nextPath == string(filepath.Separator) {
+ // If we end up back at the root, we don't need to re-evaluate /.
+ currentPath = ""
+ continue
}
- newpath, _, err := walkLink(root, dir, linksWalked, cb)
- return newpath, err
- default:
- newdir, err := walkLinks(root, dir, linksWalked, cb)
- if err != nil {
+ fullPath := root + string(filepath.Separator) + nextPath
+
+ // Figure out whether the path is a symlink.
+ fi, err := os.Lstat(fullPath)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
return "", err
}
- newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked, cb)
+ // Treat non-existent path components the same as non-symlinks (we
+ // can't do any better here).
+ if errors.Is(err, os.ErrNotExist) || fi.Mode()&os.ModeSymlink == 0 {
+ currentPath = nextPath
+ continue
+ }
+ // Don't resolve the final component with !followTrailing.
+ if !followTrailing && unsafePath == "" {
+ currentPath = nextPath
+ break
+ }
+
+ // It's a symlink, so get its contents and expand it by prepending it
+ // to the yet-unparsed path.
+ linksWalked++
+ if linksWalked > maxSymlinkLimit {
+ return "", errTooManyLinks
+ }
+
+ dest, err := os.Readlink(fullPath)
if err != nil {
return "", err
}
- if !islink {
- return newpath, nil
+ if cb != nil {
+ if err := cb(nextPath, dest); err != nil {
+ return "", err
+ }
}
- if filepath.IsAbs(newpath) {
- return newpath, nil
+
+ unsafePath = dest + string(filepath.Separator) + unsafePath
+ // Absolute symlinks reset any work we've already done.
+ if filepath.IsAbs(dest) {
+ currentPath = ""
}
- return filepath.Join(newdir, newpath), nil
}
+
+ // There should be no lexical components left in path here, but just for
+ // safety do a filepath.Clean before the join.
+ finalPath := filepath.Join(string(filepath.Separator), currentPath)
+ return filepath.Join(root, finalPath), nil
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9adbc22b99fc..27bc31dfd397 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -577,7 +577,7 @@ github.com/mistifyio/go-zfs/v3
# github.com/mitchellh/hashstructure/v2 v2.0.2
## explicit; go 1.14
github.com/mitchellh/hashstructure/v2
-# github.com/moby/buildkit v0.11.7-0.20240124010513-435cb77e369c => github.com/SUSE/buildkit v0.0.0-20241218053907-cd804dd86389
+# github.com/moby/buildkit v0.11.7-0.20240124010513-435cb77e369c => github.com/SUSE/buildkit v0.0.0-20241218053911-6b814972ef19
## explicit; go 1.18
github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types
@@ -1313,4 +1313,4 @@ k8s.io/klog/v2/internal/severity
# resenje.org/singleflight v0.3.0
## explicit; go 1.18
resenje.org/singleflight
-# github.com/moby/buildkit => github.com/SUSE/buildkit v0.0.0-20241218053907-cd804dd86389
+# github.com/moby/buildkit => github.com/SUSE/buildkit v0.0.0-20241218053911-6b814972ef19
--
2.47.1
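
The rewritten `rootPath` above mirrors SecureJoin: it resolves `unsafePath` one
component at a time, keeps the already-resolved prefix in `currentPath`,
re-queues symlink targets onto the unparsed remainder, and enforces a hard cap
on the number of links followed. Below is a minimal standalone sketch of that
loop, assuming Unix paths and eliding the patch's `onSymlinkFunc` callback,
trailing-slash handling, and Windows volume stripping:

```
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

const maxSymlinkLimit = 255

var errTooManyLinks = errors.New("too many links")

// rootPath joins unsafePath with root, resolving symlinks component by
// component so that no intermediate symlink can escape the root.
func rootPath(root, unsafePath string) (string, error) {
	var currentPath string
	linksWalked := 0
	for unsafePath != "" {
		// Pop the next component off the unparsed remainder.
		var part string
		if i := strings.IndexByte(unsafePath, '/'); i == -1 {
			part, unsafePath = unsafePath, ""
		} else {
			part, unsafePath = unsafePath[:i], unsafePath[i+1:]
		}
		// Apply the component lexically; ".." cannot climb above "/".
		nextPath := filepath.Join("/", currentPath, part)
		fi, err := os.Lstat(filepath.Join(root, nextPath))
		if err != nil && !errors.Is(err, os.ErrNotExist) {
			return "", err
		}
		// Treat missing components like regular (non-link) files.
		if err != nil || fi.Mode()&os.ModeSymlink == 0 {
			currentPath = nextPath
			continue
		}
		linksWalked++
		if linksWalked > maxSymlinkLimit {
			return "", errTooManyLinks
		}
		dest, err := os.Readlink(filepath.Join(root, nextPath))
		if err != nil {
			return "", err
		}
		// Prepend the target onto the remainder; an absolute target
		// throws away everything resolved so far.
		unsafePath = dest + "/" + unsafePath
		if filepath.IsAbs(dest) {
			currentPath = ""
		}
	}
	return filepath.Join(root, "/", currentPath), nil
}

func main() {
	resolved, err := rootPath("/tmp/rootfs", "etc/../usr/share/doc")
	fmt.Println(resolved, err)
}
```

Note how an absolute symlink target simply resets `currentPath`, so a link to
`/etc` resolves to `<root>/etc` rather than the host's `/etc`.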

View File

@ -0,0 +1,54 @@
From 12c8b7a22f7140b5b4d2c87a7e5d70da082fe558 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <cyphar@cyphar.com>
Date: Wed, 19 Jun 2024 16:30:49 +1000
Subject: [PATCH 08/11] bsc1214855: volume: use AtomicWriteFile to save volume
options
If the system (or Docker) crashes while saving the volume options, on
restart the daemon will error out when trying to read the options file
because it doesn't contain valid JSON.
In such a crash scenario, the new volume will be treated as though it
has the default options configuration. This is not ideal, but volumes
created on very old Docker versions (pre-1.11[1], circa 2016) do not
have opts.json and so doing some kind of cleanup when loading the volume
store (even if we take care to only delete empty volumes) could delete
existing volumes carried over from very old Docker versions that users
would not expect to disappear.
Ultimately, if a user creates a volume and the system crashes, a volume
that has the wrong config is better than Docker not being able to start.
[1]: commit b05b2370757d ("Support mount opts for `local` volume driver")
SUSE-Bugs: https://bugzilla.suse.com/show_bug.cgi?id=1214855
(Cherry-picked from commit b4c20da143502e5fc21cc4996b63e83691c515bf.)
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
volume/local/local.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/volume/local/local.go b/volume/local/local.go
index b4f3a3669a84..077b26f1b813 100644
--- a/volume/local/local.go
+++ b/volume/local/local.go
@@ -16,6 +16,7 @@ import (
"github.com/docker/docker/daemon/names"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/quota"
"github.com/docker/docker/volume"
"github.com/pkg/errors"
@@ -381,7 +382,7 @@ func (v *localVolume) saveOpts() error {
if err != nil {
return err
}
- err = os.WriteFile(filepath.Join(v.rootPath, "opts.json"), b, 0600)
+ err = ioutils.AtomicWriteFile(filepath.Join(v.rootPath, "opts.json"), b, 0o600)
if err != nil {
return errdefs.System(errors.Wrap(err, "error while persisting volume options"))
}
--
2.47.1
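
The helper the patch switches to relies on the classic
write-to-temporary-then-rename pattern: because `rename(2)` is atomic within a
single filesystem, a reader (or the daemon restarting after a crash) sees
either the old `opts.json` or the complete new one, never a half-written file.
A minimal sketch of such a helper, for illustration only (this is not the
actual `ioutils.AtomicWriteFile` implementation, and the JSON payload below is
a made-up placeholder):

```
package main

import (
	"os"
	"path/filepath"
)

// atomicWriteFile writes data to a temporary file in the target directory,
// flushes it, and only then renames it over the destination.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(filename)
	// Create the temporary file in the same directory so the final
	// rename(2) stays on one filesystem and is atomic.
	f, err := os.CreateTemp(dir, ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	tmp := f.Name()
	defer os.Remove(tmp) // best-effort cleanup; a no-op after the rename
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	// Make sure the contents hit the disk before the new name is visible.
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp, perm); err != nil {
		return err
	}
	return os.Rename(tmp, filename)
}

func main() {
	// Illustrative payload only, not the real volume options schema.
	if err := atomicWriteFile("opts.json", []byte(`{"MountOpts":""}`), 0o600); err != nil {
		panic(err)
	}
}
```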

View File

@ -0,0 +1,209 @@
From 49605be604df94e216168288cdbcae0fda04d641 Mon Sep 17 00:00:00 2001
From: Jameson Hyde <jameson.hyde@docker.com>
Date: Mon, 26 Nov 2018 14:15:22 -0500
Subject: [PATCH 09/11] CVE-2024-41110: AuthZ plugin securty fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This is a backport of the following upstream patches:
* 2ac8a479c53d ("Authz plugin security fixes for 0-length content and path validation")
* 5282cb25d09d ("If url includes scheme, urlPath will drop hostname, which would not match the auth check")
Signed-off-by: Jameson Hyde <jameson.hyde@docker.com>
Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
pkg/authorization/authz.go | 38 +++++++++++--
pkg/authorization/authz_unix_test.go | 84 +++++++++++++++++++++++++++-
2 files changed, 115 insertions(+), 7 deletions(-)
diff --git a/pkg/authorization/authz.go b/pkg/authorization/authz.go
index 590ac8dddd88..68ed8bbdaf97 100644
--- a/pkg/authorization/authz.go
+++ b/pkg/authorization/authz.go
@@ -7,6 +7,8 @@ import (
"io"
"mime"
"net/http"
+ "net/url"
+ "regexp"
"strings"
"github.com/docker/docker/pkg/ioutils"
@@ -52,10 +54,23 @@ type Ctx struct {
authReq *Request
}
+func isChunked(r *http.Request) bool {
+ // RFC 7230 specifies that content length is to be ignored if Transfer-Encoding is chunked
+ if strings.EqualFold(r.Header.Get("Transfer-Encoding"), "chunked") {
+ return true
+ }
+ for _, v := range r.TransferEncoding {
+ if strings.EqualFold(v, "chunked") {
+ return true
+ }
+ }
+ return false
+}
+
// AuthZRequest authorized the request to the docker daemon using authZ plugins
func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error {
var body []byte
- if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize {
+ if sendBody(ctx.requestURI, r.Header) && (r.ContentLength > 0 || isChunked(r)) && r.ContentLength < maxBodySize {
var err error
body, r.Body, err = drainBody(r.Body)
if err != nil {
@@ -108,7 +123,6 @@ func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error {
if sendBody(ctx.requestURI, rm.Header()) {
ctx.authReq.ResponseBody = rm.RawBody()
}
-
for _, plugin := range ctx.plugins {
logrus.Debugf("AuthZ response using plugin %s", plugin.Name())
@@ -146,10 +160,26 @@ func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) {
return nil, newBody, err
}
+func isAuthEndpoint(urlPath string) (bool, error) {
+ // eg www.test.com/v1.24/auth/optional?optional1=something&optional2=something (version optional)
+ matched, err := regexp.MatchString(`^[^\/]*\/(v\d[\d\.]*\/)?auth.*`, urlPath)
+ if err != nil {
+ return false, err
+ }
+ return matched, nil
+}
+
// sendBody returns true when request/response body should be sent to AuthZPlugin
-func sendBody(url string, header http.Header) bool {
+func sendBody(inURL string, header http.Header) bool {
+ u, err := url.Parse(inURL)
+ // Assume no if the URL cannot be parsed - an empty request will still be forwarded to the plugin and should be rejected
+ if err != nil {
+ return false
+ }
+
// Skip body for auth endpoint
- if strings.HasSuffix(url, "/auth") {
+ isAuth, err := isAuthEndpoint(u.Path)
+ if isAuth || err != nil {
return false
}
diff --git a/pkg/authorization/authz_unix_test.go b/pkg/authorization/authz_unix_test.go
index 835cb703839b..8bfe44e1a840 100644
--- a/pkg/authorization/authz_unix_test.go
+++ b/pkg/authorization/authz_unix_test.go
@@ -175,8 +175,8 @@ func TestDrainBody(t *testing.T) {
func TestSendBody(t *testing.T) {
var (
- url = "nothing.com"
testcases = []struct {
+ url string
contentType string
expected bool
}{
@@ -220,15 +220,93 @@ func TestSendBody(t *testing.T) {
contentType: "",
expected: false,
},
+ {
+ url: "nothing.com/auth",
+ contentType: "",
+ expected: false,
+ },
+ {
+ url: "nothing.com/auth",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "nothing.com/auth?p1=test",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "nothing.com/test?p1=/auth",
+ contentType: "application/json;charset=UTF8",
+ expected: true,
+ },
+ {
+ url: "nothing.com/something/auth",
+ contentType: "application/json;charset=UTF8",
+ expected: true,
+ },
+ {
+ url: "nothing.com/auth/test",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "nothing.com/v1.24/auth/test",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "nothing.com/v1/auth/test",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "www.nothing.com/v1.24/auth/test",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "https://www.nothing.com/v1.24/auth/test",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "http://nothing.com/v1.24/auth/test",
+ contentType: "application/json;charset=UTF8",
+ expected: false,
+ },
+ {
+ url: "www.nothing.com/test?p1=/auth",
+ contentType: "application/json;charset=UTF8",
+ expected: true,
+ },
+ {
+ url: "http://www.nothing.com/test?p1=/auth",
+ contentType: "application/json;charset=UTF8",
+ expected: true,
+ },
+ {
+ url: "www.nothing.com/something/auth",
+ contentType: "application/json;charset=UTF8",
+ expected: true,
+ },
+ {
+ url: "https://www.nothing.com/something/auth",
+ contentType: "application/json;charset=UTF8",
+ expected: true,
+ },
}
)
for _, testcase := range testcases {
header := http.Header{}
header.Set("Content-Type", testcase.contentType)
+ if testcase.url == "" {
+ testcase.url = "nothing.com"
+ }
- if b := sendBody(url, header); b != testcase.expected {
- t.Fatalf("Unexpected Content-Type; Expected: %t, Actual: %t", testcase.expected, b)
+ if b := sendBody(testcase.url, header); b != testcase.expected {
+ t.Fatalf("sendBody failed: url: %s, content-type: %s; Expected: %t, Actual: %t", testcase.url, testcase.contentType, testcase.expected, b)
}
}
}
--
2.47.1
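
The crux of the fix is that the auth-endpoint check now runs against the
parsed URL path instead of the raw string, so a scheme/hostname prefix can no
longer cause a versioned `/auth` path to be missed, and an `/auth` appearing
only in the query string no longer counts. A small standalone reproduction of
the matcher, using the same regular expression as the patch (error handling
simplified to a plain boolean):

```
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// Same expression as the patch: an optional API-version segment followed by
// an "auth" component, matched against the URL path only.
var authRe = regexp.MustCompile(`^[^\/]*\/(v\d[\d\.]*\/)?auth.*`)

func isAuthEndpoint(rawURL string) bool {
	u, err := url.Parse(rawURL)
	if err != nil {
		return false
	}
	return authRe.MatchString(u.Path)
}

func main() {
	for _, tc := range []string{
		"nothing.com/auth",                     // auth endpoint -> true
		"https://www.nothing.com/v1.24/auth/x", // scheme + version -> true
		"nothing.com/test?p1=/auth",            // "/auth" only in query -> false
	} {
		fmt.Printf("%-40s %v\n", tc, isAuthEndpoint(tc))
	}
}
```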

View File

@ -0,0 +1,139 @@
From 60abff4c864c08b4ea05d96a304f6cf3f0cca787 Mon Sep 17 00:00:00 2001
From: Albin Kerouanton <albinker@gmail.com>
Date: Tue, 10 Oct 2023 01:13:25 +0200
Subject: [PATCH 10/11] CVE-2024-29018: libnet: Don't forward to upstream
resolvers on internal nw
Commit cbc2a71c2 makes `connect` syscall fail fast when a container is
only attached to an internal network. Thanks to that, if such a
container tries to resolve an "external" domain, the embedded resolver
returns an error immediately instead of waiting for a timeout.
This commit makes sure the embedded resolver doesn't even try to forward
to upstream servers.
Co-authored-by: Albin Kerouanton <albinker@gmail.com>
Signed-off-by: Rob Murray <rob.murray@docker.com>
(Cherry-picked from commit 790c3039d0ca5ed86ecd099b4b571496607628bc.)
[Drop test additions and test-related patches.]
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
libnetwork/endpoint.go | 12 +++++++++++-
libnetwork/resolver.go | 17 +++++++++++++----
libnetwork/sandbox_dns_unix.go | 6 +++++-
3 files changed, 29 insertions(+), 6 deletions(-)
diff --git a/libnetwork/endpoint.go b/libnetwork/endpoint.go
index b9903bb90188..b90500ce97a1 100644
--- a/libnetwork/endpoint.go
+++ b/libnetwork/endpoint.go
@@ -520,8 +520,13 @@ func (ep *Endpoint) sbJoin(sb *Sandbox, options ...EndpointOption) (err error) {
return sb.setupDefaultGW()
}
- moveExtConn := sb.getGatewayEndpoint() != extEp
+ currentExtEp := sb.getGatewayEndpoint()
+ // Enable upstream forwarding if the sandbox gained external connectivity.
+ if sb.resolver != nil {
+ sb.resolver.SetForwardingPolicy(currentExtEp != nil)
+ }
+ moveExtConn := currentExtEp != extEp
if moveExtConn {
if extEp != nil {
logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
@@ -751,6 +756,11 @@ func (ep *Endpoint) sbLeave(sb *Sandbox, force bool, options ...EndpointOption)
// New endpoint providing external connectivity for the sandbox
extEp = sb.getGatewayEndpoint()
+ // Disable upstream forwarding if the sandbox lost external connectivity.
+ if sb.resolver != nil {
+ sb.resolver.SetForwardingPolicy(extEp != nil)
+ }
+
if moveExtConn && extEp != nil {
logrus.Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
extN, err := extEp.getNetworkFromStore()
diff --git a/libnetwork/resolver.go b/libnetwork/resolver.go
index ab19b7b08fc0..70ca33b53590 100644
--- a/libnetwork/resolver.go
+++ b/libnetwork/resolver.go
@@ -7,6 +7,7 @@ import (
"net"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/docker/docker/libnetwork/types"
@@ -69,7 +70,7 @@ type Resolver struct {
tcpListen *net.TCPListener
err error
listenAddress string
- proxyDNS bool
+ proxyDNS atomic.Bool
startCh chan struct{}
logger *logrus.Logger
@@ -79,15 +80,17 @@ type Resolver struct {
// NewResolver creates a new instance of the Resolver
func NewResolver(address string, proxyDNS bool, backend DNSBackend) *Resolver {
- return &Resolver{
+ r := &Resolver{
backend: backend,
- proxyDNS: proxyDNS,
listenAddress: address,
err: fmt.Errorf("setup not done yet"),
startCh: make(chan struct{}, 1),
fwdSem: semaphore.NewWeighted(maxConcurrent),
logInverval: rate.Sometimes{Interval: logInterval},
}
+ r.proxyDNS.Store(proxyDNS)
+
+ return r
}
func (r *Resolver) log() *logrus.Logger {
@@ -192,6 +195,12 @@ func (r *Resolver) SetExtServers(extDNS []extDNSEntry) {
}
}
+// SetForwardingPolicy re-configures the embedded DNS resolver to either enable or disable forwarding DNS queries to
+// external servers.
+func (r *Resolver) SetForwardingPolicy(policy bool) {
+ r.proxyDNS.Store(policy)
+}
+
// NameServer returns the IP of the DNS resolver for the containers.
func (r *Resolver) NameServer() string {
return r.listenAddress
@@ -407,7 +416,7 @@ func (r *Resolver) serveDNS(w dns.ResponseWriter, query *dns.Msg) {
return
}
- if r.proxyDNS {
+ if r.proxyDNS.Load() {
// If the user sets ndots > 0 explicitly and the query is
// in the root domain don't forward it out. We will return
// failure and let the client retry with the search domain
diff --git a/libnetwork/sandbox_dns_unix.go b/libnetwork/sandbox_dns_unix.go
index 2218c6960e45..e3bb9abce93b 100644
--- a/libnetwork/sandbox_dns_unix.go
+++ b/libnetwork/sandbox_dns_unix.go
@@ -28,7 +28,11 @@ const (
func (sb *Sandbox) startResolver(restore bool) {
sb.resolverOnce.Do(func() {
var err error
- sb.resolver = NewResolver(resolverIPSandbox, true, sb)
+ // The resolver is started with proxyDNS=false if the sandbox does not currently
+ // have a gateway. So, if the Sandbox is only connected to an 'internal' network,
+ // it will not forward DNS requests to external resolvers. The resolver's
+ // proxyDNS setting is then updated as network Endpoints are added/removed.
+ sb.resolver = NewResolver(resolverIPSandbox, sb.getGatewayEndpoint() != nil, sb)
defer func() {
if err != nil {
sb.resolver = nil
--
2.47.1
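
Because the forwarding decision can now change at runtime (on endpoint
join/leave) while `serveDNS` reads it on every query, the patch turns
`proxyDNS` into a `sync/atomic` `Bool` rather than guarding a plain field with
a lock. A minimal sketch of the same pattern, with hypothetical names standing
in for the resolver internals:

```
package main

import (
	"fmt"
	"sync/atomic"
)

// resolver is a stand-in for libnetwork's Resolver: proxyDNS is read on
// every DNS query and may be flipped concurrently as endpoints join/leave.
type resolver struct {
	proxyDNS atomic.Bool
}

func (r *resolver) SetForwardingPolicy(policy bool) {
	r.proxyDNS.Store(policy)
}

func (r *resolver) serveQuery(name string) string {
	if !r.proxyDNS.Load() {
		// Internal-only network: never contact upstream servers.
		return "no upstream forwarding for " + name
	}
	return "forwarded " + name + " to external resolver"
}

func main() {
	r := &resolver{}
	r.SetForwardingPolicy(false) // sandbox attached only to an internal network
	fmt.Println(r.serveQuery("example.com"))
	r.SetForwardingPolicy(true) // sandbox gained a gateway endpoint
	fmt.Println(r.serveQuery("example.com"))
}
```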

View File

@ -0,0 +1,65 @@
From 0392c617b8e75f0b59a922f95c691fdd05eaf99f Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <cyphar@cyphar.com>
Date: Thu, 21 Nov 2024 20:00:07 +1100
Subject: [PATCH 11/11] TESTS: backport fixes for integration tests
We need a couple of patches to make the tests work on SLES:
* 143b3b2ef3d0 ("test: update registry version to latest")
* 1a453abfb172 ("integration-cli: don't skip AppArmor tests on SLES")
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
Dockerfile | 2 +-
integration-cli/requirements_test.go | 3 ---
testutil/registry/registry.go | 4 +++-
3 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 463d5cfc1a86..7a23962af09b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -59,7 +59,7 @@ WORKDIR /go/src/github.com/docker/distribution
# from the https://github.com/docker/distribution repository. This version of
# the registry is used to test both schema 1 and schema 2 manifests. Generally,
# the version specified here should match a current release.
-ARG REGISTRY_VERSION=v2.3.0
+ARG REGISTRY_VERSION=v2.8.2
# REGISTRY_VERSION_SCHEMA1 specifies the version of the registry to build and
# install from the https://github.com/docker/distribution repository. This is
# an older (pre v2.3.0) version of the registry that only supports schema1
diff --git a/integration-cli/requirements_test.go b/integration-cli/requirements_test.go
index 2313272d7704..e5f72397e1bc 100644
--- a/integration-cli/requirements_test.go
+++ b/integration-cli/requirements_test.go
@@ -85,9 +85,6 @@ func Network() bool {
}
func Apparmor() bool {
- if strings.HasPrefix(testEnv.DaemonInfo.OperatingSystem, "SUSE Linux Enterprise Server ") {
- return false
- }
buf, err := os.ReadFile("/sys/module/apparmor/parameters/enabled")
return err == nil && len(buf) > 1 && buf[0] == 'Y'
}
diff --git a/testutil/registry/registry.go b/testutil/registry/registry.go
index 9213db2ba21a..d8bfe17678a4 100644
--- a/testutil/registry/registry.go
+++ b/testutil/registry/registry.go
@@ -107,10 +107,12 @@ http:
}
binary := V2binary
+ args := []string{"serve", confPath}
if c.schema1 {
binary = V2binarySchema1
+ args = []string{confPath}
}
- cmd := exec.Command(binary, confPath)
+ cmd := exec.Command(binary, args...)
cmd.Stdout = c.stdout
cmd.Stderr = c.stderr
if err := cmd.Start(); err != nil {
--
2.47.1

5
80-docker.rules Normal file
View File

@ -0,0 +1,5 @@
# hide docker's loopback devices from udisks, and thus from user desktops
SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"

230
README_SUSE.md Normal file
View File

@ -0,0 +1,230 @@
# Abstract
Docker is a lightweight "virtualization" method to run multiple virtual units
(containers, akin to “chroot”) simultaneously on a single control host.
Containers are isolated with Kernel Control Groups (cgroups) and Kernel Namespaces.
Docker provides an operating system-level virtualization where the Kernel
controls the isolated containers. With other full virtualization solutions
like Xen, KVM, or libvirt, the processor simulates a complete hardware
environment and controls its virtual machines.
# Terminology
## chroot
A change root (chroot, or change root jail) is a section in the file system
which is isolated from the rest of the file system. For this purpose, the chroot
command is used to change the root of the file system. A program which is
executed in such a “chroot jail” cannot access files outside the designated
directory tree.
## cgroups
Kernel Control Groups (commonly referred to as just “cgroups”) are a Kernel
feature that allows aggregating or partitioning tasks (processes) and all their
children into hierarchically organized groups to isolate resources.
## Image
A "virtual machine" on the host server that can run any Linux system, for
example openSUSE, SUSE Linux Enterprise Desktop, or SUSE Linux Enterprise Server.
A Docker image is made by a series of layers built one over the other. Each layer
corresponds to a permanent change committed from a container to the image.
For more details, check out [Docker's official documentation](http://docs.docker.com/terms/image/).
## Image Name
A name that refers to an image. The name is used by the docker commands.
## Container
A running Docker Image.
## Container ID
An ID that refers to a container. The ID is used by the docker commands.
## TAG
A string associated with an Image. It is commonly used to identify a specific
version of an Image (like tags in version control systems). It is also possible
to refer to the same Image with different TAGs.
## Kernel Namespaces
A Kernel feature to isolate some resources like network, users, and others for
a group of processes.
## Docker Host Server
The system that runs the Docker daemon, provides the images, and provides
management and control capabilities through cgroups.
# Overview
Docker is a platform that allows developers and sysadmins to manage the complete
lifecycle of images.
Docker makes it incredibly easy to build, ship, and run images containing
applications.
Benefits of Docker:
* Isolating applications and operating systems through containers.
* Providing nearly native performance as Docker manages allocation of resources
in real-time.
* Controlling network interfaces and applying resources inside containers through cgroups.
* Versioning of images.
* Building images based on existing ones.
* Sharing/storing on [public](http://docs.docker.com/docker-hub/) or
[private](http://docs.docker.com/userguide/dockerrepos/#private-repositories)
repositories.
Limitations of Docker:
* All Docker containers run inside the host system's Kernel, not a different
Kernel.
* Only allows Linux "guest" operating systems.
* Docker is not a full virtualization stack like Xen, KVM, or libvirt.
* Security depends on the host system. Refer to the [official documentation](http://docs.docker.com/articles/security/)
for more details.
## Container drivers
Docker has different backend drivers to handle the containers. The recommended
one is [libcontainer](https://github.com/docker/libcontainer), which is also the
default choice. This driver provides direct access to cgroups.
The Docker package also ships an LXC driver, which handles containers using the
LXC tools.
At the time of writing, upstream is working on a `libvirt-lxc` driver.
## Storage drivers
Docker supports different storage drivers:
* `vfs`: this driver is automatically used when the Docker host filesystem
does not support copy-on-write. This is a simple driver which does not offer
some of the advantages of Docker (like sharing layers, more on that in the
next sections). It is highly reliable but also slow.
* `devicemapper`: this driver relies on the device-mapper thin provisioning
module. It supports copy-on-write, hence it offers all the advantages of
Docker.
* `btrfs`: this driver relies on Btrfs to provide all the features required
by Docker. To use this driver the `/var/lib/docker` directory must be on a
btrfs filesystem.
* `AUFS`: this driver relies on the AUFS union filesystem. Neither the upstream
kernel nor the SUSE one supports this filesystem. Hence the AUFS driver is
not built into the SUSE Docker package.
It is possible to specify which driver to use by changing the value of the
`DOCKER_OPTS` variable defined in the `/etc/sysconfig/docker` file.
This can be done either manually or using YaST by browsing to:
* System
* /etc/sysconfig Editor
* System
* Management
* DOCKER_OPTS
menu and entering the `-s storage_driver` string.
For example, to force the usage of the `devicemapper` driver
enter the following text:
```
DOCKER_OPTS="-s devicemapper"
```
It is recommended to have `/var/lib/docker` mounted on a separate filesystem
so that filesystem corruption does not affect the Docker host OS.
# Setting up a Docker host
Prepare the host:
1. Install the `docker` package.
2. Automatically start the Docker daemon at boot:
`sudo systemctl enable docker`
3. Start the Docker daemon:
`sudo systemctl start docker`
The Docker daemon listens on a local socket which is accessible only by the `root`
user and by the members of the `docker` group.
The `docker` group is automatically created at package installation time. To
allow a certain user to connect to the local Docker daemon use the following
command:
```
sudo /usr/sbin/usermod -aG docker <username>
```
The user will be able to communicate with the local Docker daemon upon their
next login.
## Networking
If you want your containers to be able to access the external network, you must
enable the `net.ipv4.ip_forward` kernel parameter.
This can be done using YaST by browsing to the
`Network Devices -> Network Settings -> Routing` menu and ensuring that the
`Enable IPv4 Forwarding` box is checked.
This option cannot be changed when networking is handled by the Network Manager.
In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
hand to ensure the `FW_ROUTE` flag is set to `yes` like so:
```
FW_ROUTE="yes"
```
# Basic Docker operations
Images can be pulled from [Docker's central index](http://index.docker.io) using
the following command:
```
docker pull <image name>
```
Containers can be started using the `docker run` command.
Please refer to the [official documentation](http://docs.docker.com/)
for more details.
# Building Docker containers using KIWI
Starting from version 5.06.8 KIWI can be used to build Docker images.
Please refer to KIWI's [official documentation](https://doc.opensuse.org/projects/kiwi/doc/#chap.lxc).
The official `kiwi-doc` package contains examples of Docker images.
## Docker build system versus KIWI
Docker has an [internal build system](http://docs.docker.com/reference/builder/)
which makes it incredibly easy to create new images based on existing ones.
Some users might be confused about what to use. The right approach is to build
the [base images](http://docs.docker.com/terms/image/#base-image-def) using KIWI
and then use them as foundation blocks in your Docker build system.
That has two advantages:
1. Be able to use Docker-specific directives (like `ENTRYPOINT`, `EXPOSE`, ...).
2. Be able to reuse already existing layers.
Sharing the common layers between different images makes it possible to:
* Use less disk space on the Docker hosts.
* Make the deployments faster: only the requested layers are sent over the
network (it is like upgrading installed packages using delta rpms).
* Take full advantage of caching while building Docker images: this will result
in faster executions of the `docker build` command.
To recap: KIWI is not intended as a replacement for Docker's build system.
Rather, it complements it.

30
_service Normal file
View File

@ -0,0 +1,30 @@
<services>
<service name="tar_scm" mode="manual">
<param name="url">https://github.com/moby/moby.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
<param name="versionformat">24.0.9_ce_%h</param>
<param name="revision">v24.0.9</param>
<param name="filename">docker</param>
</service>
<service name="tar_scm" mode="manual">
<param name="url">https://github.com/docker/cli.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
<param name="versionformat">24.0.9_ce</param>
<param name="revision">v24.0.9</param>
<param name="filename">docker-cli</param>
</service>
<service name="tar_scm" mode="manual">
<param name="url">https://github.com/docker/buildx.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
<param name="versionformat">0.19.3</param>
<param name="revision">v0.19.3</param>
<param name="filename">docker-buildx</param>
</service>
<service name="recompress" mode="manual">
<param name="file">docker-*.tar</param>
<param name="compression">xz</param>
</service>
</services>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d44fcc3eb6e78a8424bdc3c3bdda65fee9555d1c28841e53a9fe4bb6a1f9e1b
size 8852852

27
docker-audit.rules Normal file
View File

@ -0,0 +1,27 @@
##
# Audit rules based on CIS Docker 1.6 Benchmark v1.0.0
# https://benchmarks.cisecurity.org/tools2/docker/CIS_Docker_1.6_Benchmark_v1.0.0.pdf
# Not all of these apply to SUSE.
# 1.8 Audit docker daemon
-w /usr/bin/docker -k docker
# 1.9 Audit Docker files and directories
-w /var/lib/docker -k docker
# 1.10 Audit /etc/docker
-w /etc/docker -k docker
# 1.11 Audit Docker files and directories - docker-registry.service
-w /usr/lib/systemd/system/docker-registry.service -k docker
# 1.12 Audit Docker files and directories - docker.service
-w /usr/lib/systemd/system/docker.service -k docker
# 1.13 Audit Docker files and directories - /var/run/docker.sock
-w /var/run/docker.sock -k docker
# 1.14 Audit Docker files and directories - /etc/sysconfig/docker
-w /etc/sysconfig/docker -k docker
# 1.15 Audit Docker files and directories - /etc/sysconfig/docker-network
-w /etc/sysconfig/docker-network -k docker
# 1.16 Audit Docker files and directories - /etc/sysconfig/docker-registry
-w /etc/sysconfig/docker-registry -k docker
# 1.17 Audit Docker files and directories - /etc/sysconfig/docker-storage
-w /etc/sysconfig/docker-storage -k docker
# 1.18 Audit Docker files and directories - /etc/default/docker
-w /etc/default/docker -k docker
## end docker audit rules

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd0f81752a02e20b611f95a35718bdc44eb1e203e0fd80d7afb87dfd8135c300
size 6445376

BIN
docker-buildx-0.19.2.tar.xz (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6efae9f26c3d94c2f73aa22197aa1b7e97c1c81bc73ac5e38f165753373bf0a
size 6480184

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6ac6c8ab1af0347391974301f29ee5ca27630919d143301cfba87e472c8a27e5
size 3563448

8
docker-daemon.json Normal file
View File

@ -0,0 +1,8 @@
{
"log-level": "warn",
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "5"
}
}

294
docker-integration.sh Normal file
View File

@ -0,0 +1,294 @@
#!/bin/bash
# docker-integration: run Docker's integration tests
# Copyright (C) 2024 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -Eeuo pipefail
TESTDIR=/usr/src/docker-test
TEST_SRCDIR="$TESTDIR/src"
TEST_BINDIR="$TESTDIR/bin"
TMPROOT="$(mktemp --tmpdir -d docker-integration-tmpdir.XXXXXX)"
TMPDIR="$TMPROOT/tmp"
DEST="$TMPROOT/dest"
mkdir -p "$TMPDIR" "$TEST_BINDIR" "$DEST"
chmod 1777 "$TMPDIR"
chmod 777 "$TMPROOT"
function usage() {
cat >&2 <<-EOF
docker-integration.sh [-Av] [-r TestName] [-t timeout] [<test-suites>...]
Arguments:
-A
Run all tests (do not fail on first suite failure).
-v
Run tests in verbose mode (go test -v).
-r
Only run tests that match the given regular expression (go test -run).
-t <timeout=$timeout>
Set the per-suite timeout to <timeout> (go test -timeout).
<test-suites>...
Only run the given test suites in /usr/src/docker-test. The
default is to run all test suites.
Examples:
Run the build and network integration tests with a 60 minute timeout:
./docker-integration.sh -t 60m integration/build integration/network
Run all of the tests in verbose mode with a 6 hour timeout:
./docker-integration.sh -Av -t 360m
This script is maintained by openSUSE in the Virtualization:containers
project, and is only intended to be used by openSUSE developers.
EOF
exit "${1:-1}"
}
fail_fast=1
verbose=
filter=
timeout=20m
while getopts "Ahr:t:v" opt; do
case "$opt" in
A)
fail_fast=
;;
v)
verbose=1
;;
r)
filter="$OPTARG"
;;
t)
timeout="$OPTARG"
;;
h)
usage 0
;;
:)
echo "Missing argument: -$OPTARG" >&2
usage 1
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage 1
;;
esac
done
pushd "$TEST_SRCDIR"
if [ "$OPTIND" -le "$#" ]; then
SUITES=("${@:$OPTIND:$(($#+1))}")
else
readarray -t SUITES <<<"$(find . -type f -name test.main -printf "%h\n")"
fi
echo "Planning to run suites {${SUITES[*]}}."
# Download the frozen images.
if ! [ -d /docker-frozen-images ]; then
# TODO: Get the hashes from /usr/src/docker-test/Dockerfile...
contrib/download-frozen-image-v2.sh "$TMPDIR/docker-frozen-images" \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bookworm-slim@sha256:2bc5c236e9b262645a323e9088dfa3bb1ecb16cc75811daf40a23a824d665be9 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
sudo cp -r "$TMPDIR/docker-frozen-images" /
fi
# Create binaries in $TEST_BINDIR.
if ! [ -e "$TEST_BINDIR/docker-basic-plugin" ]; then
(
pushd "$TEST_SRCDIR/testutil/fixtures/plugin/basic"
[ -f go.mod ] || go mod init docker-basic-plugin
go build -o "$TEST_BINDIR/docker-basic-plugin" .
)
fi
if ! [ -e "$TEST_BINDIR/registry-v2" ]; then
# The v2.x tags of Docker registry don't use go.mod, and pre-date the move
# to github.com/distribution, so we need to create a fake GOPATH with the
# old github.com/docker/distribution import path.
(
# shellcheck disable=SC2030
GOPATH="$(mktemp -d -p "$TMPROOT" distribution-build-gopath.XXXXXX)"
export GOPATH
pushd "$GOPATH"
git clone \
--depth=1 --branch=v2.8.3 \
https://github.com/distribution/distribution.git \
src/github.com/docker/distribution
pushd src/github.com/docker/distribution
GO111MODULE=off go build -o "$TEST_BINDIR/registry-v2" ./cmd/registry
)
fi
if ! [ -e "$TEST_BINDIR/ctr" ]; then
containerd-ctr --help >/dev/null
ln -sf "$(which containerd-ctr)" "$TEST_BINDIR/ctr"
fi
if ! [ -e "$TEST_BINDIR/docker" ]; then
# The integration-cli tests require a Docker 17.06.2 client (from 2017).
# This is mainly because the tests are all based on the specific output the
# client gives, and some tests fail on modern client versions.
(
# shellcheck disable=SC2030
GOPATH="$(mktemp -d -p "$TMPROOT" docker-cli-build-gopath.XXXXXX)"
export GOPATH
pushd "$GOPATH"
# This tag also comes from the time when this was called
# github.com/docker/docker-ce-packaging, so we need to work around this
# by moving the cli component into the right path...
git clone \
--depth=1 --branch=v17.06.2-ce \
https://github.com/docker/cli.git \
src/github.com/docker/docker-ce-packaging
mv \
src/github.com/docker/docker-ce-packaging/components/cli \
src/github.com/docker/cli
pushd src/github.com/docker/cli
GO111MODULE=off go build -o "$TEST_BINDIR/docker" ./cmd/docker
)
fi
# Create an unprivilegeduser account for tests.
if ! ( grep unprivilegeduser /etc/passwd &>/dev/null ); then
useradd --create-home --gid docker unprivilegeduser
fi
# Disable SUSE secrets for tests, as some tests (TestDiff from
# integration/container) will fail if we have secrets injected.
[ -e /etc/docker/suse-secrets-enable ] && \
mv -nv /etc/docker/suse-secrets-enable{,-DISABLED}
sudo systemctl restart docker
# Make sure docker-buildx is disabled.
[ -e /usr/lib/docker/cli-plugins/docker-buildx ] && \
mv -nv /usr/lib/docker/cli-plugins/docker-buildx{,-DISABLED}
# Disable any daemon configurations.
[ -e /etc/docker/daemon.json ] && \
mv -nv /etc/docker/daemon.json{,.DISABLED}
set -x
# In order for <gotest.tools/v3/assert> to parse the source and give us useful
# error messages, we have to create a fake source directory that points at
# $TEST_SRCDIR. This path is replaced with %{docker_builddir} during the
# docker.spec build.
__DOCKER_BUILDDIR="@@docker_builddir@@"
DOCKER_BUILDDIR="${DOCKER_BUILDDIR:-$__DOCKER_BUILDDIR}"
sudo rm -rvf "$DOCKER_BUILDDIR"
sudo mkdir -p "$(dirname "$DOCKER_BUILDDIR")"
sudo ln -svf "$TEST_SRCDIR" "$DOCKER_BUILDDIR"
# Clean up any old containers/images/networks/volumes before running the tests.
# We need to do this *BEFORE* we set PATH, as the outdated $TEST_BINDIR/docker
# doesn't support some of these commands.
docker container prune -f
docker image prune -af
#docker buildx prune -af
docker network prune -f
docker volume prune -af
[ -z "$(docker plugin ls -q)" ] || docker plugin ls -q | xargs docker plugin rm -f
docker system prune -af
export DOCKERFILE="$TEST_SRCDIR/Dockerfile"
export TMPDIR="$TMPDIR"
export TEMP="$TMPDIR"
export HOME="$TMPDIR/fake-home"
export DEST="$TEST_SRCDIR/bundles/dummy-dir"
export ABS_DEST="$DEST"
export PATH="$TEST_BINDIR:$PATH"
export TZ=UTC
export DOCKER_INTEGRATION_DAEMON_DEST="$ABS_DEST"
export DOCKER_HOST=unix:///run/docker.sock
export DOCKER_GRAPHDRIVER=overlay2
export DOCKER_USERLANDPROXY=true
export DOCKER_REMAP_ROOT="${DOCKER_REMAP_ROOT:-}"
export DOCKER_TMPDIR="$TMPDIR"
set +x
# Make sure that we have a dummy "destination" directory for tests.
rm -rf "$DOCKER_INTEGRATION_DAEMON_DEST"
mkdir -p "$DOCKER_INTEGRATION_DAEMON_DEST"
# Install the emptyfs images.
sh ./hack/make/.build-empty-images
ls -la "$TMPROOT"
success=0
failed_suites=()
for suite_name in "${SUITES[@]}"; do
suite_name="${suite_name#*./}"
pushd "$TEST_SRCDIR/$suite_name"
test_flags=()
[ -n "$verbose" ] && test_flags+=("-test.v")
[ -n "$filter" ] && test_flags+=("-test.run" "$filter")
if [[ "$suite_name" == "integration-cli" ]]; then
# The integration-cli suite is by far the largest, so it needs a much
# longer timeout than the other suites.
timeout=360m
fi
test_flags+=("-test.timeout" "$timeout")
echo "Running suite $suite_name (${test_flags[*]}) [success=$success fail=${#failed_suites[@]}]"
set -x +e
sudo -E HOME="$HOME" TMPDIR="$TMPDIR" PATH="$PATH" \
./test.main "${test_flags[@]}"
err="$?"
if (( err != 0 )); then
[ -z "$fail_fast" ] || exit "$err"
failed_suites+=("$suite_name")
else
(( success++ ))
fi
set +x -e
popd
done
[ -e /usr/lib/docker/cli-plugins/docker-buildx-DISABLED ] && \
mv -nv /usr/lib/docker/cli-plugins/docker-buildx{-DISABLED,}
[ -e /etc/docker/suse-secrets-enable-DISABLED ] && \
mv -nv /etc/docker/suse-secrets-enable{-DISABLED,}
[ -e /etc/docker/daemon.json.DISABLED ] && \
mv -nv /etc/docker/daemon.json{.DISABLED,}
echo "Suite results: $success success(es) ${#failed_suites[*]} failure(s)."
if (( ${#failed_suites[@]} > 0 )); then
echo "Failed suites:"
printf " - %s\n" "${failed_suites[@]}"
exit 1
fi

7
docker-rpmlintrc Normal file
View File

@ -0,0 +1,7 @@
addFilter("^docker-(stable-)?bash-completion.noarch: (E|W): non-executable-script /usr/share/bash-completion/completions/docker")
addFilter("^docker-(stable-)?zsh-completion.noarch: W: non-conffile-in-etc /etc/zsh_completion.d/_docker")
# The docker-integration-tests-devel package contains all of the source code of
# Docker, which causes a bunch of warnings. Note that
# docker-integration-tests-devel is used internally and isn't actually shipped.
addFilter("^docker-(stable-)?integration-tests-devel\..*: (E|W): .*")

140
docker-stable.changes Normal file
View File

@ -0,0 +1,140 @@
-------------------------------------------------------------------
Wed Dec 18 05:53:11 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Add backport for CVE-2024-29018 fix. bsc#1234089
+ 0010-CVE-2024-29018-libnet-Don-t-forward-to-upstream-reso.patch
- Add backport for CVE-2024-23650 fix and rename patch filename. bsc#1219437
- 0006-CVE-2024-23653-update-buildkit-to-include-CVE-patche.patch
+ 0006-CVE-2024-2365x-update-buildkit-to-include-CVE-patche.patch
- Reorder and rebase patches:
* 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
* 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
* 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
* 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
* 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
* 0007-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
* 0008-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
* 0009-CVE-2024-41110-AuthZ-plugin-securty-fixes.patch
- 0010-TESTS-backport-fixes-for-integration-tests.patch
+ 0011-TESTS-backport-fixes-for-integration-tests.patch
-------------------------------------------------------------------
Tue Dec 17 13:20:39 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Update to docker-buildx 0.19.3. See upstream changelog online at
<https://github.com/docker/buildx/releases/tag/v0.19.3>
-------------------------------------------------------------------
Wed Dec 11 10:14:56 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Update docker-buildx to v0.19.2. See upstream changelog online at
<https://github.com/docker/buildx/releases/tag/v0.19.2>.
Some notable changelogs from the last update:
* <https://github.com/docker/buildx/releases/tag/v0.19.0>
* <https://github.com/docker/buildx/releases/tag/v0.18.0>
- Update to Go 1.22.
-------------------------------------------------------------------
Wed Dec 11 05:39:42 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Add a new toggle file /etc/docker/suse-secrets-enable which allows users to
disable the SUSEConnect integration with Docker (which creates special mounts
in /run/secrets to allow container-suseconnect to authenticate containers
with registries on registered hosts). bsc#1231348 bsc#1232999
In order to disable these mounts, just do
echo 0 > /etc/docker/suse-secrets-enable
and restart Docker. In order to re-enable them, just do
echo 1 > /etc/docker/suse-secrets-enable
and restart Docker. Docker will output information on startup to tell you
whether the SUSE secrets feature is enabled or not.
* 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
-------------------------------------------------------------------
Wed Nov 27 12:10:42 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Disable docker-buildx builds for SLES. It turns out that build containers
with docker-buildx don't currently get the SUSE secrets mounts applied,
meaning that container-suseconnect doesn't work when building images.
bsc#1233819
-------------------------------------------------------------------
Wed Nov 20 05:34:38 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Add docker-integration-tests-devel subpackage for building and running the
upstream Docker integration tests on machines to test that Docker works
properly. Users should not install this package.
- docker-rpmlintrc updated to include allow-list for all of the integration
tests package, since it contains a bunch of stuff that wouldn't normally be
allowed.
- Rebased patches:
* 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
* 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
* 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
* 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
* 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
* 0006-CVE-2024-23653-update-buildkit-to-include-CVE-patche.patch
* 0007-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
* 0008-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
* 0009-CVE-2024-41110-AuthZ-plugin-securty-fixes.patch
- Added patches:
+ 0010-TESTS-backport-fixes-for-integration-tests.patch
-------------------------------------------------------------------
Tue Nov 12 06:34:28 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Remove DOCKER_NETWORK_OPTS from docker.service. This was removed from
sysconfig a long time ago, and apparently this causes issues with systemd in
some cases.
- Update --add-runtime to point to correct binary path.
-------------------------------------------------------------------
Wed Oct 16 22:24:52 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Further merge docker and docker-stable specfiles to minimise the differences.
The main thing is that we now include both halves of the
Conflicts/Provides/Obsoletes dance in both specfiles.
-------------------------------------------------------------------
Wed Oct 16 05:37:14 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Update to docker-buildx v0.17.1 to match standalone docker-buildx package we
are replacing. See upstream changelog online at
<https://github.com/docker/buildx/releases/tag/v0.17.1>
-------------------------------------------------------------------
Sat Sep 7 13:10:30 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Import specfile changes for docker-buildx as well as the changes to help
reduce specfile differences between docker-stable and docker. bsc#1230331
bsc#1230333
-------------------------------------------------------------------
Wed Aug 14 03:21:00 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Backport patch for CVE-2024-41110. bsc#1228324
+ 0009-CVE-2024-41110-AuthZ-plugin-securty-fixes.patch
-------------------------------------------------------------------
Wed Jun 16 04:18:11 UTC 2024 - Aleksa Sarai <asarai@suse.com>
- Initial docker-stable release, forked from Docker 24.0.6-ce release
(packaged on 2023-10-11).
- Update to Docker 24.0.9-ce, which is the latest version of the 24.0.x branch.
It seems likely this will be the last upstream version of the 24.0.x branch
(it seems Mirantis is going to do LTS for 23.0.x, not 24.0.x).
<https://docs.docker.com/engine/release-notes/24.0/#2409>
- Fix BuildKit's symlink resolution logic to correctly handle non-lexical
symlinks. Backport of <https://github.com/moby/buildkit/pull/4896> and
<https://github.com/moby/buildkit/pull/5060>. bsc#1221916
+ 0007-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
- Write volume options atomically so sudden system crashes won't result in
future Docker starts failing due to empty files. Backport of
<https://github.com/moby/moby/pull/48034>. bsc#1214855
+ 0008-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch

648
docker-stable.spec Normal file
View File

@ -0,0 +1,648 @@
#
# spec file for package docker-stable
#
# Copyright (c) 2025 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
# nodebuginfo
%bcond_without apparmor
# This subpackage is only used for testing by developers, and shouldn't be
# built for actual users.
%bcond_with integration_tests
%if 0%{?is_opensuse} == 0
# SUSEConnect support ("SUSE secrets") only makes sense for SLES hosts.
%bcond_without suseconnect
# There is currently a known bug between buildx and SUSE secrets, so we don't
# package docker-buildx for SLES. bsc#1233819
%bcond_with buildx
%else
%bcond_with suseconnect
%bcond_without buildx
%endif
# The flavour is defined with a macro to try to keep docker and docker-stable
# as similar as possible, to make maintenance a little easier.
%define flavour -stable
# Where important update information will be stored, such that an administrator
# is guaranteed to see the relevant warning.
%define update_messages %{_localstatedir}/adm/update-messages/%{name}-%{version}-%{release}
# Test binaries.
%define testdir /usr/src/docker-test
#Compat macro for new _fillupdir macro introduced in Nov 2017
%if ! %{defined _fillupdir}
%define _fillupdir /var/adm/fillup-templates
%endif
# MANUAL: This needs to be updated with every docker update.
%define docker_real_version 24.0.9
%define docker_git_version fca702de7f71
%define docker_version %{docker_real_version}_ce
# This "nice version" is so that docker --version gives a result that can be
# parsed by other people. boo#1182476
%define docker_nice_version %{docker_real_version}-ce
%if %{with buildx}
# MANUAL: This needs to be updated with every docker-buildx update.
%define buildx_version 0.19.3
%endif
# Used when generating the "build" information for Docker version. The value of
# git_commit_epoch is unused here (we use SOURCE_DATE_EPOCH, which rpm
# helpfully injects into our build environment from the changelog). If you want
# to generate a new git_commit_epoch, use this:
# $ date --date="$(git show --format=fuller --date=iso $COMMIT_ID | grep -oP '(?<=^CommitDate: ).*')" '+%s'
%define git_commit_epoch 1706746343
# NOTE: We are using Docker 24 as the base for docker-stable because that
# happened to be the version we released when we started working on
# docker-stable. Unfortunately (at time of writing) the Mirantis-supported
# Docker LTS is based on Docker 23[1] so when we decide to update this (in
# 2027) we should try to match the Mirantis-supported version then.
#
# [1]: https://docs.mirantis.com/mcr/23.0/compat-matrix/maintenance-lifecycle.html
Name: docker%{flavour}
Version: %{docker_version}
Release: 0
Summary: The Moby-project Linux container runtime
License: Apache-2.0
Group: System/Management
URL: http://www.docker.io
Source: docker-%{docker_version}_%{docker_git_version}.tar.xz
Source1: docker-cli-%{docker_version}.tar.xz
Source3: docker-rpmlintrc
# TODO: Move these source files to somewhere nicer.
Source100: docker.service
Source101: docker.socket
Source110: 80-docker.rules
Source120: sysconfig.docker
Source130: README_SUSE.md
Source140: docker-audit.rules
Source150: docker-daemon.json
Source160: docker.sysusers
# docker-integration-tests-devel
Source900: docker-integration.sh
# NOTE: All of these patches are maintained in <https://github.com/suse/docker>
# in the suse-v<version> branch. Make sure you update the patches in that
# branch and then git-format-patch the patch here.
# SUSE-FEATURE: Adds the /run/secrets mountpoint inside all Docker containers
# which is not snapshotted when images are committed.
Patch100: 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
Patch101: 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
# UPSTREAM: Revert of upstream patch to keep SLE-12 build working.
Patch200: 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
# UPSTREAM: Backport of <https://github.com/moby/moby/pull/41954>.
Patch201: 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
# UPSTREAM: Revert of upstream patches to make apparmor work on SLE 12.
Patch202: 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
# UPSTREAM: Backport of several BuildKit patches:
# (Various patches.) CVE-2024-23650
# <https://github.com/moby/buildkit/pull/4604> CVE-2024-23651
# <https://github.com/moby/buildkit/pull/4603> CVE-2024-23652
# <https://github.com/moby/buildkit/pull/4602> CVE-2024-23653
Patch203: 0006-CVE-2024-2365x-update-buildkit-to-include-CVE-patche.patch
# UPSTREAM: Backport of <https://github.com/moby/buildkit/pull/4896> and
# <https://github.com/moby/buildkit/pull/5060>. bsc#1221916
Patch204: 0007-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
# UPSTREAM: Backport of <https://github.com/moby/moby/pull/48034>. bsc#1214855
Patch205: 0008-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
# UPSTREAM: Backport of <https://github.com/moby/moby/security/advisories/GHSA-v23v-6jw2-98fq>
# fix. CVE-2024-41110
Patch206: 0009-CVE-2024-41110-AuthZ-plugin-securty-fixes.patch
# UPSTREAM: Backport of <https://github.com/moby/moby/pull/46609>. CVE-2024-29018
Patch207: 0010-CVE-2024-29018-libnet-Don-t-forward-to-upstream-reso.patch
# UPSTREAM: Backport of <https://github.com/moby/moby/pull/46307> and
# <https://github.com/moby/moby/pull/49061>.
Patch299: 0011-TESTS-backport-fixes-for-integration-tests.patch
# UPSTREAM: Backport of <https://github.com/docker/cli/pull/4228>.
Patch900: cli-0001-docs-include-required-tools-in-source-tree.patch
BuildRequires: audit
BuildRequires: bash-completion
BuildRequires: ca-certificates
BuildRequires: device-mapper-devel >= 1.2.68
BuildRequires: fdupes
%if %{with apparmor}
BuildRequires: libapparmor-devel
%endif
BuildRequires: fish
BuildRequires: go-go-md2man
BuildRequires: libbtrfs-devel >= 3.8
BuildRequires: libseccomp-devel >= 2.2
BuildRequires: libtool
BuildRequires: linux-glibc-devel
BuildRequires: procps
BuildRequires: sqlite3-devel
BuildRequires: sysuser-tools
BuildRequires: zsh
BuildRequires: golang(API) = 1.22
BuildRequires: pkgconfig(libsystemd)
%if %{with apparmor}
%if 0%{?sle_version} >= 150000
# This conditional only works on rpm>=4.13, which SLE 12 doesn't have. But we
# don't need to support Docker+selinux for SLE 12 anyway.
Requires: (apparmor-parser or container-selinux)
# This recommends is added to make sure that even if you have container-selinux
# installed you will still be prompted to install apparmor-parser which Docker
# requires to apply AppArmor profiles (for SELinux systems this doesn't matter
# but if you switch back to AppArmor on reboot this would result in insecure
# containers).
Recommends: apparmor-parser
%else
Requires: apparmor-parser
%endif
%else
Requires: container-selinux
%endif
Requires: ca-certificates-mozilla
# The docker-proxy binary used to be in a separate package. We obsolete it,
# since now docker-proxy is maintained as part of this package.
Obsoletes: docker-libnetwork < 0.7.0.2
Provides: docker-libnetwork = 0.7.0.2.%{docker_version}
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker = %{docker_version}
Obsoletes: docker < %{docker_version}
Conflicts: docker
%else
Conflicts: docker-stable
%endif
# Required to actually run containers. We require the minimum version that is
# pinned by Docker, but in order to avoid headaches we allow for updates.
Requires: runc >= 1.1.9
Requires: containerd >= 1.7.3
# Needed for --init support. We don't use "tini"; we use our own
# implementation, which handles edge cases better.
Requires: catatonit
# Provides mkfs.ext4, which Docker needs when the devicemapper storage driver is used.
Requires: e2fsprogs
Requires: iproute2 >= 3.5
Requires: iptables >= 1.4
Requires: procps
Requires: tar >= 1.26
Requires: xz >= 4.9
%if %{with buildx}
# Standard docker-build is deprecated, so require docker-buildx to avoid users
# hitting bugs that have long since been fixed by docker-buildx. bsc#1230331
Requires: %{name}-buildx
%endif
%?sysusers_requires
Requires(post): %fillup_prereq
Requires(post): udev
Requires(post): shadow
# Not strictly required, but lvm2 must be installed when the underlying system
# is configured to use LVM and the user doesn't explicitly select a storage
# driver other than devicemapper.
Recommends: lvm2 >= 2.2.89
Recommends: %{name}-rootless-extras
Recommends: git-core >= 1.7
ExcludeArch: s390 ppc
%description
Docker complements LXC with a high-level API which operates at the process
level. It runs unix processes with strong guarantees of isolation and
repeatability across servers.
Docker is a great building block for automating distributed systems: large-scale
web deployments, database clusters, continuous deployment systems, private PaaS,
service-oriented architectures, etc.
%if %{with buildx}
%package buildx
Version: %{buildx_version}
Summary: Docker CLI plugin for extended build capabilities with BuildKit
License: Apache-2.0
URL: https://github.com/docker/buildx
Source500: docker-buildx-%{buildx_version}.tar.xz
Group: System/Management
Requires: %{name} >= 19.03.0_ce
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-buildx = %{buildx_version}
Obsoletes: docker-buildx < %{buildx_version}
Conflicts: docker-buildx
%else
Conflicts: docker-stable-buildx
%endif
%description buildx
buildx is a Docker CLI plugin for extended build capabilities with BuildKit.
Key features:
- Familiar UI from docker build
- Full BuildKit capabilities with container driver
- Multiple builder instance support
- Multi-node builds for cross-platform images
- Compose build support
- High-level build constructs (bake)
- In-container driver support (both Docker and Kubernetes)
%endif
%package rootless-extras
Summary: Rootless support for Docker
Group: System/Management
Requires: %{name} = %{docker_version}
Requires: fuse-overlayfs >= 0.7
Requires: rootlesskit
Requires: slirp4netns >= 0.4
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-rootless-extras = %{docker_version}
Obsoletes: docker-rootless-extras < %{docker_version}
Conflicts: docker-rootless-extras
%else
Conflicts: docker-stable-rootless-extras
%endif
%description rootless-extras
Rootless support for Docker.
Use dockerd-rootless.sh to run the daemon.
Use dockerd-rootless-setuptool.sh to set up systemd for dockerd-rootless.sh.
%if %{with integration_tests}
%package integration-tests-devel
Summary: Integration tests for Docker
Group: TestSuite
Requires: %{name} = %{docker_version}
Requires: containerd-ctr
Requires: curl
Requires: gcc
Requires: git
Requires: glibc-devel-static
Requires: go
Requires: jq
Requires: libcap-progs
%description integration-tests-devel
Integration testing binaries for Docker.
THIS PACKAGE SHOULD NOT BE INSTALLED BY END-USERS, IT IS ONLY INTENDED FOR
INTERNAL DEVELOPMENT OF THE DOCKER PACKAGE FOR (OPEN)SUSE.
%endif
%package bash-completion
Summary: Bash Completion for %{name}
Group: System/Shells
Requires: %{name} = %{docker_version}
Requires: bash-completion
Supplements: packageand(%{name}:bash-completion)
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-bash-completion = %{docker_version}
Obsoletes: docker-bash-completion < %{docker_version}
Conflicts: docker-bash-completion
%else
Conflicts: docker-stable-bash-completion
%endif
%description bash-completion
Bash command line completion support for %{name}.
%package zsh-completion
Summary: Zsh Completion for %{name}
Group: System/Shells
Requires: %{name} = %{docker_version}
Requires: zsh
Supplements: packageand(%{name}:zsh)
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-zsh-completion = %{docker_version}
Obsoletes: docker-zsh-completion < %{docker_version}
Conflicts: docker-zsh-completion
%else
Conflicts: docker-stable-zsh-completion
%endif
%description zsh-completion
Zsh command line completion support for %{name}.
%package fish-completion
Summary: Fish completion for %{name}
Group: System/Shells
Requires: %{name} = %{docker_version}
Requires: fish
Supplements: packageand(%{name}:fish)
BuildArch: noarch
# docker-stable cannot be used alongside docker.
%if "%{name}" == "docker-stable"
Provides: docker-fish-completion = %{docker_version}
Obsoletes: docker-fish-completion < %{docker_version}
Conflicts: docker-fish-completion
%else
Conflicts: docker-stable-fish-completion
%endif
%description fish-completion
Fish command line completion support for %{name}.
%prep
# docker-cli
%define cli_builddir %{_builddir}/docker-cli-%{docker_version}
%setup -q -T -b 1 -n docker-cli-%{docker_version}
[ "%{cli_builddir}" = "$PWD" ]
# offline manpages
%patch -P900 -p1
%if %{with buildx}
# docker-buildx
%define buildx_builddir %{_builddir}/docker-buildx-%{buildx_version}
%setup -q -T -b 500 -n docker-buildx-%{buildx_version}
[ "%{buildx_builddir}" = "$PWD" ]
%endif
# docker
%define docker_builddir %{_builddir}/docker-%{docker_version}_%{docker_git_version}
%setup -q -n docker-%{docker_version}_%{docker_git_version}
[ "%{docker_builddir}" = "$PWD" ]
# README_SUSE.md for documentation.
cp %{SOURCE130} .
%if %{with suseconnect}
# PATCH-SUSE: Secrets patches.
%patch -P100 -p1
%patch -P101 -p1
%endif
%if 0%{?sle_version} == 120000
# Patches to build on SLE-12.
%patch -P200 -p1
%endif
# bsc#1099277
%patch -P201 -p1
# Solves apparmor issues on SLE-12, but okay for newer SLE versions too.
%patch -P202 -p1
# CVE-2024-23650, CVE-2024-23651, CVE-2024-23652, CVE-2024-23653
%patch -P203 -p1
# bsc#1221916
%patch -P204 -p1
# bsc#1214855
%patch -P205 -p1
# CVE-2024-41110
%patch -P206 -p1
# CVE-2024-29018
%patch -P207 -p1
%if %{with integration_tests}
# integration-tests patches
%patch -P299 -p1
%endif
%build
%sysusers_generate_pre %{SOURCE160} %{name} docker.conf
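# (For reference: sysusers_generate_pre emits a scriptlet file, pulled in
# below via "%pre -f %{name}.pre", which creates the "docker" group and
# "dockremap" user declared in the sysusers config at install time.)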
BUILDTAGS="exclude_graphdriver_aufs apparmor selinux seccomp pkcs11"
%if 0%{?sle_version} == 120000
# Allow us to build with older distros but still have deferred removal
# support at runtime. We only use this when building on SLE12, because
# later openSUSE/SLE versions have a new enough libdevicemapper to not
# require the runtime checking.
BUILDTAGS="libdm_dlsym_deferred_remove $BUILDTAGS"
%endif
export AUTO_GOPATH=1
# Make sure we always build PIC code. bsc#1048046
export BUILDFLAGS="-buildmode=pie"
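# (For reference: "-buildmode=pie" makes the Go toolchain emit
# position-independent executables, so ASLR can be applied to the resulting
# binaries.)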
# Specify all of the versioning information. We use SOURCE_DATE_EPOCH if it's
# been injected by rpmbuild, otherwise we use the hardcoded git_commit_epoch
# generated above. boo#1064781
export VERSION="%{docker_nice_version}"
export DOCKER_GITCOMMIT="%{docker_git_version}"
export GITCOMMIT="%{docker_git_version}"
export SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-%{git_commit_epoch}}"
export BUILDTIME="$(date -u -d "@$SOURCE_DATE_EPOCH" --rfc-3339 ns 2>/dev/null | sed -e 's/ /T/')"
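# (Worked example: with SOURCE_DATE_EPOCH=1700000000 the pipeline above
# produces BUILDTIME="2023-11-14T22:13:20.000000000+00:00".)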
###################
## DOCKER ENGINE ##
###################
pushd "%{docker_builddir}"
# use Go modules for the build
ln -s {vendor,go}.mod
ln -s {vendor,go}.sum
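# (Brace expansion, for reference: the two links above are equivalent to
# "ln -s vendor.mod go.mod" and "ln -s vendor.sum go.sum"; the moby source
# ships its module metadata as vendor.mod/vendor.sum.)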
./hack/make.sh dynbinary
%if %{with integration_tests}
# build test binaries for integration tests
readarray -t integration_dirs \
<<<"$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/... ./integration-cli/...)"
for dir in "${integration_dirs[@]}"
do
pushd "$dir"
go test -c -buildmode=pie -tags "$BUILDTAGS" -o test.main .
popd
done
# Update __DOCKER_BUILDIR in the integration testing script.
sed -i 's|^__DOCKER_BUILDIR=.*|__DOCKER_BUILDIR=%{docker_builddir}|g' "%{SOURCE900}"
%endif
popd
###################
## DOCKER CLIENT ##
###################
pushd "%{cli_builddir}"
# use Go modules for the build
ln -s {vendor,go}.mod
ln -s {vendor,go}.sum
make DISABLE_WARN_OUTSIDE_CONTAINER=1 dynbinary manpages
popd
%if %{with buildx}
###################
## DOCKER BUILDX ##
###################
pushd "%{buildx_builddir}"
make \
CGO_ENABLED=1 \
VERSION="%{buildx_version}" \
REVISION="v%{buildx_version}" \
GO_EXTRA_FLAGS="-buildmode=pie" \
build
popd
%endif
%install
install -Dd -m0755 \
%{buildroot}%{_sysconfdir}/init.d \
%{buildroot}%{_bindir} \
%{buildroot}%{_sbindir}
# docker daemon
install -D -m0755 %{docker_builddir}/bundles/dynbinary-daemon/dockerd %{buildroot}/%{_bindir}/dockerd
# docker proxy
install -D -m0755 %{docker_builddir}/bundles/dynbinary-daemon/docker-proxy %{buildroot}/%{_bindir}/docker-proxy
# cli-plugins/
install -d %{buildroot}/usr/lib/docker/cli-plugins
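# (For reference: the docker CLI discovers plugins as "docker-<name>"
# executables in /usr/lib/docker/cli-plugins, one of its system-wide plugin
# directories.)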
%if %{with buildx}
# buildx plugin
install -D -m0755 %{buildx_builddir}/bin/build/docker-buildx %{buildroot}/usr/lib/docker/cli-plugins/docker-buildx
%endif
# /var/lib/docker
install -d %{buildroot}/%{_localstatedir}/lib/docker
# daemon.json config file
install -D -m0644 %{SOURCE150} %{buildroot}%{_sysconfdir}/docker/daemon.json
%if %{with suseconnect}
# SUSE-specific flag file that enables the SUSE secrets support added by the patches above
echo 1 > %{buildroot}%{_sysconfdir}/docker/suse-secrets-enable
%endif
# docker cli
install -D -m0755 %{cli_builddir}/build/docker %{buildroot}/%{_bindir}/docker
install -D -m0644 %{cli_builddir}/contrib/completion/bash/docker "%{buildroot}%{_datarootdir}/bash-completion/completions/docker"
install -D -m0644 %{cli_builddir}/contrib/completion/zsh/_docker "%{buildroot}%{_sysconfdir}/zsh_completion.d/_docker"
install -D -m0644 %{cli_builddir}/contrib/completion/fish/docker.fish "%{buildroot}/%{_datadir}/fish/vendor_completions.d/docker.fish"
# systemd service
install -D -m0644 %{SOURCE100} %{buildroot}%{_unitdir}/docker.service
install -D -m0644 %{SOURCE101} %{buildroot}%{_unitdir}/docker.socket
ln -sf service %{buildroot}%{_sbindir}/rcdocker
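# (SUSE convention: rc<service> symlinks to the "service" wrapper, so
# "rcdocker restart" behaves like "service docker restart".)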
# udev rules that prevent dolphin from showing all docker devices and slowing
# everything down; upstream report: https://bugs.kde.org/show_bug.cgi?id=329930
install -D -m0644 %{SOURCE110} %{buildroot}%{_udevrulesdir}/80-docker.rules
# audit rules
install -D -m0640 %{SOURCE140} %{buildroot}%{_sysconfdir}/audit/rules.d/docker.rules
# sysconfig file
install -D -m0644 %{SOURCE120} %{buildroot}%{_fillupdir}/sysconfig.docker
# install manpages (using the ones from the engine)
install -d %{buildroot}%{_mandir}/man1
install -p -m0644 %{cli_builddir}/man/man1/*.1 %{buildroot}%{_mandir}/man1
install -d %{buildroot}%{_mandir}/man5
install -p -m0644 %{cli_builddir}/man/man5/Dockerfile.5 %{buildroot}%{_mandir}/man5
install -d %{buildroot}%{_mandir}/man8
install -p -m0644 %{cli_builddir}/man/man8/*.8 %{buildroot}%{_mandir}/man8
# sysusers.d
install -D -m0644 %{SOURCE160} %{buildroot}%{_sysusersdir}/docker.conf
# rootless extras
install -D -p -m 0755 contrib/dockerd-rootless.sh %{buildroot}/%{_bindir}/dockerd-rootless.sh
install -D -p -m 0755 contrib/dockerd-rootless-setuptool.sh %{buildroot}/%{_bindir}/dockerd-rootless-setuptool.sh
%if %{with integration_tests}
# integration tests
install -d %{buildroot}%{testdir}
cp -ar %{docker_builddir} %{buildroot}%{testdir}/src
install -d %{buildroot}%{testdir}/bin
install -D -p -m 0755 %{SOURCE900} %{buildroot}%{testdir}/docker-integration.sh
# remove all of the non-test binaries in bundles/
rm -rfv %{buildroot}%{testdir}/src/bundles/
%endif
%fdupes %{buildroot}
%pre -f %{name}.pre
# /etc/sub[ug]id should exist already (it's part of shadow-utils), but older
# distros don't have it. Docker just parses it and doesn't need any special
# shadow-utils helpers.
touch /etc/subuid /etc/subgid ||:
# "useradd -r" doesn't add sub[ug]ids so we manually add some. Hopefully there
# aren't any conflicts here, because usermod doesn't provide the same "get
# unusued range" feature that dockremap does.
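# (For reference: usermod -v/--add-subuids assigns a subordinate UID range
# and -w/--add-subgids the GID equivalent; the echo fallback below writes the
# sub[ug]id entry directly in the user:start:count format.)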
grep -q '^dockremap:' /etc/subuid || \
usermod -v 100000000-200000000 dockremap &>/dev/null || \
echo "dockremap:100000000:100000001" >>/etc/subuid ||:
grep -q '^dockremap:' /etc/subgid || \
usermod -w 100000000-200000000 dockremap &>/dev/null || \
echo "dockremap:100000000:100000001" >>/etc/subgid ||:
%service_add_pre docker.service docker.socket
%post
%service_add_post docker.service docker.socket
%{fillup_only -n docker}
%preun
%service_del_preun docker.service docker.socket
%postun
%service_del_postun docker.service docker.socket
%files
%defattr(-,root,root)
%doc README.md README_SUSE.md
%license LICENSE
%{_bindir}/docker
%{_bindir}/dockerd
%{_bindir}/docker-proxy
%{_sbindir}/rcdocker
%dir %{_localstatedir}/lib/docker/
%dir /usr/lib/docker
%dir /usr/lib/docker/cli-plugins
%{_unitdir}/docker.service
%{_unitdir}/docker.socket
%{_sysusersdir}/docker.conf
%dir %{_sysconfdir}/docker
%config(noreplace) %{_sysconfdir}/docker/daemon.json
%if %{with suseconnect}
%config(noreplace) %{_sysconfdir}/docker/suse-secrets-enable
%endif
%{_fillupdir}/sysconfig.docker
%dir %attr(750,root,root) %{_sysconfdir}/audit/rules.d
%config %{_sysconfdir}/audit/rules.d/docker.rules
%{_udevrulesdir}/80-docker.rules
%{_mandir}/man1/docker-*.1%{ext_man}
%{_mandir}/man1/docker.1%{ext_man}
%{_mandir}/man5/Dockerfile.5%{ext_man}
%{_mandir}/man8/dockerd.8%{ext_man}
%if %{with buildx}
%files buildx
%defattr(-,root,root)
/usr/lib/docker/cli-plugins/docker-buildx
%endif
%files rootless-extras
%defattr(-,root,root)
%{_bindir}/dockerd-rootless.sh
%{_bindir}/dockerd-rootless-setuptool.sh
%if %{with integration_tests}
%files integration-tests-devel
%defattr(-,root,root)
%{testdir}
%endif
%files bash-completion
%defattr(-,root,root)
%{_datarootdir}/bash-completion/completions/docker
%files zsh-completion
%defattr(-,root,root)
%{_sysconfdir}/zsh_completion.d/_docker
%files fish-completion
%defattr(-,root,root)
%{_datadir}/fish/vendor_completions.d/docker.fish
%changelog
docker.service Normal file
@ -0,0 +1,45 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target lvm2-monitor.service firewalld.service
# We don't use the docker socket activation, but doing this ensures that the
# docker.socket unit is alive while Docker is (docker.socket has BindsTo, so we
# only need a weak requirement to make sure starting docker.service also
# "starts" the socket service). Forcefully stopping docker.socket will not
# cause docker to die, but there's no nice workaround for that.
Wants=docker.socket
[Service]
EnvironmentFile=/etc/sysconfig/docker
# While Docker has support for socket activation (-H fd://), this is not
# enabled by default because enabling socket activation means that on boot your
# containers won't start until someone tries to administer the Docker daemon.
Type=notify
ExecStart=/usr/bin/dockerd --add-runtime oci=/usr/sbin/runc $DOCKER_OPTS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
# TasksMax is only supported by systemd 226 and above; older versions
# ignore the property with a warning.
TasksMax=infinity
# Set delegate yes so that systemd does not reset the cgroups of docker containers
# Only systemd 218 and above support this property.
Delegate=yes
# Kill only the docker process, not all processes in the cgroup.
KillMode=process
# Restart the docker process if it exits prematurely.
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
docker.socket Normal file
@ -0,0 +1,18 @@
[Unit]
Description=Docker Socket for the API
# We use BindsTo in order to make sure that you cannot use socket-activation
# with Docker (Docker must always start at boot if enabled, otherwise
# containers will not run until some administrator interacts with Docker).
BindsTo=docker.service
[Socket]
# If /var/run is not implemented as a symlink to /run, you may need to
# specify ListenStream=/var/run/docker.sock instead.
ListenStream=/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
docker.sysusers Normal file
@ -0,0 +1,3 @@
#Type Name ID GECOS Home directory Shell
g docker - - - -
u dockremap - 'docker --userns-remap=default' - -
sysconfig.docker Normal file
@ -0,0 +1,8 @@
## Path : System/Management
## Description : Extra cli switches for docker daemon
## Type : string
## Default : ""
## ServiceRestart : docker
#
DOCKER_OPTS=""