Accepting request 1190567 from home:cyphar:docker

- Update to Docker 26.1.5-ce. See upstream changelog online at
  <https://docs.docker.com/engine/release-notes/26.1/#2615>
- This update includes a fix for CVE-2024-41110. bsc#1228324
- Rebase patches:
  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
  * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
  * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
  * 0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
  * 0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
  * cli-0001-docs-include-required-tools-in-source-tree.patch

OBS-URL: https://build.opensuse.org/request/show/1190567
OBS-URL: https://build.opensuse.org/package/show/Virtualization:containers/docker?expand=0&rev=406
commit 2b14743f6e
Author: Aleksa Sarai
Date: 2024-07-31 05:49:49 +00:00 (committed by Git OBS Bridge)
26 changed files with 30272 additions and 0 deletions

.gitattributes (vendored, new file)
@@ -0,0 +1,23 @@
## Default LFS
*.7z filter=lfs diff=lfs merge=lfs -text
*.bsp filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.gem filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.lz filter=lfs diff=lfs merge=lfs -text
*.lzma filter=lfs diff=lfs merge=lfs -text
*.obscpio filter=lfs diff=lfs merge=lfs -text
*.oxt filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.rpm filter=lfs diff=lfs merge=lfs -text
*.tbz filter=lfs diff=lfs merge=lfs -text
*.tbz2 filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.ttf filter=lfs diff=lfs merge=lfs -text
*.txz filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text

.gitignore (vendored, new file)
@@ -0,0 +1 @@
.osc

0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch (new file)
@@ -0,0 +1,73 @@
From ec53ee338835c4c1dc583695ac166f36bf3bac5c Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 12:41:54 +1100
Subject: [PATCH 1/7] SECRETS: daemon: allow directory creation in /run/secrets
Since FileMode can have the directory bit set, allow a SecretStore
implementation to return secrets that are actually directories. This is
useful for creating directories and subdirectories of secrets.
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/container_operations_unix.go | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index 4dedc1b21c87..b7c310493e79 100644
--- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go
@@ -3,6 +3,7 @@
package daemon // import "github.com/docker/docker/daemon"
import (
+ "bytes"
"context"
"fmt"
"os"
@@ -16,6 +17,7 @@ import (
"github.com/docker/docker/daemon/links"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
+ "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/process"
"github.com/docker/docker/pkg/stringid"
@@ -240,9 +242,6 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if err != nil {
return errors.Wrap(err, "unable to get secret from secret store")
}
- if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
- return errors.Wrap(err, "error injecting secret")
- }
uid, err := strconv.Atoi(s.File.UID)
if err != nil {
@@ -253,6 +252,24 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
return err
}
+ if s.File.Mode.IsDir() {
+ if err := os.Mkdir(fPath, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error creating secretdir")
+ }
+ if secret.Spec.Data != nil {
+ // If the "file" is a directory, then s.File.Data is actually a tar
+ // archive of the directory. So we just do a tar extraction here.
+ if err := archive.UntarUncompressed(bytes.NewBuffer(secret.Spec.Data), fPath, &archive.TarOptions{
+ IDMap: daemon.idMapping,
+ }); err != nil {
+ return errors.Wrap(err, "error injecting secretdir")
+ }
+ }
+ } else {
+ if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error injecting secret")
+ }
+ }
if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
return errors.Wrap(err, "error setting ownership for secret")
}
--
2.45.2
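
The key convention in the hunk above: when a secret's FileMode has the directory bit set, Spec.Data carries an uncompressed tar archive of the directory contents. A minimal standalone sketch of that convention, assuming the stdlib archive/tar in place of docker's pkg/archive and omitting the ID-mapping/chown handling the real code does:

package secrets

import (
	"archive/tar"
	"bytes"
	"io"
	"os"
	"path/filepath"
)

// injectSecret mirrors the branch the patch adds to setupSecretDir: plain
// files are written directly, while directory-mode secrets are created and
// then populated by extracting the tar archive held in the data field.
func injectSecret(fPath string, mode os.FileMode, data []byte) error {
	if !mode.IsDir() {
		return os.WriteFile(fPath, data, mode.Perm())
	}
	if err := os.Mkdir(fPath, mode.Perm()); err != nil {
		return err
	}
	tr := tar.NewReader(bytes.NewReader(data))
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// A real implementation must sanitise hdr.Name against ".."
		// traversal; elided here for brevity.
		out := filepath.Join(fPath, hdr.Name)
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(out, os.FileMode(hdr.Mode).Perm()); err != nil {
				return err
			}
		case tar.TypeReg:
			buf, err := io.ReadAll(tr)
			if err != nil {
				return err
			}
			if err := os.WriteFile(out, buf, os.FileMode(hdr.Mode).Perm()); err != nil {
				return err
			}
		}
	}
}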

0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch (new file)
@@ -0,0 +1,460 @@
From 759482e941bde2b67d39b52c803e3390555ff9e9 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 11:43:29 +1100
Subject: [PATCH 2/7] SECRETS: SUSE: implement SUSE container secrets
This allows for us to pass in host credentials to a container, allowing
for SUSEConnect to work with containers.
THIS PATCH IS NOT TO BE UPSTREAMED, DUE TO THE FACT THAT IT IS
SUSE-SPECIFIC, AND UPSTREAM DOES NOT APPROVE OF THIS CONCEPT BECAUSE IT
MAKES BUILDS NOT ENTIRELY REPRODUCIBLE.
SUSE-Bugs: bsc#1065609 bsc#1057743 bsc#1055676 bsc#1030702
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/start.go | 5 +
daemon/suse_secrets.go | 415 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 420 insertions(+)
create mode 100644 daemon/suse_secrets.go
diff --git a/daemon/start.go b/daemon/start.go
index b967947af2ce..09e79e410310 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -123,6 +123,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
return err
}
+ // SUSE:secrets -- inject the SUSE secret store
+ if err := daemon.injectSuseSecretStore(container); err != nil {
+ return err
+ }
+
m, cleanup, err := daemon.setupMounts(ctx, container)
if err != nil {
return err
diff --git a/daemon/suse_secrets.go b/daemon/suse_secrets.go
new file mode 100644
index 000000000000..32b0ece91b59
--- /dev/null
+++ b/daemon/suse_secrets.go
@@ -0,0 +1,415 @@
+/*
+ * suse-secrets: patch for Docker to implement SUSE secrets
+ * Copyright (C) 2017-2021 SUSE LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package daemon
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/container"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/idtools"
+
+ swarmtypes "github.com/docker/docker/api/types/swarm"
+ swarmexec "github.com/moby/swarmkit/v2/agent/exec"
+ swarmapi "github.com/moby/swarmkit/v2/api"
+
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // Output to tell us in logs that SUSE:secrets is enabled.
+ logrus.Infof("SUSE:secrets :: enabled")
+}
+
+// Creating a fake file.
+type SuseFakeFile struct {
+ Path string
+ Uid int
+ Gid int
+ Mode os.FileMode
+ Data []byte
+}
+
+func (s SuseFakeFile) id() string {
+ // NOTE: It is _very_ important that this string always has a prefix of
+ // "suse". This is how we can ensure that we can operate on
+ // SecretReferences with a confidence that it was made by us.
+ return fmt.Sprintf("suse_%s_%s", digest.FromBytes(s.Data).Hex(), s.Path)
+}
+
+func (s SuseFakeFile) toSecret() *swarmapi.Secret {
+ return &swarmapi.Secret{
+ ID: s.id(),
+ Internal: true,
+ Spec: swarmapi.SecretSpec{
+ Data: s.Data,
+ },
+ }
+}
+
+func (s SuseFakeFile) toSecretReference(idMaps idtools.IdentityMapping) *swarmtypes.SecretReference {
+ // Figure out the host-facing {uid,gid} based on the provided maps. Fall
+ // back to root if the UID/GID don't match (we are guaranteed that root is
+ // mapped).
+ ctrUser := idtools.Identity{UID: s.Uid, GID: s.Gid}
+ hostUser := idMaps.RootPair()
+ if user, err := idMaps.ToHost(ctrUser); err == nil {
+ hostUser = user
+ }
+
+ // Return the secret reference as a file target.
+ return &swarmtypes.SecretReference{
+ SecretID: s.id(),
+ SecretName: s.id(),
+ File: &swarmtypes.SecretReferenceFileTarget{
+ Name: s.Path,
+ UID: fmt.Sprintf("%d", hostUser.UID),
+ GID: fmt.Sprintf("%d", hostUser.GID),
+ Mode: s.Mode,
+ },
+ }
+}
+
+// readDir will recurse into a directory prefix/dir, and return the set of
+// secrets in that directory (as a tar archive that is packed inside the "data"
+// field). The Path attribute of each has the prefix stripped. Symlinks are
+// dereferenced.
+func readDir(prefix, dir string) ([]*SuseFakeFile, error) {
+ var suseFiles []*SuseFakeFile
+
+ path := filepath.Join(prefix, dir)
+ fi, err := os.Stat(path)
+ if err != nil {
+ // Ignore missing files.
+ if os.IsNotExist(err) {
+ // If the path itself exists (Lstat succeeds), it was a dangling
+ // symlink, so warn about it.
+ _, err2 := os.Lstat(path)
+ if !os.IsNotExist(err2) {
+ logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path)
+ }
+ return nil, nil
+ }
+ return nil, err
+ } else if !fi.IsDir() {
+ // Just to be safe.
+ logrus.Infof("SUSE:secrets :: expected %q to be a directory, but was a file", path)
+ return readFile(prefix, dir)
+ }
+ path, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Construct a tar archive of the source directory. We tar up the prefix
+ // directory and add dir as an IncludeFiles specifically so that we
+ // preserve the name of the directory itself.
+ tarStream, err := archive.TarWithOptions(path, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to tar source directory %q: %v", path, err)
+ }
+ tarStreamBytes, err := ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read full tar archive: %v", err)
+ }
+
+ // Get a list of the symlinks in the tar archive.
+ var symlinks []string
+ tmpTr := tar.NewReader(bytes.NewBuffer(tarStreamBytes))
+ for {
+ hdr, err := tmpTr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read through tar reader: %v", err)
+ }
+ if hdr.Typeflag == tar.TypeSymlink {
+ symlinks = append(symlinks, hdr.Name)
+ }
+ }
+
+ // Symlinks aren't dereferenced in the above archive, so we explicitly do a
+ // rewrite of the tar archive to include all symlinks to files. We cannot
+ // do directories here, but lower-level directory symlinks aren't supported
+ // by zypper so this isn't an issue.
+ symlinkModifyMap := map[string]archive.TarModifierFunc{}
+ for _, sym := range symlinks {
+ logrus.Debugf("SUSE:secrets: archive(%q) %q is a need-to-rewrite symlink", path, sym)
+ symlinkModifyMap[sym] = func(tarPath string, hdr *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
+ logrus.Debugf("SUSE:secrets: archive(%q) mapping for symlink %q", path, tarPath)
+ tarFullPath := filepath.Join(path, tarPath)
+
+ // Get a copy of the original byte stream.
+ oldContent, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, nil, fmt.Errorf("suse_rewrite: failed to read archive entry %q: %v", tarPath, err)
+ }
+
+ // Check that the file actually exists.
+ fi, err := os.Stat(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to stat archive entry %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ // Read the actual contents.
+ content, err := ioutil.ReadFile(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to read %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ newHdr, err := tar.FileInfoHeader(fi, "")
+ if err != nil {
+ // Fake the header.
+ newHdr = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ }
+ }
+
+ // Update the key fields.
+ hdr.Typeflag = newHdr.Typeflag
+ hdr.Mode = newHdr.Mode
+ hdr.Linkname = ""
+ return hdr, content, nil
+ }
+ }
+
+ // Create the rewritten tar stream.
+ tarStream = archive.ReplaceFileTarWrapper(ioutil.NopCloser(bytes.NewBuffer(tarStreamBytes)), symlinkModifyMap)
+ tarStreamBytes, err = ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read rewritten archive: %v", err)
+ }
+
+ // Add the tar stream as a "file".
+ suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: dir,
+ Mode: fi.Mode(),
+ Data: tarStreamBytes,
+ })
+ return suseFiles, nil
+}
+
+// readFile returns a secret given a file under a given prefix.
+func readFile(prefix, file string) ([]*SuseFakeFile, error) {
+ path := filepath.Join(prefix, file)
+ fi, err := os.Stat(path)
+ if err != nil {
+ // Ignore missing files.
+ if os.IsNotExist(err) {
+ // If the path itself exists (Lstat succeeds), it was a dangling
+ // symlink, so warn about it.
+ _, err2 := os.Lstat(path)
+ if !os.IsNotExist(err2) {
+ logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path)
+ }
+ return nil, nil
+ }
+ return nil, err
+ } else if fi.IsDir() {
+ // Just to be safe.
+ logrus.Infof("SUSE:secrets :: expected %q to be a file, but was a directory", path)
+ return readDir(prefix, file)
+ }
+
+ var uid, gid int
+ if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+ uid, gid = int(stat.Uid), int(stat.Gid)
+ } else {
+ logrus.Warnf("SUSE:secrets :: failed to cast file stat_t: defaulting to owned by root:root: %s", path)
+ uid, gid = 0, 0
+ }
+
+ bytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var suseFiles []*SuseFakeFile
+ suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: file,
+ Uid: uid,
+ Gid: gid,
+ Mode: fi.Mode(),
+ Data: bytes,
+ })
+ return suseFiles, nil
+}
+
+// getHostSuseSecretData returns the list of SuseFakeFiles that need to be added
+// as SUSE secrets.
+func getHostSuseSecretData() ([]*SuseFakeFile, error) {
+ secrets := []*SuseFakeFile{}
+
+ credentials, err := readDir("/etc/zypp", "credentials.d")
+ if err != nil {
+ if os.IsNotExist(err) {
+ credentials = []*SuseFakeFile{}
+ } else {
+ logrus.Errorf("SUSE:secrets :: error while reading zypp credentials: %s", err)
+ return nil, err
+ }
+ }
+ secrets = append(secrets, credentials...)
+
+ suseConnect, err := readFile("/etc", "SUSEConnect")
+ if err != nil {
+ if os.IsNotExist(err) {
+ suseConnect = []*SuseFakeFile{}
+ } else {
+ logrus.Errorf("SUSE:secrets :: error while reading /etc/SUSEConnect: %s", err)
+ return nil, err
+ }
+ }
+ secrets = append(secrets, suseConnect...)
+
+ return secrets, nil
+}
+
+// To fake an empty store, in the case where we are operating on a container
+// that was created pre-swarmkit. Otherwise segfaults and other fun things
+// happen. See bsc#1057743.
+type (
+ suseEmptyStore struct{}
+ suseEmptySecret struct{}
+ suseEmptyConfig struct{}
+ suseEmptyVolume struct{}
+)
+
+// In order to reduce the amount of code touched outside of this file, we
+// implement the swarm API for DependencyGetter. This asserts that this
+// requirement will always be matched. In addition, for the case of the *empty*
+// getters this reduces memory usage by having a global instance.
+var (
+ _ swarmexec.DependencyGetter = &suseDependencyStore{}
+ emptyStore swarmexec.DependencyGetter = suseEmptyStore{}
+ emptySecret swarmexec.SecretGetter = suseEmptySecret{}
+ emptyConfig swarmexec.ConfigGetter = suseEmptyConfig{}
+ emptyVolume swarmexec.VolumeGetter = suseEmptyVolume{}
+)
+
+var errSuseEmptyStore = fmt.Errorf("SUSE:secrets :: tried to get a resource from empty store [this is a bug]")
+
+func (_ suseEmptyConfig) Get(_ string) (*swarmapi.Config, error) { return nil, errSuseEmptyStore }
+func (_ suseEmptySecret) Get(_ string) (*swarmapi.Secret, error) { return nil, errSuseEmptyStore }
+func (_ suseEmptyVolume) Get(_ string) (string, error) { return "", errSuseEmptyStore }
+func (_ suseEmptyStore) Secrets() swarmexec.SecretGetter { return emptySecret }
+func (_ suseEmptyStore) Configs() swarmexec.ConfigGetter { return emptyConfig }
+func (_ suseEmptyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume }
+
+type suseDependencyStore struct {
+ dfl swarmexec.DependencyGetter
+ secrets map[string]*swarmapi.Secret
+}
+
+// The following are effectively dumb wrappers that return ourselves, or the
+// default.
+func (s *suseDependencyStore) Secrets() swarmexec.SecretGetter { return s }
+func (s *suseDependencyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume }
+func (s *suseDependencyStore) Configs() swarmexec.ConfigGetter { return s.dfl.Configs() }
+
+// Get overrides the underlying DependencyGetter with our own secrets (falling
+// through to the underlying DependencyGetter if the secret isn't present).
+func (s *suseDependencyStore) Get(id string) (*swarmapi.Secret, error) {
+ logrus.Debugf("SUSE:secrets :: id=%s requested from suseDependencyGetter", id)
+
+ secret, ok := s.secrets[id]
+ if !ok {
+ // fallthrough
+ return s.dfl.Secrets().Get(id)
+ }
+ return secret, nil
+}
+
+// removeSuseSecrets removes any SecretReferences which were added by us
+// explicitly (this is detected by checking that the prefix has a 'suse'
+// prefix). See bsc#1057743.
+func removeSuseSecrets(c *container.Container) {
+ var without []*swarmtypes.SecretReference
+ for _, secret := range c.SecretReferences {
+ if strings.HasPrefix(secret.SecretID, "suse") {
+ logrus.Warnf("SUSE:secrets :: removing 'old' suse secret %q from container %q", secret.SecretID, c.ID)
+ continue
+ }
+ without = append(without, secret)
+ }
+ c.SecretReferences = without
+}
+
+func (daemon *Daemon) injectSuseSecretStore(c *container.Container) error {
+ newDependencyStore := &suseDependencyStore{
+ dfl: c.DependencyStore,
+ secrets: make(map[string]*swarmapi.Secret),
+ }
+ // Handle old containers. See bsc#1057743.
+ if newDependencyStore.dfl == nil {
+ newDependencyStore.dfl = emptyStore
+ }
+
+ // We drop any "old" SUSE secrets, as it appears that old containers (when
+ // restarted) could still have references to old secrets. The .id() of all
+ // secrets have a prefix of "suse" so this is much easier. See bsc#1057743
+ // for details on why this could cause issues.
+ removeSuseSecrets(c)
+
+ secrets, err := getHostSuseSecretData()
+ if err != nil {
+ return err
+ }
+
+ idMaps := daemon.idMapping
+ for _, secret := range secrets {
+ newDependencyStore.secrets[secret.id()] = secret.toSecret()
+ c.SecretReferences = append(c.SecretReferences, secret.toSecretReference(idMaps))
+ }
+
+ c.DependencyStore = newDependencyStore
+
+ // bsc#1057743 -- In older versions of Docker we added volumes explicitly
+ // to the mount list. This causes clashes because of duplicate namespaces.
+ // If we see an existing mount that will clash with the in-built secrets
+ // mount we assume it's our fault.
+ intendedMounts, err := c.SecretMounts()
+ if err != nil {
+ logrus.Warnf("SUSE:secrets :: fetching old secret mounts: %v", err)
+ return err
+ }
+ for _, intendedMount := range intendedMounts {
+ mountPath := intendedMount.Destination
+ if volume, ok := c.MountPoints[mountPath]; ok {
+ logrus.Debugf("SUSE:secrets :: removing pre-existing %q mount: %#v", mountPath, volume)
+ delete(c.MountPoints, mountPath)
+ }
+ }
+ return nil
+}
--
2.45.2
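
For reference, the "suse"-prefixed ID scheme is what ties this patch together: injectSuseSecretStore registers host credentials under these IDs, and removeSuseSecrets strips any reference carrying the prefix when a container is restarted (bsc#1057743). A minimal sketch of the scheme, assuming plain crypto/sha256 in place of opencontainers/go-digest (whose default algorithm is also SHA-256):

package secrets

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// fakeSecretID mirrors SuseFakeFile.id() above; the "suse" prefix is the
// marker later used to recognise and remove our injected references.
func fakeSecretID(path string, data []byte) string {
	return fmt.Sprintf("suse_%x_%s", sha256.Sum256(data), path)
}

// isSuseSecret is the predicate removeSuseSecrets applies to each reference.
func isSuseSecret(secretID string) bool {
	return strings.HasPrefix(secretID, "suse")
}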

0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch (new file)
@@ -0,0 +1,46 @@
From 983a57fd37dc8e42e9c4e4dfc72eb346a4385948 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Mon, 22 May 2023 15:44:54 +1000
Subject: [PATCH 3/7] BUILD: SLE12: revert "graphdriver/btrfs: use kernel UAPI
headers"
This reverts commit 3208dcabdc8997340b255f5b880fef4e3f54580d.
On SLE 12, our UAPI headers are too old, resulting in us being unable to
build the btrfs driver with the new headers. This patch is only needed
for SLE-12.
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
daemon/graphdriver/btrfs/btrfs.go | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index 6aaa33cf7622..7264d4036427 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -4,17 +4,12 @@ package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
/*
#include <stdlib.h>
-#include <stdio.h>
#include <dirent.h>
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
- #error "Headers from kernel >= 4.12 are required to build with Btrfs support."
- #error "HINT: Set 'DOCKER_BUILDTAGS=exclude_graphdriver_btrfs' to build without Btrfs."
-#endif
-
-#include <linux/btrfs.h>
-#include <linux/btrfs_tree.h>
+// keep struct field name compatible with btrfs-progs < 6.1.
+#define max_referenced max_rfer
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
--
2.45.2

0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch (new file)
@@ -0,0 +1,89 @@
From 8829bb8ec53399fd41dd6f46e2bad64e773e8eaa Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Fri, 29 Jun 2018 17:59:30 +1000
Subject: [PATCH 4/7] bsc1073877: apparmor: clobber docker-default profile on
start
In the process of making docker-default reloading far less expensive,
567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor
profiles") mistakenly made the initial profile load at dockerd start-up
lazy. As a result, if you have a running Docker daemon and upgrade it to
a new one with an updated AppArmor profile the new profile will not take
effect (because the old one is still loaded). The fix for this is quite
trivial, and just requires us to clobber the profile on start-up.
Fixes: 567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor profiles")
SUSE-Bugs: bsc#1099277
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/apparmor_default.go | 14 ++++++++++----
daemon/apparmor_default_unsupported.go | 4 ++++
daemon/daemon.go | 5 +++--
3 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go
index 81e10b6cbec0..e695667a190f 100644
--- a/daemon/apparmor_default.go
+++ b/daemon/apparmor_default.go
@@ -23,6 +23,15 @@ func DefaultApparmorProfile() string {
return ""
}
+func clobberDefaultAppArmorProfile() error {
+ if apparmor.HostSupports() {
+ if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
+ return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
+ }
+ }
+ return nil
+}
+
func ensureDefaultAppArmorProfile() error {
if apparmor.HostSupports() {
loaded, err := aaprofile.IsLoaded(defaultAppArmorProfile)
@@ -36,10 +45,7 @@ func ensureDefaultAppArmorProfile() error {
}
// Load the profile.
- if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
- return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
- }
+ return clobberDefaultAppArmorProfile()
}
-
return nil
}
diff --git a/daemon/apparmor_default_unsupported.go b/daemon/apparmor_default_unsupported.go
index be4938f5b61a..2b326fea5829 100644
--- a/daemon/apparmor_default_unsupported.go
+++ b/daemon/apparmor_default_unsupported.go
@@ -2,6 +2,10 @@
package daemon // import "github.com/docker/docker/daemon"
+func clobberDefaultAppArmorProfile() error {
+ return nil
+}
+
func ensureDefaultAppArmorProfile() error {
return nil
}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index e7ca77d8cbfc..13b39538fb00 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -916,8 +916,9 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
log.G(ctx).Warnf("Failed to configure golang's threads limit: %v", err)
}
- // ensureDefaultAppArmorProfile does nothing if apparmor is disabled
- if err := ensureDefaultAppArmorProfile(); err != nil {
+ // Make sure we clobber any pre-existing docker-default profile to ensure
+ // that upgrades to the profile actually work smoothly.
+ if err := clobberDefaultAppArmorProfile(); err != nil {
log.G(ctx).Errorf(err.Error())
}
--
2.45.2
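
The two workflows are easiest to compare side by side. A sketch of the failure mode and the fix, using a hypothetical simplified API rather than the real aaprofile functions:

package apparmor

// ensureProfile models the lazy workflow from 567ef8e7858c: a profile that is
// already loaded is never replaced, so after a daemon upgrade the stale
// profile stays in effect.
func ensureProfile(isLoaded func() (bool, error), install func() error) error {
	loaded, err := isLoaded()
	if err != nil {
		return err
	}
	if loaded {
		return nil // upgrade bug: the old profile keeps being enforced
	}
	return install()
}

// clobberProfile models the patched start-up path: install unconditionally,
// so the profile shipped with the new daemon always takes effect.
func clobberProfile(install func() error) error {
	return install()
}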

0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch (new file)
@@ -0,0 +1,326 @@
From 24173cd6a2643e5e680e84920864f42ed43b6f28 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 11 Oct 2023 21:19:12 +1100
Subject: [PATCH 5/7] SLE12: revert "apparmor: remove version-conditionals from
template"
This reverts the following commits:
* 7008a514493a ("profiles/apparmor: remove version-conditional constraints (< 2.8.96)")
* 2e19a4d56bf2 ("contrib/apparmor: remove version-conditionals (< 2.9) from template")
* d169a5730649 ("contrib/apparmor: remove remaining version-conditionals (< 2.9) from template")
* ecaab085db4b ("profiles/apparmor: remove use of aaparser.GetVersion()")
* e3e715666f95 ("pkg/aaparser: deprecate GetVersion, as it's no longer used")
These version conditionals are still required on SLE 12, where our
apparmor_parser version is quite old.
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
contrib/apparmor/main.go | 16 ++++++-
contrib/apparmor/template.go | 16 +++++++
pkg/aaparser/aaparser.go | 86 +++++++++++++++++++++++++++++++++++
profiles/apparmor/apparmor.go | 16 ++++++-
profiles/apparmor/template.go | 4 ++
5 files changed, 134 insertions(+), 4 deletions(-)
create mode 100644 pkg/aaparser/aaparser.go
diff --git a/contrib/apparmor/main.go b/contrib/apparmor/main.go
index 899d8378edae..93f98cbd20e5 100644
--- a/contrib/apparmor/main.go
+++ b/contrib/apparmor/main.go
@@ -6,9 +6,13 @@ import (
"os"
"path"
"text/template"
+
+ "github.com/docker/docker/pkg/aaparser"
)
-type profileData struct{}
+type profileData struct {
+ Version int
+}
func main() {
if len(os.Args) < 2 {
@@ -18,6 +22,15 @@ func main() {
// parse the arg
apparmorProfilePath := os.Args[1]
+ version, err := aaparser.GetVersion()
+ if err != nil {
+ log.Fatal(err)
+ }
+ data := profileData{
+ Version: version,
+ }
+ fmt.Printf("apparmor_parser is of version %+v\n", data)
+
// parse the template
compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate)
if err != nil {
@@ -35,7 +48,6 @@ func main() {
}
defer f.Close()
- data := profileData{}
if err := compiled.Execute(f, data); err != nil {
log.Fatalf("executing template failed: %v", err)
}
diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go
index 58afcbe845ee..e6d0b6d37c58 100644
--- a/contrib/apparmor/template.go
+++ b/contrib/apparmor/template.go
@@ -20,9 +20,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
umount,
pivot_root,
+{{if ge .Version 209000}}
signal (receive) peer=@{profile_name},
signal (receive) peer=unconfined,
signal (send),
+{{end}}
network,
capability,
owner /** rw,
@@ -45,10 +47,12 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/etc/ld.so.cache r,
/etc/passwd r,
+{{if ge .Version 209000}}
ptrace peer=@{profile_name},
ptrace (read) peer=docker-default,
deny ptrace (trace) peer=docker-default,
deny ptrace peer=/usr/bin/docker///bin/ps,
+{{end}}
/usr/lib/** rm,
/lib/** rm,
@@ -69,9 +73,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/sbin/zfs rCx,
/sbin/apparmor_parser rCx,
+{{if ge .Version 209000}}
# Transitions
change_profile -> docker-*,
change_profile -> unconfined,
+{{end}}
profile /bin/cat (complain) {
/etc/ld.so.cache r,
@@ -93,8 +99,10 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/dev/null rw,
/bin/ps mr,
+{{if ge .Version 209000}}
# We don't need ptrace so we'll deny and ignore the error.
deny ptrace (read, trace),
+{{end}}
# Quiet dac_override denials
deny capability dac_override,
@@ -112,11 +120,15 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/proc/tty/drivers r,
}
profile /sbin/iptables (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability net_admin,
}
profile /sbin/auplink flags=(attach_disconnected, complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability sys_admin,
capability dac_override,
@@ -135,7 +147,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
/proc/[0-9]*/mounts rw,
}
profile /sbin/modprobe /bin/kmod (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
capability sys_module,
/etc/ld.so.cache r,
/lib/** rm,
@@ -149,7 +163,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
}
# xz works via pipes, so we do not need access to the filesystem.
profile /usr/bin/xz (complain) {
+{{if ge .Version 209000}}
signal (receive) peer=/usr/bin/docker,
+{{end}}
/etc/ld.so.cache r,
/lib/** rm,
/usr/bin/xz rm,
diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go
new file mode 100644
index 000000000000..89b48b2dba58
--- /dev/null
+++ b/pkg/aaparser/aaparser.go
@@ -0,0 +1,86 @@
+// Package aaparser is a convenience package interacting with `apparmor_parser`.
+package aaparser // import "github.com/docker/docker/pkg/aaparser"
+
+import (
+ "fmt"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+const (
+ binary = "apparmor_parser"
+)
+
+// GetVersion returns the major and minor version of apparmor_parser.
+func GetVersion() (int, error) {
+ output, err := cmd("", "--version")
+ if err != nil {
+ return -1, err
+ }
+
+ return parseVersion(output)
+}
+
+// cmd runs `apparmor_parser` with the passed arguments.
+func cmd(dir string, arg ...string) (string, error) {
+ c := exec.Command(binary, arg...)
+ c.Dir = dir
+
+ output, err := c.CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
+ }
+
+ return string(output), nil
+}
+
+// parseVersion takes the output from `apparmor_parser --version` and returns
+// a representation of the {major, minor, patch} version as a single number of
+// the form MMmmPPP, i.e. major*10^5 + minor*10^3 + patch.
+func parseVersion(output string) (int, error) {
+ // output is in the form of the following:
+ // AppArmor parser version 2.9.1
+ // Copyright (C) 1999-2008 Novell Inc.
+ // Copyright 2009-2012 Canonical Ltd.
+
+ lines := strings.SplitN(output, "\n", 2)
+ words := strings.Split(lines[0], " ")
+ version := words[len(words)-1]
+
+ // trim "-beta1" suffix from version="3.0.0-beta1" if exists
+ version = strings.SplitN(version, "-", 2)[0]
+ // also trim "~..." suffix used historically (https://gitlab.com/apparmor/apparmor/-/commit/bca67d3d27d219d11ce8c9cc70612bd637f88c10)
+ version = strings.SplitN(version, "~", 2)[0]
+
+ // split by major minor version
+ v := strings.Split(version, ".")
+ if len(v) == 0 || len(v) > 3 {
+ return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
+ }
+
+ // Default the versions to 0.
+ var majorVersion, minorVersion, patchLevel int
+
+ majorVersion, err := strconv.Atoi(v[0])
+ if err != nil {
+ return -1, err
+ }
+
+ if len(v) > 1 {
+ minorVersion, err = strconv.Atoi(v[1])
+ if err != nil {
+ return -1, err
+ }
+ }
+ if len(v) > 2 {
+ patchLevel, err = strconv.Atoi(v[2])
+ if err != nil {
+ return -1, err
+ }
+ }
+
+ // major*10^5 + minor*10^3 + patch*10^0
+ numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
+ return numericVersion, nil
+}
diff --git a/profiles/apparmor/apparmor.go b/profiles/apparmor/apparmor.go
index 277c853ebe1f..d1aad80cbfd2 100644
--- a/profiles/apparmor/apparmor.go
+++ b/profiles/apparmor/apparmor.go
@@ -11,10 +11,14 @@ import (
"path"
"strings"
"text/template"
+
+ "github.com/docker/docker/pkg/aaparser"
)
-// profileDirectory is the file store for apparmor profiles and macros.
-const profileDirectory = "/etc/apparmor.d"
+var (
+ // profileDirectory is the file store for apparmor profiles and macros.
+ profileDirectory = "/etc/apparmor.d"
+)
// profileData holds information about the given profile for generation.
type profileData struct {
@@ -26,6 +30,8 @@ type profileData struct {
Imports []string
// InnerImports defines the apparmor functions to import in the profile.
InnerImports []string
+ // Version is the {major, minor, patch} version of apparmor_parser as a single number.
+ Version int
}
// generateDefault creates an apparmor profile from ProfileData.
@@ -45,6 +51,12 @@ func (p *profileData) generateDefault(out io.Writer) error {
p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
}
+ ver, err := aaparser.GetVersion()
+ if err != nil {
+ return err
+ }
+ p.Version = ver
+
return compiled.Execute(out, p)
}
diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go
index 8dbc1b610288..2062aab1ac99 100644
--- a/profiles/apparmor/template.go
+++ b/profiles/apparmor/template.go
@@ -23,6 +23,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
capability,
file,
umount,
+{{if ge .Version 208096}}
# Host (privileged) processes may send signals to container processes.
signal (receive) peer=unconfined,
# runc may send signals to container processes (for "docker stop").
@@ -33,6 +34,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
signal (receive) peer={{.DaemonProfile}},
# Container processes may send signals amongst themselves.
signal (send,receive) peer={{.Name}},
+{{end}}
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
# deny write to files not in /proc/<number>/** or /proc/sys/**
@@ -53,7 +55,9 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
deny /sys/devices/virtual/powercap/** rwklx,
deny /sys/kernel/security/** rwklx,
+{{if ge .Version 208095}}
# suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
ptrace (trace,read,tracedby,readby) peer={{.Name}},
+{{end}}
}
`
--
2.45.2
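
The template conditionals compare against the packed integer produced by parseVersion. A quick worked sketch of the encoding (not part of the patch):

package main

import "fmt"

// encode mirrors the numeric scheme restored in pkg/aaparser:
// major*10^5 + minor*10^3 + patch.
func encode(major, minor, patch int) int {
	return major*100000 + minor*1000 + patch
}

func main() {
	fmt.Println(encode(2, 9, 1))  // 209001: passes the "ge .Version 209000" guards
	fmt.Println(encode(2, 8, 95)) // 208095: only the "ge .Version 208095" ptrace guard applies
}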

0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch (new file)
@@ -0,0 +1,890 @@
From dd16d113b9215bf5b0b56c409e7272ce07525836 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <cyphar@cyphar.com>
Date: Tue, 7 May 2024 01:51:25 +1000
Subject: [PATCH 6/7] bsc1221916: update to patched buildkit version to fix
symlink resolution
SUSE-Bugs: https://bugzilla.suse.com/show_bug.cgi?id=1221916
Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
---
vendor.mod | 2 +
vendor.sum | 4 +-
.../buildkit/cache/contenthash/checksum.go | 393 ++++++++++--------
.../moby/buildkit/cache/contenthash/path.go | 161 +++----
vendor/modules.txt | 3 +-
5 files changed, 314 insertions(+), 249 deletions(-)
diff --git a/vendor.mod b/vendor.mod
index d69d2aa9f87f..5c42a653b91b 100644
--- a/vendor.mod
+++ b/vendor.mod
@@ -114,6 +114,8 @@ require (
tags.cncf.io/container-device-interface v0.7.2
)
+replace github.com/moby/buildkit => github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94
+
require (
cloud.google.com/go v0.110.8 // indirect
cloud.google.com/go/compute v1.23.1 // indirect
diff --git a/vendor.sum b/vendor.sum
index 7a5bd6b4077b..f2aba7f8d3eb 100644
--- a/vendor.sum
+++ b/vendor.sum
@@ -199,6 +199,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94 h1:xBwPT+ap0LDYsQJh1VKm9NNEKF5A7e/P3TRjnbTqZUE=
+github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -480,8 +482,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
-github.com/moby/buildkit v0.13.2 h1:nXNszM4qD9E7QtG7bFWPnDI1teUQFQglBzon/IU3SzI=
-github.com/moby/buildkit v0.13.2/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
index e0f58d57b3db..ec649f69b5e0 100644
--- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
+++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
"sync"
+ "sync/atomic"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru/simplelru"
@@ -290,7 +291,7 @@ func keyPath(p string) string {
// HandleChange notifies the source about a modification operation
func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
p = keyPath(p)
- k := convertPathToKey([]byte(p))
+ k := convertPathToKey(p)
deleteDir := func(cr *CacheRecord) {
if cr.Type == CacheRecordTypeDir {
@@ -369,7 +370,7 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
// note that the source may be called later because data writing is async
if fi.Mode()&os.ModeSymlink == 0 && stat.Linkname != "" {
ln := path.Join("/", filepath.ToSlash(stat.Linkname))
- v, ok := cc.txn.Get(convertPathToKey([]byte(ln)))
+ v, ok := cc.txn.Get(convertPathToKey(ln))
if ok {
cp := *v.(*CacheRecord)
cr = &cp
@@ -407,7 +408,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
defer m.clean()
if !opts.Wildcard && len(opts.IncludePatterns) == 0 && len(opts.ExcludePatterns) == 0 {
- return cc.checksumFollow(ctx, m, p, opts.FollowLinks)
+ return cc.lazyChecksum(ctx, m, p, opts.FollowLinks)
}
includedPaths, err := cc.includedPaths(ctx, m, p, opts)
@@ -418,7 +419,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
if opts.FollowLinks {
for i, w := range includedPaths {
if w.record.Type == CacheRecordTypeSymlink {
- dgst, err := cc.checksumFollow(ctx, m, w.path, opts.FollowLinks)
+ dgst, err := cc.lazyChecksum(ctx, m, w.path, opts.FollowLinks)
if err != nil {
return "", err
}
@@ -445,30 +446,6 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
return digester.Digest(), nil
}
-func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string, follow bool) (digest.Digest, error) {
- const maxSymlinkLimit = 255
- i := 0
- for {
- if i > maxSymlinkLimit {
- return "", errors.Errorf("too many symlinks: %s", p)
- }
- cr, err := cc.checksumNoFollow(ctx, m, p)
- if err != nil {
- return "", err
- }
- if cr.Type == CacheRecordTypeSymlink && follow {
- link := cr.Linkname
- if !path.IsAbs(cr.Linkname) {
- link = path.Join(path.Dir(p), link)
- }
- i++
- p = link
- } else {
- return cr.Digest, nil
- }
- }
-}
-
func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, opts ChecksumOpts) ([]*includedPath, error) {
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -478,12 +455,12 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
}
root := cc.tree.Root()
- scan, err := cc.needsScan(root, "")
+ scan, err := cc.needsScan(root, "", false)
if err != nil {
return nil, err
}
if scan {
- if err := cc.scanPath(ctx, m, ""); err != nil {
+ if err := cc.scanPath(ctx, m, "", false); err != nil {
return nil, err
}
}
@@ -536,13 +513,13 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
}
} else {
origPrefix = p
- k = convertPathToKey([]byte(origPrefix))
+ k = convertPathToKey(origPrefix)
// We need to resolve symlinks here, in case the base path
// involves a symlink. That will match fsutil behavior of
// calling functions such as stat and walk.
var cr *CacheRecord
- k, cr, err = getFollowLinks(root, k, true)
+ k, cr, err = getFollowLinks(root, k, false)
if err != nil {
return nil, err
}
@@ -554,7 +531,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
iter.SeekLowerBound(append(append([]byte{}, k...), 0))
}
- resolvedPrefix = string(convertKeyToPath(k))
+ resolvedPrefix = convertKeyToPath(k)
} else {
k, _, keyOk = iter.Next()
}
@@ -565,7 +542,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
)
for keyOk {
- fn := string(convertKeyToPath(k))
+ fn := convertKeyToPath(k)
// Convert the path prefix from what we found in the prefix
// tree to what the argument specified.
@@ -751,36 +728,12 @@ func wildcardPrefix(root *iradix.Node, p string) (string, []byte, bool, error) {
return "", nil, false, nil
}
- linksWalked := 0
- k, cr, err := getFollowLinksWalk(root, convertPathToKey([]byte(d1)), true, &linksWalked)
+ // Only resolve the final symlink component if there are components in the
+ // wildcard segment.
+ k, cr, err := getFollowLinks(root, convertPathToKey(d1), d2 != "")
if err != nil {
return "", k, false, err
}
-
- if d2 != "" && cr != nil && cr.Type == CacheRecordTypeSymlink {
- // getFollowLinks only handles symlinks in path
- // components before the last component, so
- // handle last component in d1 specially.
- resolved := string(convertKeyToPath(k))
- for {
- v, ok := root.Get(k)
-
- if !ok {
- return d1, k, false, nil
- }
- if v.(*CacheRecord).Type != CacheRecordTypeSymlink {
- break
- }
-
- linksWalked++
- if linksWalked > 255 {
- return "", k, false, errors.Errorf("too many links")
- }
-
- resolved := cleanLink(resolved, v.(*CacheRecord).Linkname)
- k = convertPathToKey([]byte(resolved))
- }
- }
return d1, k, cr != nil, nil
}
@@ -816,19 +769,22 @@ func containsWildcards(name string) bool {
return false
}
-func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
+func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string, followTrailing bool) (digest.Digest, error) {
p = keyPath(p)
+ k := convertPathToKey(p)
+ // Try to look up the path directly without doing a scan.
cc.mu.RLock()
if cc.txn == nil {
root := cc.tree.Root()
cc.mu.RUnlock()
- v, ok := root.Get(convertPathToKey([]byte(p)))
- if ok {
- cr := v.(*CacheRecord)
- if cr.Digest != "" {
- return cr, nil
- }
+
+ _, cr, err := getFollowLinks(root, k, followTrailing)
+ if err != nil {
+ return "", err
+ }
+ if cr != nil && cr.Digest != "" {
+ return cr.Digest, nil
}
} else {
cc.mu.RUnlock()
@@ -848,7 +804,11 @@ func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string
}
}()
- return cc.lazyChecksum(ctx, m, p)
+ cr, err := cc.scanChecksum(ctx, m, p, followTrailing)
+ if err != nil {
+ return "", err
+ }
+ return cr.Digest, nil
}
func (cc *cacheContext) commitActiveTransaction() {
@@ -856,7 +816,7 @@ func (cc *cacheContext) commitActiveTransaction() {
addParentToMap(d, cc.dirtyMap)
}
for d := range cc.dirtyMap {
- k := convertPathToKey([]byte(d))
+ k := convertPathToKey(d)
if _, ok := cc.txn.Get(k); ok {
cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir})
}
@@ -867,21 +827,21 @@ func (cc *cacheContext) commitActiveTransaction() {
cc.txn = nil
}
-func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
+func (cc *cacheContext) scanChecksum(ctx context.Context, m *mount, p string, followTrailing bool) (*CacheRecord, error) {
root := cc.tree.Root()
- scan, err := cc.needsScan(root, p)
+ scan, err := cc.needsScan(root, p, followTrailing)
if err != nil {
return nil, err
}
if scan {
- if err := cc.scanPath(ctx, m, p); err != nil {
+ if err := cc.scanPath(ctx, m, p, followTrailing); err != nil {
return nil, err
}
}
- k := convertPathToKey([]byte(p))
+ k := convertPathToKey(p)
txn := cc.tree.Txn()
root = txn.Root()
- cr, updated, err := cc.checksum(ctx, root, txn, m, k, true)
+ cr, updated, err := cc.checksum(ctx, root, txn, m, k, followTrailing)
if err != nil {
return nil, err
}
@@ -890,9 +850,9 @@ func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*
return cr, err
}
-func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, follow bool) (*CacheRecord, bool, error) {
+func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, followTrailing bool) (*CacheRecord, bool, error) {
origk := k
- k, cr, err := getFollowLinks(root, k, follow)
+ k, cr, err := getFollowLinks(root, k, followTrailing)
if err != nil {
return nil, false, err
}
@@ -918,7 +878,9 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
}
h.Write(bytes.TrimPrefix(subk, k))
- subcr, _, err := cc.checksum(ctx, root, txn, m, subk, true)
+ // We do not follow trailing links when checksumming a directory's
+ // contents.
+ subcr, _, err := cc.checksum(ctx, root, txn, m, subk, false)
if err != nil {
return nil, false, err
}
@@ -935,7 +897,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
dgst = digest.NewDigest(digest.SHA256, h)
default:
- p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))
+ p := convertKeyToPath(bytes.TrimSuffix(k, []byte{0}))
target, err := m.mount(ctx)
if err != nil {
@@ -967,42 +929,82 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
return cr2, true, nil
}
-// needsScan returns false if path is in the tree or a parent path is in tree
-// and subpath is missing
-func (cc *cacheContext) needsScan(root *iradix.Node, p string) (bool, error) {
- var linksWalked int
- return cc.needsScanFollow(root, p, &linksWalked)
+// pathSet is a set of path prefixes that can be used to see if a given path is
+// lexically a child of any path in the set. All paths provided to this set
+// MUST be absolute and use / as the separator.
+type pathSet struct {
+ // prefixes contains paths of the form "/a/b/", so that we correctly detect
+ // /a/b as being a parent of /a/b/c but not /a/bc.
+ prefixes []string
}
-func (cc *cacheContext) needsScanFollow(root *iradix.Node, p string, linksWalked *int) (bool, error) {
- if p == "/" {
- p = ""
- }
- v, ok := root.Get(convertPathToKey([]byte(p)))
- if !ok {
- if p == "" {
- return true, nil
+// add a path to the set. This is a no-op if includes(path) == true.
+func (s *pathSet) add(p string) {
+ // Ensure the path is absolute and clean.
+ p = path.Join("/", p)
+ if !s.includes(p) {
+ if p != "/" {
+ p += "/"
}
- return cc.needsScanFollow(root, path.Clean(path.Dir(p)), linksWalked)
+ s.prefixes = append(s.prefixes, p)
+ }
+}
+
+// includes returns true iff there is a path in the pathSet which is a lexical
+// parent of the given path. The provided path MUST be an absolute path and
+// MUST NOT contain any ".." components, as they will be path.Clean'd.
+func (s pathSet) includes(p string) bool {
+ // Ensure the path is absolute and clean.
+ p = path.Join("/", p)
+ if p != "/" {
+ p += "/"
}
- cr := v.(*CacheRecord)
- if cr.Type == CacheRecordTypeSymlink {
- if *linksWalked > 255 {
- return false, errTooManyLinks
+ for _, prefix := range s.prefixes {
+ if strings.HasPrefix(p, prefix) {
+ return true
}
- *linksWalked++
- link := path.Clean(cr.Linkname)
- if !path.IsAbs(cr.Linkname) {
- link = path.Join("/", path.Dir(p), link)
+ }
+ return false
+}
+
+// needsScan returns false if path is in the tree or a parent path is in tree
+// and subpath is missing.
+func (cc *cacheContext) needsScan(root *iradix.Node, path string, followTrailing bool) (bool, error) {
+ var (
+ goodPaths pathSet
+ hasParentInTree bool
+ )
+ k := convertPathToKey(path)
+ _, cr, err := getFollowLinksCallback(root, k, followTrailing, func(subpath string, cr *CacheRecord) error {
+ // If we found a path that exists in the cache, add it to the set of
+ // known-scanned paths. Otherwise, verify whether the not-found subpath
+ // is inside a known-scanned path (we might have hit a "..", taking us
+ // out of the scanned paths, or we might hit a non-existent path inside
+ // a scanned path). getFollowLinksCallback iterates left-to-right, so
+ // we will always hit ancestors first.
+ if cr != nil {
+ hasParentInTree = cr.Type != CacheRecordTypeSymlink
+ goodPaths.add(subpath)
+ } else {
+ hasParentInTree = goodPaths.includes(subpath)
}
- return cc.needsScanFollow(root, link, linksWalked)
+ return nil
+ })
+ if err != nil {
+ return false, err
}
- return false, nil
+ return cr == nil && !hasParentInTree, nil
}
-func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) {
+// Only used by TestNeedScanChecksumRegression to make sure scanPath is not
+// called for paths we have already scanned.
+var (
+ scanCounterEnable bool
+ scanCounter atomic.Uint64
+)
+
+func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string, followTrailing bool) (retErr error) {
p = path.Join("/", p)
- d, _ := path.Split(p)
mp, err := m.mount(ctx)
if err != nil {
@@ -1012,33 +1014,42 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
n := cc.tree.Root()
txn := cc.tree.Txn()
- parentPath, err := rootPath(mp, filepath.FromSlash(d), func(p, link string) error {
+ resolvedPath, err := rootPath(mp, filepath.FromSlash(p), followTrailing, func(p, link string) error {
cr := &CacheRecord{
Type: CacheRecordTypeSymlink,
Linkname: filepath.ToSlash(link),
}
- k := []byte(path.Join("/", filepath.ToSlash(p)))
- k = convertPathToKey(k)
- txn.Insert(k, cr)
+ p = path.Join("/", filepath.ToSlash(p))
+ txn.Insert(convertPathToKey(p), cr)
return nil
})
if err != nil {
return err
}
- err = filepath.Walk(parentPath, func(itemPath string, fi os.FileInfo, err error) error {
+ // Scan the parent directory of the path we resolved, unless we're at the
+ // root (in which case we scan the root).
+ scanPath := filepath.Dir(resolvedPath)
+ if !strings.HasPrefix(filepath.ToSlash(scanPath)+"/", filepath.ToSlash(mp)+"/") {
+ scanPath = resolvedPath
+ }
+
+ err = filepath.Walk(scanPath, func(itemPath string, fi os.FileInfo, err error) error {
+ if scanCounterEnable {
+ scanCounter.Add(1)
+ }
if err != nil {
+ // If the root doesn't exist, ignore the error.
+ if itemPath == scanPath && errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
return errors.Wrapf(err, "failed to walk %s", itemPath)
}
rel, err := filepath.Rel(mp, itemPath)
if err != nil {
return err
}
- k := []byte(path.Join("/", filepath.ToSlash(rel)))
- if string(k) == "/" {
- k = []byte{}
- }
- k = convertPathToKey(k)
+ k := convertPathToKey(keyPath(rel))
if _, ok := n.Get(k); !ok {
cr := &CacheRecord{
Type: CacheRecordTypeFile,
@@ -1071,55 +1082,118 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
return nil
}
-func getFollowLinks(root *iradix.Node, k []byte, follow bool) ([]byte, *CacheRecord, error) {
- var linksWalked int
- return getFollowLinksWalk(root, k, follow, &linksWalked)
+// followLinksCallback is called after we try to resolve each element. If the
+// path was not found, cr is nil.
+type followLinksCallback func(path string, cr *CacheRecord) error
+
+// getFollowLinks is shorthand for getFollowLinksCallback(..., nil).
+func getFollowLinks(root *iradix.Node, k []byte, followTrailing bool) ([]byte, *CacheRecord, error) {
+ return getFollowLinksCallback(root, k, followTrailing, nil)
}
-func getFollowLinksWalk(root *iradix.Node, k []byte, follow bool, linksWalked *int) ([]byte, *CacheRecord, error) {
+// getFollowLinksCallback looks up the requested key, fully resolving any
+// symlink components encountered. The implementation is heavily based on
+// <https://github.com/cyphar/filepath-securejoin>.
+//
+// followTrailing indicates whether the *final component* of the path should be
+// resolved (effectively O_PATH|O_NOFOLLOW). Note that (in contrast to some
+// Linux APIs), followTrailing is obeyed even if the key has a trailing slash
+// (though paths like "foo/link/." will cause the link to be resolved).
+//
+// cb is a callback that is called for each path component encountered during
+// path resolution (after the path component is looked up in the cache). This
+// means for a path like /a/b/c, the callback will be called for at least
+//
+// {/, /a, /a/b, /a/b/c}
+//
+// Note that if any of the components are symlinks, the paths will depend on
+// the symlink contents and there will be more callbacks. If the requested key
+// has a trailing slash, the callback will also be called for the final
+// trailing-slash lookup (/a/b/c/ in the above example). Note that
+// getFollowLinksCallback will try to look up the original key directly first
+// and the callback is not called for this first lookup.
+func getFollowLinksCallback(root *iradix.Node, k []byte, followTrailing bool, cb followLinksCallback) ([]byte, *CacheRecord, error) {
v, ok := root.Get(k)
- if ok {
+ if ok && (!followTrailing || v.(*CacheRecord).Type != CacheRecordTypeSymlink) {
return k, v.(*CacheRecord), nil
}
- if !follow || len(k) == 0 {
+ if len(k) == 0 {
return k, nil, nil
}
- dir, file := splitKey(k)
+ var (
+ currentPath = "/"
+ remainingPath = convertKeyToPath(k)
+ linksWalked int
+ cr *CacheRecord
+ )
+ // Trailing slashes are significant for the cache, but path.Clean strips
+ // them. We only care about the slash for the final lookup.
+ remainingPath, hadTrailingSlash := strings.CutSuffix(remainingPath, "/")
+ for remainingPath != "" {
+ // Get next component.
+ var part string
+ if i := strings.IndexRune(remainingPath, '/'); i == -1 {
+ part, remainingPath = remainingPath, ""
+ } else {
+ part, remainingPath = remainingPath[:i], remainingPath[i+1:]
+ }
- k, parent, err := getFollowLinksWalk(root, dir, follow, linksWalked)
- if err != nil {
- return nil, nil, err
- }
- if parent != nil {
- if parent.Type == CacheRecordTypeSymlink {
- *linksWalked++
- if *linksWalked > 255 {
- return nil, nil, errors.Errorf("too many links")
+ // Apply the component to the path. Since it is a single component, and
+ // our current path contains no symlinks, we can just apply it
+ // lexically.
+ nextPath := keyPath(path.Join("/", currentPath, part))
+ // In contrast to rootPath, we don't skip lookups for no-op components
+ // or / because we need to call the callback for every path component
+ // we hit (including /) and we need to make sure that the CacheRecord
+ // we return is correct after every iteration.
+
+ cr = nil
+ v, ok := root.Get(convertPathToKey(nextPath))
+ if ok {
+ cr = v.(*CacheRecord)
+ }
+ if cb != nil {
+ if err := cb(nextPath, cr); err != nil {
+ return nil, nil, err
}
+ }
+ if !ok || cr.Type != CacheRecordTypeSymlink {
+ currentPath = nextPath
+ continue
+ }
+ if !followTrailing && remainingPath == "" {
+ currentPath = nextPath
+ break
+ }
- link := cleanLink(string(convertKeyToPath(dir)), parent.Linkname)
- return getFollowLinksWalk(root, append(convertPathToKey([]byte(link)), file...), follow, linksWalked)
+ linksWalked++
+ if linksWalked > maxSymlinkLimit {
+ return nil, nil, errTooManyLinks
}
- }
- k = append(k, file...)
- v, ok = root.Get(k)
- if ok {
- return k, v.(*CacheRecord), nil
- }
- return k, nil, nil
-}
-func cleanLink(dir, linkname string) string {
- dirPath := path.Clean(dir)
- if dirPath == "." || dirPath == "/" {
- dirPath = ""
+ remainingPath = cr.Linkname + "/" + remainingPath
+ if path.IsAbs(cr.Linkname) {
+ currentPath = "/"
+ }
}
- link := path.Clean(linkname)
- if !path.IsAbs(link) {
- return path.Join("/", path.Join(path.Dir(dirPath), link))
+ // We've already looked up the final component. However, if there was a
+ // trailing slash in the original path, we need to do the lookup again with
+ // the slash applied.
+ if hadTrailingSlash {
+ cr = nil
+ currentPath += "/"
+ v, ok := root.Get(convertPathToKey(currentPath))
+ if ok {
+ cr = v.(*CacheRecord)
+ }
+ if cb != nil {
+ if err := cb(currentPath, cr); err != nil {
+ return nil, nil, err
+ }
+ }
}
- return link
+ return convertPathToKey(currentPath), cr, nil
}
func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) {
@@ -1176,25 +1250,10 @@ func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) {
return
}
-func convertPathToKey(p []byte) []byte {
+func convertPathToKey(p string) []byte {
return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1)
}
-func convertKeyToPath(p []byte) []byte {
- return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1)
-}
-
-func splitKey(k []byte) ([]byte, []byte) {
- foundBytes := false
- i := len(k) - 1
- for {
- if i <= 0 || foundBytes && k[i] == 0 {
- break
- }
- if k[i] != 0 {
- foundBytes = true
- }
- i--
- }
- return append([]byte{}, k[:i]...), k[i:]
+func convertKeyToPath(p []byte) string {
+ return string(bytes.Replace(p, []byte{0}, []byte("/"), -1))
}
diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/path.go b/vendor/github.com/moby/buildkit/cache/contenthash/path.go
index 42b7fd8349c7..ae950f713241 100644
--- a/vendor/github.com/moby/buildkit/cache/contenthash/path.go
+++ b/vendor/github.com/moby/buildkit/cache/contenthash/path.go
@@ -1,108 +1,111 @@
+// This code mostly comes from <https://github.com/cyphar/filepath-securejoin>.
+
+// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+// Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package contenthash
import (
"os"
"path/filepath"
+ "strings"
"github.com/pkg/errors"
)
-var (
- errTooManyLinks = errors.New("too many links")
-)
+var errTooManyLinks = errors.New("too many links")
+
+const maxSymlinkLimit = 255
type onSymlinkFunc func(string, string) error
-// rootPath joins a path with a root, evaluating and bounding any
-// symlink to the root directory.
-// This is containerd/continuity/fs RootPath implementation with a callback on
-// resolving the symlink.
-func rootPath(root, path string, cb onSymlinkFunc) (string, error) {
- if path == "" {
+// rootPath joins a path with a root, evaluating and bounding any symlink to
+// the root directory. This is a slightly modified version of SecureJoin from
+// github.com/cyphar/filepath-securejoin, with a callback which we call after
+// each symlink resolution.
+func rootPath(root, unsafePath string, followTrailing bool, cb onSymlinkFunc) (string, error) {
+ if unsafePath == "" {
return root, nil
}
- var linksWalked int // to protect against cycles
- for {
- i := linksWalked
- newpath, err := walkLinks(root, path, &linksWalked, cb)
- if err != nil {
- return "", err
- }
- path = newpath
- if i == linksWalked {
- newpath = filepath.Join("/", newpath)
- if path == newpath {
- return filepath.Join(root, newpath), nil
- }
- path = newpath
- }
- }
-}
-func walkLink(root, path string, linksWalked *int, cb onSymlinkFunc) (newpath string, islink bool, err error) {
- if *linksWalked > 255 {