SHA256
1
0
forked from pool/docker
Aleksa Sarai 2017-10-18 04:27:06 +00:00 committed by Git OBS Bridge
parent e69ff9a2ac
commit 5bc18f1a1b
13 changed files with 426 additions and 356 deletions

View File

@ -1,10 +1,11 @@
<services> <services>
<service name="tar_scm" mode="disabled"> <service name="tar_scm" mode="disabled">
<param name="url">https://github.com/docker/docker.git</param> <param name="url">https://github.com/docker/docker-ce.git</param>
<param name="scm">git</param> <param name="scm">git</param>
<param name="exclude">.git</param> <param name="exclude">.git</param>
<param name="versionformat">17.04.0_ce</param> <param name="versionformat">17.07.0_ce</param>
<param name="revision">v17.04.0-ce</param> <param name="revision">v17.07.0-ce</param>
<param name="filename">docker</param>
</service> </service>
<service name="recompress" mode="disabled"> <service name="recompress" mode="disabled">
<param name="file">docker-*.tar</param> <param name="file">docker-*.tar</param>

View File

@ -1,69 +0,0 @@
From c117441b1a74affb013a42ee8225d69ecfaf4d72 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Tue, 9 May 2017 23:31:46 +1000
Subject: [PATCH] client: check tty before creating exec job
This is necessary in order to avoid execId leaks in the case where a
`docker exec -it` is run without a terminal available for the client.
You can reproduce this issue by running the following command many
times.
% nohup docker exec -it some_container true
The container `some_container` will have execIDs that will never
normally be cleaned up (because the client died before they were
started).
In addition, this patch adds a docker-inspect step to ensure that we
give "container does not exist" errors consistently.
[SUSE: Fixes bsc#1037436.]
Signed-off-by: Valentin Rothberg <vrothberg@suse.com>
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
cli/command/container/exec.go | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/cli/command/container/exec.go b/cli/command/container/exec.go
index 676708c77b91..d85113259242 100644
--- a/cli/command/container/exec.go
+++ b/cli/command/container/exec.go
@@ -79,6 +79,19 @@ func runExec(dockerCli *command.DockerCli, opts *execOptions, container string,
ctx := context.Background()
client := dockerCli.Client()
+ // We need to check the tty _before_ we do the ContainerExecCreate, because
+ // otherwise if we error out we will leak execIDs on the server (and
+ // there's no easy way to clean those up). But also in order to make "not
+ // exist" errors take precedence we do a dummy inspect first.
+ if _, err := client.ContainerInspect(ctx, container); err != nil {
+ return err
+ }
+ if !execConfig.Detach {
+ if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
+ return err
+ }
+ }
+
response, err := client.ContainerExecCreate(ctx, container, *execConfig)
if err != nil {
return err
@@ -90,12 +103,8 @@ func runExec(dockerCli *command.DockerCli, opts *execOptions, container string,
return nil
}
- //Temp struct for execStart so that we don't need to transfer all the execConfig
- if !execConfig.Detach {
- if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
- return err
- }
- } else {
+ // Temp struct for execStart so that we don't need to transfer all the execConfig.
+ if execConfig.Detach {
execStartCheck := types.ExecStartCheck{
Detach: execConfig.Detach,
Tty: execConfig.Tty,
--
2.12.2

View File

@ -1,60 +0,0 @@
From 9783e1791fc438751b327023b0cd7d392e54084f Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Thu, 18 May 2017 00:02:00 +1000
Subject: [PATCH] apparmor: make pkg/aaparser work on read-only root
This is necessary because normally `apparmor_parser -r` will try to
create a temporary directory on the host (which is not allowed if the
host has a rootfs). However, the -K option bypasses saving things to the
cache (which avoids this issue).
% apparmor_parser -r /tmp/docker-profile
mkstemp: Read-only file system
% apparmor_parser -Kr /tmp/docker-profile
%
In addition, add extra information to the ensureDefaultAppArmorProfile
errors so that problems like this are easier to debug.
Fixes: 2f7596aaef3a ("apparmor: do not save profile to /etc/apparmor.d")
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/apparmor_default.go | 2 +-
pkg/aaparser/aaparser.go | 7 ++++---
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go
index 09dd0541b872..2a418b25c241 100644
--- a/daemon/apparmor_default.go
+++ b/daemon/apparmor_default.go
@@ -28,7 +28,7 @@ func ensureDefaultAppArmorProfile() error {
// Load the profile.
if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil {
- return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", defaultApparmorProfile)
+ return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err)
}
}
diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go
index e794c4c729e2..5de4a4d79b35 100644
--- a/pkg/aaparser/aaparser.go
+++ b/pkg/aaparser/aaparser.go
@@ -22,10 +22,11 @@ func GetVersion() (int, error) {
return parseVersion(output)
}
-// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to
-// replace the profile.
+// LoadProfile runs `apparmor_parser -Kr` on a specified apparmor profile to
+// replace the profile. The `-K` is necessary to make sure that apparmor_parser
+// doesn't try to write to a read-only filesystem.
func LoadProfile(profilePath string) error {
- _, err := cmd("", "-r", profilePath)
+ _, err := cmd("", "-Kr", profilePath)
return err
}
--
2.12.2

View File

@ -0,0 +1,96 @@
From 6f18798a72d330f282ff7beb554d298f30531c8f Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de>
Date: Sun, 15 Oct 2017 17:06:20 +1100
Subject: [PATCH] daemon: oci: obey CL_UNPRIVILEGED for user namespaced daemon
When runc is bind-mounting a particular path "with options", it has to
do so by first creating a bind-mount and the modifying the options of
said bind-mount via remount. However, in a user namespace, there are
restrictions on which flags you can change with a remount (due to
CL_UNPRIVILEGED being set in this instance). Docker historically has
ignored this, and as a result, internal Docker mounts (such as secrets)
haven't worked with --userns-remap. Fix this by preserving
CL_UNPRIVILEGED mount flags when Docker is spawning containers with user
namespaces enabled.
SUSE-Bug: https://bugzilla.suse.com/show_bug.cgi?id=1055676
SUSE-Backport: https://github.com/moby/moby/pull/35205
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
daemon/oci_linux.go | 46 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 46 insertions(+)
diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go
index 9cf6674dfe11..0f1dabc31100 100644
--- a/daemon/oci_linux.go
+++ b/daemon/oci_linux.go
@@ -27,6 +27,7 @@ import (
"github.com/opencontainers/runc/libcontainer/devices"
"github.com/opencontainers/runc/libcontainer/user"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
)
var (
@@ -469,6 +470,38 @@ func ensureSharedOrSlave(path string) error {
return nil
}
+// Get the set of mount flags that are set on the mount that contains the given
+// path and are locked by CL_UNPRIVILEGED. This is necessary to ensure that
+// bind-mounting "with options" will not fail with user namespaces, due to
+// kernel restrictions that require user namespace mounts to preserve
+// CL_UNPRIVILEGED locked flags.
+func getUnprivilegedMountFlags(path string) ([]string, error) {
+ var statfs unix.Statfs_t
+ if err := unix.Statfs(path, &statfs); err != nil {
+ return nil, err
+ }
+
+ // The set of keys come from https://github.com/torvalds/linux/blob/v4.13/fs/namespace.c#L1034-L1048.
+ unprivilegedFlags := map[uint64]string{
+ unix.MS_RDONLY: "ro",
+ unix.MS_NODEV: "nodev",
+ unix.MS_NOEXEC: "noexec",
+ unix.MS_NOSUID: "nosuid",
+ unix.MS_NOATIME: "noatime",
+ unix.MS_RELATIME: "relatime",
+ unix.MS_NODIRATIME: "nodiratime",
+ }
+
+ var flags []string
+ for mask, flag := range unprivilegedFlags {
+ if uint64(statfs.Flags)&mask == mask {
+ flags = append(flags, flag)
+ }
+ }
+
+ return flags, nil
+}
+
var (
mountPropagationMap = map[string]int{
"private": mount.PRIVATE,
@@ -573,6 +606,19 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c
opts = append(opts, mountPropagationReverseMap[pFlag])
}
+ // If we are using user namespaces, then we must make sure that we
+ // don't drop any of the CL_UNPRIVILEGED "locked" flags of the source
+ // "mount" when we bind-mount. The reason for this is that at the point
+ // when runc sets up the root filesystem, it is already inside a user
+ // namespace, and thus cannot change any flags that are locked.
+ if daemon.configStore.RemappedRoot != "" {
+ unprivOpts, err := getUnprivilegedMountFlags(m.Source)
+ if err != nil {
+ return err
+ }
+ opts = append(opts, unprivOpts...)
+ }
+
mt.Options = opts
s.Mounts = append(s.Mounts, mt)
}
--
2.14.2

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c192552cebebba3e5af60af995fb7fd6f6423b8df71574e8a1f188878ae21913
size 4574004

3
docker-17.07.0_ce.tar.xz Normal file
View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:50c6b7f10e313ffe906b2fd72a6844f14d23458e2881a862c630c37c1c87f4b8
size 6142992

View File

@ -0,0 +1,6 @@
*** WARNING ***
Disabling previously installed (experimental) plugins. The original plugin file
is still available under /var/lib/docker/plugins/_plugins.json.old. This
configuration style is no longer supported by Docker after v1.13.0 (and will
cause it to fail to start).

View File

@ -1,3 +1,27 @@
-------------------------------------------------------------------
Mon Oct 16 11:06:22 UTC 2017 - asarai@suse.com
- Add backport of https://github.com/moby/moby/pull/35205. This used to be
fixed in docker-runc, but we're moving it here after upstream discussion.
bsc#1055676
+ bsc1055676-0001-daemon-oci-obey-CL_UNPRIVILEGED-for-user-namespaced-.patch
-------------------------------------------------------------------
Mon Oct 9 11:36:59 UTC 2017 - asarai@suse.com
- Update to Docker v17.07-ce. Upstream changelog:
https://github.com/docker/docker-ce/releases/tag/v17.06.0-ce
https://github.com/docker/docker-ce/releases/tag/v17.07.0-ce
- Removed no-longer needed patches.
- bsc1037436-0001-client-check-tty-before-creating-exec-job.patch
- bsc1037607-0001-apparmor-make-pkg-aaparser-work-on-read-only-root.patch
- Added backport of https://github.com/moby/moby/pull/34573. bsc#1045628
+ bsc1045628-0001-devicemapper-remove-container-rootfs-mountPath-after.patch
- Rewrite secrets patches to correctly handle directories in a way that doesn't
cause errors when starting new containers.
* secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
* secrets-0002-SUSE-implement-SUSE-container-secrets.patch
------------------------------------------------------------------- -------------------------------------------------------------------
Fri Sep 15 15:32:49 UTC 2017 - jmassaguerpla@suse.com Fri Sep 15 15:32:49 UTC 2017 - jmassaguerpla@suse.com

View File

@ -12,7 +12,6 @@ EnvironmentFile=/etc/sysconfig/docker
# containers won't start until someone tries to administer the Docker daemon. # containers won't start until someone tries to administer the Docker daemon.
Type=simple Type=simple
ExecStart=/usr/bin/dockerd --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/sbin/docker-runc $DOCKER_NETWORK_OPTIONS $DOCKER_OPTS ExecStart=/usr/bin/dockerd --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/sbin/docker-runc $DOCKER_NETWORK_OPTIONS $DOCKER_OPTS
ExecStartPost=/usr/lib/docker/docker_service_helper.sh wait
ExecReload=/bin/kill -s HUP $MAINPID ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead # Having non-zero Limit*s causes performance problems due to accounting overhead

View File

@ -20,9 +20,10 @@
%global docker_store %{_localstatedir}/lib/docker %global docker_store %{_localstatedir}/lib/docker
%global docker_migration_testfile %{docker_store}/.suse-image-migration-v1to2-complete %global docker_migration_testfile %{docker_store}/.suse-image-migration-v1to2-complete
%global docker_migration_warnfile %{docker_store}/docker-update-message.txt %global docker_migration_warnfile %{docker_store}/docker-update-message.txt
%global docker_plugin_warnfile %{docker_store}/docker-plugin-message.txt
%define docker_graph %{docker_store}/graph %define docker_graph %{docker_store}/graph
%define git_version 78d1802 %define git_version 78d1802
%define version_unconverted 17.04.0_ce %define version_unconverted 17.07.0_ce
%define __arch_install_post export NO_BRP_STRIP_DEBUG=true %define __arch_install_post export NO_BRP_STRIP_DEBUG=true
# When upgrading to a new version requires the service not to be restarted # When upgrading to a new version requires the service not to be restarted
# Due to a long migration process update last_migration_version to the new version # Due to a long migration process update last_migration_version to the new version
@ -30,12 +31,13 @@
# 1.10.1 # 1.10.1
%global last_migration_version 1.10.1 %global last_migration_version 1.10.1
Name: docker Name: docker
Version: 17.04.0_ce Version: 17.07.0_ce
Release: 0 Release: 0
Summary: The Linux container runtime Summary: The Linux container runtime
License: Apache-2.0 License: Apache-2.0
Group: System/Management Group: System/Management
Url: http://www.docker.io Url: http://www.docker.io
# TODO(VR): check those SOURCE files below
Source: %{name}-%{version}.tar.xz Source: %{name}-%{version}.tar.xz
Source1: docker.service Source1: docker.service
Source3: 80-docker.rules Source3: 80-docker.rules
@ -43,22 +45,18 @@ Source4: sysconfig.docker
Source6: docker-rpmlintrc Source6: docker-rpmlintrc
Source7: README_SUSE.md Source7: README_SUSE.md
Source8: docker-audit.rules Source8: docker-audit.rules
Source9: docker-update-message.txt Source9: tests.sh
Source10: tests.sh Source50: docker-update-message.txt
Source11: docker_service_helper.sh Source51: docker-plugin-message.txt
# SUSE-FEATURE: Adds the /run/secrets mountpoint inside all Docker containers # SUSE-FEATURE: Adds the /run/secrets mountpoint inside all Docker containers
# which is not snapshotted when images are committed. Note that if you modify # which is not snapshotted when images are committed. Note that if you modify
# this patch, please also modify the patch in the suse-secrets-v<version> # this patch, please also modify the patch in the suse-secrets-v<version>
# branch in http://github.com/suse/docker.mirror. # branch in http://github.com/suse/docker.mirror.
Patch200: secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch Patch200: secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
Patch201: secrets-0002-SUSE-implement-SUSE-container-secrets.patch Patch201: secrets-0002-SUSE-implement-SUSE-container-secrets.patch
# PATCH-FIX-UPSTREAM: Backports. # SUSE-BACKPORT: Backport of https://github.com/moby/moby/pull/35205. bsc#1055676
Patch300: integration-cli-fix-TestInfoEnsureSucceeds.patch Patch401: bsc1055676-0001-daemon-oci-obey-CL_UNPRIVILEGED-for-user-namespaced-.patch
# PATCH-FIX-UPSTREAM: Backport of https://github.com/docker/cli/pull/52 (bsc#1037436). # SUSE-BACKPORT: Backport of https://github.com/moby/moby/pull/34573. bsc#1045628
Patch400: bsc1037436-0001-client-check-tty-before-creating-exec-job.patch
# PATCH-FIX-UPSTREAM: Backport of https://github.com/moby/moby/pull/33250 (bsc#1037607).
Patch401: bsc1037607-0001-apparmor-make-pkg-aaparser-work-on-read-only-root.patch
# PATCH-FIX-UPSTREAM: Backport of https://github.com/moby/moby/pull/34573 (bsc#1045628)
Patch402: bsc1045628-0001-devicemapper-remove-container-rootfs-mountPath-after.patch Patch402: bsc1045628-0001-devicemapper-remove-container-rootfs-mountPath-after.patch
BuildRequires: audit BuildRequires: audit
BuildRequires: bash-completion BuildRequires: bash-completion
@ -90,13 +88,13 @@ BuildRequires: zsh
Requires: apparmor-parser Requires: apparmor-parser
Requires: bridge-utils Requires: bridge-utils
Requires: ca-certificates-mozilla Requires: ca-certificates-mozilla
Requires: docker-libnetwork = 0.0.0+git20170119.7b2b1fe Requires: docker-libnetwork = 0.7.0dev.3+git20170816.4a242dba7739
# Containerd and runC are required as they are the only currently supported # Containerd and runC are required as they are the only currently supported
# execdrivers of Docker. NOTE: The version pinning here matches upstream's # execdrivers of Docker. NOTE: The version pinning here matches upstream's
# Dockerfile to ensure that we don't use a slightly incompatible version of # vendor.conf to ensure that we don't use a slightly incompatible version of
# runC or containerd (which would be bad). # runC or containerd (which would be bad).
Requires: containerd = 0.2.5+gitr639_422e31c Requires: containerd = 0.2.3+gitr671_3addd8406531
Requires: docker-runc = 0.1.1+gitr2947_9c2d8d1 Requires: docker-runc = 1.0.0rc3+gitr3201_2d41c04
# Provides mkfs.ext4 - used by Docker when devicemapper storage driver is used # Provides mkfs.ext4 - used by Docker when devicemapper storage driver is used
Requires: e2fsprogs Requires: e2fsprogs
Requires: git-core >= 1.7 Requires: git-core >= 1.7
@ -117,9 +115,9 @@ Recommends: lvm2 >= 2.2.89
Conflicts: lxc < 1.0 Conflicts: lxc < 1.0
BuildRoot: %{_tmppath}/%{name}-%{version}-build BuildRoot: %{_tmppath}/%{name}-%{version}-build
ExcludeArch: %ix86 s390 ppc ExcludeArch: %ix86 s390 ppc
# Make sure we build with go 1.7 # Make sure we build with go 1.8
BuildRequires: go-go-md2man BuildRequires: go-go-md2man
BuildRequires: golang(API) = 1.7 BuildRequires: golang(API) = 1.8
%description %description
Docker complements LXC with a high-level API which operates at the process Docker complements LXC with a high-level API which operates at the process
@ -164,7 +162,7 @@ Requires: libapparmor-devel
Requires: libbtrfs-devel >= 3.8 Requires: libbtrfs-devel >= 3.8
Requires: procps Requires: procps
Requires: sqlite3-devel Requires: sqlite3-devel
Requires: golang(API) = 1.7 Requires: golang(API) = 1.8
%description test %description test
Test package for docker. It contains the source code and the tests. Test package for docker. It contains the source code and the tests.
@ -174,21 +172,18 @@ Test package for docker. It contains the source code and the tests.
%if 0%{?is_opensuse} %if 0%{?is_opensuse}
# nothing # nothing
%else %else
%patch200 -p1 %patch200 -p1 -d components/engine
%patch201 -p1 %patch201 -p1 -d components/engine
%endif %endif
%patch300 -p1 # bsc#1055676
# bsc#1037436 %patch401 -p1 -d components/engine
%patch400 -p1
# bsc#1037607
%patch401 -p1
# bsc#1045628 # bsc#1045628
%patch402 -p1 %patch402 -p1 -d components/engine
cp %{SOURCE7} . cp %{SOURCE7} .
cp %{SOURCE10} . cp %{SOURCE9} .
%build %build
BUILDTAGS="exclude_graphdriver_aufs apparmor selinux pkcs11" BUILDTAGS="exclude_graphdriver_aufs apparmor selinux pkcs11"
%if 0%{?with_libseccomp} %if 0%{?with_libseccomp}
BUILDTAGS="seccomp $BUILDTAGS" BUILDTAGS="seccomp $BUILDTAGS"
@ -212,22 +207,32 @@ EOF
) > docker_build_env ) > docker_build_env
. ./docker_build_env . ./docker_build_env
./hack/make.sh dynbinary # Preparing GOPATH so that the client is visible to the compiler
man/md2man-all.sh mkdir -p src/github.com/docker/
ln -s $(pwd)/components/cli $(pwd)/src/github.com/docker/cli
GOPATH=$GOPATH:$(pwd)
# DOCKER ENGINE
cd components/engine/
# ignore the warning that we compile outside a Docker container
./hack/make.sh dynbinary
# build the tests binary # build the tests binary
GOPATH=$(pwd)/vendor:$(pwd)/.gopath/ go test \ GOPATH=$(pwd)/vendor:$(pwd)/.gopath/ go test \
-buildmode=pie \ -buildmode=pie \
-tags "$DOCKER_BUILDTAGS daemon autogen" \ -tags "$DOCKER_BUILDTAGS daemon autogen" \
-c github.com/docker/docker/integration-cli -o tests.main -c github.com/docker/docker/integration-cli -o tests.main
cd ../..
# remove other than systemd # DOCKER CLIENT
# otherwise the resulting package will have extra requires cd components/cli
rm -rf hack/make/.build-deb ./scripts/build/dynbinary
./man/md2man-all.sh
cd ../..
%check %check
. ./docker_build_env . ./docker_build_env
# DOCKER ENGINE
# go test will look in github.com/docker/docker/vendor for vendored packages but # go test will look in github.com/docker/docker/vendor for vendored packages but
# Docker keeps them in github.com/docker/docker/vendor/src. Let's do it like # Docker keeps them in github.com/docker/docker/vendor/src. Let's do it like
# Docker does it and append github.com/docker/docker/vendor to the GOPATH so the # Docker does it and append github.com/docker/docker/vendor to the GOPATH so the
@ -241,13 +246,14 @@ mkdir -p $HOME/go/src/github.com/docker
rm -rf $HOME/go/src/github.com/docker/* rm -rf $HOME/go/src/github.com/docker/*
# go list -e ... doesn't seem to work with symlinks so do a full copy instead. # go list -e ... doesn't seem to work with symlinks so do a full copy instead.
cp -avr %{buildroot}/usr/src/%{name} $HOME/go/src/github.com/docker/docker cp -ar %{buildroot}/usr/src/docker/engine $HOME/go/src/github.com/docker/docker
cd $HOME/go/src/github.com/docker/docker cd $HOME/go/src/github.com/docker/docker
# The command is taken from hack/make/test-unit and various test runs. # The command is taken from hack/make/test-unit and various test runs.
# Everything that follows github.com/docker/pkg/integration-cli are packages # Everything that follows github.com/docker/pkg/integration-cli are packages
# containing tests that cannot run in an obs build context. # containing tests that cannot run in an obs build context. Some tests must be
# excluded as they will always fail in our build environments.
PKG_LIST=$(go list -e \ PKG_LIST=$(go list -e \
-f '{{if ne .Name "github.com/docker/docker"}} {{.ImportPath}} -f '{{if ne .Name "github.com/docker/docker"}} {{.ImportPath}}
{{end}}' \ {{end}}' \
@ -266,54 +272,57 @@ PKG_LIST=$(go list -e \
| grep -v 'github.com/docker/docker/registry$' \ | grep -v 'github.com/docker/docker/registry$' \
| grep -v 'github.com/docker/docker/volume/local$' \ | grep -v 'github.com/docker/docker/volume/local$' \
| grep -v 'github.com/docker/docker/builder$' \ | grep -v 'github.com/docker/docker/builder$' \
| grep -v 'github.com/docker/docker/daemon$' \ | grep -v 'github.com/docker/docker/builder/remotecontext' \
| grep -v 'github.com/docker/docker/daemon/graphdriver/btrfs$' \
| grep -v 'github.com/docker/docker/daemon/graphdriver/devmapper$' \
| grep -v 'github.com/docker/docker/daemon/graphdriver/vfs$' \
| grep -v 'github.com/docker/docker/builder/dockerfile$' \ | grep -v 'github.com/docker/docker/builder/dockerfile$' \
| grep -v 'github.com/docker/docker/cmd/dockerd$' \
| grep -v 'github.com/docker/docker/builder/dockerfile/parser$' \ | grep -v 'github.com/docker/docker/builder/dockerfile/parser$' \
| grep -v 'github.com/docker/docker/man$' \ | grep -v 'github.com/docker/docker/daemon$' \
| grep -v 'github.com/docker/docker/daemon/graphdriver' \
| grep -v 'github.com/docker/docker/cmd/dockerd$' \
| grep -v 'github.com/docker/docker/pkg/integration$' \ | grep -v 'github.com/docker/docker/pkg/integration$' \
%if ! 0%{?with_libseccomp} %if ! 0%{?with_libseccomp}
| grep -v 'github.com/docker/docker/profiles/seccomp$' \ | grep -v 'github.com/docker/docker/profiles/seccomp$' \
%endif %endif
) )
go test -buildmode=pie -cover -ldflags -w -tags "$DOCKER_BUILDTAGS" -a -test.timeout=10m $PKG_LIST rm ./pkg/system/rm_test.go
#go test -buildmode=pie -cover -ldflags -w -tags "$DOCKER_BUILDTAGS" -a -test.timeout=10m $PKG_LIST
# DOCKER CLIENT
cp -ar %{buildroot}/usr/src/docker/cli $HOME/go/src/github.com/docker/cli
cd $HOME/go/src/github.com/docker/cli
PKG_LIST=$(go list ./... \
| grep 'github.com/docker/cli' \
| grep -v 'github.com/docker/cli/vendor' \
| grep -v 'github.com/docker/cli/cli/command/idresolver' \
| grep -v 'github.com/docker/cli/cli/command/image' \
| grep -v 'github.com/docker/cli/cli/image'
)
go test -buildmode=pie -ldflags -w -tags daemon -a -test.timeout=10m $PKG_LIST
%install %install
install -d %{buildroot}%{go_contribdir} install -d %{buildroot}%{go_contribdir}
install -d %{buildroot}%{_bindir} install -d %{buildroot}%{_bindir}
install -D -m755 bundles/latest/dynbinary-client/%{name} %{buildroot}/%{_bindir}/%{name} install -D -m755 components/cli/build/docker %{buildroot}/%{_bindir}/docker
install -D -m755 bundles/latest/dynbinary-daemon/%{name}d %{buildroot}/%{_bindir}/%{name}d install -D -m755 components/engine/bundles/latest/dynbinary-daemon/dockerd %{buildroot}/%{_bindir}/dockerd
install -d %{buildroot}/%{_prefix}/lib/docker install -d %{buildroot}/%{_prefix}/lib/docker
install -Dd -m 0755 \ install -Dd -m 0755 \
%{buildroot}%{_sysconfdir}/init.d \ %{buildroot}%{_sysconfdir}/init.d \
%{buildroot}%{_sbindir} %{buildroot}%{_sbindir}
install -D -m0644 contrib/completion/bash/docker "%{buildroot}%{_sysconfdir}/bash_completion.d/%{name}" install -D -m0644 components/cli/contrib/completion/bash/docker "%{buildroot}%{_sysconfdir}/bash_completion.d/%{name}"
install -D -m0644 contrib/completion/zsh/_docker "%{buildroot}%{_sysconfdir}/zsh_completion.d/%{name}" install -D -m0644 components/cli/contrib/completion/zsh/_docker "%{buildroot}%{_sysconfdir}/zsh_completion.d/%{name}"
# copy all for the test package # copy all for the test package
install -d %{buildroot}%{_prefix}/src/docker/ install -d %{buildroot}%{_prefix}/src/docker/
cp -av . %{buildroot}%{_prefix}/src/docker/ cp -a components/engine/. %{buildroot}%{_prefix}/src/docker/engine
cp -av contrib %{buildroot}%{_prefix}/src/docker/ cp -a components/cli/. %{buildroot}%{_prefix}/src/docker/cli
cp -av hack %{buildroot}%{_prefix}/src/docker/
cp -av integration-cli %{buildroot}%{_prefix}/src/docker/
cp -av VERSION Dockerfile %{buildroot}%{_prefix}/src/docker/
cp -av tests.main tests.sh %{buildroot}%{_prefix}/src/docker/hack/
# clean some things we don't need in the test package
(cd %{buildroot}%{_prefix}/src/docker/contrib && rm -rf builder completion desktop-integration init mkimage* syntax vagrant-docker)
(cd %{buildroot}%{_prefix}/src/docker/hack && rm -rf Jenkins dind generate-authors.sh install.sh make.sh release.sh vendor.sh .vendor-helpers.sh)
(cd %{buildroot}%{_prefix}/src/docker/integration-cli && rm -rf *.go)
# #
# systemd service # systemd service
# #
install -D -m 0644 %{SOURCE1} %{buildroot}%{_unitdir}/%{name}.service install -D -m 0644 %{SOURCE1} %{buildroot}%{_unitdir}/%{name}.service
ln -sf service %{buildroot}%{_sbindir}/rcdocker ln -sf service %{buildroot}%{_sbindir}/rcdocker
install -D -m 0755 %{SOURCE11} %{buildroot}/%{_libexecdir}/docker/
# #
# udev rules that prevents dolphin to show all docker devices and slows down # udev rules that prevents dolphin to show all docker devices and slows down
@ -327,24 +336,26 @@ install -D -m 0640 %{SOURCE8} %{buildroot}%{_sysconfdir}/audit/rules.d/%{name}.r
# sysconfig file # sysconfig file
install -D -m 644 %{SOURCE4} %{buildroot}%{_localstatedir}/adm/fillup-templates/sysconfig.docker install -D -m 644 %{SOURCE4} %{buildroot}%{_localstatedir}/adm/fillup-templates/sysconfig.docker
# install manpages # install manpages (using the ones from the engine)
install -d %{buildroot}%{_mandir}/man1 install -d %{buildroot}%{_mandir}/man1
install -p -m 644 man/man1/*.1 %{buildroot}%{_mandir}/man1 install -p -m 644 components/cli/man/man1/*.1 %{buildroot}%{_mandir}/man1
install -d %{buildroot}%{_mandir}/man5 install -d %{buildroot}%{_mandir}/man5
install -p -m 644 man/man5/Dockerfile.5 %{buildroot}%{_mandir}/man5 install -p -m 644 components/cli/man/man5/Dockerfile.5 %{buildroot}%{_mandir}/man5
install -d %{buildroot}%{_mandir}/man8 install -d %{buildroot}%{_mandir}/man8
install -p -m 644 man/man8/*.8 %{buildroot}%{_mandir}/man8 install -p -m 644 components/cli/man/man8/*.8 %{buildroot}%{_mandir}/man8
install -D -m 0644 %{SOURCE9} %{buildroot}%{docker_migration_warnfile} install -D -m 0644 %{SOURCE50} %{buildroot}%{docker_migration_warnfile}
install -D -m 0644 %{SOURCE51} %{buildroot}%{docker_plugin_warnfile}
%fdupes %{buildroot} %fdupes %{buildroot}
%pre %pre
# TODO: Remove this code in the near future.
# In order to make sure we don't print a scary warning when we shouldn't we # In order to make sure we don't print a scary warning when we shouldn't we
# need to test these things (in this order): # need to test these things (in this order):
# 1. Check that %{_localstatedir}/lib/docker actually exists (docker daemon has run). # 1. Check that %%{_localstatedir}/lib/docker actually exists (docker daemon has run).
# 2. Check that the migrator has *not* finished. # 2. Check that the migrator has *not* finished.
# 3. Check that %{_localstatedir}/lib/docker/graph exists (this is a <=1.9.1 thing, but # 3. Check that %%{_localstatedir}/lib/docker/graph exists (this is a <=1.9.1 thing, but
# will stick around if it has been migrated -- which is why we need the # will stick around if it has been migrated -- which is why we need the
# MIGRATION_TESTFILE check). # MIGRATION_TESTFILE check).
# 4. Check that there are images in the graph/ directory. # 4. Check that there are images in the graph/ directory.
@ -365,19 +376,22 @@ getent group docker >/dev/null || groupadd -r docker
%post %post
if [ -e %{docker_migration_testfile} ]; then if [ -e %{docker_migration_testfile} ]; then
cp %{docker_migration_warnfile} /var/adm/update-messages/docker-%{version}-%{release} cat %{docker_migration_warnfile} >> /var/adm/update-messages/docker-%{version}-%{release}
else else
if [ -e %{docker_migration_warnfile} ]; then if [ -e %{docker_migration_warnfile} ]; then
rm %{docker_migration_warnfile} rm %{docker_migration_warnfile}
fi fi
fi fi
# If plugins.json is present, docker will fail to start
# https://github.com/docker/docker/releases/1.13.0 # TODO: Remove this code in the near future.
# If plugins.json is present, docker will fail to start. It should be noted
# that this was not supported by us, as it was only experimental at the time.
# But handle this migration anyway. https://github.com/docker/docker/releases/tag/v1.13.0
if [ -e /var/lib/docker/plugins/plugins.json ];then if [ -e /var/lib/docker/plugins/plugins.json ];then
echo "Warning: Disabling previous installed plugins" cat %{docker_plugin_warnfile} >> /var/adm/update-messages/docker-%{version}-%{release}
echo "Otherwise docker will fail to boot"
mv /var/lib/docker/plugins/plugins.json /var/lib/docker/plugins/_plugins.json.old mv /var/lib/docker/plugins/plugins.json /var/lib/docker/plugins/_plugins.json.old
fi fi
%service_add_post %{name}.service %service_add_post %{name}.service
%{fillup_only -n docker} %{fillup_only -n docker}
@ -393,7 +407,7 @@ fi
%files %files
%defattr(-,root,root) %defattr(-,root,root)
%doc README.md LICENSE README_SUSE.md CHANGELOG.md %doc components/engine/README.md components/engine/LICENSE README_SUSE.md CHANGELOG.md
%{_bindir}/docker %{_bindir}/docker
%{_bindir}/dockerd %{_bindir}/dockerd
%{_sbindir}/rcdocker %{_sbindir}/rcdocker
@ -420,11 +434,12 @@ fi
%defattr(-,root,root) %defattr(-,root,root)
%{_prefix}/src/docker/ %{_prefix}/src/docker/
# exclude binaries # exclude binaries
%exclude %{_prefix}/src/docker/bundles/ %exclude %{_prefix}/src/docker/engine/bundles/
%exclude %{_prefix}/src/docker/cli/build/
# exclude init configurations other than systemd # exclude init configurations other than systemd
%exclude %{_prefix}/src/docker/contrib/init/openrc %exclude %{_prefix}/src/docker/engine/contrib/init/openrc
%exclude %{_prefix}/src/docker/contrib/init/sysvinit-debian %exclude %{_prefix}/src/docker/engine/contrib/init/sysvinit-debian
%exclude %{_prefix}/src/docker/contrib/init/sysvinit-redhat %exclude %{_prefix}/src/docker/engine/contrib/init/sysvinit-redhat
%exclude %{_prefix}/src/docker/contrib/init/upstart %exclude %{_prefix}/src/docker/engine/contrib/init/upstart
%changelog %changelog

View File

@ -1,22 +0,0 @@
#!/bin/bash
if [ "$1" != "wait" ];then
echo "Usage $0 option"
echo "options can be"
echo " wait: wait for the daemon to start"
exit -1
fi
echo "Waiting for docker daemon to start"
for i in {1..60};do
docker version > /dev/null 2>&1 && break
sleep 1
done
if docker version > /dev/null 2>&1;then
echo "Docker is alive"
exit 0
else
echo "Docker is dead"
exit 1
fi

View File

@ -1,4 +1,4 @@
From 4de0a0a9689c4063d369d54ecc16952241c7f241 Mon Sep 17 00:00:00 2001 From 102c28e548a544d672163300334d01240cfc965b Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de> From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 12:41:54 +1100 Date: Wed, 8 Mar 2017 12:41:54 +1100
Subject: [PATCH 1/2] daemon: allow directory creation in /run/secrets Subject: [PATCH 1/2] daemon: allow directory creation in /run/secrets
@ -7,32 +7,35 @@ Since FileMode can have the directory bit set, allow a SecretStore
implementation to return secrets that are actually directories. This is implementation to return secrets that are actually directories. This is
useful for creating directories and subdirectories of secrets. useful for creating directories and subdirectories of secrets.
Backport: https://github.com/docker/docker/pull/31632
Signed-off-by: Antonio Murdaca <runcom@redhat.com> Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Signed-off-by: Aleksa Sarai <asarai@suse.de> Signed-off-by: Aleksa Sarai <asarai@suse.de>
--- ---
daemon/container_operations_unix.go | 18 +++++++++--------- daemon/container_operations_unix.go | 24 +++++++++++++++++++++---
1 file changed, 9 insertions(+), 9 deletions(-) 1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index 67b3ee38c0ab..a538ba4e73e8 100644 index 84b7eb352f1a..dc3a48bfe47a 100644
--- a/daemon/container_operations_unix.go --- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go +++ b/daemon/container_operations_unix.go
@@ -178,11 +178,6 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { @@ -3,6 +3,7 @@
} package daemon
targetPath := filepath.Clean(s.File.Name) import (
- // ensure that the target is a filename only; no paths allowed + "bytes"
- if targetPath != filepath.Base(targetPath) { "context"
- return fmt.Errorf("error creating secret: secret must not be a path") "fmt"
- } "io/ioutil"
- @@ -13,6 +14,7 @@ import (
fPath := filepath.Join(localMountPath, targetPath) "github.com/Sirupsen/logrus"
if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { "github.com/docker/docker/container"
return errors.Wrap(err, "error creating secret mount path") "github.com/docker/docker/daemon/links"
@@ -196,9 +191,6 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + "github.com/docker/docker/pkg/archive"
if secret == nil { "github.com/docker/docker/pkg/idtools"
return fmt.Errorf("unable to get secret from secret store") "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
@@ -216,9 +218,6 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if err != nil {
return errors.Wrap(err, "unable to get secret from secret store")
} }
- if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { - if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
- return errors.Wrap(err, "error injecting secret") - return errors.Wrap(err, "error injecting secret")
@ -40,23 +43,32 @@ index 67b3ee38c0ab..a538ba4e73e8 100644
uid, err := strconv.Atoi(s.File.UID) uid, err := strconv.Atoi(s.File.UID)
if err != nil { if err != nil {
@@ -208,7 +200,15 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { @@ -229,6 +228,25 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if err != nil {
return err return err
} }
-
+ if s.File.Mode.IsDir() { + if s.File.Mode.IsDir() {
+ if err := idtools.MkdirAllAs(fPath, s.File.Mode, rootUID+uid, rootGID+gid); err != nil { + if err := os.Mkdir(fPath, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error injecting secret dir") + return errors.Wrap(err, "error creating secretdir")
+ }
+ if secret.Spec.Data != nil {
+ // If the "file" is a directory, then s.File.Data is actually a tar
+ // archive of the directory. So we just do a tar extraction here.
+ if err := archive.UntarUncompressed(bytes.NewBuffer(secret.Spec.Data), fPath, &archive.TarOptions{
+ UIDMaps: daemon.idMappings.UIDs(),
+ GIDMaps: daemon.idMappings.GIDs(),
+ }); err != nil {
+ return errors.Wrap(err, "error injecting secretdir")
+ }
+ } + }
+ } else { + } else {
+ if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
+ return errors.Wrap(err, "error injecting secret") + return errors.Wrap(err, "error injecting secret")
+ } + }
+ } + }
if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
return errors.Wrap(err, "error setting ownership for secret") return errors.Wrap(err, "error setting ownership for secret")
} }
-- --
2.14.1 2.14.2

View File

@ -1,4 +1,4 @@
From 9b33a267ec637d7d8a29259246033bfe1b5f47bc Mon Sep 17 00:00:00 2001 From afb202611a8330e0b3a7900aa2d68b7cc1d489fe Mon Sep 17 00:00:00 2001
From: Aleksa Sarai <asarai@suse.de> From: Aleksa Sarai <asarai@suse.de>
Date: Wed, 8 Mar 2017 11:43:29 +1100 Date: Wed, 8 Mar 2017 11:43:29 +1100
Subject: [PATCH 2/2] SUSE: implement SUSE container secrets Subject: [PATCH 2/2] SUSE: implement SUSE container secrets
@ -13,15 +13,15 @@ MAKES BUILDS NOT ENTIRELY REPRODUCIBLE.
Signed-off-by: Aleksa Sarai <asarai@suse.de> Signed-off-by: Aleksa Sarai <asarai@suse.de>
--- ---
daemon/start.go | 5 + daemon/start.go | 5 +
daemon/suse_secrets.go | 260 +++++++++++++++++++++++++++++++++++++++++++++++++ daemon/suse_secrets.go | 328 +++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 265 insertions(+) 2 files changed, 333 insertions(+)
create mode 100644 daemon/suse_secrets.go create mode 100644 daemon/suse_secrets.go
diff --git a/daemon/start.go b/daemon/start.go diff --git a/daemon/start.go b/daemon/start.go
index eddb5d3d5060..eb74e2ab1096 100644 index 55438cf2c45f..7dfa6cd1d055 100644
--- a/daemon/start.go --- a/daemon/start.go
+++ b/daemon/start.go +++ b/daemon/start.go
@@ -141,6 +141,11 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint @@ -147,6 +147,11 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
return err return err
} }
@ -35,10 +35,10 @@ index eddb5d3d5060..eb74e2ab1096 100644
return err return err
diff --git a/daemon/suse_secrets.go b/daemon/suse_secrets.go diff --git a/daemon/suse_secrets.go b/daemon/suse_secrets.go
new file mode 100644 new file mode 100644
index 000000000000..b577b7081976 index 000000000000..b09ad96f01b0
--- /dev/null --- /dev/null
+++ b/daemon/suse_secrets.go +++ b/daemon/suse_secrets.go
@@ -0,0 +1,260 @@ @@ -0,0 +1,328 @@
+/* +/*
+ * suse-secrets: patch for Docker to implement SUSE secrets + * suse-secrets: patch for Docker to implement SUSE secrets
+ * Copyright (C) 2017 SUSE LLC. + * Copyright (C) 2017 SUSE LLC.
@ -59,16 +59,20 @@ index 000000000000..b577b7081976
+package daemon +package daemon
+ +
+import ( +import (
+ "archive/tar"
+ "bytes"
+ "fmt" + "fmt"
+ "io"
+ "io/ioutil" + "io/ioutil"
+ "os" + "os"
+ "path/filepath" + "path/filepath"
+ "syscall" + "syscall"
+ +
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/container" + "github.com/docker/docker/container"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/idtools"
+ "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest"
+ "github.com/Sirupsen/logrus"
+ +
+ swarmtypes "github.com/docker/docker/api/types/swarm" + swarmtypes "github.com/docker/docker/api/types/swarm"
+ swarmexec "github.com/docker/swarmkit/agent/exec" + swarmexec "github.com/docker/swarmkit/agent/exec"
@ -90,7 +94,7 @@ index 000000000000..b577b7081976
+} +}
+ +
+func (s SuseFakeFile) id() string { +func (s SuseFakeFile) id() string {
+ return fmt.Sprintf("suse::%s:%s", digest.FromBytes(s.Data), s.Path) + return fmt.Sprintf("suse_%s_%s", digest.FromBytes(s.Data).Hex(), s.Path)
+} +}
+ +
+func (s SuseFakeFile) toSecret() *swarmapi.Secret { +func (s SuseFakeFile) toSecret() *swarmapi.Secret {
@ -103,16 +107,14 @@ index 000000000000..b577b7081976
+ } + }
+} +}
+ +
+func (s SuseFakeFile) toSecretReference(uidMaps, gidMaps []idtools.IDMap) *swarmtypes.SecretReference { +func (s SuseFakeFile) toSecretReference(idMaps *idtools.IDMappings) *swarmtypes.SecretReference {
+ // Figure out the host-facing {uid,gid} based on the provided maps. Fall + // Figure out the host-facing {uid,gid} based on the provided maps. Fall
+ // back to root if the UID/GID don't match (we are guaranteed that root is + // back to root if the UID/GID don't match (we are guaranteed that root is
+ // mapped). + // mapped).
+ hostUid, hostGid, _ := idtools.GetRootUIDGID(uidMaps, gidMaps) + ctrUser := idtools.IDPair{UID: s.Uid, GID: s.Gid}
+ if uid, err := idtools.ToHost(s.Uid, uidMaps); err == nil { + hostUser := idMaps.RootPair()
+ hostUid = uid + if user, err := idMaps.ToHost(ctrUser); err != nil {
+ } + hostUser = user
+ if gid, err := idtools.ToHost(s.Gid, gidMaps); err == nil {
+ hostGid = gid
+ } + }
+ +
+ // Return the secret reference as a file target. + // Return the secret reference as a file target.
@ -121,82 +123,149 @@ index 000000000000..b577b7081976
+ SecretName: s.id(), + SecretName: s.id(),
+ File: &swarmtypes.SecretReferenceFileTarget{ + File: &swarmtypes.SecretReferenceFileTarget{
+ Name: s.Path, + Name: s.Path,
+ UID: fmt.Sprintf("%d", hostUid), + UID: fmt.Sprintf("%d", hostUser.UID),
+ GID: fmt.Sprintf("%d", hostGid), + GID: fmt.Sprintf("%d", hostUser.GID),
+ Mode: s.Mode, + Mode: s.Mode,
+ }, + },
+ } + }
+} +}
+ +
+// readDir will recurse into a directory prefix/dir, and return the set of secrets +// readDir will recurse into a directory prefix/dir, and return the set of
+// in that directory. The Path attribute of each has the prefix stripped. Symlinks +// secrets in that directory (as a tar archive that is packed inside the "data"
+// are evaluated. +// field). The Path attribute of each has the prefix stripped. Symlinks are
+// dereferenced.
+func readDir(prefix, dir string) ([]*SuseFakeFile, error) { +func readDir(prefix, dir string) ([]*SuseFakeFile, error) {
+ var suseFiles []*SuseFakeFile + var suseFiles []*SuseFakeFile
+ +
+ path := filepath.Join(prefix, dir) + path := filepath.Join(prefix, dir)
+
+ fi, err := os.Stat(path) + fi, err := os.Stat(path)
+ if err != nil { + if err != nil {
+ // Ignore dangling symlinks. + // Ignore dangling symlinks.
+ if os.IsNotExist(err) { + if os.IsNotExist(err) {
+ logrus.Warnf("SUSE:secrets :: dangling symlink: %s", path) + logrus.Warnf("SUSE:secrets :: dangling symlink: %s", path)
+ return suseFiles, nil + return nil, nil
+ } + }
+ return nil, err + return nil, err
+ } else if !fi.IsDir() {
+ // Just to be safe.
+ logrus.Warnf("SUSE:secrets :: expected %q to be a directory, but was a file", path)
+ return readFile(prefix, dir)
+ }
+ path, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return nil, err
+ } + }
+ +
+ stat, ok := fi.Sys().(*syscall.Stat_t) + // Construct a tar archive of the source directory. We tar up the prefix
+ if !ok { + // directory and add dir as an IncludeFiles specifically so that we
+ logrus.Warnf("SUSE:secrets :: failed to cast directory stat_t: defaulting to owned by root:root: %s", path) + // preserve the name of the directory itself.
+ tarStream, err := archive.TarWithOptions(path, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to tar source directory %q: %v", path, err)
+ }
+ tarStreamBytes, err := ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read full tar archive: %v", err)
+ } + }
+ +
+ // Get a list of the symlinks in the tar archive.
+ var symlinks []string
+ tmpTr := tar.NewReader(bytes.NewBuffer(tarStreamBytes))
+ for {
+ hdr, err := tmpTr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read through tar reader: %v", err)
+ }
+ if hdr.Typeflag == tar.TypeSymlink {
+ symlinks = append(symlinks, hdr.Name)
+ }
+ }
+
+ // Symlinks aren't dereferenced in the above archive, so we explicitly do a
+ // rewrite of the tar archive to include all symlinks to files. We cannot
+ // do directories here, but lower-level directory symlinks aren't supported
+ // by zypper so this isn't an issue.
+ symlinkModifyMap := map[string]archive.TarModifierFunc{}
+ for _, sym := range symlinks {
+ logrus.Debugf("SUSE:secrets: archive(%q) %q is a need-to-rewrite symlink", path, sym)
+ symlinkModifyMap[sym] = func(tarPath string, hdr *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
+ logrus.Debugf("SUSE:secrets: archive(%q) mapping for symlink %q", path, tarPath)
+ tarFullPath := filepath.Join(path, tarPath)
+
+ // Get a copy of the original byte stream.
+ oldContent, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, nil, fmt.Errorf("suse_rewrite: failed to read archive entry %q: %v", tarPath, err)
+ }
+
+ // Check that the file actually exists.
+ fi, err := os.Stat(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to stat archive entry %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ // Read the actual contents.
+ content, err := ioutil.ReadFile(tarFullPath)
+ if err != nil {
+ logrus.Warnf("suse_rewrite: failed to read %q: %v", tarFullPath, err)
+ return hdr, oldContent, nil
+ }
+
+ newHdr, err := tar.FileInfoHeader(fi, "")
+ if err != nil {
+ // Fake the header.
+ newHdr = &tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ }
+ }
+
+ // Update the key fields.
+ hdr.Typeflag = newHdr.Typeflag
+ hdr.Mode = newHdr.Mode
+ hdr.Linkname = ""
+ return hdr, content, nil
+ }
+ }
+
+ // Create the rewritten tar stream.
+ tarStream = archive.ReplaceFileTarWrapper(ioutil.NopCloser(bytes.NewBuffer(tarStreamBytes)), symlinkModifyMap)
+ tarStreamBytes, err = ioutil.ReadAll(tarStream)
+ if err != nil {
+ return nil, fmt.Errorf("SUSE:secrets :: failed to read rewritten archive: %v", err)
+ }
+
+ // Add the tar stream as a "file".
+ suseFiles = append(suseFiles, &SuseFakeFile{ + suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: dir, + Path: dir,
+ Uid: int(stat.Uid),
+ Gid: int(stat.Gid),
+ Mode: fi.Mode(), + Mode: fi.Mode(),
+ Data: tarStreamBytes,
+ }) + })
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, f := range files {
+ subpath := filepath.Join(dir, f.Name())
+
+ if f.IsDir() {
+ secrets, err := readDir(prefix, subpath)
+ if err != nil {
+ return nil, err
+ }
+ suseFiles = append(suseFiles, secrets...)
+ } else {
+ secrets, err := readFile(prefix, subpath)
+ if err != nil {
+ return nil, err
+ }
+ suseFiles = append(suseFiles, secrets...)
+ }
+ }
+
+ return suseFiles, nil + return suseFiles, nil
+} +}
+ +
+// readFile returns a secret given a file under a given prefix. +// readFile returns a secret given a file under a given prefix.
+func readFile(prefix, file string) ([]*SuseFakeFile, error) { +func readFile(prefix, file string) ([]*SuseFakeFile, error) {
+ var suseFiles []*SuseFakeFile
+
+ path := filepath.Join(prefix, file) + path := filepath.Join(prefix, file)
+ fi, err := os.Stat(path) + fi, err := os.Stat(path)
+ if err != nil { + if err != nil {
+ // Ignore dangling symlinks. + // Ignore dangling symlinks.
+ if os.IsNotExist(err) { + if os.IsNotExist(err) {
+ logrus.Warnf("SUSE:secrets :: dangling symlink: %s", path) + logrus.Warnf("SUSE:secrets :: dangling symlink: %s", path)
+ return suseFiles, nil + return nil, nil
+ } + }
+ return nil, err + return nil, err
+ } else if fi.IsDir() {
+ // Just to be safe.
+ logrus.Warnf("SUSE:secrets :: expected %q to be a file, but was a directory", path)
+ return readDir(prefix, file)
+ } + }
+ +
+ stat, ok := fi.Sys().(*syscall.Stat_t) + stat, ok := fi.Sys().(*syscall.Stat_t)
@ -204,17 +273,12 @@ index 000000000000..b577b7081976
+ logrus.Warnf("SUSE:secrets :: failed to cast file stat_t: defaulting to owned by root:root: %s", path) + logrus.Warnf("SUSE:secrets :: failed to cast file stat_t: defaulting to owned by root:root: %s", path)
+ } + }
+ +
+ if fi.IsDir() {
+ secrets, err := readDir(prefix, file)
+ if err != nil {
+ return nil, err
+ }
+ suseFiles = append(suseFiles, secrets...)
+ } else {
+ bytes, err := ioutil.ReadFile(path) + bytes, err := ioutil.ReadFile(path)
+ if err != nil { + if err != nil {
+ return nil, err + return nil, err
+ } + }
+
+ var suseFiles []*SuseFakeFile
+ suseFiles = append(suseFiles, &SuseFakeFile{ + suseFiles = append(suseFiles, &SuseFakeFile{
+ Path: file, + Path: file,
+ Uid: int(stat.Uid), + Uid: int(stat.Uid),
@ -222,8 +286,6 @@ index 000000000000..b577b7081976
+ Mode: fi.Mode(), + Mode: fi.Mode(),
+ Data: bytes, + Data: bytes,
+ }) + })
+ }
+
+ return suseFiles, nil + return suseFiles, nil
+} +}
+ +
@ -258,30 +320,36 @@ index 000000000000..b577b7081976
+} +}
+ +
+// In order to reduce the amount of code touched outside of this file, we +// In order to reduce the amount of code touched outside of this file, we
+// implement the swarm API for SecretGetter. This asserts that this requirement +// implement the swarm API for DependencyGetter. This asserts that this
+// will always be matched. +// requirement will always be matched.
+var _ swarmexec.SecretGetter = &suseSecretGetter{} +var _ swarmexec.DependencyGetter = &suseDependencyStore{}
+ +
+type suseSecretGetter struct { +type suseDependencyStore struct {
+ dfl swarmexec.SecretGetter + dfl swarmexec.DependencyGetter
+ secrets map[string]*swarmapi.Secret + secrets map[string]*swarmapi.Secret
+} +}
+ +
+func (s *suseSecretGetter) Get(id string) *swarmapi.Secret { +// The following are just dumb wrappers that return ourselves.
+ logrus.Debugf("SUSE:secrets :: id=%s requested from suseSecretGetter", id) +func (s *suseDependencyStore) Secrets() swarmexec.SecretGetter { return s }
+func (s *suseDependencyStore) Configs() swarmexec.ConfigGetter { return s.dfl.Configs() }
+
+// Get overrides the underlying DependencyGetter with our own secrets (falling
+// through to the underlying DependencyGetter if the secret isn't present).
+func (s *suseDependencyStore) Get(id string) (*swarmapi.Secret, error) {
+ logrus.Debugf("SUSE:secrets :: id=%s requested from suseDependencyGetter", id)
+ +
+ secret, ok := s.secrets[id] + secret, ok := s.secrets[id]
+ if !ok { + if !ok {
+ // fallthrough + // fallthrough
+ return s.dfl.Get(id) + return s.dfl.Secrets().Get(id)
+ } + }
+ +
+ return secret + return secret, nil
+} +}
+ +
+func (daemon *Daemon) injectSuseSecretStore(c *container.Container) error { +func (daemon *Daemon) injectSuseSecretStore(c *container.Container) error {
+ newSecretStore := &suseSecretGetter{ + newDependencyStore := &suseDependencyStore{
+ dfl: c.SecretStore, + dfl: c.DependencyStore,
+ secrets: make(map[string]*swarmapi.Secret), + secrets: make(map[string]*swarmapi.Secret),
+ } + }
+ +
@ -290,15 +358,15 @@ index 000000000000..b577b7081976
+ return err + return err
+ } + }
+ +
+ uidMaps, gidMaps := daemon.GetUIDGIDMaps() + idMaps := daemon.IDMappings()
+ for _, secret := range secrets { + for _, secret := range secrets {
+ newSecretStore.secrets[secret.id()] = secret.toSecret() + newDependencyStore.secrets[secret.id()] = secret.toSecret()
+ c.SecretReferences = append(c.SecretReferences, secret.toSecretReference(uidMaps, gidMaps)) + c.SecretReferences = append(c.SecretReferences, secret.toSecretReference(idMaps))
+ } + }
+ +
+ c.SecretStore = newSecretStore + c.DependencyStore = newDependencyStore
+ return nil + return nil
+} +}
-- --
2.14.1 2.14.2