- Update to version 1.1.0
  Release notes https://github.com/kubevirt/kubevirt/releases/tag/v1.1.0
- Drop upstreamed patches
  0001-Fix-qemu-system-lookup.patch
  0003-Virtiofs-Remove-duplicated-functional-tests.patch
  0005-Support-multiple-watchdogs-in-the-domain-schema.patch
- Add patches
  0001-Update-google.golang.org-grpc-to-1.56.3.patch (CVE-2023-44487)
  0002-virt-launcher-fix-qemu-non-root-path.patch
  0003-cgroupsv2-reconstruct-device-allowlist.patch

OBS-URL: https://build.opensuse.org/request/show/1125817
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=139
From c39907f51b515abe552347c7fd96f499dfb1f2e3 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Tue, 22 Nov 2022 13:36:30 +0100
Subject: [PATCH 1/4] cgroup v2: Allow VMI's block volume devices

When a block volume is non-hotpluggable (i.e. it is specified explicitly
in the VMI spec), the device cgroup permissions are managed purely by
Kubernetes and CRI. For v2, that means a BPF program is assigned to the
pod's cgroup. However, when we manage hotplug volumes, we overwrite the
BPF program to allow access to the new block device. The problem is that
we do not know what the existing BPF program does, hence we just follow
some assumptions about the 'default' devices that we need to allow (e.g.
/dev/kvm and some others). We also need to consider the non-hotpluggable
volumes, otherwise a VM with a block PVC or DV will fail to start if a
hotplug volume is attached to it.

Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
 pkg/virt-handler/cgroup/BUILD.bazel | 2 +
 pkg/virt-handler/cgroup/cgroup.go | 20 ++++--
 pkg/virt-handler/cgroup/cgroup_test.go | 7 +-
 pkg/virt-handler/cgroup/cgroup_v2_manager.go | 12 +++-
 pkg/virt-handler/cgroup/util.go | 70 ++++++++++++++++++++
 5 files changed, 100 insertions(+), 11 deletions(-)
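
The core of this patch is translating a VMI's non-hotpluggable block volumes into device-cgroup allow rules. The standalone Go sketch below mirrors what newAllowedDeviceRule in the diff does, but uses plain os.Stat instead of KubeVirt's safepath helpers (which resolve /dev/<volume> inside the virt-launcher mount namespace), so treat it as an illustration of the mechanics rather than the exact implementation:

```go
// A minimal, self-contained sketch, assuming a device node visible at a
// plain host path; the patch itself resolves the path via pkg/safepath.
package main

import (
	"fmt"
	"os"
	"syscall"

	"github.com/opencontainers/runc/libcontainer/devices"
	"golang.org/x/sys/unix"
)

func allowedDeviceRule(path string) (*devices.Rule, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if fi.Mode()&os.ModeDevice == 0 {
		return nil, nil // not a device node
	}
	devType := devices.BlockDevice
	if fi.Mode()&os.ModeCharDevice != 0 {
		devType = devices.CharDevice
	}
	// The major/minor numbers live in st_rdev; they are what the cgroup
	// device filter (an eBPF program on cgroup v2) matches against.
	stat := fi.Sys().(*syscall.Stat_t)
	return &devices.Rule{
		Type:        devType,
		Major:       int64(unix.Major(uint64(stat.Rdev))),
		Minor:       int64(unix.Minor(uint64(stat.Rdev))),
		Permissions: "rwm",
		Allow:       true,
	}, nil
}

func main() {
	rule, err := allowedDeviceRule("/dev/kvm") // illustrative path only
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Printf("allow rule: %+v\n", rule)
}
```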

diff --git a/pkg/virt-handler/cgroup/BUILD.bazel b/pkg/virt-handler/cgroup/BUILD.bazel
index 54e735c6c..6480a1ecc 100644
--- a/pkg/virt-handler/cgroup/BUILD.bazel
+++ b/pkg/virt-handler/cgroup/BUILD.bazel
@@ -12,6 +12,7 @@ go_library(
importpath = "kubevirt.io/kubevirt/pkg/virt-handler/cgroup",
visibility = ["//visibility:public"],
deps = [
+ "//pkg/safepath:go_default_library",
"//pkg/util:go_default_library",
"//pkg/virt-handler/isolation:go_default_library",
"//staging/src/kubevirt.io/api/core/v1:go_default_library",
@@ -23,6 +24,7 @@ go_library(
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2:go_default_library",
"//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
"//vendor/github.com/opencontainers/runc/libcontainer/devices:go_default_library",
+ "//vendor/golang.org/x/sys/unix:go_default_library",
],
)

diff --git a/pkg/virt-handler/cgroup/cgroup.go b/pkg/virt-handler/cgroup/cgroup.go
index b0b28a046..d881458cc 100644
--- a/pkg/virt-handler/cgroup/cgroup.go
+++ b/pkg/virt-handler/cgroup/cgroup.go
@@ -30,6 +30,7 @@ import (

runc_cgroups "github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/devices"

v1 "kubevirt.io/api/core/v1"

@@ -89,9 +90,9 @@ func managerPath(taskPath string) string {
return retPath
}

-// NewManagerFromPid initializes a new cgroup manager from VMI's pid.
+// newManagerFromPid initializes a new cgroup manager from VMI's pid.
// The pid is expected to VMI's pid from the host's viewpoint.
-func NewManagerFromPid(pid int) (manager Manager, err error) {
+func newManagerFromPid(pid int, deviceRules []*devices.Rule) (manager Manager, err error) {
const isRootless = false
var version CgroupVersion

@@ -102,9 +103,11 @@ func NewManagerFromPid(pid int) (manager Manager, err error) {
}

config := &configs.Cgroup{
- Path: HostCgroupBasePath,
- Resources: &configs.Resources{},
- Rootless: isRootless,
+ Path: HostCgroupBasePath,
+ Resources: &configs.Resources{
+ Devices: deviceRules,
+ },
+ Rootless: isRootless,
}

if runc_cgroups.IsCgroup2UnifiedMode() {
@@ -140,7 +143,12 @@ func NewManagerFromVM(vmi *v1.VirtualMachineInstance) (Manager, error) {
return nil, err
}

- return NewManagerFromPid(isolationRes.Pid())
+ vmiDeviceRules, err := generateDeviceRulesForVMI(vmi, isolationRes)
+ if err != nil {
+ return nil, err
+ }
+
+ return newManagerFromPid(isolationRes.Pid(), vmiDeviceRules)
}

// GetGlobalCpuSetPath returns the CPU set of the main cgroup slice
diff --git a/pkg/virt-handler/cgroup/cgroup_test.go b/pkg/virt-handler/cgroup/cgroup_test.go
index 3f982b484..7ac19d33e 100644
--- a/pkg/virt-handler/cgroup/cgroup_test.go
+++ b/pkg/virt-handler/cgroup/cgroup_test.go
@@ -43,7 +43,7 @@ var _ = Describe("cgroup manager", func() {
if version == V1 {
return newCustomizedV1Manager(mockRuncCgroupManager, false, execVirtChrootFunc, getCurrentlyDefinedRulesFunc)
} else {
- return newCustomizedV2Manager(mockRuncCgroupManager, false, execVirtChrootFunc)
+ return newCustomizedV2Manager(mockRuncCgroupManager, false, nil, execVirtChrootFunc)
}
}

@@ -85,10 +85,11 @@ var _ = Describe("cgroup manager", func() {

Expect(rulesDefined).To(ContainElement(fakeRule), "defined rule is expected to exist")

- for _, defaultRule := range GenerateDefaultDeviceRules() {
+ defaultDeviceRules := GenerateDefaultDeviceRules()
+ for _, defaultRule := range defaultDeviceRules {
Expect(rulesDefined).To(ContainElement(defaultRule), "default rules are expected to be defined")
}
-
+ Expect(rulesDefined).To(HaveLen(len(defaultDeviceRules) + 1))
},
Entry("for v1", V1),
Entry("for v2", V2),
diff --git a/pkg/virt-handler/cgroup/cgroup_v2_manager.go b/pkg/virt-handler/cgroup/cgroup_v2_manager.go
index 4b8bf63f0..989c004e8 100644
--- a/pkg/virt-handler/cgroup/cgroup_v2_manager.go
+++ b/pkg/virt-handler/cgroup/cgroup_v2_manager.go
@@ -22,6 +22,7 @@ type v2Manager struct {
runc_cgroups.Manager
dirPath string
isRootless bool
+ deviceRules []*devices.Rule
execVirtChroot execVirtChrootFunc
}

@@ -31,14 +32,20 @@ func newV2Manager(config *runc_configs.Cgroup, dirPath string) (Manager, error)
return nil, err
}

- return newCustomizedV2Manager(runcManager, config.Rootless, execVirtChrootCgroups)
+ return newCustomizedV2Manager(runcManager, config.Rootless, config.Resources.Devices, execVirtChrootCgroups)
}

-func newCustomizedV2Manager(runcManager runc_cgroups.Manager, isRootless bool, execVirtChroot execVirtChrootFunc) (Manager, error) {
+func newCustomizedV2Manager(
+ runcManager runc_cgroups.Manager,
+ isRootless bool,
+ deviceRules []*devices.Rule,
+ execVirtChroot execVirtChrootFunc,
+) (Manager, error) {
manager := v2Manager{
runcManager,
runcManager.GetPaths()[""],
isRootless,
+ deviceRules,
execVirtChroot,
}

@@ -55,6 +62,7 @@ func (v *v2Manager) Set(r *runc_configs.Resources) error {

//Add default rules
resourcesToSet.Devices = append(resourcesToSet.Devices, GenerateDefaultDeviceRules()...)
+ resourcesToSet.Devices = append(resourcesToSet.Devices, v.deviceRules...)

rulesToSet, err := addCurrentRules(rulesPerPid[v.dirPath], resourcesToSet.Devices)
if err != nil {
diff --git a/pkg/virt-handler/cgroup/util.go b/pkg/virt-handler/cgroup/util.go
index f088dc4ce..511ea7ca2 100644
--- a/pkg/virt-handler/cgroup/util.go
+++ b/pkg/virt-handler/cgroup/util.go
@@ -4,21 +4,28 @@ import (
"bufio"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
+ "syscall"

"github.com/opencontainers/runc/libcontainer/cgroups"
+ "golang.org/x/sys/unix"

"github.com/opencontainers/runc/libcontainer/devices"

runc_cgroups "github.com/opencontainers/runc/libcontainer/cgroups"
runc_configs "github.com/opencontainers/runc/libcontainer/configs"

+ v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
+
+ "kubevirt.io/kubevirt/pkg/safepath"
+ "kubevirt.io/kubevirt/pkg/virt-handler/isolation"
)

type CgroupVersion string
@@ -84,6 +91,69 @@ func addCurrentRules(currentRules, newRules []*devices.Rule) ([]*devices.Rule, e
return newRules, nil
}

+// This builds up the known persistent block devices allow list for a VMI (as in, hotplugged volumes are handled separately)
+// This will be maintained and extended as new devices likely have to end up on this list as well
+// For example - https://kubernetes.io/docs/concepts/scheduling-eviction/dynamic-resource-allocation/
+func generateDeviceRulesForVMI(vmi *v1.VirtualMachineInstance, isolationRes isolation.IsolationResult) ([]*devices.Rule, error) {
+ mountRoot, err := isolationRes.MountRoot()
+ if err != nil {
+ return nil, err
+ }
+
+ var vmiDeviceRules []*devices.Rule
+ for _, volume := range vmi.Spec.Volumes {
+ switch {
+ case volume.VolumeSource.PersistentVolumeClaim != nil:
+ if volume.VolumeSource.PersistentVolumeClaim.Hotpluggable {
+ continue
+ }
+ case volume.VolumeSource.DataVolume != nil:
+ if volume.VolumeSource.DataVolume.Hotpluggable {
+ continue
+ }
+ case volume.VolumeSource.Ephemeral != nil:
+ default:
+ continue
+ }
+ path, err := safepath.JoinNoFollow(mountRoot, filepath.Join("dev", volume.Name))
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ return nil, fmt.Errorf("failed to resolve path for volume %s: %v", volume.Name, err)
+ }
+ if deviceRule, err := newAllowedDeviceRule(path); err != nil {
+ return nil, fmt.Errorf("failed to create device rule for %s: %v", path, err)
+ } else if deviceRule != nil {
+ log.Log.V(loggingVerbosity).Infof("device rule for volume %s: %v", volume.Name, deviceRule)
+ vmiDeviceRules = append(vmiDeviceRules, deviceRule)
+ }
+ }
+ return vmiDeviceRules, nil
+}
+
+func newAllowedDeviceRule(devicePath *safepath.Path) (*devices.Rule, error) {
+ fileInfo, err := safepath.StatAtNoFollow(devicePath)
+ if err != nil {
+ return nil, err
+ }
+ if (fileInfo.Mode() & os.ModeDevice) == 0 {
+ return nil, nil //not a device file
+ }
+ deviceType := devices.BlockDevice
+ if (fileInfo.Mode() & os.ModeCharDevice) != 0 {
+ deviceType = devices.CharDevice
+ }
+ stat := fileInfo.Sys().(*syscall.Stat_t)
+ return &devices.Rule{
+ Type: deviceType,
+ Major: int64(unix.Major(stat.Rdev)),
+ Minor: int64(unix.Minor(stat.Rdev)),
+ Permissions: "rwm",
+ Allow: true,
+ }, nil
+}
+
func GenerateDefaultDeviceRules() []*devices.Rule {
if len(defaultDeviceRules) > 0 {
// To avoid re-computing default device rules
--
2.42.1

From 8d6155cd942a29ec7453065f93f913991f877895 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Mon, 28 Nov 2022 09:24:12 +0100
Subject: [PATCH 2/4] Hotplug volume mounter: set cgroup manager as arg

Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
 pkg/virt-handler/BUILD.bazel | 1 +
 .../hotplug-disk/generated_mock_mount.go | 34 ++---
 pkg/virt-handler/hotplug-disk/mount.go | 133 ++++++++----------
 pkg/virt-handler/hotplug-disk/mount_test.go | 22 ++-
 pkg/virt-handler/vm.go | 40 ++++--
 pkg/virt-handler/vm_test.go | 81 ++++++-----
 6 files changed, 153 insertions(+), 158 deletions(-)
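
This patch threads one cgroup.Manager through the mounter API instead of letting every mount/unmount helper construct its own. A minimal sketch of the resulting call-site contract, assuming a trimmed stand-in for KubeVirt's cgroup.Manager interface:

```go
// A sketch, not KubeVirt's actual code: Manager stands in for
// pkg/virt-handler/cgroup.Manager, reduced to the one method used here.
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runc/libcontainer/devices"
)

type Manager interface {
	Set(r *configs.Resources) error
}

// updateBlockMajorMinor mirrors the nil-guard added in mount.go: targeted
// device-rule updates must fail loudly when no manager was constructed.
func updateBlockMajorMinor(mgr Manager, rule *devices.Rule) error {
	if mgr == nil {
		return fmt.Errorf("failed to apply device rule %+v: cgroup manager is nil", *rule)
	}
	return mgr.Set(&configs.Resources{Devices: []*devices.Rule{rule}})
}

func main() {
	rule := &devices.Rule{Type: devices.BlockDevice, Major: 8, Minor: 0, Permissions: "rwm", Allow: true}
	// Best-effort cleanup paths may pass a nil manager and tolerate the error.
	if err := updateBlockMajorMinor(nil, rule); err != nil {
		fmt.Println(err)
	}
}
```

Note the asymmetry this creates: rule updates fail loudly on a nil manager, while the processVmCleanup hunk in vm.go further down deliberately passes a possibly-nil manager to UnmountAll, because cleanup is best effort.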

diff --git a/pkg/virt-handler/BUILD.bazel b/pkg/virt-handler/BUILD.bazel
index a6ae28ebe..9c59fcf30 100644
--- a/pkg/virt-handler/BUILD.bazel
+++ b/pkg/virt-handler/BUILD.bazel
@@ -99,6 +99,7 @@ go_test(
"//pkg/virt-config:go_default_library",
"//pkg/virt-controller/services:go_default_library",
"//pkg/virt-handler/cache:go_default_library",
+ "//pkg/virt-handler/cgroup:go_default_library",
"//pkg/virt-handler/cmd-client:go_default_library",
"//pkg/virt-handler/container-disk:go_default_library",
"//pkg/virt-handler/hotplug-disk:go_default_library",
diff --git a/pkg/virt-handler/hotplug-disk/generated_mock_mount.go b/pkg/virt-handler/hotplug-disk/generated_mock_mount.go
index ef1432f2f..1ed10bc9a 100644
--- a/pkg/virt-handler/hotplug-disk/generated_mock_mount.go
+++ b/pkg/virt-handler/hotplug-disk/generated_mock_mount.go
@@ -7,6 +7,8 @@ import (
gomock "github.com/golang/mock/gomock"
types "k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
+
+ cgroup "kubevirt.io/kubevirt/pkg/virt-handler/cgroup"
)

// Mock of VolumeMounter interface
@@ -30,44 +32,44 @@ func (_m *MockVolumeMounter) EXPECT() *_MockVolumeMounterRecorder {
return _m.recorder
}

-func (_m *MockVolumeMounter) Mount(vmi *v1.VirtualMachineInstance) error {
- ret := _m.ctrl.Call(_m, "Mount", vmi)
+func (_m *MockVolumeMounter) Mount(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
+ ret := _m.ctrl.Call(_m, "Mount", vmi, cgroupManager)
ret0, _ := ret[0].(error)
return ret0
}

-func (_mr *_MockVolumeMounterRecorder) Mount(arg0 interface{}) *gomock.Call {
- return _mr.mock.ctrl.RecordCall(_mr.mock, "Mount", arg0)
+func (_mr *_MockVolumeMounterRecorder) Mount(arg0, arg1 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Mount", arg0, arg1)
}

-func (_m *MockVolumeMounter) MountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID) error {
- ret := _m.ctrl.Call(_m, "MountFromPod", vmi, sourceUID)
+func (_m *MockVolumeMounter) MountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID, cgroupManager cgroup.Manager) error {
+ ret := _m.ctrl.Call(_m, "MountFromPod", vmi, sourceUID, cgroupManager)
ret0, _ := ret[0].(error)
return ret0
}

-func (_mr *_MockVolumeMounterRecorder) MountFromPod(arg0, arg1 interface{}) *gomock.Call {
- return _mr.mock.ctrl.RecordCall(_mr.mock, "MountFromPod", arg0, arg1)
+func (_mr *_MockVolumeMounterRecorder) MountFromPod(arg0, arg1, arg2 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "MountFromPod", arg0, arg1, arg2)
}

-func (_m *MockVolumeMounter) Unmount(vmi *v1.VirtualMachineInstance) error {
- ret := _m.ctrl.Call(_m, "Unmount", vmi)
+func (_m *MockVolumeMounter) Unmount(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
+ ret := _m.ctrl.Call(_m, "Unmount", vmi, cgroupManager)
ret0, _ := ret[0].(error)
return ret0
}

-func (_mr *_MockVolumeMounterRecorder) Unmount(arg0 interface{}) *gomock.Call {
- return _mr.mock.ctrl.RecordCall(_mr.mock, "Unmount", arg0)
+func (_mr *_MockVolumeMounterRecorder) Unmount(arg0, arg1 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "Unmount", arg0, arg1)
}

-func (_m *MockVolumeMounter) UnmountAll(vmi *v1.VirtualMachineInstance) error {
- ret := _m.ctrl.Call(_m, "UnmountAll", vmi)
+func (_m *MockVolumeMounter) UnmountAll(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
+ ret := _m.ctrl.Call(_m, "UnmountAll", vmi, cgroupManager)
ret0, _ := ret[0].(error)
return ret0
}

-func (_mr *_MockVolumeMounterRecorder) UnmountAll(arg0 interface{}) *gomock.Call {
- return _mr.mock.ctrl.RecordCall(_mr.mock, "UnmountAll", arg0)
+func (_mr *_MockVolumeMounterRecorder) UnmountAll(arg0, arg1 interface{}) *gomock.Call {
+ return _mr.mock.ctrl.RecordCall(_mr.mock, "UnmountAll", arg0, arg1)
}

func (_m *MockVolumeMounter) IsMounted(vmi *v1.VirtualMachineInstance, volume string, sourceUID types.UID) (bool, error) {
diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index 0dce62c32..65534de8e 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -34,8 +34,7 @@ import (
//go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE

const (
- unableFindHotplugMountedDir = "unable to find hotplug mounted directories for vmi without uid"
- failedToCreateCgroupManagerErrTemplate = "could not create cgroup manager. err: %v"
+ unableFindHotplugMountedDir = "unable to find hotplug mounted directories for vmi without uid"
)

var (
@@ -118,10 +117,6 @@
return isolation.NewSocketBasedIsolationDetector(path)
}

- getCgroupManager = func(vmi *v1.VirtualMachineInstance) (cgroup.Manager, error) {
- return cgroup.NewManagerFromVM(vmi)
- }
-
parentPathForMount = func(
parent isolation.IsolationResult,
child isolation.IsolationResult,
@@ -143,12 +138,12 @@ type volumeMounter struct {

// VolumeMounter is the interface used to mount and unmount volumes to/from a running virtlauncher pod.
type VolumeMounter interface {
// Mount any new volumes defined in the VMI
- Mount(vmi *v1.VirtualMachineInstance) error
- MountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID) error
+ Mount(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error
+ MountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID, cgroupManager cgroup.Manager) error
// Unmount any volumes no longer defined in the VMI
- Unmount(vmi *v1.VirtualMachineInstance) error
+ Unmount(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error
//UnmountAll cleans up all hotplug volumes
- UnmountAll(vmi *v1.VirtualMachineInstance) error
+ UnmountAll(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error
//IsMounted returns if the volume is mounted or not.
IsMounted(vmi *v1.VirtualMachineInstance, volume string, sourceUID types.UID) (bool, error)
}
@@ -302,13 +297,20 @@ func (m *volumeMounter) writePathToMountRecord(path string, vmi *v1.VirtualMachi
return nil
}

-func (m *volumeMounter) mountHotplugVolume(vmi *v1.VirtualMachineInstance, volumeName string, sourceUID types.UID, record *vmiMountTargetRecord, mountDirectory bool) error {
+func (m *volumeMounter) mountHotplugVolume(
+ vmi *v1.VirtualMachineInstance,
+ volumeName string,
+ sourceUID types.UID,
+ record *vmiMountTargetRecord,
+ mountDirectory bool,
+ cgroupManager cgroup.Manager,
+) error {
logger := log.DefaultLogger()
logger.V(4).Infof("Hotplug check volume name: %s", volumeName)
if sourceUID != types.UID("") {
if m.isBlockVolume(&vmi.Status, volumeName) {
logger.V(4).Infof("Mounting block volume: %s", volumeName)
- if err := m.mountBlockHotplugVolume(vmi, volumeName, sourceUID, record); err != nil {
+ if err := m.mountBlockHotplugVolume(vmi, volumeName, sourceUID, record, cgroupManager); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("failed to mount block hotplug volume %s: %v", volumeName, err)
}
@@ -325,15 +327,15 @@
return nil
}

-func (m *volumeMounter) Mount(vmi *v1.VirtualMachineInstance) error {
- return m.mountFromPod(vmi, types.UID(""))
+func (m *volumeMounter) Mount(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
+ return m.mountFromPod(vmi, types.UID(""), cgroupManager)
}

-func (m *volumeMounter) MountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID) error {
- return m.mountFromPod(vmi, sourceUID)
+func (m *volumeMounter) MountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID, cgroupManager cgroup.Manager) error {
+ return m.mountFromPod(vmi, sourceUID, cgroupManager)
}

-func (m *volumeMounter) mountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID) error {
+func (m *volumeMounter) mountFromPod(vmi *v1.VirtualMachineInstance, sourceUID types.UID, cgroupManager cgroup.Manager) error {
record, err := m.getMountTargetRecord(vmi)
if err != nil {
return err
@@ -350,7 +352,7 @@ func (m *volumeMounter) mountFromPod(vmi *v1.VirtualMachineInstance, sourceUID t
if sourceUID == types.UID("") {
sourceUID = volumeStatus.HotplugVolume.AttachPodUID
}
- if err := m.mountHotplugVolume(vmi, volumeStatus.Name, sourceUID, record, mountDirectory); err != nil {
+ if err := m.mountHotplugVolume(vmi, volumeStatus.Name, sourceUID, record, mountDirectory, cgroupManager); err != nil {
return err
}
}
@@ -378,7 +380,13 @@ func (m *volumeMounter) isBlockVolume(vmiStatus *v1.VirtualMachineInstanceStatus
return false
}

-func (m *volumeMounter) mountBlockHotplugVolume(vmi *v1.VirtualMachineInstance, volume string, sourceUID types.UID, record *vmiMountTargetRecord) error {
+func (m *volumeMounter) mountBlockHotplugVolume(
+ vmi *v1.VirtualMachineInstance,
+ volume string,
+ sourceUID types.UID,
+ record *vmiMountTargetRecord,
+ cgroupManager cgroup.Manager,
+) error {
virtlauncherUID := m.findVirtlauncherUID(vmi)
if virtlauncherUID == "" {
// This is not the node the pod is running on.
@@ -389,11 +397,6 @@
return err
}

- cgroupsManager, err := getCgroupManager(vmi)
- if err != nil {
- return fmt.Errorf(failedToCreateCgroupManagerErrTemplate, err)
- }
-
if _, err := safepath.JoinNoFollow(targetPath, volume); errors.Is(err, os.ErrNotExist) {
dev, permissions, err := m.getSourceMajorMinor(sourceUID, volume)
if err != nil {
@@ -422,36 +425,18 @@
return fmt.Errorf("target device %v exists but it is not a block device", devicePath)
}

- isMigrationInProgress := vmi.Status.MigrationState != nil && !vmi.Status.MigrationState.Completed
- volumeNotReady := !m.volumeStatusReady(volume, vmi)
-
- if isMigrationInProgress || volumeNotReady {
- dev, _, err := m.getSourceMajorMinor(sourceUID, volume)
- if err != nil {
- return err
- }
- // allow block devices
- if err := m.allowBlockMajorMinor(dev, cgroupsManager); err != nil {
- return err
- }
+ dev, _, err := m.getBlockFileMajorMinor(devicePath, statDevice)
+ if err != nil {
+ return err
+ }
+ // allow block devices
+ if err := m.allowBlockMajorMinor(dev, cgroupManager); err != nil {
+ return err
}

return m.ownershipManager.SetFileOwnership(devicePath)
}

-func (m *volumeMounter) volumeStatusReady(volumeName string, vmi *v1.VirtualMachineInstance) bool {
- for _, volumeStatus := range vmi.Status.VolumeStatus {
- if volumeStatus.Name == volumeName && volumeStatus.HotplugVolume != nil {
- if volumeStatus.Phase != v1.VolumeReady {
- log.DefaultLogger().V(4).Infof("Volume %s is not ready, but the target block device exists", volumeName)
- }
- return volumeStatus.Phase == v1.VolumeReady
- }
- }
- // This should never happen, it should always find the volume status in the VMI.
- return true
-}
-
func (m *volumeMounter) getSourceMajorMinor(sourceUID types.UID, volumeName string) (uint64, os.FileMode, error) {
basePath, err := deviceBasePath(sourceUID)
if err != nil {
@@ -473,17 +458,15 @@ func (m *volumeMounter) getBlockFileMajorMinor(devicePath *safepath.Path, getter
return info.Rdev, fileInfo.Mode(), nil
}

-func (m *volumeMounter) removeBlockMajorMinor(dev uint64, manager cgroup.Manager) error {
- return m.updateBlockMajorMinor(dev, false, manager)
+func (m *volumeMounter) removeBlockMajorMinor(dev uint64, cgroupManager cgroup.Manager) error {
+ return m.updateBlockMajorMinor(dev, false, cgroupManager)
}

-func (m *volumeMounter) allowBlockMajorMinor(dev uint64, manager cgroup.Manager) error {
- return m.updateBlockMajorMinor(dev, true, manager)
+func (m *volumeMounter) allowBlockMajorMinor(dev uint64, cgroupManager cgroup.Manager) error {
+ return m.updateBlockMajorMinor(dev, true, cgroupManager)
}

-func (m *volumeMounter) updateBlockMajorMinor(dev uint64, allow bool, manager cgroup.Manager) error {
-
- var err error
+func (m *volumeMounter) updateBlockMajorMinor(dev uint64, allow bool, cgroupManager cgroup.Manager) error {
deviceRule := &devices.Rule{
Type: devices.BlockDevice,
Major: int64(unix.Major(dev)),
@@ -492,14 +475,18 @@
Allow: allow,
}

- err = manager.Set(&configs.Resources{
+ if cgroupManager == nil {
+ return fmt.Errorf("failed to apply device rule %+v: cgroup manager is nil", *deviceRule)
+ }
+
+ err := cgroupManager.Set(&configs.Resources{
Devices: []*devices.Rule{deviceRule},
})

if err != nil {
- log.Log.Infof("cgroup %s had failed to set device rule. error: %v. rule: %+v", manager.GetCgroupVersion(), err, *deviceRule)
+ log.Log.Errorf("cgroup %s had failed to set device rule. error: %v. rule: %+v", cgroupManager.GetCgroupVersion(), err, *deviceRule)
} else {
- log.Log.Infof("cgroup %s device rule is set successfully. rule: %+v", manager.GetCgroupVersion(), *deviceRule)
+ log.Log.Infof("cgroup %s device rule is set successfully. rule: %+v", cgroupManager.GetCgroupVersion(), *deviceRule)
}

return err
@@ -632,7 +619,7 @@ func (m *volumeMounter) getSourcePodFilePath(sourceUID types.UID, vmi *v1.Virtua
}

// Unmount unmounts all hotplug disk that are no longer part of the VMI
-func (m *volumeMounter) Unmount(vmi *v1.VirtualMachineInstance) error {
+func (m *volumeMounter) Unmount(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
if vmi.UID != "" {
record, err := m.getMountTargetRecord(vmi)
if err != nil {
@@ -704,7 +691,7 @@ func (m *volumeMounter) Unmount(vmi *v1.VirtualMachineInstance) error {
if blockDevice, err := isBlockDevice(diskPath); err != nil {
return err
} else if blockDevice {
- if err := m.unmountBlockHotplugVolumes(diskPath, vmi); err != nil {
+ if err := m.unmountBlockHotplugVolumes(diskPath, cgroupManager); err != nil {
return err
}
} else if err := m.unmountFileSystemHotplugVolumes(diskPath); err != nil {
@@ -745,12 +732,7 @@ func (m *volumeMounter) unmountFileSystemHotplugVolumes(diskPath *safepath.Path)
return nil
}

-func (m *volumeMounter) unmountBlockHotplugVolumes(diskPath *safepath.Path, vmi *v1.VirtualMachineInstance) error {
- cgroupsManager, err := getCgroupManager(vmi)
- if err != nil {
- return fmt.Errorf(failedToCreateCgroupManagerErrTemplate, err)
- }
-
+func (m *volumeMounter) unmountBlockHotplugVolumes(diskPath *safepath.Path, cgroupManager cgroup.Manager) error {
// Get major and minor so we can deny the container.
dev, _, err := m.getBlockFileMajorMinor(diskPath, statDevice)
if err != nil {
@@ -760,14 +742,11 @@ func (m *volumeMounter) unmountBlockHotplugVolumes(diskPath *safepath.Path, vmi
if err := safepath.UnlinkAtNoFollow(diskPath); err != nil {
return err
}
- if err := m.removeBlockMajorMinor(dev, cgroupsManager); err != nil {
- return err
- }
- return nil
+ return m.removeBlockMajorMinor(dev, cgroupManager)
}

// UnmountAll unmounts all hotplug disks of a given VMI.
-func (m *volumeMounter) UnmountAll(vmi *v1.VirtualMachineInstance) error {
+func (m *volumeMounter) UnmountAll(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
if vmi.UID != "" {
logger := log.DefaultLogger().Object(vmi)
logger.Info("Cleaning up remaining hotplug volumes")
@@ -787,20 +766,20 @@ func (m *volumeMounter) UnmountAll(vmi *v1.VirtualMachineInstance) error {
logger.Infof("Device %v is not mounted anymore, continuing.", entry.TargetFile)
continue
}
- logger.Infof("Unable to unmount volume at path %s: %v", entry.TargetFile, err)
+ logger.Warningf("Unable to unmount volume at path %s: %v", entry.TargetFile, err)
continue
}
diskPath.Close()
if isBlock, err := isBlockDevice(diskPath.Path()); err != nil {
- logger.Infof("Unable to remove block device at path %s: %v", diskPath, err)
+ logger.Warningf("Unable to unmount volume at path %s: %v", diskPath, err)
} else if isBlock {
- if err := m.unmountBlockHotplugVolumes(diskPath.Path(), vmi); err != nil {
- logger.Infof("Unable to remove block device at path %s: %v", diskPath, err)
+ if err := m.unmountBlockHotplugVolumes(diskPath.Path(), cgroupManager); err != nil {
+ logger.Warningf("Unable to remove block device at path %s: %v", diskPath, err)
// Don't return error, try next.
}
} else {
if err := m.unmountFileSystemHotplugVolumes(diskPath.Path()); err != nil {
- logger.Infof("Unable to unmount volume at path %s: %v", diskPath, err)
+ logger.Warningf("Unable to unmount volume at path %s: %v", diskPath, err)
// Don't return error, try next.
}
}
diff --git a/pkg/virt-handler/hotplug-disk/mount_test.go b/pkg/virt-handler/hotplug-disk/mount_test.go
index 9737eaa09..b1a180656 100644
--- a/pkg/virt-handler/hotplug-disk/mount_test.go
+++ b/pkg/virt-handler/hotplug-disk/mount_test.go
@@ -106,10 +106,6 @@ var _ = Describe("HotplugVolume", func() {
rule1.Allow == rule2.Allow
}

- getCgroupManager = func(_ *v1.VirtualMachineInstance) (cgroup.Manager, error) {
- return cgroupManagerMock, nil
- }
-
cgroupMockSet := func(r *runc_configs.Resources) {
if expectedCgroupRule == nil {
return
@@ -372,12 +368,12 @@ var _ = Describe("HotplugVolume", func() {
By("Mounting and validating expected rule is set")
setExpectedCgroupRuns(2)
expectCgroupRule(devices.BlockDevice, 482, 64, true)
- err = m.mountBlockHotplugVolume(vmi, "testvolume", blockSourcePodUID, record)
+ err = m.mountBlockHotplugVolume(vmi, "testvolume", blockSourcePodUID, record, cgroupManagerMock)
Expect(err).ToNot(HaveOccurred())

By("Unmounting, we verify the reverse process happens")
expectCgroupRule(devices.BlockDevice, 482, 64, false)
- err = m.unmountBlockHotplugVolumes(deviceFile, vmi)
+ err = m.unmountBlockHotplugVolumes(deviceFile, cgroupManagerMock)
Expect(err).ToNot(HaveOccurred())
})

@@ -453,7 +449,7 @@ var _ = Describe("HotplugVolume", func() {
By("Mounting and validating expected rule is set")
setExpectedCgroupRuns(1)
expectCgroupRule(devices.BlockDevice, 482, 64, false)
- err = m.unmountBlockHotplugVolumes(deviceFileName, vmi)
+ err = m.unmountBlockHotplugVolumes(deviceFileName, cgroupManagerMock)
Expect(err).ToNot(HaveOccurred())
})

@@ -464,7 +460,7 @@ var _ = Describe("HotplugVolume", func() {
deviceFileName, err := newFile(tempDir, "devicefile")
Expect(err).ToNot(HaveOccurred())
os.Remove(unsafepath.UnsafeAbsolute(deviceFileName.Raw()))
- err = m.unmountBlockHotplugVolumes(deviceFileName, vmi)
+ err = m.unmountBlockHotplugVolumes(deviceFileName, cgroupManagerMock)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no such file or directory"))
})
@@ -837,7 +833,7 @@ var _ = Describe("HotplugVolume", func() {
return nil
})

- err = m.Mount(vmi)
+ err = m.Mount(vmi, cgroupManagerMock)
Expect(err).ToNot(HaveOccurred())
By("Verifying there are 2 records in tempDir/1234")
record := &vmiMountTargetRecord{
@@ -867,7 +863,7 @@ var _ = Describe("HotplugVolume", func() {
Name: "permanent",
})
vmi.Status.VolumeStatus = volumeStatuses
- err = m.Unmount(vmi)
+ err = m.Unmount(vmi, cgroupManagerMock)
Expect(err).ToNot(HaveOccurred())
_, err = os.ReadFile(filepath.Join(tempDir, string(vmi.UID)))
Expect(err).To(HaveOccurred(), "record file still exists %s", filepath.Join(tempDir, string(vmi.UID)))
@@ -883,7 +879,7 @@ var _ = Describe("HotplugVolume", func() {
Name: "permanent",
})
vmi.Status.VolumeStatus = volumeStatuses
- Expect(m.Mount(vmi)).To(Succeed())
+ Expect(m.Mount(vmi, cgroupManagerMock)).To(Succeed())
})

It("unmountAll should cleanup regardless of vmi volumestatuses", func() {
@@ -961,7 +957,7 @@ var _ = Describe("HotplugVolume", func() {
return nil
})

- err = m.Mount(vmi)
+ err = m.Mount(vmi, cgroupManagerMock)
Expect(err).ToNot(HaveOccurred())

By("Verifying there are 2 records in tempDir/1234")
@@ -985,7 +981,7 @@ var _ = Describe("HotplugVolume", func() {
Expect(err).ToNot(HaveOccurred())
Expect(capturedPaths).To(ContainElements(expectedPaths))

- err = m.UnmountAll(vmi)
+ err = m.UnmountAll(vmi, cgroupManagerMock)
Expect(err).ToNot(HaveOccurred())
_, err = os.ReadFile(filepath.Join(tempDir, string(vmi.UID)))
Expect(err).To(HaveOccurred(), "record file still exists %s", filepath.Join(tempDir, string(vmi.UID)))
diff --git a/pkg/virt-handler/vm.go b/pkg/virt-handler/vm.go
index 7b4e50def..38c1762c8 100644
--- a/pkg/virt-handler/vm.go
+++ b/pkg/virt-handler/vm.go
@@ -181,6 +181,10 @@ var PasswordRelatedGuestAgentCommands = []string{
"guest-set-user-password",
}

+var getCgroupManager = func(vmi *v1.VirtualMachineInstance) (cgroup.Manager, error) {
+ return cgroup.NewManagerFromVM(vmi)
+}
+
func NewController(
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
@@ -2072,7 +2076,11 @@ func (d *VirtualMachineController) processVmCleanup(vmi *v1.VirtualMachineInstan
if err := d.containerDiskMounter.Unmount(vmi); err != nil {
return err
}
- if err := d.hotplugVolumeMounter.UnmountAll(vmi); err != nil {
+
+ // UnmountAll does the cleanup on the "best effort" basis: it is
+ // safe to pass a nil cgroupManager.
+ cgroupManager, _ := getCgroupManager(vmi)
+ if err := d.hotplugVolumeMounter.UnmountAll(vmi, cgroupManager); err != nil {
return err
}

@@ -2689,7 +2697,11 @@ func (d *VirtualMachineController) vmUpdateHelperMigrationTarget(origVMI *v1.Vir

// Mount hotplug disks
if attachmentPodUID := vmi.Status.MigrationState.TargetAttachmentPodUID; attachmentPodUID != types.UID("") {
- if err := d.hotplugVolumeMounter.MountFromPod(vmi, attachmentPodUID); err != nil {
+ cgroupManager, err := getCgroupManager(vmi)
+ if err != nil {
+ return err
+ }
+ if err := d.hotplugVolumeMounter.MountFromPod(vmi, attachmentPodUID, cgroupManager); err != nil {
return fmt.Errorf("failed to mount hotplug volumes: %v", err)
}
}
@@ -2799,14 +2811,8 @@ func (d *VirtualMachineController) affinePitThread(vmi *v1.VirtualMachineInstanc
return unix.SchedSetaffinity(pitpid, &Mask)
}

-func (d *VirtualMachineController) configureHousekeepingCgroup(vmi *v1.VirtualMachineInstance) error {
- cgroupManager, err := cgroup.NewManagerFromVM(vmi)
- if err != nil {
- return err
- }
-
- err = cgroupManager.CreateChildCgroup("housekeeping", "cpuset")
- if err != nil {
+func (d *VirtualMachineController) configureHousekeepingCgroup(vmi *v1.VirtualMachineInstance, cgroupManager cgroup.Manager) error {
+ if err := cgroupManager.CreateChildCgroup("housekeeping", "cpuset"); err != nil {
log.Log.Reason(err).Error("CreateChildCgroup ")
return err
}
@@ -2888,6 +2894,10 @@ func (d *VirtualMachineController) vmUpdateHelperDefault(origVMI *v1.VirtualMach
return err
}

+ cgroupManager, err := getCgroupManager(vmi)
+ if err != nil {
+ return err
+ }
var errorTolerantFeaturesError []error
disksInfo := map[string]*containerdisk.DiskInfo{}
if !vmi.IsRunning() && !vmi.IsFinal() {
@@ -2907,7 +2917,7 @@
}

// Try to mount hotplug volume if there is any during startup.
- if err := d.hotplugVolumeMounter.Mount(vmi); err != nil {
+ if err := d.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
return err
}

@@ -2992,7 +3002,7 @@ func (d *VirtualMachineController) vmUpdateHelperDefault(origVMI *v1.VirtualMach
log.Log.Object(vmi).Error(err.Error())
}

- if err := d.hotplugVolumeMounter.Mount(vmi); err != nil {
+ if err := d.hotplugVolumeMounter.Mount(vmi, cgroupManager); err != nil {
return err
}

@@ -3045,7 +3055,7 @@ func (d *VirtualMachineController) vmUpdateHelperDefault(origVMI *v1.VirtualMach
}

if vmi.IsCPUDedicated() && vmi.Spec.Domain.CPU.IsolateEmulatorThread {
- err = d.configureHousekeepingCgroup(vmi)
+ err = d.configureHousekeepingCgroup(vmi, cgroupManager)
if err != nil {
return err
}
@@ -3068,7 +3078,7 @@ func (d *VirtualMachineController) vmUpdateHelperDefault(origVMI *v1.VirtualMach

if vmi.IsRunning() {
// Umount any disks no longer mounted
- if err := d.hotplugVolumeMounter.Unmount(vmi); err != nil {
+ if err := d.hotplugVolumeMounter.Unmount(vmi, cgroupManager); err != nil {
return err
}
}
@@ -3373,7 +3383,7 @@ func (d *VirtualMachineController) claimDeviceOwnership(virtLauncherRootMount *s
}

func (d *VirtualMachineController) reportDedicatedCPUSetForMigratingVMI(vmi *v1.VirtualMachineInstance) error {
- cgroupManager, err := cgroup.NewManagerFromVM(vmi)
+ cgroupManager, err := getCgroupManager(vmi)
if err != nil {
return err
}
diff --git a/pkg/virt-handler/vm_test.go b/pkg/virt-handler/vm_test.go
index 540e88392..273d3f30c 100644
--- a/pkg/virt-handler/vm_test.go
+++ b/pkg/virt-handler/vm_test.go
@@ -47,6 +47,7 @@ import (
netcache "kubevirt.io/kubevirt/pkg/network/cache"
neterrors "kubevirt.io/kubevirt/pkg/network/errors"
"kubevirt.io/kubevirt/pkg/util"
+ "kubevirt.io/kubevirt/pkg/virt-handler/cgroup"
container_disk "kubevirt.io/kubevirt/pkg/virt-handler/container-disk"
hotplug_volume "kubevirt.io/kubevirt/pkg/virt-handler/hotplug-disk"

@@ -105,6 +106,7 @@ var _ = Describe("VirtualMachineInstance", func() {
var mockIsolationResult *isolation.MockIsolationResult
var mockContainerDiskMounter *container_disk.MockMounter
var mockHotplugVolumeMounter *hotplug_volume.MockVolumeMounter
+ var mockCgroupManager *cgroup.MockManager

var vmiFeeder *testutils.VirtualMachineFeeder
var domainFeeder *testutils.DomainFeeder
@@ -128,6 +130,10 @@ var _ = Describe("VirtualMachineInstance", func() {

var certDir string

+ getCgroupManager = func(_ *v1.VirtualMachineInstance) (cgroup.Manager, error) {
+ return mockCgroupManager, nil
+ }
+
BeforeEach(func() {
diskutils.MockDefaultOwnershipManager()

@@ -205,6 +211,7 @@ var _ = Describe("VirtualMachineInstance", func() {

mockContainerDiskMounter = container_disk.NewMockMounter(ctrl)
mockHotplugVolumeMounter = hotplug_volume.NewMockVolumeMounter(ctrl)
+ mockCgroupManager = cgroup.NewMockManager(ctrl)

migrationProxy := migrationproxy.NewMigrationProxyManager(tlsConfig, tlsConfig, config)
fakeDownwardMetricsManager := newFakeManager()
@@ -381,7 +388,7 @@ var _ = Describe("VirtualMachineInstance", func() {
Expect(exists).To(BeTrue())

mockQueue.Add(namespace + "/" + name)
- mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any(), mockCgroupManager).Return(nil)
client.EXPECT().Close()
controller.Execute()

@@ -556,7 +563,7 @@ var _ = Describe("VirtualMachineInstance", func() {

vmiFeeder.Add(vmi)
domainFeeder.Add(domain)
- mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
Expect(mockQueue.Len()).To(Equal(0))
@@ -573,7 +580,7 @@ var _ = Describe("VirtualMachineInstance", func() {
mockWatchdog.CreateFile(vmi)

vmiFeeder.Add(vmi)
- mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any(), mockCgroupManager).Return(nil)
client.EXPECT().Close()
controller.Execute()
Expect(mockQueue.Len()).To(Equal(0))
@@ -601,7 +608,7 @@ var _ = Describe("VirtualMachineInstance", func() {
vmiFeeder.Add(vmi)

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
testutils.ExpectEvent(recorder, VMIDefined)
@@ -677,7 +684,7 @@ var _ = Describe("VirtualMachineInstance", func() {
Expect(options.VirtualMachineSMBios.Product).To(Equal(virtconfig.SmbiosConfigDefaultProduct))
Expect(options.VirtualMachineSMBios.Manufacturer).To(Equal(virtconfig.SmbiosConfigDefaultManufacturer))
})
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
controller.Execute()
testutils.ExpectEvent(recorder, VMIDefined)
})
@@ -718,8 +725,8 @@ var _ = Describe("VirtualMachineInstance", func() {
Expect(vmi.Status.Machine).To(Equal(updatedVMI.Status.Machine))
return vmi, nil
})
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
})
@@ -822,8 +829,8 @@ var _ = Describe("VirtualMachineInstance", func() {
})
vmiInterface.EXPECT().Update(context.Background(), NewVMICondMatcher(*updatedVMI))
client.EXPECT().GetGuestInfo().Return(&v1.VirtualMachineInstanceGuestAgentInfo{}, nil)
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
})
@@ -875,8 +882,8 @@ var _ = Describe("VirtualMachineInstance", func() {
Expect(options.VirtualMachineSMBios.Manufacturer).To(Equal(virtconfig.SmbiosConfigDefaultManufacturer))
})
client.EXPECT().GetGuestInfo().Return(&v1.VirtualMachineInstanceGuestAgentInfo{}, nil)
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
})
@@ -930,8 +937,8 @@ var _ = Describe("VirtualMachineInstance", func() {
Expect(options.VirtualMachineSMBios.Manufacturer).To(Equal(virtconfig.SmbiosConfigDefaultManufacturer))
})
vmiInterface.EXPECT().Update(context.Background(), NewVMICondMatcher(*updatedVMI))
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
})
@@ -970,8 +977,8 @@ var _ = Describe("VirtualMachineInstance", func() {

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
vmiInterface.EXPECT().Update(context.Background(), NewVMICondMatcher(*updatedVMI))
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()

@@ -1009,8 +1016,8 @@ var _ = Describe("VirtualMachineInstance", func() {
domainFeeder.Add(domain)

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
// should not make another event entry unless something changes
@@ -1063,8 +1070,8 @@ var _ = Describe("VirtualMachineInstance", func() {

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
vmiInterface.EXPECT().Update(context.Background(), NewVMICondMatcher(*vmiCopy))
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

controller.Execute()
expectEvent(string(v1.AccessCredentialsSyncFailed), true)
@@ -1101,8 +1108,8 @@ var _ = Describe("VirtualMachineInstance", func() {
domainFeeder.Add(domain)

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
vmiInterface.EXPECT().Update(context.Background(), NewVMICondMatcher(*updatedVMI))

controller.Execute()
@@ -1123,8 +1130,8 @@ var _ = Describe("VirtualMachineInstance", func() {
domainFeeder.Add(domain)

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
vmiInterface.EXPECT().Update(context.Background(), NewVMICondMatcher(*updatedVMI))

controller.Execute()
@@ -1190,7 +1197,7 @@ var _ = Describe("VirtualMachineInstance", func() {
vmiInterface.EXPECT().Update(context.Background(), gomock.Any()).Do(func(ctx context.Context, vmi *v1.VirtualMachineInstance) {
Expect(vmi.Status.Phase).To(Equal(v1.Failed))
})
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
controller.Execute()
testutils.ExpectEvent(recorder, "failed to configure vmi network:")
testutils.ExpectEvent(recorder, VMICrashed)
@@ -1226,7 +1233,7 @@ var _ = Describe("VirtualMachineInstance", func() {
Expect(options.VirtualMachineSMBios.Product).To(Equal(virtconfig.SmbiosConfigDefaultProduct))
Expect(options.VirtualMachineSMBios.Manufacturer).To(Equal(virtconfig.SmbiosConfigDefaultManufacturer))
})
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
vmiInterface.EXPECT().Update(context.Background(), updatedVMI)

controller.Execute()
@@ -1300,7 +1307,7 @@ var _ = Describe("VirtualMachineInstance", func() {
vmi.Status.Phase = v1.Scheduled
vmiFeeder.Add(vmi)
vmiInterface.EXPECT().Update(context.Background(), gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())

controller.Execute()
@@ -1316,8 +1323,8 @@ var _ = Describe("VirtualMachineInstance", func() {
vmiFeeder.Add(vmi)
domainFeeder.Add(domain)
vmiInterface.EXPECT().Update(context.Background(), gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())

controller.Execute()
@@ -1332,7 +1339,7 @@ var _ = Describe("VirtualMachineInstance", func() {
vmiFeeder.Add(vmi)
domainFeeder.Add(domain)
vmiInterface.EXPECT().Update(context.Background(), gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(fmt.Errorf("Error"))
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(fmt.Errorf("Error"))

controller.Execute()
testutils.ExpectEvent(recorder, v1.SyncFailed.String())
@@ -1346,7 +1353,7 @@ var _ = Describe("VirtualMachineInstance", func() {
domain.Status.Status = api.Running
vmiFeeder.Add(vmi)
domainFeeder.Add(domain)
- mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any(), mockCgroupManager).Return(nil)
client.EXPECT().Close()
controller.processVmCleanup(vmi)
})
@@ -1672,7 +1679,7 @@ var _ = Describe("VirtualMachineInstance", func() {
vmi := api2.NewMinimalVMI("testvmi")
vmi.Status.Phase = phase
vmiFeeder.Add(vmi)
- mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any(), mockCgroupManager).Return(nil)
controller.Execute()
// expect no errors and no mock interactions
Expect(mockQueue.NumRequeues("default/testvmi")).To(Equal(0))
@@ -1804,7 +1811,7 @@ var _ = Describe("VirtualMachineInstance", func() {
updatedVmi := vmi.DeepCopy()
updatedVmi.Status.MigrationState.TargetNodeAddress = controller.migrationIpAddress
updatedVmi.Status.MigrationState.TargetDirectMigrationNodePorts = destSrcPorts
- mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any(), mockCgroupManager).Return(nil)

client.EXPECT().Close()
controller.Execute()
@@ -1845,7 +1852,7 @@ var _ = Describe("VirtualMachineInstance", func() {
Expect(err).NotTo(HaveOccurred())
defer socket.Close()

- mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().UnmountAll(gomock.Any(), mockCgroupManager).Return(nil)

client.EXPECT().Ping().Return(fmt.Errorf("disconnected"))
client.EXPECT().Close()
@@ -3055,8 +3062,8 @@ var _ = Describe("VirtualMachineInstance", func() {
domainFeeder.Add(domain)

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)

expectedMemory := resource.MustParse("512Ki")
vmiInterface.EXPECT().Update(context.Background(), gomock.Any()).Do(func(ctx context.Context, arg interface{}) {
@@ -3140,8 +3147,8 @@ var _ = Describe("VirtualMachineInstance", func() {
domainFeeder.Add(domain)

client.EXPECT().SyncVirtualMachine(vmi, gomock.Any())
- mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any()).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Unmount(gomock.Any(), mockCgroupManager).Return(nil)
+ mockHotplugVolumeMounter.EXPECT().Mount(gomock.Any(), mockCgroupManager).Return(nil)
vmiInterface.EXPECT().Update(context.Background(), gomock.Any()).Do(func(ctx context.Context, arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.VolumeStatus).To(HaveLen(2))
for _, status := range arg.(*v1.VirtualMachineInstance).Status.VolumeStatus {
--
2.42.1

From 9f078facf0d0d03f5438dae85fa243c891efb0b1 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Mon, 28 Nov 2022 10:39:57 +0100
Subject: [PATCH 3/4] cgroup v2: Drop global rulesPerPid map

Keep the existing device rules within the manager state.

Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
 pkg/virt-handler/cgroup/cgroup_v2_manager.go | 21 ++++++++++----------
 1 file changed, 10 insertions(+), 11 deletions(-)
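
The point of this patch is to replace the package-level rulesPerPid map, keyed by cgroup path, with state held on each v2 manager instance. The sketch below illustrates that state change; the mergeRules helper is only a plausible stand-in for KubeVirt's addCurrentRules (assumed here to deduplicate by type/major/minor, with incoming rules taking precedence), whose real implementation lives in util.go:

```go
// Illustrative sketch of per-instance rule accumulation; not KubeVirt code.
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/devices"
)

type v2Manager struct {
	deviceRules []*devices.Rule // per-instance state, no global map
}

type ruleKey struct {
	t            devices.Type
	major, minor int64
}

// mergeRules keeps every incoming rule and carries over current rules that
// are not shadowed by an incoming rule for the same device.
func mergeRules(current, incoming []*devices.Rule) []*devices.Rule {
	seen := map[ruleKey]bool{}
	merged := append([]*devices.Rule{}, incoming...)
	for _, r := range incoming {
		seen[ruleKey{r.Type, r.Major, r.Minor}] = true
	}
	for _, r := range current {
		if !seen[ruleKey{r.Type, r.Major, r.Minor}] {
			merged = append(merged, r)
		}
	}
	return merged
}

func (v *v2Manager) set(newRules []*devices.Rule) {
	v.deviceRules = mergeRules(v.deviceRules, newRules)
}

func main() {
	m := &v2Manager{}
	m.set([]*devices.Rule{{Type: devices.BlockDevice, Major: 8, Minor: 0, Permissions: "rwm", Allow: true}})
	m.set([]*devices.Rule{{Type: devices.CharDevice, Major: 10, Minor: 232, Permissions: "rwm", Allow: true}})
	fmt.Println(len(m.deviceRules)) // 2: rules accumulate on the instance
}
```

Keeping the rules on the manager avoids stale entries for reused cgroup paths and lets each Set call re-emit the full allowlist, which matters because writing a new eBPF device program replaces the previous one wholesale.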

diff --git a/pkg/virt-handler/cgroup/cgroup_v2_manager.go b/pkg/virt-handler/cgroup/cgroup_v2_manager.go
index 989c004e8..fb874c654 100644
--- a/pkg/virt-handler/cgroup/cgroup_v2_manager.go
+++ b/pkg/virt-handler/cgroup/cgroup_v2_manager.go
@@ -16,8 +16,6 @@ import (
"kubevirt.io/kubevirt/pkg/util"
)

-var rulesPerPid = make(map[string][]*devices.Rule)
-
type v2Manager struct {
runc_cgroups.Manager
dirPath string
@@ -45,7 +43,7 @@ func newCustomizedV2Manager(
runcManager,
runcManager.GetPaths()[""],
isRootless,
- deviceRules,
+ append(deviceRules, GenerateDefaultDeviceRules()...),
execVirtChroot,
}

@@ -60,19 +58,20 @@ func (v *v2Manager) Set(r *runc_configs.Resources) error {
// We want to keep given resources untouched
resourcesToSet := *r

- //Add default rules
- resourcesToSet.Devices = append(resourcesToSet.Devices, GenerateDefaultDeviceRules()...)
- resourcesToSet.Devices = append(resourcesToSet.Devices, v.deviceRules...)
-
- rulesToSet, err := addCurrentRules(rulesPerPid[v.dirPath], resourcesToSet.Devices)
+ rulesToSet, err := addCurrentRules(v.deviceRules, resourcesToSet.Devices)
if err != nil {
return err
}
- rulesPerPid[v.dirPath] = rulesToSet
+ v.deviceRules = rulesToSet
resourcesToSet.Devices = rulesToSet
+ for _, rule := range rulesToSet {
+ if rule == nil {
+ continue
+ }
+ log.Log.V(5).Infof("cgroupsv2 device allowlist: rule after appending current+new: type: %d permissions: %s allow: %t major: %d minor: %d", rule.Type, rule.Permissions, rule.Allow, rule.Major, rule.Minor)
+ }

- err = v.execVirtChroot(&resourcesToSet, map[string]string{"": v.dirPath}, v.isRootless, v.GetCgroupVersion())
- return err
+ return v.execVirtChroot(&resourcesToSet, map[string]string{"": v.dirPath}, v.isRootless, v.GetCgroupVersion())
}

func (v *v2Manager) GetCgroupVersion() CgroupVersion {
--
2.42.1

From 725e4d70f1454d229882e2790dfa263bbeed2334 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk <akalenyu@redhat.com>
Date: Mon, 6 Nov 2023 13:24:34 +0200
Subject: [PATCH 4/4] tests: virt handler getting killed/live migration with
 hotplug and persistent disk

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
 tests/storage/hotplug.go | 151 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 141 insertions(+), 10 deletions(-)

diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index 172f13e74..c6c8de4e1 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -572,6 +572,119 @@ var _ = SIGDescribe("Hotplug", func() {
)
})

+ Context("Offline VM with a block volume", func() {
+ var (
+ vm *v1.VirtualMachine
+ sc string
+ )
+
+ BeforeEach(func() {
+ var exists bool
+
+ sc, exists = libstorage.GetRWXBlockStorageClass()
+ if !exists {
+ Skip("Skip test when RWXBlock storage class is not present")
+ }
+
+ vmi, _ := tests.NewRandomVirtualMachineInstanceWithBlockDisk(
+ cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
+ testsuite.GetTestNamespace(nil),
+ corev1.ReadWriteMany,
+ )
+ tests.AddUserData(vmi, "cloud-init", "#!/bin/bash\necho 'hello'\n")
+
+ By("Creating VirtualMachine")
+ vm, err = virtClient.VirtualMachine(vmi.Namespace).Create(context.Background(), tests.NewRandomVirtualMachine(vmi, false))
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ DescribeTable("Should start with a hotplug block", func(addVolumeFunc addVolumeFunction) {
+ dv := createDataVolumeAndWaitForImport(sc, corev1.PersistentVolumeBlock)
+
+ By("Adding a hotplug block volume")
+ addVolumeFunc(vm.Name, vm.Namespace, dv.Name, dv.Name, v1.DiskBusSCSI, false, "")
+
+ By("Verifying the volume has been added to the template spec")
+ verifyVolumeAndDiskVMAdded(virtClient, vm, dv.Name)
+
+ By("Starting the VM")
+ vm = tests.StartVirtualMachine(vm)
+ vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Verifying the volume is attached and usable")
+ verifyVolumeAndDiskVMIAdded(virtClient, vmi, dv.Name)
+ verifyVolumeStatus(vmi, v1.VolumeReady, "", dv.Name)
+ getVmiConsoleAndLogin(vmi)
+ targets := verifyHotplugAttachedAndUseable(vmi, []string{dv.Name})
+ Expect(targets).To(HaveLen(1))
+ },
+ Entry("DataVolume", addDVVolumeVM),
+ Entry("PersistentVolume", addPVCVolumeVM),
+ )
+
+ It("[Serial]Should preserve access to block devices if virt-handler crashes", Serial, func() {
+ blockDevices := []string{"/dev/disk0"}
+
+ By("Adding a hotplug block volume")
+ dv := createDataVolumeAndWaitForImport(sc, corev1.PersistentVolumeBlock)
+ blockDevices = append(blockDevices, fmt.Sprintf("/var/run/kubevirt/hotplug-disks/%s", dv.Name))
+ addDVVolumeVM(vm.Name, vm.Namespace, dv.Name, dv.Name, v1.DiskBusSCSI, false, "")
+
+ By("Verifying the volume has been added to the template spec")
+ verifyVolumeAndDiskVMAdded(virtClient, vm, dv.Name)
+
+ By("Starting the VM")
+ vm = tests.StartVirtualMachine(vm)
+ vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+
+ By("Verifying the volume is attached and usable")
+ verifyVolumeAndDiskVMIAdded(virtClient, vmi, dv.Name)
+ verifyVolumeStatus(vmi, v1.VolumeReady, "", dv.Name)
+ getVmiConsoleAndLogin(vmi)
+ targets := verifyHotplugAttachedAndUseable(vmi, []string{dv.Name})
+ Expect(targets).To(HaveLen(1))
+
+ By("Deleting virt-handler pod")
+ virtHandlerPod, err := libnode.GetVirtHandlerPod(virtClient, vmi.Status.NodeName)
+ Expect(err).ToNot(HaveOccurred())
|
|
+ Eventually(func() bool {
|
|
+ err := virtClient.CoreV1().
|
|
+ Pods(virtHandlerPod.GetObjectMeta().GetNamespace()).
|
|
+ Delete(context.Background(), virtHandlerPod.GetObjectMeta().GetName(), metav1.DeleteOptions{})
|
|
+ return errors.IsNotFound(err)
|
|
+ }, 60*time.Second, 1*time.Second).Should(BeTrue(), "virt-handler pod is expected to be deleted")
|
|
+
|
|
+ By("Waiting for virt-handler pod to restart")
|
|
+ Eventually(func() bool {
|
|
+ virtHandlerPod, err = libnode.GetVirtHandlerPod(virtClient, vmi.Status.NodeName)
|
|
+ return err == nil && virtHandlerPod.Status.Phase == corev1.PodRunning
|
|
+ }, 60*time.Second, 1*time.Second).Should(BeTrue(), "virt-handler pod is expected to be restarted")
|
|
+
|
|
+ By("Adding another hotplug block volume")
|
|
+ dv = createDataVolumeAndWaitForImport(sc, corev1.PersistentVolumeBlock)
|
|
+ blockDevices = append(blockDevices, fmt.Sprintf("/var/run/kubevirt/hotplug-disks/%s", dv.Name))
|
|
+ addDVVolumeVM(vm.Name, vm.Namespace, dv.Name, dv.Name, v1.DiskBusSCSI, false, "")
|
|
+
|
|
+ By("Verifying the volume is attached and usable")
|
|
+ verifyVolumeAndDiskVMIAdded(virtClient, vmi, dv.Name)
|
|
+ verifyVolumeStatus(vmi, v1.VolumeReady, "", dv.Name)
|
|
+ getVmiConsoleAndLogin(vmi)
|
|
+ targets = verifyHotplugAttachedAndUseable(vmi, []string{dv.Name})
|
|
+ Expect(targets).To(HaveLen(1))
|
|
+
|
|
+ By("Verifying the block devices are still accessible")
|
|
+ for _, dev := range blockDevices {
|
|
+ By(fmt.Sprintf("Verifying %s", dev))
|
|
+ output := tests.RunCommandOnVmiPod(vmi, []string{
|
|
+ "dd", fmt.Sprintf("if=%s", dev), "of=/dev/null", "bs=1", "count=1", "status=none",
|
|
+ })
|
|
+ Expect(output).To(BeEmpty())
|
|
+ }
|
|
+ })
|
|
+ })
|
|
+
|
|
Context("WFFC storage", func() {
|
|
var (
|
|
vm *v1.VirtualMachine
|
|
@@ -983,15 +1096,32 @@ var _ = SIGDescribe("Hotplug", func() {
|
|
sc string
|
|
|
|
numberOfMigrations int
|
|
+ hotplugLabelKey string
|
|
sourceNode string
|
|
targetNode string
|
|
)
|
|
|
|
const (
|
|
- hotplugLabelKey = "kubevirt-test-migration-with-hotplug-disks"
|
|
hotplugLabelValue = "true"
|
|
)
|
|
|
|
+ containerDiskVMIFunc := func() *v1.VirtualMachineInstance {
|
|
+ return libvmi.NewCirros(
|
|
+ libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
|
|
+ libvmi.WithNetwork(v1.DefaultPodNetwork()),
|
|
+ )
|
|
+ }
|
|
+ persistentDiskVMIFunc := func() *v1.VirtualMachineInstance {
|
|
+ vmi, _ := tests.NewRandomVirtualMachineInstanceWithBlockDisk(
|
|
+ cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
|
|
+ testsuite.GetTestNamespace(nil),
|
|
+ corev1.ReadWriteMany,
|
|
+ )
|
|
+ tests.AddUserData(vmi, "cloud-init", "#!/bin/bash\necho 'hello'\n")
|
|
+
|
|
+ return vmi
|
|
+ }
|
|
+
|
|
BeforeEach(func() {
|
|
exists := false
|
|
sc, exists = libstorage.GetRWXBlockStorageClass()
|
|
@@ -1024,14 +1154,8 @@ var _ = SIGDescribe("Hotplug", func() {
|
|
}
|
|
// Ensure the virt-launcher pod is scheduled on the chosen source node and then
|
|
// migrated to the proper target.
|
|
+ hotplugLabelKey = fmt.Sprintf("%s-hotplug-disk-migration", testsuite.GetTestNamespace(nil))
|
|
libnode.AddLabelToNode(sourceNode, hotplugLabelKey, hotplugLabelValue)
|
|
- vmi = libvmi.NewCirros(
|
|
- libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
|
|
- libvmi.WithNetwork(v1.DefaultPodNetwork()),
|
|
- )
|
|
- vmi.Spec.NodeSelector = map[string]string{hotplugLabelKey: hotplugLabelValue}
|
|
- vmi = tests.RunVMIAndExpectLaunch(vmi, 240)
|
|
- libnode.AddLabelToNode(targetNode, hotplugLabelKey, hotplugLabelValue)
|
|
})
|
|
|
|
AfterEach(func() {
|
|
@@ -1040,7 +1164,11 @@ var _ = SIGDescribe("Hotplug", func() {
|
|
libnode.RemoveLabelFromNode(targetNode, hotplugLabelKey)
|
|
})
|
|
|
|
- It("should allow live migration with attached hotplug volumes", func() {
|
|
+ DescribeTable("should allow live migration with attached hotplug volumes", func(vmiFunc func() *v1.VirtualMachineInstance) {
|
|
+ vmi = vmiFunc()
|
|
+ vmi.Spec.NodeSelector = map[string]string{hotplugLabelKey: hotplugLabelValue}
|
|
+ vmi = tests.RunVMIAndExpectLaunch(vmi, 240)
|
|
+ libnode.AddLabelToNode(targetNode, hotplugLabelKey, hotplugLabelValue)
|
|
volumeName := "testvolume"
|
|
volumeMode := corev1.PersistentVolumeBlock
|
|
addVolumeFunc := addDVVolumeVMI
|
|
@@ -1105,7 +1233,10 @@ var _ = SIGDescribe("Hotplug", func() {
|
|
Expect(err).ToNot(HaveOccurred())
|
|
verifyVolumeAndDiskVMIAdded(virtClient, vmi, volumeName)
|
|
verifyVolumeStatus(vmi, v1.VolumeReady, "", volumeName)
|
|
- })
|
|
+ },
|
|
+ Entry("containerDisk VMI", containerDiskVMIFunc),
|
|
+ Entry("persistent disk VMI", persistentDiskVMIFunc),
|
|
+ )
|
|
})
|
|
|
|
Context("disk mutating sidecar", func() {
|
|
--
|
|
2.42.1
|
|
|