Accepting request 1125817 from home:vulyanov:branches:Virtualization
- Update to version 1.1.0
  Release notes https://github.com/kubevirt/kubevirt/releases/tag/v1.1.0
- Drop upstreamed patches
  0001-Fix-qemu-system-lookup.patch
  0003-Virtiofs-Remove-duplicated-functional-tests.patch
  0005-Support-multiple-watchdogs-in-the-domain-schema.patch
- Add patches
  0001-Update-google.golang.org-grpc-to-1.56.3.patch (CVE-2023-44487)
  0002-virt-launcher-fix-qemu-non-root-path.patch
  0003-cgroupsv2-reconstruct-device-allowlist.patch

OBS-URL: https://build.opensuse.org/request/show/1125817
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=139
parent fd01465396
commit 18a3b21466
0001-Fix-qemu-system-lookup.patch (deleted)
@ -1,378 +0,0 @@
From de15ee969681bbd95ed0e6a5a460c3b0cba2cf52 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Tue, 4 Jul 2023 13:27:25 +0200
Subject: [PATCH 1/2] Lookup qemu process by executable prefix

Exact matching with 'qemu-system' does not work since the executable
name also includes a suffix with current architecture, e.g.
qemu-system-x86_64. This leads to errors of type:

no QEMU process found under process 16427 child processes

Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
 pkg/virt-handler/isolation/detector.go | 6 +++---
 pkg/virt-handler/isolation/detector_test.go | 2 +-
 pkg/virt-handler/isolation/process.go | 17 ++++++++++++-----
 pkg/virt-handler/isolation/process_test.go | 20 ++++++++++----------
 4 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/pkg/virt-handler/isolation/detector.go b/pkg/virt-handler/isolation/detector.go
index 9c282e231..52a1de9ee 100644
--- a/pkg/virt-handler/isolation/detector.go
+++ b/pkg/virt-handler/isolation/detector.go
@@ -185,13 +185,13 @@ func AdjustQemuProcessMemoryLimits(podIsoDetector PodIsolationDetector, vmi *v1.
 	return nil
 }
 
-var qemuProcessExecutables = []string{"qemu-system", "qemu-kvm"}
+var qemuProcessExecutablePrefixes = []string{"qemu-system", "qemu-kvm"}
 
 // findIsolatedQemuProcess Returns the first occurrence of the QEMU process whose parent is PID"
 func findIsolatedQemuProcess(processes []ps.Process, pid int) (ps.Process, error) {
 	processes = childProcesses(processes, pid)
-	for _, exec := range qemuProcessExecutables {
-		if qemuProcess := lookupProcessByExecutable(processes, exec); qemuProcess != nil {
+	for _, execPrefix := range qemuProcessExecutablePrefixes {
+		if qemuProcess := lookupProcessByExecutablePrefix(processes, execPrefix); qemuProcess != nil {
 			return qemuProcess, nil
 		}
 	}
diff --git a/pkg/virt-handler/isolation/detector_test.go b/pkg/virt-handler/isolation/detector_test.go
index 29486d00d..6ada37918 100644
--- a/pkg/virt-handler/isolation/detector_test.go
+++ b/pkg/virt-handler/isolation/detector_test.go
@@ -132,7 +132,7 @@ var _ = Describe("findIsolatedQemuProcess", func() {
 		fakeProcess3}
 
 	qemuKvmProc := ProcessStub{pid: 101, ppid: virtLauncherPid, binary: "qemu-kvm"}
-	qemuSystemProc := ProcessStub{pid: 101, ppid: virtLauncherPid, binary: "qemu-system"}
+	qemuSystemProc := ProcessStub{pid: 101, ppid: virtLauncherPid, binary: "qemu-system-x86_64"}
 
 	DescribeTable("should return QEMU process",
 		func(processes []ps.Process, pid int, expectedProcess ps.Process) {
diff --git a/pkg/virt-handler/isolation/process.go b/pkg/virt-handler/isolation/process.go
index bf3072153..6a99a4d52 100644
--- a/pkg/virt-handler/isolation/process.go
+++ b/pkg/virt-handler/isolation/process.go
@@ -19,7 +19,11 @@
 
 package isolation
 
-import "github.com/mitchellh/go-ps"
+import (
+	"strings"
+
+	"github.com/mitchellh/go-ps"
+)
 
 // childProcesses given a list of processes, it returns the ones that are children
 // of the given PID.
@@ -34,11 +38,14 @@ func childProcesses(processes []ps.Process, pid int) []ps.Process {
 	return childProcesses
 }
 
-// lookupProcessByExecutable given list of processes, it return the first occurrence
-// of a process that runs the given executable.
-func lookupProcessByExecutable(processes []ps.Process, exectutable string) ps.Process {
+// lookupProcessByExecutablePrefix given list of processes, it return the first occurrence
+// of a process with the given executable prefix.
+func lookupProcessByExecutablePrefix(processes []ps.Process, execPrefix string) ps.Process {
+	if execPrefix == "" {
+		return nil
+	}
 	for _, process := range processes {
-		if process.Executable() == exectutable {
+		if strings.HasPrefix(process.Executable(), execPrefix) {
 			return process
 		}
 	}
diff --git a/pkg/virt-handler/isolation/process_test.go b/pkg/virt-handler/isolation/process_test.go
index de7d1449d..1a882e129 100644
--- a/pkg/virt-handler/isolation/process_test.go
+++ b/pkg/virt-handler/isolation/process_test.go
@@ -57,17 +57,17 @@ var _ = Describe("process", func() {
 		)
 	})
 
-	Context("lookup process by executable", func() {
+	Context("lookup process by executable prefix", func() {
 		procStub5 := ProcessStub{ppid: 100, pid: 220, binary: processTestExecPath}
 
 		DescribeTable("should find no process",
-			func(processes []ps.Process, executable string) {
-				Expect(lookupProcessByExecutable(processes, executable)).To(BeNil())
+			func(processes []ps.Process, executablePrefix string) {
+				Expect(lookupProcessByExecutablePrefix(processes, executablePrefix)).To(BeNil())
 			},
-			Entry("given no input processes and empty string as executable",
+			Entry("given no input processes and empty string as executable prefix",
 				emptyProcessList, "",
 			),
-			Entry("given no input processes and executable",
+			Entry("given no input processes and executable prefix",
 				emptyProcessList, "processA",
 			),
 			Entry("given processes list and empty string",
@@ -75,15 +75,15 @@ var _ = Describe("process", func() {
 			),
 		)
 
-		DescribeTable("should return the first occurrence of a process that runs the given executable",
-			func(processes []ps.Process, executable string, expectedProcess ps.Process) {
-				Expect(lookupProcessByExecutable(processes, executable)).
+		DescribeTable("should return the first occurrence of a process with the given executable prefix",
+			func(processes []ps.Process, executablePrefix string, expectedProcess ps.Process) {
+				Expect(lookupProcessByExecutablePrefix(processes, executablePrefix)).
 					To(Equal(expectedProcess))
 			},
-			Entry("given processes list that includes exactly one process that runs the executable",
+			Entry("given processes list that includes exactly one process with the executable prefix",
 				testProcesses, processTestExecPath, procStub1,
 			),
-			Entry("given processes list that includes more than one process that runs the executable",
+			Entry("given processes list that includes more than one process with the executable prefix",
 				append(testProcesses, procStub5), processTestExecPath, procStub1,
 			),
 		)
--
2.42.0

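The motivation for [PATCH 1/2] above is easy to demonstrate in isolation. Below is a minimal, self-contained sketch (the `process` type is a stand-in for the go-ps `Process` interface, not KubeVirt's real code): an exact comparison against "qemu-system" misses qemu-system-x86_64, while a prefix match finds it.

```go
package main

import (
	"fmt"
	"strings"
)

// process is a simplified stand-in for the go-ps Process interface;
// only the executable name matters for the lookup.
type process struct{ executable string }

// lookupByExecutablePrefix mirrors the patched helper: it returns the first
// process whose executable starts with the given prefix, or nil for an
// empty prefix or no match.
func lookupByExecutablePrefix(processes []process, prefix string) *process {
	if prefix == "" {
		return nil
	}
	for i, p := range processes {
		if strings.HasPrefix(p.executable, prefix) {
			return &processes[i]
		}
	}
	return nil
}

func main() {
	procs := []process{{"virt-launcher"}, {"qemu-system-x86_64"}}
	// An exact match on "qemu-system" would fail here; the prefix match succeeds.
	if p := lookupByExecutablePrefix(procs, "qemu-system"); p != nil {
		fmt.Println("found:", p.executable) // found: qemu-system-x86_64
	}
}
```
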
From 0254d71c567fef3cd6ce8378eb0540fc93e5666f Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Tue, 4 Jul 2023 14:04:57 +0200
Subject: [PATCH 2/2] tests: Detect the qemu emulator binary in runtime

Avoid hardcoding qemu-kvm since the underlying images may use e.g.
qemu-system-*.

Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
 tests/realtime/BUILD.bazel | 1 +
 tests/realtime/realtime.go | 28 +++++++++++++++++-----------
 tests/security_features_test.go | 14 +++++++-------
 tests/utils.go | 12 ++++++++++--
 tests/vmi_configuration_test.go | 8 ++++++--
 5 files changed, 41 insertions(+), 22 deletions(-)

diff --git a/tests/realtime/BUILD.bazel b/tests/realtime/BUILD.bazel
index 3718467e1..9d66eb7df 100644
--- a/tests/realtime/BUILD.bazel
+++ b/tests/realtime/BUILD.bazel
@@ -18,6 +18,7 @@ go_library(
 		"//tests/framework/kubevirt:go_default_library",
 		"//tests/libvmi:go_default_library",
 		"//tests/libwait:go_default_library",
+		"//tests/testsuite:go_default_library",
 		"//tests/util:go_default_library",
 		"//vendor/github.com/onsi/ginkgo/v2:go_default_library",
 		"//vendor/github.com/onsi/gomega:go_default_library",
diff --git a/tests/realtime/realtime.go b/tests/realtime/realtime.go
index a9e93161c..0e1249f0d 100644
--- a/tests/realtime/realtime.go
+++ b/tests/realtime/realtime.go
@@ -2,6 +2,7 @@ package realtime
 
 import (
 	"context"
+	"path/filepath"
 	"strconv"
 	"strings"
 
@@ -25,6 +26,7 @@ import (
 	"kubevirt.io/kubevirt/tests/framework/kubevirt"
 	"kubevirt.io/kubevirt/tests/libvmi"
 	"kubevirt.io/kubevirt/tests/libwait"
+	"kubevirt.io/kubevirt/tests/testsuite"
 	"kubevirt.io/kubevirt/tests/util"
 )
 
@@ -56,12 +58,12 @@ func newFedoraRealtime(realtimeMask string) *v1.VirtualMachineInstance {
 	)
 }
 
-func byStartingTheVMI(vmi *v1.VirtualMachineInstance, virtClient kubecli.KubevirtClient) {
+func byStartingTheVMI(vmi *v1.VirtualMachineInstance, virtClient kubecli.KubevirtClient) *v1.VirtualMachineInstance {
 	By("Starting a VirtualMachineInstance")
 	var err error
-	vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(context.Background(), vmi)
+	vmi, err = virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Create(context.Background(), vmi)
 	Expect(err).ToNot(HaveOccurred())
-	libwait.WaitForSuccessfulVMIStart(vmi)
+	return libwait.WaitForSuccessfulVMIStart(vmi)
 }
 
 var _ = Describe("[sig-compute-realtime][Serial]Realtime", Serial, decorators.SigComputeRealtime, func() {
@@ -81,15 +83,17 @@ var _ = Describe("[sig-compute-realtime][Serial]Realtime", Serial, decorators.Si
 
 		It("when no mask is specified", func() {
 			const noMask = ""
-			vmi := newFedoraRealtime(noMask)
-			byStartingTheVMI(vmi, virtClient)
+			vmi := byStartingTheVMI(newFedoraRealtime(noMask), virtClient)
 			By("Validating VCPU scheduler placement information")
 			pod := tests.GetRunningPodByVirtualMachineInstance(vmi, util.NamespaceTestDefault)
+			emulator, err := tests.GetRunningVMIEmulator(vmi)
+			Expect(err).ToNot(HaveOccurred())
+			emulator = filepath.Base(emulator)
 			psOutput, err := exec.ExecuteCommandOnPod(
 				virtClient,
 				pod,
 				"compute",
-				[]string{tests.BinBash, "-c", "ps -LC qemu-kvm -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
+				[]string{tests.BinBash, "-c", "ps -LC " + emulator + " -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
 			)
 			Expect(err).ToNot(HaveOccurred())
 			slice := strings.Split(strings.TrimSpace(psOutput), "\n")
@@ -102,7 +106,7 @@ var _ = Describe("[sig-compute-realtime][Serial]Realtime", Serial, decorators.Si
 				virtClient,
 				pod,
 				"compute",
-				[]string{tests.BinBash, "-c", "grep 'locked memory' /proc/$(ps -C qemu-kvm -o pid --noheader|xargs)/limits |tr -s ' '| awk '{print $4\" \"$5}'"},
+				[]string{tests.BinBash, "-c", "grep 'locked memory' /proc/$(ps -C " + emulator + " -o pid --noheader|xargs)/limits |tr -s ' '| awk '{print $4\" \"$5}'"},
 			)
 			Expect(err).ToNot(HaveOccurred())
 			limits := strings.Split(strings.TrimSpace(psOutput), " ")
@@ -123,15 +127,17 @@ var _ = Describe("[sig-compute-realtime][Serial]Realtime", Serial, decorators.Si
 		})
 
 		It("when realtime mask is specified", func() {
-			vmi := newFedoraRealtime("0-1,^1")
-			byStartingTheVMI(vmi, virtClient)
+			vmi := byStartingTheVMI(newFedoraRealtime("0-1,^1"), virtClient)
 			pod := tests.GetRunningPodByVirtualMachineInstance(vmi, util.NamespaceTestDefault)
 			By("Validating VCPU scheduler placement information")
+			emulator, err := tests.GetRunningVMIEmulator(vmi)
+			Expect(err).ToNot(HaveOccurred())
+			emulator = filepath.Base(emulator)
 			psOutput, err := exec.ExecuteCommandOnPod(
 				virtClient,
 				pod,
 				"compute",
-				[]string{tests.BinBash, "-c", "ps -LC qemu-kvm -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
+				[]string{tests.BinBash, "-c", "ps -LC " + emulator + " -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
 			)
 			Expect(err).ToNot(HaveOccurred())
 			slice := strings.Split(strings.TrimSpace(psOutput), "\n")
@@ -143,7 +149,7 @@ var _ = Describe("[sig-compute-realtime][Serial]Realtime", Serial, decorators.Si
 				virtClient,
 				pod,
 				"compute",
-				[]string{tests.BinBash, "-c", "ps -TcC qemu-kvm |grep CPU |awk '{print $3\" \" $8}'"},
+				[]string{tests.BinBash, "-c", "ps -TcC " + emulator + " |grep CPU |awk '{print $3\" \" $8}'"},
 			)
 			Expect(err).ToNot(HaveOccurred())
 			slice = strings.Split(strings.TrimSpace(psOutput), "\n")
diff --git a/tests/security_features_test.go b/tests/security_features_test.go
index e38291691..98230bbaf 100644
--- a/tests/security_features_test.go
+++ b/tests/security_features_test.go
@@ -129,9 +129,9 @@ var _ = Describe("[Serial][sig-compute]SecurityFeatures", Serial, decorators.Sig
 			Expect(err).ToNot(HaveOccurred())
 			libwait.WaitForSuccessfulVMIStart(vmi)
 
-			domSpec, err := tests.GetRunningVMIDomainSpec(vmi)
+			emulator, err := tests.GetRunningVMIEmulator(vmi)
 			Expect(err).ToNot(HaveOccurred())
-			emulator := "[/]" + strings.TrimPrefix(domSpec.Devices.Emulator, "/")
+			emulator = "[/]" + strings.TrimPrefix(emulator, "/")
 
 			pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
 			qemuProcessSelinuxContext, err := exec.ExecuteCommandOnPod(
@@ -142,10 +142,10 @@ var _ = Describe("[Serial][sig-compute]SecurityFeatures", Serial, decorators.Sig
 			)
 			Expect(err).ToNot(HaveOccurred())
 
-			By("Checking that qemu-kvm process is of the SELinux type container_t")
+			By("Checking that qemu process is of the SELinux type container_t")
 			Expect(strings.Split(qemuProcessSelinuxContext, ":")[2]).To(Equal("container_t"))
 
-			By("Checking that qemu-kvm process has SELinux category_set")
+			By("Checking that qemu process has SELinux category_set")
 			Expect(strings.Split(qemuProcessSelinuxContext, ":")).To(HaveLen(5))
 
 			err = virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
@@ -205,9 +205,9 @@ var _ = Describe("[Serial][sig-compute]SecurityFeatures", Serial, decorators.Sig
 			libwait.WaitUntilVMIReady(vmi, console.LoginToAlpine)
 
 			By("Fetching virt-launcher Pod")
-			domSpec, err := tests.GetRunningVMIDomainSpec(vmi)
+			emulator, err := tests.GetRunningVMIEmulator(vmi)
 			Expect(err).ToNot(HaveOccurred())
-			emulator := "[/]" + strings.TrimPrefix(domSpec.Devices.Emulator, "/")
+			emulator = "[/]" + strings.TrimPrefix(emulator, "/")
 
 			pod, err := libvmi.GetPodByVirtualMachineInstance(vmi, testsuite.NamespacePrivileged)
 			Expect(err).ToNot(HaveOccurred())
@@ -219,7 +219,7 @@ var _ = Describe("[Serial][sig-compute]SecurityFeatures", Serial, decorators.Sig
 			)
 			Expect(err).ToNot(HaveOccurred())
 
-			By("Checking that qemu-kvm process is of the SELinux type virt_launcher.process")
+			By("Checking that qemu process is of the SELinux type virt_launcher.process")
 			Expect(strings.Split(qemuProcessSelinuxContext, ":")[2]).To(Equal(launcherType))
 
 			By("Verifying SELinux context contains custom type in pod")
diff --git a/tests/utils.go b/tests/utils.go
index c51656291..9984000c4 100644
--- a/tests/utils.go
+++ b/tests/utils.go
@@ -360,10 +360,10 @@ func GetProcessName(pod *k8sv1.Pod, pid string) (output string, err error) {
 	return
 }
 
-func GetVcpuMask(pod *k8sv1.Pod, cpu string) (output string, err error) {
+func GetVcpuMask(pod *k8sv1.Pod, emulator, cpu string) (output string, err error) {
 	virtClient := kubevirt.Client()
 
-	pscmd := `ps -LC qemu-kvm -o lwp,comm | grep "CPU ` + cpu + `" | cut -f1 -dC`
+	pscmd := `ps -LC ` + emulator + ` -o lwp,comm | grep "CPU ` + cpu + `" | cut -f1 -dC`
 	args := []string{BinBash, "-c", pscmd}
 	Eventually(func() error {
 		output, err = exec.ExecuteCommandOnPod(virtClient, pod, "compute", args)
@@ -1746,6 +1746,14 @@ func GetRunningVMIDomainSpec(vmi *v1.VirtualMachineInstance) (*launcherApi.Domai
 	return &runningVMISpec, err
 }
 
+func GetRunningVMIEmulator(vmi *v1.VirtualMachineInstance) (string, error) {
+	domSpec, err := GetRunningVMIDomainSpec(vmi)
+	if err != nil {
+		return "", err
+	}
+	return domSpec.Devices.Emulator, nil
+}
+
 func ForwardPorts(pod *k8sv1.Pod, ports []string, stop chan struct{}, readyTimeout time.Duration) error {
 	errChan := make(chan error, 1)
 	readyChan := make(chan struct{})
diff --git a/tests/vmi_configuration_test.go b/tests/vmi_configuration_test.go
index 4f120127c..44590fdaa 100644
--- a/tests/vmi_configuration_test.go
+++ b/tests/vmi_configuration_test.go
@@ -2639,8 +2639,12 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
 				&expect.BExp{R: "2"},
 			}, 15)).To(Succeed())
 
+			emulator, err := tests.GetRunningVMIEmulator(vmi)
+			Expect(err).ToNot(HaveOccurred())
+			emulator = filepath.Base(emulator)
+
 			virtClient := kubevirt.Client()
-			pidCmd := []string{"pidof", "qemu-kvm"}
+			pidCmd := []string{"pidof", emulator}
 			qemuPid, err := exec.ExecuteCommandOnPod(virtClient, readyPod, "compute", pidCmd)
 			// do not check for kvm-pit thread if qemu is not in use
 			if err != nil {
@@ -2649,7 +2653,7 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
 			kvmpitmask, err := tests.GetKvmPitMask(strings.TrimSpace(qemuPid), node)
 			Expect(err).ToNot(HaveOccurred())
 
-			vcpuzeromask, err := tests.GetVcpuMask(readyPod, "0")
+			vcpuzeromask, err := tests.GetVcpuMask(readyPod, emulator, "0")
 			Expect(err).ToNot(HaveOccurred())
 
 			Expect(kvmpitmask).To(Equal(vcpuzeromask))
--
2.42.0

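For reference, a minimal sketch of the pattern [PATCH 2/2] introduces in the tests: read the emulator path from the running domain spec (the new GetRunningVMIEmulator helper), keep only the binary name, and splice it into the ps invocation instead of a hardcoded qemu-kvm. This is an illustration, not the real test code; the emulator paths in main are examples.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// buildPsCommand derives the process name from the emulator path reported by
// the domain spec and builds the same kind of ps pipeline the tests run in
// the compute container.
func buildPsCommand(emulatorPath string) []string {
	emulator := filepath.Base(emulatorPath)
	return []string{"/bin/bash", "-c",
		"ps -LC " + emulator + " -o policy,rtprio,psr | grep FF | awk '{print $2}'"}
}

func main() {
	// Works for both naming schemes without hardcoding either binary.
	fmt.Println(buildPsCommand("/usr/bin/qemu-system-x86_64"))
	fmt.Println(buildPsCommand("/usr/libexec/qemu-kvm"))
}
```
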
0001-Update-google.golang.org-grpc-to-1.56.3.patch (new file, 10918 lines; diff not shown because it is too large)
0002-virt-launcher-fix-qemu-non-root-path.patch (new file, 29 lines)
@ -0,0 +1,29 @@
From 962ce51df790f9a42885db3de5e34bef99d595de Mon Sep 17 00:00:00 2001
From: Enrique Llorente <ellorent@redhat.com>
Date: Wed, 8 Nov 2023 14:05:43 +0100
Subject: [PATCH] virt-launcher: fix qemu non root path

The qemu log path is now under /var/run/kubevirt-private/libvirt/qemu instead of
/var/run/libvirt/qemu. This change adapt the virt-launcher code to that

Signed-off-by: Enrique Llorente <ellorent@redhat.com>
---
 pkg/virt-launcher/virtwrap/util/libvirt_helper.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/virt-launcher/virtwrap/util/libvirt_helper.go b/pkg/virt-launcher/virtwrap/util/libvirt_helper.go
index 8dd514f11..b342c034f 100644
--- a/pkg/virt-launcher/virtwrap/util/libvirt_helper.go
+++ b/pkg/virt-launcher/virtwrap/util/libvirt_helper.go
@@ -286,7 +286,7 @@ func startVirtlogdLogging(stopChan chan struct{}, domainName string, nonRoot boo
 	go func() {
 		logfile := fmt.Sprintf("/var/log/libvirt/qemu/%s.log", domainName)
 		if nonRoot {
-			logfile = filepath.Join("/var", "run", "libvirt", "qemu", "log", fmt.Sprintf("%s.log", domainName))
+			logfile = filepath.Join("/var", "run", "kubevirt-private", "libvirt", "qemu", "log", fmt.Sprintf("%s.log", domainName))
 		}
 
 		// It can take a few seconds to the log file to be created
--
2.42.1

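A minimal sketch of the patched branch above, using only the standard library; the domain name in main is illustrative. Root deployments keep the default libvirt log location, while non-root deployments now read the qemu log from the kubevirt-private directory.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// qemuLogFile mirrors the patched logic in startVirtlogdLogging: pick the
// per-domain qemu log path depending on whether virt-launcher runs as root.
func qemuLogFile(domainName string, nonRoot bool) string {
	if nonRoot {
		return filepath.Join("/var", "run", "kubevirt-private", "libvirt", "qemu", "log",
			fmt.Sprintf("%s.log", domainName))
	}
	return fmt.Sprintf("/var/log/libvirt/qemu/%s.log", domainName)
}

func main() {
	fmt.Println(qemuLogFile("default_vmi", true))  // non-root path
	fmt.Println(qemuLogFile("default_vmi", false)) // root path
}
```
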
0003-Virtiofs-Remove-duplicated-functional-tests.patch (deleted)
@ -1,418 +0,0 @@
From 039a1749c4326fe8937a38e67dd7674eb1d1f3a8 Mon Sep 17 00:00:00 2001
From: Javier Cano Cano <jcanocan@redhat.com>
Date: Tue, 2 May 2023 11:33:02 +0200
Subject: [PATCH] Virtiofs: Remove duplicated functional tests

Removes virtiofs test located in tests/storage/storage.go
which are already present in tests/virtiofs/datavolume.go.

Signed-off-by: Javier Cano Cano <jcanocan@redhat.com>
---
 tests/storage/storage.go | 257 -----------------------------------
 tests/virtiofs/BUILD.bazel | 3 +
 tests/virtiofs/datavolume.go | 62 ++++++++-
 3 files changed, 62 insertions(+), 260 deletions(-)

diff --git a/tests/storage/storage.go b/tests/storage/storage.go
index 672ba2355..3e5963139 100644
--- a/tests/storage/storage.go
+++ b/tests/storage/storage.go
@@ -346,264 +346,7 @@ var _ = SIGDescribe("Storage", func() {
 		})
 
 	})
-	Context("VirtIO-FS with multiple PVCs", func() {
-		pvc1 := "pvc-1"
-		pvc2 := "pvc-2"
-		createPVC := func(name string) {
-			sc, _ := libstorage.GetRWXFileSystemStorageClass()
-			pvc := libstorage.NewPVC(name, "1Gi", sc)
-			_, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.NamespacePrivileged).Create(context.Background(), pvc, metav1.CreateOptions{})
-			ExpectWithOffset(1, err).NotTo(HaveOccurred())
-		}
-
-		BeforeEach(func() {
-			checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
-			createPVC(pvc1)
-			createPVC(pvc2)
-		})
-
-		AfterEach(func() {
-			libstorage.DeletePVC(pvc1, testsuite.NamespacePrivileged)
-			libstorage.DeletePVC(pvc2, testsuite.NamespacePrivileged)
-		})
-
-		DescribeTable("should be successfully started and accessible", func(option1, option2 libvmi.Option) {
-
-			virtiofsMountPath := func(pvcName string) string { return fmt.Sprintf("/mnt/virtiofs_%s", pvcName) }
-			virtiofsTestFile := func(virtiofsMountPath string) string { return fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath) }
-			mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
-				mkdir %s
-				mount -t virtiofs %s %s
-				touch %s
-
-				mkdir %s
-				mount -t virtiofs %s %s
-				touch %s
-			`, virtiofsMountPath(pvc1), pvc1, virtiofsMountPath(pvc1), virtiofsTestFile(virtiofsMountPath(pvc1)),
-				virtiofsMountPath(pvc2), pvc2, virtiofsMountPath(pvc2), virtiofsTestFile(virtiofsMountPath(pvc2)))
-
-			vmi = libvmi.NewFedora(
-				libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
-				libvmi.WithFilesystemPVC(pvc1),
-				libvmi.WithFilesystemPVC(pvc2),
-				libvmi.WithNamespace(testsuite.NamespacePrivileged),
-				option1, option2,
-			)
-
-			vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 300)
-
-			// Wait for cloud init to finish and start the agent inside the vmi.
-			Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
-			By(checkingVMInstanceConsoleOut)
-			Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
-			virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvc1)
-			pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-			podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
-				virtClient,
-				pod,
-				"compute",
-				[]string{tests.BinBash, "-c", virtioFsFileTestCmd},
-			)
-			Expect(err).ToNot(HaveOccurred())
-			Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-
-			virtioFsFileTestCmd = fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvc2)
-			pod = tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-			podVirtioFsFileExist, err = exec.ExecuteCommandOnPod(
-				virtClient,
-				pod,
-				"compute",
-				[]string{tests.BinBash, "-c", virtioFsFileTestCmd},
-			)
-			Expect(err).ToNot(HaveOccurred())
-			Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-		},
-			Entry("", func(instance *virtv1.VirtualMachineInstance) {}, func(instance *virtv1.VirtualMachineInstance) {}),
-			Entry("with passt enabled", libvmi.WithPasstInterfaceWithPort(), libvmi.WithNetwork(v1.DefaultPodNetwork())),
-		)
-
-	})
-	Context("VirtIO-FS with an empty PVC", func() {
-		var (
-			pvc = "empty-pvc1"
-			originalConfig v1.KubeVirtConfiguration
-		)
-
-		BeforeEach(func() {
-			checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
-			originalConfig = *util.GetCurrentKv(virtClient).Spec.Configuration.DeepCopy()
-			libstorage.CreateHostPathPv(pvc, testsuite.NamespacePrivileged, filepath.Join(testsuite.HostPathBase, pvc))
-			libstorage.CreateHostPathPVC(pvc, testsuite.NamespacePrivileged, "1G")
-		})

-		AfterEach(func() {
-			tests.UpdateKubeVirtConfigValueAndWait(originalConfig)
-			libstorage.DeletePVC(pvc, testsuite.NamespacePrivileged)
-			libstorage.DeletePV(pvc)
-		})
-
-		It("[serial] should be successfully started and virtiofs could be accessed", Serial, func() {
-			resources := k8sv1.ResourceRequirements{
-				Requests: k8sv1.ResourceList{
-					k8sv1.ResourceCPU: resource.MustParse("2m"),
-					k8sv1.ResourceMemory: resource.MustParse("14M"),
-				},
-				Limits: k8sv1.ResourceList{
-					k8sv1.ResourceCPU: resource.MustParse("101m"),
-					k8sv1.ResourceMemory: resource.MustParse("81M"),
-				},
-			}
-			config := originalConfig.DeepCopy()
-			config.SupportContainerResources = []v1.SupportContainerResources{
-				{
-					Type: v1.VirtioFS,
-					Resources: resources,
-				},
-			}
-			tests.UpdateKubeVirtConfigValueAndWait(*config)
-			pvcName := fmt.Sprintf("disk-%s", pvc)
-			virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", pvcName)
-			virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
-			mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
-				mkdir %s
-				mount -t virtiofs %s %s
-				touch %s
-			`, virtiofsMountPath, pvcName, virtiofsMountPath, virtiofsTestFile)
-
-			vmi = libvmi.NewFedora(
-				libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
-				libvmi.WithFilesystemPVC(pvcName),
-				libvmi.WithNamespace(testsuite.NamespacePrivileged),
-			)
-			vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 300)
-
-			// Wait for cloud init to finish and start the agent inside the vmi.
-			Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
-			By(checkingVMInstanceConsoleOut)
-			Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
-			virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvcName)
-			pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-			podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
-				virtClient,
-				pod,
-				"compute",
-				[]string{tests.BinBash, "-c", virtioFsFileTestCmd},
-			)
-			Expect(err).ToNot(HaveOccurred())
-			Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-			By("Finding virt-launcher pod")
-			var virtlauncherPod *k8sv1.Pod
-			Eventually(func() *k8sv1.Pod {
-				podList, err := virtClient.CoreV1().Pods(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
-				if err != nil {
-					return nil
-				}
-				for _, pod := range podList.Items {
-					for _, ownerRef := range pod.GetOwnerReferences() {
-						if ownerRef.UID == vmi.GetUID() {
-							virtlauncherPod = &pod
-							break
-						}
-					}
-				}
-				return virtlauncherPod
-			}, 30*time.Second, 1*time.Second).ShouldNot(BeNil())
-			Expect(virtlauncherPod.Spec.Containers).To(HaveLen(3))
-			foundContainer := false
-			virtiofsContainerName := fmt.Sprintf("virtiofs-%s", pvcName)
-			for _, container := range virtlauncherPod.Spec.Containers {
-				if container.Name == virtiofsContainerName {
-					foundContainer = true
-					Expect(container.Resources.Requests.Cpu().Value()).To(Equal(resources.Requests.Cpu().Value()))
-					Expect(container.Resources.Requests.Memory().Value()).To(Equal(resources.Requests.Memory().Value()))
-					Expect(container.Resources.Limits.Cpu().Value()).To(Equal(resources.Limits.Cpu().Value()))
-					Expect(container.Resources.Limits.Memory().Value()).To(Equal(resources.Limits.Memory().Value()))
-				}
-			}
-			Expect(foundContainer).To(BeTrue())
-		})
-	})
-	Context("Run a VMI with VirtIO-FS and a datavolume", func() {
-		var dataVolume *cdiv1.DataVolume
-		BeforeEach(func() {
-			checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
-			if !libstorage.HasCDI() {
-				Skip("Skip DataVolume tests when CDI is not present")
-			}
-
-			sc, exists := libstorage.GetRWOFileSystemStorageClass()
-			if !exists {
-				Skip("Skip test when Filesystem storage is not present")
-			}
-
-			dataVolume = libdv.NewDataVolume(
-				libdv.WithRegistryURLSource(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine)),
-				libdv.WithPVC(libdv.PVCWithStorageClass(sc)),
-			)
-		})
-
-		AfterEach(func() {
-			libstorage.DeleteDataVolume(&dataVolume)
-		})
-
-		It("should be successfully started and virtiofs could be accessed", func() {
-			dataVolume, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.NamespacePrivileged).Create(context.Background(), dataVolume, metav1.CreateOptions{})
-			Expect(err).ToNot(HaveOccurred())
-			By("Waiting until the DataVolume is ready")
-			if libstorage.IsStorageClassBindingModeWaitForFirstConsumer(libstorage.Config.StorageRWOFileSystem) {
-				Eventually(ThisDV(dataVolume), 30).Should(Or(BeInPhase(cdiv1.WaitForFirstConsumer), BeInPhase(cdiv1.PendingPopulation)))
-			}
-
-			virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", dataVolume.Name)
-			virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
-			mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
-				mkdir %s
-				mount -t virtiofs %s %s
-				touch %s
-			`, virtiofsMountPath, dataVolume.Name, virtiofsMountPath, virtiofsTestFile)
-
-			vmi = libvmi.NewFedora(
-				libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
-				libvmi.WithFilesystemDV(dataVolume.Name),
-				libvmi.WithNamespace(testsuite.NamespacePrivileged),
-			)
-			// with WFFC the run actually starts the import and then runs VM, so the timeout has to include both
-			// import and start
-			vmi = tests.RunVMIAndExpectLaunchWithDataVolume(vmi, dataVolume, 500)
-
-			// Wait for cloud init to finish and start the agent inside the vmi.
-			Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
-			By(checkingVMInstanceConsoleOut)
-			Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
-			By("Checking that virtio-fs is mounted")
-			listVirtioFSDisk := fmt.Sprintf("ls -l %s/*disk* | wc -l\n", virtiofsMountPath)
-			Expect(console.ExpectBatch(vmi, []expect.Batcher{
-				&expect.BSnd{S: listVirtioFSDisk},
-				&expect.BExp{R: console.RetValue("1")},
-			}, 30*time.Second)).To(Succeed(), "Should be able to access the mounted virtiofs file")
-
-			virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", dataVolume.Name)
-			pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-			podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
-				virtClient,
-				pod,
-				"compute",
-				[]string{tests.BinBash, "-c", virtioFsFileTestCmd},
-			)
-			Expect(err).ToNot(HaveOccurred())
-			Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-			err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
-			Expect(err).ToNot(HaveOccurred())
-			libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
-
-		})
-	})
 	Context("[rfe_id:3106][crit:medium][vendor:cnv-qe@redhat.com][level:component]With ephemeral alpine PVC", func() {
 		var isRunOnKindInfra bool
 		BeforeEach(func() {
diff --git a/tests/virtiofs/BUILD.bazel b/tests/virtiofs/BUILD.bazel
index f2b197bd2..f3bf5cea6 100644
--- a/tests/virtiofs/BUILD.bazel
+++ b/tests/virtiofs/BUILD.bazel
@@ -27,10 +27,13 @@ go_library(
 		"//tests/libvmi:go_default_library",
 		"//tests/libwait:go_default_library",
 		"//tests/testsuite:go_default_library",
+		"//tests/util:go_default_library",
 		"//vendor/github.com/google/goexpect:go_default_library",
 		"//vendor/github.com/onsi/ginkgo/v2:go_default_library",
 		"//vendor/github.com/onsi/gomega:go_default_library",
 		"//vendor/github.com/pborman/uuid:go_default_library",
+		"//vendor/k8s.io/api/core/v1:go_default_library",
+		"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 		"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1:go_default_library",
 	],
diff --git a/tests/virtiofs/datavolume.go b/tests/virtiofs/datavolume.go
index 69de40d44..2a0139376 100644
--- a/tests/virtiofs/datavolume.go
+++ b/tests/virtiofs/datavolume.go
@@ -37,6 +37,8 @@ import (
 	expect "github.com/google/goexpect"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	k8sv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	v1 "kubevirt.io/api/core/v1"
@@ -53,6 +55,7 @@ import (
 	"kubevirt.io/kubevirt/tests/libstorage"
 	"kubevirt.io/kubevirt/tests/libwait"
 	"kubevirt.io/kubevirt/tests/testsuite"
+	"kubevirt.io/kubevirt/tests/util"
 )
 
 const (
@@ -149,21 +152,43 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
 	})
 
 	Context("VirtIO-FS with an empty PVC", func() {
-
-		var pvc = "empty-pvc1"
+		var (
+			pvc = "empty-pvc1"
+			originalConfig v1.KubeVirtConfiguration
+		)
 
 		BeforeEach(func() {
 			checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
+			originalConfig = *util.GetCurrentKv(virtClient).Spec.Configuration.DeepCopy()
 			libstorage.CreateHostPathPv(pvc, testsuite.NamespacePrivileged, filepath.Join(testsuite.HostPathBase, pvc))
 			libstorage.CreateHostPathPVC(pvc, testsuite.NamespacePrivileged, "1G")
 		})
 
 		AfterEach(func() {
+			tests.UpdateKubeVirtConfigValueAndWait(originalConfig)
 			libstorage.DeletePVC(pvc, testsuite.NamespacePrivileged)
 			libstorage.DeletePV(pvc)
 		})
 
-		It("should be successfully started and virtiofs could be accessed", func() {
+		It("[Serial] should be successfully started and virtiofs could be accessed", Serial, func() {
+			resources := k8sv1.ResourceRequirements{
+				Requests: k8sv1.ResourceList{
+					k8sv1.ResourceCPU: resource.MustParse("2m"),
+					k8sv1.ResourceMemory: resource.MustParse("14M"),
+				},
+				Limits: k8sv1.ResourceList{
+					k8sv1.ResourceCPU: resource.MustParse("101m"),
+					k8sv1.ResourceMemory: resource.MustParse("81M"),
+				},
+			}
+			config := originalConfig.DeepCopy()
+			config.SupportContainerResources = []v1.SupportContainerResources{
+				{
+					Type: v1.VirtioFS,
+					Resources: resources,
+				},
+			}
+			tests.UpdateKubeVirtConfigValueAndWait(*config)
 			pvcName := fmt.Sprintf("disk-%s", pvc)
 			virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", pvcName)
 			virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
@@ -196,6 +221,36 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
 			)
 			Expect(err).ToNot(HaveOccurred())
 			Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
+			By("Finding virt-launcher pod")
+			var virtlauncherPod *k8sv1.Pod
+			Eventually(func() *k8sv1.Pod {
+				podList, err := virtClient.CoreV1().Pods(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
+				if err != nil {
+					return nil
+				}
+				for _, pod := range podList.Items {
+					for _, ownerRef := range pod.GetOwnerReferences() {
+						if ownerRef.UID == vmi.GetUID() {
+							virtlauncherPod = &pod
+							break
+						}
+					}
+				}
+				return virtlauncherPod
+			}, 30*time.Second, 1*time.Second).ShouldNot(BeNil())
+			Expect(virtlauncherPod.Spec.Containers).To(HaveLen(3))
+			foundContainer := false
+			virtiofsContainerName := fmt.Sprintf("virtiofs-%s", pvcName)
+			for _, container := range virtlauncherPod.Spec.Containers {
+				if container.Name == virtiofsContainerName {
+					foundContainer = true
+					Expect(container.Resources.Requests.Cpu().Value()).To(Equal(resources.Requests.Cpu().Value()))
+					Expect(container.Resources.Requests.Memory().Value()).To(Equal(resources.Requests.Memory().Value()))
+					Expect(container.Resources.Limits.Cpu().Value()).To(Equal(resources.Limits.Cpu().Value()))
+					Expect(container.Resources.Limits.Memory().Value()).To(Equal(resources.Limits.Memory().Value()))
+				}
+			}
+			Expect(foundContainer).To(BeTrue())
 		})
 	})
 
@@ -273,6 +328,7 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
 			err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
 			Expect(err).ToNot(HaveOccurred())
 			libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
+
 		})
 	})
 })
--
2.41.0

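The test moved above also exercises the per-VMI virtiofs support-container resource override. Below is a minimal sketch of that configuration, assuming the kubevirt.io/api/core/v1 and k8s.io/apimachinery modules referenced in the BUILD.bazel changes are available; the field and constant names are taken from the diff itself.

```go
package main

import (
	"fmt"

	k8sv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	v1 "kubevirt.io/api/core/v1"
)

func main() {
	// Override requests/limits of the virtiofs support container, mirroring
	// what the test does before starting the VMI.
	config := v1.KubeVirtConfiguration{
		SupportContainerResources: []v1.SupportContainerResources{{
			Type: v1.VirtioFS,
			Resources: k8sv1.ResourceRequirements{
				Requests: k8sv1.ResourceList{
					k8sv1.ResourceCPU:    resource.MustParse("2m"),
					k8sv1.ResourceMemory: resource.MustParse("14M"),
				},
				Limits: k8sv1.ResourceList{
					k8sv1.ResourceCPU:    resource.MustParse("101m"),
					k8sv1.ResourceMemory: resource.MustParse("81M"),
				},
			},
		}},
	}
	fmt.Printf("support container override for: %v\n", config.SupportContainerResources[0].Type)
}
```
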
0003-cgroupsv2-reconstruct-device-allowlist.patch (new file, 1444 lines; diff not shown because it is too large)
0005-Support-multiple-watchdogs-in-the-domain-schema.patch (deleted)
@ -1,109 +0,0 @@
From 12cb69406a3a33a3b38c97e35014fa905858fe72 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Wed, 19 Jul 2023 10:36:21 +0200
Subject: [PATCH] Support multiple watchdogs in the domain schema

Libvirt allows several watchdog devices since 9.1.0. The documentation
now states:

Having multiple watchdogs is usually not something very common, but be
aware that this might happen, for example, when an implicit watchdog
device is added as part of another device. For example the iTCO watchdog
being part of the ich9 southbridge, which is used with the q35 machine
type.

Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
 pkg/virt-launcher/virtwrap/api/deepcopy_generated.go | 10 ++++++----
 pkg/virt-launcher/virtwrap/api/schema.go | 2 +-
 pkg/virt-launcher/virtwrap/api/schema_test.go | 10 ++++++----
 pkg/virt-launcher/virtwrap/converter/converter.go | 2 +-
 pkg/virt-launcher/virtwrap/converter/pci-placement.go | 4 ++--
 5 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go b/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go
index b5cb529e2..c1d3a781a 100644
--- a/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go
+++ b/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go
@@ -736,10 +736,12 @@ func (in *Devices) DeepCopyInto(out *Devices) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	if in.Watchdog != nil {
-		in, out := &in.Watchdog, &out.Watchdog
-		*out = new(Watchdog)
-		(*in).DeepCopyInto(*out)
+	if in.Watchdogs != nil {
+		in, out := &in.Watchdogs, &out.Watchdogs
+		*out = make([]Watchdog, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
 	}
 	if in.Rng != nil {
 		in, out := &in.Rng, &out.Rng
diff --git a/pkg/virt-launcher/virtwrap/api/schema.go b/pkg/virt-launcher/virtwrap/api/schema.go
index 465c6c6c1..ff4e6e959 100644
--- a/pkg/virt-launcher/virtwrap/api/schema.go
+++ b/pkg/virt-launcher/virtwrap/api/schema.go
@@ -473,7 +473,7 @@ type Devices struct {
 	Inputs []Input `xml:"input"`
 	Serials []Serial `xml:"serial"`
 	Consoles []Console `xml:"console"`
-	Watchdog *Watchdog `xml:"watchdog,omitempty"`
+	Watchdogs []Watchdog `xml:"watchdog,omitempty"`
 	Rng *Rng `xml:"rng,omitempty"`
 	Filesystems []FilesystemDevice `xml:"filesystem,omitempty"`
 	Redirs []RedirectedDevice `xml:"redirdev,omitempty"`
diff --git a/pkg/virt-launcher/virtwrap/api/schema_test.go b/pkg/virt-launcher/virtwrap/api/schema_test.go
index 8150ea8fd..c315cf13f 100644
--- a/pkg/virt-launcher/virtwrap/api/schema_test.go
+++ b/pkg/virt-launcher/virtwrap/api/schema_test.go
@@ -348,10 +348,12 @@ var _ = ginkgo.Describe("Schema", func() {
 		exampleDomain.Spec.Devices.Consoles = []Console{
 			{Type: "pty"},
 		}
-		exampleDomain.Spec.Devices.Watchdog = &Watchdog{
-			Model: "i6300esb",
-			Action: "poweroff",
-			Alias: NewUserDefinedAlias("mywatchdog"),
+		exampleDomain.Spec.Devices.Watchdogs = []Watchdog{
+			{
+				Model: "i6300esb",
+				Action: "poweroff",
+				Alias: NewUserDefinedAlias("mywatchdog"),
+			},
 		}
 		exampleDomain.Spec.Devices.Rng = &Rng{
 			Model: v1.VirtIO,
diff --git a/pkg/virt-launcher/virtwrap/converter/converter.go b/pkg/virt-launcher/virtwrap/converter/converter.go
index db3c0a903..531a5ea71 100644
--- a/pkg/virt-launcher/virtwrap/converter/converter.go
+++ b/pkg/virt-launcher/virtwrap/converter/converter.go
@@ -1582,7 +1582,7 @@ func Convert_v1_VirtualMachineInstance_To_api_Domain(vmi *v1.VirtualMachineInsta
 		if err != nil {
 			return err
 		}
-		domain.Spec.Devices.Watchdog = newWatchdog
+		domain.Spec.Devices.Watchdogs = append(domain.Spec.Devices.Watchdogs, *newWatchdog)
 	}
 
 	if vmi.Spec.Domain.Devices.Rng != nil {
diff --git a/pkg/virt-launcher/virtwrap/converter/pci-placement.go b/pkg/virt-launcher/virtwrap/converter/pci-placement.go
index 38ca8354e..fbe17ba6e 100644
--- a/pkg/virt-launcher/virtwrap/converter/pci-placement.go
+++ b/pkg/virt-launcher/virtwrap/converter/pci-placement.go
@@ -53,8 +53,8 @@ func PlacePCIDevicesOnRootComplex(spec *api.DomainSpec) (err error) {
 			return err
 		}
 	}
-	if spec.Devices.Watchdog != nil {
-		spec.Devices.Watchdog.Address, err = assigner.PlacePCIDeviceAtNextSlot(spec.Devices.Watchdog.Address)
+	for i, watchdog := range spec.Devices.Watchdogs {
+		spec.Devices.Watchdogs[i].Address, err = assigner.PlacePCIDeviceAtNextSlot(watchdog.Address)
 		if err != nil {
 			return err
 		}
--
2.41.0

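The schema change above is easiest to see with plain encoding/xml: a slice field tagged xml:"watchdog" marshals one <watchdog/> element per entry, which is what lets an implicit iTCO watchdog coexist with a user-defined one. The structs below are simplified stand-ins for the domain schema types, not the real api package.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// watchdog and devices are simplified stand-ins for the domain schema: a
// slice with an xml:"watchdog" tag produces one element per entry.
type watchdog struct {
	Model  string `xml:"model,attr"`
	Action string `xml:"action,attr"`
}

type devices struct {
	XMLName   xml.Name   `xml:"devices"`
	Watchdogs []watchdog `xml:"watchdog,omitempty"`
}

func main() {
	d := devices{Watchdogs: []watchdog{
		{Model: "i6300esb", Action: "poweroff"},
		{Model: "itco", Action: "reset"},
	}}
	out, err := xml.MarshalIndent(d, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints a <devices> element containing two <watchdog> children.
	fmt.Println(string(out))
}
```
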
_service
@ -1,7 +1,7 @@
 <services>
 <service name="tar_scm" mode="manual">
 <param name="filename">kubevirt</param>
-<param name="revision">v1.0.1</param>
+<param name="revision">v1.1.0</param>
 <param name="scm">git</param>
 <param name="submodules">disable</param>
 <param name="url">https://github.com/kubevirt/kubevirt</param>

disks-images-provider.yaml
@ -22,7 +22,7 @@
 serviceAccountName: kubevirt-testing
 containers:
 - name: target
-  image: quay.io/kubevirt/disks-images-provider:v1.0.1
+  image: quay.io/kubevirt/disks-images-provider:v1.1.0
   imagePullPolicy: Always
   lifecycle:
     preStop:

kubevirt-1.0.1.tar.gz (deleted)
@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a150bb410477b6e3f4f46bc0adba5f1c173eb1131da70c9715728c457f2b0304
-size 15854619
kubevirt-1.1.0.tar.gz (new file, 3 lines)
@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a483208a3c5922fbcbc505d07d2f47b703f3e47a17f534a5e0b611d27f60a2c
+size 16230961
kubevirt.changes
@ -1,3 +1,17 @@
+-------------------------------------------------------------------
+Tue Nov 14 07:36:17 UTC 2023 - Vasily Ulyanov <vasily.ulyanov@suse.com>
+
+- Update to version 1.1.0
+  Release notes https://github.com/kubevirt/kubevirt/releases/tag/v1.1.0
+- Drop upstreamed patches
+  0001-Fix-qemu-system-lookup.patch
+  0003-Virtiofs-Remove-duplicated-functional-tests.patch
+  0005-Support-multiple-watchdogs-in-the-domain-schema.patch
+- Add patches
+  0001-Update-google.golang.org-grpc-to-1.56.3.patch (CVE-2023-44487)
+  0002-virt-launcher-fix-qemu-non-root-path.patch
+  0003-cgroupsv2-reconstruct-device-allowlist.patch
+
 -------------------------------------------------------------------
 Wed Oct 18 07:41:49 UTC 2023 - Vasily Ulyanov <vasily.ulyanov@suse.com>
 
kubevirt.spec
@ -17,7 +17,7 @@
 
 
 Name: kubevirt
-Version: 1.0.1
+Version: 1.1.0
 Release: 0
 Summary: Container native virtualization
 License: Apache-2.0
@ -28,9 +28,9 @@ Source1: kubevirt_containers_meta
 Source2: kubevirt_containers_meta.service
 Source3: %{url}/releases/download/v%{version}/disks-images-provider.yaml
 Source100: %{name}-rpmlintrc
-Patch1: 0001-Fix-qemu-system-lookup.patch
-Patch3: 0003-Virtiofs-Remove-duplicated-functional-tests.patch
-Patch5: 0005-Support-multiple-watchdogs-in-the-domain-schema.patch
+Patch1: 0001-Update-google.golang.org-grpc-to-1.56.3.patch
+Patch2: 0002-virt-launcher-fix-qemu-non-root-path.patch
+Patch3: 0003-cgroupsv2-reconstruct-device-allowlist.patch
 BuildRequires: glibc-devel-static
 BuildRequires: golang-packaging
 BuildRequires: pkgconfig