Accepting request 1099268 from home:vulyanov:branches:Virtualization

- Access ksm sysfs from the host filesystem
  0002-ksm-Access-sysfs-from-the-host-filesystem.patch
- Remove duplicated virtiofs tests
  0003-Virtiofs-Remove-duplicated-functional-tests.patch
- Fix SCSI Persistent Reservations tests
  0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch

OBS-URL: https://build.opensuse.org/request/show/1099268
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=122
Vasily Ulyanov 2023-07-18 12:29:24 +00:00 committed by Git OBS Bridge
parent 19eafc9dba
commit 6f78c51535
5 changed files with 536 additions and 0 deletions

0002-ksm-Access-sysfs-from-the-host-filesystem.patch

@@ -0,0 +1,36 @@
From b32657feb4529888cb9d233deee8986395469c0f Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 13 Jul 2023 16:24:48 +0200
Subject: [PATCH] ksm: Access sysfs from the host filesystem

In some environments, sysfs is mounted read-only even for privileged
containers. Use the ksm path from the host filesystem.

Related issue: https://github.com/containerd/containerd/issues/8445

Co-authored-by: Luboslav Pivarc <lpivarc@redhat.com>
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Luboslav Pivarc <lpivarc@redhat.com>
---
pkg/virt-handler/node-labeller/node_labeller.go | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/pkg/virt-handler/node-labeller/node_labeller.go b/pkg/virt-handler/node-labeller/node_labeller.go
index f5eba812e..ec1852a34 100644
--- a/pkg/virt-handler/node-labeller/node_labeller.go
+++ b/pkg/virt-handler/node-labeller/node_labeller.go
@@ -50,7 +50,10 @@ import (
"kubevirt.io/kubevirt/pkg/virt-handler/node-labeller/util"
)
-const ksmPath = "/sys/kernel/mm/ksm/run"
+// In some environments, sysfs is mounted read-only even for privileged
+// containers: https://github.com/containerd/containerd/issues/8445.
+// Use the path from the host filesystem.
+const ksmPath = "/proc/1/root/sys/kernel/mm/ksm/run"
var nodeLabellerLabels = []string{
util.DeprecatedLabelNamespace + util.DeprecatedcpuModelPrefix,
--
2.41.0
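
Note on the approach: /proc/1/root is the root filesystem of the host's
init process, so a privileged virt-handler pod can reach the host's real
sysfs through it even when the container's own /sys is mounted read-only.
A minimal Go sketch of that access pattern follows; it is illustrative
only — the helper and its output are hypothetical, not part of KubeVirt
or of this patch:

package main

import (
	"fmt"
	"os"
	"strings"
)

// The host's sysfs as seen through PID 1's root filesystem.
const ksmPath = "/proc/1/root/sys/kernel/mm/ksm/run"

// ksmEnabled reports whether KSM is running on the host ("1" means on).
func ksmEnabled() (bool, error) {
	data, err := os.ReadFile(ksmPath)
	if err != nil {
		return false, err
	}
	return strings.TrimSpace(string(data)) == "1", nil
}

func main() {
	on, err := ksmEnabled()
	if err != nil {
		fmt.Fprintf(os.Stderr, "reading %s: %v\n", ksmPath, err)
		os.Exit(1)
	}
	fmt.Println("KSM enabled:", on)
}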

0003-Virtiofs-Remove-duplicated-functional-tests.patch

@@ -0,0 +1,418 @@
From 039a1749c4326fe8937a38e67dd7674eb1d1f3a8 Mon Sep 17 00:00:00 2001
From: Javier Cano Cano <jcanocan@redhat.com>
Date: Tue, 2 May 2023 11:33:02 +0200
Subject: [PATCH] Virtiofs: Remove duplicated functional tests

Removes virtiofs tests located in tests/storage/storage.go
which are already present in tests/virtiofs/datavolume.go.

Signed-off-by: Javier Cano Cano <jcanocan@redhat.com>
---
tests/storage/storage.go | 257 -----------------------------------
tests/virtiofs/BUILD.bazel | 3 +
tests/virtiofs/datavolume.go | 62 ++++++++-
3 files changed, 62 insertions(+), 260 deletions(-)

diff --git a/tests/storage/storage.go b/tests/storage/storage.go
index 672ba2355..3e5963139 100644
--- a/tests/storage/storage.go
+++ b/tests/storage/storage.go
@@ -346,264 +346,7 @@ var _ = SIGDescribe("Storage", func() {
})
})
- Context("VirtIO-FS with multiple PVCs", func() {
- pvc1 := "pvc-1"
- pvc2 := "pvc-2"
- createPVC := func(name string) {
- sc, _ := libstorage.GetRWXFileSystemStorageClass()
- pvc := libstorage.NewPVC(name, "1Gi", sc)
- _, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.NamespacePrivileged).Create(context.Background(), pvc, metav1.CreateOptions{})
- ExpectWithOffset(1, err).NotTo(HaveOccurred())
- }
-
- BeforeEach(func() {
- checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
- createPVC(pvc1)
- createPVC(pvc2)
- })
-
- AfterEach(func() {
- libstorage.DeletePVC(pvc1, testsuite.NamespacePrivileged)
- libstorage.DeletePVC(pvc2, testsuite.NamespacePrivileged)
- })
-
- DescribeTable("should be successfully started and accessible", func(option1, option2 libvmi.Option) {
-
- virtiofsMountPath := func(pvcName string) string { return fmt.Sprintf("/mnt/virtiofs_%s", pvcName) }
- virtiofsTestFile := func(virtiofsMountPath string) string { return fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath) }
- mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
- mkdir %s
- mount -t virtiofs %s %s
- touch %s
-
- mkdir %s
- mount -t virtiofs %s %s
- touch %s
- `, virtiofsMountPath(pvc1), pvc1, virtiofsMountPath(pvc1), virtiofsTestFile(virtiofsMountPath(pvc1)),
- virtiofsMountPath(pvc2), pvc2, virtiofsMountPath(pvc2), virtiofsTestFile(virtiofsMountPath(pvc2)))
-
- vmi = libvmi.NewFedora(
- libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
- libvmi.WithFilesystemPVC(pvc1),
- libvmi.WithFilesystemPVC(pvc2),
- libvmi.WithNamespace(testsuite.NamespacePrivileged),
- option1, option2,
- )
-
- vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 300)
-
- // Wait for cloud init to finish and start the agent inside the vmi.
- Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
- By(checkingVMInstanceConsoleOut)
- Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
- virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvc1)
- pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
- podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
- virtClient,
- pod,
- "compute",
- []string{tests.BinBash, "-c", virtioFsFileTestCmd},
- )
- Expect(err).ToNot(HaveOccurred())
- Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-
- virtioFsFileTestCmd = fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvc2)
- pod = tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
- podVirtioFsFileExist, err = exec.ExecuteCommandOnPod(
- virtClient,
- pod,
- "compute",
- []string{tests.BinBash, "-c", virtioFsFileTestCmd},
- )
- Expect(err).ToNot(HaveOccurred())
- Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
- },
- Entry("", func(instance *virtv1.VirtualMachineInstance) {}, func(instance *virtv1.VirtualMachineInstance) {}),
- Entry("with passt enabled", libvmi.WithPasstInterfaceWithPort(), libvmi.WithNetwork(v1.DefaultPodNetwork())),
- )
-
- })
- Context("VirtIO-FS with an empty PVC", func() {
- var (
- pvc = "empty-pvc1"
- originalConfig v1.KubeVirtConfiguration
- )
-
- BeforeEach(func() {
- checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
- originalConfig = *util.GetCurrentKv(virtClient).Spec.Configuration.DeepCopy()
- libstorage.CreateHostPathPv(pvc, testsuite.NamespacePrivileged, filepath.Join(testsuite.HostPathBase, pvc))
- libstorage.CreateHostPathPVC(pvc, testsuite.NamespacePrivileged, "1G")
- })
- AfterEach(func() {
- tests.UpdateKubeVirtConfigValueAndWait(originalConfig)
- libstorage.DeletePVC(pvc, testsuite.NamespacePrivileged)
- libstorage.DeletePV(pvc)
- })
-
- It("[serial] should be successfully started and virtiofs could be accessed", Serial, func() {
- resources := k8sv1.ResourceRequirements{
- Requests: k8sv1.ResourceList{
- k8sv1.ResourceCPU: resource.MustParse("2m"),
- k8sv1.ResourceMemory: resource.MustParse("14M"),
- },
- Limits: k8sv1.ResourceList{
- k8sv1.ResourceCPU: resource.MustParse("101m"),
- k8sv1.ResourceMemory: resource.MustParse("81M"),
- },
- }
- config := originalConfig.DeepCopy()
- config.SupportContainerResources = []v1.SupportContainerResources{
- {
- Type: v1.VirtioFS,
- Resources: resources,
- },
- }
- tests.UpdateKubeVirtConfigValueAndWait(*config)
- pvcName := fmt.Sprintf("disk-%s", pvc)
- virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", pvcName)
- virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
- mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
- mkdir %s
- mount -t virtiofs %s %s
- touch %s
- `, virtiofsMountPath, pvcName, virtiofsMountPath, virtiofsTestFile)
-
- vmi = libvmi.NewFedora(
- libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
- libvmi.WithFilesystemPVC(pvcName),
- libvmi.WithNamespace(testsuite.NamespacePrivileged),
- )
- vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 300)
-
- // Wait for cloud init to finish and start the agent inside the vmi.
- Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
- By(checkingVMInstanceConsoleOut)
- Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
- virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvcName)
- pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
- podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
- virtClient,
- pod,
- "compute",
- []string{tests.BinBash, "-c", virtioFsFileTestCmd},
- )
- Expect(err).ToNot(HaveOccurred())
- Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
- By("Finding virt-launcher pod")
- var virtlauncherPod *k8sv1.Pod
- Eventually(func() *k8sv1.Pod {
- podList, err := virtClient.CoreV1().Pods(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
- if err != nil {
- return nil
- }
- for _, pod := range podList.Items {
- for _, ownerRef := range pod.GetOwnerReferences() {
- if ownerRef.UID == vmi.GetUID() {
- virtlauncherPod = &pod
- break
- }
- }
- }
- return virtlauncherPod
- }, 30*time.Second, 1*time.Second).ShouldNot(BeNil())
- Expect(virtlauncherPod.Spec.Containers).To(HaveLen(3))
- foundContainer := false
- virtiofsContainerName := fmt.Sprintf("virtiofs-%s", pvcName)
- for _, container := range virtlauncherPod.Spec.Containers {
- if container.Name == virtiofsContainerName {
- foundContainer = true
- Expect(container.Resources.Requests.Cpu().Value()).To(Equal(resources.Requests.Cpu().Value()))
- Expect(container.Resources.Requests.Memory().Value()).To(Equal(resources.Requests.Memory().Value()))
- Expect(container.Resources.Limits.Cpu().Value()).To(Equal(resources.Limits.Cpu().Value()))
- Expect(container.Resources.Limits.Memory().Value()).To(Equal(resources.Limits.Memory().Value()))
- }
- }
- Expect(foundContainer).To(BeTrue())
- })
- })
- Context("Run a VMI with VirtIO-FS and a datavolume", func() {
- var dataVolume *cdiv1.DataVolume
- BeforeEach(func() {
- checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
- if !libstorage.HasCDI() {
- Skip("Skip DataVolume tests when CDI is not present")
- }
-
- sc, exists := libstorage.GetRWOFileSystemStorageClass()
- if !exists {
- Skip("Skip test when Filesystem storage is not present")
- }
-
- dataVolume = libdv.NewDataVolume(
- libdv.WithRegistryURLSource(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine)),
- libdv.WithPVC(libdv.PVCWithStorageClass(sc)),
- )
- })
-
- AfterEach(func() {
- libstorage.DeleteDataVolume(&dataVolume)
- })
-
- It("should be successfully started and virtiofs could be accessed", func() {
- dataVolume, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.NamespacePrivileged).Create(context.Background(), dataVolume, metav1.CreateOptions{})
- Expect(err).ToNot(HaveOccurred())
- By("Waiting until the DataVolume is ready")
- if libstorage.IsStorageClassBindingModeWaitForFirstConsumer(libstorage.Config.StorageRWOFileSystem) {
- Eventually(ThisDV(dataVolume), 30).Should(Or(BeInPhase(cdiv1.WaitForFirstConsumer), BeInPhase(cdiv1.PendingPopulation)))
- }
-
- virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", dataVolume.Name)
- virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
- mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
- mkdir %s
- mount -t virtiofs %s %s
- touch %s
- `, virtiofsMountPath, dataVolume.Name, virtiofsMountPath, virtiofsTestFile)
-
- vmi = libvmi.NewFedora(
- libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
- libvmi.WithFilesystemDV(dataVolume.Name),
- libvmi.WithNamespace(testsuite.NamespacePrivileged),
- )
- // with WFFC the run actually starts the import and then runs VM, so the timeout has to include both
- // import and start
- vmi = tests.RunVMIAndExpectLaunchWithDataVolume(vmi, dataVolume, 500)
-
- // Wait for cloud init to finish and start the agent inside the vmi.
- Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
- By(checkingVMInstanceConsoleOut)
- Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
- By("Checking that virtio-fs is mounted")
- listVirtioFSDisk := fmt.Sprintf("ls -l %s/*disk* | wc -l\n", virtiofsMountPath)
- Expect(console.ExpectBatch(vmi, []expect.Batcher{
- &expect.BSnd{S: listVirtioFSDisk},
- &expect.BExp{R: console.RetValue("1")},
- }, 30*time.Second)).To(Succeed(), "Should be able to access the mounted virtiofs file")
-
- virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", dataVolume.Name)
- pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
- podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
- virtClient,
- pod,
- "compute",
- []string{tests.BinBash, "-c", virtioFsFileTestCmd},
- )
- Expect(err).ToNot(HaveOccurred())
- Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
- err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
- Expect(err).ToNot(HaveOccurred())
- libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
-
- })
- })
Context("[rfe_id:3106][crit:medium][vendor:cnv-qe@redhat.com][level:component]With ephemeral alpine PVC", func() {
var isRunOnKindInfra bool
BeforeEach(func() {
diff --git a/tests/virtiofs/BUILD.bazel b/tests/virtiofs/BUILD.bazel
index f2b197bd2..f3bf5cea6 100644
--- a/tests/virtiofs/BUILD.bazel
+++ b/tests/virtiofs/BUILD.bazel
@@ -27,10 +27,13 @@ go_library(
"//tests/libvmi:go_default_library",
"//tests/libwait:go_default_library",
"//tests/testsuite:go_default_library",
+ "//tests/util:go_default_library",
"//vendor/github.com/google/goexpect:go_default_library",
"//vendor/github.com/onsi/ginkgo/v2:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pborman/uuid:go_default_library",
+ "//vendor/k8s.io/api/core/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1:go_default_library",
],
diff --git a/tests/virtiofs/datavolume.go b/tests/virtiofs/datavolume.go
index 69de40d44..2a0139376 100644
--- a/tests/virtiofs/datavolume.go
+++ b/tests/virtiofs/datavolume.go
@@ -37,6 +37,8 @@ import (
expect "github.com/google/goexpect"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ k8sv1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
@@ -53,6 +55,7 @@ import (
"kubevirt.io/kubevirt/tests/libstorage"
"kubevirt.io/kubevirt/tests/libwait"
"kubevirt.io/kubevirt/tests/testsuite"
+ "kubevirt.io/kubevirt/tests/util"
)
const (
@@ -149,21 +152,43 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
})
Context("VirtIO-FS with an empty PVC", func() {
-
- var pvc = "empty-pvc1"
+ var (
+ pvc = "empty-pvc1"
+ originalConfig v1.KubeVirtConfiguration
+ )
BeforeEach(func() {
checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
+ originalConfig = *util.GetCurrentKv(virtClient).Spec.Configuration.DeepCopy()
libstorage.CreateHostPathPv(pvc, testsuite.NamespacePrivileged, filepath.Join(testsuite.HostPathBase, pvc))
libstorage.CreateHostPathPVC(pvc, testsuite.NamespacePrivileged, "1G")
})
AfterEach(func() {
+ tests.UpdateKubeVirtConfigValueAndWait(originalConfig)
libstorage.DeletePVC(pvc, testsuite.NamespacePrivileged)
libstorage.DeletePV(pvc)
})
- It("should be successfully started and virtiofs could be accessed", func() {
+ It("[Serial] should be successfully started and virtiofs could be accessed", Serial, func() {
+ resources := k8sv1.ResourceRequirements{
+ Requests: k8sv1.ResourceList{
+ k8sv1.ResourceCPU: resource.MustParse("2m"),
+ k8sv1.ResourceMemory: resource.MustParse("14M"),
+ },
+ Limits: k8sv1.ResourceList{
+ k8sv1.ResourceCPU: resource.MustParse("101m"),
+ k8sv1.ResourceMemory: resource.MustParse("81M"),
+ },
+ }
+ config := originalConfig.DeepCopy()
+ config.SupportContainerResources = []v1.SupportContainerResources{
+ {
+ Type: v1.VirtioFS,
+ Resources: resources,
+ },
+ }
+ tests.UpdateKubeVirtConfigValueAndWait(*config)
pvcName := fmt.Sprintf("disk-%s", pvc)
virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", pvcName)
virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
@@ -196,6 +221,36 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
+ By("Finding virt-launcher pod")
+ var virtlauncherPod *k8sv1.Pod
+ Eventually(func() *k8sv1.Pod {
+ podList, err := virtClient.CoreV1().Pods(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ return nil
+ }
+ for _, pod := range podList.Items {
+ for _, ownerRef := range pod.GetOwnerReferences() {
+ if ownerRef.UID == vmi.GetUID() {
+ virtlauncherPod = &pod
+ break
+ }
+ }
+ }
+ return virtlauncherPod
+ }, 30*time.Second, 1*time.Second).ShouldNot(BeNil())
+ Expect(virtlauncherPod.Spec.Containers).To(HaveLen(3))
+ foundContainer := false
+ virtiofsContainerName := fmt.Sprintf("virtiofs-%s", pvcName)
+ for _, container := range virtlauncherPod.Spec.Containers {
+ if container.Name == virtiofsContainerName {
+ foundContainer = true
+ Expect(container.Resources.Requests.Cpu().Value()).To(Equal(resources.Requests.Cpu().Value()))
+ Expect(container.Resources.Requests.Memory().Value()).To(Equal(resources.Requests.Memory().Value()))
+ Expect(container.Resources.Limits.Cpu().Value()).To(Equal(resources.Limits.Cpu().Value()))
+ Expect(container.Resources.Limits.Memory().Value()).To(Equal(resources.Limits.Memory().Value()))
+ }
+ }
+ Expect(foundContainer).To(BeTrue())
})
})
@@ -273,6 +328,7 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
+
})
})
})
--
2.41.0
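
Note on the test flow: inside the guest, each share is mounted by its
virtiofs tag (the PVC or DataVolume name) and a marker file is created,
which the test then checks from the virt-launcher compute container. A
guest-side Go sketch of the same steps the cloud-init shell script
performs, assuming the golang.org/x/sys/unix package; the helper name
and the tag are hypothetical and merely mirror the test's conventions:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// mountAndMark mounts the virtiofs share exposed under the given tag
// (the PVC/DataVolume name) and drops a marker file that the test
// later looks for from the virt-launcher pod.
func mountAndMark(tag string) error {
	mountPath := fmt.Sprintf("/mnt/virtiofs_%s", tag)
	if err := os.MkdirAll(mountPath, 0o755); err != nil {
		return err
	}
	// Equivalent to: mount -t virtiofs <tag> <mountPath>
	if err := unix.Mount(tag, mountPath, "virtiofs", 0, ""); err != nil {
		return err
	}
	// Marker file later verified via ExecuteCommandOnPod.
	f, err := os.Create(filepath.Join(mountPath, "virtiofs_test"))
	if err != nil {
		return err
	}
	return f.Close()
}

func main() {
	if err := mountAndMark("pvc-1"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}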

0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch

@@ -0,0 +1,69 @@
From 7eeb9001226bde307420ab6e92a45520ff508ef6 Mon Sep 17 00:00:00 2001
From: Alice Frosi <afrosi@redhat.com>
Date: Mon, 19 Jun 2023 12:41:49 +0200
Subject: [PATCH 1/2] tests: fix error print on Expect

Signed-off-by: Alice Frosi <afrosi@redhat.com>
---
tests/storage/reservation.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/storage/reservation.go b/tests/storage/reservation.go
index a09853060..527f6f42e 100644
--- a/tests/storage/reservation.go
+++ b/tests/storage/reservation.go
@@ -70,7 +70,7 @@ var _ = SIGDescribe("[Serial]SCSI persistent reservation", Serial, func() {
Expect(err).ToNot(HaveOccurred())
stdout, stderr, err := exec.ExecuteCommandOnPodWithResults(virtClient, pod, "targetcli", cmd)
- Expect(err).ToNot(HaveOccurred(), stdout, stderr)
+ Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("command='targetcli %v' stdout='%s' stderr='%s'", args, stdout, stderr))
}
// createSCSIDisk creates a SCSI using targetcli utility and LinuxIO (see
--
2.41.0

From 56b50ec639a6870616d3bc09a21a7c6c3993ec6d Mon Sep 17 00:00:00 2001
From: Alice Frosi <afrosi@redhat.com>
Date: Tue, 20 Jun 2023 12:21:08 +0200
Subject: [PATCH 2/2] tests: leave some space for metadata on the backend PVC

In certain cases, the targetcli backstores/fileio create command fails
with:

  Could not expand file to 1073741824 bytes

We can try to avoid this issue by creating a smaller backend image. We
simply hardcode 800M instead of 1G since in these tests the size of the
disk doesn't matter; it is used only to test the SCSI persistent
reservation ioctls.

Signed-off-by: Alice Frosi <afrosi@redhat.com>
---
tests/storage/reservation.go | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/tests/storage/reservation.go b/tests/storage/reservation.go
index 527f6f42e..6410f0d64 100644
--- a/tests/storage/reservation.go
+++ b/tests/storage/reservation.go
@@ -94,10 +94,13 @@ var _ = SIGDescribe("[Serial]SCSI persistent reservation", Serial, func() {
By(fmt.Sprintf("ldconfig: stdout: %v stderr: %v", stdout, stderr))
Expect(err).ToNot(HaveOccurred())
- // Create backend file
+ // Create backend file. Leave some room for metadata and create a
+ // slightly smaller backend image: we use 800M instead of 1G. In
+ // this case, the disk size doesn't matter as the disk is used
+ // mostly to test the SCSI persistent reservation ioctls.
executeTargetCli(podName, []string{
"backstores/fileio",
- "create", backendDisk, "/disks/disk.img", "1G"})
+ "create", backendDisk, "/disks/disk.img", "800M"})
executeTargetCli(podName, []string{
"loopback/", "create", naa})
// Create LUN
--
2.41.0
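
Note on the sizing: fully allocating a 1G backing image on a 1G PVC
leaves no room for filesystem metadata, which is what triggers the
"Could not expand file" error above. A tiny Go sketch of that rationale
follows; the 80% headroom factor and the helper are assumptions for
illustration, not taken from the patch or from targetcli:

package main

import "fmt"

// backingImageSize returns a targetcli-style size string that leaves
// headroom on the backing PVC for filesystem metadata, so that
// "backstores/fileio create" can fully allocate the image.
func backingImageSize(pvcMiB int64) string {
	return fmt.Sprintf("%dM", pvcMiB*80/100)
}

func main() {
	// On a 1 GiB (1024 MiB) PVC this yields 819M; the patch simply
	// rounds down and hardcodes 800M.
	fmt.Println(backingImageSize(1024))
}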

kubevirt.changes

@@ -1,3 +1,13 @@
+-------------------------------------------------------------------
+Fri Jul 14 05:22:41 UTC 2023 - Vasily Ulyanov <vasily.ulyanov@suse.com>
+
+- Access ksm sysfs from the host filesystem
+  0002-ksm-Access-sysfs-from-the-host-filesystem.patch
+- Remove duplicated virtiofs tests
+  0003-Virtiofs-Remove-duplicated-functional-tests.patch
+- Fix SCSI Persistent Reservations tests
+  0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch
+
-------------------------------------------------------------------
Wed Jul 12 07:54:37 UTC 2023 - Vasily Ulyanov <vasily.ulyanov@suse.com>

kubevirt.spec

@@ -29,6 +29,9 @@ Source2: kubevirt_containers_meta.service
Source3: %{url}/releases/download/v%{version}/disks-images-provider.yaml
Source100: %{name}-rpmlintrc
Patch1: 0001-Fix-qemu-system-lookup.patch
+Patch2: 0002-ksm-Access-sysfs-from-the-host-filesystem.patch
+Patch3: 0003-Virtiofs-Remove-duplicated-functional-tests.patch
+Patch4: 0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch
BuildRequires: glibc-devel-static
BuildRequires: golang-packaging
BuildRequires: pkgconfig