- Access ksm sysfs from the host filesystem
  0002-ksm-Access-sysfs-from-the-host-filesystem.patch
- Remove duplicated virtiofs tests
  0003-Virtiofs-Remove-duplicated-functional-tests.patch
- Fix SCSI Persistent Reservations tests
  0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch

OBS-URL: https://build.opensuse.org/request/show/1099268
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=122
From 039a1749c4326fe8937a38e67dd7674eb1d1f3a8 Mon Sep 17 00:00:00 2001
From: Javier Cano Cano <jcanocan@redhat.com>
Date: Tue, 2 May 2023 11:33:02 +0200
Subject: [PATCH] Virtiofs: Remove duplicated functional tests

Removes the virtiofs tests located in tests/storage/storage.go
which are already present in tests/virtiofs/datavolume.go.

Signed-off-by: Javier Cano Cano <jcanocan@redhat.com>
---
 tests/storage/storage.go     | 257 -----------------------------------
 tests/virtiofs/BUILD.bazel   |   3 +
 tests/virtiofs/datavolume.go |  62 ++++++++-
 3 files changed, 62 insertions(+), 260 deletions(-)

diff --git a/tests/storage/storage.go b/tests/storage/storage.go
index 672ba2355..3e5963139 100644
--- a/tests/storage/storage.go
+++ b/tests/storage/storage.go
@@ -346,264 +346,7 @@ var _ = SIGDescribe("Storage", func() {
         })
 
     })
-    Context("VirtIO-FS with multiple PVCs", func() {
-        pvc1 := "pvc-1"
-        pvc2 := "pvc-2"
-        createPVC := func(name string) {
-            sc, _ := libstorage.GetRWXFileSystemStorageClass()
-            pvc := libstorage.NewPVC(name, "1Gi", sc)
-            _, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.NamespacePrivileged).Create(context.Background(), pvc, metav1.CreateOptions{})
-            ExpectWithOffset(1, err).NotTo(HaveOccurred())
-        }
-
-        BeforeEach(func() {
-            checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
-            createPVC(pvc1)
-            createPVC(pvc2)
-        })
-
-        AfterEach(func() {
-            libstorage.DeletePVC(pvc1, testsuite.NamespacePrivileged)
-            libstorage.DeletePVC(pvc2, testsuite.NamespacePrivileged)
-        })
-
-        DescribeTable("should be successfully started and accessible", func(option1, option2 libvmi.Option) {
-
-            virtiofsMountPath := func(pvcName string) string { return fmt.Sprintf("/mnt/virtiofs_%s", pvcName) }
-            virtiofsTestFile := func(virtiofsMountPath string) string { return fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath) }
-            mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
-                mkdir %s
-                mount -t virtiofs %s %s
-                touch %s
-
-                mkdir %s
-                mount -t virtiofs %s %s
-                touch %s
-            `, virtiofsMountPath(pvc1), pvc1, virtiofsMountPath(pvc1), virtiofsTestFile(virtiofsMountPath(pvc1)),
-                virtiofsMountPath(pvc2), pvc2, virtiofsMountPath(pvc2), virtiofsTestFile(virtiofsMountPath(pvc2)))
-
-            vmi = libvmi.NewFedora(
-                libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
-                libvmi.WithFilesystemPVC(pvc1),
-                libvmi.WithFilesystemPVC(pvc2),
-                libvmi.WithNamespace(testsuite.NamespacePrivileged),
-                option1, option2,
-            )
-
-            vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 300)
-
-            // Wait for cloud init to finish and start the agent inside the vmi.
-            Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
-            By(checkingVMInstanceConsoleOut)
-            Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
-            virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvc1)
-            pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-            podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
-                virtClient,
-                pod,
-                "compute",
-                []string{tests.BinBash, "-c", virtioFsFileTestCmd},
-            )
-            Expect(err).ToNot(HaveOccurred())
-            Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-
-            virtioFsFileTestCmd = fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvc2)
-            pod = tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-            podVirtioFsFileExist, err = exec.ExecuteCommandOnPod(
-                virtClient,
-                pod,
-                "compute",
-                []string{tests.BinBash, "-c", virtioFsFileTestCmd},
-            )
-            Expect(err).ToNot(HaveOccurred())
-            Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-        },
-            Entry("", func(instance *virtv1.VirtualMachineInstance) {}, func(instance *virtv1.VirtualMachineInstance) {}),
-            Entry("with passt enabled", libvmi.WithPasstInterfaceWithPort(), libvmi.WithNetwork(v1.DefaultPodNetwork())),
-        )
-
-    })
-    Context("VirtIO-FS with an empty PVC", func() {
-        var (
-            pvc            = "empty-pvc1"
-            originalConfig v1.KubeVirtConfiguration
-        )
-
-        BeforeEach(func() {
-            checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
-            originalConfig = *util.GetCurrentKv(virtClient).Spec.Configuration.DeepCopy()
-            libstorage.CreateHostPathPv(pvc, testsuite.NamespacePrivileged, filepath.Join(testsuite.HostPathBase, pvc))
-            libstorage.CreateHostPathPVC(pvc, testsuite.NamespacePrivileged, "1G")
-        })
-
-        AfterEach(func() {
-            tests.UpdateKubeVirtConfigValueAndWait(originalConfig)
-            libstorage.DeletePVC(pvc, testsuite.NamespacePrivileged)
-            libstorage.DeletePV(pvc)
-        })
-
-        It("[serial] should be successfully started and virtiofs could be accessed", Serial, func() {
-            resources := k8sv1.ResourceRequirements{
-                Requests: k8sv1.ResourceList{
-                    k8sv1.ResourceCPU:    resource.MustParse("2m"),
-                    k8sv1.ResourceMemory: resource.MustParse("14M"),
-                },
-                Limits: k8sv1.ResourceList{
-                    k8sv1.ResourceCPU:    resource.MustParse("101m"),
-                    k8sv1.ResourceMemory: resource.MustParse("81M"),
-                },
-            }
-            config := originalConfig.DeepCopy()
-            config.SupportContainerResources = []v1.SupportContainerResources{
-                {
-                    Type:      v1.VirtioFS,
-                    Resources: resources,
-                },
-            }
-            tests.UpdateKubeVirtConfigValueAndWait(*config)
-            pvcName := fmt.Sprintf("disk-%s", pvc)
-            virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", pvcName)
-            virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
-            mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
-                mkdir %s
-                mount -t virtiofs %s %s
-                touch %s
-            `, virtiofsMountPath, pvcName, virtiofsMountPath, virtiofsTestFile)
-
-            vmi = libvmi.NewFedora(
-                libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
-                libvmi.WithFilesystemPVC(pvcName),
-                libvmi.WithNamespace(testsuite.NamespacePrivileged),
-            )
-            vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 300)
-
-            // Wait for cloud init to finish and start the agent inside the vmi.
-            Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
-            By(checkingVMInstanceConsoleOut)
-            Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
-            virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", pvcName)
-            pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-            podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
-                virtClient,
-                pod,
-                "compute",
-                []string{tests.BinBash, "-c", virtioFsFileTestCmd},
-            )
-            Expect(err).ToNot(HaveOccurred())
-            Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-            By("Finding virt-launcher pod")
-            var virtlauncherPod *k8sv1.Pod
-            Eventually(func() *k8sv1.Pod {
-                podList, err := virtClient.CoreV1().Pods(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
-                if err != nil {
-                    return nil
-                }
-                for _, pod := range podList.Items {
-                    for _, ownerRef := range pod.GetOwnerReferences() {
-                        if ownerRef.UID == vmi.GetUID() {
-                            virtlauncherPod = &pod
-                            break
-                        }
-                    }
-                }
-                return virtlauncherPod
-            }, 30*time.Second, 1*time.Second).ShouldNot(BeNil())
-            Expect(virtlauncherPod.Spec.Containers).To(HaveLen(3))
-            foundContainer := false
-            virtiofsContainerName := fmt.Sprintf("virtiofs-%s", pvcName)
-            for _, container := range virtlauncherPod.Spec.Containers {
-                if container.Name == virtiofsContainerName {
-                    foundContainer = true
-                    Expect(container.Resources.Requests.Cpu().Value()).To(Equal(resources.Requests.Cpu().Value()))
-                    Expect(container.Resources.Requests.Memory().Value()).To(Equal(resources.Requests.Memory().Value()))
-                    Expect(container.Resources.Limits.Cpu().Value()).To(Equal(resources.Limits.Cpu().Value()))
-                    Expect(container.Resources.Limits.Memory().Value()).To(Equal(resources.Limits.Memory().Value()))
-                }
-            }
-            Expect(foundContainer).To(BeTrue())
-        })
-    })
-    Context("Run a VMI with VirtIO-FS and a datavolume", func() {
-        var dataVolume *cdiv1.DataVolume
-        BeforeEach(func() {
-            checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
-            if !libstorage.HasCDI() {
-                Skip("Skip DataVolume tests when CDI is not present")
-            }
-
-            sc, exists := libstorage.GetRWOFileSystemStorageClass()
-            if !exists {
-                Skip("Skip test when Filesystem storage is not present")
-            }
-
-            dataVolume = libdv.NewDataVolume(
-                libdv.WithRegistryURLSource(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine)),
-                libdv.WithPVC(libdv.PVCWithStorageClass(sc)),
-            )
-        })
-
-        AfterEach(func() {
-            libstorage.DeleteDataVolume(&dataVolume)
-        })
-
-        It("should be successfully started and virtiofs could be accessed", func() {
-            dataVolume, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.NamespacePrivileged).Create(context.Background(), dataVolume, metav1.CreateOptions{})
-            Expect(err).ToNot(HaveOccurred())
-            By("Waiting until the DataVolume is ready")
-            if libstorage.IsStorageClassBindingModeWaitForFirstConsumer(libstorage.Config.StorageRWOFileSystem) {
-                Eventually(ThisDV(dataVolume), 30).Should(Or(BeInPhase(cdiv1.WaitForFirstConsumer), BeInPhase(cdiv1.PendingPopulation)))
-            }
-
-            virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", dataVolume.Name)
-            virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
-            mountVirtiofsCommands := fmt.Sprintf(`#!/bin/bash
-                mkdir %s
-                mount -t virtiofs %s %s
-                touch %s
-            `, virtiofsMountPath, dataVolume.Name, virtiofsMountPath, virtiofsTestFile)
-
-            vmi = libvmi.NewFedora(
-                libvmi.WithCloudInitNoCloudUserData(mountVirtiofsCommands, true),
-                libvmi.WithFilesystemDV(dataVolume.Name),
-                libvmi.WithNamespace(testsuite.NamespacePrivileged),
-            )
-            // with WFFC the run actually starts the import and then runs VM, so the timeout has to include both
-            // import and start
-            vmi = tests.RunVMIAndExpectLaunchWithDataVolume(vmi, dataVolume, 500)
-
-            // Wait for cloud init to finish and start the agent inside the vmi.
-            Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected))
-
-            By(checkingVMInstanceConsoleOut)
-            Expect(console.LoginToFedora(vmi)).To(Succeed(), "Should be able to login to the Fedora VM")
-
-            By("Checking that virtio-fs is mounted")
-            listVirtioFSDisk := fmt.Sprintf("ls -l %s/*disk* | wc -l\n", virtiofsMountPath)
-            Expect(console.ExpectBatch(vmi, []expect.Batcher{
-                &expect.BSnd{S: listVirtioFSDisk},
-                &expect.BExp{R: console.RetValue("1")},
-            }, 30*time.Second)).To(Succeed(), "Should be able to access the mounted virtiofs file")
-
-            virtioFsFileTestCmd := fmt.Sprintf("test -f /run/kubevirt-private/vmi-disks/%s/virtiofs_test && echo exist", dataVolume.Name)
-            pod := tests.GetRunningPodByVirtualMachineInstance(vmi, testsuite.GetTestNamespace(vmi))
-            podVirtioFsFileExist, err := exec.ExecuteCommandOnPod(
-                virtClient,
-                pod,
-                "compute",
-                []string{tests.BinBash, "-c", virtioFsFileTestCmd},
-            )
-            Expect(err).ToNot(HaveOccurred())
-            Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
-            err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
-            Expect(err).ToNot(HaveOccurred())
-            libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
-
-        })
-    })
     Context("[rfe_id:3106][crit:medium][vendor:cnv-qe@redhat.com][level:component]With ephemeral alpine PVC", func() {
         var isRunOnKindInfra bool
         BeforeEach(func() {
diff --git a/tests/virtiofs/BUILD.bazel b/tests/virtiofs/BUILD.bazel
index f2b197bd2..f3bf5cea6 100644
--- a/tests/virtiofs/BUILD.bazel
+++ b/tests/virtiofs/BUILD.bazel
@@ -27,10 +27,13 @@ go_library(
         "//tests/libvmi:go_default_library",
         "//tests/libwait:go_default_library",
         "//tests/testsuite:go_default_library",
+        "//tests/util:go_default_library",
         "//vendor/github.com/google/goexpect:go_default_library",
         "//vendor/github.com/onsi/ginkgo/v2:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/github.com/pborman/uuid:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1:go_default_library",
     ],
diff --git a/tests/virtiofs/datavolume.go b/tests/virtiofs/datavolume.go
index 69de40d44..2a0139376 100644
--- a/tests/virtiofs/datavolume.go
+++ b/tests/virtiofs/datavolume.go
@@ -37,6 +37,8 @@ import (
     expect "github.com/google/goexpect"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
+    k8sv1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     v1 "kubevirt.io/api/core/v1"
@@ -53,6 +55,7 @@ import (
     "kubevirt.io/kubevirt/tests/libstorage"
     "kubevirt.io/kubevirt/tests/libwait"
     "kubevirt.io/kubevirt/tests/testsuite"
+    "kubevirt.io/kubevirt/tests/util"
 )
 
 const (
@@ -149,21 +152,43 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
     })
 
     Context("VirtIO-FS with an empty PVC", func() {
-
-        var pvc = "empty-pvc1"
+        var (
+            pvc            = "empty-pvc1"
+            originalConfig v1.KubeVirtConfiguration
+        )
 
         BeforeEach(func() {
             checks.SkipTestIfNoFeatureGate(virtconfig.VirtIOFSGate)
+            originalConfig = *util.GetCurrentKv(virtClient).Spec.Configuration.DeepCopy()
             libstorage.CreateHostPathPv(pvc, testsuite.NamespacePrivileged, filepath.Join(testsuite.HostPathBase, pvc))
             libstorage.CreateHostPathPVC(pvc, testsuite.NamespacePrivileged, "1G")
         })
 
         AfterEach(func() {
+            tests.UpdateKubeVirtConfigValueAndWait(originalConfig)
             libstorage.DeletePVC(pvc, testsuite.NamespacePrivileged)
             libstorage.DeletePV(pvc)
         })
 
-        It("should be successfully started and virtiofs could be accessed", func() {
+        It("[Serial] should be successfully started and virtiofs could be accessed", Serial, func() {
+            resources := k8sv1.ResourceRequirements{
+                Requests: k8sv1.ResourceList{
+                    k8sv1.ResourceCPU:    resource.MustParse("2m"),
+                    k8sv1.ResourceMemory: resource.MustParse("14M"),
+                },
+                Limits: k8sv1.ResourceList{
+                    k8sv1.ResourceCPU:    resource.MustParse("101m"),
+                    k8sv1.ResourceMemory: resource.MustParse("81M"),
+                },
+            }
+            config := originalConfig.DeepCopy()
+            config.SupportContainerResources = []v1.SupportContainerResources{
+                {
+                    Type:      v1.VirtioFS,
+                    Resources: resources,
+                },
+            }
+            tests.UpdateKubeVirtConfigValueAndWait(*config)
             pvcName := fmt.Sprintf("disk-%s", pvc)
             virtiofsMountPath := fmt.Sprintf("/mnt/virtiofs_%s", pvcName)
             virtiofsTestFile := fmt.Sprintf("%s/virtiofs_test", virtiofsMountPath)
@@ -196,6 +221,36 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
             )
             Expect(err).ToNot(HaveOccurred())
             Expect(strings.Trim(podVirtioFsFileExist, "\n")).To(Equal("exist"))
+            By("Finding virt-launcher pod")
+            var virtlauncherPod *k8sv1.Pod
+            Eventually(func() *k8sv1.Pod {
+                podList, err := virtClient.CoreV1().Pods(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
+                if err != nil {
+                    return nil
+                }
+                for _, pod := range podList.Items {
+                    for _, ownerRef := range pod.GetOwnerReferences() {
+                        if ownerRef.UID == vmi.GetUID() {
+                            virtlauncherPod = &pod
+                            break
+                        }
+                    }
+                }
+                return virtlauncherPod
+            }, 30*time.Second, 1*time.Second).ShouldNot(BeNil())
+            Expect(virtlauncherPod.Spec.Containers).To(HaveLen(3))
+            foundContainer := false
+            virtiofsContainerName := fmt.Sprintf("virtiofs-%s", pvcName)
+            for _, container := range virtlauncherPod.Spec.Containers {
+                if container.Name == virtiofsContainerName {
+                    foundContainer = true
+                    Expect(container.Resources.Requests.Cpu().Value()).To(Equal(resources.Requests.Cpu().Value()))
+                    Expect(container.Resources.Requests.Memory().Value()).To(Equal(resources.Requests.Memory().Value()))
+                    Expect(container.Resources.Limits.Cpu().Value()).To(Equal(resources.Limits.Cpu().Value()))
+                    Expect(container.Resources.Limits.Memory().Value()).To(Equal(resources.Limits.Memory().Value()))
+                }
+            }
+            Expect(foundContainer).To(BeTrue())
         })
     })
 
@@ -273,6 +328,7 @@ var _ = Describe("[sig-storage] virtiofs", decorators.SigStorage, func() {
         err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
         Expect(err).ToNot(HaveOccurred())
         libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
+
     })
 })
-- 
2.41.0