- Bump client-go (fix possible panic in discovery)
  0011-Fix-Aggregated-Discovery.patch
- Wait for new hotplug attachment pod to be ready
  0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
- Adapt the storage tests to the new populators flow
  0013-Adapt-e2e-tests-to-CDI-1.57.0.patch

OBS-URL: https://build.opensuse.org/request/show/1104204
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=130
From 61ca1e96363afe403465ed195b8cc808a4a04f06 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk <akalenyu@redhat.com>
Date: Wed, 12 Jul 2023 19:52:04 +0300
Subject: [PATCH 1/2] Don't wait for populator target PVC to be bound

Populator PVCs only reach the Bound phase once population is done, as
opposed to the CDI population flow, which worked on the target PVC
directly.

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
 tests/storage/export.go | 70 ++---------------------------------------
 1 file changed, 3 insertions(+), 67 deletions(-)

diff --git a/tests/storage/export.go b/tests/storage/export.go
index d456e2fb1..4fab2aec1 100644
--- a/tests/storage/export.go
+++ b/tests/storage/export.go
@@ -48,7 +48,6 @@ import (
 
 	k8sv1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
-	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -230,68 +229,6 @@ var _ = SIGDescribe("Export", func() {
 		return tests.RunPod(pod)
 	}
 
-	createTriggerPodForPvc := func(pvc *k8sv1.PersistentVolumeClaim) *k8sv1.Pod {
-		volumeName := pvc.GetName()
-		podName := fmt.Sprintf("bind-%s", volumeName)
-		pod := tests.RenderPod(podName, []string{"/bin/sh", "-c", "sleep 1"}, []string{})
-		pod.Spec.Volumes = append(pod.Spec.Volumes, k8sv1.Volume{
-			Name: volumeName,
-			VolumeSource: k8sv1.VolumeSource{
-				PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
-					ClaimName: pvc.GetName(),
-				},
-			},
-		})
-
-		volumeMode := pvc.Spec.VolumeMode
-		if volumeMode != nil && *volumeMode == k8sv1.PersistentVolumeBlock {
-			addBlockVolume(pod, volumeName)
-		} else {
-			addFilesystemVolume(pod, volumeName)
-		}
-		return tests.RunPodAndExpectCompletion(pod)
-	}
-
-	isWaitForFirstConsumer := func(storageClassName string) bool {
-		sc, err := virtClient.StorageV1().StorageClasses().Get(context.Background(), storageClassName, metav1.GetOptions{})
-		Expect(err).ToNot(HaveOccurred())
-		return sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
-	}
-
-	ensurePVCBound := func(pvc *k8sv1.PersistentVolumeClaim) {
-		namespace := pvc.Namespace
-		if !isWaitForFirstConsumer(*pvc.Spec.StorageClassName) {
-			By("Checking for bound claim on non-WFFC storage")
-			// Not WFFC, pvc will be bound
-			Eventually(func() k8sv1.PersistentVolumeClaimPhase {
-				pvc, err := virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), pvc.Name, metav1.GetOptions{})
-				Expect(err).ToNot(HaveOccurred())
-				return pvc.Status.Phase
-			}, 30*time.Second, 1*time.Second).Should(Equal(k8sv1.ClaimBound))
-			return
-		}
-		By("Checking the PVC is pending for WFFC storage")
-		Eventually(func() k8sv1.PersistentVolumeClaimPhase {
-			pvc, err := virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), pvc.Name, metav1.GetOptions{})
-			Expect(err).ToNot(HaveOccurred())
-			return pvc.Status.Phase
-		}, 15*time.Second, 1*time.Second).Should(Equal(k8sv1.ClaimPending))
-
-		By("Creating trigger pod to bind WFFC storage")
-		triggerPod := createTriggerPodForPvc(pvc)
-		By("Checking the PVC was bound")
-		Eventually(func() k8sv1.PersistentVolumeClaimPhase {
-			pvc, err := virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), pvc.Name, metav1.GetOptions{})
-			Expect(err).ToNot(HaveOccurred())
-			return pvc.Status.Phase
-		}, 30*time.Second, 1*time.Second).Should(Equal(k8sv1.ClaimBound))
-		By("Deleting the trigger pod")
-		immediate := int64(0)
-		Expect(virtClient.CoreV1().Pods(triggerPod.Namespace).Delete(context.Background(), triggerPod.Name, metav1.DeleteOptions{
-			GracePeriodSeconds: &immediate,
-		})).To(Succeed())
-	}
-
 	createExportTokenSecret := func(name, namespace string) *k8sv1.Secret {
 		var err error
 		secret := &k8sv1.Secret{
@@ -352,6 +289,7 @@ var _ = SIGDescribe("Export", func() {
 		dv := libdv.NewDataVolume(
 			libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros), cdiv1.RegistryPullNode),
 			libdv.WithPVC(libdv.PVCWithStorageClass(sc), libdv.PVCWithVolumeMode(volumeMode)),
+			libdv.WithForceBindAnnotation(),
 		)
 
 		dv, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(), dv, metav1.CreateOptions{})
@@ -362,7 +300,6 @@ var _ = SIGDescribe("Export", func() {
 			pvc, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(), dv.Name, metav1.GetOptions{})
 			return err
 		}, 60*time.Second, 1*time.Second).Should(BeNil(), "persistent volume associated with DV should be created")
-		ensurePVCBound(pvc)
 
 		By("Making sure the DV is successful")
 		libstorage.EventuallyDV(dv, 90, HaveSucceeded())
@@ -847,6 +784,7 @@ var _ = SIGDescribe("Export", func() {
 		dv := libdv.NewDataVolume(
 			libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros), cdiv1.RegistryPullNode),
 			libdv.WithPVC(libdv.PVCWithStorageClass(sc)),
+			libdv.WithForceBindAnnotation(),
 		)
 
 		name := dv.Name
@@ -869,12 +807,10 @@ var _ = SIGDescribe("Export", func() {
 		}, 60*time.Second, 1*time.Second).Should(ContainElement(expectedCond), "export should report missing pvc")
 
 		dv, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(), dv, metav1.CreateOptions{})
-		var pvc *k8sv1.PersistentVolumeClaim
 		Eventually(func() error {
-			pvc, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(), dv.Name, metav1.GetOptions{})
+			_, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(), dv.Name, metav1.GetOptions{})
 			return err
 		}, 60*time.Second, 1*time.Second).Should(BeNil(), "persistent volume associated with DV should be created")
-		ensurePVCBound(pvc)
 
 		By("Making sure the DV is successful")
 		libstorage.EventuallyDV(dv, 90, HaveSucceeded())
-- 
2.41.0
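
Note on the patch above: with the populators flow, the target PVC stays
Pending until population completes, so waiting for Bound (or spawning a
trigger pod to bind WFFC storage) no longer works as a readiness gate. The
tests instead opt into immediate binding via libdv.WithForceBindAnnotation().
A minimal sketch of what that option amounts to, assuming CDI's
immediate-bind annotation key (the authoritative constant lives in the CDI
API; forceBind is a hypothetical helper, not part of the patch):

package sketch

import (
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// forceBind marks a DataVolume so CDI provisions and binds its PVC right
// away, even on WaitForFirstConsumer storage, instead of waiting for a
// consumer pod to schedule.
func forceBind(dv *cdiv1.DataVolume) {
	if dv.Annotations == nil {
		dv.Annotations = map[string]string{}
	}
	// Assumed annotation key, mirroring cdi.kubevirt.io conventions.
	dv.Annotations["cdi.kubevirt.io/storage.bind.immediate.requested"] = "true"
}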
From 5b44741c1ca7df3b7121dff7db6a52f6599b7144 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk <akalenyu@redhat.com>
Date: Wed, 12 Jul 2023 19:57:57 +0300
Subject: [PATCH 2/2] Don't check for CloneOf/CloneRequest with populator
 target PVCs

These annotations simply don't exist (and are not needed) with populators.

Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
 tests/storage/restore.go | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/tests/storage/restore.go b/tests/storage/restore.go
index dffd0f1fe..5a09ca839 100644
--- a/tests/storage/restore.go
+++ b/tests/storage/restore.go
@@ -1776,13 +1776,18 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
 		}
 		pvc, err := virtClient.CoreV1().PersistentVolumeClaims(vm.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
 		Expect(err).ToNot(HaveOccurred())
+		if pvc.Spec.DataSourceRef != nil {
+			// These annotations only exist pre-k8s-populators flows
+			return
+		}
 		for _, a := range []string{"k8s.io/CloneRequest", "k8s.io/CloneOf"} {
 			_, ok := pvc.Annotations[a]
 			Expect(ok).Should(Equal(shouldExist))
 		}
 	}
 
-	createVMFromSource := func() *v1.VirtualMachine {
+	createNetworkCloneVMFromSource := func() *v1.VirtualMachine {
+		// TODO: consider ensuring network clone gets done here using StorageProfile CloneStrategy
 		dataVolume := libdv.NewDataVolume(
 			libdv.WithPVCSource(sourceDV.Namespace, sourceDV.Name),
 			libdv.WithPVC(libdv.PVCWithStorageClass(snapshotStorageClass), libdv.PVCWithVolumeSize("1Gi")),
@@ -1796,7 +1801,7 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
 	}
 
 	DescribeTable("should restore a vm that boots from a network cloned datavolumetemplate", func(restoreToNewVM, deleteSourcePVC bool) {
-		vm, vmi = createAndStartVM(createVMFromSource())
+		vm, vmi = createAndStartVM(createNetworkCloneVMFromSource())
 
 		checkCloneAnnotations(vm, true)
 		if deleteSourcePVC {
@@ -1813,7 +1818,7 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
 	)
 
 	DescribeTable("should restore a vm that boots from a network cloned datavolume (not template)", func(restoreToNewVM, deleteSourcePVC bool) {
-		vm = createVMFromSource()
+		vm = createNetworkCloneVMFromSource()
 		dv := orphanDataVolumeTemplate(vm, 0)
 
 		dv, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(vm.Namespace).Create(context.Background(), dv, metav1.CreateOptions{})
-- 
2.41.0
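
Note on the patch above: pvc.Spec.DataSourceRef is the discriminator. A PVC
provisioned through a Kubernetes volume populator carries a DataSourceRef,
and in that flow the legacy k8s.io/CloneRequest / k8s.io/CloneOf annotations
are never stamped on the claim. A standalone sketch of the decision the
patched test makes (helper names are illustrative, not from the patch):

package sketch

import (
	k8sv1 "k8s.io/api/core/v1"
)

// legacyCloneAnnotations are written by CDI's pre-populator clone flow
// directly onto the target PVC.
var legacyCloneAnnotations = []string{"k8s.io/CloneRequest", "k8s.io/CloneOf"}

// expectLegacyCloneAnnotations reports whether a restored PVC should carry
// the legacy clone annotations: only when it was not provisioned via a
// volume populator, i.e. when Spec.DataSourceRef is nil.
func expectLegacyCloneAnnotations(pvc *k8sv1.PersistentVolumeClaim) bool {
	return pvc.Spec.DataSourceRef == nil
}

// missingCloneAnnotations lists any expected-but-absent clone annotations;
// for populator-provisioned PVCs there is nothing to check.
func missingCloneAnnotations(pvc *k8sv1.PersistentVolumeClaim) []string {
	if !expectLegacyCloneAnnotations(pvc) {
		return nil
	}
	var missing []string
	for _, a := range legacyCloneAnnotations {
		if _, ok := pvc.Annotations[a]; !ok {
			missing = append(missing, a)
		}
	}
	return missing
}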