- Support multiple watchdogs in the domain schema
  0005-Support-multiple-watchdogs-in-the-domain-schema.patch
- Fix leaking file descriptor
  0006-isolation-close-file-when-exits.patch
- Fix volume detach on hotplug attachment pod delete
  0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch

OBS-URL: https://build.opensuse.org/request/show/1101725
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=124

From 7a2b9109d82cced1603dfbd35ec7c1afbf3473bb Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Mon, 24 Jul 2023 08:26:04 -0500
Subject: [PATCH 1/2] Fix volume detach on hotplug attachment pod delete

When the hotplug attachment pod is deleted, the VMI
volumestatus goes back to bound, which triggers the
manager to detach the volume from the running VM
interrupting any IO on that volume. The pod is then
re-created and the volume gets re-attached and operation
can continue, but if the volume is mounted, it needs
to be re-mounted in the VM.

This commit modifies the logic so that if the volume is
ready, it cannot go back to bound if the attachment pod
disappears. This prevents the detachments and issues with
IO on the running VM.

Signed-off-by: Alexander Wels <awels@redhat.com>
---
 pkg/virt-controller/watch/vmi.go | 19 +--
 .../virtwrap/converter/converter.go | 2 +-
 tests/storage/hotplug.go | 122 +++++++++++++++++-
 3 files changed, 127 insertions(+), 16 deletions(-)

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 03725ad46..801ffd141 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -2144,13 +2144,14 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
}
attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
if attachmentPod == nil {
- status.HotplugVolume.AttachPodName = ""
- status.HotplugVolume.AttachPodUID = ""
- // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
- phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
- status.Phase = phase
- status.Message = message
- status.Reason = reason
+ if status.Phase != virtv1.VolumeReady {
+ status.HotplugVolume.AttachPodUID = ""
+ // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
+ phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
+ status.Phase = phase
+ status.Message = message
+ status.Reason = reason
+ }
} else {
status.HotplugVolume.AttachPodName = attachmentPod.Name
if len(attachmentPod.Status.ContainerStatuses) == 1 && attachmentPod.Status.ContainerStatuses[0].Ready {
@@ -2239,8 +2240,8 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
}

func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
- return currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
- currentPhase == virtv1.HotplugVolumeAttachedToNode
+ return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
+ currentPhase == virtv1.HotplugVolumeAttachedToNode) && currentPhase != virtv1.VolumeReady
}

func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
diff --git a/pkg/virt-launcher/virtwrap/converter/converter.go b/pkg/virt-launcher/virtwrap/converter/converter.go
index db3c0a903..5c43acd74 100644
--- a/pkg/virt-launcher/virtwrap/converter/converter.go
+++ b/pkg/virt-launcher/virtwrap/converter/converter.go
@@ -1526,7 +1526,7 @@ func Convert_v1_VirtualMachineInstance_To_api_Domain(vmi *v1.VirtualMachineInsta
}
volume := volumes[disk.Name]
if volume == nil {
- return fmt.Errorf("No matching volume with name %s found", disk.Name)
+ return fmt.Errorf("no matching volume with name %s found", disk.Name)
}

if _, ok := c.HotplugVolumes[disk.Name]; !ok {
diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index 45284ed49..a85976484 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -57,7 +57,6 @@ import (
"kubevirt.io/kubevirt/tests/flags"
"kubevirt.io/kubevirt/tests/framework/checks"
"kubevirt.io/kubevirt/tests/framework/matcher"
- . "kubevirt.io/kubevirt/tests/framework/matcher"
"kubevirt.io/kubevirt/tests/libdv"
"kubevirt.io/kubevirt/tests/libnode"
"kubevirt.io/kubevirt/tests/libstorage"
@@ -503,7 +502,7 @@ var _ = SIGDescribe("Hotplug", func() {

dvBlock, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(dvBlock)).Create(context.Background(), dvBlock, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
- libstorage.EventuallyDV(dvBlock, 240, HaveSucceeded())
+ libstorage.EventuallyDV(dvBlock, 240, matcher.HaveSucceeded())
return dvBlock
}

@@ -1120,7 +1119,7 @@ var _ = SIGDescribe("Hotplug", func() {
var err error
url := cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros)

- storageClass, foundSC := libstorage.GetRWXFileSystemStorageClass()
+ storageClass, foundSC := libstorage.GetRWOFileSystemStorageClass()
if !foundSC {
Skip("Skip test when Filesystem storage is not present")
}
@@ -1131,7 +1130,6 @@ var _ = SIGDescribe("Hotplug", func() {
libdv.WithPVC(
libdv.PVCWithStorageClass(storageClass),
libdv.PVCWithVolumeSize("256Mi"),
- libdv.PVCWithReadWriteManyAccessMode(),
),
libdv.WithForceBindAnnotation(),
)
@@ -1140,7 +1138,7 @@ var _ = SIGDescribe("Hotplug", func() {
Expect(err).ToNot(HaveOccurred())

By("waiting for the dv import to pvc to finish")
- libstorage.EventuallyDV(dv, 180, HaveSucceeded())
+ libstorage.EventuallyDV(dv, 180, matcher.HaveSucceeded())

By("rename disk image on PVC")
pvc, err := virtClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.Background(), dv.Name, metav1.GetOptions{})
@@ -1171,6 +1169,118 @@ var _ = SIGDescribe("Hotplug", func() {
})
})

+ Context("delete attachment pod several times", func() {
|
|
+ var (
|
|
+ vm *v1.VirtualMachine
|
|
+ hpvolume *cdiv1.DataVolume
|
|
+ )
|
|
+
|
|
+ BeforeEach(func() {
|
|
+ if !libstorage.HasCDI() {
|
|
+ Skip("Skip tests when CDI is not present")
|
|
+ }
|
|
+ _, foundSC := libstorage.GetRWXBlockStorageClass()
|
|
+ if !foundSC {
|
|
+ Skip("Skip test when block RWX storage is not present")
|
|
+ }
|
|
+ })
|
|
+
|
|
+ AfterEach(func() {
|
|
+ if vm != nil {
|
|
+ err := virtClient.VirtualMachine(vm.Namespace).Delete(context.Background(), vm.Name, &metav1.DeleteOptions{})
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+ vm = nil
|
|
+ }
|
|
+ })
|
|
+
|
|
+ deleteAttachmentPod := func(vmi *v1.VirtualMachineInstance) {
|
|
+ podName := ""
|
|
+ for _, volume := range vmi.Status.VolumeStatus {
|
|
+ if volume.HotplugVolume != nil {
|
|
+ podName = volume.HotplugVolume.AttachPodName
|
|
+ break
|
|
+ }
|
|
+ }
|
|
+ Expect(podName).ToNot(BeEmpty())
|
|
+ foreGround := metav1.DeletePropagationForeground
|
|
+ err := virtClient.CoreV1().Pods(vmi.Namespace).Delete(context.Background(), podName, metav1.DeleteOptions{
|
|
+ GracePeriodSeconds: pointer.Int64(0),
|
|
+ PropagationPolicy: &foreGround,
|
|
+ })
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+ Eventually(func() bool {
|
|
+ _, err := virtClient.CoreV1().Pods(vmi.Namespace).Get(context.Background(), podName, metav1.GetOptions{})
|
|
+ return errors.IsNotFound(err)
|
|
+ }, 300*time.Second, 1*time.Second).Should(BeTrue())
|
|
+ }
|
|
+
|
|
+ It("should remain active", func() {
|
|
+ checkVolumeName := "checkvolume"
|
|
+ volumeMode := corev1.PersistentVolumeBlock
|
|
+ addVolumeFunc := addDVVolumeVMI
|
|
+ var err error
|
|
+ storageClass, _ := libstorage.GetRWXBlockStorageClass()
|
|
+
|
|
+ blankDv := func() *cdiv1.DataVolume {
|
|
+ return libdv.NewDataVolume(
|
|
+ libdv.WithNamespace(testsuite.GetTestNamespace(nil)),
|
|
+ libdv.WithBlankImageSource(),
|
|
+ libdv.WithPVC(
|
|
+ libdv.PVCWithStorageClass(storageClass),
|
|
+ libdv.PVCWithVolumeSize(cd.BlankVolumeSize),
|
|
+ libdv.PVCWithReadWriteManyAccessMode(),
|
|
+ libdv.PVCWithVolumeMode(volumeMode),
|
|
+ ),
|
|
+ libdv.WithForceBindAnnotation(),
|
|
+ )
|
|
+ }
|
|
+ vmi := libvmi.NewCirros()
|
|
+ vm := tests.NewRandomVirtualMachine(vmi, true)
|
|
+ vm, err = virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+
|
|
+ Eventually(func() bool {
|
|
+ vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Get(context.Background(), vm.Name, &metav1.GetOptions{})
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+ return vm.Status.Ready
|
|
+ }, 300*time.Second, 1*time.Second).Should(BeTrue())
|
|
+ By("creating blank hotplug volumes")
|
|
+ hpvolume = blankDv()
|
|
+ dv, err := virtClient.CdiClient().CdiV1beta1().DataVolumes(hpvolume.Namespace).Create(context.Background(), hpvolume, metav1.CreateOptions{})
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+ By("waiting for the dv import to pvc to finish")
|
|
+ libstorage.EventuallyDV(dv, 180, matcher.HaveSucceeded())
|
|
+ vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+
|
|
+ By("hotplugging the volume check volume")
|
|
+ addVolumeFunc(vmi.Name, vmi.Namespace, checkVolumeName, hpvolume.Name, v1.DiskBusSCSI, false, "")
|
|
+ vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+ verifyVolumeAndDiskVMIAdded(virtClient, vmi, checkVolumeName)
|
|
+ verifyVolumeStatus(vmi, v1.VolumeReady, "", checkVolumeName)
|
|
+ getVmiConsoleAndLogin(vmi)
|
|
+
|
|
+ By("verifying the volume is useable and creating some data on it")
|
|
+ verifyHotplugAttachedAndUseable(vmi, []string{checkVolumeName})
|
|
+ targets := getTargetsFromVolumeStatus(vmi, checkVolumeName)
|
|
+ Expect(targets).ToNot(BeEmpty())
|
|
+ verifyWriteReadData(vmi, targets[0])
|
|
+ vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+ By("deleting the attachment pod a few times, try to make the currently attach volume break")
|
|
+ for i := 0; i < 10; i++ {
|
|
+ deleteAttachmentPod(vmi)
|
|
+ vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
|
|
+ Expect(err).ToNot(HaveOccurred())
|
|
+ }
|
|
+ By("verifying the volume has not been disturbed in the VM")
|
|
+ targets = getTargetsFromVolumeStatus(vmi, checkVolumeName)
|
|
+ Expect(targets).ToNot(BeEmpty())
|
|
+ verifyWriteReadData(vmi, targets[0])
|
|
+ })
|
|
+ })
|
|
+
|
|
Context("with limit range in namespace", func() {
|
|
var (
|
|
sc string
|
|
@@ -1195,7 +1305,7 @@ var _ = SIGDescribe("Hotplug", func() {
|
|
vm.Spec.Template.Spec.Domain.Resources.Limits = corev1.ResourceList{}
|
|
vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceMemory] = *memLimitQuantity
|
|
vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceCPU] = *cpuLimitQuantity
|
|
- vm.Spec.Running = pointer.BoolPtr(true)
|
|
+ vm.Spec.Running = pointer.Bool(true)
|
|
vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
|
|
Expect(err).ToNot(HaveOccurred())
|
|
Eventually(func() bool {
|
|
--
|
|
2.41.0
|
|
|
|
|
|
From 14854d800acaf6c17a487b60d28d4eb32bb8d9d2 Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Tue, 25 Jul 2023 07:20:13 -0500
Subject: [PATCH 2/2] Address code review comments

Remove unneeded phase check, and move other check into
its own function in case we need more elaborate checks

Signed-off-by: Alexander Wels <awels@redhat.com>
---
 pkg/virt-controller/watch/vmi.go | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 801ffd141..9afaee4f0 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -2144,9 +2144,9 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
}
attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
if attachmentPod == nil {
- if status.Phase != virtv1.VolumeReady {
+ if !c.volumeReady(status.Phase) {
status.HotplugVolume.AttachPodUID = ""
- // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
+ // Volume is not hotplugged in VM and Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
status.Phase = phase
status.Message = message
@@ -2216,6 +2216,10 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
return nil
}

+func (c *VMIController) volumeReady(phase virtv1.VolumePhase) bool {
+ return phase == virtv1.VolumeReady
+}
+
func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim) (cdiv1.Percent, error) {
// To avoid conflicts, we only allow having one CDI instance
if cdiInstances := len(c.cdiInformer.GetStore().List()); cdiInstances != 1 {
@@ -2241,7 +2245,7 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)

func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
- currentPhase == virtv1.HotplugVolumeAttachedToNode) && currentPhase != virtv1.VolumeReady
+ currentPhase == virtv1.HotplugVolumeAttachedToNode)
}

func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
--
2.41.0
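
Below is a minimal, standalone Go sketch of the guard logic these two patches introduce; the types and constants are simplified stand-ins, not the actual KubeVirt API. It illustrates the behavior described in the commit message: once a hotplug volume has reached the Ready phase, a missing attachment pod no longer causes the controller to recompute the phase from the PVC, so the volume is not detached from the running VM.

package main

import "fmt"

// VolumePhase is an assumed stand-in for virtv1.VolumePhase.
type VolumePhase string

const (
	VolumePending               VolumePhase = "Pending"
	VolumeBound                 VolumePhase = "Bound"
	VolumeReady                 VolumePhase = "Ready"
	HotplugVolumeAttachedToNode VolumePhase = "AttachedToNode"
)

// volumeReady mirrors the helper added in patch 2/2.
func volumeReady(phase VolumePhase) bool {
	return phase == VolumeReady
}

// canMoveToAttachedPhase mirrors the final form of the controller check:
// the explicit VolumeReady exclusion from patch 1/2 is dropped again in
// patch 2/2 because a Ready volume never reaches this code path.
func canMoveToAttachedPhase(current VolumePhase) bool {
	return current == "" || current == VolumeBound || current == VolumePending ||
		current == HotplugVolumeAttachedToNode
}

func main() {
	for _, phase := range []VolumePhase{VolumeReady, VolumeBound} {
		if volumeReady(phase) {
			// Attachment pod is gone, but the guest already uses the volume:
			// keep the status so the volume is not detached.
			fmt.Printf("%s: keep status, skip PVC re-evaluation\n", phase)
		} else {
			// Volume never became ready: recompute phase/reason/message
			// from the PVC, as the controller did before.
			fmt.Printf("%s: recompute from PVC (attachable: %v)\n",
				phase, canMoveToAttachedPhase(phase))
		}
	}
}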