- Bump client-go (fix possible panic in discovery)
  0011-Fix-Aggregated-Discovery.patch
- Wait for new hotplug attachment pod to be ready
  0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
- Adapt the storage tests to the new populators flow
  0013-Adapt-e2e-tests-to-CDI-1.57.0.patch
OBS-URL: https://build.opensuse.org/request/show/1104204
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=130
From ee3463ae990a1776908483b182ad79c79637cd5d Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Fri, 11 Aug 2023 07:56:29 -0500
Subject: [PATCH 1/4] Wait for new attachment pod

Before deleting the old attachment pod, wait for the new attachment
pod to be ready, so that k8s does not detach the volume from the
node: from its perspective there is always a pod using the volume.

Fixed an issue where, when adding or removing a volume, the existing
volumes would still have the UID of the old attachment pod in the
VMI status, which caused errors in the virt-handler logs about not
being able to find the device or image.

Fixed an issue where the cleanup would attempt to remove a volume
that was already gone, causing errors in the virt-handler log.

Signed-off-by: Alexander Wels <awels@redhat.com>
---
 pkg/virt-controller/watch/vmi.go       | 60 +++++++++++++++-----------
 pkg/virt-controller/watch/vmi_test.go  | 52 ++++++++++++++++++++++
 pkg/virt-handler/hotplug-disk/mount.go |  7 ++-
 tests/storage/hotplug.go               | 10 +++++
 4 files changed, 103 insertions(+), 26 deletions(-)

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 9afaee4f0..99af8b8cb 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -516,11 +516,7 @@ func (c *VMIController) hasOwnerVM(vmi *virtv1.VirtualMachineInstance) bool {
     }
 
     ownerVM := obj.(*virtv1.VirtualMachine)
-    if controllerRef.UID == ownerVM.UID {
-        return true
-    }
-
-    return false
+    return controllerRef.UID == ownerVM.UID
 }
 
 func (c *VMIController) updateStatus(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume, syncErr syncError) error {
@@ -1816,15 +1812,29 @@ func (c *VMIController) waitForFirstConsumerTemporaryPods(vmi *virtv1.VirtualMac
 }
 
 func (c *VMIController) needsHandleHotplug(hotplugVolumes []*virtv1.Volume, hotplugAttachmentPods []*k8sv1.Pod) bool {
+    if len(hotplugAttachmentPods) > 1 {
+        return true
+    }
     // Determine if the ready volumes have changed compared to the current pod
     for _, attachmentPod := range hotplugAttachmentPods {
         if c.podVolumesMatchesReadyVolumes(attachmentPod, hotplugVolumes) {
-            log.DefaultLogger().Infof("Don't need to handle as we have a matching attachment pod")
             return false
         }
-        return true
     }
-    return len(hotplugVolumes) > 0
+    return len(hotplugVolumes) > 0 || len(hotplugAttachmentPods) > 0
+}
+
+func (c *VMIController) getActiveAndOldAttachmentPods(readyHotplugVolumes []*virtv1.Volume, hotplugAttachmentPods []*k8sv1.Pod) (*k8sv1.Pod, []*k8sv1.Pod) {
+    var currentPod *k8sv1.Pod
+    oldPods := make([]*k8sv1.Pod, 0)
+    for _, attachmentPod := range hotplugAttachmentPods {
+        if !c.podVolumesMatchesReadyVolumes(attachmentPod, readyHotplugVolumes) {
+            oldPods = append(oldPods, attachmentPod)
+        } else {
+            currentPod = attachmentPod
+        }
+    }
+    return currentPod, oldPods
 }
 
 func (c *VMIController) handleHotplugVolumes(hotplugVolumes []*virtv1.Volume, hotplugAttachmentPods []*k8sv1.Pod, vmi *virtv1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume) syncError {
@@ -1855,29 +1865,25 @@ func (c *VMIController) handleHotplugVolumes(hotplugVolumes []*virtv1.Volume, ho
         readyHotplugVolumes = append(readyHotplugVolumes, volume)
     }
     // Determine if the ready volumes have changed compared to the current pod
-    currentPod := make([]*k8sv1.Pod, 0)
-    oldPods := make([]*k8sv1.Pod, 0)
-    for _, attachmentPod := range hotplugAttachmentPods {
-        if !c.podVolumesMatchesReadyVolumes(attachmentPod, readyHotplugVolumes) {
-            oldPods = append(oldPods, attachmentPod)
-        } else {
-            currentPod = append(currentPod, attachmentPod)
-        }
-    }
+    currentPod, oldPods := c.getActiveAndOldAttachmentPods(readyHotplugVolumes, hotplugAttachmentPods)
 
-    if len(currentPod) == 0 && len(readyHotplugVolumes) > 0 {
+    if currentPod == nil && len(readyHotplugVolumes) > 0 {
         // ready volumes have changed
         // Create new attachment pod that holds all the ready volumes
         if err := c.createAttachmentPod(vmi, virtLauncherPod, readyHotplugVolumes); err != nil {
             return err
         }
     }
-    // Delete old attachment pod
-    for _, attachmentPod := range oldPods {
-        if err := c.deleteAttachmentPodForVolume(vmi, attachmentPod); err != nil {
-            return &syncErrorImpl{fmt.Errorf("Error deleting attachment pod %v", err), FailedDeletePodReason}
+
+    if len(readyHotplugVolumes) == 0 || (currentPod != nil && currentPod.Status.Phase == k8sv1.PodRunning) {
+        // Delete old attachment pod
+        for _, attachmentPod := range oldPods {
+            if err := c.deleteAttachmentPodForVolume(vmi, attachmentPod); err != nil {
+                return &syncErrorImpl{fmt.Errorf("Error deleting attachment pod %v", err), FailedDeletePodReason}
+            }
         }
     }
+
     return nil
 }
 
@@ -2121,6 +2127,9 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
     if err != nil {
         return err
     }
+
+    attachmentPod, _ := c.getActiveAndOldAttachmentPods(hotplugVolumes, attachmentPods)
+
     newStatus := make([]virtv1.VolumeStatus, 0)
     for i, volume := range vmi.Spec.Volumes {
         status := virtv1.VolumeStatus{}
@@ -2142,7 +2151,6 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
                 ClaimName: volume.Name,
             }
         }
-        attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
         if attachmentPod == nil {
             if !c.volumeReady(status.Phase) {
                 status.HotplugVolume.AttachPodUID = ""
@@ -2156,6 +2164,9 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
             status.HotplugVolume.AttachPodName = attachmentPod.Name
             if len(attachmentPod.Status.ContainerStatuses) == 1 && attachmentPod.Status.ContainerStatuses[0].Ready {
                 status.HotplugVolume.AttachPodUID = attachmentPod.UID
+            } else {
+                // Remove UID of old pod if a new one is available, but not yet ready
+                status.HotplugVolume.AttachPodUID = ""
             }
             if c.canMoveToAttachedPhase(status.Phase) {
                 status.Phase = virtv1.HotplugVolumeAttachedToNode
@@ -2244,8 +2255,7 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
 }
 
 func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
-    return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
-        currentPhase == virtv1.HotplugVolumeAttachedToNode)
+    return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending)
 }
 
 func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
diff --git a/pkg/virt-controller/watch/vmi_test.go b/pkg/virt-controller/watch/vmi_test.go
index a9b173232..932326432 100644
--- a/pkg/virt-controller/watch/vmi_test.go
+++ b/pkg/virt-controller/watch/vmi_test.go
@@ -2700,6 +2700,58 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
             []string{SuccessfulCreatePodReason}),
     )
 
+    DescribeTable("Should properly calculate if it needs to handle hotplug volumes", func(hotplugVolumes []*virtv1.Volume, attachmentPods []*k8sv1.Pod, match gomegaTypes.GomegaMatcher) {
+        Expect(controller.needsHandleHotplug(hotplugVolumes, attachmentPods)).To(match)
+    },
+        Entry("nil volumes, nil attachmentPods", nil, nil, BeFalse()),
+        Entry("empty volumes, empty attachmentPods", []*virtv1.Volume{}, []*k8sv1.Pod{}, BeFalse()),
+        Entry("single volume, empty attachmentPods", []*virtv1.Volume{
+            {
+                Name: "test",
+            },
+        }, []*k8sv1.Pod{}, BeTrue()),
+        Entry("no volume, single attachmentPod", []*virtv1.Volume{}, makePods(0), BeTrue()),
+        Entry("matching volume, single attachmentPod", []*virtv1.Volume{
+            {
+                Name: "volume0",
+            },
+        }, makePods(0), BeFalse()),
+        Entry("mismatched volume, single attachmentPod", []*virtv1.Volume{
+            {
+                Name: "invalid",
+            },
+        }, makePods(0), BeTrue()),
+        Entry("matching volume, multiple attachmentPods", []*virtv1.Volume{
+            {
+                Name: "volume0",
+            },
+        }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, BeTrue()),
+    )
+
+    DescribeTable("Should find active and old pods", func(hotplugVolumes []*virtv1.Volume, attachmentPods []*k8sv1.Pod, expectedActive *k8sv1.Pod, expectedOld []*k8sv1.Pod) {
+        active, old := controller.getActiveAndOldAttachmentPods(hotplugVolumes, attachmentPods)
+        Expect(active).To(Equal(expectedActive))
+        Expect(old).To(ContainElements(expectedOld))
+    },
+        Entry("nil volumes, nil attachmentPods", nil, nil, nil, nil),
+        Entry("empty volumes, empty attachmentPods", []*virtv1.Volume{}, []*k8sv1.Pod{}, nil, []*k8sv1.Pod{}),
+        Entry("matching volume, single attachmentPod", []*virtv1.Volume{
+            {
+                Name: "volume0",
+            },
+        }, makePods(0), makePods(0)[0], []*k8sv1.Pod{}),
+        Entry("matching volume, multiple attachmentPods, first pod matches", []*virtv1.Volume{
+            {
+                Name: "volume0",
+            },
+        }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, makePods(0)[0], makePods(1)),
+        Entry("matching volume, multiple attachmentPods, second pod matches", []*virtv1.Volume{
+            {
+                Name: "volume1",
+            },
+        }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, makePods(1)[0], makePods(0)),
+    )
+
     It("Should get default filesystem overhead if there are multiple CDI instances", func() {
         cdi := cdiv1.CDI{
             ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index 942301815..43504d48d 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -508,9 +508,10 @@ func (m *volumeMounter) updateBlockMajorMinor(dev uint64, allow bool, manager cg
 func (m *volumeMounter) createBlockDeviceFile(basePath *safepath.Path, deviceName string, dev uint64, blockDevicePermissions os.FileMode) error {
     if _, err := safepath.JoinNoFollow(basePath, deviceName); errors.Is(err, os.ErrNotExist) {
         return mknodCommand(basePath, deviceName, dev, blockDevicePermissions)
-    } else {
+    } else if err != nil {
         return err
     }
+    return nil
 }
 
 func (m *volumeMounter) mountFileSystemHotplugVolume(vmi *v1.VirtualMachineInstance, volume string, sourceUID types.UID, record *vmiMountTargetRecord, mountDirectory bool) error {
@@ -667,6 +668,10 @@ func (m *volumeMounter) Unmount(vmi *v1.VirtualMachineInstance) error {
         var err error
         if m.isBlockVolume(&vmi.Status, volumeStatus.Name) {
             path, err = safepath.JoinNoFollow(basePath, volumeStatus.Name)
+            if errors.Is(err, os.ErrNotExist) {
+                // already unmounted or never mounted
+                continue
+            }
         } else if m.isDirectoryMounted(&vmi.Status, volumeStatus.Name) {
             path, err = m.hotplugDiskManager.GetFileSystemDirectoryTargetPathFromHostView(virtlauncherUID, volumeStatus.Name, false)
             if os.IsExist(err) {
diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index a85976484..ba9c69100 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -724,6 +724,16 @@ var _ = SIGDescribe("Hotplug", func() {
             for i := range testVolumes {
                 verifyVolumeNolongerAccessible(vmi, targets[i])
             }
+            By("Verifying there are no sync errors")
+            events, err := virtClient.CoreV1().Events(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
+            Expect(err).ToNot(HaveOccurred())
+            for _, event := range events.Items {
+                if event.InvolvedObject.Kind == "VirtualMachineInstance" && event.InvolvedObject.UID == vmi.UID {
+                    if event.Reason == string(v1.SyncFailed) {
+                        Fail(fmt.Sprintf("Found sync failed event %v", event))
+                    }
+                }
+            }
         },
             Entry("with VMs", addDVVolumeVM, removeVolumeVM, corev1.PersistentVolumeFilesystem, false),
             Entry("with VMIs", addDVVolumeVMI, removeVolumeVMI, corev1.PersistentVolumeFilesystem, true),
--
2.41.0

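A note on the ordering this patch enforces: the controller now creates the
replacement attachment pod first and deletes the old ones only once the
replacement is Running (or no hotplug volumes remain), so the kubelet never
sees the volume become unused. A minimal, self-contained sketch of that
decision logic, using simplified stand-in types (Pod, reconcile and matches
are illustrative, not the KubeVirt API):

package main

import "fmt"

// Pod is a simplified stand-in for a hotplug attachment pod.
type Pod struct {
    Name    string
    Running bool
    Volumes []string
}

// reconcile mirrors the patched handleHotplugVolumes flow: report whether a
// replacement pod must be created, and which old pods may be deleted. Old
// pods become deletable only when the replacement is Running, or when no
// hotplug volumes remain at all.
func reconcile(readyVolumes []string, pods []*Pod) (create bool, deletable []*Pod) {
    var current *Pod
    var old []*Pod
    for _, p := range pods {
        if matches(p, readyVolumes) {
            current = p
        } else {
            old = append(old, p)
        }
    }
    if current == nil && len(readyVolumes) > 0 {
        create = true // new attachment pod holding all ready volumes
    }
    if len(readyVolumes) == 0 || (current != nil && current.Running) {
        deletable = old // safe: a ready pod keeps the volume attached
    }
    return create, deletable
}

// matches reports whether the pod carries exactly the given volumes.
func matches(p *Pod, volumes []string) bool {
    if len(p.Volumes) != len(volumes) {
        return false
    }
    seen := map[string]bool{}
    for _, v := range p.Volumes {
        seen[v] = true
    }
    for _, v := range volumes {
        if !seen[v] {
            return false
        }
    }
    return true
}

func main() {
    oldPod := &Pod{Name: "hp-old", Running: true, Volumes: []string{"vol0"}}
    newPod := &Pod{Name: "hp-new", Running: false, Volumes: []string{"vol0", "vol1"}}

    // Replacement exists but is not ready yet: nothing may be deleted.
    _, del := reconcile([]string{"vol0", "vol1"}, []*Pod{oldPod, newPod})
    fmt.Println(len(del)) // 0

    // Once the replacement is running, the old pod becomes deletable.
    newPod.Running = true
    _, del = reconcile([]string{"vol0", "vol1"}, []*Pod{oldPod, newPod})
    fmt.Println(del[0].Name) // hp-old
}
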
From b02ab03f39e7e888c27949d24c0e9b38963d9b6c Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Fri, 11 Aug 2023 15:00:59 -0500
Subject: [PATCH 2/4] Don't generate SyncFailed events caused by a race condition.

Signed-off-by: Alexander Wels <awels@redhat.com>
---
 pkg/virt-handler/hotplug-disk/mount.go | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index 43504d48d..9a5d24747 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -313,12 +313,16 @@ func (m *volumeMounter) mountHotplugVolume(vmi *v1.VirtualMachineInstance, volum
         if m.isBlockVolume(&vmi.Status, volumeName) {
             logger.V(4).Infof("Mounting block volume: %s", volumeName)
             if err := m.mountBlockHotplugVolume(vmi, volumeName, sourceUID, record); err != nil {
-                return fmt.Errorf("failed to mount block hotplug volume %s: %v", volumeName, err)
+                if !errors.Is(err, os.ErrNotExist) {
+                    return fmt.Errorf("failed to mount block hotplug volume %s: %v", volumeName, err)
+                }
             }
         } else {
             logger.V(4).Infof("Mounting file system volume: %s", volumeName)
             if err := m.mountFileSystemHotplugVolume(vmi, volumeName, sourceUID, record, mountDirectory); err != nil {
-                return fmt.Errorf("failed to mount filesystem hotplug volume %s: %v", volumeName, err)
+                if !errors.Is(err, os.ErrNotExist) {
+                    return fmt.Errorf("failed to mount filesystem hotplug volume %s: %v", volumeName, err)
+                }
             }
         }
     }
--
2.41.0

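The pattern behind this patch: a mount attempt that races with a concurrent
unplug can fail with os.ErrNotExist somewhere down the call chain, and that
specific failure is now tolerated instead of being surfaced as a SyncFailed
event; the next reconcile pass sees the volume gone and cleans up. A hedged
sketch of the pattern (mountVolume is a hypothetical stand-in for the real
block/filesystem mounters):

package main

import (
    "errors"
    "fmt"
    "os"
)

// mountVolume simulates the race: the volume was unplugged and its device
// vanished before the mount could happen, yielding a wrapped ErrNotExist.
func mountVolume(name string) error {
    return fmt.Errorf("stat %s: %w", name, os.ErrNotExist)
}

func mountHotplugVolume(name string) error {
    if err := mountVolume(name); err != nil {
        // Propagate only errors that are not "the volume is already gone".
        if !errors.Is(err, os.ErrNotExist) {
            return fmt.Errorf("failed to mount hotplug volume %s: %v", name, err)
        }
    }
    return nil
}

func main() {
    fmt.Println(mountHotplugVolume("volume0")) // <nil>: the race is tolerated
}
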
From 5012469d5179f01f5da9ae7c701949a57fb9d439 Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Fri, 11 Aug 2023 18:04:28 -0500
Subject: [PATCH 3/4] Address code review comments

Signed-off-by: Alexander Wels <awels@redhat.com>
---
 pkg/virt-controller/watch/vmi.go       | 6 ++----
 pkg/virt-handler/hotplug-disk/mount.go | 3 +--
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 99af8b8cb..e031c35a8 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -1816,10 +1816,8 @@ func (c *VMIController) needsHandleHotplug(hotplugVolumes []*virtv1.Volume, hotp
         return true
     }
     // Determine if the ready volumes have changed compared to the current pod
-    for _, attachmentPod := range hotplugAttachmentPods {
-        if c.podVolumesMatchesReadyVolumes(attachmentPod, hotplugVolumes) {
-            return false
-        }
+    if len(hotplugAttachmentPods) == 1 && c.podVolumesMatchesReadyVolumes(hotplugAttachmentPods[0], hotplugVolumes) {
+        return false
     }
     return len(hotplugVolumes) > 0 || len(hotplugAttachmentPods) > 0
 }
diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index 9a5d24747..c0b55046c 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -512,10 +512,9 @@ func (m *volumeMounter) updateBlockMajorMinor(dev uint64, allow bool, manager cg
 func (m *volumeMounter) createBlockDeviceFile(basePath *safepath.Path, deviceName string, dev uint64, blockDevicePermissions os.FileMode) error {
     if _, err := safepath.JoinNoFollow(basePath, deviceName); errors.Is(err, os.ErrNotExist) {
         return mknodCommand(basePath, deviceName, dev, blockDevicePermissions)
-    } else if err != nil {
+    } else {
         return err
     }
-    return nil
 }
 
 func (m *volumeMounter) mountFileSystemHotplugVolume(vmi *v1.VirtualMachineInstance, volume string, sourceUID types.UID, record *vmiMountTargetRecord, mountDirectory bool) error {
--
2.41.0

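The createBlockDeviceFile hunk is easy to misread: the review round restores
the plain else branch because safepath.JoinNoFollow returns a nil error on
success, so "return err" already yields nil when the device node exists, and
the separate "return nil" added earlier was redundant. A stand-in sketch of
that control flow (lookup is hypothetical, not the safepath API):

package main

import (
    "errors"
    "fmt"
    "os"
)

// lookup stands in for safepath.JoinNoFollow: nil on success (the device
// node already exists), os.ErrNotExist when it is missing.
func lookup(exists bool) error {
    if !exists {
        return os.ErrNotExist
    }
    return nil
}

func createBlockDeviceFile(exists bool) error {
    if err := lookup(exists); errors.Is(err, os.ErrNotExist) {
        return nil // would mknod the device node here
    } else {
        // err is nil when the node already exists, so one return covers
        // both "nothing to do" and genuine lookup failures.
        return err
    }
}

func main() {
    fmt.Println(createBlockDeviceFile(true))  // <nil>: node already there
    fmt.Println(createBlockDeviceFile(false)) // <nil>: node created
}
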
From 5abf17fef7ab5433ec7dd155a82b1575660b86d3 Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Mon, 14 Aug 2023 07:58:16 -0500
Subject: [PATCH 4/4] Update pkg/virt-handler/hotplug-disk/mount.go

Co-authored-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Alexander Wels <awels@redhat.com>
---
 pkg/virt-handler/hotplug-disk/mount.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index c0b55046c..b1a11d93f 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -677,7 +677,7 @@ func (m *volumeMounter) Unmount(vmi *v1.VirtualMachineInstance) error {
             }
         } else if m.isDirectoryMounted(&vmi.Status, volumeStatus.Name) {
             path, err = m.hotplugDiskManager.GetFileSystemDirectoryTargetPathFromHostView(virtlauncherUID, volumeStatus.Name, false)
-            if os.IsExist(err) {
+            if errors.Is(err, os.ErrNotExist) {
                 // already unmounted or never mounted
                 continue
             }
--
2.41.0
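Two distinct bugs hide behind this one-line fix: os.IsExist checks for "file
already exists", the opposite of the intended "already unmounted or never
mounted" condition, and the old os.IsExist/os.IsNotExist predicates predate
error wrapping, so they do not match an os.ErrNotExist wrapped via %w, while
errors.Is unwraps the chain. A small demonstration:

package main

import (
    "errors"
    "fmt"
    "os"
)

func main() {
    wrapped := fmt.Errorf("get path: %w", os.ErrNotExist)

    fmt.Println(os.IsExist(wrapped))                // false: wrong predicate entirely
    fmt.Println(os.IsNotExist(wrapped))             // false: does not unwrap %w chains
    fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: matches through wrapping
}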