Accepting request 1118538 from home:vulyanov:branches:Virtualization

- Update to version 1.0.1
  Release notes https://github.com/kubevirt/kubevirt/releases/tag/v1.0.1
- Drop upstreamed patches
  0002-ksm-Access-sysfs-from-the-host-filesystem.patch
  0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch
  0006-isolation-close-file-when-exits.patch
  0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch
  0008-fix-ticker-leak.patch
  0009-tests-Run-helper-pod-as-qemu-107-user.patch
  0010-Fix-PR-leftover-mount-and-perms.patch
  0011-Fix-Aggregated-Discovery.patch
  0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
  0013-Adapt-e2e-tests-to-CDI-1.57.0.patch
  0014-Export-create-populator-compatible-datavolumes-from-.patch
  0015-tests-Delete-VMI-prior-to-NFS-server-pod.patch

OBS-URL: https://build.opensuse.org/request/show/1118538
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=137
Vasily Ulyanov 2023-10-18 08:43:03 +00:00 committed by Git OBS Bridge
parent 0069e18b0a
commit fd01465396
19 changed files with 54 additions and 2782 deletions


@@ -1,4 +1,4 @@
From c4d429f3edffaaf8086f6cd22821b42ce2cf60c0 Mon Sep 17 00:00:00 2001
From de15ee969681bbd95ed0e6a5a460c3b0cba2cf52 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Tue, 4 Jul 2023 13:27:25 +0200
Subject: [PATCH 1/2] Lookup qemu process by executable prefix
@@ -18,7 +18,7 @@ Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
4 files changed, 26 insertions(+), 19 deletions(-)
diff --git a/pkg/virt-handler/isolation/detector.go b/pkg/virt-handler/isolation/detector.go
index 62f920025..a90281824 100644
index 9c282e231..52a1de9ee 100644
--- a/pkg/virt-handler/isolation/detector.go
+++ b/pkg/virt-handler/isolation/detector.go
@@ -185,13 +185,13 @@ func AdjustQemuProcessMemoryLimits(podIsoDetector PodIsolationDetector, vmi *v1.
@@ -136,10 +136,10 @@ index de7d1449d..1a882e129 100644
),
)
--
2.41.0
2.42.0
From 30a544b57182fff64646eede6eba360cd0ceb0c3 Mon Sep 17 00:00:00 2001
From 0254d71c567fef3cd6ce8378eb0540fc93e5666f Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Tue, 4 Jul 2023 14:04:57 +0200
Subject: [PATCH 2/2] tests: Detect the qemu emulator binary in runtime
@@ -152,9 +152,9 @@ Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
tests/realtime/BUILD.bazel | 1 +
tests/realtime/realtime.go | 28 +++++++++++++++++-----------
tests/security_features_test.go | 14 +++++++-------
tests/utils.go | 16 ++++++++++++----
tests/vmi_configuration_test.go | 12 ++++++++----
5 files changed, 45 insertions(+), 26 deletions(-)
tests/utils.go | 12 ++++++++++--
tests/vmi_configuration_test.go | 8 ++++++--
5 files changed, 41 insertions(+), 22 deletions(-)
diff --git a/tests/realtime/BUILD.bazel b/tests/realtime/BUILD.bazel
index 3718467e1..9d66eb7df 100644
@@ -265,7 +265,7 @@ index a9e93161c..0e1249f0d 100644
Expect(err).ToNot(HaveOccurred())
slice = strings.Split(strings.TrimSpace(psOutput), "\n")
diff --git a/tests/security_features_test.go b/tests/security_features_test.go
index cc49ba038..42adfe1d6 100644
index e38291691..98230bbaf 100644
--- a/tests/security_features_test.go
+++ b/tests/security_features_test.go
@@ -129,9 +129,9 @@ var _ = Describe("[Serial][sig-compute]SecurityFeatures", Serial, decorators.Sig
@@ -315,10 +315,10 @@ index cc49ba038..42adfe1d6 100644
By("Verifying SELinux context contains custom type in pod")
diff --git a/tests/utils.go b/tests/utils.go
index 6b5ab7de8..276046454 100644
index c51656291..9984000c4 100644
--- a/tests/utils.go
+++ b/tests/utils.go
@@ -363,10 +363,10 @@ func GetProcessName(pod *k8sv1.Pod, pid string) (output string, err error) {
@@ -360,10 +360,10 @@ func GetProcessName(pod *k8sv1.Pod, pid string) (output string, err error) {
return
}
@@ -326,29 +326,12 @@ index 6b5ab7de8..276046454 100644
+func GetVcpuMask(pod *k8sv1.Pod, emulator, cpu string) (output string, err error) {
virtClient := kubevirt.Client()
- pscmd := "ps -LC qemu-kvm -o lwp,comm| grep \"CPU " + cpu + "\" | cut -f 1 -d \"C\""
+ pscmd := "ps -LC " + emulator + " -o lwp,comm| grep \"CPU " + cpu + "\" | cut -f 1 -d \"C\""
output, err = exec.ExecuteCommandOnPod(
virtClient,
pod,
@@ -383,14 +383,14 @@ func GetVcpuMask(pod *k8sv1.Pod, cpu string) (output string, err error) {
return output, err
}
-func GetKvmPitMask(pod *k8sv1.Pod, nodeName string) (output string, err error) {
+func GetKvmPitMask(pod *k8sv1.Pod, emulator, nodeName string) (output string, err error) {
virtClient := kubevirt.Client()
output, err = exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
- []string{"ps", "-C", "qemu-kvm", "-o", "pid", "--noheader"},
+ []string{"ps", "-C", emulator, "-o", "pid", "--noheader"},
)
Expect(err).ToNot(HaveOccurred())
qemupid := strings.TrimSpace(strings.Trim(output, "\n"))
@@ -1748,6 +1748,14 @@ func GetRunningVMIDomainSpec(vmi *v1.VirtualMachineInstance) (*launcherApi.Domai
- pscmd := `ps -LC qemu-kvm -o lwp,comm | grep "CPU ` + cpu + `" | cut -f1 -dC`
+ pscmd := `ps -LC ` + emulator + ` -o lwp,comm | grep "CPU ` + cpu + `" | cut -f1 -dC`
args := []string{BinBash, "-c", pscmd}
Eventually(func() error {
output, err = exec.ExecuteCommandOnPod(virtClient, pod, "compute", args)
@@ -1746,6 +1746,14 @@ func GetRunningVMIDomainSpec(vmi *v1.VirtualMachineInstance) (*launcherApi.Domai
return &runningVMISpec, err
}
@@ -364,10 +347,10 @@ index 6b5ab7de8..276046454 100644
errChan := make(chan error, 1)
readyChan := make(chan struct{})
diff --git a/tests/vmi_configuration_test.go b/tests/vmi_configuration_test.go
index cfdd1afe1..a574a911d 100644
index 4f120127c..44590fdaa 100644
--- a/tests/vmi_configuration_test.go
+++ b/tests/vmi_configuration_test.go
@@ -2506,18 +2506,22 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
@@ -2639,8 +2639,12 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
&expect.BExp{R: "2"},
}, 15)).To(Succeed())
@@ -376,17 +359,13 @@ index cfdd1afe1..a574a911d 100644
+ emulator = filepath.Base(emulator)
+
virtClient := kubevirt.Client()
- pscmd := []string{"ps", "-C", "qemu-kvm", "-o", "pid", "--noheader"}
+ pscmd := []string{"ps", "-C", emulator, "-o", "pid", "--noheader"}
_, err = exec.ExecuteCommandOnPod(
virtClient, readyPod, "compute", pscmd)
- // do not check for kvm-pit thread if qemu-kvm is not in use
+ // do not check for kvm-pit thread if qemu is not in use
- pidCmd := []string{"pidof", "qemu-kvm"}
+ pidCmd := []string{"pidof", emulator}
qemuPid, err := exec.ExecuteCommandOnPod(virtClient, readyPod, "compute", pidCmd)
// do not check for kvm-pit thread if qemu is not in use
if err != nil {
return
}
- kvmpitmask, err := tests.GetKvmPitMask(readyPod, node)
+ kvmpitmask, err := tests.GetKvmPitMask(readyPod, emulator, node)
@@ -2649,7 +2653,7 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
kvmpitmask, err := tests.GetKvmPitMask(strings.TrimSpace(qemuPid), node)
Expect(err).ToNot(HaveOccurred())
- vcpuzeromask, err := tests.GetVcpuMask(readyPod, "0")
@@ -395,5 +374,5 @@ index cfdd1afe1..a574a911d 100644
Expect(kvmpitmask).To(Equal(vcpuzeromask))
--
2.41.0
2.42.0
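
The two patches above replace the hardcoded qemu-kvm process name with the emulator binary detected at runtime. As a rough standalone illustration (not the tests' actual helpers; the emulator path is just an example value here, whereas the tests read it from the running domain spec):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Illustrative value only; the tests obtain the emulator path from the
	// VMI domain spec at runtime.
	emulatorPath := "/usr/bin/qemu-system-x86_64"
	emulator := filepath.Base(emulatorPath)

	// Same shape as the commands built in tests/utils.go after the change:
	// look the process up by its actual binary name instead of "qemu-kvm".
	pidCmd := []string{"ps", "-C", emulator, "-o", "pid", "--noheader"}
	vcpuCmd := `ps -LC ` + emulator + ` -o lwp,comm | grep "CPU 0" | cut -f1 -dC`

	fmt.Println(pidCmd)
	fmt.Println(vcpuCmd)
}
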


@@ -1,36 +0,0 @@
From b32657feb4529888cb9d233deee8986395469c0f Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 13 Jul 2023 16:24:48 +0200
Subject: [PATCH] ksm: Access sysfs from the host filesystem
In some environments, sysfs is mounted read-only even for privileged
containers. Use the ksm path from the host filesystem.
Related issue: https://github.com/containerd/containerd/issues/8445
Co-authored-by: Luboslav Pivarc <lpivarc@redhat.com>
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Luboslav Pivarc <lpivarc@redhat.com>
---
pkg/virt-handler/node-labeller/node_labeller.go | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/pkg/virt-handler/node-labeller/node_labeller.go b/pkg/virt-handler/node-labeller/node_labeller.go
index f5eba812e..ec1852a34 100644
--- a/pkg/virt-handler/node-labeller/node_labeller.go
+++ b/pkg/virt-handler/node-labeller/node_labeller.go
@@ -50,7 +50,10 @@ import (
"kubevirt.io/kubevirt/pkg/virt-handler/node-labeller/util"
)
-const ksmPath = "/sys/kernel/mm/ksm/run"
+// In some environments, sysfs is mounted read-only even for privileged
+// containers: https://github.com/containerd/containerd/issues/8445.
+// Use the path from the host filesystem.
+const ksmPath = "/proc/1/root/sys/kernel/mm/ksm/run"
var nodeLabellerLabels = []string{
util.DeprecatedLabelNamespace + util.DeprecatedcpuModelPrefix,
--
2.41.0
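
The dropped KSM patch only changes which path is read. A minimal sketch of the resulting access pattern, assuming the handler can reach the host mount namespace through /proc/1/root exactly as the patch does:

package main

import (
	"fmt"
	"os"
	"strings"
)

// Path taken from the patch: sysfs may be mounted read-only inside the
// container, so the KSM control file is reached through the host's
// mount namespace instead.
const ksmPath = "/proc/1/root/sys/kernel/mm/ksm/run"

func ksmEnabled() (bool, error) {
	data, err := os.ReadFile(ksmPath)
	if err != nil {
		return false, err
	}
	return strings.TrimSpace(string(data)) == "1", nil
}

func main() {
	enabled, err := ksmEnabled()
	fmt.Println(enabled, err)
}
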


@@ -1,69 +0,0 @@
From 7eeb9001226bde307420ab6e92a45520ff508ef6 Mon Sep 17 00:00:00 2001
From: Alice Frosi <afrosi@redhat.com>
Date: Mon, 19 Jun 2023 12:41:49 +0200
Subject: [PATCH 1/2] tests: fix error print on Expect
Signed-off-by: Alice Frosi <afrosi@redhat.com>
---
tests/storage/reservation.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/storage/reservation.go b/tests/storage/reservation.go
index a09853060..527f6f42e 100644
--- a/tests/storage/reservation.go
+++ b/tests/storage/reservation.go
@@ -70,7 +70,7 @@ var _ = SIGDescribe("[Serial]SCSI persistent reservation", Serial, func() {
Expect(err).ToNot(HaveOccurred())
stdout, stderr, err := exec.ExecuteCommandOnPodWithResults(virtClient, pod, "targetcli", cmd)
- Expect(err).ToNot(HaveOccurred(), stdout, stderr)
+ Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("command='targetcli %v' stdout='%s' stderr='%s'", args, stdout, stderr))
}
// createSCSIDisk creates a SCSI using targetcli utility and LinuxIO (see
--
2.41.0
From 56b50ec639a6870616d3bc09a21a7c6c3993ec6d Mon Sep 17 00:00:00 2001
From: Alice Frosi <afrosi@redhat.com>
Date: Tue, 20 Jun 2023 12:21:08 +0200
Subject: [PATCH 2/2] tests: leave some space for metadata on the backend PVC
In certain cases, the targetcli backstores/fileio create command fails
with:
Could not expand file to 1073741824 bytes
We can try to avoid this issue by creating a smaller backend image. We
simply hardcoded 800M instead of 1G as in these tests the size of the
disk doesn't matter. This is used to test the SCSI persistent
reservation ioctls.
Signed-off-by: Alice Frosi <afrosi@redhat.com>
---
tests/storage/reservation.go | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/tests/storage/reservation.go b/tests/storage/reservation.go
index 527f6f42e..6410f0d64 100644
--- a/tests/storage/reservation.go
+++ b/tests/storage/reservation.go
@@ -94,10 +94,13 @@ var _ = SIGDescribe("[Serial]SCSI persistent reservation", Serial, func() {
By(fmt.Sprintf("ldconfig: stdout: %v stderr: %v", stdout, stderr))
Expect(err).ToNot(HaveOccurred())
- // Create backend file
+ // Create backend file. Leave some room for metadata and create a
+ // slightly smaller backend image, we use 800M instead of 1G. In
+ // this case, the disk size doesn't matter as the disk is used
+ // mostly to test the SCSI persistent reservation ioctls.
executeTargetCli(podName, []string{
"backstores/fileio",
- "create", backendDisk, "/disks/disk.img", "1G"})
+ "create", backendDisk, "/disks/disk.img", "800M"})
executeTargetCli(podName, []string{
"loopback/", "create", naa})
// Create LUN
--
2.41.0


@@ -1,31 +0,0 @@
From 8ae5fc3c4506c53adc5aae4cd20ad2d9ac4c035a Mon Sep 17 00:00:00 2001
From: grass-lu <284555125@qq.com>
Date: Mon, 24 Jul 2023 15:22:17 +0800
Subject: [PATCH 1/3] isolation: close file when exits
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The file is not closed and the socket connection is not truly closed, so it keeps occupying a connection to libvirtd
Signed-off-by: grass-lu <284555125@qq.com>
---
pkg/virt-handler/isolation/detector.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/pkg/virt-handler/isolation/detector.go b/pkg/virt-handler/isolation/detector.go
index 62f920025..9c282e231 100644
--- a/pkg/virt-handler/isolation/detector.go
+++ b/pkg/virt-handler/isolation/detector.go
@@ -231,6 +231,8 @@ func (s *socketBasedIsolationDetector) getPid(socket string) (int, error) {
if err != nil {
return -1, err
}
+ defer ufile.Close()
+
// This is the tricky part, which will give us the PID of the owning socket
ucreds, err := syscall.GetsockoptUcred(int(ufile.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED)
if err != nil {
--
2.41.0
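
The added defer matters because the isolation detector opens an *os.File for every socket it inspects; without closing it, the descriptor and the underlying libvirtd connection are leaked. A simplified, Linux-only sketch of the SO_PEERCRED lookup with the close in place (a hypothetical helper, not the detector's real code):

package main

import (
	"fmt"
	"net"
	"syscall"
)

// peerPID returns the PID of the process owning the other end of a unix
// socket, closing the duplicated file descriptor when done (the step the
// patch adds with "defer ufile.Close()").
func peerPID(socket string) (int, error) {
	conn, err := net.Dial("unix", socket)
	if err != nil {
		return -1, err
	}
	defer conn.Close()

	ufile, err := conn.(*net.UnixConn).File()
	if err != nil {
		return -1, err
	}
	defer ufile.Close()

	ucreds, err := syscall.GetsockoptUcred(int(ufile.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED)
	if err != nil {
		return -1, err
	}
	return int(ucreds.Pid), nil
}

func main() {
	pid, err := peerPID("/run/example.sock") // example socket path
	fmt.Println(pid, err)
}
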


@@ -1,306 +0,0 @@
From 7a2b9109d82cced1603dfbd35ec7c1afbf3473bb Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Mon, 24 Jul 2023 08:26:04 -0500
Subject: [PATCH 1/2] Fix volume detach on hotplug attachment pod delete
When the hotplug attachment pod is deleted, the VMI
volumestatus goes back to bound, which triggers the
manager to detach the volume from the running VM
interrupting any IO on that volume. The pod is then
re-created and the volume gets re-attached and operation
can continue, but if the volume is mounted, it needs
to be re-mounted in the VM.
This commit modifies the logic so that if the volume is
ready, it cannot go back to bound if the attachment pod
disappears. This prevents the detachments and issues with
IO on the running VM.
Signed-off-by: Alexander Wels <awels@redhat.com>
---
pkg/virt-controller/watch/vmi.go | 19 +--
.../virtwrap/converter/converter.go | 2 +-
tests/storage/hotplug.go | 122 +++++++++++++++++-
3 files changed, 127 insertions(+), 16 deletions(-)
diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 03725ad46..801ffd141 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -2144,13 +2144,14 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
}
attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
if attachmentPod == nil {
- status.HotplugVolume.AttachPodName = ""
- status.HotplugVolume.AttachPodUID = ""
- // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
- phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
- status.Phase = phase
- status.Message = message
- status.Reason = reason
+ if status.Phase != virtv1.VolumeReady {
+ status.HotplugVolume.AttachPodUID = ""
+ // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
+ phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
+ status.Phase = phase
+ status.Message = message
+ status.Reason = reason
+ }
} else {
status.HotplugVolume.AttachPodName = attachmentPod.Name
if len(attachmentPod.Status.ContainerStatuses) == 1 && attachmentPod.Status.ContainerStatuses[0].Ready {
@@ -2239,8 +2240,8 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
}
func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
- return currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
- currentPhase == virtv1.HotplugVolumeAttachedToNode
+ return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
+ currentPhase == virtv1.HotplugVolumeAttachedToNode) && currentPhase != virtv1.VolumeReady
}
func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
diff --git a/pkg/virt-launcher/virtwrap/converter/converter.go b/pkg/virt-launcher/virtwrap/converter/converter.go
index db3c0a903..5c43acd74 100644
--- a/pkg/virt-launcher/virtwrap/converter/converter.go
+++ b/pkg/virt-launcher/virtwrap/converter/converter.go
@@ -1526,7 +1526,7 @@ func Convert_v1_VirtualMachineInstance_To_api_Domain(vmi *v1.VirtualMachineInsta
}
volume := volumes[disk.Name]
if volume == nil {
- return fmt.Errorf("No matching volume with name %s found", disk.Name)
+ return fmt.Errorf("no matching volume with name %s found", disk.Name)
}
if _, ok := c.HotplugVolumes[disk.Name]; !ok {
diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index 45284ed49..a85976484 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -57,7 +57,6 @@ import (
"kubevirt.io/kubevirt/tests/flags"
"kubevirt.io/kubevirt/tests/framework/checks"
"kubevirt.io/kubevirt/tests/framework/matcher"
- . "kubevirt.io/kubevirt/tests/framework/matcher"
"kubevirt.io/kubevirt/tests/libdv"
"kubevirt.io/kubevirt/tests/libnode"
"kubevirt.io/kubevirt/tests/libstorage"
@@ -503,7 +502,7 @@ var _ = SIGDescribe("Hotplug", func() {
dvBlock, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(dvBlock)).Create(context.Background(), dvBlock, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
- libstorage.EventuallyDV(dvBlock, 240, HaveSucceeded())
+ libstorage.EventuallyDV(dvBlock, 240, matcher.HaveSucceeded())
return dvBlock
}
@@ -1120,7 +1119,7 @@ var _ = SIGDescribe("Hotplug", func() {
var err error
url := cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros)
- storageClass, foundSC := libstorage.GetRWXFileSystemStorageClass()
+ storageClass, foundSC := libstorage.GetRWOFileSystemStorageClass()
if !foundSC {
Skip("Skip test when Filesystem storage is not present")
}
@@ -1131,7 +1130,6 @@ var _ = SIGDescribe("Hotplug", func() {
libdv.WithPVC(
libdv.PVCWithStorageClass(storageClass),
libdv.PVCWithVolumeSize("256Mi"),
- libdv.PVCWithReadWriteManyAccessMode(),
),
libdv.WithForceBindAnnotation(),
)
@@ -1140,7 +1138,7 @@ var _ = SIGDescribe("Hotplug", func() {
Expect(err).ToNot(HaveOccurred())
By("waiting for the dv import to pvc to finish")
- libstorage.EventuallyDV(dv, 180, HaveSucceeded())
+ libstorage.EventuallyDV(dv, 180, matcher.HaveSucceeded())
By("rename disk image on PVC")
pvc, err := virtClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.Background(), dv.Name, metav1.GetOptions{})
@@ -1171,6 +1169,118 @@ var _ = SIGDescribe("Hotplug", func() {
})
})
+ Context("delete attachment pod several times", func() {
+ var (
+ vm *v1.VirtualMachine
+ hpvolume *cdiv1.DataVolume
+ )
+
+ BeforeEach(func() {
+ if !libstorage.HasCDI() {
+ Skip("Skip tests when CDI is not present")
+ }
+ _, foundSC := libstorage.GetRWXBlockStorageClass()
+ if !foundSC {
+ Skip("Skip test when block RWX storage is not present")
+ }
+ })
+
+ AfterEach(func() {
+ if vm != nil {
+ err := virtClient.VirtualMachine(vm.Namespace).Delete(context.Background(), vm.Name, &metav1.DeleteOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ vm = nil
+ }
+ })
+
+ deleteAttachmentPod := func(vmi *v1.VirtualMachineInstance) {
+ podName := ""
+ for _, volume := range vmi.Status.VolumeStatus {
+ if volume.HotplugVolume != nil {
+ podName = volume.HotplugVolume.AttachPodName
+ break
+ }
+ }
+ Expect(podName).ToNot(BeEmpty())
+ foreGround := metav1.DeletePropagationForeground
+ err := virtClient.CoreV1().Pods(vmi.Namespace).Delete(context.Background(), podName, metav1.DeleteOptions{
+ GracePeriodSeconds: pointer.Int64(0),
+ PropagationPolicy: &foreGround,
+ })
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func() bool {
+ _, err := virtClient.CoreV1().Pods(vmi.Namespace).Get(context.Background(), podName, metav1.GetOptions{})
+ return errors.IsNotFound(err)
+ }, 300*time.Second, 1*time.Second).Should(BeTrue())
+ }
+
+ It("should remain active", func() {
+ checkVolumeName := "checkvolume"
+ volumeMode := corev1.PersistentVolumeBlock
+ addVolumeFunc := addDVVolumeVMI
+ var err error
+ storageClass, _ := libstorage.GetRWXBlockStorageClass()
+
+ blankDv := func() *cdiv1.DataVolume {
+ return libdv.NewDataVolume(
+ libdv.WithNamespace(testsuite.GetTestNamespace(nil)),
+ libdv.WithBlankImageSource(),
+ libdv.WithPVC(
+ libdv.PVCWithStorageClass(storageClass),
+ libdv.PVCWithVolumeSize(cd.BlankVolumeSize),
+ libdv.PVCWithReadWriteManyAccessMode(),
+ libdv.PVCWithVolumeMode(volumeMode),
+ ),
+ libdv.WithForceBindAnnotation(),
+ )
+ }
+ vmi := libvmi.NewCirros()
+ vm := tests.NewRandomVirtualMachine(vmi, true)
+ vm, err = virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
+ Expect(err).ToNot(HaveOccurred())
+
+ Eventually(func() bool {
+ vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ return vm.Status.Ready
+ }, 300*time.Second, 1*time.Second).Should(BeTrue())
+ By("creating blank hotplug volumes")
+ hpvolume = blankDv()
+ dv, err := virtClient.CdiClient().CdiV1beta1().DataVolumes(hpvolume.Namespace).Create(context.Background(), hpvolume, metav1.CreateOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ By("waiting for the dv import to pvc to finish")
+ libstorage.EventuallyDV(dv, 180, matcher.HaveSucceeded())
+ vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+
+ By("hotplugging the volume check volume")
+ addVolumeFunc(vmi.Name, vmi.Namespace, checkVolumeName, hpvolume.Name, v1.DiskBusSCSI, false, "")
+ vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ verifyVolumeAndDiskVMIAdded(virtClient, vmi, checkVolumeName)
+ verifyVolumeStatus(vmi, v1.VolumeReady, "", checkVolumeName)
+ getVmiConsoleAndLogin(vmi)
+
+ By("verifying the volume is useable and creating some data on it")
+ verifyHotplugAttachedAndUseable(vmi, []string{checkVolumeName})
+ targets := getTargetsFromVolumeStatus(vmi, checkVolumeName)
+ Expect(targets).ToNot(BeEmpty())
+ verifyWriteReadData(vmi, targets[0])
+ vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ By("deleting the attachment pod a few times, try to make the currently attach volume break")
+ for i := 0; i < 10; i++ {
+ deleteAttachmentPod(vmi)
+ vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ }
+ By("verifying the volume has not been disturbed in the VM")
+ targets = getTargetsFromVolumeStatus(vmi, checkVolumeName)
+ Expect(targets).ToNot(BeEmpty())
+ verifyWriteReadData(vmi, targets[0])
+ })
+ })
+
Context("with limit range in namespace", func() {
var (
sc string
@@ -1195,7 +1305,7 @@ var _ = SIGDescribe("Hotplug", func() {
vm.Spec.Template.Spec.Domain.Resources.Limits = corev1.ResourceList{}
vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceMemory] = *memLimitQuantity
vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceCPU] = *cpuLimitQuantity
- vm.Spec.Running = pointer.BoolPtr(true)
+ vm.Spec.Running = pointer.Bool(true)
vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
--
2.41.0
From 14854d800acaf6c17a487b60d28d4eb32bb8d9d2 Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Tue, 25 Jul 2023 07:20:13 -0500
Subject: [PATCH 2/2] Address code review comments
Remove unneeded phase check, and move other check into
its own function in case we need more elaborate checks
Signed-off-by: Alexander Wels <awels@redhat.com>
---
pkg/virt-controller/watch/vmi.go | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 801ffd141..9afaee4f0 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -2144,9 +2144,9 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
}
attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
if attachmentPod == nil {
- if status.Phase != virtv1.VolumeReady {
+ if !c.volumeReady(status.Phase) {
status.HotplugVolume.AttachPodUID = ""
- // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
+ // Volume is not hotplugged in VM and Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
status.Phase = phase
status.Message = message
@@ -2216,6 +2216,10 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
return nil
}
+func (c *VMIController) volumeReady(phase virtv1.VolumePhase) bool {
+ return phase == virtv1.VolumeReady
+}
+
func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim) (cdiv1.Percent, error) {
// To avoid conflicts, we only allow having one CDI instance
if cdiInstances := len(c.cdiInformer.GetStore().List()); cdiInstances != 1 {
@@ -2241,7 +2245,7 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
- currentPhase == virtv1.HotplugVolumeAttachedToNode) && currentPhase != virtv1.VolumeReady
+ currentPhase == virtv1.HotplugVolumeAttachedToNode)
}
func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
--
2.41.0
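
Taken together, the two commits above reduce to one rule: once a hotplug volume has reached the Ready phase, a missing attachment pod no longer resets its phase, so no detach is triggered and IO in the running VM is not interrupted. A reduced sketch of that rule using local stand-in types rather than the controller's real signatures:

package main

import "fmt"

// Local stand-ins for the virtv1 volume phases referenced in the patch.
type VolumePhase string

const (
	VolumeBound                 VolumePhase = "Bound"
	VolumePending               VolumePhase = "Pending"
	HotplugVolumeAttachedToNode VolumePhase = "HotplugVolumeAttachedToNode"
	VolumeReady                 VolumePhase = "Ready"
)

func volumeReady(phase VolumePhase) bool {
	return phase == VolumeReady
}

// shouldResetPhase reports whether a volume whose attachment pod is gone
// should be re-phased from the PVC state. Ready volumes are left alone so
// the running VM keeps the attached volume.
func shouldResetPhase(attachmentPodExists bool, phase VolumePhase) bool {
	return !attachmentPodExists && !volumeReady(phase)
}

func main() {
	fmt.Println(shouldResetPhase(false, VolumeReady)) // false: keep the volume attached
	fmt.Println(shouldResetPhase(false, VolumeBound)) // true: recompute from the PVC
}
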


@@ -1,128 +0,0 @@
From f18f0988669908b4bdc5c4152fc7f5863d5ef3cd Mon Sep 17 00:00:00 2001
From: rokkiter <101091030+rokkiter@users.noreply.github.com>
Date: Mon, 31 Jul 2023 16:51:08 +0800
Subject: [PATCH 1/2] fix ticker leak
Signed-off-by: rokkiter <101091030+rokkiter@users.noreply.github.com>
---
cmd/virt-launcher/virt-launcher.go | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/cmd/virt-launcher/virt-launcher.go b/cmd/virt-launcher/virt-launcher.go
index e8d7c553f..c682c1bc6 100644
--- a/cmd/virt-launcher/virt-launcher.go
+++ b/cmd/virt-launcher/virt-launcher.go
@@ -237,14 +237,18 @@ func detectDomainWithUUID(domainManager virtwrap.DomainManager) *api.Domain {
func waitForDomainUUID(timeout time.Duration, events chan watch.Event, stop chan struct{}, domainManager virtwrap.DomainManager) *api.Domain {
- ticker := time.NewTicker(timeout).C
- checkEarlyExit := time.NewTicker(time.Second * 2).C
- domainCheckTicker := time.NewTicker(time.Second * 10).C
+ ticker := time.NewTicker(timeout)
+ defer ticker.Stop()
+ checkEarlyExit := time.NewTicker(time.Second * 2)
+ defer checkEarlyExit.Stop()
+ domainCheckTicker := time.NewTicker(time.Second * 10)
+ defer domainCheckTicker.Stop()
+
for {
select {
- case <-ticker:
+ case <-ticker.C:
panic(fmt.Errorf("timed out waiting for domain to be defined"))
- case <-domainCheckTicker:
+ case <-domainCheckTicker.C:
log.Log.V(3).Infof("Periodically checking for domain with UUID")
domain := detectDomainWithUUID(domainManager)
if domain != nil {
@@ -258,7 +262,7 @@ func waitForDomainUUID(timeout time.Duration, events chan watch.Event, stop chan
}
case <-stop:
return nil
- case <-checkEarlyExit:
+ case <-checkEarlyExit.C:
if cmdserver.ReceivedEarlyExitSignal() {
panic(fmt.Errorf("received early exit signal"))
}
--
2.41.0
From e8941fad18234293e10fd3b968491bc4d22a6c09 Mon Sep 17 00:00:00 2001
From: rokkiter <101091030+rokkiter@users.noreply.github.com>
Date: Wed, 2 Aug 2023 10:18:39 +0800
Subject: [PATCH 2/2] fix ticker leak
Signed-off-by: rokkiter <101091030+rokkiter@users.noreply.github.com>
---
pkg/monitoring/domainstats/downwardmetrics/scraper.go | 1 +
pkg/virt-handler/retry_manager.go | 1 +
pkg/virt-launcher/monitor.go | 3 +--
pkg/virt-launcher/virtwrap/agent-poller/agent_poller.go | 2 +-
4 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/pkg/monitoring/domainstats/downwardmetrics/scraper.go b/pkg/monitoring/domainstats/downwardmetrics/scraper.go
index 8ca6ae4c3..1ca2465cf 100644
--- a/pkg/monitoring/domainstats/downwardmetrics/scraper.go
+++ b/pkg/monitoring/domainstats/downwardmetrics/scraper.go
@@ -149,6 +149,7 @@ func RunDownwardMetricsCollector(context context.Context, nodeName string, vmiIn
go func() {
ticker := time.NewTicker(DownwardmetricsRefreshDuration)
+ defer ticker.Stop()
for {
select {
case <-ticker.C:
diff --git a/pkg/virt-handler/retry_manager.go b/pkg/virt-handler/retry_manager.go
index dc14fb66d..ba549a157 100644
--- a/pkg/virt-handler/retry_manager.go
+++ b/pkg/virt-handler/retry_manager.go
@@ -145,6 +145,7 @@ func (f *FailRetryManager) ShouldDelay(key string, isFailure func() bool) (bool,
// Run starts the manager.
func (f *FailRetryManager) Run(stopCh chan struct{}) {
ticker := time.NewTicker(f.maxWait)
+ defer ticker.Stop()
for {
select {
case <-ticker.C:
diff --git a/pkg/virt-launcher/monitor.go b/pkg/virt-launcher/monitor.go
index 6620ac4df..d82c5c669 100644
--- a/pkg/virt-launcher/monitor.go
+++ b/pkg/virt-launcher/monitor.go
@@ -181,7 +181,7 @@ func (mon *monitor) monitorLoop(startTimeout time.Duration, signalStopChan chan
log.Log.Infof("Monitoring loop: rate %v start timeout %s", rate, timeoutRepr)
ticker := time.NewTicker(rate)
-
+ defer ticker.Stop()
mon.isDone = false
mon.timeout = startTimeout
mon.start = time.Now()
@@ -200,7 +200,6 @@ func (mon *monitor) monitorLoop(startTimeout time.Duration, signalStopChan chan
}
}
- ticker.Stop()
}
func (mon *monitor) RunForever(startTimeout time.Duration, signalStopChan chan struct{}) {
diff --git a/pkg/virt-launcher/virtwrap/agent-poller/agent_poller.go b/pkg/virt-launcher/virtwrap/agent-poller/agent_poller.go
index 83265f3e5..ba681b204 100644
--- a/pkg/virt-launcher/virtwrap/agent-poller/agent_poller.go
+++ b/pkg/virt-launcher/virtwrap/agent-poller/agent_poller.go
@@ -243,10 +243,10 @@ func (p *PollerWorker) Poll(execAgentCommands agentCommandsExecutor, closeChan c
}
ticker := time.NewTicker(pollInterval)
+ defer ticker.Stop()
for {
select {
case <-closeChan:
- ticker.Stop()
return
case <-ticker.C:
execAgentCommands(p.AgentCommands)
--
2.41.0
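
Both commits fix the same Go pitfall: time.NewTicker(...).C discards the *time.Ticker, so it can never be stopped and its underlying timer leaks. The corrected pattern in isolation:

package main

import (
	"fmt"
	"time"
)

func waitWithTimeout(timeout time.Duration, stop <-chan struct{}) error {
	// Keep the ticker itself, not just its channel, so it can be stopped.
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	poll := time.NewTicker(2 * time.Second)
	defer poll.Stop()

	for {
		select {
		case <-ticker.C:
			return fmt.Errorf("timed out")
		case <-poll.C:
			fmt.Println("periodic check")
		case <-stop:
			return nil
		}
	}
}

func main() {
	stop := make(chan struct{})
	go func() {
		time.Sleep(3 * time.Second)
		close(stop)
	}()
	fmt.Println(waitWithTimeout(10*time.Second, stop))
}
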


@@ -1,40 +0,0 @@
From 1cfcbff44f6310628769445fad570a8ccd18fe22 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 3 Aug 2023 13:43:51 +0200
Subject: [PATCH] tests: Run helper pod as qemu (107) user
The helper pod needs permissions to access the PVC data. In most cases,
it is owned by the qemu (107) user.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
tests/libstorage/pvc.go | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/tests/libstorage/pvc.go b/tests/libstorage/pvc.go
index f2dbdf8d3..b9157eac1 100644
--- a/tests/libstorage/pvc.go
+++ b/tests/libstorage/pvc.go
@@ -52,7 +52,7 @@ const (
func RenderPodWithPVC(name string, cmd []string, args []string, pvc *k8sv1.PersistentVolumeClaim) *k8sv1.Pod {
volumeName := "disk0"
- nonRootUser := int64(1042)
+ nonRootUser := int64(107)
// Change to 'pod := RenderPod(name, cmd, args)' once we have a libpod package
pod := &k8sv1.Pod{
@@ -102,6 +102,10 @@ func RenderPodWithPVC(name string, cmd []string, args []string, pvc *k8sv1.Persi
if volumeMode != nil && *volumeMode == k8sv1.PersistentVolumeBlock {
pod.Spec.Containers[0].VolumeDevices = addVolumeDevices(volumeName)
} else {
+ if pod.Spec.SecurityContext == nil {
+ pod.Spec.SecurityContext = &k8sv1.PodSecurityContext{}
+ }
+ pod.Spec.SecurityContext.FSGroup = &nonRootUser
pod.Spec.Containers[0].VolumeMounts = addVolumeMounts(volumeName)
}
--
2.41.0
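
The change is about volume ownership: with fsGroup set to 107 the kubelet chowns the mounted PVC contents so the non-root qemu user in the helper pod can read them. A minimal sketch using the upstream core/v1 types (the pod here is a bare placeholder, not the output of the test's RenderPodWithPVC):

package main

import (
	"fmt"

	k8sv1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

func withQemuFSGroup(pod *k8sv1.Pod) {
	if pod.Spec.SecurityContext == nil {
		pod.Spec.SecurityContext = &k8sv1.PodSecurityContext{}
	}
	// 107 is the qemu user/group, matching the ownership of most PVC data.
	pod.Spec.SecurityContext.FSGroup = pointer.Int64(107)
}

func main() {
	pod := &k8sv1.Pod{}
	withQemuFSGroup(pod)
	fmt.Println(*pod.Spec.SecurityContext.FSGroup)
}
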


@@ -1,574 +0,0 @@
From dd782727364aaa2f2914b86ab21bd6ed34c8db7e Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 27 Jul 2023 09:15:31 +0200
Subject: [PATCH 1/8] Drop redundant use of fmt.Sprintf
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
pkg/storage/reservation/pr.go | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/pkg/storage/reservation/pr.go b/pkg/storage/reservation/pr.go
index 5ab0dec4c..afda2c8b4 100644
--- a/pkg/storage/reservation/pr.go
+++ b/pkg/storage/reservation/pr.go
@@ -1,7 +1,6 @@
package reservation
import (
- "fmt"
"path/filepath"
v1 "kubevirt.io/api/core/v1"
@@ -20,15 +19,15 @@ func GetPrResourceName() string {
}
func GetPrHelperSocketDir() string {
- return fmt.Sprintf(filepath.Join(sourceDaemonsPath, prHelperDir))
+ return filepath.Join(sourceDaemonsPath, prHelperDir)
}
func GetPrHelperHostSocketDir() string {
- return fmt.Sprintf(filepath.Join(hostSourceDaemonsPath, prHelperDir))
+ return filepath.Join(hostSourceDaemonsPath, prHelperDir)
}
func GetPrHelperSocketPath() string {
- return fmt.Sprintf(filepath.Join(GetPrHelperSocketDir(), prHelperSocket))
+ return filepath.Join(GetPrHelperSocketDir(), prHelperSocket)
}
func GetPrHelperSocket() string {
--
2.41.0
From b0e7d191686d90a61143beb73dd97e773d5d21de Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 27 Jul 2023 09:18:36 +0200
Subject: [PATCH 2/8] Run pr-helper container as qemu (107) user
The ownership of the /var/run/kubevirt/daemons/pr directory is currently
set to 107:107 while by default the container is run under a non-root
user 1001 (which does not have write permissions to that directory).
Since the container is privileged, qemu-pr-helper initially has the
capabilities to create the socket in that directory. However, after the
daemon has been initialized, it drops the capabilities and this
eventually leads to 'Permission denied' error when the daemon tries to
remove the socket during termination. Running the container under qemu
user ensures the cleanup is done properly.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
pkg/virt-operator/resource/generate/components/BUILD.bazel | 1 +
pkg/virt-operator/resource/generate/components/daemonsets.go | 2 ++
2 files changed, 3 insertions(+)
diff --git a/pkg/virt-operator/resource/generate/components/BUILD.bazel b/pkg/virt-operator/resource/generate/components/BUILD.bazel
index 0f4625a44..4f0046de0 100644
--- a/pkg/virt-operator/resource/generate/components/BUILD.bazel
+++ b/pkg/virt-operator/resource/generate/components/BUILD.bazel
@@ -22,6 +22,7 @@ go_library(
"//pkg/certificates/triple:go_default_library",
"//pkg/certificates/triple/cert:go_default_library",
"//pkg/storage/reservation:go_default_library",
+ "//pkg/util:go_default_library",
"//pkg/virt-operator/util:go_default_library",
"//staging/src/kubevirt.io/api/clone:go_default_library",
"//staging/src/kubevirt.io/api/clone/v1alpha1:go_default_library",
diff --git a/pkg/virt-operator/resource/generate/components/daemonsets.go b/pkg/virt-operator/resource/generate/components/daemonsets.go
index 9066fd23a..c254f1ff2 100644
--- a/pkg/virt-operator/resource/generate/components/daemonsets.go
+++ b/pkg/virt-operator/resource/generate/components/daemonsets.go
@@ -13,6 +13,7 @@ import (
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/storage/reservation"
+ "kubevirt.io/kubevirt/pkg/util"
operatorutil "kubevirt.io/kubevirt/pkg/virt-operator/util"
)
@@ -41,6 +42,7 @@ func RenderPrHelperContainer(image string, pullPolicy corev1.PullPolicy) corev1.
},
},
SecurityContext: &corev1.SecurityContext{
+ RunAsUser: pointer.Int64(util.NonRootUID),
Privileged: pointer.Bool(true),
},
}
--
2.41.0
From 3ddd3d783dcab7100041f8434157adf98042978c Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 27 Jul 2023 10:38:52 +0200
Subject: [PATCH 3/8] Do not mount pr-helper-socket-vol to virt-handler
It turns out that having two host path volumes originating at the same
root (e.g. /var/run/kubevirt and /var/run/kubevirt/daemons/pr) in a pod
and bind-mounted with bidirectional propagation to a container leads to
side effects. That creates additional mount points on the host that are
not cleaned up afterward:
$ mount | grep daemon
tmpfs on /run/kubevirt/daemons/pr type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
tmpfs on /run/kubevirt/daemons/pr type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
tmpfs on /run/kubevirt/daemons/pr type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
Since the virt-handler container already has the host path volume
/var/run/kubevirt mounted, it can be used to access the pr-helper
socket at /var/run/kubevirt/daemons/pr.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
.../resource/generate/components/daemonsets.go | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/pkg/virt-operator/resource/generate/components/daemonsets.go b/pkg/virt-operator/resource/generate/components/daemonsets.go
index c254f1ff2..229b8e24e 100644
--- a/pkg/virt-operator/resource/generate/components/daemonsets.go
+++ b/pkg/virt-operator/resource/generate/components/daemonsets.go
@@ -276,9 +276,6 @@ func NewHandlerDaemonSet(namespace, repository, imagePrefix, version, launcherVe
{"kubelet-pods", kubeletPodsPath, kubeletPodsPath, &bidi},
{"node-labeller", "/var/lib/kubevirt-node-labeller", "/var/lib/kubevirt-node-labeller", nil},
}
- if enablePrHelper {
- volumes = append(volumes, volume{prVolumeName, reservation.GetPrHelperSocketDir(), reservation.GetPrHelperSocketDir(), &bidi})
- }
for _, volume := range volumes {
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
@@ -328,6 +325,16 @@ func NewHandlerDaemonSet(namespace, repository, imagePrefix, version, launcherVe
}
if enablePrHelper {
+ directoryOrCreate := corev1.HostPathDirectoryOrCreate
+ pod.Volumes = append(pod.Volumes, corev1.Volume{
+ Name: prVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: reservation.GetPrHelperSocketDir(),
+ Type: &directoryOrCreate,
+ },
+ },
+ })
pod.Containers = append(pod.Containers, RenderPrHelperContainer(prHelperImage, pullPolicy))
}
return daemonset, nil
--
2.41.0
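
For reference, the volume added above in isolation, built with the upstream core/v1 types; the path is written as a literal here, whereas the patch derives it from reservation.GetPrHelperSocketDir():

package main

import (
	"fmt"

	k8sv1 "k8s.io/api/core/v1"
)

// prHelperSocketVolume builds the dedicated hostPath volume for the pr-helper
// socket directory, created on demand and no longer shared with virt-handler.
func prHelperSocketVolume() k8sv1.Volume {
	directoryOrCreate := k8sv1.HostPathDirectoryOrCreate
	return k8sv1.Volume{
		Name: "pr-helper-socket-vol",
		VolumeSource: k8sv1.VolumeSource{
			HostPath: &k8sv1.HostPathVolumeSource{
				// In the patch: reservation.GetPrHelperSocketDir()
				Path: "/var/run/kubevirt/daemons/pr",
				Type: &directoryOrCreate,
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", prHelperSocketVolume())
}
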
From dd7807a4b3f03cee76965e5273e1ea5381b41b7a Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 27 Jul 2023 11:15:19 +0200
Subject: [PATCH 4/8] tests: Ensure proper cleanup (scsi reservation)
Check that after PersistentReservation feature gate is disabled, no
mount points or socket files are left behind.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
tests/storage/BUILD.bazel | 1 +
tests/storage/reservation.go | 12 ++++++++++++
2 files changed, 13 insertions(+)
diff --git a/tests/storage/BUILD.bazel b/tests/storage/BUILD.bazel
index f605404fc..21414efbd 100644
--- a/tests/storage/BUILD.bazel
+++ b/tests/storage/BUILD.bazel
@@ -22,6 +22,7 @@ go_library(
"//pkg/apimachinery/patch:go_default_library",
"//pkg/certificates/triple/cert:go_default_library",
"//pkg/host-disk:go_default_library",
+ "//pkg/storage/reservation:go_default_library",
"//pkg/storage/types:go_default_library",
"//pkg/virt-config:go_default_library",
"//pkg/virt-launcher/virtwrap/converter:go_default_library",
diff --git a/tests/storage/reservation.go b/tests/storage/reservation.go
index a09853060..e233e53e4 100644
--- a/tests/storage/reservation.go
+++ b/tests/storage/reservation.go
@@ -17,12 +17,14 @@ import (
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
+ "kubevirt.io/kubevirt/pkg/storage/reservation"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/tests"
"kubevirt.io/kubevirt/tests/console"
"kubevirt.io/kubevirt/tests/exec"
"kubevirt.io/kubevirt/tests/flags"
"kubevirt.io/kubevirt/tests/framework/checks"
+ "kubevirt.io/kubevirt/tests/libnode"
"kubevirt.io/kubevirt/tests/libstorage"
"kubevirt.io/kubevirt/tests/libvmi"
"kubevirt.io/kubevirt/tests/libwait"
@@ -295,6 +297,16 @@ var _ = SIGDescribe("[Serial]SCSI persistent reservation", Serial, func() {
}
return len(ds.Spec.Template.Spec.Containers) == 1
}, time.Minute*5, time.Second*2).Should(BeTrue())
+
+ nodes := libnode.GetAllSchedulableNodes(virtClient)
+ for _, node := range nodes.Items {
+ output, err := tests.ExecuteCommandInVirtHandlerPod(node.Name, []string{"mount"})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(output).ToNot(ContainSubstring("kubevirt/daemons/pr"))
+ output, err = tests.ExecuteCommandInVirtHandlerPod(node.Name, []string{"ls", reservation.GetPrHelperSocketDir()})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(output).To(BeEmpty())
+ }
})
})
--
2.41.0
From fac107640550d1b9a10150ed355087b0d8a39540 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 27 Jul 2023 13:42:42 +0200
Subject: [PATCH 5/8] tests: Ensure KubeVirt is ready (scsi reservation)
Switching the PersistentReservation feature gate on/off causes
redeployment of all the components. Ensure KubeVirt is ready before
moving on.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
tests/storage/reservation.go | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/tests/storage/reservation.go b/tests/storage/reservation.go
index e233e53e4..ef775baed 100644
--- a/tests/storage/reservation.go
+++ b/tests/storage/reservation.go
@@ -208,6 +208,10 @@ var _ = SIGDescribe("[Serial]SCSI persistent reservation", Serial, func() {
pv, pvc, err = tests.CreatePVandPVCwithSCSIDisk(node, device, util.NamespaceTestDefault, "scsi-disks", "scsipv", "scsipvc")
Expect(err).ToNot(HaveOccurred())
waitForVirtHandlerWithPrHelperReadyOnNode(node)
+ // Switching the PersistentReservation feature gate on/off
+ // causes redeployment of all KubeVirt components.
+ By("Ensuring all KubeVirt components are ready")
+ testsuite.EnsureKubevirtReady()
})
AfterEach(func() {
@@ -298,6 +302,11 @@ var _ = SIGDescribe("[Serial]SCSI persistent reservation", Serial, func() {
return len(ds.Spec.Template.Spec.Containers) == 1
}, time.Minute*5, time.Second*2).Should(BeTrue())
+ // Switching the PersistentReservation feature gate on/off
+ // causes redeployment of all KubeVirt components.
+ By("Ensuring all KubeVirt components are ready")
+ testsuite.EnsureKubevirtReady()
+
nodes := libnode.GetAllSchedulableNodes(virtClient)
for _, node := range nodes.Items {
output, err := tests.ExecuteCommandInVirtHandlerPod(node.Name, []string{"mount"})
--
2.41.0
From bb55f6403e8714e116e97f6cfeff3ca086863286 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Tue, 1 Aug 2023 12:47:22 +0200
Subject: [PATCH 6/8] Support relabeling of unix sockets
An attempt to open a UNIX domain socket returns ENXIO making it hard to
obtain a file descriptor. Instead, manage the selinux label attributes
using the functions that work with file paths.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
cmd/virt-chroot/BUILD.bazel | 1 +
cmd/virt-chroot/selinux.go | 21 +++++++++++++++++++--
2 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/cmd/virt-chroot/BUILD.bazel b/cmd/virt-chroot/BUILD.bazel
index 250a25bf2..fd26041a0 100644
--- a/cmd/virt-chroot/BUILD.bazel
+++ b/cmd/virt-chroot/BUILD.bazel
@@ -17,6 +17,7 @@ go_library(
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2:go_default_library",
"//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
+ "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/github.com/vishvananda/netlink:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
diff --git a/cmd/virt-chroot/selinux.go b/cmd/virt-chroot/selinux.go
index b8bb3976f..e2c4a4aba 100644
--- a/cmd/virt-chroot/selinux.go
+++ b/cmd/virt-chroot/selinux.go
@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
+ "github.com/opencontainers/selinux/go-selinux"
"github.com/spf13/cobra"
"golang.org/x/sys/unix"
@@ -62,10 +63,15 @@ func RelabelCommand() *cobra.Command {
if err != nil {
return fmt.Errorf("could not open file %v. Reason: %v", safePath, err)
}
-
defer fd.Close()
filePath := fd.SafePath()
+ if fileInfo, err := safepath.StatAtNoFollow(safePath); err != nil {
+ return fmt.Errorf("could not stat file %v. Reason: %v", safePath, err)
+ } else if (fileInfo.Mode() & os.ModeSocket) != 0 {
+ return relabelUnixSocket(filePath, label)
+ }
+
writeableFD, err := os.OpenFile(filePath, os.O_APPEND|unix.S_IWRITE, os.ModePerm)
if err != nil {
return fmt.Errorf("error reopening file %s to write label %s. Reason: %v", filePath, label, err)
@@ -74,7 +80,7 @@ func RelabelCommand() *cobra.Command {
currentFileLabel, err := getLabel(writeableFD)
if err != nil {
- return fmt.Errorf("faild to get selinux label for file %v: %v", filePath, err)
+ return fmt.Errorf("failed to get selinux label for file %v: %v", filePath, err)
}
if currentFileLabel != label {
@@ -108,3 +114,14 @@ func getLabel(file *os.File) (string, error) {
}
return string(buffer[:labelLength]), nil
}
+
+func relabelUnixSocket(filePath, label string) error {
+ if currentLabel, err := selinux.FileLabel(filePath); err != nil {
+ return fmt.Errorf("could not retrieve label of file %s. Reason: %v", filePath, err)
+ } else if currentLabel != label {
+ if err := unix.Setxattr(filePath, xattrNameSelinux, []byte(label), 0); err != nil {
+ return fmt.Errorf("error relabeling file %s with label %s. Reason: %v", filePath, label, err)
+ }
+ }
+ return nil
+}
--
2.41.0
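
The commit works around open(2) returning ENXIO for unix sockets by reading and writing the label by path instead of through a file descriptor. A stripped-down sketch of that path-based relabeling; the "security.selinux" attribute name is an assumption about what the patch's xattrNameSelinux constant expands to:

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux"
	"golang.org/x/sys/unix"
)

// relabelUnixSocket sets the SELinux label on a socket by path, skipping the
// write if the label already matches (mirroring the logic in the patch).
func relabelUnixSocket(filePath, label string) error {
	currentLabel, err := selinux.FileLabel(filePath)
	if err != nil {
		return fmt.Errorf("could not retrieve label of file %s: %v", filePath, err)
	}
	if currentLabel == label {
		return nil
	}
	if err := unix.Setxattr(filePath, "security.selinux", []byte(label), 0); err != nil {
		return fmt.Errorf("error relabeling file %s with label %s: %v", filePath, label, err)
	}
	return nil
}

func main() {
	// Example path and label; the real values come from
	// reservation.GetPrHelperSocketPath() and util.UnprivilegedContainerSELinuxLabel.
	err := relabelUnixSocket("/var/run/kubevirt/daemons/pr/example.sock",
		"system_u:object_r:container_file_t:s0")
	fmt.Println(err)
}
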
From 2867dd61c3cdb65c7a195e37c2064a23b285bcee Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Tue, 1 Aug 2023 13:04:25 +0200
Subject: [PATCH 7/8] Relabel PR helper socket in device plugin
This will ensure that a proper selinux label is set on the socket when
it is allocated to a VM pod.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
cmd/virt-handler/virt-handler.go | 15 +--------------
pkg/util/util.go | 16 +++++++++-------
pkg/virt-handler/device-manager/socket_device.go | 9 +++++++++
3 files changed, 19 insertions(+), 21 deletions(-)
diff --git a/cmd/virt-handler/virt-handler.go b/cmd/virt-handler/virt-handler.go
index f0e379b7f..6a915d9ba 100644
--- a/cmd/virt-handler/virt-handler.go
+++ b/cmd/virt-handler/virt-handler.go
@@ -129,8 +129,6 @@ const (
// Default network-status downward API file path
defaultNetworkStatusFilePath = "/etc/podinfo/network-status"
-
- unprivilegedContainerSELinuxLabel = "system_u:object_r:container_file_t:s0"
)
type virtHandlerApp struct {
@@ -420,7 +418,7 @@ func (app *virtHandlerApp) Run() {
if err != nil {
panic(err)
}
- err = selinux.RelabelFiles(unprivilegedContainerSELinuxLabel, se.IsPermissive(), devTun, devNull)
+ err = selinux.RelabelFiles(util.UnprivilegedContainerSELinuxLabel, se.IsPermissive(), devTun, devNull)
if err != nil {
panic(fmt.Errorf("error relabeling required files: %v", err))
}
@@ -564,18 +562,7 @@ func (app *virtHandlerApp) shouldEnablePersistentReservation() {
if err != nil {
panic(err)
}
- se, exists, err := selinux.NewSELinux()
- if err == nil && exists {
- err = selinux.RelabelFiles(unprivilegedContainerSELinuxLabel, se.IsPermissive(), prSockDir)
- if err != nil {
- panic(fmt.Errorf("error relabeling required files: %v", err))
- }
- } else if err != nil {
- panic(fmt.Errorf("failed to detect the presence of selinux: %v", err))
- }
-
log.DefaultLogger().Infof("set permission for %s", reservation.GetPrHelperHostSocketDir())
-
}
func (app *virtHandlerApp) runPrometheusServer(errCh chan error) {
diff --git a/pkg/util/util.go b/pkg/util/util.go
index dbf14064a..fef626f9f 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -27,15 +27,17 @@ const (
HostRootMount = "/proc/1/root/"
CPUManagerOS3Path = HostRootMount + "var/lib/origin/openshift.local.volumes/cpu_manager_state"
CPUManagerPath = HostRootMount + "var/lib/kubelet/cpu_manager_state"
-)
-// Alphanums is the list of alphanumeric characters used to create a securely generated random string
-const Alphanums = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ // Alphanums is the list of alphanumeric characters used to create a securely generated random string
+ Alphanums = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
+ NonRootUID = 107
+ NonRootUserString = "qemu"
+ RootUser = 0
+ memoryDumpOverhead = 100 * 1024 * 1024
-const NonRootUID = 107
-const NonRootUserString = "qemu"
-const RootUser = 0
-const memoryDumpOverhead = 100 * 1024 * 1024
+ UnprivilegedContainerSELinuxLabel = "system_u:object_r:container_file_t:s0"
+)
func IsNonRootVMI(vmi *v1.VirtualMachineInstance) bool {
_, ok := vmi.Annotations[v1.DeprecatedNonRootVMIAnnotation]
diff --git a/pkg/virt-handler/device-manager/socket_device.go b/pkg/virt-handler/device-manager/socket_device.go
index fdac11662..53308b648 100644
--- a/pkg/virt-handler/device-manager/socket_device.go
+++ b/pkg/virt-handler/device-manager/socket_device.go
@@ -40,6 +40,7 @@ import (
"kubevirt.io/kubevirt/pkg/safepath"
"kubevirt.io/kubevirt/pkg/util"
pluginapi "kubevirt.io/kubevirt/pkg/virt-handler/device-manager/deviceplugin/v1beta1"
+ "kubevirt.io/kubevirt/pkg/virt-handler/selinux"
)
type SocketDevicePlugin struct {
@@ -220,6 +221,14 @@ func (dpi *SocketDevicePlugin) Allocate(ctx context.Context, r *pluginapi.Alloca
return nil, fmt.Errorf("error setting the permission the socket %s/%s:%v", dpi.socketDir, dpi.socket, err)
}
+ if se, exists, err := selinux.NewSELinux(); err == nil && exists {
+ if err := selinux.RelabelFiles(util.UnprivilegedContainerSELinuxLabel, se.IsPermissive(), prSock); err != nil {
+ return nil, fmt.Errorf("error relabeling required files: %v", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("failed to detect the presence of selinux: %v", err)
+ }
+
m := new(pluginapi.Mount)
m.HostPath = dpi.socketDir
m.ContainerPath = dpi.socketDir
--
2.41.0
From 128599fb4d138723991dd46e741f86dc1561488f Mon Sep 17 00:00:00 2001
From: Alice Frosi <afrosi@redhat.com>
Date: Fri, 4 Aug 2023 13:27:40 +0200
Subject: [PATCH 8/8] pr-helper: set user to root
The image is built with user 1000 by default and the container is
created automatically with this user. Explicitly setting the user to
root avoids permission conflicts.
Signed-off-by: Alice Frosi <afrosi@redhat.com>
---
cmd/virt-handler/BUILD.bazel | 1 -
cmd/virt-handler/virt-handler.go | 19 -------------------
.../device-manager/socket_device.go | 1 -
.../generate/components/daemonsets.go | 2 +-
4 files changed, 1 insertion(+), 22 deletions(-)
diff --git a/cmd/virt-handler/BUILD.bazel b/cmd/virt-handler/BUILD.bazel
index 4299bc688..88e684e9a 100644
--- a/cmd/virt-handler/BUILD.bazel
+++ b/cmd/virt-handler/BUILD.bazel
@@ -19,7 +19,6 @@ go_library(
"//pkg/monitoring/workqueue/prometheus:go_default_library",
"//pkg/safepath:go_default_library",
"//pkg/service:go_default_library",
- "//pkg/storage/reservation:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/ratelimiter:go_default_library",
"//pkg/util/tls:go_default_library",
diff --git a/cmd/virt-handler/virt-handler.go b/cmd/virt-handler/virt-handler.go
index 6a915d9ba..f07623453 100644
--- a/cmd/virt-handler/virt-handler.go
+++ b/cmd/virt-handler/virt-handler.go
@@ -33,7 +33,6 @@ import (
"syscall"
"time"
- "kubevirt.io/kubevirt/pkg/storage/reservation"
kvtls "kubevirt.io/kubevirt/pkg/util/tls"
"kubevirt.io/kubevirt/pkg/virt-handler/seccomp"
"kubevirt.io/kubevirt/pkg/virt-handler/vsock"
@@ -315,7 +314,6 @@ func (app *virtHandlerApp) Run() {
app.clusterConfig.SetConfigModifiedCallback(app.shouldChangeLogVerbosity)
app.clusterConfig.SetConfigModifiedCallback(app.shouldChangeRateLimiter)
app.clusterConfig.SetConfigModifiedCallback(app.shouldInstallKubevirtSeccompProfile)
- app.clusterConfig.SetConfigModifiedCallback(app.shouldEnablePersistentReservation)
if err := app.setupTLS(factory); err != nil {
glog.Fatalf("Error constructing migration tls config: %v", err)
@@ -548,23 +546,6 @@ func (app *virtHandlerApp) shouldInstallKubevirtSeccompProfile() {
}
-func (app *virtHandlerApp) shouldEnablePersistentReservation() {
- enabled := app.clusterConfig.PersistentReservationEnabled()
- if !enabled {
- log.DefaultLogger().Info("Persistent Reservation is not enabled")
- return
- }
- prSockDir, err := safepath.JoinAndResolveWithRelativeRoot("/", reservation.GetPrHelperHostSocketDir())
- if err != nil {
- panic(err)
- }
- err = safepath.ChownAtNoFollow(prSockDir, util.NonRootUID, util.NonRootUID)
- if err != nil {
- panic(err)
- }
- log.DefaultLogger().Infof("set permission for %s", reservation.GetPrHelperHostSocketDir())
-}
-
func (app *virtHandlerApp) runPrometheusServer(errCh chan error) {
mux := restful.NewContainer()
webService := new(restful.WebService)
diff --git a/pkg/virt-handler/device-manager/socket_device.go b/pkg/virt-handler/device-manager/socket_device.go
index 53308b648..14e9f86df 100644
--- a/pkg/virt-handler/device-manager/socket_device.go
+++ b/pkg/virt-handler/device-manager/socket_device.go
@@ -220,7 +220,6 @@ func (dpi *SocketDevicePlugin) Allocate(ctx context.Context, r *pluginapi.Alloca
if err != nil {
return nil, fmt.Errorf("error setting the permission the socket %s/%s:%v", dpi.socketDir, dpi.socket, err)
}
-
if se, exists, err := selinux.NewSELinux(); err == nil && exists {
if err := selinux.RelabelFiles(util.UnprivilegedContainerSELinuxLabel, se.IsPermissive(), prSock); err != nil {
return nil, fmt.Errorf("error relabeling required files: %v", err)
diff --git a/pkg/virt-operator/resource/generate/components/daemonsets.go b/pkg/virt-operator/resource/generate/components/daemonsets.go
index 229b8e24e..fccc4161a 100644
--- a/pkg/virt-operator/resource/generate/components/daemonsets.go
+++ b/pkg/virt-operator/resource/generate/components/daemonsets.go
@@ -42,7 +42,7 @@ func RenderPrHelperContainer(image string, pullPolicy corev1.PullPolicy) corev1.
},
},
SecurityContext: &corev1.SecurityContext{
- RunAsUser: pointer.Int64(util.NonRootUID),
+ RunAsUser: pointer.Int64(util.RootUser),
Privileged: pointer.Bool(true),
},
}
--
2.41.0


@@ -1,751 +0,0 @@
From 22a734153b37c3706069cb7a0a8eb92167040b9e Mon Sep 17 00:00:00 2001
From: Luboslav Pivarc <lpivarc@redhat.com>
Date: Wed, 9 Aug 2023 11:51:10 +0200
Subject: [PATCH] Fix Aggregated Discovery
Bump the client-go dependency to pick up the fix for a bug
with Aggregated Discovery
Signed-off-by: Luboslav Pivarc <lpivarc@redhat.com>
---
go.mod | 52 +++----
go.sum | 32 ++---
staging/src/kubevirt.io/client-go/go.mod | 50 +++----
staging/src/kubevirt.io/client-go/go.sum | 24 ++--
.../discovery/aggregated_discovery.go | 58 +++++---
.../client-go/tools/cache/controller.go | 22 +--
.../client-go/tools/cache/delta_fifo.go | 135 ++++++++++++------
.../client-go/tools/cache/shared_informer.go | 8 +-
vendor/modules.txt | 60 ++++----
9 files changed, 241 insertions(+), 200 deletions(-)
diff --git a/go.mod b/go.mod
index 64a39278f..8bc66a1c5 100644
--- a/go.mod
+++ b/go.mod
@@ -65,11 +65,11 @@ require (
gopkg.in/cheggaaa/pb.v1 v1.0.28
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.27.1
- k8s.io/apiextensions-apiserver v0.26.3
+ k8s.io/apiextensions-apiserver v0.26.4
k8s.io/apimachinery v0.27.1
k8s.io/client-go v12.0.0+incompatible
k8s.io/klog/v2 v2.90.1
- k8s.io/kube-aggregator v0.26.3
+ k8s.io/kube-aggregator v0.26.4
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
k8s.io/kubectl v0.0.0-00010101000000-000000000000
k8s.io/utils v0.0.0-20230505201702-9f6742963106
@@ -165,32 +165,32 @@ replace (
github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47
github.com/operator-framework/operator-lifecycle-manager => github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190128024246-5eb7ae5bdb7a
- k8s.io/api => k8s.io/api v0.26.3
- k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.3
- k8s.io/apimachinery => k8s.io/apimachinery v0.26.3
- k8s.io/apiserver => k8s.io/apiserver v0.26.3
- k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.3
- k8s.io/client-go => k8s.io/client-go v0.26.3
- k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.3
- k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.3
- k8s.io/code-generator => k8s.io/code-generator v0.26.3
- k8s.io/component-base => k8s.io/component-base v0.26.3
- k8s.io/cri-api => k8s.io/cri-api v0.26.3
- k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.3
+ k8s.io/api => k8s.io/api v0.26.4
+ k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.4
+ k8s.io/apimachinery => k8s.io/apimachinery v0.26.4
+ k8s.io/apiserver => k8s.io/apiserver v0.26.4
+ k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.4
+ k8s.io/client-go => k8s.io/client-go v0.26.4
+ k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.4
+ k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.4
+ k8s.io/code-generator => k8s.io/code-generator v0.26.4
+ k8s.io/component-base => k8s.io/component-base v0.26.4
+ k8s.io/cri-api => k8s.io/cri-api v0.26.4
+ k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.4
k8s.io/klog => k8s.io/klog v0.4.0
- k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.3
- k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.3
+ k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.4
+ k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.4
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280
- k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.3
- k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.3
- k8s.io/kubectl => k8s.io/kubectl v0.26.3
- k8s.io/kubelet => k8s.io/kubelet v0.26.3
- k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.3
- k8s.io/metrics => k8s.io/metrics v0.26.3
- k8s.io/node-api => k8s.io/node-api v0.26.3
- k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.3
- k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.3
- k8s.io/sample-controller => k8s.io/sample-controller v0.26.3
+ k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.4
+ k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.4
+ k8s.io/kubectl => k8s.io/kubectl v0.26.4
+ k8s.io/kubelet => k8s.io/kubelet v0.26.4
+ k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.4
+ k8s.io/metrics => k8s.io/metrics v0.26.4
+ k8s.io/node-api => k8s.io/node-api v0.26.4
+ k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.4
+ k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.4
+ k8s.io/sample-controller => k8s.io/sample-controller v0.26.4
kubevirt.io/api => ./staging/src/kubevirt.io/api
kubevirt.io/client-go => ./staging/src/kubevirt.io/client-go
diff --git a/go.sum b/go.sum
index fdd556ec6..3f7fe3ce3 100644
--- a/go.sum
+++ b/go.sum
@@ -1927,17 +1927,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
-k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU=
-k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE=
-k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE=
-k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ=
-k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k=
-k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
-k8s.io/apiserver v0.26.3/go.mod h1:CJe/VoQNcXdhm67EvaVjYXxR3QyfwpceKPuPaeLibTA=
-k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s=
-k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ=
-k8s.io/code-generator v0.26.3/go.mod h1:ryaiIKwfxEJEaywEzx3dhWOydpVctKYbqLajJf0O8dI=
-k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E=
+k8s.io/api v0.26.4 h1:qSG2PmtcD23BkYiWfoYAcak870eF/hE7NNYBYavTT94=
+k8s.io/api v0.26.4/go.mod h1:WwKEXU3R1rgCZ77AYa7DFksd9/BAIKyOmRlbVxgvjCk=
+k8s.io/apiextensions-apiserver v0.26.4 h1:9D2RTxYGxrG5uYg6D7QZRcykXvavBvcA59j5kTaedQI=
+k8s.io/apiextensions-apiserver v0.26.4/go.mod h1:cd4uGFGIgzEqUghWpRsr9KE8j2KNTjY8Ji8pnMMazyw=
+k8s.io/apimachinery v0.26.4 h1:rZccKdBLg9vP6J09JD+z8Yr99Ce8gk3Lbi9TCx05Jzs=
+k8s.io/apimachinery v0.26.4/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
+k8s.io/apiserver v0.26.4/go.mod h1:yAY3O1vBM4/0OIGAGeWcdfzQvgdwJ188VirLcuSAVnw=
+k8s.io/client-go v0.26.4 h1:/7P/IbGBuT73A+G97trf44NTPSNqvuBREpOfdLbHvD4=
+k8s.io/client-go v0.26.4/go.mod h1:6qOItWm3EwxJdl/8p5t7FWtWUOwyMdA8N9ekbW4idpI=
+k8s.io/code-generator v0.26.4/go.mod h1:ryaiIKwfxEJEaywEzx3dhWOydpVctKYbqLajJf0O8dI=
+k8s.io/component-base v0.26.4/go.mod h1:lTuWL1Xz/a4e80gmIC3YZG2JCO4xNwtKWHJWeJmsq20=
k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -1949,13 +1949,13 @@ k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kms v0.26.3/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
-k8s.io/kube-aggregator v0.26.3 h1:nc4H5ymGkWPU3c9U9UM468JcmNENY/s/mDYVW3t3uRo=
-k8s.io/kube-aggregator v0.26.3/go.mod h1:SgBESB/+PfZAyceTPIanfQ7GtX9G/+mjfUbTHg3Twbo=
+k8s.io/kms v0.26.4/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
+k8s.io/kube-aggregator v0.26.4 h1:iGljhq5exQkbuc3bnkwUx95RPCBDExg7DkX9XaYhg6w=
+k8s.io/kube-aggregator v0.26.4/go.mod h1:eWfg4tU0+l57ebWiS5THOANIJUrKRxudSVDJ+63bqvQ=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
-k8s.io/kubectl v0.26.3 h1:bZ5SgFyeEXw6XTc1Qji0iNdtqAC76lmeIIQULg2wNXM=
-k8s.io/kubectl v0.26.3/go.mod h1:02+gv7Qn4dupzN3fi/9OvqqdW+uG/4Zi56vc4Zmsp1g=
+k8s.io/kubectl v0.26.4 h1:A0Oa0u/po4KxXnXsNCOwLojAe9cQR3TJNJabEIf7U1w=
+k8s.io/kubectl v0.26.4/go.mod h1:cWtp/+I4p+h5En3s2zO1zCry9v3/6h37EQ2tF3jNRnM=
k8s.io/kubernetes v1.11.8-beta.0.0.20190124204751-3a10094374f2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
diff --git a/staging/src/kubevirt.io/client-go/go.mod b/staging/src/kubevirt.io/client-go/go.mod
index b8b0074df..2c8edf077 100644
--- a/staging/src/kubevirt.io/client-go/go.mod
+++ b/staging/src/kubevirt.io/client-go/go.mod
@@ -16,7 +16,7 @@ require (
github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47
github.com/spf13/pflag v1.0.5
k8s.io/api v0.27.1
- k8s.io/apiextensions-apiserver v0.26.3
+ k8s.io/apiextensions-apiserver v0.26.4
k8s.io/apimachinery v0.27.1
k8s.io/client-go v12.0.0+incompatible
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
@@ -71,32 +71,32 @@ require (
replace (
github.com/openshift/api => github.com/openshift/api v0.0.0-20210105115604-44119421ec6b
- k8s.io/api => k8s.io/api v0.26.3
- k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.3
- k8s.io/apimachinery => k8s.io/apimachinery v0.26.3
- k8s.io/apiserver => k8s.io/apiserver v0.26.3
- k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.3
- k8s.io/client-go => k8s.io/client-go v0.26.3
- k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.3
- k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.3
- k8s.io/code-generator => k8s.io/code-generator v0.26.3
- k8s.io/component-base => k8s.io/component-base v0.26.3
- k8s.io/cri-api => k8s.io/cri-api v0.26.3
- k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.3
+ k8s.io/api => k8s.io/api v0.26.4
+ k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.4
+ k8s.io/apimachinery => k8s.io/apimachinery v0.26.4
+ k8s.io/apiserver => k8s.io/apiserver v0.26.4
+ k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.4
+ k8s.io/client-go => k8s.io/client-go v0.26.4
+ k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.4
+ k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.4
+ k8s.io/code-generator => k8s.io/code-generator v0.26.4
+ k8s.io/component-base => k8s.io/component-base v0.26.4
+ k8s.io/cri-api => k8s.io/cri-api v0.26.4
+ k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.4
k8s.io/klog => k8s.io/klog v0.4.0
- k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.3
- k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.3
+ k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.4
+ k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.4
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280
- k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.3
- k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.3
- k8s.io/kubectl => k8s.io/kubectl v0.26.3
- k8s.io/kubelet => k8s.io/kubelet v0.26.3
- k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.3
- k8s.io/metrics => k8s.io/metrics v0.26.3
- k8s.io/node-api => k8s.io/node-api v0.26.3
- k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.3
- k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.3
- k8s.io/sample-controller => k8s.io/sample-controller v0.26.3
+ k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.4
+ k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.4
+ k8s.io/kubectl => k8s.io/kubectl v0.26.4
+ k8s.io/kubelet => k8s.io/kubelet v0.26.4
+ k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.4
+ k8s.io/metrics => k8s.io/metrics v0.26.4
+ k8s.io/node-api => k8s.io/node-api v0.26.4
+ k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.4
+ k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.4
+ k8s.io/sample-controller => k8s.io/sample-controller v0.26.4
kubevirt.io/api => ../api
)
diff --git a/staging/src/kubevirt.io/client-go/go.sum b/staging/src/kubevirt.io/client-go/go.sum
index 3fcf63dc1..89daf9285 100644
--- a/staging/src/kubevirt.io/client-go/go.sum
+++ b/staging/src/kubevirt.io/client-go/go.sum
@@ -1668,17 +1668,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
-k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU=
-k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE=
-k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE=
-k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ=
-k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k=
-k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
-k8s.io/apiserver v0.26.3/go.mod h1:CJe/VoQNcXdhm67EvaVjYXxR3QyfwpceKPuPaeLibTA=
-k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s=
-k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ=
-k8s.io/code-generator v0.26.3/go.mod h1:ryaiIKwfxEJEaywEzx3dhWOydpVctKYbqLajJf0O8dI=
-k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E=
+k8s.io/api v0.26.4 h1:qSG2PmtcD23BkYiWfoYAcak870eF/hE7NNYBYavTT94=
+k8s.io/api v0.26.4/go.mod h1:WwKEXU3R1rgCZ77AYa7DFksd9/BAIKyOmRlbVxgvjCk=
+k8s.io/apiextensions-apiserver v0.26.4 h1:9D2RTxYGxrG5uYg6D7QZRcykXvavBvcA59j5kTaedQI=
+k8s.io/apiextensions-apiserver v0.26.4/go.mod h1:cd4uGFGIgzEqUghWpRsr9KE8j2KNTjY8Ji8pnMMazyw=
+k8s.io/apimachinery v0.26.4 h1:rZccKdBLg9vP6J09JD+z8Yr99Ce8gk3Lbi9TCx05Jzs=
+k8s.io/apimachinery v0.26.4/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
+k8s.io/apiserver v0.26.4/go.mod h1:yAY3O1vBM4/0OIGAGeWcdfzQvgdwJ188VirLcuSAVnw=
+k8s.io/client-go v0.26.4 h1:/7P/IbGBuT73A+G97trf44NTPSNqvuBREpOfdLbHvD4=
+k8s.io/client-go v0.26.4/go.mod h1:6qOItWm3EwxJdl/8p5t7FWtWUOwyMdA8N9ekbW4idpI=
+k8s.io/code-generator v0.26.4/go.mod h1:ryaiIKwfxEJEaywEzx3dhWOydpVctKYbqLajJf0O8dI=
+k8s.io/component-base v0.26.4/go.mod h1:lTuWL1Xz/a4e80gmIC3YZG2JCO4xNwtKWHJWeJmsq20=
k8s.io/gengo v0.0.0-20190907103519-ebc107f98eab/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -1692,7 +1692,7 @@ k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kms v0.26.3/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
+k8s.io/kms v0.26.4/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
diff --git a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go
index 758b0a3ac..7470259dc 100644
--- a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go
+++ b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go
@@ -92,12 +92,18 @@ func convertAPIGroup(g apidiscovery.APIGroupDiscovery) (
resourceList := &metav1.APIResourceList{}
resourceList.GroupVersion = gv.String()
for _, r := range v.Resources {
- resource := convertAPIResource(r)
- resourceList.APIResources = append(resourceList.APIResources, resource)
+ resource, err := convertAPIResource(r)
+ if err == nil {
+ resourceList.APIResources = append(resourceList.APIResources, resource)
+ }
// Subresources field in new format get transformed into full APIResources.
+ // It is possible a partial result with an error was returned to be used
+ // as the parent resource for the subresource.
for _, subresource := range r.Subresources {
- sr := convertAPISubresource(resource, subresource)
- resourceList.APIResources = append(resourceList.APIResources, sr)
+ sr, err := convertAPISubresource(resource, subresource)
+ if err == nil {
+ resourceList.APIResources = append(resourceList.APIResources, sr)
+ }
}
}
gvResources[gv] = resourceList
@@ -105,30 +111,44 @@ func convertAPIGroup(g apidiscovery.APIGroupDiscovery) (
return group, gvResources, failedGVs
}
-// convertAPIResource tranforms a APIResourceDiscovery to an APIResource.
-func convertAPIResource(in apidiscovery.APIResourceDiscovery) metav1.APIResource {
- return metav1.APIResource{
+// convertAPIResource tranforms a APIResourceDiscovery to an APIResource. We are
+// resilient to missing GVK, since this resource might be the parent resource
+// for a subresource. If the parent is missing a GVK, it is not returned in
+// discovery, and the subresource MUST have the GVK.
+func convertAPIResource(in apidiscovery.APIResourceDiscovery) (metav1.APIResource, error) {
+ result := metav1.APIResource{
Name: in.Resource,
SingularName: in.SingularResource,
Namespaced: in.Scope == apidiscovery.ScopeNamespace,
- Group: in.ResponseKind.Group,
- Version: in.ResponseKind.Version,
- Kind: in.ResponseKind.Kind,
Verbs: in.Verbs,
ShortNames: in.ShortNames,
Categories: in.Categories,
}
+ var err error
+ if in.ResponseKind != nil {
+ result.Group = in.ResponseKind.Group
+ result.Version = in.ResponseKind.Version
+ result.Kind = in.ResponseKind.Kind
+ } else {
+ err = fmt.Errorf("discovery resource %s missing GVK", in.Resource)
+ }
+ // Can return partial result with error, which can be the parent for a
+ // subresource. Do not add this result to the returned discovery resources.
+ return result, err
}
// convertAPISubresource tranforms a APISubresourceDiscovery to an APIResource.
-func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) metav1.APIResource {
- return metav1.APIResource{
- Name: fmt.Sprintf("%s/%s", parent.Name, in.Subresource),
- SingularName: parent.SingularName,
- Namespaced: parent.Namespaced,
- Group: in.ResponseKind.Group,
- Version: in.ResponseKind.Version,
- Kind: in.ResponseKind.Kind,
- Verbs: in.Verbs,
+func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) (metav1.APIResource, error) {
+ result := metav1.APIResource{}
+ if in.ResponseKind == nil {
+ return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource)
}
+ result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource)
+ result.SingularName = parent.SingularName
+ result.Namespaced = parent.Namespaced
+ result.Group = in.ResponseKind.Group
+ result.Version = in.ResponseKind.Version
+ result.Kind = in.ResponseKind.Kind
+ result.Verbs = in.Verbs
+ return result, nil
}
diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
index 0762da3be..96005ff58 100644
--- a/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -353,17 +353,6 @@ func NewIndexerInformer(
return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
}
-// TransformFunc allows for transforming an object before it will be processed
-// and put into the controller cache and before the corresponding handlers will
-// be called on it.
-// TransformFunc (similarly to ResourceEventHandler functions) should be able
-// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown
-//
-// The most common usage pattern is to clean-up some parts of the object to
-// reduce component memory usage if a given component doesn't care about them.
-// given controller doesn't care for them
-type TransformFunc func(interface{}) (interface{}, error)
-
// NewTransformingInformer returns a Store and a controller for populating
// the store while also providing event notifications. You should only used
// the returned Store for Get/List operations; Add/Modify/Deletes will cause
@@ -411,19 +400,11 @@ func processDeltas(
// Object which receives event notifications from the given deltas
handler ResourceEventHandler,
clientState Store,
- transformer TransformFunc,
deltas Deltas,
) error {
// from oldest to newest
for _, d := range deltas {
obj := d.Object
- if transformer != nil {
- var err error
- obj, err = transformer(obj)
- if err != nil {
- return err
- }
- }
switch d.Type {
case Sync, Replaced, Added, Updated:
@@ -475,6 +456,7 @@ func newInformer(
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
KnownObjects: clientState,
EmitDeltaTypeReplaced: true,
+ Transformer: transformer,
})
cfg := &Config{
@@ -486,7 +468,7 @@ func newInformer(
Process: func(obj interface{}) error {
if deltas, ok := obj.(Deltas); ok {
- return processDeltas(h, clientState, transformer, deltas)
+ return processDeltas(h, clientState, deltas)
}
return errors.New("object given as Process argument is not Deltas")
},
diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
index 0c13a41f0..84f3ab9ca 100644
--- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -51,6 +51,10 @@ type DeltaFIFOOptions struct {
// When true, `Replaced` events will be sent for items passed to a Replace() call.
// When false, `Sync` events will be sent instead.
EmitDeltaTypeReplaced bool
+
+ // If set, will be called for objects before enqueueing them. Please
+ // see the comment on TransformFunc for details.
+ Transformer TransformFunc
}
// DeltaFIFO is like FIFO, but differs in two ways. One is that the
@@ -129,8 +133,32 @@ type DeltaFIFO struct {
// emitDeltaTypeReplaced is whether to emit the Replaced or Sync
// DeltaType when Replace() is called (to preserve backwards compat).
emitDeltaTypeReplaced bool
+
+ // Called with every object if non-nil.
+ transformer TransformFunc
}
+// TransformFunc allows for transforming an object before it will be processed.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown.
+//
+// New in v1.27: In such cases, the contained object will already have gone
+// through the transform object separately (when it was added / updated prior
+// to the delete), so the TransformFunc can likely safely ignore such objects
+// (i.e., just return the input object).
+//
+// The most common usage pattern is to clean-up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+//
+// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc
+// sees the object before any other actor, and it is now safe to mutate the
+// object in place instead of making a copy.
+//
+// Note that TransformFunc is called while inserting objects into the
+// notification queue and is therefore extremely performance sensitive; please
+// do not do anything that will take a long time.
+type TransformFunc func(interface{}) (interface{}, error)
+
// DeltaType is the type of a change (addition, deletion, etc)
type DeltaType string
@@ -227,6 +255,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
knownObjects: opts.KnownObjects,
emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced,
+ transformer: opts.Transformer,
}
f.cond.L = &f.lock
return f
@@ -411,6 +440,21 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
if err != nil {
return KeyError{obj, err}
}
+
+ // Every object comes through this code path once, so this is a good
+ // place to call the transform func. If obj is a
+ // DeletedFinalStateUnknown tombstone, then the containted inner object
+ // will already have gone through the transformer, but we document that
+ // this can happen. In cases involving Replace(), such an object can
+ // come through multiple times.
+ if f.transformer != nil {
+ var err error
+ obj, err = f.transformer(obj)
+ if err != nil {
+ return err
+ }
+ }
+
oldDeltas := f.items[id]
newDeltas := append(oldDeltas, Delta{actionType, obj})
newDeltas = dedupDeltas(newDeltas)
@@ -566,12 +610,11 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
// using the Sync or Replace DeltaType and then (2) it does some deletions.
// In particular: for every pre-existing key K that is not the key of
// an object in `list` there is the effect of
-// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object
-// of K. If `f.knownObjects == nil` then the pre-existing keys are
-// those in `f.items` and the current object of K is the `.Newest()`
-// of the Deltas associated with K. Otherwise the pre-existing keys
-// are those listed by `f.knownObjects` and the current object of K is
-// what `f.knownObjects.GetByKey(K)` returns.
+// `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known
+// object of K. The pre-existing keys are those in the union set of the keys in
+// `f.items` and `f.knownObjects` (if not nil). The last known object for key K is
+// the one present in the last delta in `f.items`. If there is no delta for K
+// in `f.items`, it is the object in `f.knownObjects`
func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
f.lock.Lock()
defer f.lock.Unlock()
@@ -595,51 +638,23 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
}
}
- if f.knownObjects == nil {
- // Do deletion detection against our own list.
- queuedDeletions := 0
- for k, oldItem := range f.items {
- if keys.Has(k) {
- continue
- }
- // Delete pre-existing items not in the new list.
- // This could happen if watch deletion event was missed while
- // disconnected from apiserver.
- var deletedObj interface{}
- if n := oldItem.Newest(); n != nil {
- deletedObj = n.Object
- }
- queuedDeletions++
- if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
- return err
- }
- }
-
- if !f.populated {
- f.populated = true
- // While there shouldn't be any queued deletions in the initial
- // population of the queue, it's better to be on the safe side.
- f.initialPopulationCount = keys.Len() + queuedDeletions
- }
-
- return nil
- }
-
- // Detect deletions not already in the queue.
- knownKeys := f.knownObjects.ListKeys()
+ // Do deletion detection against objects in the queue
queuedDeletions := 0
- for _, k := range knownKeys {
+ for k, oldItem := range f.items {
if keys.Has(k) {
continue
}
-
- deletedObj, exists, err := f.knownObjects.GetByKey(k)
- if err != nil {
- deletedObj = nil
- klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
- } else if !exists {
- deletedObj = nil
- klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+ // Delete pre-existing items not in the new list.
+ // This could happen if watch deletion event was missed while
+ // disconnected from apiserver.
+ var deletedObj interface{}
+ if n := oldItem.Newest(); n != nil {
+ deletedObj = n.Object
+
+ // if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object
+ if d, ok := deletedObj.(DeletedFinalStateUnknown); ok {
+ deletedObj = d.Obj
+ }
}
queuedDeletions++
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
@@ -647,6 +662,32 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
}
}
+ if f.knownObjects != nil {
+ // Detect deletions for objects not present in the queue, but present in KnownObjects
+ knownKeys := f.knownObjects.ListKeys()
+ for _, k := range knownKeys {
+ if keys.Has(k) {
+ continue
+ }
+ if len(f.items[k]) > 0 {
+ continue
+ }
+
+ deletedObj, exists, err := f.knownObjects.GetByKey(k)
+ if err != nil {
+ deletedObj = nil
+ klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+ } else if !exists {
+ deletedObj = nil
+ klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+ }
+ queuedDeletions++
+ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+ return err
+ }
+ }
+ }
+
if !f.populated {
f.populated = true
f.initialPopulationCount = keys.Len() + queuedDeletions
diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index f5c7316a1..4979642ce 100644
--- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -198,10 +198,7 @@ type SharedInformer interface {
//
// Must be set before starting the informer.
//
- // Note: Since the object given to the handler may be already shared with
- // other goroutines, it is advisable to copy the object being
- // transform before mutating it at all and returning the copy to prevent
- // data races.
+ // Please see the comment on TransformFunc for more details.
SetTransform(handler TransformFunc) error
// IsStopped reports whether the informer has already been stopped.
@@ -422,6 +419,7 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
KnownObjects: s.indexer,
EmitDeltaTypeReplaced: true,
+ Transformer: s.transform,
})
cfg := &Config{
@@ -585,7 +583,7 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
defer s.blockDeltas.Unlock()
if deltas, ok := obj.(Deltas); ok {
- return processDeltas(s, s.indexer, s.transform, deltas)
+ return processDeltas(s, s.indexer, deltas)
}
return errors.New("object given as Process argument is not Deltas")
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2806f50a8..a863ab45f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -658,7 +658,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/api v0.27.1 => k8s.io/api v0.26.3
+# k8s.io/api v0.27.1 => k8s.io/api v0.26.4
## explicit; go 1.19
k8s.io/api/admission/v1
k8s.io/api/admissionregistration/v1
@@ -712,7 +712,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.26.3 => k8s.io/apiextensions-apiserver v0.26.3
+# k8s.io/apiextensions-apiserver v0.26.4 => k8s.io/apiextensions-apiserver v0.26.4
## explicit; go 1.19
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -724,7 +724,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake
-# k8s.io/apimachinery v0.27.1 => k8s.io/apimachinery v0.26.3
+# k8s.io/apimachinery v0.27.1 => k8s.io/apimachinery v0.26.4
## explicit; go 1.19
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@@ -777,7 +777,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.26.3
+# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.26.4
## explicit; go 1.19
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -1096,7 +1096,7 @@ k8s.io/klog/v2/internal/clock
k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
-# k8s.io/kube-aggregator v0.26.3 => k8s.io/kube-aggregator v0.26.3
+# k8s.io/kube-aggregator v0.26.4 => k8s.io/kube-aggregator v0.26.4
## explicit; go 1.19
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1124,7 +1124,7 @@ k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/validation/errors
k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/kubectl v0.0.0-00010101000000-000000000000 => k8s.io/kubectl v0.26.3
+# k8s.io/kubectl v0.0.0-00010101000000-000000000000 => k8s.io/kubectl v0.26.4
## explicit; go 1.19
k8s.io/kubectl/pkg/cmd/util/podcmd
# k8s.io/utils v0.0.0-20230505201702-9f6742963106
@@ -1253,32 +1253,32 @@ sigs.k8s.io/yaml
# github.com/openshift/api => github.com/openshift/api v0.0.0-20191219222812-2987a591a72c
# github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47
# github.com/operator-framework/operator-lifecycle-manager => github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190128024246-5eb7ae5bdb7a
-# k8s.io/api => k8s.io/api v0.26.3
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.3
-# k8s.io/apimachinery => k8s.io/apimachinery v0.26.3
-# k8s.io/apiserver => k8s.io/apiserver v0.26.3
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.3
-# k8s.io/client-go => k8s.io/client-go v0.26.3
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.3
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.3
-# k8s.io/code-generator => k8s.io/code-generator v0.26.3
-# k8s.io/component-base => k8s.io/component-base v0.26.3
-# k8s.io/cri-api => k8s.io/cri-api v0.26.3
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.3
+# k8s.io/api => k8s.io/api v0.26.4
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.4
+# k8s.io/apimachinery => k8s.io/apimachinery v0.26.4
+# k8s.io/apiserver => k8s.io/apiserver v0.26.4
+# k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.4
+# k8s.io/client-go => k8s.io/client-go v0.26.4
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.4
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.4
+# k8s.io/code-generator => k8s.io/code-generator v0.26.4
+# k8s.io/component-base => k8s.io/component-base v0.26.4
+# k8s.io/cri-api => k8s.io/cri-api v0.26.4
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.4
# k8s.io/klog => k8s.io/klog v0.4.0
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.3
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.3
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.4
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.4
# k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.3
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.3
-# k8s.io/kubectl => k8s.io/kubectl v0.26.3
-# k8s.io/kubelet => k8s.io/kubelet v0.26.3
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.3
-# k8s.io/metrics => k8s.io/metrics v0.26.3
-# k8s.io/node-api => k8s.io/node-api v0.26.3
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.3
-# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.3
-# k8s.io/sample-controller => k8s.io/sample-controller v0.26.3
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.4
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.4
+# k8s.io/kubectl => k8s.io/kubectl v0.26.4
+# k8s.io/kubelet => k8s.io/kubelet v0.26.4
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.4
+# k8s.io/metrics => k8s.io/metrics v0.26.4
+# k8s.io/node-api => k8s.io/node-api v0.26.4
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.4
+# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.4
+# k8s.io/sample-controller => k8s.io/sample-controller v0.26.4
# kubevirt.io/api => ./staging/src/kubevirt.io/api
# kubevirt.io/client-go => ./staging/src/kubevirt.io/client-go
# sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.6.2
--
2.41.0
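
For context, the vendored client-go fix above boils down to treating a discovery resource whose ResponseKind is unset as a partial result: convert what is available, return an error, and let the caller skip the entry while still using it as the parent of its subresources. The following is only a minimal sketch of that pattern; the types and the convertResource helper are simplified stand-ins, not the real apidiscovery/metav1 structs or the vendored code.

package main

import (
	"errors"
	"fmt"
)

// Stand-in types; the real ones live in k8s.io/apimachinery and the
// apidiscovery API group (see the vendored diff above).
type GroupVersionKind struct{ Group, Version, Kind string }

type ResourceDiscovery struct {
	Resource     string
	ResponseKind *GroupVersionKind // may be nil for parent-only resources
}

type APIResource struct {
	Name                 string
	Group, Version, Kind string
}

// convertResource tolerates a missing GVK: it returns a partial result plus
// an error, so the caller can skip it while still using it as the parent of
// any subresources.
func convertResource(in ResourceDiscovery) (APIResource, error) {
	out := APIResource{Name: in.Resource}
	if in.ResponseKind == nil {
		return out, errors.New("missing GVK")
	}
	out.Group = in.ResponseKind.Group
	out.Version = in.ResponseKind.Version
	out.Kind = in.ResponseKind.Kind
	return out, nil
}

func main() {
	list := []ResourceDiscovery{
		{Resource: "virtualmachineinstances", ResponseKind: &GroupVersionKind{"kubevirt.io", "v1", "VirtualMachineInstance"}},
		{Resource: "parent-only"}, // no GVK: converted partially, then skipped
	}
	var resources []APIResource
	for _, r := range list {
		res, err := convertResource(r)
		if err == nil {
			resources = append(resources, res)
		}
	}
	fmt.Println(resources)
}
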
View File
@ -1,382 +0,0 @@
From ee3463ae990a1776908483b182ad79c79637cd5d Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Fri, 11 Aug 2023 07:56:29 -0500
Subject: [PATCH 1/4] Wait for new attachment pod
Before deleting the old attachment pod, wait for the new attachment
pod to be ready, so that Kubernetes does not detach the volume from
the node: from its perspective there is always a pod using the volume.
Fixed an issue where, when adding or removing a volume, the existing
volumes would still carry the UID of the old attachment pod in the
VMI status, which caused errors in the virt-handler logs about not
being able to find the device or image.
Fixed an issue where cleanup would attempt to remove a volume that
was already gone, causing errors in the virt-handler log.
Signed-off-by: Alexander Wels <awels@redhat.com>
---
pkg/virt-controller/watch/vmi.go | 60 +++++++++++++++-----------
pkg/virt-controller/watch/vmi_test.go | 52 ++++++++++++++++++++++
pkg/virt-handler/hotplug-disk/mount.go | 7 ++-
tests/storage/hotplug.go | 10 +++++
4 files changed, 103 insertions(+), 26 deletions(-)
diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 9afaee4f0..99af8b8cb 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -516,11 +516,7 @@ func (c *VMIController) hasOwnerVM(vmi *virtv1.VirtualMachineInstance) bool {
}
ownerVM := obj.(*virtv1.VirtualMachine)
- if controllerRef.UID == ownerVM.UID {
- return true
- }
-
- return false
+ return controllerRef.UID == ownerVM.UID
}
func (c *VMIController) updateStatus(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume, syncErr syncError) error {
@@ -1816,15 +1812,29 @@ func (c *VMIController) waitForFirstConsumerTemporaryPods(vmi *virtv1.VirtualMac
}
func (c *VMIController) needsHandleHotplug(hotplugVolumes []*virtv1.Volume, hotplugAttachmentPods []*k8sv1.Pod) bool {
+ if len(hotplugAttachmentPods) > 1 {
+ return true
+ }
// Determine if the ready volumes have changed compared to the current pod
for _, attachmentPod := range hotplugAttachmentPods {
if c.podVolumesMatchesReadyVolumes(attachmentPod, hotplugVolumes) {
- log.DefaultLogger().Infof("Don't need to handle as we have a matching attachment pod")
return false
}
- return true
}
- return len(hotplugVolumes) > 0
+ return len(hotplugVolumes) > 0 || len(hotplugAttachmentPods) > 0
+}
+
+func (c *VMIController) getActiveAndOldAttachmentPods(readyHotplugVolumes []*virtv1.Volume, hotplugAttachmentPods []*k8sv1.Pod) (*k8sv1.Pod, []*k8sv1.Pod) {
+ var currentPod *k8sv1.Pod
+ oldPods := make([]*k8sv1.Pod, 0)
+ for _, attachmentPod := range hotplugAttachmentPods {
+ if !c.podVolumesMatchesReadyVolumes(attachmentPod, readyHotplugVolumes) {
+ oldPods = append(oldPods, attachmentPod)
+ } else {
+ currentPod = attachmentPod
+ }
+ }
+ return currentPod, oldPods
}
func (c *VMIController) handleHotplugVolumes(hotplugVolumes []*virtv1.Volume, hotplugAttachmentPods []*k8sv1.Pod, vmi *virtv1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume) syncError {
@@ -1855,29 +1865,25 @@ func (c *VMIController) handleHotplugVolumes(hotplugVolumes []*virtv1.Volume, ho
readyHotplugVolumes = append(readyHotplugVolumes, volume)
}
// Determine if the ready volumes have changed compared to the current pod
- currentPod := make([]*k8sv1.Pod, 0)
- oldPods := make([]*k8sv1.Pod, 0)
- for _, attachmentPod := range hotplugAttachmentPods {
- if !c.podVolumesMatchesReadyVolumes(attachmentPod, readyHotplugVolumes) {
- oldPods = append(oldPods, attachmentPod)
- } else {
- currentPod = append(currentPod, attachmentPod)
- }
- }
+ currentPod, oldPods := c.getActiveAndOldAttachmentPods(readyHotplugVolumes, hotplugAttachmentPods)
- if len(currentPod) == 0 && len(readyHotplugVolumes) > 0 {
+ if currentPod == nil && len(readyHotplugVolumes) > 0 {
// ready volumes have changed
// Create new attachment pod that holds all the ready volumes
if err := c.createAttachmentPod(vmi, virtLauncherPod, readyHotplugVolumes); err != nil {
return err
}
}
- // Delete old attachment pod
- for _, attachmentPod := range oldPods {
- if err := c.deleteAttachmentPodForVolume(vmi, attachmentPod); err != nil {
- return &syncErrorImpl{fmt.Errorf("Error deleting attachment pod %v", err), FailedDeletePodReason}
+
+ if len(readyHotplugVolumes) == 0 || (currentPod != nil && currentPod.Status.Phase == k8sv1.PodRunning) {
+ // Delete old attachment pod
+ for _, attachmentPod := range oldPods {
+ if err := c.deleteAttachmentPodForVolume(vmi, attachmentPod); err != nil {
+ return &syncErrorImpl{fmt.Errorf("Error deleting attachment pod %v", err), FailedDeletePodReason}
+ }
}
}
+
return nil
}
@@ -2121,6 +2127,9 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
if err != nil {
return err
}
+
+ attachmentPod, _ := c.getActiveAndOldAttachmentPods(hotplugVolumes, attachmentPods)
+
newStatus := make([]virtv1.VolumeStatus, 0)
for i, volume := range vmi.Spec.Volumes {
status := virtv1.VolumeStatus{}
@@ -2142,7 +2151,6 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
ClaimName: volume.Name,
}
}
- attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
if attachmentPod == nil {
if !c.volumeReady(status.Phase) {
status.HotplugVolume.AttachPodUID = ""
@@ -2156,6 +2164,9 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
status.HotplugVolume.AttachPodName = attachmentPod.Name
if len(attachmentPod.Status.ContainerStatuses) == 1 && attachmentPod.Status.ContainerStatuses[0].Ready {
status.HotplugVolume.AttachPodUID = attachmentPod.UID
+ } else {
+ // Remove UID of old pod if a new one is available, but not yet ready
+ status.HotplugVolume.AttachPodUID = ""
}
if c.canMoveToAttachedPhase(status.Phase) {
status.Phase = virtv1.HotplugVolumeAttachedToNode
@@ -2244,8 +2255,7 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
}
func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
- return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
- currentPhase == virtv1.HotplugVolumeAttachedToNode)
+ return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending)
}
func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
diff --git a/pkg/virt-controller/watch/vmi_test.go b/pkg/virt-controller/watch/vmi_test.go
index a9b173232..932326432 100644
--- a/pkg/virt-controller/watch/vmi_test.go
+++ b/pkg/virt-controller/watch/vmi_test.go
@@ -2700,6 +2700,58 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
[]string{SuccessfulCreatePodReason}),
)
+ DescribeTable("Should properly calculate if it needs to handle hotplug volumes", func(hotplugVolumes []*virtv1.Volume, attachmentPods []*k8sv1.Pod, match gomegaTypes.GomegaMatcher) {
+ Expect(controller.needsHandleHotplug(hotplugVolumes, attachmentPods)).To(match)
+ },
+ Entry("nil volumes, nil attachmentPods", nil, nil, BeFalse()),
+ Entry("empty volumes, empty attachmentPods", []*virtv1.Volume{}, []*k8sv1.Pod{}, BeFalse()),
+ Entry("single volume, empty attachmentPods", []*virtv1.Volume{
+ {
+ Name: "test",
+ },
+ }, []*k8sv1.Pod{}, BeTrue()),
+ Entry("no volume, single attachmentPod", []*virtv1.Volume{}, makePods(0), BeTrue()),
+ Entry("matching volume, single attachmentPod", []*virtv1.Volume{
+ {
+ Name: "volume0",
+ },
+ }, makePods(0), BeFalse()),
+ Entry("mismatched volume, single attachmentPod", []*virtv1.Volume{
+ {
+ Name: "invalid",
+ },
+ }, makePods(0), BeTrue()),
+ Entry("matching volume, multiple attachmentPods", []*virtv1.Volume{
+ {
+ Name: "volume0",
+ },
+ }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, BeTrue()),
+ )
+
+ DescribeTable("Should find active and old pods", func(hotplugVolumes []*virtv1.Volume, attachmentPods []*k8sv1.Pod, expectedActive *k8sv1.Pod, expectedOld []*k8sv1.Pod) {
+ active, old := controller.getActiveAndOldAttachmentPods(hotplugVolumes, attachmentPods)
+ Expect(active).To(Equal(expectedActive))
+ Expect(old).To(ContainElements(expectedOld))
+ },
+ Entry("nil volumes, nil attachmentPods", nil, nil, nil, nil),
+ Entry("empty volumes, empty attachmentPods", []*virtv1.Volume{}, []*k8sv1.Pod{}, nil, []*k8sv1.Pod{}),
+ Entry("matching volume, single attachmentPod", []*virtv1.Volume{
+ {
+ Name: "volume0",
+ },
+ }, makePods(0), makePods(0)[0], []*k8sv1.Pod{}),
+ Entry("matching volume, multiple attachmentPods, first pod matches", []*virtv1.Volume{
+ {
+ Name: "volume0",
+ },
+ }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, makePods(0)[0], makePods(1)),
+ Entry("matching volume, multiple attachmentPods, second pod matches", []*virtv1.Volume{
+ {
+ Name: "volume1",
+ },
+ }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, makePods(1)[0], makePods(0)),
+ )
+
It("Should get default filesystem overhead if there are multiple CDI instances", func() {
cdi := cdiv1.CDI{
ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index 942301815..43504d48d 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -508,9 +508,10 @@ func (m *volumeMounter) updateBlockMajorMinor(dev uint64, allow bool, manager cg
func (m *volumeMounter) createBlockDeviceFile(basePath *safepath.Path, deviceName string, dev uint64, blockDevicePermissions os.FileMode) error {
if _, err := safepath.JoinNoFollow(basePath, deviceName); errors.Is(err, os.ErrNotExist) {
return mknodCommand(basePath, deviceName, dev, blockDevicePermissions)
- } else {
+ } else if err != nil {
return err
}
+ return nil
}
func (m *volumeMounter) mountFileSystemHotplugVolume(vmi *v1.VirtualMachineInstance, volume string, sourceUID types.UID, record *vmiMountTargetRecord, mountDirectory bool) error {
@@ -667,6 +668,10 @@ func (m *volumeMounter) Unmount(vmi *v1.VirtualMachineInstance) error {
var err error
if m.isBlockVolume(&vmi.Status, volumeStatus.Name) {
path, err = safepath.JoinNoFollow(basePath, volumeStatus.Name)
+ if errors.Is(err, os.ErrNotExist) {
+ // already unmounted or never mounted
+ continue
+ }
} else if m.isDirectoryMounted(&vmi.Status, volumeStatus.Name) {
path, err = m.hotplugDiskManager.GetFileSystemDirectoryTargetPathFromHostView(virtlauncherUID, volumeStatus.Name, false)
if os.IsExist(err) {
diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index a85976484..ba9c69100 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -724,6 +724,16 @@ var _ = SIGDescribe("Hotplug", func() {
for i := range testVolumes {
verifyVolumeNolongerAccessible(vmi, targets[i])
}
+ By("Verifying there are no sync errors")
+ events, err := virtClient.CoreV1().Events(vmi.Namespace).List(context.Background(), metav1.ListOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ for _, event := range events.Items {
+ if event.InvolvedObject.Kind == "VirtualMachineInstance" && event.InvolvedObject.UID == vmi.UID {
+ if event.Reason == string(v1.SyncFailed) {
+ Fail(fmt.Sprintf("Found sync failed event %v", event))
+ }
+ }
+ }
},
Entry("with VMs", addDVVolumeVM, removeVolumeVM, corev1.PersistentVolumeFilesystem, false),
Entry("with VMIs", addDVVolumeVMI, removeVolumeVMI, corev1.PersistentVolumeFilesystem, true),
--
2.41.0
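
The controller change in the patch above can be summarized as: pick the attachment pod whose volumes match the currently ready hotplug volumes, and only delete the stale pods once that pod is Running (or once no hotplug volumes remain), so the node never sees a moment with no pod holding the volume. Below is a rough sketch of that split with simplified stand-in types (Pod, Volume, splitAttachmentPods); it is not the actual virt-controller code.

package main

import "fmt"

// Pod and Volume are simplified stand-ins for the k8s and KubeVirt types
// used by the real controller.
type Pod struct {
	Name    string
	Running bool
	Volumes []string
}

type Volume struct{ Name string }

// matches reports whether the attachment pod carries exactly the ready
// hotplug volumes.
func matches(p Pod, vols []Volume) bool {
	if len(p.Volumes) != len(vols) {
		return false
	}
	have := map[string]bool{}
	for _, v := range p.Volumes {
		have[v] = true
	}
	for _, v := range vols {
		if !have[v.Name] {
			return false
		}
	}
	return true
}

// splitAttachmentPods returns the pod matching the ready volumes (if any)
// and all remaining, stale attachment pods.
func splitAttachmentPods(ready []Volume, pods []Pod) (current *Pod, old []Pod) {
	for i := range pods {
		if matches(pods[i], ready) {
			current = &pods[i]
		} else {
			old = append(old, pods[i])
		}
	}
	return current, old
}

func main() {
	ready := []Volume{{Name: "volume1"}}
	pods := []Pod{
		{Name: "hp-old", Running: true, Volumes: []string{"volume0"}},
		{Name: "hp-new", Running: false, Volumes: []string{"volume1"}},
	}
	current, old := splitAttachmentPods(ready, pods)
	// Only tear down stale pods once the new pod is running (or nothing is
	// hotplugged any more).
	if len(ready) == 0 || (current != nil && current.Running) {
		for _, p := range old {
			fmt.Println("would delete", p.Name)
		}
	} else {
		fmt.Println("keeping", len(old), "old attachment pod(s) until the new one is running")
	}
}
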
From b02ab03f39e7e888c27949d24c0e9b38963d9b6c Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Fri, 11 Aug 2023 15:00:59 -0500
Subject: [PATCH 2/4] Don't generate SyncFailed events caused by a race condition.
Signed-off-by: Alexander Wels <awels@redhat.com>
---
pkg/virt-handler/hotplug-disk/mount.go | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index 43504d48d..9a5d24747 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -313,12 +313,16 @@ func (m *volumeMounter) mountHotplugVolume(vmi *v1.VirtualMachineInstance, volum
if m.isBlockVolume(&vmi.Status, volumeName) {
logger.V(4).Infof("Mounting block volume: %s", volumeName)
if err := m.mountBlockHotplugVolume(vmi, volumeName, sourceUID, record); err != nil {
- return fmt.Errorf("failed to mount block hotplug volume %s: %v", volumeName, err)
+ if !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("failed to mount block hotplug volume %s: %v", volumeName, err)
+ }
}
} else {
logger.V(4).Infof("Mounting file system volume: %s", volumeName)
if err := m.mountFileSystemHotplugVolume(vmi, volumeName, sourceUID, record, mountDirectory); err != nil {
- return fmt.Errorf("failed to mount filesystem hotplug volume %s: %v", volumeName, err)
+ if !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("failed to mount filesystem hotplug volume %s: %v", volumeName, err)
+ }
}
}
}
--
2.41.0
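
The race avoided by the patch above is a hotplug volume disappearing between the decision to mount it and the mount itself; instead of surfacing a SyncFailed event, errors matching os.ErrNotExist are ignored and a later resync converges. A generic sketch of that pattern follows; mountVolume is a stand-in that only stats the path so the example runs without privileges, not the real mount helper.

package main

import (
	"errors"
	"fmt"
	"os"
)

// mountVolume is a stand-in for the real hotplug mount helpers.
func mountVolume(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("failed to mount hotplug volume %s: %w", path, err)
	}
	// ... a real implementation would bind-mount the device or directory ...
	return nil
}

func main() {
	err := mountVolume("/nonexistent/volume0")
	// A volume that vanished between reconcile steps is not a sync failure:
	// ignore os.ErrNotExist and let a later resync converge.
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		fmt.Println("sync failed:", err)
		return
	}
	fmt.Println("nothing to do")
}
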
From 5012469d5179f01f5da9ae7c701949a57fb9d439 Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Fri, 11 Aug 2023 18:04:28 -0500
Subject: [PATCH 3/4] Address code review comments
Signed-off-by: Alexander Wels <awels@redhat.com>
---
pkg/virt-controller/watch/vmi.go | 6 ++----
pkg/virt-handler/hotplug-disk/mount.go | 3 +--
2 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 99af8b8cb..e031c35a8 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -1816,10 +1816,8 @@ func (c *VMIController) needsHandleHotplug(hotplugVolumes []*virtv1.Volume, hotp
return true
}
// Determine if the ready volumes have changed compared to the current pod
- for _, attachmentPod := range hotplugAttachmentPods {
- if c.podVolumesMatchesReadyVolumes(attachmentPod, hotplugVolumes) {
- return false
- }
+ if len(hotplugAttachmentPods) == 1 && c.podVolumesMatchesReadyVolumes(hotplugAttachmentPods[0], hotplugVolumes) {
+ return false
}
return len(hotplugVolumes) > 0 || len(hotplugAttachmentPods) > 0
}
diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index 9a5d24747..c0b55046c 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -512,10 +512,9 @@ func (m *volumeMounter) updateBlockMajorMinor(dev uint64, allow bool, manager cg
func (m *volumeMounter) createBlockDeviceFile(basePath *safepath.Path, deviceName string, dev uint64, blockDevicePermissions os.FileMode) error {
if _, err := safepath.JoinNoFollow(basePath, deviceName); errors.Is(err, os.ErrNotExist) {
return mknodCommand(basePath, deviceName, dev, blockDevicePermissions)
- } else if err != nil {
+ } else {
return err
}
- return nil
}
func (m *volumeMounter) mountFileSystemHotplugVolume(vmi *v1.VirtualMachineInstance, volume string, sourceUID types.UID, record *vmiMountTargetRecord, mountDirectory bool) error {
--
2.41.0
From 5abf17fef7ab5433ec7dd155a82b1575660b86d3 Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Mon, 14 Aug 2023 07:58:16 -0500
Subject: [PATCH 4/4] Update pkg/virt-handler/hotplug-disk/mount.go
Co-authored-by: Vasiliy Ulyanov <vulyanov@suse.de>
Signed-off-by: Alexander Wels <awels@redhat.com>
---
pkg/virt-handler/hotplug-disk/mount.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/virt-handler/hotplug-disk/mount.go b/pkg/virt-handler/hotplug-disk/mount.go
index c0b55046c..b1a11d93f 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -677,7 +677,7 @@ func (m *volumeMounter) Unmount(vmi *v1.VirtualMachineInstance) error {
}
} else if m.isDirectoryMounted(&vmi.Status, volumeStatus.Name) {
path, err = m.hotplugDiskManager.GetFileSystemDirectoryTargetPathFromHostView(virtlauncherUID, volumeStatus.Name, false)
- if os.IsExist(err) {
+ if errors.Is(err, os.ErrNotExist) {
// already unmounted or never mounted
continue
}
--
2.41.0
View File
@ -1,194 +0,0 @@
From 61ca1e96363afe403465ed195b8cc808a4a04f06 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk <akalenyu@redhat.com>
Date: Wed, 12 Jul 2023 19:52:04 +0300
Subject: [PATCH 1/2] Don't wait for populator target PVC to be bound
Populator PVCs only reach the Bound phase once the population is done,
as opposed to the previous CDI population flow, which worked on the
target PVC directly.
Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
tests/storage/export.go | 70 ++---------------------------------------
1 file changed, 3 insertions(+), 67 deletions(-)
diff --git a/tests/storage/export.go b/tests/storage/export.go
index d456e2fb1..4fab2aec1 100644
--- a/tests/storage/export.go
+++ b/tests/storage/export.go
@@ -48,7 +48,6 @@ import (
k8sv1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
- storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -230,68 +229,6 @@ var _ = SIGDescribe("Export", func() {
return tests.RunPod(pod)
}
- createTriggerPodForPvc := func(pvc *k8sv1.PersistentVolumeClaim) *k8sv1.Pod {
- volumeName := pvc.GetName()
- podName := fmt.Sprintf("bind-%s", volumeName)
- pod := tests.RenderPod(podName, []string{"/bin/sh", "-c", "sleep 1"}, []string{})
- pod.Spec.Volumes = append(pod.Spec.Volumes, k8sv1.Volume{
- Name: volumeName,
- VolumeSource: k8sv1.VolumeSource{
- PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
- ClaimName: pvc.GetName(),
- },
- },
- })
-
- volumeMode := pvc.Spec.VolumeMode
- if volumeMode != nil && *volumeMode == k8sv1.PersistentVolumeBlock {
- addBlockVolume(pod, volumeName)
- } else {
- addFilesystemVolume(pod, volumeName)
- }
- return tests.RunPodAndExpectCompletion(pod)
- }
-
- isWaitForFirstConsumer := func(storageClassName string) bool {
- sc, err := virtClient.StorageV1().StorageClasses().Get(context.Background(), storageClassName, metav1.GetOptions{})
- Expect(err).ToNot(HaveOccurred())
- return sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
- }
-
- ensurePVCBound := func(pvc *k8sv1.PersistentVolumeClaim) {
- namespace := pvc.Namespace
- if !isWaitForFirstConsumer(*pvc.Spec.StorageClassName) {
- By("Checking for bound claim on non-WFFC storage")
- // Not WFFC, pvc will be bound
- Eventually(func() k8sv1.PersistentVolumeClaimPhase {
- pvc, err := virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), pvc.Name, metav1.GetOptions{})
- Expect(err).ToNot(HaveOccurred())
- return pvc.Status.Phase
- }, 30*time.Second, 1*time.Second).Should(Equal(k8sv1.ClaimBound))
- return
- }
- By("Checking the PVC is pending for WFFC storage")
- Eventually(func() k8sv1.PersistentVolumeClaimPhase {
- pvc, err := virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), pvc.Name, metav1.GetOptions{})
- Expect(err).ToNot(HaveOccurred())
- return pvc.Status.Phase
- }, 15*time.Second, 1*time.Second).Should(Equal(k8sv1.ClaimPending))
-
- By("Creating trigger pod to bind WFFC storage")
- triggerPod := createTriggerPodForPvc(pvc)
- By("Checking the PVC was bound")
- Eventually(func() k8sv1.PersistentVolumeClaimPhase {
- pvc, err := virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), pvc.Name, metav1.GetOptions{})
- Expect(err).ToNot(HaveOccurred())
- return pvc.Status.Phase
- }, 30*time.Second, 1*time.Second).Should(Equal(k8sv1.ClaimBound))
- By("Deleting the trigger pod")
- immediate := int64(0)
- Expect(virtClient.CoreV1().Pods(triggerPod.Namespace).Delete(context.Background(), triggerPod.Name, metav1.DeleteOptions{
- GracePeriodSeconds: &immediate,
- })).To(Succeed())
- }
-
createExportTokenSecret := func(name, namespace string) *k8sv1.Secret {
var err error
secret := &k8sv1.Secret{
@@ -352,6 +289,7 @@ var _ = SIGDescribe("Export", func() {
dv := libdv.NewDataVolume(
libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros), cdiv1.RegistryPullNode),
libdv.WithPVC(libdv.PVCWithStorageClass(sc), libdv.PVCWithVolumeMode(volumeMode)),
+ libdv.WithForceBindAnnotation(),
)
dv, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(), dv, metav1.CreateOptions{})
@@ -362,7 +300,6 @@ var _ = SIGDescribe("Export", func() {
pvc, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(), dv.Name, metav1.GetOptions{})
return err
}, 60*time.Second, 1*time.Second).Should(BeNil(), "persistent volume associated with DV should be created")
- ensurePVCBound(pvc)
By("Making sure the DV is successful")
libstorage.EventuallyDV(dv, 90, HaveSucceeded())
@@ -847,6 +784,7 @@ var _ = SIGDescribe("Export", func() {
dv := libdv.NewDataVolume(
libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros), cdiv1.RegistryPullNode),
libdv.WithPVC(libdv.PVCWithStorageClass(sc)),
+ libdv.WithForceBindAnnotation(),
)
name := dv.Name
@@ -869,12 +807,10 @@ var _ = SIGDescribe("Export", func() {
}, 60*time.Second, 1*time.Second).Should(ContainElement(expectedCond), "export should report missing pvc")
dv, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(), dv, metav1.CreateOptions{})
- var pvc *k8sv1.PersistentVolumeClaim
Eventually(func() error {
- pvc, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(), dv.Name, metav1.GetOptions{})
+ _, err = virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(), dv.Name, metav1.GetOptions{})
return err
}, 60*time.Second, 1*time.Second).Should(BeNil(), "persistent volume associated with DV should be created")
- ensurePVCBound(pvc)
By("Making sure the DV is successful")
libstorage.EventuallyDV(dv, 90, HaveSucceeded())
--
2.41.0
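
The takeaway of the patch above is that, with populators, a Pending PVC is not an error; the test instead force-binds the DataVolume and waits for the DataVolume itself to succeed. The sketch below illustrates only that general idea of polling the DataVolume phase rather than the claim phase; dvPhase, waitForDV and the fake getter are hypothetical, not the kubevirt test helpers.

package main

import (
	"context"
	"fmt"
	"time"
)

// dvPhase is a stand-in for the CDI DataVolume phase; with populators the
// PVC stays Pending until population finishes, so tests watch this instead.
type dvPhase string

const dvSucceeded dvPhase = "Succeeded"

// waitForDV polls a getter until the DataVolume reports Succeeded or the
// context expires.
func waitForDV(ctx context.Context, get func() (dvPhase, error)) error {
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		phase, err := get()
		if err == nil && phase == dvSucceeded {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting for DataVolume: last phase %q, err %v", phase, err)
		case <-tick.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Fake getter that succeeds after a couple of polls.
	n := 0
	err := waitForDV(ctx, func() (dvPhase, error) {
		n++
		if n < 3 {
			return "ImportInProgress", nil
		}
		return dvSucceeded, nil
	})
	fmt.Println("result:", err)
}
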
From 5b44741c1ca7df3b7121dff7db6a52f6599b7144 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk <akalenyu@redhat.com>
Date: Wed, 12 Jul 2023 19:57:57 +0300
Subject: [PATCH 2/2] Don't check for CloneOf/CloneRequest with populator
target PVCs
These annotations simply don't exist (and are not needed) with populators.
Signed-off-by: Alex Kalenyuk <akalenyu@redhat.com>
---
tests/storage/restore.go | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/tests/storage/restore.go b/tests/storage/restore.go
index dffd0f1fe..5a09ca839 100644
--- a/tests/storage/restore.go
+++ b/tests/storage/restore.go
@@ -1776,13 +1776,18 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
}
pvc, err := virtClient.CoreV1().PersistentVolumeClaims(vm.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
+ if pvc.Spec.DataSourceRef != nil {
+ // These annotations only exist pre-k8s-populators flows
+ return
+ }
for _, a := range []string{"k8s.io/CloneRequest", "k8s.io/CloneOf"} {
_, ok := pvc.Annotations[a]
Expect(ok).Should(Equal(shouldExist))
}
}
- createVMFromSource := func() *v1.VirtualMachine {
+ createNetworkCloneVMFromSource := func() *v1.VirtualMachine {
+ // TODO: consider ensuring network clone gets done here using StorageProfile CloneStrategy
dataVolume := libdv.NewDataVolume(
libdv.WithPVCSource(sourceDV.Namespace, sourceDV.Name),
libdv.WithPVC(libdv.PVCWithStorageClass(snapshotStorageClass), libdv.PVCWithVolumeSize("1Gi")),
@@ -1796,7 +1801,7 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
}
DescribeTable("should restore a vm that boots from a network cloned datavolumetemplate", func(restoreToNewVM, deleteSourcePVC bool) {
- vm, vmi = createAndStartVM(createVMFromSource())
+ vm, vmi = createAndStartVM(createNetworkCloneVMFromSource())
checkCloneAnnotations(vm, true)
if deleteSourcePVC {
@@ -1813,7 +1818,7 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() {
)
DescribeTable("should restore a vm that boots from a network cloned datavolume (not template)", func(restoreToNewVM, deleteSourcePVC bool) {
- vm = createVMFromSource()
+ vm = createNetworkCloneVMFromSource()
dv := orphanDataVolumeTemplate(vm, 0)
dv, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(vm.Namespace).Create(context.Background(), dv, metav1.CreateOptions{})
--
2.41.0
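
A condensed Go sketch of the populator-aware guard added above; it takes the PVC directly instead of resolving it from the VM as the real test helper does, so the signature and package layout here are illustrative only.

    package example

    import (
        . "github.com/onsi/gomega"

        k8sv1 "k8s.io/api/core/v1"
    )

    // checkCloneAnnotations asserts the legacy CDI clone annotations, but skips
    // the check for populator-created PVCs: those carry a non-nil
    // spec.dataSourceRef and never get k8s.io/CloneRequest or k8s.io/CloneOf.
    func checkCloneAnnotations(pvc *k8sv1.PersistentVolumeClaim, shouldExist bool) {
        if pvc.Spec.DataSourceRef != nil {
            return
        }
        for _, a := range []string{"k8s.io/CloneRequest", "k8s.io/CloneOf"} {
            _, ok := pvc.Annotations[a]
            Expect(ok).To(Equal(shouldExist))
        }
    }
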

0014-Export-create-populator-compatible-datavolumes-from-.patch (deleted)
@@ -1,56 +0,0 @@
From 28f503c1417df30de3c7db8c14ced7c8985c9612 Mon Sep 17 00:00:00 2001
From: Alexander Wels <awels@redhat.com>
Date: Thu, 13 Jul 2023 14:33:29 -0500
Subject: [PATCH] Export create populator compatible datavolumes from VM
The generated DataVolumes were not compatible with populator
populated sources. In particular the populators would have
a datasource or datasourceRef set.
This commit clears the values so that the target CDI can
properly generate PVCs from the Datavolume
Signed-off-by: Alexander Wels <awels@redhat.com>
---
pkg/storage/export/export/export.go | 3 +++
pkg/storage/export/export/export_test.go | 4 ++++
2 files changed, 7 insertions(+)
diff --git a/pkg/storage/export/export/export.go b/pkg/storage/export/export/export.go
index c1ba57174..51eb69df6 100644
--- a/pkg/storage/export/export/export.go
+++ b/pkg/storage/export/export/export.go
@@ -1429,6 +1429,9 @@ func (ctrl *VMExportController) createExportHttpDvFromPVC(namespace, name string
if pvc != nil {
pvc.Spec.VolumeName = ""
pvc.Spec.StorageClassName = nil
+ // Don't copy datasources, will be populated by CDI with the datavolume
+ pvc.Spec.DataSource = nil
+ pvc.Spec.DataSourceRef = nil
return &cdiv1.DataVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
diff --git a/pkg/storage/export/export/export_test.go b/pkg/storage/export/export/export_test.go
index 15941984d..a341bdca6 100644
--- a/pkg/storage/export/export/export_test.go
+++ b/pkg/storage/export/export/export_test.go
@@ -1310,12 +1310,16 @@ var _ = Describe("Export controller", func() {
It("Should generate DataVolumes from VM", func() {
pvc := createPVC("pvc", string(cdiv1.DataVolumeKubeVirt))
+ pvc.Spec.DataSource = &k8sv1.TypedLocalObjectReference{}
+ pvc.Spec.DataSourceRef = &k8sv1.TypedObjectReference{}
pvcInformer.GetStore().Add(pvc)
vm := createVMWithDVTemplateAndPVC()
dvs := controller.generateDataVolumesFromVm(vm)
Expect(dvs).To(HaveLen(1))
Expect(dvs[0]).ToNot(BeNil())
Expect(dvs[0].Name).To((Equal("pvc")))
+ Expect(dvs[0].Spec.PVC.DataSource).To(BeNil())
+ Expect(dvs[0].Spec.PVC.DataSourceRef).To(BeNil())
})
})
--
2.41.0
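
The change above can be read as the following condensed Go sketch. It is not the controller's actual createExportHttpDvFromPVC; the HTTP source wiring is deliberately elided and the helper name is illustrative.

    package example

    import (
        k8sv1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
    )

    // exportDVFromPVC derives a DataVolume for the export target from a source PVC.
    // Binding-specific and populator-specific fields are cleared so that CDI on the
    // importing side can create and populate a fresh PVC on its own.
    func exportDVFromPVC(name string, pvc *k8sv1.PersistentVolumeClaim) *cdiv1.DataVolume {
        spec := pvc.Spec.DeepCopy()
        spec.VolumeName = ""        // the target cluster provisions its own PV
        spec.StorageClassName = nil // let the target choose a storage class
        spec.DataSource = nil       // populator references do not exist on the target
        spec.DataSourceRef = nil
        return &cdiv1.DataVolume{
            ObjectMeta: metav1.ObjectMeta{Name: name},
            Spec: cdiv1.DataVolumeSpec{
                PVC: spec,
                // Source (an HTTP source pointing at the export server) is filled
                // in by the controller and omitted here.
            },
        }
    }
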

0015-tests-Delete-VMI-prior-to-NFS-server-pod.patch (deleted)
@@ -1,147 +0,0 @@
From 1d2feb4ac5ac5f26ccd4abf2270caf0599ae893c Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Mon, 21 Aug 2023 11:32:56 +0200
Subject: [PATCH 1/3] tests: Delete VMI prior to NFS server pod
Kubelet fails to umount the NFS volume and gets stuck when performing
pod cleanup if the server has already gone. Ensure that the VMI is
deleted first and the corresponding volume is released (current global
tests cleanup hook does not guarantee deletion order).
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
tests/storage/storage.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/tests/storage/storage.go b/tests/storage/storage.go
index 672ba2355..a4dfc3dd5 100644
--- a/tests/storage/storage.go
+++ b/tests/storage/storage.go
@@ -216,6 +216,11 @@ var _ = SIGDescribe("Storage", func() {
var pvName string
var nfsPod *k8sv1.Pod
AfterEach(func() {
+ // Ensure VMI is deleted before bringing down the NFS server
+ err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
+ Expect(err).ToNot(HaveOccurred(), failedDeleteVMI)
+ libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
+
if targetImagePath != testsuite.HostPathAlpine {
tests.DeleteAlpineWithNonQEMUPermissions()
}
--
2.41.0
From 029cb1fd6fee273f5b43615674d8c54143a3ef47 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Wed, 23 Aug 2023 14:41:57 +0200
Subject: [PATCH 2/3] tests: Delete AlpineWithNonQEMUPermissions image
Previously the disk image was not deleted because targetImagePath was
not set properly. The condition in AfterEach was never positive:
if targetImagePath != testsuite.HostPathAlpine {
tests.DeleteAlpineWithNonQEMUPermissions()
}
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
tests/storage/storage.go | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tests/storage/storage.go b/tests/storage/storage.go
index a4dfc3dd5..90bb6e871 100644
--- a/tests/storage/storage.go
+++ b/tests/storage/storage.go
@@ -231,11 +231,10 @@ var _ = SIGDescribe("Storage", func() {
var nodeName string
// Start the VirtualMachineInstance with the PVC attached
if storageEngine == "nfs" {
- targetImage := targetImagePath
if !imageOwnedByQEMU {
- targetImage, nodeName = tests.CopyAlpineWithNonQEMUPermissions()
+ targetImagePath, nodeName = tests.CopyAlpineWithNonQEMUPermissions()
}
- nfsPod = storageframework.InitNFS(targetImage, nodeName)
+ nfsPod = storageframework.InitNFS(targetImagePath, nodeName)
pvName = createNFSPvAndPvc(family, nfsPod)
} else {
pvName = tests.DiskAlpineHostPath
--
2.41.0
From ca0be7fc564ce755d3e88c6f0ea6afcfc32b60b7 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulyanov@suse.de>
Date: Thu, 24 Aug 2023 11:34:47 +0200
Subject: [PATCH 3/3] Hack nfs-server image to run it 'graceless'
The NFS grace period is set to 90 seconds and it stalls the clients
trying to access the share right after the server start. This may affect
the tests and lead to timeouts so disable the setting.
Signed-off-by: Vasiliy Ulyanov <vulyanov@suse.de>
---
images/nfs-server/BUILD.bazel | 12 ++++++++++++
images/nfs-server/entrypoint.sh | 12 ++++++++++++
2 files changed, 24 insertions(+)
create mode 100644 images/nfs-server/entrypoint.sh
diff --git a/images/nfs-server/BUILD.bazel b/images/nfs-server/BUILD.bazel
index 343d72cf1..8494fcac5 100644
--- a/images/nfs-server/BUILD.bazel
+++ b/images/nfs-server/BUILD.bazel
@@ -2,6 +2,14 @@ load(
"@io_bazel_rules_docker//container:container.bzl",
"container_image",
)
+load("@rules_pkg//:pkg.bzl", "pkg_tar")
+
+pkg_tar(
+ name = "entrypoint",
+ srcs = [":entrypoint.sh"],
+ mode = "0775",
+ package_dir = "/",
+)
container_image(
name = "nfs-server-image",
@@ -13,6 +21,7 @@ container_image(
"@io_bazel_rules_go//go/platform:linux_arm64": "@nfs-server_aarch64//image",
"//conditions:default": "@nfs-server//image",
}),
+ cmd = ["/entrypoint.sh"],
ports = [
"111/udp",
"2049/udp",
@@ -25,5 +34,8 @@ container_image(
"32766/tcp",
"32767/tcp",
],
+ tars = [
+ ":entrypoint",
+ ],
visibility = ["//visibility:public"],
)
diff --git a/images/nfs-server/entrypoint.sh b/images/nfs-server/entrypoint.sh
new file mode 100644
index 000000000..aa40154cd
--- /dev/null
+++ b/images/nfs-server/entrypoint.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+# The NFS grace period is set to 90 seconds and it stalls the clients
+# trying to access the share right after the server start. This may affect
+# the tests and lead to timeouts so disable the setting.
+sed -i"" \
+ -e "s#Grace_Period = 90#Graceless = true#g" \
+ /opt/start_nfs.sh
+
+exec /opt/start_nfs.sh
--
2.41.0
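
The ordering constraint from the first patch in this series, condensed into a standalone Go sketch; virtClient, vmi and the wrapper function are assumed to be provided by the surrounding Ginkgo suite and are not part of the patch.

    package example

    import (
        "context"

        . "github.com/onsi/ginkgo/v2"
        . "github.com/onsi/gomega"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        v1 "kubevirt.io/api/core/v1"
        "kubevirt.io/client-go/kubecli"
        "kubevirt.io/kubevirt/tests/libwait"
    )

    // registerVMICleanup deletes the VMI and waits for it to disappear before any
    // later cleanup removes the NFS server pod, so kubelet never has to unmount an
    // NFS volume whose server is already gone.
    func registerVMICleanup(virtClient kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance) {
        AfterEach(func() {
            err := virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
            Expect(err).ToNot(HaveOccurred())
            libwait.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
        })
    }
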

_service
@@ -1,7 +1,7 @@
<services>
-<service name="tar_scm" mode="disabled">
+<service name="tar_scm" mode="manual">
<param name="filename">kubevirt</param>
-<param name="revision">v1.0.0</param>
+<param name="revision">v1.0.1</param>
<param name="scm">git</param>
<param name="submodules">disable</param>
<param name="url">https://github.com/kubevirt/kubevirt</param>
@@ -9,10 +9,10 @@
<param name="versionrewrite-pattern">[v]?([^\+]+)(.*)</param>
<param name="versionrewrite-replacement">\1</param>
</service>
-<service name="recompress" mode="disabled">
+<service name="recompress" mode="manual">
<param name="file">*.tar</param>
<param name="compression">gz</param>
</service>
-<service name="set_version" mode="disabled"/>
-<service name="download_files" mode="disabled"/>
+<service name="set_version" mode="manual"/>
+<service name="download_files" mode="manual"/>
</services>

disks-images-provider.yaml
@@ -22,7 +22,7 @@ spec:
serviceAccountName: kubevirt-testing
containers:
- name: target
-image: quay.io/kubevirt/disks-images-provider:v1.0.0
+image: quay.io/kubevirt/disks-images-provider:v1.0.1
imagePullPolicy: Always
lifecycle:
preStop:

kubevirt-1.0.0.tar.gz (deleted)
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:af2e9ec3e7422b7a3fa769080d66db3faba49aaf25b41a04e451fc5171018952
-size 15289113

kubevirt-1.0.1.tar.gz (new file)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a150bb410477b6e3f4f46bc0adba5f1c173eb1131da70c9715728c457f2b0304
+size 15854619

kubevirt.changes
@@ -1,3 +1,22 @@
+-------------------------------------------------------------------
+Wed Oct 18 07:41:49 UTC 2023 - Vasily Ulyanov <vasily.ulyanov@suse.com>
+
+- Update to version 1.0.1
+  Release notes https://github.com/kubevirt/kubevirt/releases/tag/v1.0.1
+- Drop upstreamed patches
+  0002-ksm-Access-sysfs-from-the-host-filesystem.patch
+  0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch
+  0006-isolation-close-file-when-exits.patch
+  0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch
+  0008-fix-ticker-leak.patch
+  0009-tests-Run-helper-pod-as-qemu-107-user.patch
+  0010-Fix-PR-leftover-mount-and-perms.patch
+  0011-Fix-Aggregated-Discovery.patch
+  0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
+  0013-Adapt-e2e-tests-to-CDI-1.57.0.patch
+  0014-Export-create-populator-compatible-datavolumes-from-.patch
+  0015-tests-Delete-VMI-prior-to-NFS-server-pod.patch
+
-------------------------------------------------------------------
Mon Sep 18 12:32:00 UTC 2023 - Vasily Ulyanov <vasily.ulyanov@suse.com>

kubevirt.spec
@@ -17,7 +17,7 @@
Name: kubevirt
-Version: 1.0.0
+Version: 1.0.1
Release: 0
Summary: Container native virtualization
License: Apache-2.0
@@ -29,20 +29,8 @@ Source2: kubevirt_containers_meta.service
Source3: %{url}/releases/download/v%{version}/disks-images-provider.yaml
Source100: %{name}-rpmlintrc
Patch1: 0001-Fix-qemu-system-lookup.patch
-Patch2: 0002-ksm-Access-sysfs-from-the-host-filesystem.patch
Patch3: 0003-Virtiofs-Remove-duplicated-functional-tests.patch
-Patch4: 0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch
Patch5: 0005-Support-multiple-watchdogs-in-the-domain-schema.patch
-Patch6: 0006-isolation-close-file-when-exits.patch
-Patch7: 0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch
-Patch8: 0008-fix-ticker-leak.patch
-Patch9: 0009-tests-Run-helper-pod-as-qemu-107-user.patch
-Patch10: 0010-Fix-PR-leftover-mount-and-perms.patch
-Patch11: 0011-Fix-Aggregated-Discovery.patch
-Patch12: 0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
-Patch13: 0013-Adapt-e2e-tests-to-CDI-1.57.0.patch
-Patch14: 0014-Export-create-populator-compatible-datavolumes-from-.patch
-Patch15: 0015-tests-Delete-VMI-prior-to-NFS-server-pod.patch
BuildRequires: glibc-devel-static
BuildRequires: golang-packaging
BuildRequires: pkgconfig