Accepting request 242277 from Virtualization

Bug fixes for os13.2

OBS-URL: https://build.opensuse.org/request/show/242277
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/virt-manager?expand=0&rev=106
Stephan Kulow 2014-07-26 07:41:56 +00:00 committed by Git OBS Bridge
commit 406c2cd70c
7 changed files with 147 additions and 13 deletions

@@ -1,3 +1,24 @@
-------------------------------------------------------------------
Tue Jul 22 16:36:11 MDT 2014 - carnold@suse.com
- bnc#888251 - SLES 12 Xen PV guest fails to install using network
NFS install method
virtinst-nfs-install-sanitize.patch
-------------------------------------------------------------------
Tue Jul 22 09:17:12 MDT 2014 - carnold@suse.com
- bnc#887868 - libvirt: shouldn't detect pool's status while
connecting to hypervisor
virtinst-refresh_before_fetch_pool.patch (Chun Yan Liu)
-------------------------------------------------------------------
Thu Jul 21 09:38:19 MDT 2014 - carnold@suse.com
- bnc#888173 - KVM: Unable to install: no console output from
virt-install
virtman-add-s390x-arch-support.patch
-------------------------------------------------------------------
Thu Jul 17 15:45:19 MDT 2014 - carnold@suse.com

@@ -15,7 +15,6 @@
# Please submit bugfixes or comments via http://bugs.opensuse.org/
#
%define with_guestfs 0
%define askpass_package "openssh-askpass"
%define qemu_user "qemu"
@@ -28,7 +27,7 @@
Name: virt-manager
Version: 1.0.1
Release: 0
Release: 10.5
Summary: Virtual Machine Manager
License: GPL-2.0+
Group: System/Monitoring
@@ -119,6 +118,8 @@ Patch160: virtinst-detect-windows-media.patch
Patch161: virtinst-xenbus-disk-index-fix.patch
Patch162: virtinst-set-cache-mode-unsafe-for-install.patch
Patch163: virtinst-add-default-rng-device.patch
Patch164: virtinst-refresh_before_fetch_pool.patch
Patch165: virtinst-nfs-install-sanitize.patch
BuildArch: noarch
BuildRoot: %{_tmppath}/%{name}-%{version}-build
@@ -290,6 +291,8 @@ machine).
%patch161 -p1
%patch162 -p1
%patch163 -p1
%patch164 -p1
%patch165 -p1
%build
%if %{qemu_user}

@@ -1,8 +1,10 @@
bnc#885308
--- virt-manager-1.0.1/virtinst/guest.py.orig 2014-07-17 15:40:21.724772127 -0600
+++ virt-manager-1.0.1/virtinst/guest.py 2014-07-17 15:40:27.921811664 -0600
@@ -625,6 +625,15 @@ class Guest(XMLBuilder):
Index: virt-manager-1.0.1/virtinst/guest.py
===================================================================
--- virt-manager-1.0.1.orig/virtinst/guest.py
+++ virt-manager-1.0.1/virtinst/guest.py
@@ -627,6 +627,15 @@ class Guest(XMLBuilder):
return
self.add_device(virtinst.VirtualGraphics(self.conn))
@@ -18,7 +20,7 @@ bnc#885308
def add_default_devices(self):
self.add_default_graphics()
self.add_default_video_device()
@@ -632,6 +641,7 @@ class Guest(XMLBuilder):
@@ -634,6 +643,7 @@ class Guest(XMLBuilder):
self.add_default_console_device()
self.add_default_usb_controller()
self.add_default_channels()

@@ -0,0 +1,64 @@
bnc#888251
Index: virt-manager-1.0.1/virtinst/util.py
===================================================================
--- virt-manager-1.0.1.orig/virtinst/util.py
+++ virt-manager-1.0.1/virtinst/util.py
@@ -626,3 +626,22 @@ def getInstallRepos(enabled_sources_only
zypper_output.insert(0, dom0_inst_source)
return (index_dom0, zypper_output)
+def sanitize_url(url):
+    """
+    Do nothing for http or ftp, but make sure nfs is in the expected format
+    """
+    if url.startswith("nfs://"):
+        # Convert RFC compliant NFS nfs://server/path/to/distro
+        # to what mount/anaconda expect nfs:server:/path/to/distro
+        # and carry the latter form around internally
+        url = "nfs:" + url[6:]
+
+        # If we need to add the : after the server
+        index = url.find("/", 4)
+        if index == -1:
+            raise ValueError(_("Invalid NFS format: No path specified."))
+        if url[index - 1] != ":":
+            url = url[:index] + ":" + url[index:]
+
+    return url
+
Index: virt-manager-1.0.1/virtinst/distroinstaller.py
===================================================================
--- virt-manager-1.0.1.orig/virtinst/distroinstaller.py
+++ virt-manager-1.0.1/virtinst/distroinstaller.py
@@ -50,6 +50,8 @@ def _sanitize_url(url):
"""
Do nothing for http or ftp, but make sure nfs is in the expected format
"""
+    # This sanitization is done later, by virtinst.util.sanitize_url()
+ return url
if url.startswith("nfs://"):
# Convert RFC compliant NFS nfs://server/path/to/distro
# to what mount/anaconda expect nfs:server:/path/to/distro
Index: virt-manager-1.0.1/virtinst/urlfetcher.py
===================================================================
--- virt-manager-1.0.1.orig/virtinst/urlfetcher.py
+++ virt-manager-1.0.1/virtinst/urlfetcher.py
@@ -33,6 +33,7 @@ import urlparse
import urlgrabber.grabber as grabber
from virtinst import osdict
+from virtinst import util
#########################################################################
@@ -210,7 +211,8 @@ class _MountedImageFetcher(_LocalImageFe
logging.debug("Preparing mount at " + self.srcdir)
if self.location.startswith("nfs:"):
- cmd = [mountcmd, "-o", "ro", self.location[4:], self.srcdir]
+ url = util.sanitize_url(self.location)
+ cmd = [mountcmd, "-o", "ro", url[4:], self.srcdir]
else:
if stat.S_ISBLK(os.stat(self.location)[stat.ST_MODE]):
mountopt = "ro"
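Taken together, the new virtinst.util.sanitize_url() helper and the
_MountedImageFetcher change above rewrite RFC-style NFS URLs into the
nfs:server:/path form that mount and anaconda expect, and the mount command
is then built from the sanitized form. A minimal usage sketch, assuming the
patched virtinst package is importable (the URLs are only examples):

from virtinst import util

# http/ftp locations pass through unchanged
assert util.sanitize_url("http://server/distro/") == "http://server/distro/"

# RFC-compliant NFS URLs are rewritten for mount/anaconda
assert util.sanitize_url("nfs://server/path/to/distro") == "nfs:server:/path/to/distro"

# _MountedImageFetcher then strips the leading "nfs:" and runs roughly:
#   mount -o ro server:/path/to/distro <srcdir>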

@@ -0,0 +1,38 @@
Refresh pool status before fetch_pools.
Currently, when connecting to the hypervisor, if a pool is still marked
active but its target path has already been deleted (or the pool is broken
for some other reason) and libvirtd has not yet refreshed its status,
fetch_pools fails. That makes the "connecting to hypervisor" step report an
error and exit, so the whole connection attempt fails.
With this patch, pool status is always refreshed before the pools are
fetched, so the state libvirtd reports matches reality and stale status can
no longer break the hypervisor connection.
Signed-off-by: Chunyan Liu <cyliu@suse.com>
Index: virt-manager-1.0.1/virtinst/pollhelpers.py
===================================================================
--- virt-manager-1.0.1.orig/virtinst/pollhelpers.py
+++ virt-manager-1.0.1/virtinst/pollhelpers.py
@@ -138,6 +138,19 @@ def fetch_pools(backend, origmap, build_
if backend.check_support(
backend.SUPPORT_CONN_LISTALLSTORAGEPOOLS):
+
+        # Refresh pools before polling them.  For pools that are marked
+        # 'active' but whose target path no longer exists (or that are
+        # broken for some other reason) and whose status libvirtd has
+        # not refreshed yet, this forces a refresh so the pool is
+        # reported as 'inactive'.
+        objs = backend.listAllStoragePools()
+        for obj in objs:
+            try:
+                obj.refresh(0)
+            except Exception, e:
+                pass
+
return _new_poll_helper(origmap, name,
backend.listAllStoragePools,
"UUIDString", build_func)

@@ -10,7 +10,7 @@ Index: virt-manager-1.0.1/virtinst/guest.py
import logging
import urlgrabber.progress as progress
@@ -727,14 +728,22 @@ class Guest(XMLBuilder):
@@ -729,14 +730,22 @@ class Guest(XMLBuilder):
self.emulator = None
return

@@ -42,23 +42,29 @@ Index: virt-manager-1.0.1/virtinst/guest.py
self.skip_default_sound = False
self.skip_default_usbredir = False
- self.skip_default_graphics = False
+ if self.os.arch == "s390x":
+ if self.os.is_s390x():
+ self.skip_default_graphics = True
+ else:
+ self.skip_default_graphics = False
self.x86_cpu_default = self.cpu.SPECIAL_MODE_HOST_MODEL_ONLY
self._os_variant = None
@@ -553,7 +556,7 @@ class Guest(XMLBuilder):
@@ -549,11 +552,13 @@ class Guest(XMLBuilder):
self.conn.check_support(
self.conn.SUPPORT_CONN_VIRTIO_CONSOLE)):
dev.target_type = "virtio"
+ elif self.os.is_s390x():
+ dev.target_type = "sclp"
self.add_device(dev)
def add_default_video_device(self):
- if self.os.is_container():
+ if self.os.is_container() or self.os.arch == "s390x":
+ if self.os.is_container() or self.os.is_s390x():
return
if self.get_devices("video"):
return
@@ -598,7 +601,7 @@ class Guest(XMLBuilder):
@@ -598,7 +603,7 @@ class Guest(XMLBuilder):
return
if self.os.is_container():
return
@@ -67,12 +73,12 @@ Index: virt-manager-1.0.1/virtinst/guest.py
return
self.add_device(virtinst.VirtualGraphics(self.conn))
@@ -804,7 +807,7 @@ class Guest(XMLBuilder):
@@ -804,7 +809,7 @@ class Guest(XMLBuilder):
if not self._lookup_osdict_key(key, False):
return False
- if self.os.is_x86():
+ if self.os.is_x86() or self.os.arch == "s390x":
+ if self.os.is_x86() or self.os.is_s390x():
return True
if (self.os.is_arm_vexpress() and
self.os.dtb and
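The hunks above replace the literal self.os.arch == "s390x" checks with
self.os.is_s390x() and give s390x guests an SCLP console target while
skipping the default graphics device (bnc#888173). The is_s390x() helper
itself is not shown in this excerpt; a plausible sketch, as a hypothetical
method on the guest OS XML class alongside the existing is_x86() and
is_container() helpers, would be:

    # hypothetical helper, assumed to live on the guest OS XML class
    def is_s390x(self):
        return self.arch == "s390x"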