8547e28bd5
  23233-hvm-cr-access.patch
  23234-svm-decode-assist-base.patch
  23235-svm-decode-assist-crs.patch
  23236-svm-decode-assist-invlpg.patch
  23238-svm-decode-assist-insn-fetch.patch
  23303-cpufreq-misc.patch
  23304-amd-oprofile-strings.patch
  23305-amd-fam15-xenoprof.patch
  23306-amd-fam15-vpmu.patch
  23334-amd-fam12+14-vpmu.patch
  23338-vtd-force-intremap.patch
- fate#310957 - Update to Xen 4.1.1-rc1 c/s 23064
- xentrace: dynamic tracebuffer allocation
  xen-unstable.xentrace.dynamic_tbuf.patch
  xen-unstable.xentrace.empty_t_info_pages.patch
  xen-unstable.xentrace.verbose.patch
  xen-unstable.xentrace.no_gdprintk.patch
  xen-unstable.xentrace.comments.patch
  xen-unstable.xentrace.printk_prefix.patch
  xen-unstable.xentrace.remove_debug_printk.patch
  xen-unstable.xentrace.t_info_pages-formula.patch
  xen-unstable.xentrace.register_cpu_notifier-boot_time.patch
  xen-unstable.xentrace.t_info_page-overflow.patch
  xen-unstable.xentrace.t_info_first_offset.patch
  xen-unstable.xentrace.data_size__read_mostly.patch
OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=124
Improve check_device_status to handle HA cases

In an HA environment the xenstore status sometimes changes but ev.wait()
never receives the corresponding signal, so it blocks until the timeout
expires and an incorrect device status is returned. To fix this, do not
rely on the ev.wait() result alone; read xenstore directly to obtain the
correct device status.
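The hunks below implement this by replacing the single ev.wait() with a
series of short waits (DEVICE_CREATE_TIMEOUT/50 each), re-reading the
status path after every slice. A minimal, self-contained sketch of that
pattern follows; the names wait_for_status, read_status and the demo
state dict are illustrative only, not part of xend or of this patch:

    import threading

    def wait_for_status(ev, read_status, timeout, slices=50):
        # Wait in short slices instead of one long blocking wait.  Even if
        # the watch event is lost, the periodic re-read of the status node
        # still observes the state change before the full timeout expires.
        for _ in range(1, slices):
            ev.wait(timeout / float(slices))
            status = read_status()   # stands in for xstransact.Read(statusPath)
            if status is not None:
                return status
        return None                  # genuine timeout

    # Tiny demonstration: the status is already present, so the first slice
    # returns it even though the event was never signalled.
    state = {'status': 'connected'}
    ev = threading.Event()
    print(wait_for_status(ev, lambda: state['status'], timeout=5.0))
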
Index: xen-4.1.1-testing/tools/python/xen/xend/server/DevController.py
===================================================================
--- xen-4.1.1-testing.orig/tools/python/xen/xend/server/DevController.py
+++ xen-4.1.1-testing/tools/python/xen/xend/server/DevController.py
@@ -149,7 +149,10 @@ class DevController:
         (status, err) = self.waitForBackend(devid)
 
         if status == Timeout:
-            self.destroyDevice(devid, False)
+            #Clean timeout backend resource
+            dev = self.convertToDeviceNumber(devid)
+            self.writeBackend(dev, HOTPLUG_STATUS_NODE, HOTPLUG_STATUS_ERROR)
+            self.destroyDevice(devid, True)
             raise VmError("Device %s (%s) could not be connected. "
                           "Hotplug scripts not working." %
                           (devid, self.deviceClass))
@@ -554,7 +557,17 @@ class DevController:
 
             xswatch(statusPath, hotplugStatusCallback, ev, result)
 
-            ev.wait(DEVICE_CREATE_TIMEOUT)
+            for i in range(1, 50):
+                ev.wait(DEVICE_CREATE_TIMEOUT/50)
+                status = xstransact.Read(statusPath)
+                if status is not None:
+                    if status == HOTPLUG_STATUS_ERROR:
+                        result['status'] = Error
+                    elif status == HOTPLUG_STATUS_BUSY:
+                        result['status'] = Busy
+                    else:
+                        result['status'] = Connected
+                    break
 
             err = xstransact.Read(backpath, HOTPLUG_ERROR_NODE)
 
@@ -571,7 +584,12 @@ class DevController:
 
         xswatch(statusPath, deviceDestroyCallback, ev, result)
 
-        ev.wait(DEVICE_DESTROY_TIMEOUT)
+        for i in range(1, 50):
+            ev.wait(DEVICE_DESTROY_TIMEOUT/50)
+            status = xstransact.Read(statusPath)
+            if status is None:
+                result['status'] = Disconnected
+                break
 
         return result['status']
 