05bedd5488
named ext" xm-create-xflag.patch - Fix xen-utils to cope with xen-unstable c/s 21483 - xz-devel is available since 11.2, make it optional for SLES11SP1 - bnc#665610 - xm console > 1 to same VM messes up both consoles Upstream rejected due to portability concern, see http://lists.xensource.com/archives/html/xen-devel/2011-02/msg00942.html xenconsole-no-multiple-connections.patch - Enable support for kernel decompression for gzip, bzip2, and LZMA so that kernels compressed with any of these methods can be launched. - Update to Xen 4.1.0 c/s 22861 OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=102
Index: xen-4.1.0-testing/tools/examples/xend-config.sxp
===================================================================
--- xen-4.1.0-testing.orig/tools/examples/xend-config.sxp
+++ xen-4.1.0-testing/tools/examples/xend-config.sxp
@@ -321,6 +321,65 @@
 # device assignment could really work properly even after we do this.
 #(pci-passthrough-strict-check yes)
 
+# Domain Locking
+# In a multihost environment, domain locking prevents simultaneously
+# running a domain on more than one host.
+#
+# If enabled, xend will execute an external lock utility (defined below)
+# on each domain start and stop event. Disabled by default. Set to yes
+# to enable domain locking.
+#
+#(xend-domain-lock no)
+
+# Path where the domain lock is stored if xend-domain-lock is enabled.
+# Note: This path must be accessible to all VM Servers participating
+#       in domain locking, e.g. by specifying a shared mount point.
+#       The lock is placed in /<xend-domain-lock-path>/<domain-uuid>.
+#       Default is /var/lib/xen/images/vm_locks/
+#
+#(xend-domain-lock-path /var/lib/images/vm_locks)
+
+# External locking utility called by xend for acquiring/releasing
+# the domain lock. By default /etc/xen/scripts/domain-lock will be used
+# if xend-domain-lock is set to yes. Set to the path of a custom locking
+# utility to override the default.
+#
+# Synopsis of lock-util:
+# lock-util [-l|-u] -n <vm name> -i <vm uuid> -p <physical host> path
+#   -l           Acquire (create) lock
+#   -u           Remove lock
+#   -n vm-name   Name of domain
+#   -i vm-id     Id or UUID of domain
+#   -p phy-host  Name of physical host (dom0)
+#   path         /<xend-domain-lock-path>/<vm-uuid>
+#   Return 0 on success, non-zero on error.
+#
+# lock-util [-s] path
+#   -s           Lock status. If the lock is acquired, print any contents
+#                on stdout and return 0. Return non-zero if the lock is
+#                available.
+#   path         /<xend-domain-lock-path>/<vm-uuid>
+#   If the lock is acquired, print any contents on stdout and return 0.
+#   Return non-zero if the lock is available.
+#
+# Default lock-util behavior:
+#   On a domain start event, domain-lock will create and flock(1)
+#   /<xend-domain-lock-path>/<vm-uuid>/lock. Every two seconds it
+#   will write <vm-name>, <vm-id>, <vm-host>, and <tick> to the lock.
+#   <tick> is a running counter.
+#   On a domain stop event, domain-lock will unlock and remove
+#   /<xend-domain-lock-path>/<vm-uuid>/lock.
+#
+# Note: If xend-domain-lock-path is a cluster-unaware file system,
+#       administrator intervention may be required to remove stale
+#       locks. Consider two hosts using NFS for xend-domain-lock-path
+#       when HostA, running vm1, crashes. HostB could not acquire a
+#       lock for vm1 since the NFS server holds an exclusive lock
+#       acquired by HostA. The lock file must be manually removed
+#       before starting vm1 on HostB.
+#
+#(xend-domain-lock-utility domain-lock)
+
 # If we have a very big scsi device configuration, start of xend is slow,
 # because xend scans all the device paths to build its internal PSCSI device
 # list. If we need only a few devices for assigning to a guest, we can reduce
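
The xend-config.sxp comments above fully specify the lock-util command-line contract. As a minimal, hedged sketch of that contract (not part of the patch), this is how the default utility would be driven around a domain's lifetime. The domain name, UUID, and lock directory below are made-up placeholders, and the directory is created by hand here because the xend code that normally prepares it is outside this excerpt:

  #!/bin/bash
  # Illustration only: placeholder values, not taken from the patch.
  LOCK_BASE=/var/lib/xen/images/vm_locks          # xend-domain-lock-path default
  VM_NAME=demo-vm                                 # placeholder domain name
  VM_UUID=00000000-0000-0000-0000-000000000001    # placeholder UUID
  LOCK_DIR=$LOCK_BASE/$VM_UUID
  mkdir -p "$LOCK_DIR"                            # normally prepared by xend

  # Domain start event: acquire (create) the lock.
  /etc/xen/scripts/domain-lock -l -n "$VM_NAME" -i "$VM_UUID" -p "$(hostname)" "$LOCK_DIR"

  # Status query: prints the lock contents and returns 0 while the lock is held.
  /etc/xen/scripts/domain-lock -s "$LOCK_DIR"

  # Domain stop event: release and remove the lock.
  /etc/xen/scripts/domain-lock -u -n "$VM_NAME" -i "$VM_UUID" -p "$(hostname)" "$LOCK_DIR"
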
Index: xen-4.1.0-testing/tools/python/xen/xend/XendOptions.py
===================================================================
--- xen-4.1.0-testing.orig/tools/python/xen/xend/XendOptions.py
+++ xen-4.1.0-testing/tools/python/xen/xend/XendOptions.py
@@ -154,6 +154,17 @@ class XendOptions:
     use loose check automatically if necessary."""
     pci_dev_assign_strict_check_default = True
 
+    """Default for the flag indicating whether xend should create
+    a lock file for domains when they are started."""
+    xend_domain_lock = 'no'
+
+    """Default domain lock storage path."""
+    xend_domain_lock_path_default = '/var/lib/xen/images/vm_locks'
+
+    """Default script to acquire/release domain lock"""
+    xend_domain_lock_utility = auxbin.scripts_dir() + "/domain-lock"
+
+
     def __init__(self):
         self.configure()
 
@@ -401,6 +412,24 @@ class XendOptions:
         else:
             return None
 
+    def get_xend_domain_lock(self):
+        """Get the flag indicating whether xend should create a lock file
+        for domains when they are started."""
+        return self.get_config_bool("xend-domain-lock", self.xend_domain_lock)
+
+    def get_xend_domain_lock_path(self):
+        """Get the path for domain lock storage
+        """
+        return self.get_config_string("xend-domain-lock-path", self.xend_domain_lock_path_default)
+
+    def get_xend_domain_lock_utility(self):
+        s = self.get_config_string('xend-domain-lock-utility')
+
+        if s:
+            return os.path.join(auxbin.scripts_dir(), s)
+        else:
+            return self.xend_domain_lock_utility
+
 
     def get_vnc_tls(self):
         return self.get_config_string('vnc-tls', self.xend_vnc_tls)
Index: xen-4.1.0-testing/tools/python/xen/xend/XendCheckpoint.py
===================================================================
--- xen-4.1.0-testing.orig/tools/python/xen/xend/XendCheckpoint.py
+++ xen-4.1.0-testing/tools/python/xen/xend/XendCheckpoint.py
@@ -133,6 +133,8 @@ def save(fd, dominfo, network, live, dst
                 dominfo.shutdown('suspend')
                 dominfo.waitForSuspend()
             if line in ('suspend', 'suspended'):
+                if checkpoint == False:
+                    dominfo.release_running_lock(domain_name)
                 dominfo.migrateDevices(network, dst, DEV_MIGRATE_STEP2,
                                        domain_name)
                 log.info("Domain %d suspended.", dominfo.getDomid())
@@ -410,6 +412,7 @@ def restore(xd, fd, dominfo = None, paus
         if not paused:
             dominfo.unpause()
 
+        dominfo.acquire_running_lock()
         return dominfo
     except Exception, exn:
         dominfo.destroy()
Index: xen-4.1.0-testing/tools/hotplug/Linux/Makefile
===================================================================
--- xen-4.1.0-testing.orig/tools/hotplug/Linux/Makefile
+++ xen-4.1.0-testing/tools/hotplug/Linux/Makefile
@@ -22,6 +22,7 @@ XEN_SCRIPTS += vtpm vtpm-delete
 XEN_SCRIPTS += xen-hotplug-cleanup
 XEN_SCRIPTS += external-device-migrate
 XEN_SCRIPTS += vscsi
+XEN_SCRIPTS += domain-lock vm-monitor
 XEN_SCRIPT_DATA = xen-script-common.sh locking.sh logging.sh
 XEN_SCRIPT_DATA += xen-hotplug-common.sh xen-network-common.sh vif-common.sh
 XEN_SCRIPT_DATA += block-common.sh vtpm-common.sh vtpm-hotplug-common.sh
Index: xen-4.1.0-testing/tools/hotplug/Linux/domain-lock
===================================================================
--- /dev/null
+++ xen-4.1.0-testing/tools/hotplug/Linux/domain-lock
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+basedir=$(dirname "$0")
+
+usage() {
+    echo "usage: domain-lock [-l|-u] -n <vm name> -i <vm uuid> -p <physical host> path"
+    echo "usage: domain-lock [-s] path"
+    echo ""
+    echo "-l    lock"
+    echo "-u    unlock"
+    echo "-s    status (default)"
+    echo "-n    Virtual Machine name"
+    echo "-i    Virtual Machine Id or UUID"
+    echo "-p    Virtual Machine Server (physical host) name"
+    echo "path  A per-VM, unique location where external lock will be managed"
+    exit 1
+}
+
+remove_lock(){
+    local path=$1/lock
+    local name=$2
+
+    pid=`ps -efwww | grep vm-monitor | grep $name | awk '{print $2}'`
+    if [ -n "$pid" ]; then
+        kill $pid
+        rm -f $path
+    fi
+}
+
+get_status(){
+    local path=$1/lock
+    [ -f $path ] || exit 1
+
+    rc=`flock -xn $path /bin/true`
+    cat $path
+    exit $rc
+}
+
+mode="status"
+
+while getopts ":lusn:i:p:" opt; do
+    case $opt in
+        l )
+            mode="lock"
+            ;;
+        u )
+            mode="unlock"
+            ;;
+        s )
+            mode="status"
+            ;;
+        p )
+            vm_host=$OPTARG
+            ;;
+        n )
+            vm_name=$OPTARG
+            ;;
+        i )
+            vm_uuid=$OPTARG
+            ;;
+        \? )
+            usage
+            ;;
+    esac
+done
+
+shift $(($OPTIND - 1))
+vm_path=$1
+
+case $mode in
+    lock )
+        [ -z "$vm_path" ] || [ -z "$vm_name" ] || [ -z "$vm_uuid" ] || [ -z "$vm_host" ] && usage
+        $basedir/set-lock $vm_path $vm_name $vm_uuid $vm_host
+        ;;
+    unlock )
+        [ -z "$vm_path" ] || [ -z "$vm_name" ] || [ -z "$vm_uuid" ] || [ -z "$vm_host" ] && usage
+        remove_lock $vm_path $vm_name $vm_uuid $vm_host
+        ;;
+    status )
+        [ -z "$vm_path" ] && usage
+        get_status $vm_path
+        ;;
+esac
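
As a hedged illustration of the manual recovery step described in the xend-config.sxp note above (a stale lock left behind by a crashed host on a cluster-unaware file system), an administrator would inspect and then remove the per-VM lock file. The UUID below is a placeholder; the /<xend-domain-lock-path>/<vm-uuid>/lock layout comes from the patch:

  # Placeholder UUID: substitute the UUID of the affected domain.
  LOCK=/var/lib/xen/images/vm_locks/00000000-0000-0000-0000-000000000001/lock
  cat "$LOCK"     # shows name=... uuid=... host=... count=... written by vm-monitor
  # Only if the host named in the lock is confirmed down:
  rm -f "$LOCK"   # clears the stale lock so the domain can be started on another host
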
Index: xen-4.1.0-testing/tools/hotplug/Linux/vm-monitor
===================================================================
--- /dev/null
+++ xen-4.1.0-testing/tools/hotplug/Linux/vm-monitor
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+basedir=$(dirname "$0")
+HA_TICK=2
+
+monitor() {
+    local path=$1
+    local name=$2
+    local uuid=$3
+    local host=$4
+    local count=0
+    path=$path/lock
+
+    while :
+    do
+        echo "name=$name uuid=$uuid host=$host count=$count" > $path
+        count=$(($count+1))
+        sleep $HA_TICK
+    done&
+}
+
+create_lock() {
+    local path=$1/lock
+    local rc=0
+
+    [ -f $path ] || touch $path
+    flock -x -w $HA_TICK $path $basedir/vm-monitor $*
+    rc=$?
+    if [ $rc -eq 1 ]; then
+        echo `cat $path`
+        exit 1
+    else
+        exit $rc
+    fi
+}
+
+if [ $0 = "$basedir/set-lock" ]; then
+    create_lock $*
+elif [ $0 = "$basedir/vm-monitor" ]; then
+    monitor $*
+fi
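
The scripts above lean on two properties of flock(1): the lock is tied to the open file description, so it remains held for as long as the backgrounded heartbeat loop in vm-monitor keeps its inherited descriptor open, and a competing "flock -x -w 2" (or "flock -xn") on another host fails while that holder is alive. Note that domain-lock invokes $basedir/set-lock, which this diff does not create; the final test in vm-monitor suggests set-lock is installed as an alternate name for vm-monitor elsewhere in the package. A small, self-contained sketch of the flock behaviour, independent of the patch and using a throwaway path:

  #!/bin/bash
  # Stand-alone illustration (not part of the patch).
  lock=/tmp/demo.lock
  touch "$lock"

  # Holder: flock acquires the lock and runs a shell that backgrounds a writer
  # loop; the loop inherits the lock descriptor, so the lock stays held after
  # flock's direct child exits, which is the same trick vm-monitor uses.
  flock -x -w 2 "$lock" sh -c 'while :; do echo "tick $(date +%s)" > /tmp/demo.lock; sleep 2; done &'

  # Contender: fails immediately while the heartbeat writer is alive.
  flock -xn "$lock" true || echo "lock is held by the heartbeat writer"

  # As in remove_lock, killing the background writer (and removing the file)
  # is what finally releases the lock.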