xen/xend-domain-lock-sfex.patch
Charles Arnold af6dcd473d
- Enable blktapctrl when qemu-traditional is required to satisfy
  build dependencies. Remove binaries after build if xend is
  disabled

- update ifarch usage in xen.spec to also cover arm
- blktapctrl is used only by xend
- fix xend-tools-xend sub-pkg handling
- default to gcc47 for sles11sp3 builds
- remove all latex packages from BuildRequires
- aarch64-rename-PSR_MODE_ELxx-to-match-linux-headers.patch

- add arch dependent install suffix for /boot/xen files

- Set max_cpus==4 for non-x86_64 builds

- Update to Xen 4.4.0 RC3 c/s 28321

- Add flex and bison to BuildRequires, needed by previous patch

- fate#316071: add discard support for file backed storage (qdisk)
  libxl.add-option-for-discard-support-to-xl-disk-conf.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=296
2014-02-03 23:53:57 +00:00

Index: xen-4.4.0-testing/tools/examples/xend-config.sxp
===================================================================
--- xen-4.4.0-testing.orig/tools/examples/xend-config.sxp
+++ xen-4.4.0-testing/tools/examples/xend-config.sxp
@@ -357,7 +357,7 @@
# path /<xend-domain-lock-path>/<vm-uuid>
# Return 0 on success, non-zero on error.
#
-# lock-util [-s] path"
+# lock-util [-s] -i <vm uuid> path"
# -s Lock status. If lock is acquired, print any contents
# on stdout and return 0. Return non-zero if lock is
# available.
@@ -383,6 +383,11 @@
#
#(xend-domain-lock-utility domain-lock)
+# Some locking mechanisms, such as sfex, provide a cluster-wide locking
+# service and require a shared locking device.
+#(xend-domain-lock-utility domain-lock-sfex)
+#(xend-domain-lock-device "/dev/iwmvg/hbdevice")
+
# If we have a very big scsi device configuration, start of xend is slow,
# because xend scans all the device paths to build its internal PSCSI device
# list. If we need only a few devices for assigning to a guest, we can reduce
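
For reference, enabling the sfex variant means uncommenting the new options together with xend-domain-lock itself; a minimal sketch of the resulting xend-config.sxp entries (the device path is illustrative, not a default):

  (xend-domain-lock yes)
  (xend-domain-lock-utility domain-lock-sfex)
  (xend-domain-lock-device "/dev/<shared-vg>/<lock-lv>")

Note that xend only activates the configured device at startup when the utility path ends in domain-lock-sfex (see the XendNode.py hunk below).
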
Index: xen-4.4.0-testing/tools/hotplug/Linux/Makefile
===================================================================
--- xen-4.4.0-testing.orig/tools/hotplug/Linux/Makefile
+++ xen-4.4.0-testing/tools/hotplug/Linux/Makefile
@@ -24,6 +24,7 @@ XEN_SCRIPTS += external-device-migrate
XEN_SCRIPTS += vscsi
XEN_SCRIPTS += block-iscsi
XEN_SCRIPTS += domain-lock vm-monitor
+XEN_SCRIPTS += domain-lock-sfex
XEN_SCRIPTS += $(XEN_SCRIPTS-y)
XEN_SCRIPT_DATA = xen-script-common.sh locking.sh logging.sh
Index: xen-4.4.0-testing/tools/hotplug/Linux/domain-lock
===================================================================
--- xen-4.4.0-testing.orig/tools/hotplug/Linux/domain-lock
+++ xen-4.4.0-testing/tools/hotplug/Linux/domain-lock
@@ -4,7 +4,7 @@ basedir=$(dirname "$0")
usage() {
echo "usage: domain-lock [-l|-u] -n <vm name> -i <vm uuid> -p <physical host> path"
- echo "usage: domain-lock [-s] path"
+ echo "usage: domain-lock [-s] -i <vm uuid> path"
echo ""
echo "-l lock"
echo "-u unlock"
Index: xen-4.4.0-testing/tools/hotplug/Linux/domain-lock-sfex
===================================================================
--- /dev/null
+++ xen-4.4.0-testing/tools/hotplug/Linux/domain-lock-sfex
@@ -0,0 +1,166 @@
+#!/bin/bash
+
+# Preconditions:
+# 1. The device is ready: the logical volume is activated if one is used.
+# 2. The device is already initialized.
+# 3. The index is assigned correctly.
+
+# Exit codes:
+# 0: success
+# 1: error
+
+if [ `uname -m` = "x86_64" ]; then
+ SFEX_DAEMON=/usr/lib64/heartbeat/sfex_daemon
+else
+ SFEX_DAEMON=/usr/lib/heartbeat/sfex_daemon
+fi
+SFEX_INIT=/usr/sbin/sfex_init
+COLLISION_TIMEOUT=1
+LOCK_TIMEOUT=3
+MONITOR_INTERVAL=2
+LOCAL_LOCK_FILE=/var/lock/sfex
+
+usage() {
+ echo "usage: domain-lock-sfex [-l|-u|-s] -i <vm uuid> -x <sfex device>"
+ echo ""
+ echo "-l lock"
+ echo "-u unlock"
+ echo "-s status (default)"
+ echo "-i Virtual Machine ID or UUID"
+ echo "-x SFEX device used for the sfex lock"
+ exit 1
+}
+
+get_lock_host() {
+ local rscname=$1
+ local device=$2
+ r=`$SFEX_DAEMON -s -u $rscname $device`
+ echo $r
+}
+
+get_status() {
+ local rscname=$1
+ if /usr/bin/pgrep -f "$SFEX_DAEMON .* ${rscname} " > /dev/null 2>&1; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+acquire_lock() {
+ local rscname=$1
+ local device=$2
+ get_status $rscname
+ ## We assume xend will take care to avoid starting the same VM twice on the same machine.
+ if [ $? -eq 0 ]; then
+ return 0
+ fi
+ $SFEX_DAEMON -c $COLLISION_TIMEOUT -t $LOCK_TIMEOUT -m $MONITOR_INTERVAL -u $rscname $device
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ return $rc
+ fi
+ sleep 4
+ get_status $rscname
+ if [ $? -eq 0 ]; then
+ return 0
+ fi
+ return 1
+}
+
+# release has to succeed
+release_lock(){
+ local rscname=$1
+
+ ## If the lock is already released
+ get_status $rscname
+ if [ $? -ne 0 ]; then
+ return 0
+ fi
+
+ pid=`/usr/bin/pgrep -f "$SFEX_DAEMON .* ${rscname} "`
+ /bin/kill $pid
+
+ count=0
+ while [ $count -lt 10 ]
+ do
+ get_status $rscname
+ if [ $? -eq 1 ]; then
+ return 0
+ fi
+ count=`expr $count + 1`
+ sleep 1
+ done
+
+ /bin/kill -9 $pid
+ while :
+ do
+ get_status $rscname
+ if [ $? -eq 1 ]; then
+ break;
+ fi
+ sleep 1
+ done
+
+ return 0
+}
+
+mode="status"
+
+while getopts ":lusn:i:p:x:" opt; do
+ case $opt in
+ l )
+  mode="lock"
+  ;;
+ u )
+  mode="unlock"
+  ;;
+ s )
+  mode="status"
+  ;;
+ n )
+  vm_name=$OPTARG
+  ;;
+ i )
+  vm_uuid=$OPTARG
+  ;;
+ p )
+  vm_host=$OPTARG
+  ;;
+ x )
+  vm_sfex_device=$OPTARG
+  ;;
+ \? )
+  usage
+  ;;
+ esac
+done
+
+shift $(($OPTIND - 1))
+[ -z "$vm_uuid" ] && usage
+[ -z "$vm_sfex_device" ] && usage
+
+case $mode in
+lock )
+ (
+ flock -x 200
+ acquire_lock $vm_uuid $vm_sfex_device
+ rc=$?
+ flock -u 200
+ exit $rc
+ ) 200>$LOCAL_LOCK_FILE-$vm_uuid
+;;
+unlock )
+ (
+ flock -x 200
+ release_lock $vm_uuid
+ rc=$?
+ flock -u 200
+ exit $rc
+ ) 200>$LOCAL_LOCK_FILE-$vm_uuid
+;;
+status )
+ get_lock_host $vm_uuid $vm_sfex_device
+;;
+esac
+
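
A usage sketch for the new script, assuming the shared device was initialized beforehand as the preconditions require (device path and UUID are placeholders; sfex_init ships with the heartbeat sfex tools referenced by SFEX_INIT above):

  # one-time initialization of the shared lock device (illustrative path)
  /usr/sbin/sfex_init /dev/<shared-vg>/<lock-lv>

  # acquire, query, and release the lock for a guest
  domain-lock-sfex -l -i <vm-uuid> -x /dev/<shared-vg>/<lock-lv>
  domain-lock-sfex -s -i <vm-uuid> -x /dev/<shared-vg>/<lock-lv>   # prints the lock holder
  domain-lock-sfex -u -i <vm-uuid> -x /dev/<shared-vg>/<lock-lv>

All three modes require both -i and -x. The script also accepts -n, -p, and a trailing path so it stays call-compatible with domain-lock, but it does not use them.
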
Index: xen-4.4.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.4.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.4.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -4559,8 +4559,14 @@ class XendDomainInfo:
# Return name of host contained in lock file.
def get_lock_host(self, path):
- fin = os.popen(xoptions.get_xend_domain_lock_utility() + \
- ' -s ' + path, 'r')
+ lock_cmd = '%s -s -i %s ' % \
+ (xoptions.get_xend_domain_lock_utility(), \
+ self.info['uuid'])
+ lock_dev = xoptions.get_xend_domain_lock_device()
+ if lock_dev:
+ lock_cmd += '-x %s ' % lock_dev
+ lock_cmd += path
+ fin = os.popen(lock_cmd, 'r')
hostname = "unknown"
try:
@@ -4582,6 +4588,16 @@ class XendDomainInfo:
path = xoptions.get_xend_domain_lock_path()
path = os.path.join(path, self.get_uuid())
+ lock_cmd = '%s -l -p %s -n %s -i %s ' % \
+ (xoptions.get_xend_domain_lock_utility(), \
+ XendNode.instance().get_name(), \
+ self.info['name_label'], \
+ self.info['uuid'])
+ lock_dev = xoptions.get_xend_domain_lock_device()
+ if lock_dev:
+ lock_cmd += '-x %s ' % lock_dev
+ lock_cmd += path
+
try:
if not os.path.exists(path):
mkdir.parents(path, stat.S_IRWXU)
@@ -4589,12 +4605,7 @@ class XendDomainInfo:
log.exception("%s could not be created." % path)
raise XendError("%s could not be created." % path)
- status = os.system('%s -l -p %s -n %s -i %s %s' % \
- (xoptions.get_xend_domain_lock_utility(), \
- XendNode.instance().get_name(), \
- self.info['name_label'], \
- self.info['uuid'], \
- path))
+ status = os.system(lock_cmd) >> 8
if status != 0:
log.debug("Failed to acquire lock: status = %d" % status)
raise XendError("The VM is locked and appears to be running on host %s." % self.get_lock_host(path))
@@ -4611,12 +4622,18 @@ class XendDomainInfo:
path = xoptions.get_xend_domain_lock_path()
path = os.path.join(path, self.get_uuid())
- status = os.system('%s -u -p %s -n %s -i %s %s' % \
- (xoptions.get_xend_domain_lock_utility(), \
- XendNode.instance().get_name(), \
- dom_name, \
- self.info['uuid'], \
- path))
+
+ lock_cmd = '%s -u -p %s -n %s -i %s ' % \
+ (xoptions.get_xend_domain_lock_utility(), \
+ XendNode.instance().get_name(), \
+ dom_name, \
+ self.info['uuid'])
+ lock_dev = xoptions.get_xend_domain_lock_device()
+ if lock_dev:
+ lock_cmd += '-x %s ' % lock_dev
+ lock_cmd += path
+
+ status = os.system(lock_cmd) >> 8
if status != 0:
log.exception("Failed to release lock: status = %s" % status)
try:
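
With a lock device configured, the assembled lock_cmd strings take this shape (all values illustrative):

  domain-lock-sfex -l -p <hostname> -n <vm-name> -i <vm-uuid> -x <lock-device> <lock-path>
  domain-lock-sfex -u -p <hostname> -n <vm-name> -i <vm-uuid> -x <lock-device> <lock-path>
  domain-lock-sfex -s -i <vm-uuid> -x <lock-device> <lock-path>

The new ">> 8" shift turns the raw os.system() wait status into the child's exit code, matching the 0/non-zero convention the lock utilities document.
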
Index: xen-4.4.0-testing/tools/python/xen/xend/XendNode.py
===================================================================
--- xen-4.4.0-testing.orig/tools/python/xen/xend/XendNode.py
+++ xen-4.4.0-testing/tools/python/xen/xend/XendNode.py
@@ -162,6 +162,7 @@ class XendNode:
self._init_cpu_pools()
+ self._init_lock_devices()
def _init_networks(self):
# Initialise networks
@@ -382,6 +383,17 @@ class XendNode:
XendCPUPool.recreate_active_pools()
+ def _init_lock_devices(self):
+ if xendoptions().get_xend_domain_lock():
+ if xendoptions().get_xend_domain_lock_utility().endswith("domain-lock-sfex"):
+ lock_device = xendoptions().get_xend_domain_lock_device()
+ if not lock_device:
+ raise XendError("The block device for sfex is not properly configured")
+ status = os.system("lvchange -ay %s" % lock_device) >> 8
+ if status != 0:
+ raise XendError("The block device for sfex could not be initialized")
+
+
def add_network(self, interface):
# TODO
log.debug("add_network(): Not implemented.")
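
The lvchange call above performs the same activation an administrator would otherwise run by hand before starting xend (device path illustrative):

  lvchange -ay /dev/<shared-vg>/<lock-lv>

This satisfies precondition 1 in the domain-lock-sfex script header: the logical volume must be active before the sfex daemon can open it.
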
Index: xen-4.4.0-testing/tools/python/xen/xend/XendOptions.py
===================================================================
--- xen-4.4.0-testing.orig/tools/python/xen/xend/XendOptions.py
+++ xen-4.4.0-testing/tools/python/xen/xend/XendOptions.py
@@ -164,6 +164,9 @@ class XendOptions:
"""Default script to acquire/release domain lock"""
xend_domain_lock_utility = auxbin.scripts_dir() + "/domain-lock"
+ """Default block device for lock service"""
+ xend_domain_lock_device = ""
+
def __init__(self):
self.configure()
@@ -430,6 +433,8 @@ class XendOptions:
else:
return self.xend_domain_lock_utility
+ def get_xend_domain_lock_device(self):
+ return self.get_config_string('xend-domain-lock-device', self.xend_domain_lock_device)
def get_vnc_tls(self):
return self.get_config_string('vnc-tls', self.xend_vnc_tls)