
- fate#311000 - Extend Xen domain lock framework to support
  more alternatives
  xend-domain-lock-sfex.patch

- fate#311371 - Enhance yast to configure live migration for
  Xen and KVM
  add firewall service file for xen-tools

- Add man page for xen-list utility
  updated xen-utils-0.1.tar.bz2

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=126
Charles Arnold 2011-06-14 19:01:54 +00:00 committed by Git OBS Bridge
parent cee8cb13bb
commit c27ed3a4ef
16 changed files with 409 additions and 24 deletions


@@ -167,9 +167,9 @@ Index: xen-4.1.1-testing/xen/drivers/passthrough/vtd/iommu.c
===================================================================
--- xen-4.1.1-testing.orig/xen/drivers/passthrough/vtd/iommu.c
+++ xen-4.1.1-testing/xen/drivers/passthrough/vtd/iommu.c
@@ -1997,7 +1997,7 @@ static int init_vtd_hw(void)
if ( enable_intremap(iommu, 0) != 0 )
@@ -1998,7 +1998,7 @@ static int init_vtd_hw(void)
{
iommu_intremap = 0;
dprintk(XENLOG_WARNING VTDPREFIX,
- "Failed to enable Interrupt Remapping!\n");
+ "Interrupt Remapping not enabled\n");


@@ -109,7 +109,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- xen-4.1.1-testing.orig/xen/arch/x86/hvm/vmx/vmx.c
+++ xen-4.1.1-testing/xen/arch/x86/hvm/vmx/vmx.c
@@ -1546,182 +1546,42 @@ static void vmx_invlpg_intercept(unsigne
@@ -1545,182 +1545,42 @@ static void vmx_invlpg_intercept(unsigne
vpid_sync_vcpu_gva(curr, vaddr);
}
@@ -315,7 +315,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/vmx/vmx.c
}
static const struct lbr_info {
@@ -2526,7 +2386,7 @@ asmlinkage void vmx_vmexit_handler(struc
@@ -2525,7 +2385,7 @@ asmlinkage void vmx_vmexit_handler(struc
case EXIT_REASON_CR_ACCESS:
{
exit_qualification = __vmread(EXIT_QUALIFICATION);


@@ -19,7 +19,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-4.1.1-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
@@ -1040,6 +1040,22 @@ static void svm_vmexit_do_cpuid(struct c
@@ -1039,6 +1039,22 @@ static void svm_vmexit_do_cpuid(struct c
__update_guest_eip(regs, inst_len);
}
@@ -42,7 +42,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
{
HVMTRACE_0D(DR_WRITE);
@@ -1621,11 +1637,19 @@ asmlinkage void svm_vmexit_handler(struc
@@ -1620,11 +1636,19 @@ asmlinkage void svm_vmexit_handler(struc
int dir = (vmcb->exitinfo1 & 1) ? IOREQ_READ : IOREQ_WRITE;
if ( handle_pio(port, bytes, dir) )
__update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);


@@ -58,7 +58,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-4.1.1-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
@@ -1651,11 +1651,22 @@ asmlinkage void svm_vmexit_handler(struc
@@ -1650,11 +1650,22 @@ asmlinkage void svm_vmexit_handler(struc
break;
case VMEXIT_INVLPG:


@@ -58,7 +58,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
{
char *p;
@@ -1449,7 +1464,8 @@ static struct hvm_function_table __read_
@@ -1448,7 +1463,8 @@ static struct hvm_function_table __read_
.msr_read_intercept = svm_msr_read_intercept,
.msr_write_intercept = svm_msr_write_intercept,
.invlpg_intercept = svm_invlpg_intercept,
@@ -68,7 +68,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
};
asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
@@ -1555,7 +1571,12 @@ asmlinkage void svm_vmexit_handler(struc
@@ -1554,7 +1570,12 @@ asmlinkage void svm_vmexit_handler(struc
(unsigned long)regs->ecx, (unsigned long)regs->edx,
(unsigned long)regs->esi, (unsigned long)regs->edi);
@@ -82,7 +82,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
{
if ( trace_will_trace_event(TRC_SHADOW) )
break;
@@ -1721,7 +1742,10 @@ asmlinkage void svm_vmexit_handler(struc
@@ -1720,7 +1741,10 @@ asmlinkage void svm_vmexit_handler(struc
case VMEXIT_NPF:
perfc_incra(svmexits, VMEXIT_NPF_PERFC);
regs->error_code = vmcb->exitinfo1;


@@ -17,7 +17,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-4.1.1-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
@@ -1143,6 +1143,18 @@ static int svm_msr_read_intercept(unsign
@@ -1142,6 +1142,18 @@ static int svm_msr_read_intercept(unsign
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
@@ -36,7 +36,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/svm/svm.c
vpmu_do_rdmsr(msr, msr_content);
break;
@@ -1238,6 +1250,18 @@ static int svm_msr_write_intercept(unsig
@@ -1237,6 +1249,18 @@ static int svm_msr_write_intercept(unsig
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:


@@ -26,8 +26,8 @@ Index: xen-4.1.1-testing/xen/drivers/passthrough/vtd/iommu.c
break;
}
}
@@ -1998,6 +2000,9 @@ static int init_vtd_hw(void)
{
@@ -1999,6 +2001,9 @@ static int init_vtd_hw(void)
iommu_intremap = 0;
dprintk(XENLOG_WARNING VTDPREFIX,
"Interrupt Remapping not enabled\n");
+


@@ -15,7 +15,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/traps.c
===================================================================
--- xen-4.1.1-testing.orig/xen/arch/x86/traps.c
+++ xen-4.1.1-testing/xen/arch/x86/traps.c
@@ -1377,6 +1377,7 @@ asmlinkage void __init do_early_page_fau
@@ -1386,6 +1386,7 @@ asmlinkage void __init do_early_page_fau
unsigned long *stk = (unsigned long *)regs;
printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
regs->cs, _p(regs->eip), _p(cr2), regs->error_code);


@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe402be2467f4d333366ed3cccb584f55bea31d72f2c8ed9bc90d1ae32d77dfc
size 10774044
oid sha256:efea2b4adcb2a238dc5a26791ef2829d4bd56bd44d8a2b860afcbc04fe260e5e
size 10773743


@@ -9,9 +9,9 @@ Index: xen-4.1.1-testing/Config.mk
-CONFIG_QEMU ?= $(QEMU_REMOTE)
+CONFIG_QEMU ?= ioemu-qemu-xen
QEMU_TAG := xen-4.1.1-rc1
QEMU_TAG := xen-4.1.1-rc2
#QEMU_TAG ?= e073e69457b4d99b6da0b6536296e3498f7f6599
@@ -204,7 +204,7 @@ QEMU_TAG := xen-4.1.1-rc1
@@ -204,7 +204,7 @@ QEMU_TAG := xen-4.1.1-rc2
# Optional components
XENSTAT_XENTOP ?= y
VTPM_TOOLS ?= n


@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd14ba8dac0acc960105eed40d9b4044f2ec551a976a5709d363fb5ac0027353
size 6812
oid sha256:3a4144a91d26495e1307de8e03c3a077f2f23a38e794bd601f92a20de759bdb6
size 8368


@@ -473,7 +473,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/hvm/hvm.c
struct irq_desc *desc;
struct domain *d = v->domain;
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
@@ -3582,7 +3583,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
@@ -3583,7 +3584,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
{
p2m_type_t t;
p2m_type_t nt;
@@ -731,7 +731,7 @@ Index: xen-4.1.1-testing/xen/arch/x86/traps.c
===================================================================
--- xen-4.1.1-testing.orig/xen/arch/x86/traps.c
+++ xen-4.1.1-testing/xen/arch/x86/traps.c
@@ -1742,7 +1742,11 @@ static int emulate_privileged_op(struct
@@ -1751,7 +1751,11 @@ static int emulate_privileged_op(struct
struct vcpu *v = current;
unsigned long *reg, eip = regs->eip;
u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0, lock = 0, rex = 0;


@@ -1,3 +1,23 @@
-------------------------------------------------------------------
Tue Jun 14 11:26:30 CST 2011 - lidongyang@novell.com

- fate#311000 - Extend Xen domain lock framework to support
  more alternatives
  xend-domain-lock-sfex.patch

-------------------------------------------------------------------
Mon Jun 13 14:50:32 CST 2011 - lidongyang@novell.com

- fate#311371 - Enhance yast to configure live migration for
  Xen and KVM
  add firewall service file for xen-tools

-------------------------------------------------------------------
Fri Jun 10 09:14:07 MDT 2011 - jfehlig@novell.com

- Add man page for xen-list utility
  updated xen-utils-0.1.tar.bz2

-------------------------------------------------------------------
Thu May 26 06:36:49 MDT 2011 - carnold@novell.com


@@ -22,10 +22,11 @@ Name: xen
ExclusiveArch: %ix86 x86_64
%define xvers 4.1
%define xvermaj 4
%define changeset 23064
%define changeset 23076
%define xen_build_dir xen-4.1.1-testing
%define with_kmp 1
%define with_stubdom 0
%define _fwdefdir /etc/sysconfig/SuSEfirewall2.d/services
%if %suse_version > 1140
%define with_xend 0
%else
@@ -116,6 +117,8 @@ Source23: etc_pam.d_xen-api
Source24: xenapiusers
# sysconfig hook script for Xen
Source25: xen-updown.sh
# Firewall service file for xend relocation server
Source26: xend-relocation-server.fw
Source99: baselibs.conf
# http://xenbits.xensource.com/ext/xenalyze
Source20000: xenalyze.hg.tar.bz2
@@ -232,6 +235,7 @@ Patch446: xend-disable-internal-logrotate.patch
Patch447: xend-config-enable-dump-comment.patch
# Jim's domain lock patch
Patch450: xend-domain-lock.patch
Patch451: xend-domain-lock-sfex.patch
# Hypervisor and PV driver Patches
Patch500: 32on64-extra-mem.patch
Patch501: x86-ioapic-ack-default.patch
@@ -714,6 +718,7 @@ tar xfj %{SOURCE2} -C $RPM_BUILD_DIR/%{xen_build_dir}/tools
%patch446 -p1
%patch447 -p1
%patch450 -p1
%patch451 -p1
%patch500 -p1
%patch501 -p1
%patch502 -p1
@@ -933,6 +938,10 @@ rm -f $RPM_BUILD_ROOT/%{_bindir}/qemu-nbd-xen
# This is necessary because of the build of libconfig for libxl
#rm -rf $RPM_BUILD_ROOT/$RPM_BUILD_ROOT
rm -rf $RPM_BUILD_ROOT/%{_libdir}/debug
#install firewall definitions; the format is described here:
#/usr/share/SuSEfirewall2/services/TEMPLATE
mkdir -p $RPM_BUILD_ROOT/%{_fwdefdir}
install -m 644 %{S:26} $RPM_BUILD_ROOT/%{_fwdefdir}/xend-relocation-server
%files
%defattr(-,root,root)
@@ -1088,6 +1097,7 @@ rm -rf $RPM_BUILD_ROOT/%{_libdir}/debug
/usr/lib/xen/boot/pv-grub-x86_64.gz
%endif
%endif
%config %{_fwdefdir}/xend-relocation-server
%files tools-domU
%defattr(-,root,root)

xend-domain-lock-sfex.patch Normal file

@@ -0,0 +1,351 @@
Index: xen-4.1.1-testing/tools/examples/xend-config.sxp
===================================================================
--- xen-4.1.1-testing.orig/tools/examples/xend-config.sxp
+++ xen-4.1.1-testing/tools/examples/xend-config.sxp
@@ -357,7 +357,7 @@
# path /<xend-domain-lock-path>/<vm-uuid>
# Return 0 on success, non-zero on error.
#
-# lock-util [-s] path"
+# lock-util [-s] -i <vm uuid> path"
# -s Lock status. If lock is acquired, print any contents
# on stdout and return 0. Return non-zero if lock is
# available.
@@ -383,6 +383,11 @@
#
#(xend-domain-lock-utility domain-lock)
+# Some locking mechanisms, such as sfex, provide a cluster-wide locking
+# service. That requires a shared locking device.
+#(xend-domain-lock-utility domain-lock-sfex)
+#(xend-domain-lock-device "/dev/iwmvg/hbdevice")
+
# If we have a very big scsi device configuration, start of xend is slow,
# because xend scans all the device paths to build its internal PSCSI device
# list. If we need only a few devices for assigning to a guest, we can reduce
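The two new options select the lock script and name a shared block device that every cluster node can reach. As a minimal sketch, enabling sfex locking could look like the following; the xend-domain-lock option comes from the existing lock framework, the device path matches the example above, and the sfex_init invocation is an assumption (see the sfex_init man page from the heartbeat/cluster-glue packages):

    # /etc/xen/xend-config.sxp -- switch the lock framework to sfex
    (xend-domain-lock yes)
    (xend-domain-lock-utility domain-lock-sfex)
    (xend-domain-lock-device "/dev/iwmvg/hbdevice")

    # one-time preparation of the shared device, run on a single node:
    lvchange -ay /dev/iwmvg/hbdevice    # activate the shared logical volume
    sfex_init /dev/iwmvg/hbdevice       # write sfex lock metadata (assumed CLI)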
Index: xen-4.1.1-testing/tools/hotplug/Linux/Makefile
===================================================================
--- xen-4.1.1-testing.orig/tools/hotplug/Linux/Makefile
+++ xen-4.1.1-testing/tools/hotplug/Linux/Makefile
@@ -23,6 +23,7 @@ XEN_SCRIPTS += xen-hotplug-cleanup
XEN_SCRIPTS += external-device-migrate
XEN_SCRIPTS += vscsi
XEN_SCRIPTS += domain-lock vm-monitor
+XEN_SCRIPTS += domain-lock-sfex
XEN_SCRIPT_DATA = xen-script-common.sh locking.sh logging.sh
XEN_SCRIPT_DATA += xen-hotplug-common.sh xen-network-common.sh vif-common.sh
XEN_SCRIPT_DATA += block-common.sh vtpm-common.sh vtpm-hotplug-common.sh
Index: xen-4.1.1-testing/tools/hotplug/Linux/domain-lock
===================================================================
--- xen-4.1.1-testing.orig/tools/hotplug/Linux/domain-lock
+++ xen-4.1.1-testing/tools/hotplug/Linux/domain-lock
@@ -4,7 +4,7 @@ basedir=$(dirname "$0")
usage() {
echo "usage: domain-lock [-l|-u] -n <vm name> -i <vm uuid> -p <physical host> path"
- echo "usage: domain-lock [-s] path"
+ echo "usage: domain-lock [-s] -i <vm uuid> path"
echo ""
echo "-l lock"
echo "-u unlock"
Index: xen-4.1.1-testing/tools/hotplug/Linux/domain-lock-sfex
===================================================================
--- /dev/null
+++ xen-4.1.1-testing/tools/hotplug/Linux/domain-lock-sfex
@@ -0,0 +1,166 @@
+#!/bin/bash
+
+# pre-condition
+# 1. device is ready: logical volume activated if used
+# 2. device already initialized
+# 3. index is assigned correctly
+
+#error code:
+# 0: success
+# 1: error
+
+if [ `uname -m` = "x86_64" ]; then
+ SFEX_DAEMON=/usr/lib64/heartbeat/sfex_daemon
+else
+ SFEX_DAEMON=/usr/lib/heartbeat/sfex_daemon
+fi
+SFEX_INIT=/usr/sbin/sfex_init
+COLLISION_TIMEOUT=1
+LOCK_TIMEOUT=3
+MONITOR_INTERVAL=2
+LOCAL_LOCK_FILE=/var/lock/sfex
+
+usage() {
+ echo "usage: domain-lock-sfex [-l|-u|-s] -i <vm uuid> -x <sfex device>"
+ echo ""
+ echo "-l lock"
+ echo "-u unlock"
+ echo "-s status (default)"
+ echo "-i Virtual Machine Id or UUID"
+ echo "-x SFEX device which used for sfex lock"
+ exit 1
+}
+
+get_lock_host() {
+ local rscname=$1
+ local device=$2
+ r=`$SFEX_DAEMON -s -u $rscname $device`
+ echo $r
+}
+
+get_status() {
+ local rscname=$1
+ if /usr/bin/pgrep -f "$SFEX_DAEMON .* ${rscname} " > /dev/null 2>&1; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+acquire_lock() {
+ local rscname=$1
+ local device=$2
+ get_status $rscname
+ ## We assume xend will take care to avoid starting the same VM twice on the same machine.
+ if [ $? -eq 0 ]; then
+ return 0
+ fi
+ $SFEX_DAEMON -c $COLLISION_TIMEOUT -t $LOCK_TIMEOUT -m $MONITOR_INTERVAL -u $rscname $device
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ return $rc
+ fi
+ sleep 4
+ get_status $rscname
+ if [ $? -eq 0 ]; then
+ return 0
+ fi
+ return 1
+}
+
+# release has to succeed
+release_lock(){
+ local rscname=$1
+
+ ## If the lock is already released
+ get_status $rscname
+ if [ $? -ne 0 ]; then
+ return 0
+ fi
+
+ pid=`/usr/bin/pgrep -f "$SFEX_DAEMON .* ${rscname} "`
+ /bin/kill $pid
+
+ count=0
+ while [ $count -lt 10 ]
+ do
+ get_status $rscname
+ if [ $? -eq 1 ]; then
+ return 0
+ fi
+ count=`expr $count + 1`
+ sleep 1
+ done
+
+ /bin/kill -9 $pid
+ while :
+ do
+ get_status $rscname
+ if [ $? -eq 1 ]; then
+ break;
+ fi
+ sleep 1
+ done
+
+ return 0
+}
+
+mode="status"
+
+while getopts ":lusn:i:p:x:" opt; do
+case $opt in
+l )
+mode="lock"
+;;
+u )
+mode="unlock"
+;;
+s )
+mode="status"
+;;
+n )
+vm_name=$OPTARG
+;;
+i )
+vm_uuid=$OPTARG
+;;
+p )
+vm_host=$OPTARG
+;;
+x )
+vm_sfex_device=$OPTARG
+;;
+\? )
+usage
+;;
+esac
+done
+
+shift $(($OPTIND - 1))
+[ -z $vm_uuid ] && usage
+[ -z $vm_sfex_device ] && usage
+
+case $mode in
+lock )
+ (
+ flock -x 200
+ acquire_lock $vm_uuid $vm_sfex_device
+ rc=$?
+ flock -u 200
+ exit $rc
+ ) 200>$LOCAL_LOCK_FILE-$vm_uuid
+;;
+unlock )
+ (
+ flock -x 200
+ release_lock $vm_uuid
+ rc=$?
+ flock -u 200
+ exit $rc
+ ) 200>$LOCAL_LOCK_FILE-$vm_uuid
+;;
+status )
+ get_lock_host $vm_uuid $vm_sfex_device
+;;
+esac
+
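Installed alongside the other hotplug scripts (/etc/xen/scripts on a typical SUSE system, an assumption here), the script can be exercised by hand; the UUID and device are placeholders:

    UUID=00000000-0000-4000-8000-000000000001
    DEV=/dev/iwmvg/hbdevice
    /etc/xen/scripts/domain-lock-sfex -l -i $UUID -x $DEV && echo "lock acquired"
    /etc/xen/scripts/domain-lock-sfex -s -i $UUID -x $DEV    # prints the holding host
    /etc/xen/scripts/domain-lock-sfex -u -i $UUID -x $DEV    # release

The flock on a per-UUID file serializes acquire/release attempts for the same VM on one host, while the sfex daemon arbitrates ownership between hosts through the shared device.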
Index: xen-4.1.1-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.1.1-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.1.1-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -4499,8 +4499,14 @@ class XendDomainInfo:
# Return name of host contained in lock file.
def get_lock_host(self, path):
- fin = os.popen(xoptions.get_xend_domain_lock_utility() + \
- ' -s ' + path, 'r')
+ lock_cmd = '%s -s -i %s ' % \
+ (xoptions.get_xend_domain_lock_utility(), \
+ self.info['uuid'])
+ lock_dev = xoptions.get_xend_domain_lock_device()
+ if lock_dev:
+ lock_cmd += '-x %s ' % lock_dev
+ lock_cmd += path
+ fin = os.popen(lock_cmd, 'r')
hostname = "unknown"
try:
@@ -4522,6 +4528,16 @@ class XendDomainInfo:
path = xoptions.get_xend_domain_lock_path()
path = os.path.join(path, self.get_uuid())
+ lock_cmd = '%s -l -p %s -n %s -i %s ' % \
+ (xoptions.get_xend_domain_lock_utility(), \
+ XendNode.instance().get_name(), \
+ self.info['name_label'], \
+ self.info['uuid'])
+ lock_dev = xoptions.get_xend_domain_lock_device()
+ if lock_dev:
+ lock_cmd += '-x %s ' % lock_dev
+ lock_cmd += path
+
try:
if not os.path.exists(path):
mkdir.parents(path, stat.S_IRWXU)
@@ -4529,12 +4545,7 @@ class XendDomainInfo:
log.exception("%s could not be created." % path)
raise XendError("%s could not be created." % path)
- status = os.system('%s -l -p %s -n %s -i %s %s' % \
- (xoptions.get_xend_domain_lock_utility(), \
- XendNode.instance().get_name(), \
- self.info['name_label'], \
- self.info['uuid'], \
- path))
+ status = os.system(lock_cmd) >> 8
if status != 0:
log.debug("Failed to aqcuire lock: status = %d" % status)
raise XendError("The VM is locked and appears to be running on host %s." % self.get_lock_host(path))
@@ -4551,12 +4562,18 @@ class XendDomainInfo:
path = xoptions.get_xend_domain_lock_path()
path = os.path.join(path, self.get_uuid())
- status = os.system('%s -u -p %s -n %s -i %s %s' % \
- (xoptions.get_xend_domain_lock_utility(), \
- XendNode.instance().get_name(), \
- dom_name, \
- self.info['uuid'], \
- path))
+
+ lock_cmd = '%s -u -p %s -n %s -i %s ' % \
+ (xoptions.get_xend_domain_lock_utility(), \
+ XendNode.instance().get_name(), \
+ dom_name, \
+ self.info['uuid'])
+ lock_dev = xoptions.get_xend_domain_lock_device()
+ if lock_dev:
+ lock_cmd += '-x %s ' % lock_dev
+ lock_cmd += path
+
+ status = os.system(lock_cmd) >> 8
if status != 0:
log.exception("Failed to release lock: status = %s" % status)
try:
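With a lock device configured, the command xend assembles and runs looks roughly like this (host name, domain name, and lock path are illustrative):

    domain-lock-sfex -l -p host1 -n sles11vm -i <vm-uuid> -x /dev/iwmvg/hbdevice /var/lib/xen/images/vm_locks/<vm-uuid>

Shifting the os.system() return value right by 8 bits extracts the script's real exit code from the 16-bit wait status, so the status != 0 check matches the script's 0-on-success convention.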
Index: xen-4.1.1-testing/tools/python/xen/xend/XendNode.py
===================================================================
--- xen-4.1.1-testing.orig/tools/python/xen/xend/XendNode.py
+++ xen-4.1.1-testing/tools/python/xen/xend/XendNode.py
@@ -162,6 +162,7 @@ class XendNode:
self._init_cpu_pools()
+ self._init_lock_devices()
def _init_networks(self):
# Initialise networks
@@ -382,6 +383,17 @@ class XendNode:
XendCPUPool.recreate_active_pools()
+ def _init_lock_devices(self):
+ if xendoptions().get_xend_domain_lock():
+ if xendoptions().get_xend_domain_lock_utility().endswith("domain-lock-sfex"):
+ lock_device = xendoptions().get_xend_domain_lock_device()
+ if not lock_device:
+ raise XendError("The block device for sfex is not properly configured")
+ status = os.system("lvchange -ay %s" % lock_device) >> 8
+ if status != 0:
+ raise XendError("The block device for sfex could not be initialized")
+
+
def add_network(self, interface):
# TODO
log.debug("add_network(): Not implemented.")
Index: xen-4.1.1-testing/tools/python/xen/xend/XendOptions.py
===================================================================
--- xen-4.1.1-testing.orig/tools/python/xen/xend/XendOptions.py
+++ xen-4.1.1-testing/tools/python/xen/xend/XendOptions.py
@@ -164,6 +164,9 @@ class XendOptions:
"""Default script to acquire/release domain lock"""
xend_domain_lock_utility = auxbin.scripts_dir() + "/domain-lock"
+ """Default block device for lock service"""
+ xend_domain_lock_device = ""
+
def __init__(self):
self.configure()
@@ -430,6 +433,8 @@ class XendOptions:
else:
return self.xend_domain_lock_utility
+ def get_xend_domain_lock_device(self):
+ return self.get_config_string('xend-domain-lock-device', self.xend_domain_lock_device)
def get_vnc_tls(self):
return self.get_config_string('vnc-tls', self.xend_vnc_tls)


@@ -0,0 +1,4 @@
## Name: Xend Relocation Server
## Description: Enables xend relocation service
TCP="8002 8003"
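The service definition opens TCP ports 8002 and 8003, the default xend relocation and relocation-ssl ports. A sketch of enabling it on a migration target with the stock SuSEfirewall2 tools (putting the service in the external zone is an assumption):

    # append the service to the external zone, then reload the firewall
    sed -i 's/^FW_CONFIGURATIONS_EXT="\(.*\)"/FW_CONFIGURATIONS_EXT="\1 xend-relocation-server"/' \
        /etc/sysconfig/SuSEfirewall2
    rcSuSEfirewall2 restart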