- bnc#603583 - Fix migration of domUs using tapdisk devices

21317-xend-blkif-util-tap2.patch
  suse-disable-tap2-default.patch

- Match upstream's cpu pools switch from domctl to sysctl
- Upstream replacements for two of our custom patches (to ease
  applying further backports)
- Fixed dump-exec-state.patch (could previously hang the system, as
  could - with lower probability - the un-patched implementation)

- bnc#593536 - xen hypervisor takes very long to initialize Dom0 on
  128 CPUs and 256Gb
  21272-x86-dom0-alloc-performance.patch
  21266-vmx-disabled-check.patch
  21271-x86-cache-flush-global.patch

- bnc#558815 - using multiple npiv luns with same wwpn/wwnn broken
- bnc#601104 - Xen /etc/xen/scripts/block-npiv script fails when
  accessing multiple disks using NPIV
  block-npiv

- bnc#595124 - VT-d can not be enabled on 32PAE Xen on Nehalem-EX
  platform
  21234-x86-bad-srat-clear-pxm2node.patch
  bnc#585371 - kdump fails to load with xen: locate_hole failed
  21235-crashkernel-advanced.patch

- bnc#588918 - Attaching a U-disk to domain's failed by
  "xm usb-attach"
  init.xend

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=44
This commit is contained in:
Charles Arnold 2010-05-07 19:40:54 +00:00 committed by Git OBS Bridge
parent 15e24c8338
commit 4b4fa7f68d
24 changed files with 994 additions and 316 deletions

View File

@ -1,3 +1,21 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271925757 -3600
# Node ID 5b72f9832cc21ebdbbd8569836578817353314d8
# Parent 11d20f17f82c1c6fc6c003c363835a964338b311
xend: don't drop device config on domain start failure
If domain creation in xend fails before devices are configured, e.g.
insufficient memory, device config is dropped from xend's managed
domain config.
Once xend is restarted, the domain's devices are lost.
This patch fixes a bug in XendConfig where only the device
controller was consulted for device configuration.
Signed-off-by: Jim Fehlig <jfehlig@novell.com>
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py

View File

@ -1,3 +1,5 @@
References: bnc#600794
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271954636 -3600

View File

@ -0,0 +1,34 @@
References: bnc#595124
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1272280290 -3600
# Node ID d01ea51fc929c57c7d5f427e1aafa0de7a436473
# Parent f766f2142a8515d1dc71783355dc4fc1813d95c4
x86: make pxm_to_node() return sane values when disabling NUMA internally
Otherwise, pass-through code may call memory allocation functions with
invalid node IDs, causing the allocations to fail.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -25,7 +25,7 @@ static struct acpi_table_slit *__read_mo
static nodemask_t nodes_parsed __initdata;
static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
-static u8 __read_mostly pxm2node[256] = { [0 ... 255] = 0xff };
+static u8 __read_mostly pxm2node[256] = { [0 ... 255] = NUMA_NO_NODE };
static int num_node_memblks;
@@ -112,6 +112,8 @@ static __init void bad_srat(void)
acpi_numa = -1;
for (i = 0; i < MAX_LOCAL_APIC; i++)
apicid_to_node[i] = NUMA_NO_NODE;
+ for (i = 0; i < ARRAY_SIZE(pxm2node); i++)
+ pxm2node[i] = NUMA_NO_NODE;
}
#ifdef CONFIG_X86_64

View File

@ -0,0 +1,175 @@
References: bnc#585371
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1272280332 -3600
# Node ID 3ce824963dc41827bdf1617b37a40e5e5e9dce29
# Parent d01ea51fc929c57c7d5f427e1aafa0de7a436473
Support Linux's advanced crashkernel= syntax
Quoting the original Linux patch's description:
"This patch adds a extended crashkernel syntax that makes the value of
reserved system RAM dependent on the system RAM itself:
crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
range=start-[end]
For example:
crashkernel=512M-2G:64M,2G-:128M
The motivation comes from distributors that configure their
crashkernel command line automatically with some configuration tool
(YaST, you know ;)). Of course that tool knows the value of System
RAM, but if the user removes RAM, then the system becomes unbootable
or at least unusable and error handling is very difficult."
For x86, unlike Linux, we pass the actual amount of RAM rather than
the highest page's address (to cope with sparse physical address
maps).
This still needs to be hooked up for ia64.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -642,6 +642,11 @@ void __init __start_xen(unsigned long mb
memcpy(&boot_e820, &e820, sizeof(e820));
/* Early kexec reservation (explicit static start address). */
+ nr_pages = 0;
+ for ( i = 0; i < e820.nr_map; i++ )
+ if ( e820.map[i].type == E820_RAM )
+ nr_pages += e820.map[i].size >> PAGE_SHIFT;
+ set_kexec_crash_area_size((u64)nr_pages << PAGE_SHIFT);
kexec_reserve_area(&boot_e820);
/*
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -47,15 +47,109 @@ static unsigned char vmcoreinfo_data[VMC
static size_t vmcoreinfo_size = 0;
xen_kexec_reserve_t kexec_crash_area;
+static struct {
+ u64 start, end;
+ unsigned long size;
+} ranges[16] __initdata;
+/*
+ * Parse command lines in the format
+ *
+ * crashkernel=<ramsize-range>:<size>[,...][@<offset>]
+ *
+ * with <ramsize-range> being of form
+ *
+ * <start>-[<end>]
+ *
+ * as well as the legacy ones in the format
+ *
+ * crashkernel=<size>[@<offset>]
+ */
static void __init parse_crashkernel(const char *str)
{
- kexec_crash_area.size = parse_size_and_unit(str, &str);
- if ( *str == '@' )
- kexec_crash_area.start = parse_size_and_unit(str+1, NULL);
+ const char *cur;
+
+ if ( strchr(str, ':' ) )
+ {
+ unsigned int idx = 0;
+
+ do {
+ if ( idx >= ARRAY_SIZE(ranges) )
+ {
+ printk(XENLOG_WARNING "crashkernel: too many ranges\n");
+ cur = NULL;
+ str = strchr(str, '@');
+ break;
+ }
+
+ ranges[idx].start = parse_size_and_unit(cur = str + !!idx, &str);
+ if ( cur == str )
+ break;
+
+ if ( *str != '-' )
+ {
+ printk(XENLOG_WARNING "crashkernel: '-' expected\n");
+ break;
+ }
+
+ if ( *++str != ':' )
+ {
+ ranges[idx].end = parse_size_and_unit(cur = str, &str);
+ if ( cur == str )
+ break;
+ if ( ranges[idx].end <= ranges[idx].start )
+ {
+ printk(XENLOG_WARNING "crashkernel: end <= start\n");
+ break;
+ }
+ }
+ else
+ ranges[idx].end = -1;
+
+ if ( *str != ':' )
+ {
+ printk(XENLOG_WARNING "crashkernel: ':' expected\n");
+ break;
+ }
+
+ ranges[idx].size = parse_size_and_unit(cur = str + 1, &str);
+ if ( cur == str )
+ break;
+
+ ++idx;
+ } while ( *str == ',' );
+ if ( idx < ARRAY_SIZE(ranges) )
+ ranges[idx].size = 0;
+ }
+ else
+ kexec_crash_area.size = parse_size_and_unit(cur = str, &str);
+ if ( cur != str && *str == '@' )
+ kexec_crash_area.start = parse_size_and_unit(cur = str + 1, &str);
+ if ( cur == str )
+ printk(XENLOG_WARNING "crashkernel: memory value expected\n");
}
custom_param("crashkernel", parse_crashkernel);
+void __init set_kexec_crash_area_size(u64 system_ram)
+{
+ unsigned int idx;
+
+ for ( idx = 0; idx < ARRAY_SIZE(ranges) && !kexec_crash_area.size; ++idx )
+ {
+ if ( !ranges[idx].size )
+ break;
+
+ if ( ranges[idx].size >= system_ram )
+ {
+ printk(XENLOG_WARNING "crashkernel: invalid size\n");
+ continue;
+ }
+
+ if ( ranges[idx].start <= system_ram && ranges[idx].end > system_ram )
+ kexec_crash_area.size = ranges[idx].size;
+ }
+}
+
static void one_cpu_only(void)
{
/* Only allow the first cpu to continue - force other cpus to spin */
--- a/xen/include/xen/kexec.h
+++ b/xen/include/xen/kexec.h
@@ -12,6 +12,8 @@ typedef struct xen_kexec_reserve {
extern xen_kexec_reserve_t kexec_crash_area;
+void set_kexec_crash_area_size(u64 system_ram);
+
/* We have space for 4 images to support atomic update
* of images. This is important for CRASH images since
* a panic can happen at any time...

View File

@ -0,0 +1,24 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1272972545 -3600
# Node ID 6d21da6feb59db33eceb3aa6bcb8afac15ac7fd8
# Parent 71cf9b12ac9effe2b7cf0aec058f92ee36430e0a
vmx, tboot: Check the correct in/outside-SMX flag when init'ing VMX.
Signed-off-by: Shane Wang <shane.wang@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -360,8 +360,9 @@ int vmx_cpu_up(void)
bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK);
if ( bios_locked )
{
- if ( !(eax & (IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX |
- IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
+ if ( !(eax & (tboot_in_measured_env()
+ ? IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX
+ : IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX)) )
{
printk("CPU%d: VMX disabled by BIOS.\n", cpu);
return 0;

View File

@ -0,0 +1,65 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1272973271 -3600
# Node ID 6c7b905b03ff1cf171187bafe7129e3e213e5787
# Parent bd52f2e040e5a5ca58e956b3d0780a86934a429e
x86: add support for domain-initiated global cache flush
Newer Linux' AGP code wants to flush caches on all CPUs under certain
circumstances. Since doing this on all vCPU-s of the domain in
question doesn't yield the intended effect, this needs to be done in
the hypervisor. Add a new MMUEXT operation for this.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2887,6 +2887,27 @@ int do_mmuext_op(
}
break;
+ case MMUEXT_FLUSH_CACHE_GLOBAL:
+ if ( unlikely(foreigndom != DOMID_SELF) )
+ okay = 0;
+ else if ( likely(cache_flush_permitted(d)) )
+ {
+ unsigned int cpu;
+ cpumask_t mask = CPU_MASK_NONE;
+
+ for_each_online_cpu(cpu)
+ if ( !cpus_intersects(mask,
+ per_cpu(cpu_sibling_map, cpu)) )
+ cpu_set(cpu, mask);
+ flush_mask(&mask, FLUSH_CACHE);
+ }
+ else
+ {
+ MEM_LOG("Non-physdev domain tried to FLUSH_CACHE_GLOBAL");
+ okay = 0;
+ }
+ break;
+
case MMUEXT_SET_LDT:
{
unsigned long ptr = op.arg1.linear_addr;
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -239,6 +239,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
*
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
*
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
@@ -268,6 +272,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define MMUEXT_NEW_USER_BASEPTR 15
#define MMUEXT_CLEAR_PAGE 16
#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#ifndef __ASSEMBLY__
struct mmuext_op {

View File

@ -0,0 +1,99 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1272973341 -3600
# Node ID b07edd50661e7f768088c08215dabb9becb5c5b6
# Parent 6c7b905b03ff1cf171187bafe7129e3e213e5787
x86: fix Dom0 booting time regression
References: bnc#593536
Unfortunately the changes in c/s 21035 caused boot time to go up
significantly on certain large systems. To rectify this without going
back to the old behavior, introduce a new memory allocation flag so
that Dom0 allocations can exhaust non-DMA memory before starting to
consume DMA memory. For the latter, the behavior introduced in
aforementioned c/s gets retained, while for the former we can now even
try larger chunks first.
This builds on the fact that alloc_chunk() gets called with non-
increasing 'max_pages' arguments, and hence it can store locally the
allocation order last used (as larger order allocations can't succeed
during subsequent invocations if they failed once).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -125,26 +125,36 @@ string_param("dom0_ioports_disable", opt
static struct page_info * __init alloc_chunk(
struct domain *d, unsigned long max_pages)
{
+ static unsigned int __initdata last_order = MAX_ORDER;
+ static unsigned int __initdata memflags = MEMF_no_dma;
struct page_info *page;
- unsigned int order, free_order;
+ unsigned int order = get_order_from_pages(max_pages), free_order;
- /*
- * Allocate up to 2MB at a time: It prevents allocating very large chunks
- * from DMA pools before the >4GB pool is fully depleted.
- */
- if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
- max_pages = 2UL << (20 - PAGE_SHIFT);
- order = get_order_from_pages(max_pages);
- if ( (max_pages & (max_pages-1)) != 0 )
- order--;
- while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
+ if ( order > last_order )
+ order = last_order;
+ else if ( max_pages & (max_pages - 1) )
+ --order;
+ while ( (page = alloc_domheap_pages(d, order, memflags)) == NULL )
if ( order-- == 0 )
break;
+ if ( page )
+ last_order = order;
+ else if ( memflags )
+ {
+ /*
+ * Allocate up to 2MB at a time: It prevents allocating very large
+ * chunks from DMA pools before the >4GB pool is fully depleted.
+ */
+ last_order = 21 - PAGE_SHIFT;
+ memflags = 0;
+ return alloc_chunk(d, max_pages);
+ }
+
/*
* Make a reasonable attempt at finding a smaller chunk at a higher
* address, to avoid allocating from low memory as much as possible.
*/
- for ( free_order = order; page && order--; )
+ for ( free_order = order; !memflags && page && order--; )
{
struct page_info *pg2;
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1157,8 +1157,9 @@ struct page_info *alloc_domheap_pages(
pg = alloc_heap_pages(dma_zone + 1, zone_hi, node, order, memflags);
if ( (pg == NULL) &&
- ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
- node, order, memflags)) == NULL) )
+ ((memflags & MEMF_no_dma) ||
+ ((pg = alloc_heap_pages(MEMZONE_XEN + 1, zone_hi,
+ node, order, memflags)) == NULL)) )
return NULL;
if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -79,6 +79,8 @@ int assign_pages(
#define MEMF_populate_on_demand (1U<<_MEMF_populate_on_demand)
#define _MEMF_tmem 2
#define MEMF_tmem (1U<<_MEMF_tmem)
+#define _MEMF_no_dma 3
+#define MEMF_no_dma (1U<<_MEMF_no_dma)
#define _MEMF_node 8
#define MEMF_node(n) ((((n)+1)&0xff)<<_MEMF_node)
#define _MEMF_bits 24

View File

@ -1,7 +1,18 @@
Index: xen-4.0.0-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.0.0-testing/xen/arch/x86/hvm/hvm.c
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273142634 -3600
# Node ID 69c85f5b0a07e7a95945d117ea478a80d21c6b9e
# Parent 26da9bb87405c64c02def8d5f11c66f15847bd02
svm: support EFER.LMSLE for guests
Now that the feature is officially documented (see
http://support.amd.com/us/Processor_TechDocs/24593.pdf), I think it
makes sense to also allow HVM guests to make use of it.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -629,11 +629,12 @@ static int hvm_load_cpu_ctxt(struct doma
return -EINVAL;
}
@ -29,23 +40,19 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hvm.c
(!cpu_has_ffxsr && (value & EFER_FFXSE)) )
{
gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
Index: xen-4.0.0-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.0.0-testing/xen/arch/x86/hvm/svm/svm.c
@@ -56,6 +56,11 @@
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -56,6 +56,9 @@
u32 svm_feature_flags;
+#ifdef __x86_64__
+/* indicate whether guest may use EFER.LMSLE */
+unsigned char cpu_has_lmsl = 0;
+#endif
+/* Indicates whether guests may use EFER.LMSLE. */
+bool_t cpu_has_lmsl;
+
#define set_segment_register(name, value) \
asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
@@ -847,6 +852,29 @@ static int svm_cpu_up(struct cpuinfo_x86
@@ -847,6 +850,29 @@ static int svm_cpu_up(struct cpuinfo_x86
/* Initialize core's ASID handling. */
svm_asid_init(c);
@ -75,20 +82,13 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/svm/svm.c
return 1;
}
Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-x86/hvm/hvm.h
+++ xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h
@@ -132,6 +132,12 @@ struct hvm_function_table {
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -131,6 +131,7 @@ struct hvm_function_table {
extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;
+extern bool_t cpu_has_lmsl;
+#ifdef __i386__
+# define cpu_has_lmsl 0
+#else
+extern unsigned char cpu_has_lmsl;
+#endif
+
int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

View File

@ -1,7 +1,18 @@
Index: xen-4.0.0-testing/xen/common/keyhandler.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/keyhandler.c
+++ xen-4.0.0-testing/xen/common/keyhandler.c
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273143595 -3600
# Node ID ccae861f52f7f25aa2ab404a6110831402845dac
# Parent 924f54145fda26df64bf6f57010793893b29866f
Reduce '0' debug key's global impact
On large systems, dumping state may cause time management to get
stalled for so long a period that it wouldn't recover. Therefore add
a tasklet-based alternative mechanism to handle Dom0 state dumps.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -19,6 +19,7 @@
static struct keyhandler *key_table[256];
@ -20,15 +31,15 @@ Index: xen-4.0.0-testing/xen/common/keyhandler.c
+{
+ struct vcpu *v = (void *)arg;
+
+ for ( ; ; ) {
+ for ( ; ; )
+ {
+ vcpu_show_execution_state(v);
+ v = v->next_in_list;
+ if ( !v )
+ if ( (v = v->next_in_list) == NULL )
+ break;
+ if ( softirq_pending(smp_processor_id()) )
+ {
+ dump_dom0_tasklet.data = (unsigned long)v;
+ tasklet_schedule_cpu(&dump_dom0_tasklet, v->processor);
+ tasklet_schedule_on_cpu(&dump_dom0_tasklet, v->processor);
+ break;
+ }
+ }
@ -47,7 +58,7 @@ Index: xen-4.0.0-testing/xen/common/keyhandler.c
+ tasklet_kill(&dump_dom0_tasklet);
+ tasklet_init(&dump_dom0_tasklet, dump_dom0_action,
+ (unsigned long)v);
+ tasklet_schedule_cpu(&dump_dom0_tasklet, v->processor);
+ tasklet_schedule_on_cpu(&dump_dom0_tasklet, v->processor);
+ return;
+ }
vcpu_show_execution_state(v);

View File

@ -0,0 +1,23 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273220860 -3600
# Node ID 5ac6e33fa3a7fe6645eaeb7de530a29be2865c34
# Parent 7af536406c255bdc9d3429679710b0827d52bdaf
xend: support blktap2 in xend blkif utils
Support tap2 device type in xend blkif utils parse method.
Signed-off-by: Jim Fehlig <jfehlig@novell.com>
diff -r 7af536406c25 -r 5ac6e33fa3a7 tools/python/xen/util/blkif.py
--- a/tools/python/xen/util/blkif.py Fri May 07 09:26:49 2010 +0100
+++ b/tools/python/xen/util/blkif.py Fri May 07 09:27:40 2010 +0100
@@ -86,7 +86,7 @@
else:
fn = "/dev/%s" %(fn,)
- if typ == "tap":
+ if typ in ("tap", "tap2"):
(taptype, fn) = fn.split(":", 1)
return (fn, taptype)

View File

@ -39,6 +39,8 @@ case "$command" in
VPORTWWNN=`echo $VPORTWWNN | tr A-Z a-z`
FABRICNM=`echo $FABRICNM | tr A-Z a-z`
claim_lock "npiv"
find_vhost $VPORTWWPN
if test -z "$vhost" ; then
create_vport $FABRICNM $VPORTWWPN $VPORTWWNN
@ -53,6 +55,9 @@ case "$command" in
sleep 2
find_sdev $vhost $TGTWWPN $LUN
fi
release_lock "npiv"
if test ! -z "$dev"; then
xenstore-write $XENBUS_PATH/node /dev/$dev
write_dev /dev/$dev

View File

@ -10,7 +10,7 @@
CTRL_SRCS-y += xc_misc.c
--- /dev/null
+++ b/tools/libxc/xc_cpupool.c
@@ -0,0 +1,154 @@
@@ -0,0 +1,165 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
@ -22,34 +22,45 @@
+#include <stdarg.h>
+#include "xc_private.h"
+
+static int do_sysctl_save(int xc_handle, struct xen_sysctl *sysctl)
+{
+ int ret;
+
+ do {
+ ret = do_sysctl(xc_handle, sysctl);
+ } while ( (ret < 0) && (errno == EAGAIN) );
+
+ return ret;
+}
+
+int xc_cpupool_create(int xc_handle,
+ uint32_t *ppoolid,
+ uint32_t sched_id)
+{
+ int err;
+ DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+ domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+ XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+ domctl.u.cpupool_op.sched_id = sched_id;
+ if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_CREATE;
+ sysctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+ XEN_SYSCTL_CPUPOOL_PAR_ANY : *ppoolid;
+ sysctl.u.cpupool_op.sched_id = sched_id;
+ if ( (err = do_sysctl_save(xc_handle, &sysctl)) != 0 )
+ return err;
+
+ *ppoolid = domctl.u.cpupool_op.cpupool_id;
+ *ppoolid = sysctl.u.cpupool_op.cpupool_id;
+ return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+ uint32_t poolid)
+{
+ DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ return do_domctl_save(xc_handle, &domctl);
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_DESTROY;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle,
@ -61,34 +72,34 @@
+ int p;
+ uint32_t poolid = first_poolid;
+ uint8_t local[sizeof (info->cpumap)];
+ DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
+
+ memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+ for (p = 0; p < n_max; p++)
+ {
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ break;
+ }
+ err = do_domctl_save(xc_handle, &domctl);
+ err = do_sysctl_save(xc_handle, &sysctl);
+ unlock_pages(local, sizeof (local));
+
+ if ( err < 0 )
+ break;
+
+ info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+ info->sched_id = domctl.u.cpupool_op.sched_id;
+ info->n_dom = domctl.u.cpupool_op.n_dom;
+ info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+ info->sched_id = sysctl.u.cpupool_op.sched_id;
+ info->n_dom = sysctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+ poolid = domctl.u.cpupool_op.cpupool_id + 1;
+ poolid = sysctl.u.cpupool_op.cpupool_id + 1;
+ info++;
+ }
+
@ -102,39 +113,39 @@
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_ADDCPU;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+ uint32_t poolid,
+ int cpu)
+{
+ DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_domctl_save(xc_handle, &domctl);
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_RMCPU;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+ return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+ uint32_t poolid,
+ uint32_t domid)
+{
+ DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+ domctl.u.cpupool_op.cpupool_id = poolid;
+ domctl.u.cpupool_op.domid = domid;
+ return do_domctl_save(xc_handle, &domctl);
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ sysctl.u.cpupool_op.domid = domid;
+ return do_sysctl_save(xc_handle, &sysctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
@ -142,12 +153,12 @@
+{
+ int err;
+ uint8_t local[sizeof (*cpumap)];
+ DECLARE_DOMCTL;
+ DECLARE_SYSCTL;
+
+ domctl.cmd = XEN_DOMCTL_cpupool_op;
+ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+ if ( (err = lock_pages(local, sizeof(local))) != 0 )
+ {
@ -155,7 +166,7 @@
+ return err;
+ }
+
+ err = do_domctl_save(xc_handle, &domctl);
+ err = do_sysctl_save(xc_handle, &sysctl);
+ unlock_pages(local, sizeof (local));
+
+ if (err < 0)
@ -175,28 +186,6 @@
memcpy(info->handle, domctl.u.getdomaininfo.handle,
sizeof(xen_domain_handle_t));
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
return ret;
}
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+ int ret;
+
+ do
+ {
+ ret = do_domctl(xc_handle, domctl);
+ }
+ while ( (ret < 0 ) && (errno == EAGAIN) );
+
+ return ret;
+}
+
static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
int ret = -1;
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {

View File

@ -1,5 +1,7 @@
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c
+++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c
@@ -106,8 +106,8 @@ static PyObject *pyxc_domain_create(XcOb
static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
@ -287,8 +289,10 @@
{ NULL, NULL, 0, NULL }
};
Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py
===================================================================
--- /dev/null
+++ b/tools/python/xen/util/sxputils.py
+++ xen-4.0.0-testing/tools/python/xen/util/sxputils.py
@@ -0,0 +1,64 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@ -354,8 +358,10 @@
+ return sxphash
+
+
--- a/tools/python/xen/xend/XendAPI.py
+++ b/tools/python/xen/xend/XendAPI.py
Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendAPI.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py
@@ -51,6 +51,7 @@ from XendDPCI import XendDPCI
from XendPSCSI import XendPSCSI, XendPSCSI_HBA
from XendDSCSI import XendDSCSI, XendDSCSI_HBA
@ -547,8 +553,10 @@
# Xen API: Class VBD
# ----------------------------------------------------------------
Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
===================================================================
--- /dev/null
+++ b/tools/python/xen/xend/XendCPUPool.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py
@@ -0,0 +1,903 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@ -1453,8 +1461,10 @@
+
+ unbound_cpus = classmethod(unbound_cpus)
+
--- a/tools/python/xen/xend/XendConfig.py
+++ b/tools/python/xen/xend/XendConfig.py
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
@@ -128,6 +128,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
'PV_bootloader': 'bootloader',
'PV_bootloader_args': 'bootloader_args',
@ -1495,8 +1505,10 @@
'superpages': 0,
'description': '',
}
--- a/tools/python/xen/xend/XendConstants.py
+++ b/tools/python/xen/xend/XendConstants.py
Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConstants.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
@@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(
XS_VMROOT = "/vm/"
@ -1506,8 +1518,10 @@
NR_PCI_FUNC = 8
NR_PCI_DEV = 32
NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
@@ -60,6 +60,7 @@ from xen.xend.xenstore.xsutil import Get
from xen.xend.xenstore.xswatch import xswatch
from xen.xend.XendConstants import *
@ -1560,8 +1574,10 @@
def get_power_state(self):
return XEN_API_VM_POWER_STATE[self._stateGet()]
def get_platform(self):
--- a/tools/python/xen/xend/XendError.py
+++ b/tools/python/xen/xend/XendError.py
Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendError.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendError.py
@@ -18,6 +18,7 @@
from xmlrpclib import Fault
@ -1597,8 +1613,10 @@
class VDIError(XendAPIError):
def __init__(self, error, vdi):
XendAPIError.__init__(self)
--- a/tools/python/xen/xend/XendNode.py
+++ b/tools/python/xen/xend/XendNode.py
Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendNode.py
+++ xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
@@ -43,6 +43,7 @@ from XendStateStore import XendStateStor
from XendMonitor import XendMonitor
from XendPPCI import XendPPCI
@ -1672,8 +1690,10 @@
'max_free_memory',
'max_para_memory',
'max_hvm_memory',
--- a/tools/python/xen/xend/server/SrvServer.py
+++ b/tools/python/xen/xend/server/SrvServer.py
Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py
+++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
@@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio
from xen.xend.XendLogging import log
from xen.xend.XendClient import XEN_API_SOCKET
@ -1695,8 +1715,10 @@
# Reaching this point means we can auto start domains
try:
xenddomain().autostart_domains()
--- a/tools/python/xen/xend/server/XMLRPCServer.py
+++ b/tools/python/xen/xend/server/XMLRPCServer.py
Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/XMLRPCServer.py
+++ xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
@@ -33,6 +33,7 @@ from xen.xend.XendClient import XML_RPC_
from xen.xend.XendConstants import DOM_STATE_RUNNING
from xen.xend.XendLogging import log
@ -1728,8 +1750,10 @@
# Functions in XendNode and XendDmesg
for type, lst, n in [(XendNode,
['info', 'pciinfo', 'send_debug_keys',
--- a/tools/python/xen/xm/create.dtd
+++ b/tools/python/xen/xm/create.dtd
Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.dtd
+++ xen-4.0.0-testing/tools/python/xen/xm/create.dtd
@@ -50,6 +50,7 @@
s3_integrity CDATA #REQUIRED
vcpus_max CDATA #REQUIRED
@ -1738,8 +1762,10 @@
actions_after_shutdown %NORMAL_EXIT; #REQUIRED
actions_after_reboot %NORMAL_EXIT; #REQUIRED
actions_after_crash %CRASH_BEHAVIOUR; #REQUIRED
--- a/tools/python/xen/xm/create.py
+++ b/tools/python/xen/xm/create.py
Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.py
+++ xen-4.0.0-testing/tools/python/xen/xm/create.py
@@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults
fn=set_bool, default=None,
use="""Do not inject spurious page faults into this guest""")
@ -1751,7 +1777,7 @@
gopts.var('pci_msitranslate', val='TRANSLATE',
fn=set_int, default=1,
use="""Global PCI MSI-INTx translation flag (0=disable;
@@ -1147,6 +1151,8 @@ def make_config(vals):
@@ -1149,6 +1153,8 @@ def make_config(vals):
config.append(['localtime', vals.localtime])
if vals.oos:
config.append(['oos', vals.oos])
@ -1760,8 +1786,10 @@
config_image = configure_image(vals)
if vals.bootloader:
--- a/tools/python/xen/xm/main.py
+++ b/tools/python/xen/xm/main.py
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -56,6 +56,7 @@ from xen.util.xmlrpcclient import Server
import xen.util.xsm.xsm as security
from xen.util.xsm.xsm import XSMError
@ -1933,7 +1961,7 @@
"total_memory": int(host_metrics_record["memory_total"])/1024/1024,
"free_memory": int(host_metrics_record["memory_free"])/1024/1024,
"xen_major": getVal(["software_version", "xen_major"]),
@@ -3449,6 +3510,169 @@ def xm_tmem_shared_auth(args):
@@ -3451,6 +3512,169 @@ def xm_tmem_shared_auth(args):
else:
return server.xend.node.tmem_shared_auth(domid,uuid_str,auth)
@ -2103,7 +2131,7 @@
commands = {
"shell": xm_shell,
@@ -3534,6 +3758,14 @@ commands = {
@@ -3536,6 +3760,14 @@ commands = {
"usb-list-assignable-devices": xm_usb_list_assignable_devices,
"usb-hc-create": xm_usb_hc_create,
"usb-hc-destroy": xm_usb_hc_destroy,
@ -2118,7 +2146,7 @@
# tmem
"tmem-thaw": xm_tmem_thaw,
"tmem-freeze": xm_tmem_freeze,
@@ -3565,6 +3797,8 @@ IMPORTED_COMMANDS = [
@@ -3567,6 +3799,8 @@ IMPORTED_COMMANDS = [
'resetpolicy',
'getenforce',
'setenforce',
@ -2127,8 +2155,10 @@
]
for c in IMPORTED_COMMANDS:
Index: xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
===================================================================
--- /dev/null
+++ b/tools/python/xen/xm/pool-create.py
+++ xen-4.0.0-testing/tools/python/xen/xm/pool-create.py
@@ -0,0 +1,51 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@ -2181,8 +2211,10 @@
+if __name__ == '__main__':
+ main(sys.argv)
+
Index: xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
===================================================================
--- /dev/null
+++ b/tools/python/xen/xm/pool-new.py
+++ xen-4.0.0-testing/tools/python/xen/xm/pool-new.py
@@ -0,0 +1,50 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@ -2234,8 +2266,10 @@
+if __name__ == '__main__':
+ main(sys.argv)
+
Index: xen-4.0.0-testing/tools/python/xen/xm/pool.py
===================================================================
--- /dev/null
+++ b/tools/python/xen/xm/pool.py
+++ xen-4.0.0-testing/tools/python/xen/xm/pool.py
@@ -0,0 +1,236 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
@ -2473,8 +2507,10 @@
+def help():
+ return str(GOPTS)
+
--- a/tools/python/xen/xm/xenapi_create.py
+++ b/tools/python/xen/xm/xenapi_create.py
Index: xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/xenapi_create.py
+++ xen-4.0.0-testing/tools/python/xen/xm/xenapi_create.py
@@ -310,6 +310,8 @@ class xenapi_create:
get_child_nodes_as_dict(vm, "platform", "key", "value"),
"other_config":

View File

@ -1,4 +1,5 @@
- fix tasklet_schedule_cpu() when invoked from the tasklet's handler
(and rename to tasklet_schedule_on_cpu() to match upstream)
- properly balance (un-)pausing in continue_hypercall_on_cpu() code
paths
- bump domctl interface version (due to the addition of the "cpupool"
@ -18,14 +19,16 @@
}
else
{
@@ -1591,7 +1592,6 @@ int continue_hypercall_on_cpu(int cpu, v
@@ -1591,8 +1592,7 @@ int continue_hypercall_on_cpu(int cpu, v
info->func = func;
info->data = data;
- vcpu_pause_nosync(v);
tasklet_schedule_cpu(&info->tasklet, cpu);
- tasklet_schedule_cpu(&info->tasklet, cpu);
+ tasklet_schedule_on_cpu(&info->tasklet, cpu);
raise_softirq(SCHEDULE_SOFTIRQ);
/* Dummy return value will be overwritten by new schedule_tail. */
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -176,7 +176,6 @@ struct csched_private {
@ -70,6 +73,15 @@
}
spin_unlock_irqrestore(&tasklet_lock, flags);
@@ -120,7 +123,7 @@ void tasklet_schedule(struct tasklet *t)
tasklet_schedule_list(t, &tasklet_list, smp_processor_id());
}
-void tasklet_schedule_cpu(struct tasklet *t, int cpu)
+void tasklet_schedule_on_cpu(struct tasklet *t, int cpu)
{
tasklet_schedule_list(t, &per_cpu(tasklet_list_pcpu, cpu), cpu);
}
@@ -156,7 +159,15 @@ static void tasklet_action(void)
if ( t->is_scheduled )
{
@ -100,7 +112,7 @@
XEN_GUEST_HANDLE_64(uint8) bitmap;
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -50,12 +50,14 @@ struct tasklet
@@ -50,15 +50,17 @@ struct tasklet
bool_t is_scheduled;
bool_t is_running;
bool_t is_dead;
@ -115,4 +127,8 @@
+ func, data }
void tasklet_schedule(struct tasklet *t);
void tasklet_schedule_cpu(struct tasklet *t, int cpu);
-void tasklet_schedule_cpu(struct tasklet *t, int cpu);
+void tasklet_schedule_on_cpu(struct tasklet *t, int cpu);
void tasklet_kill(struct tasklet *t);
void tasklet_init(
struct tasklet *t, void (*func)(unsigned long), unsigned long data);

View File

@ -154,7 +154,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
opt_dom0_max_vcpus = MAX_VIRT_CPUS;
@@ -277,7 +278,7 @@ int __init construct_dom0(
@@ -287,7 +288,7 @@ int __init construct_dom0(
unsigned long _initrd_start, unsigned long initrd_len,
char *cmdline)
{
@ -163,7 +163,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
struct cpu_user_regs *regs;
unsigned long pfn, mfn;
unsigned long nr_pages;
@@ -776,8 +777,12 @@ int __init construct_dom0(
@@ -786,8 +787,12 @@ int __init construct_dom0(
printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
@ -314,7 +314,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
if ( idle_domain == NULL )
BUG();
idle_domain->vcpu = idle_vcpu;
@@ -1089,8 +1090,13 @@ void __init __start_xen(unsigned long mb
@@ -1094,8 +1095,13 @@ void __init __start_xen(unsigned long mb
if ( !tboot_protect_mem_regions() )
panic("Could not protect TXT memory regions\n");
@ -857,9 +857,9 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+}
+
+/*
+ * do cpupool related domctl operations
+ * do cpupool related sysctl operations
+ */
+int cpupool_do_domctl(struct xen_domctl_cpupool_op *op)
+int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
+{
+ int ret;
+ struct cpupool *c;
@ -867,12 +867,12 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ switch ( op->op )
+ {
+
+ case XEN_DOMCTL_CPUPOOL_OP_CREATE:
+ case XEN_SYSCTL_CPUPOOL_OP_CREATE:
+ {
+ int poolid;
+ struct scheduler *sched;
+
+ poolid = (op->cpupool_id == XEN_DOMCTL_CPUPOOL_PAR_ANY) ?
+ poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
+ CPUPOOLID_NONE: op->cpupool_id;
+ sched = scheduler_get_by_id(op->sched_id);
+ ret = -ENOENT;
@ -887,7 +887,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ }
+ break;
+
+ case XEN_DOMCTL_CPUPOOL_OP_DESTROY:
+ case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
+ {
+ spin_lock(&cpupool_lock);
+ c = cpupool_find_by_id(op->cpupool_id, 1);
@ -899,7 +899,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ }
+ break;
+
+ case XEN_DOMCTL_CPUPOOL_OP_INFO:
+ case XEN_SYSCTL_CPUPOOL_OP_INFO:
+ {
+ spin_lock(&cpupool_lock);
+ c = cpupool_find_by_id(op->cpupool_id, 0);
@ -915,7 +915,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ }
+ break;
+
+ case XEN_DOMCTL_CPUPOOL_OP_ADDCPU:
+ case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
+ {
+ unsigned cpu;
+
@ -923,7 +923,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ printk(XENLOG_DEBUG "cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+ op->cpupool_id, cpu);
+ spin_lock(&cpupool_lock);
+ if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
+ if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
+ cpu = first_cpu(cpupool_free_cpus);
+ ret = -EINVAL;
+ if ( cpu >= NR_CPUS )
@ -943,7 +943,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ }
+ break;
+
+ case XEN_DOMCTL_CPUPOOL_OP_RMCPU:
+ case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
+ {
+ unsigned cpu;
+
@ -954,7 +954,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ if ( c == NULL )
+ break;
+ cpu = op->cpu;
+ if ( cpu == XEN_DOMCTL_CPUPOOL_PAR_ANY )
+ if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
+ cpu = last_cpu(c->cpu_valid);
+ ret = -EINVAL;
+ if ( cpu >= NR_CPUS )
@ -966,7 +966,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ }
+ break;
+
+ case XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN:
+ case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
+ {
+ struct domain *d;
+
@ -1010,7 +1010,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ }
+ break;
+
+ case XEN_DOMCTL_CPUPOOL_OP_FREEINFO:
+ case XEN_SYSCTL_CPUPOOL_OP_FREEINFO:
+ {
+ cpumask_to_xenctl_cpumap(&(op->cpumap),
+ &cpupool_free_cpus);
@ -1176,21 +1176,6 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
if ( alloc_vcpu(d, i, cpu) == NULL )
goto maxvcpu_out;
@@ -961,6 +967,14 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
}
break;
+ case XEN_DOMCTL_cpupool_op:
+ {
+ ret = cpupool_do_domctl(&op->u.cpupool_op);
+ if ( (ret == 0) && copy_to_guest(u_domctl, op, 1) )
+ ret = -EFAULT;
+ }
+ break;
+
default:
ret = arch_do_domctl(op, u_domctl);
break;
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -70,11 +70,15 @@
@ -2994,6 +2979,23 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
}
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -314,6 +314,14 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
}
break;
+ case XEN_SYSCTL_cpupool_op:
+ {
+ ret = cpupool_do_sysctl(&op->u.cpupool_op);
+ if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
+ ret = -EFAULT;
+ }
+ break;
+
default:
ret = arch_do_sysctl(op, u_sysctl);
break;
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -451,7 +451,8 @@ struct arch_vcpu
@ -3038,23 +3040,31 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
};
typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
@@ -781,6 +782,30 @@ struct xen_domctl_mem_sharing_op {
@@ -781,7 +782,6 @@ struct xen_domctl_mem_sharing_op {
typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
+/*
+ * cpupool operations
+ */
+/* XEN_DOMCTL_cpupool_op */
+#define XEN_DOMCTL_CPUPOOL_OP_CREATE 1 /* C */
+#define XEN_DOMCTL_CPUPOOL_OP_DESTROY 2 /* D */
+#define XEN_DOMCTL_CPUPOOL_OP_INFO 3 /* I */
+#define XEN_DOMCTL_CPUPOOL_OP_ADDCPU 4 /* A */
+#define XEN_DOMCTL_CPUPOOL_OP_RMCPU 5 /* R */
+#define XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */
+#define XEN_DOMCTL_CPUPOOL_OP_FREEINFO 7 /* F */
+#define XEN_DOMCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF
+struct xen_domctl_cpupool_op {
-
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -491,6 +491,28 @@ struct xen_sysctl_lockprof_op {
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
+#define XEN_SYSCTL_cpupool_op 18
+/* XEN_SYSCTL_cpupool_op */
+#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
+#define XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */
+#define XEN_SYSCTL_CPUPOOL_OP_INFO 3 /* I */
+#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU 4 /* A */
+#define XEN_SYSCTL_CPUPOOL_OP_RMCPU 5 /* R */
+#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */
+#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO 7 /* F */
+#define XEN_SYSCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF
+struct xen_sysctl_cpupool_op {
+ uint32_t op; /* IN */
+ uint32_t cpupool_id; /* IN: CDIARM OUT: CI */
+ uint32_t sched_id; /* IN: C OUT: I */
@ -3063,28 +3073,20 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+ uint32_t n_dom; /* OUT: I */
+ struct xenctl_cpumap cpumap; /* OUT: IF */
+};
+typedef struct xen_domctl_cpupool_op xen_domctl_cpupool_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpupool_op_t);
+typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
+
struct xen_domctl {
struct xen_sysctl {
uint32_t cmd;
@@ -842,6 +867,7 @@ struct xen_domctl {
#define XEN_DOMCTL_gettscinfo 59
#define XEN_DOMCTL_settscinfo 60
#define XEN_DOMCTL_getpageframeinfo3 61
+#define XEN_DOMCTL_cpupool_op 62
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -890,6 +916,7 @@ struct xen_domctl {
struct xen_domctl_debug_op debug_op;
struct xen_domctl_mem_event_op mem_event_op;
struct xen_domctl_mem_sharing_op mem_sharing_op;
+ struct xen_domctl_cpupool_op cpupool_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
#endif
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
@@ -509,6 +531,7 @@ struct xen_sysctl {
struct xen_sysctl_pm_op pm_op;
struct xen_sysctl_page_offline_op page_offline;
struct xen_sysctl_lockprof_op lockprof_op;
+ struct xen_sysctl_cpupool_op cpupool_op;
uint8_t pad[128];
} u;
};
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -10,16 +10,29 @@
@ -3183,7 +3185,15 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
#endif /* __XEN_SCHED_IF_H__ */
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -132,8 +132,6 @@ struct vcpu
@@ -9,6 +9,7 @@
#include <xen/shared.h>
#include <public/xen.h>
#include <public/domctl.h>
+#include <public/sysctl.h>
#include <public/vcpu.h>
#include <public/xsm/acm.h>
#include <xen/time.h>
@@ -132,8 +133,6 @@ struct vcpu
bool_t defer_shutdown;
/* VCPU is paused following shutdown request (d->is_shutting_down)? */
bool_t paused_for_shutdown;
@ -3192,7 +3202,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
/*
* > 0: a single port is being polled;
@@ -211,6 +209,7 @@ struct domain
@@ -211,6 +210,7 @@ struct domain
/* Scheduling. */
void *sched_priv; /* scheduler-specific data */
@ -3200,7 +3210,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
struct domain *next_in_list;
struct domain *next_in_hashbucket;
@@ -383,7 +382,7 @@ static inline struct domain *get_current
@@ -383,7 +383,7 @@ static inline struct domain *get_current
}
struct domain *domain_create(
@ -3209,7 +3219,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
/* DOMCRF_hvm: Create an HVM domain, as opposed to a PV domain. */
#define _DOMCRF_hvm 0
#define DOMCRF_hvm (1U<<_DOMCRF_hvm)
@@ -471,6 +470,7 @@ int sched_init_vcpu(struct vcpu *v, uns
@@ -471,6 +471,7 @@ int sched_init_vcpu(struct vcpu *v, uns
void sched_destroy_vcpu(struct vcpu *v);
int sched_init_domain(struct domain *d);
void sched_destroy_domain(struct domain *d);
@ -3217,7 +3227,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
int sched_id(void);
void sched_tick_suspend(void);
@@ -578,12 +578,14 @@ void domain_pause_by_systemcontroller(st
@@ -578,12 +579,14 @@ void domain_pause_by_systemcontroller(st
void domain_unpause_by_systemcontroller(struct domain *d);
void cpu_init(void);
@ -3236,7 +3246,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
uint64_t get_cpu_idle_time(unsigned int cpu);
@@ -606,6 +608,18 @@ extern enum cpufreq_controller {
@@ -606,6 +609,18 @@ extern enum cpufreq_controller {
FREQCTL_none, FREQCTL_dom0_kernel, FREQCTL_xen
} cpufreq_controller;
@ -3249,7 +3259,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
+void cpupool_cpu_add(unsigned int cpu);
+int cpupool_add_domain(struct domain *d, int poolid);
+void cpupool_rm_domain(struct domain *d);
+int cpupool_do_domctl(struct xen_domctl_cpupool_op *op);
+int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
+#define num_cpupool_cpus(c) (cpus_weight((c)->cpu_valid))
+
#endif /* __SCHED_H__ */

View File

@ -1,54 +1,79 @@
Index: xen-4.0.0-testing/xen/arch/ia64/linux-xen/smp.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/ia64/linux-xen/smp.c
+++ xen-4.0.0-testing/xen/arch/ia64/linux-xen/smp.c
@@ -189,7 +189,7 @@ handle_IPI (int irq, void *dev_id, struc
* At this point the structure may be gone unless
* wait is true.
*/
- (*func)(info);
+ (*func)(info ?: regs);
--- a/xen/arch/ia64/linux-xen/smp.c
+++ b/xen/arch/ia64/linux-xen/smp.c
@@ -94,6 +94,7 @@ static volatile struct call_data_struct
/* Notify the sending CPU that the task is done. */
mb();
Index: xen-4.0.0-testing/xen/arch/x86/smp.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/smp.c
+++ xen-4.0.0-testing/xen/arch/x86/smp.c
@@ -395,7 +395,7 @@ static void __smp_call_function_interrup
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
+#define IPI_STATE_DUMP 2
if ( call_data.wait )
{
- (*func)(info);
+ (*func)(info ?: get_irq_regs());
mb();
atomic_inc(&call_data.finished);
}
@@ -403,7 +403,7 @@ static void __smp_call_function_interrup
{
mb();
atomic_inc(&call_data.started);
- (*func)(info);
+ (*func)(info ?: get_irq_regs());
}
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
@@ -202,6 +203,10 @@ handle_IPI (int irq, void *dev_id, struc
stop_this_cpu();
break;
irq_exit();
Index: xen-4.0.0-testing/xen/common/keyhandler.c
===================================================================
--- xen-4.0.0-testing.orig/xen/common/keyhandler.c
+++ xen-4.0.0-testing/xen/common/keyhandler.c
@@ -71,20 +71,45 @@ static struct keyhandler show_handlers_k
+ case IPI_STATE_DUMP:
+ dump_execstate(regs);
+ break;
+
default:
printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
break;
@@ -479,6 +484,12 @@ smp_send_stop (void)
send_IPI_allbutself(IPI_CPU_STOP);
}
+void
+smp_send_state_dump (unsigned int cpu)
+{
+ send_IPI_single(cpu, IPI_STATE_DUMP);
+}
+
int __init
setup_profiling_timer (unsigned int multiplier)
{
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -375,11 +375,24 @@ void smp_send_nmi_allbutself(void)
send_IPI_mask(&cpu_online_map, APIC_DM_NMI);
}
+void smp_send_state_dump(unsigned int cpu)
+{
+ state_dump_pending(cpu) = 1;
+ smp_send_event_check_cpu(cpu);
+}
+
fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
perfc_incr(ipis);
+ if ( unlikely(state_dump_pending(smp_processor_id())) )
+ {
+ irq_enter();
+ state_dump_pending(smp_processor_id()) = 0;
+ dump_execstate(regs);
+ irq_exit();
+ }
set_irq_regs(old_regs);
}
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -71,19 +71,52 @@ static struct keyhandler show_handlers_k
.desc = "show this message"
};
-static void __dump_execstate(void *unused)
+#ifdef CONFIG_SMP
+static cpumask_t dump_execstate_mask;
+#endif
+
+static void __dump_execstate(void *_regs)
+void dump_execstate(struct cpu_user_regs *regs)
{
- dump_execution_state();
- printk("*** Dumping CPU%d guest state: ***\n", smp_processor_id());
+ struct cpu_user_regs *regs = _regs;
+ unsigned int cpu = smp_processor_id();
+
+ if ( !guest_mode(regs) )
@ -67,12 +92,14 @@ Index: xen-4.0.0-testing/xen/common/keyhandler.c
show_execution_state(guest_cpu_user_regs());
+ }
+
+#ifdef CONFIG_SMP
+ cpu_clear(cpu, dump_execstate_mask);
+ if ( !alt_key_handling )
+ return;
+
+ cpu = cycle_cpu(cpu, dump_execstate_mask);
+ if ( cpu < NR_CPUS )
+ {
+ cpu_clear(cpu, dump_execstate_mask);
+ on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 0);
+ }
+ smp_send_state_dump(cpu);
+ else
+ {
+ printk("\n");
@ -80,44 +107,52 @@ Index: xen-4.0.0-testing/xen/common/keyhandler.c
+ console_end_sync();
+ watchdog_enable();
+ }
+#endif
}
static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
{
- unsigned int cpu;
-
+#ifdef CONFIG_SMP
unsigned int cpu;
+#endif
/* We want to get everything out that we possibly can. */
watchdog_disable();
console_start_sync();
@@ -92,21 +117,9 @@ static void dump_registers(unsigned char
@@ -91,17 +124,28 @@ static void dump_registers(unsigned char
printk("'%c' pressed -> dumping registers\n", key);
+#ifdef CONFIG_SMP
+ if ( alt_key_handling )
+ dump_execstate_mask = cpu_online_map;
+#endif
+
/* Get local execution state out immediately, in case we get stuck. */
- printk("\n*** Dumping CPU%d host state: ***\n", smp_processor_id());
- __dump_execstate(NULL);
-
- for_each_online_cpu ( cpu )
- {
- if ( cpu == smp_processor_id() )
- continue;
+ dump_execstate(regs);
+
+#ifdef CONFIG_SMP
+ if ( alt_key_handling )
+ return;
for_each_online_cpu ( cpu )
{
if ( cpu == smp_processor_id() )
continue;
- printk("\n*** Dumping CPU%d host state: ***\n", cpu);
- on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
- }
-
- printk("\n");
-
- console_end_sync();
- watchdog_enable();
+ cpus_andnot(dump_execstate_mask, cpu_online_map,
+ cpumask_of_cpu(smp_processor_id()));
+ __dump_execstate(regs);
}
+ cpu_set(cpu, dump_execstate_mask);
+ smp_send_state_dump(cpu);
+ while ( cpu_isset(cpu, dump_execstate_mask) )
+ cpu_relax();
}
+#endif
static struct keyhandler dump_registers_keyhandler = {
Index: xen-4.0.0-testing/xen/include/asm-ia64/linux-xen/asm/ptrace.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-ia64/linux-xen/asm/ptrace.h
+++ xen-4.0.0-testing/xen/include/asm-ia64/linux-xen/asm/ptrace.h
printk("\n");
--- a/xen/include/asm-ia64/linux-xen/asm/ptrace.h
+++ b/xen/include/asm-ia64/linux-xen/asm/ptrace.h
@@ -278,7 +278,7 @@ struct switch_stack {
# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
@ -127,3 +162,43 @@ Index: xen-4.0.0-testing/xen/include/asm-ia64/linux-xen/asm/ptrace.h
# define guest_kernel_mode(regs) (ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
# define vmx_guest_kernel_mode(regs) (ia64_psr(regs)->cpl == 0)
# define regs_increment_iip(regs) \
--- a/xen/include/asm-x86/hardirq.h
+++ b/xen/include/asm-x86/hardirq.h
@@ -8,6 +8,7 @@ typedef struct {
unsigned long __softirq_pending;
unsigned int __local_irq_count;
unsigned int __nmi_count;
+ bool_t __state_dump_pending;
} __cacheline_aligned irq_cpustat_t;
#include <xen/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
--- a/xen/include/xen/irq_cpustat.h
+++ b/xen/include/xen/irq_cpustat.h
@@ -26,5 +26,6 @@ extern irq_cpustat_t irq_stat[];
#define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending)
#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count)
#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count)
+#define state_dump_pending(cpu) __IRQ_STAT((cpu), __state_dump_pending)
#endif /* __irq_cpustat_h */
--- a/xen/include/xen/lib.h
+++ b/xen/include/xen/lib.h
@@ -111,4 +111,7 @@ extern int tainted;
extern char *print_tainted(char *str);
extern void add_taint(unsigned);
+struct cpu_user_regs;
+void dump_execstate(struct cpu_user_regs *);
+
#endif /* __LIB_H__ */
--- a/xen/include/xen/smp.h
+++ b/xen/include/xen/smp.h
@@ -13,6 +13,8 @@ extern void smp_send_event_check_mask(co
#define smp_send_event_check_cpu(cpu) \
smp_send_event_check_mask(cpumask_of(cpu))
+extern void smp_send_state_dump(unsigned int cpu);
+
/*
* Prepare machine for booting other CPUs.
*/

View File

@ -87,7 +87,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
gopts.var('acpi', val='ACPI',
fn=set_int, default=1,
use="Disable or enable ACPI of HVM domain.")
@@ -1081,7 +1085,7 @@ def configure_hvm(config_image, vals):
@@ -1083,7 +1087,7 @@ def configure_hvm(config_image, vals):
'timer_mode',
'usb', 'usbdevice',
'vcpus', 'vnc', 'vncconsole', 'vncdisplay', 'vnclisten',

View File

@ -92,6 +92,7 @@ case "$1" in
modprobe xenblk 2>/dev/null || true
modprobe netbk 2>/dev/null || true
modprobe gntdev 2>/dev/null || true
modprobe usbbk 2>/dev/null || true
xend start
await_daemons_up
;;

View File

@ -647,7 +647,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
def xm_save(args):
arg_check(args, "save", 2, 4)
@@ -3694,6 +3769,10 @@ commands = {
@@ -3696,6 +3771,10 @@ commands = {
"restore": xm_restore,
"resume": xm_resume,
"save": xm_save,

View File

@ -0,0 +1,32 @@
Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
+++ xen-4.0.0-testing/tools/python/xen/xm/main.py
@@ -2489,8 +2489,10 @@ def xm_usb_list_assignable_devices(args)
def parse_block_configuration(args):
dom = args[0]
- if args[1].startswith('tap:'):
+ if args[1].startswith('tap2:'):
cls = 'tap2'
+ elif args[1].startswith('tap:'):
+ cls = 'tap'
else:
cls = 'vbd'
Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
===================================================================
--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.py
+++ xen-4.0.0-testing/tools/python/xen/xm/create.py
@@ -783,8 +783,10 @@ def configure_disks(config_devs, vals):
"""Create the config for disks (virtual block devices).
"""
for (uname, dev, mode, backend, protocol) in vals.disk:
- if uname.startswith('tap:'):
+ if uname.startswith('tap2:'):
cls = 'tap2'
+ elif uname.startswith('tap:'):
+ cls = 'tap'
else:
cls = 'vbd'

View File

@ -15,7 +15,7 @@ Index: xen-4.0.0-testing/tools/python/xen/util/blkif.py
@@ -88,15 +88,18 @@ def _parse_uname(uname):
if typ == "tap":
if typ in ("tap", "tap2"):
(taptype, fn) = fn.split(":", 1)
- return (fn, taptype)
+ if taptype in ("tapdisk", "ioemu"):
@ -154,7 +154,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
+ def _shouldMount(types):
+ if types[0] in ('file', 'phy'):
+ return False
+ if types[0] == 'tap' or types[0] == 'tap2':
+ if types[0] in ('tap', 'tap2'):
+ if types[1] in ('aio', 'sync'):
+ return False
+ else:

View File

@ -1,3 +1,52 @@
-------------------------------------------------------------------
Fri May 7 09:00:12 MDT 2010 - jfehlig@novell.com
- bnc#603583 - Fix migration of domUs using tapdisk devices
21317-xend-blkif-util-tap2.patch
suse-disable-tap2-default.patch
-------------------------------------------------------------------
Thu May 6 08:33:22 MDT 2010 - carnold@novell.com
- Match upstreams cpu pools switch from domctl to sysctl
- Upstream replacements for two of our custom patches (to ease
applying further backports)
- Fixed dump-exec-state.patch (could previously hang the system, as
could - with lower probability - the un-patched implementation)
-------------------------------------------------------------------
Wed May 5 08:20:45 MDT 2010 - carnold@novell.com
- bnc#593536 - xen hypervisor takes very long to initialize Dom0 on
128 CPUs and 256Gb
21272-x86-dom0-alloc-performance.patch
21266-vmx-disabled-check.patch
21271-x86-cache-flush-global.patch
-------------------------------------------------------------------
Tue May 4 09:46:22 MDT 2010 - carnold@novell.com
- bnc#558815 - using multiple npiv luns with same wwpn/wwnn broken
- bnc#601104 - Xen /etc/xen/scripts/block-npiv script fails when
accessing multiple disks using NPIV
block-npiv
-------------------------------------------------------------------
Fri Apr 30 08:10:52 MDT 2010 - carnold@novell.com
- bnc#595124 - VT-d can not be enabled on 32PAE Xen on Nehalem-EX
platform
21234-x86-bad-srat-clear-pxm2node.patch
bnc#585371 - kdump fails to load with xen: locate_hole failed
21235-crashkernel-advanced.patch
-------------------------------------------------------------------
Thu Apr 29 08:02:49 MDT 2010 - carnold@novell.com
- bnc#588918 - Attaching a U-disk to domain's failed by
"xm usb-attach"
init.xend
-------------------------------------------------------------------
Wed Apr 21 21:15:04 MDT 2010 - jfehlig@novell.com

View File

@ -1,5 +1,5 @@
#
# spec file for package xen (Version 4.0.0_21091_01)
# spec file for package xen (Version 4.0.0_21091_05)
#
# Copyright (c) 2010 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
@ -38,9 +38,9 @@ BuildRequires: glibc-32bit glibc-devel-32bit
%if %{?with_kmp}0
BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 4.0.0_21091_01
Release: 2
License: GPLv2+
Version: 4.0.0_21091_05
Release: 1
License: GPLv2
Group: System/Kernel
AutoReqProv: on
PreReq: %insserv_prereq %fillup_prereq
@ -82,8 +82,16 @@ Patch4: 21160-sysctl-debug-keys.patch
Patch5: 21189-x86-emulate-clflush.patch
Patch6: 21193-blktap-script.patch
Patch7: 21194-ioemu-subtype.patch
Patch8: 21225-conring-iommu.patch
Patch9: xend-preserve-devs.patch
Patch8: 21223-xend-preserve-devs.patch
Patch9: 21225-conring-iommu.patch
Patch10: 21234-x86-bad-srat-clear-pxm2node.patch
Patch11: 21235-crashkernel-advanced.patch
Patch12: 21266-vmx-disabled-check.patch
Patch13: 21271-x86-cache-flush-global.patch
Patch14: 21272-x86-dom0-alloc-performance.patch
Patch15: 21301-svm-lmsl.patch
Patch16: 21304-keyhandler-alternative.patch
Patch17: 21317-xend-blkif-util-tap2.patch
# Our patches
Patch300: xen-config.diff
Patch301: xend-config.diff
@ -132,13 +140,13 @@ Patch355: tools-gdbserver-build.diff
Patch356: ioemu-vnc-resize.patch
Patch357: ioemu-debuginfo.patch
Patch358: vif-bridge-no-iptables.patch
Patch359: suse-disable-tap2-default.patch
# Needs to go upstream
Patch359: checkpoint-rename.patch
Patch360: xm-save-check-file.patch
Patch361: xm-create-xflag.patch
Patch362: cpupools-core.patch
Patch363: cpupools-core-fixup.patch
Patch364: keyhandler-alternative.patch
Patch360: checkpoint-rename.patch
Patch361: xm-save-check-file.patch
Patch362: xm-create-xflag.patch
Patch363: cpupools-core.patch
Patch364: cpupools-core-fixup.patch
Patch365: cpu-pools-libxc.patch
Patch366: cpu-pools-python.patch
Patch367: cpu-pools-libxen.patch
@ -170,20 +178,19 @@ Patch430: del_usb_xend_entry.patch
# Jim's domain lock patch
Patch450: xend-domain-lock.patch
# Hypervisor and PV driver Patches
Patch500: svm-lmsl.patch
Patch501: 32on64-extra-mem.patch
Patch502: x86-ioapic-ack-default.patch
Patch503: x86-cpufreq-report.patch
Patch504: dump-exec-state.patch
Patch505: dom-print.patch
Patch506: pvdrv-import-shared-info.patch
Patch507: x86-show-page-walk-early.patch
Patch508: x86-extra-trap-info.patch
Patch509: pvdrv_emulation_control.patch
Patch510: blktap-pv-cdrom.patch
Patch511: pv-driver-build.patch
Patch512: supported_module.diff
Patch513: magic_ioport_compat.patch
Patch500: 32on64-extra-mem.patch
Patch501: x86-ioapic-ack-default.patch
Patch502: x86-cpufreq-report.patch
Patch503: dump-exec-state.patch
Patch504: dom-print.patch
Patch505: pvdrv-import-shared-info.patch
Patch506: x86-show-page-walk-early.patch
Patch507: x86-extra-trap-info.patch
Patch508: pvdrv_emulation_control.patch
Patch509: blktap-pv-cdrom.patch
Patch510: pv-driver-build.patch
Patch511: supported_module.diff
Patch512: magic_ioport_compat.patch
Patch650: disable_emulated_device.diff
Patch651: ioemu-disable-scsi.patch
# novell_shim patches
@ -535,6 +542,14 @@ Authors:
%patch7 -p1
%patch8 -p1
%patch9 -p1
%patch10 -p1
%patch11 -p1
%patch12 -p1
%patch13 -p1
%patch14 -p1
%patch15 -p1
%patch16 -p1
%patch17 -p1
%patch300 -p1
%patch301 -p1
%patch302 -p1
@ -627,7 +642,6 @@ Authors:
%patch510 -p1
%patch511 -p1
%patch512 -p1
%patch513 -p1
%patch650 -p1
%patch651 -p1
%patch700 -p1

View File

@ -11,7 +11,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
from xen.util import utils, auxbin
from xen.util.pci import dev_dict_to_sxp, \
parse_pci_name_extended, PciDeviceParseError
@@ -1512,7 +1512,7 @@ def main(argv):
@@ -1514,7 +1514,7 @@ def main(argv):
except IOError, exn:
raise OptionError("Cannot read file %s: %s" % (config, exn[1]))
@ -20,7 +20,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
from xen.xm.xenapi_create import sxp2xml
sxp2xml_inst = sxp2xml()
doc = sxp2xml_inst.convert_sxp_to_xml(config, transient=True)
@@ -1520,7 +1520,7 @@ def main(argv):
@@ -1522,7 +1522,7 @@ def main(argv):
if opts.vals.dryrun and not opts.is_xml:
SXPPrettyPrint.prettyprint(config)