Updating link to change in openSUSE:Factory/xen revision 91.0

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=09344c5eaa8b11326333fb14f09b2a45
OBS User buildservice-autocommit 2010-05-19 18:47:46 +00:00 committed by Git OBS Bridge
parent dc0c233056
commit 41f2467ce8
13 changed files with 651 additions and 133 deletions


@ -0,0 +1,41 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273256548 -3600
# Node ID bbf009817ffbe15a7cbbda8bddd82640f290ce0c
# Parent a97ef0eb0b9d520b8b6b97d240771cc27de978e3
svm: Avoid VINTR injection during NMI shadow
It is invalid because we get vmexit via IRET interception in this
case. VINTR is unaware of NMI shadows and may vmexit early, leaving us
in an endless loop of VINTR injections and interceptions.
Signed-off-by: Wei Wang <wei.wang2@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -88,10 +88,21 @@ static void enable_intr_window(struct vc
* guest can accept the real interrupt.
*
* TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
- * shadow. This is hard to do without hardware support. We should also
- * track 'NMI blocking' from NMI injection until IRET. This can be done
- * quite easily in software by intercepting the unblocking IRET.
+ * shadow. This is hard to do without hardware support. Also we should
+ * not be waiting for EFLAGS.IF to become 1.
*/
+
+ /*
+ * NMI-blocking window is handled by IRET interception. We should not
+ * inject a VINTR in this case as VINTR is unaware of NMI-blocking and
+ * hence we can enter an endless loop (VINTR intercept fires, yet
+ * hvm_interrupt_blocked() still indicates NMI-blocking is active, so
+ * we inject a VINTR, ...).
+ */
+ if ( (intack.source == hvm_intsrc_nmi) &&
+ (vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET) )
+ return;
+
intr = vmcb->vintr;
intr.fields.irq = 1;
intr.fields.vector = 0;


@ -0,0 +1,24 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273479772 -3600
# Node ID 93c5beba9d04d3e8d2522241bad14107433c3121
# Parent 3b6e33ebb57b670a48ad138aecb47cbd32a4e986
xentrace: fix bug in t_info size
t_info size should be in bytes, not pages. This fixes a bug
that crashes the hypervisor if the total number of all pages
is more than 1024 but less than 2048.
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -297,7 +297,7 @@ int tb_control(xen_sysctl_tbuf_op_t *tbc
case XEN_SYSCTL_TBUFOP_get_info:
tbc->evt_mask = tb_event_mask;
tbc->buffer_mfn = t_info ? virt_to_mfn(t_info) : 0;
- tbc->size = T_INFO_PAGES;
+ tbc->size = T_INFO_PAGES * PAGE_SIZE;
break;
case XEN_SYSCTL_TBUFOP_set_cpu_mask:
xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);


@ -0,0 +1,56 @@
References: bnc#603008
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273563345 -3600
# Node ID 220fa418eaae499cdc14359e83092401290a6211
# Parent 804304d4e05d98289de9a107c6b26c5a1db09cd8
VT-d: prevent watchdog timer from kicking in when
initializing on systems with huge amounts of memory
Process pending soft-IRQs every 4G worth of pages initialized for Dom0
to keep timekeeping happy and prevent the NMI watchdog (when enabled)
from kicking in.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- a/xen/drivers/passthrough/vtd/ia64/vtd.c
+++ b/xen/drivers/passthrough/vtd/ia64/vtd.c
@@ -19,6 +19,7 @@
*/
#include <xen/sched.h>
+#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/iommu.h>
#include <xen/numa.h>
@@ -110,6 +111,9 @@ static int do_dom0_iommu_mapping(unsigne
iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j));
page_addr += PAGE_SIZE;
+
+ if (!(pfn & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
+ process_pending_softirqs();
}
return 0;
}
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -19,6 +19,7 @@
*/
#include <xen/sched.h>
+#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <asm/paging.h>
#include <xen/iommu.h>
@@ -153,6 +154,9 @@ void iommu_set_dom0_mapping(struct domai
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
iommu_map_page(d, (i*tmp+j), (i*tmp+j));
+
+ if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
+ process_pending_softirqs();
}
}
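
The mask in the added check covers 0xfffff + 1 = 2^20 4-KiB frames, so process_pending_softirqs() runs once per 4 GiB of dom0 mappings. A minimal standalone sketch of that arithmetic (not part of the patch; assumes PAGE_SHIFT == PAGE_SHIFT_4K == 12):

#include <stdio.h>

#define PAGE_SHIFT     12
#define PAGE_SHIFT_4K  12

int main(void)
{
    /* 0xfffff + 1 = 2^20 pages of 4 KiB each, i.e. 4 GiB per interval. */
    unsigned long mask = 0xfffffUL >> (PAGE_SHIFT - PAGE_SHIFT_4K);
    unsigned long pfn;

    for ( pfn = 0; pfn < (1UL << 22); pfn++ )
        if ( !(pfn & mask) )
            printf("softirqs would be processed at pfn %#lx (~%llu GiB mapped)\n",
                   pfn, ((unsigned long long)pfn << PAGE_SHIFT) >> 30);
    return 0;
}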


@ -0,0 +1,67 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273573287 -3600
# Node ID 7d4deb86b9661b8585fc2b5c41334f2b34b0922b
# Parent 2077d6ad60780bbe0631c0dee1ef2e2b8eb03867
x86: Detect and handle unexpected platform-timer counter wrap.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -571,26 +571,48 @@ static u64 plt_stamp64; /* 64-b
static u64 plt_stamp; /* hardware-width platform counter stamp */
static struct timer plt_overflow_timer;
+static s_time_t __read_platform_stime(u64 platform_time)
+{
+ u64 diff = platform_time - platform_timer_stamp;
+ ASSERT(spin_is_locked(&platform_timer_lock));
+ return (stime_platform_stamp + scale_delta(diff, &plt_scale));
+}
+
static void plt_overflow(void *unused)
{
+ int i;
u64 count;
+ s_time_t now, plt_now, plt_wrap;
spin_lock_irq(&platform_timer_lock);
+
count = plt_src.read_counter();
plt_stamp64 += (count - plt_stamp) & plt_mask;
plt_stamp = count;
+
+ now = NOW();
+ plt_wrap = __read_platform_stime(plt_stamp64);
+ for ( i = 0; i < 10; i++ )
+ {
+ plt_now = plt_wrap;
+ plt_wrap = __read_platform_stime(plt_stamp64 + plt_mask + 1);
+ if ( __builtin_llabs(plt_wrap - now) > __builtin_llabs(plt_now - now) )
+ break;
+ plt_stamp64 += plt_mask + 1;
+ }
+ if ( i != 0 )
+ {
+ static bool_t warned_once;
+ if ( !test_and_set_bool(warned_once) )
+ printk("Platform timer appears to have unexpectedly wrapped "
+ "%u%s times.\n", i, (i == 10) ? " or more" : "");
+ }
+
spin_unlock_irq(&platform_timer_lock);
set_timer(&plt_overflow_timer, NOW() + plt_overflow_period);
}
-static s_time_t __read_platform_stime(u64 platform_time)
-{
- u64 diff = platform_time - platform_timer_stamp;
- ASSERT(spin_is_locked(&platform_timer_lock));
- return (stime_platform_stamp + scale_delta(diff, &plt_scale));
-}
-
static s_time_t read_platform_stime(void)
{
u64 count;
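
The existing accumulation above, plt_stamp64 += (count - plt_stamp) & plt_mask, can absorb at most one counter wrap between samples; the loop added by this patch detects the unexpected case of several missed wraps by checking whether adding whole wrap periods moves the platform time closer to NOW(). A standalone sketch of that masked-delta accumulation with a hypothetical 16-bit counter (not Xen code):

#include <stdint.h>
#include <stdio.h>

#define PLT_BITS 16
static const uint64_t plt_mask = (1ULL << PLT_BITS) - 1;
static uint64_t plt_stamp64, plt_stamp;

/* Fold a new hardware reading into the 64-bit software counter. */
static void accumulate(uint64_t hw_count)
{
    plt_stamp64 += (hw_count - plt_stamp) & plt_mask;
    plt_stamp = hw_count;
}

int main(void)
{
    accumulate(0xfff0);             /* first sample                        */
    accumulate(0x0010);             /* counter wrapped once in between     */
    /* The delta is still correct (0x20), so plt_stamp64 ends at 0x10010;
     * a second, missed wrap would be silently lost, which is exactly what
     * the patch now detects and reports. */
    printf("plt_stamp64 = %#llx\n", (unsigned long long)plt_stamp64);
    return 0;
}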

21349-x86-memcpy.patch

@ -0,0 +1,313 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273583128 -3600
# Node ID d77a88f938c635c3ccfedaa00f946e4d9ed26098
# Parent 2b5e14e4c5e57b0064d1c0415d9c4629bd0aac9b
x86: Replace our own specialised versions of memset and memcpy with
direct use of gcc's built-in versions.
This dramatically simplifies our code while also avoiding compile
warnings with certain intermediate versions of gcc.
This patch is based on an initial version by Jan Beulich.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Index: xen-4.0.0-testing/xen/arch/x86/string.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/string.c
+++ xen-4.0.0-testing/xen/arch/x86/string.c
@@ -14,25 +14,12 @@ void *memcpy(void *dest, const void *src
long d0, d1, d2;
asm volatile (
-#ifdef __i386__
- " rep movsl ; "
-#else
- " rep movsq ; "
- " testb $4,%b4 ; "
- " je 0f ; "
- " movsl ; "
- "0: ; "
-#endif
- " testb $2,%b4 ; "
- " je 1f ; "
- " movsw ; "
- "1: testb $1,%b4 ; "
- " je 2f ; "
- " movsb ; "
- "2: "
+ " rep ; movs"__OS" ; "
+ " mov %4,%3 ; "
+ " rep ; movsb "
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (n/sizeof(long)), "q" (n), "1" (dest), "2" (src)
- : "memory");
+ : "0" (n/BYTES_PER_LONG), "r" (n%BYTES_PER_LONG), "1" (dest), "2" (src)
+ : "memory" );
return dest;
}
@@ -55,7 +42,7 @@ void *memset(void *s, int c, size_t n)
void *memmove(void *dest, const void *src, size_t n)
{
long d0, d1, d2;
-
+
if ( dest < src )
return memcpy(dest, src, n);
Index: xen-4.0.0-testing/xen/include/asm-x86/string.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-x86/string.h
+++ xen-4.0.0-testing/xen/include/asm-x86/string.h
@@ -3,246 +3,14 @@
#include <xen/config.h>
-static inline void *__variable_memcpy(void *to, const void *from, size_t n)
-{
- long d0, d1, d2;
- __asm__ __volatile__ (
- " rep ; movs"__OS"\n"
- " mov %4,%3 \n"
- " rep ; movsb \n"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (n/BYTES_PER_LONG), "r" (n%BYTES_PER_LONG), "1" (to), "2" (from)
- : "memory" );
- return to;
-}
-
-/*
- * This looks horribly ugly, but the compiler can optimize it totally,
- * as the count is constant.
- */
-static always_inline void * __constant_memcpy(
- void * to, const void * from, size_t n)
-{
- switch ( n )
- {
- case 0:
- return to;
- case 1:
- *(u8 *)to = *(const u8 *)from;
- return to;
- case 2:
- *(u16 *)to = *(const u16 *)from;
- return to;
- case 3:
- *(u16 *)to = *(const u16 *)from;
- *(2+(u8 *)to) = *(2+(const u8 *)from);
- return to;
- case 4:
- *(u32 *)to = *(const u32 *)from;
- return to;
- case 5:
- *(u32 *)to = *(const u32 *)from;
- *(4+(u8 *)to) = *(4+(const u8 *)from);
- return to;
- case 6:
- *(u32 *)to = *(const u32 *)from;
- *(2+(u16 *)to) = *(2+(const u16 *)from);
- return to;
- case 7:
- *(u32 *)to = *(const u32 *)from;
- *(2+(u16 *)to) = *(2+(const u16 *)from);
- *(6+(u8 *)to) = *(6+(const u8 *)from);
- return to;
- case 8:
- *(u64 *)to = *(const u64 *)from;
- return to;
- case 12:
- *(u64 *)to = *(const u64 *)from;
- *(2+(u32 *)to) = *(2+(const u32 *)from);
- return to;
- case 16:
- *(u64 *)to = *(const u64 *)from;
- *(1+(u64 *)to) = *(1+(const u64 *)from);
- return to;
- case 20:
- *(u64 *)to = *(const u64 *)from;
- *(1+(u64 *)to) = *(1+(const u64 *)from);
- *(4+(u32 *)to) = *(4+(const u32 *)from);
- return to;
- }
-#define COMMON(x) \
- __asm__ __volatile__ ( \
- "rep ; movs"__OS \
- x \
- : "=&c" (d0), "=&D" (d1), "=&S" (d2) \
- : "0" (n/BYTES_PER_LONG), "1" (to), "2" (from) \
- : "memory" );
- {
- long d0, d1, d2;
- switch ( n % BYTES_PER_LONG )
- {
- case 0: COMMON(""); return to;
- case 1: COMMON("\n\tmovsb"); return to;
- case 2: COMMON("\n\tmovsw"); return to;
- case 3: COMMON("\n\tmovsw\n\tmovsb"); return to;
- case 4: COMMON("\n\tmovsl"); return to;
- case 5: COMMON("\n\tmovsl\n\tmovsb"); return to;
- case 6: COMMON("\n\tmovsl\n\tmovsw"); return to;
- case 7: COMMON("\n\tmovsl\n\tmovsw\n\tmovsb"); return to;
- }
- }
-#undef COMMON
- return to;
-}
-
#define __HAVE_ARCH_MEMCPY
-/* align source to a 64-bit boundary */
-static always_inline
-void *__var_memcpy(void *t, const void *f, size_t n)
-{
- int off = (unsigned long)f & 0x7;
- /* just do alignment if needed and if size is worth */
- if ( (n > 32) && off ) {
- size_t n1 = 8 - off;
- __variable_memcpy(t, f, n1);
- __variable_memcpy(t + n1, f + n1, n - n1);
- return t;
- } else {
- return (__variable_memcpy(t, f, n));
- }
-}
-
-#define memcpy(t,f,n) (__memcpy((t),(f),(n)))
-static always_inline
-void *__memcpy(void *t, const void *f, size_t n)
-{
- return (__builtin_constant_p(n) ?
- __constant_memcpy((t),(f),(n)) :
- __var_memcpy((t),(f),(n)));
-}
+#define memcpy(t,f,n) (__builtin_memcpy((t),(f),(n)))
-/* Some version of gcc don't have this builtin. It's non-critical anyway. */
+/* Some versions of gcc don't have this builtin. It's non-critical anyway. */
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *dest, const void *src, size_t n);
-static inline void *__memset_generic(void *s, char c, size_t count)
-{
- long d0, d1;
- __asm__ __volatile__ (
- "rep ; stosb"
- : "=&c" (d0), "=&D" (d1) : "a" (c), "1" (s), "0" (count) : "memory" );
- return s;
-}
-
-/* we might want to write optimized versions of these later */
-#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
-
-/*
- * memset(x,0,y) is a reasonably common thing to do, so we want to fill
- * things 32 bits at a time even when we don't know the size of the
- * area at compile-time..
- */
-static inline void *__constant_c_memset(void *s, unsigned long c, size_t count)
-{
- long d0, d1;
- __asm__ __volatile__(
- " rep ; stos"__OS"\n"
- " mov %3,%4 \n"
- " rep ; stosb \n"
- : "=&c" (d0), "=&D" (d1)
- : "a" (c), "r" (count%BYTES_PER_LONG),
- "0" (count/BYTES_PER_LONG), "1" (s)
- : "memory" );
- return s;
-}
-
-/*
- * This looks horribly ugly, but the compiler can optimize it totally,
- * as we by now know that both pattern and count is constant..
- */
-static always_inline void *__constant_c_and_count_memset(
- void *s, unsigned long pattern, size_t count)
-{
- switch ( count )
- {
- case 0:
- return s;
- case 1:
- *(u8 *)s = pattern;
- return s;
- case 2:
- *(u16 *)s = pattern;
- return s;
- case 3:
- *(u16 *)s = pattern;
- *(2+(u8 *)s) = pattern;
- return s;
- case 4:
- *(u32 *)s = pattern;
- return s;
- case 5:
- *(u32 *)s = pattern;
- *(4+(u8 *)s) = pattern;
- return s;
- case 6:
- *(u32 *)s = pattern;
- *(2+(u16 *)s) = pattern;
- return s;
- case 7:
- *(u32 *)s = pattern;
- *(2+(u16 *)s) = pattern;
- *(6+(u8 *)s) = pattern;
- return s;
- case 8:
- *(u64 *)s = pattern;
- return s;
- }
-#define COMMON(x) \
- __asm__ __volatile__ ( \
- "rep ; stos"__OS \
- x \
- : "=&c" (d0), "=&D" (d1) \
- : "a" (pattern), "0" (count/BYTES_PER_LONG), "1" (s) \
- : "memory" )
- {
- long d0, d1;
- switch ( count % BYTES_PER_LONG )
- {
- case 0: COMMON(""); return s;
- case 1: COMMON("\n\tstosb"); return s;
- case 2: COMMON("\n\tstosw"); return s;
- case 3: COMMON("\n\tstosw\n\tstosb"); return s;
- case 4: COMMON("\n\tstosl"); return s;
- case 5: COMMON("\n\tstosl\n\tstosb"); return s;
- case 6: COMMON("\n\tstosl\n\tstosw"); return s;
- case 7: COMMON("\n\tstosl\n\tstosw\n\tstosb"); return s;
- }
- }
-#undef COMMON
- return s;
-}
-
-#define __constant_c_x_memset(s, c, count) \
-(__builtin_constant_p(count) ? \
- __constant_c_and_count_memset((s),(c),(count)) : \
- __constant_c_memset((s),(c),(count)))
-
-#define __var_x_memset(s, c, count) \
-(__builtin_constant_p(count) ? \
- __constant_count_memset((s),(c),(count)) : \
- __memset_generic((s),(c),(count)))
-
-#ifdef CONFIG_X86_64
-#define MEMSET_PATTERN_MUL 0x0101010101010101UL
-#else
-#define MEMSET_PATTERN_MUL 0x01010101UL
-#endif
-
#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) (__memset((s),(c),(count)))
-#define __memset(s, c, count) \
-(__builtin_constant_p(c) ? \
- __constant_c_x_memset((s),(MEMSET_PATTERN_MUL*(unsigned char)(c)),(count)) : \
- __var_x_memset((s),(c),(count)))
+#define memset(s,c,n) (__builtin_memset((s),(c),(n)))
#endif /* __X86_STRING_H__ */
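
With this change the memcpy()/memset() macros simply forward to the gcc builtins, which the compiler can expand inline (especially for constant sizes) or lower to an ordinary library call. A trivial standalone illustration of the resulting definitions (the my_ names are made up for the example):

#include <stdio.h>

#define my_memcpy(t,f,n)  (__builtin_memcpy((t),(f),(n)))
#define my_memset(s,c,n)  (__builtin_memset((s),(c),(n)))

int main(void)
{
    char src[16] = "builtin memcpy", dst[16];

    my_memset(dst, 0, sizeof(dst));     /* constant size: typically inlined */
    my_memcpy(dst, src, sizeof(src));
    printf("%s\n", dst);
    return 0;
}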


@ -0,0 +1,20 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273651780 -3600
# Node ID fa94385978e6317732e2c12000923ca6a5e0d2ed
# Parent 0079f76e906f378f81044da4e135df2fbb878fa5
mce: MCE polling logic should check mce_disabled during initialisation.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -91,7 +91,7 @@ static int __init init_nonfatal_mce_chec
struct cpuinfo_x86 *c = &boot_cpu_data;
/* Check for MCE support */
- if (!mce_available(c))
+ if (mce_disabled || !mce_available(c))
return -ENODEV;
/*


@ -0,0 +1,42 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273740260 -3600
# Node ID 6c22d2dfed4882d5febf98d2afbb96ec9bedd043
# Parent 97da07c523ddc2565da67b6cfeec006d742bb06d
x86: Fix a few on_{selected,each}_cpus callers who should wait for completion.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -195,7 +195,7 @@ static void drv_write(struct drv_cmd *cm
cpu_isset(smp_processor_id(), cmd->mask))
do_drv_write((void *)cmd);
else
- on_selected_cpus(&cmd->mask, do_drv_write, cmd, 0);
+ on_selected_cpus(&cmd->mask, do_drv_write, cmd, 1);
}
static u32 get_cur_val(cpumask_t mask)
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -122,7 +122,7 @@ static int powernow_cpufreq_target(struc
cmd.val = next_perf_state;
- on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 0);
+ on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 1);
for_each_cpu_mask(j, online_policy_cpus)
cpufreq_statistic_update(j, perf->state, next_perf_state);
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -1610,7 +1610,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
add_taint(TAINT_ERROR_INJECT);
if ( mce_broadcast )
- on_each_cpu(x86_mc_mceinject, mc_mceinject, 0);
+ on_each_cpu(x86_mc_mceinject, mc_mceinject, 1);
else
on_selected_cpus(cpumask_of(target), x86_mc_mceinject,
mc_mceinject, 1);
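
The final argument of on_selected_cpus()/on_each_cpu() is the wait flag; passing 1 blocks the caller until the handler has finished on every targeted CPU. One common reason waiting matters, though the changeset does not spell it out per call site, is that the argument block typically lives on the caller's stack. A standalone pthread-based sketch of that general hazard (not Xen code):

#include <pthread.h>
#include <stdio.h>

struct cmd { int reg; long val; };

static void *remote_write(void *arg)
{
    struct cmd *c = arg;          /* points into issue_write()'s stack frame */
    printf("writing %ld to reg %d\n", c->val, c->reg);
    return NULL;
}

static void issue_write(int wait)
{
    struct cmd c = { .reg = 0x10, .val = 42 };
    pthread_t t;

    pthread_create(&t, NULL, remote_write, &c);
    if ( wait )
        pthread_join(t, NULL);    /* like wait == 1: &c stays valid for the handler */
    else
        pthread_detach(t);        /* like wait == 0: handler may run after c is gone */
}

int main(void)
{
    issue_write(1);
    return 0;
}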


@ -0,0 +1,48 @@
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1273741742 -3600
# Node ID 1ec412805622fd7ec0e88bf3b7ab681d024994bc
# Parent 6c22d2dfed4882d5febf98d2afbb96ec9bedd043
Even 'dummy' domains (e.g., dom_xen a.k.a. DOMID_XEN) must have valid I/O caps.
Ensure the rangesets are always initialised. Certain (privileged)
invocations of hypercalls such as mmu_update can end up interrogating
these special domains for possible I/O capabilities.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -245,12 +245,17 @@ struct domain *domain_create(
d->disable_migrate = 1;
}
- if ( domcr_flags & DOMCRF_dummy )
- return d;
-
rangeset_domain_initialise(d);
init_status |= INIT_rangeset;
+ d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
+ d->irq_caps = rangeset_new(d, "Interrupts", 0);
+ if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
+ goto fail;
+
+ if ( domcr_flags & DOMCRF_dummy )
+ return d;
+
if ( !is_idle_domain(d) )
{
if ( xsm_domain_create(d, ssidref) != 0 )
@@ -285,11 +290,6 @@ struct domain *domain_create(
goto fail;
init_status |= INIT_arch;
- d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
- d->irq_caps = rangeset_new(d, "Interrupts", 0);
- if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
- goto fail;
-
if ( sched_init_domain(d) != 0 )
goto fail;
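
The fix is an ordering change in domain_create(): the iomem/irq rangesets are now created before the DOMCRF_dummy early return, so even dom_xen and friends carry valid (empty) capability sets when a hypercall path interrogates them. A minimal sketch of the general pattern with made-up names (not the actual Xen code):

#include <stdlib.h>

struct obj {
    int is_dummy;
    void *io_caps;        /* may be queried even for dummy objects */
};

static struct obj *obj_create(int is_dummy)
{
    struct obj *o = calloc(1, sizeof(*o));
    if ( o == NULL )
        return NULL;

    /* Initialise everything that *any* later code path may interrogate... */
    o->io_caps = calloc(1, 64);
    if ( o->io_caps == NULL )
    {
        free(o);
        return NULL;
    }
    o->is_dummy = is_dummy;

    /* ...before taking the special-case early exit. */
    if ( is_dummy )
        return o;

    /* full initialisation for ordinary objects continues here */
    return o;
}

int main(void)
{
    struct obj *dummy = obj_create(1);
    return (dummy && dummy->io_caps) ? 0 : 1;
}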


@ -1081,8 +1081,8 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
struct domain *d, **pd;
enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2,
@@ -290,6 +290,9 @@ struct domain *domain_create(
if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
goto fail;
init_status |= INIT_arch;
+ if ( cpupool_add_domain(d, poolid) != 0 )
+ goto fail;
@ -3054,7 +3054,7 @@ From: Juergen Gross <juergen.gross@ts.fujitsu.com>
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
+#define XEN_SYSCTL_cpupool_op 18
+#define XEN_SYSCTL_cpupool_op 18
+/* XEN_SYSCTL_cpupool_op */
+#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
+#define XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */


@ -87,6 +87,7 @@ case "$1" in
# (in xen-network-common.sh and block-common.sh)
# - xenblk when xend prepares for bootloader
# but for now it's safest to have them loaded when xend starts in dom0.
modprobe evtchn 2>/dev/null || true
modprobe blktap 2>/dev/null || true
modprobe blkbk 2>/dev/null || true
modprobe xenblk 2>/dev/null || true


@ -1,128 +0,0 @@
Following a change in Linux 2.6.33, make x86-32 always use
__builtin_mem{cpy,set}() on gcc 4.0+. This particularly works around
certain intermediate gcc revisions generating out-of-range-array-index
warnings with the current inline implementation.
It may be worthwhile considering to make this the case for x86-64 too.
At the same time eliminate the redundant inline assembly in the C
file, and instead use the inline functions coming from the header.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Index: xen-4.0.0-testing/xen/arch/x86/string.c
===================================================================
--- xen-4.0.0-testing.orig/xen/arch/x86/string.c
+++ xen-4.0.0-testing/xen/arch/x86/string.c
@@ -11,44 +11,13 @@
#undef memcpy
void *memcpy(void *dest, const void *src, size_t n)
{
- long d0, d1, d2;
-
- asm volatile (
-#ifdef __i386__
- " rep movsl ; "
-#else
- " rep movsq ; "
- " testb $4,%b4 ; "
- " je 0f ; "
- " movsl ; "
- "0: ; "
-#endif
- " testb $2,%b4 ; "
- " je 1f ; "
- " movsw ; "
- "1: testb $1,%b4 ; "
- " je 2f ; "
- " movsb ; "
- "2: "
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (n/sizeof(long)), "q" (n), "1" (dest), "2" (src)
- : "memory");
-
- return dest;
+ return __variable_memcpy(dest, src, n);
}
#undef memset
void *memset(void *s, int c, size_t n)
{
- long d0, d1;
-
- asm volatile (
- "rep stosb"
- : "=&c" (d0), "=&D" (d1)
- : "a" (c), "1" (s), "0" (n)
- : "memory");
-
- return s;
+ return __memset_generic(s, c, n);
}
#undef memmove
Index: xen-4.0.0-testing/xen/include/asm-x86/string.h
===================================================================
--- xen-4.0.0-testing.orig/xen/include/asm-x86/string.h
+++ xen-4.0.0-testing/xen/include/asm-x86/string.h
@@ -16,6 +16,11 @@ static inline void *__variable_memcpy(vo
return to;
}
+#define __HAVE_ARCH_MEMCPY
+#if defined(__i386__) && __GNUC__ >= 4
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+#else
+
/*
* This looks horribly ugly, but the compiler can optimize it totally,
* as the count is constant.
@@ -95,7 +100,6 @@ static always_inline void * __constant_m
return to;
}
-#define __HAVE_ARCH_MEMCPY
/* align source to a 64-bit boundary */
static always_inline
void *__var_memcpy(void *t, const void *f, size_t n)
@@ -121,11 +125,13 @@ void *__memcpy(void *t, const void *f, s
__var_memcpy((t),(f),(n)));
}
+#endif /* !__i386__ || __GNUC__ < 4 */
+
/* Some version of gcc don't have this builtin. It's non-critical anyway. */
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *dest, const void *src, size_t n);
-static inline void *__memset_generic(void *s, char c, size_t count)
+static inline void *__memset_generic(void *s, int c, size_t count)
{
long d0, d1;
__asm__ __volatile__ (
@@ -134,6 +140,11 @@ static inline void *__memset_generic(voi
return s;
}
+#define __HAVE_ARCH_MEMSET
+#if defined(__i386__) && __GNUC__ >= 4
+#define memset(s, c, n) __builtin_memset(s, c, n)
+#else
+
/* we might want to write optimized versions of these later */
#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
@@ -238,11 +249,12 @@ static always_inline void *__constant_c_
#define MEMSET_PATTERN_MUL 0x01010101UL
#endif
-#define __HAVE_ARCH_MEMSET
#define memset(s, c, count) (__memset((s),(c),(count)))
#define __memset(s, c, count) \
(__builtin_constant_p(c) ? \
__constant_c_x_memset((s),(MEMSET_PATTERN_MUL*(unsigned char)(c)),(count)) : \
__var_x_memset((s),(c),(count)))
+#endif /* !__i386__ || __GNUC__ < 4 */
+
#endif /* __X86_STRING_H__ */


@ -1,3 +1,23 @@
-------------------------------------------------------------------
Wed May 19 08:09:41 MDT 2010 - carnold@novell.com
- Added modprobe of evtchn to init.xend. The kernel will also need
to build evtchn as a module for this to be meaningful.
-------------------------------------------------------------------
Mon May 17 08:31:24 MDT 2010 - carnold@novell.com
- bnc#603008 - On an 8 Socket Nehalem-EX system, the fix for 593536
causes a hang during network setup.
- Upstream patches from Jan.
21360-x86-mce-polling-diabled-init.patch
21372-x86-cross-cpu-wait.patch
21331-svm-vintr-during-nmi.patch
21333-xentrace-t_info-size.patch
21340-vtd-dom0-mapping-latency.patch
21346-x86-platform-timer-wrap.patch
21373-dummy-domain-io-caps.patch
-------------------------------------------------------------------
Wed May 12 08:43:20 MDT 2010 - carnold@novell.com


@ -39,7 +39,7 @@ BuildRequires: glibc-32bit glibc-devel-32bit
BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11
%endif
Version: 4.0.0_21091_05
Release: 1
Release: 2
License: GPLv2+
Group: System/Kernel
AutoReqProv: on
@ -94,6 +94,14 @@ Patch16: 21301-svm-lmsl.patch
Patch17: 21304-keyhandler-alternative.patch
Patch18: 21317-xend-blkif-util-tap2.patch
Patch19: passthrough-hotplug-segfault.patch
Patch20: 21331-svm-vintr-during-nmi.patch
Patch21: 21333-xentrace-t_info-size.patch
Patch22: 21340-vtd-dom0-mapping-latency.patch
Patch23: 21346-x86-platform-timer-wrap.patch
Patch24: 21349-x86-memcpy.patch
Patch25: 21360-x86-mce-polling-diabled-init.patch
Patch26: 21372-x86-cross-cpu-wait.patch
Patch27: 21373-dummy-domain-io-caps.patch
# Our patches
Patch300: xen-config.diff
Patch301: xend-config.diff
@ -154,7 +162,6 @@ Patch366: cpu-pools-python.patch
Patch367: cpu-pools-libxen.patch
Patch368: cpu-pools-xmtest.patch
Patch369: cpu-pools-docs.patch
Patch370: x86-memcpy.patch
# Patches for snapshot support
Patch400: snapshot-ioemu-save.patch
Patch401: snapshot-ioemu-restore.patch
@ -555,6 +562,14 @@ Authors:
%patch17 -p1
%patch18 -p1
%patch19 -p1
%patch20 -p1
%patch21 -p1
%patch22 -p1
%patch23 -p1
%patch24 -p1
%patch25 -p1
%patch26 -p1
%patch27 -p1
%patch300 -p1
%patch301 -p1
%patch302 -p1
@ -613,7 +628,6 @@ Authors:
%patch367 -p1
%patch368 -p1
%patch369 -p1
%patch370 -p1
%patch400 -p1
%patch401 -p1
%patch402 -p1