08af757235
25927-x86-domctl-ioport-mapping-range.patch
25929-tmem-restore-pool-version.patch
25931-x86-domctl-iomem-mapping-checks.patch
25940-x86-S3-flush-cache.patch
25952-x86-MMIO-remap-permissions.patch
25961-x86-HPET-interrupts.patch
25962-x86-assign-irq-vector-old.patch
25965-x86-ucode-Intel-resume.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=205

# HG changeset patch
# User Jan Beulich <jbeulich@suse.com>
# Date 1348125713 -7200
# Node ID 14980591956954584f3cd87bf4e8d306360e7f27
# Parent 4e93cbeac98b3e6be98e5ec0881b44b68ee95974
x86: tighten checks in XEN_DOMCTL_memory_mapping handler

Properly checking the MFN implies knowing the physical address width
supported by the platform, so to obtain this consistently the
respective code gets moved out of the MTRR subdir.

Btw., the model specific workaround in that code is likely unnecessary
- I believe those CPU models don't support 64-bit mode. But I wasn't
able to formally verify this, so I preferred to retain that code for
now.
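
For reference, here is a minimal standalone sketch (not part of the
changeset) of the detection scheme the relocated code uses: CPUID leaf
0x80000008 reports the physical address width in EAX bits 7:0, and 36
bits remains the default when that leaf is absent. GCC's <cpuid.h>
stands in for Xen's cpuid_eax() helper, and detect_paddr_bits() is an
illustrative name.

#include <cpuid.h>
#include <stdio.h>

static unsigned int paddr_bits = 36;   /* default, as set in cpu/common.c */

/* Stand-in for Xen's cpuid_eax(): return EAX of the given CPUID leaf. */
static unsigned int cpuid_eax(unsigned int leaf)
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

    __get_cpuid(leaf, &eax, &ebx, &ecx, &edx);
    return eax;
}

static void detect_paddr_bits(void)
{
    /* Leaf 0x80000000 reports the highest implemented extended leaf. */
    if (cpuid_eax(0x80000000) >= 0x80000008)
        /* Leaf 0x80000008, EAX bits 7:0 = physical address width. */
        paddr_bits = cpuid_eax(0x80000008) & 0xff;
    /* The vendor files then refine this further, e.g. Centaur/Cyrix
       parts are clamped to 32 bits and the Intel 0F33/0F34 workaround
       forces 36 bits, as in the hunks below. */
}

int main(void)
{
    detect_paddr_bits();
    printf("paddr_bits = %u\n", paddr_bits);
    return 0;
}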

But the domctl code here was also lacking other error checks (as was,
looking at it again from that angle, the XEN_DOMCTL_ioport_mapping
one). Besides adding the missing checks, printing is also added for
the case where revoking access permissions didn't work (as that may
have implications for the host operator, e.g. wanting to not pass
through affected devices to another guest until the one previously
using them did actually die).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
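
Before the diff itself, a minimal sketch (again not part of the
changeset) of the tightened bounds check the XEN_DOMCTL_memory_mapping
handler gains below: a request is rejected if its MFN or GFN range
wraps around, or if any frame lies beyond the platform's physical
address width. PAGE_SHIFT, paddr_bits and mapping_range_ok() are
illustrative stand-ins here.

#include <stdbool.h>

#define PAGE_SHIFT 12                  /* 4 KiB pages on x86 */

static unsigned int paddr_bits = 36;   /* would be detected as sketched above */

/* Mirrors the new -EINVAL condition in arch_do_domctl(): the range must
   not wrap, and no frame may exceed the platform's physical address width. */
static bool mapping_range_ok(unsigned long gfn, unsigned long mfn,
                             unsigned long nr_mfns)
{
    unsigned long mfn_last = mfn + nr_mfns - 1;
    unsigned long gfn_last = gfn + nr_mfns - 1;

    if (mfn_last < mfn || gfn_last < gfn)               /* wrap? */
        return false;
    if ((mfn | mfn_last) >> (paddr_bits - PAGE_SHIFT))  /* beyond paddr_bits? */
        return false;
    return true;
}

int main(void)
{
    /* Example: a 16-page MMIO range at MFN 0xfebc0 passes; a range that
       would wrap around the address space does not. */
    return mapping_range_ok(0x10000, 0xfebc0, 16) &&
           !mapping_range_ok(0x10000, ~0UL, 2) ? 0 : 1;
}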

--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -56,6 +56,9 @@ static void __init init_c3(struct cpuinf
         if (c->x86_model >=6 && c->x86_model <9)
                 set_bit(X86_FEATURE_3DNOW, c->x86_capability);
 
+        if (cpuid_eax(0x80000000) < 0x80000008)
+                paddr_bits = 32;
+
         get_model_name(c);
         display_cacheinfo(c);
 }
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -36,6 +36,8 @@ integer_param("cpuid_mask_ext_edx", opt_
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
+unsigned int paddr_bits __read_mostly = 36;
+
 /*
  * Default host IA32_CR_PAT value to cover all memory types.
  * BIOS usually sets it to 0x07040600070406.
@@ -265,6 +267,8 @@ static void __cpuinit generic_identify(s
                 }
                 if ( xlvl >= 0x80000004 )
                         get_model_name(c); /* Default name */
+                if ( xlvl >= 0x80000008 )
+                        paddr_bits = cpuid_eax(0x80000008) & 0xff;
         }
 
         /* Intel-defined flags: level 0x00000007 */
--- a/xen/arch/x86/cpu/cyrix.c
+++ b/xen/arch/x86/cpu/cyrix.c
@@ -255,7 +255,9 @@ static void __init init_cyrix(struct cpu
         }
         safe_strcpy(c->x86_model_id, Cx86_model[dir0_msn & 7]);
         if (p) safe_strcat(c->x86_model_id, p);
-        return;
+
+        if (cpu_has_cyrix_arr)
+                paddr_bits = 32;
 }
 
 /*
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -144,6 +144,11 @@ void __devinit early_intel_workaround(st
                                c->cpuid_level);
                 }
         }
+
+        /* CPUID workaround for Intel 0F33/0F34 CPU */
+        if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 &&
+            (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
+                paddr_bits = 36;
 }
 
 /*
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -587,8 +587,6 @@ struct mtrr_value {
         unsigned long   lsize;
 };
 
-unsigned int paddr_bits __read_mostly = 36;
-
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
@@ -602,48 +600,12 @@ void __init mtrr_bp_init(void)
 
         if (cpu_has_mtrr) {
                 mtrr_if = &generic_mtrr_ops;
-                size_or_mask = 0xff000000;      /* 36 bits */
-                size_and_mask = 0x00f00000;
-
-                /* This is an AMD specific MSR, but we assume(hope?) that
-                   Intel will implement it to when they extend the address
-                   bus of the Xeon. */
-                if (cpuid_eax(0x80000000) >= 0x80000008) {
-                        paddr_bits = cpuid_eax(0x80000008) & 0xff;
-                        /* CPUID workaround for Intel 0F33/0F34 CPU */
-                        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-                            boot_cpu_data.x86 == 0xF &&
-                            boot_cpu_data.x86_model == 0x3 &&
-                            (boot_cpu_data.x86_mask == 0x3 ||
-                             boot_cpu_data.x86_mask == 0x4))
-                                paddr_bits = 36;
-
-                        size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
-                        size_and_mask = ~size_or_mask & 0xfffff00000ULL;
-                } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
-                           boot_cpu_data.x86 == 6) {
-                        /* VIA C* family have Intel style MTRRs, but
-                           don't support PAE */
-                        size_or_mask = 0xfff00000;      /* 32 bits */
-                        size_and_mask = 0;
-                }
         } else {
 #ifndef CONFIG_X86_64
                 switch (boot_cpu_data.x86_vendor) {
-                case X86_VENDOR_AMD:
-                        if (cpu_has_k6_mtrr) {
-                                /* Pre-Athlon (K6) AMD CPU MTRRs */
-                                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
-                                size_or_mask = 0xfff00000;      /* 32 bits */
-                                size_and_mask = 0;
-                        }
-                        break;
                 case X86_VENDOR_CYRIX:
-                        if (cpu_has_cyrix_arr) {
+                        if (cpu_has_cyrix_arr)
                                 mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
-                                size_or_mask = 0xfff00000;      /* 32 bits */
-                                size_and_mask = 0;
-                        }
                         break;
                 default:
                         break;
@@ -652,6 +614,8 @@ void __init mtrr_bp_init(void)
         }
 
         if (mtrr_if) {
+                size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
+                size_and_mask = ~size_or_mask & 0xfffff00000ULL;
                 set_num_var_ranges();
                 init_table();
                 if (use_intel())
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -829,10 +829,12 @@ long arch_do_domctl(
         unsigned long mfn = domctl->u.memory_mapping.first_mfn;
         unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
         int add = domctl->u.memory_mapping.add_mapping;
-        int i;
+        unsigned long i;
 
         ret = -EINVAL;
-        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
+        if ( (mfn + nr_mfns - 1) < mfn || /* wrap? */
+             ((mfn | (mfn + nr_mfns - 1)) >> (paddr_bits - PAGE_SHIFT)) ||
+             (gfn + nr_mfns - 1) < gfn ) /* wrap? */
             break;
 
         ret = -EPERM;
@@ -857,8 +859,25 @@ long arch_do_domctl(
                    d->domain_id, gfn, mfn, nr_mfns);
 
             ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
-            for ( i = 0; i < nr_mfns; i++ )
-                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
+            if ( !ret && paging_mode_translate(d) )
+            {
+                for ( i = 0; !ret && i < nr_mfns; i++ )
+                    if ( !set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i)) )
+                        ret = -EIO;
+                if ( ret )
+                {
+                    printk(XENLOG_G_WARNING
+                           "memory_map:fail: dom%d gfn=%lx mfn=%lx\n",
+                           d->domain_id, gfn + i, mfn + i);
+                    while ( i-- )
+                        clear_mmio_p2m_entry(d, gfn + i);
+                    if ( iomem_deny_access(d, mfn, mfn + nr_mfns - 1) &&
+                         IS_PRIV(current->domain) )
+                        printk(XENLOG_ERR
+                               "memory_map: failed to deny dom%d access to [%lx,%lx]\n",
+                               d->domain_id, mfn, mfn + nr_mfns - 1);
+                }
+            }
         }
         else
         {
@@ -866,9 +885,17 @@ long arch_do_domctl(
                    "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
                    d->domain_id, gfn, mfn, nr_mfns);
 
-            for ( i = 0; i < nr_mfns; i++ )
-                clear_mmio_p2m_entry(d, gfn+i);
+            if ( paging_mode_translate(d) )
+                for ( i = 0; i < nr_mfns; i++ )
+                    add |= !clear_mmio_p2m_entry(d, gfn + i);
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+            if ( !ret && add )
+                ret = -EIO;
+            if ( ret && IS_PRIV(current->domain) )
+                printk(XENLOG_ERR
+                       "memory_map: error %ld %s dom%d access to [%lx,%lx]\n",
+                       ret, add ? "removing" : "denying", d->domain_id,
+                       mfn, mfn + nr_mfns - 1);
         }
 
         rcu_unlock_domain(d);
@@ -930,12 +957,23 @@ long arch_do_domctl(
             if ( !found )
             {
                 g2m_ioport = xmalloc(struct g2m_ioport);
+                if ( !g2m_ioport )
+                    ret = -ENOMEM;
+            }
+            if ( !found && !ret )
+            {
                 g2m_ioport->gport = fgp;
                 g2m_ioport->mport = fmp;
                 g2m_ioport->np = np;
                 list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
             }
-            ret = ioports_permit_access(d, fmp, fmp + np - 1);
+            if ( !ret )
+                ret = ioports_permit_access(d, fmp, fmp + np - 1);
+            if ( ret && !found && g2m_ioport )
+            {
+                list_del(&g2m_ioport->list);
+                xfree(g2m_ioport);
+            }
         }
         else
         {
@@ -950,6 +988,10 @@ long arch_do_domctl(
                     break;
                 }
             ret = ioports_deny_access(d, fmp, fmp + np - 1);
+            if ( ret && IS_PRIV(current->domain) )
+                printk(XENLOG_ERR
+                       "ioport_map: error %ld denying dom%d access to [%x,%x]\n",
+                       ret, d->domain_id, fmp, fmp + np - 1);
         }
         rcu_unlock_domain(d);
     }