a89d75605e
on Xen 576001df-x86-time-use-local-stamp-in-TSC-calibration-fast-path.patch 5769106e-x86-generate-assembler-equates-for-synthesized.patch 57a1e603-x86-time-adjust-local-system-time-initialization.patch 57a1e64c-x86-time-introduce-and-use-rdtsc_ordered.patch 57a2f6ac-x86-time-calibrate-TSC-against-platform-timer.patch - bsc#991934 - xen hypervisor crash in csched_acct 57973099-have-schedulers-revise-initial-placement.patch 579730e6-remove-buggy-initial-placement-algorithm.patch - bsc#988675 - VUL-0: CVE-2016-6258: xen: x86: Privilege escalation in PV guests (XSA-182) 57976073-x86-remove-unsafe-bits-from-mod_lN_entry-fastpath.patch - bsc#988676 - VUL-0: CVE-2016-6259: xen: x86: Missing SMAP whitelisting in 32-bit exception / event delivery (XSA-183) 57976078-x86-avoid-SMAP-violation-in-compat_create_bounce_frame.patch - Upstream patches from Jan 57a30261-x86-support-newer-Intel-CPU-models.patch - bsc#985503 - vif-route broken vif-route.patch OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=445
201 lines
6.7 KiB
Diff
201 lines
6.7 KiB
Diff
# Commit 350bc1a9d4ebc03b18a43cdafcb626618caace55
|
|
# Date 2016-08-04 10:52:49 +0200
|
|
# Author Jan Beulich <jbeulich@suse.com>
|
|
# Committer Jan Beulich <jbeulich@suse.com>
|
|
x86: support newer Intel CPU models
|
|
|
|
... as per the June 2016 edition of the SDM.
|
|
|
|
Also remove a couple of dead break statements as well as unused
|
|
MSR_PM_LASTBRANCH* #define-s.
|
|
|
|
Signed-off-by: Jan Beulich <jbeulich@suse.com>
|
|
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
Acked-by: Kevin Tian <kevin.tian@intel.com>
|
|
|
|
--- a/xen/arch/x86/acpi/cpu_idle.c
|
|
+++ b/xen/arch/x86/acpi/cpu_idle.c
|
|
@@ -61,14 +61,14 @@
|
|
|
|
#define GET_HW_RES_IN_NS(msr, val) \
|
|
do { rdmsrl(msr, val); val = tsc_ticks2ns(val); } while( 0 )
|
|
-#define GET_MC6_RES(val) GET_HW_RES_IN_NS(0x664, val) /* Atom E3000 only */
|
|
+#define GET_MC6_RES(val) GET_HW_RES_IN_NS(0x664, val)
|
|
#define GET_PC2_RES(val) GET_HW_RES_IN_NS(0x60D, val) /* SNB onwards */
|
|
#define GET_PC3_RES(val) GET_HW_RES_IN_NS(0x3F8, val)
|
|
#define GET_PC6_RES(val) GET_HW_RES_IN_NS(0x3F9, val)
|
|
#define GET_PC7_RES(val) GET_HW_RES_IN_NS(0x3FA, val)
|
|
-#define GET_PC8_RES(val) GET_HW_RES_IN_NS(0x630, val) /* some Haswells only */
|
|
-#define GET_PC9_RES(val) GET_HW_RES_IN_NS(0x631, val) /* some Haswells only */
|
|
-#define GET_PC10_RES(val) GET_HW_RES_IN_NS(0x632, val) /* some Haswells only */
|
|
+#define GET_PC8_RES(val) GET_HW_RES_IN_NS(0x630, val)
|
|
+#define GET_PC9_RES(val) GET_HW_RES_IN_NS(0x631, val)
|
|
+#define GET_PC10_RES(val) GET_HW_RES_IN_NS(0x632, val)
|
|
#define GET_CC1_RES(val) GET_HW_RES_IN_NS(0x660, val) /* Silvermont only */
|
|
#define GET_CC3_RES(val) GET_HW_RES_IN_NS(0x3FC, val)
|
|
#define GET_CC6_RES(val) GET_HW_RES_IN_NS(0x3FD, val)
|
|
@@ -142,6 +142,8 @@ static void do_get_hw_residencies(void *
|
|
{
|
|
/* 4th generation Intel Core (Haswell) */
|
|
case 0x45:
|
|
+ /* Xeon E5/E7 v4 (Broadwell) */
|
|
+ case 0x4F:
|
|
GET_PC8_RES(hw_res->pc8);
|
|
GET_PC9_RES(hw_res->pc9);
|
|
GET_PC10_RES(hw_res->pc10);
|
|
@@ -158,10 +160,11 @@ static void do_get_hw_residencies(void *
|
|
case 0x46:
|
|
/* Broadwell */
|
|
case 0x3D:
|
|
- case 0x4F:
|
|
+ case 0x47:
|
|
case 0x56:
|
|
- /* future */
|
|
+ /* Skylake */
|
|
case 0x4E:
|
|
+ case 0x5E:
|
|
GET_PC2_RES(hw_res->pc2);
|
|
GET_CC7_RES(hw_res->cc7);
|
|
/* fall through */
|
|
@@ -198,18 +201,28 @@ static void do_get_hw_residencies(void *
|
|
break;
|
|
/* Silvermont */
|
|
case 0x37:
|
|
- GET_MC6_RES(hw_res->mc6);
|
|
- /* fall through */
|
|
case 0x4A:
|
|
case 0x4D:
|
|
case 0x5A:
|
|
case 0x5D:
|
|
/* Airmont */
|
|
case 0x4C:
|
|
+ GET_MC6_RES(hw_res->mc6);
|
|
GET_PC7_RES(hw_res->pc6); /* abusing GET_PC7_RES */
|
|
GET_CC1_RES(hw_res->cc1);
|
|
GET_CC6_RES(hw_res->cc6);
|
|
break;
|
|
+ /* Goldmont */
|
|
+ case 0x5C:
|
|
+ case 0x5F:
|
|
+ GET_PC2_RES(hw_res->pc2);
|
|
+ GET_PC3_RES(hw_res->pc3);
|
|
+ GET_PC6_RES(hw_res->pc6);
|
|
+ GET_PC10_RES(hw_res->pc10);
|
|
+ GET_CC1_RES(hw_res->cc1);
|
|
+ GET_CC3_RES(hw_res->cc3);
|
|
+ GET_CC6_RES(hw_res->cc6);
|
|
+ break;
|
|
}
|
|
}
|
|
|
|
--- a/xen/arch/x86/hvm/vmx/vmx.c
|
|
+++ b/xen/arch/x86/hvm/vmx/vmx.c
|
|
@@ -2526,6 +2526,14 @@ static const struct lbr_info {
|
|
{ MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
|
|
{ MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
|
|
{ 0, 0 }
|
|
+}, sk_lbr[] = {
|
|
+ { MSR_IA32_LASTINTFROMIP, 1 },
|
|
+ { MSR_IA32_LASTINTTOIP, 1 },
|
|
+ { MSR_SKL_LASTBRANCH_TOS, 1 },
|
|
+ { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
|
|
+ { MSR_SKL_LASTBRANCH_0_TO_IP, NUM_MSR_SKL_LASTBRANCH },
|
|
+ { MSR_SKL_LASTBRANCH_0_INFO, NUM_MSR_SKL_LASTBRANCH },
|
|
+ { 0, 0 }
|
|
}, at_lbr[] = {
|
|
{ MSR_IA32_LASTINTFROMIP, 1 },
|
|
{ MSR_IA32_LASTINTTOIP, 1 },
|
|
@@ -2533,6 +2541,13 @@ static const struct lbr_info {
|
|
{ MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
|
|
{ MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
|
|
{ 0, 0 }
|
|
+}, gm_lbr[] = {
|
|
+ { MSR_IA32_LASTINTFROMIP, 1 },
|
|
+ { MSR_IA32_LASTINTTOIP, 1 },
|
|
+ { MSR_GM_LASTBRANCH_TOS, 1 },
|
|
+ { MSR_GM_LASTBRANCH_0_FROM_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
|
|
+ { MSR_GM_LASTBRANCH_0_TO_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
|
|
+ { 0, 0 }
|
|
};
|
|
|
|
static const struct lbr_info *last_branch_msr_get(void)
|
|
@@ -2547,7 +2562,6 @@ static const struct lbr_info *last_branc
|
|
/* Enhanced Core */
|
|
case 23:
|
|
return c2_lbr;
|
|
- break;
|
|
/* Nehalem */
|
|
case 26: case 30: case 31: case 46:
|
|
/* Westmere */
|
|
@@ -2559,11 +2573,13 @@ static const struct lbr_info *last_branc
|
|
/* Haswell */
|
|
case 60: case 63: case 69: case 70:
|
|
/* Broadwell */
|
|
- case 61: case 79: case 86:
|
|
- /* future */
|
|
- case 78:
|
|
+ case 61: case 71: case 79: case 86:
|
|
return nh_lbr;
|
|
- break;
|
|
+ /* Skylake */
|
|
+ case 78: case 94:
|
|
+ /* future */
|
|
+ case 142: case 158:
|
|
+ return sk_lbr;
|
|
/* Atom */
|
|
case 28: case 38: case 39: case 53: case 54:
|
|
/* Silvermont */
|
|
@@ -2573,7 +2589,9 @@ static const struct lbr_info *last_branc
|
|
/* Airmont */
|
|
case 76:
|
|
return at_lbr;
|
|
- break;
|
|
+ /* Goldmont */
|
|
+ case 92: case 95:
|
|
+ return gm_lbr;
|
|
}
|
|
break;
|
|
|
|
@@ -2583,7 +2601,6 @@ static const struct lbr_info *last_branc
|
|
/* Pentium4/Xeon with em64t */
|
|
case 3: case 4: case 6:
|
|
return p4_lbr;
|
|
- break;
|
|
}
|
|
break;
|
|
}
|
|
--- a/xen/include/asm-x86/msr-index.h
|
|
+++ b/xen/include/asm-x86/msr-index.h
|
|
@@ -458,11 +458,6 @@
|
|
#define MSR_P4_LASTBRANCH_0_TO_LIP 0x000006c0
|
|
#define NUM_MSR_P4_LASTBRANCH_FROM_TO 16
|
|
|
|
-/* Pentium M (and Core) last-branch recording */
|
|
-#define MSR_PM_LASTBRANCH_TOS 0x000001c9
|
|
-#define MSR_PM_LASTBRANCH_0 0x00000040
|
|
-#define NUM_MSR_PM_LASTBRANCH 8
|
|
-
|
|
/* Core 2 and Atom last-branch recording */
|
|
#define MSR_C2_LASTBRANCH_TOS 0x000001c9
|
|
#define MSR_C2_LASTBRANCH_0_FROM_IP 0x00000040
|
|
@@ -470,6 +465,19 @@
|
|
#define NUM_MSR_C2_LASTBRANCH_FROM_TO 4
|
|
#define NUM_MSR_ATOM_LASTBRANCH_FROM_TO 8
|
|
|
|
+/* Skylake (and newer) last-branch recording */
|
|
+#define MSR_SKL_LASTBRANCH_TOS 0x000001c9
|
|
+#define MSR_SKL_LASTBRANCH_0_FROM_IP 0x00000680
|
|
+#define MSR_SKL_LASTBRANCH_0_TO_IP 0x000006c0
|
|
+#define MSR_SKL_LASTBRANCH_0_INFO 0x00000dc0
|
|
+#define NUM_MSR_SKL_LASTBRANCH 32
|
|
+
|
|
+/* Goldmont last-branch recording */
|
|
+#define MSR_GM_LASTBRANCH_TOS 0x000001c9
|
|
+#define MSR_GM_LASTBRANCH_0_FROM_IP 0x00000680
|
|
+#define MSR_GM_LASTBRANCH_0_TO_IP 0x000006c0
|
|
+#define NUM_MSR_GM_LASTBRANCH_FROM_TO 32
|
|
+
|
|
/* Intel Core-based CPU performance counters */
|
|
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
|
|
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
|