References: FATE#309893, FATE#309906

# HG changeset patch
# User Jacob Shin <jacob.shin@amd.com>
# Date 1304931286 -3600
# Node ID e787d4f2e5acdba48728a9390710de800315a540
# Parent 014ee4e09644bd3ae55919d267f742c1d60c337a
hvm: vpmu: Add support for AMD Family 15h processors

The AMD Family 15h CPU mirrors the legacy K7 performance monitor counters
to a new location and adds two new counters. This patch updates the HVM
VPMU code to take advantage of the new counters.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>

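For background, Family 15h interleaves its event-select and counter MSRs, so
the low address bit is enough to tell a control register from a counter; the
patch relies on this in get_pmu_reg_type(). Below is a minimal standalone
sketch of that classification (not part of the patch; the numeric MSR values
are assumed to match Xen's msr-index.h and are shown only for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed values, mirroring the MSR_AMD_FAM15H_* names used in the
     * diff below; treat the numbers as illustrative. */
    #define MSR_AMD_FAM15H_EVNTSEL0  0xc0010200u  /* even address: control */
    #define MSR_AMD_FAM15H_PERFCTR0  0xc0010201u  /* odd address:  counter */
    #define MSR_AMD_FAM15H_PERFCTR5  0xc001020bu  /* last of the six pairs */

    /* Classify a Family 15h performance MSR by its low address bit. */
    static const char *fam15h_reg_type(uint32_t addr)
    {
        if ( addr >= MSR_AMD_FAM15H_EVNTSEL0 && addr <= MSR_AMD_FAM15H_PERFCTR5 )
            return (addr & 1) ? "counter" : "control";
        return "unsupported";
    }

    int main(void)
    {
        printf("%#x -> %s\n", MSR_AMD_FAM15H_EVNTSEL0,
               fam15h_reg_type(MSR_AMD_FAM15H_EVNTSEL0));
        printf("%#x -> %s\n", MSR_AMD_FAM15H_PERFCTR0,
               fam15h_reg_type(MSR_AMD_FAM15H_PERFCTR0));
        return 0;
    }
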
Index: xen-4.1.2-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/svm/svm.c
@@ -1142,6 +1142,18 @@ static int svm_msr_read_intercept(unsign
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
+    case MSR_AMD_FAM15H_PERFCTR0:
+    case MSR_AMD_FAM15H_PERFCTR1:
+    case MSR_AMD_FAM15H_PERFCTR2:
+    case MSR_AMD_FAM15H_PERFCTR3:
+    case MSR_AMD_FAM15H_PERFCTR4:
+    case MSR_AMD_FAM15H_PERFCTR5:
+    case MSR_AMD_FAM15H_EVNTSEL0:
+    case MSR_AMD_FAM15H_EVNTSEL1:
+    case MSR_AMD_FAM15H_EVNTSEL2:
+    case MSR_AMD_FAM15H_EVNTSEL3:
+    case MSR_AMD_FAM15H_EVNTSEL4:
+    case MSR_AMD_FAM15H_EVNTSEL5:
         vpmu_do_rdmsr(msr, msr_content);
         break;
 
@@ -1237,6 +1249,18 @@ static int svm_msr_write_intercept(unsig
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
+    case MSR_AMD_FAM15H_PERFCTR0:
+    case MSR_AMD_FAM15H_PERFCTR1:
+    case MSR_AMD_FAM15H_PERFCTR2:
+    case MSR_AMD_FAM15H_PERFCTR3:
+    case MSR_AMD_FAM15H_PERFCTR4:
+    case MSR_AMD_FAM15H_PERFCTR5:
+    case MSR_AMD_FAM15H_EVNTSEL0:
+    case MSR_AMD_FAM15H_EVNTSEL1:
+    case MSR_AMD_FAM15H_EVNTSEL2:
+    case MSR_AMD_FAM15H_EVNTSEL3:
+    case MSR_AMD_FAM15H_EVNTSEL4:
+    case MSR_AMD_FAM15H_EVNTSEL5:
         vpmu_do_wrmsr(msr, msr_content);
         break;
 
Index: xen-4.1.2-testing/xen/arch/x86/hvm/svm/vpmu.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/svm/vpmu.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/svm/vpmu.c
@@ -36,7 +36,9 @@
 #include <public/hvm/save.h>
 #include <asm/hvm/vpmu.h>
 
-#define NUM_COUNTERS 4
+#define F10H_NUM_COUNTERS 4
+#define F15H_NUM_COUNTERS 6
+#define MAX_NUM_COUNTERS F15H_NUM_COUNTERS
 
 #define MSR_F10H_EVNTSEL_GO_SHIFT 40
 #define MSR_F10H_EVNTSEL_EN_SHIFT 22
@@ -47,6 +49,11 @@
 #define set_guest_mode(msr) (msr |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
 #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH-1))))
 
+static int __read_mostly num_counters = 0;
+static u32 __read_mostly *counters = NULL;
+static u32 __read_mostly *ctrls = NULL;
+static bool_t __read_mostly k7_counters_mirrored = 0;
+
 /* PMU Counter MSRs. */
 u32 AMD_F10H_COUNTERS[] = {
     MSR_K7_PERFCTR0,
@@ -63,10 +70,28 @@ u32 AMD_F10H_CTRLS[] = {
     MSR_K7_EVNTSEL3
 };
 
+u32 AMD_F15H_COUNTERS[] = {
+    MSR_AMD_FAM15H_PERFCTR0,
+    MSR_AMD_FAM15H_PERFCTR1,
+    MSR_AMD_FAM15H_PERFCTR2,
+    MSR_AMD_FAM15H_PERFCTR3,
+    MSR_AMD_FAM15H_PERFCTR4,
+    MSR_AMD_FAM15H_PERFCTR5
+};
+
+u32 AMD_F15H_CTRLS[] = {
+    MSR_AMD_FAM15H_EVNTSEL0,
+    MSR_AMD_FAM15H_EVNTSEL1,
+    MSR_AMD_FAM15H_EVNTSEL2,
+    MSR_AMD_FAM15H_EVNTSEL3,
+    MSR_AMD_FAM15H_EVNTSEL4,
+    MSR_AMD_FAM15H_EVNTSEL5
+};
+
 /* storage for context switching */
 struct amd_vpmu_context {
-    u64 counters[NUM_COUNTERS];
-    u64 ctrls[NUM_COUNTERS];
+    u64 counters[MAX_NUM_COUNTERS];
+    u64 ctrls[MAX_NUM_COUNTERS];
     u32 hw_lapic_lvtpc;
 };
 
@@ -78,10 +103,45 @@ static inline int get_pmu_reg_type(u32 a
     if ( (addr >= MSR_K7_PERFCTR0) && (addr <= MSR_K7_PERFCTR3) )
         return MSR_TYPE_COUNTER;
 
+    if ( (addr >= MSR_AMD_FAM15H_EVNTSEL0) &&
+         (addr <= MSR_AMD_FAM15H_PERFCTR5 ) )
+    {
+        if (addr & 1)
+            return MSR_TYPE_COUNTER;
+        else
+            return MSR_TYPE_CTRL;
+    }
+
     /* unsupported registers */
     return -1;
 }
 
+static inline u32 get_fam15h_addr(u32 addr)
+{
+    switch ( addr )
+    {
+    case MSR_K7_PERFCTR0:
+        return MSR_AMD_FAM15H_PERFCTR0;
+    case MSR_K7_PERFCTR1:
+        return MSR_AMD_FAM15H_PERFCTR1;
+    case MSR_K7_PERFCTR2:
+        return MSR_AMD_FAM15H_PERFCTR2;
+    case MSR_K7_PERFCTR3:
+        return MSR_AMD_FAM15H_PERFCTR3;
+    case MSR_K7_EVNTSEL0:
+        return MSR_AMD_FAM15H_EVNTSEL0;
+    case MSR_K7_EVNTSEL1:
+        return MSR_AMD_FAM15H_EVNTSEL1;
+    case MSR_K7_EVNTSEL2:
+        return MSR_AMD_FAM15H_EVNTSEL2;
+    case MSR_K7_EVNTSEL3:
+        return MSR_AMD_FAM15H_EVNTSEL3;
+    default:
+        break;
+    }
+
+    return addr;
+}
+
 static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
@@ -110,12 +170,12 @@ static inline void context_restore(struc
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        wrmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]);
+    for ( i = 0; i < num_counters; i++ )
+        wrmsrl(ctrls[i], ctxt->ctrls[i]);
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
+    for ( i = 0; i < num_counters; i++ )
     {
-        wrmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]);
+        wrmsrl(counters[i], ctxt->counters[i]);
 
         /* Force an interrupt to allow guest reset the counter,
         if the value is positive */
@@ -147,11 +207,11 @@ static inline void context_save(struct v
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct amd_vpmu_context *ctxt = vpmu->context;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        rdmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]);
+    for ( i = 0; i < num_counters; i++ )
+        rdmsrl(counters[i], ctxt->counters[i]);
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        rdmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]);
+    for ( i = 0; i < num_counters; i++ )
+        rdmsrl(ctrls[i], ctxt->ctrls[i]);
 }
 
 static void amd_vpmu_save(struct vcpu *v)
@@ -175,12 +235,18 @@ static void context_update(unsigned int
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        if ( msr == AMD_F10H_COUNTERS[i] )
+    if ( k7_counters_mirrored &&
+        ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
+    {
+        msr = get_fam15h_addr(msr);
+    }
+
+    for ( i = 0; i < num_counters; i++ )
+        if ( msr == counters[i] )
             ctxt->counters[i] = msr_content;
 
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        if ( msr == AMD_F10H_CTRLS[i] )
+    for ( i = 0; i < num_counters; i++ )
+        if ( msr == ctrls[i] )
             ctxt->ctrls[i] = msr_content;
 
     ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
@@ -235,10 +301,31 @@ static void amd_vpmu_initialise(struct v
 {
     struct amd_vpmu_context *ctxt = NULL;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    __u8 family = current_cpu_data.x86;
 
     if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
         return;
 
+    if ( counters == NULL )
+    {
+        switch ( family )
+        {
+        case 0x15:
+            num_counters = F15H_NUM_COUNTERS;
+            counters = AMD_F15H_COUNTERS;
+            ctrls = AMD_F15H_CTRLS;
+            k7_counters_mirrored = 1;
+            break;
+        case 0x10:
+        default:
+            num_counters = F10H_NUM_COUNTERS;
+            counters = AMD_F10H_COUNTERS;
+            ctrls = AMD_F10H_CTRLS;
+            k7_counters_mirrored = 0;
+            break;
+        }
+    }
+
     ctxt = xmalloc_bytes(sizeof(struct amd_vpmu_context));
 
     if ( !ctxt )
Index: xen-4.1.2-testing/xen/arch/x86/hvm/vpmu.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/vpmu.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/vpmu.c
@@ -101,6 +101,7 @@ void vpmu_initialise(struct vcpu *v)
     switch ( family )
     {
     case 0x10:
+    case 0x15:
         vpmu->arch_vpmu_ops = &amd_vpmu_ops;
         break;
     default: