%patch
Index: xen-3.4.1-testing/xen/include/asm-x86/hvm/hvm_extensions.h
===================================================================
--- /dev/null
+++ xen-3.4.1-testing/xen/include/asm-x86/hvm/hvm_extensions.h
@@ -0,0 +1,165 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
+ | All Rights Reserved.
+ |
+ | This program is free software; you can redistribute it and/or
+ | modify it under the terms of version 2 of the GNU General Public License as
+ | published by the Free Software Foundation.
+ |
+ | This program is distributed in the hope that it will be useful,
+ | but WITHOUT ANY WARRANTY; without even the implied warranty of
+ | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ | GNU General Public License for more details.
+ |
+ | You should have received a copy of the GNU General Public License
+ | along with this program; if not, contact Novell, Inc.
+ |
+ | To contact Novell about this file by physical or electronic mail,
+ | you may find current contact information at www.novell.com
+ |
+ |***************************************************************************
+*/
+
+/*
+ * hvm_extensions.h
+ * Interfaces for the Hyper-V extension intercepts.
+ * Engineering Contact: K. Y. Srinivasan
+ */
+
+#ifndef HVM_EXTENSION_H
+#define HVM_EXTENSION_H
+
+#include <xen/sched.h>
+#include <asm/domain.h>
+#include <xen/timer.h>
+#include <xen/time.h>
+#include <asm/regs.h>
+#include <asm/types.h>
+#include <asm/hvm/io.h>
+#include <asm/hvm/hvm.h>
+#include <asm/hvm/domain.h>
+
+int
+hyperv_dom_create(struct domain *d);
+void
+hyperv_dom_destroy(struct domain *d);
+int
+hyperv_vcpu_initialize(struct vcpu *v);
+void
+hyperv_vcpu_up(struct vcpu *v);
+void
+hyperv_vcpu_destroy(struct vcpu *v);
+int
+hyperv_do_cpu_id(uint32_t input, unsigned int *eax, unsigned int *ebx,
+                                 unsigned int *ecx, unsigned int *edx);
+int
+hyperv_do_rd_msr(uint32_t idx, struct cpu_user_regs *regs);
+int
+hyperv_do_wr_msr(uint32_t idx, struct cpu_user_regs *regs);
+int
+hyperv_do_hypercall(struct cpu_user_regs *pregs);
+int
+hyperv_initialize(struct domain *d);
+
+
+
+
+static inline int
+hyperx_intercept_domain_create(struct domain *d)
+{
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1) {
+        return(hyperv_dom_create(d));
+    }
+    return (0);
+}
+
+static inline void
+hyperx_intercept_domain_destroy(struct domain *d)
+{
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        hyperv_dom_destroy(d);
+    }
+}
+
+static inline int
+hyperx_intercept_vcpu_initialize(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        return(hyperv_vcpu_initialize(v));
+    }
+    return (0);
+}
+
+
+static inline void
+hyperx_intercept_vcpu_up(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        hyperv_vcpu_up(v);
+    }
+}
+
+static inline void
+hyperx_intercept_vcpu_destroy(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        hyperv_vcpu_destroy(v);
+    }
+}
+
+static inline int
+hyperx_intercept_do_cpuid(uint32_t idx, unsigned int *eax, unsigned int *ebx,
+                                        unsigned int *ecx, unsigned int *edx)
+{
+    struct domain *d = current->domain;
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        return(hyperv_do_cpu_id(idx, eax, ebx, ecx, edx));
+    }
+    return (0);
+}
+
+static inline int
+hyperx_intercept_do_msr_read(uint32_t idx, struct cpu_user_regs *regs)
+{
+    struct domain *d = current->domain;
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        return(hyperv_do_rd_msr(idx, regs));
+    }
+    return (0);
+}
+
+static inline int
+hyperx_intercept_do_msr_write(uint32_t idx, struct cpu_user_regs *regs)
+{
+    struct domain *d = current->domain;
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        return(hyperv_do_wr_msr(idx, regs));
+    }
+    return (0);
+}
+
+static inline int
+hyperx_intercept_do_hypercall(struct cpu_user_regs *regs)
+{
+    struct domain *d = current->domain;
+    if (d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] == 1)
+    {
+        return(hyperv_do_hypercall(regs));
+    }
+    return (0);
+}
+
+int hyperx_initialize(struct domain *d);
+
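+/*
+ * Illustrative sketch only (not defined by this header): the
+ * hyperx_intercept_* wrappers above are meant to be called from the generic
+ * HVM code paths, so the extension stays a no-op unless the
+ * HVM_PARAM_EXTEND_HYPERVISOR param has been set to 1.  A hypothetical
+ * MSR-read call site could look like:
+ *
+ *     if ( hyperx_intercept_do_msr_read(regs->ecx, regs) )
+ *         return X86EMUL_OKAY;
+ *
+ * falling through to Xen's normal MSR handling otherwise.
+ */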
+#endif
Index: xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/Makefile
===================================================================
--- /dev/null
+++ xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/Makefile
@@ -0,0 +1,2 @@
+obj-y += hv_intercept.o
+obj-y += hv_hypercall.o
Index: xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_errno.h
===================================================================
--- /dev/null
+++ xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_errno.h
@@ -0,0 +1,62 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
+ | All Rights Reserved.
+ |
+ | This program is free software; you can redistribute it and/or
+ | modify it under the terms of version 2 of the GNU General Public License as
+ | published by the Free Software Foundation.
+ |
+ | This program is distributed in the hope that it will be useful,
+ | but WITHOUT ANY WARRANTY; without even the implied warranty of
+ | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ | GNU General Public License for more details.
+ |
+ | You should have received a copy of the GNU General Public License
+ | along with this program; if not, contact Novell, Inc.
+ |
+ | To contact Novell about this file by physical or electronic mail,
+ | you may find current contact information at www.novell.com
+ |
+ |***************************************************************************
+*/
+
+/*
+ * hv_errno.h
+ * Error codes for the Novell Shim.
+ *
+ * Engineering Contact: K. Y. Srinivasan
+ */
+
+#ifndef HV_ERRNO_H
+#define HV_ERRNO_H
+
+#define HV_STATUS_SUCCESS			0x0000
+#define HV_STATUS_INVALID_HYPERCALL_CODE	0x0002
+#define HV_STATUS_INVALID_HYPERCALL_INPUT	0x0003
+#define HV_STATUS_INVALID_ALIGNMENT		0x0004
+#define HV_STATUS_INVALID_PARAMETER		0x0005
+#define HV_STATUS_ACCESS_DENIED			0x0006
+#define HV_STATUS_INVALID_PARTITION_STATE	0x0007
+#define HV_STATUS_OPERATION_DENIED		0x0008
+#define HV_STATUS_UNKNOWN_PROPERTY		0x0009
+#define HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE	0x000A
+#define HV_STATUS_INSUFFICIENT_MEMORY		0x000B
+#define HV_STATUS_PARTITION_TOO_DEEP		0x000C
+#define HV_STATUS_INVALID_PARTITION_ID		0x000D
+#define HV_STATUS_INVALID_VP_INDEX		0x000E
+#define HV_STATUS_UNABLE_TO_RESTORE_STATE	0x000F
+#define HV_STATUS_NOT_FOUND			0x0010
+#define HV_STATUS_INVALID_PORT_ID		0x0011
+#define HV_STATUS_INVALID_CONNECTION_ID		0x0012
+#define HV_STATUS_INSUFFICIENT_BUFFERS		0x0013
+#define HV_STATUS_NOT_ACKNOWLEDGED		0x0014
+#define HV_STATUS_INVALID_VP_STATE		0x0015
+#define HV_STATUS_ACKNOWLEDGED			0x0016
+#define HV_STATUS_INVALID_SAVE_RESTORE_STATE	0x0017
+#define	HV_STATUS_NO_MEMORY_4PAGES		0x0100
+#define	HV_STATUS_NO_MEMORY_16PAGES		0x0101
+#define	HV_STATUS_NO_MEMORY_64PAGES		0x0102
+#define	HV_STATUS_NO_MEMORY_256PAGES		0x0103
+#define	HV_STATUS_NO_MEMORY_1024PAGES		0x0104
+#endif
Index: xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c
===================================================================
--- /dev/null
+++ xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c
@@ -0,0 +1,153 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
+ | All Rights Reserved.
+ |
+ | This program is free software; you can redistribute it and/or
+ | modify it under the terms of version 2 of the GNU General Public License as
+ | published by the Free Software Foundation.
+ |
+ | This program is distributed in the hope that it will be useful,
+ | but WITHOUT ANY WARRANTY; without even the implied warranty of
+ | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ | GNU General Public License for more details.
+ |
+ | You should have received a copy of the GNU General Public License
+ | along with this program; if not, contact Novell, Inc.
+ |
+ | To contact Novell about this file by physical or electronic mail,
+ | you may find current contact information at www.novell.com
+ |
+ |***************************************************************************
+*/
+
+/*
+ * hv_hypercall.c
+ * This file implements the hypercall component of the Hyper-V shim.
+ *
+ * Engineering Contact: K. Y. Srinivasan
+ */
+
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/hvm/support.h>
+#include <xen/cpumask.h>
+#include <xen/event.h>
+#include <xen/domain.h>
+#include <xen/hypercall.h>
+#include <public/sched.h>
+
+#include <asm/hvm/hvm_extensions.h>
+#include "hv_shim.h"
+#include "hv_errno.h"
+#include "hv_hypercall.h"
+
+
+void
+hv_print_stats(hv_partition_t *curp, int i)
+{
+    hv_vcpu_t *v;
+    v = &curp->vcpu_state[i];
+    printk("Printing stats for vcpu ID: %d\n", i);
+
+    printk("Number of context switches: %lu\n", v->stats.num_switches);
+    printk("Number of long spin waits: %lu\n", v->stats.num_long_spinwaits);
+    printk("Number of TPR reads: %lu\n", v->stats.num_tpr_reads);
+    printk("Number of ICR reads: %lu\n", v->stats.num_icr_reads);
+    printk("Number of Eoi writes: %lu\n", v->stats.num_eoi_writes);
+    printk("Number of Tpr writes: %lu\n", v->stats.num_tpr_writes);
+    printk("Number of Icr writes: %lu\n", v->stats.num_icr_writes);
+
+}
+
+static int
+hv_switch_va(hv_vcpu_t *vcpup, int fast, paddr_t input)
+{
+    paddr_t new_cr3;
+    int ret_val = HV_STATUS_SUCCESS;
+
+    /*
+     * XXXKYS: the spec says the asID is passed via memory at offset 0 of
+     * the page whose GPA is in the input register. However, it appears
+     * the current build of longhorn (longhorn-2007-02-06-x86_64-fv-02)
+     * passes the asID in the input register instead. Need to check if
+     * future builds do this.
+     * The fast bit defines how the parameter is passed.
+     */
+    if (fast)
+    {
+        hvm_set_cr3(input);
+    } else
+    {
+        /*
+         * Slow path; copy the new value.
+         */
+        if (!hvm_copy_from_guest_phys(&new_cr3, input, sizeof(paddr_t)))
+            hvm_set_cr3(new_cr3);
+        else
+            ret_val = HV_STATUS_INVALID_PARAMETER;
+    }
+
+#ifdef HV_STATS
+    vcpup->stats.num_switches++;
+#endif
+    return (ret_val);
+}
+
+void
+hv_handle_hypercall(u64 opcode, u64 input, u64 output,
+          u64 *ret_val)
+{
+    unsigned short    verb;
+    int               fast;
+    unsigned short    rep_count;
+    unsigned short    start_index;
+    hv_partition_t   *curp = hv_get_current_partition();
+    hv_vcpu_t *vcpup = &curp->vcpu_state[hv_get_current_vcpu_index()];
+    u64        partition_id;
+
+
+    fast = (int)((opcode >> 16) & 0x1);
+    verb = (short)(opcode & 0xffff);
+    rep_count = (short)((opcode >> 32) & 0xfff);
+    start_index = (short)((opcode >> 48) & 0xfff);
+    switch (verb)
+    {
+    case HV_GET_PARTITION_ID:
+        if (!hv_privilege_check(curp, HV_ACCESS_PARTITION_ID))
+        {
+            *ret_val =
+            hv_build_hcall_retval(HV_STATUS_ACCESS_DENIED, 0);
+            return;
+        }
+        partition_id = (u64)current->domain->domain_id;
+        if (hvm_copy_to_guest_phys(output, &partition_id, 8))
+        {
+            /*
+             * Invalid output area.
+             */
+            *ret_val =
+            hv_build_hcall_retval(HV_STATUS_INVALID_PARAMETER, 0);
+            return;
+        }
+        *ret_val = hv_build_hcall_retval(HV_STATUS_SUCCESS, 0);
+        return;
+    case HV_SWITCH_VA:
+        *ret_val = hv_build_hcall_retval(hv_switch_va(vcpup, fast, input), 0);
+        return;
+
+    case HV_NOTIFY_LONG_SPIN_WAIT:
+#ifdef HV_STATS
+    vcpup->stats.num_long_spinwaits++;
+#endif
+        do_sched_op_compat(SCHEDOP_yield, 0);
+        *ret_val = hv_build_hcall_retval(HV_STATUS_SUCCESS, 0);
+        return;
+
+    default:
+        printk("Unkown/Unsupported  hypercall: verb is: %d\n", verb);
+        *ret_val =
+        hv_build_hcall_retval(HV_STATUS_INVALID_HYPERCALL_CODE, 0);
+        return;
+    }
+}
Index: xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h
===================================================================
--- /dev/null
+++ xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h
@@ -0,0 +1,46 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
+ | All Rights Reserved.
+ |
+ | This program is free software; you can redistribute it and/or
+ | modify it under the terms of version 2 of the GNU General Public License as
+ | published by the Free Software Foundation.
+ |
+ | This program is distributed in the hope that it will be useful,
+ | but WITHOUT ANY WARRANTY; without even the implied warranty of
+ | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ | GNU General Public License for more details.
+ |
+ | You should have received a copy of the GNU General Public License
+ | along with this program; if not, contact Novell, Inc.
+ |
+ | To contact Novell about this file by physical or electronic mail,
+ | you may find current contact information at www.novell.com
+ |
+ |***************************************************************************
+*/
+
+/*
+ * hv_hypercall.h
+ *
+ * Engineering Contact: K. Y. Srinivasan
+ */
+
+#ifndef HV_HYPERCALL_H
+#define HV_HYPERCALL_H
+
+
+/*
+ * Hypercall verbs.
+ */
+
+#define HV_GET_PARTITION_PROPERTY 0x0044
+#define HV_SET_PARTITION_PROPERTY 0x0045
+#define HV_GET_PARTITION_ID	0x0046
+#define HV_SWITCH_VA		0x0001
+#define HV_FLUSH_VA		0x0002
+#define HV_FLUSH_VA_LIST	0x0003
+#define HV_NOTIFY_LONG_SPIN_WAIT	0x0008
+
+#endif /* HV_HYPERCALL_H */
Index: xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
===================================================================
--- /dev/null
+++ xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c
@@ -0,0 +1,1008 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
+ | All Rights Reserved.
+ |
+ | This program is free software; you can redistribute it and/or
+ | modify it under the terms of version 2 of the GNU General Public License as
+ | published by the Free Software Foundation.
+ |
+ | This program is distributed in the hope that it will be useful,
+ | but WITHOUT ANY WARRANTY; without even the implied warranty of
+ | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ | GNU General Public License for more details.
+ |
+ | You should have received a copy of the GNU General Public License
+ | along with this program; if not, contact Novell, Inc.
+ |
+ | To contact Novell about this file by physical or electronic mail,
+ | you may find current contact information at www.novell.com
+ |
+ |***************************************************************************
+*/
+
+/*
+ * hv_intercept.c
+ * This file implements the intercepts to support the Hyper-V shim.
+ *
+ * Engineering Contact: K. Y. Srinivasan
+ */
+
+#include <asm/hvm/hvm_extensions.h>
+
+
+#include <asm/config.h>
+#include <asm/hvm/io.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/apicdef.h>
+#include <asm/regs.h>
+#include <asm/msr.h>
+#include <asm-x86/event.h>
+#include <asm-x86/apicdef.h>
+
+#include <xen/string.h>
+#include <xen/errno.h>
+#include <xen/init.h>
+#include <xen/compile.h>
+#include <xen/hvm/save.h>
+#include <public/sched.h>
+
+
+/*
+ * Local includes; extension specific.
+ */
+#include "hv_errno.h"
+#include "hv_shim.h"
+
+
+/*
+ * Implement the Hyperv Shim.
+ */
+
+extern struct hvm_mmio_handler vlapic_mmio_handler;
+
+
+static inline void
+hv_hypercall_page_initialize(void *hypercall_page);
+
+static inline void *
+get_virt_from_gmfn(struct domain *d, unsigned long gmfn)
+{
+    unsigned long mfn = gmfn_to_mfn(d, gmfn);
+    if (mfn == INVALID_MFN)
+        return (NULL);
+    return (map_domain_page(mfn));
+}
+
+static inline void
+inject_interrupt(struct vcpu *v, int vector, int type)
+{
+    struct vlapic *vlapic = vcpu_vlapic(v);
+
+    /*
+     * XXXKYS: Check the trigger mode.
+     */
+    if (vlapic_set_irq(vlapic, vector, 1))
+        vcpu_kick(v);
+}
+
+
+static inline void
+hv_write_guestid_msr(hv_partition_t *curp, hv_vcpu_t *cur_vcpu, u64 msr_content)
+{
+    curp->guest_id_msr = msr_content;
+    if (curp->guest_id_msr == 0)
+    {
+        /*
+         * The guest has cleared its guest ID; if a hypercall page had
+         * been established, mark this vcpu as no longer up.
+         */
+        if (curp->hypercall_msr)
+            cur_vcpu->flags &= ~HV_VCPU_UP;
+    }
+}
+
+
+static inline void
+hv_write_hypercall_msr(hv_partition_t *curp,
+          hv_vcpu_t    *cur_vcpu,
+          u64        msr_content)
+{
+    unsigned long gmfn;
+    void    *hypercall_page;
+    struct domain    *d = cur_vcpu->xen_vcpu->domain;
+
+    spin_lock(&curp->lock);
+    gmfn = (msr_content >> 12);
+    if (curp->guest_id_msr == 0)
+    {
+        /* Nothing to do if the guest is not registered*/
+        spin_unlock(&curp->lock);
+        return;
+    }
+    /*
+     * Guest is registered; see if we can turn on the
+     * hypercall page.
+     * XXXKYS: Can the guest write the GPA in one call and
+     * subsequently enable it? Check. For now assume that all the
+     * info is specified in one call.
+     */
+    if (((u32)msr_content & (0x00000001)) == 0)
+    {
+        /*
+         * The client is not enabling the hypercall; just
+         * ignore everything.
+         */
+        spin_unlock(&curp->lock);
+        return;
+    }
+    hypercall_page = get_virt_from_gmfn(d, gmfn);
+    if (hypercall_page == NULL)
+    {
+        /*
+         * The guest specified a bogus GPA; inject a GP fault
+         * into the guest.
+         */
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        spin_unlock(&curp->lock);
+        return;
+    }
+    hv_hypercall_page_initialize(hypercall_page);
+    unmap_domain_page(hypercall_page);
+    if (hvm_funcs.guest_x86_mode(current) == 8)
+        curp->long_mode_guest = 1;
+    else
+        curp->long_mode_guest = 0;
+    curp->hypercall_msr = msr_content;
+    spin_unlock(&curp->lock);
+    cur_vcpu->flags |= HV_VCPU_UP;
+}
+
+
+int
+hyperv_initialize(struct domain *d)
+{
+    int i;
+    printk("Hyperv extensions initialized\n");
+    if (hyperv_dom_create(d))
+    {
+        printk("Hyperv dom create failed\n");
+        return (1);
+    }
+    for (i=0; i < MAX_VIRT_CPUS; i++)
+    {
+        if (d->vcpu[i] != NULL)
+        {
+            if (hyperv_vcpu_initialize(d->vcpu[i]))
+            {
+                int j;
+                for (j = (i - 1); j >= 0; j--)
+                {
+                    hyperv_vcpu_destroy(d->vcpu[j]);
+                }
+                hyperv_dom_destroy(d);
+                return (1);
+            }
+        }
+    }
+    return (0);
+}
+
+static inline u64
+hv_get_time_since_boot(hv_partition_t *curp)
+{
+    u64    cur_time = get_s_time();
+    /* get_s_time() is in ns; the reference count is in 100ns units. */
+    return ((cur_time - curp->domain_boot_time) / 100);
+}
+
+static inline int
+hv_call_from_bios(void)
+{
+    if (hvm_paging_enabled(current))
+        return (0);
+    else
+        return (1);
+}
+
+
+static inline int
+hv_os_registered(void)
+{
+    hv_partition_t    *curp = hv_get_current_partition();
+    return (curp->guest_id_msr != 0 ? 1 : 0);
+}
+
+
+
+static inline void
+hv_set_partition_privileges(hv_partition_t *hvpp)
+{
+    /*
+     * This is based on the hypervisor spec under section 5.2.3.
+     */
+    hvpp->privileges = HV_SHIM_PRIVILEGES;
+}
+
+static inline u32
+hv_get_recommendations(void)
+{
+    /*
+     * For now we recommend the same set of features in either case; needs validation.
+     */
+    if ( paging_mode_hap(current->domain))
+        /*
+         * If HAP is enabled, the guest should not use TLB flush
+         * related enlightenments.
+         */
+        return (USE_CSWITCH_HCALL | USE_APIC_MSRS | USE_RESET_MSR | USE_RELAXED_TIMING);
+    else
+        /*
+         * For now disable TLB flush enlightenments.
+         */
+        return (USE_CSWITCH_HCALL | USE_APIC_MSRS | USE_RESET_MSR | USE_RELAXED_TIMING);
+}
+
+static inline u32
+hv_get_max_vcpus_supported(void)
+{
+    return (MAX_VIRT_CPUS);
+}
+
+
+static inline void
+hv_read_icr(u64 *icr_content)
+{
+    unsigned long    icr_low, icr_high;
+
+    /*
+     * The read_handler return value is a status code, not the register
+     * contents; the value is returned through the pointer argument.
+     */
+    vlapic_mmio_handler.read_handler(current,
+         (vlapic_base_address(vcpu_vlapic(current)) + APIC_ICR), 4, &icr_low);
+    vlapic_mmio_handler.read_handler(current,
+         (vlapic_base_address(vcpu_vlapic(current)) + APIC_ICR2), 4, &icr_high);
+    *icr_content = (((u64)icr_high << 32) | icr_low);
+
+}
+
+static inline void
+hv_read_tpr(u64 *tpr_content)
+{
+
+    vlapic_mmio_handler.read_handler(current,
+         (vlapic_base_address(vcpu_vlapic(current)) + APIC_TASKPRI),
+         4, (unsigned long *)tpr_content);
+}
+
+static inline void
+hv_write_eoi(u64 msr_content)
+{
+    u32 eoi = (u32)msr_content;
+
+    vlapic_mmio_handler.write_handler(current,
+         (vlapic_base_address(vcpu_vlapic(current)) + APIC_EOI), 4, eoi);
+
+}
+
+static inline void
+hv_write_icr(u64 msr_content)
+{
+    u32    icr_low, icr_high;
+    icr_low = (u32)msr_content;
+    icr_high = (u32)(msr_content >> 32);
+
+    if (icr_high != 0)
+    {
+        vlapic_mmio_handler.write_handler(current,
+         (vlapic_base_address(vcpu_vlapic(current)) + APIC_ICR2), 4,
+        icr_high);
+    }
+    if (icr_low != 0)
+    {
+        vlapic_mmio_handler.write_handler(current,
+         (vlapic_base_address(vcpu_vlapic(current)) + APIC_ICR), 4,
+        icr_low);
+    }
+
+}
+
+static inline void
+hv_write_tpr(u64 msr_content)
+{
+    u32 tpr = (u32)msr_content;
+
+
+    vlapic_mmio_handler.write_handler(current,
+         (vlapic_base_address(vcpu_vlapic(current)) + APIC_TASKPRI), 4, tpr);
+
+}
+
+static inline void
+hv_hypercall_page_initialize(void *hypercall_page)
+{
+    char *p;
+
+    memset(hypercall_page, 0, PAGE_SIZE);
+    p = (char *)(hypercall_page) ;
+    /*
+     * We need to differentiate hypercalls that are to be processed by Xen
+     * from those that need to be processed by the hyperV shim. Xen hypercalls
+     * use eax to pass the opcode; set the high order bit in eax for hypercalls
+     * destined for the hyperV shim (thanks Steven)
+     */
+    *(u8  *)(p + 0) = 0x0d; /* eax or imm32 */
+    *(u8  *)(p + 1) = 0x00;
+    *(u8  *)(p + 2) = 0x00;
+    *(u8  *)(p + 3) = 0x00;
+    *(u8  *)(p + 4) = 0x80; /* eax |= HYPERV_HCALL */
+
+    *(u8  *)(p + 5) = 0x0f; /* vmcall */
+    *(u8  *)(p + 6) = 0x01;
+    if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+        *(u8  *)(p + 7) = 0xc1; /* vmcall */
+    else
+        *(u8  *)(p + 7) = 0xd9; /* vmmcall */
+    *(u8  *)(p + 8) = 0xc3; /* ret */
+}
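+/*
+ * For reference, the stub bytes written above disassemble roughly as
+ * (illustrative, derived from the opcodes used here):
+ *
+ *     0d 00 00 00 80      or    $0x80000000,%eax    (tag with HYPERV_HCALL)
+ *     0f 01 c1            vmcall                    (Intel)
+ *     0f 01 d9            vmmcall                   (AMD, written instead)
+ *     c3                  ret
+ *
+ * The HYPERV_HCALL tag is what hyperv_do_hypercall() checks to distinguish
+ * shim hypercalls from ordinary Xen hypercalls.
+ */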
+
+static inline int
+hv_access_time_refcnt(hv_partition_t *curp, u64 *msr_content)
+{
+    if (!hv_privilege_check(curp, HV_ACCESS_TIME_REF_CNT))
+    {
+        /*
+         * The partition does not have the privilege to
+         * read this; return error.
+         */
+        return (0);
+    }
+    *msr_content = hv_get_time_since_boot(curp);
+    return (1);
+}
+
+
+void
+hyperv_vcpu_up(struct vcpu *v)
+{
+    hv_partition_t    *curp = hv_get_current_partition();
+    hv_vcpu_t        *vcpup;
+    vcpup  =  &curp->vcpu_state[v->vcpu_id];
+    vcpup->flags |= HV_VCPU_UP;
+}
+
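+/*
+ * Register conventions used by the hypercall path below (as implemented
+ * in this file):
+ *   64-bit guests: RCX = opcode, RDX = input GPA, R8 = output GPA,
+ *                  result returned in RAX.
+ *   32-bit guests: EDX:EAX = opcode, EBX:ECX = input GPA,
+ *                  EDI:ESI = output GPA, result returned in EDX:EAX.
+ */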
+int
+hyperv_do_hypercall(struct cpu_user_regs *pregs)
+{
+    hv_partition_t    *curp = hv_get_current_partition();
+    hv_vcpu_t        *vcpup;
+    int    long_mode_guest = curp->long_mode_guest;
+
+    if (pregs->_eax & HYPERV_HCALL)
+    {
+        u64    opcode, input, output, ret_val;
+        vcpup  =  &curp->vcpu_state[hv_get_current_vcpu_index()];
+
+        pregs->_eax &= ~HYPERV_HCALL;
+        /*
+         * This is an extension hypercall; process it; but first make
+         * sure that the CPU is in the right state for invoking
+         * the hypercall - protected mode at CPL 0.
+         */
+        if (hv_invalid_cpu_state())
+        {
+            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+            ret_val = hv_build_hcall_retval(HV_STATUS_INVALID_VP_STATE, 0);
+            hv_set_syscall_retval(pregs, long_mode_guest, ret_val);
+            return (1);
+        }
+        if (long_mode_guest)
+        {
+            opcode = pregs->ecx;
+            input = pregs->edx;
+            output = pregs->r8;
+        } else
+        {
+            opcode =
+            ((((u64)pregs->edx) << 32) | ((u64)pregs->eax));
+            input =
+            ((((u64)pregs->ebx) << 32) | ((u64)pregs->ecx));
+            output =
+            ((((u64)pregs->edi) << 32) | ((u64)pregs->esi));
+        }
+        hv_handle_hypercall(opcode, input, output, &ret_val);
+        hv_set_syscall_retval(pregs, long_mode_guest, ret_val);
+        return (1);
+    }
+    /*
+     * This hypercall did not come through the Viridian (Hyper-V)
+     * hypercall page; let the caller handle it.
+     */
+    return (0);
+}
+
+
+int
+hyperv_dom_create(struct domain *d)
+{
+    hv_partition_t    *hvpp;
+    hvpp = xmalloc_bytes(sizeof(hv_partition_t));
+    if (hvpp == NULL)
+    {
+        printk("Hyprv Dom Create: Memory allocation failed\n");
+        return (1);
+    }
+    memset(hvpp, 0, sizeof(*hvpp));
+    spin_lock_init(&hvpp->lock);
+    /*
+     * Set the partition wide privilege; We can start with no privileges
+     * and progressively turn on fancier hypervisor features.
+     */
+    hv_set_partition_privileges(hvpp);
+    /*
+     * Stash away pointer to our state in the hvm domain structure.
+     */
+    d->arch.hvm_domain.hyperv_handle = hvpp;
+    hvpp->domain_boot_time = get_s_time();
+    return (0);
+}
+
+void
+hyperv_dom_destroy(struct domain *d)
+{
+    int i;
+    hv_partition_t *curp = d->arch.hvm_domain.hyperv_handle;
+    printk("Hyper-V Domain Being Destroyed\n");
+    ASSERT(curp != NULL);
+#ifdef HV_STATS
+    printk("DUMP STATS\n");
+    for (i=0; i < MAX_VIRT_CPUS; i++)
+        if (d->vcpu[i] != NULL)
+            hv_print_stats(curp, i);
+#endif
+
+    xfree(d->arch.hvm_domain.hyperv_handle);
+    d->arch.hvm_domain.hyperv_handle = NULL;
+}
+
+int
+hyperv_vcpu_initialize(struct vcpu *v)
+{
+    hv_vcpu_t    *vcpup;
+    hv_partition_t    *curp = v->domain->arch.hvm_domain.hyperv_handle;
+    vcpup = &curp->vcpu_state[v->vcpu_id];
+    atomic_inc(&curp->vcpus_active);
+    if (v->vcpu_id == 0)
+        vcpup->flags |= HV_VCPU_BOOT_CPU;
+    /*
+     * Initialize all the synthetic MSRs corresponding to this VCPU.
+     * Note that all state is set to 0 to begin
+     * with.
+     */
+    vcpup->version_msr = 0x00000001;
+    vcpup->xen_vcpu = v;
+
+    return (0);
+}
+
+void
+hyperv_vcpu_destroy(struct vcpu *v)
+{
+    hv_vcpu_t    *vcpup;
+    hv_partition_t    *curp = v->domain->arch.hvm_domain.hyperv_handle;
+
+    vcpup = &curp->vcpu_state[v->vcpu_id];
+    atomic_dec(&curp->vcpus_active);
+    vcpup->flags &= ~HV_VCPU_UP;
+}
+
+static int
+hyperv_vcpu_save(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    struct hvm_hyperv_cpu ctxt;
+
+    hv_vcpu_t    *vcpup;
+    hv_partition_t    *curp = d->arch.hvm_domain.hyperv_handle;
+
+    if (curp == NULL)
+        return 0;
+
+    for_each_vcpu(d, v)
+    {
+        vcpup = &curp->vcpu_state[v->vcpu_id];
+
+        /*
+         * We don't need to save state for a
+         * vcpu that is down; the restore
+         * code will leave it down if there is nothing saved.
+         */
+        if ( test_bit(_VPF_down, &v->pause_flags) )
+            continue;
+        ctxt.control_msr = vcpup->control_msr;
+        ctxt.version_msr = vcpup->version_msr;
+        if (hvm_save_entry(HYPERV_CPU, v->vcpu_id, h, &ctxt) != 0 )
+            return 1;
+    }
+
+    return 0;
+}
+
+static int
+hyperv_vcpu_restore(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid;
+    struct hvm_hyperv_cpu ctxt;
+
+    hv_vcpu_t    *vcpup;
+    hv_partition_t    *curp = d->arch.hvm_domain.hyperv_handle;
+
+    if (curp == NULL)
+        return 0;
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= MAX_VIRT_CPUS )
+        return -EINVAL;
+    vcpup = &curp->vcpu_state[vcpuid];
+    if ( hvm_load_entry(HYPERV_CPU, h, &ctxt) != 0 )
+            return -EINVAL;
+
+    vcpup->control_msr = ctxt.control_msr;
+    vcpup->version_msr = ctxt.version_msr;
+
+    vcpup->flags |=  HV_VCPU_UP;
+    return 0;
+}
+
+static int
+hyperv_dom_save(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_hyperv_dom ctxt;
+    hv_partition_t    *curp = d->arch.hvm_domain.hyperv_handle;
+
+    if (curp == NULL) {
+        return 0;
+    }
+
+    ctxt.guestid_msr = curp->guest_id_msr;
+    ctxt.hypercall_msr = curp->hypercall_msr;
+    ctxt.long_mode = curp->long_mode_guest;
+    ctxt.ext_id = d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR];
+    return (hvm_save_entry(HYPERV_DOM, 0, h, &ctxt));
+}
+
+static int
+hyperv_dom_restore(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_hyperv_dom ctxt;
+    hv_partition_t    *curp;
+    void	      *hypercall_page;
+
+    if ( hvm_load_entry(HYPERV_DOM, h, &ctxt) != 0 )
+            return -EINVAL;
+    d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR] = 1;
+    if (hyperv_initialize(d))
+        return -EINVAL;
+    curp = d->arch.hvm_domain.hyperv_handle;
+
+    curp->guest_id_msr = ctxt.guestid_msr;
+    curp->hypercall_msr = ctxt.hypercall_msr;
+    /*
+     * We may have migrated from a sles10 host; re-initialize the
+     * hypercall page.
+     */
+    hypercall_page = get_virt_from_gmfn(d, (curp->hypercall_msr >> 12));
+    if (hypercall_page == NULL)
+        return -EINVAL;
+    hv_hypercall_page_initialize(hypercall_page);
+    unmap_domain_page(hypercall_page);
+    curp->long_mode_guest = ctxt.long_mode;
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(HYPERV_DOM, hyperv_dom_save, hyperv_dom_restore,
+                          1, HVMSR_PER_DOM);
+
+
+HVM_REGISTER_SAVE_RESTORE(HYPERV_CPU,hyperv_vcpu_save,hyperv_vcpu_restore,
+                          1, HVMSR_PER_VCPU);
+
+
+static int
+hv_preprocess_cpuid_leaves(unsigned int input, unsigned int *eax,
+                           unsigned int *ebx, unsigned int *ecx,
+                           unsigned int *edx )
+{
+    struct domain    *d = current->domain;
+    int    extid = d->arch.hvm_domain.params[HVM_PARAM_EXTEND_HYPERVISOR];
+
+    if (extid == 1) {
+        /*
+         * Enlightened Windows guest; need to remap and handle
+         * leaves used by PV front-end drivers.
+         */
+        if ((input >= 0x40000000) && (input <= 0x40000005))
+            return (0);
+        /*
+         * PV drivers use cpuid to query the hypervisor for details. On
+         * Windows we will use the following leaves for this:
+         *
+         * 4096: VMM Signature (corresponds to 0x40000000 on Linux)
+         * 4097: VMM Version (corresponds to 0x40000001 on Linux)
+         * 4098: Hypercall details (corresponds to 0x40000002 on Linux)
+         */
+        if ((input >= 0x40001000) && (input <= 0x40001002))
+        {
+            /* Get rid of the offset and let xen handle it */
+            input -= 0x1000;
+            cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx);
+            /* Setup hypercall MSR value - add the offset*/
+            if (input == 0x40000002)
+                *ebx |= 0x1000;
+            return (1);
+        }
+    }
+    return (0);
+}
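+/*
+ * Worked example of the remapping above (illustrative; the exact leaf
+ * contents come from cpuid_hypervisor_leaves()): a Windows PV front-end
+ * driver issues CPUID with EAX = 0x40001000.  The shim strips the 0x1000
+ * offset and forwards leaf 0x40000000 to Xen, which returns the
+ * "XenVMMXenVMM" signature.  For leaf 0x40001002 the hypercall-setup MSR
+ * index reported in EBX gets the 0x1000 offset added back, so the driver
+ * ends up writing HV_MSR_PVDRV_HCALL (0x40001000), which hyperv_do_wr_msr()
+ * passes on to wrmsr_hypervisor_regs().
+ */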
+
+int
+hyperv_do_cpu_id(unsigned int input, unsigned int *eax, unsigned int *ebx,
+                                     unsigned int *ecx, unsigned int *edx)
+{
+    uint32_t idx;
+    hv_partition_t    *curp = hv_get_current_partition();
+
+    /*
+     * hvmloader uses cpuid to set up a hypercall page; we don't want to
+     * intercept calls coming from the bootstrap (bios) code in the HVM
+     * guest; we discriminate based on if paging is enabled or not.
+     */
+    if (hv_call_from_bios())
+        /*
+         * We don't intercept this.
+         */
+        return (0);
+
+    if (input == 0x00000001)
+    {
+        *ecx |=  0x80000000;
+        return (1);
+    }
+
+    if (hv_preprocess_cpuid_leaves(input, eax, ebx, ecx, edx))
+        return (1);
+    idx = (input - 0x40000000);
+
+    switch (idx)
+    {
+    case 0:
+        /*
+         * 0x40000000: Hypervisor identification.
+         */
+        *eax = 0x40000005; /* For now clamp this */
+        *ebx = 0x65766f4e; /* "Nove" */
+        *ecx = 0x68536c6c; /* "llSh" */
+        *edx = 0x76486d69; /* "imHv" */
+        break;
+
+    case 1:
+        /*
+         * 0x40000001: Hypervisor interface identification.
+         */
+        *eax = 0x31237648; /* "Hv#1" */
+        *ebx = 0; /* Reserved */
+        *ecx = 0; /* Reserved */
+        *edx = 0; /* Reserved */
+        break;
+    case 2:
+        /*
+         * 0x40000002: Guest Info
+         */
+        if (hv_os_registered())
+        {
+            u64 guest_id = curp->guest_id_msr;
+            u32 guest_major, guest_minor;
+
+            guest_major = ((guest_id >> 32) & 0xff);
+            guest_minor = ((guest_id >> 24) & 0xff);
+            *eax = (guest_id & 0xffff); /* Build # 15:0 */
+            *ebx =
+            (guest_major << 16) | (guest_minor); /*Major: 31:16; Minor: 15:0*/
+            *ecx = ((guest_id >>16) & 0xff);  /*Service Pack/Version: 23:16*/
+            /*
+             * Service branch (31:24)|Service number (23:0)
+             * Not sure what these numbers are: XXXKYS.
+             */
+            *edx = 0; /*Service branch (31:24)|Service number (23:0) */
+        } else
+        {
+            *eax = 0;
+            *ebx = 0;
+            *ecx = 0;
+            *edx = 0;
+        }
+        break;
+    case 3:
+        /*
+         * 0x40000003: Feature identification.
+         */
+        *eax = HV_SHIM_SUPPORTED_MSRS;
+        /* We only support AccessSelfPartitionId (bit 1 of EBX) */
+        *ebx = (HV_SHIM_PRIVILEGES>>32);
+        *ecx = 0; /* Reserved */
+        *edx = 0; /*No MWAIT (bit 0), No debugging (bit 1)*/
+        break;
+    case 4:
+        /*
+         * 0x40000004: Implementation recommendations.
+         */
+        *eax = hv_get_recommendations();
+        *ebx = 2047; /* Recommended spinlock-failure retry count */
+        *ecx = 0; /* Reserved */
+        *edx = 0; /* Reserved */
+        break;
+    case 5:
+        /*
+         * 0x40000005: Implementation limits.
+         * Currently we retrieve maximum number of vcpus and
+         * logical processors (hardware threads) supported.
+         */
+        *eax = hv_get_max_vcpus_supported();
+        *ebx = 0; /* Reserved */
+        *ecx = 0; /* Reserved */
+        *edx = 0; /* Reserved */
+        break;
+    case 6:
+        /*
+         * Implementation hardware features; for now set them all to zero.
+         */
+        *eax = 0;
+        *ebx = 0; /* Reserved */
+        *ecx = 0; /* Reserved */
+        *edx = 0; /* Reserved */
+        break;
+
+    default:
+        /*
+         * We don't handle this leaf.
+         */
+        return (0);
+
+    }
+    return (1);
+}
+
+int
+hyperv_do_rd_msr(uint32_t idx, struct cpu_user_regs *regs)
+{
+    hv_partition_t    *curp = hv_get_current_partition();
+    unsigned int    vcp_index = hv_get_current_vcpu_index();
+    u64 msr_content = 0;
+    hv_vcpu_t    *cur_vcpu = &curp->vcpu_state[vcp_index];
+
+    /*
+     * hvmloader uses rdmsr; we don't want to
+     * intercept calls coming from the bootstrap (bios) code in the HVM
+     * guest; we discriminate based on whether paging is enabled.
+     */
+    if (hv_call_from_bios())
+        /*
+         * We don't intercept this.
+         */
+        return (0);
+    switch (idx)
+    {
+    case HV_MSR_GUEST_OS_ID:
+        spin_lock(&curp->lock);
+        regs->eax = (u32)(curp->guest_id_msr & 0xFFFFFFFF);
+        regs->edx = (u32)(curp->guest_id_msr >> 32);
+        spin_unlock(&curp->lock);
+        break;
+    case HV_MSR_HYPERCALL:
+        spin_lock(&curp->lock);
+        regs->eax = (u32)(curp->hypercall_msr & 0xFFFFFFFF);
+        regs->edx = (u32)(curp->hypercall_msr >> 32);
+        spin_unlock(&curp->lock);
+        if ((((u32)curp->hypercall_msr) & (0x00000001)) != 0) {
+            cur_vcpu->flags |= HV_VCPU_UP;
+        }
+        break;
+    case HV_MSR_VP_INDEX:
+        regs->eax = (u32)(vcp_index);
+        regs->edx = (u32)(0x0);
+        break;
+    case HV_MSR_ICR:
+        if (!hv_privilege_check(curp, HV_ACCESS_APIC_MSRS)) {
+            goto msr_read_error;
+        }
+        hv_read_icr(&msr_content);
+#ifdef HV_STATS
+        cur_vcpu->stats.num_icr_reads++;
+#endif
+        regs->eax = (u32)(msr_content & 0xFFFFFFFF);
+        regs->edx = (u32)(msr_content >> 32);
+        break;
+    case HV_MSR_TPR:
+        if (!hv_privilege_check(curp, HV_ACCESS_APIC_MSRS)) {
+            goto msr_read_error;
+        }
+        hv_read_tpr(&msr_content);
+#ifdef HV_STATS
+        cur_vcpu->stats.num_tpr_reads++;
+#endif
+        regs->eax = (u32)(msr_content & 0xFFFFFFFF);
+        regs->edx = (u32)(msr_content >> 32);
+        break;
+    /*
+     * The following synthetic MSRs are implemented in the Novell Shim.
+     */
+    case HV_MSR_SCONTROL:
+        if (!hv_privilege_check(curp, HV_ACCESS_SYNC_MSRS)) {
+            goto msr_read_error;
+        }
+        regs->eax = (u32)(cur_vcpu->control_msr & 0xFFFFFFFF);
+        regs->edx = (u32)(cur_vcpu->control_msr >> 32);
+        break;
+    case HV_MSR_SVERSION:
+        if (!hv_privilege_check(curp, HV_ACCESS_SYNC_MSRS)) {
+            goto msr_read_error;
+        }
+        regs->eax = (u32)(cur_vcpu->version_msr & 0xFFFFFFFF);
+        regs->edx = (u32)(cur_vcpu->version_msr >> 32);
+        break;
+    case HV_MSR_TIME_REF_COUNT:
+        if (!hv_access_time_refcnt(curp, &msr_content)) {
+            goto msr_read_error;
+        }
+        regs->eax = (u32)(msr_content & 0xFFFFFFFF);
+        regs->edx = (u32)(msr_content >> 32);
+        break;
+    case HV_MSR_PVDRV_HCALL:
+        regs->eax = 0;
+        regs->edx = 0;
+        break;
+    case HV_MSR_SYSTEM_RESET:
+        regs->eax = 0;
+        regs->edx = 0;
+        break;
+    case HV_MSR_APIC_ASSIST_PAGE:
+        /*
+         * For now ignore this.
+         */
+        regs->eax = 0;
+        regs->edx = 0;
+        break;
+    default:
+        /*
+         * We did not handle the MSR address specified;
+         * let the caller figure out
+         * What to do.
+         */
+        return (0);
+    }
+    return (1);
+msr_read_error:
+    /*
+     * Have to inject #GP fault.
+     */
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    return (1);
+}
+
+int
+hyperv_do_wr_msr(uint32_t idx, struct cpu_user_regs *regs)
+{
+    hv_partition_t    *curp = hv_get_current_partition();
+    unsigned int    vcp_index = hv_get_current_vcpu_index();
+    u64 msr_content = 0;
+    hv_vcpu_t    *cur_vcpu = &curp->vcpu_state[vcp_index];
+    struct domain    *d = current->domain;
+
+    /*
+     * hvmloader uses wrmsr; we don't want to
+     * intercept calls coming from the bootstrap (bios) code in the HVM
+     * guest; we discriminate based on whether paging is enabled.
+     */
+    if (hv_call_from_bios())
+        /*
+         * We don't intercept this.
+         */
+        return (0);
+    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+
+    switch (idx)
+    {
+    case HV_MSR_GUEST_OS_ID:
+        hv_write_guestid_msr(curp, cur_vcpu,  msr_content);
+        break;
+    case HV_MSR_HYPERCALL:
+        hv_write_hypercall_msr(curp, cur_vcpu, msr_content);
+        break;
+
+    case HV_MSR_VP_INDEX:
+        goto msr_write_error;
+
+    case HV_MSR_EOI:
+        if (!hv_privilege_check(curp, HV_ACCESS_APIC_MSRS)) {
+            goto msr_write_error;
+        }
+        hv_write_eoi(msr_content);
+#ifdef HV_STATS
+        cur_vcpu->stats.num_eoi_writes++;
+#endif
+        break;
+    case HV_MSR_ICR:
+        if (!hv_privilege_check(curp, HV_ACCESS_APIC_MSRS)) {
+            goto msr_write_error;
+        }
+        hv_write_icr(msr_content);
+#ifdef HV_STATS
+        cur_vcpu->stats.num_icr_writes++;
+#endif
+        break;
+    case HV_MSR_TPR:
+        if (!hv_privilege_check(curp, HV_ACCESS_APIC_MSRS)) {
+            goto msr_write_error;
+        }
+        hv_write_tpr(msr_content);
+#ifdef HV_STATS
+        cur_vcpu->stats.num_tpr_writes++;
+#endif
+        break;
+
+    /*
+     * The following MSRs are synthetic MSRs supported in the Novell Shim.
+     */
+    case HV_MSR_SCONTROL:
+        if (!hv_privilege_check(curp, HV_ACCESS_SYNC_MSRS)) {
+            goto msr_write_error;
+        }
+        cur_vcpu->control_msr = msr_content;
+        break;
+    case HV_MSR_SVERSION:
+        if (!hv_privilege_check(curp, HV_ACCESS_SYNC_MSRS)) {
+            goto msr_write_error;
+        }
+        /*
+         * This is a read-only MSR; generate #GP
+         */
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        break;
+    case HV_MSR_TIME_REF_COUNT:
+        /*
+         * This is a read-only msr.
+         */
+        goto msr_write_error;
+        break;
+    case HV_MSR_PVDRV_HCALL:
+        /*
+         * Establish the hypercall page for PV drivers.
+         */
+        wrmsr_hypervisor_regs(0x40000000, regs->eax, regs->edx);
+        break;
+    case HV_MSR_SYSTEM_RESET:
+        /*
+         * Shutdown the domain/partition.
+         */
+        if (msr_content & 0x1) {
+            domain_shutdown(d, SHUTDOWN_reboot);
+        }
+        break;
+    case HV_MSR_APIC_ASSIST_PAGE:
+        /*
+         * For now ignore this.
+         */
+        break;
+
+    default:
+        /*
+         * We did not handle the MSR address;
+         * let the caller deal with this.
+         */
+        return (0);
+    }
+    return (1);
+msr_write_error:
+    /*
+     * Have to inject #GP fault.
+     */
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    return (1);
+}
Index: xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_shim.h
===================================================================
--- /dev/null
+++ xen-3.4.1-testing/xen/arch/x86/hvm/hyperv/hv_shim.h
@@ -0,0 +1,285 @@
+/****************************************************************************
+ |
+ | Copyright (c) [2007, 2008] Novell, Inc.
+ | All Rights Reserved.
+ |
+ | This program is free software; you can redistribute it and/or
+ | modify it under the terms of version 2 of the GNU General Public License as
+ | published by the Free Software Foundation.
+ |
+ | This program is distributed in the hope that it will be useful,
+ | but WITHOUT ANY WARRANTY; without even the implied warranty of
+ | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ | GNU General Public License for more details.
+ |
+ | You should have received a copy of the GNU General Public License
+ | along with this program; if not, contact Novell, Inc.
+ |
+ | To contact Novell about this file by physical or electronic mail,
+ | you may find current contact information at www.novell.com
+ |
+ |***************************************************************************
+*/
+
+/*
+ * Hyperv Shim Implementation.
+ *
+ * Engineering Contact: K. Y. Srinivasan
+ */
+
+#ifndef HV_SHIM_H
+#define HV_SHIM_H
+
+#include <xen/sched.h>
+#include <xen/types.h>
+#include <xen/timer.h>
+#include <asm/current.h>
+#include <asm/domain.h>
+#include <asm/shadow.h>
+#include <public/xen.h>
+
+#include "hv_hypercall.h"
+
+/*
+ * Synthetic MSR addresses
+ */
+#define HV_MSR_GUEST_OS_ID	0x40000000
+#define HV_MSR_HYPERCALL	0x40000001
+#define HV_MSR_VP_INDEX		0x40000002
+#define HV_MSR_SYSTEM_RESET	0x40000003
+#define HV_MSR_TIME_REF_COUNT	0x40000020
+#define HV_MSR_EOI		0x40000070
+#define HV_MSR_ICR		0x40000071
+#define HV_MSR_TPR		0x40000072
+#define HV_MSR_APIC_ASSIST_PAGE	0x40000073
+
+#define HV_MSR_SCONTROL		0x40000080
+#define HV_MSR_SVERSION		0x40000081
+#define HV_MSR_SIEFP		0x40000082
+#define HV_MSR_SIMP		0x40000083
+#define HV_MSR_SEOM		0x40000084
+#define HV_MSR_SINT0		0x40000090
+#define HV_MSR_SINT1		0x40000091
+#define HV_MSR_SINT2		0x40000092
+#define HV_MSR_SINT3		0x40000093
+#define HV_MSR_SINT4		0x40000094
+#define HV_MSR_SINT5		0x40000095
+#define HV_MSR_SINT6		0x40000096
+#define HV_MSR_SINT7		0x40000097
+#define HV_MSR_SINT8		0x40000098
+#define HV_MSR_SINT9		0x40000099
+#define HV_MSR_SINT10		0x4000009A
+#define HV_MSR_SINT11		0x4000009B
+#define HV_MSR_SINT12		0x4000009C
+#define HV_MSR_SINT13		0x4000009D
+#define HV_MSR_SINT14		0x4000009E
+#define HV_MSR_SINT15		0x4000009F
+
+#define HV_MSR_TIMER0_CONFIG	0x400000B0
+#define HV_MSR_TIMER0_COUNT	0x400000B1
+#define HV_MSR_TIMER1_CONFIG	0x400000B2
+#define HV_MSR_TIMER1_COUNT	0x400000B3
+#define HV_MSR_TIMER2_CONFIG	0x400000B4
+#define HV_MSR_TIMER2_COUNT	0x400000B5
+#define HV_MSR_TIMER3_CONFIG	0x400000B6
+#define HV_MSR_TIMER3_COUNT	0x400000B7
+
+/*
+ * Domain privilege flags
+ */
+
+#define _ACCESS_VP_RUNTIME  0
+#define ACCESS_VP_RUNTIME (1L<<_ACCESS_VP_RUNTIME)
+#define _ACCESS_TIME_REF_COUNT 1
+#define ACCESS_TIME_REF_COUNT (1L<<_ACCESS_TIME_REF_COUNT)
+#define _ACCESS_SYNIC_MSRS 2
+#define ACCESS_SYNIC_MSRS (1L<<_ACCESS_SYNIC_MSRS)
+#define _ACCESS_SYNTHETIC_TIMERS 3
+#define ACCESS_SYNTHETIC_TIMERS (1L<<_ACCESS_SYNTHETIC_TIMERS)
+#define _ACCESS_APIC_MSRS 4
+#define ACCESS_APIC_MSRS (1L<<_ACCESS_APIC_MSRS)
+#define _ACCESS_HYPERCALL_MSRS 5
+#define ACCESS_HYPERCALL_MSRS (1L<<_ACCESS_HYPERCALL_MSRS)
+#define _ACCESS_VP_INDEX 6
+#define ACCESS_VP_INDEX (1L<<_ACCESS_VP_INDEX)
+#define _ACCESS_SELF_PARTITION_ID 33
+#define ACCESS_SELF_PARTITION_ID (1ULL<<_ACCESS_SELF_PARTITION_ID)
+
+#define HV_SHIM_PRIVILEGES \
+    (ACCESS_TIME_REF_COUNT | ACCESS_APIC_MSRS | ACCESS_HYPERCALL_MSRS | \
+     ACCESS_VP_INDEX |ACCESS_SELF_PARTITION_ID)
+
+/*
+ * Guest recommendations
+ */
+#define _USE_CSWITCH_HCALL   0
+#define USE_CSWITCH_HCALL (1U<<_USE_CSWITCH_HCALL)
+#define _USE_TLBFLUSH_HCALL  1
+#define USE_TLBFLUSH_HCALL (1U<<_USE_TLBFLUSH_HCALL)
+#define _USE_REMOTE_TLBFLUSH_HCALL 2
+#define USE_REMOTE_TLBFLUSH_HCALL (1U<<_USE_REMOTE_TLBFLUSH_HCALL)
+#define _USE_APIC_MSRS  3
+#define USE_APIC_MSRS   (1U<<_USE_APIC_MSRS)
+#define _USE_RESET_MSR  4
+#define USE_RESET_MSR   (1U<<_USE_RESET_MSR)
+#define _USE_RELAXED_TIMING 5
+#define USE_RELAXED_TIMING (1U<<_USE_RELAXED_TIMING)
+
+/*
+ * Supported Synthetic MSRs. 0.83 HyperV spec, section 3.4
+ * Supported features.
+ */
+#define _MSR_VP_RUNTIME   0
+#define MSR_VP_RUNTIME    (1U<<_MSR_VP_RUNTIME)
+#define _MSR_TIME_REF_CNT   1
+#define MSR_TIME_REF_CNT  (1U<<_MSR_TIME_REF_CNT)
+#define _MSR_SYN_IC         2
+#define MSR_SYN_IC        (1U<<_MSR_SYN_IC)
+#define _MSR_SYN_TIMER      3
+#define MSR_SYN_TIMER     (1U<<_MSR_SYN_TIMER)
+#define _APIC_MSRS          4
+#define APIC_MSRS         (1U<<_APIC_MSRS)
+#define _HYPERCALL_MSRS     5
+#define HYPERCALL_MSRS    (1U<<_HYPERCALL_MSRS)
+#define _MSR_VP_INDEX       6
+#define MSR_VP_INDEX      (1U<<_MSR_VP_INDEX)
+#define _RESET_MSR           7
+#define RESET_MSR         (1U<<_RESET_MSR)
+
+#define HV_SHIM_SUPPORTED_MSRS \
+    (MSR_TIME_REF_CNT|APIC_MSRS|HYPERCALL_MSRS|MSR_VP_INDEX|RESET_MSR)
+
+/*
+ * MSR for supporting PV drivers on longhorn.
+ */
+#define HV_MSR_PVDRV_HCALL	0x40001000
+
+/*
+ * Tag for HyperV hcalls.
+ */
+#define HYPERV_HCALL    0x80000000
+
+/*
+ * Hyperv Shim VCPU flags.
+ */
+#define HV_VCPU_BOOT_CPU	0x00000001
+#define HV_VCPU_UP		0x00000002
+
+
+/*
+ * Stats structure.
+ */
+
+typedef struct {
+    u64	num_switches;
+    u64 num_long_spinwaits;
+    u64	num_tpr_reads;
+    u64	num_icr_reads;
+    u64	num_eoi_writes;
+    u64	num_tpr_writes;
+    u64	num_icr_writes;
+} hv_vcpu_stats_t;
+
+typedef struct hv_vcpu {
+    /*
+     * Per-vcpu state to support the hyperv shim;
+     */
+    unsigned long	flags;
+    /*
+     * Synthetic msrs.
+     */
+    u64	control_msr;
+    u64	version_msr;
+    struct vcpu	*xen_vcpu; /*corresponding xen vcpu*/
+    hv_vcpu_stats_t	stats;
+} hv_vcpu_t;
+
+
+#define HV_STATS /* KYS: Temporary */
+
+typedef struct hv_partition {
+    /*
+     * State maintained on a per guest basis to implement
+     * the Hyperv shim.
+     */
+    s_time_t    domain_boot_time;
+    spinlock_t	lock;
+    atomic_t	vcpus_active;
+    u64		guest_id_msr;
+    u64		hypercall_msr;
+    u64		privileges;
+    int		long_mode_guest;
+    /*
+     * Each VCPU here corresponds to the vcpu in the underlying hypervisor;
+     * they share the same ID.
+     */
+    hv_vcpu_t	vcpu_state[MAX_VIRT_CPUS];
+} hv_partition_t;
+
+
+/*
+ * Privilege flags.
+ */
+
+#define HV_ACCESS_VP_RUNTIME	(1ULL << 0)
+#define HV_ACCESS_TIME_REF_CNT	(1ULL << 1)
+#define HV_ACCESS_SYNC_MSRS	(1ULL << 2)
+#define HV_ACCESS_SYNC_TIMERS	(1ULL << 3)
+#define HV_ACCESS_APIC_MSRS	(1ULL << 4)
+#define HV_ACCESS_PARTITION_ID	(1ULL << 33)
+
+#define hv_get_current_partition() \
+((current)->domain->arch.hvm_domain.hyperv_handle)
+
+#define hv_get_current_vcpu_index() (current)->vcpu_id
+
+
+static inline int
+hv_invalid_cpu_state(void)
+{
+    int state;
+    state = hvm_funcs.guest_x86_mode(current);
+    if ((state == 4) || (state == 8))
+    {
+        return (0);
+    }
+    return (1);
+}
+
+static inline u64
+hv_build_hcall_retval(int code, int reps)
+{
+    u64	ret_val=0;
+    ret_val |= (code & 0xff);
+    ret_val |= (((long long)(reps & 0xfff)) << 32);
+    return (ret_val);
+}
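+/*
+ * Example (illustrative): hv_build_hcall_retval(HV_STATUS_ACCESS_DENIED, 0)
+ * yields 0x0000000000000006; the status code occupies the low bits and the
+ * reps-completed count of a rep hypercall occupies bits 43:32.  This is the
+ * value hyperv_do_hypercall() hands to hv_set_syscall_retval().
+ */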
+
+static inline void  hv_set_syscall_retval(struct cpu_user_regs *pregs,
+				int long_mode, u64 ret_val)
+{
+    if (long_mode)
+    {
+        pregs->eax = ret_val;
+    }
+    else
+    {
+        pregs->edx = (u32)(ret_val >> 32);
+        pregs->eax = (u32)(ret_val);
+    }
+}
+
+static inline int
+hv_privilege_check(hv_partition_t *curp, u64 flags)
+{
+    return ((curp->privileges & flags)? 1: 0);
+}
+
+void
+hv_handle_hypercall(u64 opcode, u64 input, u64 output,
+		  u64 *ret_val);
+
+
+void hv_print_stats(hv_partition_t *curp, int i);
+
+#endif /*HV_SHIM_H */