valgrind/valgrind.xen.patch

1560 lines
60 KiB
Diff

https://bugs.kde.org/show_bug.cgi?id=390553
https://github.com/olafhering/valgrind/compare/master...xen
coregrind/m_syswrap/syswrap-linux.c | 17 +
coregrind/m_syswrap/syswrap-xen.c | 549 ++++++++++++++++++++++++++++++------
include/vki/vki-linux.h | 15
include/vki/vki-xen-domctl.h | 215 +++++++++++++-
include/vki/vki-xen-gnttab.h | 1
include/vki/vki-xen-memory.h | 12
include/vki/vki-xen-physdev.h | 9
include/vki/vki-xen-schedop.h | 2
include/vki/vki-xen-sysctl.h | 88 +++++
include/vki/vki-xen-version.h | 11
include/vki/vki-xen.h | 4
11 files changed, 831 insertions(+), 92 deletions(-)
Index: valgrind-3.13.0/coregrind/m_syswrap/syswrap-xen.c
===================================================================
--- valgrind-3.13.0.orig/coregrind/m_syswrap/syswrap-xen.c
+++ valgrind-3.13.0/coregrind/m_syswrap/syswrap-xen.c
@@ -234,6 +234,16 @@ PRE(memory_op)
case VKI_XENMEM_get_sharing_shared_pages:
break;
+ case VKI_XENMEM_get_pod_target:
+ case VKI_XENMEM_set_pod_target: {
+ struct vki_xen_pod_target *arg =
+ (struct vki_xen_pod_target *)ARG2;
+ PRE_MEM_READ("XENMEM_set_pod_target target_pages",
+ (Addr)&arg->target_pages, sizeof(arg->target_pages));
+ PRE_MEM_READ("XENMEM_set_pod_target domid",
+ (Addr)&arg->domid, sizeof(arg->domid));
+ break;
+ }
case VKI_XENMEM_access_op: {
struct vki_xen_mem_event_op *arg =
(struct vki_xen_mem_event_op *)ARG2;
@@ -532,6 +542,7 @@ PRE(xen_version)
case VKI_XENVER_pagesize:
case VKI_XENVER_guest_handle:
case VKI_XENVER_commandline:
+ case VKI_XENVER_build_id:
/* No inputs */
break;
@@ -584,6 +595,11 @@ PRE(sysctl) {
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
break;
default:
bad_intf_version(tid, layout, arrghs, status, flags,
@@ -626,16 +642,11 @@ PRE(sysctl) {
break;
case 0x0000000a:
case 0x0000000b:
+ default:
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
break;
- default:
- VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
- "%"PRIx32" not implemented yet\n",
- sysctl->interface_version);
- SET_STATUS_Failure(VKI_EINVAL);
- return;
}
break;
@@ -681,17 +692,69 @@ PRE(sysctl) {
break;
case VKI_XEN_SYSCTL_topologyinfo:
- PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
- PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
- PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
- PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
+// case VKI_XEN_SYSCTL_cputopoinfo:
+ switch (sysctl->interface_version)
+ {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
+ PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
+ PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
+ PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
+ break;
+ case 0x0000000c:
+ default:
+ PRE_XEN_SYSCTL_READ(cputopoinfo_0000000c, num_cpus);
+ PRE_XEN_SYSCTL_READ(cputopoinfo_0000000c, cputopo);
+ break;
+ }
break;
case VKI_XEN_SYSCTL_numainfo:
- PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
- PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
- PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
- PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
+ switch (sysctl->interface_version)
+ {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ PRE_XEN_SYSCTL_READ(numainfo_0000000b, max_node_index);
+ PRE_XEN_SYSCTL_READ(numainfo_0000000b, node_to_memsize);
+ PRE_XEN_SYSCTL_READ(numainfo_0000000b, node_to_memfree);
+ PRE_XEN_SYSCTL_READ(numainfo_0000000b, node_to_node_distance);
+ break;
+ case 0x0000000c:
+ default:
+ PRE_XEN_SYSCTL_READ(numainfo_0000000c, num_nodes);
+ PRE_XEN_SYSCTL_READ(numainfo_0000000c, meminfo);
+ PRE_XEN_SYSCTL_READ(numainfo_0000000c, distance);
+ break;
+ }
+ break;
+
+ case VKI_XEN_SYSCTL_pcitopoinfo:
+ switch (sysctl->interface_version)
+ {
+ case 0x0000000c:
+ default:
+ PRE_XEN_SYSCTL_READ(pcitopoinfo_0000000c, num_devs);
+ PRE_XEN_SYSCTL_READ(pcitopoinfo_0000000c, devs);
+ break;
+ }
+ break;
+
+ case VKI_XEN_SYSCTL_get_cpu_featureset:
+ switch (sysctl->interface_version)
+ {
+ case 0x0000000c:
+ default:
+ PRE_XEN_SYSCTL_READ(cpu_featureset_0000000d, index);
+ PRE_XEN_SYSCTL_READ(cpu_featureset_0000000d, nr_features);
+ break;
+ }
break;
default:
@@ -730,6 +793,8 @@ PRE(domctl)
case 0x0000000a:
case 0x0000000b:
case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
break;
default:
bad_intf_version(tid, layout, arrghs, status, flags,
@@ -757,9 +822,42 @@ PRE(domctl)
break;
case VKI_XEN_DOMCTL_createdomain:
- PRE_XEN_DOMCTL_READ(createdomain, ssidref);
- PRE_XEN_DOMCTL_READ(createdomain, handle);
- PRE_XEN_DOMCTL_READ(createdomain, flags);
+ switch (domctl->interface_version) {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ PRE_XEN_DOMCTL_READ(createdomain_0000000a, ssidref);
+ PRE_XEN_DOMCTL_READ(createdomain_0000000a, handle);
+ PRE_XEN_DOMCTL_READ(createdomain_0000000a, flags);
+ break;
+ case 0x0000000b:
+ PRE_XEN_DOMCTL_READ(createdomain_0000000b, ssidref);
+ PRE_XEN_DOMCTL_READ(createdomain_0000000b, handle);
+ PRE_XEN_DOMCTL_READ(createdomain_0000000b, flags);
+#if defined(__i386__) || defined(__x86_64__)
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000b, createdomain_0000000b, config.dummy);
+#endif
+#if defined(__arm__) || defined(__aarch64__)
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000b, createdomain_0000000b, config.gic_version);
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000b, createdomain_0000000b, config.nr_spis);
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000b, createdomain_0000000b, config.clock_frequency);
+#endif
+ break;
+ case 0x0000000c:
+ default:
+ PRE_XEN_DOMCTL_READ(createdomain_0000000c, ssidref);
+ PRE_XEN_DOMCTL_READ(createdomain_0000000c, handle);
+ PRE_XEN_DOMCTL_READ(createdomain_0000000c, flags);
+#if defined(__i386__) || defined(__x86_64__)
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000c, createdomain_0000000c, config.emulation_flags);
+#endif
+#if defined(__arm__) || defined(__aarch64__)
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000c, createdomain_0000000c, config.gic_version);
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000c, createdomain_0000000c, config.nr_spis);
+ __PRE_XEN_DOMCTL_READ(createdomain_0000000c, createdomain_0000000c, config.clock_frequency);
+#endif
+ }
break;
case VKI_XEN_DOMCTL_gethvmcontext:
@@ -780,30 +878,67 @@ PRE(domctl)
break;
case VKI_XEN_DOMCTL_gethvmcontext_partial:
- __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
- __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
- __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);
-
- switch (domctl->u.hvmcontext_partial.type) {
- case VKI_HVM_SAVE_CODE(CPU):
- if ( domctl->u.hvmcontext_partial.buffer.p )
- PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
- (Addr)domctl->u.hvmcontext_partial.buffer.p,
- VKI_HVM_SAVE_LENGTH(CPU));
- break;
- case VKI_HVM_SAVE_CODE(MTRR):
- if ( domctl->u.hvmcontext_partial.buffer.p )
- PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
- (Addr)domctl->u.hvmcontext_partial.buffer.p,
- VKI_HVM_SAVE_LENGTH(MTRR));
- break;
- default:
- bad_subop(tid, layout, arrghs, status, flags,
+ switch (domctl->interface_version) {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000d, type);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000d, instance);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000d, buffer);
+
+ switch (domctl->u.hvmcontext_partial_0000000d.type) {
+ case VKI_HVM_SAVE_CODE(CPU):
+ if ( domctl->u.hvmcontext_partial_0000000d.buffer.p )
+ PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
+ (Addr)domctl->u.hvmcontext_partial_0000000d.buffer.p,
+ VKI_HVM_SAVE_LENGTH(CPU));
+ break;
+ case VKI_HVM_SAVE_CODE(MTRR):
+ if ( domctl->u.hvmcontext_partial_0000000d.buffer.p )
+ PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
+ (Addr)domctl->u.hvmcontext_partial_0000000d.buffer.p,
+ VKI_HVM_SAVE_LENGTH(MTRR));
+ break;
+ default:
+ bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_domctl_gethvmcontext_partial type",
- domctl->u.hvmcontext_partial.type);
- break;
- }
- break;
+ domctl->u.hvmcontext_partial_0000000d.type);
+ break;
+ }
+ break;
+ case 0x0000000e:
+ default:
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, type);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, instance);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, bufsz);
+ __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_0000000e, buffer);
+
+ switch (domctl->u.hvmcontext_partial_0000000e.type) {
+ case VKI_HVM_SAVE_CODE(CPU):
+ if ( domctl->u.hvmcontext_partial_0000000e.buffer.p )
+ PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
+ (Addr)domctl->u.hvmcontext_partial_0000000e.buffer.p,
+ VKI_HVM_SAVE_LENGTH(CPU));
+ break;
+ case VKI_HVM_SAVE_CODE(MTRR):
+ if ( domctl->u.hvmcontext_partial_0000000e.buffer.p )
+ PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
+ (Addr)domctl->u.hvmcontext_partial_0000000e.buffer.p,
+ VKI_HVM_SAVE_LENGTH(MTRR));
+ break;
+ default:
+ bad_subop(tid, layout, arrghs, status, flags,
+ "__HYPERVISOR_domctl_gethvmcontext_partial type",
+ domctl->u.hvmcontext_partial_0000000e.type);
+ break;
+ }
+ break;
+ }
+ break;
case VKI_XEN_DOMCTL_max_mem:
PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
@@ -822,6 +957,8 @@ PRE(domctl)
__PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_00000007, machine_sbdf);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
__PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, dev);
__PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, flag);
switch (domctl->u.assign_device_0000000b.dev) {
@@ -841,6 +978,27 @@ PRE(domctl)
break;
}
break;
+ case 0x0000000e:
+ default:
+ __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000e, dev);
+ __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000e, flags);
+ switch (domctl->u.assign_device_0000000e.dev) {
+ case VKI_XEN_DOMCTL_DEV_PCI:
+ __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000e, u.pci);
+ break;
+ case VKI_XEN_DOMCTL_DEV_DT:
+ __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000e, u.dt);
+ PRE_MEM_READ("XEN_DOMCTL_test_assign_device.dt",
+ (Addr)domctl->u.assign_device_0000000e.u.dt.path.p,
+ domctl->u.assign_device_0000000e.u.dt.size);
+ break;
+ default:
+ bad_subop(tid, layout, arrghs, status, flags,
+ "__HYPERVISOR_domctl_test_assign_device dev",
+ domctl->u.assign_device_0000000e.dev);
+ break;
+ }
+ break;
}
break;
case VKI_XEN_DOMCTL_assign_device:
@@ -852,6 +1010,8 @@ PRE(domctl)
__PRE_XEN_DOMCTL_READ(assign_device, assign_device_00000007, machine_sbdf);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
__PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, dev);
__PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, flag);
switch (domctl->u.assign_device_0000000b.dev) {
@@ -871,6 +1031,27 @@ PRE(domctl)
break;
}
break;
+ case 0x0000000e:
+ default:
+ __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000e, dev);
+ __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000e, flags);
+ switch (domctl->u.assign_device_0000000e.dev) {
+ case VKI_XEN_DOMCTL_DEV_PCI:
+ __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000e, u.pci);
+ break;
+ case VKI_XEN_DOMCTL_DEV_DT:
+ __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000e, u.dt);
+ PRE_MEM_READ("XEN_DOMCTL_assign_device.dt",
+ (Addr)domctl->u.assign_device_0000000e.u.dt.path.p,
+ domctl->u.assign_device_0000000e.u.dt.size);
+ break;
+ default:
+ bad_subop(tid, layout, arrghs, status, flags,
+ "__HYPERVISOR_domctl_assign_device dev",
+ domctl->u.assign_device_0000000e.dev);
+ break;
+ }
+ break;
}
break;
case VKI_XEN_DOMCTL_deassign_device:
@@ -882,6 +1063,8 @@ PRE(domctl)
__PRE_XEN_DOMCTL_READ(deassign_device, assign_device_00000007, machine_sbdf);
break;
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
__PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, dev);
__PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, flag);
switch (domctl->u.assign_device_0000000b.dev) {
@@ -901,6 +1084,27 @@ PRE(domctl)
break;
}
break;
+ case 0x0000000e:
+ default:
+ __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000e, dev);
+ __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000e, flags);
+ switch (domctl->u.assign_device_0000000e.dev) {
+ case VKI_XEN_DOMCTL_DEV_PCI:
+ __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000e, u.pci);
+ break;
+ case VKI_XEN_DOMCTL_DEV_DT:
+ __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000e, u.dt);
+ PRE_MEM_READ("XEN_DOMCTL_deassign_device.dt",
+ (Addr)domctl->u.assign_device_0000000e.u.dt.path.p,
+ domctl->u.assign_device_0000000e.u.dt.size);
+ break;
+ default:
+ bad_subop(tid, layout, arrghs, status, flags,
+ "__HYPERVISOR_domctl_deassign_device dev",
+ domctl->u.assign_device_0000000e.dev);
+ break;
+ }
+ break;
}
break;
@@ -916,6 +1120,7 @@ PRE(domctl)
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.elapsed_nsec);
break;
case 0x0000000b:
+ default:
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, tsc_mode);
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, gtsc_khz);
__PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, incarnation);
@@ -946,8 +1151,19 @@ PRE(domctl)
break;
case VKI_XEN_DOMCTL_settimeoffset:
- PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
- break;
+ switch (domctl->interface_version) {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ PRE_XEN_DOMCTL_READ(settimeoffset_0000000a, time_offset_seconds);
+ break;
+ case 0x0000000b:
+ default:
+ PRE_XEN_DOMCTL_READ(settimeoffset_0000000b, time_offset_seconds);
+ break;
+ }
+ break;
case VKI_XEN_DOMCTL_getvcpuinfo:
PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
@@ -991,6 +1207,7 @@ PRE(domctl)
__PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
break;
case 0x0000000a:
+ default:
__PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
__PRE_XEN_DOMCTL_READ(
@@ -1014,6 +1231,7 @@ PRE(domctl)
domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
break;
case 0x0000000a:
+ default:
__PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
__PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
@@ -1070,13 +1288,8 @@ PRE(domctl)
break;
case 0x00000009:
- __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
- break;
-
default:
- VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_get_ext_vcpucontext domctl version %#"
- PRIx32" not implemented\n", domctl->interface_version);
- SET_STATUS_Failure(VKI_EINVAL);
+ __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
break;
}
break;
@@ -1110,6 +1323,7 @@ PRE(domctl)
break;
case 0x00000009:
+ default:
__PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
__PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
@@ -1138,12 +1352,6 @@ PRE(domctl)
}
#endif
break;
-
- default:
- VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_set_ext_vcpucontext domctl version %#"
- PRIx32" not implemented\n", domctl->interface_version);
- SET_STATUS_Failure(VKI_EINVAL);
- break;
}
break;
@@ -1248,6 +1456,7 @@ PRE(domctl)
__PRE_XEN_DOMCTL_READ(mem_event_op, mem_event_op_00000007, mode);
break;
case 0x0000000b:
+ default:
__PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, op);
__PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, mode);
break;
@@ -1278,8 +1487,9 @@ PRE(domctl)
case VKI_XEN_DOMCTL_monitor_op:
switch (domctl->interface_version) {
case 0x000000b:
+ default:
if (domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
- domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE) {
+ domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
switch (domctl->u.monitor_op_0000000b.event) {
case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
__PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_0000000b, u.mov_to_cr);
@@ -1299,6 +1509,16 @@ PRE(domctl)
}
break;
+ case VKI_XEN_DOMCTL_set_gnttab_limits:
+ switch (domctl->interface_version) {
+ case 0x000000e:
+ default:
+ PRE_XEN_DOMCTL_READ(set_gnttab_limits_0000000e, grant_frames);
+ PRE_XEN_DOMCTL_READ(set_gnttab_limits_0000000e, maptrack_frames);
+ break;
+ }
+ break;
+
default:
bad_subop(tid, layout, arrghs, status, flags,
"__HYPERVISOR_domctl", domctl->cmd);
@@ -1474,6 +1694,7 @@ POST(memory_op)
case VKI_XENMEM_claim_pages:
case VKI_XENMEM_maximum_gpfn:
case VKI_XENMEM_remove_from_physmap:
+ case VKI_XENMEM_set_pod_target:
case VKI_XENMEM_access_op:
/* No outputs */
break;
@@ -1518,6 +1739,15 @@ POST(memory_op)
case VKI_XENMEM_get_sharing_shared_pages:
/* No outputs */
break;
+ case VKI_XENMEM_get_pod_target: {
+ struct vki_xen_pod_target *arg =
+ (struct vki_xen_pod_target *)ARG2;
+ POST_MEM_WRITE((Addr)&arg->tot_pages, sizeof(arg->tot_pages));
+ POST_MEM_WRITE((Addr)&arg->pod_cache_pages, sizeof(arg->pod_cache_pages));
+ POST_MEM_WRITE((Addr)&arg->pod_entries, sizeof(arg->pod_entries));
+ }
+ break;
+
}
}
@@ -1643,6 +1873,9 @@ POST(xen_version)
case VKI_XENVER_commandline:
POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
break;
+ case VKI_XENVER_build_id:
+ POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_build_id));
+ break;
}
}
@@ -1672,6 +1905,11 @@ POST(sysctl)
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
+ case 0x00000010:
break;
default:
return;
@@ -1706,6 +1944,7 @@ POST(sysctl)
break;
case 0x0000000a:
case 0x0000000b:
+ default:
POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
@@ -1751,6 +1990,10 @@ POST(sysctl)
break;
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
+ case 0x0000000f:
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
@@ -1765,30 +2008,107 @@ POST(sysctl)
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
break;
+ case 0x00000010:
+ default:
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, threads_per_core);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, cores_per_socket);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, nr_cpus);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_cpu_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, nr_nodes);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_node_id);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, cpu_khz);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, capabilities);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, total_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, free_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, scrub_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, outstanding_pages);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, max_mfn);
+ POST_XEN_SYSCTL_WRITE(physinfo_00000010, hw_cap[8]);
+ break;
}
break;
case VKI_XEN_SYSCTL_topologyinfo:
- POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
- if (sysctl->u.topologyinfo.cpu_to_core.p)
- POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
- sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
- if (sysctl->u.topologyinfo.cpu_to_socket.p)
- POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
- sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
- if (sysctl->u.topologyinfo.cpu_to_node.p)
- POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
- sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
+// case VKI_XEN_SYSCTL_cputopoinfo:
+ switch (sysctl->interface_version)
+ {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
+ if (sysctl->u.topologyinfo.cpu_to_core.p)
+ POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
+ sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
+ if (sysctl->u.topologyinfo.cpu_to_socket.p)
+ POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
+ sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
+ if (sysctl->u.topologyinfo.cpu_to_node.p)
+ POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
+ sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
+ break;
+ case 0x0000000c:
+ default:
+ POST_XEN_SYSCTL_WRITE(cputopoinfo_0000000c, num_cpus);
+ if (sysctl->u.cputopoinfo_0000000c.cputopo.p)
+ POST_MEM_WRITE((Addr)sysctl->u.cputopoinfo_0000000c.cputopo.p,
+ sizeof(vki_xen_sysctl_cputopo_0000000c_t) * sysctl->u.cputopoinfo_0000000c.num_cpus);
+ break;
+ }
break;
case VKI_XEN_SYSCTL_numainfo:
- POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
- POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
- sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
- POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
- sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
- POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
- sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
+ switch (sysctl->interface_version)
+ {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ POST_XEN_SYSCTL_WRITE(numainfo_0000000b, max_node_index);
+ POST_MEM_WRITE((Addr)sysctl->u.numainfo_0000000b.node_to_memsize.p,
+ sizeof(uint64_t) * sysctl->u.numainfo_0000000b.max_node_index);
+ POST_MEM_WRITE((Addr)sysctl->u.numainfo_0000000b.node_to_memfree.p,
+ sizeof(uint64_t) * sysctl->u.numainfo_0000000b.max_node_index);
+ POST_MEM_WRITE((Addr)sysctl->u.numainfo_0000000b.node_to_node_distance.p,
+ sizeof(uint32_t) *
+ (sysctl->u.numainfo_0000000b.max_node_index * sysctl->u.numainfo_0000000b.max_node_index));
+ break;
+ case 0x0000000c:
+ default:
+ POST_XEN_SYSCTL_WRITE(numainfo_0000000c, num_nodes);
+ POST_MEM_WRITE((Addr)sysctl->u.numainfo_0000000c.meminfo.p,
+ sizeof(uint64_t) * sysctl->u.numainfo_0000000c.num_nodes);
+ POST_MEM_WRITE((Addr)sysctl->u.numainfo_0000000c.distance.p,
+ sizeof(uint32_t) *
+ (sysctl->u.numainfo_0000000c.num_nodes * sysctl->u.numainfo_0000000c.num_nodes));
+ break;
+ }
+ break;
+
+ case VKI_XEN_SYSCTL_pcitopoinfo:
+ switch (sysctl->interface_version)
+ {
+ case 0x0000000c:
+ default:
+ POST_XEN_SYSCTL_WRITE(pcitopoinfo_0000000c, num_devs);
+ POST_MEM_WRITE((Addr)sysctl->u.pcitopoinfo_0000000c.nodes.p,
+ sizeof(uint32_t) * sysctl->u.pcitopoinfo_0000000c.num_devs);
+ break;
+ }
+ break;
+
+ case VKI_XEN_SYSCTL_get_cpu_featureset:
+ switch (sysctl->interface_version)
+ {
+ case 0x0000000c:
+ default:
+ POST_XEN_SYSCTL_WRITE(cpu_featureset_0000000d, nr_features);
+ POST_MEM_WRITE((Addr)sysctl->u.cpu_featureset_0000000d.features.p,
+ sizeof(uint32_t) * sysctl->u.cpu_featureset_0000000d.nr_features);
+ break;
+ }
break;
/* No outputs */
@@ -1808,6 +2128,9 @@ POST(domctl){
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ case 0x0000000e:
break;
default:
return;
@@ -1869,6 +2192,7 @@ POST(domctl){
sizeof(vki_xen_guest_tsc_info_t));
break;
case 0x0000000b:
+ default:
__POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, tsc_mode);
__POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, gtsc_khz);
__POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, incarnation);
@@ -1897,14 +2221,33 @@ POST(domctl){
break;
case VKI_XEN_DOMCTL_gethvmcontext_partial:
- switch (domctl->u.hvmcontext_partial.type) {
- case VKI_HVM_SAVE_CODE(CPU):
- if ( domctl->u.hvmcontext_partial.buffer.p )
- POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
- VKI_HVM_SAVE_LENGTH(CPU));
- break;
- }
- break;
+ switch (domctl->interface_version) {
+ case 0x00000007:
+ case 0x00000008:
+ case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
+ case 0x0000000c:
+ case 0x0000000d:
+ switch (domctl->u.hvmcontext_partial_0000000d.type) {
+ case VKI_HVM_SAVE_CODE(CPU):
+ if ( domctl->u.hvmcontext_partial_0000000d.buffer.p )
+ POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_0000000d.buffer.p,
+ VKI_HVM_SAVE_LENGTH(CPU));
+ break;
+ } break; /* no fall-through: 0x0000000e below uses a different union layout */
+ case 0x0000000e:
+ default:
+ switch (domctl->u.hvmcontext_partial_0000000e.type) {
+ case VKI_HVM_SAVE_CODE(CPU):
+ if ( domctl->u.hvmcontext_partial_0000000e.buffer.p )
+ POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_0000000e.buffer.p,
+ VKI_HVM_SAVE_LENGTH(CPU));
+ break;
+ }
+ break;
+ }
+ break;
case VKI_XEN_DOMCTL_scheduler_op:
if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
@@ -1943,6 +2286,7 @@ POST(domctl){
domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
break;
case 0x0000000a:
+ default:
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
POST_MEM_WRITE(
(Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
@@ -1992,6 +2336,7 @@ POST(domctl){
break;
case 0x00000009:
case 0x0000000a:
+ default:
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
@@ -2044,6 +2389,7 @@ POST(domctl){
break;
case 0x00000009:
+ default:
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
@@ -2120,6 +2466,7 @@ POST(domctl){
__POST_XEN_DOMCTL_WRITE(mem_event_op, mem_event_op_00000007, port);
break;
case 0x0000000b:
+ default:
__POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_0000000b, port);
break;
}
@@ -2143,6 +2490,46 @@ POST(domctl){
}
break;
+ case 0x000000c:
+ case 0x000000d:
+ if (domctl->u.monitor_op_0000000c.op == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
+ switch(domctl->u.monitor_op_0000000c.event) {
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000c, u.mov_to_cr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000c, u.mov_to_msr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000c, u.guest_request);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000c, u.debug_exception);
+ break;
+ }
+ }
+
+ break;
+ case 0x000000e:
+ default:
+ if (domctl->u.monitor_op_0000000e.op == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
+ switch(domctl->u.monitor_op_0000000e.event) {
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000e, u.mov_to_cr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000e, u.mov_to_msr);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000e, u.guest_request);
+ break;
+ case VKI_XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION:
+ __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000e, u.debug_exception);
+ break;
+ }
+ }
+
+ break;
}
break;
}
@@ -2264,6 +2651,8 @@ static XenHypercallTableEntry hypercall_
HYPXY(__VKI_XEN_domctl, domctl, 1), // 36
// __VKI_XEN_kexec_op // 37
HYPXY(__VKI_XEN_tmem_op, tmem_op, 1), // 38
+ // __VKI_XEN_xenpmu_op // 40
+ // __VKI_XEN_dm_op // 41
};
static void bad_before ( ThreadId tid,
Index: valgrind-3.13.0/coregrind/m_syswrap/syswrap-linux.c
===================================================================
--- valgrind-3.13.0.orig/coregrind/m_syswrap/syswrap-linux.c
+++ valgrind-3.13.0/coregrind/m_syswrap/syswrap-linux.c
@@ -7977,6 +7977,17 @@ PRE(sys_ioctl)
(Addr)args->arr, sizeof(*(args->arr)) * args->num);
break;
}
+ case VKI_XEN_IOCTL_PRIVCMD_DM_OP: {
+ struct vki_xen_privcmd_dm_op *args =
+ (struct vki_xen_privcmd_dm_op *)(ARG3);
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_DM_OP(dom)",
+ (Addr)&args->dom, sizeof(args->dom));
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_DM_OP(num)",
+ (Addr)&args->num, sizeof(args->num));
+ PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_DM_OP(ubufs)",
+ (Addr)args->ubufs, sizeof(*(args->ubufs)) * args->num);
+ break;
+ }
case VKI_XEN_IOCTL_EVTCHN_BIND_VIRQ: {
struct vki_xen_ioctl_evtchn_bind_virq *args =
@@ -10404,6 +10415,12 @@ POST(sys_ioctl)
POST_MEM_WRITE((Addr)args->err, sizeof(*(args->err)) * args->num);
}
break;
+ case VKI_XEN_IOCTL_PRIVCMD_DM_OP: {
+ struct vki_xen_privcmd_dm_op *args =
+ (struct vki_xen_privcmd_dm_op *)(ARG3);
+ POST_MEM_WRITE((Addr)args->ubufs, sizeof(*(args->ubufs)) * args->num);
+ }
+ break;
case VKI_XEN_IOCTL_EVTCHN_BIND_VIRQ:
case VKI_XEN_IOCTL_EVTCHN_BIND_INTERDOMAIN:
Index: valgrind-3.13.0/include/vki/vki-linux.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-linux.h
+++ valgrind-3.13.0/include/vki/vki-linux.h
@@ -3329,12 +3329,27 @@ struct vki_xen_privcmd_mmapbatch_v2 {
int __user *err; /* array of error codes */
};
+struct vki_xen_privcmd_dm_op_buf {
+ void __user *uptr;
+ __vki_kernel_size_t size;
+};
+
+struct vki_xen_privcmd_dm_op {
+ __vki_u16 dom;
+ __vki_u16 num;
+ const struct vki_xen_privcmd_dm_op_buf __user *ubufs;
+};
+
+
#define VKI_XEN_IOCTL_PRIVCMD_HYPERCALL _VKI_IOC(_VKI_IOC_NONE, 'P', 0, sizeof(struct vki_xen_privcmd_hypercall))
#define VKI_XEN_IOCTL_PRIVCMD_MMAP _VKI_IOC(_VKI_IOC_NONE, 'P', 2, sizeof(struct vki_xen_privcmd_mmap))
#define VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH _VKI_IOC(_VKI_IOC_NONE, 'P', 3, sizeof(struct vki_xen_privcmd_mmapbatch))
#define VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2 _VKI_IOC(_VKI_IOC_NONE, 'P', 4, sizeof(struct vki_xen_privcmd_mmapbatch_v2))
+#define VKI_XEN_IOCTL_PRIVCMD_DM_OP _VKI_IOC(_VKI_IOC_NONE, 'P', 5, sizeof(struct vki_xen_privcmd_dm_op))
+#define VKI_XEN_IOCTL_PRIVCMD_RESTRICT _VKI_IOC(_VKI_IOC_NONE, 'P', 6, sizeof(__vki_u16))
+
//----------------------------------------------------------------------
// Xen evtchn IOCTL
//----------------------------------------------------------------------
Index: valgrind-3.13.0/include/vki/vki-xen-domctl.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen-domctl.h
+++ valgrind-3.13.0/include/vki/vki-xen-domctl.h
@@ -37,7 +37,9 @@
* - 0x00000009: Xen 4.3 & 4.4
* - 0x0000000a: Xen 4.5
* - 0x0000000b: Xen 4.6
- * - 0x0000000c: Xen 4.7
+ * - 0x0000000c: Xen 4.8
+ * - 0x0000000d: Xen 4.9
+ * - 0x0000000e: Xen 4.10
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
@@ -119,19 +121,69 @@
#define VKI_XEN_DOMCTL_cacheflush 71
#define VKI_XEN_DOMCTL_get_vcpu_msrs 72
#define VKI_XEN_DOMCTL_set_vcpu_msrs 73
+#define VKI_XEN_DOMCTL_setvnumainfo 74
+#define VKI_XEN_DOMCTL_psr_cmt_op 75
+#define VKI_XEN_DOMCTL_arm_configure_domain 76
#define VKI_XEN_DOMCTL_monitor_op 77 /* new in 4.6 */
+#define VKI_XEN_DOMCTL_psr_cat_op 78
+#define VKI_XEN_DOMCTL_soft_reset 79
+#define VKI_XEN_DOMCTL_set_gnttab_limits 80
+#define VKI_XEN_DOMCTL_vuart_op 81
#define VKI_XEN_DOMCTL_gdbsx_guestmemio 1000
#define VKI_XEN_DOMCTL_gdbsx_pausevcpu 1001
#define VKI_XEN_DOMCTL_gdbsx_unpausevcpu 1002
#define VKI_XEN_DOMCTL_gdbsx_domstatus 1003
-struct vki_xen_domctl_createdomain {
+struct vki_xen_domctl_createdomain_0000000a {
/* IN parameters */
vki_uint32_t ssidref;
vki_xen_domain_handle_t handle;
vki_uint32_t flags;
};
+struct vki_xen_arch_domainconfig_0000000b {
+#if defined(__i386__) || defined(__x86_64__)
+ vki_uint8_t dummy;
+#endif
+#if defined(__arm__) || defined(__aarch64__)
+ /* IN/OUT */
+ vki_uint8_t gic_version;
+ /* IN */
+ vki_uint32_t nr_spis;
+ vki_uint32_t clock_frequency;
+#endif
+};
+
+struct vki_xen_domctl_createdomain_0000000b {
+ /* IN parameters */
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t flags;
+ struct vki_xen_arch_domainconfig_0000000b config;
+};
+
+/* The layout changed in 4.7, which shared the domctl interface version with 4.6 */
+struct vki_xen_arch_domainconfig_0000000c {
+#if defined(__i386__) || defined(__x86_64__)
+ vki_uint32_t emulation_flags;
+#endif
+#if defined(__arm__) || defined(__aarch64__)
+ /* IN/OUT */
+ vki_uint8_t gic_version;
+ /* IN */
+ vki_uint32_t nr_spis;
+ vki_uint32_t clock_frequency;
+#endif
+};
+
+struct vki_xen_domctl_createdomain_0000000c {
+ /* IN parameters */
+ vki_uint32_t ssidref;
+ vki_xen_domain_handle_t handle;
+ vki_uint32_t flags;
+ struct vki_xen_arch_domainconfig_0000000c config;
+};
+
struct vki_xen_domctl_getdomaininfo_00000007 {
/* OUT variables. */
vki_xen_domid_t domain;
@@ -215,6 +267,7 @@ struct vki_xen_domctl_vcpuaffinity_00000
vki_uint32_t vcpu; /* IN */
#define VKI_XEN_VCPUAFFINITY_HARD (1U<<0)
#define VKI_XEN_VCPUAFFINITY_SOFT (1U<<1)
+#define VKI_XEN_VCPUAFFINITY_FORCE (1U<<2)
vki_uint32_t flags; /* IN */
struct vki_xenctl_bitmap cpumap_hard; /* IN/OUT */
struct vki_xenctl_bitmap cpumap_soft; /* IN/OUT */
@@ -282,9 +335,12 @@ struct vki_xen_domctl_scheduler_op {
#define VKI_XEN_SCHEDULER_CREDIT2 6
#define VKI_XEN_SCHEDULER_ARINC653 7
#define VKI_XEN_SCHEDULER_RTDS 8
+#define VKI_XEN_SCHEDULER_NULL 9
vki_uint32_t cmd; /* VKI_XEN_DOMCTL_SCHEDOP_* */
#define VKI_XEN_DOMCTL_SCHEDOP_putinfo 0
#define VKI_XEN_DOMCTL_SCHEDOP_getinfo 1
+#define VKI_XEN_DOMCTL_SCHEDOP_putvcpuinfo 2
+#define VKI_XEN_DOMCTL_SCHEDOP_getvcpuinfo 3
union {
struct xen_domctl_sched_sedf {
vki_xen_uint64_aligned_t period;
@@ -333,10 +389,14 @@ struct vki_xen_domctl_hypercall_init {
vki_xen_uint64_aligned_t gmfn; /* GMFN to be initialised */
};
-struct vki_xen_domctl_settimeoffset {
+struct vki_xen_domctl_settimeoffset_0000000a {
vki_int32_t time_offset_seconds;
};
+struct vki_xen_domctl_settimeoffset_0000000b {
+ vki_int64_t time_offset_seconds;
+};
+
struct vki_xen_domctl_cpuid {
vki_uint32_t input[2];
vki_uint32_t eax;
@@ -378,14 +438,22 @@ struct vki_xen_domctl_hvmcontext {
typedef struct vki_xen_domctl_hvmcontext vki_xen_domctl_hvmcontext_t;
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_t);
-struct vki_xen_domctl_hvmcontext_partial {
+struct vki_xen_domctl_hvmcontext_partial_0000000d {
vki_uint32_t type; /* IN */
vki_uint32_t instance; /* IN */
VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* IN/OUT buffer */
};
-typedef struct vki_xen_domctl_hvmcontext_partial vki_xen_domctl_hvmcontext_partial_t;
-DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_t);
+typedef struct vki_xen_domctl_hvmcontext_partial_0000000d vki_xen_domctl_hvmcontext_partial_0000000d_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_0000000d_t);
+struct vki_xen_domctl_hvmcontext_partial_0000000e {
+ vki_uint32_t type; /* IN */
+ vki_uint32_t instance; /* IN */
+ vki_xen_uint64_aligned_t bufsz; /* IN */
+ VKI_XEN_GUEST_HANDLE_64(vki_uint8) buffer; /* IN/OUT buffer */
+};
+typedef struct vki_xen_domctl_hvmcontext_partial_0000000e vki_xen_domctl_hvmcontext_partial_0000000e_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_hvmcontext_partial_0000000e_t);
struct vki_xen_domctl_pin_mem_cacheattr {
vki_xen_uint64_aligned_t start, end; /* IN */
@@ -464,6 +532,20 @@ struct vki_xen_domctl_assign_device_0000
vki_uint32_t flag; /* flag of assigned device */
};
+struct vki_xen_domctl_assign_device_0000000e {
+ vki_uint32_t dev; /* XEN_DOMCTL_DEV_* */
+ vki_uint32_t flags;
+ union {
+ struct {
+ vki_uint32_t machine_sbdf; /* machine PCI ID of assigned device */
+ } pci;
+ struct {
+ vki_uint32_t size; /* Length of the path */
+ VKI_XEN_GUEST_HANDLE_64(vki_uint8) path; /* path to the device tree node */
+ } dt;
+ } u;
+};
+
struct vki_xen_domctl_debug_op {
vki_uint32_t op; /* IN */
vki_uint32_t vcpu; /* IN */
@@ -515,6 +597,12 @@ struct vki_xen_domctl_vcpu_msrs {
#define VKI_XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP 2
#define VKI_XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT 3
#define VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST 4
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION 5
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_CPUID 6
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL 7
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_INTERRUPT 8
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS 9
+#define VKI_XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED 10
struct vki_xen_domctl_monitor_op_0000000b {
vki_uint32_t op; /* vki_xen_DOMCTL_MONITOR_OP_* */
@@ -553,6 +641,97 @@ struct vki_xen_domctl_monitor_op_0000000
} u;
};
+struct vki_xen_domctl_monitor_op_0000000c {
+ vki_uint32_t op; /* vki_xen_DOMCTL_MONITOR_OP_* */
+
+ /*
+ * When used with ENABLE/DISABLE this has to be set to
+ * the requested vki_xen_DOMCTL_MONITOR_EVENT_* value.
+ * With GET_CAPABILITIES this field returns a bitmap of
+ * events supported by the platform, in the format
+ * (1 << vki_xen_DOMCTL_MONITOR_EVENT_*).
+ */
+ vki_uint32_t event;
+
+ /*
+ * Further options when issuing vki_xen_DOMCTL_MONITOR_OP_ENABLE.
+ */
+ union {
+ struct {
+ /* Which control register */
+ vki_uint8_t index;
+ /* Pause vCPU until response */
+ vki_uint8_t sync;
+ /* Send event only on a change of value */
+ vki_uint8_t onchangeonly;
+ } mov_to_cr;
+
+ struct {
+ vki_uint32_t msr;
+ } mov_to_msr;
+
+ struct {
+ /* Pause vCPU until response */
+ vki_uint8_t sync;
+ } guest_request;
+
+ struct {
+ /* Pause vCPU until response */
+ vki_uint8_t sync;
+ } debug_exception;
+ } u;
+};
+
+struct vki_xen_domctl_monitor_op_0000000e {
+ vki_uint32_t op; /* vki_xen_DOMCTL_MONITOR_OP_* */
+
+ /*
+ * When used with ENABLE/DISABLE this has to be set to
+ * the requested vki_xen_DOMCTL_MONITOR_EVENT_* value.
+ * With GET_CAPABILITIES this field returns a bitmap of
+ * events supported by the platform, in the format
+ * (1 << vki_xen_DOMCTL_MONITOR_EVENT_*).
+ */
+ vki_uint32_t event;
+
+ /*
+ * Further options when issuing vki_xen_DOMCTL_MONITOR_OP_ENABLE.
+ */
+ union {
+ struct {
+ /* Which control register */
+ vki_uint8_t index;
+ /* Pause vCPU until response */
+ vki_uint8_t sync;
+ /* Send event only on a change of value */
+ vki_uint8_t onchangeonly;
+            /* Alignment padding */
+ vki_uint8_t pad1;
+ vki_uint32_t pad2;
+ /*
+ * Send event only if the changed bit in the control register
+ * is not masked.
+ */
+ vki_xen_uint64_aligned_t bitmask;
+ } mov_to_cr;
+
+ struct {
+ vki_uint32_t msr;
+ } mov_to_msr;
+
+ struct {
+ /* Pause vCPU until response */
+ vki_uint8_t sync;
+ vki_uint8_t allow_userspace;
+ } guest_request;
+
+ struct {
+ /* Pause vCPU until response */
+ vki_uint8_t sync;
+ } debug_exception;
+ } u;
+};
+
struct vki_xen_domctl_monitor_op {
vki_uint32_t op;
@@ -576,12 +755,20 @@ struct vki_xen_domctl_monitor_op {
} u;
};
+struct vki_xen_domctl_set_gnttab_limits_0000000e {
+ vki_uint32_t grant_frames;
+ vki_uint32_t maptrack_frames;
+};
+
struct vki_xen_domctl {
vki_uint32_t cmd;
vki_uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
vki_xen_domid_t domain;
union {
- struct vki_xen_domctl_createdomain createdomain;
+ struct vki_xen_domctl_createdomain_0000000a createdomain_0000000a;
+ struct vki_xen_domctl_createdomain_0000000b createdomain_0000000b;
+ struct vki_xen_domctl_createdomain_0000000c createdomain_0000000c;
+ //struct vki_xen_domctl_arm_configuredomain configuredomain;
struct vki_xen_domctl_getdomaininfo_00000007 getdomaininfo_00000007;
struct vki_xen_domctl_getdomaininfo_00000008 getdomaininfo_00000008;
struct vki_xen_domctl_getdomaininfo_00000009 getdomaininfo_00000009;
@@ -605,18 +792,21 @@ struct vki_xen_domctl {
struct vki_xen_domctl_ioport_permission ioport_permission;
struct vki_xen_domctl_hypercall_init hypercall_init;
//struct vki_xen_domctl_arch_setup arch_setup;
- struct vki_xen_domctl_settimeoffset settimeoffset;
+ struct vki_xen_domctl_settimeoffset_0000000a settimeoffset_0000000a;
+ struct vki_xen_domctl_settimeoffset_0000000b settimeoffset_0000000b;
//struct vki_xen_domctl_disable_migrate disable_migrate;
struct vki_xen_domctl_tsc_info_00000007 tsc_info_00000007;
struct vki_xen_domctl_tsc_info_0000000b tsc_info_0000000b;
//struct vki_xen_domctl_real_mode_area real_mode_area;
struct vki_xen_domctl_hvmcontext hvmcontext;
- struct vki_xen_domctl_hvmcontext_partial hvmcontext_partial;
+ struct vki_xen_domctl_hvmcontext_partial_0000000d hvmcontext_partial_0000000d;
+ struct vki_xen_domctl_hvmcontext_partial_0000000e hvmcontext_partial_0000000e;
struct vki_xen_domctl_address_size address_size;
//struct vki_xen_domctl_sendtrigger sendtrigger;
//struct vki_xen_domctl_get_device_group get_device_group;
struct vki_xen_domctl_assign_device_00000007 assign_device_00000007;
struct vki_xen_domctl_assign_device_0000000b assign_device_0000000b;
+ struct vki_xen_domctl_assign_device_0000000e assign_device_0000000e;
//struct vki_xen_domctl_bind_pt_irq bind_pt_irq;
//struct vki_xen_domctl_memory_mapping memory_mapping;
//struct vki_xen_domctl_ioport_mapping ioport_mapping;
@@ -644,6 +834,13 @@ struct vki_xen_domctl {
//struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
//struct vki_xen_domctl_gdbsx_domstatus gdbsx_domstatus;
struct vki_xen_domctl_monitor_op_0000000b monitor_op_0000000b;
+ struct vki_xen_domctl_monitor_op_0000000c monitor_op_0000000c;
+ struct vki_xen_domctl_monitor_op_0000000e monitor_op_0000000e;
+ //struct vki_xen_domctl_vnuma vnuma;
+ //struct vki_xen_domctl_psr_cmt_op psr_cmt_op;
+ //struct vki_xen_domctl_psr_cat_op psr_cat_op;
+ struct vki_xen_domctl_set_gnttab_limits_0000000e set_gnttab_limits_0000000e;
+ //struct vki_xen_domctl_vuart_op vuart_op;
vki_uint8_t pad[128];
} u;
};
Index: valgrind-3.13.0/include/vki/vki-xen-gnttab.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen-gnttab.h
+++ valgrind-3.13.0/include/vki/vki-xen-gnttab.h
@@ -42,6 +42,7 @@ typedef vki_uint32_t vki_xen_grant_ref_t
#define VKI_XEN_GNTTABOP_get_status_frames 9
#define VKI_XEN_GNTTABOP_get_version 10
#define VKI_XEN_GNTTABOP_swap_grant_ref 11
+#define VKI_XEN_GNTTABOP_cache_flush 12
struct vki_xen_gnttab_setup_table {
/* IN parameters. */
Index: valgrind-3.13.0/include/vki/vki-xen-memory.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen-memory.h
+++ valgrind-3.13.0/include/vki/vki-xen-memory.h
@@ -48,8 +48,12 @@
#define VKI_XENMEM_get_sharing_freed_pages 18
#define VKI_XENMEM_get_sharing_shared_pages 19
#define VKI_XENMEM_access_op 21
+#define VKI_XENMEM_sharing_op 22
+#define VKI_XENMEM_add_to_physmap_batch 23
#define VKI_XENMEM_claim_pages 24
#define VKI_XENMEM_machphys_compat_mfn_list 25
+#define VKI_XENMEM_get_vnumainfo 26
+#define VKI_XENMEM_reserved_device_memory_map 27
struct vki_xen_memory_map {
unsigned int nr_entries;
@@ -95,6 +99,14 @@ struct vki_xen_remove_from_physmap {
vki_xen_pfn_t gpfn;
};
+struct vki_xen_pod_target {
+ vki_uint64_t target_pages;
+ vki_uint64_t tot_pages;
+ vki_uint64_t pod_cache_pages;
+ vki_uint64_t pod_entries;
+ vki_xen_domid_t domid;
+};
+
struct vki_xen_mem_event_op {
vki_uint8_t op;
vki_xen_domid_t domain;
Index: valgrind-3.13.0/include/vki/vki-xen-physdev.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen-physdev.h
+++ valgrind-3.13.0/include/vki/vki-xen-physdev.h
@@ -60,6 +60,15 @@ struct vki_xen_physdev_unmap_pirq {
int pirq;
};
+struct vki_physdev_pci_device {
+ /* IN */
+ vki_uint16_t seg;
+ vki_uint8_t bus;
+ vki_uint8_t devfn;
+};
+typedef struct vki_physdev_pci_device vki_physdev_pci_device_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_physdev_pci_device_t);
+
#endif // __VKI_XEN_PHYSDEV_H
/*--------------------------------------------------------------------*/
Index: valgrind-3.13.0/include/vki/vki-xen-schedop.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen-schedop.h
+++ valgrind-3.13.0/include/vki/vki-xen-schedop.h
@@ -47,4 +47,6 @@ typedef struct vki_xen_remote_shutdown v
#define VKI_XEN_SCHEDOP_watchdog 6
+#define VKI_XEN_SCHEDOP_pin_override 7
+
#endif /* __VKI_XEN_SCHED_OP_H */
Index: valgrind-3.13.0/include/vki/vki-xen-sysctl.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen-sysctl.h
+++ valgrind-3.13.0/include/vki/vki-xen-sysctl.h
@@ -9,6 +9,11 @@
* - 0x00000009: Xen 4.2
* - 0x0000000a: Xen 4.3 & 4.4
* - 0x0000000b: Xen 4.5
+ * - 0x0000000c: Xen 4.6
+ * - 0x0000000d: Xen 4.7
+ * - 0x0000000e: Xen 4.8
+ * - 0x0000000f: Xen 4.9
+ * - 0x00000010: Xen 4.10
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
@@ -34,10 +39,20 @@
#define VKI_XEN_SYSCTL_page_offline_op 14
#define VKI_XEN_SYSCTL_lockprof_op 15
#define VKI_XEN_SYSCTL_topologyinfo 16
+#define VKI_XEN_SYSCTL_cputopoinfo 16 /* Since xen-4.6 */
#define VKI_XEN_SYSCTL_numainfo 17
#define VKI_XEN_SYSCTL_cpupool_op 18
#define VKI_XEN_SYSCTL_scheduler_op 19
#define VKI_XEN_SYSCTL_coverage_op 20
+#define VKI_XEN_SYSCTL_gcov_op 20 /* Since xen-4.9 */
+#define VKI_XEN_SYSCTL_psr_cmt_op 21
+#define VKI_XEN_SYSCTL_pcitopoinfo 22
+#define VKI_XEN_SYSCTL_psr_cat_op 23
+#define VKI_XEN_SYSCTL_tmem_op 24
+#define VKI_XEN_SYSCTL_get_cpu_levelling_caps 25
+#define VKI_XEN_SYSCTL_get_cpu_featureset 26
+#define VKI_XEN_SYSCTL_livepatch_op 27
+#define VKI_XEN_SYSCTL_set_parameter 28
struct vki_xen_sysctl_readconsole {
/* IN */
@@ -120,12 +135,45 @@ struct vki_xen_sysctl_topologyinfo {
VKI_XEN_GUEST_HANDLE_64(vki_uint32) cpu_to_node;
};
-struct vki_xen_sysctl_numainfo {
+struct vki_xen_sysctl_cputopo_0000000c {
+ vki_uint32_t core;
+ vki_uint32_t socket;
+ vki_uint32_t node;
+};
+typedef struct vki_xen_sysctl_cputopo_0000000c vki_xen_sysctl_cputopo_0000000c_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_sysctl_cputopo_0000000c_t);
+
+struct vki_xen_sysctl_cputopoinfo_0000000c {
+ vki_uint32_t num_cpus;
+ VKI_XEN_GUEST_HANDLE_64(vki_xen_sysctl_cputopo_0000000c_t) cputopo;
+};
+
+struct vki_xen_sysctl_pcitopoinfo_0000000c {
+ vki_uint32_t num_devs;
+ VKI_XEN_GUEST_HANDLE_64(vki_physdev_pci_device_t) devs;
+ VKI_XEN_GUEST_HANDLE_64(vki_uint32) nodes;
+};
+
+struct vki_xen_sysctl_numainfo_0000000b {
vki_uint32_t max_node_index;
VKI_XEN_GUEST_HANDLE_64(vki_uint64) node_to_memsize;
VKI_XEN_GUEST_HANDLE_64(vki_uint64) node_to_memfree;
VKI_XEN_GUEST_HANDLE_64(vki_uint32) node_to_node_distance;
};
+
+struct vki_xen_xen_sysctl_meminfo_0000000c {
+ vki_uint64_t memsize;
+ vki_uint64_t memfree;
+};
+typedef struct vki_xen_xen_sysctl_meminfo_0000000c vki_xen_xen_sysctl_meminfo_0000000c_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_xen_sysctl_meminfo_0000000c_t);
+
+struct vki_xen_sysctl_numainfo_0000000c {
+ vki_uint32_t num_nodes;
+ VKI_XEN_GUEST_HANDLE_64(vki_xen_xen_sysctl_meminfo_0000000c_t) meminfo;
+ VKI_XEN_GUEST_HANDLE_64(vki_uint32) distance;
+};
+
struct vki_xen_sysctl_physinfo_00000008 {
vki_uint32_t threads_per_core;
vki_uint32_t cores_per_socket;
@@ -159,6 +207,23 @@ struct vki_xen_sysctl_physinfo_0000000a
vki_uint32_t capabilities;
};
+struct vki_xen_sysctl_physinfo_00000010 {
+ vki_uint32_t threads_per_core;
+ vki_uint32_t cores_per_socket;
+ vki_uint32_t nr_cpus; /* # CPUs currently online */
+ vki_uint32_t max_cpu_id; /* Largest possible CPU ID on this host */
+ vki_uint32_t nr_nodes; /* # nodes currently online */
+ vki_uint32_t max_node_id; /* Largest possible node ID on this host */
+ vki_uint32_t cpu_khz;
+ vki_uint32_t capabilities; /* XEN_SYSCTL_PHYSCAP_??? */
+ vki_xen_uint64_aligned_t total_pages;
+ vki_xen_uint64_aligned_t free_pages;
+ vki_xen_uint64_aligned_t scrub_pages;
+ vki_xen_uint64_aligned_t outstanding_pages;
+ vki_xen_uint64_aligned_t max_mfn; /* Largest possible MFN on this host */
+ vki_uint32_t hw_cap[8];
+};
+
/* vki_xen_sysctl_physinfo_0000000b is the same as 0000000a */
struct vki_xen_sysctl_sched_id {
@@ -166,6 +231,12 @@ struct vki_xen_sysctl_sched_id {
vki_uint32_t sched_id;
};
+struct vki_xen_sysctl_cpu_featureset_0000000d {
+ vki_uint32_t index;
+ vki_uint32_t nr_features;
+ VKI_XEN_GUEST_HANDLE_64(vki_uint32) features;
+};
+
struct vki_xen_sysctl {
vki_uint32_t cmd;
vki_uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
@@ -174,8 +245,13 @@ struct vki_xen_sysctl {
//struct vki_xen_sysctl_tbuf_op tbuf_op;
struct vki_xen_sysctl_physinfo_00000008 physinfo_00000008;
struct vki_xen_sysctl_physinfo_0000000a physinfo_0000000a;
+ struct vki_xen_sysctl_physinfo_00000010 physinfo_00000010;
struct vki_xen_sysctl_topologyinfo topologyinfo;
- struct vki_xen_sysctl_numainfo numainfo;
+ struct vki_xen_sysctl_cputopoinfo_0000000c cputopoinfo_0000000c;
+ struct vki_xen_sysctl_pcitopoinfo_0000000c pcitopoinfo_0000000c;
+ //struct vki_xen_sysctl_cputopoinfo cputopoinfo_0000000c;
+ struct vki_xen_sysctl_numainfo_0000000b numainfo_0000000b;
+ struct vki_xen_sysctl_numainfo_0000000c numainfo_0000000c;
struct vki_xen_sysctl_sched_id sched_id;
//struct vki_xen_sysctl_perfc_op perfc_op;
struct vki_xen_sysctl_getdomaininfolist_00000008 getdomaininfolist_00000008;
@@ -192,6 +268,14 @@ struct vki_xen_sysctl {
struct vki_xen_sysctl_cpupool_op cpupool_op;
//struct vki_xen_sysctl_scheduler_op scheduler_op;
//struct vki_xen_sysctl_coverage_op coverage_op;
+ //struct vki_xen_sysctl_gcov_op gcov_op;
+ //struct vki_xen_sysctl_psr_cmt_op psr_cmt_op;
+ //struct vki_xen_sysctl_psr_cat_op psr_cat_op;
+ //struct vki_xen_sysctl_tmem_op tmem_op;
+ //struct vki_xen_sysctl_cpu_levelling_caps cpu_levelling_caps;
+ struct vki_xen_sysctl_cpu_featureset_0000000d cpu_featureset_0000000d;
+ //struct vki_xen_sysctl_livepatch_op livepatch;
+ //struct vki_xen_sysctl_set_parameter set_parameter;
vki_uint8_t pad[128];
} u;
Index: valgrind-3.13.0/include/vki/vki-xen-version.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen-version.h
+++ valgrind-3.13.0/include/vki/vki-xen-version.h
@@ -38,6 +38,7 @@
#define VKI_XENVER_pagesize 7
#define VKI_XENVER_guest_handle 8
#define VKI_XENVER_commandline 9
+#define VKI_XENVER_build_id 10
typedef char vki_xen_extraversion_t[16];
@@ -63,6 +64,16 @@ struct vki_xen_feature_info {
typedef char vki_xen_commandline_t[1024];
+struct vki_xen_build_id {
+ unsigned int len; /* IN: size of buf[]. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ unsigned char buf[];
+#elif defined(__GNUC__)
+ unsigned char buf[1]; /* OUT: Variable length buffer with build_id. */
+#endif
+
+};
+
#endif // __VKI_XEN_VERSION_H
/*--------------------------------------------------------------------*/
Index: valgrind-3.13.0/include/vki/vki-xen.h
===================================================================
--- valgrind-3.13.0.orig/include/vki/vki-xen.h
+++ valgrind-3.13.0/include/vki/vki-xen.h
@@ -69,6 +69,8 @@
#define __VKI_XEN_kexec_op 37
#define __VKI_XEN_tmem_op 38
#define __VKI_XEN_xc_reserved_op 39 /* reserved for XenClient */
+#define __VKI_XEN_xenpmu_op 40
+#define __VKI_XEN_dm_op 41
#define __DEFINE_VKI_XEN_GUEST_HANDLE(name, type) \
___DEFINE_VKI_XEN_GUEST_HANDLE(name, type); \
@@ -104,6 +106,7 @@ struct vki_xenctl_bitmap {
vki_uint32_t nr_bits;
};
+#include <vki/vki-xen-physdev.h>
#include <vki/vki-xen-domctl.h>
#include <vki/vki-xen-sysctl.h>
#include <vki/vki-xen-mmuext.h>
@@ -115,7 +118,6 @@ struct vki_xenctl_bitmap {
#include <vki/vki-xen-hvm.h>
#include <vki/vki-xen-tmem.h>
#include <vki/vki-xen-xsm.h>
-#include <vki/vki-xen-physdev.h>
#endif // __VKI_XEN_H