Compare commits
130 Commits
qdev-array
...
tdx-qemu-u
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d20f93da31 | ||
|
|
18f152401f | ||
|
|
5c8d76a6e4 | ||
|
|
f226ab5a34 | ||
|
|
e954cd4b22 | ||
|
|
2dbde0df24 | ||
|
|
9faa2b03a9 | ||
|
|
d04ea558aa | ||
|
|
378e61b145 | ||
|
|
fdd53e1a81 | ||
|
|
e1ce29aa33 | ||
|
|
ffe9a6dccb | ||
|
|
7163da7797 | ||
|
|
a5368c6202 | ||
|
|
f597afe309 | ||
|
|
a23e29229e | ||
|
|
addb6b676d | ||
|
|
824f58fdba | ||
|
|
9955e91601 | ||
|
|
80417c539e | ||
|
|
511b09205f | ||
|
|
85a8152691 | ||
|
|
08c095f67a | ||
|
|
d8da77238e | ||
|
|
ab857b85a5 | ||
|
|
517fb00637 | ||
|
|
359be44b10 | ||
|
|
0222b6f05d | ||
|
|
8b7763f7ca | ||
|
|
33edcc8426 | ||
|
|
19195c88df | ||
|
|
954b9e7a42 | ||
|
|
5aad9329d2 | ||
|
|
583aae3302 | ||
|
|
80af3f2547 | ||
|
|
ef90193248 | ||
|
|
769b8158f6 | ||
|
|
d7db7d2ea1 | ||
|
|
004db71f60 | ||
|
|
bbe50409ce | ||
|
|
2ac24a3f82 | ||
|
|
7671a8d293 | ||
|
|
5bf04c14d8 | ||
|
|
f503878704 | ||
|
|
98f599ec0b | ||
|
|
a1b994d89a | ||
|
|
04fc588ea9 | ||
|
|
7e454d2ca4 | ||
|
|
b4a0470949 | ||
|
|
5fdefc08b3 | ||
|
|
d54122cefb | ||
|
|
ef64621235 | ||
|
|
575bfcd358 | ||
|
|
38b04243ce | ||
|
|
c64b4c4e28 | ||
|
|
871c9f21ee | ||
|
|
3770c1f6cb | ||
|
|
636758fe40 | ||
|
|
c9636b4bf5 | ||
|
|
d39ad1ccfd | ||
|
|
552a4e57a8 | ||
|
|
105fa48cab | ||
|
|
9824769418 | ||
|
|
02fda3dd28 | ||
|
|
609e0ca1d8 | ||
|
|
d21132fe6e | ||
|
|
902dc0c4a7 | ||
|
|
7bd3bf6642 | ||
|
|
843bbbd03b | ||
|
|
3091ad45a5 | ||
|
|
8d635c6681 | ||
|
|
f5fd218755 | ||
|
|
d5591ff440 | ||
|
|
605d572f9c | ||
|
|
9c673a41ee | ||
|
|
6d44474b3b | ||
|
|
52105c6458 | ||
|
|
9f7c4f60cc | ||
|
|
3b06e4058d | ||
|
|
315088134f | ||
|
|
d12a91e0ba | ||
|
|
e72629e514 | ||
|
|
816644b121 | ||
|
|
04c0a003dd | ||
|
|
0b2af475e9 | ||
|
|
f88131d931 | ||
|
|
ec6f3fc3ef | ||
|
|
e274d2a777 | ||
|
|
8066102df1 | ||
|
|
fa71b4f84f | ||
|
|
451d993d58 | ||
|
|
17fe594c59 | ||
|
|
2a23f0f118 | ||
|
|
e722e5a112 | ||
|
|
576fc9376d | ||
|
|
e5d487c972 | ||
|
|
f6e8d1ef05 | ||
|
|
0034d0395e | ||
|
|
4c7ae73caf | ||
|
|
52c773ce89 | ||
|
|
4d044472ab | ||
|
|
fc58891d04 | ||
|
|
1d675e59ea | ||
|
|
f78ea7ddb0 | ||
|
|
05fa22770a | ||
|
|
c86a59fd34 | ||
|
|
616425d452 | ||
|
|
c96c116e10 | ||
|
|
4409a6d855 | ||
|
|
f9a19bd8d2 | ||
|
|
2e990d81d9 | ||
|
|
00ac955b06 | ||
|
|
d229996b40 | ||
|
|
7c7e1f6017 | ||
|
|
42c31682ba | ||
|
|
261c1281e8 | ||
|
|
4940da2096 | ||
|
|
8011b508cf | ||
|
|
0ab3565840 | ||
|
|
34aee9c946 | ||
|
|
5c24c3e2f3 | ||
|
|
c375f05ef5 | ||
|
|
6d133eef98 | ||
|
|
569205e4e9 | ||
|
|
364eff6885 | ||
|
|
d90014fc33 | ||
|
|
fe73674af1 | ||
|
|
6ab4f1c9e2 | ||
|
|
881d1073d0 | ||
|
|
69680740ea |
18
MAINTAINERS
18
MAINTAINERS
@@ -131,6 +131,17 @@ K: ^Subject:.*(?i)mips
|
||||
F: docs/system/target-mips.rst
|
||||
F: configs/targets/mips*
|
||||
|
||||
X86 general architecture support
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
S: Maintained
|
||||
F: configs/devices/i386-softmmu/default.mak
|
||||
F: configs/targets/i386-softmmu.mak
|
||||
F: configs/targets/x86_64-softmmu.mak
|
||||
F: docs/system/target-i386*
|
||||
F: target/i386/*.[ch]
|
||||
F: target/i386/Kconfig
|
||||
F: target/i386/meson.build
|
||||
|
||||
Guest CPU cores (TCG)
|
||||
---------------------
|
||||
Overall TCG CPUs
|
||||
@@ -657,6 +668,7 @@ F: include/hw/dma/pl080.h
|
||||
F: hw/dma/pl330.c
|
||||
F: hw/gpio/pl061.c
|
||||
F: hw/input/pl050.c
|
||||
F: include/hw/input/pl050.h
|
||||
F: hw/intc/pl190.c
|
||||
F: hw/sd/pl181.c
|
||||
F: hw/ssi/pl022.c
|
||||
@@ -927,6 +939,7 @@ F: hw/*/pxa2xx*
|
||||
F: hw/display/tc6393xb.c
|
||||
F: hw/gpio/max7310.c
|
||||
F: hw/gpio/zaurus.c
|
||||
F: hw/input/ads7846.c
|
||||
F: hw/misc/mst_fpga.c
|
||||
F: hw/adc/max111x.c
|
||||
F: include/hw/adc/max111x.h
|
||||
@@ -979,7 +992,9 @@ M: Peter Maydell <peter.maydell@linaro.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/*/stellaris*
|
||||
F: hw/display/ssd03*
|
||||
F: include/hw/input/gamepad.h
|
||||
F: include/hw/timer/stellaris-gptm.h
|
||||
F: docs/system/arm/stellaris.rst
|
||||
|
||||
STM32VLDISCOVERY
|
||||
@@ -994,6 +1009,7 @@ M: Peter Maydell <peter.maydell@linaro.org>
|
||||
L: qemu-arm@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/arm/vexpress.c
|
||||
F: hw/display/sii9022.c
|
||||
F: docs/system/arm/vexpress.rst
|
||||
|
||||
Versatile PB
|
||||
@@ -2241,7 +2257,7 @@ M: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
S: Supported
|
||||
F: hw/virtio/vhost-user-fs*
|
||||
F: include/hw/virtio/vhost-user-fs.h
|
||||
L: virtio-fs@redhat.com
|
||||
L: virtio-fs@lists.linux.dev
|
||||
|
||||
virtio-input
|
||||
M: Gerd Hoffmann <kraxel@redhat.com>
|
||||
|
||||
@@ -101,6 +101,8 @@ bool kvm_msi_use_devid;
|
||||
bool kvm_has_guest_debug;
|
||||
static int kvm_sstep_flags;
|
||||
static bool kvm_immediate_exit;
|
||||
static bool kvm_guest_memfd_supported;
|
||||
static uint64_t kvm_supported_memory_attributes;
|
||||
static hwaddr kvm_max_slot_size = ~0;
|
||||
|
||||
static const KVMCapabilityInfo kvm_required_capabilites[] = {
|
||||
@@ -292,34 +294,69 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
|
||||
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
|
||||
{
|
||||
KVMState *s = kvm_state;
|
||||
struct kvm_userspace_memory_region mem;
|
||||
struct kvm_userspace_memory_region2 mem;
|
||||
static int cap_user_memory2 = -1;
|
||||
int ret;
|
||||
|
||||
if (cap_user_memory2 == -1) {
|
||||
cap_user_memory2 = kvm_check_extension(s, KVM_CAP_USER_MEMORY2);
|
||||
}
|
||||
|
||||
if (!cap_user_memory2 && slot->guest_memfd >= 0) {
|
||||
error_report("%s, KVM doesn't support KVM_CAP_USER_MEMORY2,"
|
||||
" which is required by guest memfd!", __func__);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
mem.slot = slot->slot | (kml->as_id << 16);
|
||||
mem.guest_phys_addr = slot->start_addr;
|
||||
mem.userspace_addr = (unsigned long)slot->ram;
|
||||
mem.flags = slot->flags;
|
||||
mem.guest_memfd = slot->guest_memfd;
|
||||
mem.guest_memfd_offset = slot->guest_memfd_offset;
|
||||
|
||||
if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
|
||||
/* Set the slot size to 0 before setting the slot to the desired
|
||||
* value. This is needed based on KVM commit 75d61fbc. */
|
||||
mem.memory_size = 0;
|
||||
ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
|
||||
|
||||
if (cap_user_memory2) {
|
||||
ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
|
||||
} else {
|
||||
ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
|
||||
}
|
||||
if (ret < 0) {
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
mem.memory_size = slot->memory_size;
|
||||
ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
|
||||
if (cap_user_memory2) {
|
||||
ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
|
||||
} else {
|
||||
ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
|
||||
}
|
||||
slot->old_flags = mem.flags;
|
||||
err:
|
||||
trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
|
||||
mem.memory_size, mem.userspace_addr, ret);
|
||||
trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
|
||||
mem.guest_phys_addr, mem.memory_size,
|
||||
mem.userspace_addr, mem.guest_memfd,
|
||||
mem.guest_memfd_offset, ret);
|
||||
if (ret < 0) {
|
||||
error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
|
||||
" start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
|
||||
__func__, mem.slot, slot->start_addr,
|
||||
(uint64_t)mem.memory_size, strerror(errno));
|
||||
if (cap_user_memory2) {
|
||||
error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
|
||||
" start=0x%" PRIx64 ", size=0x%" PRIx64 ","
|
||||
" flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
|
||||
" guest_memfd_offset=0x%" PRIx64 ": %s",
|
||||
__func__, mem.slot, slot->start_addr,
|
||||
(uint64_t)mem.memory_size, mem.flags,
|
||||
mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
|
||||
strerror(errno));
|
||||
} else {
|
||||
error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
|
||||
" start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
|
||||
__func__, mem.slot, slot->start_addr,
|
||||
(uint64_t)mem.memory_size, strerror(errno));
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@@ -391,6 +428,11 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
|
||||
return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
|
||||
}
|
||||
|
||||
int __attribute__ ((weak)) kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_init_vcpu(CPUState *cpu, Error **errp)
|
||||
{
|
||||
KVMState *s = kvm_state;
|
||||
@@ -399,15 +441,27 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
|
||||
|
||||
trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
|
||||
|
||||
/*
|
||||
* tdx_pre_create_vcpu() may call cpu_x86_cpuid(). It in turn may call
|
||||
* kvm_vm_ioctl(). Set cpu->kvm_state in advance to avoid NULL pointer
|
||||
* dereference.
|
||||
*/
|
||||
cpu->kvm_state = s;
|
||||
ret = kvm_arch_pre_create_vcpu(cpu, errp);
|
||||
if (ret < 0) {
|
||||
cpu->kvm_state = NULL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
|
||||
if (ret < 0) {
|
||||
error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
|
||||
kvm_arch_vcpu_id(cpu));
|
||||
cpu->kvm_state = NULL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
cpu->kvm_fd = ret;
|
||||
cpu->kvm_state = s;
|
||||
cpu->vcpu_dirty = true;
|
||||
cpu->dirty_pages = 0;
|
||||
cpu->throttle_us_per_full = 0;
|
||||
@@ -475,6 +529,9 @@ static int kvm_mem_flags(MemoryRegion *mr)
|
||||
if (readonly && kvm_readonly_mem_allowed) {
|
||||
flags |= KVM_MEM_READONLY;
|
||||
}
|
||||
if (memory_region_has_guest_memfd(mr)) {
|
||||
flags |= KVM_MEM_PRIVATE;
|
||||
}
|
||||
return flags;
|
||||
}
|
||||
|
||||
@@ -1266,6 +1323,44 @@ void kvm_set_max_memslot_size(hwaddr max_slot_size)
|
||||
kvm_max_slot_size = max_slot_size;
|
||||
}
|
||||
|
||||
static int kvm_set_memory_attributes(hwaddr start, hwaddr size, uint64_t attr)
|
||||
{
|
||||
struct kvm_memory_attributes attrs;
|
||||
int r;
|
||||
|
||||
attrs.attributes = attr;
|
||||
attrs.address = start;
|
||||
attrs.size = size;
|
||||
attrs.flags = 0;
|
||||
|
||||
r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
|
||||
if (r) {
|
||||
warn_report("%s: failed to set memory (0x%lx+%#zx) with attr 0x%lx error '%s'",
|
||||
__func__, start, size, attr, strerror(errno));
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
int kvm_set_memory_attributes_private(hwaddr start, hwaddr size)
|
||||
{
|
||||
if (!(kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
|
||||
error_report("KVM doesn't support PRIVATE memory attribute\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
|
||||
}
|
||||
|
||||
int kvm_set_memory_attributes_shared(hwaddr start, hwaddr size)
|
||||
{
|
||||
if (!(kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
|
||||
error_report("KVM doesn't support PRIVATE memory attribute\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return kvm_set_memory_attributes(start, size, 0);
|
||||
}
|
||||
|
||||
/* Called with KVMMemoryListener.slots_lock held */
|
||||
static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
MemoryRegionSection *section, bool add)
|
||||
@@ -1362,6 +1457,9 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
mem->ram_start_offset = ram_start_offset;
|
||||
mem->ram = ram;
|
||||
mem->flags = kvm_mem_flags(mr);
|
||||
mem->guest_memfd = mr->ram_block->guest_memfd;
|
||||
mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;
|
||||
|
||||
kvm_slot_init_dirty_bitmap(mem);
|
||||
err = kvm_set_user_memory_region(kml, mem, true);
|
||||
if (err) {
|
||||
@@ -1369,6 +1467,16 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
|
||||
strerror(-err));
|
||||
abort();
|
||||
}
|
||||
|
||||
if (memory_region_is_default_private(mr)) {
|
||||
err = kvm_set_memory_attributes_private(start_addr, slot_size);
|
||||
if (err) {
|
||||
error_report("%s: failed to set memory attribute private: %s\n",
|
||||
__func__, strerror(-err));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
start_addr += slot_size;
|
||||
ram_start_offset += slot_size;
|
||||
ram += slot_size;
|
||||
@@ -2396,6 +2504,11 @@ static int kvm_init(MachineState *ms)
|
||||
}
|
||||
s->as = g_new0(struct KVMAs, s->nr_as);
|
||||
|
||||
kvm_guest_memfd_supported = kvm_check_extension(s, KVM_CAP_GUEST_MEMFD);
|
||||
|
||||
ret = kvm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES);
|
||||
kvm_supported_memory_attributes = ret > 0 ? ret : 0;
|
||||
|
||||
if (object_property_find(OBJECT(current_machine), "kvm-type")) {
|
||||
g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
|
||||
"kvm-type",
|
||||
@@ -2816,6 +2929,78 @@ static void kvm_eat_signals(CPUState *cpu)
|
||||
} while (sigismember(&chkset, SIG_IPI));
|
||||
}
|
||||
|
||||
int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
|
||||
{
|
||||
MemoryRegionSection section;
|
||||
ram_addr_t offset;
|
||||
MemoryRegion *mr;
|
||||
RAMBlock *rb;
|
||||
void *addr;
|
||||
int ret = -1;
|
||||
|
||||
trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared");
|
||||
section = memory_region_find(get_system_memory(), start, size);
|
||||
mr = section.mr;
|
||||
if (!mr) {
|
||||
/*
|
||||
* Ignore converting non-assigned region to shared.
|
||||
*
|
||||
* TDX requires vMMIO region to be shared to inject #VE to guest.
|
||||
* OVMF issues conservatively MapGPA(shared) on 32bit PCI MMIO region,
|
||||
* and vIO-APIC 0xFEC00000 4K page.
|
||||
* OVMF assigns 32bit PCI MMIO region to
|
||||
* [top of low memory: typically 2GB=0xC000000, 0xFC00000)
|
||||
*/
|
||||
if (!to_private) {
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (memory_region_has_guest_memfd(mr)) {
|
||||
if (to_private) {
|
||||
ret = kvm_set_memory_attributes_private(start, size);
|
||||
} else {
|
||||
ret = kvm_set_memory_attributes_shared(start, size);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
memory_region_unref(section.mr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
addr = memory_region_get_ram_ptr(section.mr) +
|
||||
section.offset_within_region;
|
||||
rb = qemu_ram_block_from_host(addr, false, &offset);
|
||||
/*
|
||||
* With KVM_SET_MEMORY_ATTRIBUTES by kvm_set_memory_attributes(),
|
||||
* operation on underlying file descriptor is only for releasing
|
||||
* unnecessary pages.
|
||||
*/
|
||||
ram_block_convert_range(rb, offset, size, to_private);
|
||||
} else {
|
||||
/*
|
||||
* Because vMMIO region must be shared, guest TD may convert vMMIO
|
||||
* region to shared explicitly. Don't complain such case. See
|
||||
* memory_region_type() for checking if the region is MMIO region.
|
||||
*/
|
||||
if (!to_private &&
|
||||
!memory_region_is_ram(mr) &&
|
||||
!memory_region_is_ram_device(mr) &&
|
||||
!memory_region_is_rom(mr) &&
|
||||
!memory_region_is_romd(mr)) {
|
||||
ret = 0;
|
||||
} else {
|
||||
warn_report("Convert non guest_memfd backed memory region "
|
||||
"(0x%"HWADDR_PRIx" ,+ 0x%"HWADDR_PRIx") to %s",
|
||||
start, size, to_private ? "private" : "shared");
|
||||
}
|
||||
}
|
||||
|
||||
memory_region_unref(section.mr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int kvm_cpu_exec(CPUState *cpu)
|
||||
{
|
||||
struct kvm_run *run = cpu->kvm_run;
|
||||
@@ -2883,18 +3068,20 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
ret = EXCP_INTERRUPT;
|
||||
break;
|
||||
}
|
||||
fprintf(stderr, "error: kvm run failed %s\n",
|
||||
strerror(-run_ret));
|
||||
if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
|
||||
fprintf(stderr, "error: kvm run failed %s\n",
|
||||
strerror(-run_ret));
|
||||
#ifdef TARGET_PPC
|
||||
if (run_ret == -EBUSY) {
|
||||
fprintf(stderr,
|
||||
"This is probably because your SMT is enabled.\n"
|
||||
"VCPU can only run on primary threads with all "
|
||||
"secondary threads offline.\n");
|
||||
}
|
||||
if (run_ret == -EBUSY) {
|
||||
fprintf(stderr,
|
||||
"This is probably because your SMT is enabled.\n"
|
||||
"VCPU can only run on primary threads with all "
|
||||
"secondary threads offline.\n");
|
||||
}
|
||||
#endif
|
||||
ret = -1;
|
||||
break;
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
|
||||
@@ -2981,6 +3168,16 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case KVM_EXIT_MEMORY_FAULT:
|
||||
if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) {
|
||||
error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64,
|
||||
(uint64_t)run->memory_fault.flags);
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size,
|
||||
run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
|
||||
break;
|
||||
default:
|
||||
DPRINTF("kvm_arch_handle_exit\n");
|
||||
ret = kvm_arch_handle_exit(cpu, run);
|
||||
@@ -4077,3 +4274,24 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
|
||||
query_stats_schema_vcpu(first_cpu, &stats_args);
|
||||
}
|
||||
}
|
||||
|
||||
int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
|
||||
{
|
||||
int fd;
|
||||
struct kvm_create_guest_memfd guest_memfd = {
|
||||
.size = size,
|
||||
.flags = flags,
|
||||
};
|
||||
|
||||
if (!kvm_guest_memfd_supported) {
|
||||
error_setg(errp, "KVM doesn't support guest memfd\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
|
||||
if (fd < 0) {
|
||||
error_setg_errno(errp, errno, "%s: error creating kvm guest memfd\n", __func__);
|
||||
}
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
|
||||
kvm_irqchip_release_virq(int virq) "virq %d"
|
||||
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
|
||||
kvm_set_user_memory(uint16_t as, uint16_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint32_t fd, uint64_t fd_offset, int ret) "AddrSpace#%d Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " guest_memfd=%d" " guest_memfd_offset=0x%" PRIx64 " ret=%d"
|
||||
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
|
||||
kvm_resample_fd_notify(int gsi) "gsi %d"
|
||||
kvm_dirty_ring_full(int id) "vcpu %d"
|
||||
@@ -25,4 +25,4 @@ kvm_dirty_ring_reaper(const char *s) "%s"
|
||||
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
|
||||
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
|
||||
kvm_dirty_ring_flush(int finished) "%d"
|
||||
|
||||
kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s"
|
||||
|
||||
@@ -84,6 +84,7 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
|
||||
ram_flags |= fb->readonly ? RAM_READONLY_FD : 0;
|
||||
ram_flags |= fb->rom == ON_OFF_AUTO_ON ? RAM_READONLY : 0;
|
||||
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
|
||||
ram_flags |= backend->require_guest_memfd ? RAM_GUEST_MEMFD : 0;
|
||||
ram_flags |= fb->is_pmem ? RAM_PMEM : 0;
|
||||
ram_flags |= RAM_NAMED_FILE;
|
||||
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
|
||||
|
||||
@@ -55,6 +55,7 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
|
||||
name = host_memory_backend_get_name(backend);
|
||||
ram_flags = backend->share ? RAM_SHARED : 0;
|
||||
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
|
||||
ram_flags |= backend->require_guest_memfd ? RAM_GUEST_MEMFD : 0;
|
||||
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
|
||||
backend->size, ram_flags, fd, 0, errp);
|
||||
g_free(name);
|
||||
|
||||
@@ -30,6 +30,7 @@ ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
|
||||
name = host_memory_backend_get_name(backend);
|
||||
ram_flags = backend->share ? RAM_SHARED : 0;
|
||||
ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
|
||||
ram_flags |= backend->require_guest_memfd ? RAM_GUEST_MEMFD : 0;
|
||||
memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), name,
|
||||
backend->size, ram_flags, errp);
|
||||
g_free(name);
|
||||
|
||||
@@ -279,6 +279,7 @@ static void host_memory_backend_init(Object *obj)
|
||||
/* TODO: convert access to globals to compat properties */
|
||||
backend->merge = machine_mem_merge(machine);
|
||||
backend->dump = machine_dump_guest_core(machine);
|
||||
backend->require_guest_memfd = machine_require_guest_memfd(machine);
|
||||
backend->reserve = true;
|
||||
backend->prealloc_threads = machine->smp.cpus;
|
||||
}
|
||||
|
||||
@@ -629,7 +629,6 @@ int bdrv_all_goto_snapshot(const char *name,
|
||||
while (iterbdrvs) {
|
||||
BlockDriverState *bs = iterbdrvs->data;
|
||||
AioContext *ctx = bdrv_get_aio_context(bs);
|
||||
int ret = 0;
|
||||
bool all_snapshots_includes_bs;
|
||||
|
||||
aio_context_acquire(ctx);
|
||||
@@ -637,9 +636,8 @@ int bdrv_all_goto_snapshot(const char *name,
|
||||
all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs);
|
||||
bdrv_graph_rdunlock_main_loop();
|
||||
|
||||
if (devices || all_snapshots_includes_bs) {
|
||||
ret = bdrv_snapshot_goto(bs, name, errp);
|
||||
}
|
||||
ret = (devices || all_snapshots_includes_bs) ?
|
||||
bdrv_snapshot_goto(bs, name, errp) : 0;
|
||||
aio_context_release(ctx);
|
||||
if (ret < 0) {
|
||||
bdrv_graph_rdlock_main_loop();
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#CONFIG_QXL=n
|
||||
#CONFIG_SEV=n
|
||||
#CONFIG_SGA=n
|
||||
#CONFIG_TDX=n
|
||||
#CONFIG_TEST_DEVICES=n
|
||||
#CONFIG_TPM_CRB=n
|
||||
#CONFIG_TPM_TIS_ISA=n
|
||||
|
||||
@@ -668,11 +668,11 @@ suppressing it. More information on the file format can be found here:
|
||||
|
||||
https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions
|
||||
|
||||
tests/tsan/blacklist.tsan - Has TSan warnings we wish to disable
|
||||
tests/tsan/ignore.tsan - Has TSan warnings we wish to disable
|
||||
at compile time for test or debug.
|
||||
Add flags to configure to enable:
|
||||
|
||||
"--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/blacklist.tsan"
|
||||
"--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/ignore.tsan"
|
||||
|
||||
More information on the file format can be found here under "Blacklist Format":
|
||||
|
||||
|
||||
@@ -515,7 +515,7 @@ class QAPIDocDirective(Directive):
|
||||
except QAPIError as err:
|
||||
# Launder QAPI parse errors into Sphinx extension errors
|
||||
# so they are displayed nicely to the user
|
||||
raise ExtensionError(str(err))
|
||||
raise ExtensionError(str(err)) from err
|
||||
|
||||
def do_parse(self, rstlist, node):
|
||||
"""Parse rST source lines and add them to the specified node
|
||||
|
||||
@@ -38,6 +38,7 @@ Supported mechanisms
|
||||
Currently supported confidential guest mechanisms are:
|
||||
|
||||
* AMD Secure Encrypted Virtualization (SEV) (see :doc:`i386/amd-memory-encryption`)
|
||||
* Intel Trust Domain Extension (TDX) (see :doc:`i386/tdx`)
|
||||
* POWER Protected Execution Facility (PEF) (see :ref:`power-papr-protected-execution-facility-pef`)
|
||||
* s390x Protected Virtualization (PV) (see :doc:`s390x/protvirt`)
|
||||
|
||||
|
||||
113
docs/system/i386/tdx.rst
Normal file
113
docs/system/i386/tdx.rst
Normal file
@@ -0,0 +1,113 @@
|
||||
Intel Trusted Domain eXtension (TDX)
|
||||
====================================
|
||||
|
||||
Intel Trusted Domain eXtensions (TDX) refers to an Intel technology that extends
|
||||
Virtual Machine Extensions (VMX) and Multi-Key Total Memory Encryption (MKTME)
|
||||
with a new kind of virtual machine guest called a Trust Domain (TD). A TD runs
|
||||
in a CPU mode that is designed to protect the confidentiality of its memory
|
||||
contents and its CPU state from any other software, including the hosting
|
||||
Virtual Machine Monitor (VMM), unless explicitly shared by the TD itself.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
To run TD, the physical machine needs to have TDX module loaded and initialized
|
||||
while KVM hypervisor has TDX support and has TDX enabled. If those requirements
|
||||
are met, the ``KVM_CAP_VM_TYPES`` will report the support of ``KVM_X86_TDX_VM``.
|
||||
|
||||
Trust Domain Virtual Firmware (TDVF)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Trust Domain Virtual Firmware (TDVF) is required to provide TD services to boot
|
||||
TD Guest OS. TDVF needs to be copied to guest private memory and measured before
|
||||
a TD boots.
|
||||
|
||||
The VM scope ``MEMORY_ENCRYPT_OP`` ioctl provides command ``KVM_TDX_INIT_MEM_REGION``
|
||||
to copy the TDVF image to TD's private memory space.
|
||||
|
||||
Since TDX doesn't support readonly memslot, TDVF cannot be mapped as pflash
|
||||
device and it actually works as RAM. "-bios" option is chosen to load TDVF.
|
||||
|
||||
OVMF is the opensource firmware that implements the TDVF support. Thus the
|
||||
command line to specify and load TDVF is ``-bios OVMF.fd``
|
||||
|
||||
KVM private gmem
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
TD's memory (RAM) needs to be able to be transformed between private and shared.
|
||||
And its BIOS (OVMF/TDVF) needs to be mapped as private. Thus QEMU needs to
|
||||
allocate private gmem for them via KVM's IOCTL (KVM_CREATE_GUEST_MEMFD), which
|
||||
requires KVM is newer enough that reports KVM_CAP_GUEST_MEMFD.
|
||||
|
||||
Feature Control
|
||||
---------------
|
||||
|
||||
Unlike non-TDX VM, the CPU features (enumerated by CPU or MSR) of a TD is not
|
||||
under full control of VMM. VMM can only configure part of features of a TD on
|
||||
``KVM_TDX_INIT_VM`` command of VM scope ``MEMORY_ENCRYPT_OP`` ioctl.
|
||||
|
||||
The configurable features have three types:
|
||||
|
||||
- Attributes:
|
||||
- PKS (bit 30) controls whether Supervisor Protection Keys is exposed to TD,
|
||||
which determines related CPUID bit and CR4 bit;
|
||||
- PERFMON (bit 63) controls whether PMU is exposed to TD.
|
||||
|
||||
- XSAVE related features (XFAM):
|
||||
XFAM is a 64b mask, which has the same format as XCR0 or IA32_XSS MSR. It
|
||||
determines the set of extended features available for use by the guest TD.
|
||||
|
||||
- CPUID features:
|
||||
Only some bits of some CPUID leaves are directly configurable by VMM.
|
||||
|
||||
What features can be configured is reported via TDX capabilities.
|
||||
|
||||
TDX capabilities
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
The VM scope ``MEMORY_ENCRYPT_OP`` ioctl provides command ``KVM_TDX_CAPABILITIES``
|
||||
to get the TDX capabilities from KVM. It returns a data structure of
|
||||
``struct kvm_tdx_capabilites``, which tells the supported configuration of
|
||||
attributes, XFAM and CPUIDs.
|
||||
|
||||
Launching a TD (TDX VM)
|
||||
-----------------------
|
||||
|
||||
To launch a TDX guest, below are new added and required:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
|qemu_system_x86| \\
|
||||
-object tdx-guest,id=tdx0 \\
|
||||
-machine ...,kernel-irqchip=split,confidential-guest-support=tdx0 \\
|
||||
-bios OVMF.fd \\
|
||||
|
||||
Debugging
|
||||
---------
|
||||
|
||||
Bit 0 of TD attributes, is DEBUG bit, which decides if the TD runs in off-TD
|
||||
debug mode. When in off-TD debug mode, TD's VCPU state and private memory are
|
||||
accessible via given SEAMCALLs. This requires KVM to expose APIs to invoke those
|
||||
SEAMCALLs and resonponding QEMU change.
|
||||
|
||||
It's targeted as future work.
|
||||
|
||||
restrictions
|
||||
------------
|
||||
|
||||
- kernel-irqchip must be split;
|
||||
|
||||
- No readonly support for private memory;
|
||||
|
||||
- No SMM support: SMM support requires manipulating the guset register states
|
||||
which is not allowed;
|
||||
|
||||
Live Migration
|
||||
--------------
|
||||
|
||||
TODO
|
||||
|
||||
References
|
||||
----------
|
||||
|
||||
- `TDX Homepage <https://www.intel.com/content/www/us/en/developer/articles/technical/intel-trust-domain-extensions.html>`__
|
||||
@@ -29,6 +29,7 @@ Architectural features
|
||||
i386/kvm-pv
|
||||
i386/sgx
|
||||
i386/amd-memory-encryption
|
||||
i386/tdx
|
||||
|
||||
OS requirements
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
@@ -96,6 +96,10 @@ uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
|
||||
|
||||
static int dump_cleanup(DumpState *s)
|
||||
{
|
||||
if (s->dump_info.arch_cleanup_fn) {
|
||||
s->dump_info.arch_cleanup_fn(s);
|
||||
}
|
||||
|
||||
guest_phys_blocks_free(&s->guest_phys_blocks);
|
||||
memory_mapping_list_free(&s->list);
|
||||
close(s->fd);
|
||||
|
||||
@@ -28,7 +28,7 @@ atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
static inline Int128 ATTRIBUTE_ATOMIC128_OPT
|
||||
atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
|
||||
{
|
||||
__int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
|
||||
Int128Aligned *ptr_align = __builtin_assume_aligned(ptr, 16);
|
||||
Int128Alias r, c, n;
|
||||
|
||||
c.s = cmp;
|
||||
|
||||
@@ -58,7 +58,7 @@ atomic16_read_rw(Int128 *ptr)
|
||||
static inline void ATTRIBUTE_ATOMIC128_OPT
|
||||
atomic16_set(Int128 *ptr, Int128 val)
|
||||
{
|
||||
__int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
|
||||
Int128Aligned *ptr_align = __builtin_assume_aligned(ptr, 16);
|
||||
__int128_t old;
|
||||
Int128Alias new;
|
||||
|
||||
|
||||
@@ -576,7 +576,8 @@ static void fdt_add_gic_node(VirtMachineState *vms)
|
||||
|
||||
if (vms->virt) {
|
||||
qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
|
||||
GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
|
||||
GIC_FDT_IRQ_TYPE_PPI,
|
||||
INTID_TO_PPI(ARCH_GIC_MAINT_IRQ),
|
||||
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
|
||||
}
|
||||
} else {
|
||||
@@ -600,7 +601,8 @@ static void fdt_add_gic_node(VirtMachineState *vms)
|
||||
2, vms->memmap[VIRT_GIC_VCPU].base,
|
||||
2, vms->memmap[VIRT_GIC_VCPU].size);
|
||||
qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
|
||||
GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
|
||||
GIC_FDT_IRQ_TYPE_PPI,
|
||||
INTID_TO_PPI(ARCH_GIC_MAINT_IRQ),
|
||||
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -670,8 +670,13 @@ static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel,
|
||||
cnt += (transferred + d->leftover) >> 2;
|
||||
|
||||
if (s->sctl & loop_sel) {
|
||||
/* Bah, how stupid is that having a 0 represent true value?
|
||||
i just spent few hours on this shit */
|
||||
/*
|
||||
* loop_sel tells us which bit in the SCTL register to look at
|
||||
* (either P1_LOOP_SEL, P2_LOOP_SEL or R1_LOOP_SEL). The sense
|
||||
* of these bits is 0 for loop mode (set interrupt and keep recording
|
||||
* when the sample count reaches zero) or 1 for stop mode (set
|
||||
* interrupt and stop recording).
|
||||
*/
|
||||
AUD_log ("es1370: warning", "non looping mode\n");
|
||||
} else {
|
||||
d->frame_cnt = size;
|
||||
|
||||
@@ -35,7 +35,8 @@
|
||||
GlobalProperty hw_compat_8_1[] = {
|
||||
{ TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" },
|
||||
{ "ramfb", "x-migrate", "off" },
|
||||
{ "vfio-pci-nohotplug", "x-ramfb-migrate", "off" }
|
||||
{ "vfio-pci-nohotplug", "x-ramfb-migrate", "off" },
|
||||
{ "igb", "x-pcie-flr-init", "off" },
|
||||
};
|
||||
const size_t hw_compat_8_1_len = G_N_ELEMENTS(hw_compat_8_1);
|
||||
|
||||
@@ -1188,6 +1189,11 @@ bool machine_mem_merge(MachineState *machine)
|
||||
return machine->mem_merge;
|
||||
}
|
||||
|
||||
bool machine_require_guest_memfd(MachineState *machine)
|
||||
{
|
||||
return machine->require_guest_memfd;
|
||||
}
|
||||
|
||||
static char *cpu_slot_to_string(const CPUArchId *cpu)
|
||||
{
|
||||
GString *s = g_string_new(NULL);
|
||||
|
||||
@@ -336,8 +336,8 @@ static inline bool vmsvga_verify_rect(DisplaySurface *surface,
|
||||
return false;
|
||||
}
|
||||
if (h > SVGA_MAX_HEIGHT) {
|
||||
trace_vmware_verify_rect_greater_than_bound(name, "y", SVGA_MAX_HEIGHT,
|
||||
y);
|
||||
trace_vmware_verify_rect_greater_than_bound(name, "h", SVGA_MAX_HEIGHT,
|
||||
h);
|
||||
return false;
|
||||
}
|
||||
if (y + h > surface_height(surface)) {
|
||||
|
||||
@@ -34,9 +34,10 @@
|
||||
#include "net/net.h"
|
||||
#include "qemu/log.h"
|
||||
|
||||
#define MIN_SEABIOS_HPPA_VERSION 10 /* require at least this fw version */
|
||||
#define MIN_SEABIOS_HPPA_VERSION 12 /* require at least this fw version */
|
||||
|
||||
#define HPA_POWER_BUTTON (FIRMWARE_END - 0x10)
|
||||
/* Power button address at &PAGE0->pad[4] */
|
||||
#define HPA_POWER_BUTTON (0x40 + 4 * sizeof(uint32_t))
|
||||
|
||||
#define enable_lasi_lan() 0
|
||||
|
||||
|
||||
@@ -10,6 +10,11 @@ config SGX
|
||||
bool
|
||||
depends on KVM
|
||||
|
||||
config TDX
|
||||
bool
|
||||
select X86_FW_OVMF
|
||||
depends on KVM
|
||||
|
||||
config PC
|
||||
bool
|
||||
imply APPLESMC
|
||||
@@ -26,6 +31,7 @@ config PC
|
||||
imply QXL
|
||||
imply SEV
|
||||
imply SGX
|
||||
imply TDX
|
||||
imply TEST_DEVICES
|
||||
imply TPM_CRB
|
||||
imply TPM_TIS_ISA
|
||||
|
||||
@@ -975,7 +975,8 @@ static void build_dbg_aml(Aml *table)
|
||||
aml_append(table, scope);
|
||||
}
|
||||
|
||||
static Aml *build_link_dev(const char *name, uint8_t uid, Aml *reg)
|
||||
static Aml *build_link_dev(const char *name, uint8_t uid, Aml *reg,
|
||||
bool level_trigger_unsupported)
|
||||
{
|
||||
Aml *dev;
|
||||
Aml *crs;
|
||||
@@ -987,7 +988,10 @@ static Aml *build_link_dev(const char *name, uint8_t uid, Aml *reg)
|
||||
aml_append(dev, aml_name_decl("_UID", aml_int(uid)));
|
||||
|
||||
crs = aml_resource_template();
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER,
|
||||
level_trigger_unsupported ?
|
||||
AML_EDGE : AML_LEVEL,
|
||||
AML_ACTIVE_HIGH,
|
||||
AML_SHARED, irqs, ARRAY_SIZE(irqs)));
|
||||
aml_append(dev, aml_name_decl("_PRS", crs));
|
||||
|
||||
@@ -1011,7 +1015,8 @@ static Aml *build_link_dev(const char *name, uint8_t uid, Aml *reg)
|
||||
return dev;
|
||||
}
|
||||
|
||||
static Aml *build_gsi_link_dev(const char *name, uint8_t uid, uint8_t gsi)
|
||||
static Aml *build_gsi_link_dev(const char *name, uint8_t uid,
|
||||
uint8_t gsi, bool level_trigger_unsupported)
|
||||
{
|
||||
Aml *dev;
|
||||
Aml *crs;
|
||||
@@ -1024,7 +1029,10 @@ static Aml *build_gsi_link_dev(const char *name, uint8_t uid, uint8_t gsi)
|
||||
|
||||
crs = aml_resource_template();
|
||||
irqs = gsi;
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER,
|
||||
level_trigger_unsupported ?
|
||||
AML_EDGE : AML_LEVEL,
|
||||
AML_ACTIVE_HIGH,
|
||||
AML_SHARED, &irqs, 1));
|
||||
aml_append(dev, aml_name_decl("_PRS", crs));
|
||||
|
||||
@@ -1043,7 +1051,7 @@ static Aml *build_gsi_link_dev(const char *name, uint8_t uid, uint8_t gsi)
|
||||
}
|
||||
|
||||
/* _CRS method - get current settings */
|
||||
static Aml *build_iqcr_method(bool is_piix4)
|
||||
static Aml *build_iqcr_method(bool is_piix4, bool level_trigger_unsupported)
|
||||
{
|
||||
Aml *if_ctx;
|
||||
uint32_t irqs;
|
||||
@@ -1051,7 +1059,9 @@ static Aml *build_iqcr_method(bool is_piix4)
|
||||
Aml *crs = aml_resource_template();
|
||||
|
||||
irqs = 0;
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL,
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER,
|
||||
level_trigger_unsupported ?
|
||||
AML_EDGE : AML_LEVEL,
|
||||
AML_ACTIVE_HIGH, AML_SHARED, &irqs, 1));
|
||||
aml_append(method, aml_name_decl("PRR0", crs));
|
||||
|
||||
@@ -1085,7 +1095,7 @@ static Aml *build_irq_status_method(void)
|
||||
return method;
|
||||
}
|
||||
|
||||
static void build_piix4_pci0_int(Aml *table)
|
||||
static void build_piix4_pci0_int(Aml *table, bool level_trigger_unsupported)
|
||||
{
|
||||
Aml *dev;
|
||||
Aml *crs;
|
||||
@@ -1098,12 +1108,16 @@ static void build_piix4_pci0_int(Aml *table)
|
||||
aml_append(sb_scope, pci0_scope);
|
||||
|
||||
aml_append(sb_scope, build_irq_status_method());
|
||||
aml_append(sb_scope, build_iqcr_method(true));
|
||||
aml_append(sb_scope, build_iqcr_method(true, level_trigger_unsupported));
|
||||
|
||||
aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQ0")));
|
||||
aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQ1")));
|
||||
aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQ2")));
|
||||
aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQ3")));
|
||||
aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQ0"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQ1"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQ2"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQ3"),
|
||||
level_trigger_unsupported));
|
||||
|
||||
dev = aml_device("LNKS");
|
||||
{
|
||||
@@ -1112,7 +1126,9 @@ static void build_piix4_pci0_int(Aml *table)
|
||||
|
||||
crs = aml_resource_template();
|
||||
irqs = 9;
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL,
|
||||
aml_append(crs, aml_interrupt(AML_CONSUMER,
|
||||
level_trigger_unsupported ?
|
||||
AML_EDGE : AML_LEVEL,
|
||||
AML_ACTIVE_HIGH, AML_SHARED,
|
||||
&irqs, 1));
|
||||
aml_append(dev, aml_name_decl("_PRS", crs));
|
||||
@@ -1198,7 +1214,7 @@ static Aml *build_q35_routing_table(const char *str)
|
||||
return pkg;
|
||||
}
|
||||
|
||||
static void build_q35_pci0_int(Aml *table)
|
||||
static void build_q35_pci0_int(Aml *table, bool level_trigger_unsupported)
|
||||
{
|
||||
Aml *method;
|
||||
Aml *sb_scope = aml_scope("_SB");
|
||||
@@ -1237,25 +1253,41 @@ static void build_q35_pci0_int(Aml *table)
|
||||
aml_append(sb_scope, pci0_scope);
|
||||
|
||||
aml_append(sb_scope, build_irq_status_method());
|
||||
aml_append(sb_scope, build_iqcr_method(false));
|
||||
aml_append(sb_scope, build_iqcr_method(false, level_trigger_unsupported));
|
||||
|
||||
aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQA")));
|
||||
aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQB")));
|
||||
aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQC")));
|
||||
aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQD")));
|
||||
aml_append(sb_scope, build_link_dev("LNKE", 4, aml_name("PRQE")));
|
||||
aml_append(sb_scope, build_link_dev("LNKF", 5, aml_name("PRQF")));
|
||||
aml_append(sb_scope, build_link_dev("LNKG", 6, aml_name("PRQG")));
|
||||
aml_append(sb_scope, build_link_dev("LNKH", 7, aml_name("PRQH")));
|
||||
aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQA"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQB"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQC"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQD"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKE", 4, aml_name("PRQE"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKF", 5, aml_name("PRQF"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKG", 6, aml_name("PRQG"),
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_link_dev("LNKH", 7, aml_name("PRQH"),
|
||||
level_trigger_unsupported));
|
||||
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIA", 0x10, 0x10));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIB", 0x11, 0x11));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIC", 0x12, 0x12));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSID", 0x13, 0x13));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIE", 0x14, 0x14));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIF", 0x15, 0x15));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIG", 0x16, 0x16));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIH", 0x17, 0x17));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIA", 0x10, 0x10,
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIB", 0x11, 0x11,
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIC", 0x12, 0x12,
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSID", 0x13, 0x13,
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIE", 0x14, 0x14,
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIF", 0x15, 0x15,
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIG", 0x16, 0x16,
|
||||
level_trigger_unsupported));
|
||||
aml_append(sb_scope, build_gsi_link_dev("GSIH", 0x17, 0x17,
|
||||
level_trigger_unsupported));
|
||||
|
||||
aml_append(table, sb_scope);
|
||||
}
|
||||
@@ -1436,6 +1468,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
|
||||
PCMachineState *pcms = PC_MACHINE(machine);
|
||||
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(machine);
|
||||
X86MachineState *x86ms = X86_MACHINE(machine);
|
||||
bool level_trigger_unsupported = x86ms->eoi_intercept_unsupported;
|
||||
AcpiMcfgInfo mcfg;
|
||||
bool mcfg_valid = !!acpi_get_mcfg(&mcfg);
|
||||
uint32_t nr_mem = machine->ram_slots;
|
||||
@@ -1468,7 +1501,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
|
||||
if (pm->pcihp_bridge_en || pm->pcihp_root_en) {
|
||||
build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base);
|
||||
}
|
||||
build_piix4_pci0_int(dsdt);
|
||||
build_piix4_pci0_int(dsdt, level_trigger_unsupported);
|
||||
} else if (q35) {
|
||||
sb_scope = aml_scope("_SB");
|
||||
dev = aml_device("PCI0");
|
||||
@@ -1512,7 +1545,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
|
||||
if (pm->pcihp_bridge_en) {
|
||||
build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base);
|
||||
}
|
||||
build_q35_pci0_int(dsdt);
|
||||
build_q35_pci0_int(dsdt, level_trigger_unsupported);
|
||||
}
|
||||
|
||||
if (misc->has_hpet) {
|
||||
|
||||
@@ -103,6 +103,7 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker,
|
||||
const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms));
|
||||
AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = oem_id,
|
||||
.oem_table_id = oem_table_id };
|
||||
bool level_trigger_unsupported = x86ms->eoi_intercept_unsupported;
|
||||
|
||||
acpi_table_begin(&table, table_data);
|
||||
/* Local APIC Address */
|
||||
@@ -122,18 +123,43 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker,
|
||||
IO_APIC_SECONDARY_ADDRESS, IO_APIC_SECONDARY_IRQBASE);
|
||||
}
|
||||
|
||||
if (x86ms->apic_xrupt_override) {
|
||||
build_xrupt_override(table_data, 0, 2,
|
||||
0 /* Flags: Conforms to the specifications of the bus */);
|
||||
}
|
||||
|
||||
for (i = 1; i < 16; i++) {
|
||||
if (!(x86ms->pci_irq_mask & (1 << i))) {
|
||||
/* No need for a INT source override structure. */
|
||||
continue;
|
||||
if (level_trigger_unsupported) {
|
||||
/* Force edge trigger */
|
||||
if (x86ms->apic_xrupt_override) {
|
||||
build_xrupt_override(table_data, 0, 2,
|
||||
/* Flags: active high, edge triggered */
|
||||
1 | (1 << 2));
|
||||
}
|
||||
|
||||
for (i = x86ms->apic_xrupt_override ? 1 : 0; i < 16; i++) {
|
||||
build_xrupt_override(table_data, i, i,
|
||||
/* Flags: active high, edge triggered */
|
||||
1 | (1 << 2));
|
||||
}
|
||||
|
||||
if (x86ms->ioapic2) {
|
||||
for (i = 0; i < 16; i++) {
|
||||
build_xrupt_override(table_data, IO_APIC_SECONDARY_IRQBASE + i,
|
||||
IO_APIC_SECONDARY_IRQBASE + i,
|
||||
/* Flags: active high, edge triggered */
|
||||
1 | (1 << 2));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (x86ms->apic_xrupt_override) {
|
||||
build_xrupt_override(table_data, 0, 2,
|
||||
0 /* Flags: Conforms to the specifications of the bus */);
|
||||
}
|
||||
|
||||
for (i = 1; i < 16; i++) {
|
||||
if (!(x86ms->pci_irq_mask & (1 << i))) {
|
||||
/* No need for a INT source override structure. */
|
||||
continue;
|
||||
}
|
||||
build_xrupt_override(table_data, i, i,
|
||||
0xd /* Flags: Active high, Level Triggered */);
|
||||
|
||||
}
|
||||
build_xrupt_override(table_data, i, i,
|
||||
0xd /* Flags: Active high, Level Triggered */);
|
||||
}
|
||||
|
||||
if (x2apic_mode) {
|
||||
|
||||
@@ -27,6 +27,7 @@ i386_ss.add(when: 'CONFIG_PC', if_true: files(
|
||||
'port92.c'))
|
||||
i386_ss.add(when: 'CONFIG_X86_FW_OVMF', if_true: files('pc_sysfw_ovmf.c'),
|
||||
if_false: files('pc_sysfw_ovmf-stubs.c'))
|
||||
i386_ss.add(when: 'CONFIG_TDX', if_true: files('tdvf.c', 'tdvf-hob.c'))
|
||||
|
||||
subdir('kvm')
|
||||
subdir('xen')
|
||||
|
||||
26
hw/i386/pc.c
26
hw/i386/pc.c
@@ -43,6 +43,7 @@
|
||||
#include "sysemu/xen.h"
|
||||
#include "sysemu/reset.h"
|
||||
#include "kvm/kvm_i386.h"
|
||||
#include "kvm/tdx.h"
|
||||
#include "hw/xen/xen.h"
|
||||
#include "qapi/qmp/qlist.h"
|
||||
#include "qemu/error-report.h"
|
||||
@@ -1036,16 +1037,18 @@ void pc_memory_init(PCMachineState *pcms,
|
||||
/* Initialize PC system firmware */
|
||||
pc_system_firmware_init(pcms, rom_memory);
|
||||
|
||||
option_rom_mr = g_malloc(sizeof(*option_rom_mr));
|
||||
memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
|
||||
&error_fatal);
|
||||
if (pcmc->pci_enabled) {
|
||||
memory_region_set_readonly(option_rom_mr, true);
|
||||
if (!is_tdx_vm()) {
|
||||
option_rom_mr = g_malloc(sizeof(*option_rom_mr));
|
||||
memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
|
||||
&error_fatal);
|
||||
if (pcmc->pci_enabled) {
|
||||
memory_region_set_readonly(option_rom_mr, true);
|
||||
}
|
||||
memory_region_add_subregion_overlap(rom_memory,
|
||||
PC_ROM_MIN_VGA,
|
||||
option_rom_mr,
|
||||
1);
|
||||
}
|
||||
memory_region_add_subregion_overlap(rom_memory,
|
||||
PC_ROM_MIN_VGA,
|
||||
option_rom_mr,
|
||||
1);
|
||||
|
||||
fw_cfg = fw_cfg_arch_create(machine,
|
||||
x86ms->boot_cpus, x86ms->apic_id_limit);
|
||||
@@ -1755,11 +1758,6 @@ static void pc_machine_initfn(Object *obj)
|
||||
cxl_machine_init(obj, &pcms->cxl_devices_state);
|
||||
}
|
||||
|
||||
int pc_machine_kvm_type(MachineState *machine, const char *kvm_type)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pc_machine_reset(MachineState *machine, ShutdownCause reason)
|
||||
{
|
||||
CPUState *cs;
|
||||
|
||||
@@ -236,6 +236,8 @@ static void pc_q35_init(MachineState *machine)
|
||||
x86ms->above_4g_mem_size, NULL);
|
||||
object_property_set_bool(phb, PCI_HOST_BYPASS_IOMMU,
|
||||
pcms->default_bus_bypass_iommu, NULL);
|
||||
object_property_set_bool(phb, PCI_HOST_PROP_SMM_RANGES,
|
||||
x86_machine_is_smm_enabled(x86ms), NULL);
|
||||
sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal);
|
||||
|
||||
/* pci */
|
||||
|
||||
@@ -37,6 +37,7 @@
|
||||
#include "hw/block/flash.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sev.h"
|
||||
#include "kvm/tdx.h"
|
||||
|
||||
#define FLASH_SECTOR_SIZE 4096
|
||||
|
||||
@@ -265,5 +266,11 @@ void x86_firmware_configure(void *ptr, int size)
|
||||
}
|
||||
|
||||
sev_encrypt_flash(ptr, size, &error_fatal);
|
||||
} else if (is_tdx_vm()) {
|
||||
ret = tdx_parse_tdvf(ptr, size);
|
||||
if (ret) {
|
||||
error_report("failed to parse TDVF for TDX VM");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
147
hw/i386/tdvf-hob.c
Normal file
147
hw/i386/tdvf-hob.c
Normal file
@@ -0,0 +1,147 @@
|
||||
/*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
* Copyright (c) 2020 Intel Corporation
|
||||
* Author: Isaku Yamahata <isaku.yamahata at gmail.com>
|
||||
* <isaku.yamahata at intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/log.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "e820_memory_layout.h"
|
||||
#include "hw/i386/pc.h"
|
||||
#include "hw/i386/x86.h"
|
||||
#include "hw/pci/pcie_host.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "standard-headers/uefi/uefi.h"
|
||||
#include "tdvf-hob.h"
|
||||
|
||||
typedef struct TdvfHob {
|
||||
hwaddr hob_addr;
|
||||
void *ptr;
|
||||
int size;
|
||||
|
||||
/* working area */
|
||||
void *current;
|
||||
void *end;
|
||||
} TdvfHob;
|
||||
|
||||
static uint64_t tdvf_current_guest_addr(const TdvfHob *hob)
|
||||
{
|
||||
return hob->hob_addr + (hob->current - hob->ptr);
|
||||
}
|
||||
|
||||
static void tdvf_align(TdvfHob *hob, size_t align)
|
||||
{
|
||||
hob->current = QEMU_ALIGN_PTR_UP(hob->current, align);
|
||||
}
|
||||
|
||||
static void *tdvf_get_area(TdvfHob *hob, uint64_t size)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
if (hob->current + size > hob->end) {
|
||||
error_report("TD_HOB overrun, size = 0x%" PRIx64, size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ret = hob->current;
|
||||
hob->current += size;
|
||||
tdvf_align(hob, 8);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void tdvf_hob_add_memory_resources(TdxGuest *tdx, TdvfHob *hob)
|
||||
{
|
||||
EFI_HOB_RESOURCE_DESCRIPTOR *region;
|
||||
EFI_RESOURCE_ATTRIBUTE_TYPE attr;
|
||||
EFI_RESOURCE_TYPE resource_type;
|
||||
|
||||
TdxRamEntry *e;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < tdx->nr_ram_entries; i++) {
|
||||
e = &tdx->ram_entries[i];
|
||||
|
||||
if (e->type == TDX_RAM_UNACCEPTED) {
|
||||
resource_type = EFI_RESOURCE_MEMORY_UNACCEPTED;
|
||||
attr = EFI_RESOURCE_ATTRIBUTE_TDVF_UNACCEPTED;
|
||||
} else if (e->type == TDX_RAM_ADDED){
|
||||
resource_type = EFI_RESOURCE_SYSTEM_MEMORY;
|
||||
attr = EFI_RESOURCE_ATTRIBUTE_TDVF_PRIVATE;
|
||||
} else {
|
||||
error_report("unknown TDX_RAM_ENTRY type %d", e->type);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
region = tdvf_get_area(hob, sizeof(*region));
|
||||
*region = (EFI_HOB_RESOURCE_DESCRIPTOR) {
|
||||
.Header = {
|
||||
.HobType = EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
|
||||
.HobLength = cpu_to_le16(sizeof(*region)),
|
||||
.Reserved = cpu_to_le32(0),
|
||||
},
|
||||
.Owner = EFI_HOB_OWNER_ZERO,
|
||||
.ResourceType = cpu_to_le32(resource_type),
|
||||
.ResourceAttribute = cpu_to_le32(attr),
|
||||
.PhysicalStart = cpu_to_le64(e->address),
|
||||
.ResourceLength = cpu_to_le64(e->length),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
void tdvf_hob_create(TdxGuest *tdx, TdxFirmwareEntry *td_hob)
|
||||
{
|
||||
TdvfHob hob = {
|
||||
.hob_addr = td_hob->address,
|
||||
.size = td_hob->size,
|
||||
.ptr = td_hob->mem_ptr,
|
||||
|
||||
.current = td_hob->mem_ptr,
|
||||
.end = td_hob->mem_ptr + td_hob->size,
|
||||
};
|
||||
|
||||
EFI_HOB_GENERIC_HEADER *last_hob;
|
||||
EFI_HOB_HANDOFF_INFO_TABLE *hit;
|
||||
|
||||
/* Note, Efi{Free}Memory{Bottom,Top} are ignored, leave 'em zeroed. */
|
||||
hit = tdvf_get_area(&hob, sizeof(*hit));
|
||||
*hit = (EFI_HOB_HANDOFF_INFO_TABLE) {
|
||||
.Header = {
|
||||
.HobType = EFI_HOB_TYPE_HANDOFF,
|
||||
.HobLength = cpu_to_le16(sizeof(*hit)),
|
||||
.Reserved = cpu_to_le32(0),
|
||||
},
|
||||
.Version = cpu_to_le32(EFI_HOB_HANDOFF_TABLE_VERSION),
|
||||
.BootMode = cpu_to_le32(0),
|
||||
.EfiMemoryTop = cpu_to_le64(0),
|
||||
.EfiMemoryBottom = cpu_to_le64(0),
|
||||
.EfiFreeMemoryTop = cpu_to_le64(0),
|
||||
.EfiFreeMemoryBottom = cpu_to_le64(0),
|
||||
.EfiEndOfHobList = cpu_to_le64(0), /* initialized later */
|
||||
};
|
||||
|
||||
tdvf_hob_add_memory_resources(tdx, &hob);
|
||||
|
||||
last_hob = tdvf_get_area(&hob, sizeof(*last_hob));
|
||||
*last_hob = (EFI_HOB_GENERIC_HEADER) {
|
||||
.HobType = EFI_HOB_TYPE_END_OF_HOB_LIST,
|
||||
.HobLength = cpu_to_le16(sizeof(*last_hob)),
|
||||
.Reserved = cpu_to_le32(0),
|
||||
};
|
||||
hit->EfiEndOfHobList = tdvf_current_guest_addr(&hob);
|
||||
}
|
||||
24
hw/i386/tdvf-hob.h
Normal file
24
hw/i386/tdvf-hob.h
Normal file
@@ -0,0 +1,24 @@
|
||||
#ifndef HW_I386_TD_HOB_H
|
||||
#define HW_I386_TD_HOB_H
|
||||
|
||||
#include "hw/i386/tdvf.h"
|
||||
#include "target/i386/kvm/tdx.h"
|
||||
|
||||
void tdvf_hob_create(TdxGuest *tdx, TdxFirmwareEntry *td_hob);
|
||||
|
||||
#define EFI_RESOURCE_ATTRIBUTE_TDVF_PRIVATE \
|
||||
(EFI_RESOURCE_ATTRIBUTE_PRESENT | \
|
||||
EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \
|
||||
EFI_RESOURCE_ATTRIBUTE_TESTED)
|
||||
|
||||
#define EFI_RESOURCE_ATTRIBUTE_TDVF_UNACCEPTED \
|
||||
(EFI_RESOURCE_ATTRIBUTE_PRESENT | \
|
||||
EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \
|
||||
EFI_RESOURCE_ATTRIBUTE_TESTED)
|
||||
|
||||
#define EFI_RESOURCE_ATTRIBUTE_TDVF_MMIO \
|
||||
(EFI_RESOURCE_ATTRIBUTE_PRESENT | \
|
||||
EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \
|
||||
EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE)
|
||||
|
||||
#endif
|
||||
200
hw/i386/tdvf.c
Normal file
200
hw/i386/tdvf.c
Normal file
@@ -0,0 +1,200 @@
|
||||
/*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
* Copyright (c) 2020 Intel Corporation
|
||||
* Author: Isaku Yamahata <isaku.yamahata at gmail.com>
|
||||
* <isaku.yamahata at intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
||||
#include "hw/i386/pc.h"
|
||||
#include "hw/i386/tdvf.h"
|
||||
#include "sysemu/kvm.h"
|
||||
|
||||
#define TDX_METADATA_OFFSET_GUID "e47a6535-984a-4798-865e-4685a7bf8ec2"
|
||||
#define TDX_METADATA_VERSION 1
|
||||
#define TDVF_SIGNATURE 0x46564454 /* TDVF as little endian */
|
||||
|
||||
typedef struct {
|
||||
uint32_t DataOffset;
|
||||
uint32_t RawDataSize;
|
||||
uint64_t MemoryAddress;
|
||||
uint64_t MemoryDataSize;
|
||||
uint32_t Type;
|
||||
uint32_t Attributes;
|
||||
} TdvfSectionEntry;
|
||||
|
||||
typedef struct {
|
||||
uint32_t Signature;
|
||||
uint32_t Length;
|
||||
uint32_t Version;
|
||||
uint32_t NumberOfSectionEntries;
|
||||
TdvfSectionEntry SectionEntries[];
|
||||
} TdvfMetadata;
|
||||
|
||||
struct tdx_metadata_offset {
|
||||
uint32_t offset;
|
||||
};
|
||||
|
||||
static TdvfMetadata *tdvf_get_metadata(void *flash_ptr, int size)
|
||||
{
|
||||
TdvfMetadata *metadata;
|
||||
uint32_t offset = 0;
|
||||
uint8_t *data;
|
||||
|
||||
if ((uint32_t) size != size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (pc_system_ovmf_table_find(TDX_METADATA_OFFSET_GUID, &data, NULL)) {
|
||||
offset = size - le32_to_cpu(((struct tdx_metadata_offset *)data)->offset);
|
||||
|
||||
if (offset + sizeof(*metadata) > size) {
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
error_report("Cannot find TDX_METADATA_OFFSET_GUID");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
metadata = flash_ptr + offset;
|
||||
|
||||
/* Finally, verify the signature to determine if this is a TDVF image. */
|
||||
metadata->Signature = le32_to_cpu(metadata->Signature);
|
||||
if (metadata->Signature != TDVF_SIGNATURE) {
|
||||
error_report("Invalid TDVF signature in metadata!");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Sanity check that the TDVF doesn't overlap its own metadata. */
|
||||
metadata->Length = le32_to_cpu(metadata->Length);
|
||||
if (offset + metadata->Length > size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Only version 1 is supported/defined. */
|
||||
metadata->Version = le32_to_cpu(metadata->Version);
|
||||
if (metadata->Version != TDX_METADATA_VERSION) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return metadata;
|
||||
}
|
||||
|
||||
static int tdvf_parse_and_check_section_entry(const TdvfSectionEntry *src,
|
||||
TdxFirmwareEntry *entry)
|
||||
{
|
||||
entry->data_offset = le32_to_cpu(src->DataOffset);
|
||||
entry->data_len = le32_to_cpu(src->RawDataSize);
|
||||
entry->address = le64_to_cpu(src->MemoryAddress);
|
||||
entry->size = le64_to_cpu(src->MemoryDataSize);
|
||||
entry->type = le32_to_cpu(src->Type);
|
||||
entry->attributes = le32_to_cpu(src->Attributes);
|
||||
|
||||
/* sanity check */
|
||||
if (entry->size < entry->data_len) {
|
||||
error_report("Broken metadata RawDataSize 0x%x MemoryDataSize 0x%lx",
|
||||
entry->data_len, entry->size);
|
||||
return -1;
|
||||
}
|
||||
if (!QEMU_IS_ALIGNED(entry->address, TARGET_PAGE_SIZE)) {
|
||||
error_report("MemoryAddress 0x%lx not page aligned", entry->address);
|
||||
return -1;
|
||||
}
|
||||
if (!QEMU_IS_ALIGNED(entry->size, TARGET_PAGE_SIZE)) {
|
||||
error_report("MemoryDataSize 0x%lx not page aligned", entry->size);
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch (entry->type) {
|
||||
case TDVF_SECTION_TYPE_BFV:
|
||||
case TDVF_SECTION_TYPE_CFV:
|
||||
/* The sections that must be copied from firmware image to TD memory */
|
||||
if (entry->data_len == 0) {
|
||||
error_report("%d section with RawDataSize == 0", entry->type);
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case TDVF_SECTION_TYPE_TD_HOB:
|
||||
case TDVF_SECTION_TYPE_TEMP_MEM:
|
||||
/* The sections that no need to be copied from firmware image */
|
||||
if (entry->data_len != 0) {
|
||||
error_report("%d section with RawDataSize 0x%x != 0",
|
||||
entry->type, entry->data_len);
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
error_report("TDVF contains unsupported section type %d", entry->type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tdvf_parse_metadata(TdxFirmware *fw, void *flash_ptr, int size)
|
||||
{
|
||||
TdvfSectionEntry *sections;
|
||||
TdvfMetadata *metadata;
|
||||
ssize_t entries_size;
|
||||
uint32_t len, i;
|
||||
|
||||
metadata = tdvf_get_metadata(flash_ptr, size);
|
||||
if (!metadata) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
//load and parse metadata entries
|
||||
fw->nr_entries = le32_to_cpu(metadata->NumberOfSectionEntries);
|
||||
if (fw->nr_entries < 2) {
|
||||
error_report("Invalid number of fw entries (%u) in TDVF", fw->nr_entries);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
len = le32_to_cpu(metadata->Length);
|
||||
entries_size = fw->nr_entries * sizeof(TdvfSectionEntry);
|
||||
if (len != sizeof(*metadata) + entries_size) {
|
||||
error_report("TDVF metadata len (0x%x) mismatch, expected (0x%x)",
|
||||
len, (uint32_t)(sizeof(*metadata) + entries_size));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
fw->entries = g_new(TdxFirmwareEntry, fw->nr_entries);
|
||||
sections = g_new(TdvfSectionEntry, fw->nr_entries);
|
||||
|
||||
if (!memcpy(sections, (void *)metadata + sizeof(*metadata), entries_size)) {
|
||||
error_report("Failed to read TDVF section entries");
|
||||
goto err;
|
||||
}
|
||||
|
||||
for (i = 0; i < fw->nr_entries; i++) {
|
||||
if (tdvf_parse_and_check_section_entry(§ions[i], &fw->entries[i])) {
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
g_free(sections);
|
||||
|
||||
fw->mem_ptr = flash_ptr;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
g_free(sections);
|
||||
fw->entries = 0;
|
||||
g_free(fw->entries);
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -47,6 +47,7 @@
|
||||
#include "hw/intc/i8259.h"
|
||||
#include "hw/rtc/mc146818rtc.h"
|
||||
#include "target/i386/sev.h"
|
||||
#include "kvm/tdx.h"
|
||||
|
||||
#include "hw/acpi/cpu_hotplug.h"
|
||||
#include "hw/irq.h"
|
||||
@@ -1145,9 +1146,17 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
|
||||
(bios_size % 65536) != 0) {
|
||||
goto bios_error;
|
||||
}
|
||||
|
||||
bios = g_malloc(sizeof(*bios));
|
||||
memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal);
|
||||
if (sev_enabled()) {
|
||||
if (is_tdx_vm()) {
|
||||
memory_region_init_ram_guest_memfd(bios, NULL, "pc.bios", bios_size,
|
||||
&error_fatal);
|
||||
tdx_set_tdvf_region(bios);
|
||||
} else {
|
||||
memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal);
|
||||
}
|
||||
|
||||
if (sev_enabled() || is_tdx_vm()) {
|
||||
/*
|
||||
* The concept of a "reset" simply doesn't exist for
|
||||
* confidential computing guests, we have to destroy and
|
||||
@@ -1169,17 +1178,20 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
|
||||
}
|
||||
g_free(filename);
|
||||
|
||||
/* map the last 128KB of the BIOS in ISA space */
|
||||
isa_bios_size = MIN(bios_size, 128 * KiB);
|
||||
isa_bios = g_malloc(sizeof(*isa_bios));
|
||||
memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
|
||||
bios_size - isa_bios_size, isa_bios_size);
|
||||
memory_region_add_subregion_overlap(rom_memory,
|
||||
0x100000 - isa_bios_size,
|
||||
isa_bios,
|
||||
1);
|
||||
if (!isapc_ram_fw) {
|
||||
memory_region_set_readonly(isa_bios, true);
|
||||
/* For TDX, alias different GPAs to same private memory is not supported */
|
||||
if (!is_tdx_vm()) {
|
||||
/* map the last 128KB of the BIOS in ISA space */
|
||||
isa_bios_size = MIN(bios_size, 128 * KiB);
|
||||
isa_bios = g_malloc(sizeof(*isa_bios));
|
||||
memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
|
||||
bios_size - isa_bios_size, isa_bios_size);
|
||||
memory_region_add_subregion_overlap(rom_memory,
|
||||
0x100000 - isa_bios_size,
|
||||
isa_bios,
|
||||
1);
|
||||
if (!isapc_ram_fw) {
|
||||
memory_region_set_readonly(isa_bios, true);
|
||||
}
|
||||
}
|
||||
|
||||
/* map all the bios at the top of memory */
|
||||
@@ -1377,6 +1389,17 @@ static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name,
|
||||
qapi_free_SgxEPCList(list);
|
||||
}
|
||||
|
||||
static int x86_kvm_type(MachineState *ms, const char *vm_type)
|
||||
{
|
||||
X86MachineState *x86ms = X86_MACHINE(ms);
|
||||
int kvm_type;
|
||||
|
||||
kvm_type = kvm_get_vm_type(ms, vm_type);
|
||||
x86ms->vm_type = kvm_type;
|
||||
|
||||
return kvm_type;
|
||||
}
|
||||
|
||||
static void x86_machine_initfn(Object *obj)
|
||||
{
|
||||
X86MachineState *x86ms = X86_MACHINE(obj);
|
||||
@@ -1390,6 +1413,7 @@ static void x86_machine_initfn(Object *obj)
|
||||
x86ms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
|
||||
x86ms->bus_lock_ratelimit = 0;
|
||||
x86ms->above_4g_mem_start = 4 * GiB;
|
||||
x86ms->eoi_intercept_unsupported = false;
|
||||
}
|
||||
|
||||
static void x86_machine_class_init(ObjectClass *oc, void *data)
|
||||
@@ -1401,6 +1425,7 @@ static void x86_machine_class_init(ObjectClass *oc, void *data)
|
||||
mc->cpu_index_to_instance_props = x86_cpu_index_to_props;
|
||||
mc->get_default_cpu_node_id = x86_get_default_cpu_node_id;
|
||||
mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids;
|
||||
mc->kvm_type = x86_kvm_type;
|
||||
x86mc->save_tsc_khz = true;
|
||||
x86mc->fwcfg_dma_enabled = true;
|
||||
nc->nmi_monitor_handler = x86_nmi;
|
||||
|
||||
@@ -46,6 +46,7 @@ config LOONGSON3V
|
||||
select PCI_EXPRESS_GENERIC_BRIDGE
|
||||
select MSI_NONBROKEN
|
||||
select FW_CFG_MIPS
|
||||
select UNIMP
|
||||
|
||||
config MIPS_CPS
|
||||
bool
|
||||
|
||||
15
hw/net/igb.c
15
hw/net/igb.c
@@ -78,6 +78,7 @@ struct IGBState {
|
||||
uint32_t ioaddr;
|
||||
|
||||
IGBCore core;
|
||||
bool has_flr;
|
||||
};
|
||||
|
||||
#define IGB_CAP_SRIOV_OFFSET (0x160)
|
||||
@@ -101,6 +102,9 @@ static void igb_write_config(PCIDevice *dev, uint32_t addr,
|
||||
|
||||
trace_igb_write_config(addr, val, len);
|
||||
pci_default_write_config(dev, addr, val, len);
|
||||
if (s->has_flr) {
|
||||
pcie_cap_flr_write_config(dev, addr, val, len);
|
||||
}
|
||||
|
||||
if (range_covers_byte(addr, len, PCI_COMMAND) &&
|
||||
(dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
|
||||
@@ -122,6 +126,12 @@ igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
|
||||
igb_core_write(&s->core, addr, val, size);
|
||||
}
|
||||
|
||||
void igb_vf_reset(void *opaque, uint16_t vfn)
|
||||
{
|
||||
IGBState *s = opaque;
|
||||
igb_core_vf_reset(&s->core, vfn);
|
||||
}
|
||||
|
||||
static bool
|
||||
igb_io_get_reg_index(IGBState *s, uint32_t *idx)
|
||||
{
|
||||
@@ -427,6 +437,10 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)
|
||||
}
|
||||
|
||||
/* PCIe extended capabilities (in order) */
|
||||
if (s->has_flr) {
|
||||
pcie_cap_flr_init(pci_dev);
|
||||
}
|
||||
|
||||
if (pcie_aer_init(pci_dev, 1, 0x100, 0x40, errp) < 0) {
|
||||
hw_error("Failed to initialize AER capability");
|
||||
}
|
||||
@@ -582,6 +596,7 @@ static const VMStateDescription igb_vmstate = {
|
||||
|
||||
static Property igb_properties[] = {
|
||||
DEFINE_NIC_PROPERTIES(IGBState, conf),
|
||||
DEFINE_PROP_BOOL("x-pcie-flr-init", IGBState, has_flr, true),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
|
||||
|
||||
@@ -152,5 +152,6 @@ enum {
|
||||
|
||||
uint64_t igb_mmio_read(void *opaque, hwaddr addr, unsigned size);
|
||||
void igb_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size);
|
||||
void igb_vf_reset(void *opaque, uint16_t vfn);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -2477,11 +2477,13 @@ static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
|
||||
}
|
||||
}
|
||||
|
||||
static void igb_vf_reset(IGBCore *core, uint16_t vfn)
|
||||
void igb_core_vf_reset(IGBCore *core, uint16_t vfn)
|
||||
{
|
||||
uint16_t qn0 = vfn;
|
||||
uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;
|
||||
|
||||
trace_igb_core_vf_reset(vfn);
|
||||
|
||||
/* disable Rx and Tx for the VF*/
|
||||
core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
|
||||
core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
|
||||
@@ -2560,7 +2562,7 @@ static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
|
||||
|
||||
if (val & E1000_CTRL_RST) {
|
||||
vfn = (index - PVTCTRL0) / 0x40;
|
||||
igb_vf_reset(core, vfn);
|
||||
igb_core_vf_reset(core, vfn);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -130,6 +130,9 @@ igb_core_set_link_status(IGBCore *core);
|
||||
void
|
||||
igb_core_pci_uninit(IGBCore *core);
|
||||
|
||||
void
|
||||
igb_core_vf_reset(IGBCore *core, uint16_t vfn);
|
||||
|
||||
bool
|
||||
igb_can_receive(IGBCore *core);
|
||||
|
||||
|
||||
@@ -204,6 +204,10 @@ static void igbvf_write_config(PCIDevice *dev, uint32_t addr, uint32_t val,
|
||||
{
|
||||
trace_igbvf_write_config(addr, val, len);
|
||||
pci_default_write_config(dev, addr, val, len);
|
||||
if (object_property_get_bool(OBJECT(pcie_sriov_get_pf(dev)),
|
||||
"x-pcie-flr-init", &error_abort)) {
|
||||
pcie_cap_flr_write_config(dev, addr, val, len);
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t igbvf_mmio_read(void *opaque, hwaddr addr, unsigned size)
|
||||
@@ -266,6 +270,11 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
|
||||
hw_error("Failed to initialize PCIe capability");
|
||||
}
|
||||
|
||||
if (object_property_get_bool(OBJECT(pcie_sriov_get_pf(dev)),
|
||||
"x-pcie-flr-init", &error_abort)) {
|
||||
pcie_cap_flr_init(dev);
|
||||
}
|
||||
|
||||
if (pcie_aer_init(dev, 1, 0x100, 0x40, errp) < 0) {
|
||||
hw_error("Failed to initialize AER capability");
|
||||
}
|
||||
@@ -273,6 +282,13 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp)
|
||||
pcie_ari_init(dev, 0x150);
|
||||
}
|
||||
|
||||
static void igbvf_qdev_reset_hold(Object *obj)
|
||||
{
|
||||
PCIDevice *vf = PCI_DEVICE(obj);
|
||||
|
||||
igb_vf_reset(pcie_sriov_get_pf(vf), pcie_sriov_vf_number(vf));
|
||||
}
|
||||
|
||||
static void igbvf_pci_uninit(PCIDevice *dev)
|
||||
{
|
||||
IgbVfState *s = IGBVF(dev);
|
||||
@@ -287,6 +303,7 @@ static void igbvf_class_init(ObjectClass *class, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(class);
|
||||
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
|
||||
ResettableClass *rc = RESETTABLE_CLASS(class);
|
||||
|
||||
c->realize = igbvf_pci_realize;
|
||||
c->exit = igbvf_pci_uninit;
|
||||
@@ -295,6 +312,8 @@ static void igbvf_class_init(ObjectClass *class, void *data)
|
||||
c->revision = 1;
|
||||
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
|
||||
|
||||
rc->phases.hold = igbvf_qdev_reset_hold;
|
||||
|
||||
dc->desc = "Intel 82576 Virtual Function";
|
||||
dc->user_creatable = false;
|
||||
|
||||
|
||||
@@ -274,6 +274,7 @@ igb_core_mdic_read(uint32_t addr, uint32_t data) "MDIC READ: PHY[%u] = 0x%x"
|
||||
igb_core_mdic_read_unhandled(uint32_t addr) "MDIC READ: PHY[%u] UNHANDLED"
|
||||
igb_core_mdic_write(uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u] = 0x%x"
|
||||
igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED"
|
||||
igb_core_vf_reset(uint16_t vfn) "VF%d"
|
||||
|
||||
igb_link_set_ext_params(bool asd_check, bool speed_select_bypass, bool pfrstd) "Set extended link params: ASD check: %d, Speed select bypass: %d, PF reset done: %d"
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include "hw/pci-host/astro.h"
|
||||
#include "hw/hppa/hppa_hardware.h"
|
||||
#include "migration/vmstate.h"
|
||||
#include "target/hppa/cpu.h"
|
||||
#include "trace.h"
|
||||
#include "qom/object.h"
|
||||
|
||||
@@ -268,22 +269,6 @@ static const MemoryRegionOps elroy_config_addr_ops = {
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* A subroutine of astro_translate_iommu that builds an IOMMUTLBEntry using the
|
||||
* given translated address and mask.
|
||||
*/
|
||||
static bool make_iommu_tlbe(hwaddr addr, hwaddr taddr, hwaddr mask,
|
||||
IOMMUTLBEntry *ret)
|
||||
{
|
||||
hwaddr tce_mask = ~((1ull << 12) - 1);
|
||||
ret->target_as = &address_space_memory;
|
||||
ret->iova = addr & tce_mask;
|
||||
ret->translated_addr = taddr & tce_mask;
|
||||
ret->addr_mask = ~tce_mask;
|
||||
ret->perm = IOMMU_RW;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Handle PCI-to-system address translation. */
|
||||
static IOMMUTLBEntry astro_translate_iommu(IOMMUMemoryRegion *iommu,
|
||||
hwaddr addr,
|
||||
@@ -291,53 +276,59 @@ static IOMMUTLBEntry astro_translate_iommu(IOMMUMemoryRegion *iommu,
|
||||
int iommu_idx)
|
||||
{
|
||||
AstroState *s = container_of(iommu, AstroState, iommu);
|
||||
IOMMUTLBEntry ret = {
|
||||
.target_as = &address_space_memory,
|
||||
.iova = addr,
|
||||
.translated_addr = 0,
|
||||
.addr_mask = ~(hwaddr)0,
|
||||
.perm = IOMMU_NONE,
|
||||
};
|
||||
hwaddr pdir_ptr, index, a, ibase;
|
||||
hwaddr pdir_ptr, index, ibase;
|
||||
hwaddr addr_mask = 0xfff; /* 4k translation */
|
||||
uint64_t entry;
|
||||
|
||||
#define IOVP_SHIFT 12 /* equals PAGE_SHIFT */
|
||||
#define PDIR_INDEX(iovp) ((iovp) >> IOVP_SHIFT)
|
||||
#define IOVP_MASK PAGE_MASK
|
||||
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
|
||||
|
||||
addr &= ~addr_mask;
|
||||
|
||||
/*
|
||||
* Default translation: "32-bit PCI Addressing on 40-bit Runway".
|
||||
* For addresses in the 32-bit memory address range ... and then
|
||||
* language which not-coincidentally matches the PSW.W=0 mapping.
|
||||
*/
|
||||
if (addr <= UINT32_MAX) {
|
||||
entry = hppa_abs_to_phys_pa2_w0(addr);
|
||||
} else {
|
||||
entry = addr;
|
||||
}
|
||||
|
||||
/* "range enable" flag cleared? */
|
||||
if ((s->tlb_ibase & 1) == 0) {
|
||||
make_iommu_tlbe(addr, addr, addr_mask, &ret);
|
||||
return ret;
|
||||
goto skip;
|
||||
}
|
||||
|
||||
a = addr;
|
||||
ibase = s->tlb_ibase & ~1ULL;
|
||||
if ((a & s->tlb_imask) != ibase) {
|
||||
if ((addr & s->tlb_imask) != ibase) {
|
||||
/* do not translate this one! */
|
||||
make_iommu_tlbe(addr, addr, addr_mask, &ret);
|
||||
return ret;
|
||||
goto skip;
|
||||
}
|
||||
index = PDIR_INDEX(a);
|
||||
|
||||
index = PDIR_INDEX(addr);
|
||||
pdir_ptr = s->tlb_pdir_base + index * sizeof(entry);
|
||||
entry = ldq_le_phys(&address_space_memory, pdir_ptr);
|
||||
|
||||
if (!(entry & SBA_PDIR_VALID_BIT)) { /* I/O PDIR entry valid ? */
|
||||
g_assert_not_reached();
|
||||
goto failure;
|
||||
/* failure */
|
||||
return (IOMMUTLBEntry) { .perm = IOMMU_NONE };
|
||||
}
|
||||
|
||||
entry &= ~SBA_PDIR_VALID_BIT;
|
||||
entry >>= IOVP_SHIFT;
|
||||
entry <<= 12;
|
||||
entry |= addr & 0xfff;
|
||||
make_iommu_tlbe(addr, entry, addr_mask, &ret);
|
||||
goto success;
|
||||
|
||||
failure:
|
||||
ret = (IOMMUTLBEntry) { .perm = IOMMU_NONE };
|
||||
success:
|
||||
return ret;
|
||||
skip:
|
||||
return (IOMMUTLBEntry) {
|
||||
.target_as = &address_space_memory,
|
||||
.iova = addr,
|
||||
.translated_addr = entry,
|
||||
.addr_mask = addr_mask,
|
||||
.perm = IOMMU_RW,
|
||||
};
|
||||
}
|
||||
|
||||
static AddressSpace *elroy_pcihost_set_iommu(PCIBus *bus, void *opaque,
|
||||
|
||||
@@ -29,7 +29,7 @@ pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c'))
|
||||
pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c'))
|
||||
|
||||
# HPPA devices
|
||||
pci_ss.add(when: 'CONFIG_ASTRO', if_true: files('astro.c'))
|
||||
specific_ss.add(when: 'CONFIG_ASTRO', if_true: files('astro.c'))
|
||||
pci_ss.add(when: 'CONFIG_DINO', if_true: files('dino.c'))
|
||||
|
||||
system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss)
|
||||
|
||||
@@ -179,6 +179,8 @@ static Property q35_host_props[] = {
|
||||
mch.below_4g_mem_size, 0),
|
||||
DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, Q35PCIHost,
|
||||
mch.above_4g_mem_size, 0),
|
||||
DEFINE_PROP_BOOL(PCI_HOST_PROP_SMM_RANGES, Q35PCIHost,
|
||||
mch.has_smm_ranges, true),
|
||||
DEFINE_PROP_BOOL("x-pci-hole64-fix", Q35PCIHost, pci_hole64_fix, true),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
};
|
||||
@@ -214,6 +216,7 @@ static void q35_host_initfn(Object *obj)
|
||||
/* mch's object_initialize resets the default value, set it again */
|
||||
qdev_prop_set_uint64(DEVICE(s), PCI_HOST_PROP_PCI_HOLE64_SIZE,
|
||||
Q35_PCI_HOST_HOLE64_SIZE_DEFAULT);
|
||||
|
||||
object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_START, "uint32",
|
||||
q35_host_get_pci_hole_start,
|
||||
NULL, NULL, NULL);
|
||||
@@ -476,6 +479,10 @@ static void mch_write_config(PCIDevice *d,
|
||||
mch_update_pciexbar(mch);
|
||||
}
|
||||
|
||||
if (!mch->has_smm_ranges) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (ranges_overlap(address, len, MCH_HOST_BRIDGE_SMRAM,
|
||||
MCH_HOST_BRIDGE_SMRAM_SIZE)) {
|
||||
mch_update_smram(mch);
|
||||
@@ -494,10 +501,13 @@ static void mch_write_config(PCIDevice *d,
|
||||
static void mch_update(MCHPCIState *mch)
|
||||
{
|
||||
mch_update_pciexbar(mch);
|
||||
|
||||
mch_update_pam(mch);
|
||||
mch_update_smram(mch);
|
||||
mch_update_ext_tseg_mbytes(mch);
|
||||
mch_update_smbase_smram(mch);
|
||||
if (mch->has_smm_ranges) {
|
||||
mch_update_smram(mch);
|
||||
mch_update_ext_tseg_mbytes(mch);
|
||||
mch_update_smbase_smram(mch);
|
||||
}
|
||||
|
||||
/*
|
||||
* pci hole goes from end-of-low-ram to io-apic.
|
||||
@@ -538,19 +548,21 @@ static void mch_reset(DeviceState *qdev)
|
||||
pci_set_quad(d->config + MCH_HOST_BRIDGE_PCIEXBAR,
|
||||
MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT);
|
||||
|
||||
d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT;
|
||||
d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT;
|
||||
d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK;
|
||||
d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK;
|
||||
if (mch->has_smm_ranges) {
|
||||
d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT;
|
||||
d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT;
|
||||
d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK;
|
||||
d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK;
|
||||
|
||||
if (mch->ext_tseg_mbytes > 0) {
|
||||
pci_set_word(d->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES,
|
||||
MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY);
|
||||
if (mch->ext_tseg_mbytes > 0) {
|
||||
pci_set_word(d->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES,
|
||||
MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY);
|
||||
}
|
||||
|
||||
d->config[MCH_HOST_BRIDGE_F_SMBASE] = 0;
|
||||
d->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0xff;
|
||||
}
|
||||
|
||||
d->config[MCH_HOST_BRIDGE_F_SMBASE] = 0;
|
||||
d->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0xff;
|
||||
|
||||
mch_update(mch);
|
||||
}
|
||||
|
||||
@@ -568,6 +580,20 @@ static void mch_realize(PCIDevice *d, Error **errp)
|
||||
/* setup pci memory mapping */
|
||||
pc_pci_as_mapping_init(mch->system_memory, mch->pci_address_space);
|
||||
|
||||
/* PAM */
|
||||
init_pam(&mch->pam_regions[0], OBJECT(mch), mch->ram_memory,
|
||||
mch->system_memory, mch->pci_address_space,
|
||||
PAM_BIOS_BASE, PAM_BIOS_SIZE);
|
||||
for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) {
|
||||
init_pam(&mch->pam_regions[i + 1], OBJECT(mch), mch->ram_memory,
|
||||
mch->system_memory, mch->pci_address_space,
|
||||
PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
|
||||
}
|
||||
|
||||
if (!mch->has_smm_ranges) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* if *disabled* show SMRAM to all CPUs */
|
||||
memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region",
|
||||
mch->pci_address_space, MCH_HOST_BRIDGE_SMRAM_C_BASE,
|
||||
@@ -634,15 +660,6 @@ static void mch_realize(PCIDevice *d, Error **errp)
|
||||
|
||||
object_property_add_const_link(qdev_get_machine(), "smram",
|
||||
OBJECT(&mch->smram));
|
||||
|
||||
init_pam(&mch->pam_regions[0], OBJECT(mch), mch->ram_memory,
|
||||
mch->system_memory, mch->pci_address_space,
|
||||
PAM_BIOS_BASE, PAM_BIOS_SIZE);
|
||||
for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) {
|
||||
init_pam(&mch->pam_regions[i + 1], OBJECT(mch), mch->ram_memory,
|
||||
mch->system_memory, mch->pci_address_space,
|
||||
PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t mch_mcfg_base(void)
|
||||
|
||||
@@ -66,6 +66,10 @@ S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s,
|
||||
|
||||
assert(vpdev);
|
||||
|
||||
if (!vpdev->vbasedev.group) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
id = vpdev->vbasedev.group->container->fd;
|
||||
|
||||
if (!s390_pci_update_dma_avail(id, &avail)) {
|
||||
@@ -132,7 +136,7 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev,
|
||||
* to the guest based upon the vfio DMA limit.
|
||||
*/
|
||||
vfio_size = pbdev->iommu->max_dma_limit << TARGET_PAGE_BITS;
|
||||
if (vfio_size < (cap->end_dma - cap->start_dma + 1)) {
|
||||
if (vfio_size > 0 && vfio_size < cap->end_dma - cap->start_dma + 1) {
|
||||
pbdev->zpci_fn.edma = cap->start_dma + vfio_size - 1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -525,9 +525,7 @@ static void virtio_mem_activate_memslots_to_plug(VirtIOMEM *vmem,
|
||||
vmem->memslot_size;
|
||||
unsigned int idx;
|
||||
|
||||
if (!vmem->dynamic_memslots) {
|
||||
return;
|
||||
}
|
||||
assert(vmem->dynamic_memslots);
|
||||
|
||||
/* Activate all involved memslots in a single transaction. */
|
||||
memory_region_transaction_begin();
|
||||
@@ -547,9 +545,7 @@ static void virtio_mem_deactivate_unplugged_memslots(VirtIOMEM *vmem,
|
||||
vmem->memslot_size;
|
||||
unsigned int idx;
|
||||
|
||||
if (!vmem->dynamic_memslots) {
|
||||
return;
|
||||
}
|
||||
assert(vmem->dynamic_memslots);
|
||||
|
||||
/* Deactivate all memslots with unplugged blocks in a single transaction. */
|
||||
memory_region_transaction_begin();
|
||||
@@ -598,7 +594,9 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
|
||||
virtio_mem_notify_unplug(vmem, offset, size);
|
||||
virtio_mem_set_range_unplugged(vmem, start_gpa, size);
|
||||
/* Deactivate completely unplugged memslots after updating the state. */
|
||||
virtio_mem_deactivate_unplugged_memslots(vmem, offset, size);
|
||||
if (vmem->dynamic_memslots) {
|
||||
virtio_mem_deactivate_unplugged_memslots(vmem, offset, size);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -635,9 +633,11 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
|
||||
* blocks we are plugging here. The following notification will inform
|
||||
* registered listeners about the blocks we're plugging.
|
||||
*/
|
||||
virtio_mem_activate_memslots_to_plug(vmem, offset, size);
|
||||
if (vmem->dynamic_memslots) {
|
||||
virtio_mem_activate_memslots_to_plug(vmem, offset, size);
|
||||
}
|
||||
ret = virtio_mem_notify_plug(vmem, offset, size);
|
||||
if (ret) {
|
||||
if (ret && vmem->dynamic_memslots) {
|
||||
virtio_mem_deactivate_unplugged_memslots(vmem, offset, size);
|
||||
}
|
||||
}
|
||||
@@ -749,7 +749,9 @@ static int virtio_mem_unplug_all(VirtIOMEM *vmem)
|
||||
notifier_list_notify(&vmem->size_change_notifiers, &vmem->size);
|
||||
|
||||
/* Deactivate all memslots after updating the state. */
|
||||
virtio_mem_deactivate_unplugged_memslots(vmem, 0, region_size);
|
||||
if (vmem->dynamic_memslots) {
|
||||
virtio_mem_deactivate_unplugged_memslots(vmem, 0, region_size);
|
||||
}
|
||||
}
|
||||
|
||||
trace_virtio_mem_unplugged_all();
|
||||
|
||||
@@ -175,6 +175,8 @@ typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
|
||||
|
||||
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
|
||||
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
|
||||
int ram_block_convert_range(RAMBlock *rb, uint64_t start, size_t length,
|
||||
bool shared_to_private);
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
@@ -243,6 +243,12 @@ typedef struct IOMMUTLBEvent {
|
||||
/* RAM FD is opened read-only */
|
||||
#define RAM_READONLY_FD (1 << 11)
|
||||
|
||||
/* RAM can be private that has kvm gmem backend */
|
||||
#define RAM_GUEST_MEMFD (1 << 12)
|
||||
|
||||
/* RAM is default private */
|
||||
#define RAM_DEFAULT_PRIVATE (1 << 13)
|
||||
|
||||
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
|
||||
IOMMUNotifierFlag flags,
|
||||
hwaddr start, hwaddr end,
|
||||
@@ -844,6 +850,7 @@ struct IOMMUMemoryRegion {
|
||||
#define MEMORY_LISTENER_PRIORITY_MIN 0
|
||||
#define MEMORY_LISTENER_PRIORITY_ACCEL 10
|
||||
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10
|
||||
#define MEMORY_LISTENER_PRIORITY_ACCEL_HIGH 20
|
||||
|
||||
/**
|
||||
* struct MemoryListener: callbacks structure for updates to the physical memory map
|
||||
@@ -1583,6 +1590,12 @@ void memory_region_init_ram(MemoryRegion *mr,
|
||||
uint64_t size,
|
||||
Error **errp);
|
||||
|
||||
void memory_region_init_ram_guest_memfd(MemoryRegion *mr,
|
||||
Object *owner,
|
||||
const char *name,
|
||||
uint64_t size,
|
||||
Error **errp);
|
||||
|
||||
/**
|
||||
* memory_region_init_rom: Initialize a ROM memory region.
|
||||
*
|
||||
@@ -1702,6 +1715,19 @@ static inline bool memory_region_is_romd(MemoryRegion *mr)
|
||||
*/
|
||||
bool memory_region_is_protected(MemoryRegion *mr);
|
||||
|
||||
/**
|
||||
* memory_region_has_guest_memfd: check whether a memory region has guest_memfd
|
||||
* associated
|
||||
*
|
||||
* Returns %true if a memory region's ram_block has valid guest_memfd assigned.
|
||||
*
|
||||
* @mr: the memory region being queried
|
||||
*/
|
||||
bool memory_region_has_guest_memfd(MemoryRegion *mr);
|
||||
|
||||
void memory_region_set_default_private(MemoryRegion *mr);
|
||||
bool memory_region_is_default_private(MemoryRegion *mr);
|
||||
|
||||
/**
|
||||
* memory_region_get_iommu: check whether a memory region is an iommu
|
||||
*
|
||||
|
||||
@@ -41,6 +41,7 @@ struct RAMBlock {
|
||||
QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
|
||||
int fd;
|
||||
uint64_t fd_offset;
|
||||
int guest_memfd;
|
||||
size_t page_size;
|
||||
/* dirty bitmap used during migration */
|
||||
unsigned long *bmap;
|
||||
|
||||
@@ -30,6 +30,7 @@ bool machine_usb(MachineState *machine);
|
||||
int machine_phandle_start(MachineState *machine);
|
||||
bool machine_dump_guest_core(MachineState *machine);
|
||||
bool machine_mem_merge(MachineState *machine);
|
||||
bool machine_require_guest_memfd(MachineState *machine);
|
||||
HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine);
|
||||
void machine_set_cpu_numa_node(MachineState *machine,
|
||||
const CpuInstanceProperties *props,
|
||||
@@ -364,6 +365,7 @@ struct MachineState {
|
||||
char *dt_compatible;
|
||||
bool dump_guest_core;
|
||||
bool mem_merge;
|
||||
bool require_guest_memfd;
|
||||
bool usb;
|
||||
bool usb_disabled;
|
||||
char *firmware;
|
||||
|
||||
@@ -165,6 +165,7 @@ void pc_guest_info_init(PCMachineState *pcms);
|
||||
#define PCI_HOST_PROP_PCI_HOLE64_SIZE "pci-hole64-size"
|
||||
#define PCI_HOST_BELOW_4G_MEM_SIZE "below-4g-mem-size"
|
||||
#define PCI_HOST_ABOVE_4G_MEM_SIZE "above-4g-mem-size"
|
||||
#define PCI_HOST_PROP_SMM_RANGES "smm-ranges"
|
||||
|
||||
|
||||
void pc_pci_as_mapping_init(MemoryRegion *system_memory,
|
||||
@@ -309,15 +310,12 @@ extern const size_t pc_compat_1_5_len;
|
||||
extern GlobalProperty pc_compat_1_4[];
|
||||
extern const size_t pc_compat_1_4_len;
|
||||
|
||||
int pc_machine_kvm_type(MachineState *machine, const char *vm_type);
|
||||
|
||||
#define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \
|
||||
static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \
|
||||
{ \
|
||||
MachineClass *mc = MACHINE_CLASS(oc); \
|
||||
optsfn(mc); \
|
||||
mc->init = initfn; \
|
||||
mc->kvm_type = pc_machine_kvm_type; \
|
||||
} \
|
||||
static const TypeInfo pc_machine_type_##suffix = { \
|
||||
.name = namestr TYPE_MACHINE_SUFFIX, \
|
||||
|
||||
58
include/hw/i386/tdvf.h
Normal file
58
include/hw/i386/tdvf.h
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
* Copyright (c) 2020 Intel Corporation
|
||||
* Author: Isaku Yamahata <isaku.yamahata at gmail.com>
|
||||
* <isaku.yamahata at intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef HW_I386_TDVF_H
|
||||
#define HW_I386_TDVF_H
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#define TDVF_SECTION_TYPE_BFV 0
|
||||
#define TDVF_SECTION_TYPE_CFV 1
|
||||
#define TDVF_SECTION_TYPE_TD_HOB 2
|
||||
#define TDVF_SECTION_TYPE_TEMP_MEM 3
|
||||
|
||||
#define TDVF_SECTION_ATTRIBUTES_MR_EXTEND (1U << 0)
|
||||
#define TDVF_SECTION_ATTRIBUTES_PAGE_AUG (1U << 1)
|
||||
|
||||
typedef struct TdxFirmwareEntry {
|
||||
uint32_t data_offset;
|
||||
uint32_t data_len;
|
||||
uint64_t address;
|
||||
uint64_t size;
|
||||
uint32_t type;
|
||||
uint32_t attributes;
|
||||
|
||||
void *mem_ptr;
|
||||
} TdxFirmwareEntry;
|
||||
|
||||
typedef struct TdxFirmware {
|
||||
void *mem_ptr;
|
||||
|
||||
uint32_t nr_entries;
|
||||
TdxFirmwareEntry *entries;
|
||||
} TdxFirmware;
|
||||
|
||||
#define for_each_tdx_fw_entry(fw, e) \
|
||||
for (e = (fw)->entries; e != (fw)->entries + (fw)->nr_entries; e++)
|
||||
|
||||
int tdvf_parse_metadata(TdxFirmware *fw, void *flash_ptr, int size);
|
||||
|
||||
#endif /* HW_I386_TDVF_H */
|
||||
@@ -41,6 +41,7 @@ struct X86MachineState {
|
||||
MachineState parent;
|
||||
|
||||
/*< public >*/
|
||||
unsigned int vm_type;
|
||||
|
||||
/* Pointers to devices and objects: */
|
||||
ISADevice *rtc;
|
||||
@@ -58,6 +59,7 @@ struct X86MachineState {
|
||||
|
||||
/* CPU and apic information: */
|
||||
bool apic_xrupt_override;
|
||||
bool eoi_intercept_unsupported;
|
||||
unsigned pci_irq_mask;
|
||||
unsigned apic_id_limit;
|
||||
uint16_t boot_cpus;
|
||||
|
||||
@@ -50,6 +50,7 @@ struct MCHPCIState {
|
||||
MemoryRegion tseg_blackhole, tseg_window;
|
||||
MemoryRegion smbase_blackhole, smbase_window;
|
||||
bool has_smram_at_smbase;
|
||||
bool has_smm_ranges;
|
||||
Range pci_hole;
|
||||
uint64_t below_4g_mem_size;
|
||||
uint64_t above_4g_mem_size;
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
*/
|
||||
#if defined(CONFIG_INT128) && !defined(CONFIG_TCG_INTERPRETER)
|
||||
typedef __int128_t Int128;
|
||||
typedef __int128_t __attribute__((aligned(16))) Int128Aligned;
|
||||
|
||||
static inline Int128 int128_make64(uint64_t a)
|
||||
{
|
||||
@@ -224,6 +225,7 @@ static inline Int128 int128_rems(Int128 a, Int128 b)
|
||||
#else /* !CONFIG_INT128 */
|
||||
|
||||
typedef struct Int128 Int128;
|
||||
typedef struct Int128 __attribute__((aligned(16))) Int128Aligned;
|
||||
|
||||
/*
|
||||
* We guarantee that the in-memory byte representation of an
|
||||
|
||||
198
include/standard-headers/uefi/uefi.h
Normal file
198
include/standard-headers/uefi/uefi.h
Normal file
@@ -0,0 +1,198 @@
|
||||
/*
|
||||
* Copyright (C) 2020 Intel Corporation
|
||||
*
|
||||
* Author: Isaku Yamahata <isaku.yamahata at gmail.com>
|
||||
* <isaku.yamahata at intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef HW_I386_UEFI_H
|
||||
#define HW_I386_UEFI_H
|
||||
|
||||
/***************************************************************************/
|
||||
/*
|
||||
* basic EFI definitions
|
||||
* supplemented with UEFI Specification Version 2.8 (Errata A)
|
||||
* released February 2020
|
||||
*/
|
||||
/* UEFI integer is little endian */
|
||||
|
||||
typedef struct {
|
||||
uint32_t Data1;
|
||||
uint16_t Data2;
|
||||
uint16_t Data3;
|
||||
uint8_t Data4[8];
|
||||
} EFI_GUID;
|
||||
|
||||
typedef enum {
|
||||
EfiReservedMemoryType,
|
||||
EfiLoaderCode,
|
||||
EfiLoaderData,
|
||||
EfiBootServicesCode,
|
||||
EfiBootServicesData,
|
||||
EfiRuntimeServicesCode,
|
||||
EfiRuntimeServicesData,
|
||||
EfiConventionalMemory,
|
||||
EfiUnusableMemory,
|
||||
EfiACPIReclaimMemory,
|
||||
EfiACPIMemoryNVS,
|
||||
EfiMemoryMappedIO,
|
||||
EfiMemoryMappedIOPortSpace,
|
||||
EfiPalCode,
|
||||
EfiPersistentMemory,
|
||||
EfiUnacceptedMemoryType,
|
||||
EfiMaxMemoryType
|
||||
} EFI_MEMORY_TYPE;
|
||||
|
||||
#define EFI_HOB_HANDOFF_TABLE_VERSION 0x0009
|
||||
|
||||
#define EFI_HOB_TYPE_HANDOFF 0x0001
|
||||
#define EFI_HOB_TYPE_MEMORY_ALLOCATION 0x0002
|
||||
#define EFI_HOB_TYPE_RESOURCE_DESCRIPTOR 0x0003
|
||||
#define EFI_HOB_TYPE_GUID_EXTENSION 0x0004
|
||||
#define EFI_HOB_TYPE_FV 0x0005
|
||||
#define EFI_HOB_TYPE_CPU 0x0006
|
||||
#define EFI_HOB_TYPE_MEMORY_POOL 0x0007
|
||||
#define EFI_HOB_TYPE_FV2 0x0009
|
||||
#define EFI_HOB_TYPE_LOAD_PEIM_UNUSED 0x000A
|
||||
#define EFI_HOB_TYPE_UEFI_CAPSULE 0x000B
|
||||
#define EFI_HOB_TYPE_FV3 0x000C
|
||||
#define EFI_HOB_TYPE_UNUSED 0xFFFE
|
||||
#define EFI_HOB_TYPE_END_OF_HOB_LIST 0xFFFF
|
||||
|
||||
typedef struct {
|
||||
uint16_t HobType;
|
||||
uint16_t HobLength;
|
||||
uint32_t Reserved;
|
||||
} EFI_HOB_GENERIC_HEADER;
|
||||
|
||||
typedef uint64_t EFI_PHYSICAL_ADDRESS;
|
||||
typedef uint32_t EFI_BOOT_MODE;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
uint32_t Version;
|
||||
EFI_BOOT_MODE BootMode;
|
||||
EFI_PHYSICAL_ADDRESS EfiMemoryTop;
|
||||
EFI_PHYSICAL_ADDRESS EfiMemoryBottom;
|
||||
EFI_PHYSICAL_ADDRESS EfiFreeMemoryTop;
|
||||
EFI_PHYSICAL_ADDRESS EfiFreeMemoryBottom;
|
||||
EFI_PHYSICAL_ADDRESS EfiEndOfHobList;
|
||||
} EFI_HOB_HANDOFF_INFO_TABLE;
|
||||
|
||||
#define EFI_RESOURCE_SYSTEM_MEMORY 0x00000000
|
||||
#define EFI_RESOURCE_MEMORY_MAPPED_IO 0x00000001
|
||||
#define EFI_RESOURCE_IO 0x00000002
|
||||
#define EFI_RESOURCE_FIRMWARE_DEVICE 0x00000003
|
||||
#define EFI_RESOURCE_MEMORY_MAPPED_IO_PORT 0x00000004
|
||||
#define EFI_RESOURCE_MEMORY_RESERVED 0x00000005
|
||||
#define EFI_RESOURCE_IO_RESERVED 0x00000006
|
||||
#define EFI_RESOURCE_MEMORY_UNACCEPTED 0x00000007
|
||||
#define EFI_RESOURCE_MAX_MEMORY_TYPE 0x00000008
|
||||
|
||||
#define EFI_RESOURCE_ATTRIBUTE_PRESENT 0x00000001
|
||||
#define EFI_RESOURCE_ATTRIBUTE_INITIALIZED 0x00000002
|
||||
#define EFI_RESOURCE_ATTRIBUTE_TESTED 0x00000004
|
||||
#define EFI_RESOURCE_ATTRIBUTE_SINGLE_BIT_ECC 0x00000008
|
||||
#define EFI_RESOURCE_ATTRIBUTE_MULTIPLE_BIT_ECC 0x00000010
|
||||
#define EFI_RESOURCE_ATTRIBUTE_ECC_RESERVED_1 0x00000020
|
||||
#define EFI_RESOURCE_ATTRIBUTE_ECC_RESERVED_2 0x00000040
|
||||
#define EFI_RESOURCE_ATTRIBUTE_READ_PROTECTED 0x00000080
|
||||
#define EFI_RESOURCE_ATTRIBUTE_WRITE_PROTECTED 0x00000100
|
||||
#define EFI_RESOURCE_ATTRIBUTE_EXECUTION_PROTECTED 0x00000200
|
||||
#define EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE 0x00000400
|
||||
#define EFI_RESOURCE_ATTRIBUTE_WRITE_COMBINEABLE 0x00000800
|
||||
#define EFI_RESOURCE_ATTRIBUTE_WRITE_THROUGH_CACHEABLE 0x00001000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE 0x00002000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_16_BIT_IO 0x00004000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_32_BIT_IO 0x00008000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_64_BIT_IO 0x00010000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_UNCACHED_EXPORTED 0x00020000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_READ_ONLY_PROTECTED 0x00040000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_READ_ONLY_PROTECTABLE 0x00080000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_READ_PROTECTABLE 0x00100000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_WRITE_PROTECTABLE 0x00200000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_EXECUTION_PROTECTABLE 0x00400000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_PERSISTENT 0x00800000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_PERSISTABLE 0x01000000
|
||||
#define EFI_RESOURCE_ATTRIBUTE_MORE_RELIABLE 0x02000000
|
||||
|
||||
typedef uint32_t EFI_RESOURCE_TYPE;
|
||||
typedef uint32_t EFI_RESOURCE_ATTRIBUTE_TYPE;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
EFI_GUID Owner;
|
||||
EFI_RESOURCE_TYPE ResourceType;
|
||||
EFI_RESOURCE_ATTRIBUTE_TYPE ResourceAttribute;
|
||||
EFI_PHYSICAL_ADDRESS PhysicalStart;
|
||||
uint64_t ResourceLength;
|
||||
} EFI_HOB_RESOURCE_DESCRIPTOR;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
EFI_GUID Name;
|
||||
|
||||
/* guid specific data follows */
|
||||
} EFI_HOB_GUID_TYPE;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
EFI_PHYSICAL_ADDRESS BaseAddress;
|
||||
uint64_t Length;
|
||||
} EFI_HOB_FIRMWARE_VOLUME;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
EFI_PHYSICAL_ADDRESS BaseAddress;
|
||||
uint64_t Length;
|
||||
EFI_GUID FvName;
|
||||
EFI_GUID FileName;
|
||||
} EFI_HOB_FIRMWARE_VOLUME2;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
EFI_PHYSICAL_ADDRESS BaseAddress;
|
||||
uint64_t Length;
|
||||
uint32_t AuthenticationStatus;
|
||||
bool ExtractedFv;
|
||||
EFI_GUID FvName;
|
||||
EFI_GUID FileName;
|
||||
} EFI_HOB_FIRMWARE_VOLUME3;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
uint8_t SizeOfMemorySpace;
|
||||
uint8_t SizeOfIoSpace;
|
||||
uint8_t Reserved[6];
|
||||
} EFI_HOB_CPU;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
} EFI_HOB_MEMORY_POOL;
|
||||
|
||||
typedef struct {
|
||||
EFI_HOB_GENERIC_HEADER Header;
|
||||
|
||||
EFI_PHYSICAL_ADDRESS BaseAddress;
|
||||
uint64_t Length;
|
||||
} EFI_HOB_UEFI_CAPSULE;
|
||||
|
||||
#define EFI_HOB_OWNER_ZERO \
|
||||
((EFI_GUID){ 0x00000000, 0x0000, 0x0000, \
|
||||
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } })
|
||||
|
||||
#endif
|
||||
@@ -24,6 +24,7 @@ typedef struct ArchDumpInfo {
|
||||
void (*arch_sections_add_fn)(DumpState *s);
|
||||
uint64_t (*arch_sections_write_hdr_fn)(DumpState *s, uint8_t *buff);
|
||||
int (*arch_sections_write_fn)(DumpState *s, uint8_t *buff);
|
||||
void (*arch_cleanup_fn)(DumpState *s);
|
||||
} ArchDumpInfo;
|
||||
|
||||
struct GuestPhysBlockList; /* memory_mapping.h */
|
||||
|
||||
@@ -66,6 +66,7 @@ struct HostMemoryBackend {
|
||||
uint64_t size;
|
||||
bool merge, dump, use_canonical_path;
|
||||
bool prealloc, is_mapped, share, reserve;
|
||||
bool require_guest_memfd;
|
||||
uint32_t prealloc_threads;
|
||||
ThreadContext *prealloc_context;
|
||||
DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
|
||||
|
||||
@@ -341,6 +341,7 @@ int kvm_arch_get_default_type(MachineState *ms);
|
||||
|
||||
int kvm_arch_init(MachineState *ms, KVMState *s);
|
||||
|
||||
int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp);
|
||||
int kvm_arch_init_vcpu(CPUState *cpu);
|
||||
int kvm_arch_destroy_vcpu(CPUState *cpu);
|
||||
|
||||
@@ -538,4 +539,11 @@ bool kvm_arch_cpu_check_are_resettable(void);
|
||||
bool kvm_dirty_ring_enabled(void);
|
||||
|
||||
uint32_t kvm_dirty_ring_size(void);
|
||||
|
||||
int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);
|
||||
|
||||
int kvm_set_memory_attributes_private(hwaddr start, hwaddr size);
|
||||
int kvm_set_memory_attributes_shared(hwaddr start, hwaddr size);
|
||||
|
||||
int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);
|
||||
#endif
|
||||
|
||||
@@ -30,6 +30,8 @@ typedef struct KVMSlot
|
||||
int as_id;
|
||||
/* Cache of the offset in ram address space */
|
||||
ram_addr_t ram_start_offset;
|
||||
int guest_memfd;
|
||||
hwaddr guest_memfd_offset;
|
||||
} KVMSlot;
|
||||
|
||||
typedef struct KVMMemoryUpdate {
|
||||
|
||||
@@ -560,4 +560,98 @@ struct kvm_pmu_event_filter {
|
||||
/* x86-specific KVM_EXIT_HYPERCALL flags. */
|
||||
#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
|
||||
|
||||
#define KVM_X86_DEFAULT_VM 0
|
||||
#define KVM_X86_SW_PROTECTED_VM 1
|
||||
#define KVM_X86_TDX_VM 2
|
||||
#define KVM_X86_SNP_VM 3
|
||||
|
||||
/* Trust Domain eXtension sub-ioctl() commands. */
|
||||
enum kvm_tdx_cmd_id {
|
||||
KVM_TDX_CAPABILITIES = 0,
|
||||
KVM_TDX_INIT_VM,
|
||||
KVM_TDX_INIT_VCPU,
|
||||
KVM_TDX_INIT_MEM_REGION,
|
||||
KVM_TDX_FINALIZE_VM,
|
||||
KVM_TDX_RELEASE_VM,
|
||||
|
||||
KVM_TDX_CMD_NR_MAX,
|
||||
};
|
||||
|
||||
struct kvm_tdx_cmd {
|
||||
/* enum kvm_tdx_cmd_id */
|
||||
__u32 id;
|
||||
/* flags for sub-commend. If sub-command doesn't use this, set zero. */
|
||||
__u32 flags;
|
||||
/*
|
||||
* data for each sub-command. An immediate or a pointer to the actual
|
||||
* data in process virtual address. If sub-command doesn't use it,
|
||||
* set zero.
|
||||
*/
|
||||
__u64 data;
|
||||
/*
|
||||
* Auxiliary error code. The sub-command may return TDX SEAMCALL
|
||||
* status code in addition to -Exxx.
|
||||
* Defined for consistency with struct kvm_sev_cmd.
|
||||
*/
|
||||
__u64 error;
|
||||
};
|
||||
|
||||
struct kvm_tdx_cpuid_config {
|
||||
__u32 leaf;
|
||||
__u32 sub_leaf;
|
||||
__u32 eax;
|
||||
__u32 ebx;
|
||||
__u32 ecx;
|
||||
__u32 edx;
|
||||
};
|
||||
|
||||
struct kvm_tdx_capabilities {
|
||||
__u64 attrs_fixed0;
|
||||
__u64 attrs_fixed1;
|
||||
__u64 xfam_fixed0;
|
||||
__u64 xfam_fixed1;
|
||||
#define TDX_CAP_GPAW_48 (1 << 0)
|
||||
#define TDX_CAP_GPAW_52 (1 << 1)
|
||||
__u32 supported_gpaw;
|
||||
__u32 padding;
|
||||
__u64 reserved[251];
|
||||
|
||||
__u32 nr_cpuid_configs;
|
||||
struct kvm_tdx_cpuid_config cpuid_configs[];
|
||||
};
|
||||
|
||||
struct kvm_tdx_init_vm {
|
||||
__u64 attributes;
|
||||
__u64 mrconfigid[6]; /* sha384 digest */
|
||||
__u64 mrowner[6]; /* sha384 digest */
|
||||
__u64 mrownerconfig[6]; /* sha348 digest */
|
||||
/*
|
||||
* For future extensibility to make sizeof(struct kvm_tdx_init_vm) = 8KB.
|
||||
* This should be enough given sizeof(TD_PARAMS) = 1024.
|
||||
* 8KB was chosen given because
|
||||
* sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES(=256) = 8KB.
|
||||
*/
|
||||
__u64 reserved[1004];
|
||||
|
||||
/*
|
||||
* Call KVM_TDX_INIT_VM before vcpu creation, thus before
|
||||
* KVM_SET_CPUID2.
|
||||
* This configuration supersedes KVM_SET_CPUID2s for VCPUs because the
|
||||
* TDX module directly virtualizes those CPUIDs without VMM. The user
|
||||
* space VMM, e.g. qemu, should make KVM_SET_CPUID2 consistent with
|
||||
* those values. If it doesn't, KVM may have wrong idea of vCPUIDs of
|
||||
* the guest, and KVM may wrongly emulate CPUIDs or MSRs that the TDX
|
||||
* module doesn't virtualize.
|
||||
*/
|
||||
struct kvm_cpuid2 cpuid;
|
||||
};
|
||||
|
||||
#define KVM_TDX_MEASURE_MEMORY_REGION (1UL << 0)
|
||||
|
||||
struct kvm_tdx_init_mem_region {
|
||||
__u64 source_addr;
|
||||
__u64 gpa;
|
||||
__u64 nr_pages;
|
||||
};
|
||||
|
||||
#endif /* _ASM_X86_KVM_H */
|
||||
|
||||
@@ -95,6 +95,19 @@ struct kvm_userspace_memory_region {
|
||||
__u64 userspace_addr; /* start of the userspace allocated memory */
|
||||
};
|
||||
|
||||
/* for KVM_SET_USER_MEMORY_REGION2 */
|
||||
struct kvm_userspace_memory_region2 {
|
||||
__u32 slot;
|
||||
__u32 flags;
|
||||
__u64 guest_phys_addr;
|
||||
__u64 memory_size;
|
||||
__u64 userspace_addr;
|
||||
__u64 guest_memfd_offset;
|
||||
__u32 guest_memfd;
|
||||
__u32 pad1;
|
||||
__u64 pad2[14];
|
||||
};
|
||||
|
||||
/*
|
||||
* The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
|
||||
* userspace, other bits are reserved for kvm internal use which are defined
|
||||
@@ -102,6 +115,7 @@ struct kvm_userspace_memory_region {
|
||||
*/
|
||||
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
|
||||
#define KVM_MEM_READONLY (1UL << 1)
|
||||
#define KVM_MEM_PRIVATE (1UL << 2)
|
||||
|
||||
/* for KVM_IRQ_LINE */
|
||||
struct kvm_irq_level {
|
||||
@@ -223,6 +237,92 @@ struct kvm_xen_exit {
|
||||
} u;
|
||||
};
|
||||
|
||||
/* masks for reg_mask to indicate which registers are passed. */
|
||||
#define TDX_VMCALL_REG_MASK_RBX BIT_ULL(2)
|
||||
#define TDX_VMCALL_REG_MASK_RDX BIT_ULL(3)
|
||||
#define TDX_VMCALL_REG_MASK_RSI BIT_ULL(6)
|
||||
#define TDX_VMCALL_REG_MASK_RDI BIT_ULL(7)
|
||||
#define TDX_VMCALL_REG_MASK_R8 BIT_ULL(8)
|
||||
#define TDX_VMCALL_REG_MASK_R9 BIT_ULL(9)
|
||||
#define TDX_VMCALL_REG_MASK_R10 BIT_ULL(10)
|
||||
#define TDX_VMCALL_REG_MASK_R11 BIT_ULL(11)
|
||||
#define TDX_VMCALL_REG_MASK_R12 BIT_ULL(12)
|
||||
#define TDX_VMCALL_REG_MASK_R13 BIT_ULL(13)
|
||||
#define TDX_VMCALL_REG_MASK_R14 BIT_ULL(14)
|
||||
#define TDX_VMCALL_REG_MASK_R15 BIT_ULL(15)
|
||||
|
||||
struct kvm_tdx_exit {
|
||||
#define KVM_EXIT_TDX_VMCALL 1
|
||||
__u32 type;
|
||||
__u32 pad;
|
||||
|
||||
union {
|
||||
struct kvm_tdx_vmcall {
|
||||
/*
|
||||
* RAX(bit 0), RCX(bit 1) and RSP(bit 4) are reserved.
|
||||
* RAX(bit 0): TDG.VP.VMCALL status code.
|
||||
* RCX(bit 1): bitmap for used registers.
|
||||
* RSP(bit 4): the caller stack.
|
||||
*/
|
||||
union {
|
||||
__u64 in_rcx;
|
||||
__u64 reg_mask;
|
||||
};
|
||||
|
||||
/*
|
||||
* Guest-Host-Communication Interface for TDX spec
|
||||
* defines the ABI for TDG.VP.VMCALL.
|
||||
*/
|
||||
/* Input parameters: guest -> VMM */
|
||||
union {
|
||||
__u64 in_r10;
|
||||
__u64 type;
|
||||
};
|
||||
union {
|
||||
__u64 in_r11;
|
||||
__u64 subfunction;
|
||||
};
|
||||
/*
|
||||
* Subfunction specific.
|
||||
* Registers are used in this order to pass input
|
||||
* arguments. r12=arg0, r13=arg1, etc.
|
||||
*/
|
||||
__u64 in_r12;
|
||||
__u64 in_r13;
|
||||
__u64 in_r14;
|
||||
__u64 in_r15;
|
||||
__u64 in_rbx;
|
||||
__u64 in_rdi;
|
||||
__u64 in_rsi;
|
||||
__u64 in_r8;
|
||||
__u64 in_r9;
|
||||
__u64 in_rdx;
|
||||
|
||||
/* Output parameters: VMM -> guest */
|
||||
union {
|
||||
__u64 out_r10;
|
||||
__u64 status_code;
|
||||
};
|
||||
/*
|
||||
* Subfunction specific.
|
||||
* Registers are used in this order to output return
|
||||
* values. r11=ret0, r12=ret1, etc.
|
||||
*/
|
||||
__u64 out_r11;
|
||||
__u64 out_r12;
|
||||
__u64 out_r13;
|
||||
__u64 out_r14;
|
||||
__u64 out_r15;
|
||||
__u64 out_rbx;
|
||||
__u64 out_rdi;
|
||||
__u64 out_rsi;
|
||||
__u64 out_r8;
|
||||
__u64 out_r9;
|
||||
__u64 out_rdx;
|
||||
} vmcall;
|
||||
} u;
|
||||
};
|
||||
|
||||
#define KVM_S390_GET_SKEYS_NONE 1
|
||||
#define KVM_S390_SKEYS_MAX 1048576
|
||||
|
||||
@@ -264,6 +364,8 @@ struct kvm_xen_exit {
|
||||
#define KVM_EXIT_RISCV_SBI 35
|
||||
#define KVM_EXIT_RISCV_CSR 36
|
||||
#define KVM_EXIT_NOTIFY 37
|
||||
#define KVM_EXIT_MEMORY_FAULT 39
|
||||
#define KVM_EXIT_TDX 40
|
||||
|
||||
/* For KVM_EXIT_INTERNAL_ERROR */
|
||||
/* Emulate instruction failed. */
|
||||
@@ -506,6 +608,15 @@ struct kvm_run {
|
||||
#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
|
||||
__u32 flags;
|
||||
} notify;
|
||||
/* KVM_EXIT_MEMORY_FAULT */
|
||||
struct {
|
||||
#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
|
||||
__u64 flags;
|
||||
__u64 gpa;
|
||||
__u64 size;
|
||||
} memory_fault;
|
||||
/* KVM_EXIT_TDX_VMCALL */
|
||||
struct kvm_tdx_exit tdx;
|
||||
/* Fix the size of the union. */
|
||||
char padding[256];
|
||||
};
|
||||
@@ -1188,6 +1299,11 @@ struct kvm_ppc_resize_hpt {
|
||||
#define KVM_CAP_COUNTER_OFFSET 227
|
||||
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
|
||||
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
|
||||
#define KVM_CAP_USER_MEMORY2 231
|
||||
#define KVM_CAP_MEMORY_FAULT_INFO 232
|
||||
#define KVM_CAP_MEMORY_ATTRIBUTES 233
|
||||
#define KVM_CAP_GUEST_MEMFD 234
|
||||
#define KVM_CAP_VM_TYPES 235
|
||||
|
||||
#ifdef KVM_CAP_IRQ_ROUTING
|
||||
|
||||
@@ -1469,6 +1585,8 @@ struct kvm_vfio_spapr_tce {
|
||||
struct kvm_userspace_memory_region)
|
||||
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
|
||||
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
|
||||
#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
|
||||
struct kvm_userspace_memory_region2)
|
||||
|
||||
/* enable ucontrol for s390 */
|
||||
struct kvm_s390_ucas_mapping {
|
||||
@@ -2252,4 +2370,26 @@ struct kvm_s390_zpci_op {
|
||||
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
|
||||
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
|
||||
|
||||
/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
|
||||
#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
|
||||
|
||||
struct kvm_memory_attributes {
|
||||
__u64 address;
|
||||
__u64 size;
|
||||
__u64 attributes;
|
||||
__u64 flags;
|
||||
};
|
||||
|
||||
#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
|
||||
|
||||
#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
|
||||
|
||||
struct kvm_create_guest_memfd {
|
||||
__u64 size;
|
||||
__u64 flags;
|
||||
__u64 reserved[6];
|
||||
};
|
||||
|
||||
#define KVM_GUEST_MEMFD_ALLOW_HUGEPAGE (1ULL << 0)
|
||||
|
||||
#endif /* __LINUX_KVM_H */
|
||||
|
||||
@@ -462,6 +462,7 @@ warn_flags = [
|
||||
'-Wno-tautological-type-limit-compare',
|
||||
'-Wno-psabi',
|
||||
'-Wno-gnu-variable-sized-type-not-at-end',
|
||||
'-Wshadow=local',
|
||||
]
|
||||
|
||||
if targetos != 'darwin'
|
||||
|
||||
Binary file not shown.
@@ -878,6 +878,33 @@
|
||||
'reduced-phys-bits': 'uint32',
|
||||
'*kernel-hashes': 'bool' } }
|
||||
|
||||
##
|
||||
# @TdxGuestProperties:
|
||||
#
|
||||
# Properties for tdx-guest objects.
|
||||
#
|
||||
# @sept-ve-disable: toggle bit 28 of TD attributes to control disabling
|
||||
# of EPT violation conversion to #VE on guest TD access of PENDING
|
||||
# pages. Some guest OS (e.g., Linux TD guest) may require this to
|
||||
# be set, otherwise they refuse to boot.
|
||||
#
|
||||
# @mrconfigid: base64 encoded MRCONFIGID SHA384 digest
|
||||
#
|
||||
# @mrowner: base64 encoded MROWNER SHA384 digest
|
||||
#
|
||||
# @mrownerconfig: base64 MROWNERCONFIG SHA384 digest
|
||||
#
|
||||
# @quote-generation-socket: socket address for Quote Generation Service(QGS)
|
||||
#
|
||||
# Since: 8.2
|
||||
##
|
||||
{ 'struct': 'TdxGuestProperties',
|
||||
'data': { '*sept-ve-disable': 'bool',
|
||||
'*mrconfigid': 'str',
|
||||
'*mrowner': 'str',
|
||||
'*mrownerconfig': 'str',
|
||||
'*quote-generation-socket': 'SocketAddress' } }
|
||||
|
||||
##
|
||||
# @ThreadContextProperties:
|
||||
#
|
||||
@@ -956,6 +983,7 @@
|
||||
'sev-guest',
|
||||
'thread-context',
|
||||
's390-pv-guest',
|
||||
'tdx-guest',
|
||||
'throttle-group',
|
||||
'tls-creds-anon',
|
||||
'tls-creds-psk',
|
||||
@@ -1022,6 +1050,7 @@
|
||||
'secret_keyring': { 'type': 'SecretKeyringProperties',
|
||||
'if': 'CONFIG_SECRET_KEYRING' },
|
||||
'sev-guest': 'SevGuestProperties',
|
||||
'tdx-guest': 'TdxGuestProperties',
|
||||
'thread-context': 'ThreadContextProperties',
|
||||
'throttle-group': 'ThrottleGroupProperties',
|
||||
'tls-creds-anon': 'TlsCredsAnonProperties',
|
||||
|
||||
@@ -496,10 +496,12 @@
|
||||
#
|
||||
# @s390: s390 guest panic information type (Since: 2.12)
|
||||
#
|
||||
# @tdx: tdx guest panic information type (Since: 8.2)
|
||||
#
|
||||
# Since: 2.9
|
||||
##
|
||||
{ 'enum': 'GuestPanicInformationType',
|
||||
'data': [ 'hyper-v', 's390' ] }
|
||||
'data': [ 'hyper-v', 's390', 'tdx' ] }
|
||||
|
||||
##
|
||||
# @GuestPanicInformation:
|
||||
@@ -514,7 +516,8 @@
|
||||
'base': {'type': 'GuestPanicInformationType'},
|
||||
'discriminator': 'type',
|
||||
'data': {'hyper-v': 'GuestPanicInformationHyperV',
|
||||
's390': 'GuestPanicInformationS390'}}
|
||||
's390': 'GuestPanicInformationS390',
|
||||
'tdx' : 'GuestPanicInformationTdx'}}
|
||||
|
||||
##
|
||||
# @GuestPanicInformationHyperV:
|
||||
@@ -577,6 +580,26 @@
|
||||
'psw-addr': 'uint64',
|
||||
'reason': 'S390CrashReason'}}
|
||||
|
||||
##
|
||||
# @GuestPanicInformationTdx:
|
||||
#
|
||||
# TDX GHCI TDG.VP.VMCALL<ReportFatalError> specific guest panic information
|
||||
#
|
||||
# @error-code: TD-specific error code
|
||||
#
|
||||
# @gpa: 4KB-aligned guest physical address of the page that containing
|
||||
# additional error data
|
||||
#
|
||||
# @message: TD guest provided message string. (It's not so trustable
|
||||
# and cannot be assumed to be well formed because it comes from guest)
|
||||
#
|
||||
# Since: 8.2
|
||||
##
|
||||
{'struct': 'GuestPanicInformationTdx',
|
||||
'data': {'error-code': 'uint64',
|
||||
'gpa': 'uint64',
|
||||
'message': 'str'}}
|
||||
|
||||
##
|
||||
# @MEMORY_FAILURE:
|
||||
#
|
||||
|
||||
Submodule roms/seabios-hppa updated: fd5b6cf823...2a23dd388f
@@ -76,7 +76,8 @@ class QAPISchemaEntity:
|
||||
def __repr__(self):
|
||||
if self.name is None:
|
||||
return "<%s at 0x%x>" % (type(self).__name__, id(self))
|
||||
return "<%s:%s at 0x%x>" % type(self).__name__, self.name, id(self)
|
||||
return "<%s:%s at 0x%x>" % (type(self).__name__, self.name,
|
||||
id(self))
|
||||
|
||||
def c_name(self):
|
||||
return c_name(self.name)
|
||||
|
||||
@@ -1862,6 +1862,24 @@ bool memory_region_is_protected(MemoryRegion *mr)
|
||||
return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
|
||||
}
|
||||
|
||||
bool memory_region_has_guest_memfd(MemoryRegion *mr)
|
||||
{
|
||||
return mr->ram_block && mr->ram_block->guest_memfd >= 0;
|
||||
}
|
||||
|
||||
bool memory_region_is_default_private(MemoryRegion *mr)
|
||||
{
|
||||
return memory_region_has_guest_memfd(mr) &&
|
||||
(mr->ram_block->flags & RAM_DEFAULT_PRIVATE);
|
||||
}
|
||||
|
||||
void memory_region_set_default_private(MemoryRegion *mr)
|
||||
{
|
||||
if (memory_region_has_guest_memfd(mr)) {
|
||||
mr->ram_block->flags |= RAM_DEFAULT_PRIVATE;
|
||||
}
|
||||
}
|
||||
|
||||
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
|
||||
{
|
||||
uint8_t mask = mr->dirty_log_mask;
|
||||
@@ -3614,6 +3632,33 @@ void memory_region_init_ram(MemoryRegion *mr,
|
||||
vmstate_register_ram(mr, owner_dev);
|
||||
}
|
||||
|
||||
void memory_region_init_ram_guest_memfd(MemoryRegion *mr,
|
||||
Object *owner,
|
||||
const char *name,
|
||||
uint64_t size,
|
||||
Error **errp)
|
||||
{
|
||||
DeviceState *owner_dev;
|
||||
Error *err = NULL;
|
||||
|
||||
memory_region_init_ram_flags_nomigrate(mr, owner, name, size,
|
||||
RAM_GUEST_MEMFD, &err);
|
||||
if (err) {
|
||||
error_propagate(errp, err);
|
||||
return;
|
||||
}
|
||||
memory_region_set_default_private(mr);
|
||||
|
||||
/* This will assert if owner is neither NULL nor a DeviceState.
|
||||
* We only want the owner here for the purposes of defining a
|
||||
* unique name for migration. TODO: Ideally we should implement
|
||||
* a naming scheme for Objects which are not DeviceStates, in
|
||||
* which case we can relax this restriction.
|
||||
*/
|
||||
owner_dev = DEVICE(owner);
|
||||
vmstate_register_ram(mr, owner_dev);
|
||||
}
|
||||
|
||||
void memory_region_init_rom(MemoryRegion *mr,
|
||||
Object *owner,
|
||||
const char *name,
|
||||
|
||||
151
system/physmem.c
151
system/physmem.c
@@ -1803,6 +1803,40 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KVM
|
||||
#define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
|
||||
#define DEFAULT_PMD_SIZE (1ul << 21)
|
||||
|
||||
static uint32_t get_thp_size(void)
|
||||
{
|
||||
gchar *content = NULL;
|
||||
const char *endptr;
|
||||
static uint64_t thp_size = 0;
|
||||
uint64_t tmp;
|
||||
|
||||
if (thp_size != 0) {
|
||||
return thp_size;
|
||||
}
|
||||
|
||||
if (g_file_get_contents(HPAGE_PMD_SIZE_PATH, &content, NULL, NULL) &&
|
||||
!qemu_strtou64(content, &endptr, 0, &tmp) &&
|
||||
(!endptr || *endptr == '\n')) {
|
||||
/* Sanity-check the value and fallback to something reasonable. */
|
||||
if (!tmp || !is_power_of_2(tmp)) {
|
||||
warn_report("Read unsupported THP size: %" PRIx64, tmp);
|
||||
} else {
|
||||
thp_size = tmp;
|
||||
}
|
||||
}
|
||||
|
||||
if (!thp_size) {
|
||||
thp_size = DEFAULT_PMD_SIZE;
|
||||
}
|
||||
|
||||
return thp_size;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void ram_block_add(RAMBlock *new_block, Error **errp)
|
||||
{
|
||||
const bool noreserve = qemu_ram_is_noreserve(new_block);
|
||||
@@ -1841,6 +1875,20 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KVM
|
||||
if (kvm_enabled() && new_block->flags & RAM_GUEST_MEMFD &&
|
||||
new_block->guest_memfd < 0) {
|
||||
uint64_t flags = QEMU_IS_ALIGNED(new_block->max_length, get_thp_size()) ?
|
||||
KVM_GUEST_MEMFD_ALLOW_HUGEPAGE : 0;
|
||||
new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length,
|
||||
flags, errp);
|
||||
if (new_block->guest_memfd < 0) {
|
||||
qemu_mutex_unlock_ramlist();
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
new_ram_size = MAX(old_ram_size,
|
||||
(new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
|
||||
if (new_ram_size > old_ram_size) {
|
||||
@@ -1903,7 +1951,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
|
||||
/* Just support these ram flags by now. */
|
||||
assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE |
|
||||
RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY |
|
||||
RAM_READONLY_FD)) == 0);
|
||||
RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0);
|
||||
|
||||
if (xen_enabled()) {
|
||||
error_setg(errp, "-mem-path not supported with Xen");
|
||||
@@ -1938,6 +1986,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
|
||||
new_block->used_length = size;
|
||||
new_block->max_length = size;
|
||||
new_block->flags = ram_flags;
|
||||
new_block->guest_memfd = -1;
|
||||
new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset,
|
||||
errp);
|
||||
if (!new_block->host) {
|
||||
@@ -2016,7 +2065,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
|
||||
Error *local_err = NULL;
|
||||
|
||||
assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
|
||||
RAM_NORESERVE)) == 0);
|
||||
RAM_NORESERVE| RAM_GUEST_MEMFD)) == 0);
|
||||
assert(!host ^ (ram_flags & RAM_PREALLOC));
|
||||
|
||||
size = HOST_PAGE_ALIGN(size);
|
||||
@@ -2028,6 +2077,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
|
||||
new_block->max_length = max_size;
|
||||
assert(max_size >= size);
|
||||
new_block->fd = -1;
|
||||
new_block->guest_memfd = -1;
|
||||
new_block->page_size = qemu_real_host_page_size();
|
||||
new_block->host = host;
|
||||
new_block->flags = ram_flags;
|
||||
@@ -2050,7 +2100,7 @@ RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
|
||||
RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags,
|
||||
MemoryRegion *mr, Error **errp)
|
||||
{
|
||||
assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE)) == 0);
|
||||
assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
|
||||
return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp);
|
||||
}
|
||||
|
||||
@@ -2078,6 +2128,11 @@ static void reclaim_ramblock(RAMBlock *block)
|
||||
} else {
|
||||
qemu_anon_ram_free(block->host, block->max_length);
|
||||
}
|
||||
|
||||
if (block->guest_memfd >= 0) {
|
||||
close(block->guest_memfd);
|
||||
}
|
||||
|
||||
g_free(block);
|
||||
}
|
||||
|
||||
@@ -3477,17 +3532,16 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
|
||||
|
||||
uint8_t *host_startaddr = rb->host + start;
|
||||
|
||||
if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
|
||||
error_report("ram_block_discard_range: Unaligned start address: %p",
|
||||
host_startaddr);
|
||||
if (!QEMU_PTR_IS_ALIGNED(host_startaddr, qemu_host_page_size)) {
|
||||
error_report("%s: Unaligned start address: %p",
|
||||
__func__, host_startaddr);
|
||||
goto err;
|
||||
}
|
||||
|
||||
if ((start + length) <= rb->max_length) {
|
||||
bool need_madvise, need_fallocate;
|
||||
if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
|
||||
error_report("ram_block_discard_range: Unaligned length: %zx",
|
||||
length);
|
||||
error_report("%s: Unaligned length: %zx", __func__, length);
|
||||
goto err;
|
||||
}
|
||||
|
||||
@@ -3511,8 +3565,8 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
|
||||
* proper error message.
|
||||
*/
|
||||
if (rb->flags & RAM_READONLY_FD) {
|
||||
error_report("ram_block_discard_range: Discarding RAM"
|
||||
" with readonly files is not supported");
|
||||
error_report("%s: Discarding RAM with readonly files is not"
|
||||
" supported", __func__);
|
||||
goto err;
|
||||
|
||||
}
|
||||
@@ -3527,27 +3581,26 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
|
||||
* file.
|
||||
*/
|
||||
if (!qemu_ram_is_shared(rb)) {
|
||||
warn_report_once("ram_block_discard_range: Discarding RAM"
|
||||
warn_report_once("%s: Discarding RAM"
|
||||
" in private file mappings is possibly"
|
||||
" dangerous, because it will modify the"
|
||||
" underlying file and will affect other"
|
||||
" users of the file");
|
||||
" users of the file", __func__);
|
||||
}
|
||||
|
||||
ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
|
||||
start, length);
|
||||
if (ret) {
|
||||
ret = -errno;
|
||||
error_report("ram_block_discard_range: Failed to fallocate "
|
||||
"%s:%" PRIx64 " +%zx (%d)",
|
||||
rb->idstr, start, length, ret);
|
||||
error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
|
||||
__func__, rb->idstr, start, length, ret);
|
||||
goto err;
|
||||
}
|
||||
#else
|
||||
ret = -ENOSYS;
|
||||
error_report("ram_block_discard_range: fallocate not available/file"
|
||||
error_report("%s: fallocate not available/file"
|
||||
"%s:%" PRIx64 " +%zx (%d)",
|
||||
rb->idstr, start, length, ret);
|
||||
__func__, rb->idstr, start, length, ret);
|
||||
goto err;
|
||||
#endif
|
||||
}
|
||||
@@ -3565,31 +3618,52 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
|
||||
}
|
||||
if (ret) {
|
||||
ret = -errno;
|
||||
error_report("ram_block_discard_range: Failed to discard range "
|
||||
error_report("%s: Failed to discard range "
|
||||
"%s:%" PRIx64 " +%zx (%d)",
|
||||
rb->idstr, start, length, ret);
|
||||
__func__, rb->idstr, start, length, ret);
|
||||
goto err;
|
||||
}
|
||||
#else
|
||||
ret = -ENOSYS;
|
||||
error_report("ram_block_discard_range: MADVISE not available"
|
||||
"%s:%" PRIx64 " +%zx (%d)",
|
||||
rb->idstr, start, length, ret);
|
||||
error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)",
|
||||
__func__, rb->idstr, start, length, ret);
|
||||
goto err;
|
||||
#endif
|
||||
}
|
||||
trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
|
||||
need_madvise, need_fallocate, ret);
|
||||
} else {
|
||||
error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
|
||||
"/%zx/" RAM_ADDR_FMT")",
|
||||
rb->idstr, start, length, rb->max_length);
|
||||
error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")",
|
||||
__func__, rb->idstr, start, length, rb->max_length);
|
||||
}
|
||||
|
||||
err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
|
||||
size_t length)
|
||||
{
|
||||
int ret = -1;
|
||||
|
||||
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
|
||||
ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
|
||||
start, length);
|
||||
|
||||
if (ret) {
|
||||
ret = -errno;
|
||||
error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
|
||||
__func__, rb->idstr, start, length, ret);
|
||||
}
|
||||
#else
|
||||
ret = -ENOSYS;
|
||||
error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
|
||||
__func__, rb->idstr, start, length, ret);
|
||||
#endif
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool ramblock_is_pmem(RAMBlock *rb)
|
||||
{
|
||||
return rb->flags & RAM_PMEM;
|
||||
@@ -3777,3 +3851,30 @@ bool ram_block_discard_is_required(void)
|
||||
return qatomic_read(&ram_block_discard_required_cnt) ||
|
||||
qatomic_read(&ram_block_coordinated_discard_required_cnt);
|
||||
}
|
||||
|
||||
int ram_block_convert_range(RAMBlock *rb, uint64_t start, size_t length,
|
||||
bool shared_to_private)
|
||||
{
|
||||
if (!rb || rb->guest_memfd < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!QEMU_PTR_IS_ALIGNED(start, qemu_host_page_size) ||
|
||||
!QEMU_PTR_IS_ALIGNED(length, qemu_host_page_size)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!length) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (start + length > rb->max_length) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (shared_to_private) {
|
||||
return ram_block_discard_range(rb, start, length);
|
||||
} else {
|
||||
return ram_block_discard_guest_memfd_range(rb, start, length);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -518,6 +518,52 @@ static void qemu_system_wakeup(void)
|
||||
}
|
||||
}
|
||||
|
||||
static char* tdx_parse_panic_message(char *message)
|
||||
{
|
||||
bool printable = false;
|
||||
char *buf = NULL;
|
||||
int len = 0, i;
|
||||
|
||||
/*
|
||||
* Although message is defined as a json string, we shouldn't
|
||||
* unconditionally treat it as is because the guest generated it and
|
||||
* it's not necessarily trustable.
|
||||
*/
|
||||
if (message) {
|
||||
/* The caller guarantees the NUL-terminated string. */
|
||||
len = strlen(message);
|
||||
|
||||
printable = len > 0;
|
||||
for (i = 0; i < len; i++) {
|
||||
if (!(0x20 <= message[i] && message[i] <= 0x7e)) {
|
||||
printable = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!printable && len) {
|
||||
/* 3 = length of "%02x " */
|
||||
buf = g_malloc(len * 3);
|
||||
for (i = 0; i < len; i++) {
|
||||
if (message[i] == '\0') {
|
||||
break;
|
||||
} else {
|
||||
sprintf(buf + 3 * i, "%02x ", message[i]);
|
||||
}
|
||||
}
|
||||
if (i > 0)
|
||||
/* replace the last ' '(space) to NUL */
|
||||
buf[i * 3 - 1] = '\0';
|
||||
else
|
||||
buf[0] = '\0';
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
return message;
|
||||
}
|
||||
|
||||
void qemu_system_guest_panicked(GuestPanicInformation *info)
|
||||
{
|
||||
qemu_log_mask(LOG_GUEST_ERROR, "Guest crashed");
|
||||
@@ -559,7 +605,15 @@ void qemu_system_guest_panicked(GuestPanicInformation *info)
|
||||
S390CrashReason_str(info->u.s390.reason),
|
||||
info->u.s390.psw_mask,
|
||||
info->u.s390.psw_addr);
|
||||
} else if (info->type == GUEST_PANIC_INFORMATION_TYPE_TDX) {
|
||||
qemu_log_mask(LOG_GUEST_ERROR,
|
||||
" TDX guest reports fatal error:\"%s\""
|
||||
" error code: 0x%016" PRIx64 " gpa page: 0x%016" PRIx64 "\n",
|
||||
tdx_parse_panic_message(info->u.tdx.message),
|
||||
info->u.tdx.error_code,
|
||||
info->u.tdx.gpa);
|
||||
}
|
||||
|
||||
qapi_free_GuestPanicInformation(info);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -351,6 +351,7 @@ static void cortex_a8_initfn(Object *obj)
|
||||
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
|
||||
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
|
||||
set_feature(&cpu->env, ARM_FEATURE_EL3);
|
||||
set_feature(&cpu->env, ARM_FEATURE_PMU);
|
||||
cpu->midr = 0x410fc080;
|
||||
cpu->reset_fpsid = 0x410330c0;
|
||||
cpu->isar.mvfr0 = 0x11110222;
|
||||
@@ -418,6 +419,7 @@ static void cortex_a9_initfn(Object *obj)
|
||||
set_feature(&cpu->env, ARM_FEATURE_NEON);
|
||||
set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
|
||||
set_feature(&cpu->env, ARM_FEATURE_EL3);
|
||||
set_feature(&cpu->env, ARM_FEATURE_PMU);
|
||||
/*
|
||||
* Note that A9 supports the MP extensions even for
|
||||
* A9UP and single-core A9MP (which are both different
|
||||
|
||||
@@ -1101,10 +1101,18 @@ uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
|
||||
uint32_t n;
|
||||
|
||||
mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
|
||||
/* True probe; this will never fault */
|
||||
/*
|
||||
* True probe; this will never fault. Note that our caller passes
|
||||
* us a pointer to the end of the region, but allocation_tag_mem_probe()
|
||||
* wants a pointer to the start. Because we know we don't span a page
|
||||
* boundary and that allocation_tag_mem_probe() doesn't otherwise care
|
||||
* about the size, pass in a size of 1 byte. This is simpler than
|
||||
* adjusting the ptr to point to the start of the region and then having
|
||||
* to adjust the returned 'mem' to get the end of the tag memory.
|
||||
*/
|
||||
mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
|
||||
w ? MMU_DATA_STORE : MMU_DATA_LOAD,
|
||||
size, MMU_DATA_LOAD, true, 0);
|
||||
1, MMU_DATA_LOAD, true, 0);
|
||||
if (!mem) {
|
||||
return size;
|
||||
}
|
||||
|
||||
@@ -2351,6 +2351,8 @@ static bool trans_SVC(DisasContext *s, arg_i *a)
|
||||
|
||||
static bool trans_HVC(DisasContext *s, arg_i *a)
|
||||
{
|
||||
int target_el = s->current_el == 3 ? 3 : 2;
|
||||
|
||||
if (s->current_el == 0) {
|
||||
unallocated_encoding(s);
|
||||
return true;
|
||||
@@ -2363,7 +2365,7 @@ static bool trans_HVC(DisasContext *s, arg_i *a)
|
||||
gen_helper_pre_hvc(tcg_env);
|
||||
/* Architecture requires ss advance before we do the actual work */
|
||||
gen_ss_advance(s);
|
||||
gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
|
||||
gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@@ -14,7 +14,8 @@
|
||||
# define TARGET_PHYS_ADDR_SPACE_BITS 32
|
||||
# define TARGET_VIRT_ADDR_SPACE_BITS 32
|
||||
#else
|
||||
# define TARGET_PHYS_ADDR_SPACE_BITS 64
|
||||
/* ??? PA-8000 through 8600 have 40 bits; PA-8700 and 8900 have 44 bits. */
|
||||
# define TARGET_PHYS_ADDR_SPACE_BITS 40
|
||||
# define TARGET_VIRT_ADDR_SPACE_BITS 64
|
||||
#endif
|
||||
|
||||
|
||||
@@ -31,23 +31,25 @@
|
||||
basis. It's probably easier to fall back to a strong memory model. */
|
||||
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
|
||||
|
||||
#define MMU_KERNEL_IDX 7
|
||||
#define MMU_KERNEL_P_IDX 8
|
||||
#define MMU_PL1_IDX 9
|
||||
#define MMU_PL1_P_IDX 10
|
||||
#define MMU_PL2_IDX 11
|
||||
#define MMU_PL2_P_IDX 12
|
||||
#define MMU_USER_IDX 13
|
||||
#define MMU_USER_P_IDX 14
|
||||
#define MMU_PHYS_IDX 15
|
||||
#define MMU_ABS_W_IDX 6
|
||||
#define MMU_ABS_IDX 7
|
||||
#define MMU_KERNEL_IDX 8
|
||||
#define MMU_KERNEL_P_IDX 9
|
||||
#define MMU_PL1_IDX 10
|
||||
#define MMU_PL1_P_IDX 11
|
||||
#define MMU_PL2_IDX 12
|
||||
#define MMU_PL2_P_IDX 13
|
||||
#define MMU_USER_IDX 14
|
||||
#define MMU_USER_P_IDX 15
|
||||
|
||||
#define MMU_IDX_MMU_DISABLED(MIDX) ((MIDX) < MMU_KERNEL_IDX)
|
||||
#define MMU_IDX_TO_PRIV(MIDX) (((MIDX) - MMU_KERNEL_IDX) / 2)
|
||||
#define MMU_IDX_TO_P(MIDX) (((MIDX) - MMU_KERNEL_IDX) & 1)
|
||||
#define PRIV_P_TO_MMU_IDX(PRIV, P) ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
|
||||
|
||||
#define TARGET_INSN_START_EXTRA_WORDS 2
|
||||
|
||||
/* No need to flush MMU_PHYS_IDX */
|
||||
/* No need to flush MMU_ABS*_IDX */
|
||||
#define HPPA_MMU_FLUSH_MASK \
|
||||
(1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX | \
|
||||
1 << MMU_PL1_IDX | 1 << MMU_PL1_P_IDX | \
|
||||
@@ -287,7 +289,8 @@ static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
|
||||
if (env->psw & (ifetch ? PSW_C : PSW_D)) {
|
||||
return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
|
||||
}
|
||||
return MMU_PHYS_IDX; /* mmu disabled */
|
||||
/* mmu disabled */
|
||||
return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@@ -126,7 +126,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)
|
||||
env->cr[CR_IIASQ] =
|
||||
hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
|
||||
env->cr_back[0] =
|
||||
hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
|
||||
hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32;
|
||||
} else {
|
||||
env->cr[CR_IIASQ] = 0;
|
||||
env->cr_back[0] = 0;
|
||||
|
||||
@@ -27,41 +27,39 @@
|
||||
|
||||
hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
|
||||
{
|
||||
if (likely(extract64(addr, 58, 4) != 0xf)) {
|
||||
/* Memory address space */
|
||||
return addr & MAKE_64BIT_MASK(0, 62);
|
||||
}
|
||||
if (extract64(addr, 54, 4) != 0) {
|
||||
/* I/O address space */
|
||||
return addr | MAKE_64BIT_MASK(62, 2);
|
||||
}
|
||||
/* PDC address space */
|
||||
return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
|
||||
/*
|
||||
* Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
|
||||
* an algorithm in which a 62-bit absolute address is transformed to
|
||||
* a 64-bit physical address. This must then be combined with that
|
||||
* pictured in Figure H-11 "Physical Address Space Mapping", in which
|
||||
* the full physical address is truncated to the N-bit physical address
|
||||
* supported by the implementation.
|
||||
*
|
||||
* Since the supported physical address space is below 54 bits, the
|
||||
* H-8 algorithm is moot and all that is left is to truncate.
|
||||
*/
|
||||
QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
|
||||
return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
|
||||
}
|
||||
|
||||
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
|
||||
{
|
||||
/*
|
||||
* See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
|
||||
* combined with Figure H-11, as above.
|
||||
*/
|
||||
if (likely(extract32(addr, 28, 4) != 0xf)) {
|
||||
/* Memory address space */
|
||||
return addr & MAKE_64BIT_MASK(0, 32);
|
||||
}
|
||||
if (extract32(addr, 24, 4) != 0) {
|
||||
addr = (uint32_t)addr;
|
||||
} else if (extract32(addr, 24, 4) != 0) {
|
||||
/* I/O address space */
|
||||
return addr | MAKE_64BIT_MASK(32, 32);
|
||||
}
|
||||
/* PDC address space */
|
||||
return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
|
||||
}
|
||||
|
||||
static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
|
||||
{
|
||||
if (!hppa_is_pa20(env)) {
|
||||
return addr;
|
||||
} else if (env->psw & PSW_W) {
|
||||
return hppa_abs_to_phys_pa2_w1(addr);
|
||||
addr = (int32_t)addr;
|
||||
} else {
|
||||
return hppa_abs_to_phys_pa2_w0(addr);
|
||||
/* PDC address space */
|
||||
addr &= MAKE_64BIT_MASK(0, 24);
|
||||
addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
|
||||
@@ -161,9 +159,22 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
|
||||
*tlb_entry = NULL;
|
||||
}
|
||||
|
||||
/* Virtual translation disabled. Direct map virtual to physical. */
|
||||
if (mmu_idx == MMU_PHYS_IDX) {
|
||||
phys = addr;
|
||||
/* Virtual translation disabled. Map absolute to physical. */
|
||||
if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
|
||||
switch (mmu_idx) {
|
||||
case MMU_ABS_W_IDX:
|
||||
phys = hppa_abs_to_phys_pa2_w1(addr);
|
||||
break;
|
||||
case MMU_ABS_IDX:
|
||||
if (hppa_is_pa20(env)) {
|
||||
phys = hppa_abs_to_phys_pa2_w0(addr);
|
||||
} else {
|
||||
phys = (uint32_t)addr;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
g_assert_not_reached();
|
||||
}
|
||||
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
goto egress;
|
||||
}
|
||||
@@ -261,7 +272,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
|
||||
}
|
||||
|
||||
egress:
|
||||
*pphys = phys = hppa_abs_to_phys(env, phys);
|
||||
*pphys = phys;
|
||||
*pprot = prot;
|
||||
trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
|
||||
return ret;
|
||||
@@ -271,16 +282,15 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
|
||||
{
|
||||
HPPACPU *cpu = HPPA_CPU(cs);
|
||||
hwaddr phys;
|
||||
int prot, excp;
|
||||
int prot, excp, mmu_idx;
|
||||
|
||||
/* If the (data) mmu is disabled, bypass translation. */
|
||||
/* ??? We really ought to know if the code mmu is disabled too,
|
||||
in order to get the correct debugging dumps. */
|
||||
if (!(cpu->env.psw & PSW_D)) {
|
||||
return hppa_abs_to_phys(&cpu->env, addr);
|
||||
}
|
||||
mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
|
||||
cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
|
||||
|
||||
excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
|
||||
excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
|
||||
&phys, &prot, NULL);
|
||||
|
||||
/* Since we're translating for debugging, the only error that is a
|
||||
@@ -367,8 +377,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
|
||||
trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
|
||||
|
||||
/* Failure. Raise the indicated exception. */
|
||||
raise_exception_with_ior(env, excp, retaddr,
|
||||
addr, mmu_idx == MMU_PHYS_IDX);
|
||||
raise_exception_with_ior(env, excp, retaddr, addr,
|
||||
MMU_IDX_MMU_DISABLED(mmu_idx));
|
||||
}
|
||||
|
||||
trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
|
||||
@@ -450,7 +460,7 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
|
||||
int mask_shift;
|
||||
|
||||
mask_shift = 2 * (r1 & 0xf);
|
||||
va_size = TARGET_PAGE_SIZE << mask_shift;
|
||||
va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
|
||||
va_b &= -va_size;
|
||||
va_e = va_b + va_size - 1;
|
||||
|
||||
@@ -459,7 +469,14 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
|
||||
|
||||
ent->itree.start = va_b;
|
||||
ent->itree.last = va_e;
|
||||
ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
|
||||
|
||||
/* Extract all 52 bits present in the page table entry. */
|
||||
ent->pa = r1 << (TARGET_PAGE_BITS - 5);
|
||||
/* Align per the page size. */
|
||||
ent->pa &= TARGET_PAGE_MASK << mask_shift;
|
||||
/* Ignore the bits beyond physical address space. */
|
||||
ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);
|
||||
|
||||
ent->t = extract64(r2, 61, 1);
|
||||
ent->d = extract64(r2, 60, 1);
|
||||
ent->b = extract64(r2, 59, 1);
|
||||
@@ -505,7 +522,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
|
||||
*/
|
||||
end = start & 0xf;
|
||||
start &= TARGET_PAGE_MASK;
|
||||
end = TARGET_PAGE_SIZE << (2 * end);
|
||||
end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
|
||||
end = start + end - 1;
|
||||
|
||||
hppa_flush_tlb_range(env, start, end);
|
||||
|
||||
@@ -338,7 +338,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
return page_check_range(addr, 1, want);
|
||||
#else
|
||||
int prot, excp;
|
||||
int prot, excp, mmu_idx;
|
||||
hwaddr phys;
|
||||
|
||||
trace_hppa_tlb_probe(addr, level, want);
|
||||
@@ -347,7 +347,8 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
excp = hppa_get_physical_address(env, addr, level, 0, &phys,
|
||||
mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
|
||||
excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys,
|
||||
&prot, NULL);
|
||||
if (excp >= 0) {
|
||||
if (env->psw & PSW_Q) {
|
||||
|
||||
@@ -69,19 +69,24 @@ typedef struct DisasContext {
|
||||
} DisasContext;
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define UNALIGN(C) (C)->unalign
|
||||
#define UNALIGN(C) (C)->unalign
|
||||
#define MMU_DISABLED(C) false
|
||||
#else
|
||||
#define UNALIGN(C) MO_ALIGN
|
||||
#define UNALIGN(C) MO_ALIGN
|
||||
#define MMU_DISABLED(C) MMU_IDX_MMU_DISABLED((C)->mmu_idx)
|
||||
#endif
|
||||
|
||||
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
|
||||
static int expand_sm_imm(DisasContext *ctx, int val)
|
||||
{
|
||||
if (val & PSW_SM_E) {
|
||||
val = (val & ~PSW_SM_E) | PSW_E;
|
||||
}
|
||||
if (val & PSW_SM_W) {
|
||||
val = (val & ~PSW_SM_W) | PSW_W;
|
||||
/* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
|
||||
if (ctx->is_pa20) {
|
||||
if (val & PSW_SM_W) {
|
||||
val |= PSW_W;
|
||||
}
|
||||
val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
|
||||
} else {
|
||||
val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
|
||||
}
|
||||
return val;
|
||||
}
|
||||
@@ -1372,7 +1377,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
|
||||
assert(ctx->null_cond.c == TCG_COND_NEVER);
|
||||
|
||||
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
|
||||
ctx->mmu_idx == MMU_PHYS_IDX);
|
||||
MMU_DISABLED(ctx));
|
||||
tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
|
||||
if (modify) {
|
||||
save_gpr(ctx, rb, ofs);
|
||||
@@ -1390,7 +1395,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
|
||||
assert(ctx->null_cond.c == TCG_COND_NEVER);
|
||||
|
||||
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
|
||||
ctx->mmu_idx == MMU_PHYS_IDX);
|
||||
MMU_DISABLED(ctx));
|
||||
tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
|
||||
if (modify) {
|
||||
save_gpr(ctx, rb, ofs);
|
||||
@@ -1408,7 +1413,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
|
||||
assert(ctx->null_cond.c == TCG_COND_NEVER);
|
||||
|
||||
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
|
||||
ctx->mmu_idx == MMU_PHYS_IDX);
|
||||
MMU_DISABLED(ctx));
|
||||
tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
|
||||
if (modify) {
|
||||
save_gpr(ctx, rb, ofs);
|
||||
@@ -1426,7 +1431,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
|
||||
assert(ctx->null_cond.c == TCG_COND_NEVER);
|
||||
|
||||
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
|
||||
ctx->mmu_idx == MMU_PHYS_IDX);
|
||||
MMU_DISABLED(ctx));
|
||||
tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
|
||||
if (modify) {
|
||||
save_gpr(ctx, rb, ofs);
|
||||
@@ -2294,7 +2299,7 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a)
|
||||
form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
|
||||
|
||||
if (a->imm) {
|
||||
level = tcg_constant_i32(a->ri);
|
||||
level = tcg_constant_i32(a->ri & 3);
|
||||
} else {
|
||||
level = tcg_temp_new_i32();
|
||||
tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
|
||||
@@ -3075,7 +3080,7 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
|
||||
}
|
||||
|
||||
form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
|
||||
a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
|
||||
a->disp, a->sp, a->m, MMU_DISABLED(ctx));
|
||||
|
||||
/*
|
||||
* For hppa1.1, LDCW is undefined unless aligned mod 16.
|
||||
@@ -3105,7 +3110,7 @@ static bool trans_stby(DisasContext *ctx, arg_stby *a)
|
||||
nullify_over(ctx);
|
||||
|
||||
form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
|
||||
ctx->mmu_idx == MMU_PHYS_IDX);
|
||||
MMU_DISABLED(ctx));
|
||||
val = load_gpr(ctx, a->r);
|
||||
if (a->a) {
|
||||
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
|
||||
@@ -3139,7 +3144,7 @@ static bool trans_stdby(DisasContext *ctx, arg_stby *a)
|
||||
nullify_over(ctx);
|
||||
|
||||
form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
|
||||
ctx->mmu_idx == MMU_PHYS_IDX);
|
||||
MMU_DISABLED(ctx));
|
||||
val = load_gpr(ctx, a->r);
|
||||
if (a->a) {
|
||||
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
|
||||
@@ -3167,7 +3172,7 @@ static bool trans_lda(DisasContext *ctx, arg_ldst *a)
|
||||
int hold_mmu_idx = ctx->mmu_idx;
|
||||
|
||||
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
|
||||
ctx->mmu_idx = MMU_PHYS_IDX;
|
||||
ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
|
||||
trans_ld(ctx, a);
|
||||
ctx->mmu_idx = hold_mmu_idx;
|
||||
return true;
|
||||
@@ -3178,7 +3183,7 @@ static bool trans_sta(DisasContext *ctx, arg_ldst *a)
|
||||
int hold_mmu_idx = ctx->mmu_idx;
|
||||
|
||||
CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
|
||||
ctx->mmu_idx = MMU_PHYS_IDX;
|
||||
ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
|
||||
trans_st(ctx, a);
|
||||
ctx->mmu_idx = hold_mmu_idx;
|
||||
return true;
|
||||
@@ -4430,7 +4435,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
|
||||
ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
|
||||
ctx->mmu_idx = (ctx->tb_flags & PSW_D
|
||||
? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
|
||||
: MMU_PHYS_IDX);
|
||||
: ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
|
||||
|
||||
/* Recover the IAOQ values from the GVA + PRIV. */
|
||||
uint64_t cs_base = ctx->base.tb->cs_base;
|
||||
|
||||
@@ -20,6 +20,15 @@
|
||||
#ifndef I386_CPU_INTERNAL_H
|
||||
#define I386_CPU_INTERNAL_H
|
||||
|
||||
typedef struct FeatureMask {
|
||||
FeatureWord index;
|
||||
uint64_t mask;
|
||||
} FeatureMask;
|
||||
|
||||
typedef struct FeatureDep {
|
||||
FeatureMask from, to;
|
||||
} FeatureDep;
|
||||
|
||||
typedef enum FeatureWordType {
|
||||
CPUID_FEATURE_WORD,
|
||||
MSR_FEATURE_WORD,
|
||||
|
||||
@@ -1442,15 +1442,6 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
|
||||
},
|
||||
};
|
||||
|
||||
typedef struct FeatureMask {
|
||||
FeatureWord index;
|
||||
uint64_t mask;
|
||||
} FeatureMask;
|
||||
|
||||
typedef struct FeatureDep {
|
||||
FeatureMask from, to;
|
||||
} FeatureDep;
|
||||
|
||||
static FeatureDep feature_dependencies[] = {
|
||||
{
|
||||
.from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
|
||||
@@ -1575,9 +1566,6 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
|
||||
};
|
||||
#undef REGISTER
|
||||
|
||||
/* CPUID feature bits available in XSS */
|
||||
#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
|
||||
|
||||
ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
|
||||
[XSTATE_FP_BIT] = {
|
||||
/* x87 FP state component is always enabled if XSAVE is supported */
|
||||
|
||||
@@ -588,6 +588,9 @@ typedef enum X86Seg {
|
||||
XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
|
||||
XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
|
||||
|
||||
/* CPUID feature bits available in XSS */
|
||||
#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
|
||||
|
||||
/* CPUID feature words */
|
||||
typedef enum FeatureWord {
|
||||
FEAT_1_EDX, /* CPUID[1].EDX */
|
||||
@@ -780,6 +783,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
|
||||
|
||||
/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
|
||||
#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
|
||||
/* Support for TSC adjustment MSR 0x3B */
|
||||
#define CPUID_7_0_EBX_TSC_ADJUST (1U << 1)
|
||||
/* Support SGX */
|
||||
#define CPUID_7_0_EBX_SGX (1U << 2)
|
||||
/* 1st Group of Advanced Bit Manipulation Extensions */
|
||||
@@ -798,8 +803,12 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
|
||||
#define CPUID_7_0_EBX_INVPCID (1U << 10)
|
||||
/* Restricted Transactional Memory */
|
||||
#define CPUID_7_0_EBX_RTM (1U << 11)
|
||||
/* Cache QoS Monitoring */
|
||||
#define CPUID_7_0_EBX_PQM (1U << 12)
|
||||
/* Memory Protection Extension */
|
||||
#define CPUID_7_0_EBX_MPX (1U << 14)
|
||||
/* Resource Director Technology Allocation */
|
||||
#define CPUID_7_0_EBX_RDT_A (1U << 15)
|
||||
/* AVX-512 Foundation */
|
||||
#define CPUID_7_0_EBX_AVX512F (1U << 16)
|
||||
/* AVX-512 Doubleword & Quadword Instruction */
|
||||
@@ -855,12 +864,20 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
|
||||
#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
|
||||
/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
|
||||
#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
|
||||
/* Intel Total Memory Encryption */
|
||||
#define CPUID_7_0_ECX_TME (1U << 13)
|
||||
/* POPCNT for vectors of DW/QW */
|
||||
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14)
|
||||
/* Placeholder for bit 15 */
|
||||
#define CPUID_7_0_ECX_FZM (1U << 15)
|
||||
/* 5-level Page Tables */
|
||||
#define CPUID_7_0_ECX_LA57 (1U << 16)
|
||||
/* MAWAU for MPX */
|
||||
#define CPUID_7_0_ECX_MAWAU (31U << 17)
|
||||
/* Read Processor ID */
|
||||
#define CPUID_7_0_ECX_RDPID (1U << 22)
|
||||
/* KeyLocker */
|
||||
#define CPUID_7_0_ECX_KeyLocker (1U << 23)
|
||||
/* Bus Lock Debug Exception */
|
||||
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
|
||||
/* Cache Line Demote Instruction */
|
||||
@@ -869,6 +886,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
|
||||
#define CPUID_7_0_ECX_MOVDIRI (1U << 27)
|
||||
/* Move 64 Bytes as Direct Store Instruction */
|
||||
#define CPUID_7_0_ECX_MOVDIR64B (1U << 28)
|
||||
/* ENQCMD and ENQCMDS instructions */
|
||||
#define CPUID_7_0_ECX_ENQCMD (1U << 29)
|
||||
/* Support SGX Launch Control */
|
||||
#define CPUID_7_0_ECX_SGX_LC (1U << 30)
|
||||
/* Protection Keys for Supervisor-mode Pages */
|
||||
@@ -886,6 +905,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
|
||||
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
|
||||
/* TSX Suspend Load Address Tracking instruction */
|
||||
#define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
|
||||
/* PCONFIG instruction */
|
||||
#define CPUID_7_0_EDX_PCONFIG (1U << 18)
|
||||
/* Architectural LBRs */
|
||||
#define CPUID_7_0_EDX_ARCH_LBR (1U << 19)
|
||||
/* AMX_BF16 instruction */
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "hw/boards.h"
|
||||
|
||||
#include "tdx.h"
|
||||
#include "kvm_i386.h"
|
||||
#include "hw/core/accel-cpu.h"
|
||||
|
||||
@@ -60,6 +61,10 @@ static bool lmce_supported(void)
|
||||
if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (is_tdx_vm())
|
||||
return false;
|
||||
|
||||
return !!(mce_cap & MCG_LMCE_P);
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include "sysemu/runstate.h"
|
||||
#include "kvm_i386.h"
|
||||
#include "sev.h"
|
||||
#include "tdx.h"
|
||||
#include "xen-emu.h"
|
||||
#include "hyperv.h"
|
||||
#include "hyperv-proto.h"
|
||||
@@ -61,6 +62,7 @@
|
||||
#include "migration/blocker.h"
|
||||
#include "exec/memattrs.h"
|
||||
#include "trace.h"
|
||||
#include "tdx.h"
|
||||
|
||||
#include CONFIG_DEVICES
|
||||
|
||||
@@ -161,6 +163,36 @@ static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];
|
||||
static RateLimit bus_lock_ratelimit_ctrl;
|
||||
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
|
||||
|
||||
static const char* vm_type_name[] = {
|
||||
[KVM_X86_DEFAULT_VM] = "default",
|
||||
[KVM_X86_SW_PROTECTED_VM] = "sw-protected-vm",
|
||||
[KVM_X86_TDX_VM] = "tdx",
|
||||
};
|
||||
|
||||
int kvm_get_vm_type(MachineState *ms, const char *vm_type)
|
||||
{
|
||||
int kvm_type = KVM_X86_DEFAULT_VM;
|
||||
|
||||
if (ms->cgs && object_dynamic_cast(OBJECT(ms->cgs), TYPE_TDX_GUEST)) {
|
||||
kvm_type = KVM_X86_TDX_VM;
|
||||
}
|
||||
|
||||
/*
|
||||
* old KVM doesn't support KVM_CAP_VM_TYPES and KVM_X86_DEFAULT_VM
|
||||
* is always supported
|
||||
*/
|
||||
if (kvm_type == KVM_X86_DEFAULT_VM) {
|
||||
return kvm_type;
|
||||
}
|
||||
|
||||
if (!(kvm_check_extension(KVM_STATE(ms->accelerator), KVM_CAP_VM_TYPES) & BIT(kvm_type))) {
|
||||
error_report("vm-type %s not supported by KVM", vm_type_name[kvm_type]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return kvm_type;
|
||||
}
|
||||
|
||||
bool kvm_has_smm(void)
|
||||
{
|
||||
return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
|
||||
@@ -247,7 +279,7 @@ void kvm_synchronize_all_tsc(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
if (kvm_enabled()) {
|
||||
if (kvm_enabled() && !is_tdx_vm()) {
|
||||
CPU_FOREACH(cpu) {
|
||||
run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
|
||||
}
|
||||
@@ -490,6 +522,10 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
|
||||
ret |= 1U << KVM_HINTS_REALTIME;
|
||||
}
|
||||
|
||||
if (is_tdx_vm()) {
|
||||
tdx_get_supported_cpuid(function, index, reg, &ret);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -759,6 +795,15 @@ static int kvm_arch_set_tsc_khz(CPUState *cs)
|
||||
int r, cur_freq;
|
||||
bool set_ioctl = false;
|
||||
|
||||
/*
|
||||
* TSC of TD vcpu is immutable, it cannot be set/changed via vcpu scope
|
||||
* VM_SET_TSC_KHZ, but only be initialized via VM scope VM_SET_TSC_KHZ
|
||||
* before ioctl KVM_TDX_INIT_VM in tdx_pre_create_vcpu()
|
||||
*/
|
||||
if (is_tdx_vm()) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!env->tsc_khz) {
|
||||
return 0;
|
||||
}
|
||||
@@ -1655,8 +1700,6 @@ static int hyperv_init_vcpu(X86CPU *cpu)
|
||||
|
||||
static Error *invtsc_mig_blocker;
|
||||
|
||||
#define KVM_MAX_CPUID_ENTRIES 100
|
||||
|
||||
static void kvm_init_xsave(CPUX86State *env)
|
||||
{
|
||||
if (has_xsave2) {
|
||||
@@ -1699,6 +1742,236 @@ static void kvm_init_nested_state(CPUX86State *env)
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t kvm_x86_arch_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
|
||||
uint32_t cpuid_i)
|
||||
{
|
||||
uint32_t limit, i, j;
|
||||
uint32_t unused;
|
||||
struct kvm_cpuid_entry2 *c;
|
||||
|
||||
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
|
||||
|
||||
for (i = 0; i <= limit; i++) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "unsupported level value: 0x%x\n", limit);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
|
||||
switch (i) {
|
||||
case 2: {
|
||||
/* Keep reading function 2 till all the input is received */
|
||||
int times;
|
||||
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
|
||||
KVM_CPUID_FLAG_STATE_READ_NEXT;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
times = c->eax & 0xff;
|
||||
|
||||
for (j = 1; j < times; ++j) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:2):eax & 0xf = 0x%x\n", times);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 0x1f:
|
||||
if (env->nr_dies < 2) {
|
||||
cpuid_i--;
|
||||
break;
|
||||
}
|
||||
/* fallthrough */
|
||||
case 4:
|
||||
case 0xb:
|
||||
case 0xd:
|
||||
for (j = 0; ; j++) {
|
||||
if (i == 0xd && j == 64) {
|
||||
break;
|
||||
}
|
||||
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
c->index = j;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
|
||||
if (i == 4 && c->eax == 0) {
|
||||
break;
|
||||
}
|
||||
if (i == 0xb && !(c->ecx & 0xff00)) {
|
||||
break;
|
||||
}
|
||||
if (i == 0x1f && !(c->ecx & 0xff00)) {
|
||||
break;
|
||||
}
|
||||
if (i == 0xd && c->eax == 0) {
|
||||
continue;
|
||||
}
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
}
|
||||
break;
|
||||
case 0x7:
|
||||
case 0x12:
|
||||
for (j = 0; ; j++) {
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
c->index = j;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
|
||||
if (j > 1 && (c->eax & 0xf) != 1) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x12,ecx:0x%x)\n", j);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
}
|
||||
break;
|
||||
case 0x14:
|
||||
case 0x1d:
|
||||
case 0x1e: {
|
||||
uint32_t times;
|
||||
|
||||
c->function = i;
|
||||
c->index = 0;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
times = c->eax;
|
||||
|
||||
for (j = 1; j <= times; ++j) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
c->function = i;
|
||||
c->index = j;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
c->function = i;
|
||||
c->flags = 0;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
|
||||
/*
|
||||
* KVM already returns all zeroes if a CPUID entry is missing,
|
||||
* so we can omit it and avoid hitting KVM's 80-entry limit.
|
||||
*/
|
||||
cpuid_i--;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (limit >= 0x0a) {
|
||||
uint32_t eax, edx;
|
||||
|
||||
cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
|
||||
|
||||
has_architectural_pmu_version = eax & 0xff;
|
||||
if (has_architectural_pmu_version > 0) {
|
||||
num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
|
||||
|
||||
/* Shouldn't be more than 32, since that's the number of bits
|
||||
* available in EBX to tell us _which_ counters are available.
|
||||
* Play it safe.
|
||||
*/
|
||||
if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
|
||||
num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
|
||||
}
|
||||
|
||||
if (has_architectural_pmu_version > 1) {
|
||||
num_architectural_pmu_fixed_counters = edx & 0x1f;
|
||||
|
||||
if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
|
||||
num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
|
||||
|
||||
for (i = 0x80000000; i <= limit; i++) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
|
||||
switch (i) {
|
||||
case 0x8000001d:
|
||||
/* Query for all AMD cache information leaves */
|
||||
for (j = 0; ; j++) {
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
c->index = j;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
|
||||
if (c->eax == 0) {
|
||||
break;
|
||||
}
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
}
|
||||
break;
|
||||
default:
|
||||
c->function = i;
|
||||
c->flags = 0;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
|
||||
/*
|
||||
* KVM already returns all zeroes if a CPUID entry is missing,
|
||||
* so we can omit it and avoid hitting KVM's 80-entry limit.
|
||||
*/
|
||||
cpuid_i--;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Call Centaur's CPUID instructions they are supported. */
|
||||
if (env->cpuid_xlevel2 > 0) {
|
||||
cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
|
||||
|
||||
for (i = 0xC0000000; i <= limit; i++) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
|
||||
abort();
|
||||
}
|
||||
c = &entries[cpuid_i++];
|
||||
|
||||
c->function = i;
|
||||
c->flags = 0;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
}
|
||||
}
|
||||
|
||||
return cpuid_i;
|
||||
}
|
||||
|
||||
int kvm_arch_init_vcpu(CPUState *cs)
|
||||
{
|
||||
struct {
|
||||
@@ -1715,8 +1988,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
||||
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
uint32_t limit, i, j, cpuid_i;
|
||||
uint32_t unused;
|
||||
uint32_t cpuid_i;
|
||||
struct kvm_cpuid_entry2 *c;
|
||||
uint32_t signature[3];
|
||||
int kvm_base = KVM_CPUID_SIGNATURE;
|
||||
@@ -1869,8 +2141,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
||||
c->edx = env->features[FEAT_KVM_HINTS];
|
||||
}
|
||||
|
||||
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
|
||||
|
||||
if (cpu->kvm_pv_enforce_cpuid) {
|
||||
r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
|
||||
if (r < 0) {
|
||||
@@ -1881,227 +2151,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i <= limit; i++) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "unsupported level value: 0x%x\n", limit);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
|
||||
switch (i) {
|
||||
case 2: {
|
||||
/* Keep reading function 2 till all the input is received */
|
||||
int times;
|
||||
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
|
||||
KVM_CPUID_FLAG_STATE_READ_NEXT;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
times = c->eax & 0xff;
|
||||
|
||||
for (j = 1; j < times; ++j) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:2):eax & 0xf = 0x%x\n", times);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 0x1f:
|
||||
if (env->nr_dies < 2) {
|
||||
break;
|
||||
}
|
||||
/* fallthrough */
|
||||
case 4:
|
||||
case 0xb:
|
||||
case 0xd:
|
||||
for (j = 0; ; j++) {
|
||||
if (i == 0xd && j == 64) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == 0x1f && j == 64) {
|
||||
break;
|
||||
}
|
||||
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
c->index = j;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
|
||||
if (i == 4 && c->eax == 0) {
|
||||
break;
|
||||
}
|
||||
if (i == 0xb && !(c->ecx & 0xff00)) {
|
||||
break;
|
||||
}
|
||||
if (i == 0x1f && !(c->ecx & 0xff00)) {
|
||||
break;
|
||||
}
|
||||
if (i == 0xd && c->eax == 0) {
|
||||
continue;
|
||||
}
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
}
|
||||
break;
|
||||
case 0x7:
|
||||
case 0x12:
|
||||
for (j = 0; ; j++) {
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
c->index = j;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
|
||||
if (j > 1 && (c->eax & 0xf) != 1) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x12,ecx:0x%x)\n", j);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
}
|
||||
break;
|
||||
case 0x14:
|
||||
case 0x1d:
|
||||
case 0x1e: {
|
||||
uint32_t times;
|
||||
|
||||
c->function = i;
|
||||
c->index = 0;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
times = c->eax;
|
||||
|
||||
for (j = 1; j <= times; ++j) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
c->function = i;
|
||||
c->index = j;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
c->function = i;
|
||||
c->flags = 0;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
|
||||
/*
|
||||
* KVM already returns all zeroes if a CPUID entry is missing,
|
||||
* so we can omit it and avoid hitting KVM's 80-entry limit.
|
||||
*/
|
||||
cpuid_i--;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (limit >= 0x0a) {
|
||||
uint32_t eax, edx;
|
||||
|
||||
cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
|
||||
|
||||
has_architectural_pmu_version = eax & 0xff;
|
||||
if (has_architectural_pmu_version > 0) {
|
||||
num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
|
||||
|
||||
/* Shouldn't be more than 32, since that's the number of bits
|
||||
* available in EBX to tell us _which_ counters are available.
|
||||
* Play it safe.
|
||||
*/
|
||||
if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
|
||||
num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
|
||||
}
|
||||
|
||||
if (has_architectural_pmu_version > 1) {
|
||||
num_architectural_pmu_fixed_counters = edx & 0x1f;
|
||||
|
||||
if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
|
||||
num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
|
||||
|
||||
for (i = 0x80000000; i <= limit; i++) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
|
||||
switch (i) {
|
||||
case 0x8000001d:
|
||||
/* Query for all AMD cache information leaves */
|
||||
for (j = 0; ; j++) {
|
||||
c->function = i;
|
||||
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
c->index = j;
|
||||
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
|
||||
if (c->eax == 0) {
|
||||
break;
|
||||
}
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "cpuid_data is full, no space for "
|
||||
"cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
}
|
||||
break;
|
||||
default:
|
||||
c->function = i;
|
||||
c->flags = 0;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
|
||||
/*
|
||||
* KVM already returns all zeroes if a CPUID entry is missing,
|
||||
* so we can omit it and avoid hitting KVM's 80-entry limit.
|
||||
*/
|
||||
cpuid_i--;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Call Centaur's CPUID instructions they are supported. */
|
||||
if (env->cpuid_xlevel2 > 0) {
|
||||
cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
|
||||
|
||||
for (i = 0xC0000000; i <= limit; i++) {
|
||||
if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
|
||||
fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
|
||||
abort();
|
||||
}
|
||||
c = &cpuid_data.entries[cpuid_i++];
|
||||
|
||||
c->function = i;
|
||||
c->flags = 0;
|
||||
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
|
||||
}
|
||||
}
|
||||
|
||||
cpuid_i = kvm_x86_arch_cpuid(env, cpuid_data.entries, cpuid_i);
|
||||
cpuid_data.cpuid.nent = cpuid_i;
|
||||
|
||||
if (((env->cpuid_version >> 8)&0xF) >= 6
|
||||
@@ -2227,6 +2277,15 @@ int kvm_arch_init_vcpu(CPUState *cs)
|
||||
return r;
|
||||
}
|
||||
|
||||
int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
|
||||
{
|
||||
if (is_tdx_vm()) {
|
||||
return tdx_pre_create_vcpu(cpu, errp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_destroy_vcpu(CPUState *cs)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
@@ -2514,6 +2573,17 @@ int kvm_arch_get_default_type(MachineState *ms)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kvm_confidential_guest_init(MachineState *ms, Error **errp)
|
||||
{
|
||||
if (object_dynamic_cast(OBJECT(ms->cgs), TYPE_SEV_GUEST)) {
|
||||
return sev_kvm_init(ms->cgs, errp);
|
||||
} else if (object_dynamic_cast(OBJECT(ms->cgs), TYPE_TDX_GUEST)) {
|
||||
return tdx_kvm_init(ms, errp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_init(MachineState *ms, KVMState *s)
|
||||
{
|
||||
uint64_t identity_base = 0xfffbc000;
|
||||
@@ -2523,18 +2593,12 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
|
||||
Error *local_err = NULL;
|
||||
|
||||
/*
|
||||
* Initialize SEV context, if required
|
||||
* Initialize confidential guest (SEV/TDX) context, if required
|
||||
*
|
||||
* If no memory encryption is requested (ms->cgs == NULL) this is
|
||||
* a no-op.
|
||||
*
|
||||
* It's also a no-op if a non-SEV confidential guest support
|
||||
* mechanism is selected. SEV is the only mechanism available to
|
||||
* select on x86 at present, so this doesn't arise, but if new
|
||||
* mechanisms are supported in future (e.g. TDX), they'll need
|
||||
* their own initialization either here or elsewhere.
|
||||
* It's a no-op if a non-SEV/non-tdx confidential guest support
|
||||
* mechanism is selected, i.e., ms->cgs == NULL
|
||||
*/
|
||||
ret = sev_kvm_init(ms->cgs, &local_err);
|
||||
ret = kvm_confidential_guest_init(ms, &local_err);
|
||||
if (ret < 0) {
|
||||
error_report_err(local_err);
|
||||
return ret;
|
||||
@@ -2997,6 +3061,11 @@ void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* TODO: Allow accessing guest state for debug TDs. */
|
||||
if (is_tdx_vm()) {
|
||||
return;
|
||||
}
|
||||
|
||||
ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
|
||||
assert(ret == 1);
|
||||
}
|
||||
@@ -3215,32 +3284,34 @@ static void kvm_init_msrs(X86CPU *cpu)
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
kvm_msr_buf_reset(cpu);
|
||||
if (has_msr_arch_capabs) {
|
||||
kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
|
||||
env->features[FEAT_ARCH_CAPABILITIES]);
|
||||
}
|
||||
|
||||
if (has_msr_core_capabs) {
|
||||
kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
|
||||
env->features[FEAT_CORE_CAPABILITY]);
|
||||
}
|
||||
if (!is_tdx_vm()) {
|
||||
if (has_msr_arch_capabs) {
|
||||
kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
|
||||
env->features[FEAT_ARCH_CAPABILITIES]);
|
||||
}
|
||||
|
||||
if (has_msr_perf_capabs && cpu->enable_pmu) {
|
||||
kvm_msr_entry_add_perf(cpu, env->features);
|
||||
if (has_msr_core_capabs) {
|
||||
kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
|
||||
env->features[FEAT_CORE_CAPABILITY]);
|
||||
}
|
||||
|
||||
if (has_msr_perf_capabs && cpu->enable_pmu) {
|
||||
kvm_msr_entry_add_perf(cpu, env->features);
|
||||
}
|
||||
|
||||
/*
|
||||
* Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
|
||||
* all kernels with MSR features should have them.
|
||||
*/
|
||||
if (kvm_feature_msrs && cpu_has_vmx(env)) {
|
||||
kvm_msr_entry_add_vmx(cpu, env->features);
|
||||
}
|
||||
}
|
||||
|
||||
if (has_msr_ucode_rev) {
|
||||
kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
|
||||
* all kernels with MSR features should have them.
|
||||
*/
|
||||
if (kvm_feature_msrs && cpu_has_vmx(env)) {
|
||||
kvm_msr_entry_add_vmx(cpu, env->features);
|
||||
}
|
||||
|
||||
assert(kvm_buf_set_msrs(cpu) == 0);
|
||||
}
|
||||
|
||||
@@ -4550,6 +4621,11 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
|
||||
|
||||
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
|
||||
|
||||
/* TODO: Allow accessing guest state for debug TDs. */
|
||||
if (is_tdx_vm()) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX
|
||||
* root operation upon vCPU reset. kvm_put_msr_feature_control() should also
|
||||
@@ -4650,6 +4726,12 @@ int kvm_arch_get_registers(CPUState *cs)
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* TODO: Allow accessing guest state for debug TDs. */
|
||||
if (is_tdx_vm()) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = kvm_getput_regs(cpu, 0);
|
||||
if (ret < 0) {
|
||||
goto out;
|
||||
@@ -5350,6 +5432,15 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
|
||||
ret = kvm_xen_handle_exit(cpu, &run->xen);
|
||||
break;
|
||||
#endif
|
||||
case KVM_EXIT_TDX:
|
||||
if (!is_tdx_vm()) {
|
||||
error_report("KVM: get KVM_EXIT_TDX for a non-TDX VM.");
|
||||
ret = -1;
|
||||
break;
|
||||
}
|
||||
tdx_handle_exit(cpu, &run->tdx);
|
||||
ret = 0;
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
|
||||
ret = -1;
|
||||
@@ -5602,7 +5693,7 @@ bool kvm_has_waitpkg(void)
|
||||
|
||||
bool kvm_arch_cpu_check_are_resettable(void)
|
||||
{
|
||||
return !sev_es_enabled();
|
||||
return !sev_es_enabled() && !is_tdx_vm();
|
||||
}
|
||||
|
||||
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
|
||||
#include "sysemu/kvm.h"
|
||||
|
||||
#define KVM_MAX_CPUID_ENTRIES 100
|
||||
|
||||
#ifdef CONFIG_KVM
|
||||
|
||||
#define kvm_pit_in_kernel() \
|
||||
@@ -22,6 +24,9 @@
|
||||
#define kvm_ioapic_in_kernel() \
|
||||
(kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
|
||||
|
||||
uint32_t kvm_x86_arch_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
|
||||
uint32_t cpuid_i);
|
||||
|
||||
#else
|
||||
|
||||
#define kvm_pit_in_kernel() 0
|
||||
@@ -37,6 +42,7 @@ bool kvm_hv_vpindex_settable(void);
|
||||
bool kvm_enable_sgx_provisioning(KVMState *s);
|
||||
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);
|
||||
|
||||
int kvm_get_vm_type(MachineState *ms, const char *vm_type);
|
||||
void kvm_arch_reset_vcpu(X86CPU *cs);
|
||||
void kvm_arch_after_reset_vcpu(X86CPU *cpu);
|
||||
void kvm_arch_do_init_vcpu(X86CPU *cs);
|
||||
|
||||
@@ -9,6 +9,8 @@ i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files('xen-emu.c'))
|
||||
|
||||
i386_kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c'))
|
||||
|
||||
i386_kvm_ss.add(when: 'CONFIG_TDX', if_true: files('tdx.c'), if_false: files('tdx-stub.c'))
|
||||
|
||||
i386_system_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c'))
|
||||
|
||||
i386_system_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss)
|
||||
|
||||
23
target/i386/kvm/tdx-stub.c
Normal file
23
target/i386/kvm/tdx-stub.c
Normal file
@@ -0,0 +1,23 @@
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include "tdx.h"
|
||||
|
||||
int tdx_kvm_init(MachineState *ms, Error **errp)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int tdx_parse_tdvf(void *flash_ptr, int size)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void tdx_handle_exit(X86CPU *cpu, struct kvm_tdx_exit *tdx_exit)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
1612
target/i386/kvm/tdx.c
Normal file
1612
target/i386/kvm/tdx.c
Normal file
File diff suppressed because it is too large
Load Diff
72
target/i386/kvm/tdx.h
Normal file
72
target/i386/kvm/tdx.h
Normal file
@@ -0,0 +1,72 @@
|
||||
#ifndef QEMU_I386_TDX_H
|
||||
#define QEMU_I386_TDX_H
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
#include CONFIG_DEVICES /* CONFIG_TDX */
|
||||
#endif
|
||||
|
||||
#include <linux/kvm.h>
|
||||
#include "exec/confidential-guest-support.h"
|
||||
#include "hw/i386/tdvf.h"
|
||||
#include "io/channel-socket.h"
|
||||
#include "sysemu/kvm.h"
|
||||
|
||||
#define TYPE_TDX_GUEST "tdx-guest"
|
||||
#define TDX_GUEST(obj) OBJECT_CHECK(TdxGuest, (obj), TYPE_TDX_GUEST)
|
||||
|
||||
typedef struct TdxGuestClass {
|
||||
ConfidentialGuestSupportClass parent_class;
|
||||
} TdxGuestClass;
|
||||
|
||||
enum TdxRamType{
|
||||
TDX_RAM_UNACCEPTED,
|
||||
TDX_RAM_ADDED,
|
||||
};
|
||||
|
||||
typedef struct TdxRamEntry {
|
||||
uint64_t address;
|
||||
uint64_t length;
|
||||
enum TdxRamType type;
|
||||
} TdxRamEntry;
|
||||
|
||||
typedef struct TdxGuest {
|
||||
ConfidentialGuestSupport parent_obj;
|
||||
|
||||
QemuMutex lock;
|
||||
|
||||
bool initialized;
|
||||
uint64_t attributes; /* TD attributes */
|
||||
char *mrconfigid; /* base64 encoded sha348 digest */
|
||||
char *mrowner; /* base64 encoded sha348 digest */
|
||||
char *mrownerconfig; /* base64 encoded sha348 digest */
|
||||
|
||||
TdxFirmware tdvf;
|
||||
MemoryRegion *tdvf_region;
|
||||
|
||||
uint32_t nr_ram_entries;
|
||||
TdxRamEntry *ram_entries;
|
||||
|
||||
/* runtime state */
|
||||
int event_notify_interrupt;
|
||||
uint32_t event_notify_apic_id;
|
||||
|
||||
/* GetQuote */
|
||||
int quote_generation_num;
|
||||
SocketAddress *quote_generation;
|
||||
} TdxGuest;
|
||||
|
||||
#ifdef CONFIG_TDX
|
||||
bool is_tdx_vm(void);
|
||||
#else
|
||||
#define is_tdx_vm() 0
|
||||
#endif /* CONFIG_TDX */
|
||||
|
||||
int tdx_kvm_init(MachineState *ms, Error **errp);
|
||||
void tdx_get_supported_cpuid(uint32_t function, uint32_t index, int reg,
|
||||
uint32_t *ret);
|
||||
int tdx_pre_create_vcpu(CPUState *cpu, Error **errp);
|
||||
void tdx_set_tdvf_region(MemoryRegion *tdvf_region);
|
||||
int tdx_parse_tdvf(void *flash_ptr, int size);
|
||||
void tdx_handle_exit(X86CPU *cpu, struct kvm_tdx_exit *tdx_exit);
|
||||
|
||||
#endif /* QEMU_I386_TDX_H */
|
||||
@@ -39,7 +39,6 @@
|
||||
#include "hw/i386/pc.h"
|
||||
#include "exec/address-spaces.h"
|
||||
|
||||
#define TYPE_SEV_GUEST "sev-guest"
|
||||
OBJECT_DECLARE_SIMPLE_TYPE(SevGuestState, SEV_GUEST)
|
||||
|
||||
|
||||
|
||||
@@ -20,6 +20,8 @@
|
||||
|
||||
#include "exec/confidential-guest-support.h"
|
||||
|
||||
#define TYPE_SEV_GUEST "sev-guest"
|
||||
|
||||
#define SEV_POLICY_NODBG 0x1
|
||||
#define SEV_POLICY_NOKS 0x2
|
||||
#define SEV_POLICY_ES 0x4
|
||||
|
||||
@@ -433,6 +433,22 @@ static int arch_sections_write(DumpState *s, uint8_t *buff)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void arch_cleanup(DumpState *s)
|
||||
{
|
||||
g_autofree uint8_t *buff = NULL;
|
||||
int rc;
|
||||
|
||||
if (!pv_dump_initialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
buff = g_malloc(kvm_s390_pv_dmp_get_size_completion_data());
|
||||
rc = kvm_s390_dump_completion_data(buff);
|
||||
if (!rc) {
|
||||
pv_dump_initialized = false;
|
||||
}
|
||||
}
|
||||
|
||||
int cpu_get_dump_info(ArchDumpInfo *info,
|
||||
const struct GuestPhysBlockList *guest_phys_blocks)
|
||||
{
|
||||
@@ -448,10 +464,7 @@ int cpu_get_dump_info(ArchDumpInfo *info,
|
||||
info->arch_sections_add_fn = *arch_sections_add;
|
||||
info->arch_sections_write_hdr_fn = *arch_sections_write_hdr;
|
||||
info->arch_sections_write_fn = *arch_sections_write;
|
||||
} else {
|
||||
info->arch_sections_add_fn = NULL;
|
||||
info->arch_sections_write_hdr_fn = NULL;
|
||||
info->arch_sections_write_fn = NULL;
|
||||
info->arch_cleanup_fn = *arch_cleanup;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -36,8 +36,8 @@ class S390CCWVirtioMachine(QemuSystemTest):
|
||||
dmesg_clear_count = 1
|
||||
def clear_guest_dmesg(self):
|
||||
exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; '
|
||||
'echo dm_clear\ ' + str(self.dmesg_clear_count),
|
||||
'dm_clear ' + str(self.dmesg_clear_count))
|
||||
r'echo dm_clear\ ' + str(self.dmesg_clear_count),
|
||||
r'dm_clear ' + str(self.dmesg_clear_count))
|
||||
self.dmesg_clear_count += 1
|
||||
|
||||
def test_s390x_devices(self):
|
||||
@@ -121,15 +121,15 @@ class S390CCWVirtioMachine(QemuSystemTest):
|
||||
'cat /sys/bus/ccw/devices/0.1.1111/cutype',
|
||||
'3832/01')
|
||||
exec_command_and_wait_for_pattern(self,
|
||||
'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
|
||||
'0x1af4')
|
||||
r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
|
||||
r'0x1af4')
|
||||
exec_command_and_wait_for_pattern(self,
|
||||
'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
|
||||
'0x0001')
|
||||
r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
|
||||
r'0x0001')
|
||||
# check fid propagation
|
||||
exec_command_and_wait_for_pattern(self,
|
||||
'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
|
||||
'0x0000000c')
|
||||
r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
|
||||
r'0x0000000c')
|
||||
# add another device
|
||||
self.clear_guest_dmesg()
|
||||
self.vm.cmd('device_add', driver='virtio-net-ccw',
|
||||
@@ -235,7 +235,7 @@ class S390CCWVirtioMachine(QemuSystemTest):
|
||||
'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done',
|
||||
'virtio_gpudrmfb frame buffer device')
|
||||
exec_command_and_wait_for_pattern(self,
|
||||
'echo -e "\e[?25l" > /dev/tty0', ':/#')
|
||||
r'echo -e "\e[?25l" > /dev/tty0', ':/#')
|
||||
exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do '
|
||||
'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;'
|
||||
'done',
|
||||
|
||||
356
tests/avocado/mem-addr-space-check.py
Normal file
356
tests/avocado/mem-addr-space-check.py
Normal file
@@ -0,0 +1,356 @@
|
||||
# Check for crash when using memory beyond the available guest processor
|
||||
# address space.
|
||||
#
|
||||
# Copyright (c) 2023 Red Hat, Inc.
|
||||
#
|
||||
# Author:
|
||||
# Ani Sinha <anisinha@redhat.com>
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
from avocado_qemu import QemuSystemTest
|
||||
import signal
|
||||
import time
|
||||
|
||||
class MemAddrCheck(QemuSystemTest):
|
||||
# after launch, in order to generate the logs from QEMU we need to
|
||||
# wait for some time. Launching and then immediately shutting down
|
||||
# the VM generates empty logs. A delay of 1 second is added for
|
||||
# this reason.
|
||||
DELAY_Q35_BOOT_SEQUENCE = 1
|
||||
|
||||
# first, lets test some 32-bit processors.
|
||||
# for all 32-bit cases, pci64_hole_size is 0.
|
||||
def test_phybits_low_pse36(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
With pse36 feature ON, a processor has 36 bits of addressing. So it can
|
||||
access up to a maximum of 64GiB of memory. Memory hotplug region begins
|
||||
at 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when
|
||||
we have 0.5 GiB of VM memory, see pc_q35_init()). This means total
|
||||
hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory
|
||||
for dimm alignment for all newer machines (see enforce_aligned_dimm
|
||||
property for pc machines and pc_get_device_memory_range()). That leaves
|
||||
total hotpluggable actual memory size of 59 GiB. If the VM is started
|
||||
with 0.5 GiB of memory, maxmem should be set to a maximum value of
|
||||
59.5 GiB to ensure that the processor can address all memory directly.
|
||||
Note that 64-bit pci hole size is 0 in this case. If maxmem is set to
|
||||
59.6G, QEMU should fail to start with a message "phy-bits are too low".
|
||||
If maxmem is set to 59.5G with all other QEMU parameters identical, QEMU
|
||||
should start fine.
|
||||
"""
|
||||
self.vm.add_args('-S', '-machine', 'q35', '-m',
|
||||
'512,slots=1,maxmem=59.6G',
|
||||
'-cpu', 'pentium,pse36=on', '-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
self.vm.wait()
|
||||
self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
|
||||
self.assertRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_low_pae(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
With pae feature ON, a processor has 36 bits of addressing. So it can
|
||||
access up to a maximum of 64GiB of memory. Rest is the same as the case
|
||||
with pse36 above.
|
||||
"""
|
||||
self.vm.add_args('-S', '-machine', 'q35', '-m',
|
||||
'512,slots=1,maxmem=59.6G',
|
||||
'-cpu', 'pentium,pae=on', '-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
self.vm.wait()
|
||||
self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
|
||||
self.assertRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_pentium_pse36(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
Setting maxmem to 59.5G and making sure that QEMU can start with the
|
||||
same options as the failing case above with pse36 cpu feature.
|
||||
"""
|
||||
self.vm.add_args('-machine', 'q35', '-m',
|
||||
'512,slots=1,maxmem=59.5G',
|
||||
'-cpu', 'pentium,pse36=on', '-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
|
||||
self.vm.shutdown()
|
||||
self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_pentium_pae(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
Test is same as above but now with pae cpu feature turned on.
|
||||
Setting maxmem to 59.5G and making sure that QEMU can start fine
|
||||
with the same options as the case above.
|
||||
"""
|
||||
self.vm.add_args('-machine', 'q35', '-m',
|
||||
'512,slots=1,maxmem=59.5G',
|
||||
'-cpu', 'pentium,pae=on', '-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
|
||||
self.vm.shutdown()
|
||||
self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_pentium2(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
Pentium2 has 36 bits of addressing, so its same as pentium
|
||||
with pse36 ON.
|
||||
"""
|
||||
self.vm.add_args('-machine', 'q35', '-m',
|
||||
'512,slots=1,maxmem=59.5G',
|
||||
'-cpu', 'pentium2', '-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
|
||||
self.vm.shutdown()
|
||||
self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_low_nonpse36(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
Pentium processor has 32 bits of addressing without pse36 or pae
|
||||
so it can access physical address up to 4 GiB. Setting maxmem to
|
||||
4 GiB should make QEMU fail to start with "phys-bits too low"
|
||||
message because the region for memory hotplug is always placed
|
||||
above 4 GiB due to the PCI hole and simplicity.
|
||||
"""
|
||||
self.vm.add_args('-S', '-machine', 'q35', '-m',
|
||||
'512,slots=1,maxmem=4G',
|
||||
'-cpu', 'pentium', '-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
self.vm.wait()
|
||||
self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
|
||||
self.assertRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
# now lets test some 64-bit CPU cases.
|
||||
def test_phybits_low_tcg_q35_70_amd(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
For q35 7.1 machines and above, there is a HT window that starts at
|
||||
1024 GiB and ends at 1 TiB - 1. If the max GPA falls in this range,
|
||||
"above_4G" memory is adjusted to start at 1 TiB boundary for AMD cpus
|
||||
in the default case. Lets test without that case for machines 7.0.
|
||||
For q35-7.0 machines, "above 4G" memory starts are 4G.
|
||||
pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to
|
||||
be 40, TCG emulated CPUs have maximum of 1 TiB (1024 GiB) of
|
||||
directly addressible memory.
|
||||
Hence, maxmem value at most can be
|
||||
1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB
|
||||
which is equal to 987.5 GiB. Setting the value to 988 GiB should
|
||||
make QEMU fail with the error message.
|
||||
"""
|
||||
self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
|
||||
'512,slots=1,maxmem=988G',
|
||||
'-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
self.vm.wait()
|
||||
self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
|
||||
self.assertRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_low_tcg_q35_71_amd(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
AMD_HT_START is defined to be at 1012 GiB. So for q35 machines
|
||||
version > 7.0 and AMD cpus, instead of 1024 GiB limit for 40 bit
|
||||
processor address space, it has to be 1012 GiB , that is 12 GiB
|
||||
less than the case above in order to accomodate HT hole.
|
||||
Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less
|
||||
than 988 GiB).
|
||||
"""
|
||||
self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
|
||||
'512,slots=1,maxmem=976G',
|
||||
'-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
self.vm.wait()
|
||||
self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
|
||||
self.assertRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_tcg_q35_70_amd(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
Same as q35-7.0 AMD case except that here we check that QEMU can
|
||||
successfully start when maxmem is < 988G.
|
||||
"""
|
||||
self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
|
||||
'512,slots=1,maxmem=987.5G',
|
||||
'-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
|
||||
self.vm.shutdown()
|
||||
self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_tcg_q35_71_amd(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
Same as q35-7.1 AMD case except that here we check that QEMU can
|
||||
successfully start when maxmem is < 976G.
|
||||
"""
|
||||
self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
|
||||
'512,slots=1,maxmem=975.5G',
|
||||
'-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
|
||||
self.vm.shutdown()
|
||||
self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_tcg_q35_71_intel(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
Same parameters as test_phybits_low_tcg_q35_71_amd() but use
|
||||
Intel cpu instead. QEMU should start fine in this case as
|
||||
"above_4G" memory starts at 4G.
|
||||
"""
|
||||
self.vm.add_args('-S', '-cpu', 'Skylake-Server',
|
||||
'-machine', 'pc-q35-7.1', '-m',
|
||||
'512,slots=1,maxmem=976G',
|
||||
'-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
|
||||
self.vm.shutdown()
|
||||
self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_low_tcg_q35_71_amd_41bits(self):
|
||||
"""
|
||||
:avocado: tags=machine:q35
|
||||
:avocado: tags=arch:x86_64
|
||||
|
||||
AMD processor with 41 bits. Max cpu hw address = 2 TiB.
|
||||
By setting maxram above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can
|
||||
force "above_4G" memory to start at 1 TiB for q35-7.1 machines
|
||||
(max GPA will be above AMD_HT_START which is defined as 1012 GiB).
|
||||
|
||||
With pci_64_hole size at 32 GiB, in this case, maxmem should be 991.5
|
||||
GiB with 1 GiB per slot for alignment and 0.5 GiB as non-hotplug
|
||||
memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB, QEMU should
|
||||
fail to start.
|
||||
"""
|
||||
self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
|
||||
'-machine', 'pc-q35-7.1', '-m',
|
||||
'512,slots=1,maxmem=992G',
|
||||
'-display', 'none',
|
||||
'-object', 'memory-backend-ram,id=mem1,size=1G',
|
||||
'-device', 'pc-dimm,id=vm0,memdev=mem1')
|
||||
self.vm.set_qmp_monitor(enabled=False)
|
||||
self.vm.launch()
|
||||
self.vm.wait()
|
||||
self.assertEquals(self.vm.exitcode(), 1, "QEMU exit code should be 1")
|
||||
self.assertRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_tcg_q35_71_amd_41bits(self):
    """
    :avocado: tags=machine:q35
    :avocado: tags=arch:x86_64

    AMD processor with 41 bits. Max cpu hw address = 2 TiB.
    Same as above but by setting maxram between 976 GiB and 992 GiB,
    QEMU should start fine.
    """
    # Same layout as the failing 41-bit case, but maxmem=990G keeps the
    # max GPA within the 2 TiB reachable by phys-bits=41.
    args = [
        '-S', '-cpu', 'EPYC-v4,phys-bits=41',
        '-machine', 'pc-q35-7.1',
        '-m', '512,slots=1,maxmem=990G',
        '-display', 'none',
        '-object', 'memory-backend-ram,id=mem1,size=1G',
        '-device', 'pc-dimm,id=vm0,memdev=mem1',
    ]
    self.vm.add_args(*args)
    self.vm.set_qmp_monitor(enabled=False)
    self.vm.launch()
    # Let the guest run through early boot before inspecting the log.
    time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
    self.vm.shutdown()
    self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_low_tcg_q35_intel_cxl(self):
    """
    :avocado: tags=machine:q35
    :avocado: tags=arch:x86_64

    cxl memory window starts after memory device range. Here, we use 1 GiB
    of cxl window memory. 4G_mem end aligns at 4G. pci64_hole is 32 GiB and
    starts after the cxl memory window.
    So maxmem here should be at most 986 GiB considering all memory boundary
    alignment constraints with 40 bits (1 TiB) of processor physical bits.
    """
    self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
                     '-machine', 'q35,cxl=on', '-m',
                     '512,slots=1,maxmem=987G',
                     '-display', 'none',
                     '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1',
                     '-M', 'cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G')
    self.vm.set_qmp_monitor(enabled=False)
    self.vm.launch()
    self.vm.wait()
    # assertEquals() is a deprecated alias removed in Python 3.12;
    # use assertEqual() instead.
    self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
    self.assertRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
|
||||
def test_phybits_ok_tcg_q35_intel_cxl(self):
    """
    :avocado: tags=machine:q35
    :avocado: tags=arch:x86_64

    Same as above but here we do not reserve any cxl memory window. Hence,
    with the exact same parameters as above, QEMU should start fine even
    with cxl enabled.
    """
    # Identical command line to the failing cxl case, minus the
    # cxl-fmw window reservation.
    args = [
        '-S', '-cpu', 'Skylake-Server,phys-bits=40',
        '-machine', 'q35,cxl=on',
        '-m', '512,slots=1,maxmem=987G',
        '-display', 'none',
        '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1',
    ]
    self.vm.add_args(*args)
    self.vm.set_qmp_monitor(enabled=False)
    self.vm.launch()
    # Let the guest run through early boot before inspecting the log.
    time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
    self.vm.shutdown()
    self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
|
||||
@@ -136,12 +136,11 @@ def test_frontend(fname):
|
||||
def open_test_result(dir_name, file_name, update):
    """Open an expected-result file for reading (or read/write in update mode).

    :param dir_name: directory containing the result file
    :param file_name: name of the result file
    :param update: when True, open read/write and create the file if missing;
                   when False, a missing file raises FileNotFoundError
    :return: an open text-mode file object (UTF-8)
    """
    # Merge residue had both the old (no encoding, leaked 'fp' handle,
    # unreachable duplicate returns) and new versions of this body;
    # resolved to the explicit-encoding version.
    mode = 'r+' if update else 'r'
    try:
        return open(os.path.join(dir_name, file_name), mode, encoding='utf-8')
    except FileNotFoundError:
        if not update:
            raise
        # In update mode, create a fresh (empty) result file.
        return open(os.path.join(dir_name, file_name), 'w+', encoding='utf-8')
|
||||
|
||||
|
||||
def test_and_diff(test_name, dir_name, update):
|
||||
@@ -218,9 +217,9 @@ def main(argv):
|
||||
test_name = os.path.splitext(base_name)[0]
|
||||
status |= test_and_diff(test_name, dir_name, args.update)
|
||||
|
||||
exit(status)
|
||||
sys.exit(status)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    main(sys.argv)
    # Use sys.exit() rather than the interactive-only exit() builtin;
    # merge residue had an unreachable duplicate exit call here.
    sys.exit(0)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# This is an example ignore list.
# To enable use of the ignore list add this to configure:
# "--extra-cflags=-fsanitize-blacklist=<src path>/tests/tsan/ignore.tsan"
# The eventual goal would be to fix these warnings.
|
||||
|
||||
# TSan is not happy about setting/getting of dirty bits,
|
||||
@@ -44,6 +44,10 @@ static void compare_ranges(const char *prefix, GList *ranges,
|
||||
print_ranges("out", ranges);
|
||||
print_ranges("expected", expected);
|
||||
#endif
|
||||
if (!expected) {
|
||||
g_assert_true(!ranges);
|
||||
return;
|
||||
}
|
||||
g_assert_cmpint(g_list_length(ranges), ==, g_list_length(expected));
|
||||
for (l = ranges, e = expected; l ; l = l->next, e = e->next) {
|
||||
Range *r = (Range *)l->data;
|
||||
|
||||
@@ -30,8 +30,8 @@ class NetBSDVM(basevm.BaseVM):
|
||||
"git-base",
|
||||
"pkgconf",
|
||||
"xz",
|
||||
"python310",
|
||||
"py310-expat",
|
||||
"python311",
|
||||
"py311-expat",
|
||||
"ninja-build",
|
||||
|
||||
# gnu tools
|
||||
|
||||
Reference in New Issue
Block a user