- bsc#945164 - Xl destroy show error with kernel of SLES 12 sp1
  5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch

- Upstream patches from Jan
  55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
  55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
  55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
  55e43fd8-x86-NUMA-fix-setup_node.patch
  55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
  55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
  54c2553c-grant-table-use-uint16_t-consistently-for-offset-and-length.patch
  54ca33bc-grant-table-refactor-grant-copy-to-reduce-duplicate-code.patch
  54ca340e-grant-table-defer-releasing-pages-acquired-in-a-grant-copy.patch

- bsc#944463 - VUL-0: CVE-2015-5239: qemu-kvm: Integer overflow in
  vnc_client_read() and protocol_client_msg()
  CVE-2015-5239-qemuu-limit-client_cut_text-msg-payload-size.patch
  CVE-2015-5239-qemut-limit-client_cut_text-msg-payload-size.patch
- bsc#944697 - VUL-1: CVE-2015-6815: qemu: net: e1000: infinite
  loop issue
  CVE-2015-6815-qemuu-e1000-fix-infinite-loop.patch
  CVE-2015-6815-qemut-e1000-fix-infinite-loop.patch

OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=375

# Commit c011f470e6e79208f5baa071b4d072b78c88e2ba
# Date 2015-08-31 13:52:24 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/NUMA: don't account hotplug regions
... except in cases where they really matter: node_memblk_range[] now
is the only place all regions get stored. nodes[] and NODE_DATA() track
present memory only. This improves the reporting when nodes have
disjoint "normal" and hotplug regions, with the hotplug region sitting
above the highest populated page. In such cases a node's spanned-pages
value (visible in both XEN_SYSCTL_numainfo and 'u' debug key output)
covered all the way up to the top of the hotplug region, giving quite
a different picture from what an otherwise identically configured
system without hotplug regions would report. Note, however, that the
actual hotplug case (as well as cases of nodes with multiple disjoint
present regions) is still not handled such that the reported values
would represent how much memory a node really has (but that can be
considered intentional).

Reported-by: Jim Fehlig <jfehlig@suse.com>

This at once makes nodes_cover_memory() no longer consider E820_RAM
regions covered by SRAT hotplug regions.

Also reject self-overlaps with mismatching hotplug flags.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Jim Fehlig <jfehlig@suse.com>
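
The self-overlap rejection uses the "!x != !y" idiom: both sides are
normalized to 0/1 before comparison, since the flag test yields the
flag's bit value while test_bit() yields 0/1. A minimal standalone
sketch of the idiom (the constant and variable values below are
illustrative, not taken from the patch):

#include <stdio.h>

/* Value per the ACPI specification; repeated here only so the sketch
 * is self-contained. */
#define ACPI_SRAT_MEM_HOT_PLUGGABLE 0x2

int main(void)
{
	unsigned int flags = ACPI_SRAT_MEM_HOT_PLUGGABLE; /* new entry: hotplug */
	int recorded_bit = 0;     /* stand-in for test_bit(i, memblk_hotplug) */

	/* Without the '!' normalization, (flags & 0x2) == 2 could never
	 * compare equal to a 0/1 bit even when both mean "hotplug". */
	int mismatch = !(flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) != !recorded_bit;

	printf("mismatch=%d\n", mismatch); /* 1: flags disagree with the bitmap */
	return 0;
}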
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -32,7 +32,7 @@ static u8 __read_mostly pxm2node[256] =
 static int num_node_memblks;
 static struct node node_memblk_range[NR_NODE_MEMBLKS];
 static int memblk_nodeid[NR_NODE_MEMBLKS];
-
+static __initdata DECLARE_BITMAP(memblk_hotplug, NR_NODE_MEMBLKS);
 
 static int node_to_pxm(int n);
 
@@ -89,9 +89,9 @@ static __init int conflicting_memblks(u6
 		if (nd->start == nd->end)
 			continue;
 		if (nd->end > start && nd->start < end)
-			return memblk_nodeid[i];
+			return i;
 		if (nd->end == end && nd->start == start)
-			return memblk_nodeid[i];
+			return i;
 	}
 	return -1;
 }
@@ -229,7 +229,6 @@ acpi_numa_processor_affinity_init(struct
 void __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
-	struct node *nd;
 	u64 start, end;
 	int node, pxm;
 	int i;
@@ -263,30 +262,40 @@ acpi_numa_memory_affinity_init(struct ac
 	}
 	/* It is fine to add this area to the nodes data it will be used later*/
 	i = conflicting_memblks(start, end);
-	if (i == node) {
-		printk(KERN_WARNING
-		       "SRAT: Warning: PXM %d (%"PRIx64"-%"PRIx64") overlaps with itself (%"
-		       PRIx64"-%"PRIx64")\n", pxm, start, end, nodes[i].start, nodes[i].end);
-	} else if (i >= 0) {
+	if (i < 0)
+		/* everything fine */;
+	else if (memblk_nodeid[i] == node) {
+		bool_t mismatch = !(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) !=
+		                  !test_bit(i, memblk_hotplug);
+
+		printk("%sSRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with itself (%"PRIx64"-%"PRIx64")\n",
+		       mismatch ? KERN_ERR : KERN_WARNING, pxm, start, end,
+		       node_memblk_range[i].start, node_memblk_range[i].end);
+		if (mismatch) {
+			bad_srat();
+			return;
+		}
+	} else {
 		printk(KERN_ERR
-		       "SRAT: PXM %d (%"PRIx64"-%"PRIx64") overlaps with PXM %d (%"
-		       PRIx64"-%"PRIx64")\n", pxm, start, end, node_to_pxm(i),
-		       nodes[i].start, nodes[i].end);
+		       "SRAT: PXM %u (%"PRIx64"-%"PRIx64") overlaps with PXM %u (%"PRIx64"-%"PRIx64")\n",
+		       pxm, start, end, node_to_pxm(memblk_nodeid[i]),
+		       node_memblk_range[i].start, node_memblk_range[i].end);
 		bad_srat();
 		return;
 	}
-	nd = &nodes[node];
-	if (!node_test_and_set(node, memory_nodes_parsed)) {
-		nd->start = start;
-		nd->end = end;
-	} else {
-		if (start < nd->start)
-			nd->start = start;
-		if (nd->end < end)
-			nd->end = end;
+	if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
+		struct node *nd = &nodes[node];
+
+		if (!node_test_and_set(node, memory_nodes_parsed)) {
+			nd->start = start;
+			nd->end = end;
+		} else {
+			if (start < nd->start)
+				nd->start = start;
+			if (nd->end < end)
+				nd->end = end;
+		}
 	}
-	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && end > mem_hotplug)
-		mem_hotplug = end;
 	printk(KERN_INFO "SRAT: Node %u PXM %u %"PRIx64"-%"PRIx64"%s\n",
 	       node, pxm, start, end,
 	       ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE ? " (hotplug)" : "");
@@ -294,6 +303,11 @@ acpi_numa_memory_affinity_init(struct ac
 	node_memblk_range[num_node_memblks].start = start;
 	node_memblk_range[num_node_memblks].end = end;
 	memblk_nodeid[num_node_memblks] = node;
+	if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
+		__set_bit(num_node_memblks, memblk_hotplug);
+		if (end > mem_hotplug)
+			mem_hotplug = end;
+	}
 	num_node_memblks++;
 }
 
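
To make the reporting change concrete: assuming (made-up numbers, not
from the patch) a node with 4GiB of populated RAM and an empty hotplug
region reaching up to 16GiB, the node's spanned range visible via
XEN_SYSCTL_numainfo changes as sketched below.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0;                  /* node's populated RAM base */
	uint64_t present_end = 0x100000000;  /* 4GiB populated */
	uint64_t hotplug_end = 0x400000000;  /* empty hotplug region top */

	/* Before the patch, nodes[] was widened by the hotplug region. */
	printf("spanned before: %"PRIx64"-%"PRIx64"\n", start, hotplug_end);
	/* After it, nodes[] tracks present memory only. */
	printf("spanned after:  %"PRIx64"-%"PRIx64"\n", start, present_end);
	return 0;
}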