36 lines
1.6 KiB
Diff
# Commit 50df6f7429f73364bbddb0970a3a34faa01a7790
# Date 2014-05-28 09:51:07 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>

x86: don't use VA for cache flush when also flushing TLB

Doing both flushes at once is a strong indication for the address
mapping to either having got dropped (in which case the cache flush,
when done via INVLPG, would fault) or its physical address having
changed (in which case the cache flush would end up being done on the
wrong address range). There is no adverse effect (other than the
obvious performance one) using WBINVD in this case regardless of the
range's size; only map_pages_to_xen() uses combined flushes at present.

This problem was observed with the 2nd try backport of d6cb14b3 ("VT-d:
suppress UR signaling for desktop chipsets") to 4.2 (where ioremap()
needs to be replaced with set_fixmap_nocache(); the now commented out
__set_fixmap(, 0, 0) there to undo the mapping resulted in the first of
the above two scenarios).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -152,7 +152,8 @@ void flush_area_local(const void *va, un
     if ( order < (BITS_PER_LONG - PAGE_SHIFT) )
         sz = 1UL << (order + PAGE_SHIFT);
 
-    if ( c->x86_clflush_size && c->x86_cache_size && sz &&
+    if ( !(flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL)) &&
+         c->x86_clflush_size && c->x86_cache_size && sz &&
          ((sz >> 10) < c->x86_cache_size) )
     {
         va = (const void *)((unsigned long)va & ~(sz - 1));