Accepting request 884057 from home:mbrugger:branches:Kernel:kdump

- Fix guessing of va_bits (bsc#1183977)
  * makedumpfile-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch
  * makedumpfile-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch
  * makedumpfile-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch

OBS-URL: https://build.opensuse.org/request/show/884057
OBS-URL: https://build.opensuse.org/package/show/Kernel:kdump/makedumpfile?expand=0&rev=153
Authored by Michal Suchanek on 2021-04-19 09:33:13 +00:00, committed by Git OBS Bridge
parent 29b838c5b9
commit 2d0224555d
5 changed files with 503 additions and 0 deletions

makedumpfile-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch

@@ -0,0 +1,146 @@
From d8b701796f0491f2ac4b06c7a5b795c29399efab Mon Sep 17 00:00:00 2001
From: Kazuhito Hagio <k-hagio-ab@nec.com>
Date: Fri, 29 Jan 2021 11:40:23 +0900
Subject: [PATCH 1/3] Use vmcoreinfo note in /proc/kcore for --mem-usage option
kernel commit 23c85094fe18 added vmcoreinfo note to /proc/kcore.
Use the vmcoreinfo note to get necessary information, especially
page_offset and phys_base on arm64, for the --mem-usage option.
Signed-off-by: Kazuhito Hagio <k-hagio-ab@nec.com>
---
elf_info.c | 49 -------------------------------------------------
elf_info.h | 1 -
makedumpfile.c | 26 +++++++++++++++++++++-----
3 files changed, 21 insertions(+), 55 deletions(-)
diff --git a/elf_info.c b/elf_info.c
index a6624b5..e8affb7 100644
--- a/elf_info.c
+++ b/elf_info.c
@@ -698,55 +698,6 @@ get_elf32_ehdr(int fd, char *filename, Elf32_Ehdr *ehdr)
return TRUE;
}
-int
-get_elf_loads(int fd, char *filename)
-{
- int i, j, phnum, elf_format;
- Elf64_Phdr phdr;
-
- /*
- * Check ELF64 or ELF32.
- */
- elf_format = check_elf_format(fd, filename, &phnum, &num_pt_loads);
- if (elf_format == ELF64)
- flags_memory |= MEMORY_ELF64;
- else if (elf_format != ELF32)
- return FALSE;
-
- if (!num_pt_loads) {
- ERRMSG("Can't get the number of PT_LOAD.\n");
- return FALSE;
- }
-
- /*
- * The below file information will be used as /proc/vmcore.
- */
- fd_memory = fd;
- name_memory = filename;
-
- pt_loads = calloc(sizeof(struct pt_load_segment), num_pt_loads);
- if (pt_loads == NULL) {
- ERRMSG("Can't allocate memory for the PT_LOAD. %s\n",
- strerror(errno));
- return FALSE;
- }
- for (i = 0, j = 0; i < phnum; i++) {
- if (!get_phdr_memory(i, &phdr))
- return FALSE;
-
- if (phdr.p_type != PT_LOAD)
- continue;
-
- if (j >= num_pt_loads)
- return FALSE;
- if (!dump_Elf_load(&phdr, j))
- return FALSE;
- j++;
- }
-
- return TRUE;
-}
-
static int exclude_segment(struct pt_load_segment **pt_loads,
unsigned int *num_pt_loads, uint64_t start, uint64_t end)
{
diff --git a/elf_info.h b/elf_info.h
index d9b5d05..d5416b3 100644
--- a/elf_info.h
+++ b/elf_info.h
@@ -44,7 +44,6 @@ int get_elf64_ehdr(int fd, char *filename, Elf64_Ehdr *ehdr);
int get_elf32_ehdr(int fd, char *filename, Elf32_Ehdr *ehdr);
int get_elf_info(int fd, char *filename);
void free_elf_info(void);
-int get_elf_loads(int fd, char *filename);
int set_kcore_vmcoreinfo(uint64_t vmcoreinfo_addr, uint64_t vmcoreinfo_len);
int get_kcore_dump_loads(void);
diff --git a/makedumpfile.c b/makedumpfile.c
index ba0003a..768eda4 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -11445,6 +11445,7 @@ int show_mem_usage(void)
{
uint64_t vmcoreinfo_addr, vmcoreinfo_len;
struct cycle cycle = {0};
+ int vmcoreinfo = FALSE;
if (!is_crashkernel_mem_reserved()) {
ERRMSG("No memory is reserved for crashkernel!\n");
@@ -11456,9 +11457,22 @@ int show_mem_usage(void)
if (!open_files_for_creating_dumpfile())
return FALSE;
- if (!get_elf_loads(info->fd_memory, info->name_memory))
+ if (!get_elf_info(info->fd_memory, info->name_memory))
return FALSE;
+ /*
+ * /proc/kcore on Linux 4.19 and later kernels have vmcoreinfo note in
+ * NOTE segment. See commit 23c85094fe18.
+ */
+ if (has_vmcoreinfo()) {
+ off_t offset;
+ unsigned long size;
+
+ get_vmcoreinfo(&offset, &size);
+ vmcoreinfo = read_vmcoreinfo_from_vmcore(offset, size, FALSE);
+ DEBUG_MSG("Read vmcoreinfo from NOTE segment: %d\n", vmcoreinfo);
+ }
+
if (!get_page_offset())
return FALSE;
@@ -11466,11 +11480,13 @@ int show_mem_usage(void)
if (!get_phys_base())
return FALSE;
- if (!get_sys_kernel_vmcoreinfo(&vmcoreinfo_addr, &vmcoreinfo_len))
- return FALSE;
+ if (!vmcoreinfo) {
+ if (!get_sys_kernel_vmcoreinfo(&vmcoreinfo_addr, &vmcoreinfo_len))
+ return FALSE;
- if (!set_kcore_vmcoreinfo(vmcoreinfo_addr, vmcoreinfo_len))
- return FALSE;
+ if (!set_kcore_vmcoreinfo(vmcoreinfo_addr, vmcoreinfo_len))
+ return FALSE;
+ }
if (!initial())
return FALSE;
--
2.30.2
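
The --mem-usage change relies on the VMCOREINFO ELF note that kernel commit 23c85094fe18 appends to the PT_NOTE segment of /proc/kcore. A standalone sketch of that lookup, not makedumpfile code (ELF64 only, the PN_XNUM header extension is ignored, error handling is trimmed, and reading /proc/kcore normally requires root):

/*
 * Standalone sketch, not makedumpfile code: find the VMCOREINFO note in
 * the PT_NOTE segment of /proc/kcore and print its text payload.
 */
#include <elf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/kcore", "r");	/* usually needs root */
	Elf64_Ehdr ehdr;
	Elf64_Phdr phdr;
	int i;

	if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1 ||
	    memcmp(ehdr.e_ident, ELFMAG, SELFMAG) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64)
		return 1;

	/* A very large /proc/kcore can use the PN_XNUM extension; this
	 * sketch ignores that case (makedumpfile handles it). */
	for (i = 0; i < ehdr.e_phnum; i++) {
		if (fseek(f, ehdr.e_phoff + i * sizeof(phdr), SEEK_SET) ||
		    fread(&phdr, sizeof(phdr), 1, f) != 1)
			return 1;
		if (phdr.p_type != PT_NOTE)
			continue;

		/* Read the note segment and walk its 4-byte aligned entries. */
		char *buf = malloc(phdr.p_filesz);
		size_t off = 0;

		if (!buf || fseek(f, phdr.p_offset, SEEK_SET) ||
		    fread(buf, phdr.p_filesz, 1, f) != 1)
			return 1;

		while (off + sizeof(Elf64_Nhdr) <= phdr.p_filesz) {
			Elf64_Nhdr *nhdr = (Elf64_Nhdr *)(buf + off);
			char *name = buf + off + sizeof(*nhdr);
			char *desc = name + ((nhdr->n_namesz + 3) & ~3UL);

			if (nhdr->n_namesz == sizeof("VMCOREINFO") &&
			    !memcmp(name, "VMCOREINFO", sizeof("VMCOREINFO"))) {
				/* Payload is the vmcoreinfo text: PAGESIZE=...,
				 * NUMBER(VA_BITS)=..., SYMBOL(...)=..., etc. */
				fwrite(desc, 1, nhdr->n_descsz, stdout);
				free(buf);
				return 0;
			}
			off = (size_t)(desc - buf) + ((nhdr->n_descsz + 3) & ~3UL);
		}
		free(buf);
	}
	return 1;	/* no VMCOREINFO note (kernel older than 4.19?) */
}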

makedumpfile-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch

@@ -0,0 +1,101 @@
From 67d0e1d68f28c567a704fd6b9b8fd696ad3df183 Mon Sep 17 00:00:00 2001
From: Kazuhito Hagio <k-hagio-ab@nec.com>
Date: Fri, 29 Jan 2021 11:40:24 +0900
Subject: [PATCH 2/3] arm64: Make use of NUMBER(VA_BITS) in vmcoreinfo
Make use of the NUMBER(VA_BITS) in vmcoreinfo, which was added by
kernel commit 20a166243328 (Linux 4.12 and later kernels), as the
current way of guessing VA_BITS does not work on Linux 5.4 and
later kernels.
Signed-off-by: Bhupesh Sharma <bhsharma@redhat.com>
Signed-off-by: Kazuhito Hagio <k-hagio-ab@nec.com>
---
arch/arm64.c | 63 ++++++++++++++++++++++++++++++++++------------------
1 file changed, 42 insertions(+), 21 deletions(-)
diff --git a/arch/arm64.c b/arch/arm64.c
index 3d7b416..2916b4f 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -345,6 +345,43 @@ get_stext_symbol(void)
return(found ? kallsym : FALSE);
}
+static int
+get_va_bits_from_stext_arm64(void)
+{
+ ulong _stext;
+
+ _stext = get_stext_symbol();
+ if (!_stext) {
+ ERRMSG("Can't get the symbol of _stext.\n");
+ return FALSE;
+ }
+
+ /*
+ * Derive va_bits as per arch/arm64/Kconfig. Note that this is a
+ * best case approximation at the moment, as there can be
+ * inconsistencies in this calculation (for e.g., for 52-bit
+ * kernel VA case, the 48th bit is set in * the _stext symbol).
+ */
+ if ((_stext & PAGE_OFFSET_48) == PAGE_OFFSET_48) {
+ va_bits = 48;
+ } else if ((_stext & PAGE_OFFSET_47) == PAGE_OFFSET_47) {
+ va_bits = 47;
+ } else if ((_stext & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
+ va_bits = 42;
+ } else if ((_stext & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
+ va_bits = 39;
+ } else if ((_stext & PAGE_OFFSET_36) == PAGE_OFFSET_36) {
+ va_bits = 36;
+ } else {
+ ERRMSG("Cannot find a proper _stext for calculating VA_BITS\n");
+ return FALSE;
+ }
+
+ DEBUG_MSG("va_bits : %d (guess from _stext)\n", va_bits);
+
+ return TRUE;
+}
+
int
get_machdep_info_arm64(void)
{
@@ -398,27 +435,11 @@ get_xen_info_arm64(void)
int
get_versiondep_info_arm64(void)
{
- ulong _stext;
-
- _stext = get_stext_symbol();
- if (!_stext) {
- ERRMSG("Can't get the symbol of _stext.\n");
- return FALSE;
- }
-
- /* Derive va_bits as per arch/arm64/Kconfig */
- if ((_stext & PAGE_OFFSET_36) == PAGE_OFFSET_36) {
- va_bits = 36;
- } else if ((_stext & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
- va_bits = 39;
- } else if ((_stext & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
- va_bits = 42;
- } else if ((_stext & PAGE_OFFSET_47) == PAGE_OFFSET_47) {
- va_bits = 47;
- } else if ((_stext & PAGE_OFFSET_48) == PAGE_OFFSET_48) {
- va_bits = 48;
- } else {
- ERRMSG("Cannot find a proper _stext for calculating VA_BITS\n");
+ if (NUMBER(VA_BITS) != NOT_FOUND_NUMBER) {
+ va_bits = NUMBER(VA_BITS);
+ DEBUG_MSG("va_bits : %d (vmcoreinfo)\n", va_bits);
+ } else if (get_va_bits_from_stext_arm64() == FALSE) {
+ ERRMSG("Can't determine va_bits.\n");
return FALSE;
}
--
2.30.2
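
For reference, the selection the patch implements, NUMBER(VA_BITS) from vmcoreinfo when present and a largest-first mask of _stext otherwise, can be sketched standalone like this (illustrative inputs, not makedumpfile code, LP64 assumed):

/*
 * Standalone sketch, not makedumpfile code: pick va_bits the way the
 * patch does, preferring NUMBER(VA_BITS) from vmcoreinfo and falling
 * back to masking _stext against candidate page offsets, largest first.
 */
#include <stdio.h>
#include <string.h>

static int va_bits_from_vmcoreinfo(const char *vmcoreinfo)
{
	const char *p = strstr(vmcoreinfo, "NUMBER(VA_BITS)=");
	int bits;

	if (p && sscanf(p, "NUMBER(VA_BITS)=%d", &bits) == 1)
		return bits;
	return 0;	/* entry absent (kernel older than 4.12) */
}

static int va_bits_from_stext(unsigned long stext)
{
	static const int candidates[] = { 48, 47, 42, 39, 36 };
	unsigned int i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		unsigned long page_offset = ~0UL << candidates[i];

		if ((stext & page_offset) == page_offset)
			return candidates[i];
	}
	return 0;	/* no plausible value found */
}

int main(void)
{
	/* Illustrative inputs; real code reads them from the dump or kcore. */
	const char *vmcoreinfo = "PAGESIZE=4096\nNUMBER(VA_BITS)=48\n";
	unsigned long stext = 0xffff800010080000UL;
	int va_bits = va_bits_from_vmcoreinfo(vmcoreinfo);

	if (!va_bits)
		va_bits = va_bits_from_stext(stext);
	printf("va_bits: %d\n", va_bits);
	return va_bits ? 0 : 1;
}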

makedumpfile-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch

@@ -0,0 +1,245 @@
From a0216b678a95f099a16172cc4a67ad5aa6a89583 Mon Sep 17 00:00:00 2001
From: Kazuhito Hagio <k-hagio-ab@nec.com>
Date: Fri, 29 Jan 2021 11:40:25 +0900
Subject: [PATCH 3/3] arm64: support flipped VA and 52-bit kernel VA
Linux 5.4 and later kernels for arm64 changed the kernel VA space
arrangement and introduced 52-bit kernel VAs by merging branch
commit b333b0ba2346. Support 5.9+ kernels with vmcoreinfo entries
and 5.4+ kernels with best guessing.
However, the following conditions are not supported for now due to
no necessary information provided from kernel:
(1) 5.4 <= kernels <= 5.8 and
    - if PA_BITS=52 && VA_BITS!=52
    - with -x option if vabits_actual=52
(2) kernels < 5.4 with CONFIG_ARM64_USER_VA_BITS_52=y
(1) should be supported with kernel commit bbdbc11804ff and
1d50e5d0c5052 adding necessary information to vmcoreinfo.
Signed-off-by: Bhupesh Sharma <bhsharma@redhat.com>
Signed-off-by: Kazuhito Hagio <k-hagio-ab@nec.com>
Reviewed-by: Pingfan Liu <piliu@redhat.com>
---
arch/arm64.c | 100 +++++++++++++++++++++++++++++++++++++++++--------
makedumpfile.c | 2 +
makedumpfile.h | 1 +
3 files changed, 88 insertions(+), 15 deletions(-)
diff --git a/arch/arm64.c b/arch/arm64.c
index 2916b4f..1072178 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -47,6 +47,8 @@ typedef struct {
static int lpa_52_bit_support_available;
static int pgtable_level;
static int va_bits;
+static int vabits_actual;
+static int flipped_va;
static unsigned long kimage_voffset;
#define SZ_4K 4096
@@ -58,7 +60,6 @@ static unsigned long kimage_voffset;
#define PAGE_OFFSET_42 ((0xffffffffffffffffUL) << 42)
#define PAGE_OFFSET_47 ((0xffffffffffffffffUL) << 47)
#define PAGE_OFFSET_48 ((0xffffffffffffffffUL) << 48)
-#define PAGE_OFFSET_52 ((0xffffffffffffffffUL) << 52)
#define pgd_val(x) ((x).pgd)
#define pud_val(x) (pgd_val((x).pgd))
@@ -218,12 +219,20 @@ pmd_page_paddr(pmd_t pmd)
#define pte_index(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, vaddr) (pmd_page_paddr((*dir)) + pte_index(vaddr) * sizeof(pte_t))
+/*
+ * The linear kernel range starts at the bottom of the virtual address
+ * space. Testing the top bit for the start of the region is a
+ * sufficient check and avoids having to worry about the tag.
+ */
+#define is_linear_addr(addr) (flipped_va ? \
+ (!((unsigned long)(addr) & (1UL << (vabits_actual - 1)))) : \
+ (!!((unsigned long)(addr) & (1UL << (vabits_actual - 1)))))
+
static unsigned long long
__pa(unsigned long vaddr)
{
- if (kimage_voffset == NOT_FOUND_NUMBER ||
- (vaddr >= PAGE_OFFSET))
- return (vaddr - PAGE_OFFSET + info->phys_base);
+ if (kimage_voffset == NOT_FOUND_NUMBER || is_linear_addr(vaddr))
+ return ((vaddr & ~PAGE_OFFSET) + info->phys_base);
else
return (vaddr - kimage_voffset);
}
@@ -253,6 +262,7 @@ static int calculate_plat_config(void)
(PAGESIZE() == SZ_64K && va_bits == 42)) {
pgtable_level = 2;
} else if ((PAGESIZE() == SZ_64K && va_bits == 48) ||
+ (PAGESIZE() == SZ_64K && va_bits == 52) ||
(PAGESIZE() == SZ_4K && va_bits == 39) ||
(PAGESIZE() == SZ_16K && va_bits == 47)) {
pgtable_level = 3;
@@ -263,6 +273,7 @@ static int calculate_plat_config(void)
PAGESIZE(), va_bits);
return FALSE;
}
+ DEBUG_MSG("pgtable_level: %d\n", pgtable_level);
return TRUE;
}
@@ -270,6 +281,9 @@ static int calculate_plat_config(void)
unsigned long
get_kvbase_arm64(void)
{
+ if (flipped_va)
+ return PAGE_OFFSET;
+
return (0xffffffffffffffffUL << va_bits);
}
@@ -382,22 +396,54 @@ get_va_bits_from_stext_arm64(void)
return TRUE;
}
+static void
+get_page_offset_arm64(void)
+{
+ ulong page_end;
+ int vabits_min;
+
+ /*
+ * See arch/arm64/include/asm/memory.h for more details of
+ * the PAGE_OFFSET calculation.
+ */
+ vabits_min = (va_bits > 48) ? 48 : va_bits;
+ page_end = -(1UL << (vabits_min - 1));
+
+ if (SYMBOL(_stext) > page_end) {
+ flipped_va = TRUE;
+ info->page_offset = -(1UL << vabits_actual);
+ } else {
+ flipped_va = FALSE;
+ info->page_offset = -(1UL << (vabits_actual - 1));
+ }
+
+ DEBUG_MSG("page_offset : %lx (from page_end check)\n",
+ info->page_offset);
+}
+
int
get_machdep_info_arm64(void)
{
+ /* Check if va_bits is still not initialized. If still 0, call
+ * get_versiondep_info() to initialize the same.
+ */
+ if (!va_bits)
+ get_versiondep_info_arm64();
+
/* Determine if the PA address range is 52-bits: ARMv8.2-LPA */
if (NUMBER(MAX_PHYSMEM_BITS) != NOT_FOUND_NUMBER) {
info->max_physmem_bits = NUMBER(MAX_PHYSMEM_BITS);
+ DEBUG_MSG("max_physmem_bits : %ld (vmcoreinfo)\n", info->max_physmem_bits);
if (info->max_physmem_bits == 52)
lpa_52_bit_support_available = 1;
- } else
- info->max_physmem_bits = 48;
+ } else {
+ if (va_bits == 52)
+ info->max_physmem_bits = 52; /* just guess */
+ else
+ info->max_physmem_bits = 48;
- /* Check if va_bits is still not initialized. If still 0, call
- * get_versiondep_info() to initialize the same.
- */
- if (!va_bits)
- get_versiondep_info_arm64();
+ DEBUG_MSG("max_physmem_bits : %ld (guess)\n", info->max_physmem_bits);
+ }
if (!calculate_plat_config()) {
ERRMSG("Can't determine platform config values\n");
@@ -408,7 +454,6 @@ get_machdep_info_arm64(void)
info->section_size_bits = SECTIONS_SIZE_BITS;
DEBUG_MSG("kimage_voffset : %lx\n", kimage_voffset);
- DEBUG_MSG("max_physmem_bits : %ld\n", info->max_physmem_bits);
DEBUG_MSG("section_size_bits: %ld\n", info->section_size_bits);
return TRUE;
@@ -443,10 +488,35 @@ get_versiondep_info_arm64(void)
return FALSE;
}
- info->page_offset = (0xffffffffffffffffUL) << (va_bits - 1);
+ /*
+ * See TCR_EL1, Translation Control Register (EL1) register
+ * description in the ARMv8 Architecture Reference Manual.
+ * Basically, we can use the TCR_EL1.T1SZ value to determine
+ * the virtual addressing range supported in the kernel-space
+ * (i.e. vabits_actual) since Linux 5.9.
+ */
+ if (NUMBER(TCR_EL1_T1SZ) != NOT_FOUND_NUMBER) {
+ vabits_actual = 64 - NUMBER(TCR_EL1_T1SZ);
+ DEBUG_MSG("vabits_actual : %d (vmcoreinfo)\n", vabits_actual);
+ } else if ((va_bits == 52) && (SYMBOL(mem_section) != NOT_FOUND_SYMBOL)) {
+ /*
+ * Linux 5.4 through 5.10 have the following linear space:
+ * 48-bit: 0xffff000000000000 - 0xffff7fffffffffff
+ * 52-bit: 0xfff0000000000000 - 0xfff7ffffffffffff
+ * and SYMBOL(mem_section) should be in linear space if
+ * the kernel is configured with COMFIG_SPARSEMEM_EXTREME=y.
+ */
+ if (SYMBOL(mem_section) & (1UL << (va_bits - 1)))
+ vabits_actual = 48;
+ else
+ vabits_actual = 52;
+ DEBUG_MSG("vabits_actual : %d (guess from mem_section)\n", vabits_actual);
+ } else {
+ vabits_actual = va_bits;
+ DEBUG_MSG("vabits_actual : %d (same as va_bits)\n", vabits_actual);
+ }
- DEBUG_MSG("va_bits : %d\n", va_bits);
- DEBUG_MSG("page_offset : %lx\n", info->page_offset);
+ get_page_offset_arm64();
return TRUE;
}
diff --git a/makedumpfile.c b/makedumpfile.c
index 768eda4..fcd766b 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -2397,6 +2397,7 @@ write_vmcoreinfo_data(void)
WRITE_NUMBER("HUGETLB_PAGE_DTOR", HUGETLB_PAGE_DTOR);
#ifdef __aarch64__
WRITE_NUMBER("VA_BITS", VA_BITS);
+ /* WRITE_NUMBER("TCR_EL1_T1SZ", TCR_EL1_T1SZ); should not exists */
WRITE_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
WRITE_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
#endif
@@ -2836,6 +2837,7 @@ read_vmcoreinfo(void)
READ_NUMBER("KERNEL_IMAGE_SIZE", KERNEL_IMAGE_SIZE);
#ifdef __aarch64__
READ_NUMBER("VA_BITS", VA_BITS);
+ READ_NUMBER("TCR_EL1_T1SZ", TCR_EL1_T1SZ);
READ_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
READ_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
#endif
diff --git a/makedumpfile.h b/makedumpfile.h
index 0ed6417..97a5554 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -2049,6 +2049,7 @@ struct number_table {
long KERNEL_IMAGE_SIZE;
#ifdef __aarch64__
long VA_BITS;
+ long TCR_EL1_T1SZ;
unsigned long PHYS_OFFSET;
unsigned long kimage_voffset;
#endif
--
2.30.2
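
The core of the layout handling: when NUMBER(TCR_EL1_T1SZ) is in vmcoreinfo (Linux 5.9+), vabits_actual is 64 - T1SZ; for the flipped layout PAGE_OFFSET becomes -(1 << vabits_actual); and a linear-map address is recognized by its top VA bit being clear. A standalone sketch with illustrative values (not makedumpfile code, LP64 assumed):

/*
 * Standalone sketch, not makedumpfile code, with illustrative values:
 * derive vabits_actual and PAGE_OFFSET for the flipped (Linux 5.4+)
 * layout and classify addresses by their top VA bit.
 */
#include <stdio.h>

int main(void)
{
	int tcr_el1_t1sz = 16;			/* NUMBER(TCR_EL1_T1SZ) from vmcoreinfo, 5.9+ */
	int vabits_actual = 64 - tcr_el1_t1sz;	/* 48 here */
	unsigned long page_offset = -(1UL << vabits_actual);
	unsigned long top_va_bit = 1UL << (vabits_actual - 1);

	/* Flipped layout: the linear map sits at the bottom of the kernel
	 * VA space, so linear addresses have the top VA bit clear. */
	unsigned long linear = 0xffff000012345678UL;	/* inside the linear map */
	unsigned long kimage = 0xffff800010080000UL;	/* kernel image / vmalloc side */

	printf("page_offset: %lx\n", page_offset);
	printf("%lx -> %s\n", linear, (linear & top_va_bit) ? "kimage/vmalloc" : "linear");
	printf("%lx -> %s\n", kimage, (kimage & top_va_bit) ? "kimage/vmalloc" : "linear");
	return 0;
}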

makedumpfile.changes

@@ -1,3 +1,11 @@
-------------------------------------------------------------------
Thu Apr 8 09:55:13 UTC 2021 - Matthias Brugger <mbrugger@suse.com>
- Fix guessing of va_bits (bsc#1183977)
* makedumpfile-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch
* makedumpfile-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch
* makedumpfile-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch
-------------------------------------------------------------------
Wed Apr 7 09:52:16 UTC 2021 - Petr Tesařík <ptesarik@suse.com>

makedumpfile.spec

@@ -46,6 +46,9 @@ Patch3: %{name}-PN_XNUM.patch
Patch4: %{name}-printk-add-support-for-lockless-ringbuffer.patch
Patch5: %{name}-printk-use-committed-finalized-state-value.patch
Patch6: %{name}-use-uts_namespace.name-offset-VMCOREINFO.patch
Patch7: %{name}-1-3-Use-vmcoreinfo-note-in-proc-kcore-for-mem-.patch
Patch8: %{name}-2-3-arm64-Make-use-of-NUMBER-VA_BITS-in-vmcore.patch
Patch9: %{name}-3-3-arm64-support-flipped-VA-and-52-bit-kernel.patch
BuildRequires: libdw-devel
BuildRequires: libelf-devel
BuildRequires: libeppic-devel