Dominique Leuenberger 2018-02-25 10:32:48 +00:00 committed by Git OBS Bridge
commit 699d84f2e4
4 changed files with 501 additions and 0 deletions

kexec-tools-add-a-helper-function-to-add-ranges.patch

@@ -0,0 +1,177 @@
From: Hari Bathini <hbathini@linux.vnet.ibm.com>
Date: Tue, 20 Feb 2018 19:48:00 +0530
Subject: kexec: add a helper function to add ranges
References: bsc#1081789, LTC#164625
Upstream: merged
Git-commit: c740fdb2048265551f77d3f0fe53b2fddc0c8489
Add a helper function for adding ranges to avoid duplicating code.
Signed-off-by: Hari Bathini <hbathini@linux.vnet.ibm.com>
Reviewed-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
kexec/fs2dt.c | 115 ++++++++++++++++++++++++++--------------------------------
1 file changed, 53 insertions(+), 62 deletions(-)
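For context before the diff: a minimal, self-contained sketch (not part of the patch) of the clamping rule that the new add_ranges() helper centralizes. The usable regions and the (base, end) window below are made-up values; the real code iterates usablemem_rgns and appends big-endian (base, size) pairs to a growing array instead of printing them.

/*
 * Illustration only: the overlap/clamping logic add_ranges() applies
 * to one (base, end) window, with hypothetical usable regions.
 */
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, end; };

static const struct region usable[] = {
	{ 0x00000000, 0x08000000 },	/* 0    - 128M */
	{ 0x10000000, 0x18000000 },	/* 256M - 384M */
};

int main(void)
{
	uint64_t base = 0x04000000, end = 0x14000000;	/* LMB window */
	size_t i;

	for (i = 0; i < sizeof(usable) / sizeof(usable[0]); i++) {
		uint64_t lo = usable[i].start, hi = usable[i].end;

		if (lo >= base && hi <= end) {
			/* usable region fully inside the window: keep as-is */
		} else if (base < hi && end > lo) {
			/* partial overlap: clamp to the window */
			if (lo < base)
				lo = base;
			if (hi > end)
				hi = end;
		} else {
			continue;	/* no overlap: nothing to emit */
		}
		printf("emit (base=0x%llx, size=0x%llx)\n",
		       (unsigned long long)lo,
		       (unsigned long long)(hi - lo));
	}
	return 0;
}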
--- a/kexec/fs2dt.c
+++ b/kexec/fs2dt.c
@@ -169,6 +169,50 @@ static unsigned propnum(const char *name
return offset;
}
+/*
+ * Add ranges by comparing 'base' and 'end' addresses with usable
+ * memory ranges. Returns the number of ranges added. Each range added
+ * increments 'idx' by 2.
+ */
+static uint64_t add_ranges(uint64_t **ranges, int *ranges_size, int idx,
+ uint64_t base, uint64_t end)
+{
+ uint64_t loc_base, loc_end, rngs_cnt = 0;
+ size_t range;
+ int add = 0;
+
+ for (range = 0; range < usablemem_rgns.size; range++) {
+ loc_base = usablemem_rgns.ranges[range].start;
+ loc_end = usablemem_rgns.ranges[range].end;
+ if (loc_base >= base && loc_end <= end) {
+ add = 1;
+ } else if (base < loc_end && end > loc_base) {
+ if (loc_base < base)
+ loc_base = base;
+ if (loc_end > end)
+ loc_end = end;
+ add = 1;
+ }
+
+ if (add) {
+ if (idx >= ((*ranges_size) - 2)) {
+ (*ranges_size) += MEM_RANGE_CHUNK_SZ;
+ *ranges = realloc(*ranges, (*ranges_size)*8);
+ if (!(*ranges))
+ die("unrecoverable error: can't realloc"
+ "%d bytes for ranges.\n",
+ (*ranges_size)*8);
+ }
+ (*ranges)[idx++] = cpu_to_be64(loc_base);
+ (*ranges)[idx++] = cpu_to_be64(loc_end - loc_base);
+
+ rngs_cnt++;
+ }
+ }
+
+ return rngs_cnt;
+}
+
#ifdef HAVE_DYNAMIC_MEMORY
static void add_dyn_reconf_usable_mem_property__(int fd)
{
@@ -176,8 +220,8 @@ static void add_dyn_reconf_usable_mem_pr
uint64_t buf[32];
uint64_t *ranges;
int ranges_size = MEM_RANGE_CHUNK_SZ;
- uint64_t base, end, loc_base, loc_end;
- size_t i, rngs_cnt, range;
+ uint64_t base, end, rngs_cnt;
+ size_t i;
int rlen = 0;
int tmp_indx;
@@ -210,36 +254,8 @@ static void add_dyn_reconf_usable_mem_pr
tmp_indx = rlen++;
- rngs_cnt = 0;
- for (range = 0; range < usablemem_rgns.size; range++) {
- int add = 0;
- loc_base = usablemem_rgns.ranges[range].start;
- loc_end = usablemem_rgns.ranges[range].end;
- if (loc_base >= base && loc_end <= end) {
- add = 1;
- } else if (base < loc_end && end > loc_base) {
- if (loc_base < base)
- loc_base = base;
- if (loc_end > end)
- loc_end = end;
- add = 1;
- }
-
- if (add) {
- if (rlen >= (ranges_size-2)) {
- ranges_size += MEM_RANGE_CHUNK_SZ;
- ranges = realloc(ranges, ranges_size*8);
- if (!ranges)
- die("unrecoverable error: can't"
- " realloc %d bytes for"
- " ranges.\n",
- ranges_size*8);
- }
- ranges[rlen++] = cpu_to_be64(loc_base);
- ranges[rlen++] = cpu_to_be64(loc_end - loc_base);
- rngs_cnt++;
- }
- }
+ rngs_cnt = add_ranges(&ranges, &ranges_size, rlen,
+ base, end);
if (rngs_cnt == 0) {
/* We still need to add a counter for every LMB because
* the kernel parsing code is dumb. We just have
@@ -261,7 +277,8 @@ static void add_dyn_reconf_usable_mem_pr
}
} else {
/* Store the count of (base, size) duple */
- ranges[tmp_indx] = cpu_to_be64((uint64_t) rngs_cnt);
+ ranges[tmp_indx] = cpu_to_be64(rngs_cnt);
+ rlen += rngs_cnt * 2;
}
}
@@ -294,8 +311,7 @@ static void add_usable_mem_property(int
uint64_t buf[2];
uint64_t *ranges;
int ranges_size = MEM_RANGE_CHUNK_SZ;
- uint64_t base, end, loc_base, loc_end;
- size_t range;
+ uint64_t base, end, rngs_cnt;
int rlen = 0;
strcpy(fname, pathname);
@@ -326,33 +342,8 @@ static void add_usable_mem_property(int
die("unrecoverable error: can't alloc %zu bytes for ranges.\n",
ranges_size * sizeof(*ranges));
- for (range = 0; range < usablemem_rgns.size; range++) {
- int add = 0;
- loc_base = usablemem_rgns.ranges[range].start;
- loc_end = usablemem_rgns.ranges[range].end;
- if (loc_base >= base && loc_end <= end) {
- add = 1;
- } else if (base < loc_end && end > loc_base) {
- if (loc_base < base)
- loc_base = base;
- if (loc_end > end)
- loc_end = end;
- add = 1;
- }
- if (add) {
- if (rlen >= (ranges_size-2)) {
- ranges_size += MEM_RANGE_CHUNK_SZ;
- ranges = realloc(ranges, ranges_size *
- sizeof(*ranges));
- if (!ranges)
- die("unrecoverable error: can't realloc"
- "%zu bytes for ranges.\n",
- ranges_size*sizeof(*ranges));
- }
- ranges[rlen++] = cpu_to_be64(loc_base);
- ranges[rlen++] = cpu_to_be64(loc_end - loc_base);
- }
- }
+ rngs_cnt = add_ranges(&ranges, &ranges_size, rlen, base, end);
+ rlen += rngs_cnt * 2;
if (!rlen) {
/*

kexec-tools-ppc64-parse-ibm-dynamic-memory.patch

@@ -0,0 +1,311 @@
From: Hari Bathini <hbathini@linux.vnet.ibm.com>
Date: Tue, 20 Feb 2018 19:48:09 +0530
Subject: kexec/ppc64: add support to parse ibm,dynamic-memory-v2 property
References: bsc#1081789, LTC#164625
Upstream: merged
Git-commit: b10924a7da3ca48c04982cd23daf04882afb1a87
Add support to parse the new 'ibm,dynamic-memory-v2' property in the
'ibm,dynamic-reconfiguration-memory' node. This replaces the old
'ibm,dynamic-memory' property and is enabled in the kernel with a
patch series that starts with commit 0c38ed6f6f0b ("powerpc/pseries:
Enable support of ibm,dynamic-memory-v2"). All LMBs that share the same
flags and are adjacent are grouped together in the newer version of the
property making it compact to represent larger memory configurations.
Signed-off-by: Hari Bathini <hbathini@linux.vnet.ibm.com>
Reviewed-by: Mahesh Jagannath Salgaonkar <mahesh@linux.vnet.ibm.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
kexec/arch/ppc64/crashdump-ppc64.c | 23 +++++++--
kexec/arch/ppc64/crashdump-ppc64.h | 14 ++++-
kexec/arch/ppc64/kexec-ppc64.c | 35 ++++++++++----
kexec/fs2dt.c | 92 ++++++++++++++++++++++---------------
4 files changed, 111 insertions(+), 53 deletions(-)
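For reference, a sketch (not part of the patch) of the two entry layouts being distinguished here, modeled on the kernel's of_drconf_cell structures; field names are illustrative. Both entry formats are 24 bytes, which is why the code below keeps LMB_ENTRY_SIZE at 24 and only moves DRCONF_ADDR to offset 4 for v2; the flags field lands at offset 20 in both layouts, so DRCONF_FLAGS is unchanged. All fields are stored big-endian in the device tree.

#include <stdint.h>

/* ibm,dynamic-memory: one 24-byte entry per LMB */
struct drconf_cell_v1 {
	uint64_t base_addr;	/* offset 0  (DRCONF_ADDR for v1) */
	uint32_t drc_index;
	uint32_t reserved;
	uint32_t aa_index;
	uint32_t flags;		/* offset 20 (DRCONF_FLAGS) */
} __attribute__((packed));

/* ibm,dynamic-memory-v2: one 24-byte entry per run of adjacent LMBs with equal flags */
struct drconf_cell_v2 {
	uint32_t seq_lmbs;	/* number of sequential LMBs covered by this set */
	uint64_t base_addr;	/* offset 4  (DRCONF_ADDR for v2) */
	uint32_t drc_index;	/* DRC index of the first LMB in the set */
	uint32_t aa_index;
	uint32_t flags;		/* offset 20 (DRCONF_FLAGS) */
} __attribute__((packed));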
--- a/kexec/arch/ppc64/crashdump-ppc64.c
+++ b/kexec/arch/ppc64/crashdump-ppc64.c
@@ -39,6 +39,10 @@
#define DEVTREE_CRASHKERNEL_BASE "/proc/device-tree/chosen/linux,crashkernel-base"
#define DEVTREE_CRASHKERNEL_SIZE "/proc/device-tree/chosen/linux,crashkernel-size"
+unsigned int num_of_lmb_sets;
+unsigned int is_dyn_mem_v2;
+uint64_t lmb_size;
+
static struct crash_elf_info elf_info64 =
{
class: ELFCLASS64,
@@ -127,6 +131,7 @@ static int get_dyn_reconf_crash_memory_r
{
uint64_t start, end;
uint64_t startrange, endrange;
+ uint64_t size;
char fname[128], buf[32];
FILE *file;
unsigned int i;
@@ -135,6 +140,8 @@ static int get_dyn_reconf_crash_memory_r
strcpy(fname, "/proc/device-tree/");
strcat(fname, "ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory");
+ if (is_dyn_mem_v2)
+ strcat(fname, "-v2");
if ((file = fopen(fname, "r")) == NULL) {
perror(fname);
return -1;
@@ -142,8 +149,9 @@ static int get_dyn_reconf_crash_memory_r
fseek(file, 4, SEEK_SET);
startrange = endrange = 0;
- for (i = 0; i < num_of_lmbs; i++) {
- if ((n = fread(buf, 1, 24, file)) < 0) {
+ size = lmb_size;
+ for (i = 0; i < num_of_lmb_sets; i++) {
+ if ((n = fread(buf, 1, LMB_ENTRY_SIZE, file)) < 0) {
perror(fname);
fclose(file);
return -1;
@@ -156,8 +164,15 @@ static int get_dyn_reconf_crash_memory_r
return -1;
}
- start = be64_to_cpu(((uint64_t *)buf)[DRCONF_ADDR]);
- end = start + lmb_size;
+ /*
+ * If the property is ibm,dynamic-memory-v2, the first 4 bytes
+ * tell the number of sequential LMBs in this entry.
+ */
+ if (is_dyn_mem_v2)
+ size = be32_to_cpu(((unsigned int *)buf)[0]) * lmb_size;
+
+ start = be64_to_cpu(*((uint64_t *)&buf[DRCONF_ADDR]));
+ end = start + size;
if (start == 0 && end >= (BACKUP_SRC_END + 1))
start = BACKUP_SRC_END + 1;
--- a/kexec/arch/ppc64/crashdump-ppc64.h
+++ b/kexec/arch/ppc64/crashdump-ppc64.h
@@ -34,10 +34,18 @@ extern unsigned int rtas_size;
extern uint64_t opal_base;
extern uint64_t opal_size;
-uint64_t lmb_size;
-unsigned int num_of_lmbs;
+/*
+ * In case of ibm,dynamic-memory-v2 property, this is the number of LMB
+ * sets where each set represents a group of sequential LMB entries. In
+ * case of ibm,dynamic-memory property, the number of LMB sets is nothing
+ * but the total number of LMB entries.
+ */
+extern unsigned int num_of_lmb_sets;
+extern unsigned int is_dyn_mem_v2;
+extern uint64_t lmb_size;
-#define DRCONF_ADDR 0
+#define LMB_ENTRY_SIZE 24
+#define DRCONF_ADDR (is_dyn_mem_v2 ? 4 : 0)
#define DRCONF_FLAGS 20
#endif /* CRASHDUMP_PPC64_H */
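A hedged illustration (not part of the patch) of how these macros are meant to be combined when decoding one raw entry read from the property file; it uses glibc's <endian.h> converters rather than kexec-tools' be64_to_cpu helpers and assumes 'buf' holds one LMB_ENTRY_SIZE-byte entry.

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Size in bytes covered by one entry: a v2 entry describes a run of LMBs. */
static uint64_t entry_mem_size(const char *buf, int is_v2, uint64_t lmb_size)
{
	uint32_t seq_lmbs;

	if (!is_v2)
		return lmb_size;		/* v1: exactly one LMB per entry */
	memcpy(&seq_lmbs, buf, sizeof(seq_lmbs));
	return (uint64_t)be32toh(seq_lmbs) * lmb_size;
}

/* Start address of the entry: offset 4 for v2, 0 for v1 (cf. DRCONF_ADDR). */
static uint64_t entry_base_addr(const char *buf, int is_v2)
{
	uint64_t base;

	memcpy(&base, buf + (is_v2 ? 4 : 0), sizeof(base));
	return be64toh(base);
}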
--- a/kexec/arch/ppc64/kexec-ppc64.c
+++ b/kexec/arch/ppc64/kexec-ppc64.c
@@ -149,6 +149,7 @@ static void add_base_memory_range(uint64
static int get_dyn_reconf_base_ranges(void)
{
uint64_t start, end;
+ uint64_t size;
char fname[128], buf[32];
FILE *file;
unsigned int i;
@@ -166,29 +167,35 @@ static int get_dyn_reconf_base_ranges(vo
return -1;
}
/*
- * lmb_size, num_of_lmbs(global variables) are
+ * lmb_size, num_of_lmb_sets(global variables) are
* initialized once here.
*/
- lmb_size = be64_to_cpu(((uint64_t *)buf)[0]);
+ size = lmb_size = be64_to_cpu(((uint64_t *)buf)[0]);
fclose(file);
strcpy(fname, "/proc/device-tree/");
strcat(fname,
"ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory");
if ((file = fopen(fname, "r")) == NULL) {
- perror(fname);
- return -1;
+ strcat(fname, "-v2");
+ if ((file = fopen(fname, "r")) == NULL) {
+ perror(fname);
+ return -1;
+ }
+
+ is_dyn_mem_v2 = 1;
}
- /* first 4 bytes tell the number of lmbs */
+
+ /* first 4 bytes tell the number of lmb set entries */
if (fread(buf, 1, 4, file) != 4) {
perror(fname);
fclose(file);
return -1;
}
- num_of_lmbs = be32_to_cpu(((unsigned int *)buf)[0]);
+ num_of_lmb_sets = be32_to_cpu(((unsigned int *)buf)[0]);
- for (i = 0; i < num_of_lmbs; i++) {
- if ((n = fread(buf, 1, 24, file)) < 0) {
+ for (i = 0; i < num_of_lmb_sets; i++) {
+ if ((n = fread(buf, 1, LMB_ENTRY_SIZE, file)) < 0) {
perror(fname);
fclose(file);
return -1;
@@ -196,13 +203,21 @@ static int get_dyn_reconf_base_ranges(vo
if (nr_memory_ranges >= max_memory_ranges)
return -1;
- start = be64_to_cpu(((uint64_t *)buf)[0]);
- end = start + lmb_size;
+ /*
+ * If the property is ibm,dynamic-memory-v2, the first 4 bytes
+ * tell the number of sequential LMBs in this entry.
+ */
+ if (is_dyn_mem_v2)
+ size = be32_to_cpu(((unsigned int *)buf)[0]) * lmb_size;
+
+ start = be64_to_cpu(*((uint64_t *)&buf[DRCONF_ADDR]));
+ end = start + size;
add_base_memory_range(start, end);
}
fclose(file);
return 0;
}
+
/* Sort the base ranges in memory - this is useful for ensuring that our
* ranges are in ascending order, even if device-tree read of memory nodes
* is done differently. Also, could be used for other range coalescing later
--- a/kexec/fs2dt.c
+++ b/kexec/fs2dt.c
@@ -217,11 +217,12 @@ static uint64_t add_ranges(uint64_t **ra
static void add_dyn_reconf_usable_mem_property__(int fd)
{
char fname[MAXPATH], *bname;
- uint64_t buf[32];
+ char buf[32];
+ uint32_t lmbs_in_set = 1;
uint64_t *ranges;
int ranges_size = MEM_RANGE_CHUNK_SZ;
uint64_t base, end, rngs_cnt;
- size_t i;
+ size_t i, j;
int rlen = 0;
int tmp_indx;
@@ -242,43 +243,61 @@ static void add_dyn_reconf_usable_mem_pr
ranges_size*8);
rlen = 0;
- for (i = 0; i < num_of_lmbs; i++) {
- if (read(fd, buf, 24) < 0)
+ for (i = 0; i < num_of_lmb_sets; i++) {
+ if (read(fd, buf, LMB_ENTRY_SIZE) < 0)
die("unrecoverable error: error reading \"%s\": %s\n",
pathname, strerror(errno));
- base = be64_to_cpu((uint64_t) buf[0]);
- end = base + lmb_size;
- if (~0ULL - base < end)
- die("unrecoverable error: mem property overflow\n");
-
- tmp_indx = rlen++;
-
- rngs_cnt = add_ranges(&ranges, &ranges_size, rlen,
- base, end);
- if (rngs_cnt == 0) {
- /* We still need to add a counter for every LMB because
- * the kernel parsing code is dumb. We just have
- * a zero in this case, with no following base/len.
- */
- ranges[tmp_indx] = 0;
- /* rlen is already just tmp_indx+1 as we didn't write
- * anything. Check array size here, as we'll probably
- * go on for a while writing zeros now.
- */
- if (rlen >= (ranges_size-1)) {
- ranges_size += MEM_RANGE_CHUNK_SZ;
- ranges = realloc(ranges, ranges_size*8);
- if (!ranges)
- die("unrecoverable error: can't"
- " realloc %d bytes for"
- " ranges.\n",
- ranges_size*8);
+ /*
+ * If the property is ibm,dynamic-memory-v2, the first 4 bytes
+ * tell the number of sequential LMBs in this entry. Else, if
+ * the property is ibm,dynamic-memory, each entry represents
+ * one LMB. Make sure to add an entry for each LMB as kernel
+ * looks for a counter for every LMB.
+ */
+ if (is_dyn_mem_v2)
+ lmbs_in_set = be32_to_cpu(((unsigned int *)buf)[0]);
+
+ base = be64_to_cpu(*((uint64_t *)&buf[DRCONF_ADDR]));
+ for (j = 0; j < lmbs_in_set; j++) {
+ end = base + lmb_size;
+ if (~0ULL - base < end) {
+ die("unrecoverable error: mem property"
+ " overflow\n");
}
- } else {
- /* Store the count of (base, size) duple */
- ranges[tmp_indx] = cpu_to_be64(rngs_cnt);
- rlen += rngs_cnt * 2;
+
+ tmp_indx = rlen++;
+
+ rngs_cnt = add_ranges(&ranges, &ranges_size, rlen,
+ base, end);
+ if (rngs_cnt == 0) {
+ /* We still need to add a counter for every LMB
+ * because the kernel parsing code is dumb. We
+ * just have a zero in this case, with no
+ * following base/len.
+ */
+ ranges[tmp_indx] = 0;
+
+ /* rlen is already just tmp_indx+1 as we didn't
+ * write anything. Check array size here, as we
+ * will probably go on writing zeros for a while
+ */
+ if (rlen >= (ranges_size-1)) {
+ ranges_size += MEM_RANGE_CHUNK_SZ;
+ ranges = realloc(ranges, ranges_size*8);
+ if (!ranges)
+ die("unrecoverable error: can't"
+ " realloc %d bytes for"
+ " ranges.\n",
+ ranges_size*8);
+ }
+ } else {
+ /* Store the count of (base, size) duple */
+ ranges[tmp_indx] = cpu_to_be64(rngs_cnt);
+ rlen += rngs_cnt * 2;
+ }
+
+ base = end;
}
}
@@ -298,7 +317,8 @@ static void add_dyn_reconf_usable_mem_pr
static void add_dyn_reconf_usable_mem_property(struct dirent *dp, int fd)
{
- if (!strcmp(dp->d_name, "ibm,dynamic-memory") && usablemem_rgns.size)
+ if ((!strcmp(dp->d_name, "ibm,dynamic-memory-v2") ||
+ !strcmp(dp->d_name, "ibm,dynamic-memory")) && usablemem_rgns.size)
add_dyn_reconf_usable_mem_property__(fd);
}
#else

kexec-tools.changes

@@ -1,3 +1,12 @@
-------------------------------------------------------------------
Fri Feb 23 07:38:55 UTC 2018 - ptesarik@suse.com
- kexec-tools-ppc64-parse-ibm-dynamic-memory.patch: kexec/ppc64:
  add support to parse ibm,dynamic-memory-v2 property
  (bsc#1081789, LTC#164625).
- kexec-tools-add-a-helper-function-to-add-ranges.patch: kexec: add
  a helper function to add ranges (bsc#1081789, LTC#164625).
-------------------------------------------------------------------
Fri Jan 19 12:59:56 UTC 2018 - tchvatal@suse.com

kexec-tools.spec

@@ -31,6 +31,8 @@ Source4: %{name}-rpmlintrc
Patch1: %{name}-xen-static.patch
Patch2: %{name}-xen-balloon-up.patch
Patch3: %{name}-disable-test.patch
Patch4: %{name}-add-a-helper-function-to-add-ranges.patch
Patch5: %{name}-ppc64-parse-ibm-dynamic-memory.patch
BuildRequires: autoconf
BuildRequires: automake
BuildRequires: systemd-rpm-macros
@@ -57,6 +59,8 @@ the loaded kernel after it panics.
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
%build
autoreconf -fvi