13bc6e358c
- kexec-tools-ppc64-parse-ibm-dynamic-memory.patch: kexec/ppc64: add support to parse ibm,dynamic-memory-v2 property (bsc#1081789, LTC#164625). - kexec-tools-add-a-helper-function-to-add-ranges.patch: kexec: add a helper function to add ranges (bsc#1081789, LTC#164625). OBS-URL: https://build.opensuse.org/request/show/579331 OBS-URL: https://build.opensuse.org/package/show/Kernel:kdump/kexec-tools?expand=0&rev=93
178 lines
5.0 KiB
Diff
178 lines
5.0 KiB
Diff
From: Hari Bathini <hbathini@linux.vnet.ibm.com>
|
|
Date: Tue, 20 Feb 2018 19:48:00 +0530
|
|
Subject: kexec: add a helper function to add ranges
|
|
References: bsc#1081789, LTC#164625
|
|
Upstream: merged
|
|
Git-commit: c740fdb2048265551f77d3f0fe53b2fddc0c8489
|
|
|
|
Add a helper function for adding ranges to avoid duplicating code.
|
|
|
|
Signed-off-by: Hari Bathini <hbathini@linux.vnet.ibm.com>
|
|
Reviewed-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
|
|
Signed-off-by: Simon Horman <horms@verge.net.au>
|
|
Acked-by: Petr Tesarik <ptesarik@suse.com>
|
|
|
|
---
|
|
kexec/fs2dt.c | 115 ++++++++++++++++++++++++++--------------------------------
|
|
1 file changed, 53 insertions(+), 62 deletions(-)
|
|
|
|
--- a/kexec/fs2dt.c
|
|
+++ b/kexec/fs2dt.c
|
|
@@ -169,6 +169,50 @@ static unsigned propnum(const char *name
|
|
return offset;
|
|
}
|
|
|
|
+/*
|
|
+ * Add ranges by comparing 'base' and 'end' addresses with usable
|
|
+ * memory ranges. Returns the number of ranges added. Each range added
|
|
+ * increments 'idx' by 2.
|
|
+ */
|
|
+static uint64_t add_ranges(uint64_t **ranges, int *ranges_size, int idx,
|
|
+ uint64_t base, uint64_t end)
|
|
+{
|
|
+ uint64_t loc_base, loc_end, rngs_cnt = 0;
|
|
+ size_t range;
|
|
+ int add = 0;
|
|
+
|
|
+ for (range = 0; range < usablemem_rgns.size; range++) {
|
|
+ loc_base = usablemem_rgns.ranges[range].start;
|
|
+ loc_end = usablemem_rgns.ranges[range].end;
|
|
+ if (loc_base >= base && loc_end <= end) {
|
|
+ add = 1;
|
|
+ } else if (base < loc_end && end > loc_base) {
|
|
+ if (loc_base < base)
|
|
+ loc_base = base;
|
|
+ if (loc_end > end)
|
|
+ loc_end = end;
|
|
+ add = 1;
|
|
+ }
|
|
+
|
|
+ if (add) {
|
|
+ if (idx >= ((*ranges_size) - 2)) {
|
|
+ (*ranges_size) += MEM_RANGE_CHUNK_SZ;
|
|
+ *ranges = realloc(*ranges, (*ranges_size)*8);
|
|
+ if (!(*ranges))
|
|
+ die("unrecoverable error: can't realloc"
|
|
+ "%d bytes for ranges.\n",
|
|
+ (*ranges_size)*8);
|
|
+ }
|
|
+ (*ranges)[idx++] = cpu_to_be64(loc_base);
|
|
+ (*ranges)[idx++] = cpu_to_be64(loc_end - loc_base);
|
|
+
|
|
+ rngs_cnt++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return rngs_cnt;
|
|
+}
|
|
+
|
|
#ifdef HAVE_DYNAMIC_MEMORY
|
|
static void add_dyn_reconf_usable_mem_property__(int fd)
|
|
{
|
|
@@ -176,8 +220,8 @@ static void add_dyn_reconf_usable_mem_pr
|
|
uint64_t buf[32];
|
|
uint64_t *ranges;
|
|
int ranges_size = MEM_RANGE_CHUNK_SZ;
|
|
- uint64_t base, end, loc_base, loc_end;
|
|
- size_t i, rngs_cnt, range;
|
|
+ uint64_t base, end, rngs_cnt;
|
|
+ size_t i;
|
|
int rlen = 0;
|
|
int tmp_indx;
|
|
|
|
@@ -210,36 +254,8 @@ static void add_dyn_reconf_usable_mem_pr
|
|
|
|
tmp_indx = rlen++;
|
|
|
|
- rngs_cnt = 0;
|
|
- for (range = 0; range < usablemem_rgns.size; range++) {
|
|
- int add = 0;
|
|
- loc_base = usablemem_rgns.ranges[range].start;
|
|
- loc_end = usablemem_rgns.ranges[range].end;
|
|
- if (loc_base >= base && loc_end <= end) {
|
|
- add = 1;
|
|
- } else if (base < loc_end && end > loc_base) {
|
|
- if (loc_base < base)
|
|
- loc_base = base;
|
|
- if (loc_end > end)
|
|
- loc_end = end;
|
|
- add = 1;
|
|
- }
|
|
-
|
|
- if (add) {
|
|
- if (rlen >= (ranges_size-2)) {
|
|
- ranges_size += MEM_RANGE_CHUNK_SZ;
|
|
- ranges = realloc(ranges, ranges_size*8);
|
|
- if (!ranges)
|
|
- die("unrecoverable error: can't"
|
|
- " realloc %d bytes for"
|
|
- " ranges.\n",
|
|
- ranges_size*8);
|
|
- }
|
|
- ranges[rlen++] = cpu_to_be64(loc_base);
|
|
- ranges[rlen++] = cpu_to_be64(loc_end - loc_base);
|
|
- rngs_cnt++;
|
|
- }
|
|
- }
|
|
+ rngs_cnt = add_ranges(&ranges, &ranges_size, rlen,
|
|
+ base, end);
|
|
if (rngs_cnt == 0) {
|
|
/* We still need to add a counter for every LMB because
|
|
* the kernel parsing code is dumb. We just have
|
|
@@ -261,7 +277,8 @@ static void add_dyn_reconf_usable_mem_pr
|
|
}
|
|
} else {
|
|
/* Store the count of (base, size) duple */
|
|
- ranges[tmp_indx] = cpu_to_be64((uint64_t) rngs_cnt);
|
|
+ ranges[tmp_indx] = cpu_to_be64(rngs_cnt);
|
|
+ rlen += rngs_cnt * 2;
|
|
}
|
|
}
|
|
|
|
@@ -294,8 +311,7 @@ static void add_usable_mem_property(int
|
|
uint64_t buf[2];
|
|
uint64_t *ranges;
|
|
int ranges_size = MEM_RANGE_CHUNK_SZ;
|
|
- uint64_t base, end, loc_base, loc_end;
|
|
- size_t range;
|
|
+ uint64_t base, end, rngs_cnt;
|
|
int rlen = 0;
|
|
|
|
strcpy(fname, pathname);
|
|
@@ -326,33 +342,8 @@ static void add_usable_mem_property(int
|
|
die("unrecoverable error: can't alloc %zu bytes for ranges.\n",
|
|
ranges_size * sizeof(*ranges));
|
|
|
|
- for (range = 0; range < usablemem_rgns.size; range++) {
|
|
- int add = 0;
|
|
- loc_base = usablemem_rgns.ranges[range].start;
|
|
- loc_end = usablemem_rgns.ranges[range].end;
|
|
- if (loc_base >= base && loc_end <= end) {
|
|
- add = 1;
|
|
- } else if (base < loc_end && end > loc_base) {
|
|
- if (loc_base < base)
|
|
- loc_base = base;
|
|
- if (loc_end > end)
|
|
- loc_end = end;
|
|
- add = 1;
|
|
- }
|
|
- if (add) {
|
|
- if (rlen >= (ranges_size-2)) {
|
|
- ranges_size += MEM_RANGE_CHUNK_SZ;
|
|
- ranges = realloc(ranges, ranges_size *
|
|
- sizeof(*ranges));
|
|
- if (!ranges)
|
|
- die("unrecoverable error: can't realloc"
|
|
- "%zu bytes for ranges.\n",
|
|
- ranges_size*sizeof(*ranges));
|
|
- }
|
|
- ranges[rlen++] = cpu_to_be64(loc_base);
|
|
- ranges[rlen++] = cpu_to_be64(loc_end - loc_base);
|
|
- }
|
|
- }
|
|
+ rngs_cnt = add_ranges(&ranges, &ranges_size, rlen, base, end);
|
|
+ rlen += rngs_cnt * 2;
|
|
|
|
if (!rlen) {
|
|
/*
|