850cf2857b
- SLE15 continues to use mdadm-4.0; synchronize the mdadm package from
  SLE12-SP3 to SLE15 and re-order all patches.
- Rename the following patches; they are deleted here and re-added in
  the next part of patches:
  0001-Generic-support-for-consistency-policy-and-PPL.patch
  0002-Detail-show-consistency-policy.patch
  0002-The-mdcheck-script-now-adds-messages-to-the-system.patch
  0003-imsm-PPL-support.patch
  0004-super1-PPL-support.patch
  0005-Add-ppl-and-no-ppl-options-for-update.patch
  0006-Grow-support-consistency-policy-change.patch
  0007-udev-md-raid-assembly.rules-Skip-non-ready-devices.patch
  0008-Retry-HOT_REMOVE_DISK-a-few-times.patch
  0009-Introduce-sys_hot_remove_disk.patch
  0010-Add-force-flag-to-hot_remove_disk.patch
  0011-Detail-handle-non-existent-arrays-better.patch
- Synchronize patches from the SLE12-SP3 mdadm; the patches renamed above
  are re-added here:
  0001-Makefile-Fix-date-to-be-output-in-ISO-format.patch
  0002-imsm-fix-missing-error-message-during-migration.patch
  0003-Fix-oddity-where-mdadm-did-not-recognise-a-relative-.patch
  0004-mdadm-check-the-nodes-when-operate-clustered-array.patch
  0005-examine-tidy-up-some-code.patch
  0006-mdadm-add-man-page-for-symlinks.patch
  0007-mdadm-add-checking-clustered-bitmap-in-assemble-mode.patch
  0008-mdadm-Add-Wimplicit-fallthrough-0-in-Makefile.patch
  0009-mdadm-Specify-enough-length-when-write-to-buffer.patch
  0010-mdadm-it-doesn-t-make-sense-to-set-bitmap-twice.patch
  0011-mdadm-Monitor-Fix-NULL-pointer-dereference-when-stat.patch
  0012-Replace-snprintf-with-strncpy-at-some-places-to-avoi.patch

OBS-URL: https://build.opensuse.org/request/show/517978
OBS-URL: https://build.opensuse.org/package/show/Base:System/mdadm?expand=0&rev=150
From b53bfba6119d3f6f56eb9e10e5a59da6901af159 Mon Sep 17 00:00:00 2001
From: Tomasz Majchrzak <tomasz.majchrzak@intel.com>
Date: Thu, 30 Mar 2017 16:25:41 +0200
Subject: [PATCH] imsm: use rounded size for metadata initialization

Array size is rounded to the nearest MB, however number of data stripes
and blocks per disk are calculated using size passed by the user. If
given size is not aligned, there is a mismatch. It's not possible to
assemble raid0 migrated to raid5 since raid5 arrays use number of data
stripes to calculate array size.

Signed-off-by: Tomasz Majchrzak <tomasz.majchrzak@intel.com>
Signed-off-by: Jes Sorensen <Jes.Sorensen@gmail.com>
---
 super-intel.c | 52 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 18 deletions(-)

diff --git a/super-intel.c b/super-intel.c
index 785488a..84dfe2b 100644
--- a/super-intel.c
+++ b/super-intel.c
@@ -264,6 +264,8 @@ struct bbm_log {
 static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
 #endif
 
+#define BLOCKS_PER_KB (1024/512)
+
 #define RAID_DISK_RESERVED_BLOCKS_IMSM_HI 2209
 
 #define GEN_MIGR_AREA_SIZE 2048 /* General Migration Copy Area size in blocks */
@@ -1324,6 +1326,19 @@ static int is_journal(struct imsm_disk *disk)
 	return (disk->status & JOURNAL_DISK) == JOURNAL_DISK;
 }
 
+/* round array size down to closest MB and ensure it splits evenly
+ * between members
+ */
+static unsigned long long round_size_to_mb(unsigned long long size, unsigned int
+					   disk_count)
+{
+	size /= disk_count;
+	size = (size >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
+	size *= disk_count;
+
+	return size;
+}
+
 /* try to determine how much space is reserved for metadata from
  * the last get_extents() entry on the smallest active disk,
  * otherwise fallback to the default
@@ -3330,11 +3345,10 @@ static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info,
 			if (used_disks > 0) {
 				array_blocks = blocks_per_member(map) *
 					used_disks;
-				/* round array size down to closest MB
-				 */
-				info->custom_array_size = (array_blocks
-						>> SECT_PER_MB_SHIFT)
-						<< SECT_PER_MB_SHIFT;
+				info->custom_array_size =
+					round_size_to_mb(array_blocks,
+							 used_disks);
+
 			}
 		}
 		case MIGR_VERIFY:
@@ -5241,6 +5255,8 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
 	unsigned long long array_blocks;
 	size_t size_old, size_new;
 	unsigned long long num_data_stripes;
+	unsigned int data_disks;
+	unsigned long long size_per_member;
 
 	if (super->orom && mpb->num_raid_devs >= super->orom->vpa) {
 		pr_err("This imsm-container already has the maximum of %d volumes\n", super->orom->vpa);
@@ -5317,9 +5333,11 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
 	strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
 	array_blocks = calc_array_size(info->level, info->raid_disks,
 				       info->layout, info->chunk_size,
-				       s->size * 2);
-	/* round array size down to closest MB */
-	array_blocks = (array_blocks >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
+				       s->size * BLOCKS_PER_KB);
+	data_disks = get_data_disks(info->level, info->layout,
+				    info->raid_disks);
+	array_blocks = round_size_to_mb(array_blocks, data_disks);
+	size_per_member = array_blocks / data_disks;
 
 	dev->size_low = __cpu_to_le32((__u32) array_blocks);
 	dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
@@ -5331,7 +5349,9 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
 	vol->curr_migr_unit = 0;
 	map = get_imsm_map(dev, MAP_0);
 	set_pba_of_lba0(map, super->create_offset);
-	set_blocks_per_member(map, info_to_blocks_per_member(info, s->size));
+	set_blocks_per_member(map, info_to_blocks_per_member(info,
+							     size_per_member /
+							     BLOCKS_PER_KB));
 	map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
 	map->failed_disk_num = ~0;
 	if (info->level > 0)
@@ -5359,7 +5379,7 @@ static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
 	map->num_domains = 1;
 
 	/* info->size is only int so use the 'size' parameter instead */
-	num_data_stripes = (s->size * 2) / info_to_blocks_per_strip(info);
+	num_data_stripes = size_per_member / info_to_blocks_per_strip(info);
 	num_data_stripes /= map->num_domains;
 	set_num_data_stripes(map, num_data_stripes);
 
@@ -7981,9 +8001,7 @@ static unsigned long long imsm_set_array_size(struct imsm_dev *dev,
 		array_blocks = new_size;
 	}
 
-	/* round array size down to closest MB
-	 */
-	array_blocks = (array_blocks >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
+	array_blocks = round_size_to_mb(array_blocks, used_disks);
 	dev->size_low = __cpu_to_le32((__u32)array_blocks);
 	dev->size_high = __cpu_to_le32((__u32)(array_blocks >> 32));
 
@@ -8096,11 +8114,9 @@ static int imsm_set_array_state(struct active_array *a, int consistent)
 				array_blocks =
 					blocks_per_member(map) *
 					used_disks;
-				/* round array size down to closest MB
-				 */
-				array_blocks = (array_blocks
-						>> SECT_PER_MB_SHIFT)
-						<< SECT_PER_MB_SHIFT;
+				array_blocks =
+					round_size_to_mb(array_blocks,
+							 used_disks);
 				a->info.custom_array_size = array_blocks;
 				/* encourage manager to update array
 				 * size
-- 
2.10.2
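
For context, the following is a minimal standalone sketch, not mdadm code and not part of the patch, showing why the patch rounds the size per data disk. The member size, data-disk count and chunk size below are made-up illustration values, and SECT_PER_MB_SHIFT is assumed to be 11 (2048 sectors of 512 bytes per MB). The old scheme rounded only the total array size while the stripe count was still derived from the unrounded per-member size, so the two could disagree; deriving both from the value rounded by round_size_to_mb() keeps them consistent.

/*
 * Illustrative only: a standalone demo of the rounding scheme used by
 * the patch above, with made-up sizes.  Assumes 512-byte sectors, so
 * SECT_PER_MB_SHIFT == 11 (2048 sectors per MB).
 */
#include <stdio.h>

#define SECT_PER_MB_SHIFT 11

/* same arithmetic as the round_size_to_mb() helper added by the patch */
static unsigned long long round_size_to_mb(unsigned long long size,
					   unsigned int disk_count)
{
	size /= disk_count;
	size = (size >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
	size *= disk_count;
	return size;
}

int main(void)
{
	/* example: 3-disk RAID5 -> 2 data disks, 128 KiB chunk,
	 * per-member size that is not MB-aligned */
	unsigned long long member_size = 2047999;	/* sectors */
	unsigned int data_disks = 2;
	unsigned long long blocks_per_strip = 256;	/* 128 KiB / 512 B */

	/* old scheme: round the total only; the stripe count still comes
	 * from the unrounded per-member size */
	unsigned long long old_total =
		((member_size * data_disks) >> SECT_PER_MB_SHIFT)
			<< SECT_PER_MB_SHIFT;
	unsigned long long old_stripes = member_size / blocks_per_strip;

	/* new scheme: round per data disk, then derive the per-member
	 * size and the stripe count from the rounded value */
	unsigned long long new_total =
		round_size_to_mb(member_size * data_disks, data_disks);
	unsigned long long new_member = new_total / data_disks;
	unsigned long long new_stripes = new_member / blocks_per_strip;

	printf("old: array=%llu, stripes cover %llu (mismatch)\n",
	       old_total, old_stripes * blocks_per_strip * data_disks);
	printf("new: array=%llu, stripes cover %llu (match)\n",
	       new_total, new_stripes * blocks_per_strip * data_disks);
	return 0;
}

With these numbers the old scheme reports an array of 4093952 sectors while the stripes cover 4095488, which is the mismatch described in the commit message; the new scheme reports 4091904 for both.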