/*
 * This patch is related to the following upstream kernel commit.
 *
 * This patch uses QUEUE_FLAG_STABLE_WRITES to replace QUEUE_FLAG_DISCARD.
 */
commit 70200574cc229f6ba038259e8142af2aa09e6976
Author: Christoph Hellwig <hch@lst.de>
Date:   Fri Apr 15 06:52:55 2022 +0200

    block: remove QUEUE_FLAG_DISCARD

    Just use a non-zero max_discard_sectors as an indicator for discard
    support, similar to what is done for write zeroes.

    The only places where needs special attention is the RAID5 driver,
    which must clear discard support for security reasons by default,
    even if the default stacking rules would allow for it.

    Signed-off-by: Christoph Hellwig <hch@lst.de>
    Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
    Acked-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> [drbd]
    Acked-by: Jan Höppner <hoeppner@linux.ibm.com> [s390]
    Acked-by: Coly Li <colyli@suse.de> [bcache]
    Acked-by: David Sterba <dsterba@suse.com> [btrfs]
    Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
    Link: https://lore.kernel.org/r/20220415045258.199825-25-hch@lst.de
    Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
diff -Nupr a/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c b/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c
--- a/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c	2022-08-22 18:14:20.639382230 +0800
+++ b/drbd/drbd-kernel-compat/tests/have_blk_queue_flag_set.c	2022-08-22 18:14:43.819285373 +0800
@@ -3,5 +3,5 @@
 
 void dummy(struct request_queue *q)
 {
-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 }
diff -Nupr a/drbd/drbd_main.c b/drbd/drbd_main.c
--- a/drbd/drbd_main.c	2022-08-23 08:04:26.097721587 +0800
+++ b/drbd/drbd_main.c	2022-08-23 08:04:41.449655955 +0800
@@ -1574,7 +1574,7 @@ static void assign_p_sizes_qlim(struct d
 		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
-		p->qlim->discard_enabled = blk_queue_discard(q);
+		p->qlim->discard_enabled = !!bdev_max_discard_sectors(device->ldev->backing_bdev);
 		p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
 		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
 	} else {
diff -Nupr a/drbd/drbd_nl.c b/drbd/drbd_nl.c
--- a/drbd/drbd_nl.c	2022-08-23 08:04:26.101721570 +0800
+++ b/drbd/drbd_nl.c	2022-08-23 08:04:41.453655938 +0800
@@ -1967,13 +1967,14 @@ static unsigned int drbd_max_discard_sec
 static void decide_on_discard_support(struct drbd_device *device,
 		struct request_queue *q,
 		struct request_queue *b,
-		bool discard_zeroes_if_aligned)
+		bool discard_zeroes_if_aligned,
+		struct drbd_backing_dev *bdev)
 {
 	/* q = drbd device queue (device->rq_queue)
 	 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
 	 * or NULL if diskless
 	 */
-	bool can_do = b ? blk_queue_discard(b) : true;
+	bool can_do = b ? bdev_max_discard_sectors(bdev->backing_bdev) : true;
 
 	if (can_do && b && !queue_discard_zeroes_data(b) && !discard_zeroes_if_aligned) {
 		can_do = false;
@@ -1992,23 +1993,12 @@ static void decide_on_discard_support(st
 		 * topology on all peers. */
 		blk_queue_discard_granularity(q, 512);
 		q->limits.max_discard_sectors = drbd_max_discard_sectors(device->resource);
-		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+		q->limits.max_write_zeroes_sectors =
+			drbd_max_discard_sectors(device->resource);
 	} else {
-		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		blk_queue_discard_granularity(q, 0);
 		q->limits.max_discard_sectors = 0;
-	}
-}
-
-static void fixup_discard_if_not_supported(struct request_queue *q)
-{
-	/* To avoid confusion, if this queue does not support discard, clear
-	 * max_discard_sectors, which is what lsblk -D reports to the user.
-	 * Older kernels got this wrong in "stack limits".
-	 * */
-	if (!blk_queue_discard(q)) {
-		blk_queue_max_discard_sectors(q, 0);
-		blk_queue_discard_granularity(q, 0);
+		q->limits.max_write_zeroes_sectors = 0;
 	}
 }
 
@@ -2116,7 +2106,7 @@ static void drbd_setup_queue_param(struc
 	blk_queue_max_hw_sectors(q, max_hw_sectors);
 	/* This is the workaround for "bio would need to, but cannot, be split" */
 	blk_queue_segment_boundary(q, PAGE_SIZE-1);
-	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
+	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned, bdev);
 	decide_on_write_same_support(device, q, b, o, disable_write_same);
 
 	if (b) {
@@ -2127,7 +2117,6 @@ static void drbd_setup_queue_param(struc
 		blk_queue_update_readahead(q);
 #endif
 	}
-	fixup_discard_if_not_supported(q);
 	fixup_write_zeroes(device, q);
 }
 
@@ -2233,13 +2222,14 @@ static void sanitize_disk_conf(struct dr
 		struct drbd_backing_dev *nbc)
 {
 	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+	struct block_device *bdev = nbc->backing_bdev;
 
 	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
 		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
 	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
 		disk_conf->al_extents = drbd_al_extents_max(nbc);
 
-	if (!blk_queue_discard(q) ||
+	if (!bdev_max_discard_sectors(bdev) ||
 	    (!queue_discard_zeroes_data(q) && !disk_conf->discard_zeroes_if_aligned)) {
 		if (disk_conf->rs_discard_granularity) {
 			disk_conf->rs_discard_granularity = 0; /* disable feature */
@@ -2261,7 +2251,7 @@ static void sanitize_disk_conf(struct dr
 		/* compat:
 		 * old kernel has 0 granularity means "unknown" means one sector.
 		 * current kernel has 0 granularity means "discard not supported".
-		 * Not supported is checked above already with !blk_queue_discard(q).
+		 * Not supported is checked above already with !bdev_max_discard_sectors().
 		 */
 		unsigned int ql_dg = q->limits.discard_granularity ?: 512;
 
diff -Nupr a/drbd/drbd_receiver.c b/drbd/drbd_receiver.c
--- a/drbd/drbd_receiver.c	2022-08-23 08:04:26.105721553 +0800
+++ b/drbd/drbd_receiver.c	2022-08-23 08:25:31.188262629 +0800
@@ -1686,11 +1686,11 @@ int drbd_issue_discard_or_zero_out(struc
 
 static bool can_do_reliable_discards(struct drbd_device *device)
 {
 	struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
 	struct disk_conf *dc;
 	bool can_do;
 
-	if (!blk_queue_discard(q))
+	if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
 		return false;
 
 	if (queue_discard_zeroes_data(q))