drbd/bsc-1201335_04-bio_alloc_bioset.patch

/* This patch is related to the following upstream kernel commit */
commit 609be1066731fea86436f5f91022f82e592ab456
Author: Christoph Hellwig <hch@lst.de>
Date: Mon Jan 24 10:11:03 2022 +0100
block: pass a block_device and opf to bio_alloc_bioset
Pass the block_device and operation that we plan to use this bio for to
bio_alloc_bioset to optimize the assignment. NULL/0 can be passed, both
for the passthrough case on a raw request_queue and to temporarily avoid
refactoring some nasty code.
Also move the gfp_mask argument after the nr_vecs argument for a much
more logical calling convention matching what most of the kernel does.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
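
For reference, the calling-convention change being backported looks roughly like
this. The prototypes below are a sketch based on the hunks that follow and the
v5.17-era API; md_bio_prepare() is a hypothetical helper for illustration only,
and the exact type of the opf argument (unsigned int here) may differ between
kernel versions.

/*
 * Old prototype (before the upstream change):
 *	struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_vecs,
 *				     struct bio_set *bs);
 *
 * New prototype (after the upstream change):
 *	struct bio *bio_alloc_bioset(struct block_device *bdev,
 *				     unsigned short nr_vecs, unsigned int opf,
 *				     gfp_t gfp_mask, struct bio_set *bs);
 */
#include <linux/bio.h>

/* Hypothetical helper showing the converted pattern: the target block
 * device and the operation flags are passed at allocation time, so the
 * separate bio_set_dev() call and bi_opf assignment are no longer needed. */
static struct bio *md_bio_prepare(struct block_device *bdev, unsigned int opf,
				  sector_t sector, struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(bdev, 1, opf, GFP_NOIO, bs);

	bio->bi_iter.bi_sector = sector;
	return bio;
}
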
diff -Nupr a/drbd/drbd_actlog.c b/drbd/drbd_actlog.c
--- a/drbd/drbd_actlog.c 2022-07-07 18:23:47.643895879 +0800
+++ b/drbd/drbd_actlog.c 2022-07-08 06:46:56.597471149 +0800
@@ -94,8 +94,8 @@ static int _drbd_md_sync_page_io(struct
 	device->md_io.done = 0;
 	device->md_io.error = -ENODEV;
 
-	bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
-	bio_set_dev(bio, bdev->md_bdev);
+	bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
+			       &drbd_md_io_bio_set);
 	bio->bi_iter.bi_sector = sector;
 	err = -EIO;
 	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
@@ -103,8 +103,6 @@ static int _drbd_md_sync_page_io(struct
 	bio->bi_private = device;
 	bio->bi_end_io = drbd_md_endio;
-	bio->bi_opf = op | op_flags;
-
 	if (op != REQ_OP_WRITE && device->disk_state[NOW] == D_DISKLESS && device->ldev == NULL)
 		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
 		;
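
A practical consequence of the new convention, visible in the drbd_bitmap.c
hunks below, is that the operation has to be known before the bio is allocated;
the op computation therefore moves ahead of the bio_alloc_bioset() call
(excerpt paraphrased from the hunk below, with an explanatory comment added):

	/* under the new API the opf argument is needed at allocation time,
	 * so op must be computed before the bio is allocated */
	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
	struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
					   GFP_NOIO, &drbd_md_io_bio_set);
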
diff -Nupr a/drbd/drbd_bitmap.c b/drbd/drbd_bitmap.c
--- a/drbd/drbd_bitmap.c 2022-07-07 18:24:13.671780513 +0800
+++ b/drbd/drbd_bitmap.c 2022-07-08 06:49:05.232970673 +0800
@@ -1123,12 +1123,13 @@ static void drbd_bm_endio(struct bio *bi
 static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
 {
-	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
 	struct drbd_device *device = ctx->device;
+	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
+	struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
+					   GFP_NOIO, &drbd_md_io_bio_set);
 	struct drbd_bitmap *b = device->bitmap;
 	struct page *page;
 	unsigned int len;
-	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
 	sector_t on_disk_sector =
 		device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1153,14 +1154,12 @@ static void bm_page_io_async(struct drbd
 		bm_store_page_idx(page, page_nr);
 	} else
 		page = b->bm_pages[page_nr];
-	bio_set_dev(bio, device->ldev->md_bdev);
 	bio->bi_iter.bi_sector = on_disk_sector;
 	/* bio_add_page of a single page to an empty bio will always succeed,
 	 * according to api. Do we want to assert that? */
 	bio_add_page(bio, page, len, 0);
 	bio->bi_private = ctx;
 	bio->bi_end_io = drbd_bm_endio;
-	bio->bi_opf = op;
 	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
 		bio->bi_status = BLK_STS_IOERR;