fscrypt: keep multiple bios in flight in fscrypt_zeroout_range_inline_crypt

This should slightly improve performance for large zeroing operations,
but more importantly prepares for blk-crypto refactoring that requires
all fscrypt users to call submit_bio directly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2026-01-09 07:07:42 +01:00 committed by Jens Axboe
parent c22756a997
commit bc26e2efa2

View file

@ -47,49 +47,71 @@ bool fscrypt_decrypt_bio(struct bio *bio)
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
/*
 * Shared completion state for the group of zeroing bios submitted by
 * fscrypt_zeroout_range_inline_crypt().  The submitter holds one reference
 * in 'pending' and each in-flight bio holds another; the last dropper
 * completes 'done' (see fscrypt_zeroout_range_done()).
 */
struct fscrypt_zero_done {
atomic_t pending; /* in-flight bios, plus one for the submitter */
blk_status_t status; /* first bio error seen; 0 means success */
struct completion done; /* signalled when 'pending' drops to zero */
};
/*
 * Drop one reference on @done.  The final reference to go away — whether
 * from a bio completion or from the submitter itself — wakes whoever is
 * blocked in wait_for_completion(&done->done).
 */
static void fscrypt_zeroout_range_done(struct fscrypt_zero_done *done)
{
	if (!atomic_dec_and_test(&done->pending))
		return;
	complete(&done->done);
}
/*
 * Bio completion handler for fscrypt_zeroout_range_inline_crypt(): record
 * the error status (if any), drop this bio's reference on the shared
 * fscrypt_zero_done, and free the bio.
 */
static void fscrypt_zeroout_range_end_io(struct bio *bio)
{
struct fscrypt_zero_done *done = bio->bi_private;
/* cmpxchg(..., 0, ...) keeps only the FIRST error; later ones are dropped */
if (bio->bi_status)
cmpxchg(&done->status, 0, bio->bi_status);
/* may complete the waiter; 'done' lives on the submitter's stack */
fscrypt_zeroout_range_done(done);
bio_put(bio);
}
/*
 * Zero out a range of file blocks via the block device's inline encryption
 * hardware, keeping multiple bios in flight rather than waiting for each
 * one with submit_bio_wait().
 *
 * NOTE(review): this span was a squashed old/new unified diff (both the
 * removed submit_bio_wait() loop and the added async version were present
 * with the +/- markers stripped, leaving non-compilable code).  This is the
 * reconstructed post-commit version, consistent with the struct
 * fscrypt_zero_done and fscrypt_zeroout_range_end_io() definitions above.
 *
 * @inode:  inode whose blocks are being zeroed (supplies the crypt context)
 * @lblk:   starting logical block number within the file
 * @sector: starting device sector
 * @len:    number of file blocks to zero
 *
 * Return: 0 on success, or the errno of the first failed bio.
 */
static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					      pgoff_t lblk, sector_t sector,
					      unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
	/* 'pending' starts at 1: the submitter's own reference. */
	struct fscrypt_zero_done done = {
		.pending = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(done.done),
	};

	while (len) {
		struct bio *bio;
		unsigned int n;

		/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
		bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS,
				REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = sector;
		bio->bi_private = &done;
		bio->bi_end_io = fscrypt_zeroout_range_end_io;
		fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);

		/*
		 * Fill the bio with ZERO_PAGE segments — at most one page's
		 * worth of blocks each — until it is full, the range is
		 * exhausted, or the next block can no longer share this
		 * bio's crypt context.
		 */
		for (n = 0; n < BIO_MAX_VECS; n++) {
			unsigned int blocks_this_page =
				min(len, blocks_per_page);
			unsigned int bytes_this_page =
				blocks_this_page << blockbits;

			__bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
			len -= blocks_this_page;
			lblk += blocks_this_page;
			sector += (bytes_this_page >> SECTOR_SHIFT);
			if (!len || !fscrypt_mergeable_bio(bio, inode, lblk))
				break;
		}

		/* Take the bio's reference before it can complete. */
		atomic_inc(&done.pending);
		submit_bio(bio);
	}

	/* Drop the submitter's reference, then wait for all bios. */
	fscrypt_zeroout_range_done(&done);
	wait_for_completion(&done.done);
	return blk_status_to_errno(done.status);
}
/**