blk-crypto: use on-stack skcipher requests for fallback en/decryption

Allocating a skcipher request dynamically can deadlock or cause
unexpected I/O failures when called from writeback context.  Avoid the
allocation entirely by using on-stack skcipher requests, similar to what the
non-blk-crypto fscrypt path already does.

This drops the incomplete support for asynchronous algorithms, which
previously could be used, but only synchronously.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2026-01-09 07:07:46 +01:00 committed by Jens Axboe
parent b37fbce460
commit 2f655dcb2d

View file

@@ -75,7 +75,7 @@ static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];
static struct blk_crypto_fallback_keyslot {
enum blk_crypto_mode_num crypto_mode;
struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
struct crypto_sync_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;
static struct blk_crypto_profile *blk_crypto_fallback_profile;
@@ -98,7 +98,7 @@ static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);
/* Clear the key in the skcipher */
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
err = crypto_sync_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
blk_crypto_modes[crypto_mode].keysize);
WARN_ON(err);
slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
@@ -119,7 +119,7 @@ blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
blk_crypto_fallback_evict_keyslot(slot);
slotp->crypto_mode = crypto_mode;
err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->bytes,
err = crypto_sync_skcipher_setkey(slotp->tfms[crypto_mode], key->bytes,
key->size);
if (err) {
blk_crypto_fallback_evict_keyslot(slot);
@@ -176,28 +176,13 @@ static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
return bio;
}
static bool
blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
struct skcipher_request **ciph_req_ret,
struct crypto_wait *wait)
static struct crypto_sync_skcipher *
blk_crypto_fallback_tfm(struct blk_crypto_keyslot *slot)
{
struct skcipher_request *ciph_req;
const struct blk_crypto_fallback_keyslot *slotp;
int keyslot_idx = blk_crypto_keyslot_index(slot);
const struct blk_crypto_fallback_keyslot *slotp =
&blk_crypto_keyslots[blk_crypto_keyslot_index(slot)];
slotp = &blk_crypto_keyslots[keyslot_idx];
ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
GFP_NOIO);
if (!ciph_req)
return false;
skcipher_request_set_callback(ciph_req,
CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, wait);
*ciph_req_ret = ciph_req;
return true;
return slotp->tfms[slotp->crypto_mode];
}
union blk_crypto_iv {
@@ -214,46 +199,22 @@ static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
iv->dun[i] = cpu_to_le64(dun[i]);
}
/*
* The crypto API fallback's encryption routine.
*
* Allocate one or more bios for encryption, encrypt the input bio using the
* crypto API, and submit the encrypted bios. Sets bio->bi_status and
* completes the source bio on error
*/
static void blk_crypto_fallback_encrypt_bio(struct bio *src_bio)
static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
struct crypto_sync_skcipher *tfm)
{
struct bio_crypt_ctx *bc = src_bio->bi_crypt_context;
int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
struct skcipher_request *ciph_req = NULL;
struct blk_crypto_keyslot *slot;
DECLARE_CRYPTO_WAIT(wait);
SYNC_SKCIPHER_REQUEST_ON_STACK(ciph_req, tfm);
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
struct scatterlist src, dst;
union blk_crypto_iv iv;
unsigned int nr_enc_pages, enc_idx;
struct bio *enc_bio;
blk_status_t status;
unsigned int i;
/*
* Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
* this bio's algorithm and key.
*/
status = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
bc->bc_key, &slot);
if (status != BLK_STS_OK) {
src_bio->bi_status = status;
bio_endio(src_bio);
return;
}
/* and then allocate an skcipher_request for it */
if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
src_bio->bi_status = BLK_STS_RESOURCE;
bio_endio(src_bio);
goto out_release_keyslot;
}
skcipher_request_set_callback(ciph_req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
sg_init_table(&src, 1);
@@ -297,10 +258,9 @@ new_bio:
*/
for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
&wait)) {
if (crypto_skcipher_encrypt(ciph_req)) {
bio_io_error(enc_bio);
goto out_free_request;
return;
}
bio_crypt_dun_increment(curr_dun, 1);
src.offset += data_unit_size;
@@ -327,14 +287,76 @@ new_bio:
}
submit_bio(enc_bio);
out_free_request:
skcipher_request_free(ciph_req);
out_release_keyslot:
}
/*
* The crypto API fallback's encryption routine.
*
* Allocate one or more bios for encryption, encrypt the input bio using the
* crypto API, and submit the encrypted bios. Sets bio->bi_status and
* completes the source bio on error
*/
/*
 * Entry point for fallback encryption: acquire a fallback keyslot whose
 * tfm is programmed with this bio's key, encrypt through it, then release
 * the keyslot.  If no keyslot can be obtained, the source bio is completed
 * with the reported error status.
 */
static void blk_crypto_fallback_encrypt_bio(struct bio *src_bio)
{
struct bio_crypt_ctx *bc = src_bio->bi_crypt_context;
struct blk_crypto_keyslot *slot;
blk_status_t status;
/* Look up / program a keyslot for this bio's key in the fallback profile. */
status = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
bc->bc_key, &slot);
if (status != BLK_STS_OK) {
/* No keyslot available: fail the source bio with that status. */
src_bio->bi_status = status;
bio_endio(src_bio);
return;
}
/* Encrypt using the slot's crypto_sync_skcipher. */
__blk_crypto_fallback_encrypt_bio(src_bio,
blk_crypto_fallback_tfm(slot));
blk_crypto_put_keyslot(slot);
}
/*
 * Decrypt @bio in place over the range described by @iter, using the
 * synchronous skcipher @tfm.  The skcipher request lives on the stack, so
 * no allocation is needed here.  Returns BLK_STS_OK on success or
 * BLK_STS_IOERR if any data unit fails to decrypt.
 */
static blk_status_t __blk_crypto_fallback_decrypt_bio(struct bio *bio,
struct bio_crypt_ctx *bc, struct bvec_iter iter,
struct crypto_sync_skcipher *tfm)
{
SYNC_SKCIPHER_REQUEST_ON_STACK(ciph_req, tfm);
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
union blk_crypto_iv iv;
struct scatterlist sg;
struct bio_vec bv;
const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
unsigned int i;
/* NULL callback/data: the sync tfm completes inline, no wait needed. */
skcipher_request_set_callback(ciph_req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
/* Start from the bio's initial data unit number (DUN). */
memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
/* One scatterlist entry, reused in place for every data unit. */
sg_init_table(&sg, 1);
skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
iv.bytes);
/* Decrypt each segment in the bio */
__bio_for_each_segment(bv, bio, iter, iter) {
struct page *page = bv.bv_page;
sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
/* Decrypt each data unit in the segment */
for (i = 0; i < bv.bv_len; i += data_unit_size) {
/* Fresh IV derived from the current DUN for each unit. */
blk_crypto_dun_to_iv(curr_dun, &iv);
if (crypto_skcipher_decrypt(ciph_req))
return BLK_STS_IOERR;
/* Advance DUN and slide the sg window to the next unit. */
bio_crypt_dun_increment(curr_dun, 1);
sg.offset += data_unit_size;
}
}
return BLK_STS_OK;
}
/*
* The crypto API fallback's main decryption routine.
*
* Decrypts input bio in place, and calls bio_endio on the bio.
*/
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
@@ -344,63 +366,19 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
struct bio *bio = f_ctx->bio;
struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
struct blk_crypto_keyslot *slot;
struct skcipher_request *ciph_req = NULL;
DECLARE_CRYPTO_WAIT(wait);
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
union blk_crypto_iv iv;
struct scatterlist sg;
struct bio_vec bv;
struct bvec_iter iter;
const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
unsigned int i;
blk_status_t blk_st;
blk_status_t status;
/*
* Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
* this bio's algorithm and key.
*/
blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
status = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
bc->bc_key, &slot);
if (blk_st != BLK_STS_OK) {
bio->bi_status = blk_st;
goto out_no_keyslot;
if (status == BLK_STS_OK) {
status = __blk_crypto_fallback_decrypt_bio(bio, bc,
f_ctx->crypt_iter,
blk_crypto_fallback_tfm(slot));
blk_crypto_put_keyslot(slot);
}
/* and then allocate an skcipher_request for it */
if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
bio->bi_status = BLK_STS_RESOURCE;
goto out;
}
memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
sg_init_table(&sg, 1);
skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
iv.bytes);
/* Decrypt each segment in the bio */
__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
struct page *page = bv.bv_page;
sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
/* Decrypt each data unit in the segment */
for (i = 0; i < bv.bv_len; i += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
&wait)) {
bio->bi_status = BLK_STS_IOERR;
goto out;
}
bio_crypt_dun_increment(curr_dun, 1);
sg.offset += data_unit_size;
}
}
out:
skcipher_request_free(ciph_req);
blk_crypto_put_keyslot(slot);
out_no_keyslot:
mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
bio->bi_status = status;
bio_endio(bio);
}
@@ -608,7 +586,8 @@ int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
for (i = 0; i < blk_crypto_num_keyslots; i++) {
slotp = &blk_crypto_keyslots[i];
slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
slotp->tfms[mode_num] = crypto_alloc_sync_skcipher(cipher_str,
0, 0);
if (IS_ERR(slotp->tfms[mode_num])) {
err = PTR_ERR(slotp->tfms[mode_num]);
if (err == -ENOENT) {
@@ -620,7 +599,7 @@ int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
goto out_free_tfms;
}
crypto_skcipher_set_flags(slotp->tfms[mode_num],
crypto_sync_skcipher_set_flags(slotp->tfms[mode_num],
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
}
@@ -634,7 +613,7 @@ int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
out_free_tfms:
for (i = 0; i < blk_crypto_num_keyslots; i++) {
slotp = &blk_crypto_keyslots[i];
crypto_free_skcipher(slotp->tfms[mode_num]);
crypto_free_sync_skcipher(slotp->tfms[mode_num]);
slotp->tfms[mode_num] = NULL;
}
out: