dm: pass through operations on wrapped inline crypto keys

Make the device-mapper layer pass through the derive_sw_secret,
import_key, generate_key, and prepare_key blk-crypto operations when all
underlying devices support hardware-wrapped inline crypto keys and are
passing through inline crypto support.

Commit ebc4176551 ("blk-crypto: add basic hardware-wrapped key
support") already made BLK_CRYPTO_KEY_TYPE_HW_WRAPPED be passed through
in the same way that the other crypto capabilities are.  But the wrapped
key support also includes additional operations in blk_crypto_ll_ops,
and the dm layer needs to implement those to pass them through.
derive_sw_secret is needed by fscrypt, while the other operations are
needed for the new blk-crypto ioctls to work on device-mapper devices
and not just the raw partitions.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
This commit is contained in:
Eric Biggers 2025-05-01 14:23:20 -07:00 committed by Mikulas Patocka
parent 025e138eeb
commit e93912786e

View file

@@ -1197,6 +1197,176 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
return 0;
}
/*
 * The hardware-wrapped inline crypto key operations that device-mapper passes
 * through to the underlying devices.  One value per blk_crypto_ll_ops hook
 * implemented below.
 */
enum dm_wrappedkey_op {
	DERIVE_SW_SECRET,
	IMPORT_KEY,
	GENERATE_KEY,
	PREPARE_KEY,
};
/*
 * Arguments for one wrapped-key operation, handed to each underlying device
 * via the iterate_devices callback.
 *
 * @op:  which operation to perform (selects the active union member)
 * @err: result of the most recent attempt; initialized to -EOPNOTSUPP by
 *       dm_exec_wrappedkey_op() and set to 0 once a device succeeds
 */
struct dm_wrappedkey_op_args {
	enum dm_wrappedkey_op op;
	int err;
	union {
		struct {
			const u8 *eph_key;
			size_t eph_key_size;
			u8 *sw_secret;
		} derive_sw_secret;
		struct {
			const u8 *raw_key;
			size_t raw_key_size;
			u8 *lt_key;
		} import_key;
		struct {
			u8 *lt_key;
		} generate_key;
		struct {
			const u8 *lt_key;
			size_t lt_key_size;
			u8 *eph_key;
		} prepare_key;
	};
};
/*
 * iterate_devices callback: attempt the wrapped-key operation described by
 * @data on one underlying device.  Records the result in args->err; always
 * returns 0 so iteration continues until some device succeeds.
 */
static int dm_wrappedkey_op_callback(struct dm_target *ti, struct dm_dev *dev,
				     sector_t start, sector_t len, void *data)
{
	struct dm_wrappedkey_op_args *args = data;
	struct block_device *bdev = dev->bdev;
	struct blk_crypto_profile *profile =
		bdev_get_queue(bdev)->crypto_profile;
	int ret = -EOPNOTSUPP;

	/* A previously visited device already completed the operation. */
	if (args->err == 0)
		return 0;

	if (args->op == DERIVE_SW_SECRET) {
		ret = blk_crypto_derive_sw_secret(
			bdev,
			args->derive_sw_secret.eph_key,
			args->derive_sw_secret.eph_key_size,
			args->derive_sw_secret.sw_secret);
	} else if (args->op == IMPORT_KEY) {
		ret = blk_crypto_import_key(profile,
					    args->import_key.raw_key,
					    args->import_key.raw_key_size,
					    args->import_key.lt_key);
	} else if (args->op == GENERATE_KEY) {
		ret = blk_crypto_generate_key(profile,
					      args->generate_key.lt_key);
	} else if (args->op == PREPARE_KEY) {
		ret = blk_crypto_prepare_key(profile,
					     args->prepare_key.lt_key,
					     args->prepare_key.lt_key_size,
					     args->prepare_key.eph_key);
	}

	args->err = ret;
	/* On failure, keep iterating so another device can be tried. */
	return 0;
}
static int dm_exec_wrappedkey_op(struct blk_crypto_profile *profile,
struct dm_wrappedkey_op_args *args)
{
struct mapped_device *md =
container_of(profile, struct dm_crypto_profile, profile)->md;
struct dm_target *ti;
struct dm_table *t;
int srcu_idx;
int i;
args->err = -EOPNOTSUPP;
t = dm_get_live_table(md, &srcu_idx);
if (!t)
goto out;
/*
* blk-crypto currently has no support for multiple incompatible
* implementations of wrapped inline crypto keys on a single system.
* It was already checked earlier that support for wrapped keys was
* declared on all underlying devices. Thus, all the underlying devices
* should support all wrapped key operations and they should behave
* identically, i.e. work with the same keys. So, just executing the
* operation on the first device on which it works suffices for now.
*/
for (i = 0; i < t->num_targets; i++) {
ti = dm_table_get_target(t, i);
if (!ti->type->iterate_devices)
continue;
ti->type->iterate_devices(ti, dm_wrappedkey_op_callback, args);
if (!args->err)
break;
}
out:
dm_put_live_table(md, srcu_idx);
return args->err;
}
/* blk_crypto_ll_ops.derive_sw_secret: pass through to underlying devices. */
static int dm_derive_sw_secret(struct blk_crypto_profile *profile,
			       const u8 *eph_key, size_t eph_key_size,
			       u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
	struct dm_wrappedkey_op_args args = { .op = DERIVE_SW_SECRET };

	args.derive_sw_secret.eph_key = eph_key;
	args.derive_sw_secret.eph_key_size = eph_key_size;
	args.derive_sw_secret.sw_secret = sw_secret;
	return dm_exec_wrappedkey_op(profile, &args);
}
/* blk_crypto_ll_ops.import_key: pass through to underlying devices. */
static int dm_import_key(struct blk_crypto_profile *profile,
			 const u8 *raw_key, size_t raw_key_size,
			 u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	struct dm_wrappedkey_op_args args = { .op = IMPORT_KEY };

	args.import_key.raw_key = raw_key;
	args.import_key.raw_key_size = raw_key_size;
	args.import_key.lt_key = lt_key;
	return dm_exec_wrappedkey_op(profile, &args);
}
/* blk_crypto_ll_ops.generate_key: pass through to underlying devices. */
static int dm_generate_key(struct blk_crypto_profile *profile,
			   u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	struct dm_wrappedkey_op_args args = { .op = GENERATE_KEY };

	args.generate_key.lt_key = lt_key;
	return dm_exec_wrappedkey_op(profile, &args);
}
/* blk_crypto_ll_ops.prepare_key: pass through to underlying devices. */
static int dm_prepare_key(struct blk_crypto_profile *profile,
			  const u8 *lt_key, size_t lt_key_size,
			  u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	struct dm_wrappedkey_op_args args = { .op = PREPARE_KEY };

	args.prepare_key.lt_key = lt_key;
	args.prepare_key.lt_key_size = lt_key_size;
	args.prepare_key.eph_key = eph_key;
	return dm_exec_wrappedkey_op(profile, &args);
}
static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -1271,6 +1441,13 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
profile);
}
if (profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) {
profile->ll_ops.derive_sw_secret = dm_derive_sw_secret;
profile->ll_ops.import_key = dm_import_key;
profile->ll_ops.generate_key = dm_generate_key;
profile->ll_ops.prepare_key = dm_prepare_key;
}
if (t->md->queue &&
!blk_crypto_has_capabilities(profile,
t->md->queue->crypto_profile)) {