mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:44:45 +01:00
Move the aes_encrypt_zvkned() and aes_decrypt_zvkned() assembly functions into lib/crypto/, wire them up to the AES library API, and remove the "aes-riscv64-zvkned" crypto_cipher algorithm. To make this possible, change the prototypes of these functions to take (rndkeys, key_len) instead of a pointer to crypto_aes_ctx, and change the RISC-V AES-XTS code to implement tweak encryption using the AES library instead of directly calling aes_encrypt_zvkned(). The result is that both the AES library and crypto_cipher APIs use RISC-V's AES instructions, whereas previously only crypto_cipher did (and it wasn't enabled by default, which this commit fixes as well). Acked-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260112192035.10427-15-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
63 lines
1.8 KiB
C
63 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2023 VRULL GmbH
|
|
* Copyright (C) 2023 SiFive, Inc.
|
|
* Copyright 2024 Google LLC
|
|
*/
|
|
|
|
#include <asm/simd.h>
|
|
#include <asm/vector.h>
|
|
|
|
/*
 * True when the CPU supports the Zvkned extension with a vector length
 * usable by this code; set once at init by aes_mod_init_arch().
 */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_zvkned);

/*
 * Single-block AES encrypt/decrypt implemented in assembly using the Zvkned
 * instructions.  Both take the standard (encryption-order) round keys plus
 * the original key length in bytes.  Callers must hold kernel vector context
 * (kernel_vector_begin/end) around these calls.
 */
void aes_encrypt_zvkned(const u32 rndkeys[], int key_len,
			u8 out[AES_BLOCK_SIZE], const u8 in[AES_BLOCK_SIZE]);
void aes_decrypt_zvkned(const u32 rndkeys[], int key_len,
			u8 out[AES_BLOCK_SIZE], const u8 in[AES_BLOCK_SIZE]);
|
|
|
|
static void aes_preparekey_arch(union aes_enckey_arch *k,
|
|
union aes_invkey_arch *inv_k,
|
|
const u8 *in_key, int key_len, int nrounds)
|
|
{
|
|
aes_expandkey_generic(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL,
|
|
in_key, key_len);
|
|
}
|
|
|
|
static void aes_encrypt_arch(const struct aes_enckey *key,
|
|
u8 out[AES_BLOCK_SIZE],
|
|
const u8 in[AES_BLOCK_SIZE])
|
|
{
|
|
if (static_branch_likely(&have_zvkned) && likely(may_use_simd())) {
|
|
kernel_vector_begin();
|
|
aes_encrypt_zvkned(key->k.rndkeys, key->len, out, in);
|
|
kernel_vector_end();
|
|
} else {
|
|
aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
|
|
}
|
|
}
|
|
|
|
static void aes_decrypt_arch(const struct aes_key *key,
|
|
u8 out[AES_BLOCK_SIZE],
|
|
const u8 in[AES_BLOCK_SIZE])
|
|
{
|
|
/*
|
|
* Note that the Zvkned code uses the standard round keys, while the
|
|
* fallback uses the inverse round keys. Thus both must be present.
|
|
*/
|
|
if (static_branch_likely(&have_zvkned) && likely(may_use_simd())) {
|
|
kernel_vector_begin();
|
|
aes_decrypt_zvkned(key->k.rndkeys, key->len, out, in);
|
|
kernel_vector_end();
|
|
} else {
|
|
aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds,
|
|
out, in);
|
|
}
|
|
}
|
|
|
|
#define aes_mod_init_arch aes_mod_init_arch
|
|
static void aes_mod_init_arch(void)
|
|
{
|
|
if (riscv_isa_extension_available(NULL, ZVKNED) &&
|
|
riscv_vector_vlen() >= 128)
|
|
static_branch_enable(&have_zvkned);
|
|
}
|