This update includes the following changes:

API:
 
 - Fix race condition in hwrng core by using RCU.
 
 Algorithms:
 
 - Allow authenc(sha224,rfc3686) in fips mode.
 - Add test vectors for authenc(hmac(sha384),cbc(aes)).
 - Add test vectors for authenc(hmac(sha224),cbc(aes)).
 - Add test vectors for authenc(hmac(md5),cbc(des3_ede)).
 - Add lz4 support in hisi_zip.
 - Only allow clear key use during self-test in s390/{phmac,paes}.
 
 Drivers:
 
 - Set rng quality to 900 in airoha.
 - Add gcm(aes) support for AMD/Xilinx Versal device.
 - Allow tfms to share device in hisilicon/trng.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEn51F/lCuNhUwmDeSxycdCkmxi6cFAmmJlNEACgkQxycdCkmx
 i6dfYw//fLKHita7B7k6Rnfv7aTX7ZaF7bwMb1w2OtNu7061ZK1+Ou127ZjFKFxC
 qJtI71qmTnhTOXnqeLHDio81QLZ3D9cUwSITv4YS4SCIZlbpKmKNFNfmNd5qweNG
 xHRQnD4jiM2Qk8GFx6CmXKWEooev9Z9vvjWtPSbuHSXVUd5WPGkJfLv6s9Oy3W6u
 7/Z+KcPtMNx3mAhNy7ZwzttKLCPfLp8YhEP99sOFmrUhehjC2e5z59xcQmef5gfJ
 cCTBUJkySLChF2bd8eHWilr8y7jow/pyldu2Ksxv2/o0l01xMqrQoIOXwCeEuEq0
 uxpKMCR0wM9jBlA1C59zBfiL5Dacb+Dbc7jcRRAa49MuYclVMRoPmnAutUMiz38G
 mk/gpc1BQJIez1rAoTyXiNsXiSeZnu/fR9tOq28pTfNXOt2CXsR6kM1AuuP2QyuP
 QC0+UM5UsTE+QIibYklop3HfSCFIaV5LkDI/RIvPzrUjcYkJYgMnG3AoIlqkOl1s
 mzcs20cH9PoZG3v5W4SkKJMib6qSx1qfa1YZ7GucYT1nUk04Plcb8tuYabPP4x6y
 ow/vfikRjnzuMesJShifJUwplaZqP64RBXMvIfgdoOCXfeQ1tKCKz0yssPfgmSs6
 K5mmnmtMvgB6k14luCD3E2zFHO6W+PHZQbSanEvhnlikPo86Dbk=
 =n4fL
 -----END PGP SIGNATURE-----

Merge tag 'v7.0-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Fix race condition in hwrng core by using RCU

  Algorithms:
   - Allow authenc(sha224,rfc3686) in fips mode
   - Add test vectors for authenc(hmac(sha384),cbc(aes))
   - Add test vectors for authenc(hmac(sha224),cbc(aes))
   - Add test vectors for authenc(hmac(md5),cbc(des3_ede))
   - Add lz4 support in hisi_zip
   - Only allow clear key use during self-test in s390/{phmac,paes}

  Drivers:
   - Set rng quality to 900 in airoha
   - Add gcm(aes) support for AMD/Xilinx Versal device
   - Allow tfms to share device in hisilicon/trng"
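
The hwrng item above is the headline API change: the core's current_rng pointer becomes RCU-protected so readers no longer race with driver unregistration (the core.c hunk appears at the end of this page). A minimal sketch of the reader/updater split that fix relies on, using illustrative helper names rather than the exact core.c functions:

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/hw_random.h>

static struct hwrng __rcu *current_rng;    /* updates serialized by rng_mutex */
static DEFINE_MUTEX(rng_mutex);

/* Reader: only touch the rng inside the RCU read-side section. */
static bool current_rng_name(char *buf, size_t len)
{
    struct hwrng *rng;
    bool found = false;

    rcu_read_lock();
    rng = rcu_dereference(current_rng);
    if (rng) {
        strscpy(buf, rng->name, len);
        found = true;
    }
    rcu_read_unlock();
    return found;
}

/* Updater: publish the new rng, then wait for all old readers. */
static void set_current_rng(struct hwrng *rng)
{
    mutex_lock(&rng_mutex);
    rcu_assign_pointer(current_rng, rng);
    mutex_unlock(&rng_mutex);
    synchronize_rcu();    /* no reader can still see the old rng */
}

As the shortlog entry "hwrng: core - use RCU and work_struct to fix race condition" indicates, the real fix additionally defers cleanup to a work_struct so it can run outside atomic context.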

* tag 'v7.0-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (100 commits)
  crypto: img-hash - Use unregister_ahashes in img_{un}register_algs
  crypto: testmgr - Add test vectors for authenc(hmac(md5),cbc(des3_ede))
  crypto: cesa - Simplify return statement in mv_cesa_dequeue_req_locked
  crypto: testmgr - Add test vectors for authenc(hmac(sha224),cbc(aes))
  crypto: testmgr - Add test vectors for authenc(hmac(sha384),cbc(aes))
  hwrng: core - use RCU and work_struct to fix race condition
  crypto: starfive - Fix memory leak in starfive_aes_aead_do_one_req()
  crypto: xilinx - Fix inconsistent indentation
  crypto: rng - Use unregister_rngs in register_rngs
  crypto: atmel - Use unregister_{aeads,ahashes,skciphers}
  hwrng: optee - simplify OP-TEE context match
  crypto: ccp - Add sysfs attribute for boot integrity
  dt-bindings: crypto: atmel,at91sam9g46-sha: add microchip,lan9691-sha
  dt-bindings: crypto: atmel,at91sam9g46-aes: add microchip,lan9691-aes
  dt-bindings: crypto: qcom,inline-crypto-engine: document the Milos ICE
  crypto: caam - fix netdev memory leak in dpaa2_caam_probe
  crypto: hisilicon/qm - increase wait time for mailbox
  crypto: hisilicon/qm - obtain the mailbox configuration at one time
  crypto: hisilicon/qm - remove unnecessary code in qm_mb_write()
  crypto: hisilicon/qm - move the barrier before writing to the mailbox register
  ...
commit 08df88fa14 (Linus Torvalds, 2026-02-10 08:36:42 -08:00)
91 changed files with 3250 additions and 1313 deletions

View file

@ -8,6 +8,21 @@ Description:
0: Not fused
1: Fused
What: /sys/bus/pci/devices/<BDF>/boot_integrity
Date: April 2026
KernelVersion: 6.20
Contact: mario.limonciello@amd.com
Description:
The /sys/bus/pci/devices/<BDF>/boot_integrity attribute reports
whether the AMD CPU or APU is used as a hardware root of trust
during the boot process.
Possible values:
0: Not a hardware root of trust
1: Hardware root of trust
NOTE: Vendors may provide design-specific alternative hardware
root of trust implementations.
What: /sys/bus/pci/devices/<BDF>/debug_lock_on
Date: June 2022
KernelVersion: 5.19
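
For context, userspace consumes the new boot_integrity attribute like any other sysfs file. A hedged sketch in C; the BDF 0000:c4:00.2 is a made-up example device, not something this commit defines:

#include <stdio.h>

/* Hypothetical device path; substitute the PSP/CCP device's BDF. */
#define ATTR "/sys/bus/pci/devices/0000:c4:00.2/boot_integrity"

int main(void)
{
    FILE *f = fopen(ATTR, "r");
    char val = '?';

    if (!f)
        return 1;    /* device absent or attribute not exposed */
    if (fread(&val, 1, 1, f) != 1)
        val = '?';
    fclose(f);
    printf("hardware root of trust: %s\n", val == '1' ? "yes" : "no");
    return 0;
}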

View file

@ -16,6 +16,7 @@ properties:
- const: atmel,at91sam9g46-aes
- items:
- enum:
- microchip,lan9691-aes
- microchip,sam9x7-aes
- microchip,sama7d65-aes
- const: atmel,at91sam9g46-aes

View file

@ -16,6 +16,7 @@ properties:
- const: atmel,at91sam9g46-sha
- items:
- enum:
- microchip,lan9691-sha
- microchip,sam9x7-sha
- microchip,sama7d65-sha
- const: atmel,at91sam9g46-sha

View file

@ -14,6 +14,7 @@ properties:
items:
- enum:
- qcom,kaanapali-inline-crypto-engine
- qcom,milos-inline-crypto-engine
- qcom,qcs8300-inline-crypto-engine
- qcom,sa8775p-inline-crypto-engine
- qcom,sc7180-inline-crypto-engine

View file

@ -30,6 +30,7 @@ properties:
- qcom,sm8550-trng
- qcom,sm8650-trng
- qcom,sm8750-trng
- qcom,x1e80100-trng
- const: qcom,trng
reg:

View file

@ -14,6 +14,8 @@ description: |
The ZynqMP AES-GCM hardened cryptographic accelerator is used to
encrypt or decrypt the data with provided key and initialization vector.
deprecated: true
properties:
compatible:
const: xlnx,zynqmp-aes

View file

@ -104,6 +104,7 @@ properties:
used to encrypt or decrypt the data with provided key and initialization
vector.
type: object
deprecated: true
required:
- compatible

View file

@ -40,6 +40,10 @@
#define PAES_256_PROTKEY_SIZE (32 + 32) /* key + verification pattern */
#define PXTS_256_PROTKEY_SIZE (32 + 32 + 32) /* k1 + k2 + verification pattern */
static bool pkey_clrkey_allowed;
module_param_named(clrkey, pkey_clrkey_allowed, bool, 0444);
MODULE_PARM_DESC(clrkey, "Allow clear key material (default N)");
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
@ -192,10 +196,14 @@ static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx,
* This function may sleep - don't call in non-sleeping context.
*/
static inline int convert_key(const u8 *key, unsigned int keylen,
struct paes_protkey *pk)
struct paes_protkey *pk, bool tested)
{
u32 xflags = PKEY_XFLAG_NOMEMALLOC;
int rc, i;
if (tested && !pkey_clrkey_allowed)
xflags |= PKEY_XFLAG_NOCLEARKEY;
pk->len = sizeof(pk->protkey);
/*
@ -209,7 +217,7 @@ static inline int convert_key(const u8 *key, unsigned int keylen,
}
rc = pkey_key2protkey(key, keylen,
pk->protkey, &pk->len, &pk->type,
PKEY_XFLAG_NOMEMALLOC);
xflags);
}
out:
@ -231,7 +239,7 @@ out:
* unnecessary additional conversion but never to invalid data on en-
* or decrypt operations.
*/
static int paes_convert_key(struct s390_paes_ctx *ctx)
static int paes_convert_key(struct s390_paes_ctx *ctx, bool tested)
{
struct paes_protkey pk;
int rc;
@ -240,7 +248,7 @@ static int paes_convert_key(struct s390_paes_ctx *ctx)
ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
spin_unlock_bh(&ctx->pk_lock);
rc = convert_key(ctx->keybuf, ctx->keylen, &pk);
rc = convert_key(ctx->keybuf, ctx->keylen, &pk, tested);
/* update context */
spin_lock_bh(&ctx->pk_lock);
@ -263,7 +271,7 @@ static int paes_convert_key(struct s390_paes_ctx *ctx)
* pk_type, pk_len and the protected key in the tfm context.
* See also comments on function paes_convert_key.
*/
static int pxts_convert_key(struct s390_pxts_ctx *ctx)
static int pxts_convert_key(struct s390_pxts_ctx *ctx, bool tested)
{
struct paes_protkey pk0, pk1;
size_t split_keylen;
@ -273,7 +281,7 @@ static int pxts_convert_key(struct s390_pxts_ctx *ctx)
ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
spin_unlock_bh(&ctx->pk_lock);
rc = convert_key(ctx->keybuf, ctx->keylen, &pk0);
rc = convert_key(ctx->keybuf, ctx->keylen, &pk0, tested);
if (rc)
goto out;
@ -287,7 +295,7 @@ static int pxts_convert_key(struct s390_pxts_ctx *ctx)
}
split_keylen = ctx->keylen / 2;
rc = convert_key(ctx->keybuf + split_keylen,
split_keylen, &pk1);
split_keylen, &pk1, tested);
if (rc)
goto out;
if (pk0.type != pk1.type) {
@ -343,6 +351,7 @@ static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
bool tested = crypto_skcipher_tested(tfm);
long fc;
int rc;
@ -352,7 +361,7 @@ static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
goto out;
/* convert key into protected key */
rc = paes_convert_key(ctx);
rc = paes_convert_key(ctx, tested);
if (rc)
goto out;
@ -382,7 +391,7 @@ out:
static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx,
struct s390_pecb_req_ctx *req_ctx,
bool maysleep)
bool tested, bool maysleep)
{
struct ecb_param *param = &req_ctx->param;
struct skcipher_walk *walk = &req_ctx->walk;
@ -430,7 +439,7 @@ static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx,
rc = -EKEYEXPIRED;
goto out;
}
rc = paes_convert_key(ctx);
rc = paes_convert_key(ctx, tested);
if (rc)
goto out;
spin_lock_bh(&ctx->pk_lock);
@ -450,6 +459,7 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/*
@ -468,7 +478,7 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
/* Try synchronous operation if no active engine usage */
if (!atomic_read(&ctx->via_engine_ctr)) {
rc = ecb_paes_do_crypt(ctx, req_ctx, false);
rc = ecb_paes_do_crypt(ctx, req_ctx, tested, false);
if (rc == 0)
goto out;
}
@ -531,11 +541,12 @@ static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/* walk has already been prepared */
rc = ecb_paes_do_crypt(ctx, req_ctx, true);
rc = ecb_paes_do_crypt(ctx, req_ctx, tested, true);
if (rc == -EKEYEXPIRED) {
/*
* Protected key expired, conversion is in process.
@ -602,6 +613,7 @@ static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
bool tested = crypto_skcipher_tested(tfm);
long fc;
int rc;
@ -611,7 +623,7 @@ static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
goto out;
/* convert raw key into protected key */
rc = paes_convert_key(ctx);
rc = paes_convert_key(ctx, tested);
if (rc)
goto out;
@ -641,7 +653,7 @@ out:
static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx,
struct s390_pcbc_req_ctx *req_ctx,
bool maysleep)
bool tested, bool maysleep)
{
struct cbc_param *param = &req_ctx->param;
struct skcipher_walk *walk = &req_ctx->walk;
@ -693,7 +705,7 @@ static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx,
rc = -EKEYEXPIRED;
goto out;
}
rc = paes_convert_key(ctx);
rc = paes_convert_key(ctx, tested);
if (rc)
goto out;
spin_lock_bh(&ctx->pk_lock);
@ -713,6 +725,7 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/*
@ -731,7 +744,7 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
/* Try synchronous operation if no active engine usage */
if (!atomic_read(&ctx->via_engine_ctr)) {
rc = cbc_paes_do_crypt(ctx, req_ctx, false);
rc = cbc_paes_do_crypt(ctx, req_ctx, tested, false);
if (rc == 0)
goto out;
}
@ -794,11 +807,12 @@ static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/* walk has already been prepared */
rc = cbc_paes_do_crypt(ctx, req_ctx, true);
rc = cbc_paes_do_crypt(ctx, req_ctx, tested, true);
if (rc == -EKEYEXPIRED) {
/*
* Protected key expired, conversion is in process.
@ -865,6 +879,7 @@ static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
bool tested = crypto_skcipher_tested(tfm);
long fc;
int rc;
@ -874,7 +889,7 @@ static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
goto out;
/* convert raw key into protected key */
rc = paes_convert_key(ctx);
rc = paes_convert_key(ctx, tested);
if (rc)
goto out;
@ -919,7 +934,7 @@ static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes
static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx,
struct s390_pctr_req_ctx *req_ctx,
bool maysleep)
bool tested, bool maysleep)
{
struct ctr_param *param = &req_ctx->param;
struct skcipher_walk *walk = &req_ctx->walk;
@ -979,7 +994,7 @@ static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx,
rc = -EKEYEXPIRED;
goto out;
}
rc = paes_convert_key(ctx);
rc = paes_convert_key(ctx, tested);
if (rc) {
if (locked)
mutex_unlock(&ctrblk_lock);
@ -1006,7 +1021,7 @@ static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx,
rc = -EKEYEXPIRED;
goto out;
}
rc = paes_convert_key(ctx);
rc = paes_convert_key(ctx, tested);
if (rc)
goto out;
spin_lock_bh(&ctx->pk_lock);
@ -1029,6 +1044,7 @@ static int ctr_paes_crypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/*
@ -1046,7 +1062,7 @@ static int ctr_paes_crypt(struct skcipher_request *req)
/* Try synchronous operation if no active engine usage */
if (!atomic_read(&ctx->via_engine_ctr)) {
rc = ctr_paes_do_crypt(ctx, req_ctx, false);
rc = ctr_paes_do_crypt(ctx, req_ctx, tested, false);
if (rc == 0)
goto out;
}
@ -1099,11 +1115,12 @@ static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/* walk has already been prepared */
rc = ctr_paes_do_crypt(ctx, req_ctx, true);
rc = ctr_paes_do_crypt(ctx, req_ctx, tested, true);
if (rc == -EKEYEXPIRED) {
/*
* Protected key expired, conversion is in process.
@ -1190,6 +1207,7 @@ static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int in_keylen)
{
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
bool tested = crypto_skcipher_tested(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len;
long fc;
@ -1205,7 +1223,7 @@ static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
goto out;
/* convert raw key(s) into protected key(s) */
rc = pxts_convert_key(ctx);
rc = pxts_convert_key(ctx, tested);
if (rc)
goto out;
@ -1255,7 +1273,7 @@ out:
static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx,
struct s390_pxts_req_ctx *req_ctx,
bool maysleep)
bool tested, bool maysleep)
{
struct xts_full_km_param *param = &req_ctx->param.full_km_param;
struct skcipher_walk *walk = &req_ctx->walk;
@ -1299,7 +1317,7 @@ static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx,
rc = -EKEYEXPIRED;
goto out;
}
rc = pxts_convert_key(ctx);
rc = pxts_convert_key(ctx, tested);
if (rc)
goto out;
spin_lock_bh(&ctx->pk_lock);
@ -1318,7 +1336,8 @@ static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx,
struct xts_km_param *param,
struct skcipher_walk *walk,
unsigned int keylen,
unsigned int offset, bool maysleep)
unsigned int offset,
bool tested, bool maysleep)
{
struct xts_pcc_param pcc_param;
unsigned long cc = 1;
@ -1337,7 +1356,7 @@ static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx,
rc = -EKEYEXPIRED;
break;
}
rc = pxts_convert_key(ctx);
rc = pxts_convert_key(ctx, tested);
if (rc)
break;
continue;
@ -1351,7 +1370,7 @@ static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx,
static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx,
struct s390_pxts_req_ctx *req_ctx,
bool maysleep)
bool tested, bool maysleep)
{
struct xts_km_param *param = &req_ctx->param.km_param;
struct skcipher_walk *walk = &req_ctx->walk;
@ -1369,7 +1388,7 @@ static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx,
if (!req_ctx->param_init_done) {
rc = __xts_2keys_prep_param(ctx, param, walk,
keylen, offset, maysleep);
keylen, offset, tested, maysleep);
if (rc)
goto out;
req_ctx->param_init_done = true;
@ -1392,7 +1411,7 @@ static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx,
rc = -EKEYEXPIRED;
goto out;
}
rc = pxts_convert_key(ctx);
rc = pxts_convert_key(ctx, tested);
if (rc)
goto out;
spin_lock_bh(&ctx->pk_lock);
@ -1408,7 +1427,7 @@ out:
static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx,
struct s390_pxts_req_ctx *req_ctx,
bool maysleep)
bool tested, bool maysleep)
{
int pk_state, rc = 0;
@ -1436,11 +1455,11 @@ static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx,
switch (ctx->fc) {
case CPACF_KM_PXTS_128:
case CPACF_KM_PXTS_256:
rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep);
rc = xts_paes_do_crypt_2keys(ctx, req_ctx, tested, maysleep);
break;
case CPACF_KM_PXTS_128_FULL:
case CPACF_KM_PXTS_256_FULL:
rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep);
rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, tested, maysleep);
break;
default:
rc = -EINVAL;
@ -1457,6 +1476,7 @@ static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long mod
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/*
@ -1475,7 +1495,7 @@ static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long mod
/* Try synchronous operation if no active engine usage */
if (!atomic_read(&ctx->via_engine_ctr)) {
rc = xts_paes_do_crypt(ctx, req_ctx, false);
rc = xts_paes_do_crypt(ctx, req_ctx, tested, false);
if (rc == 0)
goto out;
}
@ -1538,11 +1558,12 @@ static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk *walk = &req_ctx->walk;
bool tested = crypto_skcipher_tested(tfm);
int rc;
/* walk has already been prepared */
rc = xts_paes_do_crypt(ctx, req_ctx, true);
rc = xts_paes_do_crypt(ctx, req_ctx, tested, true);
if (rc == -EKEYEXPIRED) {
/*
* Protected key expired, conversion is in process.

View file

@ -23,6 +23,10 @@
static struct crypto_engine *phmac_crypto_engine;
#define MAX_QLEN 10
static bool pkey_clrkey_allowed;
module_param_named(clrkey, pkey_clrkey_allowed, bool, 0444);
MODULE_PARM_DESC(clrkey, "Allow clear key material (default N)");
/*
* A simple hash walk helper
*/
@ -311,10 +315,14 @@ static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx,
* This function may sleep - don't call in non-sleeping context.
*/
static inline int convert_key(const u8 *key, unsigned int keylen,
struct phmac_protkey *pk)
struct phmac_protkey *pk, bool tested)
{
u32 xflags = PKEY_XFLAG_NOMEMALLOC;
int rc, i;
if (tested && !pkey_clrkey_allowed)
xflags |= PKEY_XFLAG_NOCLEARKEY;
pk->len = sizeof(pk->protkey);
/*
@ -328,7 +336,7 @@ static inline int convert_key(const u8 *key, unsigned int keylen,
}
rc = pkey_key2protkey(key, keylen,
pk->protkey, &pk->len, &pk->type,
PKEY_XFLAG_NOMEMALLOC);
xflags);
}
out:
@ -350,7 +358,7 @@ out:
* unnecessary additional conversion but never to invalid data on the
* hash operation.
*/
static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx, bool tested)
{
struct phmac_protkey pk;
int rc;
@ -359,7 +367,7 @@ static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
spin_unlock_bh(&tfm_ctx->pk_lock);
rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk);
rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk, tested);
/* update context */
spin_lock_bh(&tfm_ctx->pk_lock);
@ -404,6 +412,7 @@ static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
struct hash_walk_helper *hwh = &req_ctx->hwh;
unsigned int bs = crypto_ahash_blocksize(tfm);
bool tested = crypto_ahash_tested(tfm);
unsigned int offset, k, n;
int rc = 0;
@ -444,7 +453,7 @@ static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
rc = -EKEYEXPIRED;
goto out;
}
rc = phmac_convert_key(tfm_ctx);
rc = phmac_convert_key(tfm_ctx, tested);
if (rc)
goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
@ -480,7 +489,7 @@ static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
rc = -EKEYEXPIRED;
goto out;
}
rc = phmac_convert_key(tfm_ctx);
rc = phmac_convert_key(tfm_ctx, tested);
if (rc)
goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
@ -517,6 +526,7 @@ static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
unsigned int ds = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_ahash_blocksize(tfm);
bool tested = crypto_ahash_tested(tfm);
unsigned int k, n;
int rc = 0;
@ -537,7 +547,7 @@ static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
rc = -EKEYEXPIRED;
goto out;
}
rc = phmac_convert_key(tfm_ctx);
rc = phmac_convert_key(tfm_ctx, tested);
if (rc)
goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
@ -741,11 +751,12 @@ static int phmac_setkey(struct crypto_ahash *tfm,
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_ahash_blocksize(tfm);
bool tested = crypto_ahash_tested(tfm);
unsigned int tmpkeylen;
u8 *tmpkey = NULL;
int rc = 0;
if (!crypto_ahash_tested(tfm)) {
if (!tested) {
/*
* selftest running: key is a raw hmac clear key and needs
* to get embedded into a 'clear key token' in order to have
@ -770,7 +781,7 @@ static int phmac_setkey(struct crypto_ahash *tfm,
goto out;
/* convert raw key into protected key */
rc = phmac_convert_key(tfm_ctx);
rc = phmac_convert_key(tfm_ctx, tested);
if (rc)
goto out;

View file

@ -21,7 +21,8 @@
* @param keylen size of the key blob in bytes
* @param protkey pointer to buffer receiving the protected key
* @param xflags additional execution flags (see PKEY_XFLAG_* definitions below)
* As of now the only supported flag is PKEY_XFLAG_NOMEMALLOC.
* As of now the only supported flags are PKEY_XFLAG_NOMEMALLOC
* and PKEY_XFLAG_NOCLEARKEY.
* @return 0 on success, negative errno value on failure
*/
int pkey_key2protkey(const u8 *key, u32 keylen,
@ -38,4 +39,9 @@ int pkey_key2protkey(const u8 *key, u32 keylen,
*/
#define PKEY_XFLAG_NOMEMALLOC 0x0001
/*
* Do not accept a clear key token as source for a protected key.
*/
#define PKEY_XFLAG_NOCLEARKEY 0x0002
#endif /* _KAPI_PKEY_H */
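
The s390 paes and phmac hunks above are the consumers of this new flag: once a tfm has passed the self-test ("tested" is true), clear-key tokens are refused unless the clrkey module parameter opts back in. A condensed sketch of that gating, reusing the names from the diff (struct paes_protkey is the container shown in the paes hunks):

/* Condensed from the paes/phmac convert_key() hunks above. */
static bool pkey_clrkey_allowed;    /* module_param "clrkey", default N */

static int convert_key_sketch(const u8 *key, u32 keylen,
                              struct paes_protkey *pk, bool tested)
{
    u32 xflags = PKEY_XFLAG_NOMEMALLOC;

    /*
     * "tested" is false only while the self-test itself runs, so
     * normal users never reach the firmware with clear key material
     * unless the admin explicitly boots with clrkey=1.
     */
    if (tested && !pkey_clrkey_allowed)
        xflags |= PKEY_XFLAG_NOCLEARKEY;

    pk->len = sizeof(pk->protkey);
    return pkey_key2protkey(key, keylen, pk->protkey,
                            &pk->len, &pk->type, xflags);
}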

View file

@ -60,10 +60,8 @@ static int __maybe_unused crypto_acomp_report(
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_acomp_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_puts(m, "type : acomp\n");
}
@ -337,17 +335,13 @@ int crypto_register_acomps(struct acomp_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_acomp(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_unregister_acomps(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_acomp(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);
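
The same simplification repeats below for aead/ahash/skcipher engine helpers, algapi, ahash, lskcipher, rng, scomp, shash and skcipher: instead of a reverse goto-unwind loop, a mid-loop failure now reuses the batch unregister helper on the i entries that did succeed. The shape, with hypothetical register_one/unregister_many names standing in for each subsystem's functions:

int register_many(struct alg *algs, int count)
{
    int i, ret;

    for (i = 0; i < count; i++) {
        ret = register_one(&algs[i]);
        if (ret) {
            /* roll back only the 0..i-1 entries that succeeded */
            unregister_many(algs, i);
            return ret;
        }
    }
    return 0;
}

The algapi.c hunk further below also makes the unregister helper walk in reverse, so batched teardown is LIFO with respect to registration order.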

View file

@ -151,9 +151,8 @@ static int __maybe_unused crypto_aead_report(
return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_aead_show(struct seq_file *m,
struct crypto_alg *alg)
{
struct aead_alg *aead = container_of(alg, struct aead_alg, base);

View file

@ -801,9 +801,8 @@ static int __maybe_unused crypto_ahash_report(
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_ahash_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
seq_printf(m, "async : %s\n",
@ -1020,17 +1019,13 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_ahash(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_unregister_ahashes(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_ahash(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

View file

@ -46,10 +46,8 @@ static int __maybe_unused crypto_akcipher_report(
sizeof(rakcipher), &rakcipher);
}
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_akcipher_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_puts(m, "type : akcipher\n");
}

View file

@ -511,17 +511,13 @@ int crypto_register_algs(struct crypto_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_alg(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_unregister_algs(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_alg(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_algs);
@ -529,7 +525,7 @@ void crypto_unregister_algs(struct crypto_alg *algs, int count)
{
int i;
for (i = 0; i < count; i++)
for (i = count - 1; i >= 0; --i)
crypto_unregister_alg(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_algs);

View file

@ -306,7 +306,7 @@ static const u32 bf_sbox[256 * 4] = {
/*
* The blowfish encipher, processes 64-bit blocks.
* NOTE: This function MUSTN'T respect endianess
* NOTE: This function MUSTN'T respect endianness
*/
static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
{

View file

@ -524,16 +524,13 @@ int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_engine_register_aead(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_engine_unregister_aeads(algs, i);
return ret;
}
}
return 0;
err:
crypto_engine_unregister_aeads(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
@ -566,16 +563,13 @@ int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_engine_register_ahash(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_engine_unregister_ahashes(algs, i);
return ret;
}
}
return 0;
err:
crypto_engine_unregister_ahashes(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
@ -638,16 +632,13 @@ int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
for (i = 0; i < count; i++) {
ret = crypto_engine_register_skcipher(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_engine_unregister_skciphers(algs, i);
return ret;
}
}
return 0;
err:
crypto_engine_unregister_skciphers(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);

View file

@ -226,40 +226,37 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
* @entropy buffer of seed data to be checked
*
* return:
* 0 on success
* -EAGAIN on when the CTRNG is not yet primed
* < 0 on error
* %true on success
* %false when the CTRNG is not yet primed
*/
static int drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
static bool drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
{
unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
int ret = 0;
if (!IS_ENABLED(CONFIG_CRYPTO_FIPS))
return 0;
return true;
/* skip test if we test the overall system */
if (list_empty(&drbg->test_data.list))
return 0;
return true;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;
return true;
if (!drbg->fips_primed) {
/* Priming of FIPS test */
memcpy(drbg->prev, entropy, entropylen);
drbg->fips_primed = true;
/* priming: another round is needed */
return -EAGAIN;
return false;
}
ret = memcmp(drbg->prev, entropy, entropylen);
if (!ret)
if (!memcmp(drbg->prev, entropy, entropylen))
panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, entropy, entropylen);
/* the test shall pass when the two values are not equal */
return 0;
return true;
}
/******************************************************************
@ -845,20 +842,13 @@ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
return ret;
}
static inline int drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
static inline void drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
{
int ret;
do {
do
get_random_bytes(entropy, entropylen);
ret = drbg_fips_continuous_test(drbg, entropy);
if (ret && ret != -EAGAIN)
return ret;
} while (ret);
return 0;
while (!drbg_fips_continuous_test(drbg, entropy));
}
static int drbg_seed_from_random(struct drbg_state *drbg)
@ -875,13 +865,10 @@ static int drbg_seed_from_random(struct drbg_state *drbg)
drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
drbg_get_random_bytes(drbg, entropy, entropylen);
ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
out:
memzero_explicit(entropy, entropylen);
return ret;
}
@ -956,9 +943,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
if (!rng_is_initialized())
new_seed_state = DRBG_SEED_STATE_PARTIAL;
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
drbg_get_random_bytes(drbg, entropy, entropylen);
if (!drbg->jent) {
drbg_string_fill(&data1, entropy, entropylen);
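
The reworked continuous test above now answers "usable or not" as a bool, which lets drbg_get_random_bytes() shrink to a bare retry loop. A distilled sketch of that contract; ct_state is an illustrative stand-in for the relevant drbg_state fields, not a real kernel type:

#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/string.h>

struct ct_state {
    bool primed;
    unsigned char prev[32];    /* one security-strength block, n <= 32 */
};

static bool ct_passes(struct ct_state *st, const unsigned char *e, size_t n)
{
    if (!st->primed) {
        /* the first block only primes the comparison */
        memcpy(st->prev, e, n);
        st->primed = true;
        return false;        /* caller must fetch fresh bytes */
    }
    if (!memcmp(st->prev, e, n))    /* two identical blocks in a row */
        panic("DRBG continuous self test failed\n");
    memcpy(st->prev, e, n);
    return true;
}

static void get_tested_bytes(struct ct_state *st, unsigned char *e, size_t n)
{
    do
        get_random_bytes(e, n);
    while (!ct_passes(st, e, n));
}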

View file

@ -90,33 +90,24 @@ void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
}
EXPORT_SYMBOL(ecc_digits_from_bytes);
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
size_t len = ndigits * sizeof(u64);
if (!len)
return NULL;
return kmalloc(len, GFP_KERNEL);
}
static void ecc_free_digits_space(u64 *space)
{
kfree_sensitive(space);
}
struct ecc_point *ecc_alloc_point(unsigned int ndigits)
{
struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);
struct ecc_point *p;
size_t ndigits_sz;
if (!ndigits)
return NULL;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return NULL;
p->x = ecc_alloc_digits_space(ndigits);
ndigits_sz = ndigits * sizeof(u64);
p->x = kmalloc(ndigits_sz, GFP_KERNEL);
if (!p->x)
goto err_alloc_x;
p->y = ecc_alloc_digits_space(ndigits);
p->y = kmalloc(ndigits_sz, GFP_KERNEL);
if (!p->y)
goto err_alloc_y;
@ -125,7 +116,7 @@ struct ecc_point *ecc_alloc_point(unsigned int ndigits)
return p;
err_alloc_y:
ecc_free_digits_space(p->x);
kfree(p->x);
err_alloc_x:
kfree(p);
return NULL;

View file

@ -22,7 +22,7 @@ ATOMIC_NOTIFIER_HEAD(fips_fail_notif_chain);
EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
static int fips_enable(char *str)
static int __init fips_enable(char *str)
{
if (kstrtoint(str, 0, &fips_enabled))
return 0;
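
Tagging the handler __init is safe because command-line parameters are parsed exactly once during boot, after which the handler can be discarded with the rest of the init sections. A generic sketch of the pattern; demo_mode is a made-up parameter for illustration, not one this commit adds:

#include <linux/init.h>
#include <linux/kernel.h>

static int demo_mode;

/* Runs once while the kernel parses "demo_mode=..." on the command line. */
static int __init demo_mode_setup(char *str)
{
    if (kstrtoint(str, 0, &demo_mode))
        return 0;    /* not consumed, let other handlers try */
    return 1;        /* handled */
}
__setup("demo_mode=", demo_mode_setup);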

View file

@ -859,10 +859,7 @@ static struct crypto_alg khazad_alg = {
static int __init khazad_mod_init(void)
{
int ret = 0;
ret = crypto_register_alg(&khazad_alg);
return ret;
return crypto_register_alg(&khazad_alg);
}
static void __exit khazad_mod_fini(void)

View file

@ -29,10 +29,8 @@ static int __maybe_unused crypto_kpp_report(
return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
}
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_kpp_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_puts(m, "type : kpp\n");
}

View file

@ -384,17 +384,13 @@ int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_lskcipher(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_unregister_lskciphers(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_lskcipher(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_lskciphers);

View file

@ -77,9 +77,8 @@ static int __maybe_unused crypto_rng_report(
return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_rng_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_printf(m, "type : rng\n");
seq_printf(m, "seedsize : %u\n", seedsize(alg));
@ -203,17 +202,13 @@ int crypto_register_rngs(struct rng_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_rng(algs + i);
if (ret)
goto err;
if (ret) {
crypto_unregister_rngs(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_rng(algs + i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_rngs);

View file

@ -58,10 +58,8 @@ static int __maybe_unused crypto_scomp_report(
sizeof(rscomp), &rscomp);
}
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_scomp_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_puts(m, "type : scomp\n");
}
@ -383,17 +381,13 @@ int crypto_register_scomps(struct scomp_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_scomp(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_unregister_scomps(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_scomp(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

View file

@ -346,9 +346,8 @@ static int __maybe_unused crypto_shash_report(
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_shash_show(struct seq_file *m,
struct crypto_alg *alg)
{
struct shash_alg *salg = __crypto_shash_alg(alg);
@ -542,17 +541,13 @@ int crypto_register_shashes(struct shash_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_shash(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_unregister_shashes(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_shash(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_shashes);

View file

@ -352,8 +352,8 @@ static int simd_aead_init(struct crypto_aead *tfm)
ctx->cryptd_tfm = cryptd_tfm;
reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
reqsize = max(crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm)),
crypto_aead_reqsize(&cryptd_tfm->base));
reqsize += sizeof(struct aead_request);
crypto_aead_set_reqsize(tfm, reqsize);

View file

@ -570,9 +570,8 @@ static void crypto_skcipher_free_instance(struct crypto_instance *inst)
skcipher->free(skcipher);
}
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
static void __maybe_unused crypto_skcipher_show(struct seq_file *m,
struct crypto_alg *alg)
{
struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
@ -741,17 +740,13 @@ int crypto_register_skciphers(struct skcipher_alg *algs, int count)
for (i = 0; i < count; i++) {
ret = crypto_register_skcipher(&algs[i]);
if (ret)
goto err;
if (ret) {
crypto_unregister_skciphers(algs, i);
return ret;
}
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_skcipher(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

View file

@ -4079,6 +4079,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = __VECS(aegis128_tv_template)
}
}, {
.alg = "authenc(hmac(md5),cbc(des3_ede))",
.generic_driver = "authenc(hmac-md5-lib,cbc(des3_ede-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_md5_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(md5),ecb(cipher_null))",
.generic_driver = "authenc(hmac-md5-lib,ecb-cipher_null)",
@ -4123,6 +4130,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha224),cbc(aes))",
.generic_driver = "authenc(hmac-sha224-lib,cbc(aes-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha224_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha224),cbc(des))",
.generic_driver = "authenc(hmac-sha224-lib,cbc(des-generic))",
@ -4137,6 +4151,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.generic_driver = "authenc(hmac-sha256-lib,cbc(aes-lib))",
@ -4174,6 +4192,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha384),cbc(aes))",
.generic_driver = "authenc(hmac-sha384-lib,cbc(aes-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha384_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha384),cbc(des))",
.generic_driver = "authenc(hmac-sha384-lib,cbc(des-generic))",

View file

@ -14919,6 +14919,291 @@ static const struct aead_testvec hmac_sha1_ecb_cipher_null_tv_temp[] = {
},
};
static const struct aead_testvec hmac_sha224_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00"
"\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
"\x51\x2e\x03\xd5\x34\x12\x00\x06",
.klen = 8 + 28 + 16,
.iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
.assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
.alen = 16,
.ptext = "Single block msg",
.plen = 16,
.ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
"\x27\x08\x94\x2d\xbe\x77\x18\x1a"
"\x17\xe8\x00\x76\x70\x71\xd1\x72"
"\xf8\xd0\x91\x51\x67\xf9\xdf\xd6"
"\x0d\x56\x1a\xb3\x52\x19\x85\xae"
"\x46\x74\xb6\x98",
.clen = 16 + 28,
}, { /* RFC 3602 Case 2 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x20\x21\x22\x23\x24\x25\x26\x27"
"\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
"\x30\x31\x32\x33\x34\x35\x36\x37"
"\x38\x39\x3a\x3b"
"\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
"\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
.klen = 8 + 28 + 16,
.iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
.assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
.alen = 16,
.ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
.plen = 32,
.ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
"\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
"\x75\x86\x60\x2d\x25\x3c\xff\xf9"
"\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
"\xa1\x11\xfa\xbb\x1e\x04\x7e\xe7"
"\x4c\x5f\x65\xbf\x68\x8d\x33\x9d"
"\xbc\x74\x9b\xf3\x15\xf3\x8f\x8d"
"\xe8\xaf\x33\xe0",
.clen = 32 + 28,
}, { /* RFC 3602 Case 3 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd"
"\x6c\x3e\xa0\x47\x76\x30\xce\x21"
"\xa2\xce\x33\x4a\xa7\x46\xc2\xcd",
.klen = 8 + 28 + 16,
.iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
"\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
.assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
"\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
.alen = 16,
.ptext = "This is a 48-byte message (exactly 3 AES blocks)",
.plen = 48,
.ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
"\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
"\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
"\x50\x69\x39\x27\x67\x72\xf8\xd5"
"\x02\x1c\x19\x21\x6b\xad\x52\x5c"
"\x85\x79\x69\x5d\x83\xba\x26\x84"
"\x60\xb3\xca\x0e\xc1\xfe\xf2\x27"
"\x5a\x41\xe4\x99\xa8\x19\x56\xf1"
"\x44\x98\x27\x9f\x99\xb0\x4a\xad"
"\x4d\xc1\x1e\x88",
.clen = 48 + 28,
}, { /* RFC 3602 Case 4 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd"
"\x56\xe4\x7a\x38\xc5\x59\x89\x74"
"\xbc\x46\x90\x3d\xba\x29\x03\x49",
.klen = 8 + 28 + 16,
.iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
"\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
.assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
"\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
.alen = 16,
.ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
"\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
"\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
.plen = 64,
.ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
"\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
"\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
"\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
"\x35\x90\x7a\xa6\x32\xc3\xff\xdf"
"\x86\x8b\xb7\xb2\x9d\x3d\x46\xad"
"\x83\xce\x9f\x9a\x10\x2e\xe9\x9d"
"\x49\xa5\x3e\x87\xf4\xc3\xda\x55"
"\xbb\xe9\x38\xf8\xb9\xbf\xcb\x7b"
"\xa8\x22\x91\xea\x1e\xaf\x13\xba"
"\x24\x18\x64\x9c\xcb\xb4\xa9\x16"
"\x4b\x83\x9c\xec",
.clen = 64 + 28,
}, { /* RFC 3602 Case 5 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd"
"\x90\xd3\x82\xb4\x10\xee\xba\x7a"
"\xd9\x38\xc4\x6c\xec\x1a\x82\xbf",
.klen = 8 + 28 + 16,
.iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
"\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\xe9\x6e\x8c\x08\xab\x46\x57\x63"
"\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
.alen = 24,
.ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
"\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x21\x22\x23\x24\x25\x26\x27"
"\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
"\x30\x31\x32\x33\x34\x35\x36\x37"
"\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
.plen = 80,
.ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
"\xa9\x45\x3e\x19\x4e\x12\x08\x49"
"\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
"\x33\x00\x13\xb4\x89\x8d\xc8\x56"
"\xa4\x69\x9e\x52\x3a\x55\xdb\x08"
"\x0b\x59\xec\x3a\x8e\x4b\x7e\x52"
"\x77\x5b\x07\xd1\xdb\x34\xed\x9c"
"\x53\x8a\xb5\x0c\x55\x1b\x87\x4a"
"\xa2\x69\xad\xd0\x47\xad\x2d\x59"
"\x13\xac\x19\xb7\xcf\xba\xd4\xa6"
"\x04\x5e\x83\x45\xc5\x6a\x5b\xe2"
"\x5e\xd8\x59\x06\xbd\xc7\xd2\x9b"
"\x0b\x65\x1f\x31\xc7\xe6\x9c\x39"
"\xa3\x66\xdb\xb8",
.clen = 80 + 28,
}, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x18" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd"
"\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
"\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
"\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
.klen = 8 + 28 + 24,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.plen = 64,
.ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
"\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
"\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
"\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
"\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
"\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
"\x08\xb0\xe2\x79\x88\x59\x88\x81"
"\xd9\x20\xa9\xe6\x4f\x56\x15\xcd"
"\x67\x35\xcd\x86\x94\x51\x3b\x3a"
"\xaa\x07\xb1\xed\x18\x55\x62\x01"
"\x95\xb2\x53\xb5\x20\x78\x16\xd7"
"\xb8\x49\x7f\x96",
.clen = 64 + 28,
}, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x20" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd"
"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
.klen = 8 + 28 + 32,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.plen = 64,
.ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
"\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
"\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
"\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
"\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
"\xa5\x30\xe2\x63\x04\x23\x14\x61"
"\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
"\xda\x6c\x19\x07\x8c\x6a\x9d\x1b"
"\xe0\xe2\x3d\x3f\x55\x24\x2c\x4d"
"\xb9\x13\x2a\xc0\x07\xbb\x3b\xda"
"\xfd\xa4\x51\x32\x3f\x44\xb1\x13"
"\x98\xf9\xbc\xb9",
.clen = 64 + 28,
},
};
static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
@ -15202,6 +15487,317 @@ static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = {
},
};
static const struct aead_testvec hmac_sha384_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
"\x51\x2e\x03\xd5\x34\x12\x00\x06",
.klen = 8 + 48 + 16,
.iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
.assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
"\xb4\x22\xda\x80\x2c\x9f\xac\x41",
.alen = 16,
.ptext = "Single block msg",
.plen = 16,
.ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
"\x27\x08\x94\x2d\xbe\x77\x18\x1a"
"\x79\x1c\xf1\x22\x95\x80\xe0\x60"
"\x7f\xf9\x92\x60\x83\xbd\x60\x9c"
"\xf6\x62\x8b\xa9\x7d\x56\xe2\xaf"
"\x80\x43\xbc\x41\x4a\x63\x0b\xa0"
"\x16\x25\xe2\xfe\x0a\x96\xf6\xa5"
"\x6c\x0b\xc2\x53\xb4\x27\xd9\x42",
.clen = 16 + 48,
}, { /* RFC 3602 Case 2 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x20\x21\x22\x23\x24\x25\x26\x27"
"\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
"\x30\x31\x32\x33\x34\x35\x36\x37"
"\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
"\x40\x41\x42\x43\x44\x45\x46\x47"
"\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
"\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
"\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
.klen = 8 + 48 + 16,
.iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
.assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
.alen = 16,
.ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
.plen = 32,
.ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
"\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
"\x75\x86\x60\x2d\x25\x3c\xff\xf9"
"\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
"\x4e\x5b\xa8\x65\x51\xc6\x58\xaf"
"\x31\x57\x50\x3d\x01\xa1\xa4\x3f"
"\x42\xd1\xd7\x31\x76\x8d\xf8\xc8"
"\xe4\xd2\x7e\xc5\x23\xe7\xc6\x2e"
"\x2d\xfd\x9d\xc1\xac\x50\x1e\xcf"
"\xa0\x10\xeb\x1a\x9c\xb7\xe1\xca",
.clen = 32 + 48,
}, { /* RFC 3602 Case 3 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
"\x33\x44\x55\x66\x77\x88\x99\xaa"
"\xbb\xcc\xdd\xee\xff\x11\x22\x33"
"\x6c\x3e\xa0\x47\x76\x30\xce\x21"
"\xa2\xce\x33\x4a\xa7\x46\xc2\xcd",
.klen = 8 + 48 + 16,
.iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
"\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
.assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
"\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
.alen = 16,
.ptext = "This is a 48-byte message (exactly 3 AES blocks)",
.plen = 48,
.ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
"\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
"\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
"\x50\x69\x39\x27\x67\x72\xf8\xd5"
"\x02\x1c\x19\x21\x6b\xad\x52\x5c"
"\x85\x79\x69\x5d\x83\xba\x26\x84"
"\xa1\x52\xe7\xda\xf7\x05\xb6\xca"
"\xad\x0f\x51\xed\x5a\xd3\x0f\xdf"
"\xde\xeb\x3f\x31\xed\x3a\x43\x93"
"\x3b\xb7\xca\xc8\x1b\xe7\x3b\x61"
"\x6a\x05\xfd\x2d\x6a\x5c\xb1\x0d"
"\x6e\x7a\xeb\x1c\x84\xec\xdb\xde",
.clen = 48 + 48,
}, { /* RFC 3602 Case 4 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
"\x33\x44\x55\x66\x77\x88\x99\xaa"
"\xbb\xcc\xdd\xee\xff\x11\x22\x33"
"\x56\xe4\x7a\x38\xc5\x59\x89\x74"
"\xbc\x46\x90\x3d\xba\x29\x03\x49",
.klen = 8 + 48 + 16,
.iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
"\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
.assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
"\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
.alen = 16,
.ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
"\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
"\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
"\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
"\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
"\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
.plen = 64,
.ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
"\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
"\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
"\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
"\x35\x90\x7a\xa6\x32\xc3\xff\xdf"
"\x86\x8b\xb7\xb2\x9d\x3d\x46\xad"
"\x83\xce\x9f\x9a\x10\x2e\xe9\x9d"
"\x49\xa5\x3e\x87\xf4\xc3\xda\x55"
"\x85\x7b\x91\xe0\x29\xeb\xd3\x59"
"\x7c\xe3\x67\x14\xbe\x71\x2a\xd2"
"\x8a\x1a\xd2\x35\x78\x6b\x69\xba"
"\x64\xa5\x04\x00\x19\xc3\x4c\xae"
"\x71\xff\x76\x9f\xbb\xc3\x29\x22"
"\xc2\xc6\x51\xf1\xe6\x29\x5e\xa5",
.clen = 64 + 48,
}, { /* RFC 3602 Case 5 */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x10" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
"\x33\x44\x55\x66\x77\x88\x99\xaa"
"\xbb\xcc\xdd\xee\xff\x11\x22\x33"
"\x90\xd3\x82\xb4\x10\xee\xba\x7a"
"\xd9\x38\xc4\x6c\xec\x1a\x82\xbf",
.klen = 8 + 48 + 16,
.iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
"\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\xe9\x6e\x8c\x08\xab\x46\x57\x63"
"\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
.alen = 24,
.ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
"\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x21\x22\x23\x24\x25\x26\x27"
"\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
"\x30\x31\x32\x33\x34\x35\x36\x37"
"\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
.plen = 80,
.ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
"\xa9\x45\x3e\x19\x4e\x12\x08\x49"
"\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
"\x33\x00\x13\xb4\x89\x8d\xc8\x56"
"\xa4\x69\x9e\x52\x3a\x55\xdb\x08"
"\x0b\x59\xec\x3a\x8e\x4b\x7e\x52"
"\x77\x5b\x07\xd1\xdb\x34\xed\x9c"
"\x53\x8a\xb5\x0c\x55\x1b\x87\x4a"
"\xa2\x69\xad\xd0\x47\xad\x2d\x59"
"\x13\xac\x19\xb7\xcf\xba\xd4\xa6"
"\x57\x5f\xb4\xd7\x74\x6f\x18\x97"
"\xb7\xde\xfc\xf3\x4e\x0d\x29\x4d"
"\xa0\xff\x39\x9e\x2d\xbf\x27\xac"
"\x54\xb9\x8a\x3e\xab\x3b\xac\xd3"
"\x36\x43\x74\xfc\xc2\x64\x81\x8a"
"\x2c\x15\x72\xdf\x3f\x9d\x5b\xa4",
.clen = 80 + 48,
}, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x18" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
"\x33\x44\x55\x66\x77\x88\x99\xaa"
"\xbb\xcc\xdd\xee\xff\x11\x22\x33"
"\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
"\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
"\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
.klen = 8 + 48 + 24,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.plen = 64,
.ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
"\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
"\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
"\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
"\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
"\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
"\x08\xb0\xe2\x79\x88\x59\x88\x81"
"\xd9\x20\xa9\xe6\x4f\x56\x15\xcd"
"\x29\x9b\x42\x47\x0b\xbf\xf3\x54"
"\x54\x95\xb0\x89\xd5\xa0\xc3\x78"
"\x60\x6c\x18\x39\x6d\xc9\xfb\x2a"
"\x34\x1c\xed\x95\x10\x1e\x43\x0a"
"\x72\xce\x26\xbc\x74\xd9\x6f\xa2"
"\xf1\xd9\xd0\xb1\xdf\x3d\x93\x14",
.clen = 64 + 48,
}, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x20" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\x22\x33\x44\x55\x66\x77\x88\x99"
"\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
"\x33\x44\x55\x66\x77\x88\x99\xaa"
"\xbb\xcc\xdd\xee\xff\x11\x22\x33"
"\x60\x3d\xeb\x10\x15\xca\x71\xbe"
"\x2b\x73\xae\xf0\x85\x7d\x77\x81"
"\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
"\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
.klen = 8 + 48 + 32,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.alen = 16,
.ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.plen = 64,
.ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
"\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
"\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
"\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
"\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
"\xa5\x30\xe2\x63\x04\x23\x14\x61"
"\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
"\xda\x6c\x19\x07\x8c\x6a\x9d\x1b"
"\x9f\x50\xce\x64\xd9\xa3\xc9\x7a"
"\x15\x3a\x3d\x46\x9a\x90\xf3\x06"
"\x22\xad\xc5\x24\x77\x50\xb8\xfe"
"\xbe\x37\x16\x86\x34\x5f\xaf\x97"
"\x00\x9d\x86\xc8\x32\x4f\x72\x2f"
"\x48\x97\xad\xb6\xb9\x77\x33\xbc",
.clen = 64 + 48,
},
};
static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
@ -15854,6 +16450,65 @@ static const struct aead_testvec hmac_sha512_des_cbc_tv_temp[] = {
},
};
static const struct aead_testvec hmac_md5_des3_ede_cbc_tv_temp[] = {
{ /* Generated with cryptopp */
#ifdef __LITTLE_ENDIAN
.key = "\x08\x00" /* rta length */
"\x01\x00" /* rta type */
#else
.key = "\x00\x08" /* rta length */
"\x00\x01" /* rta type */
#endif
"\x00\x00\x00\x18" /* enc key length */
"\x11\x22\x33\x44\x55\x66\x77\x88"
"\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
"\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
"\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
"\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
.klen = 8 + 16 + 24,
.iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
"\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.alen = 16,
.ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
"\x20\x73\x6f\x54\x20\x6f\x61\x4d"
"\x79\x6e\x53\x20\x63\x65\x65\x72"
"\x73\x74\x54\x20\x6f\x6f\x4d\x20"
"\x6e\x61\x20\x79\x65\x53\x72\x63"
"\x74\x65\x20\x73\x6f\x54\x20\x6f"
"\x61\x4d\x79\x6e\x53\x20\x63\x65"
"\x65\x72\x73\x74\x54\x20\x6f\x6f"
"\x4d\x20\x6e\x61\x20\x79\x65\x53"
"\x72\x63\x74\x65\x20\x73\x6f\x54"
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
.plen = 128,
.ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
"\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
"\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
"\x76\xd1\xda\x0c\x94\x67\xbb\x04"
"\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
"\x22\x64\x47\xaa\x8f\x75\x13\xbf"
"\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
"\x71\x63\x2e\x89\x7b\x1e\x12\xca"
"\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
"\xd6\xf9\x21\x31\x62\x44\x45\xa6"
"\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
"\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
"\x6b\xfa\xb1\x91\x13\xb0\xd9\x19"
"\x99\x09\xfb\x05\x35\xc8\xcc\x38"
"\xc3\x1e\x5e\xe1\xe6\x96\x84\xc8",
.clen = 128 + 16,
},
};
static const struct aead_testvec hmac_sha1_des3_ede_cbc_tv_temp[] = {
{ /* Generated with cryptopp */
#ifdef __LITTLE_ENDIAN


@ -212,6 +212,7 @@ static int airoha_trng_probe(struct platform_device *pdev)
trng->rng.init = airoha_trng_init;
trng->rng.cleanup = airoha_trng_cleanup;
trng->rng.read = airoha_trng_read;
trng->rng.quality = 900;
ret = devm_hwrng_register(dev, &trng->rng);
if (ret) {


@ -20,23 +20,25 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#define RNG_MODULE_NAME "hw_random"
#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
static struct hwrng *current_rng;
static struct hwrng __rcu *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
/* Protects rng_list, hwrng_fill and updating on current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
@ -64,18 +66,39 @@ static size_t rng_buffer_size(void)
return RNG_BUFFER_SIZE;
}
static inline void cleanup_rng(struct kref *kref)
static void cleanup_rng_work(struct work_struct *work)
{
struct hwrng *rng = container_of(kref, struct hwrng, ref);
struct hwrng *rng = container_of(work, struct hwrng, cleanup_work);
/*
* Hold rng_mutex here so we serialize in case they set_current_rng
* on rng again immediately.
*/
mutex_lock(&rng_mutex);
/* Skip if rng has been reinitialized. */
if (kref_read(&rng->ref)) {
mutex_unlock(&rng_mutex);
return;
}
if (rng->cleanup)
rng->cleanup(rng);
complete(&rng->cleanup_done);
mutex_unlock(&rng_mutex);
}
static inline void cleanup_rng(struct kref *kref)
{
struct hwrng *rng = container_of(kref, struct hwrng, ref);
schedule_work(&rng->cleanup_work);
}
static int set_current_rng(struct hwrng *rng)
{
struct hwrng *old_rng;
int err;
BUG_ON(!mutex_is_locked(&rng_mutex));
@ -84,8 +107,14 @@ static int set_current_rng(struct hwrng *rng)
if (err)
return err;
drop_current_rng();
current_rng = rng;
old_rng = rcu_dereference_protected(current_rng,
lockdep_is_held(&rng_mutex));
rcu_assign_pointer(current_rng, rng);
if (old_rng) {
synchronize_rcu();
kref_put(&old_rng->ref, cleanup_rng);
}
/* if necessary, start hwrng thread */
if (!hwrng_fill) {
@ -101,47 +130,56 @@ static int set_current_rng(struct hwrng *rng)
static void drop_current_rng(void)
{
BUG_ON(!mutex_is_locked(&rng_mutex));
if (!current_rng)
struct hwrng *rng;
rng = rcu_dereference_protected(current_rng,
lockdep_is_held(&rng_mutex));
if (!rng)
return;
RCU_INIT_POINTER(current_rng, NULL);
synchronize_rcu();
if (hwrng_fill) {
kthread_stop(hwrng_fill);
hwrng_fill = NULL;
}
/* decrease last reference for triggering the cleanup */
kref_put(&current_rng->ref, cleanup_rng);
current_rng = NULL;
kref_put(&rng->ref, cleanup_rng);
}
/* Returns ERR_PTR(), NULL or refcounted hwrng */
/* Returns NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
if (current_rng)
kref_get(&current_rng->ref);
struct hwrng *rng;
return current_rng;
rng = rcu_dereference_protected(current_rng,
lockdep_is_held(&rng_mutex));
if (rng)
kref_get(&rng->ref);
return rng;
}
static struct hwrng *get_current_rng(void)
{
struct hwrng *rng;
if (mutex_lock_interruptible(&rng_mutex))
return ERR_PTR(-ERESTARTSYS);
rcu_read_lock();
rng = rcu_dereference(current_rng);
if (rng)
kref_get(&rng->ref);
rng = get_current_rng_nolock();
rcu_read_unlock();
mutex_unlock(&rng_mutex);
return rng;
}
static void put_rng(struct hwrng *rng)
{
/*
* Hold rng_mutex here so we serialize in case they set_current_rng
* on rng again immediately.
*/
mutex_lock(&rng_mutex);
if (rng)
kref_put(&rng->ref, cleanup_rng);
mutex_unlock(&rng_mutex);
}
static int hwrng_init(struct hwrng *rng)
@ -213,10 +251,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
while (size) {
rng = get_current_rng();
if (IS_ERR(rng)) {
err = PTR_ERR(rng);
goto out;
}
if (!rng) {
err = -ENODEV;
goto out;
@ -303,7 +337,7 @@ static struct miscdevice rng_miscdev = {
static int enable_best_rng(void)
{
struct hwrng *rng, *new_rng = NULL;
struct hwrng *rng, *cur_rng, *new_rng = NULL;
int ret = -ENODEV;
BUG_ON(!mutex_is_locked(&rng_mutex));
@ -321,7 +355,9 @@ static int enable_best_rng(void)
new_rng = rng;
}
ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
cur_rng = rcu_dereference_protected(current_rng,
lockdep_is_held(&rng_mutex));
ret = ((new_rng == cur_rng) ? 0 : set_current_rng(new_rng));
if (!ret)
cur_rng_set_by_user = 0;
@ -371,8 +407,6 @@ static ssize_t rng_current_show(struct device *dev,
struct hwrng *rng;
rng = get_current_rng();
if (IS_ERR(rng))
return PTR_ERR(rng);
ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none");
put_rng(rng);
@ -416,8 +450,6 @@ static ssize_t rng_quality_show(struct device *dev,
struct hwrng *rng;
rng = get_current_rng();
if (IS_ERR(rng))
return PTR_ERR(rng);
if (!rng) /* no need to put_rng */
return -ENODEV;
@ -432,6 +464,7 @@ static ssize_t rng_quality_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct hwrng *rng;
u16 quality;
int ret = -EINVAL;
@ -448,12 +481,13 @@ static ssize_t rng_quality_store(struct device *dev,
goto out;
}
if (!current_rng) {
rng = rcu_dereference_protected(current_rng, lockdep_is_held(&rng_mutex));
if (!rng) {
ret = -ENODEV;
goto out;
}
current_rng->quality = quality;
rng->quality = quality;
current_quality = quality; /* obsolete */
/* the best available RNG may have changed */
@ -489,8 +523,20 @@ static int hwrng_fillfn(void *unused)
struct hwrng *rng;
rng = get_current_rng();
if (IS_ERR(rng) || !rng)
if (!rng) {
/*
* Keep the task_struct alive until kthread_stop()
* is called to avoid UAF in drop_current_rng().
*/
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
schedule();
}
set_current_state(TASK_RUNNING);
break;
}
mutex_lock(&reading_mutex);
rc = rng_get_data(rng, rng_fillbuf,
rng_buffer_size(), 1);
@ -518,14 +564,13 @@ static int hwrng_fillfn(void *unused)
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
entropy >> 10, true);
}
hwrng_fill = NULL;
return 0;
}
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
struct hwrng *cur_rng, *tmp;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@ -540,6 +585,7 @@ int hwrng_register(struct hwrng *rng)
}
list_add_tail(&rng->list, &rng_list);
INIT_WORK(&rng->cleanup_work, cleanup_rng_work);
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
init_completion(&rng->dying);
@ -547,16 +593,19 @@ int hwrng_register(struct hwrng *rng)
/* Adjust quality field to always have a proper value */
rng->quality = min3(default_quality, 1024, rng->quality ?: 1024);
if (!cur_rng_set_by_user &&
(!current_rng || rng->quality > current_rng->quality)) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
* chosen by userspace.
*/
err = set_current_rng(rng);
if (err)
goto out_unlock;
if (!cur_rng_set_by_user) {
cur_rng = rcu_dereference_protected(current_rng,
lockdep_is_held(&rng_mutex));
if (!cur_rng || rng->quality > cur_rng->quality) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
* chosen by userspace.
*/
err = set_current_rng(rng);
if (err)
goto out_unlock;
}
}
mutex_unlock(&rng_mutex);
return 0;
@ -569,14 +618,17 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
struct hwrng *new_rng;
struct hwrng *cur_rng;
int err;
mutex_lock(&rng_mutex);
list_del(&rng->list);
complete_all(&rng->dying);
if (current_rng == rng) {
cur_rng = rcu_dereference_protected(current_rng,
lockdep_is_held(&rng_mutex));
if (cur_rng == rng) {
err = enable_best_rng();
if (err) {
drop_current_rng();
@ -584,17 +636,7 @@ void hwrng_unregister(struct hwrng *rng)
}
}
new_rng = get_current_rng_nolock();
if (list_empty(&rng_list)) {
mutex_unlock(&rng_mutex);
if (hwrng_fill)
kthread_stop(hwrng_fill);
} else
mutex_unlock(&rng_mutex);
if (new_rng)
put_rng(new_rng);
mutex_unlock(&rng_mutex);
wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
@ -682,7 +724,7 @@ static int __init hwrng_modinit(void)
static void __exit hwrng_modexit(void)
{
mutex_lock(&rng_mutex);
BUG_ON(current_rng);
WARN_ON(rcu_access_pointer(current_rng));
kfree(rng_buffer);
kfree(rng_fillbuf);
mutex_unlock(&rng_mutex);
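
For reference, a minimal sketch of the RCU publish/retire idiom the hwrng core adopts above (hypothetical object type, not the kernel's code):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
        struct kref ref;
};

static DEFINE_MUTEX(obj_mutex);
static struct my_obj __rcu *cur_obj;

static void obj_release(struct kref *kref)
{
        kfree(container_of(kref, struct my_obj, ref));
}

/* Writer: swap the pointer under the mutex, then wait out all readers
 * before dropping the old object's reference. */
static void publish_obj(struct my_obj *new_obj)
{
        struct my_obj *old;

        mutex_lock(&obj_mutex);
        old = rcu_dereference_protected(cur_obj,
                                        lockdep_is_held(&obj_mutex));
        rcu_assign_pointer(cur_obj, new_obj);
        mutex_unlock(&obj_mutex);

        if (old) {
                synchronize_rcu();
                kref_put(&old->ref, obj_release);
        }
}

/* Reader: pin the object with a reference while inside the RCU
 * section; the writer's synchronize_rcu() guarantees the refcount
 * cannot hit zero before rcu_read_unlock(). */
static struct my_obj *get_obj(void)
{
        struct my_obj *obj;

        rcu_read_lock();
        obj = rcu_dereference(cur_obj);
        if (obj)
                kref_get(&obj->ref);
        rcu_read_unlock();
        return obj;
}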


@ -205,10 +205,7 @@ static int get_optee_rng_info(struct device *dev)
static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
{
if (ver->impl_id == TEE_IMPL_ID_OPTEE)
return 1;
else
return 0;
return (ver->impl_id == TEE_IMPL_ID_OPTEE);
}
static int optee_rng_probe(struct device *dev)


@ -248,9 +248,11 @@ struct sun8i_ss_hash_tfm_ctx {
struct sun8i_ss_hash_reqctx {
struct sginfo t_src[MAX_SG];
struct sginfo t_dst[MAX_SG];
struct ahash_request fallback_req;
u32 method;
int flow;
/* Must be last as it ends in a flexible-array member. */
struct ahash_request fallback_req;
};
/*
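
The rule being enforced: struct ahash_request ends in a flexible array member (its request context), so a struct embedding it must place it last. A stripped-down illustration with hypothetical types; note that strict ISO C does not allow embedding such a struct at all, so this relies on the GCC/Clang extension the kernel uses, and newer compilers warn unless the member is last:

struct inner {
        int len;
        unsigned char data[];   /* flexible array member */
};

struct outer {
        int flags;
        struct inner tail;      /* legal only as the final member: the
                                 * trailing array overlays any storage
                                 * placed after it */
};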


@ -2201,12 +2201,10 @@ static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
int i;
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc)
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
crypto_unregister_aead(&aes_authenc_algs[i]);
crypto_unregister_aeads(aes_authenc_algs,
ARRAY_SIZE(aes_authenc_algs));
#endif
if (dd->caps.has_xts)
@ -2215,8 +2213,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_skcipher(&aes_algs[i]);
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
@ -2229,7 +2226,7 @@ static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
int err, i, j;
int err, i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
atmel_aes_crypto_alg_init(&aes_algs[i].base);
@ -2272,8 +2269,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
for (j = 0; j < i; j++)
crypto_unregister_aead(&aes_authenc_algs[j]);
crypto_unregister_aeads(aes_authenc_algs, i);
crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
@ -2281,8 +2277,7 @@ err_aes_xts_alg:
err_aes_gcm_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
for (j = 0; j < i; j++)
crypto_unregister_skcipher(&aes_algs[j]);
crypto_unregister_skciphers(aes_algs, i);
return err;
}


@ -2418,27 +2418,23 @@ EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
int i;
if (dd->caps.has_hmac)
for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
crypto_unregister_ahash(&sha_hmac_algs[i]);
crypto_unregister_ahashes(sha_hmac_algs,
ARRAY_SIZE(sha_hmac_algs));
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
crypto_unregister_ahash(&sha_1_256_algs[i]);
crypto_unregister_ahashes(sha_1_256_algs, ARRAY_SIZE(sha_1_256_algs));
if (dd->caps.has_sha224)
crypto_unregister_ahash(&sha_224_alg);
if (dd->caps.has_sha_384_512) {
for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
crypto_unregister_ahash(&sha_384_512_algs[i]);
}
if (dd->caps.has_sha_384_512)
crypto_unregister_ahashes(sha_384_512_algs,
ARRAY_SIZE(sha_384_512_algs));
}
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
int err, i, j;
int err, i;
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
atmel_sha_alg_init(&sha_1_256_algs[i]);
@ -2480,18 +2476,15 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
/*i = ARRAY_SIZE(sha_hmac_algs);*/
err_sha_hmac_algs:
for (j = 0; j < i; j++)
crypto_unregister_ahash(&sha_hmac_algs[j]);
crypto_unregister_ahashes(sha_hmac_algs, i);
i = ARRAY_SIZE(sha_384_512_algs);
err_sha_384_512_algs:
for (j = 0; j < i; j++)
crypto_unregister_ahash(&sha_384_512_algs[j]);
crypto_unregister_ahashes(sha_384_512_algs, i);
crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
for (j = 0; j < i; j++)
crypto_unregister_ahash(&sha_1_256_algs[j]);
crypto_unregister_ahashes(sha_1_256_algs, i);
return err;
}


@ -897,38 +897,25 @@ static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
return IRQ_NONE;
}
static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
{
int i;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
crypto_unregister_skcipher(&tdes_algs[i]);
}
static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
{
int err, i, j;
int err, i;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
err = crypto_register_skcipher(&tdes_algs[i]);
if (err)
goto err_tdes_algs;
if (err) {
crypto_unregister_skciphers(tdes_algs, i);
return err;
}
}
return 0;
err_tdes_algs:
for (j = 0; j < i; j++)
crypto_unregister_skcipher(&tdes_algs[j]);
return err;
}
static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
dd->caps.has_dma = 0;
/* keep only major version number */
@ -1061,7 +1048,7 @@ static void atmel_tdes_remove(struct platform_device *pdev)
list_del(&tdes_dd->list);
spin_unlock(&atmel_tdes.lock);
atmel_tdes_unregister_algs(tdes_dd);
crypto_unregister_skciphers(tdes_algs, ARRAY_SIZE(tdes_algs));
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
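
The same shape repeats across all three Atmel drivers: algorithms still register one at a time (each needs per-algorithm init first), but the error path now unwinds with a single bulk call instead of an open-coded loop. Roughly, with a hypothetical array:

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>

static struct skcipher_alg my_algs[] = {
        /* ... per-driver algorithm definitions ... */
};

static int my_register_algs(void)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(my_algs); i++) {
                err = crypto_register_skcipher(&my_algs[i]);
                if (err) {
                        /* unregisters exactly the first i entries */
                        crypto_unregister_skciphers(my_algs, i);
                        return err;
                }
        }
        return 0;
}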


@ -4814,7 +4814,8 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
int err;
struct dpaa2_caam_priv_per_cpu *ppriv;
int i, err;
if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
@ -4822,6 +4823,12 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
dev_err(dev, "dpseci_reset() failed\n");
}
for_each_cpu(i, priv->clean_mask) {
ppriv = per_cpu_ptr(priv->ppriv, i);
free_netdev(ppriv->net_dev);
}
free_cpumask_var(priv->clean_mask);
dpaa2_dpseci_congestion_free(priv);
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
@ -5007,16 +5014,15 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
cpumask_var_t clean_mask;
int err, cpu;
u8 i;
err = -ENOMEM;
if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
goto err_cpumask;
priv = dev_get_drvdata(dev);
if (!zalloc_cpumask_var(&priv->clean_mask, GFP_KERNEL))
goto err_cpumask;
priv->dev = dev;
priv->dpsec_id = ls_dev->obj_desc.id;
@ -5118,7 +5124,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
err = -ENOMEM;
goto err_alloc_netdev;
}
cpumask_set_cpu(cpu, clean_mask);
cpumask_set_cpu(cpu, priv->clean_mask);
ppriv->net_dev->dev = *dev;
netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
@ -5126,18 +5132,16 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
DPAA2_CAAM_NAPI_WEIGHT);
}
err = 0;
goto free_cpumask;
return 0;
err_alloc_netdev:
free_dpaa2_pcpu_netdev(priv, clean_mask);
free_dpaa2_pcpu_netdev(priv, priv->clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
free_cpumask:
free_cpumask_var(clean_mask);
free_cpumask_var(priv->clean_mask);
err_cpumask:
return err;
}
@ -5182,7 +5186,6 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
ppriv = per_cpu_ptr(priv->ppriv, i);
napi_disable(&ppriv->napi);
netif_napi_del(&ppriv->napi);
free_netdev(ppriv->net_dev);
}
return 0;
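
The leak existed because the mask of CPUs that received a netdev lived in a stack variable and was freed at the end of setup, so teardown had no record of which per-CPU netdevs to release. Keeping the mask in the driver state makes the usual cpumask lifecycle work. A sketch, with hypothetical setup/teardown helpers:

#include <linux/cpumask.h>

struct my_priv {
        cpumask_var_t clean_mask;
};

static int my_setup(struct my_priv *priv)
{
        int cpu;

        if (!zalloc_cpumask_var(&priv->clean_mask, GFP_KERNEL))
                return -ENOMEM;

        for_each_online_cpu(cpu) {
                if (!my_setup_cpu(priv, cpu))   /* hypothetical */
                        cpumask_set_cpu(cpu, priv->clean_mask);
        }
        return 0;
}

static void my_teardown(struct my_priv *priv)
{
        int cpu;

        /* only touch CPUs that were actually set up */
        for_each_cpu(cpu, priv->clean_mask)
                my_teardown_cpu(priv, cpu);     /* hypothetical */
        free_cpumask_var(priv->clean_mask);
}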


@ -42,6 +42,7 @@
* @mc_io: pointer to MC portal's I/O object
* @domain: IOMMU domain
* @ppriv: per CPU pointers to private data
* @clean_mask: CPU mask of CPUs that have allocated netdevs
*/
struct dpaa2_caam_priv {
int dpsec_id;
@ -65,6 +66,7 @@ struct dpaa2_caam_priv {
struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
struct dentry *dfs_root;
cpumask_var_t clean_mask;
};
/**


@ -180,7 +180,8 @@ static void free_command_queues(struct cpt_vf *cptvf,
hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
nextchunk) {
dma_free_coherent(&pdev->dev, chunk->size,
dma_free_coherent(&pdev->dev,
chunk->size + CPT_NEXT_CHUNK_PTR_SIZE,
chunk->head,
chunk->dma_addr);
chunk->head = NULL;
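
The bug class is simple: dma_free_coherent() must be passed exactly the size that was given to dma_alloc_coherent(). The allocation reserved extra room for a next-chunk pointer; the free path forgot it. In outline:

        /* allocation and free must agree on the full size */
        size_t sz = chunk->size + CPT_NEXT_CHUNK_PTR_SIZE;

        chunk->head = dma_alloc_coherent(&pdev->dev, sz,
                                         &chunk->dma_addr, GFP_KERNEL);
        /* ... */
        dma_free_coherent(&pdev->dev, sz, chunk->head, chunk->dma_addr);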


@ -642,7 +642,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct ccp_data dst;
struct ccp_data aad;
struct ccp_op op;
} *wa __cleanup(kfree) = kzalloc(sizeof *wa, GFP_KERNEL);
} *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
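
__free(kfree) is the kernel's scope-based cleanup annotation from <linux/cleanup.h>, paired with the DEFINE_FREE(kfree, ...) helper in <linux/slab.h>; the raw __cleanup() attribute spelling used before does not go through that wrapper. Minimal usage sketch:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int process(size_t n)
{
        /* freed automatically when 'buf' leaves scope, on every
         * return path; kfree(NULL) is a no-op, so the error path
         * needs no special handling */
        u8 *buf __free(kfree) = kzalloc(n, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* ... use buf ... */
        return 0;
}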


@ -30,6 +30,8 @@ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
security_attribute_show(fused_part)
static DEVICE_ATTR_RO(fused_part);
security_attribute_show(boot_integrity)
static DEVICE_ATTR_RO(boot_integrity);
security_attribute_show(debug_lock_on)
static DEVICE_ATTR_RO(debug_lock_on);
security_attribute_show(tsme_status)
@ -47,6 +49,7 @@ static DEVICE_ATTR_RO(rom_armor_enforced);
static struct attribute *psp_security_attrs[] = {
&dev_attr_fused_part.attr,
&dev_attr_boot_integrity.attr,
&dev_attr_debug_lock_on.attr,
&dev_attr_tsme_status.attr,
&dev_attr_anti_rollback_status.attr,
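
The naming convention behind the one-line addition: DEVICE_ATTR_RO(name) expects a function literally called name_show (here generated by the driver's security_attribute_show() macro) and defines a struct device_attribute named dev_attr_name for the attribute table. Generic shape:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t my_flag_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", 1);      /* placeholder value */
}
static DEVICE_ATTR_RO(my_flag);                 /* dev_attr_my_flag */

static struct attribute *my_attrs[] = {
        &dev_attr_my_flag.attr,
        NULL,
};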


@ -36,7 +36,7 @@ union psp_cap_register {
rsvd1 :3,
security_reporting :1,
fused_part :1,
rsvd2 :1,
boot_integrity :1,
debug_lock_on :1,
rsvd3 :2,
tsme_status :1,


@ -228,7 +228,7 @@ static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
if (is_pci_tsm_pf0(pdev))
return tio_pf0_probe(pdev, sev);
return 0;
return NULL;
}
static void dsm_remove(struct pci_tsm *tsm)


@ -127,13 +127,6 @@ static size_t sev_es_tmr_size = SEV_TMR_SIZE;
#define NV_LENGTH (32 * 1024)
static void *sev_init_ex_buffer;
/*
* SEV_DATA_RANGE_LIST:
* Array containing range of pages that firmware transitions to HV-fixed
* page state.
*/
static struct sev_data_range_list *snp_range_list;
static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
static int snp_shutdown_on_panic(struct notifier_block *nb,
@ -1361,6 +1354,7 @@ static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
{
struct sev_data_range_list *snp_range_list __free(kfree) = NULL;
struct psp_device *psp = psp_master;
struct sev_data_snp_init_ex data;
struct sev_device *sev;
@ -2378,11 +2372,10 @@ e_free_pdh:
static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
bool shutdown_required = false;
struct sev_data_snp_addr buf;
struct page *status_page;
int ret, error;
void *data;
int ret;
if (!argp->data)
return -EINVAL;
@ -2393,31 +2386,35 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
data = page_address(status_page);
if (!sev->snp_initialized) {
ret = snp_move_to_init_state(argp, &shutdown_required);
if (ret)
goto cleanup;
}
/*
* Firmware expects status page to be in firmware-owned state, otherwise
* it will report firmware error code INVALID_PAGE_STATE (0x1A).
* SNP_PLATFORM_STATUS can be executed in any SNP state. But if executed
* when SNP has been initialized, the status page must be firmware-owned.
*/
if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
ret = -EFAULT;
goto cleanup;
if (sev->snp_initialized) {
/*
* Firmware expects the status page to be in Firmware state,
* otherwise it will report an error INVALID_PAGE_STATE.
*/
if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
ret = -EFAULT;
goto cleanup;
}
}
buf.address = __psp_pa(data);
ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error);
/*
* Status page will be transitioned to Reclaim state upon success, or
* left in Firmware state in failure. Use snp_reclaim_pages() to
* transition either case back to Hypervisor-owned state.
*/
if (snp_reclaim_pages(__pa(data), 1, true))
return -EFAULT;
if (sev->snp_initialized) {
/*
* The status page will be in Reclaim state on success, or left
* in Firmware state on failure. Use snp_reclaim_pages() to
* transition either case back to Hypervisor-owned state.
*/
if (snp_reclaim_pages(__pa(data), 1, true)) {
snp_leak_pages(__page_to_pfn(status_page), 1);
return -EFAULT;
}
}
if (ret)
goto cleanup;
@ -2427,9 +2424,6 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
ret = -EFAULT;
cleanup:
if (shutdown_required)
__sev_snp_shutdown_locked(&error, false);
__free_pages(status_page, 0);
return ret;
}
@ -2780,11 +2774,6 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
sev_init_ex_buffer = NULL;
}
if (snp_range_list) {
kfree(snp_range_list);
snp_range_list = NULL;
}
__sev_snp_shutdown_locked(&error, panic);
}


@ -57,6 +57,8 @@ config CRYPTO_DEV_HISI_ZIP
depends on UACCE || UACCE=n
depends on ACPI
select CRYPTO_DEV_HISI_QM
select CRYPTO_DEFLATE
select CRYPTO_LZ4
help
Support for HiSilicon ZIP Driver


@ -94,9 +94,8 @@ struct hpre_sqe {
__le64 key;
__le64 in;
__le64 out;
__le16 tag;
__le16 resv2;
#define _HPRE_SQE_ALIGN_EXT 7
__le64 tag;
#define _HPRE_SQE_ALIGN_EXT 6
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};


@ -93,6 +93,7 @@ struct hpre_dh_ctx {
char *g; /* m */
dma_addr_t dma_g;
struct crypto_kpp *soft_tfm;
};
struct hpre_ecdh_ctx {
@ -103,17 +104,15 @@ struct hpre_ecdh_ctx {
/* low address: x->y */
unsigned char *g;
dma_addr_t dma_g;
struct crypto_kpp *soft_tfm;
};
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
struct hpre_asym_request **req_list;
struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
struct idr req_idr;
union {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
@ -123,6 +122,7 @@ struct hpre_ctx {
unsigned int curve_id;
/* for high performance core */
u8 enable_hpcore;
bool fallback;
};
struct hpre_asym_request {
@ -136,7 +136,6 @@ struct hpre_asym_request {
struct kpp_request *ecdh;
} areq;
int err;
int req_id;
hpre_cb cb;
struct timespec64 req_time;
};
@ -151,79 +150,13 @@ static inline unsigned int hpre_align_pd(void)
return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
unsigned long flags;
int id;
spin_lock_irqsave(&ctx->req_lock, flags);
id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_irqrestore(&ctx->req_lock, flags);
return id;
}
static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
unsigned long flags;
spin_lock_irqsave(&ctx->req_lock, flags);
idr_remove(&ctx->req_idr, req_id);
spin_unlock_irqrestore(&ctx->req_lock, flags);
}
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
id = hpre_alloc_req_id(ctx);
if (unlikely(id < 0))
return -EINVAL;
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
dfx = ctx->hpre->debug.dfx;
if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
ktime_get_ts64(&hpre_req->req_time);
return id;
}
static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
static void hpre_dfx_add_req_time(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx = hpre_req->ctx;
int id = hpre_req->req_id;
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
if (hpre_req->req_id >= 0) {
hpre_req->req_id = HPRE_INVLD_REQ_ID;
ctx->req_list[id] = NULL;
hpre_free_req_id(ctx, id);
}
}
static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
struct hisi_qp *qp;
int ret;
qp = hpre_create_qp(type);
if (!qp) {
pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
}
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
hisi_qm_free_qps(&qp, 1);
pci_err(qp->qm->pdev, "Can not start qp!\n");
return ERR_PTR(-EINVAL);
}
return qp;
if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
ktime_get_ts64(&hpre_req->req_time);
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
@ -340,26 +273,19 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
struct hpre_asym_request *req;
unsigned int err, done, alg;
int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
#define HREE_HW_ERR_MASK GENMASK(10, 0)
#define HREE_SQE_DONE_MASK GENMASK(1, 0)
#define HREE_ALG_TYPE_MASK GENMASK(4, 0)
id = (int)le16_to_cpu(sqe->tag);
req = ctx->req_list[id];
hpre_rm_req_from_ctx(req);
*kreq = req;
*kreq = (void *)le64_to_cpu(sqe->tag);
err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
HREE_HW_ERR_MASK;
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
@ -370,36 +296,10 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
return -EINVAL;
}
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
struct hpre *hpre;
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
ctx->dev = &qp->qm->pdev->dev;
hpre = container_of(ctx->qp->qm, struct hpre, qm);
ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
ctx->key_sz = 0;
ctx->crt_g2_mode = false;
idr_init(&ctx->req_idr);
return 0;
}
static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
if (is_clear_all) {
idr_destroy(&ctx->req_idr);
kfree(ctx->req_list);
if (is_clear_all)
hisi_qm_free_qps(&ctx->qp, 1);
}
ctx->crt_g2_mode = false;
ctx->key_sz = 0;
@ -467,49 +367,44 @@ static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
struct hpre_ctx *ctx = qp->qp_ctx;
struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *h_req;
struct hpre_sqe *sqe = resp;
struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
if (unlikely(!req)) {
atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
h_req = (struct hpre_asym_request *)le64_to_cpu(sqe->tag);
if (unlikely(!h_req)) {
pr_err("Failed to get request, and qp_id is %u\n", qp->qp_id);
return;
}
req->cb(ctx, resp);
}
static void hpre_stop_qp_and_put(struct hisi_qp *qp)
{
hisi_qm_stop_qp(qp);
hisi_qm_free_qps(&qp, 1);
h_req->cb(h_req->ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
struct hisi_qp *qp;
int ret;
struct hpre *hpre;
qp = hpre_get_qp_and_start(type);
if (IS_ERR(qp))
return PTR_ERR(qp);
qp = hpre_create_qp(type);
if (!qp) {
ctx->qp = NULL;
return -ENODEV;
}
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
ctx->qp = qp;
ctx->dev = &qp->qm->pdev->dev;
hpre = container_of(ctx->qp->qm, struct hpre, qm);
ctx->hpre = hpre;
ctx->key_sz = 0;
ctx->crt_g2_mode = false;
ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
if (ret)
hpre_stop_qp_and_put(qp);
return ret;
return 0;
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
struct hpre_asym_request *h_req;
struct hpre_sqe *msg;
int req_id;
void *tmp;
if (is_rsa) {
@ -549,11 +444,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
req_id = hpre_add_req_to_ctx(h_req);
if (req_id < 0)
return -EBUSY;
msg->tag = cpu_to_le16((u16)req_id);
hpre_dfx_add_req_time(h_req);
msg->tag = cpu_to_le64((uintptr_t)h_req);
return 0;
}
@ -566,9 +458,7 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
do {
atomic64_inc(&dfx[HPRE_SEND_CNT].value);
spin_lock_bh(&ctx->req_lock);
ret = hisi_qp_send(ctx->qp, msg);
spin_unlock_bh(&ctx->req_lock);
if (ret != -EBUSY)
break;
atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
@ -619,12 +509,53 @@ static int hpre_dh_compute_value(struct kpp_request *req)
return -EINPROGRESS;
clear_all:
hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
}
static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req)
{
struct kpp_request *fb_req = kpp_request_ctx(req);
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm);
kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data);
kpp_request_set_input(fb_req, req->src, req->src_len);
kpp_request_set_output(fb_req, req->dst, req->dst_len);
return fb_req;
}
static int hpre_dh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
struct kpp_request *fb_req;
if (ctx->fallback) {
fb_req = hpre_dh_prepare_fb_req(req);
return crypto_kpp_generate_public_key(fb_req);
}
return hpre_dh_compute_value(req);
}
static int hpre_dh_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
struct kpp_request *fb_req;
if (ctx->fallback) {
fb_req = hpre_dh_prepare_fb_req(req);
return crypto_kpp_compute_shared_secret(fb_req);
}
return hpre_dh_compute_value(req);
}
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1 768
@ -651,13 +582,6 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
struct device *dev = ctx->dev;
unsigned int sz;
if (params->p_size > HPRE_DH_MAX_P_SZ)
return -EINVAL;
if (hpre_is_dh_params_length_valid(params->p_size <<
HPRE_BITS_2_BYTES_SHIFT))
return -EINVAL;
sz = ctx->key_sz = params->p_size;
ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
&ctx->dh.dma_xa_p, GFP_KERNEL);
@ -690,8 +614,8 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (!ctx->qp)
return;
if (ctx->dh.g) {
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
@ -718,6 +642,13 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
if (crypto_dh_decode_key(buf, len, &params) < 0)
return -EINVAL;
if (!ctx->qp)
goto set_soft_secret;
if (hpre_is_dh_params_length_valid(params.p_size <<
HPRE_BITS_2_BYTES_SHIFT))
goto set_soft_secret;
/* Free old secret if any */
hpre_dh_clear_ctx(ctx, false);
@ -728,27 +659,55 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
params.key_size);
ctx->fallback = false;
return 0;
err_clear_ctx:
hpre_dh_clear_ctx(ctx, false);
return ret;
set_soft_secret:
ctx->fallback = true;
return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len);
}
static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
if (ctx->fallback)
return crypto_kpp_maxsize(ctx->dh.soft_tfm);
return ctx->key_sz;
}
static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
const char *alg = kpp_alg_name(tfm);
unsigned int reqsize;
int ret;
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->dh.soft_tfm)) {
pr_err("Failed to alloc dh tfm!\n");
return PTR_ERR(ctx->dh.soft_tfm);
}
return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm));
reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(),
sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm));
kpp_set_reqsize(tfm, reqsize);
ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
if (ret && ret != -ENODEV) {
crypto_free_kpp(ctx->dh.soft_tfm);
return ret;
} else if (ret == -ENODEV) {
ctx->fallback = true;
}
return 0;
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@ -756,6 +715,7 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
hpre_dh_clear_ctx(ctx, true);
crypto_free_kpp(ctx->dh.soft_tfm);
}
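
The fallback idiom used here for dh (and below for rsa and ecdh): allocate the generic implementation with CRYPTO_ALG_NEED_FALLBACK as the mask, so the lookup skips this driver's own flagged algorithms, and size the request context for whichever path needs more. In sketch form, with hypothetical names:

        ctx->soft_tfm = crypto_alloc_kpp(kpp_alg_name(tfm), 0,
                                         CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->soft_tfm))
                return PTR_ERR(ctx->soft_tfm);

        /* request ctx must fit the larger of the two paths */
        kpp_set_reqsize(tfm, max(sizeof(struct hw_request) + hw_pad(),
                                 sizeof(struct kpp_request) +
                                 crypto_kpp_reqsize(ctx->soft_tfm)));
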
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
@ -795,9 +755,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
struct hpre_sqe *msg = &hpre_req->req;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
/* For unsupported key size and unavailable devices, use soft tfm instead */
if (ctx->fallback) {
akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
ret = crypto_akcipher_encrypt(req);
akcipher_request_set_tfm(req, tfm);
@ -828,7 +787,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return -EINPROGRESS;
clear_all:
hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
@ -843,9 +801,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
struct hpre_sqe *msg = &hpre_req->req;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
/* For unsupported key size and unavailable devices, use soft tfm instead */
if (ctx->fallback) {
akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
ret = crypto_akcipher_decrypt(req);
akcipher_request_set_tfm(req, tfm);
@ -883,7 +840,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return -EINPROGRESS;
clear_all:
hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
@ -899,8 +855,10 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
ctx->key_sz = vlen;
/* if invalid key size provided, we use software tfm */
if (!hpre_rsa_key_size_is_support(ctx->key_sz))
if (!hpre_rsa_key_size_is_support(ctx->key_sz)) {
ctx->fallback = true;
return 0;
}
ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
&ctx->rsa.dma_pubkey,
@ -1035,8 +993,8 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
unsigned int half_key_sz = ctx->key_sz >> 1;
struct device *dev = ctx->dev;
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (!ctx->qp)
return;
if (ctx->rsa.pubkey) {
dma_free_coherent(dev, ctx->key_sz << 1,
@ -1117,6 +1075,7 @@ static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
goto free;
}
ctx->fallback = false;
return 0;
free:
@ -1134,6 +1093,9 @@ static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
if (ret)
return ret;
if (!ctx->qp)
return 0;
return hpre_rsa_setkey(ctx, key, keylen, false);
}
@ -1147,6 +1109,9 @@ static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
if (ret)
return ret;
if (!ctx->qp)
return 0;
return hpre_rsa_setkey(ctx, key, keylen, true);
}
@ -1154,9 +1119,8 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
/* For 512 and 1536 bits key size, use soft tfm instead */
if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
/* For unsupported key size and unavailable devices, use soft tfm instead */
if (ctx->fallback)
return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
return ctx->key_sz;
@ -1177,10 +1141,14 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
hpre_align_pd());
ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
if (ret)
if (ret && ret != -ENODEV) {
crypto_free_akcipher(ctx->rsa.soft_tfm);
return ret;
} else if (ret == -ENODEV) {
ctx->fallback = true;
}
return ret;
return 0;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
@ -1207,9 +1175,6 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
unsigned int sz = ctx->key_sz;
unsigned int shift = sz << 1;
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
@ -1346,7 +1311,7 @@ static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
return 0;
}
static bool hpre_key_is_zero(char *key, unsigned short key_sz)
static bool hpre_key_is_zero(const char *key, unsigned short key_sz)
{
int i;
@ -1387,6 +1352,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct ecdh params;
int ret;
if (ctx->fallback)
return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len);
if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
dev_err(dev, "failed to decode ecdh key!\n");
return -EINVAL;
@ -1488,7 +1456,6 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
{
struct hpre_asym_request *h_req;
struct hpre_sqe *msg;
int req_id;
void *tmp;
if (req->dst_len < ctx->key_sz << 1) {
@ -1510,11 +1477,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
req_id = hpre_add_req_to_ctx(h_req);
if (req_id < 0)
return -EBUSY;
msg->tag = cpu_to_le16((u16)req_id);
hpre_dfx_add_req_time(h_req);
msg->tag = cpu_to_le64((uintptr_t)h_req);
return 0;
}
@ -1612,28 +1576,86 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
return -EINPROGRESS;
clear_all:
hpre_rm_req_from_ctx(hpre_req);
hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
}
static int hpre_ecdh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
int ret;
if (ctx->fallback) {
kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
ret = crypto_kpp_generate_public_key(req);
kpp_request_set_tfm(req, tfm);
return ret;
}
return hpre_ecdh_compute_value(req);
}
static int hpre_ecdh_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
int ret;
if (ctx->fallback) {
kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
ret = crypto_kpp_compute_shared_secret(req);
kpp_request_set_tfm(req, tfm);
return ret;
}
return hpre_ecdh_compute_value(req);
}
static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
if (ctx->fallback)
return crypto_kpp_maxsize(ctx->ecdh.soft_tfm);
/* max size is the pub_key_size, include x and y */
return ctx->key_sz << 1;
}
static int hpre_ecdh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
const char *alg = kpp_alg_name(tfm);
int ret;
ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
if (!ret) {
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
return 0;
} else if (ret && ret != -ENODEV) {
return ret;
}
ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->ecdh.soft_tfm)) {
pr_err("Failed to alloc %s tfm!\n", alg);
return PTR_ERR(ctx->ecdh.soft_tfm);
}
crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm));
ctx->fallback = true;
return 0;
}
static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P192;
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
return hpre_ecdh_init_tfm(tfm);
}
static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
@ -1643,9 +1665,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->enable_hpcore = 1;
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
return hpre_ecdh_init_tfm(tfm);
}
static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
@ -1654,15 +1674,18 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
ctx->curve_id = ECC_CURVE_NIST_P384;
kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
return hpre_ecdh_init_tfm(tfm);
}
static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
if (ctx->fallback) {
crypto_free_kpp(ctx->ecdh.soft_tfm);
return;
}
hpre_ecc_clear_ctx(ctx, true);
}
@ -1680,13 +1703,14 @@ static struct akcipher_alg rsa = {
.cra_name = "rsa",
.cra_driver_name = "hpre-rsa",
.cra_module = THIS_MODULE,
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
};
static struct kpp_alg dh = {
.set_secret = hpre_dh_set_secret,
.generate_public_key = hpre_dh_compute_value,
.compute_shared_secret = hpre_dh_compute_value,
.generate_public_key = hpre_dh_generate_public_key,
.compute_shared_secret = hpre_dh_compute_shared_secret,
.max_size = hpre_dh_max_size,
.init = hpre_dh_init_tfm,
.exit = hpre_dh_exit_tfm,
@ -1696,14 +1720,15 @@ static struct kpp_alg dh = {
.cra_name = "dh",
.cra_driver_name = "hpre-dh",
.cra_module = THIS_MODULE,
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
};
static struct kpp_alg ecdh_curves[] = {
{
.set_secret = hpre_ecdh_set_secret,
.generate_public_key = hpre_ecdh_compute_value,
.compute_shared_secret = hpre_ecdh_compute_value,
.generate_public_key = hpre_ecdh_generate_public_key,
.compute_shared_secret = hpre_ecdh_compute_shared_secret,
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p192_init_tfm,
.exit = hpre_ecdh_exit_tfm,
@ -1713,11 +1738,12 @@ static struct kpp_alg ecdh_curves[] = {
.cra_name = "ecdh-nist-p192",
.cra_driver_name = "hpre-ecdh-nist-p192",
.cra_module = THIS_MODULE,
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
}, {
.set_secret = hpre_ecdh_set_secret,
.generate_public_key = hpre_ecdh_compute_value,
.compute_shared_secret = hpre_ecdh_compute_value,
.generate_public_key = hpre_ecdh_generate_public_key,
.compute_shared_secret = hpre_ecdh_compute_shared_secret,
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p256_init_tfm,
.exit = hpre_ecdh_exit_tfm,
@ -1727,11 +1753,12 @@ static struct kpp_alg ecdh_curves[] = {
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "hpre-ecdh-nist-p256",
.cra_module = THIS_MODULE,
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
}, {
.set_secret = hpre_ecdh_set_secret,
.generate_public_key = hpre_ecdh_compute_value,
.compute_shared_secret = hpre_ecdh_compute_value,
.generate_public_key = hpre_ecdh_generate_public_key,
.compute_shared_secret = hpre_ecdh_compute_shared_secret,
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p384_init_tfm,
.exit = hpre_ecdh_exit_tfm,
@ -1741,6 +1768,7 @@ static struct kpp_alg ecdh_curves[] = {
.cra_name = "ecdh-nist-p384",
.cra_driver_name = "hpre-ecdh-nist-p384",
.cra_module = THIS_MODULE,
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
}
};
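
The structural change running through this file: the IDR-based request id, the req_list lookup table and the driver-side lock around hisi_qp_send() are all gone; the driver now carries the request pointer itself in the widened 64-bit SQE tag and recovers it in the completion callback. The pattern, with hypothetical types:

#include <linux/types.h>

struct my_request {
        void (*done)(struct my_request *req);
};

struct my_sqe {
        __le64 tag;     /* wide enough to carry a kernel pointer */
};

static void my_submit(struct my_sqe *sqe, struct my_request *req)
{
        sqe->tag = cpu_to_le64((uintptr_t)req);
}

static void my_complete(struct my_sqe *sqe)
{
        struct my_request *req;

        req = (struct my_request *)(uintptr_t)le64_to_cpu(sqe->tag);
        req->done(req);
}

This trades the id-allocation failure path and its locking for trust that the hardware echoes the tag back unmodified.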


@ -465,7 +465,7 @@ struct hisi_qp *hpre_create_qp(u8 type)
* type: 0 - RSA/DH. algorithm supported in V2,
* 1 - ECC algorithm in V3.
*/
ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, &type, node, &qp);
if (!ret)
return qp;


@ -31,6 +31,9 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_STATUS_MASK GENMASK(12, 9)
#define QM_MB_BUSY_MASK BIT(13)
#define QM_MB_MAX_WAIT_TIMEOUT USEC_PER_SEC
#define QM_MB_MAX_STOP_TIMEOUT (5 * USEC_PER_SEC)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@ -188,8 +191,8 @@
#define QM_IFC_INT_DISABLE BIT(0)
#define QM_IFC_INT_STATUS_MASK BIT(0)
#define QM_IFC_INT_SET_MASK BIT(0)
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_WAIT_DST_ACK 1000
#define QM_MAX_PF_WAIT_COUNT 20
#define QM_MAX_VF_WAIT_COUNT 40
#define QM_VF_RESET_WAIT_US 20000
#define QM_VF_RESET_WAIT_CNT 3000
@ -582,36 +585,44 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
mailbox->rsvd = 0;
}
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
/*
* The mailbox is 128 bits and requires a single read/write operation.
* Since the kernel currently has no generic 128-bit MMIO accessor on
* arm64, the access is implemented in the driver itself.
*/
static struct qm_mailbox qm_mb_read(struct hisi_qm *qm)
{
u32 val;
struct qm_mailbox mailbox = {0};
return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
val, !((val >> QM_MB_BUSY_SHIFT) &
0x1), POLL_PERIOD, POLL_TIMEOUT);
#if IS_ENABLED(CONFIG_ARM64)
const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
unsigned long tmp0, tmp1;
asm volatile("ldp %0, %1, %3\n"
"stp %0, %1, %2\n"
: "=&r" (tmp0),
"=&r" (tmp1),
"+Q" (mailbox)
: "Q" (*((char __iomem *)fun_base))
: "memory");
#endif
return mailbox;
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
#if IS_ENABLED(CONFIG_ARM64)
void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
#if IS_ENABLED(CONFIG_ARM64)
unsigned long tmp0 = 0, tmp1 = 0;
#endif
if (!IS_ENABLED(CONFIG_ARM64)) {
memcpy_toio(fun_base, src, 16);
dma_wmb();
return;
}
#if IS_ENABLED(CONFIG_ARM64)
unsigned long tmp0, tmp1;
/*
* The dmb oshst instruction ensures that the data in the
* mailbox is written before it is sent to the hardware.
*/
asm volatile("ldp %0, %1, %3\n"
"stp %0, %1, %2\n"
"dmb oshst\n"
"stp %0, %1, %2\n"
: "=&r" (tmp0),
"=&r" (tmp1),
"+Q" (*((char __iomem *)fun_base))
@ -620,35 +631,61 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
#endif
}
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
struct qm_mailbox mailbox = {0};
int ret;
u32 val;
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
ret = read_poll_timeout(qm_mb_read, mailbox,
!(le16_to_cpu(mailbox.w0) & QM_MB_BUSY_MASK),
POLL_PERIOD, POLL_TIMEOUT,
true, qm);
if (ret)
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
ret = -EBUSY;
goto mb_busy;
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_timeout)
{
struct device *dev = &qm->pdev->dev;
int ret;
ret = read_poll_timeout(qm_mb_read, *mailbox,
!(le16_to_cpu(mailbox->w0) & QM_MB_BUSY_MASK),
POLL_PERIOD, wait_timeout,
true, qm);
if (ret) {
dev_err(dev, "QM mailbox operation timeout!\n");
return ret;
}
qm_mb_write(qm, mailbox);
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
ret = -ETIMEDOUT;
goto mb_busy;
}
val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
if (val & QM_MB_STATUS_MASK) {
dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
ret = -EIO;
goto mb_busy;
if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) {
dev_err(dev, "QM mailbox operation failed!\n");
return -EIO;
}
return 0;
}
mb_busy:
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_timeout)
{
int ret;
ret = hisi_qm_wait_mb_ready(qm);
if (ret)
goto mb_err_cnt_increase;
qm_mb_write(qm, mailbox);
ret = qm_wait_mb_finish(qm, mailbox, wait_timeout);
if (ret)
goto mb_err_cnt_increase;
return 0;
mb_err_cnt_increase:
atomic64_inc(&qm->debug.dfx.mb_err_cnt);
return ret;
}
@ -657,18 +694,49 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
bool op)
{
struct qm_mailbox mailbox;
u32 wait_timeout;
int ret;
if (cmd == QM_MB_CMD_STOP_QP || cmd == QM_MB_CMD_FLUSH_QM)
wait_timeout = QM_MB_MAX_STOP_TIMEOUT;
else
wait_timeout = QM_MB_MAX_WAIT_TIMEOUT;
/* No need to judge if master OOO is blocked. */
if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "QM mailbox operation failed since qm is stop!\n");
return -EIO;
}
qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
mutex_lock(&qm->mailbox_lock);
ret = qm_mb_nolock(qm, &mailbox);
ret = qm_mb_nolock(qm, &mailbox, wait_timeout);
mutex_unlock(&qm->mailbox_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
{
struct qm_mailbox mailbox;
int ret;
qm_mb_pre_init(&mailbox, cmd, 0, queue, 1);
mutex_lock(&qm->mailbox_lock);
ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
mutex_unlock(&qm->mailbox_lock);
if (ret)
return ret;
*base = le32_to_cpu(mailbox.base_l) |
((u64)le32_to_cpu(mailbox.base_h) << 32);
return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb_read);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
@ -715,7 +783,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
memcpy(tmp_xqc, xqc, size);
qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
ret = qm_mb_nolock(qm, &mailbox);
ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
if (!ret && op)
memcpy(xqc, tmp_xqc, size);
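
The polling above is read_poll_timeout() from <linux/iopoll.h>: it repeatedly evaluates an accessor until a condition on the returned value holds or the timeout expires, and the accessor may return a struct by value, which is how qm_mb_read() fits in. Generic usage, with a hypothetical register accessor:

#include <linux/iopoll.h>

        u32 sts;
        int ret;

        /* poll my_readl(base) every 10us for up to 1000us;
         * 'true' means sleep once before the first read */
        ret = read_poll_timeout(my_readl, sts, sts & MY_READY_BIT,
                                10, 1000, true, base);
        if (ret)
                return ret;     /* -ETIMEDOUT; 'sts' holds last value */
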
@ -1385,12 +1453,10 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
u64 sqc_vft;
int ret;
ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0);
if (ret)
return ret;
sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
*number = (QM_SQC_VFT_NUM_MASK_V2 &
(sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
@ -1530,25 +1596,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
struct qm_mailbox mailbox;
int ret;
qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
mutex_lock(&qm->mailbox_lock);
ret = qm_mb_nolock(qm, &mailbox);
if (ret)
goto err_unlock;
*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
err_unlock:
mutex_unlock(&qm->mailbox_lock);
return ret;
}
static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
u32 val;
@ -1864,7 +1911,7 @@ static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data
qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
mutex_lock(&qm->mailbox_lock);
return qm_mb_nolock(qm, &mailbox);
return qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
}
static void qm_set_ifc_end_v3(struct hisi_qm *qm)
@ -1877,7 +1924,7 @@ static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u3
u64 msg;
int ret;
ret = qm_get_mb_cmd(qm, &msg, fun_num);
ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num);
if (ret)
return ret;
@ -2002,7 +2049,38 @@ static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
*addr = 0;
}
static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
static struct hisi_qp *find_shareable_qp(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *share_qp = NULL;
struct hisi_qp *qp;
u32 ref_count = ~0;
int i;
if (!is_in_kernel)
goto queues_busy;
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
if (qp->is_in_kernel && qp->alg_type == alg_type && qp->ref_count < ref_count) {
ref_count = qp->ref_count;
share_qp = qp;
}
}
if (share_qp) {
share_qp->ref_count++;
return share_qp;
}
queues_busy:
dev_info_ratelimited(dev, "All %u queues of QM are busy and no shareable queue\n",
qm->qp_num);
atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
return ERR_PTR(-EBUSY);
}
static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
@ -2013,12 +2091,9 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
return ERR_PTR(-EPERM);
}
if (qm->qp_in_used == qm->qp_num) {
dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
qm->qp_num);
atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
return ERR_PTR(-EBUSY);
}
/* Try to find a shareable queue when all queues are busy */
if (qm->qp_in_used == qm->qp_num)
return find_shareable_qp(qm, alg_type, is_in_kernel);
qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
if (qp_id < 0) {
@ -2034,10 +2109,10 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp->event_cb = NULL;
qp->req_cb = NULL;
qp->qp_id = qp_id;
qp->alg_type = alg_type;
qp->is_in_kernel = true;
qp->is_in_kernel = is_in_kernel;
qm->qp_in_used++;
qp->ref_count = 1;
return qp;
}
@ -2059,7 +2134,7 @@ static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
return ERR_PTR(ret);
down_write(&qm->qps_lock);
qp = qm_create_qp_nolock(qm, alg_type);
qp = qm_create_qp_nolock(qm, alg_type, false);
up_write(&qm->qps_lock);
if (IS_ERR(qp))
@ -2219,6 +2294,7 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
for (i = 0; i < qp_used; i++) {
pos = (i + cur_head) % sq_depth;
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
qm_cq_head_update(qp);
atomic_dec(&qp->qp_status.used);
}
}
@ -2368,25 +2444,33 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
void *sqe = qm_get_avail_sqe(qp);
u16 sq_tail, sq_tail_next;
void *sqe;
spin_lock_bh(&qp->qp_lock);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
atomic_read(&qp->qm->status.flags) == QM_STOP ||
qp->is_resetting)) {
spin_unlock_bh(&qp->qp_lock);
dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
if (!sqe)
sqe = qm_get_avail_sqe(qp);
if (!sqe) {
spin_unlock_bh(&qp->qp_lock);
return -EBUSY;
}
sq_tail = qp_status->sq_tail;
sq_tail_next = (sq_tail + 1) % qp->sq_depth;
memcpy(sqe, msg, qp->qm->sqe_size);
qp->msg[sq_tail] = msg;
qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
atomic_inc(&qp->qp_status.used);
qp_status->sq_tail = sq_tail_next;
spin_unlock_bh(&qp->qp_lock);
return 0;
}
@ -2449,7 +2533,6 @@ static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
qp->uacce_q = q;
qp->event_cb = qm_qp_event_notifier;
qp->pasid = arg;
qp->is_in_kernel = false;
return 0;
}
@ -2919,12 +3002,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
struct device *dev = &qm->pdev->dev;
struct qm_dma *qdma;
struct hisi_qp *qp;
int i;
for (i = num - 1; i >= 0; i--) {
qdma = &qm->qp_array[i].qdma;
dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
qp = &qm->qp_array[i];
dma_free_coherent(dev, qp->qdma.size, qp->qdma.va, qp->qdma.dma);
kfree(qp->msg);
kfree(qm->poll_data[i].qp_finish_id);
}
@ -2946,10 +3030,14 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
return -ENOMEM;
qp = &qm->qp_array[id];
qp->msg = kmalloc_array(sq_depth, sizeof(void *), GFP_KERNEL);
if (!qp->msg)
goto err_free_qp_finish_id;
qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
GFP_KERNEL);
if (!qp->qdma.va)
goto err_free_qp_finish_id;
goto err_free_qp_msg;
qp->sqe = qp->qdma.va;
qp->sqe_dma = qp->qdma.dma;
@ -2961,8 +3049,14 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
qp->qm = qm;
qp->qp_id = id;
spin_lock_init(&qp->qp_lock);
spin_lock_init(&qp->backlog.lock);
INIT_LIST_HEAD(&qp->backlog.list);
return 0;
err_free_qp_msg:
kfree(qp->msg);
err_free_qp_finish_id:
kfree(qm->poll_data[id].qp_finish_id);
return ret;
@ -3533,6 +3627,17 @@ void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
static void qm_release_qp_nolock(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
if (--qp->ref_count)
return;
qm->qp_in_used--;
idr_remove(&qm->qp_idr, qp->qp_id);
}
/**
* hisi_qm_free_qps() - free multiple queue pairs.
* @qps: The queue pairs need to be freed.
@ -3545,11 +3650,34 @@ void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
if (!qps || qp_num <= 0)
return;
for (i = qp_num - 1; i >= 0; i--)
hisi_qm_release_qp(qps[i]);
down_write(&qps[0]->qm->qps_lock);
for (i = qp_num - 1; i >= 0; i--) {
if (qps[i]->ref_count == 1)
qm_stop_qp_nolock(qps[i]);
qm_release_qp_nolock(qps[i]);
}
up_write(&qps[0]->qm->qps_lock);
qm_pm_put_sync(qps[0]->qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
static void qm_insert_sorted(struct list_head *head, struct hisi_qm_resource *res)
{
struct hisi_qm_resource *tmp;
struct list_head *n = head;
list_for_each_entry(tmp, head, list) {
if (res->distance < tmp->distance) {
n = &tmp->list;
break;
}
}
list_add_tail(&res->list, n);
}
static void free_list(struct list_head *head)
{
struct hisi_qm_resource *res, *tmp;
@ -3560,14 +3688,57 @@ static void free_list(struct list_head *head)
}
}
static int qm_get_and_start_qp(struct hisi_qm *qm, int qp_num, struct hisi_qp **qps, u8 *alg_type)
{
int i, ret;
ret = qm_pm_get_sync(qm);
if (ret)
return ret;
down_write(&qm->qps_lock);
for (i = 0; i < qp_num; i++) {
qps[i] = qm_create_qp_nolock(qm, alg_type[i], true);
if (IS_ERR(qps[i])) {
ret = -ENODEV;
goto stop_and_free;
}
if (qps[i]->ref_count != 1)
continue;
ret = qm_start_qp_nolock(qps[i], 0);
if (ret) {
qm_release_qp_nolock(qps[i]);
goto stop_and_free;
}
}
up_write(&qm->qps_lock);
return 0;
stop_and_free:
for (i--; i >= 0; i--) {
if (qps[i]->ref_count == 1)
qm_stop_qp_nolock(qps[i]);
qm_release_qp_nolock(qps[i]);
}
up_write(&qm->qps_lock);
qm_pm_put_sync(qm);
return ret;
}
static int hisi_qm_sort_devices(int node, struct list_head *head,
struct hisi_qm_list *qm_list)
{
struct hisi_qm_resource *res, *tmp;
struct hisi_qm_resource *res;
struct hisi_qm *qm;
struct list_head *n;
struct device *dev;
int dev_node;
LIST_HEAD(non_full_list);
LIST_HEAD(full_list);
list_for_each_entry(qm, &qm_list->list, list) {
dev = &qm->pdev->dev;
@ -3582,16 +3753,16 @@ static int hisi_qm_sort_devices(int node, struct list_head *head,
res->qm = qm;
res->distance = node_distance(dev_node, node);
n = head;
list_for_each_entry(tmp, head, list) {
if (res->distance < tmp->distance) {
n = &tmp->list;
break;
}
}
list_add_tail(&res->list, n);
if (qm->qp_in_used == qm->qp_num)
qm_insert_sorted(&full_list, res);
else
qm_insert_sorted(&non_full_list, res);
}
list_splice_tail(&non_full_list, head);
list_splice_tail(&full_list, head);
return 0;
}
@ -3608,12 +3779,11 @@ static int hisi_qm_sort_devices(int node, struct list_head *head,
* not meet the requirements will return error.
*/
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
u8 alg_type, int node, struct hisi_qp **qps)
u8 *alg_type, int node, struct hisi_qp **qps)
{
struct hisi_qm_resource *tmp;
int ret = -ENODEV;
LIST_HEAD(head);
int i;
if (!qps || !qm_list || qp_num <= 0)
return -EINVAL;
@ -3625,24 +3795,15 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
}
list_for_each_entry(tmp, &head, list) {
for (i = 0; i < qp_num; i++) {
qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
if (IS_ERR(qps[i])) {
hisi_qm_free_qps(qps, i);
break;
}
}
if (i == qp_num) {
ret = 0;
ret = qm_get_and_start_qp(tmp->qm, qp_num, qps, alg_type);
if (!ret)
break;
}
}
mutex_unlock(&qm_list->lock);
if (ret)
pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
node, alg_type, qp_num);
pr_info("Failed to create qps, node[%d], qp[%d]!\n",
node, qp_num);
err:
free_list(&head);


@ -82,11 +82,6 @@ struct sec_aead_req {
__u8 out_mac_buf[SEC_MAX_MAC_LEN];
};
struct sec_instance_backlog {
struct list_head list;
spinlock_t lock;
};
/* SEC request of Crypto */
struct sec_req {
union {
@ -112,7 +107,6 @@ struct sec_req {
bool use_pbuf;
struct list_head list;
struct sec_instance_backlog *backlog;
struct sec_request_buf buf;
};
@ -172,7 +166,6 @@ struct sec_qp_ctx {
spinlock_t id_lock;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
struct sec_instance_backlog backlog;
u16 send_head;
};


@ -54,7 +54,6 @@
#define SEC_AUTH_CIPHER_V3 0x40
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
@ -148,7 +147,7 @@ static void sec_free_req_id(struct sec_req *req)
spin_unlock_bh(&qp_ctx->id_lock);
}
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
static void pre_parse_finished_bd(struct bd_status *status, void *resp)
{
struct sec_sqe *bd = resp;
@ -158,11 +157,9 @@ static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
status->tag = le16_to_cpu(bd->type2.tag);
status->err_type = bd->type2.error_type;
return bd->type_cipher_auth & SEC_TYPE_MASK;
}
static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
static void pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
struct sec_sqe3 *bd3 = resp;
@ -172,8 +169,6 @@ static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
status->tag = le64_to_cpu(bd3->tag);
status->err_type = bd3->error_type;
return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}
static int sec_cb_status_check(struct sec_req *req,
@ -244,7 +239,7 @@ static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp
struct sec_req *req, *tmp;
int ret;
list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
list_for_each_entry_safe(req, tmp, &qp_ctx->qp->backlog.list, list) {
list_del(&req->list);
ctx->req_op->buf_unmap(ctx, req);
if (req->req_id >= 0)
@ -265,11 +260,12 @@ static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp
static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
struct hisi_qp *qp = qp_ctx->qp;
struct sec_req *req, *tmp;
int ret;
spin_lock_bh(&qp_ctx->backlog.lock);
list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
spin_lock_bh(&qp->backlog.lock);
list_for_each_entry_safe(req, tmp, &qp->backlog.list, list) {
ret = qp_send_message(req);
switch (ret) {
case -EINPROGRESS:
@ -287,42 +283,21 @@ static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
}
unlock:
spin_unlock_bh(&qp_ctx->backlog.lock);
spin_unlock_bh(&qp->backlog.lock);
}
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
u8 type_supported = qp_ctx->ctx->type_supported;
const struct sec_sqe *sqe = qp->msg[qp->qp_status.cq_head];
struct sec_req *req = container_of(sqe, struct sec_req, sec_sqe);
struct sec_ctx *ctx = req->ctx;
struct sec_dfx *dfx = &ctx->sec->debug.dfx;
struct bd_status status;
struct sec_ctx *ctx;
struct sec_req *req;
int err;
u8 type;
if (type_supported == SEC_BD_TYPE2) {
type = pre_parse_finished_bd(&status, resp);
req = qp_ctx->req_list[status.tag];
} else {
type = pre_parse_finished_bd3(&status, resp);
req = (void *)(uintptr_t)status.tag;
}
if (unlikely(type != type_supported)) {
atomic64_inc(&dfx->err_bd_cnt);
pr_err("err bd type [%u]\n", type);
return;
}
if (unlikely(!req)) {
atomic64_inc(&dfx->invalid_req_cnt);
atomic_inc(&qp->qp_status.used);
return;
}
pre_parse_finished_bd(&status, resp);
req->err_type = status.err_type;
ctx = req->ctx;
err = sec_cb_status_check(req, &status);
if (err)
atomic64_inc(&dfx->done_flag_cnt);
@ -330,7 +305,31 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
ctx->req_op->callback(ctx, req, err);
}
static void sec_req_cb3(struct hisi_qp *qp, void *resp)
{
struct bd_status status;
struct sec_ctx *ctx;
struct sec_dfx *dfx;
struct sec_req *req;
int err;
pre_parse_finished_bd3(&status, resp);
req = (void *)(uintptr_t)status.tag;
req->err_type = status.err_type;
ctx = req->ctx;
dfx = &ctx->sec->debug.dfx;
err = sec_cb_status_check(req, &status);
if (err)
atomic64_inc(&dfx->done_flag_cnt);
atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
ctx->req_op->callback(ctx, req, err);
}
@ -348,8 +347,10 @@ static int sec_alg_send_message_retry(struct sec_req *req)
static int sec_alg_try_enqueue(struct sec_req *req)
{
struct hisi_qp *qp = req->qp_ctx->qp;
/* Check if any request is already backlogged */
if (!list_empty(&req->backlog->list))
if (!list_empty(&qp->backlog.list))
return -EBUSY;
/* Try to enqueue to HW ring */
@ -359,17 +360,18 @@ static int sec_alg_try_enqueue(struct sec_req *req)
static int sec_alg_send_message_maybacklog(struct sec_req *req)
{
struct hisi_qp *qp = req->qp_ctx->qp;
int ret;
ret = sec_alg_try_enqueue(req);
if (ret != -EBUSY)
return ret;
spin_lock_bh(&req->backlog->lock);
spin_lock_bh(&qp->backlog.lock);
ret = sec_alg_try_enqueue(req);
if (ret == -EBUSY)
list_add_tail(&req->list, &req->backlog->list);
spin_unlock_bh(&req->backlog->lock);
list_add_tail(&req->list, &qp->backlog.list);
spin_unlock_bh(&qp->backlog.lock);
return ret;
}
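
sec_alg_send_message_maybacklog() is a double-checked backlog: one lock-free attempt, then a retry under the backlog lock so a request is only parked while the ring is genuinely full, which also preserves submission order against a concurrent drain of the backlog. A compact sketch of the pattern, with hw_enqueue() standing in for hisi_qp_send() (all names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *next;
};

struct backlog {
	pthread_mutex_t lock;
	struct req *head;
	struct req **tail;	/* initialize to &head before first use */
};

/* Stub standing in for the real ring submit; returns 0 or -EBUSY. */
static int hw_enqueue(struct req *r)
{
	(void)r;
	return -EBUSY;
}

static int try_enqueue(struct backlog *b, struct req *r)
{
	if (b->head)		/* keep FIFO order: something is already parked */
		return -EBUSY;
	return hw_enqueue(r);
}

int send_maybacklog(struct backlog *b, struct req *r)
{
	int ret = try_enqueue(b, r);

	if (ret != -EBUSY)
		return ret;

	pthread_mutex_lock(&b->lock);
	ret = try_enqueue(b, r);	/* ring may have drained meanwhile */
	if (ret == -EBUSY) {
		r->next = NULL;
		*b->tail = r;
		b->tail = &r->next;
	}
	pthread_mutex_unlock(&b->lock);
	return ret;
}

The unlocked first attempt keeps the common case cheap; only contended submissions pay for the lock.
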
@ -624,32 +626,25 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
qp = ctx->qps[qp_ctx_id];
qp->req_type = 0;
qp->qp_ctx = qp_ctx;
qp_ctx->qp = qp;
qp_ctx->ctx = ctx;
qp->req_cb = sec_req_cb;
if (ctx->type_supported == SEC_BD_TYPE3)
qp->req_cb = sec_req_cb3;
else
qp->req_cb = sec_req_cb;
spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
spin_lock_init(&qp_ctx->backlog.lock);
spin_lock_init(&qp_ctx->id_lock);
INIT_LIST_HEAD(&qp_ctx->backlog.list);
qp_ctx->send_head = 0;
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
if (ret)
goto err_destroy_idr;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0)
goto err_resource_free;
return 0;
err_resource_free:
sec_free_qp_ctx_resource(ctx, qp_ctx);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
return ret;
@ -658,7 +653,6 @@ err_destroy_idr:
static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
hisi_qm_stop_qp(qp_ctx->qp);
sec_free_qp_ctx_resource(ctx, qp_ctx);
idr_destroy(&qp_ctx->req_idr);
}
@ -669,10 +663,8 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
int i, ret;
ctx->qps = sec_create_qps();
if (!ctx->qps) {
pr_err("Can not create sec qps!\n");
if (!ctx->qps)
return -ENODEV;
}
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
@ -708,6 +700,9 @@ static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
int i;
if (!ctx->qps)
return;
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@ -719,6 +714,9 @@ static int sec_cipher_init(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
if (!ctx->qps)
return 0;
c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&c_ctx->c_key_dma, GFP_KERNEL);
if (!c_ctx->c_key)
@ -731,6 +729,9 @@ static void sec_cipher_uninit(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
if (!ctx->qps)
return;
memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
c_ctx->c_key, c_ctx->c_key_dma);
@ -752,6 +753,9 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
if (!ctx->qps)
return;
memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
@ -789,7 +793,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
}
ret = sec_ctx_base_init(ctx);
if (ret)
if (ret && ret != -ENODEV)
return ret;
ret = sec_cipher_init(ctx);
@ -898,6 +902,9 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
struct device *dev = ctx->dev;
int ret;
if (!ctx->qps)
goto set_soft_key;
if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
if (ret) {
@ -928,13 +935,14 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
if (c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
return ret;
}
set_soft_key:
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");
return ret;
}
return 0;
}
@ -1398,6 +1406,9 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
struct crypto_authenc_keys keys;
int ret;
if (!ctx->qps)
return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
c_ctx->c_mode = c_mode;
@ -1952,7 +1963,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
} while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);
req->qp_ctx = qp_ctx;
req->backlog = &qp_ctx->backlog;
return 0;
}
@ -2055,6 +2065,9 @@ static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
if (ret)
return ret;
if (!ctx->qps)
return 0;
if (ctx->sec->qm.ver < QM_HW_V3) {
ctx->type_supported = SEC_BD_TYPE2;
ctx->req_op = &sec_skcipher_req_ops;
@ -2063,7 +2076,7 @@ static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
ctx->req_op = &sec_skcipher_req_ops_v3;
}
return ret;
return 0;
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
@ -2131,7 +2144,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
int ret;
ret = sec_aead_init(tfm);
if (ret) {
if (ret && ret != -ENODEV) {
pr_err("hisi_sec2: aead init error!\n");
return ret;
}
@ -2173,7 +2186,7 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
int ret;
ret = sec_aead_init(tfm);
if (ret) {
if (ret && ret != -ENODEV) {
dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
return ret;
}
@ -2318,6 +2331,9 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
bool need_fallback = false;
int ret;
if (!ctx->qps)
goto soft_crypto;
if (!sk_req->cryptlen) {
if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
return -EINVAL;
@ -2335,9 +2351,12 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
return -EINVAL;
if (unlikely(ctx->c_ctx.fallback || need_fallback))
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
goto soft_crypto;
return ctx->req_op->process(ctx, req);
soft_crypto:
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
@ -2545,6 +2564,9 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
bool need_fallback = false;
int ret;
if (!ctx->qps)
goto soft_crypto;
req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
@ -2555,11 +2577,14 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
ret = sec_aead_param_check(ctx, req, &need_fallback);
if (unlikely(ret)) {
if (need_fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
goto soft_crypto;
return -EINVAL;
}
return ctx->req_op->process(ctx, req);
soft_crypto:
return sec_aead_soft_crypto(ctx, a_req, encrypt);
}
static int sec_aead_encrypt(struct aead_request *a_req)


@ -417,18 +417,29 @@ struct hisi_qp **sec_create_qps(void)
int node = cpu_to_node(raw_smp_processor_id());
u32 ctx_num = ctx_q_num;
struct hisi_qp **qps;
u8 *type;
int ret;
qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
if (!qps)
return NULL;
ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
if (!ret)
return qps;
/* All SEC queues use alg type 0, so kcalloc's zero-fill is enough */
type = kcalloc(ctx_num, sizeof(u8), GFP_KERNEL);
if (!type) {
kfree(qps);
return NULL;
}
kfree(qps);
return NULL;
ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, type, node, qps);
if (ret) {
kfree(type);
kfree(qps);
return NULL;
}
kfree(type);
return qps;
}
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)


@ -260,7 +260,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
return curr_hw_sgl;
err_unmap:
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
dma_unmap_sg(dev, sgl, sg_n, dir);
return ERR_PTR(ret);
}


@ -40,6 +40,7 @@
#define SEED_SHIFT_24 24
#define SEED_SHIFT_16 16
#define SEED_SHIFT_8 8
#define SW_MAX_RANDOM_BYTES 65520
struct hisi_trng_list {
struct mutex lock;
@ -53,8 +54,10 @@ struct hisi_trng {
struct list_head list;
struct hwrng rng;
u32 ver;
bool is_used;
struct mutex mutex;
u32 ctx_num;
/* The bytes of the random number generated since the last seeding. */
u32 random_bytes;
struct mutex lock;
};
struct hisi_trng_ctx {
@ -63,10 +66,14 @@ struct hisi_trng_ctx {
static atomic_t trng_active_devs;
static struct hisi_trng_list trng_devices;
static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait);
static void hisi_trng_set_seed(struct hisi_trng *trng, const u8 *seed)
static int hisi_trng_set_seed(struct hisi_trng *trng, const u8 *seed)
{
u32 val, seed_reg, i;
int ret;
writel(0x0, trng->base + SW_DRBG_BLOCKS);
for (i = 0; i < SW_DRBG_SEED_SIZE;
i += SW_DRBG_SEED_SIZE / SW_DRBG_SEED_REGS_NUM) {
@ -78,6 +85,20 @@ static void hisi_trng_set_seed(struct hisi_trng *trng, const u8 *seed)
seed_reg = (i >> SW_DRBG_NUM_SHIFT) % SW_DRBG_SEED_REGS_NUM;
writel(val, trng->base + SW_DRBG_SEED(seed_reg));
}
writel(SW_DRBG_BLOCKS_NUM | (0x1 << SW_DRBG_ENABLE_SHIFT),
trng->base + SW_DRBG_BLOCKS);
writel(0x1, trng->base + SW_DRBG_INIT);
ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS,
val, val & BIT(0), SLEEP_US, TIMEOUT_US);
if (ret) {
pr_err("failed to init trng(%d)\n", ret);
return -EIO;
}
trng->random_bytes = 0;
return 0;
}
static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
@ -85,8 +106,7 @@ static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
{
struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
struct hisi_trng *trng = ctx->trng;
u32 val = 0;
int ret = 0;
int ret;
if (slen < SW_DRBG_SEED_SIZE) {
pr_err("slen(%u) is not matched with trng(%d)\n", slen,
@ -94,43 +114,45 @@ static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
return -EINVAL;
}
writel(0x0, trng->base + SW_DRBG_BLOCKS);
hisi_trng_set_seed(trng, seed);
writel(SW_DRBG_BLOCKS_NUM | (0x1 << SW_DRBG_ENABLE_SHIFT),
trng->base + SW_DRBG_BLOCKS);
writel(0x1, trng->base + SW_DRBG_INIT);
ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS,
val, val & BIT(0), SLEEP_US, TIMEOUT_US);
if (ret)
pr_err("fail to init trng(%d)\n", ret);
mutex_lock(&trng->lock);
ret = hisi_trng_set_seed(trng, seed);
mutex_unlock(&trng->lock);
return ret;
}
static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
unsigned int slen, u8 *dstn, unsigned int dlen)
static int hisi_trng_reseed(struct hisi_trng *trng)
{
u8 seed[SW_DRBG_SEED_SIZE];
int size;
if (!trng->random_bytes)
return 0;
size = hisi_trng_read(&trng->rng, seed, SW_DRBG_SEED_SIZE, false);
if (size != SW_DRBG_SEED_SIZE)
return -EIO;
return hisi_trng_set_seed(trng, seed);
}
static int hisi_trng_get_bytes(struct hisi_trng *trng, u8 *dstn, unsigned int dlen)
{
struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
struct hisi_trng *trng = ctx->trng;
u32 data[SW_DRBG_DATA_NUM];
u32 currsize = 0;
u32 val = 0;
int ret;
u32 i;
if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) {
pr_err("dlen(%u) exceeds limit(%d)!\n", dlen,
SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES);
return -EINVAL;
}
ret = hisi_trng_reseed(trng);
if (ret)
return ret;
do {
ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS,
val, val & BIT(1), SLEEP_US, TIMEOUT_US);
val, val & BIT(1), SLEEP_US, TIMEOUT_US);
if (ret) {
pr_err("fail to generate random number(%d)!\n", ret);
pr_err("failed to generate random number(%d)!\n", ret);
break;
}
@ -145,30 +167,57 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
currsize = dlen;
}
trng->random_bytes += SW_DRBG_BYTES;
writel(0x1, trng->base + SW_DRBG_GEN);
} while (currsize < dlen);
return ret;
}
static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
unsigned int slen, u8 *dstn, unsigned int dlen)
{
struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
struct hisi_trng *trng = ctx->trng;
unsigned int currsize = 0;
unsigned int block_size;
int ret;
if (!dstn || !dlen) {
pr_err("output is error, dlen %u!\n", dlen);
return -EINVAL;
}
do {
block_size = min_t(unsigned int, dlen - currsize, SW_MAX_RANDOM_BYTES);
mutex_lock(&trng->lock);
ret = hisi_trng_get_bytes(trng, dstn + currsize, block_size);
mutex_unlock(&trng->lock);
if (ret)
return ret;
currsize += block_size;
} while (currsize < dlen);
return 0;
}
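
Two mechanisms combine in the generate path: output is produced in SW_MAX_RANDOM_BYTES chunks under the per-device mutex, and the random_bytes counter, primed to the limit at probe time, forces a reseed before the first chunk and after every chunk that produced output, because hisi_trng_reseed() only skips work while the counter is zero. A stripped-down sketch of the chunking and reseed gating (the fake_* helpers are placeholders, not driver functions):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAX_BYTES_PER_SEED 65520u	/* mirrors SW_MAX_RANDOM_BYTES */

static unsigned int since_seed = MAX_BYTES_PER_SEED; /* primed: reseed on first use */

static void fake_reseed(void)
{
	since_seed = 0;			/* hisi_trng_set_seed() resets the counter */
}

static void fake_fill(uint8_t *dst, size_t len)
{
	memset(dst, 0xa5, len);		/* stand-in for reading SW_DRBG data */
	since_seed += len;
}

void generate(uint8_t *dst, size_t dlen)
{
	size_t done = 0;

	while (done < dlen) {
		size_t chunk = dlen - done;

		if (chunk > MAX_BYTES_PER_SEED)
			chunk = MAX_BYTES_PER_SEED;
		if (since_seed)		/* hisi_trng_reseed() is a no-op at 0 */
			fake_reseed();
		fake_fill(dst + done, chunk);
		done += chunk;
	}
}
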
static int hisi_trng_init(struct crypto_tfm *tfm)
{
struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm);
struct hisi_trng *trng;
int ret = -EBUSY;
u32 ctx_num = ~0;
mutex_lock(&trng_devices.lock);
list_for_each_entry(trng, &trng_devices.list, list) {
if (!trng->is_used) {
trng->is_used = true;
if (trng->ctx_num < ctx_num) {
ctx_num = trng->ctx_num;
ctx->trng = trng;
ret = 0;
break;
}
}
ctx->trng->ctx_num++;
mutex_unlock(&trng_devices.lock);
return ret;
return 0;
}
static void hisi_trng_exit(struct crypto_tfm *tfm)
@ -176,7 +225,7 @@ static void hisi_trng_exit(struct crypto_tfm *tfm)
struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm);
mutex_lock(&trng_devices.lock);
ctx->trng->is_used = false;
ctx->trng->ctx_num--;
mutex_unlock(&trng_devices.lock);
}
@ -238,7 +287,7 @@ static int hisi_trng_del_from_list(struct hisi_trng *trng)
int ret = -EBUSY;
mutex_lock(&trng_devices.lock);
if (!trng->is_used) {
if (!trng->ctx_num) {
list_del(&trng->list);
ret = 0;
}
@ -262,7 +311,9 @@ static int hisi_trng_probe(struct platform_device *pdev)
if (IS_ERR(trng->base))
return PTR_ERR(trng->base);
trng->is_used = false;
trng->ctx_num = 0;
trng->random_bytes = SW_MAX_RANDOM_BYTES;
mutex_init(&trng->lock);
trng->ver = readl(trng->base + HISI_TRNG_VERSION);
if (!trng_devices.is_init) {
INIT_LIST_HEAD(&trng_devices.list);


@ -99,7 +99,7 @@ enum zip_cap_table_type {
ZIP_CORE5_BITMAP,
};
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);


@ -17,13 +17,17 @@
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_DEFLATE 0x01
#define HZIP_ALG_TYPE_LZ4 0x04
#define HZIP_BUF_TYPE_M GENMASK(11, 8)
#define HZIP_SGL 0x1
#define HZIP_WIN_SIZE_M GENMASK(15, 12)
#define HZIP_16K_WINSZ 0x2
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
#define HZIP_ALG_DEFLATE GENMASK(5, 4)
#define HZIP_ALG_LZ4 BIT(8)
static DEFINE_MUTEX(zip_algs_lock);
static unsigned int zip_available_devs;
@ -39,8 +43,10 @@ enum {
HZIP_CTX_Q_NUM
};
#define GET_REQ_FROM_SQE(sqe) ((u64)(sqe)->dw26 | (u64)(sqe)->dw27 << 32)
#define COMP_NAME_TO_TYPE(alg_name) \
(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0)
(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : \
(!strcmp((alg_name), "lz4") ? HZIP_ALG_TYPE_LZ4 : 0))
struct hisi_zip_req {
struct acomp_req *req;
@ -48,6 +54,7 @@ struct hisi_zip_req {
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
dma_addr_t dma_dst;
struct hisi_zip_qp_ctx *qp_ctx;
u16 req_id;
};
@ -64,6 +71,7 @@ struct hisi_zip_qp_ctx {
struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
struct hisi_zip_ctx *ctx;
u8 req_type;
};
struct hisi_zip_sqe_ops {
@ -72,9 +80,9 @@ struct hisi_zip_sqe_ops {
void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
void (*fill_win_size)(struct hisi_zip_sqe *sqe, u8 win_size);
void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
u32 (*get_tag)(struct hisi_zip_sqe *sqe);
u32 (*get_status)(struct hisi_zip_sqe *sqe);
u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};
@ -82,6 +90,7 @@ struct hisi_zip_sqe_ops {
struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
const struct hisi_zip_sqe_ops *ops;
bool fallback;
};
static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
@ -108,6 +117,24 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
static int hisi_zip_fallback_do_work(struct acomp_req *acomp_req, bool is_decompress)
{
ACOMP_FBREQ_ON_STACK(fbreq, acomp_req);
int ret;
if (!is_decompress)
ret = crypto_acomp_compress(fbreq);
else
ret = crypto_acomp_decompress(fbreq);
if (ret) {
pr_err("failed to do fallback work, ret=%d\n", ret);
return ret;
}
acomp_req->dlen = fbreq->dlen;
return ret;
}
static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
struct acomp_req *req)
{
@ -131,6 +158,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
req_cache->qp_ctx = qp_ctx;
return req_cache;
}
@ -179,9 +207,19 @@ static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
sqe->dw9 = val;
}
static void hisi_zip_fill_win_size(struct hisi_zip_sqe *sqe, u8 win_size)
{
u32 val;
val = sqe->dw9 & ~HZIP_WIN_SIZE_M;
val |= FIELD_PREP(HZIP_WIN_SIZE_M, win_size);
sqe->dw9 = val;
}
static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
sqe->dw26 = req->req_id;
sqe->dw26 = lower_32_bits((u64)req);
sqe->dw27 = upper_32_bits((u64)req);
}
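
With the request pointer now carried in the sqe instead of a req_id, dw26 and dw27 hold its two 32-bit halves and GET_REQ_FROM_SQE() reassembles them on completion. The round trip is easy to sanity-check in isolation:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t req = 0xdeadbeefcafe0123ull;	/* stand-in for a request pointer */
	uint32_t dw26 = (uint32_t)req;		/* lower_32_bits(req) */
	uint32_t dw27 = (uint32_t)(req >> 32);	/* upper_32_bits(req) */

	/* GET_REQ_FROM_SQE(): (u64)dw26 | (u64)dw27 << 32 */
	assert(((uint64_t)dw26 | ((uint64_t)dw27 << 32)) == req);
	return 0;
}
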
static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
@ -204,6 +242,7 @@ static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe
ops->fill_buf_size(sqe, req);
ops->fill_buf_type(sqe, HZIP_SGL);
ops->fill_req_type(sqe, req_type);
ops->fill_win_size(sqe, HZIP_16K_WINSZ);
ops->fill_tag(sqe, req);
ops->fill_sqe_type(sqe, ops->sqe_type);
}
@ -213,7 +252,6 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
@ -237,18 +275,16 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
&req->dma_dst, DMA_FROM_DEVICE);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
ret);
goto err_unmap_input;
}
hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp_ctx->req_type, req);
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
spin_lock_bh(&req_q->req_lock);
ret = hisi_qp_send(qp, &zip_sqe);
spin_unlock_bh(&req_q->req_lock);
if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
@ -265,11 +301,6 @@ err_unmap_input:
return ret;
}
static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe)
{
return sqe->dw26;
}
static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
{
return sqe->dw3 & HZIP_BD_STATUS_M;
@ -282,14 +313,12 @@ static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
struct hisi_zip_sqe *sqe = data;
struct hisi_zip_req *req = (struct hisi_zip_req *)GET_REQ_FROM_SQE(sqe);
struct hisi_zip_qp_ctx *qp_ctx = req->qp_ctx;
const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_zip_sqe *sqe = data;
u32 tag = ops->get_tag(sqe);
struct hisi_zip_req *req = req_q->q + tag;
struct acomp_req *acomp_req = req->req;
int err = 0;
u32 status;
@ -319,10 +348,15 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
struct device *dev;
int ret;
if (ctx->fallback)
return hisi_zip_fallback_do_work(acomp_req, 0);
dev = &qp_ctx->qp->qm->pdev->dev;
req = hisi_zip_create_req(qp_ctx, acomp_req);
if (IS_ERR(req))
return PTR_ERR(req);
@ -340,10 +374,15 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
struct device *dev;
int ret;
if (ctx->fallback)
return hisi_zip_fallback_do_work(acomp_req, 1);
dev = &qp_ctx->qp->qm->pdev->dev;
req = hisi_zip_create_req(qp_ctx, acomp_req);
if (IS_ERR(req))
return PTR_ERR(req);
@ -358,31 +397,9 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return ret;
}
static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
int alg_type, int req_type)
static int hisi_zip_decompress(struct acomp_req *acomp_req)
{
struct device *dev = &qp->qm->pdev->dev;
int ret;
qp->req_type = req_type;
qp->alg_type = alg_type;
qp->qp_ctx = qp_ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(dev, "failed to start qp (%d)!\n", ret);
return ret;
}
qp_ctx->qp = qp;
return 0;
}
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
hisi_qm_stop_qp(qp_ctx->qp);
hisi_qm_free_qps(&qp_ctx->qp, 1);
return hisi_zip_fallback_do_work(acomp_req, 1);
}
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
@ -391,9 +408,9 @@ static const struct hisi_zip_sqe_ops hisi_zip_ops = {
.fill_buf_size = hisi_zip_fill_buf_size,
.fill_buf_type = hisi_zip_fill_buf_type,
.fill_req_type = hisi_zip_fill_req_type,
.fill_win_size = hisi_zip_fill_win_size,
.fill_tag = hisi_zip_fill_tag,
.fill_sqe_type = hisi_zip_fill_sqe_type,
.get_tag = hisi_zip_get_tag,
.get_status = hisi_zip_get_status,
.get_dstlen = hisi_zip_get_dstlen,
};
@ -402,10 +419,15 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip_qp_ctx *qp_ctx;
u8 alg_type[HZIP_CTX_Q_NUM];
struct hisi_zip *hisi_zip;
int ret, i, j;
int ret, i;
ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
alg_type[i] = i;
ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node, alg_type);
if (ret) {
pr_err("failed to create zip qps (%d)!\n", ret);
return -ENODEV;
@ -414,19 +436,11 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
qp_ctx = &hisi_zip_ctx->qp_ctx[i];
qp_ctx->ctx = hisi_zip_ctx;
ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
if (ret) {
for (j = i - 1; j >= 0; j--)
hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
return ret;
}
qp_ctx->zip_dev = hisi_zip;
qp_ctx->req_type = req_type;
qp_ctx->qp = qps[i];
}
hisi_zip_ctx->ops = &hisi_zip_ops;
@ -436,10 +450,13 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
int i;
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
qps[i] = hisi_zip_ctx->qp_ctx[i].qp;
hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
}
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
@ -549,7 +566,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
if (ret) {
pr_err("failed to init ctx (%d)!\n", ret);
return ret;
goto switch_to_soft;
}
dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;
@ -574,14 +591,18 @@ err_release_req_q:
hisi_zip_release_req_q(ctx);
err_ctx_exit:
hisi_zip_ctx_exit(ctx);
return ret;
switch_to_soft:
ctx->fallback = true;
return 0;
}
static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
hisi_zip_set_acomp_cb(ctx, NULL);
if (ctx->fallback)
return;
hisi_zip_release_sgl_pool(ctx);
hisi_zip_release_req_q(ctx);
hisi_zip_ctx_exit(ctx);
@ -595,7 +616,8 @@ static struct acomp_alg hisi_zip_acomp_deflate = {
.base = {
.cra_name = "deflate",
.cra_driver_name = "hisi-deflate-acomp",
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_module = THIS_MODULE,
.cra_priority = HZIP_ALG_PRIORITY,
.cra_ctxsize = sizeof(struct hisi_zip_ctx),
@ -624,18 +646,69 @@ static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
crypto_unregister_acomp(&hisi_zip_acomp_deflate);
}
static struct acomp_alg hisi_zip_acomp_lz4 = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
.compress = hisi_zip_acompress,
.decompress = hisi_zip_decompress,
.base = {
.cra_name = "lz4",
.cra_driver_name = "hisi-lz4-acomp",
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_module = THIS_MODULE,
.cra_priority = HZIP_ALG_PRIORITY,
.cra_ctxsize = sizeof(struct hisi_zip_ctx),
}
};
static int hisi_zip_register_lz4(struct hisi_qm *qm)
{
int ret;
if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
return 0;
ret = crypto_register_acomp(&hisi_zip_acomp_lz4);
if (ret)
dev_err(&qm->pdev->dev, "failed to register to LZ4 (%d)!\n", ret);
return ret;
}
static void hisi_zip_unregister_lz4(struct hisi_qm *qm)
{
if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
return;
crypto_unregister_acomp(&hisi_zip_acomp_lz4);
}
int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
int ret = 0;
mutex_lock(&zip_algs_lock);
if (zip_available_devs++)
if (zip_available_devs) {
zip_available_devs++;
goto unlock;
}
ret = hisi_zip_register_deflate(qm);
if (ret)
zip_available_devs--;
goto unlock;
ret = hisi_zip_register_lz4(qm);
if (ret)
goto unreg_deflate;
zip_available_devs++;
mutex_unlock(&zip_algs_lock);
return 0;
unreg_deflate:
hisi_zip_unregister_deflate(qm);
unlock:
mutex_unlock(&zip_algs_lock);
return ret;
@ -648,6 +721,7 @@ void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
goto unlock;
hisi_zip_unregister_deflate(qm);
hisi_zip_unregister_lz4(qm);
unlock:
mutex_unlock(&zip_algs_lock);


@ -446,12 +446,12 @@ static const struct pci_device_id hisi_zip_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type)
{
if (node == NUMA_NO_NODE)
node = cpu_to_node(raw_smp_processor_id());
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, alg_type, node, qps);
}
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)


@ -870,25 +870,18 @@ static int img_register_algs(struct img_hash_dev *hdev)
for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
err = crypto_register_ahash(&img_algs[i]);
if (err)
goto err_reg;
if (err) {
crypto_unregister_ahashes(img_algs, i);
return err;
}
}
return 0;
err_reg:
for (; i--; )
crypto_unregister_ahash(&img_algs[i]);
return err;
}
static int img_unregister_algs(struct img_hash_dev *hdev)
static void img_unregister_algs(struct img_hash_dev *hdev)
{
int i;
for (i = 0; i < ARRAY_SIZE(img_algs); i++)
crypto_unregister_ahash(&img_algs[i]);
return 0;
crypto_unregister_ahashes(img_algs, ARRAY_SIZE(img_algs));
}
static void img_hash_done_task(unsigned long data)


@ -77,11 +77,44 @@ inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask)
__raw_writel(mask, eip93->base + EIP93_REG_INT_CLR);
}
static void eip93_unregister_algs(unsigned int i)
static int eip93_algo_is_supported(u32 alg_flags, u32 supported_algo_flags)
{
if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
!(supported_algo_flags & EIP93_PE_OPTION_TDES))
return 0;
if (IS_AES(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_AES))
return 0;
if (IS_HASH_MD5(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_MD5))
return 0;
if (IS_HASH_SHA1(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
return 0;
if (IS_HASH_SHA224(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
return 0;
if (IS_HASH_SHA256(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
return 0;
return 1;
}
static void eip93_unregister_algs(u32 supported_algo_flags, unsigned int i)
{
unsigned int j;
for (j = 0; j < i; j++) {
if (!eip93_algo_is_supported(eip93_algs[j]->flags,
supported_algo_flags))
continue;
switch (eip93_algs[j]->type) {
case EIP93_ALG_TYPE_SKCIPHER:
crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
@ -90,7 +123,7 @@ static void eip93_unregister_algs(unsigned int i)
crypto_unregister_aead(&eip93_algs[j]->alg.aead);
break;
case EIP93_ALG_TYPE_HASH:
crypto_unregister_ahash(&eip93_algs[i]->alg.ahash);
crypto_unregister_ahash(&eip93_algs[j]->alg.ahash);
break;
}
}
@ -106,49 +139,27 @@ static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_fl
eip93_algs[i]->eip93 = eip93;
if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
!(supported_algo_flags & EIP93_PE_OPTION_TDES))
if (!eip93_algo_is_supported(alg_flags, supported_algo_flags))
continue;
if (IS_AES(alg_flags)) {
if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
continue;
if (IS_AES(alg_flags) && !IS_HMAC(alg_flags)) {
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
eip93_algs[i]->alg.skcipher.max_keysize =
AES_KEYSIZE_128;
if (!IS_HMAC(alg_flags)) {
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
eip93_algs[i]->alg.skcipher.max_keysize =
AES_KEYSIZE_128;
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
eip93_algs[i]->alg.skcipher.max_keysize =
AES_KEYSIZE_192;
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
eip93_algs[i]->alg.skcipher.max_keysize =
AES_KEYSIZE_192;
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
eip93_algs[i]->alg.skcipher.max_keysize =
AES_KEYSIZE_256;
if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
eip93_algs[i]->alg.skcipher.max_keysize =
AES_KEYSIZE_256;
if (IS_RFC3686(alg_flags))
eip93_algs[i]->alg.skcipher.max_keysize +=
CTR_RFC3686_NONCE_SIZE;
}
if (IS_RFC3686(alg_flags))
eip93_algs[i]->alg.skcipher.max_keysize +=
CTR_RFC3686_NONCE_SIZE;
}
if (IS_HASH_MD5(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_MD5))
continue;
if (IS_HASH_SHA1(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
continue;
if (IS_HASH_SHA224(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
continue;
if (IS_HASH_SHA256(alg_flags) &&
!(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
continue;
switch (eip93_algs[i]->type) {
case EIP93_ALG_TYPE_SKCIPHER:
ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher);
@ -167,7 +178,7 @@ static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_fl
return 0;
fail:
eip93_unregister_algs(i);
eip93_unregister_algs(supported_algo_flags, i);
return ret;
}
@ -469,8 +480,11 @@ static int eip93_crypto_probe(struct platform_device *pdev)
static void eip93_crypto_remove(struct platform_device *pdev)
{
struct eip93_device *eip93 = platform_get_drvdata(pdev);
u32 algo_flags;
eip93_unregister_algs(ARRAY_SIZE(eip93_algs));
algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1);
eip93_unregister_algs(algo_flags, ARRAY_SIZE(eip93_algs));
eip93_cleanup(eip93);
}


@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
@ -96,7 +97,7 @@ static bool iaa_verify_compress = true;
static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
{
return sprintf(buf, "%d\n", iaa_verify_compress);
return sysfs_emit(buf, "%d\n", iaa_verify_compress);
}
static ssize_t verify_compress_store(struct device_driver *driver,
@ -188,11 +189,11 @@ static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
int ret = 0;
if (!async_mode && !use_irq)
ret = sprintf(buf, "%s\n", "sync");
ret = sysfs_emit(buf, "%s\n", "sync");
else if (async_mode && !use_irq)
ret = sprintf(buf, "%s\n", "async");
ret = sysfs_emit(buf, "%s\n", "async");
else if (async_mode && use_irq)
ret = sprintf(buf, "%s\n", "async_irq");
ret = sysfs_emit(buf, "%s\n", "async_irq");
return ret;
}
@ -221,15 +222,13 @@ static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];
static int find_empty_iaa_compression_mode(void)
{
int i = -EINVAL;
int i;
for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
if (iaa_compression_modes[i])
continue;
break;
}
for (i = 0; i < IAA_COMP_MODES_MAX; i++)
if (!iaa_compression_modes[i])
return i;
return i;
return -EINVAL;
}
static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
@ -544,13 +543,7 @@ static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
{
int ret = 0;
ret = init_device_compression_modes(iaa_device, iaa_wq->wq);
if (ret)
return ret;
return ret;
return init_device_compression_modes(iaa_device, iaa_wq->wq);
}
static void del_iaa_device(struct iaa_device *iaa_device)
@ -1704,12 +1697,10 @@ out:
return ret;
}
static int iaa_unregister_compression_device(void)
static void iaa_unregister_compression_device(void)
{
if (iaa_crypto_registered)
crypto_unregister_acomp(&iaa_acomp_fixed_deflate);
return 0;
}
static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
@ -1925,8 +1916,7 @@ err_aecs_init:
static void __exit iaa_crypto_cleanup_module(void)
{
if (iaa_unregister_compression_device())
pr_debug("IAA compression device unregister failed\n");
iaa_unregister_compression_device();
iaa_crypto_debugfs_cleanup();
driver_remove_file(&iaa_crypto_driver.drv,


@ -3,6 +3,7 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@ -459,6 +460,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
hw_data->bank_state_save = adf_bank_state_save;
hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;


@ -148,6 +148,16 @@ static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
break;
default:
dev_err(&GET_DEV(vf_info->accel_dev),
"Invalid BlockMsg type 0x%.4x received from VF%u\n",
req.type, vf_info->vf_nr);
resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK,
ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR) |
FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK,
ADF_PF2VF_UNSPECIFIED_ERROR);
return resp;
}
/* Is this a request for CRC or data? */


@ -255,8 +255,8 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->areq.dh = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
QAT_COMN_PTR_TYPE_FLAT);
/*
* If no source is provided use g as base
@ -731,8 +731,8 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
QAT_COMN_PTR_TYPE_FLAT);
qat_req->in.rsa.enc.e = ctx->dma_e;
qat_req->in.rsa.enc.n = ctx->dma_n;
@ -867,8 +867,8 @@ static int qat_rsa_dec(struct akcipher_request *req)
qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
QAT_COMN_PTR_TYPE_FLAT);
if (ctx->crt_mode) {
qat_req->in.rsa.dec_crt.p = ctx->dma_p;


@ -38,15 +38,9 @@ struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
struct crypto_async_request **backlog)
{
struct crypto_async_request *req;
*backlog = crypto_get_backlog(&engine->queue);
req = crypto_dequeue_request(&engine->queue);
if (!req)
return NULL;
return req;
return crypto_dequeue_request(&engine->queue);
}
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)


@ -1326,7 +1326,7 @@ static ssize_t ucode_load_store(struct device *dev,
int del_grp_idx = -1;
int ucode_idx = 0;
if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
if (count >= OTX_CPT_UCODE_NAME_LENGTH)
return -EINVAL;
eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);


@ -168,7 +168,8 @@ static void free_command_queues(struct otx_cptvf *cptvf,
chunk = list_first_entry(&cqinfo->queue[i].chead,
struct otx_cpt_cmd_chunk, nextchunk);
dma_free_coherent(&pdev->dev, chunk->size,
dma_free_coherent(&pdev->dev,
chunk->size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
chunk->head,
chunk->dma_addr);
chunk->head = NULL;


@ -2,6 +2,7 @@
/* Copyright (C) 2020 Marvell. */
#include <linux/firmware.h>
#include <linux/sysfs.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
@ -507,7 +508,7 @@ static ssize_t sso_pf_func_ovrd_show(struct device *dev,
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
return sysfs_emit(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}
static ssize_t sso_pf_func_ovrd_store(struct device *dev,
@ -533,7 +534,7 @@ static ssize_t kvf_limits_show(struct device *dev,
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", cptpf->kvf_limits);
return sysfs_emit(buf, "%d\n", cptpf->kvf_limits);
}
static ssize_t kvf_limits_store(struct device *dev,


@ -908,7 +908,6 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
{
int chip_id, vasid, ret = 0;
int ct_842 = 0, ct_gzip = 0;
struct device_node *dn;
chip_id = of_get_ibm_chip_id(pn);
if (chip_id < 0) {
@ -922,7 +921,7 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
return -EINVAL;
}
for_each_child_of_node(pn, dn) {
for_each_child_of_node_scoped(pn, dn) {
ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842,
"ibm,p9-nx-842", &ct_842);
@ -930,10 +929,8 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
ret = find_nx_device_tree(dn, chip_id, vasid,
NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
if (ret) {
of_node_put(dn);
if (ret)
return ret;
}
}
if (!ct_842 || !ct_gzip) {


@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "omap-crypto.h"
@ -1042,7 +1043,7 @@ static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
{
struct omap_aes_dev *dd = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
return sysfs_emit(buf, "%d\n", dd->engine->queue.max_qlen);
}
static ssize_t queue_len_store(struct device *dev,


@ -37,6 +37,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#define MD5_DIGEST_SIZE 16
@ -1973,7 +1974,7 @@ static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
{
struct omap_sham_dev *dd = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dd->fallback_sz);
return sysfs_emit(buf, "%d\n", dd->fallback_sz);
}
static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
@ -2003,7 +2004,7 @@ static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
{
struct omap_sham_dev *dd = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dd->queue.max_qlen);
return sysfs_emit(buf, "%d\n", dd->queue.max_qlen);
}
static ssize_t queue_len_store(struct device *dev,

View file

@ -669,8 +669,10 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
return -ENOMEM;
if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
rctx->adata, cryp->assoclen) != cryp->assoclen)
rctx->adata, cryp->assoclen) != cryp->assoclen) {
kfree(rctx->adata);
return -EINVAL;
}
}
if (cryp->total_in)
@ -681,8 +683,11 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
ctx->rctx = rctx;
ret = starfive_aes_hw_init(ctx);
if (ret)
if (ret) {
if (cryp->assoclen)
kfree(rctx->adata);
return ret;
}
if (!cryp->assoclen)
goto write_text;


@ -216,13 +216,15 @@ struct starfive_cryp_request_ctx {
struct scatterlist *in_sg;
struct scatterlist *out_sg;
struct ahash_request ahash_fbk_req;
size_t total;
unsigned int blksize;
unsigned int digsize;
unsigned long in_sg_len;
unsigned char *adata;
u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32));
/* Must be last as it ends in a flexible-array member. */
struct ahash_request ahash_fbk_req;
};
struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);


@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/minmax.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@ -1922,20 +1923,19 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
u32 block[AES_BLOCK_32];
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
cryp->payload_out);
memcpy_to_scatterwalk(&cryp->out_walk, block, min(cryp->hw_blocksize,
cryp->payload_out));
cryp->payload_out -= min(cryp->hw_blocksize, cryp->payload_out);
}
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
u32 block[AES_BLOCK_32] = {0};
memcpy_from_scatterwalk(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_in));
memcpy_from_scatterwalk(block, &cryp->in_walk, min(cryp->hw_blocksize,
cryp->payload_in));
writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32));
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
cryp->payload_in -= min(cryp->hw_blocksize, cryp->payload_in);
}
static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
@ -1980,10 +1980,9 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
cryp->payload_out);
memcpy_to_scatterwalk(&cryp->out_walk, block, min(cryp->hw_blocksize,
cryp->payload_out));
cryp->payload_out -= min(cryp->hw_blocksize, cryp->payload_out);
/* d) change mode back to AES GCM */
cfg &= ~CR_ALGO_MASK;
@ -2078,9 +2077,9 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out));
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out);
memcpy_to_scatterwalk(&cryp->out_walk, block, min(cryp->hw_blocksize,
cryp->payload_out));
cryp->payload_out -= min(cryp->hw_blocksize, cryp->payload_out);
/* d) Load again CRYP_CSGCMCCMxR */
for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
@ -2158,7 +2157,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
u32 block[AES_BLOCK_32] = {0};
size_t written;
written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
written = min(AES_BLOCK_SIZE, cryp->header_in);
memcpy_from_scatterwalk(block, &cryp->in_walk, written);


@ -1115,8 +1115,7 @@ static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
return -ENOMEM;
}
if (state->bufcnt)
memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
min(new_len, rctx->total) - state->bufcnt, 0);
@ -1300,8 +1299,7 @@ static int stm32_hash_prepare_request(struct ahash_request *req)
}
/* copy buffer in a temporary one that is used for sg alignment */
if (state->bufcnt)
memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
if (ret)


@ -135,7 +135,7 @@ static inline int virtio_crypto_get_current_node(void)
int cpu, node;
cpu = get_cpu();
node = topology_physical_package_id(cpu);
node = cpu_to_node(cpu);
put_cpu();
return node;


@ -75,15 +75,20 @@ static void virtcrypto_done_task(unsigned long data)
struct data_queue *data_vq = (struct data_queue *)data;
struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
unsigned long flags;
unsigned int len;
spin_lock_irqsave(&data_vq->lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
spin_unlock_irqrestore(&data_vq->lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
spin_lock_irqsave(&data_vq->lock, flags);
}
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&data_vq->lock, flags);
}
static void virtcrypto_dataq_callback(struct virtqueue *vq)


@ -541,8 +541,6 @@ int virtio_crypto_skcipher_crypt_req(
if (ret < 0)
return ret;
virtqueue_kick(data_vq->vq);
return 0;
}

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o zynqmp-crypto.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o


@ -0,0 +1,238 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Firmware layer for XilSecure APIs.
*
* Copyright (C) 2014-2022 Xilinx, Inc.
* Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
*/
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/module.h>
/**
* zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using
* AES-GCM core.
* @address: Address of the AesParams structure.
* @out: Returned output value
*
* Return: Returns status, either success or error code.
*/
int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!out)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, ret_payload, 2, upper_32_bits(address),
lower_32_bits(address));
*out = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
* zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
* @address: Address of the data/ Address of output buffer where
* hash should be stored.
* @size: Size of the data.
* @flags:
* BIT(0) - for initializing csudma driver and SHA3(Here address
* and size inputs can be NULL).
* BIT(1) - to call Sha3_Update API which can be called multiple
* times when data is not contiguous.
* BIT(2) - to get final hash of the whole updated data.
* Hash will be overwritten at provided address with
* 48 bytes.
*
* Return: Returns status, either success or error code.
*/
int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
{
u32 lower_addr = lower_32_bits(address);
u32 upper_addr = upper_32_bits(address);
return zynqmp_pm_invoke_fn(PM_SECURE_SHA, NULL, 4, upper_addr, lower_addr, size, flags);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
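
Per the flag description above, a one-shot digest is a three-call sequence: init with BIT(0) (address and size ignored), one or more BIT(1) updates, then BIT(2) to finalize, which overwrites 48 bytes at the supplied address. A minimal sketch, assuming both buffers are already DMA-addressable; the function name is hypothetical:

static int sha3_digest_example(u64 data_addr, u32 data_len, u64 hash_addr)
{
	int ret;

	ret = zynqmp_pm_sha_hash(0, 0, BIT(0));		/* init engine + csudma */
	if (ret)
		return ret;

	ret = zynqmp_pm_sha_hash(data_addr, data_len, BIT(1));	/* update */
	if (ret)
		return ret;

	/* final: the 48-byte hash is written back to hash_addr */
	return zynqmp_pm_sha_hash(hash_addr, 48, BIT(2));
}
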
/**
* xlnx_get_crypto_dev_data() - Get crypto dev data of platform
* @feature_map: List of available feature map of all platform
*
* Return: Returns crypto dev data, either address crypto dev or ERR PTR
*/
void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map)
{
struct xlnx_feature *feature;
u32 pm_family_code;
int ret;
/* Get the Family code and sub family code of platform */
ret = zynqmp_pm_get_family_info(&pm_family_code);
if (ret < 0)
return ERR_PTR(ret);
feature = feature_map;
for (; feature->family; feature++) {
if (feature->family == pm_family_code) {
ret = zynqmp_pm_feature(feature->feature_id);
if (ret < 0)
return ERR_PTR(ret);
return feature->data;
}
}
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(xlnx_get_crypto_dev_data);
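/*
 * Illustrative use of the lookup above. The family code constant and the
 * driver-data object are placeholders; only struct xlnx_feature and
 * xlnx_get_crypto_dev_data() come from this file:
 *
 *	static struct xlnx_feature aes_feature_map[] = {
 *		{
 *			.family = VERSAL_FAMILY_CODE,	// placeholder constant
 *			.feature_id = XSECURE_API_AES_INIT,
 *			.data = &versal_aes_drv_data,	// hypothetical
 *		},
 *		{ }	// zero @family terminates the table
 *	};
 *
 *	drv_data = xlnx_get_crypto_dev_data(aes_feature_map);
 *	if (IS_ERR(drv_data))
 *		return PTR_ERR(drv_data);
 */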
/**
* versal_pm_aes_key_write - Write AES key registers
* @keylen: Size of the input key to be written
* @keysrc: Key source to be selected, i.e. the key register to
* which the provided key should be written
* @keyaddr: Address of a buffer which should contain the key
* to be written
*
* This function provides support to write AES volatile user keys.
*
* Return: Returns status, either success or error+reason
*/
int versal_pm_aes_key_write(const u32 keylen,
const u32 keysrc, const u64 keyaddr)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_WRITE_KEY, NULL, 4,
keylen, keysrc,
lower_32_bits(keyaddr),
upper_32_bits(keyaddr));
}
EXPORT_SYMBOL_GPL(versal_pm_aes_key_write);
/**
* versal_pm_aes_key_zero - Zeroise AES User key registers
* @keysrc: Key source to be selected, i.e. the user key register
* to be zeroised
*
* This function provides support to zeroise AES volatile user keys.
*
* Return: Returns status, either success or error+reason
*/
int versal_pm_aes_key_zero(const u32 keysrc)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_KEY_ZERO, NULL, 1, keysrc);
}
EXPORT_SYMBOL_GPL(versal_pm_aes_key_zero);
/**
* versal_pm_aes_op_init - Init AES operation
* @hw_req: AES op init structure address
*
* This function provides support to init AES operation.
*
* Return: Returns status, either success or error+reason
*/
int versal_pm_aes_op_init(const u64 hw_req)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_OP_INIT, NULL, 2,
lower_32_bits(hw_req),
upper_32_bits(hw_req));
}
EXPORT_SYMBOL_GPL(versal_pm_aes_op_init);
/**
* versal_pm_aes_update_aad - AES update aad
* @aad_addr: AES aad address
* @aad_len: AES aad data length
*
* This function provides support to update AAD data.
*
* Return: Returns status, either success or error+reason
*/
int versal_pm_aes_update_aad(const u64 aad_addr, const u32 aad_len)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_UPDATE_AAD, NULL, 3,
lower_32_bits(aad_addr),
upper_32_bits(aad_addr),
aad_len);
}
EXPORT_SYMBOL_GPL(versal_pm_aes_update_aad);
/**
* versal_pm_aes_enc_update - Access AES hardware to encrypt the data using
* AES-GCM core.
* @in_params: Address of the AesParams structure
* @in_addr: Address of input buffer
*
* Return: Returns status, either success or error code.
*/
int versal_pm_aes_enc_update(const u64 in_params, const u64 in_addr)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_ENCRYPT_UPDATE, NULL, 4,
lower_32_bits(in_params),
upper_32_bits(in_params),
lower_32_bits(in_addr),
upper_32_bits(in_addr));
}
EXPORT_SYMBOL_GPL(versal_pm_aes_enc_update);
/**
* versal_pm_aes_enc_final - Access AES hardware to store the GCM tag
* @gcm_addr: Address of the gcm tag
*
* Return: Returns status, either success or error code.
*/
int versal_pm_aes_enc_final(const u64 gcm_addr)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_ENCRYPT_FINAL, NULL, 2,
lower_32_bits(gcm_addr),
upper_32_bits(gcm_addr));
}
EXPORT_SYMBOL_GPL(versal_pm_aes_enc_final);
/**
* versal_pm_aes_dec_update - Access AES hardware to decrypt the data using
* AES-GCM core.
* @in_params: Address of the AesParams structure
* @in_addr: Address of input buffer
*
* Return: Returns status, either success or error code.
*/
int versal_pm_aes_dec_update(const u64 in_params, const u64 in_addr)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_DECRYPT_UPDATE, NULL, 4,
lower_32_bits(in_params),
upper_32_bits(in_params),
lower_32_bits(in_addr),
upper_32_bits(in_addr));
}
EXPORT_SYMBOL_GPL(versal_pm_aes_dec_update);
/**
* versal_pm_aes_dec_final - Access AES hardware to get the GCM tag
* @gcm_addr: Address of the gcm tag
*
* Return: Returns status, either success or error code.
*/
int versal_pm_aes_dec_final(const u64 gcm_addr)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_DECRYPT_FINAL, NULL, 2,
lower_32_bits(gcm_addr),
upper_32_bits(gcm_addr));
}
EXPORT_SYMBOL_GPL(versal_pm_aes_dec_final);
/**
* versal_pm_aes_init - Init AES block
*
* This function initialises the AES block.
*
* Return: Returns status, either success or error+reason
*/
int versal_pm_aes_init(void)
{
return zynqmp_pm_invoke_fn(XSECURE_API_AES_INIT, NULL, 0);
}
EXPORT_SYMBOL_GPL(versal_pm_aes_init);
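
Taken together, the Versal entry points above map onto a straightforward AES-GCM session. The ordering in the sketch below is inferred from the API names and is an assumption, not something this file documents; allocation and DMA mapping of the buffers, and the layout of the hw_req/in_params structures, are omitted:

static int example_versal_gcm_encrypt(u32 keylen, u32 keysrc, u64 keyaddr,
				      u64 hw_req, u64 in_params, u64 in_addr,
				      u64 aad_addr, u32 aad_len, u64 gcm_addr)
{
	int ret;

	ret = versal_pm_aes_init();		/* one-time engine bring-up */
	if (ret)
		return ret;
	ret = versal_pm_aes_key_write(keylen, keysrc, keyaddr);
	if (ret)
		return ret;
	ret = versal_pm_aes_op_init(hw_req);	/* key source, IV, mode */
	if (ret)
		return ret;
	ret = versal_pm_aes_update_aad(aad_addr, aad_len);
	if (ret)
		return ret;
	ret = versal_pm_aes_enc_update(in_params, in_addr);
	if (ret)
		return ret;
	ret = versal_pm_aes_enc_final(gcm_addr);	/* emit the GCM tag */

	/* scrub the volatile user key regardless of the outcome */
	versal_pm_aes_key_zero(keysrc);
	return ret;
}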


@@ -1521,30 +1521,6 @@ int zynqmp_pm_load_pdi(const u32 src, const u64 address)
}
EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi);
-/**
-* zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using
-* AES-GCM core.
-* @address: Address of the AesParams structure.
-* @out: Returned output value
-*
-* Return: Returns status, either success or error code.
-*/
-int zynqmp_pm_aes_engine(const u64 address, u32 *out)
-{
-u32 ret_payload[PAYLOAD_ARG_CNT];
-int ret;
-if (!out)
-return -EINVAL;
-ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, ret_payload, 2, upper_32_bits(address),
-lower_32_bits(address));
-*out = ret_payload[1];
-return ret;
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
* zynqmp_pm_efuse_access - Provides access to efuse memory.
* @address: Address of the efuse params structure
@@ -1569,31 +1545,6 @@ int zynqmp_pm_efuse_access(const u64 address, u32 *out)
}
EXPORT_SYMBOL_GPL(zynqmp_pm_efuse_access);
-/**
-* zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
-* @address: Address of the data/ Address of output buffer where
-* hash should be stored.
-* @size: Size of the data.
-* @flags:
-* BIT(0) - for initializing csudma driver and SHA3(Here address
-* and size inputs can be NULL).
-* BIT(1) - to call Sha3_Update API which can be called multiple
-* times when data is not contiguous.
-* BIT(2) - to get final hash of the whole updated data.
-* Hash will be overwritten at provided address with
-* 48 bytes.
-*
-* Return: Returns status, either success or error code.
-*/
-int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
-{
-u32 lower_addr = lower_32_bits(address);
-u32 upper_addr = upper_32_bits(address);
-return zynqmp_pm_invoke_fn(PM_SECURE_SHA, NULL, 4, upper_addr, lower_addr, size, flags);
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
/**
* zynqmp_pm_register_notifier() - PM API for register a subsystem
* to be notified about specific


@@ -390,6 +390,11 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
int i, len, rc;
u32 xflags;
+if (pflags & PKEY_XFLAG_NOCLEARKEY) {
+PKEY_DBF_ERR("%s clear key but xflag NOCLEARKEY\n", __func__);
+return -EINVAL;
+}
xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
/* check keytype, subtype, clrkeylen, keybitsize */


@@ -358,6 +358,11 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
int i, len, rc;
u32 xflags;
+if (pflags & PKEY_XFLAG_NOCLEARKEY) {
+PKEY_DBF_ERR("%s clear key but xflag NOCLEARKEY\n", __func__);
+return -EINVAL;
+}
xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
/* check keytype, subtype, clrkeylen, keybitsize */


@@ -215,7 +215,8 @@ out:
}
static int pckmo_key2protkey(const u8 *key, u32 keylen,
-u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+u32 xflags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
int rc = -EINVAL;
@@ -266,6 +267,11 @@ static int pckmo_key2protkey(const u8 *key, u32 keylen,
struct clearkeytoken *t = (struct clearkeytoken *)key;
u32 keysize;
+if (xflags & PKEY_XFLAG_NOCLEARKEY) {
+PKEY_DBF_ERR("%s clear key token but xflag NOCLEARKEY\n",
+__func__);
+goto out;
+}
if (keylen < sizeof(*t) ||
keylen < sizeof(*t) + t->len)
goto out;
@@ -406,10 +412,10 @@ static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns,
size_t _nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *keyinfo,
-u32 _xflags __always_unused)
+u32 xflags)
{
return pckmo_key2protkey(key, keylen,
-protkey, protkeylen, keyinfo);
+protkey, protkeylen, keyinfo, xflags);
}
static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns,


@@ -242,6 +242,13 @@ static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
return crypto_tfm_ctx_dma(&tfm->base);
}
+static inline bool crypto_skcipher_tested(struct crypto_skcipher *tfm)
+{
+struct crypto_tfm *tfm_base = crypto_skcipher_tfm(tfm);
+return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
return req->__ctx;


@@ -0,0 +1,119 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Firmware layer for XilSECURE APIs.
*
* Copyright (C) 2014-2022 Xilinx, Inc.
* Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
*/
#ifndef __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
#define __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__
/**
* struct xlnx_feature - Feature data
* @family: Family code of platform
* @feature_id: Feature id of module
* @data: Collection of all supported platform data
*/
struct xlnx_feature {
u32 family;
u32 feature_id;
void *data;
};
/* XilSecure API commands: module ID + API ID */
#define XSECURE_API_AES_INIT 0x509
#define XSECURE_API_AES_OP_INIT 0x50a
#define XSECURE_API_AES_UPDATE_AAD 0x50b
#define XSECURE_API_AES_ENCRYPT_UPDATE 0x50c
#define XSECURE_API_AES_ENCRYPT_FINAL 0x50d
#define XSECURE_API_AES_DECRYPT_UPDATE 0x50e
#define XSECURE_API_AES_DECRYPT_FINAL 0x50f
#define XSECURE_API_AES_KEY_ZERO 0x510
#define XSECURE_API_AES_WRITE_KEY 0x511
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_aes_engine(const u64 address, u32 *out);
int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map);
int versal_pm_aes_key_write(const u32 keylen,
const u32 keysrc, const u64 keyaddr);
int versal_pm_aes_key_zero(const u32 keysrc);
int versal_pm_aes_op_init(const u64 hw_req);
int versal_pm_aes_update_aad(const u64 aad_addr, const u32 aad_len);
int versal_pm_aes_enc_update(const u64 in_params, const u64 in_addr);
int versal_pm_aes_dec_update(const u64 in_params, const u64 in_addr);
int versal_pm_aes_dec_final(const u64 gcm_addr);
int versal_pm_aes_enc_final(const u64 gcm_addr);
int versal_pm_aes_init(void);
#else
static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
return -ENODEV;
}
static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
const u32 flags)
{
return -ENODEV;
}
static inline void *xlnx_get_crypto_dev_data(struct xlnx_feature *feature_map)
{
return ERR_PTR(-ENODEV);
}
static inline int versal_pm_aes_key_write(const u32 keylen,
const u32 keysrc, const u64 keyaddr)
{
return -ENODEV;
}
static inline int versal_pm_aes_key_zero(const u32 keysrc)
{
return -ENODEV;
}
static inline int versal_pm_aes_op_init(const u64 hw_req)
{
return -ENODEV;
}
static inline int versal_pm_aes_update_aad(const u64 aad_addr,
const u32 aad_len)
{
return -ENODEV;
}
static inline int versal_pm_aes_enc_update(const u64 in_params,
const u64 in_addr)
{
return -ENODEV;
}
static inline int versal_pm_aes_dec_update(const u64 in_params,
const u64 in_addr)
{
return -ENODEV;
}
static inline int versal_pm_aes_enc_final(const u64 gcm_addr)
{
return -ENODEV;
}
static inline int versal_pm_aes_dec_final(const u64 gcm_addr)
{
return -ENODEV;
}
static inline int versal_pm_aes_init(void)
{
return -ENODEV;
}
#endif
#endif /* __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__ */
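
Because the header provides -ENODEV stubs whenever CONFIG_ZYNQMP_FIRMWARE is not reachable, callers need no #ifdef guards. A hypothetical probe (for illustration; the function and device names are not from this diff) can call the firmware API unconditionally and treat -ENODEV as "not available on this platform":

static int example_probe(struct platform_device *pdev)
{
	int ret = versal_pm_aes_init();

	if (ret == -ENODEV)
		return ret;	/* firmware interface absent, e.g. stubbed */
	if (ret)
		dev_err(&pdev->dev, "AES engine init failed: %d\n", ret);
	return ret;
}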


@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>
+#include <linux/firmware/xlnx-zynqmp-crypto.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -589,9 +590,7 @@ int zynqmp_pm_release_node(const u32 node);
int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
-int zynqmp_pm_aes_engine(const u64 address, u32 *out);
int zynqmp_pm_efuse_access(const u64 address, u32 *out);
-int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
int zynqmp_pm_fpga_get_config_status(u32 *value);
@@ -772,22 +771,11 @@ static inline int zynqmp_pm_set_requirement(const u32 node,
return -ENODEV;
}
-static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
-{
-return -ENODEV;
-}
static inline int zynqmp_pm_efuse_access(const u64 address, u32 *out)
{
return -ENODEV;
}
-static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
-const u32 flags)
-{
-return -ENODEV;
-}
static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
const u32 flags)
{


@@ -447,12 +447,16 @@ struct hisi_qp_ops {
int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};
+struct instance_backlog {
+struct list_head list;
+spinlock_t lock;
+};
struct hisi_qp {
u32 qp_id;
u16 sq_depth;
u16 cq_depth;
u8 alg_type;
u8 req_type;
struct qm_dma qdma;
void *sqe;
@@ -462,7 +466,6 @@ struct hisi_qp {
struct hisi_qp_status qp_status;
struct hisi_qp_ops *hw_ops;
void *qp_ctx;
void (*req_cb)(struct hisi_qp *qp, void *data);
void (*event_cb)(struct hisi_qp *qp);
@@ -471,6 +474,11 @@ struct hisi_qp {
bool is_in_kernel;
u16 pasid;
struct uacce_queue *uacce_q;
+u32 ref_count;
+spinlock_t qp_lock;
+struct instance_backlog backlog;
+const void **msg;
};
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
@@ -563,6 +571,7 @@ void hisi_qm_reset_done(struct pci_dev *pdev);
int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
bool op);
+int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
@@ -575,7 +584,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
void hisi_acc_free_sgl_pool(struct device *dev,
struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
-u8 alg_type, int node, struct hisi_qp **qps);
+u8 *alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
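
The new per-qp fields give each queue pair a reference count, its own lock, and a backlog list for requests that arrive while the submission queue is full. A sketch of the setup this implies (hypothetical; this diff only shows the structure, not its init site):

static void example_qp_init_locks(struct hisi_qp *qp)
{
	/* during qp creation, before the qp is published */
	qp->ref_count = 0;
	spin_lock_init(&qp->qp_lock);
	INIT_LIST_HEAD(&qp->backlog.list);
	spin_lock_init(&qp->backlog.lock);
}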


@@ -15,6 +15,7 @@
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -48,6 +49,7 @@ struct hwrng {
/* internal. */
struct list_head list;
struct kref ref;
+struct work_struct cleanup_work;
struct completion cleanup_done;
struct completion dying;
};
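
The added cleanup_work pairs with the RCU conversion in the hwrng core: the final kref put can happen in a context that must not sleep, while tearing the device down has to wait out RCU readers and signal cleanup_done. Deferring to a workqueue resolves that; a sketch of the shape of the pattern, with assumed names rather than the core's actual code:

static void example_rng_release(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	/* Cannot synchronize_rcu() or otherwise sleep here; let process
	 * context finish the teardown and complete cleanup_done. */
	schedule_work(&rng->cleanup_work);
}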


@@ -819,7 +819,7 @@ static void __padata_free(struct padata_instance *pinst)
#define kobj2pinst(_kobj) \
container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr) \
-container_of(_attr, struct padata_sysfs_entry, attr)
+container_of_const(_attr, struct padata_sysfs_entry, attr)
static void padata_sysfs_release(struct kobject *kobj)
{
@@ -829,13 +829,13 @@ static void padata_sysfs_release(struct kobject *kobj)
struct padata_sysfs_entry {
struct attribute attr;
-ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
-ssize_t (*store)(struct padata_instance *, struct attribute *,
+ssize_t (*show)(struct padata_instance *, const struct attribute *, char *);
+ssize_t (*store)(struct padata_instance *, const struct attribute *,
const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
-struct attribute *attr, char *buf)
+const struct attribute *attr, char *buf)
{
struct cpumask *cpumask;
ssize_t len;
@@ -853,7 +853,7 @@ static ssize_t show_cpumask(struct padata_instance *pinst,
}
static ssize_t store_cpumask(struct padata_instance *pinst,
-struct attribute *attr,
+const struct attribute *attr,
const char *buf, size_t count)
{
cpumask_var_t new_cpumask;
@@ -880,10 +880,10 @@ out:
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name) \
-static struct padata_sysfs_entry _name##_attr = \
+static const struct padata_sysfs_entry _name##_attr = \
__ATTR(_name, 0644, _show_name, _store_name)
-#define PADATA_ATTR_RO(_name, _show_name) \
-static struct padata_sysfs_entry _name##_attr = \
+#define PADATA_ATTR_RO(_name, _show_name) \
+static const struct padata_sysfs_entry _name##_attr = \
__ATTR(_name, 0400, _show_name, NULL)
PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
@@ -894,7 +894,7 @@ PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
* serial_cpumask [RW] - cpumask for serial workers
* parallel_cpumask [RW] - cpumask for parallel workers
*/
-static struct attribute *padata_default_attrs[] = {
+static const struct attribute *const padata_default_attrs[] = {
&serial_cpumask_attr.attr,
&parallel_cpumask_attr.attr,
NULL,
@@ -904,8 +904,8 @@ ATTRIBUTE_GROUPS(padata_default);
static ssize_t padata_sysfs_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
+const struct padata_sysfs_entry *pentry;
struct padata_instance *pinst;
-struct padata_sysfs_entry *pentry;
ssize_t ret = -EIO;
pinst = kobj2pinst(kobj);
@@ -919,8 +919,8 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
+const struct padata_sysfs_entry *pentry;
struct padata_instance *pinst;
-struct padata_sysfs_entry *pentry;
ssize_t ret = -EIO;
pinst = kobj2pinst(kobj);