crypto: Enable context analysis

Enable context analysis for crypto subsystem.

This demonstrates a larger conversion to use Clang's context
analysis. The benefit is additional static checking of locking rules,
along with better documentation.

Note the use of the __acquire_ret macro to define an API where a
function returns a pointer to an object (struct scomp_scratch) with
its lock held. Additionally, the analysis only resolves aliases where
it can unambiguously see that a variable was not reassigned after
initialization, which required minor code changes.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-36-elver@google.com
This commit is contained in:
Marco Elver 2025-12-19 16:40:24 +01:00 committed by Peter Zijlstra
parent 87335b61a2
commit dc36d55d4e
11 changed files with 35 additions and 21 deletions

View file

@ -3,6 +3,8 @@
# Cryptographic API
#
CONTEXT_ANALYSIS := y
obj-$(CONFIG_CRYPTO) += crypto.o
crypto-y := api.o cipher.o

View file

@ -449,8 +449,8 @@ int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
}
EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
struct crypto_acomp_streams *s) __acquires(stream)
struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh(
struct crypto_acomp_streams *s)
{
struct crypto_acomp_stream __percpu *streams = s->streams;
int cpu = raw_smp_processor_id();
@ -469,7 +469,7 @@ struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
spin_lock(&ps->lock);
return ps;
}
EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
EXPORT_SYMBOL_GPL(_crypto_acomp_lock_stream_bh);
void acomp_walk_done_src(struct acomp_walk *walk, int used)
{

View file

@ -244,6 +244,7 @@ EXPORT_SYMBOL_GPL(crypto_remove_spawns);
static void crypto_alg_finish_registration(struct crypto_alg *alg,
struct list_head *algs_to_put)
__must_hold(&crypto_alg_sem)
{
struct crypto_alg *q;
@ -299,6 +300,7 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
static struct crypto_larval *
__crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
__must_hold(&crypto_alg_sem)
{
struct crypto_alg *q;
struct crypto_larval *larval;

View file

@ -57,6 +57,7 @@ EXPORT_SYMBOL_GPL(crypto_mod_put);
static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
u32 mask)
__must_hold_shared(&crypto_alg_sem)
{
struct crypto_alg *q, *alg = NULL;
int best = -2;

View file

@ -453,8 +453,8 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
crypto_init_queue(&engine->queue, qlen);
spin_lock_init(&engine->queue_lock);
crypto_init_queue(&engine->queue, qlen);
engine->kworker = kthread_run_worker(0, "%s", engine->name);
if (IS_ERR(engine->kworker)) {

View file

@ -232,6 +232,7 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
*/
static int drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
__must_hold(&drbg->drbg_mutex)
{
unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
int ret = 0;
@ -848,6 +849,7 @@ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
static inline int drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
__must_hold(&drbg->drbg_mutex)
{
int ret;
@ -862,6 +864,7 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg,
}
static int drbg_seed_from_random(struct drbg_state *drbg)
__must_hold(&drbg->drbg_mutex)
{
struct drbg_string data;
LIST_HEAD(seedlist);
@ -919,6 +922,7 @@ static bool drbg_nopr_reseed_interval_elapsed(struct drbg_state *drbg)
*/
static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
bool reseed)
__must_hold(&drbg->drbg_mutex)
{
int ret;
unsigned char entropy[((32 + 16) * 2)];
@ -1153,6 +1157,7 @@ err:
static int drbg_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct drbg_string *addtl)
__must_hold(&drbg->drbg_mutex)
{
int len = 0;
LIST_HEAD(addtllist);

View file

@ -61,8 +61,8 @@ enum {
/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct list_head crypto_alg_list __guarded_by(&crypto_alg_sem);
extern struct blocking_notifier_head crypto_chain;
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

View file

@ -19,17 +19,20 @@
#include "internal.h"
/*
 * seq_file iterator start: takes crypto_alg_sem for reading and positions
 * the iterator within crypto_alg_list. The semaphore is held across the
 * whole traversal and released in c_stop(); the __acquires_shared annotation
 * lets the context analysis check this pairing.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires_shared(&crypto_alg_sem)
{
	down_read(&crypto_alg_sem);
	return seq_list_start(&crypto_alg_list, *pos);
}
/*
 * seq_file iterator step: advances within crypto_alg_list. Annotated
 * __must_hold_shared because crypto_alg_sem is already read-held here
 * (taken by c_start()), not acquired by this function.
 */
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
	__must_hold_shared(&crypto_alg_sem)
{
	return seq_list_next(p, &crypto_alg_list, pos);
}
/*
 * seq_file iterator stop: drops the read lock taken in c_start().
 * The __releases_shared annotation closes the acquire/release pair
 * for the context analysis.
 */
static void c_stop(struct seq_file *m, void *p)
	__releases_shared(&crypto_alg_sem)
{
	up_read(&crypto_alg_sem);
}

View file

@ -28,8 +28,8 @@
struct scomp_scratch {
spinlock_t lock;
union {
void *src;
unsigned long saddr;
void *src __guarded_by(&lock);
unsigned long saddr __guarded_by(&lock);
};
};
@ -38,8 +38,8 @@ static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
};
static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
static int scomp_scratch_users __guarded_by(&scomp_lock);
static cpumask_t scomp_scratch_want;
static void scomp_scratch_workfn(struct work_struct *work);
@ -67,6 +67,7 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
}
static void crypto_scomp_free_scratches(void)
__context_unsafe(/* frees @scratch */)
{
struct scomp_scratch *scratch;
int i;
@ -101,7 +102,7 @@ static void scomp_scratch_workfn(struct work_struct *work)
struct scomp_scratch *scratch;
scratch = per_cpu_ptr(&scomp_scratch, cpu);
if (scratch->src)
if (context_unsafe(scratch->src))
continue;
if (scomp_alloc_scratch(scratch, cpu))
break;
@ -111,6 +112,7 @@ static void scomp_scratch_workfn(struct work_struct *work)
}
static int crypto_scomp_alloc_scratches(void)
__context_unsafe(/* allocates @scratch */)
{
unsigned int i = cpumask_first(cpu_possible_mask);
struct scomp_scratch *scratch;
@ -139,7 +141,8 @@ unlock:
return ret;
}
static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
#define scomp_lock_scratch(...) __acquire_ret(_scomp_lock_scratch(__VA_ARGS__), &__ret->lock)
static struct scomp_scratch *_scomp_lock_scratch(void) __acquires_ret
{
int cpu = raw_smp_processor_id();
struct scomp_scratch *scratch;
@ -159,7 +162,7 @@ static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
}
static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
__releases(scratch)
__releases(&scratch->lock)
{
spin_unlock(&scratch->lock);
}
@ -171,8 +174,6 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
bool src_isvirt = acomp_request_src_isvirt(req);
bool dst_isvirt = acomp_request_dst_isvirt(req);
struct crypto_scomp *scomp = *tfm_ctx;
struct crypto_acomp_stream *stream;
struct scomp_scratch *scratch;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
struct page *spage, *dpage;
@ -232,13 +233,12 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
} while (0);
}
stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
struct crypto_acomp_stream *stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
if (!src_isvirt && !src) {
const u8 *src;
struct scomp_scratch *scratch = scomp_lock_scratch();
const u8 *src = scratch->src;
scratch = scomp_lock_scratch();
src = scratch->src;
memcpy_from_sglist(scratch->src, req->src, 0, slen);
if (dir)

View file

@ -191,11 +191,12 @@ static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
struct crypto_acomp_streams *s) __acquires(stream);
#define crypto_acomp_lock_stream_bh(...) __acquire_ret(_crypto_acomp_lock_stream_bh(__VA_ARGS__), &__ret->lock);
struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh(
struct crypto_acomp_streams *s) __acquires_ret;
static inline void crypto_acomp_unlock_stream_bh(
struct crypto_acomp_stream *stream) __releases(stream)
struct crypto_acomp_stream *stream) __releases(&stream->lock)
{
spin_unlock_bh(&stream->lock);
}

View file

@ -45,7 +45,7 @@ struct crypto_engine {
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct crypto_queue queue __guarded_by(&queue_lock);
struct device *dev;
struct kthread_worker *kworker;