zsmalloc: rename pool lock
The old name comes from the times when the pool did not have compaction
(defragmentation).  Rename it to ->lock because these days it
synchronizes more than just migration.

Link: https://lkml.kernel.org/r/20250303022425.285971-13-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 7e1b0212d4
commit 0d6fa44e4e

1 changed file (mm/zsmalloc.c) with 19 additions and 19 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
@@ -18,7 +18,7 @@
 /*
  * lock ordering:
  *	page_lock
- *	pool->migrate_lock
+ *	pool->lock
  *	class->lock
  *	zspage->lock
  */
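
The hierarchy above is what the rest of the diff enforces: any path that needs both locks takes pool->lock before class->lock. A minimal userspace sketch of that ordering, with pthread locks standing in for the kernel's rwlock_t and spinlock_t (free_path and class0 are illustrative names, not zsmalloc identifiers):

#include <pthread.h>

/* Illustrative stand-ins for the kernel types, not zsmalloc code. */
struct size_class { pthread_mutex_t lock; };
struct zs_pool { pthread_rwlock_t lock; struct size_class class0; };

/* Mirrors the zs_free() pattern below: the outer pool->lock is taken
 * first and dropped once the inner class->lock is held. */
static void free_path(struct zs_pool *pool)
{
	pthread_rwlock_rdlock(&pool->lock);      /* outer: pool->lock */
	pthread_mutex_lock(&pool->class0.lock);  /* inner: class->lock */
	pthread_rwlock_unlock(&pool->lock);      /* safe: inner lock held */
	/* ... per-class bookkeeping happens under class->lock alone ... */
	pthread_mutex_unlock(&pool->class0.lock);
}

int main(void)
{
	struct zs_pool pool;

	pthread_rwlock_init(&pool.lock, NULL);
	pthread_mutex_init(&pool.class0.lock, NULL);
	free_path(&pool);
	return 0;
}
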
@@ -223,8 +223,8 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
 	struct work_struct free_work;
 #endif
-	/* protect page/zspage migration */
-	rwlock_t migrate_lock;
+	/* protect zspage migration/compaction */
+	rwlock_t lock;
 	atomic_t compaction_in_progress;
 };
 
@@ -1206,7 +1206,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	BUG_ON(in_interrupt());
 
 	/* It guarantees it can get zspage from handle safely */
-	read_lock(&pool->migrate_lock);
+	read_lock(&pool->lock);
 	obj = handle_to_obj(handle);
 	obj_to_location(obj, &zpdesc, &obj_idx);
 	zspage = get_zspage(zpdesc);
@@ -1218,7 +1218,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	 * which is smaller granularity.
 	 */
 	migrate_read_lock(zspage);
-	read_unlock(&pool->migrate_lock);
+	read_unlock(&pool->lock);
 
 	class = zspage_class(pool, zspage);
 	off = offset_in_page(class->size * obj_idx);
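
The two zs_map_object() hunks show a hand-over-hand transition: the pool-wide reader lock pins the zspage only long enough to take the finer per-zspage migration lock, then gets dropped. A sketch of that movement under the same pthread stand-ins as above (map_path and page0 are made-up names for illustration):

#include <pthread.h>

/* Illustrative stand-ins, not zsmalloc code: pool->lock is the
 * pool-wide rwlock; each zspage carries its own migration rwlock. */
struct zspage { pthread_rwlock_t migrate_lock; };
struct zs_pool { pthread_rwlock_t lock; struct zspage page0; };

static void map_path(struct zs_pool *pool)
{
	struct zspage *zspage;

	pthread_rwlock_rdlock(&pool->lock); /* zspage cannot migrate away */
	zspage = &pool->page0;              /* "handle -> zspage" lookup */

	/* Hand-over-hand: pin the zspage with its own, smaller-granularity
	 * lock, then drop the pool-wide lock so other paths can proceed. */
	pthread_rwlock_rdlock(&zspage->migrate_lock);
	pthread_rwlock_unlock(&pool->lock);

	/* ... access object data under the per-zspage lock ... */
	pthread_rwlock_unlock(&zspage->migrate_lock);
}

int main(void)
{
	struct zs_pool pool;

	pthread_rwlock_init(&pool.lock, NULL);
	pthread_rwlock_init(&pool.page0.migrate_lock, NULL);
	map_path(&pool);
	return 0;
}
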
@@ -1450,16 +1450,16 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 		return;
 
 	/*
-	 * The pool->migrate_lock protects the race with zpage's migration
+	 * The pool->lock protects the race with zpage's migration
 	 * so it's safe to get the page from handle.
 	 */
-	read_lock(&pool->migrate_lock);
+	read_lock(&pool->lock);
 	obj = handle_to_obj(handle);
 	obj_to_zpdesc(obj, &f_zpdesc);
 	zspage = get_zspage(f_zpdesc);
 	class = zspage_class(pool, zspage);
 	spin_lock(&class->lock);
-	read_unlock(&pool->migrate_lock);
+	read_unlock(&pool->lock);
 
 	class_stat_sub(class, ZS_OBJS_INUSE, 1);
 	obj_free(class->size, obj);
@@ -1796,7 +1796,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	 * The pool migrate_lock protects the race between zpage migration
 	 * and zs_free.
 	 */
-	write_lock(&pool->migrate_lock);
+	write_lock(&pool->lock);
 	class = zspage_class(pool, zspage);
 
 	/*
@@ -1833,7 +1833,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release migration_lock.
 	 */
-	write_unlock(&pool->migrate_lock);
+	write_unlock(&pool->lock);
 	spin_unlock(&class->lock);
 	migrate_write_unlock(zspage);
 
@@ -1956,7 +1956,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 	 * protect the race between zpage migration and zs_free
 	 * as well as zpage allocation/free
 	 */
-	write_lock(&pool->migrate_lock);
+	write_lock(&pool->lock);
 	spin_lock(&class->lock);
 	while (zs_can_compact(class)) {
 		int fg;
@@ -1983,14 +1983,14 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		src_zspage = NULL;
 
 		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
-		    || rwlock_is_contended(&pool->migrate_lock)) {
+		    || rwlock_is_contended(&pool->lock)) {
 			putback_zspage(class, dst_zspage);
 			dst_zspage = NULL;
 
 			spin_unlock(&class->lock);
-			write_unlock(&pool->migrate_lock);
+			write_unlock(&pool->lock);
 			cond_resched();
-			write_lock(&pool->migrate_lock);
+			write_lock(&pool->lock);
 			spin_lock(&class->lock);
 		}
 	}
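
The hunk above is a classic lock-break: when the compacting writer notices contention on pool->lock, it drops both locks, yields, and reacquires them in the documented order. A rough userspace equivalent follows; pthreads has no rwlock_is_contended(), so should_yield() below is a hypothetical stand-in, and sched_yield() approximates cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

/* Hypothetical contention probe; the kernel uses rwlock_is_contended(). */
static bool should_yield(int iter)
{
	return (iter % 4) == 3; /* pretend every 4th pass sees waiters */
}

static void compact_loop(pthread_rwlock_t *pool_lock,
			 pthread_mutex_t *class_lock)
{
	pthread_rwlock_wrlock(pool_lock);
	pthread_mutex_lock(class_lock);

	for (int i = 0; i < 16; i++) {
		/* ... migrate one batch of objects ... */

		if (should_yield(i)) {
			/* Lock-break: release inner-out, reschedule, then
			 * reacquire outer-in, preserving the lock order. */
			pthread_mutex_unlock(class_lock);
			pthread_rwlock_unlock(pool_lock);
			sched_yield(); /* userspace cond_resched() */
			pthread_rwlock_wrlock(pool_lock);
			pthread_mutex_lock(class_lock);
		}
	}

	pthread_mutex_unlock(class_lock);
	pthread_rwlock_unlock(pool_lock);
}

int main(void)
{
	pthread_rwlock_t pool_lock;
	pthread_mutex_t class_lock;

	pthread_rwlock_init(&pool_lock, NULL);
	pthread_mutex_init(&class_lock, NULL);
	compact_loop(&pool_lock, &class_lock);
	return 0;
}
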
@@ -2002,7 +2002,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		putback_zspage(class, dst_zspage);
 
 	spin_unlock(&class->lock);
-	write_unlock(&pool->migrate_lock);
+	write_unlock(&pool->lock);
 
 	return pages_freed;
 }
@@ -2014,10 +2014,10 @@ unsigned long zs_compact(struct zs_pool *pool)
 	unsigned long pages_freed = 0;
 
 	/*
-	 * Pool compaction is performed under pool->migrate_lock so it is basically
+	 * Pool compaction is performed under pool->lock so it is basically
 	 * single-threaded. Having more than one thread in __zs_compact()
-	 * will increase pool->migrate_lock contention, which will impact other
-	 * zsmalloc operations that need pool->migrate_lock.
+	 * will increase pool->lock contention, which will impact other
+	 * zsmalloc operations that need pool->lock.
 	 */
 	if (atomic_xchg(&pool->compaction_in_progress, 1))
 		return 0;
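
The atomic_xchg() here acts as a try-lock gate: the thread that swaps 0 for 1 gets to compact, and everyone else returns immediately instead of piling onto pool->lock. A C11 sketch of the same pattern (my_compact is an illustrative name, not a zsmalloc function):

#include <stdatomic.h>

static atomic_int compaction_in_progress; /* zero-initialized */

static unsigned long my_compact(void)
{
	unsigned long pages_freed = 0;

	/* Whoever swaps 0 -> 1 wins; losers bail out immediately. */
	if (atomic_exchange(&compaction_in_progress, 1))
		return 0;

	/* ... __zs_compact()-style work, done under pool->lock ... */

	atomic_store(&compaction_in_progress, 0); /* reopen the gate */
	return pages_freed;
}

int main(void)
{
	return (int)my_compact();
}
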
@@ -2139,7 +2139,7 @@ struct zs_pool *zs_create_pool(const char *name)
 		return NULL;
 
 	init_deferred_free(pool);
-	rwlock_init(&pool->migrate_lock);
+	rwlock_init(&pool->lock);
 	atomic_set(&pool->compaction_in_progress, 0);
 
 	pool->name = kstrdup(name, GFP_KERNEL);