mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:44:45 +01:00
drm/gpuvm: drm_gpuvm_bo_obtain() requires lock and staged mode
In commit 9ce4aef9a5 ("drm/gpuvm: take GEM lock inside drm_gpuvm_bo_obtain_prealloc()") we update drm_gpuvm_bo_obtain_prealloc() to take locks internally, which means that it's only usable in immediate mode. In this commit, we notice that drm_gpuvm_bo_obtain() requires you to use staged mode. This means that we now have one variant of obtain for each mode you might use gpuvm in. To reflect this information, we add a warning about using it in immediate mode, and to make the distinction clearer we rename the method with a _locked() suffix so that it's clear that it requires the caller to take the locks. Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> Signed-off-by: Alice Ryhl <aliceryhl@google.com> Link: https://patch.msgid.link/20260108-gpuvm-rust-v2-2-dbd014005a0b@google.com [ Slightly reword commit message to refer to commit 9ce4aef9a5 ("drm/gpuvm: take GEM lock inside drm_gpuvm_bo_obtain_prealloc()"). - Danilo ] Signed-off-by: Danilo Krummrich <dakr@kernel.org>
This commit is contained in:
parent
44e4c88951
commit
9bf4ca1e69
7 changed files with 21 additions and 11 deletions
|
|
@ -1825,16 +1825,26 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
|
|||
* count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
|
||||
* &drm_gpuvm_bo.
|
||||
*
|
||||
* Requires the lock for the GEMs gpuva list.
|
||||
*
|
||||
* A new &drm_gpuvm_bo is added to the GEMs gpuva list.
|
||||
*
|
||||
* Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
|
||||
*/
|
||||
struct drm_gpuvm_bo *
|
||||
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
|
||||
struct drm_gem_object *obj)
|
||||
drm_gpuvm_bo_obtain_locked(struct drm_gpuvm *gpuvm,
|
||||
struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_gpuvm_bo *vm_bo;
|
||||
|
||||
/*
|
||||
* In immediate mode this would require the caller to hold the GEMs
|
||||
* gpuva mutex, but it's not okay to allocate while holding that lock,
|
||||
* and this method allocates. Immediate mode drivers should use
|
||||
* drm_gpuvm_bo_obtain_prealloc() instead.
|
||||
*/
|
||||
drm_WARN_ON(gpuvm->drm, drm_gpuvm_immediate_mode(gpuvm));
|
||||
|
||||
vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
|
||||
if (vm_bo)
|
||||
return vm_bo;
|
||||
|
|
@ -1848,7 +1858,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
|
|||
|
||||
return vm_bo;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
|
||||
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_locked);
|
||||
|
||||
/**
|
||||
* drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
|
||||
|
|
|
|||
|
|
@ -256,7 +256,7 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
|
|||
bind_op->type = PVR_VM_BIND_TYPE_MAP;
|
||||
|
||||
dma_resv_lock(obj->resv, NULL);
|
||||
bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
|
||||
bind_op->gpuvm_bo = drm_gpuvm_bo_obtain_locked(&vm_ctx->gpuvm_mgr, obj);
|
||||
dma_resv_unlock(obj->resv);
|
||||
if (IS_ERR(bind_op->gpuvm_bo))
|
||||
return PTR_ERR(bind_op->gpuvm_bo);
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ struct msm_gem_vm_log_entry {
|
|||
* embedded in any larger driver structure. The GEM object holds a list of
|
||||
* drm_gpuvm_bo, which in turn holds a list of msm_gem_vma. A linked vma
|
||||
* holds a reference to the vm_bo, and drops it when the vma is unlinked.
|
||||
* So we just need to call drm_gpuvm_bo_obtain() to return a ref to an
|
||||
* So we just need to call drm_gpuvm_bo_obtain_locked() to return a ref to an
|
||||
* existing vm_bo, or create a new one. Once the vma is linked, the ref
|
||||
* to the vm_bo can be dropped (since the vma is holding one).
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -413,7 +413,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
|
|||
if (!obj)
|
||||
return &vma->base;
|
||||
|
||||
vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
|
||||
vm_bo = drm_gpuvm_bo_obtain_locked(&vm->base, obj);
|
||||
if (IS_ERR(vm_bo)) {
|
||||
ret = PTR_ERR(vm_bo);
|
||||
goto err_va_remove;
|
||||
|
|
|
|||
|
|
@ -1275,7 +1275,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
|
|||
return -ENOENT;
|
||||
|
||||
dma_resv_lock(obj->resv, NULL);
|
||||
op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
|
||||
op->vm_bo = drm_gpuvm_bo_obtain_locked(&uvmm->base, obj);
|
||||
dma_resv_unlock(obj->resv);
|
||||
if (IS_ERR(op->vm_bo))
|
||||
return PTR_ERR(op->vm_bo);
|
||||
|
|
|
|||
|
|
@ -1022,7 +1022,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
|
|||
|
||||
xe_bo_assert_held(bo);
|
||||
|
||||
vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
|
||||
vm_bo = drm_gpuvm_bo_obtain_locked(vma->gpuva.vm, &bo->ttm.base);
|
||||
if (IS_ERR(vm_bo)) {
|
||||
xe_vma_free(vma);
|
||||
return ERR_CAST(vm_bo);
|
||||
|
|
@ -2269,7 +2269,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
|
|||
if (err)
|
||||
return ERR_PTR(err);
|
||||
|
||||
vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
|
||||
vm_bo = drm_gpuvm_bo_obtain_locked(&vm->gpuvm, obj);
|
||||
if (IS_ERR(vm_bo)) {
|
||||
xe_bo_unlock(bo);
|
||||
return ERR_CAST(vm_bo);
|
||||
|
|
|
|||
|
|
@ -736,8 +736,8 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
|
|||
struct drm_gem_object *obj);
|
||||
|
||||
struct drm_gpuvm_bo *
|
||||
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
|
||||
struct drm_gem_object *obj);
|
||||
drm_gpuvm_bo_obtain_locked(struct drm_gpuvm *gpuvm,
|
||||
struct drm_gem_object *obj);
|
||||
struct drm_gpuvm_bo *
|
||||
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue