drm/xe/bo: Update atomic_access attribute on madvise

Update the bo's atomic_access attribute based on user-provided input and
use it to determine whether the bo should be migrated to smem during a
CPU fault.
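
For context, a rough userspace sketch of how a range's atomic attribute could
be set through the madvise ioctl is shown below. Only struct drm_xe_madvise,
its type/atomic.val fields, DRM_XE_MEM_RANGE_ATTR_ATOMIC and DRM_XE_ATOMIC_GLOBAL
are taken from this series; the DRM_IOCTL_XE_MADVISE number and the
vm_id/start/range field names are assumptions for illustration only.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "xe_drm.h"

/*
 * Illustrative sketch only: request global atomics on a VM range so that a
 * later CPU fault on a VRAM-resident bo migrates it to smem. The
 * vm_id/start/range field names and DRM_IOCTL_XE_MADVISE are assumed here;
 * type, atomic.val and the DRM_XE_* values come from this series.
 */
static int set_range_atomic_global(int fd, uint32_t vm_id,
				   uint64_t start, uint64_t range)
{
	struct drm_xe_madvise op;

	memset(&op, 0, sizeof(op));
	op.vm_id = vm_id;                       /* assumed field name */
	op.start = start;                       /* assumed field name */
	op.range = range;                       /* assumed field name */
	op.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC; /* from this series */
	op.atomic.val = DRM_XE_ATOMIC_GLOBAL;   /* from this series */

	return drmIoctl(fd, DRM_IOCTL_XE_MADVISE, &op); /* assumed ioctl name */
}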

v2 (Matthew Brost)
- Avoid CPU unmapping if the bo is already in smem
- Check atomics on smem too for the ioctl
- Add comments

v3
- Avoid migration in prefetch

v4 (Matthew Brost)
- Make sanity check function bool
- Add assert for smem placement
- Fix doc

v5 (Matthew Brost)
- NACK atomic fault with DRM_XE_ATOMIC_CPU

Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250821173104.3030148-16-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Himal Prasad Ghimiray, 2025-08-21 23:00:59 +05:30
commit 293032eec4 (parent 072e299982)
4 changed files with 102 additions and 28 deletions


@@ -1712,6 +1712,18 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
static bool should_migrate_to_smem(struct xe_bo *bo)
{
/*
* NOTE: The following atomic checks are platform-specific. For example,
* if a device supports CXL atomics, these may not be necessary or
* may behave differently.
*/
return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
}
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
@@ -1720,7 +1732,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
struct xe_bo *bo = ttm_to_xe_bo(tbo);
bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
vm_fault_t ret;
int idx;
int idx, r = 0;
if (needs_rpm)
xe_pm_runtime_get(xe);
@@ -1732,8 +1744,19 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
if (drm_dev_enter(ddev, &idx)) {
trace_xe_bo_cpu_fault(bo);
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT);
if (should_migrate_to_smem(bo)) {
xe_assert(xe, bo->flags & XE_BO_FLAG_SYSTEM);
r = xe_bo_migrate(bo, XE_PL_TT);
if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
ret = VM_FAULT_NOPAGE;
else if (r)
ret = VM_FAULT_SIGBUS;
}
if (!ret)
ret = ttm_bo_vm_fault_reserved(vmf,
vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT);
drm_dev_exit(idx);
if (ret == VM_FAULT_RETRY &&


@@ -75,7 +75,7 @@ static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
}
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
bool atomic, struct xe_vram_region *vram)
bool need_vram_move, struct xe_vram_region *vram)
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
@@ -85,26 +85,13 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
if (err)
return err;
if (atomic && vram) {
xe_assert(vm->xe, IS_DGFX(vm->xe));
if (!bo)
return 0;
if (xe_vma_is_userptr(vma)) {
err = -EACCES;
return err;
}
err = need_vram_move ? xe_bo_migrate(bo, vram->placement) :
xe_bo_validate(bo, vm, true);
/* Migrate to VRAM, move should invalidate the VMA first */
err = xe_bo_migrate(bo, vram->placement);
if (err)
return err;
} else if (bo) {
/* Create backing store if needed */
err = xe_bo_validate(bo, vm, true);
if (err)
return err;
}
return 0;
return err;
}
static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
@@ -115,10 +102,14 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
struct drm_exec exec;
struct dma_fence *fence;
ktime_t end = 0;
int err;
int err, needs_vram;
lockdep_assert_held_write(&vm->lock);
needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma)))
return needs_vram < 0 ? needs_vram : -EACCES;
xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
@@ -141,7 +132,7 @@ retry_userptr:
/* Lock VM and BOs dma-resv */
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
err = xe_pf_begin(&exec, vma, atomic, tile->mem.vram);
err = xe_pf_begin(&exec, vma, needs_vram == 1, tile->mem.vram);
drm_exec_retry_on_contention(&exec);
if (xe_vm_validate_should_retry(&exec, err, &end))
err = -EAGAIN;
@@ -576,7 +567,7 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
/* Lock VM and BOs dma-resv */
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
ret = xe_pf_begin(&exec, vma, true, tile->mem.vram);
ret = xe_pf_begin(&exec, vma, IS_DGFX(vm->xe), tile->mem.vram);
drm_exec_retry_on_contention(&exec);
if (ret)
break;


@@ -4242,15 +4242,18 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
*/
int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
{
u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
vma->attr.atomic_access;
if (!IS_DGFX(xe) || !is_atomic)
return 0;
return false;
/*
* NOTE: The checks implemented here are platform-specific. For
* instance, on a device supporting CXL atomics, these would ideally
* work universally without additional handling.
*/
switch (vma->attr.atomic_access) {
switch (atomic_access) {
case DRM_XE_ATOMIC_DEVICE:
return !xe->info.has_device_atomics_on_smem;


@@ -102,6 +102,7 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
struct xe_vma **vmas, int num_vmas,
struct drm_xe_madvise *op)
{
struct xe_bo *bo;
int i;
xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC);
@@ -114,7 +115,19 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
continue;
vmas[i]->attr.atomic_access = op->atomic.val;
/*TODO: handle bo backed vmas */
bo = xe_vma_bo(vmas[i]);
if (!bo)
continue;
xe_bo_assert_held(bo);
bo->attr.atomic_access = op->atomic.val;
/* Invalidate cpu page table, so bo can migrate to smem in next access */
if (xe_bo_is_vram(bo) &&
(bo->attr.atomic_access == DRM_XE_ATOMIC_CPU ||
bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL))
ttm_bo_unmap_virtual(&bo->ttm);
}
}
@@ -262,6 +275,41 @@ static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madv
return true;
}
static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas,
int num_vmas, u32 atomic_val)
{
struct xe_device *xe = vm->xe;
struct xe_bo *bo;
int i;
for (i = 0; i < num_vmas; i++) {
bo = xe_vma_bo(vmas[i]);
if (!bo)
continue;
/*
* NOTE: The following atomic checks are platform-specific. For example,
* if a device supports CXL atomics, these may not be necessary or
* may behave differently.
*/
if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_CPU &&
!(bo->flags & XE_BO_FLAG_SYSTEM)))
return false;
if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_DEVICE &&
!(bo->flags & XE_BO_FLAG_VRAM0) &&
!(bo->flags & XE_BO_FLAG_VRAM1) &&
!(bo->flags & XE_BO_FLAG_SYSTEM &&
xe->info.has_device_atomics_on_smem)))
return false;
if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_GLOBAL &&
(!(bo->flags & XE_BO_FLAG_SYSTEM) ||
(!(bo->flags & XE_BO_FLAG_VRAM0) &&
!(bo->flags & XE_BO_FLAG_VRAM1)))))
return false;
}
return true;
}
/**
* xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
* @dev: DRM device pointer
@@ -313,6 +361,15 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
goto unlock_vm;
if (madvise_range.has_bo_vmas) {
if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
if (!check_bo_args_are_sane(vm, madvise_range.vmas,
madvise_range.num_vmas,
args->atomic.val)) {
err = -EINVAL;
goto unlock_vm;
}
}
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
for (int i = 0; i < madvise_range.num_vmas; i++) {