drm/amdgpu: add amdgpu_ttm_job_submit helper

Deduplicate the IB padding code. The new helper will also be used
later to check locking.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
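
For context, a minimal sketch of the call-site pattern this helper consolidates
(illustrative only, not part of the patch; it assumes the usual buffer_funcs
job setup that surrounds these calls in amdgpu_ttm.c):

	/* Before: each caller padded the IB and checked its size by hand. */
	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	fence = amdgpu_job_submit(job);

	/* After: one helper pads, checks and submits, giving a single place
	 * to hook a locking check in front of the submission later on.
	 */
	fence = amdgpu_ttm_job_submit(adev, job, num_dw);
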
@@ -162,6 +162,18 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	*placement = abo->placement;
 }
 
+static struct dma_fence *
+amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_job *job,
+		      u32 num_dw)
+{
+	struct amdgpu_ring *ring;
+
+	ring = adev->mman.buffer_funcs_ring;
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+	WARN_ON(job->ibs[0].length_dw > num_dw);
+	return amdgpu_job_submit(job);
+}
+
 /**
  * amdgpu_ttm_map_buffer - Map memory into the GART windows
  * @bo: buffer object to map
@@ -184,7 +196,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	unsigned int offset, num_pages, num_dw, num_bytes;
 	uint64_t src_addr, dst_addr;
-	struct amdgpu_ring *ring;
 	struct amdgpu_job *job;
 	void *cpu_addr;
 	uint64_t flags;
@@ -239,10 +250,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
 				dst_addr, num_bytes, 0);
 
-	ring = adev->mman.buffer_funcs_ring;
-	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-	WARN_ON(job->ibs[0].length_dw > num_dw);
-
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
 	if (tmz)
 		flags |= AMDGPU_PTE_TMZ;
@@ -260,7 +267,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 		amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
 	}
 
-	dma_fence_put(amdgpu_job_submit(job));
+	dma_fence_put(amdgpu_ttm_job_submit(adev, job, num_dw));
 
 	return 0;
 }
@@ -1577,10 +1584,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
 				PAGE_SIZE, 0);
 
-	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
-	WARN_ON(job->ibs[0].length_dw > num_dw);
-
-	fence = amdgpu_job_submit(job);
+	fence = amdgpu_ttm_job_submit(adev, job, num_dw);
 	mutex_unlock(&adev->mman.gtt_window_lock);
 
 	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
@@ -2401,11 +2405,9 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
 		byte_count -= cur_size_in_bytes;
 	}
 
-	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-	WARN_ON(job->ibs[0].length_dw > num_dw);
-
-	*fence = amdgpu_job_submit(job);
 	if (r)
 		goto error_free;
 
+	*fence = amdgpu_ttm_job_submit(adev, job, num_dw);
+
 	return r;
@@ -2423,7 +2425,6 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
 			       u64 k_job_id)
 {
 	unsigned int num_loops, num_dw;
-	struct amdgpu_ring *ring;
 	struct amdgpu_job *job;
 	uint32_t max_bytes;
 	unsigned int i;
@@ -2447,10 +2448,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
 		byte_count -= cur_size;
 	}
 
-	ring = adev->mman.buffer_funcs_ring;
-	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-	WARN_ON(job->ibs[0].length_dw > num_dw);
-	*fence = amdgpu_job_submit(job);
+	*fence = amdgpu_ttm_job_submit(adev, job, num_dw);
 
 	return 0;
 }