drm/amdgpu: remove the ring param from ttm functions

With the removal of the direct_submit argument, the ring param
becomes useless: jobs are always submitted to the buffer_funcs_ring.

Some functions gain an amdgpu_device argument, since they previously
obtained it from the ring arg.

---
v4: remove adev param from amdgpu_ttm_map_buffer
---

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 42cbb68ce8 (parent 3272cd887f)
Authored by Pierre-Eric Pelloux-Prayer on 2025-11-17 10:53:14 +01:00; committed by Alex Deucher
4 files changed, 21 insertions(+), 21 deletions(-)
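
For reference, a minimal sketch of what the API change means for a caller (illustrative only: example_copy(), saddr, daddr and size are made up for this note; the real call-site updates are in the hunks below):

	/* Hypothetical caller, before and after this patch. */
	static int example_copy(struct amdgpu_device *adev, uint64_t saddr,
				uint64_t daddr, uint32_t size)
	{
		struct dma_fence *fence = NULL;
		int r;

		/* Old API: the caller looked up the SDMA ring itself:
		 *   struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
		 *   r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL,
		 *                          &fence, false, 0);
		 */

		/* New API: pass the device; amdgpu_copy_buffer() resolves
		 * adev->mman.buffer_funcs_ring internally. */
		r = amdgpu_copy_buffer(adev, saddr, daddr, size, NULL, &fence,
				       false, 0);
		if (!r && fence) {
			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}
		return r;
	}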

drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c

@@ -37,8 +37,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 	stime = ktime_get();
 	for (i = 0; i < n; i++) {
-		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+		r = amdgpu_copy_buffer(adev, saddr, daddr, size, NULL, &fence,
 				       false, 0);
 		if (r)
 			goto exit_do_move;

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -168,7 +168,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
  * @mem: memory object to map
  * @mm_cur: range to map
  * @window: which GART window to use
- * @ring: DMA ring to use for the copy
  * @tmz: if we should setup a TMZ enabled mapping
  * @size: in number of bytes to map, out number of bytes mapped
  * @addr: resulting address inside the MC address space
@@ -179,12 +178,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 				 struct ttm_resource *mem,
 				 struct amdgpu_res_cursor *mm_cur,
-				 unsigned int window, struct amdgpu_ring *ring,
+				 unsigned int window,
 				 bool tmz, uint64_t *size, uint64_t *addr)
 {
-	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	unsigned int offset, num_pages, num_dw, num_bytes;
 	uint64_t src_addr, dst_addr;
+	struct amdgpu_ring *ring;
 	struct amdgpu_job *job;
 	void *cpu_addr;
 	uint64_t flags;
@@ -239,6 +239,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
 				dst_addr, num_bytes, 0);
+	ring = adev->mman.buffer_funcs_ring;
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
@@ -286,7 +287,6 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 				       struct dma_resv *resv,
 				       struct dma_fence **f)
 {
-	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct amdgpu_res_cursor src_mm, dst_mm;
 	struct dma_fence *fence = NULL;
 	int r = 0;
@@ -313,12 +313,12 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		/* Map src to window 0 and dst to window 1. */
 		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
-					  0, ring, tmz, &cur_size, &from);
+					  0, tmz, &cur_size, &from);
 		if (r)
 			goto error;
 		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
-					  1, ring, tmz, &cur_size, &to);
+					  1, tmz, &cur_size, &to);
 		if (r)
 			goto error;
@@ -345,7 +345,7 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 						       write_compress_disable));
 		}
-		r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
+		r = amdgpu_copy_buffer(adev, from, to, cur_size, resv,
 				       &next, true, copy_flags);
 		if (r)
 			goto error;
@@ -2349,19 +2349,21 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 					     DMA_RESV_USAGE_BOOKKEEP);
 }
-int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
 		       struct dma_resv *resv,
 		       struct dma_fence **fence,
 		       bool vm_needs_flush, uint32_t copy_flags)
 {
-	struct amdgpu_device *adev = ring->adev;
 	unsigned int num_loops, num_dw;
+	struct amdgpu_ring *ring;
 	struct amdgpu_job *job;
 	uint32_t max_bytes;
 	unsigned int i;
 	int r;
+	ring = adev->mman.buffer_funcs_ring;
 	if (!ring->sched.ready) {
 		dev_err(adev->dev,
 			"Trying to move memory with ring turned off.\n");
@@ -2401,15 +2403,15 @@ error_free:
 	return r;
 }
-static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
+static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev, uint32_t src_data,
 			       uint64_t dst_addr, uint32_t byte_count,
 			       struct dma_resv *resv,
 			       struct dma_fence **fence,
 			       bool vm_needs_flush, bool delayed,
 			       u64 k_job_id)
 {
-	struct amdgpu_device *adev = ring->adev;
 	unsigned int num_loops, num_dw;
+	struct amdgpu_ring *ring;
 	struct amdgpu_job *job;
 	uint32_t max_bytes;
 	unsigned int i;
@@ -2433,6 +2435,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
 		byte_count -= cur_size;
 	}
+	ring = adev->mman.buffer_funcs_ring;
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 	*fence = amdgpu_job_submit(job);
@@ -2455,7 +2458,6 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 			    struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct amdgpu_res_cursor cursor;
 	u64 addr;
 	int r = 0;
@@ -2484,11 +2486,11 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 		size = min(cursor.size, 256ULL << 20);
 		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
-					  1, ring, false, &size, &addr);
+					  1, false, &size, &addr);
 		if (r)
 			goto err;
-		r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
+		r = amdgpu_ttm_fill_mem(adev, 0, addr, size, resv,
 					&next, true, true,
 					AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
 		if (r)
@@ -2513,7 +2515,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       u64 k_job_id)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct dma_fence *fence = NULL;
 	struct amdgpu_res_cursor dst;
 	int r;
@@ -2535,11 +2536,11 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		cur_size = min(dst.size, 256ULL << 20);
 		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
-					  1, ring, false, &cur_size, &to);
+					  1, false, &cur_size, &to);
 		if (r)
 			goto error;
-		r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
+		r = amdgpu_ttm_fill_mem(adev, src_data, to, cur_size, resv,
 					&next, true, delayed, k_job_id);
 		if (r)
 			goto error;

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -163,7 +163,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
 					bool enable);
-int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+int amdgpu_copy_buffer(struct amdgpu_device *adev, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
 		       struct dma_resv *resv,
 		       struct dma_fence **fence,

drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

@@ -152,7 +152,7 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
 			goto out_unlock;
 		}
-		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
+		r = amdgpu_copy_buffer(adev, gart_s, gart_d, size * PAGE_SIZE,
 				       NULL, &next, true, 0);
 		if (r) {
 			dev_err(adev->dev, "fail %d to copy memory\n", r);