mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:04:43 +01:00
drm/amdgpu: give each kernel job a unique id
Userspace jobs have drm_file.client_id as a unique identifier of their owner. For kernel jobs, we can allocate arbitrary values - the risk of overlap with userspace ids is small (given that it's a u64 value). In the unlikely case an overlap happens, it will only impact trace events. Since this ID is traced in the gpu_scheduler trace events, it allows determining the source of each job sent to the hardware. To make grepping easier, the IDs are defined as they will appear in the trace output. Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> Acked-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com> Link: https://lore.kernel.org/r/20250604122827.2191-1-pierre-eric.pelloux-prayer@amd.com
This commit is contained in:
parent
f3e8293685
commit
256576ed68
19 changed files with 84 additions and 41 deletions
|
|
@ -1474,7 +1474,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
|
|||
owner = (void *)(unsigned long)atomic_inc_return(&counter);
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
|
||||
64, 0, &job);
|
||||
64, 0, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
|
|
|
|||
|
|
@ -690,7 +690,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
|
|||
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
|
||||
&job);
|
||||
&job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
|
||||
if (r)
|
||||
goto error_alloc;
|
||||
|
||||
|
|
|
|||
|
|
@ -209,11 +209,12 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
|
||||
struct drm_sched_entity *entity, void *owner,
|
||||
size_t size, enum amdgpu_ib_pool_type pool_type,
|
||||
struct amdgpu_job **job)
|
||||
struct amdgpu_job **job, u64 k_job_id)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
|
||||
r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
|
||||
k_job_id);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
|
|
@ -44,6 +44,22 @@
|
|||
struct amdgpu_fence;
|
||||
enum amdgpu_ib_pool_type;
|
||||
|
||||
/* Internal kernel job ids. (decreasing values, starting from U64_MAX). */
|
||||
#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL)
|
||||
#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL)
|
||||
|
||||
struct amdgpu_job {
|
||||
struct drm_sched_job base;
|
||||
struct amdgpu_vm *vm;
|
||||
|
|
@ -96,7 +112,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
|
||||
struct drm_sched_entity *entity, void *owner,
|
||||
size_t size, enum amdgpu_ib_pool_type pool_type,
|
||||
struct amdgpu_job **job);
|
||||
struct amdgpu_job **job,
|
||||
u64 k_job_id);
|
||||
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
|
||||
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
|
||||
void amdgpu_job_free_resources(struct amdgpu_job *job);
|
||||
|
|
|
|||
|
|
@ -194,7 +194,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
AMDGPU_IB_POOL_DIRECT, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
|
|
@ -1313,7 +1313,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
|
|||
if (r)
|
||||
goto out;
|
||||
|
||||
r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
|
||||
r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
|
||||
AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
|
||||
if (WARN_ON(r))
|
||||
goto out;
|
||||
|
||||
|
|
|
|||
|
|
@ -226,7 +226,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
|
|||
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
num_dw * 4 + num_bytes,
|
||||
AMDGPU_IB_POOL_DELAYED, &job);
|
||||
AMDGPU_IB_POOL_DELAYED, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -406,7 +407,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
|
|||
struct dma_fence *wipe_fence = NULL;
|
||||
|
||||
r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
|
||||
false);
|
||||
false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
|
||||
if (r) {
|
||||
goto error;
|
||||
} else if (wipe_fence) {
|
||||
|
|
@ -1510,7 +1511,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
|
|||
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
|
||||
&job);
|
||||
&job,
|
||||
AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
|
|
@ -2167,7 +2169,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
|
|||
struct dma_resv *resv,
|
||||
bool vm_needs_flush,
|
||||
struct amdgpu_job **job,
|
||||
bool delayed)
|
||||
bool delayed, u64 k_job_id)
|
||||
{
|
||||
enum amdgpu_ib_pool_type pool = direct_submit ?
|
||||
AMDGPU_IB_POOL_DIRECT :
|
||||
|
|
@ -2177,7 +2179,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
|
|||
&adev->mman.high_pr;
|
||||
r = amdgpu_job_alloc_with_ib(adev, entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
num_dw * 4, pool, job);
|
||||
num_dw * 4, pool, job, k_job_id);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -2217,7 +2219,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
|
|||
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
|
||||
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
|
||||
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
|
||||
resv, vm_needs_flush, &job, false);
|
||||
resv, vm_needs_flush, &job, false,
|
||||
AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -2252,7 +2255,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
|
|||
uint64_t dst_addr, uint32_t byte_count,
|
||||
struct dma_resv *resv,
|
||||
struct dma_fence **fence,
|
||||
bool vm_needs_flush, bool delayed)
|
||||
bool vm_needs_flush, bool delayed,
|
||||
u64 k_job_id)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
unsigned int num_loops, num_dw;
|
||||
|
|
@ -2265,7 +2269,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
|
|||
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
|
||||
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
|
||||
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
|
||||
&job, delayed);
|
||||
&job, delayed, k_job_id);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -2335,7 +2339,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
|
|||
goto err;
|
||||
|
||||
r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
|
||||
&next, true, true);
|
||||
&next, true, true,
|
||||
AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
|
|
@ -2354,7 +2359,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
|||
uint32_t src_data,
|
||||
struct dma_resv *resv,
|
||||
struct dma_fence **f,
|
||||
bool delayed)
|
||||
bool delayed,
|
||||
u64 k_job_id)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
|
||||
|
|
@ -2384,7 +2390,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
|||
goto error;
|
||||
|
||||
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
|
||||
&next, true, delayed);
|
||||
&next, true, delayed, k_job_id);
|
||||
if (r)
|
||||
goto error;
|
||||
|
||||
|
|
|
|||
|
|
@ -182,7 +182,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
|||
uint32_t src_data,
|
||||
struct dma_resv *resv,
|
||||
struct dma_fence **fence,
|
||||
bool delayed);
|
||||
bool delayed,
|
||||
u64 k_job_id);
|
||||
|
||||
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
|
||||
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
|
||||
|
|
|
|||
|
|
@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
|
|||
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
64, direct ? AMDGPU_IB_POOL_DIRECT :
|
||||
AMDGPU_IB_POOL_DELAYED, &job);
|
||||
AMDGPU_IB_POOL_DELAYED, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
|
|
@ -449,7 +449,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
&job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -540,7 +540,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
ib_size_dw * 4,
|
||||
direct ? AMDGPU_IB_POOL_DIRECT :
|
||||
AMDGPU_IB_POOL_DELAYED, &job);
|
||||
AMDGPU_IB_POOL_DELAYED, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
|
|
@ -601,7 +601,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
|
|||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
64, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
&job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
|
|
@ -781,7 +781,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
|
|||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
&job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
|
|
@ -911,7 +911,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
|
|||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
&job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -978,7 +978,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
|
|||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
&job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
|
|
@ -977,7 +977,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
|
|||
params.vm = vm;
|
||||
params.immediate = immediate;
|
||||
|
||||
r = vm->update_funcs->prepare(¶ms, NULL);
|
||||
r = vm->update_funcs->prepare(¶ms, NULL,
|
||||
AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
|
||||
if (r)
|
||||
goto error;
|
||||
|
||||
|
|
@ -1146,7 +1147,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
dma_fence_put(tmp);
|
||||
}
|
||||
|
||||
r = vm->update_funcs->prepare(¶ms, sync);
|
||||
r = vm->update_funcs->prepare(¶ms, sync,
|
||||
AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
|
|
|
|||
|
|
@ -308,7 +308,7 @@ struct amdgpu_vm_update_params {
|
|||
struct amdgpu_vm_update_funcs {
|
||||
int (*map_table)(struct amdgpu_bo_vm *bo);
|
||||
int (*prepare)(struct amdgpu_vm_update_params *p,
|
||||
struct amdgpu_sync *sync);
|
||||
struct amdgpu_sync *sync, u64 k_job_id);
|
||||
int (*update)(struct amdgpu_vm_update_params *p,
|
||||
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
|
||||
unsigned count, uint32_t incr, uint64_t flags);
|
||||
|
|
|
|||
|
|
@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
|
|||
*
|
||||
* @p: see amdgpu_vm_update_params definition
|
||||
* @sync: sync obj with fences to wait on
|
||||
* @k_job_id: the id for tracing/debug purposes
|
||||
*
|
||||
* Returns:
|
||||
* Negative errno, 0 for success.
|
||||
*/
|
||||
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
|
||||
struct amdgpu_sync *sync)
|
||||
struct amdgpu_sync *sync,
|
||||
u64 k_job_id)
|
||||
{
|
||||
if (!sync)
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@
|
|||
#include "amdgpu.h"
|
||||
#include "amdgpu_trace.h"
|
||||
#include "amdgpu_vm.h"
|
||||
#include "amdgpu_job.h"
|
||||
|
||||
/*
|
||||
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
|
||||
|
|
@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
params.vm = vm;
|
||||
params.immediate = immediate;
|
||||
|
||||
r = vm->update_funcs->prepare(¶ms, NULL);
|
||||
r = vm->update_funcs->prepare(¶ms, NULL,
|
||||
AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
|
||||
if (r)
|
||||
goto exit;
|
||||
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
|
|||
|
||||
/* Allocate a new job for @count PTE updates */
|
||||
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
|
||||
unsigned int count)
|
||||
unsigned int count, u64 k_job_id)
|
||||
{
|
||||
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
|
||||
: AMDGPU_IB_POOL_DELAYED;
|
||||
|
|
@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
|
|||
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
|
||||
ndw * 4, pool, &p->job);
|
||||
ndw * 4, pool, &p->job, k_job_id);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
|
|||
*
|
||||
* @p: see amdgpu_vm_update_params definition
|
||||
* @sync: amdgpu_sync object with fences to wait for
|
||||
* @k_job_id: identifier of the job, for tracing purpose
|
||||
*
|
||||
* Returns:
|
||||
* Negative errno, 0 for success.
|
||||
*/
|
||||
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
|
||||
struct amdgpu_sync *sync)
|
||||
struct amdgpu_sync *sync, u64 k_job_id)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = amdgpu_vm_sdma_alloc_job(p, 0);
|
||||
r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_vm_sdma_alloc_job(p, count);
|
||||
r = amdgpu_vm_sdma_alloc_job(p, count,
|
||||
AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
|
|||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
AMDGPU_IB_POOL_DIRECT, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
|
|||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
AMDGPU_IB_POOL_DIRECT, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
|
|
@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
|
|||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
AMDGPU_IB_POOL_DIRECT, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
|
|||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
AMDGPU_IB_POOL_DIRECT, &job,
|
||||
AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
|
|
@ -68,7 +68,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
|
|||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
num_dw * 4 + num_bytes,
|
||||
AMDGPU_IB_POOL_DELAYED,
|
||||
&job);
|
||||
&job,
|
||||
AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue