mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:24:45 +01:00
drm/amdgpu: Update vm start, end, hole to support 57bit address
Change the GMC macros AMDGPU_GMC_HOLE_START/END/MASK to use 57-bit values when the VM root level is PDB3, i.e. when 5-level page tables are in use. The macros access adev without taking it as a parameter in order to minimize the code changes needed to support 57-bit addressing; as a result, an adev variable has to be added in several places that use the macros. Because an adev definition is not available in every amdgpu C file that includes amdgpu_gmc.h, the inline function amdgpu_gmc_sign_extend is changed into a macro. Signed-off-by: Philip Yang <Philip.Yang@amd.com> Acked-by: Felix Kuehling <felix.kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
f6b1c1f5fd
commit
cf856ca9b9
10 changed files with 33 additions and 25 deletions
|
|
@ -1021,6 +1021,7 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
|
|||
struct amdgpu_job *job)
|
||||
{
|
||||
struct amdgpu_ring *ring = amdgpu_job_ring(job);
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
unsigned int i;
|
||||
int r;
|
||||
|
||||
|
|
|
|||
|
|
@ -32,9 +32,11 @@
|
|||
#include "amdgpu_xgmi.h"
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
/* VA hole for 48bit addresses on Vega10 */
|
||||
#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
|
||||
#define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL
|
||||
/* VA hole for 48bit and 57bit addresses */
|
||||
#define AMDGPU_GMC_HOLE_START (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\
|
||||
0x0100000000000000ULL : 0x0000800000000000ULL)
|
||||
#define AMDGPU_GMC_HOLE_END (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\
|
||||
0xff00000000000000ULL : 0xffff800000000000ULL)
|
||||
|
||||
/*
|
||||
* Hardware is programmed as if the hole doesn't exists with start and end
|
||||
|
|
@ -43,7 +45,8 @@
|
|||
* This mask is used to remove the upper 16bits of the VA and so come up with
|
||||
* the linear addr value.
|
||||
*/
|
||||
#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
|
||||
#define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\
|
||||
0x00ffffffffffffffULL : 0x0000ffffffffffffULL)
|
||||
|
||||
/*
|
||||
* Ring size as power of two for the log of recent faults.
|
||||
|
|
@ -394,13 +397,8 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
|
|||
*
|
||||
* @addr: address to extend
|
||||
*/
|
||||
static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
|
||||
{
|
||||
if (addr >= AMDGPU_GMC_HOLE_START)
|
||||
addr |= AMDGPU_GMC_HOLE_END;
|
||||
|
||||
return addr;
|
||||
}
|
||||
#define amdgpu_gmc_sign_extend(addr) ((addr) >= AMDGPU_GMC_HOLE_START ?\
|
||||
((addr) | AMDGPU_GMC_HOLE_END) : (addr))
|
||||
|
||||
bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
|
||||
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
|
||||
|
|
|
|||
|
|
@ -166,7 +166,8 @@ static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
|
||||
int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
|
||||
struct amdgpu_usermode_queue *queue,
|
||||
u64 addr, u64 expected_size)
|
||||
{
|
||||
struct amdgpu_bo_va_mapping *va_map;
|
||||
|
|
@ -730,9 +731,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
|
|||
db_info.doorbell_offset = args->in.doorbell_offset;
|
||||
|
||||
/* Validate the userq virtual address.*/
|
||||
if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
|
||||
amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
|
||||
amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
|
||||
if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
|
||||
amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
|
||||
amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
|
||||
r = -EINVAL;
|
||||
kfree(queue);
|
||||
goto unlock;
|
||||
|
|
|
|||
|
|
@ -153,7 +153,8 @@ void amdgpu_userq_reset_work(struct work_struct *work);
|
|||
void amdgpu_userq_pre_reset(struct amdgpu_device *adev);
|
||||
int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost);
|
||||
|
||||
int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
|
||||
int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
|
||||
struct amdgpu_usermode_queue *queue,
|
||||
u64 addr, u64 expected_size);
|
||||
int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va_mapping *mapping,
|
||||
|
|
|
|||
|
|
@ -352,6 +352,7 @@ static const struct dma_fence_ops amdgpu_userq_fence_ops = {
|
|||
/**
|
||||
* amdgpu_userq_fence_read_wptr - Read the userq wptr value
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @queue: user mode queue structure pointer
|
||||
* @wptr: write pointer value
|
||||
*
|
||||
|
|
@ -361,7 +362,8 @@ static const struct dma_fence_ops amdgpu_userq_fence_ops = {
|
|||
*
|
||||
* Returns wptr value on success, error on failure.
|
||||
*/
|
||||
static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
|
||||
static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
|
||||
struct amdgpu_usermode_queue *queue,
|
||||
u64 *wptr)
|
||||
{
|
||||
struct amdgpu_bo_va_mapping *mapping;
|
||||
|
|
@ -455,6 +457,7 @@ amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
|
|||
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp)
|
||||
{
|
||||
struct amdgpu_device *adev = drm_to_adev(dev);
|
||||
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
||||
struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
|
||||
struct drm_amdgpu_userq_signal *args = data;
|
||||
|
|
@ -545,7 +548,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
|
|||
goto put_gobj_write;
|
||||
}
|
||||
|
||||
r = amdgpu_userq_fence_read_wptr(queue, &wptr);
|
||||
r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
|
||||
if (r)
|
||||
goto put_gobj_write;
|
||||
|
||||
|
|
|
|||
|
|
@ -827,7 +827,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
|
|||
* Set the internal MC address mask This is the max address of the GPU's
|
||||
* internal address space.
|
||||
*/
|
||||
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
|
||||
adev->gmc.mc_mask = AMDGPU_GMC_HOLE_MASK;
|
||||
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
|
||||
if (r) {
|
||||
|
|
|
|||
|
|
@ -59,7 +59,8 @@ err_reserve_bo_failed:
|
|||
}
|
||||
|
||||
static int
|
||||
mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
|
||||
mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
|
||||
struct amdgpu_userq_mgr *uq_mgr,
|
||||
struct amdgpu_usermode_queue *queue,
|
||||
uint64_t wptr)
|
||||
{
|
||||
|
|
@ -300,7 +301,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
goto free_mqd;
|
||||
}
|
||||
|
||||
r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
|
||||
r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
|
||||
2048);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
|
|
@ -341,11 +342,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
userq_props->tmz_queue =
|
||||
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
|
||||
|
||||
r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
|
||||
r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
|
||||
shadow_info.shadow_size);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
|
||||
r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
|
||||
shadow_info.csa_size);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
|
|
@ -366,7 +367,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
r = -ENOMEM;
|
||||
goto free_mqd;
|
||||
}
|
||||
r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
|
||||
r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
|
||||
32);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
|
|
@ -391,7 +392,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
}
|
||||
|
||||
/* FW expects WPTR BOs to be mapped into GART */
|
||||
r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
|
||||
r = mes_userq_create_wptr_mapping(adev, uq_mgr, queue, userq_props->wptr_gpu_addr);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to create WPTR mapping\n");
|
||||
goto free_ctx;
|
||||
|
|
|
|||
|
|
@ -2053,6 +2053,7 @@ static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
|
|||
{
|
||||
struct ttm_operation_ctx ctx = { false, false };
|
||||
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
|
||||
struct amdgpu_device *adev = parser->adev;
|
||||
struct amdgpu_vm *vm = &fpriv->vm;
|
||||
struct amdgpu_bo_va_mapping *mapping;
|
||||
struct amdgpu_bo *bo;
|
||||
|
|
|
|||
|
|
@ -1907,6 +1907,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
|
|||
uint64_t addr)
|
||||
{
|
||||
struct ttm_operation_ctx ctx = { false, false };
|
||||
struct amdgpu_device *adev = p->adev;
|
||||
struct amdgpu_bo_va_mapping *map;
|
||||
uint32_t *msg, num_buffers;
|
||||
struct amdgpu_bo *bo;
|
||||
|
|
|
|||
|
|
@ -1824,6 +1824,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
|
|||
uint64_t addr)
|
||||
{
|
||||
struct ttm_operation_ctx ctx = { false, false };
|
||||
struct amdgpu_device *adev = p->adev;
|
||||
struct amdgpu_bo_va_mapping *map;
|
||||
uint32_t *msg, num_buffers;
|
||||
struct amdgpu_bo *bo;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue