Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  42  +++++++++++++++++++++++++++++++-----------
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index c87515210c4f..83a83ced2439 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -82,6 +82,25 @@ static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
return false;
}
+/**
+ * reuse_dmamap() - Check whether adev can share the original
+ * userptr BO
+ *
+ * If both adev and bo_adev use direct mapping, or are in the
+ * same IOMMU group, they can share the original BO.
+ *
+ * @adev: Device that may or may not be able to share the original BO
+ * @bo_adev: Device to which the allocated BO belongs
+ *
+ * Return: true if adev can share the original userptr BO,
+ * false otherwise.
+ */
+static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
+{
+ return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
+ (adev->dev->iommu_group == bo_adev->dev->iommu_group);
+}
+
/* Set memory usage limits. Currently, limits are
* System (TTM + userptr) memory - 15/16th System RAM
* TTM memory - 3/8th System RAM
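
For illustration, here is a standalone sketch (not part of the patch) of the decision reuse_dmamap() encodes. The struct definitions are simplified stand-ins for struct amdgpu_device and struct device, used only to show the two conditions under which the original userptr BO can be shared between devices:

#include <stdbool.h>
#include <stdio.h>

struct sketch_iommu_group { int id; };                /* stand-in for struct iommu_group */
struct sketch_dev { struct sketch_iommu_group *iommu_group; };
struct sketch_adev {                                  /* stand-in for struct amdgpu_device */
	bool ram_is_direct_mapped;
	struct sketch_dev *dev;
};

/* Same predicate as the patch: share the original userptr BO when both
 * devices map system RAM directly, or when they share an IOMMU group. */
static bool reuse_dmamap_sketch(const struct sketch_adev *adev,
				const struct sketch_adev *bo_adev)
{
	return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
	       (adev->dev->iommu_group == bo_adev->dev->iommu_group);
}

int main(void)
{
	struct sketch_iommu_group grp = { .id = 1 };
	struct sketch_dev d0 = { .iommu_group = &grp };
	struct sketch_dev d1 = { .iommu_group = &grp };
	struct sketch_adev gpu0 = { .ram_is_direct_mapped = false, .dev = &d0 };
	struct sketch_adev gpu1 = { .ram_is_direct_mapped = false, .dev = &d1 };

	/* Same IOMMU group, so the original BO can be shared. */
	printf("share original BO: %d\n", reuse_dmamap_sketch(&gpu0, &gpu1));
	return 0;
}
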
@@ -253,15 +272,19 @@ create_dmamap_sg_bo(struct amdgpu_device *adev,
struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
struct drm_gem_object *gem_obj;
- int ret, align;
+ int ret;
+ uint64_t flags = 0;
ret = amdgpu_bo_reserve(mem->bo, false);
if (ret)
return ret;
- align = 1;
- ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, align,
- AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE,
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
+ flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_UNCACHED);
+
+ ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
+ AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);
amdgpu_bo_unreserve(mem->bo);
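
A minimal sketch (not part of the patch, with made-up flag values rather than the real AMDGPU_GEM_CREATE_* encoding) of the flag propagation above: for userptr allocations, only the coherence/caching bits of the original BO are carried over to the new SG BO, on top of the preemptible flag:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only; not the real kernel definitions. */
#define SKETCH_GEM_CREATE_COHERENT    (1u << 0)
#define SKETCH_GEM_CREATE_UNCACHED    (1u << 1)
#define SKETCH_GEM_CREATE_PREEMPTIBLE (1u << 2)
#define SKETCH_ALLOC_FLAGS_USERPTR    (1u << 3)

/* Mirror of the patched logic: mask the original BO's flags so only the
 * coherence/caching bits survive, then add PREEMPTIBLE for the SG BO. */
static uint64_t sg_bo_create_flags(uint32_t alloc_flags, uint64_t orig_bo_flags)
{
	uint64_t flags = 0;

	if (alloc_flags & SKETCH_ALLOC_FLAGS_USERPTR)
		flags |= orig_bo_flags & (SKETCH_GEM_CREATE_COHERENT |
					  SKETCH_GEM_CREATE_UNCACHED);

	return SKETCH_GEM_CREATE_PREEMPTIBLE | flags;
}

int main(void)
{
	uint64_t f = sg_bo_create_flags(SKETCH_ALLOC_FLAGS_USERPTR,
					SKETCH_GEM_CREATE_UNCACHED);
	printf("SG BO flags: 0x%llx\n", (unsigned long long)f);
	return 0;
}
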
@@ -481,9 +504,6 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem,
if (unlikely(ret))
goto release_sg;
- drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
- ttm->num_pages);
-
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -805,11 +825,11 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
va + bo_size, vm);
if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
- (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
- same_hive) {
+ (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
+ same_hive) {
/* Mappings on the local GPU, or VRAM mappings in the
- * local hive, or userptr mapping IOMMU direct map mode
- * share the original BO
+ * local hive, or userptr mappings that can reuse the
+ * DMA map address space, share the original BO
*/
attachment[i]->type = KFD_MEM_ATT_SHARED;
bo[i] = mem->bo;
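
As a rough sketch (not part of the patch), the attachment-type decision above can be read as a single predicate; the helper below uses plain booleans as stand-ins for the checks made in kfd_mem_attach():

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the KFD attachment types used in the hunk above. */
enum sketch_att_type { SKETCH_ATT_SHARED, SKETCH_ATT_DMAMAP };

/* Reads as: share the original BO for local-GPU mappings (except MMIO remap),
 * for userptr mappings whose DMA map can be reused, or within the same hive;
 * otherwise a separate DMA-mapping BO is needed. */
static enum sketch_att_type pick_attachment_type(bool same_gpu, bool mmio_remap,
						 bool is_userptr, bool can_reuse_dmamap,
						 bool same_hive)
{
	if ((same_gpu && !mmio_remap) ||
	    (is_userptr && can_reuse_dmamap) ||
	    same_hive)
		return SKETCH_ATT_SHARED;

	return SKETCH_ATT_DMAMAP;
}

int main(void)
{
	/* Remote GPU, userptr BO, DMA map reusable: the original BO is shared. */
	printf("attachment type: %d\n",
	       pick_attachment_type(false, false, true, true, false));
	return 0;
}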