Diffstat (limited to 'SOURCES/amdgpu-regression.patch')
-rw-r--r-- | SOURCES/amdgpu-regression.patch | 1178 |
1 file changed, 0 insertions, 1178 deletions
diff --git a/SOURCES/amdgpu-regression.patch b/SOURCES/amdgpu-regression.patch
deleted file mode 100644
index 8503700..0000000
--- a/SOURCES/amdgpu-regression.patch
+++ /dev/null
@@ -1,1178 +0,0 @@
-From c9cad937c0c58618fe5b0310fd539a854dc1ae95 Mon Sep 17 00:00:00 2001
-From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
-Date: Fri, 8 Apr 2022 04:18:43 +0530
-Subject: drm/amdgpu: add drm buddy support to amdgpu
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-- Switch to drm buddy allocator
-- Add resource cursor support for drm buddy
-
-v2(Matthew Auld):
- - replace spinlock with mutex as we call kmem_cache_zalloc
- (..., GFP_KERNEL) in drm_buddy_alloc() function
-
- - lock the drm_buddy_block_trim() function, as the mark_free/mark_split
-   helpers it calls are globally visible
-
-v3(Matthew Auld):
- - remove trim method error handling as we address the failure case
- at drm_buddy_block_trim() function
-
-v4:
- - fix warnings reported by kernel test robot <lkp@intel.com>
-
-v5:
- - fix merge conflict issue
-
-v6:
- - fix warnings reported by kernel test robot <lkp@intel.com>
-
-v7:
- - remove DRM_BUDDY_RANGE_ALLOCATION flag usage
-
-v8:
- - keep DRM_BUDDY_RANGE_ALLOCATION flag usage
- - resolve conflicts created by drm/amdgpu: remove VRAM accounting v2
-
-v9(Christian):
- - merged the below patch
- - drm/amdgpu: move vram inline functions into a header
- - rename label name as fallback
- - move struct amdgpu_vram_mgr to amdgpu_vram_mgr.h
- - remove unnecessary flags from struct amdgpu_vram_reservation
- - rewrite block NULL check condition
- - change else style as per coding standard
- - rewrite the node max size
- - add a helper function to fetch the first entry from the list
-
-v10(Christian):
- - rename amdgpu_get_node() function name as amdgpu_vram_mgr_first_block
-
-v11:
- - if size is not aligned with min_page_size, enable the is_contiguous flag;
-   the size is then rounded up to a power of two and trimmed back to the
-   original size.
-v12:
- - rename the function names having prefix as amdgpu_vram_mgr_*()
- - modify the round_up() logic conforming to contiguous flag enablement
- or if size is not aligned to min_block_size
- - modify the trim logic
- - rename node as block wherever applicable
-
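As a reading aid, and not part of the patch: a simplified C sketch of the sizing rule described in the v11/v12 notes above. The helper names (pick_alloc_size, round_up_to, round_up_pow2) are hypothetical and do not exist in the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_to(uint64_t x, uint64_t m)
{
        return (x + m - 1) / m * m;
}

static uint64_t round_up_pow2(uint64_t x)
{
        uint64_t p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

/*
 * Size actually requested from the buddy allocator: contiguous requests
 * are rounded up to a power of two (and trimmed back to the original
 * size afterwards); other requests are only rounded up to min_block_size.
 */
static uint64_t pick_alloc_size(uint64_t size, uint64_t min_block_size,
                                bool contiguous)
{
        if (contiguous)
                return round_up_pow2(size);
        if (size % min_block_size)
                return round_up_to(size, min_block_size);
        return size;
}

int main(void)
{
        /* A 3 MiB contiguous request with a 2 MiB minimum block size. */
        printf("%llu\n", (unsigned long long)
               pick_alloc_size(3ULL << 20, 2ULL << 20, true));
        return 0;
}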
-Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
-Acked-by: Christian König <christian.koenig@amd.com>
-Link: https://patchwork.freedesktop.org/patch/msgid/20220407224843.2416-1-Arunpravin.PaneerSelvam@amd.com
-Signed-off-by: Christian König <christian.koenig@amd.com>
----
- drivers/gpu/drm/Kconfig | 1 +
- drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 97 +++++--
- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 10 +-
- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 359 +++++++++++++++----------
- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 89 ++++++
- 5 files changed, 380 insertions(+), 176 deletions(-)
- create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-
-diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
-index f1422bee3dcc..5133c3f028ab 100644
---- a/drivers/gpu/drm/Kconfig
-+++ b/drivers/gpu/drm/Kconfig
-@@ -280,6 +280,7 @@ config DRM_AMDGPU
- select HWMON
- select BACKLIGHT_CLASS_DEVICE
- select INTERVAL_TREE
-+ select DRM_BUDDY
- help
- Choose this option if you have a recent AMD Radeon graphics card.
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
-index acfa207cf970..6546552e596c 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
-@@ -30,12 +30,15 @@
- #include <drm/ttm/ttm_resource.h>
- #include <drm/ttm/ttm_range_manager.h>
-
-+#include "amdgpu_vram_mgr.h"
-+
- /* state back for walking over vram_mgr and gtt_mgr allocations */
- struct amdgpu_res_cursor {
- uint64_t start;
- uint64_t size;
- uint64_t remaining;
-- struct drm_mm_node *node;
-+ void *node;
-+ uint32_t mem_type;
- };
-
- /**
-@@ -52,27 +55,63 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
- uint64_t start, uint64_t size,
- struct amdgpu_res_cursor *cur)
- {
-+ struct drm_buddy_block *block;
-+ struct list_head *head, *next;
- struct drm_mm_node *node;
-
-- if (!res || res->mem_type == TTM_PL_SYSTEM) {
-- cur->start = start;
-- cur->size = size;
-- cur->remaining = size;
-- cur->node = NULL;
-- WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
-- return;
-- }
-+ if (!res)
-+ goto fallback;
-
- BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
-
-- node = to_ttm_range_mgr_node(res)->mm_nodes;
-- while (start >= node->size << PAGE_SHIFT)
-- start -= node++->size << PAGE_SHIFT;
-+ cur->mem_type = res->mem_type;
-+
-+ switch (cur->mem_type) {
-+ case TTM_PL_VRAM:
-+ head = &to_amdgpu_vram_mgr_resource(res)->blocks;
-+
-+ block = list_first_entry_or_null(head,
-+ struct drm_buddy_block,
-+ link);
-+ if (!block)
-+ goto fallback;
-+
-+ while (start >= amdgpu_vram_mgr_block_size(block)) {
-+ start -= amdgpu_vram_mgr_block_size(block);
-+
-+ next = block->link.next;
-+ if (next != head)
-+ block = list_entry(next, struct drm_buddy_block, link);
-+ }
-+
-+ cur->start = amdgpu_vram_mgr_block_start(block) + start;
-+ cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
-+ cur->remaining = size;
-+ cur->node = block;
-+ break;
-+ case TTM_PL_TT:
-+ node = to_ttm_range_mgr_node(res)->mm_nodes;
-+ while (start >= node->size << PAGE_SHIFT)
-+ start -= node++->size << PAGE_SHIFT;
-+
-+ cur->start = (node->start << PAGE_SHIFT) + start;
-+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
-+ cur->remaining = size;
-+ cur->node = node;
-+ break;
-+ default:
-+ goto fallback;
-+ }
-
-- cur->start = (node->start << PAGE_SHIFT) + start;
-- cur->size = min((node->size << PAGE_SHIFT) - start, size);
-+ return;
-+
-+fallback:
-+ cur->start = start;
-+ cur->size = size;
- cur->remaining = size;
-- cur->node = node;
-+ cur->node = NULL;
-+ WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
-+ return;
- }
-
- /**
-@@ -85,7 +124,9 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
- */
- static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
- {
-- struct drm_mm_node *node = cur->node;
-+ struct drm_buddy_block *block;
-+ struct drm_mm_node *node;
-+ struct list_head *next;
-
- BUG_ON(size > cur->remaining);
-
-@@ -99,9 +140,27 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
- return;
- }
-
-- cur->node = ++node;
-- cur->start = node->start << PAGE_SHIFT;
-- cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
-+ switch (cur->mem_type) {
-+ case TTM_PL_VRAM:
-+ block = cur->node;
-+
-+ next = block->link.next;
-+ block = list_entry(next, struct drm_buddy_block, link);
-+
-+ cur->node = block;
-+ cur->start = amdgpu_vram_mgr_block_start(block);
-+ cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
-+ break;
-+ case TTM_PL_TT:
-+ node = cur->node;
-+
-+ cur->node = ++node;
-+ cur->start = node->start << PAGE_SHIFT;
-+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
-+ break;
-+ default:
-+ return;
-+ }
- }
-
- #endif
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
-index 9120ae80ef52..6a70818039dd 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
-@@ -26,6 +26,7 @@
-
- #include <linux/dma-direction.h>
- #include <drm/gpu_scheduler.h>
-+#include "amdgpu_vram_mgr.h"
- #include "amdgpu.h"
-
- #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
-@@ -38,15 +39,6 @@
-
- #define AMDGPU_POISON 0xd0bed0be
-
--struct amdgpu_vram_mgr {
-- struct ttm_resource_manager manager;
-- struct drm_mm mm;
-- spinlock_t lock;
-- struct list_head reservations_pending;
-- struct list_head reserved_pages;
-- atomic64_t vis_usage;
--};
--
- struct amdgpu_gtt_mgr {
- struct ttm_resource_manager manager;
- struct drm_mm mm;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-index 0a7611648573..49e4092f447f 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-@@ -32,8 +32,10 @@
- #include "atom.h"
-
- struct amdgpu_vram_reservation {
-- struct list_head node;
-- struct drm_mm_node mm_node;
-+ u64 start;
-+ u64 size;
-+ struct list_head allocated;
-+ struct list_head blocks;
- };
-
- static inline struct amdgpu_vram_mgr *
-@@ -186,18 +188,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
- };
-
- /**
-- * amdgpu_vram_mgr_vis_size - Calculate visible node size
-+ * amdgpu_vram_mgr_vis_size - Calculate visible block size
- *
- * @adev: amdgpu_device pointer
-- * @node: MM node structure
-+ * @block: DRM BUDDY block structure
- *
-- * Calculate how many bytes of the MM node are inside visible VRAM
-+ * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
- */
- static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
-- struct drm_mm_node *node)
-+ struct drm_buddy_block *block)
- {
-- uint64_t start = node->start << PAGE_SHIFT;
-- uint64_t end = (node->size + node->start) << PAGE_SHIFT;
-+ u64 start = amdgpu_vram_mgr_block_start(block);
-+ u64 end = start + amdgpu_vram_mgr_block_size(block);
-
- if (start >= adev->gmc.visible_vram_size)
- return 0;
-@@ -218,9 +220,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
- {
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *res = bo->tbo.resource;
-- unsigned pages = res->num_pages;
-- struct drm_mm_node *mm;
-- u64 usage;
-+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
-+ struct drm_buddy_block *block;
-+ u64 usage = 0;
-
- if (amdgpu_gmc_vram_full_visible(&adev->gmc))
- return amdgpu_bo_size(bo);
-@@ -228,9 +230,8 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
- if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
- return 0;
-
-- mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
-- for (usage = 0; pages; pages -= mm->size, mm++)
-- usage += amdgpu_vram_mgr_vis_size(adev, mm);
-+ list_for_each_entry(block, &vres->blocks, link)
-+ usage += amdgpu_vram_mgr_vis_size(adev, block);
-
- return usage;
- }
-@@ -240,23 +241,30 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
- {
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
- struct amdgpu_device *adev = to_amdgpu_device(mgr);
-- struct drm_mm *mm = &mgr->mm;
-+ struct drm_buddy *mm = &mgr->mm;
- struct amdgpu_vram_reservation *rsv, *temp;
-+ struct drm_buddy_block *block;
- uint64_t vis_usage;
-
-- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
-- if (drm_mm_reserve_node(mm, &rsv->mm_node))
-+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
-+ if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
-+ rsv->size, mm->chunk_size, &rsv->allocated,
-+ DRM_BUDDY_RANGE_ALLOCATION))
-+ continue;
-+
-+ block = amdgpu_vram_mgr_first_block(&rsv->allocated);
-+ if (!block)
- continue;
-
- dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
-- rsv->mm_node.start, rsv->mm_node.size);
-+ rsv->start, rsv->size);
-
-- vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
-+ vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
- atomic64_add(vis_usage, &mgr->vis_usage);
- spin_lock(&man->bdev->lru_lock);
-- man->usage += rsv->mm_node.size << PAGE_SHIFT;
-+ man->usage += rsv->size;
- spin_unlock(&man->bdev->lru_lock);
-- list_move(&rsv->node, &mgr->reserved_pages);
-+ list_move(&rsv->blocks, &mgr->reserved_pages);
- }
- }
-
-@@ -278,14 +286,16 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
- if (!rsv)
- return -ENOMEM;
-
-- INIT_LIST_HEAD(&rsv->node);
-- rsv->mm_node.start = start >> PAGE_SHIFT;
-- rsv->mm_node.size = size >> PAGE_SHIFT;
-+ INIT_LIST_HEAD(&rsv->allocated);
-+ INIT_LIST_HEAD(&rsv->blocks);
-
-- spin_lock(&mgr->lock);
-- list_add_tail(&rsv->node, &mgr->reservations_pending);
-+ rsv->start = start;
-+ rsv->size = size;
-+
-+ mutex_lock(&mgr->lock);
-+ list_add_tail(&rsv->blocks, &mgr->reservations_pending);
- amdgpu_vram_mgr_do_reserve(&mgr->manager);
-- spin_unlock(&mgr->lock);
-+ mutex_unlock(&mgr->lock);
-
- return 0;
- }
-@@ -307,19 +317,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
- struct amdgpu_vram_reservation *rsv;
- int ret;
-
-- spin_lock(&mgr->lock);
-+ mutex_lock(&mgr->lock);
-
-- list_for_each_entry(rsv, &mgr->reservations_pending, node) {
-- if ((rsv->mm_node.start <= start) &&
-- (start < (rsv->mm_node.start + rsv->mm_node.size))) {
-+ list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
-+ if (rsv->start <= start &&
-+ (start < (rsv->start + rsv->size))) {
- ret = -EBUSY;
- goto out;
- }
- }
-
-- list_for_each_entry(rsv, &mgr->reserved_pages, node) {
-- if ((rsv->mm_node.start <= start) &&
-- (start < (rsv->mm_node.start + rsv->mm_node.size))) {
-+ list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
-+ if (rsv->start <= start &&
-+ (start < (rsv->start + rsv->size))) {
- ret = 0;
- goto out;
- }
-@@ -327,32 +337,10 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
-
- ret = -ENOENT;
- out:
-- spin_unlock(&mgr->lock);
-+ mutex_unlock(&mgr->lock);
- return ret;
- }
-
--/**
-- * amdgpu_vram_mgr_virt_start - update virtual start address
-- *
-- * @mem: ttm_resource to update
-- * @node: just allocated node
-- *
-- * Calculate a virtual BO start address to easily check if everything is CPU
-- * accessible.
-- */
--static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
-- struct drm_mm_node *node)
--{
-- unsigned long start;
--
-- start = node->start + node->size;
-- if (start > mem->num_pages)
-- start -= mem->num_pages;
-- else
-- start = 0;
-- mem->start = max(mem->start, start);
--}
--
- /**
- * amdgpu_vram_mgr_new - allocate new ranges
- *
-@@ -368,46 +356,44 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_resource **res)
- {
-- unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
-+ u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
- struct amdgpu_device *adev = to_amdgpu_device(mgr);
-- uint64_t vis_usage = 0, mem_bytes, max_bytes;
-- struct ttm_range_mgr_node *node;
-- struct drm_mm *mm = &mgr->mm;
-- enum drm_mm_insert_mode mode;
-- unsigned i;
-+ struct amdgpu_vram_mgr_resource *vres;
-+ u64 size, remaining_size, lpfn, fpfn;
-+ struct drm_buddy *mm = &mgr->mm;
-+ struct drm_buddy_block *block;
-+ unsigned long pages_per_block;
- int r;
-
-- lpfn = place->lpfn;
-+ lpfn = place->lpfn << PAGE_SHIFT;
- if (!lpfn)
-- lpfn = man->size >> PAGE_SHIFT;
-+ lpfn = man->size;
-+
-+ fpfn = place->fpfn << PAGE_SHIFT;
-
- max_bytes = adev->gmc.mc_vram_size;
- if (tbo->type != ttm_bo_type_kernel)
- max_bytes -= AMDGPU_VM_RESERVED_VRAM;
-
-- mem_bytes = tbo->base.size;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-- pages_per_node = ~0ul;
-- num_nodes = 1;
-+ pages_per_block = ~0ul;
- } else {
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-- pages_per_node = HPAGE_PMD_NR;
-+ pages_per_block = HPAGE_PMD_NR;
- #else
- /* default to 2MB */
-- pages_per_node = 2UL << (20UL - PAGE_SHIFT);
-+ pages_per_block = 2UL << (20UL - PAGE_SHIFT);
- #endif
-- pages_per_node = max_t(uint32_t, pages_per_node,
-- tbo->page_alignment);
-- num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
-+ pages_per_block = max_t(uint32_t, pages_per_block,
-+ tbo->page_alignment);
- }
-
-- node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
-- GFP_KERNEL | __GFP_ZERO);
-- if (!node)
-+ vres = kzalloc(sizeof(*vres), GFP_KERNEL);
-+ if (!vres)
- return -ENOMEM;
-
-- ttm_resource_init(tbo, place, &node->base);
-+ ttm_resource_init(tbo, place, &vres->base);
-
- /* bail out quickly if there's likely not enough VRAM for this BO */
- if (ttm_resource_manager_usage(man) > max_bytes) {
-@@ -415,66 +401,130 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
- goto error_fini;
- }
-
-- mode = DRM_MM_INSERT_BEST;
-+ INIT_LIST_HEAD(&vres->blocks);
-+
- if (place->flags & TTM_PL_FLAG_TOPDOWN)
-- mode = DRM_MM_INSERT_HIGH;
--
-- pages_left = node->base.num_pages;
--
-- /* Limit maximum size to 2GB due to SG table limitations */
-- pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
--
-- i = 0;
-- spin_lock(&mgr->lock);
-- while (pages_left) {
-- uint32_t alignment = tbo->page_alignment;
--
-- if (pages >= pages_per_node)
-- alignment = pages_per_node;
--
-- r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
-- alignment, 0, place->fpfn,
-- lpfn, mode);
-- if (unlikely(r)) {
-- if (pages > pages_per_node) {
-- if (is_power_of_2(pages))
-- pages = pages / 2;
-- else
-- pages = rounddown_pow_of_two(pages);
-- continue;
-+ vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
-+
-+ if (fpfn || lpfn != man->size)
-+ /* Allocate blocks in desired range */
-+ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
-+
-+ remaining_size = vres->base.num_pages << PAGE_SHIFT;
-+
-+ mutex_lock(&mgr->lock);
-+ while (remaining_size) {
-+ if (tbo->page_alignment)
-+ min_block_size = tbo->page_alignment << PAGE_SHIFT;
-+ else
-+ min_block_size = mgr->default_page_size;
-+
-+ BUG_ON(min_block_size < mm->chunk_size);
-+
-+ /* Limit maximum size to 2GiB due to SG table limitations */
-+ size = min(remaining_size, 2ULL << 30);
-+
-+ if (size >= pages_per_block << PAGE_SHIFT)
-+ min_block_size = pages_per_block << PAGE_SHIFT;
-+
-+ cur_size = size;
-+
-+ if (fpfn + size != place->lpfn << PAGE_SHIFT) {
-+ /*
-+ * Except for actual range allocation, modify the size and
-+ * min_block_size conforming to continuous flag enablement
-+ */
-+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-+ size = roundup_pow_of_two(size);
-+ min_block_size = size;
-+ /*
-+ * Modify the size value if size is not
-+ * aligned with min_block_size
-+ */
-+ } else if (!IS_ALIGNED(size, min_block_size)) {
-+ size = round_up(size, min_block_size);
- }
-- goto error_free;
- }
-
-- vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
-- amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
-- pages_left -= pages;
-- ++i;
-+ r = drm_buddy_alloc_blocks(mm, fpfn,
-+ lpfn,
-+ size,
-+ min_block_size,
-+ &vres->blocks,
-+ vres->flags);
-+ if (unlikely(r))
-+ goto error_free_blocks;
-+
-+ if (size > remaining_size)
-+ remaining_size = 0;
-+ else
-+ remaining_size -= size;
-+ }
-+ mutex_unlock(&mgr->lock);
-+
-+ if (cur_size != size) {
-+ struct drm_buddy_block *block;
-+ struct list_head *trim_list;
-+ u64 original_size;
-+ LIST_HEAD(temp);
-+
-+ trim_list = &vres->blocks;
-+ original_size = vres->base.num_pages << PAGE_SHIFT;
-+
-+ /*
-+ * If size value is rounded up to min_block_size, trim the last
-+ * block to the required size
-+ */
-+ if (!list_is_singular(&vres->blocks)) {
-+ block = list_last_entry(&vres->blocks, typeof(*block), link);
-+ list_move_tail(&block->link, &temp);
-+ trim_list = &temp;
-+ /*
-+ * Compute the original_size value by subtracting the
-+ * last block size with (aligned size - original size)
-+ */
-+ original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
-+ }
-
-- if (pages > pages_left)
-- pages = pages_left;
-+ mutex_lock(&mgr->lock);
-+ drm_buddy_block_trim(mm,
-+ original_size,
-+ trim_list);
-+ mutex_unlock(&mgr->lock);
-+
-+ if (!list_empty(&temp))
-+ list_splice_tail(trim_list, &vres->blocks);
-+ }
-+
-+ list_for_each_entry(block, &vres->blocks, link)
-+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
-+
-+ block = amdgpu_vram_mgr_first_block(&vres->blocks);
-+ if (!block) {
-+ r = -EINVAL;
-+ goto error_fini;
- }
-- spin_unlock(&mgr->lock);
-
-- if (i == 1)
-- node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
-+ vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
-+
-+ if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
-+ vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
-
- if (adev->gmc.xgmi.connected_to_cpu)
-- node->base.bus.caching = ttm_cached;
-+ vres->base.bus.caching = ttm_cached;
- else
-- node->base.bus.caching = ttm_write_combined;
-+ vres->base.bus.caching = ttm_write_combined;
-
- atomic64_add(vis_usage, &mgr->vis_usage);
-- *res = &node->base;
-+ *res = &vres->base;
- return 0;
-
--error_free:
-- while (i--)
-- drm_mm_remove_node(&node->mm_nodes[i]);
-- spin_unlock(&mgr->lock);
-+error_free_blocks:
-+ drm_buddy_free_list(mm, &vres->blocks);
-+ mutex_unlock(&mgr->lock);
- error_fini:
-- ttm_resource_fini(man, &node->base);
-- kvfree(node);
-+ ttm_resource_fini(man, &vres->base);
-+ kfree(vres);
-
- return r;
- }
-@@ -490,27 +540,26 @@ error_fini:
- static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *res)
- {
-- struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
-+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
- struct amdgpu_device *adev = to_amdgpu_device(mgr);
-+ struct drm_buddy *mm = &mgr->mm;
-+ struct drm_buddy_block *block;
- uint64_t vis_usage = 0;
-- unsigned i, pages;
-
-- spin_lock(&mgr->lock);
-- for (i = 0, pages = res->num_pages; pages;
-- pages -= node->mm_nodes[i].size, ++i) {
-- struct drm_mm_node *mm = &node->mm_nodes[i];
-+ mutex_lock(&mgr->lock);
-+ list_for_each_entry(block, &vres->blocks, link)
-+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
-
-- drm_mm_remove_node(mm);
-- vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
-- }
- amdgpu_vram_mgr_do_reserve(man);
-- spin_unlock(&mgr->lock);
-+
-+ drm_buddy_free_list(mm, &vres->blocks);
-+ mutex_unlock(&mgr->lock);
-
- atomic64_sub(vis_usage, &mgr->vis_usage);
-
- ttm_resource_fini(man, res);
-- kvfree(node);
-+ kfree(vres);
- }
-
- /**
-@@ -542,7 +591,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- if (!*sgt)
- return -ENOMEM;
-
-- /* Determine the number of DRM_MM nodes to export */
-+ /* Determine the number of DRM_BUDDY blocks to export */
- amdgpu_res_first(res, offset, length, &cursor);
- while (cursor.remaining) {
- num_entries++;
-@@ -558,10 +607,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- sg->length = 0;
-
- /*
-- * Walk down DRM_MM nodes to populate scatterlist nodes
-- * @note: Use iterator api to get first the DRM_MM node
-+ * Walk down DRM_BUDDY blocks to populate scatterlist nodes
-+ * @note: Use iterator api to get first the DRM_BUDDY block
- * and the number of bytes from it. Access the following
-- * DRM_MM node(s) if more buffer needs to exported
-+ * DRM_BUDDY block(s) if more buffer needs to exported
- */
- amdgpu_res_first(res, offset, length, &cursor);
- for_each_sgtable_sg((*sgt), sg, i) {
-@@ -648,13 +697,22 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
- {
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-+ struct drm_buddy *mm = &mgr->mm;
-+ struct drm_buddy_block *block;
-
- drm_printf(printer, " vis usage:%llu\n",
- amdgpu_vram_mgr_vis_usage(mgr));
-
-- spin_lock(&mgr->lock);
-- drm_mm_print(&mgr->mm, printer);
-- spin_unlock(&mgr->lock);
-+ mutex_lock(&mgr->lock);
-+ drm_printf(printer, "default_page_size: %lluKiB\n",
-+ mgr->default_page_size >> 10);
-+
-+ drm_buddy_print(mm, printer);
-+
-+ drm_printf(printer, "reserved:\n");
-+ list_for_each_entry(block, &mgr->reserved_pages, link)
-+ drm_buddy_block_print(mm, block, printer);
-+ mutex_unlock(&mgr->lock);
- }
-
- static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
-@@ -674,16 +732,21 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
- {
- struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
- struct ttm_resource_manager *man = &mgr->manager;
-+ int err;
-
- ttm_resource_manager_init(man, &adev->mman.bdev,
- adev->gmc.real_vram_size);
-
- man->func = &amdgpu_vram_mgr_func;
-
-- drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
-- spin_lock_init(&mgr->lock);
-+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
-+ if (err)
-+ return err;
-+
-+ mutex_init(&mgr->lock);
- INIT_LIST_HEAD(&mgr->reservations_pending);
- INIT_LIST_HEAD(&mgr->reserved_pages);
-+ mgr->default_page_size = PAGE_SIZE;
-
- ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
- ttm_resource_manager_set_used(man, true);
-@@ -711,16 +774,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
- if (ret)
- return;
-
-- spin_lock(&mgr->lock);
-- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
-+ mutex_lock(&mgr->lock);
-+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
- kfree(rsv);
-
-- list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
-- drm_mm_remove_node(&rsv->mm_node);
-+ list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
-+ drm_buddy_free_list(&mgr->mm, &rsv->blocks);
- kfree(rsv);
- }
-- drm_mm_takedown(&mgr->mm);
-- spin_unlock(&mgr->lock);
-+ drm_buddy_fini(&mgr->mm);
-+ mutex_unlock(&mgr->lock);
-
- ttm_resource_manager_cleanup(man);
- ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-new file mode 100644
-index 000000000000..9a2db87186c7
---- /dev/null
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-@@ -0,0 +1,89 @@
-+/* SPDX-License-Identifier: MIT
-+ * Copyright 2021 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+#ifndef __AMDGPU_VRAM_MGR_H__
-+#define __AMDGPU_VRAM_MGR_H__
-+
-+#include <drm/drm_buddy.h>
-+
-+struct amdgpu_vram_mgr {
-+ struct ttm_resource_manager manager;
-+ struct drm_buddy mm;
-+ /* protects access to buffer objects */
-+ struct mutex lock;
-+ struct list_head reservations_pending;
-+ struct list_head reserved_pages;
-+ atomic64_t vis_usage;
-+ u64 default_page_size;
-+};
-+
-+struct amdgpu_vram_mgr_resource {
-+ struct ttm_resource base;
-+ struct list_head blocks;
-+ unsigned long flags;
-+};
-+
-+static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
-+{
-+ return drm_buddy_block_offset(block);
-+}
-+
-+static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
-+{
-+ return PAGE_SIZE << drm_buddy_block_order(block);
-+}
-+
-+static inline struct drm_buddy_block *
-+amdgpu_vram_mgr_first_block(struct list_head *list)
-+{
-+ return list_first_entry_or_null(list, struct drm_buddy_block, link);
-+}
-+
-+static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
-+{
-+ struct drm_buddy_block *block;
-+ u64 start, size;
-+
-+ block = amdgpu_vram_mgr_first_block(head);
-+ if (!block)
-+ return false;
-+
-+ while (head != block->link.next) {
-+ start = amdgpu_vram_mgr_block_start(block);
-+ size = amdgpu_vram_mgr_block_size(block);
-+
-+ block = list_entry(block->link.next, struct drm_buddy_block, link);
-+ if (start + size != amdgpu_vram_mgr_block_start(block))
-+ return false;
-+ }
-+
-+ return true;
-+}
-+
-+static inline struct amdgpu_vram_mgr_resource *
-+to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
-+{
-+ return container_of(res, struct amdgpu_vram_mgr_resource, base);
-+}
-+
-+#endif
---
-cgit v1.2.1
-
-From 708d19d9f362766147cab79eccae60912c6d3068 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
-Date: Tue, 10 May 2022 13:26:46 +0200
-Subject: drm/amdgpu: move internal vram_mgr function into the C file
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-No need to have those in the header.
-
-Signed-off-by: Christian König <christian.koenig@amd.com>
-Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
-Reviewed-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
-Link: https://patchwork.freedesktop.org/patch/msgid/20220510113649.879821-2-christian.koenig@amd.com
----
- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 29 ++++++++++++++++++++++++++++
- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 27 --------------------------
- 2 files changed, 29 insertions(+), 27 deletions(-)
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-index 51d9d3a4456c..7a5e8a7b4a1b 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-@@ -50,6 +50,35 @@ to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
- return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
- }
-
-+static inline struct drm_buddy_block *
-+amdgpu_vram_mgr_first_block(struct list_head *list)
-+{
-+ return list_first_entry_or_null(list, struct drm_buddy_block, link);
-+}
-+
-+static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
-+{
-+ struct drm_buddy_block *block;
-+ u64 start, size;
-+
-+ block = amdgpu_vram_mgr_first_block(head);
-+ if (!block)
-+ return false;
-+
-+ while (head != block->link.next) {
-+ start = amdgpu_vram_mgr_block_start(block);
-+ size = amdgpu_vram_mgr_block_size(block);
-+
-+ block = list_entry(block->link.next, struct drm_buddy_block, link);
-+ if (start + size != amdgpu_vram_mgr_block_start(block))
-+ return false;
-+ }
-+
-+ return true;
-+}
-+
-+
-+
- /**
- * DOC: mem_info_vram_total
- *
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-index 9a2db87186c7..4b267bf1c5db 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-@@ -53,33 +53,6 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
- return PAGE_SIZE << drm_buddy_block_order(block);
- }
-
--static inline struct drm_buddy_block *
--amdgpu_vram_mgr_first_block(struct list_head *list)
--{
-- return list_first_entry_or_null(list, struct drm_buddy_block, link);
--}
--
--static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
--{
-- struct drm_buddy_block *block;
-- u64 start, size;
--
-- block = amdgpu_vram_mgr_first_block(head);
-- if (!block)
-- return false;
--
-- while (head != block->link.next) {
-- start = amdgpu_vram_mgr_block_start(block);
-- size = amdgpu_vram_mgr_block_size(block);
--
-- block = list_entry(block->link.next, struct drm_buddy_block, link);
-- if (start + size != amdgpu_vram_mgr_block_start(block))
-- return false;
-- }
--
-- return true;
--}
--
- static inline struct amdgpu_vram_mgr_resource *
- to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
- {
---
-cgit v1.2.1
-
-From 5e3f1e7729ec7a99e145e9d8ed58963d86cdfb98 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
-Date: Tue, 10 May 2022 12:52:05 +0200
-Subject: drm/amdgpu: fix start calculation in amdgpu_vram_mgr_new
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-We still need to calculate a virtual start address for the resource to
-aid checking if it is visible or not. This only matters on small BAR
-systems, but better safe than sorry.
-
-Signed-off-by: Christian König <christian.koenig@amd.com>
-Acked-by: Alex Deucher <alexander.deucher@amd.com>
-Reviewed-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
-Link: https://patchwork.freedesktop.org/patch/msgid/20220510113649.879821-1-christian.koenig@amd.com
----
- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 22 ++++++++++++++--------
- 1 file changed, 14 insertions(+), 8 deletions(-)
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-index 49e4092f447f..51d9d3a4456c 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-@@ -496,16 +496,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
- list_splice_tail(trim_list, &vres->blocks);
- }
-
-- list_for_each_entry(block, &vres->blocks, link)
-- vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
-+ vres->base.start = 0;
-+ list_for_each_entry(block, &vres->blocks, link) {
-+ unsigned long start;
-
-- block = amdgpu_vram_mgr_first_block(&vres->blocks);
-- if (!block) {
-- r = -EINVAL;
-- goto error_fini;
-- }
-+ start = amdgpu_vram_mgr_block_start(block) +
-+ amdgpu_vram_mgr_block_size(block);
-+ start >>= PAGE_SHIFT;
-
-- vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
-+ if (start > vres->base.num_pages)
-+ start -= vres->base.num_pages;
-+ else
-+ start = 0;
-+ vres->base.start = max(vres->base.start, start);
-+
-+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
-+ }
-
- if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
- vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
---
-cgit v1.2.1
-
-From 6f2c8d5f16594a13295d153245e0bb8166db7ac9 Mon Sep 17 00:00:00 2001
-From: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
-Date: Thu, 14 Jul 2022 03:12:14 -0700
-Subject: drm/amdgpu: Fix for drm buddy memory corruption
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Users reported GPU page faults when running graphics applications, and
-in some cases garbled graphics were observed as soon as X starts. This
-patch fixes all of these issues.
-
-Fixed the typecast issue for fpfn and lpfn variables, thus
-preventing the overflow problem which resolves the memory
-corruption.
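For illustration only, and not part of the patch: a minimal user-space C sketch of the 32-bit shift overflow that the (u64) casts in this patch prevent. The PAGE_SHIFT value and the sample pfn are assumptions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

int main(void)
{
        uint32_t lpfn = 0x100000;       /* a pfn at the 4 GiB boundary */

        /* Shift performed in 32 bits: the result wraps to 0 before widening. */
        uint64_t wrong = lpfn << PAGE_SHIFT;

        /* Widen first, as the fix does: yields the expected 4 GiB offset. */
        uint64_t right = (uint64_t)lpfn << PAGE_SHIFT;

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}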
-
-Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
-Reported-by: Mike Lothian <mike@fireburn.co.uk>
-Tested-by: Mike Lothian <mike@fireburn.co.uk>
-Link: https://patchwork.freedesktop.org/patch/msgid/20220714101214.7620-1-Arunpravin.PaneerSelvam@amd.com
-Reviewed-by: Christian König <christian.koenig@amd.com>
-Signed-off-by: Christian König <christian.koenig@amd.com>
----
- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 16 ++++++++--------
- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 2 +-
- 2 files changed, 9 insertions(+), 9 deletions(-)
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-index 7a5e8a7b4a1b..28ec5f8ac1c1 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
-@@ -395,11 +395,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
- unsigned long pages_per_block;
- int r;
-
-- lpfn = place->lpfn << PAGE_SHIFT;
-+ lpfn = (u64)place->lpfn << PAGE_SHIFT;
- if (!lpfn)
- lpfn = man->size;
-
-- fpfn = place->fpfn << PAGE_SHIFT;
-+ fpfn = (u64)place->fpfn << PAGE_SHIFT;
-
- max_bytes = adev->gmc.mc_vram_size;
- if (tbo->type != ttm_bo_type_kernel)
-@@ -439,12 +439,12 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
- /* Allocate blocks in desired range */
- vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
-
-- remaining_size = vres->base.num_pages << PAGE_SHIFT;
-+ remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT;
-
- mutex_lock(&mgr->lock);
- while (remaining_size) {
- if (tbo->page_alignment)
-- min_block_size = tbo->page_alignment << PAGE_SHIFT;
-+ min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
- else
- min_block_size = mgr->default_page_size;
-
-@@ -453,12 +453,12 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
- /* Limit maximum size to 2GiB due to SG table limitations */
- size = min(remaining_size, 2ULL << 30);
-
-- if (size >= pages_per_block << PAGE_SHIFT)
-- min_block_size = pages_per_block << PAGE_SHIFT;
-+ if (size >= (u64)pages_per_block << PAGE_SHIFT)
-+ min_block_size = (u64)pages_per_block << PAGE_SHIFT;
-
- cur_size = size;
-
-- if (fpfn + size != place->lpfn << PAGE_SHIFT) {
-+ if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
- /*
- * Except for actual range allocation, modify the size and
- * min_block_size conforming to continuous flag enablement
-@@ -498,7 +498,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
- LIST_HEAD(temp);
-
- trim_list = &vres->blocks;
-- original_size = vres->base.num_pages << PAGE_SHIFT;
-+ original_size = (u64)vres->base.num_pages << PAGE_SHIFT;
-
- /*
- * If size value is rounded up to min_block_size, trim the last
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-index 4b267bf1c5db..0e04e42cf809 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
-@@ -50,7 +50,7 @@ static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
-
- static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
- {
-- return PAGE_SIZE << drm_buddy_block_order(block);
-+ return (u64)PAGE_SIZE << drm_buddy_block_order(block);
- }
-
- static inline struct amdgpu_vram_mgr_resource *
---
-cgit v1.2.1
-
|