-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h     |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 229
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h |  24
4 files changed, 131 insertions(+), 124 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 35b0214b3080..f961beac85ac 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1993,7 +1993,6 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_ppgtt *pvt_ppgtt;
seq_printf(m, "\nproc: %s\n",
get_pid_task(file->pid, PIDTYPE_PID)->comm);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 29d7f225aa7b..ea1f8cfc61ae 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -567,6 +567,7 @@ enum i915_cache_level {
large Last-Level-Cache. LLC is coherent with
the CPU, but L3 is only visible to the GPU. */
I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
+ I915_CACHE_MAX,
};
struct i915_ctx_hang_stats {
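The new I915_CACHE_MAX enumerator is a sentinel: it sits one past the last valid cache level, so a single comparison bounds-checks any incoming value. A minimal sketch of the pattern (the helper name is illustrative, not part of this patch):

	static inline bool i915_cache_level_valid(enum i915_cache_level level)
	{
		/* valid levels are 0 .. I915_CACHE_MAX - 1 */
		return level < I915_CACHE_MAX;
	}

The BUG_ON() checks added to the gen6/gen8 map paths below rely on exactly this comparison.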
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9f49d641a4bf..2335d79bef26 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -555,7 +555,7 @@ static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine_cs *ring,
bool synchronous)
{
return gen8_write_pdp(ring, 0, ppgtt->pml4.daddr, synchronous);
@@ -737,9 +737,9 @@ static void gen8_map_page_directory_pointer(struct i915_pml4 *pml4,
kunmap_atomic(pagemap);
}
-static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
- struct i915_pagedirpo *pdp,
- uint64_t start, uint64_t length)
+static void gen8_unmap_vma_3lvl(struct i915_address_space *vm,
+ struct i915_pagedirpo *pdp,
+ uint64_t start, uint64_t length)
{
struct drm_device *dev = vm->dev;
struct i915_pagedir *pd;
@@ -824,38 +824,43 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
}
}
-static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
- struct i915_pml4 *pml4,
- uint64_t start, uint64_t length)
+static void gen8_unmap_vma_4lvl(struct i915_address_space *vm,
+ struct i915_pml4 *pml4,
+ uint64_t start, uint64_t length)
{
struct i915_pagedirpo *pdp;
uint64_t temp, pml4e;
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
- gen8_teardown_va_range_3lvl(vm, pdp, start, length);
+ gen8_unmap_vma_3lvl(vm, pdp, start, length);
if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->dev)))
clear_bit(pml4e, pml4->used_pml4es);
}
}
-static void gen8_teardown_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+static void __gen8_teardown_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
if (HAS_48B_PPGTT(vm->dev))
- gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+ gen8_unmap_vma_4lvl(vm, &ppgtt->pml4, start, length);
else
- gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+ gen8_unmap_vma_3lvl(vm, &ppgtt->pdp, start, length);
+}
+
+static void gen8_unmap_vma(struct i915_vma *vma)
+{
+ __gen8_teardown_va_range(vma->vm, vma->node.start, vma->node.size);
}
static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
trace_i915_va_teardown(&ppgtt->base,
ppgtt->base.start, ppgtt->base.total);
- gen8_teardown_va_range(&ppgtt->base,
- ppgtt->base.start, ppgtt->base.total);
+ __gen8_teardown_va_range(&ppgtt->base,
+ ppgtt->base.start, ppgtt->base.total);
WARN_ON(!bitmap_empty(ppgtt->pdp.used_pdpes,
I915_PDPES_PER_PDP(ppgtt->base.dev)));
@@ -1195,15 +1200,15 @@ err_out:
start = orig_start;
length = orig_length;
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e)
- gen8_teardown_va_range_3lvl(vm, pdp, start, length);
+ gen8_unmap_vma_3lvl(vm, pdp, start, length);
err_alloc:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
free_pdp_single(pdp, vm->dev);
}
-static int gen8_alloc_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+static int __gen8_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -1214,6 +1219,19 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}
+static int gen8_map_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level, u32 flags)
+{
+ int ret = __gen8_alloc_va_range(vma->vm, vma->node.start, vma->node.size);
+ if (!ret) {
+ BUG_ON(cache_level >= I915_CACHE_MAX);
+ gen8_ppgtt_insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+ cache_level, flags);
+ }
+
+ return ret;
+}
+
static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
{
free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
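gen8_map_vma above defines the contract every backend's map_vma must honor: first back [vma->node.start, vma->node.start + vma->node.size) with page tables, then, only on success, write PTEs for the object's pages. A hedged, generation-agnostic sketch (alloc_va and insert_ptes are hypothetical stand-ins for the per-generation helpers):

	static int example_map_vma(struct i915_vma *vma,
				   enum i915_cache_level cache_level, u32 flags)
	{
		int ret;

		/* 1) allocate page tables covering the node (hypothetical helper) */
		ret = alloc_va(vma->vm, vma->node.start, vma->node.size);
		if (ret)
			return ret;	/* nothing was mapped on failure */

		/* 2) write PTEs for the backing pages (hypothetical helper) */
		insert_ptes(vma->vm, vma->obj->pages, vma->node.start,
			    cache_level, flags);
		return 0;
	}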
@@ -1236,7 +1254,6 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->base.total = size;
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->enable = gen8_ppgtt_enable;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
ppgtt->scratch_pd = alloc_pt_scratch(ppgtt->base.dev);
if (IS_ERR(ppgtt->scratch_pd))
@@ -1277,7 +1294,7 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ret = gen8_alloc_va_range(&ppgtt->base, start, size);
+ ret = __gen8_alloc_va_range(&ppgtt->base, start, size);
if (ret) {
gen8_ppgtt_fini_common(ppgtt);
return ret;
@@ -1287,9 +1304,11 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
gen8_map_pagetable_range(&ppgtt->base, pd, start, size);
- ppgtt->base.allocate_va_range = NULL;
- ppgtt->base.teardown_va_range = NULL;
- ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+ BUG(); /* we need a map_vma for aliasing */
+ ppgtt->base.map_vma = NULL;
+ ppgtt->base.unmap_vma = NULL;
+
+ gen8_ppgtt_clear_range(&ppgtt->base, 0, dev_priv->gtt.base.total, true);
return 0;
}
@@ -1304,9 +1323,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ppgtt->base.allocate_va_range = gen8_alloc_va_range;
- ppgtt->base.teardown_va_range = gen8_teardown_va_range;
- ppgtt->base.clear_range = NULL;
+ ppgtt->base.map_vma = gen8_map_vma;
+ ppgtt->base.unmap_vma = gen8_unmap_vma;
return 0;
}
@@ -1677,15 +1695,17 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
-static int gen6_alloc_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+static int gen6_alloc_va_range(struct i915_vma *vma,
+ enum i915_cache_level cache_level, u32 flags)
{
DECLARE_BITMAP(new_page_tables, I915_PDES_PER_PD);
+ struct i915_address_space *vm = vma->vm;
struct drm_device *dev = vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_pagetab *pt;
+ uint32_t start = vma->node.start, length = vma->node.size;
const uint32_t start_save = start, length_save = length;
uint32_t pde, temp;
int ret;
@@ -1755,6 +1775,10 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
* table. Also required for WC mapped PTEs */
readl(dev_priv->gtt.gsm);
+ BUG_ON(cache_level >= I915_CACHE_MAX);
+ gen6_ppgtt_insert_entries(vm, vma->obj->pages, vma->node.start,
+ cache_level, flags);
+
return 0;
unwind_out:
@@ -1766,18 +1790,20 @@ unwind_out:
return ret;
}
-static void gen6_teardown_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+static void gen6_unmap_vma(struct i915_vma *vma)
{
+ struct i915_address_space *vm = vma->vm;
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
+ uint32_t start = vma->node.start, length = vma->node.size;
+ const uint32_t orig_start = start, orig_length = length;
struct i915_pagetab *pt;
uint32_t pde, temp;
gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
if (WARN(pt == ppgtt->scratch_pt,
- "Tried to teardown scratch page vm %p. pde %u: %llx-%llx\n",
+ "Tried to teardown scratch page vm %p. pde %u: %x-%x\n",
vm, pde, start, start + length))
continue;
@@ -1797,6 +1823,8 @@ static void gen6_teardown_va_range(struct i915_address_space *vm,
ppgtt->pd.page_tables[pde] = ppgtt->scratch_pt;
}
}
+
+ gen6_ppgtt_clear_range(vm, orig_start, orig_length, true);
}
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
@@ -1928,10 +1956,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
if (ret)
return ret;
- ppgtt->base.allocate_va_range = gen6_alloc_va_range;
- ppgtt->base.teardown_va_range = gen6_teardown_va_range;
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.map_vma = gen6_alloc_va_range;
+ ppgtt->base.unmap_vma = gen6_unmap_vma;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
ppgtt->base.total = I915_PDES_PER_PD * GEN6_PTES_PER_PT * PAGE_SIZE;
@@ -1977,8 +2003,6 @@ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt, boo
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, ppgtt->base.total);
- if (ppgtt->base.clear_range)
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
i915_init_vm(dev_priv, &ppgtt->base);
return 0;
@@ -2001,44 +2025,33 @@ ppgtt_bind_vma(struct i915_vma *vma,
{
int ret;
+ BUG_ON(!vma->vm->map_vma);
+
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
flags |= PTE_READ_ONLY;
- if (vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size);
-
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- return ret;
+ trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size);
- ppgtt_invalidate_tlbs(vma->vm);
- }
+ ret = vma->vm->map_vma(vma, cache_level, flags);
+ if (ret)
+ return ret;
- vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
- cache_level, flags);
+ ppgtt_invalidate_tlbs(vma->vm);
return 0;
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- if (vma->vm->clear_range)
- vma->vm->clear_range(vma->vm,
- vma->node.start,
- vma->obj->base.size,
- true);
-
- if (vma->vm->teardown_va_range) {
+ if (vma->vm->unmap_vma) {
trace_i915_va_teardown(vma->vm,
vma->node.start, vma->node.size);
- vma->vm->teardown_va_range(vma->vm,
- vma->node.start, vma->node.size);
+ vma->vm->unmap_vma(vma);
ppgtt_invalidate_tlbs(vma->vm);
- }
+ } else
+ BUG();
}
extern int intel_iommu_gfx_mapped;
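The generic PPGTT bind path now follows one fixed sequence: trace the range, call the per-VM hook, invalidate TLBs. Condensed from ppgtt_bind_vma above (a sketch, not a drop-in replacement):

	static int bind_example(struct i915_vma *vma,
				enum i915_cache_level level, u32 flags)
	{
		int ret;

		trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size);

		ret = vma->vm->map_vma(vma, level, flags);
		if (ret)
			return ret;

		ppgtt_invalidate_tlbs(vma->vm);	/* make the new PTEs visible */
		return 0;
	}

Unbind mirrors it: trace, vm->unmap_vma(vma), then the same TLB invalidation.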
@@ -2120,10 +2133,10 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
- true);
+ dev_priv->gtt.clear_range(&dev_priv->gtt,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
+ true);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -2135,10 +2148,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
- true);
+ dev_priv->gtt.clear_range(&dev_priv->gtt,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
+ true);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj,
@@ -2211,15 +2224,16 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
#endif
}
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+static void gen8_ggtt_insert_entries(struct i915_gtt *gtt,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ container_of(gtt, struct drm_i915_private, gtt);
unsigned first_entry = start >> PAGE_SHIFT;
gen8_gtt_pte_t __iomem *gtt_entries =
- (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen8_gtt_pte_t __iomem *)gtt->gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
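Since the GGTT callbacks now take a struct i915_gtt * rather than an address space, the driver-private structure is recovered with container_of() on the embedded member. The same pattern pulled out as a helper (the helper name is illustrative):

	static inline struct drm_i915_private *gtt_to_i915(struct i915_gtt *gtt)
	{
		/* 'gtt' is embedded in drm_i915_private, so container_of()
		 * subtracts the member offset to find the parent struct. */
		return container_of(gtt, struct drm_i915_private, gtt);
	}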
@@ -2257,22 +2271,23 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+static void gen6_ggtt_insert_entries(struct i915_gtt *gtt,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level, u32 flags)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ container_of(gtt, struct drm_i915_private, gtt);
unsigned first_entry = start >> PAGE_SHIFT;
gen6_gtt_pte_t __iomem *gtt_entries =
- (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen6_gtt_pte_t __iomem *)gtt->gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr;
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
+ iowrite32(gtt->base.pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
@@ -2284,7 +2299,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level, true, flags));
+ gtt->base.pte_encode(addr, level, true, flags));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2294,17 +2309,16 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+static void gen8_ggtt_clear_range(struct i915_gtt *gtt,
uint64_t start,
uint64_t length,
bool use_scratch)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(&dev_priv->gtt) - first_entry;
+ (gen8_gtt_pte_t __iomem *) gtt->gsm + first_entry;
+ const int max_entries = gtt_total_entries(gtt) - first_entry;
int i;
if (WARN(num_entries > max_entries,
@@ -2312,7 +2326,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = gen8_pte_encode(vm->scratch.addr,
+ scratch_pte = gen8_pte_encode(gtt->base.scratch.addr,
I915_CACHE_LLC,
use_scratch);
for (i = 0; i < num_entries; i++)
@@ -2354,17 +2368,16 @@ void gen8_for_every_pdpe_pde(struct i915_hw_ppgtt *ppgtt,
}
}
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+static void gen6_ggtt_clear_range(struct i915_gtt *gtt,
uint64_t start,
uint64_t length,
bool use_scratch)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(&dev_priv->gtt) - first_entry;
+ (gen6_gtt_pte_t __iomem *) gtt->gsm + first_entry;
+ const int max_entries = gtt_total_entries(gtt) - first_entry;
int i;
if (WARN(num_entries > max_entries,
@@ -2372,7 +2385,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+ scratch_pte = gtt->base.pte_encode(gtt->base.scratch.addr,
+ I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -2395,7 +2409,7 @@ static int i915_ggtt_bind_vma(struct i915_vma *vma,
return 0;
}
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
+static void i915_ggtt_clear_range(struct i915_gtt *gunused,
uint64_t start,
uint64_t length,
bool unused)
@@ -2441,9 +2455,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!obj->has_global_gtt_mapping ||
(cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, obj->pages,
- vma->node.start,
- cache_level, flags);
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ gtt->insert_entries(gtt, obj->pages,
+ vma->node.start,
+ cache_level, flags);
obj->has_global_gtt_mapping = 1;
}
}
@@ -2455,10 +2470,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
(!obj->has_aliasing_ppgtt_mapping ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base,
- vma->obj->pages,
- vma->node.start,
- cache_level, flags);
+ BUG();
+ appgtt->base.map_vma(vma, cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
@@ -2469,22 +2482,19 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_gtt *gtt = &dev_priv->gtt;
struct drm_i915_gem_object *obj = vma->obj;
+ BUG_ON(vma->vm != &gtt->base);
+
if (obj->has_global_gtt_mapping) {
- vma->vm->clear_range(vma->vm,
- vma->node.start,
- obj->base.size,
- true);
+ gtt->clear_range(gtt, vma->node.start, obj->base.size, true);
obj->has_global_gtt_mapping = 0;
}
if (obj->has_aliasing_ppgtt_mapping) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.clear_range(&appgtt->base,
- vma->node.start,
- obj->base.size,
- true);
+ appgtt->base.unmap_vma(vma);
obj->has_aliasing_ppgtt_mapping = 0;
}
}
@@ -2537,7 +2547,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* of the aperture.
*/
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ struct i915_address_space *ggtt_vm = &gtt->base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -2570,12 +2581,12 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt_vm->clear_range(ggtt_vm, hole_start,
- hole_end - hole_start, true);
+ gtt->clear_range(gtt, hole_start,
+ hole_end - hole_start, true);
}
/* And finally clear the reserved guard page */
- ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+ gtt->clear_range(gtt, end - PAGE_SIZE, PAGE_SIZE, true);
}
void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -2806,8 +2817,8 @@ static int gen8_gmch_probe(struct drm_device *dev,
ret = ggtt_probe_common(dev, gtt_size);
- dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+ dev_priv->gtt.clear_range = gen8_ggtt_clear_range;
+ dev_priv->gtt.insert_entries = gen8_ggtt_insert_entries;
return ret;
}
@@ -2846,8 +2857,8 @@ static int gen6_gmch_probe(struct drm_device *dev,
ret = ggtt_probe_common(dev, gtt_size);
- dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+ dev_priv->gtt.clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.insert_entries = gen6_ggtt_insert_entries;
return ret;
}
@@ -2883,7 +2894,7 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.clear_range = i915_ggtt_clear_range;
if (unlikely(dev_priv->gtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f2fbe3978408..7e3ac3597104 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -261,20 +261,8 @@ struct i915_address_space {
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
- int (*allocate_va_range)(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length);
- void (*teardown_va_range)(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length);
- void (*clear_range)(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length,
- bool use_scratch);
- void (*insert_entries)(struct i915_address_space *vm,
- struct sg_table *st,
- uint64_t start,
- enum i915_cache_level cache_level, u32 flags);
+ int (*map_vma)(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags);
+ void (*unmap_vma)(struct i915_vma *vma);
void (*cleanup)(struct i915_address_space *vm);
};
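With the four range hooks collapsed into two, wiring a full PPGTT backend reduces to a pair of assignments at init time, exactly as gen8_ppgtt_init does above:

	ppgtt->base.map_vma = gen8_map_vma;
	ppgtt->base.unmap_vma = gen8_unmap_vma;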
@@ -330,6 +318,14 @@ struct i915_gtt {
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
+ void (*insert_entries)(struct i915_gtt *gtt,
+ struct sg_table *st,
+ uint64_t start,
+ enum i915_cache_level cache_level, u32 flags);
+ void (*clear_range)(struct i915_gtt *gtt,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch);
};
/* For each pde iterates over every pde from start until start + length.
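The gen6_for_each_pde/gen8_for_each_pdpe/gen8_for_each_pml4e iterators used throughout the patch share this shape: each pass binds the entry pointer for one page-directory slot overlapping the range, while the macro advances start and trims length. Typical usage, as in gen6_unmap_vma above:

	struct i915_pagetab *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		/* pt is the page table for slot 'pde'; act on the part of
		 * [start, start + length) that falls within it. */
	}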