Diffstat (limited to 'drivers/gpu/drm/i915/gem')
28 files changed, 536 insertions, 353 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 4fd38101bb56..4d2f40cf237b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -72,6 +72,8 @@ #include "gt/intel_context_param.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_user.h" +#include "gt/intel_execlists_submission.h" /* virtual_engine */ +#include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "i915_gem_context.h" @@ -333,13 +335,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) return e; } -static void i915_gem_context_free(struct i915_gem_context *ctx) +void i915_gem_context_release(struct kref *ref) { - GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); - spin_lock(&ctx->i915->gem.contexts.lock); - list_del(&ctx->link); - spin_unlock(&ctx->i915->gem.contexts.lock); + trace_i915_context_free(ctx); + GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); @@ -353,37 +354,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) kfree_rcu(ctx, rcu); } -static void contexts_free_all(struct llist_node *list) -{ - struct i915_gem_context *ctx, *cn; - - llist_for_each_entry_safe(ctx, cn, list, free_link) - i915_gem_context_free(ctx); -} - -static void contexts_flush_free(struct i915_gem_contexts *gc) -{ - contexts_free_all(llist_del_all(&gc->free_list)); -} - -static void contexts_free_worker(struct work_struct *work) -{ - struct i915_gem_contexts *gc = - container_of(work, typeof(*gc), free_work); - - contexts_flush_free(gc); -} - -void i915_gem_context_release(struct kref *ref) -{ - struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); - struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; - - trace_i915_context_free(ctx); - if (llist_add(&ctx->free_link, &gc->free_list)) - schedule_work(&gc->free_work); -} - static inline struct i915_gem_engines * __context_engines_static(const struct i915_gem_context *ctx) { @@ -438,7 +408,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active) } if (i915_request_is_active(rq)) { - if (!i915_request_completed(rq)) + if (!__i915_request_is_complete(rq)) *active = locked; ret = true; } @@ -453,6 +423,9 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) struct intel_engine_cs *engine = NULL; struct i915_request *rq; + if (intel_context_has_inflight(ce)) + return intel_context_inflight(ce); + if (!ce->timeline) return NULL; @@ -632,6 +605,10 @@ static void context_close(struct i915_gem_context *ctx) */ lut_close(ctx); + spin_lock(&ctx->i915->gem.contexts.lock); + list_del(&ctx->link); + spin_unlock(&ctx->i915->gem.contexts.lock); + mutex_unlock(&ctx->mutex); /* @@ -740,7 +717,8 @@ err_free: } static inline struct i915_gem_engines * -__context_engines_await(const struct i915_gem_context *ctx) +__context_engines_await(const struct i915_gem_context *ctx, + bool *user_engines) { struct i915_gem_engines *engines; @@ -749,6 +727,10 @@ __context_engines_await(const struct i915_gem_context *ctx) engines = rcu_dereference(ctx->engines); GEM_BUG_ON(!engines); + if (user_engines) + *user_engines = i915_gem_context_user_engines(ctx); + + /* successful await => strong mb */ if (unlikely(!i915_sw_fence_await(&engines->fence))) continue; @@ -772,7 +754,7 @@ context_apply_all(struct i915_gem_context *ctx, struct intel_context *ce; int err 
= 0; - e = __context_engines_await(ctx); + e = __context_engines_await(ctx, NULL); for_each_gem_engine(ce, e, it) { err = fn(ce, data); if (err) @@ -849,9 +831,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) !HAS_EXECLISTS(i915)) return ERR_PTR(-EINVAL); - /* Reap the stale contexts */ - contexts_flush_free(&i915->gem.contexts); - ctx = __create_context(i915); if (IS_ERR(ctx)) return ctx; @@ -896,23 +875,11 @@ static void init_contexts(struct i915_gem_contexts *gc) { spin_lock_init(&gc->lock); INIT_LIST_HEAD(&gc->list); - - INIT_WORK(&gc->free_work, contexts_free_worker); - init_llist_head(&gc->free_list); } void i915_gem_init__contexts(struct drm_i915_private *i915) { init_contexts(&i915->gem.contexts); - drm_dbg(&i915->drm, "%s context support initialized\n", - DRIVER_CAPS(i915)->has_logical_contexts ? - "logical" : "fake"); -} - -void i915_gem_driver_release__contexts(struct drm_i915_private *i915) -{ - flush_work(&i915->gem.contexts.free_work); - rcu_barrier(); /* and flush the left over RCU frees */ } static int gem_context_register(struct i915_gem_context *ctx, @@ -988,7 +955,6 @@ err: void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_private *i915 = file_priv->dev_priv; struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx; @@ -1000,8 +966,6 @@ void i915_gem_context_close(struct drm_file *file) xa_for_each(&file_priv->vm_xa, idx, vm) i915_vm_put(vm); xa_destroy(&file_priv->vm_xa); - - contexts_flush_free(&i915->gem.contexts); } int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, @@ -1116,7 +1080,7 @@ static int context_barrier_task(struct i915_gem_context *ctx, return err; } - e = __context_engines_await(ctx); + e = __context_engines_await(ctx, NULL); if (!e) { i915_active_release(&cb->base); return -ENOENT; @@ -1879,27 +1843,6 @@ replace: return 0; } -static struct i915_gem_engines * -__copy_engines(struct i915_gem_engines *e) -{ - struct i915_gem_engines *copy; - unsigned int n; - - copy = alloc_engines(e->num_engines); - if (!copy) - return ERR_PTR(-ENOMEM); - - for (n = 0; n < e->num_engines; n++) { - if (e->engines[n]) - copy->engines[n] = intel_context_get(e->engines[n]); - else - copy->engines[n] = NULL; - } - copy->num_engines = n; - - return copy; -} - static int get_engines(struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) @@ -1907,19 +1850,17 @@ get_engines(struct i915_gem_context *ctx, struct i915_context_param_engines __user *user; struct i915_gem_engines *e; size_t n, count, size; + bool user_engines; int err = 0; - err = mutex_lock_interruptible(&ctx->engines_mutex); - if (err) - return err; + e = __context_engines_await(ctx, &user_engines); + if (!e) + return -ENOENT; - e = NULL; - if (i915_gem_context_user_engines(ctx)) - e = __copy_engines(i915_gem_context_engines(ctx)); - mutex_unlock(&ctx->engines_mutex); - if (IS_ERR_OR_NULL(e)) { + if (!user_engines) { + i915_sw_fence_complete(&e->fence); args->size = 0; - return PTR_ERR_OR_ZERO(e); + return 0; } count = e->num_engines; @@ -1970,7 +1911,7 @@ get_engines(struct i915_gem_context *ctx, args->size = size; err_free: - free_engines(e); + i915_sw_fence_complete(&e->fence); return err; } @@ -2136,11 +2077,14 @@ static int copy_ring_size(struct intel_context *dst, static int clone_engines(struct i915_gem_context *dst, struct i915_gem_context *src) { - struct i915_gem_engines *e = i915_gem_context_lock_engines(src); - struct i915_gem_engines *clone; + 
struct i915_gem_engines *clone, *e; bool user_engines; unsigned long n; + e = __context_engines_await(src, &user_engines); + if (!e) + return -ENOENT; + clone = alloc_engines(e->num_engines); if (!clone) goto err_unlock; @@ -2182,9 +2126,7 @@ static int clone_engines(struct i915_gem_context *dst, } } clone->num_engines = n; - - user_engines = i915_gem_context_user_engines(src); - i915_gem_context_unlock_engines(src); + i915_sw_fence_complete(&e->fence); /* Serialised by constructor */ engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1)); @@ -2195,7 +2137,7 @@ static int clone_engines(struct i915_gem_context *dst, return 0; err_unlock: - i915_gem_context_unlock_engines(src); + i915_sw_fence_complete(&e->fence); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index a133f92bbedb..b5c908f3f4f2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -110,7 +110,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx) /* i915_gem_context.c */ void i915_gem_init__contexts(struct drm_i915_private *i915); -void i915_gem_driver_release__contexts(struct drm_i915_private *i915); int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index ae14ca24a11f..1449f54924e0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -108,7 +108,6 @@ struct i915_gem_context { /** link: place with &drm_i915_private.context_list */ struct list_head link; - struct llist_node free_link; /** * @ref: reference count diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c new file mode 100644 index 000000000000..45d60e3d98e3 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "gem/i915_gem_ioctls.h" +#include "gem/i915_gem_region.h" + +#include "i915_drv.h" + +static int +i915_gem_create(struct drm_file *file, + struct intel_memory_region *mr, + u64 *size_p, + u32 *handle_p) +{ + struct drm_i915_gem_object *obj; + u32 handle; + u64 size; + int ret; + + GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); + size = round_up(*size_p, mr->min_page_size); + if (size == 0) + return -EINVAL; + + /* For most of the ABI (e.g. 
mmap) we think in system pages */ + GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); + + /* Allocate the new object */ + obj = i915_gem_object_create_region(mr, size, 0); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + GEM_BUG_ON(size != obj->base.size); + + ret = drm_gem_handle_create(file, &obj->base, &handle); + /* drop reference from allocate - handle holds it now */ + i915_gem_object_put(obj); + if (ret) + return ret; + + *handle_p = handle; + *size_p = size; + return 0; +} + +int +i915_gem_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + enum intel_memory_type mem_type; + int cpp = DIV_ROUND_UP(args->bpp, 8); + u32 format; + + switch (cpp) { + case 1: + format = DRM_FORMAT_C8; + break; + case 2: + format = DRM_FORMAT_RGB565; + break; + case 4: + format = DRM_FORMAT_XRGB8888; + break; + default: + return -EINVAL; + } + + /* have to work out size/pitch and return them */ + args->pitch = ALIGN(args->width * cpp, 64); + + /* align stride to page size so that we can remap */ + if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format, + DRM_FORMAT_MOD_LINEAR)) + args->pitch = ALIGN(args->pitch, 4096); + + if (args->pitch < args->width) + return -EINVAL; + + args->size = mul_u32_u32(args->pitch, args->height); + + mem_type = INTEL_MEMORY_SYSTEM; + if (HAS_LMEM(to_i915(dev))) + mem_type = INTEL_MEMORY_LOCAL; + + return i915_gem_create(file, + intel_memory_region_by_type(to_i915(dev), + mem_type), + &args->size, &args->handle); +} + +/** + * Creates a new mm object and returns a handle to it. + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer + */ +int +i915_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_create *args = data; + + i915_gem_flush_free_objects(i915); + + return i915_gem_create(file, + intel_memory_region_by_type(i915, + INTEL_MEMORY_SYSTEM), + &args->size, &args->handle); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 3d435bfff764..36f54cedaaeb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -5,6 +5,7 @@ */ #include "display/intel_frontbuffer.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_clflush.h" @@ -15,13 +16,58 @@ #include "i915_gem_lmem.h" #include "i915_gem_mman.h" +static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + return !(obj->cache_level == I915_CACHE_NONE || + obj->cache_level == I915_CACHE_WT); +} + +static void +flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) +{ + struct i915_vma *vma; + + assert_object_held(obj); + + if (!(obj->write_domain & flush_domains)) + return; + + switch (obj->write_domain) { + case I915_GEM_DOMAIN_GTT: + spin_lock(&obj->vma.lock); + for_each_ggtt_vma(vma, obj) { + if (i915_vma_unset_ggtt_write(vma)) + intel_gt_flush_ggtt_writes(vma->vm->gt); + } + spin_unlock(&obj->vma.lock); + + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); + break; + + case I915_GEM_DOMAIN_WC: + wmb(); + break; + + case I915_GEM_DOMAIN_CPU: + i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); + break; + + case I915_GEM_DOMAIN_RENDER: + if (gpu_write_needs_clflush(obj)) + obj->cache_dirty = true; + break; + } + + obj->write_domain = 0; +} + static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) { /* * We manually flush the CPU domain so that we can override 
and * force the flush for the display, and perform it asyncrhonously. */ - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); if (obj->cache_dirty) i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); obj->write_domain = 0; @@ -80,7 +126,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); + flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the @@ -141,7 +187,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); + flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the @@ -370,6 +416,7 @@ retry: } vma->display_alignment = max_t(u64, vma->display_alignment, alignment); + i915_vma_mark_scanout(vma); i915_gem_object_flush_if_display_locked(obj); @@ -409,7 +456,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* Flush the CPU cache if it's still invalid. */ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { @@ -574,7 +621,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, goto out; } - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* If we're not in the cpu read domain, set ourself into the gtt * read domain and manually flush cachelines (if required). This @@ -625,7 +672,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, goto out; } - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* If we're not in the cpu write domain, set ourself into the * gtt write domain and manually flush cachelines (as required). diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index bd3046e5a934..d70ca36f74f6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -15,6 +15,7 @@ #include "gem/i915_gem_ioctls.h" #include "gt/intel_context.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_gt_pm.h" @@ -534,8 +535,6 @@ eb_add_vma(struct i915_execbuffer *eb, struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct eb_vma *ev = &eb->vma[i]; - GEM_BUG_ON(i915_vma_is_closed(vma)); - ev->vma = vma; ev->exec = entry; ev->flags = entry->flags; @@ -1277,7 +1276,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, int err; if (!pool) { - pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE); + pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE, + cache->has_llc ? + I915_MAP_WB : + I915_MAP_WC); if (IS_ERR(pool)) return PTR_ERR(pool); } @@ -1287,10 +1289,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, if (err) goto err_pool; - cmd = i915_gem_object_pin_map(pool->obj, - cache->has_llc ? 
- I915_MAP_FORCE_WB : - I915_MAP_FORCE_WC); + cmd = i915_gem_object_pin_map(pool->obj, pool->type); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err_pool; @@ -2459,7 +2458,8 @@ static int eb_parse(struct i915_execbuffer *eb) return -EINVAL; if (!pool) { - pool = intel_gt_get_buffer_pool(eb->engine->gt, len); + pool = intel_gt_get_buffer_pool(eb->engine->gt, len, + I915_MAP_WB); if (IS_ERR(pool)) return PTR_ERR(pool); eb->batch_pool = pool; @@ -2535,6 +2535,9 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) { int err; + if (intel_context_nopreempt(eb->context)) + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); + err = eb_move_to_gpu(eb); if (err) return err; @@ -2575,15 +2578,12 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) return err; } - if (intel_context_nopreempt(eb->context)) - __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); - return 0; } static int num_vcs_engines(const struct drm_i915_private *i915) { - return hweight64(VDBOX_MASK(&i915->gt)); + return hweight_long(VDBOX_MASK(&i915->gt)); } /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index 932ee21e6609..194f35342710 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -31,18 +31,13 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915, size, flags); } -struct drm_i915_gem_object * -__i915_gem_lmem_object_create(struct intel_memory_region *mem, - resource_size_t size, - unsigned int flags) +int __i915_gem_lmem_object_init(struct intel_memory_region *mem, + struct drm_i915_gem_object *obj, + resource_size_t size, + unsigned int flags) { static struct lock_class_key lock_class; struct drm_i915_private *i915 = mem->i915; - struct drm_i915_gem_object *obj; - - obj = i915_gem_object_alloc(); - if (!obj) - return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class); @@ -53,5 +48,5 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem, i915_gem_object_init_memory_region(obj, mem, flags); - return obj; + return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index fc3f15580fe3..036d53c01de9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -21,9 +21,9 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915, resource_size_t size, unsigned int flags); -struct drm_i915_gem_object * -__i915_gem_lmem_object_create(struct intel_memory_region *mem, - resource_size_t size, - unsigned int flags); +int __i915_gem_lmem_object_init(struct intel_memory_region *mem, + struct drm_i915_gem_object *obj, + resource_size_t size, + unsigned int flags); #endif /* !__I915_GEM_LMEM_H */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 00d24000b5e8..70f798405f7f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -25,13 +25,13 @@ #include <linux/sched/mm.h> #include "display/intel_frontbuffer.h" -#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" #include "i915_globals.h" +#include "i915_memcpy.h" #include "i915_trace.h" static struct i915_global_object { @@ -313,52 +313,6 @@ static void i915_gem_free_object(struct drm_gem_object 
*gem_obj) queue_work(i915->wq, &i915->mm.free_work); } -static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) -{ - return !(obj->cache_level == I915_CACHE_NONE || - obj->cache_level == I915_CACHE_WT); -} - -void -i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, - unsigned int flush_domains) -{ - struct i915_vma *vma; - - assert_object_held(obj); - - if (!(obj->write_domain & flush_domains)) - return; - - switch (obj->write_domain) { - case I915_GEM_DOMAIN_GTT: - spin_lock(&obj->vma.lock); - for_each_ggtt_vma(vma, obj) { - if (i915_vma_unset_ggtt_write(vma)) - intel_gt_flush_ggtt_writes(vma->vm->gt); - } - spin_unlock(&obj->vma.lock); - - i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); - break; - - case I915_GEM_DOMAIN_WC: - wmb(); - break; - - case I915_GEM_DOMAIN_CPU: - i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); - break; - - case I915_GEM_DOMAIN_RENDER: - if (gpu_write_needs_clflush(obj)) - obj->cache_dirty = true; - break; - } - - obj->write_domain = 0; -} - void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, enum fb_op_origin origin) { @@ -383,6 +337,70 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, } } +static void +i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) +{ + void *src_map; + void *src_ptr; + + src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT)); + + src_ptr = src_map + offset_in_page(offset); + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) + drm_clflush_virt_range(src_ptr, size); + memcpy(dst, src_ptr, size); + + kunmap_atomic(src_map); +} + +static void +i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) +{ + void __iomem *src_map; + void __iomem *src_ptr; + dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT); + + src_map = io_mapping_map_wc(&obj->mm.region->iomap, + dma - obj->mm.region->region.start, + PAGE_SIZE); + + src_ptr = src_map + offset_in_page(offset); + if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size)) + memcpy_fromio(dst, src_ptr, size); + + io_mapping_unmap(src_map); +} + +/** + * i915_gem_object_read_from_page - read data from the page of a GEM object + * @obj: GEM object to read from + * @offset: offset within the object + * @dst: buffer to store the read data + * @size: size to read + * + * Reads data from @obj at the specified offset. The requested region to read + * from can't cross a page boundary. The caller must ensure that @obj pages + * are pinned and that @obj is synced wrt. any related writes. + * + * Returns 0 on success or -ENODEV if the type of @obj's backing store is + * unsupported. 
+ */ +int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) +{ + GEM_BUG_ON(offset >= obj->base.size); + GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size); + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + if (i915_gem_object_has_struct_page(obj)) + i915_gem_object_read_from_page_kmap(obj, offset, dst, size); + else if (i915_gem_object_has_iomem(obj)) + i915_gem_object_read_from_page_iomap(obj, offset, dst, size); + else + return -ENODEV; + + return 0; +} + void i915_gem_init__objects(struct drm_i915_private *i915) { INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 4556afe18f16..d0ae834d787a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -188,6 +188,24 @@ i915_gem_object_set_volatile(struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj) +{ + return test_bit(I915_TILING_QUIRK_BIT, &obj->flags); +} + +static inline void +i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj) +{ + set_bit(I915_TILING_QUIRK_BIT, &obj->flags); +} + +static inline void +i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj) +{ + clear_bit(I915_TILING_QUIRK_BIT, &obj->flags); +} + +static inline bool i915_gem_object_type_has(const struct drm_i915_gem_object *obj, unsigned long flags) { @@ -201,6 +219,12 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj) +{ + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM); +} + +static inline bool i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) { return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE); @@ -384,14 +408,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_object_truncate(struct drm_i915_gem_object *obj); void i915_gem_object_writeback(struct drm_i915_gem_object *obj); -enum i915_map_type { - I915_MAP_WB = 0, - I915_MAP_WC, -#define I915_MAP_OVERRIDE BIT(31) - I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, - I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, -}; - /** * i915_gem_object_pin_map - return a contiguous mapping of the entire object * @obj: the object to map into kernel address space @@ -435,10 +451,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) void __i915_gem_object_release_map(struct drm_i915_gem_object *obj); -void -i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, - unsigned int flush_domains); - int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, unsigned int *needs_clflush); int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, @@ -511,6 +523,9 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj) obj->cache_dirty = true; } +void i915_gem_fence_wait_priority(struct dma_fence *fence, + const struct i915_sched_attr *attr); + int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, long timeout); @@ -539,4 +554,8 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, __i915_gem_object_invalidate_frontbuffer(obj, origin); } +int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size); + +bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj); + 
#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index aee7ad3cc3c6..d6dac21fce0b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_ring.h" @@ -34,7 +35,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, count = div_u64(round_up(vma->size, block_size), block_size); size = (1 + 8 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); - pool = intel_gt_get_buffer_pool(ce->engine->gt, size); + pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC); if (IS_ERR(pool)) { err = PTR_ERR(pool); goto out_pm; @@ -54,7 +55,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, if (unlikely(err)) goto out_put; - cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); + cmd = i915_gem_object_pin_map(pool->obj, pool->type); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto out_unpin; @@ -256,7 +257,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, count = div_u64(round_up(dst->size, block_size), block_size); size = (1 + 11 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); - pool = intel_gt_get_buffer_pool(ce->engine->gt, size); + pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC); if (IS_ERR(pool)) { err = PTR_ERR(pool); goto out_pm; @@ -276,7 +277,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, if (unlikely(err)) goto out_put; - cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); + cmd = i915_gem_object_pin_map(pool->obj, pool->type); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto out_unpin; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index e2d9b7e1e152..0438e00d4ca7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -67,6 +67,14 @@ struct drm_i915_gem_object_ops { const char *name; /* friendly name for debug, e.g. lockdep classes */ }; +enum i915_map_type { + I915_MAP_WB = 0, + I915_MAP_WC, +#define I915_MAP_OVERRIDE BIT(31) + I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, + I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, +}; + enum i915_mmap_type { I915_MMAP_TYPE_GTT = 0, I915_MMAP_TYPE_WC, @@ -142,8 +150,6 @@ struct drm_i915_gem_object { */ struct list_head obj_link; - /** Stolen memory for this object, instead of being backed by shmem. */ - struct drm_mm_node *stolen; union { struct rcu_head rcu; struct llist_node freed; @@ -167,6 +173,7 @@ struct drm_i915_gem_object { #define I915_BO_ALLOC_VOLATILE BIT(1) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) #define I915_BO_READONLY BIT(2) +#define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */ /* * Is the object to be mapped as read-only to the GPU @@ -275,12 +282,6 @@ struct drm_i915_gem_object { * pages were last acquired. */ bool dirty:1; - - /** - * This is set if the object has been pinned due to unknown - * swizzling. - */ - bool quirked:1; } mm; /** Record of address bit 17 of each page at last unbind. 
*/ @@ -295,6 +296,8 @@ struct drm_i915_gem_object { struct work_struct *work; } userptr; + struct drm_mm_node *stolen; + unsigned long scratch; u64 encode; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index e2c7b2a7895f..43028f3539a6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -16,6 +16,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); unsigned long supported = INTEL_INFO(i915)->page_sizes; + bool shrinkable; int i; lockdep_assert_held(&obj->mm.lock); @@ -38,13 +39,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, obj->mm.pages = pages; - if (i915_gem_object_is_tiled(obj) && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { - GEM_BUG_ON(obj->mm.quirked); - __i915_gem_object_pin_pages(obj); - obj->mm.quirked = true; - } - GEM_BUG_ON(!sg_page_sizes); obj->mm.page_sizes.phys = sg_page_sizes; @@ -63,7 +57,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, } GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); - if (i915_gem_object_is_shrinkable(obj)) { + shrinkable = i915_gem_object_is_shrinkable(obj); + + if (i915_gem_object_is_tiled(obj) && + i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); + i915_gem_object_set_tiling_quirk(obj); + shrinkable = false; + } + + if (shrinkable) { struct list_head *list; unsigned long flags; @@ -238,7 +241,7 @@ unlock: /* The 'mapping' part of i915_gem_object_pin_map() below */ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, - enum i915_map_type type) + enum i915_map_type type) { unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i; struct page *stack[32], **pages = stack, *page; @@ -281,7 +284,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, /* Too big for stack -- allocate temporary array instead */ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) - return NULL; + return ERR_PTR(-ENOMEM); } i = 0; @@ -290,11 +293,12 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, vaddr = vmap(pages, n_pages, 0, pgprot); if (pages != stack) kvfree(pages); - return vaddr; + + return vaddr ?: ERR_PTR(-ENOMEM); } static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, - enum i915_map_type type) + enum i915_map_type type) { resource_size_t iomap = obj->mm.region->iomap.base - obj->mm.region->region.start; @@ -305,13 +309,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, void *vaddr; if (type != I915_MAP_WC) - return NULL; + return ERR_PTR(-ENODEV); if (n_pfn > ARRAY_SIZE(stack)) { /* Too big for stack -- allocate temporary array instead */ pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL); if (!pfns) - return NULL; + return ERR_PTR(-ENOMEM); } i = 0; @@ -320,7 +324,8 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO)); if (pfns != stack) kvfree(pfns); - return vaddr; + + return vaddr ?: ERR_PTR(-ENOMEM); } /* get, pin, and map the pages of the object into kernel space */ @@ -349,8 +354,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); err = ____i915_gem_object_get_pages(obj); - if (err) - goto err_unlock; + if (err) { + ptr = ERR_PTR(err); + goto out_unlock; + } smp_mb__before_atomic(); } @@ -362,7 
+369,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, ptr = page_unpack_bits(obj->mm.mapping, &has_type); if (ptr && has_type != type) { if (pinned) { - err = -EBUSY; + ptr = ERR_PTR(-EBUSY); goto err_unpin; } @@ -374,15 +381,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, if (!ptr) { if (GEM_WARN_ON(type == I915_MAP_WC && !static_cpu_has(X86_FEATURE_PAT))) - ptr = NULL; + ptr = ERR_PTR(-ENODEV); else if (i915_gem_object_has_struct_page(obj)) ptr = i915_gem_object_map_page(obj, type); else ptr = i915_gem_object_map_pfn(obj, type); - if (!ptr) { - err = -ENOMEM; + if (IS_ERR(ptr)) goto err_unpin; - } obj->mm.mapping = page_pack_bits(ptr, type); } @@ -393,8 +398,6 @@ out_unlock: err_unpin: atomic_dec(&obj->mm.pages_pin_count); -err_unlock: - ptr = ERR_PTR(err); goto out_unlock; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 3a4dfe2ef1da..3c0b157e2a35 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -213,7 +213,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) if (obj->ops == &i915_gem_phys_ops) return 0; - if (obj->ops != &i915_gem_shmem_ops) + if (!i915_gem_object_is_shmem(obj)) return -EINVAL; err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); @@ -227,7 +227,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) goto err_unlock; } - if (obj->mm.quirked) { + if (i915_gem_object_has_tiling_quirk(obj)) { err = -EFAULT; goto err_unlock; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 40d3e40500fa..215766cc22bf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -11,6 +11,13 @@ #include "i915_drv.h" +#if defined(CONFIG_X86) +#include <asm/smp.h> +#else +#define wbinvd_on_all_cpus() \ + pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__) +#endif + void i915_gem_suspend(struct drm_i915_private *i915) { GEM_TRACE("%s\n", dev_name(i915->drm.dev)); @@ -32,13 +39,6 @@ void i915_gem_suspend(struct drm_i915_private *i915) i915_gem_drain_freed_objects(i915); } -static struct drm_i915_gem_object *first_mm_object(struct list_head *list) -{ - return list_first_entry_or_null(list, - struct drm_i915_gem_object, - mm.link); -} - void i915_gem_suspend_late(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; @@ -48,6 +48,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) NULL }, **phase; unsigned long flags; + bool flush = false; /* * Neither the BIOS, ourselves or any other kernel @@ -73,29 +74,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) spin_lock_irqsave(&i915->mm.obj_lock, flags); for (phase = phases; *phase; phase++) { - LIST_HEAD(keep); - - while ((obj = first_mm_object(*phase))) { - list_move_tail(&obj->mm.link, &keep); - - /* Beware the background _i915_gem_free_objects */ - if (!kref_get_unless_zero(&obj->base.refcount)) - continue; - - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - - i915_gem_object_lock(obj, NULL); - drm_WARN_ON(&i915->drm, - i915_gem_object_set_to_gtt_domain(obj, false)); - i915_gem_object_unlock(obj); - i915_gem_object_put(obj); - - spin_lock_irqsave(&i915->mm.obj_lock, flags); + list_for_each_entry(obj, *phase, mm.link) { + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) + flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0; + __start_cpu_write(obj); /* presume auto-hibernate */ } - 
- list_splice_tail(&keep, *phase); } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + if (flush) + wbinvd_on_all_cpus(); } void i915_gem_resume(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index 1515384d7e0e..3e3dad22a683 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) { + const u64 max_segment = i915_sg_segment_size(); struct intel_memory_region *mem = obj->mm.region; struct list_head *blocks = &obj->mm.blocks; resource_size_t size = obj->base.size; @@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) if (!st) return -ENOMEM; - if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) { + if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) { kfree(st); return -ENOMEM; } @@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) i915_buddy_block_size(&mem->mm, block)); offset = i915_buddy_block_offset(block); - GEM_BUG_ON(overflows_type(block_size, sg->length)); + while (block_size) { + u64 len; - if (offset != prev_end || - add_overflows_t(typeof(sg->length), sg->length, block_size)) { - if (st->nents) { - sg_page_sizes |= sg->length; - sg = __sg_next(sg); + if (offset != prev_end || sg->length >= max_segment) { + if (st->nents) { + sg_page_sizes |= sg->length; + sg = __sg_next(sg); + } + + sg_dma_address(sg) = mem->region.start + offset; + sg_dma_len(sg) = 0; + sg->length = 0; + st->nents++; } - sg_dma_address(sg) = mem->region.start + offset; - sg_dma_len(sg) = block_size; + len = min(block_size, max_segment - sg->length); + sg->length += len; + sg_dma_len(sg) += len; - sg->length = block_size; + offset += len; + block_size -= len; - st->nents++; - } else { - sg->length += block_size; - sg_dma_len(sg) += block_size; + prev_end = offset; } - - prev_end = offset + block_size; } sg_page_sizes |= sg->length; @@ -139,6 +143,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem, unsigned int flags) { struct drm_i915_gem_object *obj; + int err; /* * NB: Our use of resource_size_t for the size stems from using struct @@ -169,9 +174,18 @@ i915_gem_object_create_region(struct intel_memory_region *mem, if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = mem->ops->create_object(mem, size, flags); - if (!IS_ERR(obj)) - trace_i915_gem_object_create(obj); + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + err = mem->ops->init_object(mem, obj, size, flags); + if (err) + goto err_object_free; + + trace_i915_gem_object_create(obj); return obj; + +err_object_free: + i915_gem_object_free(obj); + return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 75e8b71c18b9..cf83c208688c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -464,26 +464,21 @@ static int __create_shmem(struct drm_i915_private *i915, return 0; } -static struct drm_i915_gem_object * -create_shmem(struct intel_memory_region *mem, - resource_size_t size, - unsigned int flags) +static int shmem_object_init(struct intel_memory_region *mem, + struct drm_i915_gem_object *obj, + resource_size_t size, + unsigned int flags) { static struct lock_class_key lock_class; struct drm_i915_private *i915 = 
mem->i915; - struct drm_i915_gem_object *obj; struct address_space *mapping; unsigned int cache_level; gfp_t mask; int ret; - obj = i915_gem_object_alloc(); - if (!obj) - return ERR_PTR(-ENOMEM); - ret = __create_shmem(i915, &obj->base, size); if (ret) - goto fail; + return ret; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; if (IS_I965GM(i915) || IS_I965G(i915)) { @@ -522,11 +517,7 @@ create_shmem(struct intel_memory_region *mem, i915_gem_object_init_memory_region(obj, mem, 0); - return obj; - -fail: - i915_gem_object_free(obj); - return ERR_PTR(ret); + return 0; } struct drm_i915_gem_object * @@ -611,7 +602,7 @@ static void release_shmem(struct intel_memory_region *mem) static const struct intel_memory_region_ops shmem_region_ops = { .init = init_shmem, .release = release_shmem, - .create_object = create_shmem, + .init_object = shmem_object_init, }; struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915) @@ -621,3 +612,8 @@ struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915) PAGE_SIZE, 0, &shmem_region_ops); } + +bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj) +{ + return obj->ops == &i915_gem_shmem_ops; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index dc8f052a0ffe..c2dba1cd9532 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -15,6 +15,7 @@ #include "gt/intel_gt_requests.h" +#include "dma_resv_utils.h" #include "i915_trace.h" static bool swap_available(void) @@ -209,6 +210,8 @@ i915_gem_shrink(struct drm_i915_private *i915, mutex_unlock(&obj->mm.lock); } + dma_resv_prune(obj->base.resv); + scanned += obj->base.size >> PAGE_SHIFT; i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 29bffc6afcc1..551935348ad8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -608,11 +608,10 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); GEM_BUG_ON(!stolen); - - i915_gem_object_release_memory_region(obj); - i915_gem_stolen_remove_node(i915, stolen); kfree(stolen); + + i915_gem_object_release_memory_region(obj); } static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { @@ -622,18 +621,13 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { .release = i915_gem_object_release_stolen, }; -static struct drm_i915_gem_object * -__i915_gem_object_create_stolen(struct intel_memory_region *mem, - struct drm_mm_node *stolen) +static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, + struct drm_i915_gem_object *obj, + struct drm_mm_node *stolen) { static struct lock_class_key lock_class; - struct drm_i915_gem_object *obj; unsigned int cache_level; - int err = -ENOMEM; - - obj = i915_gem_object_alloc(); - if (!obj) - goto err; + int err; drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class); @@ -645,55 +639,47 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem, err = i915_gem_object_pin_pages(obj); if (err) - goto cleanup; + return err; i915_gem_object_init_memory_region(obj, mem, 0); - return obj; - -cleanup: - i915_gem_object_free(obj); -err: - return ERR_PTR(err); + return 0; } -static struct drm_i915_gem_object * 
-_i915_gem_object_create_stolen(struct intel_memory_region *mem, - resource_size_t size, - unsigned int flags) +static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, + struct drm_i915_gem_object *obj, + resource_size_t size, + unsigned int flags) { struct drm_i915_private *i915 = mem->i915; - struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; if (!drm_mm_initialized(&i915->mm.stolen)) - return ERR_PTR(-ENODEV); + return -ENODEV; if (size == 0) - return ERR_PTR(-EINVAL); + return -EINVAL; stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); if (!stolen) - return ERR_PTR(-ENOMEM); + return -ENOMEM; ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096); - if (ret) { - obj = ERR_PTR(ret); + if (ret) goto err_free; - } - obj = __i915_gem_object_create_stolen(mem, stolen); - if (IS_ERR(obj)) + ret = __i915_gem_object_create_stolen(mem, obj, stolen); + if (ret) goto err_remove; - return obj; + return 0; err_remove: i915_gem_stolen_remove_node(i915, stolen); err_free: kfree(stolen); - return obj; + return ret; } struct drm_i915_gem_object * @@ -723,7 +709,7 @@ static void release_stolen(struct intel_memory_region *mem) static const struct intel_memory_region_ops i915_region_stolen_ops = { .init = init_stolen, .release = release_stolen, - .create_object = _i915_gem_object_create_stolen, + .init_object = _i915_gem_object_stolen_init, }; struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915) @@ -772,16 +758,31 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, goto err_free; } - obj = __i915_gem_object_create_stolen(mem, stolen); - if (IS_ERR(obj)) + obj = i915_gem_object_alloc(); + if (!obj) { + obj = ERR_PTR(-ENOMEM); goto err_stolen; + } + + ret = __i915_gem_object_create_stolen(mem, obj, stolen); + if (ret) { + obj = ERR_PTR(ret); + goto err_object_free; + } i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); return obj; +err_object_free: + i915_gem_object_free(obj); err_stolen: i915_gem_stolen_remove_node(i915, stolen); err_free: kfree(stolen); return obj; } + +bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) +{ + return obj->ops == &i915_gem_object_stolen_ops; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index 61e028063f9f..b03489706796 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -30,6 +30,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv resource_size_t stolen_offset, resource_size_t size); +bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj); + #define I915_GEM_STOLEN_BIAS SZ_128K #endif /* __I915_GEM_STOLEN_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index ffcaee74a249..d589d3d81085 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -270,14 +270,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, obj->mm.madv == I915_MADV_WILLNEED && i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { if (tiling == I915_TILING_NONE) { - GEM_BUG_ON(!obj->mm.quirked); - __i915_gem_object_unpin_pages(obj); - obj->mm.quirked = false; + GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); + i915_gem_object_clear_tiling_quirk(obj); + i915_gem_object_make_shrinkable(obj); } if (!i915_gem_object_is_tiled(obj)) { - GEM_BUG_ON(obj->mm.quirked); - __i915_gem_object_pin_pages(obj); - obj->mm.quirked = 
true; + GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); + i915_gem_object_make_unshrinkable(obj); + i915_gem_object_set_tiling_quirk(obj); } } mutex_unlock(&obj->mm.lock); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 8af55cd3e690..4b9856d5ba14 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -5,10 +5,12 @@ */ #include <linux/dma-fence-array.h> +#include <linux/dma-fence-chain.h> #include <linux/jiffies.h> #include "gt/intel_engine.h" +#include "dma_resv_utils.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" @@ -43,8 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, unsigned int count, i; int ret; - ret = dma_resv_get_fences_rcu(resv, - &excl, &count, &shared); + ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared); if (ret) return ret; @@ -84,17 +85,14 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, * Opportunistically prune the fences iff we know they have *all* been * signaled. */ - if (prune_fences && dma_resv_trylock(resv)) { - if (dma_resv_test_signaled_rcu(resv, true)) - dma_resv_add_excl_fence(resv, NULL); - dma_resv_unlock(resv); - } + if (prune_fences) + dma_resv_prune(resv); return timeout; } -static void __fence_set_priority(struct dma_fence *fence, - const struct i915_sched_attr *attr) +static void fence_set_priority(struct dma_fence *fence, + const struct i915_sched_attr *attr) { struct i915_request *rq; struct intel_engine_cs *engine; @@ -105,27 +103,47 @@ static void __fence_set_priority(struct dma_fence *fence, rq = to_request(fence); engine = rq->engine; - local_bh_disable(); rcu_read_lock(); /* RCU serialisation for set-wedged protection */ if (engine->schedule) engine->schedule(rq, attr); rcu_read_unlock(); - local_bh_enable(); /* kick the tasklets if queues were reprioritised */ } -static void fence_set_priority(struct dma_fence *fence, - const struct i915_sched_attr *attr) +static inline bool __dma_fence_is_chain(const struct dma_fence *fence) { + return fence->ops == &dma_fence_chain_ops; +} + +void i915_gem_fence_wait_priority(struct dma_fence *fence, + const struct i915_sched_attr *attr) +{ + if (dma_fence_is_signaled(fence)) + return; + + local_bh_disable(); + /* Recurse once into a fence-array */ if (dma_fence_is_array(fence)) { struct dma_fence_array *array = to_dma_fence_array(fence); int i; for (i = 0; i < array->num_fences; i++) - __fence_set_priority(array->fences[i], attr); + fence_set_priority(array->fences[i], attr); + } else if (__dma_fence_is_chain(fence)) { + struct dma_fence *iter; + + /* The chain is ordered; if we boost the last, we boost all */ + dma_fence_chain_for_each(iter, fence) { + fence_set_priority(to_dma_fence_chain(iter)->fence, + attr); + break; + } + dma_fence_put(iter); } else { - __fence_set_priority(fence, attr); + fence_set_priority(fence, attr); } + + local_bh_enable(); /* kick the tasklets if queues were reprioritised */ } int @@ -141,12 +159,12 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, int ret; ret = dma_resv_get_fences_rcu(obj->base.resv, - &excl, &count, &shared); + &excl, &count, &shared); if (ret) return ret; for (i = 0; i < count; i++) { - fence_set_priority(shared[i], attr); + i915_gem_fence_wait_priority(shared[i], attr); dma_fence_put(shared[i]); } @@ -156,7 +174,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, } if (excl) { - fence_set_priority(excl, attr); + i915_gem_fence_wait_priority(excl, attr); dma_fence_put(excl); 
} return 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index a768ec61e966..2fb501a78a85 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -27,7 +27,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj, static int huge_get_pages(struct drm_i915_gem_object *obj) { -#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) +#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL) const unsigned long nreal = obj->scratch / PAGE_SIZE; const unsigned long npages = obj->base.size / PAGE_SIZE; struct scatterlist *sg, *src, *end; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 1f35e71429b4..aacf4856ccb4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -368,6 +368,27 @@ static int igt_check_page_sizes(struct i915_vma *vma) err = -EINVAL; } + /* + * The dma-api is like a box of chocolates when it comes to the + * alignment of dma addresses, however for LMEM we have total control + * and so can guarantee alignment, likewise when we allocate our blocks + * they should appear in descending order, and if we know that we align + * to the largest page size for the GTT address, we should be able to + * assert that if we see 2M physical pages then we should also get 2M + * GTT pages. If we don't then something might be wrong in our + * construction of the backing pages. + * + * Maintaining alignment is required to utilise huge pages in the ppGGT. + */ + if (i915_gem_object_is_lmem(obj) && + IS_ALIGNED(vma->node.start, SZ_2M) && + vma->page_sizes.sg & SZ_2M && + vma->page_sizes.gtt < SZ_2M) { + pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n", + vma->page_sizes.sg, vma->page_sizes.gtt); + err = -EINVAL; + } + if (obj->mm.page_sizes.gtt) { pr_err("obj->page_sizes.gtt(%u) should never be set\n", obj->mm.page_sizes.gtt); @@ -1333,6 +1354,7 @@ static int igt_ppgtt_sanity_check(void *arg) unsigned int flags; } backends[] = { { igt_create_system, 0, }, + { igt_create_local, 0, }, { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, }, }; struct { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 4e36d4897ea6..6a674a7994df 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -20,13 +20,11 @@ static int __igt_client_fill(struct intel_engine_cs *engine) { struct intel_context *ce = engine->kernel_context; struct drm_i915_gem_object *obj; - struct rnd_state prng; + I915_RND_STATE(prng); IGT_TIMEOUT(end); u32 *vaddr; int err = 0; - prandom_seed_state(&prng, i915_selftest.random_seed); - intel_engine_pm_get(engine); do { const u32 max_block_size = S16_MAX * PAGE_SIZE; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 7049a6bbc03d..1117d2a44518 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_ring.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index d27d87a678c8..d429c7643ff2 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gem/i915_gem_region.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index e21b5023ca7d..d6783061bc72 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -9,6 +9,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "i915_vma.h" #include "i915_drv.h" |
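Note on the i915_gem_object_read_from_page() helper introduced in i915_gem_object.c above: its kernel-doc requires that the read not cross a page boundary and that the caller holds pinned pages on an object already synced against any related writes. A minimal caller sketch under those assumptions (hypothetical function, not part of this series) could look like:

	/*
	 * Hypothetical caller, not part of this patch: illustrates the
	 * documented constraints of i915_gem_object_read_from_page() --
	 * pages pinned, object already synced against writes, and a read
	 * that stays within a single page.
	 */
	static int read_object_magic(struct drm_i915_gem_object *obj, u32 *out)
	{
		int err;

		/* The caller must hold the backing pages across the read. */
		err = i915_gem_object_pin_pages(obj);
		if (err)
			return err;

		/* A 4-byte read at offset 0 cannot cross a page boundary. */
		err = i915_gem_object_read_from_page(obj, 0, out, sizeof(*out));

		i915_gem_object_unpin_pages(obj);
		return err; /* -ENODEV if the backing store type is unsupported */
	}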