author    | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2020-07-06 17:34:44 -0700
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2020-07-06 17:34:44 -0700
commit    | 121cca10836d1ad185839c862931ced0b9e935c6 (patch)
tree      | 6c9405f9714554c8997021511d1fa65b8fe31207
parent    | 04dd3a2cf05567320d5c61b7f3d7e64f1298fe15 (diff)
2020y-07m-07d-00h-33m-41s UTC: drm-tip rerere cache update
git version 2.24.1
-rw-r--r-- | rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/postimage   | 1389
-rw-r--r-- | rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/preimage    | 1392
-rw-r--r-- | rr-cache/4d878ad09076a3824576140c279b378cfcdad271/postimage   | 1432
-rw-r--r-- | rr-cache/4d878ad09076a3824576140c279b378cfcdad271/preimage    | 1438
-rw-r--r-- | rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/postimage.1 | 1432
-rw-r--r-- | rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage    | 1435
-rw-r--r-- | rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage.1  | 1435
7 files changed, 9953 insertions(+), 0 deletions(-)
diff --git a/rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/postimage b/rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/postimage new file mode 100644 index 000000000000..8dde2415a0ef --- /dev/null +++ b/rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/postimage @@ -0,0 +1,1389 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/sched/mm.h> +#include <drm/drm_gem.h> + +#include "display/intel_frontbuffer.h" + +#include "gt/intel_engine.h" +#include "gt/intel_engine_heartbeat.h" +#include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" + +#include "i915_drv.h" +#include "i915_globals.h" +#include "i915_sw_fence_work.h" +#include "i915_trace.h" +#include "i915_vma.h" + +static struct i915_global_vma { + struct i915_global base; + struct kmem_cache *slab_vmas; +} global; + +struct i915_vma *i915_vma_alloc(void) +{ + return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL); +} + +void i915_vma_free(struct i915_vma *vma) +{ + return kmem_cache_free(global.slab_vmas, vma); +} + +#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) + +#include <linux/stackdepot.h> + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ + unsigned long *entries; + unsigned int nr_entries; + char buf[512]; + + if (!vma->node.stack) { + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", + vma->node.start, vma->node.size, reason); + return; + } + + nr_entries = stack_depot_fetch(vma->node.stack, &entries); + stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", + vma->node.start, vma->node.size, reason, buf); +} + +#else + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ +} + +#endif + +static inline struct i915_vma *active_to_vma(struct i915_active *ref) +{ + return container_of(ref, typeof(struct i915_vma), active); +} + +static int __i915_vma_active(struct i915_active *ref) +{ + return i915_vma_tryget(active_to_vma(ref)) ? 
0 : -ENOENT; +} + +__i915_active_call +static void __i915_vma_retire(struct i915_active *ref) +{ + i915_vma_put(active_to_vma(ref)); +} + +static struct i915_vma * +vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + struct i915_vma *pos = ERR_PTR(-E2BIG); + struct i915_vma *vma; + struct rb_node *rb, **p; + + /* The aliasing_ppgtt should never be used directly! */ + GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm); + + vma = i915_vma_alloc(); + if (vma == NULL) + return ERR_PTR(-ENOMEM); + + kref_init(&vma->ref); + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(vm); + vma->ops = &vm->vma_ops; + vma->obj = obj; + vma->resv = obj->base.resv; + vma->size = obj->base.size; + vma->display_alignment = I915_GTT_MIN_ALIGNMENT; + + i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire); + + /* Declare ourselves safe for use inside shrinkers */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + fs_reclaim_acquire(GFP_KERNEL); + might_lock(&vma->active.mutex); + fs_reclaim_release(GFP_KERNEL); + } + + INIT_LIST_HEAD(&vma->closed_link); + + if (view && view->type != I915_GGTT_VIEW_NORMAL) { + vma->ggtt_view = *view; + if (view->type == I915_GGTT_VIEW_PARTIAL) { + GEM_BUG_ON(range_overflows_t(u64, + view->partial.offset, + view->partial.size, + obj->base.size >> PAGE_SHIFT)); + vma->size = view->partial.size; + vma->size <<= PAGE_SHIFT; + GEM_BUG_ON(vma->size > obj->base.size); + } else if (view->type == I915_GGTT_VIEW_ROTATED) { + vma->size = intel_rotation_info_size(&view->rotated); + vma->size <<= PAGE_SHIFT; + } else if (view->type == I915_GGTT_VIEW_REMAPPED) { + vma->size = intel_remapped_info_size(&view->remapped); + vma->size <<= PAGE_SHIFT; + } + } + + if (unlikely(vma->size > vm->total)) + goto err_vma; + + GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); + + spin_lock(&obj->vma.lock); + + if (i915_is_ggtt(vm)) { + if (unlikely(overflows_type(vma->size, u32))) + goto err_unlock; + + vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, + i915_gem_object_get_tiling(obj), + i915_gem_object_get_stride(obj)); + if (unlikely(vma->fence_size < vma->size || /* overflow */ + vma->fence_size > vm->total)) + goto err_unlock; + + GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); + + vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, + i915_gem_object_get_tiling(obj), + i915_gem_object_get_stride(obj)); + GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); + + __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); + } + + rb = NULL; + p = &obj->vma.tree.rb_node; + while (*p) { + long cmp; + + rb = *p; + pos = rb_entry(rb, struct i915_vma, obj_node); + + /* + * If the view already exists in the tree, another thread + * already created a matching vma, so return the older instance + * and dispose of ours. + */ + cmp = i915_vma_compare(pos, vm, view); + if (cmp == 0) { + spin_unlock(&obj->vma.lock); + i915_vm_put(vm); + i915_vma_free(vma); + return pos; + } + + if (cmp < 0) + p = &rb->rb_right; + else if (cmp > 0) + p = &rb->rb_left; + else + goto err_unlock; + } + rb_link_node(&vma->obj_node, rb, p); + rb_insert_color(&vma->obj_node, &obj->vma.tree); + + if (i915_vma_is_ggtt(vma)) + /* + * We put the GGTT vma at the start of the vma-list, followed + * by the ppGGTT vma. 
This allows us to break early when + * iterating over only the GGTT vma for an object, see + * for_each_ggtt_vma() + */ + list_add(&vma->obj_link, &obj->vma.list); + else + list_add_tail(&vma->obj_link, &obj->vma.list); + + spin_unlock(&obj->vma.lock); + + return vma; + +err_unlock: + spin_unlock(&obj->vma.lock); +err_vma: + i915_vm_put(vm); + i915_vma_free(vma); + return pos; +} + +static struct i915_vma * +vma_lookup(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + struct rb_node *rb; + + rb = obj->vma.tree.rb_node; + while (rb) { + struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); + long cmp; + + cmp = i915_vma_compare(vma, vm, view); + if (cmp == 0) + return vma; + + if (cmp < 0) + rb = rb->rb_right; + else + rb = rb->rb_left; + } + + return NULL; +} + +/** + * i915_vma_instance - return the singleton instance of the VMA + * @obj: parent &struct drm_i915_gem_object to be mapped + * @vm: address space in which the mapping is located + * @view: additional mapping requirements + * + * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with + * the same @view characteristics. If a match is not found, one is created. + * Once created, the VMA is kept until either the object is freed, or the + * address space is closed. + * + * Returns the vma, or an error pointer. + */ +struct i915_vma * +i915_vma_instance(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + struct i915_vma *vma; + + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + GEM_BUG_ON(!atomic_read(&vm->open)); + + spin_lock(&obj->vma.lock); + vma = vma_lookup(obj, vm, view); + spin_unlock(&obj->vma.lock); + + /* vma_create() will resolve the race if another creates the vma */ + if (unlikely(!vma)) + vma = vma_create(obj, vm, view); + + GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); + return vma; +} + +struct i915_vma_work { + struct dma_fence_work base; + struct i915_vma *vma; + struct drm_i915_gem_object *pinned; + struct i915_sw_dma_fence_cb cb; + enum i915_cache_level cache_level; + unsigned int flags; +}; + +static int __vma_bind(struct dma_fence_work *work) +{ + struct i915_vma_work *vw = container_of(work, typeof(*vw), base); + struct i915_vma *vma = vw->vma; + int err; + + err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags); + if (err) + atomic_or(I915_VMA_ERROR, &vma->flags); + + return err; +} + +static void __vma_release(struct dma_fence_work *work) +{ + struct i915_vma_work *vw = container_of(work, typeof(*vw), base); + + if (vw->pinned) + __i915_gem_object_unpin_pages(vw->pinned); +} + +static const struct dma_fence_work_ops bind_ops = { + .name = "bind", + .work = __vma_bind, + .release = __vma_release, +}; + +struct i915_vma_work *i915_vma_work(void) +{ + struct i915_vma_work *vw; + + vw = kzalloc(sizeof(*vw), GFP_KERNEL); + if (!vw) + return NULL; + + dma_fence_work_init(&vw->base, &bind_ops); + vw->base.dma.error = -EAGAIN; /* disable the worker by default */ + + return vw; +} + +int i915_vma_wait_for_bind(struct i915_vma *vma) +{ + int err = 0; + + if (rcu_access_pointer(vma->active.excl.fence)) { + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); + rcu_read_unlock(); + if (fence) { + err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT); + dma_fence_put(fence); + } + } + + return err; +} + +/** + * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 
+ * @vma: VMA to map + * @cache_level: mapping cache level + * @flags: flags like global or local mapping + * @work: preallocated worker for allocating and binding the PTE + * + * DMA addresses are taken from the scatter-gather table of this object (or of + * this VMA in case of non-default GGTT views) and PTE entries set up. + * Note that DMA addresses are also the only part of the SG table we care about. + */ +int i915_vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags, + struct i915_vma_work *work) +{ + u32 bind_flags; + u32 vma_flags; + int ret; + + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(vma->size > vma->node.size); + + if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, + vma->node.size, + vma->vm->total))) + return -ENODEV; + + if (GEM_DEBUG_WARN_ON(!flags)) + return -EINVAL; + + bind_flags = flags; + bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + + vma_flags = atomic_read(&vma->flags); + vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + + bind_flags &= ~vma_flags; + if (bind_flags == 0) + return 0; + + GEM_BUG_ON(!vma->pages); + + trace_i915_vma_bind(vma, bind_flags); + if (work && bind_flags & vma->vm->bind_async_flags) { + struct dma_fence *prev; + + work->vma = vma; + work->cache_level = cache_level; + work->flags = bind_flags | I915_VMA_ALLOC; + + /* + * Note we only want to chain up to the migration fence on + * the pages (not the object itself). As we don't track that, + * yet, we have to use the exclusive fence instead. + * + * Also note that we do not want to track the async vma as + * part of the obj->resv->excl_fence as it only affects + * execution and not content or object's backing store lifetime. + */ + prev = i915_active_set_exclusive(&vma->active, &work->base.dma); + if (prev) { + __i915_sw_fence_await_dma_fence(&work->base.chain, + prev, + &work->cb); + dma_fence_put(prev); + } + + work->base.dma.error = 0; /* enable the queue_work() */ + + if (vma->obj) { + __i915_gem_object_pin_pages(vma->obj); + work->pinned = vma->obj; + } + } else { + ret = vma->ops->bind_vma(vma, cache_level, bind_flags); + if (ret) + return ret; + } + + atomic_or(bind_flags, &vma->flags); + return 0; +} + +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) +{ + void __iomem *ptr; + int err; + + if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { + err = -ENODEV; + goto err; + } + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); + + ptr = READ_ONCE(vma->iomap); + if (ptr == NULL) { + ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, + vma->node.start, + vma->node.size); + if (ptr == NULL) { + err = -ENOMEM; + goto err; + } + + if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { + io_mapping_unmap(ptr); + ptr = vma->iomap; + } + } + + __i915_vma_pin(vma); + + err = i915_vma_pin_fence(vma); + if (err) + goto err_unpin; + + i915_vma_set_ggtt_write(vma); + + /* NB Access through the GTT requires the device to be awake. 
*/ + return ptr; + +err_unpin: + __i915_vma_unpin(vma); +err: + return IO_ERR_PTR(err); +} + +void i915_vma_flush_writes(struct i915_vma *vma) +{ + if (i915_vma_unset_ggtt_write(vma)) + intel_gt_flush_ggtt_writes(vma->vm->gt); +} + +void i915_vma_unpin_iomap(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->iomap == NULL); + + i915_vma_flush_writes(vma); + + i915_vma_unpin_fence(vma); + i915_vma_unpin(vma); +} + +void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) +{ + struct i915_vma *vma; + struct drm_i915_gem_object *obj; + + vma = fetch_and_zero(p_vma); + if (!vma) + return; + + obj = vma->obj; + GEM_BUG_ON(!obj); + + i915_vma_unpin(vma); + + if (flags & I915_VMA_RELEASE_MAP) + i915_gem_object_unpin_map(obj); + + i915_gem_object_put(obj); +} + +bool i915_vma_misplaced(const struct i915_vma *vma, + u64 size, u64 alignment, u64 flags) +{ + if (!drm_mm_node_allocated(&vma->node)) + return false; + + if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma))) + return true; + + if (vma->node.size < size) + return true; + + GEM_BUG_ON(alignment && !is_power_of_2(alignment)); + if (alignment && !IS_ALIGNED(vma->node.start, alignment)) + return true; + + if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) + return true; + + if (flags & PIN_OFFSET_BIAS && + vma->node.start < (flags & PIN_OFFSET_MASK)) + return true; + + if (flags & PIN_OFFSET_FIXED && + vma->node.start != (flags & PIN_OFFSET_MASK)) + return true; + + return false; +} + +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) +{ + bool mappable, fenceable; + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(!vma->fence_size); + + fenceable = (vma->node.size >= vma->fence_size && + IS_ALIGNED(vma->node.start, vma->fence_alignment)); + + mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; + + if (mappable && fenceable) + set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); + else + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); +} + +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) +{ + struct drm_mm_node *node = &vma->node; + struct drm_mm_node *other; + + /* + * On some machines we have to be careful when putting differing types + * of snoopable memory together to avoid the prefetcher crossing memory + * domains and dying. During vm initialisation, we decide whether or not + * these constraints apply and set the drm_mm.color_adjust + * appropriately. + */ + if (!i915_vm_has_cache_coloring(vma->vm)) + return true; + + /* Only valid to be called on an already inserted vma */ + GEM_BUG_ON(!drm_mm_node_allocated(node)); + GEM_BUG_ON(list_empty(&node->node_list)); + + other = list_prev_entry(node, node_list); + if (i915_node_color_differs(other, color) && + !drm_mm_hole_follows(other)) + return false; + + other = list_next_entry(node, node_list); + if (i915_node_color_differs(other, color) && + !drm_mm_hole_follows(node)) + return false; + + return true; +} + +/** + * i915_vma_insert - finds a slot for the vma in its address space + * @vma: the vma + * @size: requested size in bytes (can be larger than the VMA) + * @alignment: required alignment + * @flags: mask of PIN_* flags to use + * + * First we try to allocate some free space that meets the requirements for + * the VMA. Failiing that, if the flags permit, it will evict an old VMA, + * preferrably the oldest idle entry to make room for the new VMA. + * + * Returns: + * 0 on success, negative error code otherwise. 
+ */ +static int +i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + unsigned long color; + u64 start, end; + int ret; + + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + + size = max(size, vma->size); + alignment = max(alignment, vma->display_alignment); + if (flags & PIN_MAPPABLE) { + size = max_t(typeof(size), size, vma->fence_size); + alignment = max_t(typeof(alignment), + alignment, vma->fence_alignment); + } + + GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); + GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); + GEM_BUG_ON(!is_power_of_2(alignment)); + + start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; + GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); + + end = vma->vm->total; + if (flags & PIN_MAPPABLE) + end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); + if (flags & PIN_ZONE_4G) + end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); + GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); + + /* If binding the object/GGTT view requires more space than the entire + * aperture has, reject it early before evicting everything in a vain + * attempt to find space. + */ + if (size > end) { + DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n", + size, flags & PIN_MAPPABLE ? "mappable" : "total", + end); + return -ENOSPC; + } + + color = 0; + if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) + color = vma->obj->cache_level; + + if (flags & PIN_OFFSET_FIXED) { + u64 offset = flags & PIN_OFFSET_MASK; + if (!IS_ALIGNED(offset, alignment) || + range_overflows(offset, size, end)) + return -EINVAL; + + ret = i915_gem_gtt_reserve(vma->vm, &vma->node, + size, offset, color, + flags); + if (ret) + return ret; + } else { + /* + * We only support huge gtt pages through the 48b PPGTT, + * however we also don't want to force any alignment for + * objects which need to be tightly packed into the low 32bits. + * + * Note that we assume that GGTT are limited to 4GiB for the + * forseeable future. See also i915_ggtt_offset(). + */ + if (upper_32_bits(end - 1) && + vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { + /* + * We can't mix 64K and 4K PTEs in the same page-table + * (2M block), and so to avoid the ugliness and + * complexity of coloring we opt for just aligning 64K + * objects to 2M. + */ + u64 page_alignment = + rounddown_pow_of_two(vma->page_sizes.sg | + I915_GTT_PAGE_SIZE_2M); + + /* + * Check we don't expand for the limited Global GTT + * (mappable aperture is even more precious!). This + * also checks that we exclude the aliasing-ppgtt. 
+ */ + GEM_BUG_ON(i915_vma_is_ggtt(vma)); + + alignment = max(alignment, page_alignment); + + if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) + size = round_up(size, I915_GTT_PAGE_SIZE_2M); + } + + ret = i915_gem_gtt_insert(vma->vm, &vma->node, + size, alignment, color, + start, end, flags); + if (ret) + return ret; + + GEM_BUG_ON(vma->node.start < start); + GEM_BUG_ON(vma->node.start + vma->node.size > end); + } + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); + + list_add_tail(&vma->vm_link, &vma->vm->bound_list); + + return 0; +} + +static void +i915_vma_detach(struct i915_vma *vma) +{ + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + + /* + * And finally now the object is completely decoupled from this + * vma, we can drop its hold on the backing storage and allow + * it to be reaped by the shrinker. + */ + list_del(&vma->vm_link); +} + +static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) +{ + unsigned int bound; + bool pinned = true; + + bound = atomic_read(&vma->flags); + do { + if (unlikely(flags & ~bound)) + return false; + + if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) + return false; + + if (!(bound & I915_VMA_PIN_MASK)) + goto unpinned; + + GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0); + } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); + + return true; + +unpinned: + /* + * If pin_count==0, but we are bound, check under the lock to avoid + * racing with a concurrent i915_vma_unbind(). + */ + mutex_lock(&vma->vm->mutex); + do { + if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) { + pinned = false; + break; + } + + if (unlikely(flags & ~bound)) { + pinned = false; + break; + } + } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); + mutex_unlock(&vma->vm->mutex); + + return pinned; +} + +static int vma_get_pages(struct i915_vma *vma) +{ + int err = 0; + + if (atomic_add_unless(&vma->pages_count, 1, 0)) + return 0; + + /* Allocations ahoy! 
*/ + if (mutex_lock_interruptible(&vma->pages_mutex)) + return -EINTR; + + if (!atomic_read(&vma->pages_count)) { + if (vma->obj) { + err = i915_gem_object_pin_pages(vma->obj); + if (err) + goto unlock; + } + + err = vma->ops->set_pages(vma); + if (err) { + if (vma->obj) + i915_gem_object_unpin_pages(vma->obj); + goto unlock; + } + } + atomic_inc(&vma->pages_count); + +unlock: + mutex_unlock(&vma->pages_mutex); + + return err; +} + +static void __vma_put_pages(struct i915_vma *vma, unsigned int count) +{ + /* We allocate under vma_get_pages, so beware the shrinker */ + mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING); + GEM_BUG_ON(atomic_read(&vma->pages_count) < count); + if (atomic_sub_return(count, &vma->pages_count) == 0) { + vma->ops->clear_pages(vma); + GEM_BUG_ON(vma->pages); + if (vma->obj) + i915_gem_object_unpin_pages(vma->obj); + } + mutex_unlock(&vma->pages_mutex); +} + +static void vma_put_pages(struct i915_vma *vma) +{ + if (atomic_add_unless(&vma->pages_count, -1, 1)) + return; + + __vma_put_pages(vma, 1); +} + +static void vma_unbind_pages(struct i915_vma *vma) +{ + unsigned int count; + + lockdep_assert_held(&vma->vm->mutex); + + /* The upper portion of pages_count is the number of bindings */ + count = atomic_read(&vma->pages_count); + count >>= I915_VMA_PAGES_BIAS; + GEM_BUG_ON(!count); + + __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); +} + +int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + struct i915_vma_work *work = NULL; + intel_wakeref_t wakeref = 0; + unsigned int bound; + int err; + + BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); + BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); + + GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL))); + + /* First try and grab the pin without rebinding the vma */ + if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) + return 0; + + err = vma_get_pages(vma); + if (err) + return err; + + if (flags & vma->vm->bind_async_flags) { + work = i915_vma_work(); + if (!work) { + err = -ENOMEM; + goto err_pages; + } + } + + if (flags & PIN_GLOBAL) + wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); + + /* + * Differentiate between user/kernel vma inside the aliasing-ppgtt. + * + * We conflate the Global GTT with the user's vma when using the + * aliasing-ppgtt, but it is still vitally important to try and + * keep the use cases distinct. For example, userptr objects are + * not allowed inside the Global GTT as that will cause lock + * inversions when we have to evict them the mmu_notifier callbacks - + * but they are allowed to be part of the user ppGTT which can never + * be mapped. As such we try to give the distinct users of the same + * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt + * and i915_ppgtt separate]. + * + * NB this may cause us to mask real lock inversions -- while the + * code is safe today, lockdep may not be able to spot future + * transgressions. 
+ */ + err = mutex_lock_interruptible_nested(&vma->vm->mutex, + !(flags & PIN_GLOBAL)); + if (err) + goto err_fence; + + /* No more allocations allowed now we hold vm->mutex */ + + if (unlikely(i915_vma_is_closed(vma))) { + err = -ENOENT; + goto err_unlock; + } + + bound = atomic_read(&vma->flags); + if (unlikely(bound & I915_VMA_ERROR)) { + err = -ENOMEM; + goto err_unlock; + } + + if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) { + err = -EAGAIN; /* pins are meant to be fairly temporary */ + goto err_unlock; + } + + if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) { + __i915_vma_pin(vma); + goto err_unlock; + } + + err = i915_active_acquire(&vma->active); + if (err) + goto err_unlock; + + if (!(bound & I915_VMA_BIND_MASK)) { + err = i915_vma_insert(vma, size, alignment, flags); + if (err) + goto err_active; + + if (i915_is_ggtt(vma->vm)) + __i915_vma_set_map_and_fenceable(vma); + } + + GEM_BUG_ON(!vma->pages); + err = i915_vma_bind(vma, + vma->obj ? vma->obj->cache_level : 0, + flags, work); + if (err) + goto err_remove; + + /* There should only be at most 2 active bindings (user, global) */ + GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound); + atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); + + __i915_vma_pin(vma); + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); + GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); + +err_remove: + if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { + i915_vma_detach(vma); + drm_mm_remove_node(&vma->node); + } +err_active: + i915_active_release(&vma->active); +err_unlock: + mutex_unlock(&vma->vm->mutex); +err_fence: + if (work) + dma_fence_work_commit_imm(&work->base); + if (wakeref) + intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); +err_pages: + vma_put_pages(vma); + return err; +} + +static void flush_idle_contexts(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + intel_engine_flush_barriers(engine); + + intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); +} + +int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) +{ + struct i915_address_space *vm = vma->vm; + int err; + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + + do { + err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); + if (err != -ENOSPC) { + if (!err) { + err = i915_vma_wait_for_bind(vma); + if (err) + i915_vma_unpin(vma); + } + return err; + } + + /* Unlike i915_vma_pin, we don't take no for an answer! */ + flush_idle_contexts(vm->gt); + if (mutex_lock_interruptible(&vm->mutex) == 0) { + i915_gem_evict_vm(vm); + mutex_unlock(&vm->mutex); + } + } while (1); +} + +static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) +{ + /* + * We defer actually closing, unbinding and destroying the VMA until + * the next idle point, or if the object is freed in the meantime. By + * postponing the unbind, we allow for it to be resurrected by the + * client, avoiding the work required to rebind the VMA. This is + * advantageous for DRI, where the client/server pass objects + * between themselves, temporarily opening a local VMA to the + * object, and then closing it again. The same object is then reused + * on the next frame (or two, depending on the depth of the swap queue) + * causing us to rebind the VMA once more. This ends up being a lot + * of wasted work for the steady state. 
+ */ + GEM_BUG_ON(i915_vma_is_closed(vma)); + list_add(&vma->closed_link, >->closed_vma); +} + +void i915_vma_close(struct i915_vma *vma) +{ + struct intel_gt *gt = vma->vm->gt; + unsigned long flags; + + if (i915_vma_is_ggtt(vma)) + return; + + GEM_BUG_ON(!atomic_read(&vma->open_count)); + if (atomic_dec_and_lock_irqsave(&vma->open_count, + >->closed_lock, + flags)) { + __vma_close(vma, gt); + spin_unlock_irqrestore(>->closed_lock, flags); + } +} + +static void __i915_vma_remove_closed(struct i915_vma *vma) +{ + struct intel_gt *gt = vma->vm->gt; + + spin_lock_irq(>->closed_lock); + list_del_init(&vma->closed_link); + spin_unlock_irq(>->closed_lock); +} + +void i915_vma_reopen(struct i915_vma *vma) +{ + if (i915_vma_is_closed(vma)) + __i915_vma_remove_closed(vma); +} + +void i915_vma_release(struct kref *ref) +{ + struct i915_vma *vma = container_of(ref, typeof(*vma), ref); + + if (drm_mm_node_allocated(&vma->node)) { + mutex_lock(&vma->vm->mutex); + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); + WARN_ON(__i915_vma_unbind(vma)); + mutex_unlock(&vma->vm->mutex); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + } + GEM_BUG_ON(i915_vma_is_active(vma)); + + if (vma->obj) { + struct drm_i915_gem_object *obj = vma->obj; + + spin_lock(&obj->vma.lock); + list_del(&vma->obj_link); + if (!RB_EMPTY_NODE(&vma->obj_node)) + rb_erase(&vma->obj_node, &obj->vma.tree); + spin_unlock(&obj->vma.lock); + } + + __i915_vma_remove_closed(vma); + i915_vm_put(vma->vm); + + i915_active_fini(&vma->active); + i915_vma_free(vma); +} + +void i915_vma_parked(struct intel_gt *gt) +{ + struct i915_vma *vma, *next; + LIST_HEAD(closed); + + spin_lock_irq(>->closed_lock); + list_for_each_entry_safe(vma, next, >->closed_vma, closed_link) { + struct drm_i915_gem_object *obj = vma->obj; + struct i915_address_space *vm = vma->vm; + + /* XXX All to avoid keeping a reference on i915_vma itself */ + + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + + if (!i915_vm_tryopen(vm)) { + i915_gem_object_put(obj); + continue; + } + + list_move(&vma->closed_link, &closed); + } + spin_unlock_irq(>->closed_lock); + + /* As the GT is held idle, no vma can be reopened as we destroy them */ + list_for_each_entry_safe(vma, next, &closed, closed_link) { + struct drm_i915_gem_object *obj = vma->obj; + struct i915_address_space *vm = vma->vm; + + INIT_LIST_HEAD(&vma->closed_link); + __i915_vma_put(vma); + + i915_gem_object_put(obj); + i915_vm_close(vm); + } +} + +static void __i915_vma_iounmap(struct i915_vma *vma) +{ + GEM_BUG_ON(i915_vma_is_pinned(vma)); + + if (vma->iomap == NULL) + return; + + io_mapping_unmap(vma->iomap); + vma->iomap = NULL; +} + +void i915_vma_revoke_mmap(struct i915_vma *vma) +{ + struct drm_vma_offset_node *node; + u64 vma_offset; + + if (!i915_vma_has_userfault(vma)) + return; + + GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); + GEM_BUG_ON(!vma->obj->userfault_count); + + node = &vma->mmo->vma_node; + vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; + unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, + drm_vma_node_offset_addr(node) + vma_offset, + vma->size, + 1); + + i915_vma_unset_userfault(vma); + if (!--vma->obj->userfault_count) + list_del(&vma->obj->userfault_link); +} + +int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) +{ + int err; + + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + + /* Wait for the vma to be bound before we start! 
*/ + err = i915_request_await_active(rq, &vma->active, + I915_ACTIVE_AWAIT_EXCL); + if (err) + return err; + + return i915_active_add_request(&vma->active, rq); +} + +int i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + int err; + + assert_object_held(obj); + + err = __i915_vma_move_to_active(vma, rq); + if (unlikely(err)) + return err; + + if (flags & EXEC_OBJECT_WRITE) { + struct intel_frontbuffer *front; + + front = __intel_frontbuffer_get(obj); + if (unlikely(front)) { + if (intel_frontbuffer_invalidate(front, ORIGIN_CS)) + i915_active_add_request(&front->write, rq); + intel_frontbuffer_put(front); + } + + dma_resv_add_excl_fence(vma->resv, &rq->fence); + obj->write_domain = I915_GEM_DOMAIN_RENDER; + obj->read_domains = 0; + } else { + err = dma_resv_reserve_shared(vma->resv, 1); + if (unlikely(err)) + return err; + + dma_resv_add_shared_fence(vma->resv, &rq->fence); + obj->write_domain = 0; + } + + if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) + i915_active_add_request(&vma->fence->active, rq); + + obj->read_domains |= I915_GEM_GPU_DOMAINS; + obj->mm.dirty = true; + + GEM_BUG_ON(!i915_vma_is_active(vma)); + return 0; +} + +void __i915_vma_evict(struct i915_vma *vma) +{ + GEM_BUG_ON(i915_vma_is_pinned(vma)); + + if (i915_vma_is_map_and_fenceable(vma)) { + /* Force a pagefault for domain tracking on next user access */ + i915_vma_revoke_mmap(vma); + + /* + * Check that we have flushed all writes through the GGTT + * before the unbind, other due to non-strict nature of those + * indirect writes they may end up referencing the GGTT PTE + * after the unbind. + * + * Note that we may be concurrently poking at the GGTT_WRITE + * bit from set-domain, as we mark all GGTT vma associated + * with an object. We know this is for another vma, as we + * are currently unbinding this one -- so if this vma will be + * reused, it will be refaulted and have its dirty bit set + * before the next write. + */ + i915_vma_flush_writes(vma); + + /* release the fence reg _after_ flushing */ + i915_vma_revoke_fence(vma); + + __i915_vma_iounmap(vma); + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); + } + GEM_BUG_ON(vma->fence); + GEM_BUG_ON(i915_vma_has_userfault(vma)); + + if (likely(atomic_read(&vma->vm->open))) { + trace_i915_vma_unbind(vma); + vma->ops->unbind_vma(vma); + } + atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE), + &vma->flags); + + i915_vma_detach(vma); + vma_unbind_pages(vma); +} + +int __i915_vma_unbind(struct i915_vma *vma) +{ + int ret; + + lockdep_assert_held(&vma->vm->mutex); + + if (!drm_mm_node_allocated(&vma->node)) + return 0; + + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); + return -EAGAIN; + } + + /* + * After confirming that no one else is pinning this vma, wait for + * any laggards who may have crept in during the wait (through + * a residual pin skipping the vm->mutex) to complete. 
+ */ + ret = i915_vma_sync(vma); + if (ret) + return ret; + + GEM_BUG_ON(i915_vma_is_active(vma)); + __i915_vma_evict(vma); + + drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ + return 0; +} + +int i915_vma_unbind(struct i915_vma *vma) +{ + struct i915_address_space *vm = vma->vm; + intel_wakeref_t wakeref = 0; + int err; + + /* Optimistic wait before taking the mutex */ + err = i915_vma_sync(vma); + if (err) + return err; + + if (!drm_mm_node_allocated(&vma->node)) + return 0; + + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); + return -EAGAIN; + } + + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + /* XXX not always required: nop_clear_range */ + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + + err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); + if (err) + goto out_rpm; + + err = __i915_vma_unbind(vma); + mutex_unlock(&vm->mutex); + +out_rpm: + if (wakeref) + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); + return err; +} + +struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) +{ + i915_gem_object_make_unshrinkable(vma->obj); + return vma; +} + +void i915_vma_make_shrinkable(struct i915_vma *vma) +{ + i915_gem_object_make_shrinkable(vma->obj); +} + +void i915_vma_make_purgeable(struct i915_vma *vma) +{ + i915_gem_object_make_purgeable(vma->obj); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_vma.c" +#endif + +static void i915_global_vma_shrink(void) +{ + kmem_cache_shrink(global.slab_vmas); +} + +static void i915_global_vma_exit(void) +{ + kmem_cache_destroy(global.slab_vmas); +} + +static struct i915_global_vma global = { { + .shrink = i915_global_vma_shrink, + .exit = i915_global_vma_exit, +} }; + +int __init i915_global_vma_init(void) +{ + global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); + if (!global.slab_vmas) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/preimage b/rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/preimage new file mode 100644 index 000000000000..72f58ba9bb41 --- /dev/null +++ b/rr-cache/1eeb692e94724b3d619b2fd6c004889550a6d72e/preimage @@ -0,0 +1,1392 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include <linux/sched/mm.h> +#include <drm/drm_gem.h> + +#include "display/intel_frontbuffer.h" + +#include "gt/intel_engine.h" +#include "gt/intel_engine_heartbeat.h" +#include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" + +#include "i915_drv.h" +#include "i915_globals.h" +#include "i915_sw_fence_work.h" +#include "i915_trace.h" +#include "i915_vma.h" + +static struct i915_global_vma { + struct i915_global base; + struct kmem_cache *slab_vmas; +} global; + +struct i915_vma *i915_vma_alloc(void) +{ + return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL); +} + +void i915_vma_free(struct i915_vma *vma) +{ + return kmem_cache_free(global.slab_vmas, vma); +} + +#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) + +#include <linux/stackdepot.h> + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ + unsigned long *entries; + unsigned int nr_entries; + char buf[512]; + + if (!vma->node.stack) { + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", + vma->node.start, vma->node.size, reason); + return; + } + + nr_entries = stack_depot_fetch(vma->node.stack, &entries); + stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", + vma->node.start, vma->node.size, reason, buf); +} + +#else + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ +} + +#endif + +static inline struct i915_vma *active_to_vma(struct i915_active *ref) +{ + return container_of(ref, typeof(struct i915_vma), active); +} + +static int __i915_vma_active(struct i915_active *ref) +{ + return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT; +} + +__i915_active_call +static void __i915_vma_retire(struct i915_active *ref) +{ + i915_vma_put(active_to_vma(ref)); +} + +static struct i915_vma * +vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + struct i915_vma *pos = ERR_PTR(-E2BIG); + struct i915_vma *vma; + struct rb_node *rb, **p; + + /* The aliasing_ppgtt should never be used directly! 
*/ + GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm); + + vma = i915_vma_alloc(); + if (vma == NULL) + return ERR_PTR(-ENOMEM); + + kref_init(&vma->ref); + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(vm); + vma->ops = &vm->vma_ops; + vma->obj = obj; + vma->resv = obj->base.resv; + vma->size = obj->base.size; + vma->display_alignment = I915_GTT_MIN_ALIGNMENT; + + i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire); + + /* Declare ourselves safe for use inside shrinkers */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + fs_reclaim_acquire(GFP_KERNEL); + might_lock(&vma->active.mutex); + fs_reclaim_release(GFP_KERNEL); + } + + INIT_LIST_HEAD(&vma->closed_link); + + if (view && view->type != I915_GGTT_VIEW_NORMAL) { + vma->ggtt_view = *view; + if (view->type == I915_GGTT_VIEW_PARTIAL) { + GEM_BUG_ON(range_overflows_t(u64, + view->partial.offset, + view->partial.size, + obj->base.size >> PAGE_SHIFT)); + vma->size = view->partial.size; + vma->size <<= PAGE_SHIFT; + GEM_BUG_ON(vma->size > obj->base.size); + } else if (view->type == I915_GGTT_VIEW_ROTATED) { + vma->size = intel_rotation_info_size(&view->rotated); + vma->size <<= PAGE_SHIFT; + } else if (view->type == I915_GGTT_VIEW_REMAPPED) { + vma->size = intel_remapped_info_size(&view->remapped); + vma->size <<= PAGE_SHIFT; + } + } + + if (unlikely(vma->size > vm->total)) + goto err_vma; + + GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); + + spin_lock(&obj->vma.lock); + + if (i915_is_ggtt(vm)) { + if (unlikely(overflows_type(vma->size, u32))) + goto err_unlock; + + vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, + i915_gem_object_get_tiling(obj), + i915_gem_object_get_stride(obj)); + if (unlikely(vma->fence_size < vma->size || /* overflow */ + vma->fence_size > vm->total)) + goto err_unlock; + + GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); + + vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, + i915_gem_object_get_tiling(obj), + i915_gem_object_get_stride(obj)); + GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); + + __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); + } + + rb = NULL; + p = &obj->vma.tree.rb_node; + while (*p) { + long cmp; + + rb = *p; + pos = rb_entry(rb, struct i915_vma, obj_node); + + /* + * If the view already exists in the tree, another thread + * already created a matching vma, so return the older instance + * and dispose of ours. + */ + cmp = i915_vma_compare(pos, vm, view); +<<<<<<< +======= + if (cmp == 0) { + spin_unlock(&obj->vma.lock); + i915_vm_put(vm); + i915_vma_free(vma); + return pos; + } + +>>>>>>> + if (cmp < 0) + p = &rb->rb_right; + else if (cmp > 0) + p = &rb->rb_left; + else + goto err_unlock; + } + rb_link_node(&vma->obj_node, rb, p); + rb_insert_color(&vma->obj_node, &obj->vma.tree); + + if (i915_vma_is_ggtt(vma)) + /* + * We put the GGTT vma at the start of the vma-list, followed + * by the ppGGTT vma. 
This allows us to break early when + * iterating over only the GGTT vma for an object, see + * for_each_ggtt_vma() + */ + list_add(&vma->obj_link, &obj->vma.list); + else + list_add_tail(&vma->obj_link, &obj->vma.list); + + spin_unlock(&obj->vma.lock); + + return vma; + +err_unlock: + spin_unlock(&obj->vma.lock); +err_vma: + i915_vm_put(vm); + i915_vma_free(vma); + return pos; +} + +static struct i915_vma * +vma_lookup(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + struct rb_node *rb; + + rb = obj->vma.tree.rb_node; + while (rb) { + struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); + long cmp; + + cmp = i915_vma_compare(vma, vm, view); + if (cmp == 0) + return vma; + + if (cmp < 0) + rb = rb->rb_right; + else + rb = rb->rb_left; + } + + return NULL; +} + +/** + * i915_vma_instance - return the singleton instance of the VMA + * @obj: parent &struct drm_i915_gem_object to be mapped + * @vm: address space in which the mapping is located + * @view: additional mapping requirements + * + * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with + * the same @view characteristics. If a match is not found, one is created. + * Once created, the VMA is kept until either the object is freed, or the + * address space is closed. + * + * Returns the vma, or an error pointer. + */ +struct i915_vma * +i915_vma_instance(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + struct i915_vma *vma; + + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + GEM_BUG_ON(!atomic_read(&vm->open)); + + spin_lock(&obj->vma.lock); + vma = vma_lookup(obj, vm, view); + spin_unlock(&obj->vma.lock); + + /* vma_create() will resolve the race if another creates the vma */ + if (unlikely(!vma)) + vma = vma_create(obj, vm, view); + + GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); + return vma; +} + +struct i915_vma_work { + struct dma_fence_work base; + struct i915_vma *vma; + struct drm_i915_gem_object *pinned; + struct i915_sw_dma_fence_cb cb; + enum i915_cache_level cache_level; + unsigned int flags; +}; + +static int __vma_bind(struct dma_fence_work *work) +{ + struct i915_vma_work *vw = container_of(work, typeof(*vw), base); + struct i915_vma *vma = vw->vma; + int err; + + err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags); + if (err) + atomic_or(I915_VMA_ERROR, &vma->flags); + + return err; +} + +static void __vma_release(struct dma_fence_work *work) +{ + struct i915_vma_work *vw = container_of(work, typeof(*vw), base); + + if (vw->pinned) + __i915_gem_object_unpin_pages(vw->pinned); +} + +static const struct dma_fence_work_ops bind_ops = { + .name = "bind", + .work = __vma_bind, + .release = __vma_release, +}; + +struct i915_vma_work *i915_vma_work(void) +{ + struct i915_vma_work *vw; + + vw = kzalloc(sizeof(*vw), GFP_KERNEL); + if (!vw) + return NULL; + + dma_fence_work_init(&vw->base, &bind_ops); + vw->base.dma.error = -EAGAIN; /* disable the worker by default */ + + return vw; +} + +int i915_vma_wait_for_bind(struct i915_vma *vma) +{ + int err = 0; + + if (rcu_access_pointer(vma->active.excl.fence)) { + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); + rcu_read_unlock(); + if (fence) { + err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT); + dma_fence_put(fence); + } + } + + return err; +} + +/** + * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 
+ * @vma: VMA to map + * @cache_level: mapping cache level + * @flags: flags like global or local mapping + * @work: preallocated worker for allocating and binding the PTE + * + * DMA addresses are taken from the scatter-gather table of this object (or of + * this VMA in case of non-default GGTT views) and PTE entries set up. + * Note that DMA addresses are also the only part of the SG table we care about. + */ +int i915_vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags, + struct i915_vma_work *work) +{ + u32 bind_flags; + u32 vma_flags; + int ret; + + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(vma->size > vma->node.size); + + if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, + vma->node.size, + vma->vm->total))) + return -ENODEV; + + if (GEM_DEBUG_WARN_ON(!flags)) + return -EINVAL; + + bind_flags = flags; + bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + + vma_flags = atomic_read(&vma->flags); + vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + + bind_flags &= ~vma_flags; + if (bind_flags == 0) + return 0; + + GEM_BUG_ON(!vma->pages); + + trace_i915_vma_bind(vma, bind_flags); + if (work && bind_flags & vma->vm->bind_async_flags) { + struct dma_fence *prev; + + work->vma = vma; + work->cache_level = cache_level; + work->flags = bind_flags | I915_VMA_ALLOC; + + /* + * Note we only want to chain up to the migration fence on + * the pages (not the object itself). As we don't track that, + * yet, we have to use the exclusive fence instead. + * + * Also note that we do not want to track the async vma as + * part of the obj->resv->excl_fence as it only affects + * execution and not content or object's backing store lifetime. + */ + prev = i915_active_set_exclusive(&vma->active, &work->base.dma); + if (prev) { + __i915_sw_fence_await_dma_fence(&work->base.chain, + prev, + &work->cb); + dma_fence_put(prev); + } + + work->base.dma.error = 0; /* enable the queue_work() */ + + if (vma->obj) { + __i915_gem_object_pin_pages(vma->obj); + work->pinned = vma->obj; + } + } else { + ret = vma->ops->bind_vma(vma, cache_level, bind_flags); + if (ret) + return ret; + } + + atomic_or(bind_flags, &vma->flags); + return 0; +} + +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) +{ + void __iomem *ptr; + int err; + + if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { + err = -ENODEV; + goto err; + } + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); + + ptr = READ_ONCE(vma->iomap); + if (ptr == NULL) { + ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, + vma->node.start, + vma->node.size); + if (ptr == NULL) { + err = -ENOMEM; + goto err; + } + + if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { + io_mapping_unmap(ptr); + ptr = vma->iomap; + } + } + + __i915_vma_pin(vma); + + err = i915_vma_pin_fence(vma); + if (err) + goto err_unpin; + + i915_vma_set_ggtt_write(vma); + + /* NB Access through the GTT requires the device to be awake. 
*/ + return ptr; + +err_unpin: + __i915_vma_unpin(vma); +err: + return IO_ERR_PTR(err); +} + +void i915_vma_flush_writes(struct i915_vma *vma) +{ + if (i915_vma_unset_ggtt_write(vma)) + intel_gt_flush_ggtt_writes(vma->vm->gt); +} + +void i915_vma_unpin_iomap(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->iomap == NULL); + + i915_vma_flush_writes(vma); + + i915_vma_unpin_fence(vma); + i915_vma_unpin(vma); +} + +void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) +{ + struct i915_vma *vma; + struct drm_i915_gem_object *obj; + + vma = fetch_and_zero(p_vma); + if (!vma) + return; + + obj = vma->obj; + GEM_BUG_ON(!obj); + + i915_vma_unpin(vma); + + if (flags & I915_VMA_RELEASE_MAP) + i915_gem_object_unpin_map(obj); + + i915_gem_object_put(obj); +} + +bool i915_vma_misplaced(const struct i915_vma *vma, + u64 size, u64 alignment, u64 flags) +{ + if (!drm_mm_node_allocated(&vma->node)) + return false; + + if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma))) + return true; + + if (vma->node.size < size) + return true; + + GEM_BUG_ON(alignment && !is_power_of_2(alignment)); + if (alignment && !IS_ALIGNED(vma->node.start, alignment)) + return true; + + if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) + return true; + + if (flags & PIN_OFFSET_BIAS && + vma->node.start < (flags & PIN_OFFSET_MASK)) + return true; + + if (flags & PIN_OFFSET_FIXED && + vma->node.start != (flags & PIN_OFFSET_MASK)) + return true; + + return false; +} + +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) +{ + bool mappable, fenceable; + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(!vma->fence_size); + + fenceable = (vma->node.size >= vma->fence_size && + IS_ALIGNED(vma->node.start, vma->fence_alignment)); + + mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; + + if (mappable && fenceable) + set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); + else + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); +} + +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) +{ + struct drm_mm_node *node = &vma->node; + struct drm_mm_node *other; + + /* + * On some machines we have to be careful when putting differing types + * of snoopable memory together to avoid the prefetcher crossing memory + * domains and dying. During vm initialisation, we decide whether or not + * these constraints apply and set the drm_mm.color_adjust + * appropriately. + */ + if (!i915_vm_has_cache_coloring(vma->vm)) + return true; + + /* Only valid to be called on an already inserted vma */ + GEM_BUG_ON(!drm_mm_node_allocated(node)); + GEM_BUG_ON(list_empty(&node->node_list)); + + other = list_prev_entry(node, node_list); + if (i915_node_color_differs(other, color) && + !drm_mm_hole_follows(other)) + return false; + + other = list_next_entry(node, node_list); + if (i915_node_color_differs(other, color) && + !drm_mm_hole_follows(node)) + return false; + + return true; +} + +/** + * i915_vma_insert - finds a slot for the vma in its address space + * @vma: the vma + * @size: requested size in bytes (can be larger than the VMA) + * @alignment: required alignment + * @flags: mask of PIN_* flags to use + * + * First we try to allocate some free space that meets the requirements for + * the VMA. Failiing that, if the flags permit, it will evict an old VMA, + * preferrably the oldest idle entry to make room for the new VMA. + * + * Returns: + * 0 on success, negative error code otherwise. 
+ */ +static int +i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + unsigned long color; + u64 start, end; + int ret; + + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + + size = max(size, vma->size); + alignment = max(alignment, vma->display_alignment); + if (flags & PIN_MAPPABLE) { + size = max_t(typeof(size), size, vma->fence_size); + alignment = max_t(typeof(alignment), + alignment, vma->fence_alignment); + } + + GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); + GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); + GEM_BUG_ON(!is_power_of_2(alignment)); + + start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; + GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); + + end = vma->vm->total; + if (flags & PIN_MAPPABLE) + end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); + if (flags & PIN_ZONE_4G) + end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); + GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); + + /* If binding the object/GGTT view requires more space than the entire + * aperture has, reject it early before evicting everything in a vain + * attempt to find space. + */ + if (size > end) { + DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n", + size, flags & PIN_MAPPABLE ? "mappable" : "total", + end); + return -ENOSPC; + } + + color = 0; + if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) + color = vma->obj->cache_level; + + if (flags & PIN_OFFSET_FIXED) { + u64 offset = flags & PIN_OFFSET_MASK; + if (!IS_ALIGNED(offset, alignment) || + range_overflows(offset, size, end)) + return -EINVAL; + + ret = i915_gem_gtt_reserve(vma->vm, &vma->node, + size, offset, color, + flags); + if (ret) + return ret; + } else { + /* + * We only support huge gtt pages through the 48b PPGTT, + * however we also don't want to force any alignment for + * objects which need to be tightly packed into the low 32bits. + * + * Note that we assume that GGTT are limited to 4GiB for the + * forseeable future. See also i915_ggtt_offset(). + */ + if (upper_32_bits(end - 1) && + vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { + /* + * We can't mix 64K and 4K PTEs in the same page-table + * (2M block), and so to avoid the ugliness and + * complexity of coloring we opt for just aligning 64K + * objects to 2M. + */ + u64 page_alignment = + rounddown_pow_of_two(vma->page_sizes.sg | + I915_GTT_PAGE_SIZE_2M); + + /* + * Check we don't expand for the limited Global GTT + * (mappable aperture is even more precious!). This + * also checks that we exclude the aliasing-ppgtt. 
+ */ + GEM_BUG_ON(i915_vma_is_ggtt(vma)); + + alignment = max(alignment, page_alignment); + + if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) + size = round_up(size, I915_GTT_PAGE_SIZE_2M); + } + + ret = i915_gem_gtt_insert(vma->vm, &vma->node, + size, alignment, color, + start, end, flags); + if (ret) + return ret; + + GEM_BUG_ON(vma->node.start < start); + GEM_BUG_ON(vma->node.start + vma->node.size > end); + } + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); + + list_add_tail(&vma->vm_link, &vma->vm->bound_list); + + return 0; +} + +static void +i915_vma_detach(struct i915_vma *vma) +{ + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + + /* + * And finally now the object is completely decoupled from this + * vma, we can drop its hold on the backing storage and allow + * it to be reaped by the shrinker. + */ + list_del(&vma->vm_link); +} + +static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) +{ + unsigned int bound; + bool pinned = true; + + bound = atomic_read(&vma->flags); + do { + if (unlikely(flags & ~bound)) + return false; + + if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) + return false; + + if (!(bound & I915_VMA_PIN_MASK)) + goto unpinned; + + GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0); + } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); + + return true; + +unpinned: + /* + * If pin_count==0, but we are bound, check under the lock to avoid + * racing with a concurrent i915_vma_unbind(). + */ + mutex_lock(&vma->vm->mutex); + do { + if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) { + pinned = false; + break; + } + + if (unlikely(flags & ~bound)) { + pinned = false; + break; + } + } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); + mutex_unlock(&vma->vm->mutex); + + return pinned; +} + +static int vma_get_pages(struct i915_vma *vma) +{ + int err = 0; + + if (atomic_add_unless(&vma->pages_count, 1, 0)) + return 0; + + /* Allocations ahoy! 
*/ + if (mutex_lock_interruptible(&vma->pages_mutex)) + return -EINTR; + + if (!atomic_read(&vma->pages_count)) { + if (vma->obj) { + err = i915_gem_object_pin_pages(vma->obj); + if (err) + goto unlock; + } + + err = vma->ops->set_pages(vma); + if (err) { + if (vma->obj) + i915_gem_object_unpin_pages(vma->obj); + goto unlock; + } + } + atomic_inc(&vma->pages_count); + +unlock: + mutex_unlock(&vma->pages_mutex); + + return err; +} + +static void __vma_put_pages(struct i915_vma *vma, unsigned int count) +{ + /* We allocate under vma_get_pages, so beware the shrinker */ + mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING); + GEM_BUG_ON(atomic_read(&vma->pages_count) < count); + if (atomic_sub_return(count, &vma->pages_count) == 0) { + vma->ops->clear_pages(vma); + GEM_BUG_ON(vma->pages); + if (vma->obj) + i915_gem_object_unpin_pages(vma->obj); + } + mutex_unlock(&vma->pages_mutex); +} + +static void vma_put_pages(struct i915_vma *vma) +{ + if (atomic_add_unless(&vma->pages_count, -1, 1)) + return; + + __vma_put_pages(vma, 1); +} + +static void vma_unbind_pages(struct i915_vma *vma) +{ + unsigned int count; + + lockdep_assert_held(&vma->vm->mutex); + + /* The upper portion of pages_count is the number of bindings */ + count = atomic_read(&vma->pages_count); + count >>= I915_VMA_PAGES_BIAS; + GEM_BUG_ON(!count); + + __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); +} + +int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + struct i915_vma_work *work = NULL; + intel_wakeref_t wakeref = 0; + unsigned int bound; + int err; + + BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); + BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); + + GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL))); + + /* First try and grab the pin without rebinding the vma */ + if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) + return 0; + + err = vma_get_pages(vma); + if (err) + return err; + + if (flags & vma->vm->bind_async_flags) { + work = i915_vma_work(); + if (!work) { + err = -ENOMEM; + goto err_pages; + } + } + + if (flags & PIN_GLOBAL) + wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); + + /* + * Differentiate between user/kernel vma inside the aliasing-ppgtt. + * + * We conflate the Global GTT with the user's vma when using the + * aliasing-ppgtt, but it is still vitally important to try and + * keep the use cases distinct. For example, userptr objects are + * not allowed inside the Global GTT as that will cause lock + * inversions when we have to evict them the mmu_notifier callbacks - + * but they are allowed to be part of the user ppGTT which can never + * be mapped. As such we try to give the distinct users of the same + * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt + * and i915_ppgtt separate]. + * + * NB this may cause us to mask real lock inversions -- while the + * code is safe today, lockdep may not be able to spot future + * transgressions. 
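+ *
+ * (The distinct lockclasses come from the subclass argument to the
+ * nested lock acquisition just below: subclass 0 for PIN_GLOBAL/GGTT
+ * pins, subclass 1 for user ppGTT pins inside the aliasing-ppgtt.)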
+ */ + err = mutex_lock_interruptible_nested(&vma->vm->mutex, + !(flags & PIN_GLOBAL)); + if (err) + goto err_fence; + + /* No more allocations allowed now we hold vm->mutex */ + + if (unlikely(i915_vma_is_closed(vma))) { + err = -ENOENT; + goto err_unlock; + } + + bound = atomic_read(&vma->flags); + if (unlikely(bound & I915_VMA_ERROR)) { + err = -ENOMEM; + goto err_unlock; + } + + if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) { + err = -EAGAIN; /* pins are meant to be fairly temporary */ + goto err_unlock; + } + + if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) { + __i915_vma_pin(vma); + goto err_unlock; + } + + err = i915_active_acquire(&vma->active); + if (err) + goto err_unlock; + + if (!(bound & I915_VMA_BIND_MASK)) { + err = i915_vma_insert(vma, size, alignment, flags); + if (err) + goto err_active; + + if (i915_is_ggtt(vma->vm)) + __i915_vma_set_map_and_fenceable(vma); + } + + GEM_BUG_ON(!vma->pages); + err = i915_vma_bind(vma, + vma->obj ? vma->obj->cache_level : 0, + flags, work); + if (err) + goto err_remove; + + /* There should only be at most 2 active bindings (user, global) */ + GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound); + atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); + + __i915_vma_pin(vma); + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); + GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); + +err_remove: + if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { + i915_vma_detach(vma); + drm_mm_remove_node(&vma->node); + } +err_active: + i915_active_release(&vma->active); +err_unlock: + mutex_unlock(&vma->vm->mutex); +err_fence: + if (work) + dma_fence_work_commit_imm(&work->base); + if (wakeref) + intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); +err_pages: + vma_put_pages(vma); + return err; +} + +static void flush_idle_contexts(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + intel_engine_flush_barriers(engine); + + intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); +} + +int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) +{ + struct i915_address_space *vm = vma->vm; + int err; + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + + do { + err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); + if (err != -ENOSPC) { + if (!err) { + err = i915_vma_wait_for_bind(vma); + if (err) + i915_vma_unpin(vma); + } + return err; + } + + /* Unlike i915_vma_pin, we don't take no for an answer! */ + flush_idle_contexts(vm->gt); + if (mutex_lock_interruptible(&vm->mutex) == 0) { + i915_gem_evict_vm(vm); + mutex_unlock(&vm->mutex); + } + } while (1); +} + +static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) +{ + /* + * We defer actually closing, unbinding and destroying the VMA until + * the next idle point, or if the object is freed in the meantime. By + * postponing the unbind, we allow for it to be resurrected by the + * client, avoiding the work required to rebind the VMA. This is + * advantageous for DRI, where the client/server pass objects + * between themselves, temporarily opening a local VMA to the + * object, and then closing it again. The same object is then reused + * on the next frame (or two, depending on the depth of the swap queue) + * causing us to rebind the VMA once more. This ends up being a lot + * of wasted work for the steady state. 
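+ *
+ * A hypothetical steady-state sequence this avoids, using the
+ * helpers in this file:
+ *	frame N:   i915_vma_close(vma)   -> parked on gt->closed_vma
+ *	frame N+1: i915_vma_reopen(vma)  -> rescued, no rebind needed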
+ */ + GEM_BUG_ON(i915_vma_is_closed(vma)); + list_add(&vma->closed_link, >->closed_vma); +} + +void i915_vma_close(struct i915_vma *vma) +{ + struct intel_gt *gt = vma->vm->gt; + unsigned long flags; + + if (i915_vma_is_ggtt(vma)) + return; + + GEM_BUG_ON(!atomic_read(&vma->open_count)); + if (atomic_dec_and_lock_irqsave(&vma->open_count, + >->closed_lock, + flags)) { + __vma_close(vma, gt); + spin_unlock_irqrestore(>->closed_lock, flags); + } +} + +static void __i915_vma_remove_closed(struct i915_vma *vma) +{ + struct intel_gt *gt = vma->vm->gt; + + spin_lock_irq(>->closed_lock); + list_del_init(&vma->closed_link); + spin_unlock_irq(>->closed_lock); +} + +void i915_vma_reopen(struct i915_vma *vma) +{ + if (i915_vma_is_closed(vma)) + __i915_vma_remove_closed(vma); +} + +void i915_vma_release(struct kref *ref) +{ + struct i915_vma *vma = container_of(ref, typeof(*vma), ref); + + if (drm_mm_node_allocated(&vma->node)) { + mutex_lock(&vma->vm->mutex); + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); + WARN_ON(__i915_vma_unbind(vma)); + mutex_unlock(&vma->vm->mutex); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + } + GEM_BUG_ON(i915_vma_is_active(vma)); + + if (vma->obj) { + struct drm_i915_gem_object *obj = vma->obj; + + spin_lock(&obj->vma.lock); + list_del(&vma->obj_link); + if (!RB_EMPTY_NODE(&vma->obj_node)) + rb_erase(&vma->obj_node, &obj->vma.tree); + spin_unlock(&obj->vma.lock); + } + + __i915_vma_remove_closed(vma); + i915_vm_put(vma->vm); + + i915_active_fini(&vma->active); + i915_vma_free(vma); +} + +void i915_vma_parked(struct intel_gt *gt) +{ + struct i915_vma *vma, *next; + LIST_HEAD(closed); + + spin_lock_irq(>->closed_lock); + list_for_each_entry_safe(vma, next, >->closed_vma, closed_link) { + struct drm_i915_gem_object *obj = vma->obj; + struct i915_address_space *vm = vma->vm; + + /* XXX All to avoid keeping a reference on i915_vma itself */ + + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + + if (!i915_vm_tryopen(vm)) { + i915_gem_object_put(obj); + continue; + } + + list_move(&vma->closed_link, &closed); + } + spin_unlock_irq(>->closed_lock); + + /* As the GT is held idle, no vma can be reopened as we destroy them */ + list_for_each_entry_safe(vma, next, &closed, closed_link) { + struct drm_i915_gem_object *obj = vma->obj; + struct i915_address_space *vm = vma->vm; + + INIT_LIST_HEAD(&vma->closed_link); + __i915_vma_put(vma); + + i915_gem_object_put(obj); + i915_vm_close(vm); + } +} + +static void __i915_vma_iounmap(struct i915_vma *vma) +{ + GEM_BUG_ON(i915_vma_is_pinned(vma)); + + if (vma->iomap == NULL) + return; + + io_mapping_unmap(vma->iomap); + vma->iomap = NULL; +} + +void i915_vma_revoke_mmap(struct i915_vma *vma) +{ + struct drm_vma_offset_node *node; + u64 vma_offset; + + if (!i915_vma_has_userfault(vma)) + return; + + GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); + GEM_BUG_ON(!vma->obj->userfault_count); + + node = &vma->mmo->vma_node; + vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; + unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, + drm_vma_node_offset_addr(node) + vma_offset, + vma->size, + 1); + + i915_vma_unset_userfault(vma); + if (!--vma->obj->userfault_count) + list_del(&vma->obj->userfault_link); +} + +int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) +{ + int err; + + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + + /* Wait for the vma to be bound before we start! 
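+ * (The await below serialises this request against any asynchronous
+ * bind still in flight on vma->active, e.g. one queued through
+ * vm->bind_async_flags in i915_vma_pin().)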
*/
+ err = i915_request_await_active(rq, &vma->active,
+ I915_ACTIVE_AWAIT_EXCL);
+ if (err)
+ return err;
+
+ return i915_active_add_request(&vma->active, rq);
+}
+
+int i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ assert_object_held(obj);
+
+ err = __i915_vma_move_to_active(vma, rq);
+ if (unlikely(err))
+ return err;
+
+ if (flags & EXEC_OBJECT_WRITE) {
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (unlikely(front)) {
+ if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+ i915_active_add_request(&front->write, rq);
+ intel_frontbuffer_put(front);
+ }
+
+ dma_resv_add_excl_fence(vma->resv, &rq->fence);
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
+ } else {
+ err = dma_resv_reserve_shared(vma->resv, 1);
+ if (unlikely(err))
+ return err;
+
+ dma_resv_add_shared_fence(vma->resv, &rq->fence);
+ obj->write_domain = 0;
+ }
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
+ i915_active_add_request(&vma->fence->active, rq);
+
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->mm.dirty = true;
+
+ GEM_BUG_ON(!i915_vma_is_active(vma));
+ return 0;
+}
+
+void __i915_vma_evict(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+ if (i915_vma_is_map_and_fenceable(vma)) {
+ /* Force a pagefault for domain tracking on next user access */
+ i915_vma_revoke_mmap(vma);
+
+ /*
+ * Check that we have flushed all writes through the GGTT
+ * before the unbind; otherwise, due to the non-strict nature
+ * of those indirect writes, they may end up referencing the
+ * GGTT PTE after the unbind.
+ *
+ * Note that we may be concurrently poking at the GGTT_WRITE
+ * bit from set-domain, as we mark all GGTT vma associated
+ * with an object. We know this is for another vma, as we
+ * are currently unbinding this one -- so if this vma will be
+ * reused, it will be refaulted and have its dirty bit set
+ * before the next write.
+ */
+ i915_vma_flush_writes(vma);
+
+ /* release the fence reg _after_ flushing */
+ i915_vma_revoke_fence(vma);
+
+ __i915_vma_iounmap(vma);
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
+ }
+ GEM_BUG_ON(vma->fence);
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
+
+ if (likely(atomic_read(&vma->vm->open))) {
+ trace_i915_vma_unbind(vma);
+ vma->ops->unbind_vma(vma);
+ }
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
+ &vma->flags);
+
+ i915_vma_detach(vma);
+ vma_unbind_pages(vma);
+}
+
+int __i915_vma_unbind(struct i915_vma *vma)
+{
+ int ret;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
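+ * That wait is the i915_vma_sync() call below.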
+ */ + ret = i915_vma_sync(vma); + if (ret) + return ret; + + GEM_BUG_ON(i915_vma_is_active(vma)); + __i915_vma_evict(vma); + + drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ + return 0; +} + +int i915_vma_unbind(struct i915_vma *vma) +{ + struct i915_address_space *vm = vma->vm; + intel_wakeref_t wakeref = 0; + int err; + + /* Optimistic wait before taking the mutex */ + err = i915_vma_sync(vma); + if (err) + return err; + + if (!drm_mm_node_allocated(&vma->node)) + return 0; + + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); + return -EAGAIN; + } + + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + /* XXX not always required: nop_clear_range */ + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); + + err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); + if (err) + goto out_rpm; + + err = __i915_vma_unbind(vma); + mutex_unlock(&vm->mutex); + +out_rpm: + if (wakeref) + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); + return err; +} + +struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) +{ + i915_gem_object_make_unshrinkable(vma->obj); + return vma; +} + +void i915_vma_make_shrinkable(struct i915_vma *vma) +{ + i915_gem_object_make_shrinkable(vma->obj); +} + +void i915_vma_make_purgeable(struct i915_vma *vma) +{ + i915_gem_object_make_purgeable(vma->obj); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_vma.c" +#endif + +static void i915_global_vma_shrink(void) +{ + kmem_cache_shrink(global.slab_vmas); +} + +static void i915_global_vma_exit(void) +{ + kmem_cache_destroy(global.slab_vmas); +} + +static struct i915_global_vma global = { { + .shrink = i915_global_vma_shrink, + .exit = i915_global_vma_exit, +} }; + +int __init i915_global_vma_init(void) +{ + global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); + if (!global.slab_vmas) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/rr-cache/4d878ad09076a3824576140c279b378cfcdad271/postimage b/rr-cache/4d878ad09076a3824576140c279b378cfcdad271/postimage new file mode 100644 index 000000000000..69a0682ddb6a --- /dev/null +++ b/rr-cache/4d878ad09076a3824576140c279b378cfcdad271/postimage @@ -0,0 +1,1432 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and is handled completely in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns. They come from keeping the memory footprint small
+ * and having fewer memory pages opened and accessed for refreshing the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and for configuring
+ * its offset in the proper registers. The hardware takes care of all the
+ * compression/decompression. However, there are many known cases where we
+ * have to forcibly disable it to allow proper screen updates.
+ */
+
+#include <drm/drm_fourcc.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_display_types.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
+
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
+ int *width, int *height)
+{
+ if (width)
+ *width = cache->plane.src_w;
+ if (height)
+ *height = cache->plane.src_h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+ const struct intel_fbc_state_cache *cache)
+{
+ int lines;
+
+ intel_fbc_get_plane_source_size(cache, NULL, &lines);
+ if (IS_GEN(dev_priv, 7))
+ lines = min(lines, 2048);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ lines = min(lines, 2560);
+
+ /* Hardware needs the full buffer stride, not just the active area. */
+ return lines * cache->fb.stride;
+}
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
+{
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+ FBC_STAT_COMPRESSING, 10)) {
+ drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
+ return;
+ }
+}
+
+static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ int cfb_pitch;
+ int i;
+ u32 fbc_ctl;
+
+ /* Note: fbc.threshold == 1 for i8xx */
+ cfb_pitch = params->cfb_size / FBC_LL_SIZE;
+ if (params->fb.stride < cfb_pitch)
+ cfb_pitch = params->fb.stride;
+
+ /* FBC_CTL wants 32B or 64B units */
+ if (IS_GEN(dev_priv, 2))
+ cfb_pitch = (cfb_pitch / 32) - 1;
+ else
+ cfb_pitch = (cfb_pitch / 64) - 1;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ intel_de_write(dev_priv, FBC_TAG(i), 0);
+
+ if (IS_GEN(dev_priv, 4)) {
+ u32 fbc_ctl2;
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
+ fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
+ if (params->fence_id >= 0)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
+ intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
+ intel_de_write(dev_priv, FBC_FENCE_OFF,
+ params->fence_y_offset);
+ }
+
+ /* enable it...
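+ * (pack the compression interval, periodic mode, CFB stride and,
+ * if fenced, the fence number into FBC_CONTROL)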
*/ + fbc_ctl = FBC_CTL_INTERVAL(params->interval); + fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev_priv)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff); + if (params->fence_id >= 0) + fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); +} + +static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + if (params->fb.format->cpp[0] == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; + intel_de_write(dev_priv, DPFC_FENCE_YOFF, + params->fence_y_offset); + } else { + intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + } + + /* enable it... */ + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); +} + +static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + } +} + +static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; +} + +/* This function forces a CFB recompression through the nuke operation. */ +static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_nuke(fbc->crtc); + + intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); +} + +static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN; + if (IS_GEN(dev_priv, 5)) + dpfc_ctl |= params->fence_id; + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } + } else { + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + } + + intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, + params->fence_y_offset); + /* enable it... 
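+ * (set the enable bit on the prepared dpfc_ctl value; the nuke that
+ * follows forces an initial recompression of the whole CFB)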
*/ + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); + } +} + +static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + /* Display WA #0529: skl, kbl, bxt. */ + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { + u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4); + + val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); + + if (params->gen9_wa_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; + + intel_de_write(dev_priv, CHICKEN_MISC_4, val); + } + + dpfc_ctl = 0; + if (IS_IVYBRIDGE(dev_priv)) + dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); + + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } else if (dev_priv->ggtt.num_fences) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + + if (dev_priv->fbc.false_color) + dpfc_ctl |= FBC_CTL_FALSE_COLOR; + + if (IS_IVYBRIDGE(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:ivb */ + intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1, + intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe), + intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); + } + + if (INTEL_GEN(dev_priv) >= 11) + /* Wa_1409120013:icl,ehl,tgl */ + intel_de_write(dev_priv, ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 5) + return ilk_fbc_is_active(dev_priv); + else if (IS_GM45(dev_priv)) + return g4x_fbc_is_active(dev_priv); + else + return i8xx_fbc_is_active(dev_priv); +} + +static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_activate(fbc->crtc); + + fbc->active = true; + fbc->activated = true; + + if (INTEL_GEN(dev_priv) >= 7) + gen7_fbc_activate(dev_priv); + else if (INTEL_GEN(dev_priv) >= 5) + ilk_fbc_activate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_activate(dev_priv); + else + i8xx_fbc_activate(dev_priv); +} + +static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_deactivate(fbc->crtc); + + fbc->active = false; + + if (INTEL_GEN(dev_priv) >= 5) + 
ilk_fbc_deactivate(dev_priv);
+ else if (IS_GM45(dev_priv))
+ g4x_fbc_deactivate(dev_priv);
+ else
+ i8xx_fbc_deactivate(dev_priv);
+}
+
+/**
+ * intel_fbc_is_active - Is FBC active?
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to verify the current state of FBC.
+ *
+ * FIXME: This should be tracked in the plane config eventually
+ * instead of queried at runtime for most callers.
+ */
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->fbc.active;
+}
+
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
+ const char *reason)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+
+ if (fbc->active)
+ intel_fbc_hw_deactivate(dev_priv);
+
+ fbc->no_fbc_reason = reason;
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node,
+ unsigned int size,
+ unsigned int fb_cpp)
+{
+ int compression_threshold = 1;
+ int ret;
+ u64 end;
+
+ /* The FBC hardware for BDW/SKL doesn't have access to the stolen
+ * reserved range size, so it always assumes the maximum (8MB) is used.
+ * If we enable FBC using a CFB on that memory range we'll get FIFO
+ * underruns, even if that range is not reserved by the BIOS. */
+ if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
+ end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
+ else
+ end = U64_MAX;
+
+ /* HACK: This code depends on what we will do in *_enable_fbc. If that
+ * code changes, this code needs to change as well.
+ *
+ * The enable_fbc code will attempt to use one of our two compression
+ * thresholds, therefore, in that case, we only have one resort.
+ */
+
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+ 4096, 0, end);
+ if (ret == 0)
+ return compression_threshold;
+
+again:
+ /* HW's ability to limit the CFB is 1:4 */
+ if (compression_threshold > 4 ||
+ (fb_cpp == 2 && compression_threshold == 2))
+ return 0;
+
+ ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+ 4096, 0, end);
+ if (ret && INTEL_GEN(dev_priv) <= 4) {
+ return 0;
+ } else if (ret) {
+ compression_threshold <<= 1;
+ goto again;
+ } else {
+ return compression_threshold;
+ }
+}
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
+ unsigned int size, unsigned int fb_cpp)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_mm_node *uninitialized_var(compressed_llb);
+ int ret;
+
+ drm_WARN_ON(&dev_priv->drm,
+ drm_mm_node_allocated(&fbc->compressed_fb));
+
+ ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
+ size, fb_cpp);
+ if (!ret)
+ goto err_llb;
+ else if (ret > 1) {
+ drm_info_once(&dev_priv->drm,
+ "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. 
Try to increase stolen memory size if available in BIOS.\n"); + } + + fbc->threshold = ret; + + if (INTEL_GEN(dev_priv) >= 5) + intel_de_write(dev_priv, ILK_DPFC_CB_BASE, + fbc->compressed_fb.start); + else if (IS_GM45(dev_priv)) { + intel_de_write(dev_priv, DPFC_CB_BASE, + fbc->compressed_fb.start); + } else { + compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); + if (!compressed_llb) + goto err_fb; + + ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, + 4096, 4096); + if (ret) + goto err_fb; + + fbc->compressed_llb = compressed_llb; + + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_fb.start, + U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_llb->start, + U32_MAX)); + intel_de_write(dev_priv, FBC_CFB_BASE, + dev_priv->dsm.start + fbc->compressed_fb.start); + intel_de_write(dev_priv, FBC_LL_BASE, + dev_priv->dsm.start + compressed_llb->start); + } + + drm_dbg_kms(&dev_priv->drm, + "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", + fbc->compressed_fb.size, fbc->threshold); + + return 0; + +err_fb: + kfree(compressed_llb); + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +err_llb: + if (drm_mm_initialized(&dev_priv->mm.stolen)) + drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + return -ENOSPC; +} + +static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + return; + + if (!drm_mm_node_allocated(&fbc->compressed_fb)) + return; + + if (fbc->compressed_llb) { + i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); + kfree(fbc->compressed_llb); + } + + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +} + +void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_cleanup_cfb(dev_priv); + mutex_unlock(&fbc->lock); +} + +static bool stride_is_valid(struct drm_i915_private *dev_priv, + u64 modifier, unsigned int stride) +{ + /* This should have been caught earlier. */ + if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + return false; + + /* Below are the additional FBC restrictions. 
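+ * In summary (matching the checks below): at least 512 bytes always;
+ * gen2/3 accept exactly 4k or 8k; gen4 (other than g4x) needs at
+ * least 2048; gen9 linear needs a multiple of 512 bytes per Display
+ * WA #1105; and nothing above 16384 is accepted.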
*/ + if (stride < 512) + return false; + + if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3)) + return stride == 4096 || stride == 8192; + + if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048) + return false; + + /* Display WA #1105: skl,bxt,kbl,cfl,glk */ + if (IS_GEN(dev_priv, 9) && + modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + return false; + + if (stride > 16384) + return false; + + return true; +} + +static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format) +{ + switch (pixel_format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + return true; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + /* 16bpp not supported on gen2 */ + if (IS_GEN(dev_priv, 2)) + return false; + /* WaFbcOnly1to1Ratio:ctg */ + if (IS_G4X(dev_priv)) + return false; + return true; + default: + return false; + } +} + +static bool rotation_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format, unsigned int rotation) +{ + if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + drm_rotation_90_or_270(rotation)) + return false; + else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && + rotation != DRM_MODE_ROTATE_0) + return false; + + return true; +} + +/* + * For some reason, the hardware tracking starts looking at whatever we + * programmed as the display plane base address register. It does not look at + * the X and Y offset registers. That's why we include the src x/y offsets + * instead of just looking at the plane size. + */ +static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + unsigned int effective_w, effective_h, max_w, max_h; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + max_w = 5120; + max_h = 4096; + } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + max_w = 4096; + max_h = 4096; + } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { + max_w = 4096; + max_h = 2048; + } else { + max_w = 2048; + max_h = 1536; + } + + intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, + &effective_h); + effective_w += fbc->state_cache.plane.adjusted_x; + effective_h += fbc->state_cache.plane.adjusted_y; + + return effective_w <= max_w && effective_h <= max_h; +} + +static bool tiling_is_valid(struct drm_i915_private *dev_priv, + uint64_t modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + if (INTEL_GEN(dev_priv) >= 9) + return true; + return false; + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + return true; + default: + return false; + } +} + +static void intel_fbc_update_state_cache(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct drm_framebuffer *fb = plane_state->hw.fb; + + cache->plane.visible = plane_state->uapi.visible; + if (!cache->plane.visible) + return; + + cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; + + cache->plane.rotation = plane_state->hw.rotation; + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
+ */
+ cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ cache->plane.adjusted_x = plane_state->color_plane[0].x;
+ cache->plane.adjusted_y = plane_state->color_plane[0].y;
+
+ cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
+
+ cache->fb.format = fb->format;
+ cache->fb.stride = fb->pitches[0];
+ cache->fb.modifier = fb->modifier;
+
+ /* FBC1 compression interval: arbitrary choice of 1 second */
+ cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
+
+ cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+
+ drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->vma->fence);
+
+ if (plane_state->flags & PLANE_HAS_FENCE &&
+ plane_state->vma->fence)
+ cache->fence_id = plane_state->vma->fence->id;
+ else
+ cache->fence_id = -1;
+}
+
+static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+ fbc->compressed_fb.size * fbc->threshold;
+}
+
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (intel_vgpu_active(dev_priv)) {
+ fbc->no_fbc_reason = "VGPU is active";
+ return false;
+ }
+
+ if (!dev_priv->params.enable_fbc) {
+ fbc->no_fbc_reason = "disabled per module param or by default";
+ return false;
+ }
+
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_fbc_can_activate(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+ if (!intel_fbc_can_enable(dev_priv))
+ return false;
+
+ if (!cache->plane.visible) {
+ fbc->no_fbc_reason = "primary plane not visible";
+ return false;
+ }
+
+ /* We don't need to use a state cache here since this information is
+ * global for all CRTCs.
+ */
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
+ if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
+ fbc->no_fbc_reason = "incompatible mode";
+ return false;
+ }
+
+ if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
+ fbc->no_fbc_reason = "mode too large for compression";
+ return false;
+ }
+
+ /* The use of a CPU fence is one of two ways to detect writes by the
+ * CPU to the scanout and trigger updates to the FBC.
+ *
+ * The other method is by software tracking (see
+ * intel_fbc_invalidate/flush()), which will manually notify FBC and
+ * nuke the current compressed buffer and recompress it.
+ *
+ * Note that it is possible for a tiled surface to be unmappable (and
+ * so have no fence associated with it) due to aperture constraints
+ * at the time of pinning.
+ *
+ * FIXME with 90/270 degree rotation we should use the fence on
+ * the normal GTT view (the rotated view doesn't even have a
+ * fence). Would need changes to the FBC fence Y offset as well.
+ * For now this will effectively disable FBC with 90/270 degree
+ * rotation.
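+ *
+ * (Hence the gen9+ exception in the check below: there FBC may run
+ * unfenced, relying solely on the software frontbuffer tracking
+ * described above.)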
+ */
+ if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
+ fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+ return false;
+ }
+
+ if (!rotation_is_valid(dev_priv, cache->fb.format->format,
+ cache->plane.rotation)) {
+ fbc->no_fbc_reason = "rotation unsupported";
+ return false;
+ }
+
+ if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
+ fbc->no_fbc_reason = "tiling unsupported";
+ return false;
+ }
+
+ if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
+ fbc->no_fbc_reason = "framebuffer stride not supported";
+ return false;
+ }
+
+ if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+ fbc->no_fbc_reason = "pixel format is invalid";
+ return false;
+ }
+
+ if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+ cache->fb.format->has_alpha) {
+ fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+ return false;
+ }
+
+ /* WaFbcExceedCdClockThreshold:hsw,bdw */
+ if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
+ cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
+ fbc->no_fbc_reason = "pixel rate is too big";
+ return false;
+ }
+
+ /* It is possible for the required CFB size to change without a
+ * crtc->disable + crtc->enable since it is possible to change the
+ * stride without triggering a full modeset. Since we try to
+ * over-allocate the CFB, there's a chance we may keep FBC enabled even
+ * if this happens, but if we exceed the current CFB size we'll have to
+ * disable FBC. Notice that it would be possible to disable FBC, wait
+ * for a frame, free the stolen node, then try to reenable FBC in case
+ * we didn't get any invalidate/deactivate calls, but this would require
+ * a lot of tracking just for a specific case. If we conclude it's an
+ * important case, we can implement it later. */
+ if (intel_fbc_cfb_size_changed(dev_priv)) {
+ fbc->no_fbc_reason = "CFB requirements changed";
+ return false;
+ }
+
+ /*
+ * Work around a problem on GEN9+ HW, where enabling FBC on a plane
+ * having a Y offset that isn't divisible by 4 causes FIFO underruns
+ * and screen flicker.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ (fbc->state_cache.plane.adjusted_y & 3)) {
+ fbc->no_fbc_reason = "plane Y offset is misaligned";
+ return false;
+ }
+
+ return true;
+}
+
+static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
+ struct intel_fbc_reg_params *params)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+ /* Since all our fields are integer types, use memset here so the
+ * comparison function can rely on memcmp because the padding will be
+ * zero.
*/ + memset(params, 0, sizeof(*params)); + + params->fence_id = cache->fence_id; + params->fence_y_offset = cache->fence_y_offset; + + params->interval = cache->interval; + + params->crtc.pipe = crtc->pipe; + params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + + params->fb.format = cache->fb.format; + params->fb.stride = cache->fb.stride; + + params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride; + + params->plane_visible = cache->plane.visible; +} + +static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct intel_fbc_reg_params *params = &fbc->params; + + if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + return false; + + if (!params->plane_visible) + return false; + + if (!intel_fbc_can_activate(crtc)) + return false; + + if (params->fb.format != cache->fb.format) + return false; + + if (params->fb.stride != cache->fb.stride) + return false; + + if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache)) + return false; + + if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride) + return false; + + return true; +} + +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + const char *reason = "update pending"; + bool need_vblank_wait = false; + + if (!plane->has_fbc || !plane_state) + return need_vblank_wait; + + mutex_lock(&fbc->lock); + + if (fbc->crtc != crtc) + goto unlock; + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->flip_pending = true; + + if (!intel_fbc_can_flip_nuke(crtc_state)) { + intel_fbc_deactivate(dev_priv, reason); + + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && + (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))) + need_vblank_wait = true; + fbc->activated = false; + } +unlock: + mutex_unlock(&fbc->lock); + + return need_vblank_wait; +} + +/** + * __intel_fbc_disable - disable FBC + * @dev_priv: i915 device instance + * + * This is the low level function that actually disables FBC. Callers should + * grab the FBC lock. 
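+ *
+ * (e.g. intel_fbc_disable() and intel_fbc_global_disable() below
+ * call this helper under fbc->lock.)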
+ */ +static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_crtc *crtc = fbc->crtc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !fbc->crtc); + drm_WARN_ON(&dev_priv->drm, fbc->active); + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + + __intel_fbc_cleanup_cfb(dev_priv); + + fbc->crtc = NULL; +} + +static void __intel_fbc_post_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->crtc != crtc) + return; + + fbc->flip_pending = false; + + if (!dev_priv->params.enable_fbc) { + intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); + __intel_fbc_disable(dev_priv); + + return; + } + + intel_fbc_get_reg_params(crtc, &fbc->params); + + if (!intel_fbc_can_activate(crtc)) + return; + + if (!fbc->busy_bits) + intel_fbc_hw_activate(dev_priv); + else + intel_fbc_deactivate(dev_priv, "frontbuffer write"); +} + +void intel_fbc_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_post_update(crtc); + mutex_unlock(&fbc->lock); +} + +static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) +{ + if (fbc->crtc) + return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + else + return fbc->possible_framebuffer_bits; +} + +void intel_fbc_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; + + if (fbc->crtc && fbc->busy_bits) + intel_fbc_deactivate(dev_priv, "frontbuffer write"); + + mutex_unlock(&fbc->lock); +} + +void intel_fbc_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + /* + * GTT tracking does not nuke the entire cfb + * so don't clear busy_bits set for some other + * reason. + */ + if (origin == ORIGIN_GTT) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits &= ~frontbuffer_bits; + + if (origin == ORIGIN_FLIP) + goto out; + + if (!fbc->busy_bits && fbc->crtc && + (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { + if (fbc->active) + intel_fbc_recompress(dev_priv); + else if (!fbc->flip_pending) + __intel_fbc_post_update(fbc->crtc); + } + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_choose_crtc - select a CRTC to enable FBC on + * @dev_priv: i915 device instance + * @state: the atomic state structure + * + * This function looks at the proposed state for CRTCs and planes, then chooses + * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to + * true. + * + * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe + * enable FBC for the chosen CRTC. 
If it does, it will set dev_priv->fbc.crtc. + */ +void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, + struct intel_atomic_state *state) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_plane *plane; + struct intel_plane_state *plane_state; + bool crtc_chosen = false; + int i; + + mutex_lock(&fbc->lock); + + /* Does this atomic commit involve the CRTC currently tied to FBC? */ + if (fbc->crtc && + !intel_atomic_get_new_crtc_state(state, fbc->crtc)) + goto out; + + if (!intel_fbc_can_enable(dev_priv)) + goto out; + + /* Simply choose the first CRTC that is compatible and has a visible + * plane. We could go for fancier schemes such as checking the plane + * size, but this would just affect the few platforms that don't tie FBC + * to pipe or plane A. */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); + + if (!plane->has_fbc) + continue; + + if (!plane_state->uapi.visible) + continue; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + crtc_state->enable_fbc = true; + crtc_chosen = true; + break; + } + + if (!crtc_chosen) + fbc->no_fbc_reason = "no suitable CRTC for FBC"; + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_enable: tries to enable FBC on the CRTC + * @crtc: the CRTC + * @state: corresponding &drm_crtc_state for @crtc + * + * This function checks if the given CRTC was chosen for FBC, then enables it if + * possible. Notice that it doesn't activate FBC. It is valid to call + * intel_fbc_enable multiple times for the same pipe without an + * intel_fbc_disable in the middle, as long as it is deactivated. + */ +void intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + + if (fbc->crtc) { + if (fbc->crtc != crtc || + !intel_fbc_cfb_size_changed(dev_priv)) + goto out; + + __intel_fbc_disable(dev_priv); + } + + drm_WARN_ON(&dev_priv->drm, fbc->active); + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + + /* FIXME crtc_state->enable_fbc lies :( */ + if (!cache->plane.visible) + goto out; + + if (intel_fbc_alloc_cfb(dev_priv, + intel_fbc_calculate_cfb_size(dev_priv, cache), + plane_state->hw.fb->format->cpp[0])) { + cache->plane.visible = false; + fbc->no_fbc_reason = "not enough stolen memory"; + goto out; + } + + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && + plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) + cache->gen9_wa_cfb_stride = + DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + else + cache->gen9_wa_cfb_stride = 0; + + drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + fbc->no_fbc_reason = "FBC enabled but not active yet\n"; + + fbc->crtc = crtc; +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_disable - disable FBC if it's associated with crtc + * @crtc: the CRTC + * + * This function disables FBC if it's associated with the provided CRTC. 
+ */ +void intel_fbc_disable(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc) + return; + + mutex_lock(&fbc->lock); + if (fbc->crtc == crtc) + __intel_fbc_disable(dev_priv); + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_global_disable - globally disable FBC + * @dev_priv: i915 device instance + * + * This function disables FBC regardless of which CRTC is associated with it. + */ +void intel_fbc_global_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + if (fbc->crtc) { + drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); + __intel_fbc_disable(dev_priv); + } + mutex_unlock(&fbc->lock); +} + +static void intel_fbc_underrun_work_fn(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, fbc.underrun_work); + struct intel_fbc *fbc = &dev_priv->fbc; + + mutex_lock(&fbc->lock); + + /* Maybe we were scheduled twice. */ + if (fbc->underrun_detected || !fbc->crtc) + goto out; + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); + fbc->underrun_detected = true; + + intel_fbc_deactivate(dev_priv, "FIFO underrun"); +out: + mutex_unlock(&fbc->lock); +} + +/* + * intel_fbc_reset_underrun - reset FBC fifo underrun status. + * @dev_priv: i915 device instance + * + * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we + * want to re-enable FBC after an underrun to increase test coverage. + */ +int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) +{ + int ret; + + cancel_work_sync(&dev_priv->fbc.underrun_work); + + ret = mutex_lock_interruptible(&dev_priv->fbc.lock); + if (ret) + return ret; + + if (dev_priv->fbc.underrun_detected) { + drm_dbg_kms(&dev_priv->drm, + "Re-allowing FBC after fifo underrun\n"); + dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; + } + + dev_priv->fbc.underrun_detected = false; + mutex_unlock(&dev_priv->fbc.lock); + + return 0; +} + +/** + * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun + * @dev_priv: i915 device instance + * + * Without FBC, most underruns are harmless and don't really cause too many + * problems, except for an annoying message on dmesg. With FBC, underruns can + * become black screens or even worse, especially when paired with bad + * watermarks. So in order for us to be on the safe side, completely disable FBC + * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe + * already suggests that watermarks may be bad, so try to be as safe as + * possible. + * + * This function is called from the IRQ handler. + */ +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + /* There's no guarantee that underrun_detected won't be set to true + * right after this check and before the work is scheduled, but that's + * not a problem since we'll check it again under the work function + * while FBC is locked. This check here is just to prevent us from + * unnecessarily scheduling the work, and it relies on the fact that we + * never switch underrun_detect back to false after it's true. 
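+ * Worst case a stale read schedules the work one extra time; the
+ * work function re-checks the flag under fbc->lock.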
*/ + if (READ_ONCE(fbc->underrun_detected)) + return; + + schedule_work(&fbc->underrun_work); +} + +/* + * The DDX driver changes its behavior depending on the value it reads from + * i915.enable_fbc, so sanitize it by translating the default value into either + * 0 or 1 in order to allow it to know what's going on. + * + * Notice that this is done at driver initialization and we still allow user + * space to change the value during runtime without sanitizing it again. IGT + * relies on being able to change i915.enable_fbc at runtime. + */ +static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +{ + if (dev_priv->params.enable_fbc >= 0) + return !!dev_priv->params.enable_fbc; + + if (!HAS_FBC(dev_priv)) + return 0; + + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) + return 1; + + return 0; +} + +static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +{ + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ + if (intel_vtd_active() && + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { + drm_info(&dev_priv->drm, + "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + return true; + } + + return false; +} + +/** + * intel_fbc_init - Initialize FBC + * @dev_priv: the i915 device + * + * This function might be called during PM init process. + */ +void intel_fbc_init(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); + mutex_init(&fbc->lock); + fbc->active = false; + + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + if (need_fbc_vtd_wa(dev_priv)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); + drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", + dev_priv->params.enable_fbc); + + if (!HAS_FBC(dev_priv)) { + fbc->no_fbc_reason = "unsupported by this chipset"; + return; + } + + /* We still don't have any sort of hardware state readout for FBC, so + * deactivate it in case the BIOS activated it to make sure software + * matches the hardware state. */ + if (intel_fbc_hw_is_active(dev_priv)) + intel_fbc_hw_deactivate(dev_priv); +} diff --git a/rr-cache/4d878ad09076a3824576140c279b378cfcdad271/preimage b/rr-cache/4d878ad09076a3824576140c279b378cfcdad271/preimage new file mode 100644 index 000000000000..ad68e2b3e4c7 --- /dev/null +++ b/rr-cache/4d878ad09076a3824576140c279b378cfcdad271/preimage @@ -0,0 +1,1438 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * DOC: Frame Buffer Compression (FBC) + * + * FBC tries to save memory bandwidth (and so power consumption) by + * compressing the amount of memory used by the display. It is total + * transparent to user space and completely handled in the kernel. + * + * The benefits of FBC are mostly visible with solid backgrounds and + * variation-less patterns. It comes from keeping the memory footprint small + * and having fewer memory pages opened and accessed for refreshing the display. + * + * i915 is responsible to reserve stolen memory for FBC and configure its + * offset on proper registers. The hardware takes care of all + * compress/decompress. However there are many known cases where we have to + * forcibly disable it to allow proper screen updates. + */ + +#include <drm/drm_fourcc.h> + +#include "i915_drv.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_display_types.h" +#include "intel_fbc.h" +#include "intel_frontbuffer.h" + +/* + * For SKL+, the plane source size used by the hardware is based on the value we + * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value + * we wrote to PIPESRC. + */ +static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache, + int *width, int *height) +{ + if (width) + *width = cache->plane.src_w; + if (height) + *height = cache->plane.src_h; +} + +static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, + const struct intel_fbc_state_cache *cache) +{ + int lines; + + intel_fbc_get_plane_source_size(cache, NULL, &lines); + if (IS_GEN(dev_priv, 7)) + lines = min(lines, 2048); + else if (INTEL_GEN(dev_priv) >= 8) + lines = min(lines, 2560); + + /* Hardware needs the full buffer stride, not just the active area. */ + return lines * cache->fb.stride; +} + +static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 fbc_ctl; + + /* Disable compression */ + fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + if ((fbc_ctl & FBC_CTL_EN) == 0) + return; + + fbc_ctl &= ~FBC_CTL_EN; + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + + /* Wait for compressing bit to clear */ + if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, + FBC_STAT_COMPRESSING, 10)) { + drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n"); + return; + } +} + +static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + int cfb_pitch; + int i; + u32 fbc_ctl; + + /* Note: fbc.threshold == 1 for i8xx */ + cfb_pitch = params->cfb_size / FBC_LL_SIZE; + if (params->fb.stride < cfb_pitch) + cfb_pitch = params->fb.stride; + + /* FBC_CTL wants 32B or 64B units */ + if (IS_GEN(dev_priv, 2)) + cfb_pitch = (cfb_pitch / 32) - 1; + else + cfb_pitch = (cfb_pitch / 64) - 1; + + /* Clear old tags */ + for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) + intel_de_write(dev_priv, FBC_TAG(i), 0); + + if (IS_GEN(dev_priv, 4)) { + u32 fbc_ctl2; + + /* Set it up... */ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; + fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE; + intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); + intel_de_write(dev_priv, FBC_FENCE_OFF, + params->fence_y_offset); + } + + /* enable it... 
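+	 * FBC_CONTROL is assembled from scratch below: the compression
+	 * interval, the enable and periodic bits, the 32B/64B stride computed
+	 * above and, when a fence is available, the CPU fence number.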
*/ + fbc_ctl = FBC_CTL_INTERVAL(params->interval); + fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev_priv)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff); + if (params->fence_id >= 0) + fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); +} + +static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + if (params->fb.format->cpp[0] == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; + intel_de_write(dev_priv, DPFC_FENCE_YOFF, + params->fence_y_offset); + } else { + intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + } + + /* enable it... */ + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); +} + +static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + } +} + +static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; +} + +/* This function forces a CFB recompression through the nuke operation. */ +static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_nuke(fbc->crtc); + + intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); +} + +static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN; + if (IS_GEN(dev_priv, 5)) + dpfc_ctl |= params->fence_id; + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } + } else { + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + } + + intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, + params->fence_y_offset); + /* enable it... 
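+	 * Setting DPFC_CTL_EN below latches the configuration assembled
+	 * above; the nuke that follows forces the hardware to recompress the
+	 * scanout.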
*/ + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); + } +} + +static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + /* Display WA #0529: skl, kbl, bxt. */ + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { + u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4); + + val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); + + if (params->gen9_wa_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; + + intel_de_write(dev_priv, CHICKEN_MISC_4, val); + } + + dpfc_ctl = 0; + if (IS_IVYBRIDGE(dev_priv)) + dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); + + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } else if (dev_priv->ggtt.num_fences) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + + if (dev_priv->fbc.false_color) + dpfc_ctl |= FBC_CTL_FALSE_COLOR; + + if (IS_IVYBRIDGE(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:ivb */ + intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1, + intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe), + intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); + } + + if (INTEL_GEN(dev_priv) >= 11) + /* Wa_1409120013:icl,ehl,tgl */ + intel_de_write(dev_priv, ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 5) + return ilk_fbc_is_active(dev_priv); + else if (IS_GM45(dev_priv)) + return g4x_fbc_is_active(dev_priv); + else + return i8xx_fbc_is_active(dev_priv); +} + +static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_activate(fbc->crtc); + + fbc->active = true; + fbc->activated = true; + + if (INTEL_GEN(dev_priv) >= 7) + gen7_fbc_activate(dev_priv); + else if (INTEL_GEN(dev_priv) >= 5) + ilk_fbc_activate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_activate(dev_priv); + else + i8xx_fbc_activate(dev_priv); +} + +static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_deactivate(fbc->crtc); + + fbc->active = false; + + if (INTEL_GEN(dev_priv) >= 5) + 
ilk_fbc_deactivate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_deactivate(dev_priv); + else + i8xx_fbc_deactivate(dev_priv); +} + +/** + * intel_fbc_is_active - Is FBC active? + * @dev_priv: i915 device instance + * + * This function is used to verify the current state of FBC. + * + * FIXME: This should be tracked in the plane config eventually + * instead of queried at runtime for most callers. + */ +bool intel_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return dev_priv->fbc.active; +} + +static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, + const char *reason) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->active) + intel_fbc_hw_deactivate(dev_priv); + + fbc->no_fbc_reason = reason; +} + +static int find_compression_threshold(struct drm_i915_private *dev_priv, + struct drm_mm_node *node, + unsigned int size, + unsigned int fb_cpp) +{ + int compression_threshold = 1; + int ret; + u64 end; + + /* The FBC hardware for BDW/SKL doesn't have access to the stolen + * reserved range size, so it always assumes the maximum (8mb) is used. + * If we enable FBC using a CFB on that memory range we'll get FIFO + * underruns, even if that range is not reserved by the BIOS. */ + if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv)) + end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024; + else + end = U64_MAX; + + /* HACK: This code depends on what we will do in *_enable_fbc. If that + * code changes, this code needs to change as well. + * + * The enable_fbc code will attempt to use one of our 2 compression + * thresholds, therefore, in that case, we only have 1 resort. + */ + + /* Try to over-allocate to reduce reallocations and fragmentation. */ + ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1, + 4096, 0, end); + if (ret == 0) + return compression_threshold; + +again: + /* HW's ability to limit the CFB is 1:4 */ + if (compression_threshold > 4 || + (fb_cpp == 2 && compression_threshold == 2)) + return 0; + + ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1, + 4096, 0, end); + if (ret && INTEL_GEN(dev_priv) <= 4) { + return 0; + } else if (ret) { + compression_threshold <<= 1; + goto again; + } else { + return compression_threshold; + } +} + +static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, + unsigned int size, unsigned int fb_cpp) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_mm_node *uninitialized_var(compressed_llb); + int ret; + + drm_WARN_ON(&dev_priv->drm, + drm_mm_node_allocated(&fbc->compressed_fb)); + + ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, + size, fb_cpp); + if (!ret) + goto err_llb; + else if (ret > 1) { + drm_info_once(&dev_priv->drm, + "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); + } + + fbc->threshold = ret; + + if (INTEL_GEN(dev_priv) >= 5) + intel_de_write(dev_priv, ILK_DPFC_CB_BASE, + fbc->compressed_fb.start); + else if (IS_GM45(dev_priv)) { + intel_de_write(dev_priv, DPFC_CB_BASE, + fbc->compressed_fb.start); + } else { + compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); + if (!compressed_llb) + goto err_fb; + + ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, + 4096, 4096); + if (ret) + goto err_fb; + + fbc->compressed_llb = compressed_llb; + + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_fb.start, + U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_llb->start, + U32_MAX)); + intel_de_write(dev_priv, FBC_CFB_BASE, + dev_priv->dsm.start + fbc->compressed_fb.start); + intel_de_write(dev_priv, FBC_LL_BASE, + dev_priv->dsm.start + compressed_llb->start); + } + + drm_dbg_kms(&dev_priv->drm, + "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", + fbc->compressed_fb.size, fbc->threshold); + + return 0; + +err_fb: + kfree(compressed_llb); + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +err_llb: + if (drm_mm_initialized(&dev_priv->mm.stolen)) + drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + return -ENOSPC; +} + +static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + return; + + if (!drm_mm_node_allocated(&fbc->compressed_fb)) + return; + + if (fbc->compressed_llb) { + i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); + kfree(fbc->compressed_llb); + } + + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +} + +void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_cleanup_cfb(dev_priv); + mutex_unlock(&fbc->lock); +} + +static bool stride_is_valid(struct drm_i915_private *dev_priv, + u64 modifier, unsigned int stride) +{ + /* This should have been caught earlier. */ + if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + return false; + + /* Below are the additional FBC restrictions. 
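+	 * (a 512 byte minimum, fixed 4k/8k strides on gen2/3, a 2048 byte
+	 * minimum on gen4 (non-G4X), the Display WA #1105 linear stride
+	 * alignment on gen9, and a 16384 byte maximum):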
*/ + if (stride < 512) + return false; + + if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3)) + return stride == 4096 || stride == 8192; + + if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048) + return false; + + /* Display WA #1105: skl,bxt,kbl,cfl,glk */ + if (IS_GEN(dev_priv, 9) && + modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + return false; + + if (stride > 16384) + return false; + + return true; +} + +static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format) +{ + switch (pixel_format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + return true; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + /* 16bpp not supported on gen2 */ + if (IS_GEN(dev_priv, 2)) + return false; + /* WaFbcOnly1to1Ratio:ctg */ + if (IS_G4X(dev_priv)) + return false; + return true; + default: + return false; + } +} + +static bool rotation_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format, unsigned int rotation) +{ + if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + drm_rotation_90_or_270(rotation)) + return false; + else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && + rotation != DRM_MODE_ROTATE_0) + return false; + + return true; +} + +/* + * For some reason, the hardware tracking starts looking at whatever we + * programmed as the display plane base address register. It does not look at + * the X and Y offset registers. That's why we include the src x/y offsets + * instead of just looking at the plane size. + */ +static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + unsigned int effective_w, effective_h, max_w, max_h; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + max_w = 5120; + max_h = 4096; + } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + max_w = 4096; + max_h = 4096; + } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { + max_w = 4096; + max_h = 2048; + } else { + max_w = 2048; + max_h = 1536; + } + + intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, + &effective_h); + effective_w += fbc->state_cache.plane.adjusted_x; + effective_h += fbc->state_cache.plane.adjusted_y; + + return effective_w <= max_w && effective_h <= max_h; +} + +static bool tiling_is_valid(struct drm_i915_private *dev_priv, + uint64_t modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + if (INTEL_GEN(dev_priv) >= 9) + return true; + return false; + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + return true; + default: + return false; + } +} + +static void intel_fbc_update_state_cache(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct drm_framebuffer *fb = plane_state->hw.fb; + + cache->plane.visible = plane_state->uapi.visible; + if (!cache->plane.visible) + return; + + cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; + + cache->plane.rotation = plane_state->hw.rotation; + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
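+	 * The >> 16 below converts the 16.16 fixed-point drm_rect src
+	 * coordinates to integer pixels.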
+ */ + cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + cache->plane.adjusted_x = plane_state->color_plane[0].x; + cache->plane.adjusted_y = plane_state->color_plane[0].y; + + cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; + + cache->fb.format = fb->format; + cache->fb.stride = fb->pitches[0]; + cache->fb.modifier = fb->modifier; + +<<<<<<< +======= + /* FBC1 compression interval: arbitrary choice of 1 second */ + cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); + +>>>>>>> + cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); + + drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && + !plane_state->vma->fence); + + if (plane_state->flags & PLANE_HAS_FENCE && + plane_state->vma->fence) + cache->fence_id = plane_state->vma->fence->id; + else + cache->fence_id = -1; +} + +static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > + fbc->compressed_fb.size * fbc->threshold; +} + +static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (intel_vgpu_active(dev_priv)) { + fbc->no_fbc_reason = "VGPU is active"; + return false; + } + + if (!dev_priv->params.enable_fbc) { + fbc->no_fbc_reason = "disabled per module param or by default"; + return false; + } + + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "underrun detected"; + return false; + } + + return true; +} + +static bool intel_fbc_can_activate(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!intel_fbc_can_enable(dev_priv)) + return false; + + if (!cache->plane.visible) { + fbc->no_fbc_reason = "primary plane not visible"; + return false; + } + + /* We don't need to use a state cache here since this information is + * global for all CRTC. + */ + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "underrun detected"; + return false; + } + + if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) { + fbc->no_fbc_reason = "incompatible mode"; + return false; + } + + if (!intel_fbc_hw_tracking_covers_screen(crtc)) { + fbc->no_fbc_reason = "mode too large for compression"; + return false; + } + + /* The use of a CPU fence is one of two ways to detect writes by the + * CPU to the scanout and trigger updates to the FBC. + * + * The other method is by software tracking (see + * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke + * the current compressed buffer and recompress it. + * + * Note that is possible for a tiled surface to be unmappable (and + * so have no fence associated with it) due to aperture constraints + * at the time of pinning. + * + * FIXME with 90/270 degree rotation we should use the fence on + * the normal GTT view (the rotated view doesn't even have a + * fence). Would need changes to the FBC fence Y offset as well. + * For now this will effectively disable FBC with 90/270 degree + * rotation. 
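+	 * On gen9+ a fence is not strictly required, since software
+	 * frontbuffer tracking suffices on its own; hence the check below is
+	 * gated on gen < 9.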
+ */ + if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) { + fbc->no_fbc_reason = "framebuffer not tiled or fenced"; + return false; + } + + if (!rotation_is_valid(dev_priv, cache->fb.format->format, + cache->plane.rotation)) { + fbc->no_fbc_reason = "rotation unsupported"; + return false; + } + + if (!tiling_is_valid(dev_priv, cache->fb.modifier)) { + fbc->no_fbc_reason = "tiling unsupported"; + return false; + } + + if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) { + fbc->no_fbc_reason = "framebuffer stride not supported"; + return false; + } + + if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { + fbc->no_fbc_reason = "pixel format is invalid"; + return false; + } + + if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && + cache->fb.format->has_alpha) { + fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; + return false; + } + + /* WaFbcExceedCdClockThreshold:hsw,bdw */ + if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && + cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { + fbc->no_fbc_reason = "pixel rate is too big"; + return false; + } + + /* It is possible for the required CFB size change without a + * crtc->disable + crtc->enable since it is possible to change the + * stride without triggering a full modeset. Since we try to + * over-allocate the CFB, there's a chance we may keep FBC enabled even + * if this happens, but if we exceed the current CFB size we'll have to + * disable FBC. Notice that it would be possible to disable FBC, wait + * for a frame, free the stolen node, then try to reenable FBC in case + * we didn't get any invalidate/deactivate calls, but this would require + * a lot of tracking just for a specific case. If we conclude it's an + * important case, we can implement it later. */ + if (intel_fbc_cfb_size_changed(dev_priv)) { + fbc->no_fbc_reason = "CFB requirements changed"; + return false; + } + + /* + * Work around a problem on GEN9+ HW, where enabling FBC on a plane + * having a Y offset that isn't divisible by 4 causes FIFO underrun + * and screen flicker. + */ + if (INTEL_GEN(dev_priv) >= 9 && + (fbc->state_cache.plane.adjusted_y & 3)) { + fbc->no_fbc_reason = "plane Y offset is misaligned"; + return false; + } + + return true; +} + +static void intel_fbc_get_reg_params(struct intel_crtc *crtc, + struct intel_fbc_reg_params *params) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + /* Since all our fields are integer types, use memset here so the + * comparison function can rely on memcmp because the padding will be + * zero. 
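+	 * With the memset(), two reg_params built from identical state
+	 * compare equal under memcmp() even where the compiler inserts
+	 * padding between members; without it the padding would hold stale
+	 * bytes.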
*/ + memset(params, 0, sizeof(*params)); + + params->fence_id = cache->fence_id; + params->fence_y_offset = cache->fence_y_offset; +<<<<<<< +======= + + params->interval = cache->interval; +>>>>>>> + + params->crtc.pipe = crtc->pipe; + params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + + params->fb.format = cache->fb.format; + params->fb.stride = cache->fb.stride; + + params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride; + + params->plane_visible = cache->plane.visible; +} + +static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct intel_fbc_reg_params *params = &fbc->params; + + if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + return false; + + if (!params->plane_visible) + return false; + + if (!intel_fbc_can_activate(crtc)) + return false; + + if (params->fb.format != cache->fb.format) + return false; + + if (params->fb.stride != cache->fb.stride) + return false; + + if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache)) + return false; + + if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride) + return false; + + return true; +} + +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + const char *reason = "update pending"; + bool need_vblank_wait = false; + + if (!plane->has_fbc || !plane_state) + return need_vblank_wait; + + mutex_lock(&fbc->lock); + + if (fbc->crtc != crtc) + goto unlock; + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->flip_pending = true; + + if (!intel_fbc_can_flip_nuke(crtc_state)) { + intel_fbc_deactivate(dev_priv, reason); + + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && + (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))) + need_vblank_wait = true; + fbc->activated = false; + } +unlock: + mutex_unlock(&fbc->lock); + + return need_vblank_wait; +} + +/** + * __intel_fbc_disable - disable FBC + * @dev_priv: i915 device instance + * + * This is the low level function that actually disables FBC. Callers should + * grab the FBC lock. 
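+ * The drm_WARN_ON(!mutex_is_locked(&fbc->lock)) below enforces that
+ * requirement at runtime.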
+ */ +static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_crtc *crtc = fbc->crtc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !fbc->crtc); + drm_WARN_ON(&dev_priv->drm, fbc->active); + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + + __intel_fbc_cleanup_cfb(dev_priv); + + fbc->crtc = NULL; +} + +static void __intel_fbc_post_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->crtc != crtc) + return; + + fbc->flip_pending = false; + + if (!dev_priv->params.enable_fbc) { + intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); + __intel_fbc_disable(dev_priv); + + return; + } + + intel_fbc_get_reg_params(crtc, &fbc->params); + + if (!intel_fbc_can_activate(crtc)) + return; + + if (!fbc->busy_bits) + intel_fbc_hw_activate(dev_priv); + else + intel_fbc_deactivate(dev_priv, "frontbuffer write"); +} + +void intel_fbc_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_post_update(crtc); + mutex_unlock(&fbc->lock); +} + +static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) +{ + if (fbc->crtc) + return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + else + return fbc->possible_framebuffer_bits; +} + +void intel_fbc_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; + + if (fbc->crtc && fbc->busy_bits) + intel_fbc_deactivate(dev_priv, "frontbuffer write"); + + mutex_unlock(&fbc->lock); +} + +void intel_fbc_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + /* + * GTT tracking does not nuke the entire cfb + * so don't clear busy_bits set for some other + * reason. + */ + if (origin == ORIGIN_GTT) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits &= ~frontbuffer_bits; + + if (origin == ORIGIN_FLIP) + goto out; + + if (!fbc->busy_bits && fbc->crtc && + (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { + if (fbc->active) + intel_fbc_recompress(dev_priv); + else if (!fbc->flip_pending) + __intel_fbc_post_update(fbc->crtc); + } + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_choose_crtc - select a CRTC to enable FBC on + * @dev_priv: i915 device instance + * @state: the atomic state structure + * + * This function looks at the proposed state for CRTCs and planes, then chooses + * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to + * true. + * + * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe + * enable FBC for the chosen CRTC. 
If it does, it will set dev_priv->fbc.crtc. + */ +void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, + struct intel_atomic_state *state) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_plane *plane; + struct intel_plane_state *plane_state; + bool crtc_chosen = false; + int i; + + mutex_lock(&fbc->lock); + + /* Does this atomic commit involve the CRTC currently tied to FBC? */ + if (fbc->crtc && + !intel_atomic_get_new_crtc_state(state, fbc->crtc)) + goto out; + + if (!intel_fbc_can_enable(dev_priv)) + goto out; + + /* Simply choose the first CRTC that is compatible and has a visible + * plane. We could go for fancier schemes such as checking the plane + * size, but this would just affect the few platforms that don't tie FBC + * to pipe or plane A. */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); + + if (!plane->has_fbc) + continue; + + if (!plane_state->uapi.visible) + continue; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + crtc_state->enable_fbc = true; + crtc_chosen = true; + break; + } + + if (!crtc_chosen) + fbc->no_fbc_reason = "no suitable CRTC for FBC"; + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_enable: tries to enable FBC on the CRTC + * @crtc: the CRTC + * @state: corresponding &drm_crtc_state for @crtc + * + * This function checks if the given CRTC was chosen for FBC, then enables it if + * possible. Notice that it doesn't activate FBC. It is valid to call + * intel_fbc_enable multiple times for the same pipe without an + * intel_fbc_disable in the middle, as long as it is deactivated. + */ +void intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + + if (fbc->crtc) { + if (fbc->crtc != crtc || + !intel_fbc_cfb_size_changed(dev_priv)) + goto out; + + __intel_fbc_disable(dev_priv); + } + + drm_WARN_ON(&dev_priv->drm, fbc->active); + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + + /* FIXME crtc_state->enable_fbc lies :( */ + if (!cache->plane.visible) + goto out; + + if (intel_fbc_alloc_cfb(dev_priv, + intel_fbc_calculate_cfb_size(dev_priv, cache), + plane_state->hw.fb->format->cpp[0])) { + cache->plane.visible = false; + fbc->no_fbc_reason = "not enough stolen memory"; + goto out; + } + + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && + plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) + cache->gen9_wa_cfb_stride = + DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + else + cache->gen9_wa_cfb_stride = 0; + + drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + fbc->no_fbc_reason = "FBC enabled but not active yet\n"; + + fbc->crtc = crtc; +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_disable - disable FBC if it's associated with crtc + * @crtc: the CRTC + * + * This function disables FBC if it's associated with the provided CRTC. 
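+ * Unlike intel_fbc_global_disable(), it is a no-op when FBC is currently
+ * bound to a different CRTC.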
+ */ +void intel_fbc_disable(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc) + return; + + mutex_lock(&fbc->lock); + if (fbc->crtc == crtc) + __intel_fbc_disable(dev_priv); + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_global_disable - globally disable FBC + * @dev_priv: i915 device instance + * + * This function disables FBC regardless of which CRTC is associated with it. + */ +void intel_fbc_global_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + if (fbc->crtc) { + drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); + __intel_fbc_disable(dev_priv); + } + mutex_unlock(&fbc->lock); +} + +static void intel_fbc_underrun_work_fn(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, fbc.underrun_work); + struct intel_fbc *fbc = &dev_priv->fbc; + + mutex_lock(&fbc->lock); + + /* Maybe we were scheduled twice. */ + if (fbc->underrun_detected || !fbc->crtc) + goto out; + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); + fbc->underrun_detected = true; + + intel_fbc_deactivate(dev_priv, "FIFO underrun"); +out: + mutex_unlock(&fbc->lock); +} + +/* + * intel_fbc_reset_underrun - reset FBC fifo underrun status. + * @dev_priv: i915 device instance + * + * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we + * want to re-enable FBC after an underrun to increase test coverage. + */ +int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) +{ + int ret; + + cancel_work_sync(&dev_priv->fbc.underrun_work); + + ret = mutex_lock_interruptible(&dev_priv->fbc.lock); + if (ret) + return ret; + + if (dev_priv->fbc.underrun_detected) { + drm_dbg_kms(&dev_priv->drm, + "Re-allowing FBC after fifo underrun\n"); + dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; + } + + dev_priv->fbc.underrun_detected = false; + mutex_unlock(&dev_priv->fbc.lock); + + return 0; +} + +/** + * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun + * @dev_priv: i915 device instance + * + * Without FBC, most underruns are harmless and don't really cause too many + * problems, except for an annoying message on dmesg. With FBC, underruns can + * become black screens or even worse, especially when paired with bad + * watermarks. So in order for us to be on the safe side, completely disable FBC + * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe + * already suggests that watermarks may be bad, so try to be as safe as + * possible. + * + * This function is called from the IRQ handler. + */ +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + /* There's no guarantee that underrun_detected won't be set to true + * right after this check and before the work is scheduled, but that's + * not a problem since we'll check it again under the work function + * while FBC is locked. This check here is just to prevent us from + * unnecessarily scheduling the work, and it relies on the fact that we + * never switch underrun_detect back to false after it's true. 
*/ + if (READ_ONCE(fbc->underrun_detected)) + return; + + schedule_work(&fbc->underrun_work); +} + +/* + * The DDX driver changes its behavior depending on the value it reads from + * i915.enable_fbc, so sanitize it by translating the default value into either + * 0 or 1 in order to allow it to know what's going on. + * + * Notice that this is done at driver initialization and we still allow user + * space to change the value during runtime without sanitizing it again. IGT + * relies on being able to change i915.enable_fbc at runtime. + */ +static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +{ + if (dev_priv->params.enable_fbc >= 0) + return !!dev_priv->params.enable_fbc; + + if (!HAS_FBC(dev_priv)) + return 0; + + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) + return 1; + + return 0; +} + +static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +{ + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ + if (intel_vtd_active() && + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { + drm_info(&dev_priv->drm, + "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + return true; + } + + return false; +} + +/** + * intel_fbc_init - Initialize FBC + * @dev_priv: the i915 device + * + * This function might be called during PM init process. + */ +void intel_fbc_init(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); + mutex_init(&fbc->lock); + fbc->active = false; + + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + if (need_fbc_vtd_wa(dev_priv)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); + drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", + dev_priv->params.enable_fbc); + + if (!HAS_FBC(dev_priv)) { + fbc->no_fbc_reason = "unsupported by this chipset"; + return; + } + + /* We still don't have any sort of hardware state readout for FBC, so + * deactivate it in case the BIOS activated it to make sure software + * matches the hardware state. */ + if (intel_fbc_hw_is_active(dev_priv)) + intel_fbc_hw_deactivate(dev_priv); +} diff --git a/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/postimage.1 b/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/postimage.1 new file mode 100644 index 000000000000..3872e0fcd74b --- /dev/null +++ b/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/postimage.1 @@ -0,0 +1,1432 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * DOC: Frame Buffer Compression (FBC) + * + * FBC tries to save memory bandwidth (and so power consumption) by + * compressing the amount of memory used by the display. It is total + * transparent to user space and completely handled in the kernel. + * + * The benefits of FBC are mostly visible with solid backgrounds and + * variation-less patterns. It comes from keeping the memory footprint small + * and having fewer memory pages opened and accessed for refreshing the display. + * + * i915 is responsible to reserve stolen memory for FBC and configure its + * offset on proper registers. The hardware takes care of all + * compress/decompress. However there are many known cases where we have to + * forcibly disable it to allow proper screen updates. + */ + +#include <drm/drm_fourcc.h> + +#include "i915_drv.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "intel_display_types.h" +#include "intel_fbc.h" +#include "intel_frontbuffer.h" + +/* + * For SKL+, the plane source size used by the hardware is based on the value we + * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value + * we wrote to PIPESRC. + */ +static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache, + int *width, int *height) +{ + if (width) + *width = cache->plane.src_w; + if (height) + *height = cache->plane.src_h; +} + +static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, + const struct intel_fbc_state_cache *cache) +{ + int lines; + + intel_fbc_get_plane_source_size(cache, NULL, &lines); + if (IS_GEN(dev_priv, 7)) + lines = min(lines, 2048); + else if (INTEL_GEN(dev_priv) >= 8) + lines = min(lines, 2560); + + /* Hardware needs the full buffer stride, not just the active area. */ + return lines * cache->fb.stride; +} + +static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 fbc_ctl; + + /* Disable compression */ + fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + if ((fbc_ctl & FBC_CTL_EN) == 0) + return; + + fbc_ctl &= ~FBC_CTL_EN; + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + + /* Wait for compressing bit to clear */ + if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, + FBC_STAT_COMPRESSING, 10)) { + drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n"); + return; + } +} + +static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + int cfb_pitch; + int i; + u32 fbc_ctl; + + /* Note: fbc.threshold == 1 for i8xx */ + cfb_pitch = params->cfb_size / FBC_LL_SIZE; + if (params->fb.stride < cfb_pitch) + cfb_pitch = params->fb.stride; + + /* FBC_CTL wants 32B or 64B units */ + if (IS_GEN(dev_priv, 2)) + cfb_pitch = (cfb_pitch / 32) - 1; + else + cfb_pitch = (cfb_pitch / 64) - 1; + + /* Clear old tags */ + for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) + intel_de_write(dev_priv, FBC_TAG(i), 0); + + if (IS_GEN(dev_priv, 4)) { + u32 fbc_ctl2; + + /* Set it up... */ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; + fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE; + intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); + intel_de_write(dev_priv, FBC_FENCE_OFF, + params->fence_y_offset); + } + + /* enable it... 
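+	 * FBC_CONTROL is read-modify-written here: the interval bits are
+	 * preserved while every other bit is cleared and rebuilt below.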
*/ + fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; + fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev_priv)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + if (params->fence_id >= 0) + fbc_ctl |= params->fence_id; + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); +} + +static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + if (params->fb.format->cpp[0] == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; + intel_de_write(dev_priv, DPFC_FENCE_YOFF, + params->fence_y_offset); + } else { + intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + } + + /* enable it... */ + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); +} + +static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + } +} + +static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; +} + +/* This function forces a CFB recompression through the nuke operation. */ +static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_nuke(fbc->crtc); + + intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); +} + +static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN; + if (IS_GEN(dev_priv, 5)) + dpfc_ctl |= params->fence_id; + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } + } else { + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + } + + intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, + params->fence_y_offset); + /* enable it... 
*/ + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); + } +} + +static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + /* Display WA #0529: skl, kbl, bxt. */ + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { + u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4); + + val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); + + if (params->gen9_wa_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; + + intel_de_write(dev_priv, CHICKEN_MISC_4, val); + } + + dpfc_ctl = 0; + if (IS_IVYBRIDGE(dev_priv)) + dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); + + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } else if (dev_priv->ggtt.num_fences) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + + if (dev_priv->fbc.false_color) + dpfc_ctl |= FBC_CTL_FALSE_COLOR; + + if (IS_IVYBRIDGE(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:ivb */ + intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1, + intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe), + intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); + } + + if (INTEL_GEN(dev_priv) >= 11) + /* Wa_1409120013:icl,ehl,tgl */ + intel_de_write(dev_priv, ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 5) + return ilk_fbc_is_active(dev_priv); + else if (IS_GM45(dev_priv)) + return g4x_fbc_is_active(dev_priv); + else + return i8xx_fbc_is_active(dev_priv); +} + +static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_activate(fbc->crtc); + + fbc->active = true; + fbc->activated = true; + + if (INTEL_GEN(dev_priv) >= 7) + gen7_fbc_activate(dev_priv); + else if (INTEL_GEN(dev_priv) >= 5) + ilk_fbc_activate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_activate(dev_priv); + else + i8xx_fbc_activate(dev_priv); +} + +static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_deactivate(fbc->crtc); + + fbc->active = false; + + if (INTEL_GEN(dev_priv) >= 5) + 
ilk_fbc_deactivate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_deactivate(dev_priv); + else + i8xx_fbc_deactivate(dev_priv); +} + +/** + * intel_fbc_is_active - Is FBC active? + * @dev_priv: i915 device instance + * + * This function is used to verify the current state of FBC. + * + * FIXME: This should be tracked in the plane config eventually + * instead of queried at runtime for most callers. + */ +bool intel_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return dev_priv->fbc.active; +} + +static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, + const char *reason) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->active) + intel_fbc_hw_deactivate(dev_priv); + + fbc->no_fbc_reason = reason; +} + +static int find_compression_threshold(struct drm_i915_private *dev_priv, + struct drm_mm_node *node, + unsigned int size, + unsigned int fb_cpp) +{ + int compression_threshold = 1; + int ret; + u64 end; + + /* The FBC hardware for BDW/SKL doesn't have access to the stolen + * reserved range size, so it always assumes the maximum (8mb) is used. + * If we enable FBC using a CFB on that memory range we'll get FIFO + * underruns, even if that range is not reserved by the BIOS. */ + if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv)) + end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024; + else + end = U64_MAX; + + /* HACK: This code depends on what we will do in *_enable_fbc. If that + * code changes, this code needs to change as well. + * + * The enable_fbc code will attempt to use one of our 2 compression + * thresholds, therefore, in that case, we only have 1 resort. + */ + + /* Try to over-allocate to reduce reallocations and fragmentation. */ + ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1, + 4096, 0, end); + if (ret == 0) + return compression_threshold; + +again: + /* HW's ability to limit the CFB is 1:4 */ + if (compression_threshold > 4 || + (fb_cpp == 2 && compression_threshold == 2)) + return 0; + + ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1, + 4096, 0, end); + if (ret && INTEL_GEN(dev_priv) <= 4) { + return 0; + } else if (ret) { + compression_threshold <<= 1; + goto again; + } else { + return compression_threshold; + } +} + +static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, + unsigned int size, unsigned int fb_cpp) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_mm_node *uninitialized_var(compressed_llb); + int ret; + + drm_WARN_ON(&dev_priv->drm, + drm_mm_node_allocated(&fbc->compressed_fb)); + + ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, + size, fb_cpp); + if (!ret) + goto err_llb; + else if (ret > 1) { + drm_info_once(&dev_priv->drm, + "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); + } + + fbc->threshold = ret; + + if (INTEL_GEN(dev_priv) >= 5) + intel_de_write(dev_priv, ILK_DPFC_CB_BASE, + fbc->compressed_fb.start); + else if (IS_GM45(dev_priv)) { + intel_de_write(dev_priv, DPFC_CB_BASE, + fbc->compressed_fb.start); + } else { + compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); + if (!compressed_llb) + goto err_fb; + + ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, + 4096, 4096); + if (ret) + goto err_fb; + + fbc->compressed_llb = compressed_llb; + + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_fb.start, + U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_llb->start, + U32_MAX)); + intel_de_write(dev_priv, FBC_CFB_BASE, + dev_priv->dsm.start + fbc->compressed_fb.start); + intel_de_write(dev_priv, FBC_LL_BASE, + dev_priv->dsm.start + compressed_llb->start); + } + + drm_dbg_kms(&dev_priv->drm, + "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", + fbc->compressed_fb.size, fbc->threshold); + + return 0; + +err_fb: + kfree(compressed_llb); + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +err_llb: + if (drm_mm_initialized(&dev_priv->mm.stolen)) + drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + return -ENOSPC; +} + +static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + return; + + if (!drm_mm_node_allocated(&fbc->compressed_fb)) + return; + + if (fbc->compressed_llb) { + i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); + kfree(fbc->compressed_llb); + } + + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +} + +void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_cleanup_cfb(dev_priv); + mutex_unlock(&fbc->lock); +} + +static bool stride_is_valid(struct drm_i915_private *dev_priv, + u64 modifier, unsigned int stride) +{ + /* This should have been caught earlier. */ + if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + return false; + + /* Below are the additional FBC restrictions. 
*/ + if (stride < 512) + return false; + + if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3)) + return stride == 4096 || stride == 8192; + + if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048) + return false; + + /* Display WA #1105: skl,bxt,kbl,cfl,glk */ + if (IS_GEN(dev_priv, 9) && + modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + return false; + + if (stride > 16384) + return false; + + return true; +} + +static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format) +{ + switch (pixel_format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + return true; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + /* 16bpp not supported on gen2 */ + if (IS_GEN(dev_priv, 2)) + return false; + /* WaFbcOnly1to1Ratio:ctg */ + if (IS_G4X(dev_priv)) + return false; + return true; + default: + return false; + } +} + +static bool rotation_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format, unsigned int rotation) +{ + if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + drm_rotation_90_or_270(rotation)) + return false; + else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && + rotation != DRM_MODE_ROTATE_0) + return false; + + return true; +} + +/* + * For some reason, the hardware tracking starts looking at whatever we + * programmed as the display plane base address register. It does not look at + * the X and Y offset registers. That's why we include the src x/y offsets + * instead of just looking at the plane size. + */ +static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + unsigned int effective_w, effective_h, max_w, max_h; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + max_w = 5120; + max_h = 4096; + } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + max_w = 4096; + max_h = 4096; + } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { + max_w = 4096; + max_h = 2048; + } else { + max_w = 2048; + max_h = 1536; + } + + intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, + &effective_h); + effective_w += fbc->state_cache.plane.adjusted_x; + effective_h += fbc->state_cache.plane.adjusted_y; + + return effective_w <= max_w && effective_h <= max_h; +} + +static bool tiling_is_valid(struct drm_i915_private *dev_priv, + uint64_t modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + if (INTEL_GEN(dev_priv) >= 9) + return true; + return false; + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + return true; + default: + return false; + } +} + +static void intel_fbc_update_state_cache(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct drm_framebuffer *fb = plane_state->hw.fb; + + cache->plane.visible = plane_state->uapi.visible; + if (!cache->plane.visible) + return; + + cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; + + cache->plane.rotation = plane_state->hw.rotation; + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
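	 *
	 * The >> 16 below is because drm_rect src coordinates are in 16.16
	 * fixed point; e.g. a 1920-pixel src width is stored as
	 * 1920 << 16 == 125829120.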
+ */ + cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + cache->plane.adjusted_x = plane_state->color_plane[0].x; + cache->plane.adjusted_y = plane_state->color_plane[0].y; + + cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; + + cache->fb.format = fb->format; + cache->fb.modifier = fb->modifier; + + /* FIXME is this correct? */ + cache->fb.stride = plane_state->color_plane[0].stride; + if (drm_rotation_90_or_270(plane_state->hw.rotation)) + cache->fb.stride *= fb->format->cpp[0]; + + /* FBC1 compression interval: arbitrary choice of 1 second */ + cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); + + cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); + + drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && + !plane_state->vma->fence); + + if (plane_state->flags & PLANE_HAS_FENCE && + plane_state->vma->fence) + cache->fence_id = plane_state->vma->fence->id; + else + cache->fence_id = -1; +} + +static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > + fbc->compressed_fb.size * fbc->threshold; +} + +static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (intel_vgpu_active(dev_priv)) { + fbc->no_fbc_reason = "VGPU is active"; + return false; + } + + if (!i915_modparams.enable_fbc) { + fbc->no_fbc_reason = "disabled per module param or by default"; + return false; + } + + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "underrun detected"; + return false; + } + + return true; +} + +static bool intel_fbc_can_activate(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!intel_fbc_can_enable(dev_priv)) + return false; + + if (!cache->plane.visible) { + fbc->no_fbc_reason = "primary plane not visible"; + return false; + } + + /* We don't need to use a state cache here since this information is + * global for all CRTC. + */ + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "underrun detected"; + return false; + } + + if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) { + fbc->no_fbc_reason = "incompatible mode"; + return false; + } + + if (!intel_fbc_hw_tracking_covers_screen(crtc)) { + fbc->no_fbc_reason = "mode too large for compression"; + return false; + } + + /* The use of a CPU fence is one of two ways to detect writes by the + * CPU to the scanout and trigger updates to the FBC. + * + * The other method is by software tracking (see + * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke + * the current compressed buffer and recompress it. + * + * Note that is possible for a tiled surface to be unmappable (and + * so have no fence associated with it) due to aperture constraints + * at the time of pinning. + * + * FIXME with 90/270 degree rotation we should use the fence on + * the normal GTT view (the rotated view doesn't even have a + * fence). Would need changes to the FBC fence Y offset as well. + * For now this will effectively disable FBC with 90/270 degree + * rotation. 
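	 *
	 * Presumably this is also why the check below only insists on a
	 * fence for gen < 9: on gen9+ the software frontbuffer tracking
	 * path alone is treated as sufficient.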
+ */ + if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) { + fbc->no_fbc_reason = "framebuffer not tiled or fenced"; + return false; + } + + if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { + fbc->no_fbc_reason = "pixel format is invalid"; + return false; + } + + if (!rotation_is_valid(dev_priv, cache->fb.format->format, + cache->plane.rotation)) { + fbc->no_fbc_reason = "rotation unsupported"; + return false; + } + + if (!tiling_is_valid(dev_priv, cache->fb.modifier)) { + fbc->no_fbc_reason = "tiling unsupported"; + return false; + } + + if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) { + fbc->no_fbc_reason = "framebuffer stride not supported"; + return false; + } + + if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && + cache->fb.format->has_alpha) { + fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; + return false; + } + + /* WaFbcExceedCdClockThreshold:hsw,bdw */ + if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && + cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { + fbc->no_fbc_reason = "pixel rate is too big"; + return false; + } + + /* It is possible for the required CFB size change without a + * crtc->disable + crtc->enable since it is possible to change the + * stride without triggering a full modeset. Since we try to + * over-allocate the CFB, there's a chance we may keep FBC enabled even + * if this happens, but if we exceed the current CFB size we'll have to + * disable FBC. Notice that it would be possible to disable FBC, wait + * for a frame, free the stolen node, then try to reenable FBC in case + * we didn't get any invalidate/deactivate calls, but this would require + * a lot of tracking just for a specific case. If we conclude it's an + * important case, we can implement it later. */ + if (intel_fbc_cfb_size_changed(dev_priv)) { + fbc->no_fbc_reason = "CFB requirements changed"; + return false; + } + + /* + * Work around a problem on GEN9+ HW, where enabling FBC on a plane + * having a Y offset that isn't divisible by 4 causes FIFO underrun + * and screen flicker. + */ + if (INTEL_GEN(dev_priv) >= 9 && + (fbc->state_cache.plane.adjusted_y & 3)) { + fbc->no_fbc_reason = "plane Y offset is misaligned"; + return false; + } + + return true; +} + +static void intel_fbc_get_reg_params(struct intel_crtc *crtc, + struct intel_fbc_reg_params *params) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + /* Since all our fields are integer types, use memset here so the + * comparison function can rely on memcmp because the padding will be + * zero. 
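	 *
	 * Without the memset, any compiler-inserted padding (e.g. a hole
	 * between an int member and a following u64) would contain stale
	 * stack data and could make memcmp() report two otherwise-equal
	 * parameter structs as different.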
*/ + memset(params, 0, sizeof(*params)); + + params->fence_id = cache->fence_id; + params->fence_y_offset = cache->fence_y_offset; + + params->crtc.pipe = crtc->pipe; + params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + + params->fb.format = cache->fb.format; + params->fb.stride = cache->fb.stride; + + params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride; + + params->plane_visible = cache->plane.visible; +} + +static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct intel_fbc_reg_params *params = &fbc->params; + + if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + return false; + + if (!params->plane_visible) + return false; + + if (!intel_fbc_can_activate(crtc)) + return false; + + if (params->fb.format != cache->fb.format) + return false; + + if (params->fb.stride != cache->fb.stride) + return false; + + if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache)) + return false; + + if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride) + return false; + + return true; +} + +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + const char *reason = "update pending"; + bool need_vblank_wait = false; + + if (!plane->has_fbc || !plane_state) + return need_vblank_wait; + + mutex_lock(&fbc->lock); + + if (fbc->crtc != crtc) + goto unlock; + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->flip_pending = true; + + if (!intel_fbc_can_flip_nuke(crtc_state)) { + intel_fbc_deactivate(dev_priv, reason); + + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && + (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))) + need_vblank_wait = true; + fbc->activated = false; + } +unlock: + mutex_unlock(&fbc->lock); + + return need_vblank_wait; +} + +/** + * __intel_fbc_disable - disable FBC + * @dev_priv: i915 device instance + * + * This is the low level function that actually disables FBC. Callers should + * grab the FBC lock. 
+ */ +static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_crtc *crtc = fbc->crtc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !fbc->crtc); + drm_WARN_ON(&dev_priv->drm, fbc->active); + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + + __intel_fbc_cleanup_cfb(dev_priv); + + fbc->crtc = NULL; +} + +static void __intel_fbc_post_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->crtc != crtc) + return; + + fbc->flip_pending = false; + + if (!i915_modparams.enable_fbc) { + intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); + __intel_fbc_disable(dev_priv); + + return; + } + + intel_fbc_get_reg_params(crtc, &fbc->params); + + if (!intel_fbc_can_activate(crtc)) + return; + + if (!fbc->busy_bits) + intel_fbc_hw_activate(dev_priv); + else + intel_fbc_deactivate(dev_priv, "frontbuffer write"); +} + +void intel_fbc_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_post_update(crtc); + mutex_unlock(&fbc->lock); +} + +static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) +{ + if (fbc->crtc) + return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + else + return fbc->possible_framebuffer_bits; +} + +void intel_fbc_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; + + if (fbc->crtc && fbc->busy_bits) + intel_fbc_deactivate(dev_priv, "frontbuffer write"); + + mutex_unlock(&fbc->lock); +} + +void intel_fbc_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits &= ~frontbuffer_bits; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + goto out; + + if (!fbc->busy_bits && fbc->crtc && + (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { + if (fbc->active) + intel_fbc_recompress(dev_priv); + else if (!fbc->flip_pending) + __intel_fbc_post_update(fbc->crtc); + } + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_choose_crtc - select a CRTC to enable FBC on + * @dev_priv: i915 device instance + * @state: the atomic state structure + * + * This function looks at the proposed state for CRTCs and planes, then chooses + * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to + * true. + * + * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe + * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. 
+ */ +void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, + struct intel_atomic_state *state) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_plane *plane; + struct intel_plane_state *plane_state; + bool crtc_chosen = false; + int i; + + mutex_lock(&fbc->lock); + + /* Does this atomic commit involve the CRTC currently tied to FBC? */ + if (fbc->crtc && + !intel_atomic_get_new_crtc_state(state, fbc->crtc)) + goto out; + + if (!intel_fbc_can_enable(dev_priv)) + goto out; + + /* Simply choose the first CRTC that is compatible and has a visible + * plane. We could go for fancier schemes such as checking the plane + * size, but this would just affect the few platforms that don't tie FBC + * to pipe or plane A. */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); + + if (!plane->has_fbc) + continue; + + if (!plane_state->uapi.visible) + continue; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + crtc_state->enable_fbc = true; + crtc_chosen = true; + break; + } + + if (!crtc_chosen) + fbc->no_fbc_reason = "no suitable CRTC for FBC"; + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_enable: tries to enable FBC on the CRTC + * @crtc: the CRTC + * @state: corresponding &drm_crtc_state for @crtc + * + * This function checks if the given CRTC was chosen for FBC, then enables it if + * possible. Notice that it doesn't activate FBC. It is valid to call + * intel_fbc_enable multiple times for the same pipe without an + * intel_fbc_disable in the middle, as long as it is deactivated. + */ +void intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + + if (fbc->crtc) { + if (fbc->crtc != crtc || + !intel_fbc_cfb_size_changed(dev_priv)) + goto out; + + __intel_fbc_disable(dev_priv); + } + + drm_WARN_ON(&dev_priv->drm, fbc->active); + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + + /* FIXME crtc_state->enable_fbc lies :( */ + if (!cache->plane.visible) + goto out; + + if (intel_fbc_alloc_cfb(dev_priv, + intel_fbc_calculate_cfb_size(dev_priv, cache), + plane_state->hw.fb->format->cpp[0])) { + cache->plane.visible = false; + fbc->no_fbc_reason = "not enough stolen memory"; + goto out; + } + + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && + plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) + cache->gen9_wa_cfb_stride = + DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + else + cache->gen9_wa_cfb_stride = 0; + + drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + fbc->no_fbc_reason = "FBC enabled but not active yet\n"; + + fbc->crtc = crtc; +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_disable - disable FBC if it's associated with crtc + * @crtc: the CRTC + * + * This function disables FBC if it's associated with the provided CRTC. 
+ */ +void intel_fbc_disable(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc) + return; + + mutex_lock(&fbc->lock); + if (fbc->crtc == crtc) + __intel_fbc_disable(dev_priv); + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_global_disable - globally disable FBC + * @dev_priv: i915 device instance + * + * This function disables FBC regardless of which CRTC is associated with it. + */ +void intel_fbc_global_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + if (fbc->crtc) { + drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); + __intel_fbc_disable(dev_priv); + } + mutex_unlock(&fbc->lock); +} + +static void intel_fbc_underrun_work_fn(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, fbc.underrun_work); + struct intel_fbc *fbc = &dev_priv->fbc; + + mutex_lock(&fbc->lock); + + /* Maybe we were scheduled twice. */ + if (fbc->underrun_detected || !fbc->crtc) + goto out; + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); + fbc->underrun_detected = true; + + intel_fbc_deactivate(dev_priv, "FIFO underrun"); +out: + mutex_unlock(&fbc->lock); +} + +/* + * intel_fbc_reset_underrun - reset FBC fifo underrun status. + * @dev_priv: i915 device instance + * + * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we + * want to re-enable FBC after an underrun to increase test coverage. + */ +int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) +{ + int ret; + + cancel_work_sync(&dev_priv->fbc.underrun_work); + + ret = mutex_lock_interruptible(&dev_priv->fbc.lock); + if (ret) + return ret; + + if (dev_priv->fbc.underrun_detected) { + drm_dbg_kms(&dev_priv->drm, + "Re-allowing FBC after fifo underrun\n"); + dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; + } + + dev_priv->fbc.underrun_detected = false; + mutex_unlock(&dev_priv->fbc.lock); + + return 0; +} + +/** + * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun + * @dev_priv: i915 device instance + * + * Without FBC, most underruns are harmless and don't really cause too many + * problems, except for an annoying message on dmesg. With FBC, underruns can + * become black screens or even worse, especially when paired with bad + * watermarks. So in order for us to be on the safe side, completely disable FBC + * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe + * already suggests that watermarks may be bad, so try to be as safe as + * possible. + * + * This function is called from the IRQ handler. + */ +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + /* There's no guarantee that underrun_detected won't be set to true + * right after this check and before the work is scheduled, but that's + * not a problem since we'll check it again under the work function + * while FBC is locked. This check here is just to prevent us from + * unnecessarily scheduling the work, and it relies on the fact that we + * never switch underrun_detect back to false after it's true. 
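	 *
	 * READ_ONCE() below keeps the compiler from tearing or caching
	 * this unlocked peek; correctness still comes from the re-check
	 * done under fbc->lock in the work function.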
*/ + if (READ_ONCE(fbc->underrun_detected)) + return; + + schedule_work(&fbc->underrun_work); +} + +/* + * The DDX driver changes its behavior depending on the value it reads from + * i915.enable_fbc, so sanitize it by translating the default value into either + * 0 or 1 in order to allow it to know what's going on. + * + * Notice that this is done at driver initialization and we still allow user + * space to change the value during runtime without sanitizing it again. IGT + * relies on being able to change i915.enable_fbc at runtime. + */ +static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +{ + if (i915_modparams.enable_fbc >= 0) + return !!i915_modparams.enable_fbc; + + if (!HAS_FBC(dev_priv)) + return 0; + + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) + return 1; + + return 0; +} + +static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +{ + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ + if (intel_vtd_active() && + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { + drm_info(&dev_priv->drm, + "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + return true; + } + + return false; +} + +/** + * intel_fbc_init - Initialize FBC + * @dev_priv: the i915 device + * + * This function might be called during PM init process. + */ +void intel_fbc_init(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); + mutex_init(&fbc->lock); + fbc->active = false; + + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + if (need_fbc_vtd_wa(dev_priv)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); + drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", + i915_modparams.enable_fbc); + + if (!HAS_FBC(dev_priv)) { + fbc->no_fbc_reason = "unsupported by this chipset"; + return; + } + + /* This value was pulled out of someone's hat */ + if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv)) + intel_de_write(dev_priv, FBC_CONTROL, + 500 << FBC_CTL_INTERVAL_SHIFT); + + /* We still don't have any sort of hardware state readout for FBC, so + * deactivate it in case the BIOS activated it to make sure software + * matches the hardware state. */ + if (intel_fbc_hw_is_active(dev_priv)) + intel_fbc_hw_deactivate(dev_priv); +} diff --git a/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage b/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage new file mode 100644 index 000000000000..5516c6db8dbc --- /dev/null +++ b/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage @@ -0,0 +1,1435 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and completely handled in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns. They come from keeping the memory footprint small
+ * and having fewer memory pages opened and accessed for refreshing the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and for programming
+ * its offset into the proper registers. The hardware takes care of all the
+ * compression/decompression. However, there are many known cases where we
+ * have to forcibly disable it to allow proper screen updates.
+ */
+
+#include <drm/drm_fourcc.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_display_types.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
+
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
+					    int *width, int *height)
+{
+	if (width)
+		*width = cache->plane.src_w;
+	if (height)
+		*height = cache->plane.src_h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+					const struct intel_fbc_state_cache *cache)
+{
+	int lines;
+
+	intel_fbc_get_plane_source_size(cache, NULL, &lines);
+	if (IS_GEN(dev_priv, 7))
+		lines = min(lines, 2048);
+	else if (INTEL_GEN(dev_priv) >= 8)
+		lines = min(lines, 2560);
+
+	/* Hardware needs the full buffer stride, not just the active area. */
+	return lines * cache->fb.stride;
+}
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
+{
+	u32 fbc_ctl;
+
+	/* Disable compression */
+	fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
+	if ((fbc_ctl & FBC_CTL_EN) == 0)
+		return;
+
+	fbc_ctl &= ~FBC_CTL_EN;
+	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
+
+	/* Wait for the compressing bit to clear */
+	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+				    FBC_STAT_COMPRESSING, 10)) {
+		drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
+		return;
+	}
+}
+
+static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	int cfb_pitch;
+	int i;
+	u32 fbc_ctl;
+
+	/* Note: fbc.threshold == 1 for i8xx */
+	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
+	if (params->fb.stride < cfb_pitch)
+		cfb_pitch = params->fb.stride;
+
+	/* FBC_CTL wants 32B or 64B units */
+	if (IS_GEN(dev_priv, 2))
+		cfb_pitch = (cfb_pitch / 32) - 1;
+	else
+		cfb_pitch = (cfb_pitch / 64) - 1;
+
+	/* Clear old tags */
+	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+		intel_de_write(dev_priv, FBC_TAG(i), 0);
+
+	if (IS_GEN(dev_priv, 4)) {
+		u32 fbc_ctl2;
+
+		/* Set it up...
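		 * (On gen4, FBC_CONTROL2 selects which plane to compress and,
		 * when a fence is available, enables CPU fence tracking, with
		 * FBC_FENCE_OFF taking the fence Y offset; this describes the
		 * writes below rather than documented register semantics.)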
*/ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; + fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE; + intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); + intel_de_write(dev_priv, FBC_FENCE_OFF, + params->fence_y_offset); + } + + /* enable it... */ + fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; + fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev_priv)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + if (params->fence_id >= 0) + fbc_ctl |= params->fence_id; + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); +} + +static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + if (params->fb.format->cpp[0] == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; + intel_de_write(dev_priv, DPFC_FENCE_YOFF, + params->fence_y_offset); + } else { + intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + } + + /* enable it... */ + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); +} + +static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + } +} + +static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; +} + +/* This function forces a CFB recompression through the nuke operation. */ +static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_nuke(fbc->crtc); + + intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); +} + +static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN; + if (IS_GEN(dev_priv, 5)) + dpfc_ctl |= params->fence_id; + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } + } else { + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + } + + intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, + params->fence_y_offset); + /* enable it... 
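	 * ... then force an immediate recompression so the freshly enabled
	 * CFB is populated from the current frontbuffer contents.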
*/ + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); + } +} + +static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + /* Display WA #0529: skl, kbl, bxt. */ + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { + u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4); + + val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); + + if (params->gen9_wa_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; + + intel_de_write(dev_priv, CHICKEN_MISC_4, val); + } + + dpfc_ctl = 0; + if (IS_IVYBRIDGE(dev_priv)) + dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); + + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } else if (dev_priv->ggtt.num_fences) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + + if (dev_priv->fbc.false_color) + dpfc_ctl |= FBC_CTL_FALSE_COLOR; + + if (IS_IVYBRIDGE(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:ivb */ + intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1, + intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe), + intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); + } + + if (INTEL_GEN(dev_priv) >= 11) + /* Wa_1409120013:icl,ehl,tgl */ + intel_de_write(dev_priv, ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 5) + return ilk_fbc_is_active(dev_priv); + else if (IS_GM45(dev_priv)) + return g4x_fbc_is_active(dev_priv); + else + return i8xx_fbc_is_active(dev_priv); +} + +static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_activate(fbc->crtc); + + fbc->active = true; + fbc->activated = true; + + if (INTEL_GEN(dev_priv) >= 7) + gen7_fbc_activate(dev_priv); + else if (INTEL_GEN(dev_priv) >= 5) + ilk_fbc_activate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_activate(dev_priv); + else + i8xx_fbc_activate(dev_priv); +} + +static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_deactivate(fbc->crtc); + + fbc->active = false; + + if (INTEL_GEN(dev_priv) >= 5) + 
ilk_fbc_deactivate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_deactivate(dev_priv); + else + i8xx_fbc_deactivate(dev_priv); +} + +/** + * intel_fbc_is_active - Is FBC active? + * @dev_priv: i915 device instance + * + * This function is used to verify the current state of FBC. + * + * FIXME: This should be tracked in the plane config eventually + * instead of queried at runtime for most callers. + */ +bool intel_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return dev_priv->fbc.active; +} + +static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, + const char *reason) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->active) + intel_fbc_hw_deactivate(dev_priv); + + fbc->no_fbc_reason = reason; +} + +static int find_compression_threshold(struct drm_i915_private *dev_priv, + struct drm_mm_node *node, + unsigned int size, + unsigned int fb_cpp) +{ + int compression_threshold = 1; + int ret; + u64 end; + + /* The FBC hardware for BDW/SKL doesn't have access to the stolen + * reserved range size, so it always assumes the maximum (8mb) is used. + * If we enable FBC using a CFB on that memory range we'll get FIFO + * underruns, even if that range is not reserved by the BIOS. */ + if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv)) + end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024; + else + end = U64_MAX; + + /* HACK: This code depends on what we will do in *_enable_fbc. If that + * code changes, this code needs to change as well. + * + * The enable_fbc code will attempt to use one of our 2 compression + * thresholds, therefore, in that case, we only have 1 resort. + */ + + /* Try to over-allocate to reduce reallocations and fragmentation. */ + ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1, + 4096, 0, end); + if (ret == 0) + return compression_threshold; + +again: + /* HW's ability to limit the CFB is 1:4 */ + if (compression_threshold > 4 || + (fb_cpp == 2 && compression_threshold == 2)) + return 0; + + ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1, + 4096, 0, end); + if (ret && INTEL_GEN(dev_priv) <= 4) { + return 0; + } else if (ret) { + compression_threshold <<= 1; + goto again; + } else { + return compression_threshold; + } +} + +static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, + unsigned int size, unsigned int fb_cpp) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_mm_node *uninitialized_var(compressed_llb); + int ret; + + drm_WARN_ON(&dev_priv->drm, + drm_mm_node_allocated(&fbc->compressed_fb)); + + ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, + size, fb_cpp); + if (!ret) + goto err_llb; + else if (ret > 1) { + drm_info_once(&dev_priv->drm, + "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); + } + + fbc->threshold = ret; + + if (INTEL_GEN(dev_priv) >= 5) + intel_de_write(dev_priv, ILK_DPFC_CB_BASE, + fbc->compressed_fb.start); + else if (IS_GM45(dev_priv)) { + intel_de_write(dev_priv, DPFC_CB_BASE, + fbc->compressed_fb.start); + } else { + compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); + if (!compressed_llb) + goto err_fb; + + ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, + 4096, 4096); + if (ret) + goto err_fb; + + fbc->compressed_llb = compressed_llb; + + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_fb.start, + U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_llb->start, + U32_MAX)); + intel_de_write(dev_priv, FBC_CFB_BASE, + dev_priv->dsm.start + fbc->compressed_fb.start); + intel_de_write(dev_priv, FBC_LL_BASE, + dev_priv->dsm.start + compressed_llb->start); + } + + drm_dbg_kms(&dev_priv->drm, + "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", + fbc->compressed_fb.size, fbc->threshold); + + return 0; + +err_fb: + kfree(compressed_llb); + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +err_llb: + if (drm_mm_initialized(&dev_priv->mm.stolen)) + drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + return -ENOSPC; +} + +static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + return; + + if (!drm_mm_node_allocated(&fbc->compressed_fb)) + return; + + if (fbc->compressed_llb) { + i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); + kfree(fbc->compressed_llb); + } + + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +} + +void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_cleanup_cfb(dev_priv); + mutex_unlock(&fbc->lock); +} + +static bool stride_is_valid(struct drm_i915_private *dev_priv, + u64 modifier, unsigned int stride) +{ + /* This should have been caught earlier. */ + if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + return false; + + /* Below are the additional FBC restrictions. 
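	 *
	 * Note that gen2/gen3 only accept the two fixed strides checked
	 * below; e.g. on gen3 a 2048-wide RGB565 framebuffer (stride 4096)
	 * is acceptable, while a 1024-wide one (stride 2048) is not.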
*/ + if (stride < 512) + return false; + + if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3)) + return stride == 4096 || stride == 8192; + + if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048) + return false; + + /* Display WA #1105: skl,bxt,kbl,cfl,glk */ + if (IS_GEN(dev_priv, 9) && + modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + return false; + + if (stride > 16384) + return false; + + return true; +} + +static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format) +{ + switch (pixel_format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + return true; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + /* 16bpp not supported on gen2 */ + if (IS_GEN(dev_priv, 2)) + return false; + /* WaFbcOnly1to1Ratio:ctg */ + if (IS_G4X(dev_priv)) + return false; + return true; + default: + return false; + } +} + +static bool rotation_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format, unsigned int rotation) +{ + if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + drm_rotation_90_or_270(rotation)) + return false; + else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && + rotation != DRM_MODE_ROTATE_0) + return false; + + return true; +} + +/* + * For some reason, the hardware tracking starts looking at whatever we + * programmed as the display plane base address register. It does not look at + * the X and Y offset registers. That's why we include the src x/y offsets + * instead of just looking at the plane size. + */ +static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + unsigned int effective_w, effective_h, max_w, max_h; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + max_w = 5120; + max_h = 4096; + } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + max_w = 4096; + max_h = 4096; + } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { + max_w = 4096; + max_h = 2048; + } else { + max_w = 2048; + max_h = 1536; + } + + intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, + &effective_h); + effective_w += fbc->state_cache.plane.adjusted_x; + effective_h += fbc->state_cache.plane.adjusted_y; + + return effective_w <= max_w && effective_h <= max_h; +} + +static bool tiling_is_valid(struct drm_i915_private *dev_priv, + uint64_t modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + if (INTEL_GEN(dev_priv) >= 9) + return true; + return false; + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + return true; + default: + return false; + } +} + +static void intel_fbc_update_state_cache(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct drm_framebuffer *fb = plane_state->hw.fb; + + cache->plane.visible = plane_state->uapi.visible; + if (!cache->plane.visible) + return; + + cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; + + cache->plane.rotation = plane_state->hw.rotation; + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
+ */ + cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + cache->plane.adjusted_x = plane_state->color_plane[0].x; + cache->plane.adjusted_y = plane_state->color_plane[0].y; + + cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; + + cache->fb.format = fb->format; + cache->fb.modifier = fb->modifier; + +<<<<<<< +======= + /* FIXME is this correct? */ + cache->fb.stride = plane_state->color_plane[0].stride; + if (drm_rotation_90_or_270(plane_state->hw.rotation)) + cache->fb.stride *= fb->format->cpp[0]; + + /* FBC1 compression interval: arbitrary choice of 1 second */ + cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); + +>>>>>>> + cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); + + drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && + !plane_state->vma->fence); + + if (plane_state->flags & PLANE_HAS_FENCE && + plane_state->vma->fence) + cache->fence_id = plane_state->vma->fence->id; + else + cache->fence_id = -1; +} + +static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > + fbc->compressed_fb.size * fbc->threshold; +} + +static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (intel_vgpu_active(dev_priv)) { + fbc->no_fbc_reason = "VGPU is active"; + return false; + } + + if (!i915_modparams.enable_fbc) { + fbc->no_fbc_reason = "disabled per module param or by default"; + return false; + } + + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "underrun detected"; + return false; + } + + return true; +} + +static bool intel_fbc_can_activate(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!intel_fbc_can_enable(dev_priv)) + return false; + + if (!cache->plane.visible) { + fbc->no_fbc_reason = "primary plane not visible"; + return false; + } + + /* We don't need to use a state cache here since this information is + * global for all CRTC. + */ + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "underrun detected"; + return false; + } + + if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) { + fbc->no_fbc_reason = "incompatible mode"; + return false; + } + + if (!intel_fbc_hw_tracking_covers_screen(crtc)) { + fbc->no_fbc_reason = "mode too large for compression"; + return false; + } + + /* The use of a CPU fence is one of two ways to detect writes by the + * CPU to the scanout and trigger updates to the FBC. + * + * The other method is by software tracking (see + * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke + * the current compressed buffer and recompress it. + * + * Note that is possible for a tiled surface to be unmappable (and + * so have no fence associated with it) due to aperture constraints + * at the time of pinning. + * + * FIXME with 90/270 degree rotation we should use the fence on + * the normal GTT view (the rotated view doesn't even have a + * fence). Would need changes to the FBC fence Y offset as well. + * For now this will effectively disable FBC with 90/270 degree + * rotation. 
+ */ + if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) { + fbc->no_fbc_reason = "framebuffer not tiled or fenced"; + return false; + } + + if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { + fbc->no_fbc_reason = "pixel format is invalid"; + return false; + } + + if (!rotation_is_valid(dev_priv, cache->fb.format->format, + cache->plane.rotation)) { + fbc->no_fbc_reason = "rotation unsupported"; + return false; + } + + if (!tiling_is_valid(dev_priv, cache->fb.modifier)) { + fbc->no_fbc_reason = "tiling unsupported"; + return false; + } + + if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) { + fbc->no_fbc_reason = "framebuffer stride not supported"; + return false; + } + + if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && + cache->fb.format->has_alpha) { + fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; + return false; + } + + /* WaFbcExceedCdClockThreshold:hsw,bdw */ + if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && + cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { + fbc->no_fbc_reason = "pixel rate is too big"; + return false; + } + + /* It is possible for the required CFB size change without a + * crtc->disable + crtc->enable since it is possible to change the + * stride without triggering a full modeset. Since we try to + * over-allocate the CFB, there's a chance we may keep FBC enabled even + * if this happens, but if we exceed the current CFB size we'll have to + * disable FBC. Notice that it would be possible to disable FBC, wait + * for a frame, free the stolen node, then try to reenable FBC in case + * we didn't get any invalidate/deactivate calls, but this would require + * a lot of tracking just for a specific case. If we conclude it's an + * important case, we can implement it later. */ + if (intel_fbc_cfb_size_changed(dev_priv)) { + fbc->no_fbc_reason = "CFB requirements changed"; + return false; + } + + /* + * Work around a problem on GEN9+ HW, where enabling FBC on a plane + * having a Y offset that isn't divisible by 4 causes FIFO underrun + * and screen flicker. + */ + if (INTEL_GEN(dev_priv) >= 9 && + (fbc->state_cache.plane.adjusted_y & 3)) { + fbc->no_fbc_reason = "plane Y offset is misaligned"; + return false; + } + + return true; +} + +static void intel_fbc_get_reg_params(struct intel_crtc *crtc, + struct intel_fbc_reg_params *params) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + /* Since all our fields are integer types, use memset here so the + * comparison function can rely on memcmp because the padding will be + * zero. 
*/ + memset(params, 0, sizeof(*params)); + + params->fence_id = cache->fence_id; + params->fence_y_offset = cache->fence_y_offset; + + params->crtc.pipe = crtc->pipe; + params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + + params->fb.format = cache->fb.format; + params->fb.stride = cache->fb.stride; + + params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride; + + params->plane_visible = cache->plane.visible; +} + +static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct intel_fbc_reg_params *params = &fbc->params; + + if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + return false; + + if (!params->plane_visible) + return false; + + if (!intel_fbc_can_activate(crtc)) + return false; + + if (params->fb.format != cache->fb.format) + return false; + + if (params->fb.stride != cache->fb.stride) + return false; + + if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache)) + return false; + + if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride) + return false; + + return true; +} + +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + const char *reason = "update pending"; + bool need_vblank_wait = false; + + if (!plane->has_fbc || !plane_state) + return need_vblank_wait; + + mutex_lock(&fbc->lock); + + if (fbc->crtc != crtc) + goto unlock; + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->flip_pending = true; + + if (!intel_fbc_can_flip_nuke(crtc_state)) { + intel_fbc_deactivate(dev_priv, reason); + + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && + (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))) + need_vblank_wait = true; + fbc->activated = false; + } +unlock: + mutex_unlock(&fbc->lock); + + return need_vblank_wait; +} + +/** + * __intel_fbc_disable - disable FBC + * @dev_priv: i915 device instance + * + * This is the low level function that actually disables FBC. Callers should + * grab the FBC lock. 
+ */ +static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_crtc *crtc = fbc->crtc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !fbc->crtc); + drm_WARN_ON(&dev_priv->drm, fbc->active); + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + + __intel_fbc_cleanup_cfb(dev_priv); + + fbc->crtc = NULL; +} + +static void __intel_fbc_post_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->crtc != crtc) + return; + + fbc->flip_pending = false; + + if (!i915_modparams.enable_fbc) { + intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); + __intel_fbc_disable(dev_priv); + + return; + } + + intel_fbc_get_reg_params(crtc, &fbc->params); + + if (!intel_fbc_can_activate(crtc)) + return; + + if (!fbc->busy_bits) + intel_fbc_hw_activate(dev_priv); + else + intel_fbc_deactivate(dev_priv, "frontbuffer write"); +} + +void intel_fbc_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_post_update(crtc); + mutex_unlock(&fbc->lock); +} + +static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) +{ + if (fbc->crtc) + return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + else + return fbc->possible_framebuffer_bits; +} + +void intel_fbc_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; + + if (fbc->crtc && fbc->busy_bits) + intel_fbc_deactivate(dev_priv, "frontbuffer write"); + + mutex_unlock(&fbc->lock); +} + +void intel_fbc_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits &= ~frontbuffer_bits; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + goto out; + + if (!fbc->busy_bits && fbc->crtc && + (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { + if (fbc->active) + intel_fbc_recompress(dev_priv); + else if (!fbc->flip_pending) + __intel_fbc_post_update(fbc->crtc); + } + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_choose_crtc - select a CRTC to enable FBC on + * @dev_priv: i915 device instance + * @state: the atomic state structure + * + * This function looks at the proposed state for CRTCs and planes, then chooses + * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to + * true. + * + * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe + * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. 
+ */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+			   struct intel_atomic_state *state)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_plane *plane;
+	struct intel_plane_state *plane_state;
+	bool crtc_chosen = false;
+	int i;
+
+	mutex_lock(&fbc->lock);
+
+	/* Does this atomic commit involve the CRTC currently tied to FBC? */
+	if (fbc->crtc &&
+	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
+		goto out;
+
+	if (!intel_fbc_can_enable(dev_priv))
+		goto out;
+
+	/* Simply choose the first CRTC that is compatible and has a visible
+	 * plane. We could go for fancier schemes such as checking the plane
+	 * size, but this would just affect the few platforms that don't tie FBC
+	 * to pipe or plane A. */
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		struct intel_crtc_state *crtc_state;
+		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
+
+		if (!plane->has_fbc)
+			continue;
+
+		if (!plane_state->uapi.visible)
+			continue;
+
+		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+		crtc_state->enable_fbc = true;
+		crtc_chosen = true;
+		break;
+	}
+
+	if (!crtc_chosen)
+		fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
+out:
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_enable - tries to enable FBC on the CRTC
+ * @state: the atomic state structure
+ * @crtc: the CRTC
+ *
+ * This function checks if the given CRTC was chosen for FBC, then enables it if
+ * possible. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_enable multiple times for the same pipe without an
+ * intel_fbc_disable in the middle, as long as it is deactivated.
+ */
+void intel_fbc_enable(struct intel_atomic_state *state,
+		      struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_plane_state *plane_state =
+		intel_atomic_get_new_plane_state(state, plane);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+	if (!plane->has_fbc || !plane_state)
+		return;
+
+	mutex_lock(&fbc->lock);
+
+	if (fbc->crtc) {
+		if (fbc->crtc != crtc ||
+		    !intel_fbc_cfb_size_changed(dev_priv))
+			goto out;
+
+		__intel_fbc_disable(dev_priv);
+	}
+
+	drm_WARN_ON(&dev_priv->drm, fbc->active);
+
+	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+
+	/* FIXME crtc_state->enable_fbc lies :( */
+	if (!cache->plane.visible)
+		goto out;
+
+	if (intel_fbc_alloc_cfb(dev_priv,
+				intel_fbc_calculate_cfb_size(dev_priv, cache),
+				plane_state->hw.fb->format->cpp[0])) {
+		cache->plane.visible = false;
+		fbc->no_fbc_reason = "not enough stolen memory";
+		goto out;
+	}
+
+	if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+	    plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
+		cache->gen9_wa_cfb_stride =
+			DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+	else
+		cache->gen9_wa_cfb_stride = 0;
+
+	drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
+		    pipe_name(crtc->pipe));
+	fbc->no_fbc_reason = "FBC enabled but not active yet";
+
+	fbc->crtc = crtc;
+out:
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_disable - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!plane->has_fbc)
+		return;
+
+	mutex_lock(&fbc->lock);
+	if (fbc->crtc == crtc)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_global_disable - globally disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC regardless of which CRTC is associated with it.
+ */
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!HAS_FBC(dev_priv))
+		return;
+
+	mutex_lock(&fbc->lock);
+	if (fbc->crtc) {
+		drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
+		__intel_fbc_disable(dev_priv);
+	}
+	mutex_unlock(&fbc->lock);
+}
+
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, fbc.underrun_work);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	mutex_lock(&fbc->lock);
+
+	/* Maybe we were scheduled twice. */
+	if (fbc->underrun_detected || !fbc->crtc)
+		goto out;
+
+	drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
+	fbc->underrun_detected = true;
+
+	intel_fbc_deactivate(dev_priv, "FIFO underrun");
+out:
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_reset_underrun - reset FBC FIFO underrun status.
+ * @dev_priv: i915 device instance
+ *
+ * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
+ * want to re-enable FBC after an underrun to increase test coverage.
+ */
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	cancel_work_sync(&dev_priv->fbc.underrun_work);
+
+	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
+	if (ret)
+		return ret;
+
+	if (dev_priv->fbc.underrun_detected) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Re-allowing FBC after fifo underrun\n");
+		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
+	}
+
+	dev_priv->fbc.underrun_detected = false;
+	mutex_unlock(&dev_priv->fbc.lock);
+
+	return 0;
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So in order for us to be on the safe side, completely disable FBC
+ * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
+ * already suggests that watermarks may be bad, so try to be as safe as
+ * possible.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!HAS_FBC(dev_priv))
+		return;
+
+	/* There's no guarantee that underrun_detected won't be set to true
+	 * right after this check and before the work is scheduled, but that's
+	 * not a problem since we'll check it again under the work function
+	 * while FBC is locked. This check here is just to prevent us from
+	 * unnecessarily scheduling the work, and it relies on the fact that we
+	 * never switch underrun_detected back to false after it's true.
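+ *
+ * For illustration, the worst-case interleaving is benign:
+ *
+ *	irq:  READ_ONCE(underrun_detected) == false
+ *	work: underrun_detected = true (from an earlier underrun)
+ *	irq:  schedule_work()
+ *	work: runs again, sees underrun_detected set and bails out early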
*/ + if (READ_ONCE(fbc->underrun_detected)) + return; + + schedule_work(&fbc->underrun_work); +} + +/* + * The DDX driver changes its behavior depending on the value it reads from + * i915.enable_fbc, so sanitize it by translating the default value into either + * 0 or 1 in order to allow it to know what's going on. + * + * Notice that this is done at driver initialization and we still allow user + * space to change the value during runtime without sanitizing it again. IGT + * relies on being able to change i915.enable_fbc at runtime. + */ +static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +{ + if (i915_modparams.enable_fbc >= 0) + return !!i915_modparams.enable_fbc; + + if (!HAS_FBC(dev_priv)) + return 0; + + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) + return 1; + + return 0; +} + +static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +{ + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ + if (intel_vtd_active() && + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { + drm_info(&dev_priv->drm, + "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + return true; + } + + return false; +} + +/** + * intel_fbc_init - Initialize FBC + * @dev_priv: the i915 device + * + * This function might be called during PM init process. + */ +void intel_fbc_init(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); + mutex_init(&fbc->lock); + fbc->active = false; + + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + if (need_fbc_vtd_wa(dev_priv)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + + i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); + drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", + i915_modparams.enable_fbc); + + if (!HAS_FBC(dev_priv)) { + fbc->no_fbc_reason = "unsupported by this chipset"; + return; + } + + /* This value was pulled out of someone's hat */ + if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv)) + intel_de_write(dev_priv, FBC_CONTROL, + 500 << FBC_CTL_INTERVAL_SHIFT); + + /* We still don't have any sort of hardware state readout for FBC, so + * deactivate it in case the BIOS activated it to make sure software + * matches the hardware state. */ + if (intel_fbc_hw_is_active(dev_priv)) + intel_fbc_hw_deactivate(dev_priv); +} diff --git a/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage.1 b/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage.1 new file mode 100644 index 000000000000..5516c6db8dbc --- /dev/null +++ b/rr-cache/e0cc3a62aab3bb5917dd730bcddf371944fdf461/preimage.1 @@ -0,0 +1,1435 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and completely handled in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns. They come from keeping the memory footprint small
+ * and having fewer memory pages opened and accessed for refreshing the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and for configuring
+ * its offset in the proper registers. The hardware takes care of all the
+ * compression/decompression. However, there are many known cases where we
+ * have to forcibly disable it to allow proper screen updates.
+ */
+
+#include <drm/drm_fourcc.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_display_types.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
+
+/*
+ * For SKL+, the plane source size used by the hardware is based on the value we
+ * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
+ * we wrote to PIPESRC.
+ */
+static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
+					    int *width, int *height)
+{
+	if (width)
+		*width = cache->plane.src_w;
+	if (height)
+		*height = cache->plane.src_h;
+}
+
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+					const struct intel_fbc_state_cache *cache)
+{
+	int lines;
+
+	intel_fbc_get_plane_source_size(cache, NULL, &lines);
+	if (IS_GEN(dev_priv, 7))
+		lines = min(lines, 2048);
+	else if (INTEL_GEN(dev_priv) >= 8)
+		lines = min(lines, 2560);
+
+	/* Hardware needs the full buffer stride, not just the active area. */
+	return lines * cache->fb.stride;
+}
+
+static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
+{
+	u32 fbc_ctl;
+
+	/* Disable compression */
+	fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
+	if ((fbc_ctl & FBC_CTL_EN) == 0)
+		return;
+
+	fbc_ctl &= ~FBC_CTL_EN;
+	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
+
+	/* Wait for compressing bit to clear */
+	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+				    FBC_STAT_COMPRESSING, 10)) {
+		drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
+		return;
+	}
+}
+
+static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+	int cfb_pitch;
+	int i;
+	u32 fbc_ctl;
+
+	/* Note: fbc.threshold == 1 for i8xx */
+	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
+	if (params->fb.stride < cfb_pitch)
+		cfb_pitch = params->fb.stride;
+
+	/* FBC_CTL wants 32B or 64B units */
+	if (IS_GEN(dev_priv, 2))
+		cfb_pitch = (cfb_pitch / 32) - 1;
+	else
+		cfb_pitch = (cfb_pitch / 64) - 1;
+
+	/* Clear old tags */
+	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+		intel_de_write(dev_priv, FBC_TAG(i), 0);
+
+	if (IS_GEN(dev_priv, 4)) {
+		u32 fbc_ctl2;
+
+		/* Set it up...
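+ * (gen4 only: FBC_CONTROL2 picks the plane and, when a fence id is
+ * available, sets FBC_CTL_CPU_FENCE; the FBC_FENCE_OFF write below then
+ * supplies the fence Y offset)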
*/ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; + fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE; + intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); + intel_de_write(dev_priv, FBC_FENCE_OFF, + params->fence_y_offset); + } + + /* enable it... */ + fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; + fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev_priv)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + if (params->fence_id >= 0) + fbc_ctl |= params->fence_id; + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); +} + +static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + if (params->fb.format->cpp[0] == 2) + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + else + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; + intel_de_write(dev_priv, DPFC_FENCE_YOFF, + params->fence_y_offset); + } else { + intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + } + + /* enable it... */ + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); +} + +static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + } +} + +static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; +} + +/* This function forces a CFB recompression through the nuke operation. */ +static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_nuke(fbc->crtc); + + intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); +} + +static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN; + if (IS_GEN(dev_priv, 5)) + dpfc_ctl |= params->fence_id; + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } + } else { + if (IS_GEN(dev_priv, 6)) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + } + + intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, + params->fence_y_offset); + /* enable it... 
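+ * (dpfc_ctl now carries the plane, compression limit and fence bits;
+ * e.g. a threshold of 1 on a 16bpp framebuffer was bumped to 2 above
+ * and thus selects DPFC_CTL_LIMIT_2X)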
*/ + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +{ + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); + } +} + +static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +{ + return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + u32 dpfc_ctl; + int threshold = dev_priv->fbc.threshold; + + /* Display WA #0529: skl, kbl, bxt. */ + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { + u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4); + + val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); + + if (params->gen9_wa_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; + + intel_de_write(dev_priv, CHICKEN_MISC_4, val); + } + + dpfc_ctl = 0; + if (IS_IVYBRIDGE(dev_priv)) + dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); + + if (params->fb.format->cpp[0] == 2) + threshold++; + + switch (threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: + dpfc_ctl |= DPFC_CTL_LIMIT_2X; + break; + case 1: + dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + + if (params->fence_id >= 0) { + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->fence_y_offset); + } else if (dev_priv->ggtt.num_fences) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); + } + + if (dev_priv->fbc.false_color) + dpfc_ctl |= FBC_CTL_FALSE_COLOR; + + if (IS_IVYBRIDGE(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:ivb */ + intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1, + intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe), + intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); + } + + if (INTEL_GEN(dev_priv) >= 11) + /* Wa_1409120013:icl,ehl,tgl */ + intel_de_write(dev_priv, ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + intel_fbc_recompress(dev_priv); +} + +static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 5) + return ilk_fbc_is_active(dev_priv); + else if (IS_GM45(dev_priv)) + return g4x_fbc_is_active(dev_priv); + else + return i8xx_fbc_is_active(dev_priv); +} + +static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_activate(fbc->crtc); + + fbc->active = true; + fbc->activated = true; + + if (INTEL_GEN(dev_priv) >= 7) + gen7_fbc_activate(dev_priv); + else if (INTEL_GEN(dev_priv) >= 5) + ilk_fbc_activate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_activate(dev_priv); + else + i8xx_fbc_activate(dev_priv); +} + +static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_deactivate(fbc->crtc); + + fbc->active = false; + + if (INTEL_GEN(dev_priv) >= 5) + 
ilk_fbc_deactivate(dev_priv);
+	else if (IS_GM45(dev_priv))
+		g4x_fbc_deactivate(dev_priv);
+	else
+		i8xx_fbc_deactivate(dev_priv);
+}
+
+/**
+ * intel_fbc_is_active - Is FBC active?
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to verify the current state of FBC.
+ *
+ * FIXME: This should be tracked in the plane config eventually
+ * instead of queried at runtime for most callers.
+ */
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->fbc.active;
+}
+
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
+				 const char *reason)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+
+	if (fbc->active)
+		intel_fbc_hw_deactivate(dev_priv);
+
+	fbc->no_fbc_reason = reason;
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+				      struct drm_mm_node *node,
+				      unsigned int size,
+				      unsigned int fb_cpp)
+{
+	int compression_threshold = 1;
+	int ret;
+	u64 end;
+
+	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
+	 * reserved range size, so it always assumes the maximum (8MB) is used.
+	 * If we enable FBC using a CFB on that memory range we'll get FIFO
+	 * underruns, even if that range is not reserved by the BIOS. */
+	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
+		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
+	else
+		end = U64_MAX;
+
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds, therefore, in that case, we only have one fallback left.
+	 */
+
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+						   4096, 0, end);
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+						   4096, 0, end);
+	if (ret && INTEL_GEN(dev_priv) <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
+			       unsigned int size, unsigned int fb_cpp)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	drm_WARN_ON(&dev_priv->drm,
+		    drm_mm_node_allocated(&fbc->compressed_fb));
+
+	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
+		goto err_llb;
+	else if (ret > 1) {
+		drm_info_once(&dev_priv->drm,
+			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); + } + + fbc->threshold = ret; + + if (INTEL_GEN(dev_priv) >= 5) + intel_de_write(dev_priv, ILK_DPFC_CB_BASE, + fbc->compressed_fb.start); + else if (IS_GM45(dev_priv)) { + intel_de_write(dev_priv, DPFC_CB_BASE, + fbc->compressed_fb.start); + } else { + compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); + if (!compressed_llb) + goto err_fb; + + ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, + 4096, 4096); + if (ret) + goto err_fb; + + fbc->compressed_llb = compressed_llb; + + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_fb.start, + U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_llb->start, + U32_MAX)); + intel_de_write(dev_priv, FBC_CFB_BASE, + dev_priv->dsm.start + fbc->compressed_fb.start); + intel_de_write(dev_priv, FBC_LL_BASE, + dev_priv->dsm.start + compressed_llb->start); + } + + drm_dbg_kms(&dev_priv->drm, + "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", + fbc->compressed_fb.size, fbc->threshold); + + return 0; + +err_fb: + kfree(compressed_llb); + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +err_llb: + if (drm_mm_initialized(&dev_priv->mm.stolen)) + drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + return -ENOSPC; +} + +static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + return; + + if (!drm_mm_node_allocated(&fbc->compressed_fb)) + return; + + if (fbc->compressed_llb) { + i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); + kfree(fbc->compressed_llb); + } + + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); +} + +void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_cleanup_cfb(dev_priv); + mutex_unlock(&fbc->lock); +} + +static bool stride_is_valid(struct drm_i915_private *dev_priv, + u64 modifier, unsigned int stride) +{ + /* This should have been caught earlier. */ + if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + return false; + + /* Below are the additional FBC restrictions. 
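+ * (worked example: a linear 1920x1080 XRGB8888 framebuffer has a
+ * stride of 1920 * 4 = 7680 bytes, which is >= 512, <= 16384 and a
+ * multiple of 512, so it passes every check below on gen9)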
*/ + if (stride < 512) + return false; + + if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3)) + return stride == 4096 || stride == 8192; + + if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048) + return false; + + /* Display WA #1105: skl,bxt,kbl,cfl,glk */ + if (IS_GEN(dev_priv, 9) && + modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + return false; + + if (stride > 16384) + return false; + + return true; +} + +static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format) +{ + switch (pixel_format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + return true; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + /* 16bpp not supported on gen2 */ + if (IS_GEN(dev_priv, 2)) + return false; + /* WaFbcOnly1to1Ratio:ctg */ + if (IS_G4X(dev_priv)) + return false; + return true; + default: + return false; + } +} + +static bool rotation_is_valid(struct drm_i915_private *dev_priv, + u32 pixel_format, unsigned int rotation) +{ + if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + drm_rotation_90_or_270(rotation)) + return false; + else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) && + rotation != DRM_MODE_ROTATE_0) + return false; + + return true; +} + +/* + * For some reason, the hardware tracking starts looking at whatever we + * programmed as the display plane base address register. It does not look at + * the X and Y offset registers. That's why we include the src x/y offsets + * instead of just looking at the plane size. + */ +static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + unsigned int effective_w, effective_h, max_w, max_h; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + max_w = 5120; + max_h = 4096; + } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + max_w = 4096; + max_h = 4096; + } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { + max_w = 4096; + max_h = 2048; + } else { + max_w = 2048; + max_h = 1536; + } + + intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, + &effective_h); + effective_w += fbc->state_cache.plane.adjusted_x; + effective_h += fbc->state_cache.plane.adjusted_y; + + return effective_w <= max_w && effective_h <= max_h; +} + +static bool tiling_is_valid(struct drm_i915_private *dev_priv, + uint64_t modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + if (INTEL_GEN(dev_priv) >= 9) + return true; + return false; + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + return true; + default: + return false; + } +} + +static void intel_fbc_update_state_cache(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct drm_framebuffer *fb = plane_state->hw.fb; + + cache->plane.visible = plane_state->uapi.visible; + if (!cache->plane.visible) + return; + + cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; + + cache->plane.rotation = plane_state->hw.rotation; + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
+ */
+	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+	cache->plane.adjusted_x = plane_state->color_plane[0].x;
+	cache->plane.adjusted_y = plane_state->color_plane[0].y;
+
+	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
+
+	cache->fb.format = fb->format;
+	cache->fb.modifier = fb->modifier;
+
+<<<<<<<
+=======
+	/* FIXME is this correct? */
+	cache->fb.stride = plane_state->color_plane[0].stride;
+	if (drm_rotation_90_or_270(plane_state->hw.rotation))
+		cache->fb.stride *= fb->format->cpp[0];
+
+	/* FBC1 compression interval: arbitrary choice of 1 second */
+	cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
+
+>>>>>>>
+	cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+
+	drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+		    !plane_state->vma->fence);
+
+	if (plane_state->flags & PLANE_HAS_FENCE &&
+	    plane_state->vma->fence)
+		cache->fence_id = plane_state->vma->fence->id;
+	else
+		cache->fence_id = -1;
+}
+
+static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+		fbc->compressed_fb.size * fbc->threshold;
+}
+
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (intel_vgpu_active(dev_priv)) {
+		fbc->no_fbc_reason = "VGPU is active";
+		return false;
+	}
+
+	if (!i915_modparams.enable_fbc) {
+		fbc->no_fbc_reason = "disabled per module param or by default";
+		return false;
+	}
+
+	if (fbc->underrun_detected) {
+		fbc->no_fbc_reason = "underrun detected";
+		return false;
+	}
+
+	return true;
+}
+
+static bool intel_fbc_can_activate(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+	if (!intel_fbc_can_enable(dev_priv))
+		return false;
+
+	if (!cache->plane.visible) {
+		fbc->no_fbc_reason = "primary plane not visible";
+		return false;
+	}
+
+	/* We don't need to use a state cache here since this information is
+	 * global for all CRTCs.
+	 */
+	if (fbc->underrun_detected) {
+		fbc->no_fbc_reason = "underrun detected";
+		return false;
+	}
+
+	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
+		fbc->no_fbc_reason = "incompatible mode";
+		return false;
+	}
+
+	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
+		fbc->no_fbc_reason = "mode too large for compression";
+		return false;
+	}
+
+	/* The use of a CPU fence is one of two ways to detect writes by the
+	 * CPU to the scanout and trigger updates to the FBC.
+	 *
+	 * The other method is by software tracking (see
+	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
+	 * the current compressed buffer and recompress it.
+	 *
+	 * Note that it is possible for a tiled surface to be unmappable (and
+	 * so have no fence associated with it) due to aperture constraints
+	 * at the time of pinning.
+	 *
+	 * FIXME with 90/270 degree rotation we should use the fence on
+	 * the normal GTT view (the rotated view doesn't even have a
+	 * fence). Would need changes to the FBC fence Y offset as well.
+	 * For now this will effectively disable FBC with 90/270 degree
+	 * rotation.
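+ *
+ * (hence the check below only demands a fence on gen8 and earlier;
+ * gen9+ is allowed to rely on the software frontbuffer tracking alone)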
+ */
+	if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
+		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+		return false;
+	}
+
+	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
+		fbc->no_fbc_reason = "pixel format is invalid";
+		return false;
+	}
+
+	if (!rotation_is_valid(dev_priv, cache->fb.format->format,
+			       cache->plane.rotation)) {
+		fbc->no_fbc_reason = "rotation unsupported";
+		return false;
+	}
+
+	if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
+		fbc->no_fbc_reason = "tiling unsupported";
+		return false;
+	}
+
+	if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
+		fbc->no_fbc_reason = "framebuffer stride not supported";
+		return false;
+	}
+
+	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+	    cache->fb.format->has_alpha) {
+		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+		return false;
+	}
+
+	/* WaFbcExceedCdClockThreshold:hsw,bdw */
+	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
+	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
+		fbc->no_fbc_reason = "pixel rate is too big";
+		return false;
+	}
+
+	/* It is possible for the required CFB size to change without a
+	 * crtc->disable + crtc->enable since it is possible to change the
+	 * stride without triggering a full modeset. Since we try to
+	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
+	 * if this happens, but if we exceed the current CFB size we'll have to
+	 * disable FBC. Notice that it would be possible to disable FBC, wait
+	 * for a frame, free the stolen node, then try to reenable FBC in case
+	 * we didn't get any invalidate/deactivate calls, but this would require
+	 * a lot of tracking just for a specific case. If we conclude it's an
+	 * important case, we can implement it later. */
+	if (intel_fbc_cfb_size_changed(dev_priv)) {
+		fbc->no_fbc_reason = "CFB requirements changed";
+		return false;
+	}
+
+	/*
+	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
+	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
+	 * and screen flicker.
+	 */
+	if (INTEL_GEN(dev_priv) >= 9 &&
+	    (fbc->state_cache.plane.adjusted_y & 3)) {
+		fbc->no_fbc_reason = "plane Y offset is misaligned";
+		return false;
+	}
+
+	return true;
+}
+
+static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
+				     struct intel_fbc_reg_params *params)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+	/* Since all our fields are integer types, use memset here so the
+	 * comparison function can rely on memcmp because the padding will be
+	 * zero.
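+ * (zeroed padding is what makes a struct-wide memcmp() of two such
+ * snapshots meaningful; intel_fbc_can_flip_nuke() below compares the
+ * fields of this snapshot against the current state cache)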
*/ + memset(params, 0, sizeof(*params)); + + params->fence_id = cache->fence_id; + params->fence_y_offset = cache->fence_y_offset; + + params->crtc.pipe = crtc->pipe; + params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + + params->fb.format = cache->fb.format; + params->fb.stride = cache->fb.stride; + + params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride; + + params->plane_visible = cache->plane.visible; +} + +static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct intel_fbc_reg_params *params = &fbc->params; + + if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + return false; + + if (!params->plane_visible) + return false; + + if (!intel_fbc_can_activate(crtc)) + return false; + + if (params->fb.format != cache->fb.format) + return false; + + if (params->fb.stride != cache->fb.stride) + return false; + + if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache)) + return false; + + if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride) + return false; + + return true; +} + +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + const char *reason = "update pending"; + bool need_vblank_wait = false; + + if (!plane->has_fbc || !plane_state) + return need_vblank_wait; + + mutex_lock(&fbc->lock); + + if (fbc->crtc != crtc) + goto unlock; + + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->flip_pending = true; + + if (!intel_fbc_can_flip_nuke(crtc_state)) { + intel_fbc_deactivate(dev_priv, reason); + + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && + (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))) + need_vblank_wait = true; + fbc->activated = false; + } +unlock: + mutex_unlock(&fbc->lock); + + return need_vblank_wait; +} + +/** + * __intel_fbc_disable - disable FBC + * @dev_priv: i915 device instance + * + * This is the low level function that actually disables FBC. Callers should + * grab the FBC lock. 
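+ *
+ * (the drm_WARN_ONs below spell out the contract: FBC lock held, FBC
+ * currently tied to a pipe, and the hardware already deactivated)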
+ */ +static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_crtc *crtc = fbc->crtc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !fbc->crtc); + drm_WARN_ON(&dev_priv->drm, fbc->active); + + drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", + pipe_name(crtc->pipe)); + + __intel_fbc_cleanup_cfb(dev_priv); + + fbc->crtc = NULL; +} + +static void __intel_fbc_post_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &dev_priv->fbc; + + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + + if (fbc->crtc != crtc) + return; + + fbc->flip_pending = false; + + if (!i915_modparams.enable_fbc) { + intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); + __intel_fbc_disable(dev_priv); + + return; + } + + intel_fbc_get_reg_params(crtc, &fbc->params); + + if (!intel_fbc_can_activate(crtc)) + return; + + if (!fbc->busy_bits) + intel_fbc_hw_activate(dev_priv); + else + intel_fbc_deactivate(dev_priv, "frontbuffer write"); +} + +void intel_fbc_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!plane->has_fbc || !plane_state) + return; + + mutex_lock(&fbc->lock); + __intel_fbc_post_update(crtc); + mutex_unlock(&fbc->lock); +} + +static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) +{ + if (fbc->crtc) + return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + else + return fbc->possible_framebuffer_bits; +} + +void intel_fbc_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; + + if (fbc->crtc && fbc->busy_bits) + intel_fbc_deactivate(dev_priv, "frontbuffer write"); + + mutex_unlock(&fbc->lock); +} + +void intel_fbc_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, enum fb_op_origin origin) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!HAS_FBC(dev_priv)) + return; + + mutex_lock(&fbc->lock); + + fbc->busy_bits &= ~frontbuffer_bits; + + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + goto out; + + if (!fbc->busy_bits && fbc->crtc && + (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { + if (fbc->active) + intel_fbc_recompress(dev_priv); + else if (!fbc->flip_pending) + __intel_fbc_post_update(fbc->crtc); + } + +out: + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_choose_crtc - select a CRTC to enable FBC on + * @dev_priv: i915 device instance + * @state: the atomic state structure + * + * This function looks at the proposed state for CRTCs and planes, then chooses + * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to + * true. + * + * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe + * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. 
+ */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+			   struct intel_atomic_state *state)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_plane *plane;
+	struct intel_plane_state *plane_state;
+	bool crtc_chosen = false;
+	int i;
+
+	mutex_lock(&fbc->lock);
+
+	/* Does this atomic commit involve the CRTC currently tied to FBC? */
+	if (fbc->crtc &&
+	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
+		goto out;
+
+	if (!intel_fbc_can_enable(dev_priv))
+		goto out;
+
+	/* Simply choose the first CRTC that is compatible and has a visible
+	 * plane. We could go for fancier schemes such as checking the plane
+	 * size, but this would just affect the few platforms that don't tie FBC
+	 * to pipe or plane A. */
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		struct intel_crtc_state *crtc_state;
+		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
+
+		if (!plane->has_fbc)
+			continue;
+
+		if (!plane_state->uapi.visible)
+			continue;
+
+		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+		crtc_state->enable_fbc = true;
+		crtc_chosen = true;
+		break;
+	}
+
+	if (!crtc_chosen)
+		fbc->no_fbc_reason = "no suitable CRTC for FBC";
+
+out:
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_enable - tries to enable FBC on the CRTC
+ * @state: the atomic state structure
+ * @crtc: the CRTC
+ *
+ * This function checks if the given CRTC was chosen for FBC, then enables it if
+ * possible. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_enable multiple times for the same pipe without an
+ * intel_fbc_disable in the middle, as long as it is deactivated.
+ */
+void intel_fbc_enable(struct intel_atomic_state *state,
+		      struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_plane_state *plane_state =
+		intel_atomic_get_new_plane_state(state, plane);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+	if (!plane->has_fbc || !plane_state)
+		return;
+
+	mutex_lock(&fbc->lock);
+
+	if (fbc->crtc) {
+		if (fbc->crtc != crtc ||
+		    !intel_fbc_cfb_size_changed(dev_priv))
+			goto out;
+
+		__intel_fbc_disable(dev_priv);
+	}
+
+	drm_WARN_ON(&dev_priv->drm, fbc->active);
+
+	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+
+	/* FIXME crtc_state->enable_fbc lies :( */
+	if (!cache->plane.visible)
+		goto out;
+
+	if (intel_fbc_alloc_cfb(dev_priv,
+				intel_fbc_calculate_cfb_size(dev_priv, cache),
+				plane_state->hw.fb->format->cpp[0])) {
+		cache->plane.visible = false;
+		fbc->no_fbc_reason = "not enough stolen memory";
+		goto out;
+	}
+
+	if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+	    plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
+		cache->gen9_wa_cfb_stride =
+			DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+	else
+		cache->gen9_wa_cfb_stride = 0;
+
+	drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
+		    pipe_name(crtc->pipe));
+	fbc->no_fbc_reason = "FBC enabled but not active yet";
+
+	fbc->crtc = crtc;
+out:
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_disable - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
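+ *
+ * (this is the locked counterpart of __intel_fbc_disable(); calling it
+ * for a crtc that FBC is not tied to is a no-op)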
+ */
+void intel_fbc_disable(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!plane->has_fbc)
+		return;
+
+	mutex_lock(&fbc->lock);
+	if (fbc->crtc == crtc)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_global_disable - globally disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC regardless of which CRTC is associated with it.
+ */
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!HAS_FBC(dev_priv))
+		return;
+
+	mutex_lock(&fbc->lock);
+	if (fbc->crtc) {
+		drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
+		__intel_fbc_disable(dev_priv);
+	}
+	mutex_unlock(&fbc->lock);
+}
+
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, fbc.underrun_work);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	mutex_lock(&fbc->lock);
+
+	/* Maybe we were scheduled twice. */
+	if (fbc->underrun_detected || !fbc->crtc)
+		goto out;
+
+	drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
+	fbc->underrun_detected = true;
+
+	intel_fbc_deactivate(dev_priv, "FIFO underrun");
+out:
+	mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_reset_underrun - reset FBC FIFO underrun status.
+ * @dev_priv: i915 device instance
+ *
+ * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
+ * want to re-enable FBC after an underrun to increase test coverage.
+ */
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	cancel_work_sync(&dev_priv->fbc.underrun_work);
+
+	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
+	if (ret)
+		return ret;
+
+	if (dev_priv->fbc.underrun_detected) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Re-allowing FBC after fifo underrun\n");
+		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
+	}
+
+	dev_priv->fbc.underrun_detected = false;
+	mutex_unlock(&dev_priv->fbc.lock);
+
+	return 0;
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @dev_priv: i915 device instance
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So in order for us to be on the safe side, completely disable FBC
+ * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
+ * already suggests that watermarks may be bad, so try to be as safe as
+ * possible.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (!HAS_FBC(dev_priv))
+		return;
+
+	/* There's no guarantee that underrun_detected won't be set to true
+	 * right after this check and before the work is scheduled, but that's
+	 * not a problem since we'll check it again under the work function
+	 * while FBC is locked. This check here is just to prevent us from
+	 * unnecessarily scheduling the work, and it relies on the fact that we
+	 * never switch underrun_detected back to false after it's true.
 */
+	if (READ_ONCE(fbc->underrun_detected))
+		return;
+
+	schedule_work(&fbc->underrun_work);
+}
+
+/*
+ * The DDX driver changes its behavior depending on the value it reads from
+ * i915.enable_fbc, so sanitize it by translating the default value into either
+ * 0 or 1 in order to allow it to know what's going on.
+ *
+ * Notice that this is done at driver initialization and we still allow user
+ * space to change the value during runtime without sanitizing it again. IGT
+ * relies on being able to change i915.enable_fbc at runtime.
+ */
+static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+{
+	if (i915_modparams.enable_fbc >= 0)
+		return !!i915_modparams.enable_fbc;
+
+	if (!HAS_FBC(dev_priv))
+		return 0;
+
+	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
+		return 1;
+
+	return 0;
+}
+
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+	if (intel_vtd_active() &&
+	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+		drm_info(&dev_priv->drm,
+			 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * intel_fbc_init - Initialize FBC
+ * @dev_priv: the i915 device
+ *
+ * This function might be called during PM init process.
+ */
+void intel_fbc_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
+	mutex_init(&fbc->lock);
+	fbc->active = false;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		mkwrite_device_info(dev_priv)->display.has_fbc = false;
+
+	if (need_fbc_vtd_wa(dev_priv))
+		mkwrite_device_info(dev_priv)->display.has_fbc = false;
+
+	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+	drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
+		    i915_modparams.enable_fbc);
+
+	if (!HAS_FBC(dev_priv)) {
+		fbc->no_fbc_reason = "unsupported by this chipset";
+		return;
+	}
+
+	/* This value was pulled out of someone's hat */
+	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
+		intel_de_write(dev_priv, FBC_CONTROL,
+			       500 << FBC_CTL_INTERVAL_SHIFT);
+
+	/* We still don't have any sort of hardware state readout for FBC, so
+	 * deactivate it in case the BIOS activated it to make sure software
+	 * matches the hardware state. */
+	if (intel_fbc_hw_is_active(dev_priv))
+		intel_fbc_hw_deactivate(dev_priv);
+}