832 files changed, 19022 insertions, 11523 deletions
diff --git a/.clang-format b/.clang-format index 48405c54ef27..d5c05db1a0d9 100644 --- a/.clang-format +++ b/.clang-format @@ -167,7 +167,7 @@ ForEachMacros: - 'drm_connector_for_each_possible_encoder' - 'drm_exec_for_each_locked_object' - 'drm_exec_for_each_locked_object_reverse' - - 'drm_for_each_bridge_in_chain' + - 'drm_for_each_bridge_in_chain_scoped' - 'drm_for_each_connector_iter' - 'drm_for_each_crtc' - 'drm_for_each_crtc_reverse' diff --git a/Documentation/gpu/amdgpu/apu-asic-info-table.csv b/Documentation/gpu/amdgpu/apu-asic-info-table.csv index b479c5629146..dee5f663a47f 100644 --- a/Documentation/gpu/amdgpu/apu-asic-info-table.csv +++ b/Documentation/gpu/amdgpu/apu-asic-info-table.csv @@ -13,5 +13,6 @@ Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8, 13.0.8 Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11 Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11 Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0, 14.0.0 +Ryzen AI 330 series, Krackan Point, 3.6.0, 11.5.3, 4.0.5, 6.1.3, 14.0.5, 14.0.5 Ryzen AI 350 series, Krackan Point, 3.5.0, 11.5.2, 4.0.5, 6.1.2, 14.0.4, 14.0.4 Ryzen AI Max 300 series, Strix Halo, 3.5.1, 11.5.1, 4.0.6, 6.1.1, 14.0.1, 14.0.1 diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst index bd4be32f2725..3ce276272171 100644 --- a/Documentation/gpu/amdgpu/driver-core.rst +++ b/Documentation/gpu/amdgpu/driver-core.rst @@ -210,4 +210,4 @@ IP Blocks :doc: IP Blocks .. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h - :identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK + :identifiers: amd_ip_block_type amd_ip_funcs DC_FEATURE_MASK DC_DEBUG_MASK diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 72932fa31b8d..eba09c3ddce4 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -358,8 +358,6 @@ Locking Guidelines #. All locking rules and interface contracts with cross-driver interfaces (dma-buf, dma_fence) need to be followed. -#. No struct_mutex anywhere in the code - #. dma_resv will be the outermost lock (when needed) and ww_acquire_ctx is to be hoisted at highest level and passed down within i915_gem_ctx in the call chain @@ -367,11 +365,6 @@ Locking Guidelines #. While holding lru/memory manager (buddy, drm_mm, whatever) locks system memory allocations are not allowed - * Enforce this by priming lockdep (with fs_reclaim). If we - allocate memory while holding these looks we get a rehash - of the shrinker vs. struct_mutex saga, and that would be - real bad. - #. Do not nest different lru/memory manager locks within each other. Take them in turn to update memory allocations, relying on the object’s dma_resv ww_mutex to serialize against other operations. diff --git a/Documentation/gpu/nova/core/todo.rst b/Documentation/gpu/nova/core/todo.rst index 894a1e9c3741..091a2fb78c6b 100644 --- a/Documentation/gpu/nova/core/todo.rst +++ b/Documentation/gpu/nova/core/todo.rst @@ -131,8 +131,6 @@ crate so it can be used by other components as well. Features desired before this happens: -* Relative register with build-time base address validation, -* Arrays of registers with build-time index validation, * Make I/O optional I/O (for field values that are not registers), * Support other sizes than `u32`, * Allow visibility control for registers and individual fields, @@ -232,23 +230,6 @@ Rust abstraction for debugfs APIs. 
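An aside on the .clang-format hunk at the top of this diff: entries under ForEachMacros tell clang-format to lay a macro out like a for loop instead of a function call, which is why the renamed drm_for_each_bridge_in_chain_scoped has to be re-listed. A minimal sketch with a hypothetical macro of the same shape (the macro and types below are illustrative, not DRM API):

#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Hypothetical iterator; listing a macro like this under ForEachMacros
 * makes clang-format format its body like a regular for loop. */
#define my_for_each_node(pos, head) \
	for ((pos) = (head); (pos); (pos) = (pos)->next)

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *pos;

	my_for_each_node(pos, &a)
		printf("%d\n", pos->val);

	return 0;
}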
GPU (general) ============= -Parse firmware headers ----------------------- - -Parse ELF headers from the firmware files loaded from the filesystem. - -| Reference: ELF utils -| Complexity: Beginner -| Contact: Abdiel Janulgue - -Build radix3 page table ------------------------ - -Build the radix3 page table to map the firmware. - -| Complexity: Intermediate -| Contact: Abdiel Janulgue - Initial Devinit support ----------------------- diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 92db80793bba..b5f58b4274b1 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -173,31 +173,6 @@ Contact: Simona Vetter Level: Intermediate -Get rid of dev->struct_mutex from GEM drivers ---------------------------------------------- - -``dev->struct_mutex`` is the Big DRM Lock from legacy days and infested -everything. Nowadays in modern drivers the only bit where it's mandatory is -serializing GEM buffer object destruction. Which unfortunately means drivers -have to keep track of that lock and either call ``unreference`` or -``unreference_locked`` depending upon context. - -Core GEM doesn't have a need for ``struct_mutex`` any more since kernel 4.8, -and there's a GEM object ``free`` callback for any drivers which are -entirely ``struct_mutex`` free. - -For drivers that need ``struct_mutex`` it should be replaced with a driver- -private lock. The tricky part is the BO free functions, since those can't -reliably take that lock any more. Instead state needs to be protected with -suitable subordinate locks or some cleanup work pushed to a worker thread. For -performance-critical drivers it might also be better to go with a more -fine-grained per-buffer object and per-context lockings scheme. Currently only -the ``msm`` and `i915` drivers use ``struct_mutex``. - -Contact: Simona Vetter, respective driver maintainers - -Level: Advanced - Move Buffer Object Locking to dma_resv_lock() --------------------------------------------- diff --git a/Documentation/netlink/specs/conntrack.yaml b/Documentation/netlink/specs/conntrack.yaml index c6832633ab7b..591e22a2ee43 100644 --- a/Documentation/netlink/specs/conntrack.yaml +++ b/Documentation/netlink/specs/conntrack.yaml @@ -575,8 +575,8 @@ operations: - nat-dst - timeout - mark - - counter-orig - - counter-reply + - counters-orig + - counters-reply - use - id - nat-dst @@ -591,7 +591,6 @@ operations: request: value: 0x101 attributes: - - nfgen-family - mark - filter - status @@ -608,8 +607,8 @@ operations: - nat-dst - timeout - mark - - counter-orig - - counter-reply + - counters-orig + - counters-reply - use - id - nat-dst diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml index d15335684ec3..d1b4829b580a 100644 --- a/Documentation/netlink/specs/mptcp_pm.yaml +++ b/Documentation/netlink/specs/mptcp_pm.yaml @@ -28,13 +28,13 @@ definitions: traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport, - dport, server-side. + dport, server-side, [flags]. - name: established doc: >- A MPTCP connection is established (can start new subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport, - dport, server-side. + dport, server-side, [flags]. 
- name: closed doc: >- diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst index 062b86522e4d..accaebbdd642 100644 --- a/Documentation/sound/alsa-configuration.rst +++ b/Documentation/sound/alsa-configuration.rst @@ -2293,7 +2293,7 @@ delayed_register notice the need. skip_validation Skip unit descriptor validation (default: no). - The option is used to ignores the validation errors with the hexdump + The option is used to ignore the validation errors with the hexdump of the unit descriptor instead of a driver probe error, so that we can check its details. quirk_flags diff --git a/MAINTAINERS b/MAINTAINERS index 5257d52679d6..a4c6f1c8deb9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2086,6 +2086,19 @@ F: Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml F: drivers/gpu/drm/panthor/ F: include/uapi/drm/panthor_drm.h +ARM MALI TYR DRM DRIVER +M: Daniel Almeida <daniel.almeida@collabora.com> +M: Alice Ryhl <aliceryhl@google.com> +L: dri-devel@lists.freedesktop.org +S: Supported +W: https://rust-for-linux.com/tyr-gpu-driver +W: https://drm.pages.freedesktop.org/maintainer-tools/drm-rust.html +B: https://gitlab.freedesktop.org/panfrost/linux/-/issues +T: git https://gitlab.freedesktop.org/drm/rust/kernel.git +F: Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml +F: drivers/gpu/drm/tyr/ +F: include/uapi/drm/panthor_drm.h + ARM MALI-DP DRM DRIVER M: Liviu Dudau <liviu.dudau@arm.com> S: Supported @@ -7237,7 +7250,7 @@ F: include/linux/dma-mapping.h F: include/linux/swiotlb.h F: kernel/dma/ -DMA MAPPING HELPERS DEVICE DRIVER API [RUST] +DMA MAPPING & SCATTERLIST API [RUST] M: Danilo Krummrich <dakr@kernel.org> R: Abdiel Janulgue <abdiel.janulgue@gmail.com> R: Daniel Almeida <daniel.almeida@collabora.com> @@ -7248,7 +7261,9 @@ S: Supported W: https://rust-for-linux.com T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git F: rust/helpers/dma.c +F: rust/helpers/scatterlist.c F: rust/kernel/dma.rs +F: rust/kernel/scatterlist.rs F: samples/rust/rust_dma.rs DMA-BUF HEAPS FRAMEWORK @@ -7430,7 +7445,7 @@ S: Supported F: Documentation/devicetree/bindings/dpll/dpll-device.yaml F: Documentation/devicetree/bindings/dpll/dpll-pin.yaml F: Documentation/driver-api/dpll.rst -F: drivers/dpll/* +F: drivers/dpll/ F: include/linux/dpll.h F: include/uapi/linux/dpll.h @@ -7838,6 +7853,7 @@ M: Danilo Krummrich <dakr@kernel.org> M: Alexandre Courbot <acourbot@nvidia.com> L: nouveau@lists.freedesktop.org S: Supported +W: https://rust-for-linux.com/nova-gpu-driver Q: https://patchwork.freedesktop.org/project/nouveau/ B: https://gitlab.freedesktop.org/drm/nova/-/issues C: irc://irc.oftc.net/nouveau @@ -7849,6 +7865,7 @@ DRM DRIVER FOR NVIDIA GPUS [RUST] M: Danilo Krummrich <dakr@kernel.org> L: nouveau@lists.freedesktop.org S: Supported +W: https://rust-for-linux.com/nova-gpu-driver Q: https://patchwork.freedesktop.org/project/nouveau/ B: https://gitlab.freedesktop.org/drm/nova/-/issues C: irc://irc.oftc.net/nouveau @@ -16221,6 +16238,7 @@ R: Rik van Riel <riel@surriel.com> R: Liam R.
Howlett <Liam.Howlett@oracle.com> R: Vlastimil Babka <vbabka@suse.cz> R: Harry Yoo <harry.yoo@oracle.com> +R: Jann Horn <jannh@google.com> L: linux-mm@kvack.org S: Maintained F: include/linux/rmap.h @@ -16265,6 +16283,7 @@ R: Nico Pache <npache@redhat.com> R: Ryan Roberts <ryan.roberts@arm.com> R: Dev Jain <dev.jain@arm.com> R: Barry Song <baohua@kernel.org> +R: Lance Yang <lance.yang@linux.dev> L: linux-mm@kvack.org S: Maintained W: http://www.linux-mm.org @@ -22075,6 +22094,7 @@ F: drivers/infiniband/ulp/rtrs/ RUNTIME VERIFICATION (RV) M: Steven Rostedt <rostedt@goodmis.org> +M: Gabriele Monaco <gmonaco@redhat.com> L: linux-trace-kernel@vger.kernel.org S: Maintained F: Documentation/trace/rv/ @@ -24282,7 +24302,7 @@ F: Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml F: drivers/input/keyboard/sun4i-lradc-keys.c SUNDANCE NETWORK DRIVER -M: Denis Kirjanov <dkirjanov@suse.de> +M: Denis Kirjanov <kirjanov@gmail.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/dlink/sundance.c @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 17 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2b07f0a27a7d..0ee4f6fa3a17 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1369,6 +1369,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void) } void kvm_init_host_debug_data(void); +void kvm_debug_init_vhe(void); void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu); void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu); void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 1246216616b5..2888b5d03757 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -355,11 +355,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke return pteref; } -static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref) -{ - return pteref; -} - static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker) { /* @@ -389,11 +384,6 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED)); } -static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref) -{ - return rcu_dereference_raw(pteref); -} - static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker) { if (walker->flags & KVM_PGTABLE_WALK_SHARED) @@ -562,26 +552,6 @@ static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); /** - * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). - * @addr: Intermediate physical address at which to place the mapping. - * @size: Size of the mapping. - * - * The page-table is assumed to be unreachable by any hardware walkers prior - * to freeing and therefore no TLB invalidation is performed. - */ -void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt, - u64 addr, u64 size); - -/** - * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table. - * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*(). - * - * It is assumed that the rest of the page-table is freed before this operation. 
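The kernel-doc blocks being deleted here described the split teardown API (destroy the mapped range first, then the PGD) that these hunks fold back into a single kvm_pgtable_stage2_destroy(). A toy userspace analogue of that contract, assuming a made-up fixed-fanout table format: lower-level tables are freed post-order, then the root goes last and the pointer is cleared.

#include <stdlib.h>

#define SLOTS 4

struct table {
	struct table *slot[SLOTS];
};

/* Post-order walk, the moral equivalent of KVM_PGTABLE_WALK_TABLE_POST:
 * children are freed before the table that points at them. */
static void destroy_level(struct table *t, int level)
{
	if (!t || level < 0)
		return;

	for (int i = 0; i < SLOTS; i++)
		destroy_level(t->slot[i], level - 1);

	free(t);
}

/* Single entry point, as restored by this series: walk everything,
 * then free the root ("PGD") last and clear the pointer. */
static void table_destroy(struct table **root, int levels)
{
	destroy_level(*root, levels);
	*root = NULL;
}

int main(void)
{
	struct table *root = calloc(1, sizeof(*root));

	root->slot[0] = calloc(1, sizeof(*root));
	table_destroy(&root, 1);
	return 0;
}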
- */ -void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt); - -/** * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure. * @mm_ops: Memory management callbacks. * @pgtable: Unlinked stage-2 paging structure to be freed. diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h index 35f9d9478004..ea58282f59bb 100644 --- a/arch/arm64/include/asm/kvm_pkvm.h +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -179,9 +179,7 @@ struct pkvm_mapping { int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops); -void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt, - u64 addr, u64 size); -void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt); +void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt); int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, void *mc, enum kvm_pgtable_walk_flags flags); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5bf101c869c9..bd6b6a620a09 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2113,8 +2113,10 @@ static void cpu_hyp_init_features(void) { cpu_set_hyp_vector(); - if (is_kernel_in_hyp_mode()) + if (is_kernel_in_hyp_mode()) { kvm_timer_init_vhe(); + kvm_debug_init_vhe(); + } if (vgic_present) kvm_vgic_init_cpu_hardware(); diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 381382c19fe4..e027d9c32b0d 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -96,6 +96,13 @@ void kvm_init_host_debug_data(void) } } +void kvm_debug_init_vhe(void) +{ + /* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */ + if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1))) + write_sysreg_el1(0, SYS_PMSCR); +} + /* * Configures the 'external' MDSCR_EL1 value for the guest, i.e. when the host * has taken over MDSCR_EL1. 
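On the kvm_debug_init_vhe() hunk above: SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, ...) is a plain bitfield extraction. PMSVer (bits [35:32] of ID_AA64DFR0_EL1) is nonzero when SPE, and hence PMSCR_EL1, is implemented, so the register is only written when it actually exists. A userspace sketch of that check (the helper name is ours, not the kernel's SYS_FIELD_GET macro):

#include <stdint.h>
#include <stdio.h>

#define PMSVER_SHIFT	32
#define PMSVER_MASK	(0xfULL << PMSVER_SHIFT)

/* Extract ID_AA64DFR0_EL1.PMSVer; nonzero means SPE is implemented. */
static unsigned int pmsver(uint64_t id_aa64dfr0)
{
	return (id_aa64dfr0 & PMSVER_MASK) >> PMSVER_SHIFT;
}

int main(void)
{
	uint64_t reg = 0x3ULL << PMSVER_SHIFT;	/* pretend SPE is present */

	if (pmsver(reg))
		printf("SPE implemented (PMSVer=%u), PMSCR_EL1 exists\n",
		       pmsver(reg));
	return 0;
}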
@@ -138,6 +145,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu) /* Must be called before kvm_vcpu_load_vhe() */ KVM_BUG_ON(vcpu_get_flag(vcpu, SYSREGS_ON_CPU), vcpu->kvm); + if (has_vhe()) + *host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2); + /* * Determine which of the possible debug states we're in: * @@ -184,6 +194,9 @@ void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu) void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu) { + if (has_vhe()) + write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2); + if (likely(!(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) return; diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 84ec4e100fbb..b6682202edf3 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -431,9 +431,6 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu_set_flag(vcpu, PMUSERENR_ON_CPU); } - *host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2); - write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); - if (cpus_have_final_cap(ARM64_HAS_HCX)) { u64 hcrx = vcpu->arch.hcrx_el2; if (is_nested_ctxt(vcpu)) { @@ -454,8 +451,6 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); - write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2); - write_sysreg(0, hstr_el2); if (system_supports_pmuv3()) { write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0); diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index ccd575d5f6de..d3b9ec8a7c28 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -50,6 +50,10 @@ extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); static void __activate_traps(struct kvm_vcpu *vcpu) { ___activate_traps(vcpu, vcpu->arch.hcr_el2); + + *host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2); + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); + __activate_traps_common(vcpu); __activate_cptr_traps(vcpu); @@ -93,6 +97,8 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) isb(); } + write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2); + __deactivate_traps_common(vcpu); write_sysreg_hcr(this_cpu_ptr(&kvm_init_params)->hcr_el2); diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 71d2fc97f004..82da9b03692d 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -253,7 +253,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu) *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR); - __vcpu_assign_sys_reg(vcpu, read_sysreg_el1(SYS_VBAR), VBAR_EL1); + __vcpu_assign_sys_reg(vcpu, VBAR_EL1, read_sysreg_el1(SYS_VBAR)); kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index c36f282a175d..c351b4abd5db 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1551,38 +1551,21 @@ static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx, return 0; } -void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt, - u64 addr, u64 size) +void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) { + size_t pgd_sz; struct kvm_pgtable_walker walker = { .cb = stage2_free_walker, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, }; - WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker)); -} - -void 
kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt) -{ - size_t pgd_sz; - + WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE; - - /* - * Since the pgtable is unlinked at this point, and not shared with - * other walkers, safely deference pgd with kvm_dereference_pteref_raw() - */ - pgt->mm_ops->free_pages_exact(kvm_dereference_pteref_raw(pgt->pgd), pgd_sz); + pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz); pgt->pgd = NULL; } -void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) -{ - kvm_pgtable_stage2_destroy_range(pgt, 0, BIT(pgt->ia_bits)); - kvm_pgtable_stage2_destroy_pgd(pgt); -} - void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level) { kvm_pteref_t ptep = (kvm_pteref_t)pgtable; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 86f3d80daf37..736394292503 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -904,38 +904,6 @@ static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type) return 0; } -/* - * Assume that @pgt is valid and unlinked from the KVM MMU to free the - * page-table without taking the kvm_mmu_lock and without performing any - * TLB invalidations. - * - * Also, the range of addresses can be large enough to cause need_resched - * warnings, for instance on CONFIG_PREEMPT_NONE kernels. Hence, invoke - * cond_resched() periodically to prevent hogging the CPU for a long time - * and schedule something else, if required. - */ -static void stage2_destroy_range(struct kvm_pgtable *pgt, phys_addr_t addr, - phys_addr_t end) -{ - u64 next; - - do { - next = stage2_range_addr_end(addr, end); - KVM_PGT_FN(kvm_pgtable_stage2_destroy_range)(pgt, addr, - next - addr); - if (next != end) - cond_resched(); - } while (addr = next, addr != end); -} - -static void kvm_stage2_destroy(struct kvm_pgtable *pgt) -{ - unsigned int ia_bits = VTCR_EL2_IPA(pgt->mmu->vtcr); - - stage2_destroy_range(pgt, 0, BIT(ia_bits)); - KVM_PGT_FN(kvm_pgtable_stage2_destroy_pgd)(pgt); -} - /** * kvm_init_stage2_mmu - Initialise a S2 MMU structure * @kvm: The pointer to the KVM structure @@ -1012,7 +980,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t return 0; out_destroy_pgtable: - kvm_stage2_destroy(pgt); + KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt); out_free_pgtable: kfree(pgt); return err; @@ -1106,10 +1074,14 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) mmu->pgt = NULL; free_percpu(mmu->last_vcpu_ran); } + + if (kvm_is_nested_s2_mmu(kvm, mmu)) + kvm_init_nested_s2_mmu(mmu); + write_unlock(&kvm->mmu_lock); if (pgt) { - kvm_stage2_destroy(pgt); + KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt); kfree(pgt); } } @@ -1541,11 +1513,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); - if (fault_is_perm && !write_fault && !exec_fault) { - kvm_err("Unexpected L2 read permission error\n"); - return -EFAULT; - } - if (!is_protected_kvm_enabled()) memcache = &vcpu->arch.mmu_page_cache; else diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 77db81bae86f..50d559248a1f 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -847,7 +847,7 @@ static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end) ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift, vt->wr.level)); - ipa_start = vt->wr.pa & (ipa_size - 1); + ipa_start 
= vt->wr.pa & ~(ipa_size - 1); ipa_end = ipa_start + ipa_size; if (ipa_end <= start || ipa_start >= end) @@ -887,7 +887,7 @@ static void invalidate_vncr_va(struct kvm *kvm, va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift, vt->wr.level)); - va_start = vt->gva & (va_size - 1); + va_start = vt->gva & ~(va_size - 1); va_end = va_start + va_size; switch (scope->type) { @@ -1276,7 +1276,7 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu) !(tcr & TCR_ASID16)) asid &= GENMASK(7, 0); - return asid != vt->wr.asid; + return asid == vt->wr.asid; } return true; diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c index 61827cf6fea4..fcd70bfe44fb 100644 --- a/arch/arm64/kvm/pkvm.c +++ b/arch/arm64/kvm/pkvm.c @@ -316,16 +316,9 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e return 0; } -void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt, - u64 addr, u64 size) +void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) { - __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size); -} - -void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt) -{ - /* Expected to be called after all pKVM mappings have been released. */ - WARN_ON_ONCE(!RB_EMPTY_ROOT(&pgt->pkvm_mappings.rb_root)); + __pkvm_pgtable_stage2_unmap(pgt, 0, ~(0ULL)); } int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c index 2684f273d9e1..4c1209261b65 100644 --- a/arch/arm64/kvm/vgic/vgic-debug.c +++ b/arch/arm64/kvm/vgic/vgic-debug.c @@ -69,7 +69,7 @@ static int iter_mark_lpis(struct kvm *kvm) int nr_lpis = 0; xa_for_each(&dist->lpi_xa, intid, irq) { - if (!vgic_try_get_irq_kref(irq)) + if (!vgic_try_get_irq_ref(irq)) continue; xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER); diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 1e680ad6e863..4c3c0d82e476 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -53,7 +53,7 @@ void kvm_vgic_early_init(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; - xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ); + xa_init(&dist->lpi_xa); } /* CREATION */ @@ -208,7 +208,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) raw_spin_lock_init(&irq->irq_lock); irq->vcpu = NULL; irq->target_vcpu = vcpu0; - kref_init(&irq->refcount); + refcount_set(&irq->refcount, 0); switch (dist->vgic_model) { case KVM_DEV_TYPE_ARM_VGIC_V2: irq->targets = 0; @@ -277,7 +277,7 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type) irq->intid = i; irq->vcpu = NULL; irq->target_vcpu = vcpu; - kref_init(&irq->refcount); + refcount_set(&irq->refcount, 0); if (vgic_irq_is_sgi(i)) { /* SGIs */ irq->enabled = 1; diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 7368c13f16b7..ce3e3ed3f29f 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -78,7 +78,6 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, { struct vgic_dist *dist = &kvm->arch.vgic; struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq; - unsigned long flags; int ret; /* In this case there is no put, since we keep the reference. 
*/ @@ -89,7 +88,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, if (!irq) return ERR_PTR(-ENOMEM); - ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT); + ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT); if (ret) { kfree(irq); return ERR_PTR(ret); @@ -99,19 +98,19 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, raw_spin_lock_init(&irq->irq_lock); irq->config = VGIC_CONFIG_EDGE; - kref_init(&irq->refcount); + refcount_set(&irq->refcount, 1); irq->intid = intid; irq->target_vcpu = vcpu; irq->group = 1; - xa_lock_irqsave(&dist->lpi_xa, flags); + xa_lock(&dist->lpi_xa); /* * There could be a race with another vgic_add_lpi(), so we need to * check that we don't add a second list entry with the same LPI. */ oldirq = xa_load(&dist->lpi_xa, intid); - if (vgic_try_get_irq_kref(oldirq)) { + if (vgic_try_get_irq_ref(oldirq)) { /* Someone was faster with adding this LPI, lets use that. */ kfree(irq); irq = oldirq; @@ -126,7 +125,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, } out_unlock: - xa_unlock_irqrestore(&dist->lpi_xa, flags); + xa_unlock(&dist->lpi_xa); if (ret) return ERR_PTR(ret); @@ -547,7 +546,7 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db, rcu_read_lock(); irq = xa_load(&its->translation_cache, cache_key); - if (!vgic_try_get_irq_kref(irq)) + if (!vgic_try_get_irq_ref(irq)) irq = NULL; rcu_read_unlock(); @@ -571,7 +570,7 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its, * its_lock, as the ITE (and the reference it holds) cannot be freed. */ lockdep_assert_held(&its->its_lock); - vgic_get_irq_kref(irq); + vgic_get_irq_ref(irq); old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT); diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 4d9343d2b0b1..548aec9d5a72 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -518,7 +518,7 @@ static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq) if (!irq->hw || irq->host_irq != host_irq) continue; - if (!vgic_try_get_irq_kref(irq)) + if (!vgic_try_get_irq_ref(irq)) return NULL; return irq; diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index f5148b38120a..6dd5a10081e2 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -28,8 +28,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { * kvm->arch.config_lock (mutex) * its->cmd_lock (mutex) * its->its_lock (mutex) - * vgic_cpu->ap_list_lock must be taken with IRQs disabled - * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled + * vgic_dist->lpi_xa.xa_lock + * vgic_cpu->ap_list_lock must be taken with IRQs disabled * vgic_irq->irq_lock must be taken with IRQs disabled * * As the ap_list_lock might be taken from the timer interrupt handler, @@ -71,7 +71,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) rcu_read_lock(); irq = xa_load(&dist->lpi_xa, intid); - if (!vgic_try_get_irq_kref(irq)) + if (!vgic_try_get_irq_ref(irq)) irq = NULL; rcu_read_unlock(); @@ -114,37 +114,66 @@ struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid) return vgic_get_irq(vcpu->kvm, intid); } -/* - * We can't do anything in here, because we lack the kvm pointer to - * lock and remove the item from the lpi_list. So we keep this function - * empty and use the return value of kref_put() to trigger the freeing. 
- */ -static void vgic_irq_release(struct kref *ref) +static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq) +{ + lockdep_assert_held(&dist->lpi_xa.xa_lock); + __xa_erase(&dist->lpi_xa, irq->intid); + kfree_rcu(irq, rcu); +} + +static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) +{ + if (irq->intid < VGIC_MIN_LPI) + return false; + + return refcount_dec_and_test(&irq->refcount); +} + +static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq) { + if (!__vgic_put_irq(kvm, irq)) + return false; + + irq->pending_release = true; + return true; } void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) { struct vgic_dist *dist = &kvm->arch.vgic; - unsigned long flags; - if (irq->intid < VGIC_MIN_LPI) - return; + if (irq->intid >= VGIC_MIN_LPI) + might_lock(&dist->lpi_xa.xa_lock); - if (!kref_put(&irq->refcount, vgic_irq_release)) + if (!__vgic_put_irq(kvm, irq)) return; - xa_lock_irqsave(&dist->lpi_xa, flags); - __xa_erase(&dist->lpi_xa, irq->intid); - xa_unlock_irqrestore(&dist->lpi_xa, flags); + xa_lock(&dist->lpi_xa); + vgic_release_lpi_locked(dist, irq); + xa_unlock(&dist->lpi_xa); +} - kfree_rcu(irq, rcu); +static void vgic_release_deleted_lpis(struct kvm *kvm) +{ + struct vgic_dist *dist = &kvm->arch.vgic; + unsigned long intid; + struct vgic_irq *irq; + + xa_lock(&dist->lpi_xa); + + xa_for_each(&dist->lpi_xa, intid, irq) { + if (irq->pending_release) + vgic_release_lpi_locked(dist, irq); + } + + xa_unlock(&dist->lpi_xa); } void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_irq *irq, *tmp; + bool deleted = false; unsigned long flags; raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); @@ -155,11 +184,14 @@ void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu) list_del(&irq->ap_list); irq->vcpu = NULL; raw_spin_unlock(&irq->irq_lock); - vgic_put_irq(vcpu->kvm, irq); + deleted |= vgic_put_irq_norelease(vcpu->kvm, irq); } } raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); + + if (deleted) + vgic_release_deleted_lpis(vcpu->kvm); } void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) @@ -399,7 +431,7 @@ retry: * now in the ap_list. This is safe as the caller must already hold a * reference on the irq. */ - vgic_get_irq_kref(irq); + vgic_get_irq_ref(irq); list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); irq->vcpu = vcpu; @@ -630,6 +662,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_irq *irq, *tmp; + bool deleted_lpis = false; DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); @@ -657,12 +690,12 @@ retry: /* * This vgic_put_irq call matches the - * vgic_get_irq_kref in vgic_queue_irq_unlock, + * vgic_get_irq_ref in vgic_queue_irq_unlock, * where we added the LPI to the ap_list. As * we remove the irq from the list, we drop * also drop the refcount. */ - vgic_put_irq(vcpu->kvm, irq); + deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq); continue; } @@ -725,6 +758,9 @@ retry: } raw_spin_unlock(&vgic_cpu->ap_list_lock); + + if (unlikely(deleted_lpis)) + vgic_release_deleted_lpis(vcpu->kvm); } static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) @@ -818,7 +854,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) * the AP list has been sorted already. 
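The vgic hunks in this area replace kref with a bare refcount_t: vgic_try_get_irq_ref() only takes a reference while the LPI is still live, and vgic_put_irq_norelease() lets callers batch the actual xarray erase. The get-unless-zero half is a standard compare-exchange loop; a compilable userspace analogue using C11 atomics (names ours, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Analogue of refcount_inc_not_zero(): succeed only while the count is
 * still nonzero, so a lookup can never resurrect a dying object. */
static bool ref_get_unless_zero(atomic_uint *ref)
{
	unsigned int old = atomic_load(ref);

	do {
		if (old == 0)
			return false;	/* already released; must not be used */
	} while (!atomic_compare_exchange_weak(ref, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_uint live = 1;
	atomic_uint dead = 0;

	printf("live: %s\n", ref_get_unless_zero(&live) ? "got ref" : "miss");
	printf("dead: %s\n", ref_get_unless_zero(&dead) ? "got ref" : "miss");
	return 0;
}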
*/ if (multi_sgi && irq->priority > prio) { - _raw_spin_unlock(&irq->irq_lock); + raw_spin_unlock(&irq->irq_lock); break; } diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index de1c1d3261c3..ac5f9c5d2b98 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -267,7 +267,7 @@ void vgic_v2_put(struct kvm_vcpu *vcpu); void vgic_v2_save_state(struct kvm_vcpu *vcpu); void vgic_v2_restore_state(struct kvm_vcpu *vcpu); -static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq) +static inline bool vgic_try_get_irq_ref(struct vgic_irq *irq) { if (!irq) return false; @@ -275,12 +275,12 @@ static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq) if (irq->intid < VGIC_MIN_LPI) return true; - return kref_get_unless_zero(&irq->refcount); + return refcount_inc_not_zero(&irq->refcount); } -static inline void vgic_get_irq_kref(struct vgic_irq *irq) +static inline void vgic_get_irq_ref(struct vgic_irq *irq) { - WARN_ON_ONCE(!vgic_try_get_irq_kref(irq)); + WARN_ON_ONCE(!vgic_try_get_irq_ref(irq)); } void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index f0abc38c40ac..0631a6b11281 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -298,6 +298,10 @@ config AS_HAS_LVZ_EXTENSION config CC_HAS_ANNOTATE_TABLEJUMP def_bool $(cc-option,-mannotate-tablejump) +config RUSTC_HAS_ANNOTATE_TABLEJUMP + depends on RUST + def_bool $(rustc-option,-Cllvm-args=--loongarch-annotate-tablejump) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -563,10 +567,14 @@ config ARCH_STRICT_ALIGN -mstrict-align build parameter to prevent unaligned accesses. CPUs with h/w unaligned access support: - Loongson-2K2000/2K3000/3A5000/3C5000/3D5000. + Loongson-2K2000/2K3000 and all of Loongson-3 series processors + based on LoongArch. CPUs without h/w unaligned access support: - Loongson-2K500/2K1000. + Loongson-2K0300/2K0500/2K1000. + + If you want to make sure whether to support unaligned memory access + on your hardware, please read the bit 20 (UAL) of CPUCFG1 register. This option is enabled by default to make the kernel be able to run on all LoongArch systems. But you can disable it manually if you want diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index a3a9759414f4..ae419e32f22e 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -102,16 +102,21 @@ KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma) ifdef CONFIG_OBJTOOL ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP +KBUILD_CFLAGS += -mannotate-tablejump +else +KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers +endif +ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP +KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump +else +KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers +endif +ifdef CONFIG_LTO_CLANG # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled. # Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to # be passed via '-mllvm' to ld.lld. 
-KBUILD_CFLAGS += -mannotate-tablejump -ifdef CONFIG_LTO_CLANG KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump endif -else -KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers -endif endif KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h index 52f298f7293b..483c955f2ae5 100644 --- a/arch/loongarch/include/asm/acenv.h +++ b/arch/loongarch/include/asm/acenv.h @@ -10,9 +10,8 @@ #ifndef _ASM_LOONGARCH_ACENV_H #define _ASM_LOONGARCH_ACENV_H -/* - * This header is required by ACPI core, but we have nothing to fill in - * right now. Will be updated later when needed. - */ +#ifdef CONFIG_ARCH_STRICT_ALIGN +#define ACPI_MISALIGNMENT_NOT_SUPPORTED +#endif /* CONFIG_ARCH_STRICT_ALIGN */ #endif /* _ASM_LOONGARCH_ACENV_H */ diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h index 099bafc6f797..e36cc7e8ed20 100644 --- a/arch/loongarch/include/asm/kvm_mmu.h +++ b/arch/loongarch/include/asm/kvm_mmu.h @@ -16,6 +16,13 @@ */ #define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1) +/* + * _PAGE_MODIFIED is a SW pte bit, it records page ever written on host + * kernel, on secondary MMU it records the page writeable attribute, in + * order for fast path handling. + */ +#define KVM_PAGE_WRITEABLE _PAGE_MODIFIED + #define _KVM_FLUSH_PGTABLE 0x1 #define _KVM_HAS_PGMASK 0x2 #define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) @@ -52,10 +59,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val) WRITE_ONCE(*ptep, val); } -static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; } -static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; } static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; } static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; } +static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; } +static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; } static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte) { @@ -69,12 +76,12 @@ static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte) static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte) { - return pte | _PAGE_DIRTY; + return pte | __WRITEABLE; } static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte) { - return pte & ~_PAGE_DIRTY; + return pte & ~__WRITEABLE; } static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte) @@ -87,6 +94,11 @@ static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte) return pte & ~_PAGE_HUGE; } +static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte) +{ + return pte | KVM_PAGE_WRITEABLE; +} + static inline int kvm_need_flush(kvm_ptw_ctx *ctx) { return ctx->flag & _KVM_FLUSH_PGTABLE; diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index c0a5dc9aeae2..23bd5ae2212c 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -86,7 +86,7 @@ late_initcall(fdt_cpu_clk_init); static ssize_t boardinfo_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, + return sysfs_emit(buf, "BIOS Information\n" "Vendor\t\t\t: %s\n" "Version\t\t\t: %s\n" @@ -109,6 +109,8 @@ static int __init boardinfo_init(void) struct kobject *loongson_kobj; loongson_kobj = kobject_create_and_add("loongson", firmware_kobj); + if (!loongson_kobj) + return -ENOMEM; return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr); } diff --git 
a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c index 9a038d1070d7..387dc4d3c486 100644 --- a/arch/loongarch/kernel/stacktrace.c +++ b/arch/loongarch/kernel/stacktrace.c @@ -51,12 +51,13 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, if (task == current) { regs->regs[3] = (unsigned long)__builtin_frame_address(0); regs->csr_era = (unsigned long)__builtin_return_address(0); + regs->regs[22] = 0; } else { regs->regs[3] = thread_saved_fp(task); regs->csr_era = thread_saved_ra(task); + regs->regs[22] = task->thread.reg22; } regs->regs[1] = 0; - regs->regs[22] = 0; for (unwind_start(&state, task, regs); !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) { diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c index 7b888d9085a0..dee1a15d7f4c 100644 --- a/arch/loongarch/kernel/vdso.c +++ b/arch/loongarch/kernel/vdso.c @@ -54,6 +54,9 @@ static int __init init_vdso(void) vdso_info.code_mapping.pages = kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL); + if (!vdso_info.code_mapping.pages) + return -ENOMEM; + pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso)); for (i = 0; i < vdso_info.size / PAGE_SIZE; i++) vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i); diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 2ce41f93b2a4..6c9c7de7226b 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -778,10 +778,8 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) return 0; default: return KVM_HCALL_INVALID_CODE; - }; - - return KVM_HCALL_INVALID_CODE; -}; + } +} /* * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c index 026b139dcff2..c32333695381 100644 --- a/arch/loongarch/kvm/intc/eiointc.c +++ b/arch/loongarch/kvm/intc/eiointc.c @@ -426,21 +426,26 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, struct loongarch_eiointc *s = dev->kvm->arch.eiointc; data = (void __user *)attr->addr; - spin_lock_irqsave(&s->lock, flags); switch (type) { case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: if (copy_from_user(&val, data, 4)) - ret = -EFAULT; - else { - if (val >= EIOINTC_ROUTE_MAX_VCPUS) - ret = -EINVAL; - else - s->num_cpu = val; - } + return -EFAULT; + break; + default: + break; + } + + spin_lock_irqsave(&s->lock, flags); + switch (type) { + case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: + if (val >= EIOINTC_ROUTE_MAX_VCPUS) + ret = -EINVAL; + else + s->num_cpu = val; break; case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: - if (copy_from_user(&s->features, data, 4)) - ret = -EFAULT; + s->features = val; if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION))) s->status |= BIT(EIOINTC_ENABLE); break; @@ -462,19 +467,17 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev, static int kvm_eiointc_regs_access(struct kvm_device *dev, struct kvm_device_attr *attr, - bool is_write) + bool is_write, int *data) { int addr, cpu, offset, ret = 0; unsigned long flags; void *p = NULL; - void __user *data; struct loongarch_eiointc *s; s = dev->kvm->arch.eiointc; addr = attr->attr; cpu = addr >> 16; addr &= 0xffff; - data = (void __user *)attr->addr; switch (addr) { case EIOINTC_NODETYPE_START ... 
EIOINTC_NODETYPE_END: offset = (addr - EIOINTC_NODETYPE_START) / 4; @@ -513,13 +516,10 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, } spin_lock_irqsave(&s->lock, flags); - if (is_write) { - if (copy_from_user(p, data, 4)) - ret = -EFAULT; - } else { - if (copy_to_user(data, p, 4)) - ret = -EFAULT; - } + if (is_write) + memcpy(p, data, 4); + else + memcpy(data, p, 4); spin_unlock_irqrestore(&s->lock, flags); return ret; @@ -527,19 +527,17 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev, static int kvm_eiointc_sw_status_access(struct kvm_device *dev, struct kvm_device_attr *attr, - bool is_write) + bool is_write, int *data) { int addr, ret = 0; unsigned long flags; void *p = NULL; - void __user *data; struct loongarch_eiointc *s; s = dev->kvm->arch.eiointc; addr = attr->attr; addr &= 0xffff; - data = (void __user *)attr->addr; switch (addr) { case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU: if (is_write) @@ -561,13 +559,10 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev, return -EINVAL; } spin_lock_irqsave(&s->lock, flags); - if (is_write) { - if (copy_from_user(p, data, 4)) - ret = -EFAULT; - } else { - if (copy_to_user(data, p, 4)) - ret = -EFAULT; - } + if (is_write) + memcpy(p, data, 4); + else + memcpy(data, p, 4); spin_unlock_irqrestore(&s->lock, flags); return ret; @@ -576,11 +571,27 @@ static int kvm_eiointc_sw_status_access(struct kvm_device *dev, static int kvm_eiointc_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + int ret, data; + switch (attr->group) { case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: - return kvm_eiointc_regs_access(dev, attr, false); + ret = kvm_eiointc_regs_access(dev, attr, false, &data); + if (ret) + return ret; + + if (copy_to_user((void __user *)attr->addr, &data, 4)) + ret = -EFAULT; + + return ret; case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: - return kvm_eiointc_sw_status_access(dev, attr, false); + ret = kvm_eiointc_sw_status_access(dev, attr, false, &data); + if (ret) + return ret; + + if (copy_to_user((void __user *)attr->addr, &data, 4)) + ret = -EFAULT; + + return ret; default: return -EINVAL; } @@ -589,13 +600,21 @@ static int kvm_eiointc_get_attr(struct kvm_device *dev, static int kvm_eiointc_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { + int data; + switch (attr->group) { case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL: return kvm_eiointc_ctrl_access(dev, attr); case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS: - return kvm_eiointc_regs_access(dev, attr, true); + if (copy_from_user(&data, (void __user *)attr->addr, 4)) + return -EFAULT; + + return kvm_eiointc_regs_access(dev, attr, true, &data); case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS: - return kvm_eiointc_sw_status_access(dev, attr, true); + if (copy_from_user(&data, (void __user *)attr->addr, 4)) + return -EFAULT; + + return kvm_eiointc_sw_status_access(dev, attr, true, &data); default: return -EINVAL; } diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 119290bcea79..baf3b4faf7ea 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -348,6 +348,7 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev, struct kvm_device_attr *attr, bool is_write) { + char buf[8]; int addr, offset, len = 8, ret = 0; void __user *data; void *p = NULL; @@ -397,17 +398,23 @@ static int kvm_pch_pic_regs_access(struct kvm_device *dev, return -EINVAL; } - spin_lock(&s->lock); - /* write or read value according to is_write */ if (is_write) { - if (copy_from_user(p, data, len)) 
- ret = -EFAULT; - } else { - if (copy_to_user(data, p, len)) - ret = -EFAULT; + if (copy_from_user(buf, data, len)) + return -EFAULT; } + + spin_lock(&s->lock); + if (is_write) + memcpy(p, buf, len); + else + memcpy(buf, p, len); spin_unlock(&s->lock); + if (!is_write) { + if (copy_to_user(data, buf, len)) + return -EFAULT; + } + return ret; } diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index ed956c5cf2cc..7c8143e79c12 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -569,7 +569,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ /* Track access to pages marked old */ new = kvm_pte_mkyoung(*ptep); if (write && !kvm_pte_dirty(new)) { - if (!kvm_pte_write(new)) { + if (!kvm_pte_writeable(new)) { ret = -EFAULT; goto out; } @@ -856,9 +856,9 @@ retry: prot_bits |= _CACHE_SUC; if (writeable) { - prot_bits |= _PAGE_WRITE; + prot_bits = kvm_pte_mkwriteable(prot_bits); if (write) - prot_bits |= __WRITEABLE; + prot_bits = kvm_pte_mkdirty(prot_bits); } /* Disable dirty logging on HugePages */ @@ -904,7 +904,7 @@ retry: kvm_release_faultin_page(kvm, page, false, writeable); spin_unlock(&kvm->mmu_lock); - if (prot_bits & _PAGE_DIRTY) + if (kvm_pte_dirty(prot_bits)) mark_page_dirty_in_slot(kvm, memslot, gfn); out: diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index e5f57cfe1d45..025c6dcbf893 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -16,11 +16,11 @@ #define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40 #define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44 -/* Load/Store return codes */ -#define ZPCI_PCI_LS_OK 0 -#define ZPCI_PCI_LS_ERR 1 -#define ZPCI_PCI_LS_BUSY 2 -#define ZPCI_PCI_LS_INVAL_HANDLE 3 +/* PCI instruction condition codes */ +#define ZPCI_CC_OK 0 +#define ZPCI_CC_ERR 1 +#define ZPCI_CC_BUSY 2 +#define ZPCI_CC_INVAL_HANDLE 3 /* Load/Store address space identifiers */ #define ZPCI_PCIAS_MEMIO_0 0 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2a92a8b9e4c2..9384572ffa7b 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2778,12 +2778,19 @@ static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap) static struct page *get_map_page(struct kvm *kvm, u64 uaddr) { + struct mm_struct *mm = kvm->mm; struct page *page = NULL; + int locked = 1; + + if (mmget_not_zero(mm)) { + mmap_read_lock(mm); + get_user_pages_remote(mm, uaddr, 1, FOLL_WRITE, + &page, &locked); + if (locked) + mmap_read_unlock(mm); + mmput(mm); + } - mmap_read_lock(kvm->mm); - get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, - &page, NULL); - mmap_read_unlock(kvm->mm); return page; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index bf6fa8b9ca73..6d51aa5f66be 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4864,12 +4864,12 @@ static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu) * @vcpu: the vCPU whose gmap is to be fixed up * @gfn: the guest frame number used for memslots (including fake memslots) * @gaddr: the gmap address, does not have to match @gfn for ucontrol gmaps - * @flags: FOLL_* flags + * @foll: FOLL_* flags * * Return: 0 on success, < 0 in case of error. * Context: The mm lock must not be held before calling. May sleep. 
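A note on the s390 rename in the surrounding kvm-s390.c hunks ("flags" to "foll"): the kernel has two unrelated flag namespaces here, FOLL_* for GUP-style page lookups and FAULT_FLAG_* for fault handling, and the old code mixed them ("flags = FAULT_FLAG_WRITE" where FOLL_WRITE was meant, apparently harmless only because both are currently bit 0). A sketch of the failure mode, with illustrative values rather than the real kernel ones:

#include <stdio.h>

#define FOLL_WRITE		0x01	/* page-lookup flags */
#define FOLL_NOWAIT		0x20

#define FAULT_FLAG_WRITE	0x01	/* fault-handling flags */
#define FAULT_FLAG_RETRY_NOWAIT	0x10

/* Expects FOLL_*; passing a FAULT_FLAG_* value compiles silently and
 * misreads any bit whose position differs between the namespaces. */
static void lookup_page(unsigned int foll)
{
	printf("write=%d nowait=%d\n",
	       !!(foll & FOLL_WRITE), !!(foll & FOLL_NOWAIT));
}

int main(void)
{
	lookup_page(FOLL_WRITE | FOLL_NOWAIT);			/* 1 1 */
	lookup_page(FAULT_FLAG_WRITE | FAULT_FLAG_RETRY_NOWAIT); /* 1 0: wrong namespace */
	return 0;
}

Naming the parameter foll, as the hunks below do, makes that mix-up visible at a glance.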
*/ -int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags) +int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int foll) { struct kvm_memory_slot *slot; unsigned int fault_flags; @@ -4883,13 +4883,13 @@ int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, u if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return vcpu_post_run_addressing_exception(vcpu); - fault_flags = flags & FOLL_WRITE ? FAULT_FLAG_WRITE : 0; + fault_flags = foll & FOLL_WRITE ? FAULT_FLAG_WRITE : 0; if (vcpu->arch.gmap->pfault_enabled) - flags |= FOLL_NOWAIT; + foll |= FOLL_NOWAIT; vmaddr = __gfn_to_hva_memslot(slot, gfn); try_again: - pfn = __kvm_faultin_pfn(slot, gfn, flags, &writable, &page); + pfn = __kvm_faultin_pfn(slot, gfn, foll, &writable, &page); /* Access outside memory, inject addressing exception */ if (is_noslot_pfn(pfn)) @@ -4905,7 +4905,7 @@ try_again: return 0; vcpu->stat.pfault_sync++; /* Could not setup async pfault, try again synchronously */ - flags &= ~FOLL_NOWAIT; + foll &= ~FOLL_NOWAIT; goto try_again; } /* Any other error */ @@ -4925,7 +4925,7 @@ try_again: return rc; } -static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int flags) +static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int foll) { unsigned long gaddr_tmp; gfn_t gfn; @@ -4950,18 +4950,18 @@ static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, un } gfn = gpa_to_gfn(gaddr_tmp); } - return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, flags); + return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, foll); } static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu) { - unsigned int flags = 0; + unsigned int foll = 0; unsigned long gaddr; int rc; gaddr = current->thread.gmap_teid.addr * PAGE_SIZE; if (kvm_s390_cur_gmap_fault_is_write()) - flags = FAULT_FLAG_WRITE; + foll = FOLL_WRITE; switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) { case 0: @@ -5003,7 +5003,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu) send_sig(SIGSEGV, current, 0); if (rc != -ENXIO) break; - flags = FAULT_FLAG_WRITE; + foll = FOLL_WRITE; fallthrough; case PGM_PROTECTION: case PGM_SEGMENT_TRANSLATION: @@ -5013,7 +5013,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu) case PGM_REGION_SECOND_TRANS: case PGM_REGION_THIRD_TRANS: kvm_s390_assert_primary_as(vcpu); - return vcpu_dat_fault_handler(vcpu, gaddr, flags); + return vcpu_dat_fault_handler(vcpu, gaddr, foll); default: KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx", current->thread.gmap_int_code, current->thread.gmap_teid.val); diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 25ede8354514..6ba5a0305e25 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -624,6 +624,17 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) int cc, ret; u16 dummy; + /* Add the notifier only once. 
No races because we hold kvm->lock */ + if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) { + /* The notifier will be unregistered when the VM is destroyed */ + kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops; + ret = mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm); + if (ret) { + kvm->arch.pv.mmu_notifier.ops = NULL; + return ret; + } + } + ret = kvm_s390_pv_alloc_vm(kvm); if (ret) return ret; @@ -659,11 +670,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) return -EIO; } kvm->arch.gmap->guest_handle = uvcb.guest_handle; - /* Add the notifier only once. No races because we hold kvm->lock */ - if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) { - kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops; - mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm); - } return 0; } diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index ad8d78fb1d9a..de7867ae220d 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -1250,10 +1250,12 @@ static int virtio_uml_probe(struct platform_device *pdev) device_set_wakeup_capable(&vu_dev->vdev.dev, true); rc = register_virtio_device(&vu_dev->vdev); - if (rc) + if (rc) { put_device(&vu_dev->vdev.dev); + return rc; + } vu_dev->registered = 1; - return rc; + return 0; error_init: os_close_file(vu_dev->sock); diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c index 617886d1fb1e..21f0e50fb1df 100644 --- a/arch/um/os-Linux/file.c +++ b/arch/um/os-Linux/file.c @@ -535,7 +535,7 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds, cmsg->cmsg_type != SCM_RIGHTS) return n; - memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len); + memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0)); return n; } diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c index 4193e04d7e4a..e3ad71a0d13c 100644 --- a/arch/um/os-Linux/util.c +++ b/arch/um/os-Linux/util.c @@ -20,8 +20,7 @@ void stack_protections(unsigned long address) { - if (mprotect((void *) address, UM_THREAD_SIZE, - PROT_READ | PROT_WRITE | PROT_EXEC) < 0) + if (mprotect((void *) address, UM_THREAD_SIZE, PROT_READ | PROT_WRITE) < 0) panic("protecting stack failed, errno = %d", errno); } diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index 02236962fdb1..465b19fd1a2d 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -562,6 +562,24 @@ enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, extern struct ghcb *boot_ghcb; +static inline void sev_evict_cache(void *va, int npages) +{ + volatile u8 val __always_unused; + u8 *bytes = va; + int page_idx; + + /* + * For SEV guests, a read from the first/last cache-lines of a 4K page + * using the guest key is sufficient to cause a flush of all cache-lines + * associated with that 4K page without incurring all the overhead of a + * full CLFLUSH sequence. 
+ */ + for (page_idx = 0; page_idx < npages; page_idx++) { + val = bytes[page_idx * PAGE_SIZE]; + val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1]; + } +} + #else /* !CONFIG_AMD_MEM_ENCRYPT */ #define snp_vmpl 0 @@ -605,6 +623,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; } static inline void __init snp_secure_tsc_prepare(void) { } static inline void __init snp_secure_tsc_init(void) { } +static inline void sev_evict_cache(void *va, int npages) {} #endif /* CONFIG_AMD_MEM_ENCRYPT */ @@ -619,24 +638,6 @@ int rmp_make_shared(u64 pfn, enum pg_level level); void snp_leak_pages(u64 pfn, unsigned int npages); void kdump_sev_callback(void); void snp_fixup_e820_tables(void); - -static inline void sev_evict_cache(void *va, int npages) -{ - volatile u8 val __always_unused; - u8 *bytes = va; - int page_idx; - - /* - * For SEV guests, a read from the first/last cache-lines of a 4K page - * using the guest key is sufficient to cause a flush of all cache-lines - * associated with that 4K page without incurring all the overhead of a - * full CLFLUSH sequence. - */ - for (page_idx = 0; page_idx < npages; page_idx++) { - val = bytes[page_idx * PAGE_SIZE]; - val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1]; - } -} #else static inline bool snp_probe_rmptable_info(void) { return false; } static inline int snp_rmptable_init(void) { return -ENOSYS; } @@ -652,7 +653,6 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV static inline void snp_leak_pages(u64 pfn, unsigned int npages) {} static inline void kdump_sev_callback(void) { } static inline void snp_fixup_e820_tables(void) {} -static inline void sev_evict_cache(void *va, int npages) {} #endif #endif diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d9931c6c4bc6..1bfebe40854f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4046,8 +4046,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); u64 cr8; - if (nested_svm_virtualize_tpr(vcpu) || - kvm_vcpu_apicv_active(vcpu)) + if (nested_svm_virtualize_tpr(vcpu)) return; cr8 = kvm_get_cr8(vcpu); diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 0da7c1ac778a..ca6fdcc6c54a 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -970,6 +970,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); + if (ctx->write) { + release_sock(sk); + return -EBUSY; + } + ctx->write = true; + if (ctx->init && !ctx->more) { if (ctx->used) { err = -EINVAL; @@ -1019,6 +1025,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, continue; } + ctx->merge = 0; + if (!af_alg_writable(sk)) { err = af_alg_wait_for_wmem(sk, msg->msg_flags); if (err) @@ -1058,7 +1066,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, ctx->used += plen; copied += plen; size -= plen; - ctx->merge = 0; } else { do { struct page *pg; @@ -1104,6 +1111,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unlock: af_alg_data_wakeup(sk); + ctx->write = false; release_sock(sk); return copied ?: err; diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index 87c425e3d2b9..6e39c769bb6d 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -898,6 +898,12 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client, drm_WARN_ON(&xdna->ddev, 
!mutex_is_locked(&xdna->dev_lock)); + if (args->element_size > SZ_4K || args->num_element > SZ_1K) { + XDNA_DBG(xdna, "Invalid element size %d or number of element %d", + args->element_size, args->num_element); + return -EINVAL; + } + array_args.element_size = min(args->element_size, sizeof(struct amdxdna_drm_hwctx_entry)); array_args.buffer = args->buffer; diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c index d407a36eb412..7f91863c3f24 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.c +++ b/drivers/accel/amdxdna/amdxdna_gem.c @@ -392,35 +392,33 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = { .vunmap = drm_gem_dmabuf_vunmap, }; -static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map) +static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr) { - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); - - iosys_map_clear(map); - - dma_resv_assert_held(obj->resv); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); + int ret; if (is_import_bo(abo)) - dma_buf_vmap(abo->dma_buf, map); + ret = dma_buf_vmap_unlocked(abo->dma_buf, &map); else - drm_gem_shmem_object_vmap(obj, map); - - if (!map->vaddr) - return -ENOMEM; + ret = drm_gem_vmap(to_gobj(abo), &map); - return 0; + *vaddr = map.vaddr; + return ret; } -static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo) { - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); + struct iosys_map map; + + if (!abo->mem.kva) + return; - dma_resv_assert_held(obj->resv); + iosys_map_set_vaddr(&map, abo->mem.kva); if (is_import_bo(abo)) - dma_buf_vunmap(abo->dma_buf, map); + dma_buf_vunmap_unlocked(abo->dma_buf, &map); else - drm_gem_shmem_object_vunmap(obj, map); + drm_gem_vunmap(to_gobj(abo), &map); } static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags) @@ -455,7 +453,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) { struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); - struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); @@ -468,7 +465,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) if (abo->type == AMDXDNA_BO_DEV_HEAP) drm_mm_takedown(&abo->mm); - drm_gem_vunmap(gobj, &map); + amdxdna_gem_obj_vunmap(abo); mutex_destroy(&abo->lock); if (is_import_bo(abo)) { @@ -489,8 +486,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { .pin = drm_gem_shmem_object_pin, .unpin = drm_gem_shmem_object_unpin, .get_sg_table = drm_gem_shmem_object_get_sg_table, - .vmap = amdxdna_gem_obj_vmap, - .vunmap = amdxdna_gem_obj_vunmap, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, .mmap = amdxdna_gem_obj_mmap, .vm_ops = &drm_gem_shmem_vm_ops, .export = amdxdna_gem_prime_export, @@ -663,7 +660,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev, struct drm_file *filp) { struct amdxdna_client *client = filp->driver_priv; - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_gem_obj *abo; int ret; @@ -692,12 +688,11 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev, abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base; drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size); - ret = drm_gem_vmap(to_gobj(abo), &map); + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); if (ret) { XDNA_ERR(xdna, 
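On the aie2_query_ctx_status_array() guard near the top of this hunk: capping element_size at SZ_4K and num_element at SZ_1K bounds the worst-case copy at 4 MiB before any buffer math happens. An overflow-checked product (an illustrative extension using <linux/overflow.h>, not part of the patch) would make that bound explicit at the allocation site:

        size_t total;

        if (args->element_size > SZ_4K || args->num_element > SZ_1K)
                return -EINVAL;
        /* check_mul_overflow() from <linux/overflow.h> fails on wraparound */
        if (check_mul_overflow((size_t)args->element_size,
                               (size_t)args->num_element, &total))
                return -EINVAL;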
"Vmap heap bo failed, ret %d", ret); goto release_obj; } - abo->mem.kva = map.vaddr; client->dev_heap = abo; drm_gem_object_get(to_gobj(abo)); @@ -748,7 +743,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, struct amdxdna_drm_create_bo *args, struct drm_file *filp) { - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_gem_obj *abo; int ret; @@ -770,12 +764,11 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, abo->type = AMDXDNA_BO_CMD; abo->client = filp->driver_priv; - ret = drm_gem_vmap(to_gobj(abo), &map); + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); if (ret) { XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); goto release_obj; } - abo->mem.kva = map.vaddr; return abo; diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index cd24ccd20ba6..3bd85ee6c26b 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent) DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n"); +static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw, + int band, const char *name) +{ + seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n", + name, + hw->hws.grace_period[band], + hw->hws.process_grace_period[band], + hw->hws.process_quantum[band]); +} + static int priority_bands_show(struct seq_file *s, void *v) { struct ivpu_device *vdev = s->private; struct ivpu_hw_info *hw = vdev->hw; - for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE; - band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) { - switch (band) { - case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE: - seq_puts(s, "Idle: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL: - seq_puts(s, "Normal: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS: - seq_puts(s, "Focus: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME: - seq_puts(s, "Realtime: "); - break; - } - - seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n", - hw->hws.grace_period[band], hw->hws.process_grace_period[band], - hw->hws.process_quantum[band]); - } + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime"); return 0; } diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 3289751b4757..a08f99c3ba4a 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -200,6 +200,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f case DRM_IVPU_PARAM_CAPABILITIES: args->value = ivpu_is_capable(vdev, args->index); break; + case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE: + args->value = ivpu_fw_preempt_buf_size(vdev); + break; default: ret = -EINVAL; break; diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 9db741695401..32f513499829 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -26,6 +26,8 @@ #define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START) #define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE) #define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE) +#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K +#define FW_PREEMPT_BUF_MAX_SIZE 
SZ_32M #define WATCHDOG_MSS_REDIRECT 32 #define WATCHDOG_NCE_REDIRECT 33 @@ -151,6 +153,47 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he return VPU_SCHEDULING_MODE_HW; } +static void +ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) +{ + struct ivpu_fw_info *fw = vdev->fw; + u32 primary_preempt_buf_size, secondary_preempt_buf_size; + + if (fw_hdr->preemption_buffer_1_max_size) + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; + else + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + + if (fw_hdr->preemption_buffer_2_max_size) + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; + else + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; + + ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n", + primary_preempt_buf_size, secondary_preempt_buf_size); + + if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE || + secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too small\n"); + return; + } + + if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE || + secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too big\n"); + return; + } + + if (fw->sched_mode != VPU_SCHEDULING_MODE_HW) + return; + + if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + return; + + vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE); + vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE); +} + static int ivpu_fw_parse(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; @@ -235,17 +278,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr); ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS"); - if (fw_hdr->preemption_buffer_1_max_size) - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; - else - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; - - if (fw_hdr->preemption_buffer_2_max_size) - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; - else - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; - ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n", - fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size); + ivpu_preemption_config_parse(vdev, fw_hdr); + ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n", + ivpu_fw_preempt_buf_size(vdev) ? 
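ivpu_preemption_config_parse() above accepts firmware-reported sizes only within [FW_PREEMPT_BUF_MIN_SIZE, FW_PREEMPT_BUF_MAX_SIZE], i.e. 4 KiB to 32 MiB, and page-aligns whatever survives, since the buffers are later allocated as page-granular BOs. A worked example of the rounding (assuming 4 KiB pages):

        /* ALIGN() rounds up to the next PAGE_SIZE multiple:
         *   70000 -> 73728 (18 pages); 65536 stays 65536 (16 pages).
         */
        vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);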
"is" : "is not"); if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size, @@ -483,11 +518,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n", boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n", - boot_params->global_memory_allocator_base); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n", - boot_params->global_memory_allocator_size); - ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n", boot_params->shave_nn_fw_base); @@ -495,10 +525,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->watchdog_irq_mss); ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n", boot_params->watchdog_irq_nce); - ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n", - boot_params->host_to_vpu_irq); - ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n", - boot_params->job_done_irq); ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n", boot_params->host_version_id); diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 7081913fb0dd..6fe2917abda6 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_FW_H__ @@ -52,4 +52,9 @@ static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev) return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point; } +static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev) +{ + return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size; +} + #endif /* __IVPU_FW_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index aa8ff14f7aae..3ee996d503b2 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_GEM_H__ #define __IVPU_GEM_H__ @@ -96,4 +96,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr) return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo)); } +static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo) +{ + return bo->flags & DRM_IVPU_BO_MAPPABLE; +} + #endif /* __IVPU_GEM_H__ */ diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 060f1fc031d3..044268d0fc87 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) { - u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE); - u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE); - - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW || - ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + if (ivpu_fw_preempt_buf_size(vdev) == 0) return 0; cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user, - primary_size, DRM_IVPU_BO_WC); + vdev->fw->primary_preempt_buf_size, + DRM_IVPU_BO_WC); if 
(!cmdq->primary_preempt_buf) { ivpu_err(vdev, "Failed to create primary preemption buffer\n"); return -ENOMEM; } cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma, - secondary_size, DRM_IVPU_BO_WC); + vdev->fw->secondary_preempt_buf_size, + DRM_IVPU_BO_WC); if (!cmdq->secondary_preempt_buf) { ivpu_err(vdev, "Failed to create secondary preemption buffer\n"); goto err_free_primary; @@ -66,20 +64,39 @@ err_free_primary: static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) { - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) - return; - if (cmdq->primary_preempt_buf) ivpu_bo_free(cmdq->primary_preempt_buf); if (cmdq->secondary_preempt_buf) ivpu_bo_free(cmdq->secondary_preempt_buf); } +static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, + struct ivpu_cmdq *cmdq, struct ivpu_job *job) +{ + int ret; + + /* Use preemption buffer provided by the user space */ + if (job->primary_preempt_buf) + return 0; + + if (!cmdq->primary_preempt_buf) { + /* Allocate per command queue preemption buffers */ + ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); + if (ret) + return ret; + } + + /* Use preemption buffers allocated by the kernel */ + job->primary_preempt_buf = cmdq->primary_preempt_buf; + job->secondary_preempt_buf = cmdq->secondary_preempt_buf; + + return 0; +} + static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_cmdq *cmdq; - int ret; cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL); if (!cmdq) @@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) if (!cmdq->mem) goto err_free_cmdq; - ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); - if (ret) - ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n"); - return cmdq; err_free_cmdq: @@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq * ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); - if (!ret) + if (!ret) { ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n", cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority); - else + } else { xa_erase(&vdev->db_xa, cmdq->db_id); + cmdq->db_id = 0; + } return ret; } @@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION)) entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK; - if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { - if (cmdq->primary_preempt_buf) { - entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; - entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); - } + if (job->primary_preempt_buf) { + entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr; + entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf); + } - if (cmdq->secondary_preempt_buf) { - entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; - entry->secondary_preempt_buf_size = - ivpu_bo_size(cmdq->secondary_preempt_buf); - } + if (job->secondary_preempt_buf) { + entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr; + entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf); } wmb(); /* Ensure that tail is updated after filling entry */ @@ -661,6 +673,13 @@ 
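ivpu_preemption_job_init() above encodes a three-step fallback, and moving ivpu_preemption_buffers_create() out of ivpu_cmdq_alloc() means queues that never submit work no longer pin preemption BOs. Condensed decision order (restated from the function, error handling omitted):

        if (job->primary_preempt_buf)           /* 1. userspace supplied a buffer */
                return 0;
        if (!cmdq->primary_preempt_buf)         /* 2. first job: allocate per-cmdq */
                ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
        job->primary_preempt_buf = cmdq->primary_preempt_buf;   /* 3. kernel BOs */
        job->secondary_preempt_buf = cmdq->secondary_preempt_buf;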
static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) goto err_unlock; } + ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job); + if (ret) { + ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n", + job->job_id, ret); + goto err_unlock; + } + job->cmdq_id = cmdq->id; is_first_job = xa_empty(&vdev->submitted_jobs_xa); @@ -714,7 +733,7 @@ err_unlock: static int ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, - u32 buf_count, u32 commands_offset) + u32 buf_count, u32 commands_offset, u32 preempt_buffer_index) { struct ivpu_file_priv *file_priv = job->file_priv; struct ivpu_device *vdev = file_priv->vdev; @@ -750,6 +769,20 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; + if (preempt_buffer_index) { + struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index]; + + if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) { + ivpu_warn(vdev, "Preemption buffer is too small\n"); + return -EINVAL; + } + if (ivpu_bo_is_mappable(preempt_bo)) { + ivpu_warn(vdev, "Preemption buffer cannot be mappable\n"); + return -EINVAL; + } + job->primary_preempt_buf = preempt_bo; + } + ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); if (ret) { @@ -780,7 +813,7 @@ unlock_reservations: static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id, u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset, - u8 priority) + u32 preempt_buffer_index, u8 priority) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_job *job; @@ -812,7 +845,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, goto err_exit_dev; } - ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset); + ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset, + preempt_buffer_index); if (ret) { ivpu_err(vdev, "Failed to prepare job: %d\n", ret); goto err_destroy_job; @@ -866,7 +900,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) priority = ivpu_job_to_jsm_priority(args->priority); return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine, - (void __user *)args->buffers_ptr, args->commands_offset, priority); + (void __user *)args->buffers_ptr, args->commands_offset, 0, priority); } int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -883,6 +917,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file * if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) return -EINVAL; + if (args->preempt_buffer_index >= args->buffer_count) + return -EINVAL; + if (!IS_ALIGNED(args->commands_offset, 8)) return -EINVAL; @@ -893,7 +930,8 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file * return -EBADFD; return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE, - (void __user *)args->buffers_ptr, args->commands_offset, 0); + (void __user *)args->buffers_ptr, args->commands_offset, + args->preempt_buffer_index, 0); } int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -1012,7 +1050,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) if (ivpu_jsm_reset_engine(vdev, 0)) - return; + goto 
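On the preempt_buffer_index plumbing above: index 0 doubles as "no user buffer", which lines up with slot 0 of the handle array holding the command buffer. A user-supplied buffer must be big enough for both firmware preemption areas combined and must not carry DRM_IVPU_BO_MAPPABLE, presumably to keep saved context state out of userspace reach. The checks, gathered in one place as a sketch:

        /* In the ioctl handler: */
        if (args->preempt_buffer_index >= args->buffer_count)
                return -EINVAL;         /* index must name one of the passed BOs */
        /* In ivpu_job_prepare_bos_for_submit(), for the indexed BO: */
        if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev))
                return -EINVAL;         /* primary + secondary must fit together */
        if (ivpu_bo_is_mappable(preempt_bo))
                return -EINVAL;         /* CPU-mappable BOs are rejected */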
runtime_put; mutex_lock(&vdev->context_list_lock); xa_for_each(&vdev->context_xa, ctx_id, file_priv) { @@ -1036,7 +1074,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) goto runtime_put; if (ivpu_jsm_hws_resume_engine(vdev, 0)) - return; + goto runtime_put; /* * In hardware scheduling mode NPU already has stopped processing jobs * and won't send us any further notifications, thus we have to free job related resources diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index 2e301c2eea7b..6c8b9c739b51 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_JOB_H__ @@ -55,6 +55,8 @@ struct ivpu_job { u32 job_id; u32 engine_idx; size_t bo_count; + struct ivpu_bo *primary_preempt_buf; + struct ivpu_bo *secondary_preempt_buf; struct ivpu_bo *bos[] __counted_by(bo_count); }; diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h index 4b6b2b3d2583..de1b37ea1251 100644 --- a/drivers/accel/ivpu/vpu_jsm_api.h +++ b/drivers/accel/ivpu/vpu_jsm_api.h @@ -1,15 +1,16 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright (c) 2020-2024, Intel Corporation. + * Copyright (c) 2020-2025, Intel Corporation. + */ + +/** + * @addtogroup Jsm + * @{ */ /** * @file * @brief JSM shared definitions - * - * @ingroup Jsm - * @brief JSM shared definitions - * @{ */ #ifndef VPU_JSM_API_H #define VPU_JSM_API_H @@ -22,12 +23,12 @@ /* * Minor version changes when API backward compatibility is preserved. */ -#define VPU_JSM_API_VER_MINOR 29 +#define VPU_JSM_API_VER_MINOR 32 /* * API header changed (field names, documentation, formatting) but API itself has not been changed */ -#define VPU_JSM_API_VER_PATCH 0 +#define VPU_JSM_API_VER_PATCH 5 /* * Index in the API version table @@ -71,9 +72,12 @@ #define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU #define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU -/* Job status returned when the job was preempted mid-inference */ +/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */ #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU +/* Job status returned when the job was preempted mid-command */ +#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU #define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU +#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU /* * Host <-> VPU IPC channels. @@ -134,11 +138,21 @@ enum { * 2. Native fence queues are only supported on VPU 40xx onwards. */ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U), - /* * Enable turbo mode for testing NPU performance; not recommended for regular usage. */ - VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U) + VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U), + /* + * Queue error detection mode flag + * For 'interactive' queues (this bit not set), the FW will identify queues that have not + * completed a job inside the TDR timeout as in error as part of engine reset sequence. + * For 'non-interactive' queues (this bit set), the FW will identify queues that have not + * progressed the heartbeat inside the non-interactive no-progress timeout as in error as + * part of engine reset sequence. Additionally, there is an upper limit applied to these + * queues: even if they progress the heartbeat, if they run longer than non-interactive + * timeout, then the FW will also identify them as in error. 
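Both early exits in ivpu_context_abort_work_fn() above now jump to runtime_put instead of returning. Assuming the usual ivpu_rpm_get()/ivpu_rpm_put() pairing that the label implies, a bare return on the error path would leak a runtime-PM usage count and keep the NPU powered indefinitely:

        /* Sketch of the pattern (function body condensed): */
        if (ivpu_rpm_get(vdev) < 0)
                return;
        if (ivpu_jsm_reset_engine(vdev, 0))
                goto runtime_put;       /* was: return -- leaked the PM reference */
        /* ... */
runtime_put:
        ivpu_rpm_put(vdev);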
+ */ + VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U) }; /* @@ -209,7 +223,7 @@ enum { */ #define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2 -/* +/** * Job scheduling priority bands for both hardware scheduling and OS scheduling. */ enum vpu_job_scheduling_priority_band { @@ -220,16 +234,16 @@ enum vpu_job_scheduling_priority_band { VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4, }; -/* +/** * Job format. * Jobs defines the actual workloads to be executed by a given engine. */ struct vpu_job_queue_entry { - /**< Address of VPU commands batch buffer */ + /** Address of VPU commands batch buffer */ u64 batch_buf_addr; - /**< Job ID */ + /** Job ID */ u32 job_id; - /**< Flags bit field, see VPU_JOB_FLAGS_* above */ + /** Flags bit field, see VPU_JOB_FLAGS_* above */ u32 flags; /** * Doorbell ring timestamp taken by KMD from SoC's global system clock, in @@ -237,20 +251,20 @@ struct vpu_job_queue_entry { * to match other profiling timestamps. */ u64 doorbell_timestamp; - /**< Extra id for job tracking, used only in the firmware perf traces */ + /** Extra id for job tracking, used only in the firmware perf traces */ u64 host_tracking_id; - /**< Address of the primary preemption buffer to use for this job */ + /** Address of the primary preemption buffer to use for this job */ u64 primary_preempt_buf_addr; - /**< Size of the primary preemption buffer to use for this job */ + /** Size of the primary preemption buffer to use for this job */ u32 primary_preempt_buf_size; - /**< Size of secondary preemption buffer to use for this job */ + /** Size of secondary preemption buffer to use for this job */ u32 secondary_preempt_buf_size; - /**< Address of secondary preemption buffer to use for this job */ + /** Address of secondary preemption buffer to use for this job */ u64 secondary_preempt_buf_addr; u64 reserved_0; }; -/* +/** * Inline command format. * Inline commands are the commands executed at scheduler level (typically, * synchronization directives). Inline command and job objects must be of @@ -258,34 +272,36 @@ struct vpu_job_queue_entry { */ struct vpu_inline_cmd { u64 reserved_0; - /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ + /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ u32 type; - /* Flags bit field, see VPU_JOB_FLAGS_* above. */ + /** Flags bit field, see VPU_JOB_FLAGS_* above. */ u32 flags; - /* Inline command payload. Depends on inline command type. */ - union { - /* Fence (wait and signal) commands' payload. */ - struct { - /* Fence object handle. */ + /** Inline command payload. Depends on inline command type. */ + union payload { + /** Fence (wait and signal) commands' payload. */ + struct fence { + /** Fence object handle. */ u64 fence_handle; - /* User VA of the current fence value. */ + /** User VA of the current fence value. */ u64 current_value_va; - /* User VA of the monitored fence value (read-only). */ + /** User VA of the monitored fence value (read-only). */ u64 monitored_value_va; - /* Value to wait for or write in fence location. */ + /** Value to wait for or write in fence location. */ u64 value; - /* User VA of the log buffer in which to add log entry on completion. */ + /** User VA of the log buffer in which to add log entry on completion. */ u64 log_buffer_va; - /* NPU private data. */ + /** NPU private data. */ u64 npu_private_data; } fence; - /* Other commands do not have a payload. */ - /* Payload definition for future inline commands can be inserted here. 
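Much of the vpu_jsm_api.h churn above is Doxygen repair: the old struct members were annotated with the trailing form /**< placed in the leading position, which attaches each description to the wrong member. The two valid placements, for reference:

        struct example {        /* illustrative struct, not from the header */
                /** Leading form: documents the member that follows (foo). */
                u32 foo;
                u32 bar;        /**< Trailing form: documents the preceding member (bar). */
        };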
*/ + /** + * Other commands do not have a payload: + * Payload definition for future inline commands can be inserted here. + */ u64 reserved_1[6]; } payload; }; -/* +/** * Job queue slots can be populated either with job objects or inline command objects. */ union vpu_jobq_slot { @@ -293,7 +309,7 @@ union vpu_jobq_slot { struct vpu_inline_cmd inline_cmd; }; -/* +/** * Job queue control registers. */ struct vpu_job_queue_header { @@ -301,18 +317,18 @@ struct vpu_job_queue_header { u32 head; u32 tail; u32 flags; - /* Set to 1 to indicate priority_band field is valid */ + /** Set to 1 to indicate priority_band field is valid */ u32 priority_band_valid; - /* + /** * Priority for the work of this job queue, valid only if the HWS is NOT used - * and the `priority_band_valid` is set to 1. It is applied only during - * the VPU_JSM_MSG_REGISTER_DB message processing. - * The device firmware might use the `priority_band` to optimize the power + * and the @ref priority_band_valid is set to 1. It is applied only during + * the @ref VPU_JSM_MSG_REGISTER_DB message processing. + * The device firmware might use the priority_band to optimize the power * management logic, but it will not affect the order of jobs. * Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority, limited to 0..31 range */ + /** Inside realtime band assigns a further priority, limited to 0..31 range */ u32 realtime_priority_level; u32 reserved_0[9]; }; @@ -337,16 +353,16 @@ enum vpu_trace_entity_type { VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2, }; -/* +/** * HWS specific log buffer header details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_header { - /* Written by VPU after adding a log entry. Initialised by host to 0. */ + /** Written by VPU after adding a log entry. Initialised by host to 0. */ u32 first_free_entry_index; - /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ + /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ u32 wraparound_count; - /* + /** * This is the number of buffers that can be stored in the log buffer provided by the host. * It is written by host before passing buffer to VPU. VPU should consider it read-only. */ @@ -354,14 +370,14 @@ struct vpu_hws_log_buffer_header { u64 reserved[2]; }; -/* +/** * HWS specific log buffer entry details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_entry { - /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ + /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ u64 vpu_timestamp; - /* + /** * Operation type: * 0 - context state change * 1 - queue new work @@ -371,7 +387,7 @@ struct vpu_hws_log_buffer_entry { */ u32 operation_type; u32 reserved; - /* Operation data depends on operation type */ + /** Operation data depends on operation type */ u64 operation_data[2]; }; @@ -381,51 +397,54 @@ enum vpu_hws_native_fence_log_type { VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2 }; -/* HWS native fence log buffer header. */ +/** HWS native fence log buffer header. */ struct vpu_hws_native_fence_log_header { union { struct { - /* Index of the first free entry in buffer. */ + /** Index of the first free entry in buffer. */ u32 first_free_entry_idx; - /* Incremented each time NPU wraps around the buffer to write next entry. */ + /** + * Incremented each time NPU wraps around + * the buffer to write next entry. 
+ */ u32 wraparound_count; }; - /* Field allowing atomic update of both fields above. */ + /** Field allowing atomic update of both fields above. */ u64 atomic_wraparound_and_entry_idx; }; - /* Log buffer type, see enum vpu_hws_native_fence_log_type. */ + /** Log buffer type, see enum vpu_hws_native_fence_log_type. */ u64 type; - /* Allocated number of entries in the log buffer. */ + /** Allocated number of entries in the log buffer. */ u64 entry_nb; u64 reserved[2]; }; -/* Native fence log operation types. */ +/** Native fence log operation types. */ enum vpu_hws_native_fence_log_op { VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0, VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1 }; -/* HWS native fence log entry. */ +/** HWS native fence log entry. */ struct vpu_hws_native_fence_log_entry { - /* Newly signaled/unblocked fence value. */ + /** Newly signaled/unblocked fence value. */ u64 fence_value; - /* Native fence object handle to which this operation belongs. */ + /** Native fence object handle to which this operation belongs. */ u64 fence_handle; - /* Operation type, see enum vpu_hws_native_fence_log_op. */ + /** Operation type, see enum vpu_hws_native_fence_log_op. */ u64 op_type; u64 reserved_0; - /* + /** * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence * wait was started (in NPU SysTime). */ u64 fence_wait_start_ts; u64 reserved_1; - /* Timestamp at which fence operation was completed (in NPU SysTime). */ + /** Timestamp at which fence operation was completed (in NPU SysTime). */ u64 fence_end_ts; }; -/* Native fence log buffer. */ +/** Native fence log buffer. */ struct vpu_hws_native_fence_log_buffer { struct vpu_hws_native_fence_log_header header; struct vpu_hws_native_fence_log_entry entry[]; @@ -450,8 +469,21 @@ enum vpu_ipc_msg_type { * after preemption or when resubmitting jobs to the queue. */ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101, + /** + * OS scheduling doorbell register command + * @see vpu_ipc_msg_payload_register_db + */ VPU_JSM_MSG_REGISTER_DB = 0x1102, + /** + * OS scheduling doorbell unregister command + * @see vpu_ipc_msg_payload_unregister_db + */ VPU_JSM_MSG_UNREGISTER_DB = 0x1103, + /** + * Query engine heartbeat. Heartbeat is expected to increase monotonically + * and increase while work is being progressed by NPU. + * @see vpu_ipc_msg_payload_query_engine_hb + */ VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105, VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106, @@ -477,6 +509,7 @@ enum vpu_ipc_msg_type { * aborted and removed from internal scheduling queues. All doorbells assigned * to the host_ssid are unregistered and any internal FW resources belonging to * the host_ssid are released. 
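On the native fence log header just above: overlaying the two u32 counters with a single u64 lets producer and consumer move both atomically. An illustrative host-side read, where hdr points at a struct vpu_hws_native_fence_log_header (assuming a little-endian host, where first_free_entry_idx occupies the low half):

        u64 snap = READ_ONCE(hdr->atomic_wraparound_and_entry_idx);
        u32 entry_idx = lower_32_bits(snap);    /* first_free_entry_idx */
        u32 wraps     = upper_32_bits(snap);    /* wraparound_count */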
+ * @see vpu_ipc_msg_payload_ssid_release */ VPU_JSM_MSG_SSID_RELEASE = 0x110e, /** @@ -504,26 +537,51 @@ enum vpu_ipc_msg_type { * @see vpu_jsm_metric_streamer_start */ VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112, - /** Control command: Priority band setup */ + /** + * Control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113, - /** Control command: Create command queue */ + /** + * Control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq + */ VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114, - /** Control command: Destroy command queue */ + /** + * Control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115, - /** Control command: Set context scheduling properties */ + /** + * Control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116, - /* + /** * Register a doorbell to notify VPU of new work. The doorbell may later be * deallocated or reassigned to another context. + * @see vpu_jsm_hws_register_db */ VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117, - /** Control command: Log buffer setting */ + /** + * Control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118, - /* Control command: Suspend command queue. */ + /** + * Control command: Suspend command queue. + * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119, - /* Control command: Resume command queue */ + /** + * Control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a, - /* Control command: Resume engine after reset */ + /** + * Control command: Resume engine after reset + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b, /* Control command: Enable survivability/DCT mode */ VPU_JSM_MSG_DCT_ENABLE = 0x111c, @@ -540,7 +598,8 @@ enum vpu_ipc_msg_type { VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD, /** * Control dyndbg behavior by executing a dyndbg command; equivalent to - * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`. + * Linux command: + * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim */ VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201, /** @@ -550,15 +609,26 @@ enum vpu_ipc_msg_type { /* IPC Device -> Host, Job completion */ VPU_JSM_MSG_JOB_DONE = 0x2100, - /* IPC Device -> Host, Fence signalled */ + /** + * IPC Device -> Host, Fence signalled + * @see vpu_ipc_msg_payload_native_fence_signalled + */ VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101, /* IPC Device -> Host, Async command completion */ VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200, + /** + * IPC Device -> Host, engine reset complete + * @see vpu_ipc_msg_payload_engine_reset_done + */ VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE, VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201, VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202, VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203, + /** + * Response to query engine heartbeat. + * @see vpu_ipc_msg_payload_query_engine_hb_done + */ VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205, VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206, @@ -575,7 +645,10 @@ enum vpu_ipc_msg_type { VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c, /** Response to VPU_JSM_MSG_TRACE_GET_NAME. 
*/ VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d, - /** Response to VPU_JSM_MSG_SSID_RELEASE. */ + /** + * Response to VPU_JSM_MSG_SSID_RELEASE. + * @see vpu_ipc_msg_payload_ssid_release + */ VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e, /** * Response to VPU_JSM_MSG_METRIC_STREAMER_START. @@ -605,29 +678,56 @@ enum vpu_ipc_msg_type { /** * Asynchronous event sent from the VPU to the host either when the current * metric buffer is full or when the VPU has collected a multiple of - * @notify_sample_count samples as indicated through the start command - * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected - * metric data. + * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated + * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns + * information about collected metric data. * @see vpu_jsm_metric_streamer_done */ VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213, - /** Response to control command: Priority band setup */ + /** + * Response to control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214, - /** Response to control command: Create command queue */ + /** + * Response to control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp + */ VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215, - /** Response to control command: Destroy command queue */ + /** + * Response to control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216, - /** Response to control command: Set context scheduling properties */ + /** + * Response to control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217, - /** Response to control command: Log buffer setting */ + /** + * Response to control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218, - /* IPC Device -> Host, HWS notify index entry of log buffer written */ + /** + * IPC Device -> Host, HWS notify index entry of log buffer written + * @see vpu_ipc_msg_payload_hws_scheduling_log_notification + */ VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219, - /* IPC Device -> Host, HWS completion of a context suspend request */ + /** + * IPC Device -> Host, HWS completion of a context suspend request + * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a, - /* Response to control command: Resume command queue */ + /** + * Response to control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b, - /* Response to control command: Resume engine command response */ + /** + * Response to control command: Resume engine command response + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c, /* Response to control command: Enable survivability/DCT mode */ VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d, @@ -670,40 +770,44 @@ struct vpu_ipc_msg_payload_engine_preempt { u32 preempt_id; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for only OS scheduling. * @see VPU_JSM_MSG_REGISTER_DB */ struct vpu_ipc_msg_payload_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. 
*/ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Virtual address in Global GTT pointing to the start of job queue. */ + /** Virtual address in Global GTT pointing to the start of job queue. */ u64 jobq_base; - /* Size of the job queue in bytes. */ + /** Size of the job queue in bytes. */ u32 jobq_size; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. */ u32 host_ssid; }; /** - * @brief Unregister doorbell command structure. + * Unregister doorbell command structure. * Request structure to unregister a doorbell for both HW and OS scheduling. * @see VPU_JSM_MSG_UNREGISTER_DB */ struct vpu_ipc_msg_payload_unregister_db { - /* Index of the doorbell to unregister. */ + /** Index of the doorbell to unregister. */ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; +/** + * Heartbeat request structure + * @see VPU_JSM_MSG_QUERY_ENGINE_HB + */ struct vpu_ipc_msg_payload_query_engine_hb { - /* Engine to return heartbeat value. */ + /** Engine to return heartbeat value. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -723,10 +827,14 @@ struct vpu_ipc_msg_payload_power_level { u32 reserved_0; }; +/** + * Structure for requesting ssid release + * @see VPU_JSM_MSG_SSID_RELEASE + */ struct vpu_ipc_msg_payload_ssid_release { - /* Host sub-stream ID for the context to be released. */ + /** Host sub-stream ID for the context to be released. */ u32 host_ssid; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -752,7 +860,7 @@ struct vpu_jsm_metric_streamer_start { u64 sampling_rate; /** * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message - * after every @notify_sample_count samples is collected or dropped by the VPU. + * after every @ref notify_sample_count samples is collected or dropped by the VPU. * If set to UINT_MAX the VPU will only generate a notification when the metric * buffer is full. If set to 0 the VPU will never generate a notification. */ @@ -762,9 +870,9 @@ struct vpu_jsm_metric_streamer_start { * Address and size of the buffer where the VPU will write metric data. The * VPU writes all counters from enabled metric groups one after another. If * there is no space left to write data at the next sample period the VPU - * will switch to the next buffer (@see next_buffer_addr) and will optionally - * send a notification to the host driver if @notify_sample_count is non-zero. - * If @next_buffer_addr is NULL the VPU will stop collecting metric data. + * will switch to the next buffer (@ref next_buffer_addr) and will optionally + * send a notification to the host driver if @ref notify_sample_count is non-zero. + * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data. */ u64 buffer_addr; u64 buffer_size; @@ -844,38 +952,47 @@ struct vpu_ipc_msg_payload_job_done { u64 cmdq_id; }; -/* +/** * Notification message upon native fence signalling. * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED */ struct vpu_ipc_msg_payload_native_fence_signalled { - /* Engine ID. */ + /** Engine ID. */ u32 engine_idx; - /* Host SSID. */ + /** Host SSID. */ u32 host_ssid; - /* CMDQ ID */ + /** CMDQ ID */ u64 cmdq_id; - /* Fence object handle. */ + /** Fence object handle. */ u64 fence_handle; }; +/** + * vpu_ipc_msg_payload_engine_reset_done will contain an array of this structure + * which contains which queues caused reset if FW was able to detect any error. 
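The heartbeat documentation added above ("expected to increase monotonically and increase while work is being progressed") is what makes a liveness check possible: sample the counter across a timeout window and treat a frozen value as a hung engine. A sketch with hypothetical helper names, not driver code:

        u64 hb0, hb1;

        query_engine_heartbeat(vdev, engine_idx, &hb0); /* hypothetical wrapper */
        msleep(TDR_WINDOW_MS);                          /* hypothetical window */
        query_engine_heartbeat(vdev, engine_idx, &hb1);
        if (hb1 == hb0)                                 /* no forward progress */
                trigger_engine_reset(vdev, engine_idx); /* hypothetical */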
+ * @see vpu_ipc_msg_payload_engine_reset_done + */ struct vpu_jsm_engine_reset_context { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* See VPU_ENGINE_RESET_CONTEXT_* defines */ + /** See VPU_ENGINE_RESET_CONTEXT_* defines */ u64 flags; }; +/** + * Engine reset response. + * @see VPU_JSM_MSG_ENGINE_RESET_DONE + */ struct vpu_ipc_msg_payload_engine_reset_done { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Number of impacted contexts */ + /** Number of impacted contexts */ u32 num_impacted_contexts; - /* Array of impacted command queue ids and their flags */ + /** Array of impacted command queue ids and their flags */ struct vpu_jsm_engine_reset_context impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS]; }; @@ -912,12 +1029,16 @@ struct vpu_ipc_msg_payload_unregister_db_done { u32 reserved_0; }; +/** + * Structure for heartbeat response + * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE + */ struct vpu_ipc_msg_payload_query_engine_hb_done { - /* Engine returning heartbeat value. */ + /** Engine returning heartbeat value. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Heartbeat value. */ + /** Heartbeat value. */ u64 heartbeat; }; @@ -937,7 +1058,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done { u8 power_limit[16]; }; -/* HWS priority band setup request / response */ +/** + * HWS priority band setup request / response + * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP + */ struct vpu_ipc_msg_payload_hws_priority_band_setup { /* * Grace period in 100ns units when preempting another priority band for @@ -964,15 +1088,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { * TDR timeout value in milliseconds. Default value of 0 meaning no timeout. */ u32 tdr_timeout; + /* Non-interactive queue timeout for no progress of heartbeat in milliseconds. + * Default value of 0 meaning no timeout. + */ + u32 non_interactive_no_progress_timeout; + /* + * Non-interactive queue upper limit timeout value in milliseconds. Default + * value of 0 meaning no timeout. + */ + u32 non_interactive_timeout; }; -/* +/** * @brief HWS create command queue request. * Host will create a command queue via this command. * Note: Cmdq group is a handle of an object which * may contain one or more command queues. * @see VPU_JSM_MSG_CREATE_CMD_QUEUE - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq { /* Process id */ @@ -993,66 +1125,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq { u32 reserved_0; }; -/* - * @brief HWS create command queue response. - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE +/** + * HWS create command queue response. 
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp { - /* Process id */ + /** Process id */ u64 process_id; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Engine for which queue is being created */ + /** Engine for which queue is being created */ u32 engine_idx; - /* Command queue group */ + /** Command queue group */ u64 cmdq_group; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS destroy command queue request / response */ +/** + * HWS destroy command queue request / response + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP + */ struct vpu_ipc_msg_payload_hws_destroy_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS set context scheduling properties request / response */ +/** + * HWS set context scheduling properties request / response + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP + */ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* + /** * Priority band to assign to work of this context. * Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority */ + /** Inside realtime band assigns a further priority */ u32 realtime_priority_level; - /* Priority relative to other contexts in the same process */ + /** Priority relative to other contexts in the same process */ s32 in_process_priority; - /* Zero padding / Reserved */ + /** Zero padding / Reserved */ u32 reserved_1; - /* + /** * Context quantum relative to other contexts of same priority in the same process * Minimum value supported by NPU is 1ms (10000 in 100ns units). */ u64 context_quantum; - /* Grace period when preempting context of the same priority within the same process */ + /** Grace period when preempting context of the same priority within the same process */ u64 grace_period_same_priority; - /* Grace period when preempting context of a lower priority within the same process */ + /** Grace period when preempting context of a lower priority within the same process */ u64 grace_period_lower_priority; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for both HW and OS scheduling. * Note: Queue base and size are added here so that the same structure can be used for * OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored @@ -1061,27 +1200,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { * @see VPU_JSM_MSG_HWS_REGISTER_DB */ struct vpu_jsm_hws_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. */ u32 db_id; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. */ u32 host_ssid; - /* ID of the command queue associated with the doorbell. */ + /** ID of the command queue associated with the doorbell. */ u64 cmdq_id; - /* Virtual address pointing to the start of command queue. */ + /** Virtual address pointing to the start of command queue. 
*/ u64 cmdq_base; - /* Size of the command queue in bytes. */ + /** Size of the command queue in bytes. */ u64 cmdq_size; }; -/* - * @brief Structure to set another buffer to be used for scheduling-related logging. +/** + * Structure to set another buffer to be used for scheduling-related logging. * The size of the logging buffer and the number of entries is defined as part of the * buffer itself as described next. * The log buffer received from the host is made up of; - * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'. + * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header. * The header contains the number of log entries in the buffer. * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in - * 'struct vpu_hws_log_buffer_entry'. + * @ref vpu_hws_log_buffer_entry. * The entry contains the VPU timestamp, operation type and data. * The host should provide the notify index value of log buffer to VPU. This is a * value defined within the log buffer and when written to will generate the @@ -1095,30 +1234,30 @@ struct vpu_jsm_hws_register_db { * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION */ struct vpu_ipc_msg_payload_hws_set_scheduling_log { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* + /** * VPU log buffer virtual address. * Set to 0 to disable logging for this engine. */ u64 vpu_log_buffer_va; - /* + /** * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION * is generated when an event log is written to this index. */ u64 notify_index; - /* + /** * Field is now deprecated, will be removed when KMD is updated to support removal */ u32 enable_extra_events; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief The scheduling log notification is generated by VPU when it writes +/** + * The scheduling log notification is generated by VPU when it writes * an event into the log buffer at the notify_index. VPU notifies host with * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous * message from VPU to host. @@ -1126,14 +1265,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log { * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG */ struct vpu_ipc_msg_payload_hws_scheduling_log_notification { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief HWS suspend command queue request and done structure. +/** + * HWS suspend command queue request and done structure. * Host will request the suspend of contexts and VPU will; * - Suspend all work on this context * - Preempt any running work @@ -1152,21 +1291,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification { * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE */ struct vpu_ipc_msg_payload_hws_suspend_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* + /** * Suspend fence value - reported by the VPU suspend context * completed once suspend is complete. */ u64 suspend_fence_value; }; -/* - * @brief HWS Resume command queue request / response structure. +/** + * HWS Resume command queue request / response structure. 
* Host will request the resume of a context; * - VPU will resume all work on this context * - Scheduler will allow this context to be scheduled @@ -1174,25 +1313,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq { * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP */ struct vpu_ipc_msg_payload_hws_resume_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* - * @brief HWS Resume engine request / response structure. - * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume. +/** + * HWS Resume engine request / response structure. + * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume. * Host shall send this command to resume scheduling of any valid queue. - * @see VPU_JSM_MSG_HWS_RESUME_ENGINE + * @see VPU_JSM_MSG_HWS_ENGINE_RESUME * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE */ struct vpu_ipc_msg_payload_hws_resume_engine { - /* Engine to be resumed */ + /** Engine to be resumed */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -1326,7 +1465,7 @@ struct vpu_jsm_metric_streamer_done { /** * Metric group description placed in the metric buffer after successful completion * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more - * @vpu_jsm_metric_counter_descriptor records. + * @ref vpu_jsm_metric_counter_descriptor records. * @see VPU_JSM_MSG_METRIC_STREAMER_INFO */ struct vpu_jsm_metric_group_descriptor { diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index e09930c2b226..91f3b8afb63c 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device, lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS; else lim.max_write_zeroes_sectors = 0; + lim.max_hw_wzeroes_unmap_sectors = 0; if ((lim.discard_granularity >> SECTOR_SHIFT) > lim.max_hw_discard_sectors) { diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8acad3cc6e6e..f31652085adc 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1795,6 +1795,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill, u32 index) { zram_slot_lock(zram, index); + zram_free_page(zram, index); zram_set_flag(zram, index, ZRAM_SAME); zram_set_handle(zram, index, fill); zram_slot_unlock(zram, index); @@ -1832,6 +1833,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page, kunmap_local(src); zram_slot_lock(zram, index); + zram_free_page(zram, index); zram_set_flag(zram, index, ZRAM_HUGE); zram_set_handle(zram, index, handle); zram_set_obj_size(zram, index, PAGE_SIZE); @@ -1855,11 +1857,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) unsigned long element; bool same_filled; - /* First, free memory allocated to this slot (if any) */ - zram_slot_lock(zram, index); - zram_free_page(zram, index); - zram_slot_unlock(zram, index); - mem = kmap_local_page(page); same_filled = page_same_filled(mem, &element); kunmap_local(mem); @@ -1901,6 +1898,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) zcomp_stream_put(zstrm); zram_slot_lock(zram, index); + zram_free_page(zram, index); zram_set_handle(zram, index, handle); zram_set_obj_size(zram, index, comp_len); zram_slot_unlock(zram, index); diff --git a/drivers/clk/renesas/clk-mstp.c 
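The three zram hunks above are one logical fix: zram_free_page() used to run in its own slot-locked section at the top of zram_write_page(), leaving a window in which the slot was already freed but the new contents not yet published. Every writer now frees and republishes under a single slot lock; the shape, condensed:

        zram_slot_lock(zram, index);
        zram_free_page(zram, index);            /* drop the old handle, if any */
        zram_set_handle(zram, index, handle);   /* publish the new contents */
        zram_set_obj_size(zram, index, comp_len);
        zram_slot_unlock(zram, index);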
b/drivers/clk/renesas/clk-mstp.c index 5bc473c2adb3..2f65fe2c6bdf 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev) pm_clk_destroy(dev); } +static struct device_node *cpg_mstp_pd_np __initdata = NULL; +static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata = NULL; + void __init cpg_mstp_add_clk_domain(struct device_node *np) { struct generic_pm_domain *pd; @@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np) pd->detach_dev = cpg_mstp_detach_dev; pm_genpd_init(pd, &pm_domain_always_on_gov, false); - of_genpd_add_provider_simple(np, pd); + cpg_mstp_pd_np = of_node_get(np); + cpg_mstp_pd_genpd = pd; +} + +static int __init cpg_mstp_pd_init_provider(void) +{ + int error; + + if (!cpg_mstp_pd_np) + return -ENODEV; + + error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd); + + of_node_put(cpg_mstp_pd_np); + return error; } +postcore_initcall(cpg_mstp_pd_init_provider); diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index 354c981943b6..4221b1888b38 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw, p &= (1 << cmp->p.width) - 1; if (cmp->common.features & CCU_FEATURE_DUAL_DIV) - rate = (parent_rate / p) / m; + rate = (parent_rate / (p + cmp->p.offset)) / m; else rate = (parent_rate >> p) / m; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index e058ba027792..9f5ccc1720cb 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -2430,7 +2430,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) { int error; - __sev_platform_shutdown_locked(NULL); + __sev_platform_shutdown_locked(&error); if (sev_es_tmr) { /* diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index 036f21cac0a9..0a852011653c 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -211,8 +211,8 @@ static int dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll, struct netlink_ext_ack *extack) { + DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 }; const struct dpll_device_ops *ops = dpll_device_ops(dpll); - DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 }; enum dpll_clock_quality_level ql; int ret; @@ -221,7 +221,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll, ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack); if (ret) return ret; - for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) + for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql)) return -EMSGSIZE; diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 78b10c6ef7fe..2e93189d7142 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -41,7 +41,7 @@ /* * ABI version history is documented in linux/firewire-cdev.h. 
*/ -#define FW_CDEV_KERNEL_VERSION 5 +#define FW_CDEV_KERNEL_VERSION 6 #define FW_CDEV_VERSION_EVENT_REQUEST2 4 #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c index 12b24a717e43..284e762d92c4 100644 --- a/drivers/gpio/gpiolib-acpi-core.c +++ b/drivers/gpio/gpiolib-acpi-core.c @@ -942,8 +942,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, { struct acpi_device *adev = to_acpi_device_node(fwnode); bool can_fallback = acpi_can_fallback_to_crs(adev, con_id); - struct acpi_gpio_info info; + struct acpi_gpio_info info = {}; struct gpio_desc *desc; + int ret; desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info); if (IS_ERR(desc)) @@ -957,6 +958,12 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, acpi_gpio_update_gpiod_flags(dflags, &info); acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info); + + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, info.debounce * 10); + if (ret) + return ERR_PTR(ret); + return desc; } @@ -992,7 +999,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int ret; for (i = 0, idx = 0; idx <= index; i++) { - struct acpi_gpio_info info; + struct acpi_gpio_info info = {}; struct gpio_desc *desc; /* Ignore -EPROBE_DEFER, it only matters if idx matches */ diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c index bfb04e67c4bc..7b95d1b03361 100644 --- a/drivers/gpio/gpiolib-acpi-quirks.c +++ b/drivers/gpio/gpiolib-acpi-quirks.c @@ -319,6 +319,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { }, { /* + * Same as G1619-04. New model. 
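+	 * It uses the same PNP0C50:00@8 ignore_wake quirk as the G1619-04.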
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "PNP0C50:00@8", + }, + }, + { + /* * Spurious wakeups from GPIO 11 * Found in BIOS 1.04 * https://gitlab.freedesktop.org/drm/amd/-/issues/3954 diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index f7ea8e895c0c..fda170730468 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -396,6 +396,8 @@ source "drivers/gpu/drm/sprd/Kconfig" source "drivers/gpu/drm/imagination/Kconfig" +source "drivers/gpu/drm/tyr/Kconfig" + config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && HYPERV diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 4dafbdc8f86a..4b2f7d794275 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -220,6 +220,7 @@ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ obj-$(CONFIG_DRM_LIMA) += lima/ obj-$(CONFIG_DRM_PANFROST) += panfrost/ obj-$(CONFIG_DRM_PANTHOR) += panthor/ +obj-$(CONFIG_DRM_TYR) += tyr/ obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/ obj-$(CONFIG_DRM_MCDE) += mcde/ obj-$(CONFIG_DRM_TIDSS) += tidss/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 2d0fea87af79..64e7acff8f18 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -138,7 +138,6 @@ amdgpu-y += \ # add DCE block amdgpu-y += \ dce_v10_0.o \ - dce_v11_0.o \ amdgpu_vkms.o # add GFX block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 17848ce65d1f..2a0df4cabb99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -63,6 +63,7 @@ #include "kgd_pp_interface.h" #include "amd_shared.h" +#include "amdgpu_utils.h" #include "amdgpu_mode.h" #include "amdgpu_ih.h" #include "amdgpu_irq.h" @@ -434,7 +435,6 @@ struct amdgpu_clock { uint32_t default_mclk; uint32_t default_sclk; uint32_t default_dispclk; - uint32_t current_dispclk; uint32_t dp_extclk; uint32_t max_pixel_clock; }; @@ -545,7 +545,7 @@ struct amdgpu_wb { * this value can be accessed directly by using the offset as an index. * For the GPU address, it is necessary to use gpu_addr and the offset. 
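 * For example, a ring typically reads its read pointer on the CPU through
 * adev->wb.wb[ring->rptr_offs], while the GPU-visible address of the same
 * slot is adev->wb.gpu_addr + ring->rptr_offs * 4.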
*/ - volatile uint32_t *wb; + uint32_t *wb; /** * @gpu_addr: @@ -721,7 +721,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, /* VRAM scratch page for HDP bug, default vram page */ struct amdgpu_mem_scratch { struct amdgpu_bo *robj; - volatile uint32_t *ptr; + uint32_t *ptr; u64 gpu_addr; }; @@ -752,6 +752,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u struct amdgpu_mmio_remap { u32 reg_offset; resource_size_t bus_addr; + struct amdgpu_bo *bo; }; /* Define the HW IP blocks will be used in driver , add more if necessary */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index fbe7616555c8..a2879d2b7c8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc) { - if (adev->kfd.dev) - kgd2kfd_suspend(adev->kfd.dev, suspend_proc); + if (adev->kfd.dev) { + if (adev->in_s0ix) + kgd2kfd_stop_sched_all_nodes(adev->kfd.dev); + else + kgd2kfd_suspend(adev->kfd.dev, suspend_proc); + } } int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc) { int r = 0; - if (adev->kfd.dev) - r = kgd2kfd_resume(adev->kfd.dev, resume_proc); + if (adev->kfd.dev) { + if (adev->in_s0ix) + r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev); + else + r = kgd2kfd_resume(adev->kfd.dev, resume_proc); + } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 127927b16ee2..9e120c934cc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -428,7 +428,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd); void kgd2kfd_unlock_kfd(struct kfd_dev *kfd); int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id); +int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd); int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id); +int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd); bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id); bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry, bool retry_fault); @@ -518,11 +520,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id) return 0; } +static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) +{ + return 0; +} + static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) { return 0; } +static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) +{ + return 0; +} + static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) { return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9dfdc08cc887..763f2b8dcf13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -706,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); - adev->clock.current_dispclk = adev->clock.default_dispclk; adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock); if (adev->clock.max_pixel_clock == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 702f6610d024..66fb37b64388 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list) int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, struct drm_amdgpu_bo_list_entry **info_param) { - const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr); const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry); + const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr); + const uint32_t bo_info_size = in->bo_info_size; + const uint32_t bo_number = in->bo_number; struct drm_amdgpu_bo_list_entry *info; - int r; - - info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL); - if (!info) - return -ENOMEM; /* copy the handle array from userspace to a kernel buffer */ - r = -EFAULT; - if (likely(info_size == in->bo_info_size)) { - unsigned long bytes = in->bo_number * - in->bo_info_size; - - if (copy_from_user(info, uptr, bytes)) - goto error_free; - + if (likely(info_size == bo_info_size)) { + info = vmemdup_array_user(uptr, bo_number, info_size); + if (IS_ERR(info)) + return PTR_ERR(info); } else { - unsigned long bytes = min(in->bo_info_size, info_size); + const uint32_t bytes = min(bo_info_size, info_size); unsigned i; - memset(info, 0, in->bo_number * info_size); - for (i = 0; i < in->bo_number; ++i) { - if (copy_from_user(&info[i], uptr, bytes)) - goto error_free; + info = kvmalloc_array(bo_number, info_size, GFP_KERNEL); + if (!info) + return -ENOMEM; - uptr += in->bo_info_size; + memset(info, 0, bo_number * info_size); + for (i = 0; i < bo_number; ++i, uptr += bo_info_size) { + if (copy_from_user(&info[i], uptr, bytes)) { + kvfree(info); + return -EFAULT; + } } } *info_param = info; return 0; - -error_free: - kvfree(info); - return r; } int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2ac9729e4c86..defb511acc5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -178,25 +178,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, struct amdgpu_fpriv *fpriv = p->filp->driver_priv; unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { }; struct amdgpu_vm *vm = &fpriv->vm; - uint64_t *chunk_array_user; uint64_t *chunk_array; uint32_t uf_offset = 0; size_t size; int ret; int i; - chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), - GFP_KERNEL); - if (!chunk_array) - return -ENOMEM; - - /* get chunks */ - chunk_array_user = u64_to_user_ptr(cs->in.chunks); - if (copy_from_user(chunk_array, chunk_array_user, - sizeof(uint64_t)*cs->in.num_chunks)) { - ret = -EFAULT; - goto free_chunk; - } + chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks), + cs->in.num_chunks, + sizeof(uint64_t)); + if (IS_ERR(chunk_array)) + return PTR_ERR(chunk_array); p->nchunks = cs->in.num_chunks; p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), @@ -209,7 +201,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, for (i = 0; i < p->nchunks; i++) { struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL; struct drm_amdgpu_cs_chunk user_chunk; - uint32_t __user *cdata; chunk_ptr = u64_to_user_ptr(chunk_array[i]); if (copy_from_user(&user_chunk, chunk_ptr, @@ -222,20 +213,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = 
u64_to_user_ptr(user_chunk.chunk_data); - p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), - GFP_KERNEL); - if (p->chunks[i].kdata == NULL) { - ret = -ENOMEM; + p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data), + size, + sizeof(uint32_t)); + if (IS_ERR(p->chunks[i].kdata)) { + ret = PTR_ERR(p->chunks[i].kdata); i--; goto free_partial_kdata; } size *= sizeof(uint32_t); - if (copy_from_user(p->chunks[i].kdata, cdata, size)) { - ret = -EFAULT; - goto free_partial_kdata; - } /* Assume the worst on the following checks */ ret = -EINVAL; @@ -286,7 +273,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, } } - if (!p->gang_size) { + if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) { ret = -EINVAL; goto free_all_kdata; } @@ -1767,30 +1754,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, { struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_wait_fences *wait = data; - uint32_t fence_count = wait->in.fence_count; - struct drm_amdgpu_fence *fences_user; struct drm_amdgpu_fence *fences; int r; /* Get the fences from userspace */ - fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), - GFP_KERNEL); - if (fences == NULL) - return -ENOMEM; - - fences_user = u64_to_user_ptr(wait->in.fences); - if (copy_from_user(fences, fences_user, - sizeof(struct drm_amdgpu_fence) * fence_count)) { - r = -EFAULT; - goto err_free_fences; - } + fences = memdup_array_user(u64_to_user_ptr(wait->in.fences), + wait->in.fence_count, + sizeof(struct drm_amdgpu_fence)); + if (IS_ERR(fences)) + return PTR_ERR(fences); if (wait->in.wait_all) r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); else r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); -err_free_fences: kfree(fences); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bdfb80377e6a..a77000c2e0bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5072,6 +5072,10 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) if (!adev->in_s4 && (adev->flags & AMD_IS_APU)) return 0; + /* No need to evict when going to S5 through S4 callbacks */ + if (system_state == SYSTEM_POWER_OFF) + return 0; + ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); if (ret) { dev_warn(adev->dev, "evicting device resources failed\n"); @@ -5196,7 +5200,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) adev->in_suspend = true; if (amdgpu_sriov_vf(adev)) { - if (!adev->in_s0ix && !adev->in_runpm) + if (!adev->in_runpm) amdgpu_amdkfd_suspend_process(adev); amdgpu_virt_fini_data_exchange(adev); r = amdgpu_virt_request_full_gpu(adev, false); @@ -5216,10 +5220,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) amdgpu_device_ip_suspend_phase1(adev); - if (!adev->in_s0ix) { - amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); - amdgpu_userq_suspend(adev); - } + amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + amdgpu_userq_suspend(adev); r = amdgpu_device_evict_resources(adev); if (r) @@ -5314,15 +5316,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) goto exit; } - if (!adev->in_s0ix) { - r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); - if (r) - goto exit; + r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + if (r) + goto exit; - r = amdgpu_userq_resume(adev); - if 
(r) - goto exit; - } + r = amdgpu_userq_resume(adev); + if (r) + goto exit; r = amdgpu_device_ip_late_init(adev); if (r) @@ -5335,7 +5335,7 @@ exit: amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); - if (!adev->in_s0ix && !r && !adev->in_runpm) + if (!r && !adev->in_runpm) r = amdgpu_amdkfd_resume_process(adev); } @@ -6937,7 +6937,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta { struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + struct amdgpu_hive_info *hive __free(xgmi_put_hive) = + amdgpu_get_xgmi_hive(adev); struct amdgpu_reset_context reset_context; struct list_head device_list; @@ -6976,10 +6977,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta amdgpu_device_recovery_get_reset_lock(adev, &device_list); amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list, hive, false); - if (hive) { + if (hive) mutex_unlock(&hive->hive_lock); - amdgpu_put_xgmi_hive(hive); - } return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: /* Permanent error, prepare for device removal */ @@ -7161,7 +7160,7 @@ static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev) struct pci_dev *parent = pci_upstream_bridge(adev->pdev); int r; - if (parent->vendor != PCI_VENDOR_ID_ATI) + if (!parent || parent->vendor != PCI_VENDOR_ID_ATI) return; /* If already saved, return */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 91d638098889..b349bb3676d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) [AMDGPU_PL_GWS] = "gws", [AMDGPU_PL_OA] = "oa", [AMDGPU_PL_DOORBELL] = "doorbell", + [AMDGPU_PL_MMIO_REMAP] = "mmioremap", }; unsigned int hw_ip, i; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index c049848a56b2..2767e5122cb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj) struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); amdgpu_hmm_unregister(aobj); - ttm_bo_put(&aobj->tbo); + ttm_bo_fini(&aobj->tbo); } int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, @@ -458,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, /* always clear VRAM */ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; + if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP) + return -EINVAL; + /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 98aa99b314c9..a09ccf7d8aa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -2280,7 +2280,7 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring) * Return: * return the latest index. */ -u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer) +u32 amdgpu_gfx_csb_preamble_start(u32 *buffer) { u32 count = 0; @@ -2304,7 +2304,7 @@ u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer) * Return: * return the latest index. 
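 * Implementations of get_csb_buffer() typically chain the three helpers:
 *   count = amdgpu_gfx_csb_preamble_start(buffer);
 *   count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
 *   amdgpu_gfx_csb_preamble_end(buffer, count);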
*/ -u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count) +u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count) { const struct cs_section_def *sect = NULL; const struct cs_extent_def *ext = NULL; @@ -2331,7 +2331,7 @@ u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, * @buffer: This is an output variable that gets the PACKET3 preamble end. * @count: Index to start set the preemble end. */ -void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count) +void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count) { buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 08f268dab8f5..fb5f7a0ee029 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -642,9 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work); void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring); -u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer); -u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count); -void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count); +u32 amdgpu_gfx_csb_preamble_start(u32 *buffer); +u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count); +void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count); void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 57101d24422f..9cb72f0c5277 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -184,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "AMDGPU i2c hw bus %s", name); i2c->adapter.algo = &amdgpu_atombios_i2c_algo; - ret = i2c_add_adapter(&i2c->adapter); + ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter); if (ret) goto out_free; } else { @@ -215,15 +215,6 @@ out_free: } -void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) -{ - if (!i2c) - return; - WARN_ON(i2c->has_aux); - i2c_del_adapter(&i2c->adapter); - kfree(i2c); -} - void amdgpu_i2c_init(struct amdgpu_device *adev) { if (!adev->is_atom_fw) { @@ -248,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev) { int i; - for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { - if (adev->i2c_bus[i]) { - amdgpu_i2c_destroy(adev->i2c_bus[i]); + for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) + if (adev->i2c_bus[i]) adev->i2c_bus[i] = NULL; - } - } } /* looks up bus based on id */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 7f7ea046e209..f58b6be7fccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -56,14 +56,14 @@ struct amdgpu_ih_ring { bool use_bus_addr; struct amdgpu_bo *ring_obj; - volatile uint32_t *ring; + uint32_t *ring; uint64_t gpu_addr; uint64_t wptr_addr; - volatile uint32_t *wptr_cpu; + uint32_t *wptr_cpu; uint64_t rptr_addr; - volatile uint32_t *rptr_cpu; + uint32_t *rptr_cpu; bool enabled; unsigned rptr; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 22da65f45226..6b7d66b6d4cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -540,3 +540,68 @@ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_pri drm_printf(p, "\nInactive Instance:JPEG%d\n", i); } } + +static inline bool amdgpu_jpeg_reg_valid(u32 reg) +{ + if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END || + (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END)) + return false; + else + return true; +} + +/** + * amdgpu_jpeg_dec_parse_cs - command submission parser + * + * @parser: Command submission parser context + * @job: the job to parse + * @ib: the IB to parse + * + * Parse the command stream, return -EINVAL for invalid packet, + * 0 otherwise + */ + +int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser, + struct amdgpu_job *job, + struct amdgpu_ib *ib) +{ + u32 i, reg, res, cond, type; + struct amdgpu_device *adev = parser->adev; + + for (i = 0; i < ib->length_dw ; i += 2) { + reg = CP_PACKETJ_GET_REG(ib->ptr[i]); + res = CP_PACKETJ_GET_RES(ib->ptr[i]); + cond = CP_PACKETJ_GET_COND(ib->ptr[i]); + type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); + + if (res) /* only support 0 at the moment */ + return -EINVAL; + + switch (type) { + case PACKETJ_TYPE0: + if (cond != PACKETJ_CONDITION_CHECK0 || + !amdgpu_jpeg_reg_valid(reg)) { + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); + return -EINVAL; + } + break; + case PACKETJ_TYPE3: + if (cond != PACKETJ_CONDITION_CHECK3 || + !amdgpu_jpeg_reg_valid(reg)) { + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); + return -EINVAL; + } + break; + case PACKETJ_TYPE6: + if (ib->ptr[i] == CP_PACKETJ_NOP) + continue; + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); + return -EINVAL; + default: + dev_err(adev->dev, "Unknown packet type %d !\n", type); + return -EINVAL; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 4f0775e39b54..346ae0ab09d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -25,11 +25,18 @@ #define __AMDGPU_JPEG_H__ #include "amdgpu_ras.h" +#include "amdgpu_cs.h" #define AMDGPU_MAX_JPEG_INSTANCES 4 #define AMDGPU_MAX_JPEG_RINGS 10 #define AMDGPU_MAX_JPEG_RINGS_4_0_3 8 +#define JPEG_REG_RANGE_START 0x4000 +#define JPEG_REG_RANGE_END 0x41c2 +#define JPEG_ATOMIC_RANGE_START 0x4120 +#define JPEG_ATOMIC_RANGE_END 0x412A + + #define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) #define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) @@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev, const struct amdgpu_hwip_reg_entry *reg, u32 count); void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block); void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p); +int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser, + struct amdgpu_job *job, + struct amdgpu_ib *ib); #endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 8a76960803c6..8676400834fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -939,6 +939,10 @@ out: if (adev->gfx.config.ta_cntl2_truncate_coord_mode) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD; + /* Gang submit is not supported under SRIOV currently */ + if 
(!amdgpu_sriov_vf(adev)) + dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT; + if (amdgpu_passthrough(adev)) dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT << AMDGPU_IDS_FLAGS_MODE_SHIFT) & diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index d18bade9c98f..e08f58de4b17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -153,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) c++; } + if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) { + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].mem_type = AMDGPU_PL_MMIO_REMAP; + places[c].flags = 0; + c++; + } + if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; places[c].lpfn = 0; @@ -1546,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo) return AMDGPU_PL_OA; case AMDGPU_GEM_DOMAIN_DOORBELL: return AMDGPU_PL_DOORBELL; + case AMDGPU_GEM_DOMAIN_MMIO_REMAP: + return AMDGPU_PL_MMIO_REMAP; default: return TTM_PL_SYSTEM; } @@ -1629,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) case AMDGPU_PL_DOORBELL: placement = "DOORBELL"; break; + case AMDGPU_PL_MMIO_REMAP: + placement = "MMIO REMAP"; + break; case TTM_PL_SYSTEM: default: placement = "CPU"; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 87523fcd4386..656b8a931dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -167,6 +167,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) return AMDGPU_GEM_DOMAIN_OA; case AMDGPU_PL_DOORBELL: return AMDGPU_GEM_DOMAIN_DOORBELL; + case AMDGPU_PL_MMIO_REMAP: + return AMDGPU_GEM_DOMAIN_MMIO_REMAP; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 3696f48c233b..1578e4e2bf84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -506,7 +506,8 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block) } ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, - AMDGPU_GEM_DOMAIN_VRAM, + (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? 
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, &psp->fw_pri_bo, &psp->fw_pri_mc_addr, &psp->fw_pri_buf); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index 38face981c3e..6e8aad91bcd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t copy_pos += sizeof(uint32_t); - ta_bin = kzalloc(ta_bin_len, GFP_KERNEL); - if (!ta_bin) - return -ENOMEM; - if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) { - ret = -EFAULT; - goto err_free_bin; - } + ta_bin = memdup_user(&buf[copy_pos], ta_bin_len); + if (IS_ERR(ta_bin)) + return PTR_ERR(ta_bin); /* Set TA context and functions */ set_ta_context_funcs(psp, ta_type, &context); @@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size return -EFAULT; copy_pos += sizeof(uint32_t); - shared_buf = kzalloc(shared_buf_len, GFP_KERNEL); - if (!shared_buf) - return -ENOMEM; - if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) { - ret = -EFAULT; - goto err_free_shared_buf; - } + shared_buf = memdup_user(&buf[copy_pos], shared_buf_len); + if (IS_ERR(shared_buf)) + return PTR_ERR(shared_buf); set_ta_context_funcs(psp, ta_type, &context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 7fe5b1940df8..e0ee21150860 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -219,10 +219,17 @@ static int amdgpu_check_address_validity(struct amdgpu_device *adev, struct amdgpu_vram_block_info blk_info; uint64_t page_pfns[32] = {0}; int i, ret, count; + bool hit = false; if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) return 0; + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_check_vf_critical_region(adev, address, &hit)) + return -EPERM; + return hit ? 
-EACCES : 0; + } + if ((address >= adev->gmc.mc_vram_size) || (address >= RAS_UMC_INJECT_ADDR_LIMIT)) return -EFAULT; @@ -2702,6 +2709,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct amdgpu_device *adev = ras->adev; struct list_head device_list, *device_list_handle = NULL; struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + unsigned int error_query_mode; enum ras_event_type type; if (hive) { @@ -2730,6 +2738,13 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) device_list_handle = &device_list; } + if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) { + if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) { + /* wait 500ms to ensure pmfw polling mca bank info done */ + msleep(500); + } + } + type = amdgpu_ras_get_fatal_error_event(adev); list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 50fcd86e1033..be2e56ce1355 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, break; case TTM_PL_TT: case AMDGPU_PL_DOORBELL: + case AMDGPU_PL_MMIO_REMAP: node = to_ttm_range_mgr_node(res)->mm_nodes; while (start >= node->size << PAGE_SHIFT) start -= node++->size << PAGE_SHIFT; @@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) break; case TTM_PL_TT: case AMDGPU_PL_DOORBELL: + case AMDGPU_PL_MMIO_REMAP: node = cur->node; cur->node = ++node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 486c3646710c..8f6ce948c684 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -364,7 +364,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Allocate ring buffer */ if (ring->ring_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &ring->ring_obj, &ring->gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 7670f5d82b9e..b6b649179776 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -114,7 +114,7 @@ struct amdgpu_sched { */ struct amdgpu_fence_driver { uint64_t gpu_addr; - volatile uint32_t *cpu_addr; + uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ uint32_t sync_seq; atomic_t last_seq; @@ -211,7 +211,18 @@ struct amdgpu_ring_funcs { bool support_64bit_ptrs; bool no_user_fence; bool secure_submission_supported; - unsigned extra_dw; + + /** + * @extra_bytes: + * + * Optional extra space in bytes that is added to the ring size + * when allocating the BO that holds the contents of the ring. + * This space isn't used for command submission to the ring, + * but is just there to satisfy some hardware requirements or + * implement workarounds. It's up to the implementation of each + * specific ring to initialize this space. 
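+	 * For example, a ring that must keep a small scratch area right after
+	 * the ring contents would set extra_bytes to that size (say, 64) in
+	 * its amdgpu_ring_funcs and initialize the area during ring setup.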
+ */ + unsigned extra_bytes; /* ring read/write ptr handling */ u64 (*get_rptr)(struct amdgpu_ring *ring); @@ -298,7 +309,7 @@ struct amdgpu_ring { unsigned int ring_backup_entries_to_copy; unsigned rptr_offs; u64 rptr_gpu_addr; - volatile u32 *rptr_cpu_addr; + u32 *rptr_cpu_addr; /** * @wptr: @@ -378,19 +389,19 @@ struct amdgpu_ring { * This is the CPU address pointer in the writeback slot. This is used * to commit changes to the GPU. */ - volatile u32 *wptr_cpu_addr; + u32 *wptr_cpu_addr; unsigned fence_offs; u64 fence_gpu_addr; - volatile u32 *fence_cpu_addr; + u32 *fence_cpu_addr; uint64_t current_ctx; char name[16]; u32 trail_seq; unsigned trail_fence_offs; u64 trail_fence_gpu_addr; - volatile u32 *trail_fence_cpu_addr; + u32 *trail_fence_cpu_addr; unsigned cond_exe_offs; u64 cond_exe_gpu_addr; - volatile u32 *cond_exe_cpu_addr; + u32 *cond_exe_cpu_addr; unsigned int set_q_mode_offs; u32 *set_q_mode_ptr; u64 set_q_mode_token; @@ -470,10 +481,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring, static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) { - int i = 0; - while (i <= ring->buf_mask) - ring->ring[i++] = ring->funcs->nop; - + memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1); } static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index db5791e1a7ce..5aa830a02d80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id) int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) { const u32 *src_ptr; - volatile u32 *dst_ptr; + u32 *dst_ptr; u32 i; int r; @@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev) void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev) { const __le32 *fw_data; - volatile u32 *dst_ptr; + u32 *dst_ptr; int me, i, max_me; u32 bo_offset = 0; u32 table_offset, table_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index c210625be220..2ce310b31942 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -251,7 +251,7 @@ struct amdgpu_rlc_funcs { * and it also provides a pointer to it which is used by the firmware * to load the clear state in some cases. 
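	 * The buffer is held in the clear_state_obj BO below and reached
	 * through cs_ptr, with clear_state_gpu_addr as the address the
	 * firmware uses.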
*/ - void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); + void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer); int (*get_cp_table_num)(struct amdgpu_device *adev); int (*resume)(struct amdgpu_device *adev); void (*stop)(struct amdgpu_device *adev); @@ -275,19 +275,19 @@ struct amdgpu_rlc { /* for power gating */ struct amdgpu_bo *save_restore_obj; uint64_t save_restore_gpu_addr; - volatile uint32_t *sr_ptr; + uint32_t *sr_ptr; const u32 *reg_list; u32 reg_list_size; /* for clear state */ struct amdgpu_bo *clear_state_obj; uint64_t clear_state_gpu_addr; - volatile uint32_t *cs_ptr; + uint32_t *cs_ptr; const struct cs_section_def *cs_data; u32 clear_state_size; /* for cp tables */ struct amdgpu_bo *cp_table_obj; uint64_t cp_table_gpu_addr; - volatile uint32_t *cp_table_ptr; + uint32_t *cp_table_ptr; u32 cp_table_size; /* safe mode for updating CG/PG state */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 428265046815..71b6691edab4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, case AMDGPU_PL_GWS: case AMDGPU_PL_OA: case AMDGPU_PL_DOORBELL: + case AMDGPU_PL_MMIO_REMAP: placement->num_placement = 0; return; @@ -448,7 +449,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, return false; if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || - res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL) + res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL || + res->mem_type == AMDGPU_PL_MMIO_REMAP) return true; if (res->mem_type != TTM_PL_VRAM) @@ -539,10 +541,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, old_mem->mem_type == AMDGPU_PL_GWS || old_mem->mem_type == AMDGPU_PL_OA || old_mem->mem_type == AMDGPU_PL_DOORBELL || + old_mem->mem_type == AMDGPU_PL_MMIO_REMAP || new_mem->mem_type == AMDGPU_PL_GDS || new_mem->mem_type == AMDGPU_PL_GWS || new_mem->mem_type == AMDGPU_PL_OA || - new_mem->mem_type == AMDGPU_PL_DOORBELL) { + new_mem->mem_type == AMDGPU_PL_DOORBELL || + new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) { /* Nothing to save here */ amdgpu_bo_move_notify(bo, evict, new_mem); ttm_bo_move_null(bo, new_mem); @@ -630,6 +634,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, mem->bus.is_iomem = true; mem->bus.caching = ttm_uncached; break; + case AMDGPU_PL_MMIO_REMAP: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.offset += adev->rmmio_remap.bus_addr; + mem->bus.is_iomem = true; + mem->bus.caching = ttm_uncached; + break; default: return -EINVAL; } @@ -647,6 +657,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, if (bo->resource->mem_type == AMDGPU_PL_DOORBELL) return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT; + else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP) + return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT; return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; } @@ -1356,7 +1368,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) if (mem && (mem->mem_type == TTM_PL_TT || mem->mem_type == AMDGPU_PL_DOORBELL || - mem->mem_type == AMDGPU_PL_PREEMPT)) { + mem->mem_type == AMDGPU_PL_PREEMPT || + mem->mem_type == AMDGPU_PL_MMIO_REMAP)) { flags |= AMDGPU_PTE_SYSTEM; if (ttm->caching == ttm_cached) @@ -1843,6 +1856,59 @@ static void 
amdgpu_ttm_pools_fini(struct amdgpu_device *adev) adev->mman.ttm_pools = NULL; } +/** + * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO + * @adev: amdgpu device + * + * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the + * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host + * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular + * GEM object (amdgpu_bo_create). + * + * Return: + * * 0 on success or intentional skip (feature not present/unsupported) + * * negative errno on allocation failure + */ +static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev) +{ + struct amdgpu_bo_param bp; + int r; + + /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */ + if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE) + return 0; + + memset(&bp, 0, sizeof(bp)); + + /* Create exactly one GEM BO in the MMIO_REMAP domain. */ + bp.type = ttm_bo_type_device; /* userspace-mappable GEM */ + bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */ + bp.byte_align = AMDGPU_GPU_PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP; + bp.flags = 0; + bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + + r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo); + if (r) + return r; + + return 0; +} + +/** + * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO + * @adev: amdgpu device + * + * Frees the kernel-owned MMIO_REMAP BO if it was allocated by + * amdgpu_ttm_mmio_remap_bo_init(). + */ +static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_unref(&adev->rmmio_remap.bo); + adev->rmmio_remap.bo = NULL; +} + /* * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. 
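/*
 * Illustrative sketch, not part of the patch: how the one-page MMIO_REMAP
 * pool resolves to a CPU-visible bus address. amdgpu_ttm_io_mem_reserve()
 * above does the equivalent offset math, using adev->rmmio_remap.bus_addr
 * as the base exposed by the hardware.
 */
static resource_size_t mmio_remap_bus_addr(struct amdgpu_device *adev,
					   struct ttm_resource *mem)
{
	/* mem->start is a page index within the single-page MMIO_REMAP pool */
	return adev->rmmio_remap.bus_addr + (mem->start << PAGE_SHIFT);
}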
@@ -1879,11 +1945,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } adev->mman.initialized = true; - /* Initialize VRAM pool with all of VRAM divided into pages */ - r = amdgpu_vram_mgr_init(adev); - if (r) { - dev_err(adev->dev, "Failed initializing VRAM heap.\n"); - return r; + if (!adev->gmc.is_app_apu) { + /* Initialize VRAM pool with all of VRAM divided into pages */ + r = amdgpu_vram_mgr_init(adev); + if (r) { + dev_err(adev->dev, "Failed initializing VRAM heap.\n"); + return r; + } } /* Change the size here instead of the init above so only lpfn is affected */ @@ -2010,6 +2078,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + /* Initialize MMIO-remap pool (single page 4K) */ + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1); + if (r) { + dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n"); + return r; + } + + /* Allocate the singleton MMIO_REMAP BO (4K) if supported */ + r = amdgpu_ttm_mmio_remap_bo_init(adev); + if (r) + return r; + /* Initialize preemptible memory pool */ r = amdgpu_preempt_mgr_init(adev); if (r) { @@ -2072,6 +2152,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) } amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, &adev->mman.sdma_access_ptr); + + amdgpu_ttm_mmio_remap_bo_fini(adev); amdgpu_ttm_fw_reserve_vram_fini(adev); amdgpu_ttm_drv_reserve_vram_fini(adev); @@ -2084,7 +2166,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) drm_dev_exit(idx); } - amdgpu_vram_mgr_fini(adev); + if (!adev->gmc.is_app_apu) + amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); amdgpu_preempt_mgr_fini(adev); amdgpu_doorbell_fini(adev); @@ -2093,6 +2176,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP); ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; dev_info(adev->dev, "amdgpu: ttm finalized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index d82d107fdcc6..1c5c7836ce4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,7 +34,8 @@ #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) #define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) #define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4) -#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5) +#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5) +#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 467e8fa6cb8b..a22e6025de61 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -44,8 +44,40 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) return userq_ip_mask; } +int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, + u64 expected_size) +{ + struct amdgpu_bo_va_mapping *va_map; + u64 user_addr; + u64 size; + int r = 0; + + user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT; + size = expected_size >> AMDGPU_GPU_PAGE_SHIFT; + + r = amdgpu_bo_reserve(vm->root.bo, false); + if (r) + return r; + + va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr); + if (!va_map) { + r = -EINVAL; + goto out_err; + } + /* The userq range must be fully contained in an existing VM mapping */ + if (user_addr >= va_map->start
&& + va_map->last - user_addr + 1 >= size) { + amdgpu_bo_unreserve(vm->root.bo); + return 0; + } + +out_err: + amdgpu_bo_unreserve(vm->root.bo); + return r; +} + static int -amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, +amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) { struct amdgpu_device *adev = uq_mgr->adev; @@ -54,6 +86,49 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, int r = 0; if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { + r = userq_funcs->preempt(uq_mgr, queue); + if (r) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + } else { + queue->state = AMDGPU_USERQ_STATE_PREEMPTED; + } + } + + return r; +} + +static int +amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) { + r = userq_funcs->restore(uq_mgr, queue); + if (r) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + } else { + queue->state = AMDGPU_USERQ_STATE_MAPPED; + } + } + + return r; +} + +static int +amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) || + (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) { r = userq_funcs->unmap(uq_mgr, queue); if (r) queue->state = AMDGPU_USERQ_STATE_HUNG; @@ -112,22 +187,6 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr, kfree(queue); } -int -amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr) -{ - struct amdgpu_usermode_queue *queue; - int queue_id; - int ret = 0; - - mutex_lock(&uq_mgr->userq_mutex); - /* Resume all the queues for this process */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) - ret += queue->state == AMDGPU_USERQ_STATE_MAPPED; - - mutex_unlock(&uq_mgr->userq_mutex); - return ret; -} - static struct amdgpu_usermode_queue * amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid) { @@ -323,6 +382,11 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) debugfs_remove_recursive(queue->debugfs_queue); #endif r = amdgpu_userq_unmap_helper(uq_mgr, queue); + /* TODO: a userq HW unmap error requires a device reset */ + if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) { + drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq that is still mapped in HW\n"); + queue->state = AMDGPU_USERQ_STATE_HUNG; + } amdgpu_userq_cleanup(uq_mgr, queue, queue_id); mutex_unlock(&uq_mgr->userq_mutex); @@ -404,27 +468,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; - /* Usermode queues are only supported for GFX IP as of now */ - if (args->in.ip_type != AMDGPU_HW_IP_GFX && - args->in.ip_type != AMDGPU_HW_IP_DMA && - args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { - drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n", - args->in.ip_type); - return -EINVAL; - } - r = amdgpu_userq_priority_permit(filp, priority); if (r) return r; - if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && - (args->in.ip_type != AMDGPU_HW_IP_GFX) && - (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && - !amdgpu_is_tmz(adev)) { - drm_file_err(uq_mgr->file, "Secure only
supported on GFX/Compute queues\n"); - return -EINVAL; - } - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n"); @@ -456,6 +503,14 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) r = -ENOMEM; goto unlock; } + + /* Validate the userq virtual addresses. */ + if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) || + amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || + amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { + kfree(queue); + goto unlock; + } queue->doorbell_handle = args->in.doorbell_handle; queue->queue_type = args->in.ip_type; queue->vm = &fpriv->vm; @@ -543,22 +598,45 @@ unlock: return r; } -int amdgpu_userq_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) +static int amdgpu_userq_input_args_validate(struct drm_device *dev, + union drm_amdgpu_userq *args, + struct drm_file *filp) { - union drm_amdgpu_userq *args = data; - int r; + struct amdgpu_device *adev = drm_to_adev(dev); switch (args->in.op) { case AMDGPU_USERQ_OP_CREATE: if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK | AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE)) return -EINVAL; - r = amdgpu_userq_create(filp, args); - if (r) - drm_file_err(filp, "Failed to create usermode queue\n"); - break; + /* Usermode queues are only supported for GFX IP as of now */ + if (args->in.ip_type != AMDGPU_HW_IP_GFX && + args->in.ip_type != AMDGPU_HW_IP_DMA && + args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { + drm_file_err(filp, "Usermode queue doesn't support IP type %u\n", + args->in.ip_type); + return -EINVAL; + } + + if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && + (args->in.ip_type != AMDGPU_HW_IP_GFX) && + (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && + !amdgpu_is_tmz(adev)) { + drm_file_err(filp, "Secure only supported on GFX/Compute queues\n"); + return -EINVAL; + } + if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET || + args->in.queue_va == 0 || + args->in.queue_size == 0) { + drm_file_err(filp, "invalid userq queue va or size\n"); + return -EINVAL; + } + if (!args->in.wptr_va || !args->in.rptr_va) { + drm_file_err(filp, "invalid userq queue rptr or wptr\n"); + return -EINVAL; + } + break; case AMDGPU_USERQ_OP_FREE: if (args->in.ip_type || args->in.doorbell_handle || @@ -571,6 +649,31 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data, args->in.mqd || args->in.mqd_size) return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +int amdgpu_userq_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + union drm_amdgpu_userq *args = data; + int r; + + if (amdgpu_userq_input_args_validate(dev, args, filp) < 0) + return -EINVAL; + + switch (args->in.op) { + case AMDGPU_USERQ_OP_CREATE: + r = amdgpu_userq_create(filp, args); + if (r) + drm_file_err(filp, "Failed to create usermode queue\n"); + break; + + case AMDGPU_USERQ_OP_FREE: r = amdgpu_userq_destroy(filp, args->in.queue_id); if (r) drm_file_err(filp, "Failed to destroy usermode queue\n"); @@ -593,7 +696,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) /* Resume all the queues for this process */ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { - r = amdgpu_userq_map_helper(uq_mgr, queue); + r = amdgpu_userq_restore_helper(uq_mgr, queue); if (r) ret = r; } @@ -603,108 +706,106 @@
return ret; } +static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + +/* Handle all BOs on the invalidated list, validate them and update the PTs */ static int -amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) +amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec, + struct amdgpu_vm *vm) { struct ttm_operation_ctx ctx = { false, false }; + struct amdgpu_bo_va *bo_va; + struct amdgpu_bo *bo; int ret; - amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + spin_lock(&vm->invalidated_lock); + while (!list_empty(&vm->invalidated)) { + bo_va = list_first_entry(&vm->invalidated, + struct amdgpu_bo_va, + base.vm_status); + spin_unlock(&vm->invalidated_lock); - ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (ret) - DRM_ERROR("Fail to validate\n"); + bo = bo_va->base.bo; + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2); + if (unlikely(ret)) + return ret; - return ret; + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + return ret; + + /* This moves the bo_va to the done list */ + ret = amdgpu_vm_bo_update(adev, bo_va, false); + if (ret) + return ret; + + spin_lock(&vm->invalidated_lock); + } + spin_unlock(&vm->invalidated_lock); + + return 0; } +/* Make sure the whole VM is ready to be used */ static int -amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr) +amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); - struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; - struct ww_acquire_ctx *ticket; struct drm_exec exec; - struct amdgpu_bo *bo; - struct dma_resv *resv; - bool clear, unlock; - int ret = 0; + int ret; drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { - ret = amdgpu_vm_lock_pd(vm, &exec, 2); + ret = amdgpu_vm_lock_pd(vm, &exec, 1); drm_exec_retry_on_contention(&exec); - if (unlikely(ret)) { - drm_file_err(uq_mgr->file, "Failed to lock PD\n"); + if (unlikely(ret)) goto unlock_all; - } - - /* Lock the done list */ - list_for_each_entry(bo_va, &vm->done, base.vm_status) { - bo = bo_va->base.bo; - if (!bo) - continue; - - ret = drm_exec_lock_obj(&exec, &bo->tbo.base); - drm_exec_retry_on_contention(&exec); - if (unlikely(ret)) - goto unlock_all; - } - } - spin_lock(&vm->status_lock); - while (!list_empty(&vm->moved)) { - bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, - base.vm_status); - spin_unlock(&vm->status_lock); - - /* Per VM BOs never need to bo cleared in the page tables */ - ret = amdgpu_vm_bo_update(adev, bo_va, false); - if (ret) + ret = amdgpu_vm_lock_done_list(vm, &exec, 1); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) goto unlock_all; - spin_lock(&vm->status_lock); - } - - ticket = &exec.ticket; - while (!list_empty(&vm->invalidated)) { - bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, - base.vm_status); - resv = bo_va->base.bo->tbo.base.resv; - spin_unlock(&vm->status_lock); - bo = bo_va->base.bo; - ret = amdgpu_userq_validate_vm_bo(NULL, bo); - if (ret) { - drm_file_err(uq_mgr->file, "Failed to validate BO\n"); + /* This validates PDs, PTs and per VM BOs */ + ret = amdgpu_vm_validate(adev, vm, NULL, + 
amdgpu_userq_validate_vm, + NULL); + if (unlikely(ret)) goto unlock_all; - } - /* Try to reserve the BO to avoid clearing its ptes */ - if (!adev->debug_vm && dma_resv_trylock(resv)) { - clear = false; - unlock = true; - /* The caller is already holding the reservation lock */ - } else if (dma_resv_locking_ctx(resv) == ticket) { - clear = false; - unlock = false; - /* Somebody else is using the BO right now */ - } else { - clear = true; - unlock = false; - } + /* This locks and validates the remaining evicted BOs */ + ret = amdgpu_userq_bo_validate(adev, &exec, vm); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unlock_all; + } - ret = amdgpu_vm_bo_update(adev, bo_va, clear); + ret = amdgpu_vm_handle_moved(adev, vm, NULL); + if (ret) + goto unlock_all; - if (unlock) - dma_resv_unlock(resv); - if (ret) - goto unlock_all; + ret = amdgpu_vm_update_pdes(adev, vm, false); + if (ret) + goto unlock_all; - spin_lock(&vm->status_lock); - } - spin_unlock(&vm->status_lock); + /* + * We need to wait for all VM updates to finish before restarting the + * queues. Using the done list like that is now ok since everything is + * locked in place. + */ + list_for_each_entry(bo_va, &vm->done, base.vm_status) + dma_fence_wait(bo_va->last_pt_update, false); + dma_fence_wait(vm->last_update, false); ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec); if (ret) @@ -725,7 +826,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work) mutex_lock(&uq_mgr->userq_mutex); - ret = amdgpu_userq_validate_bos(uq_mgr); + ret = amdgpu_userq_vm_validate(uq_mgr); if (ret) { drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n"); goto unlock; @@ -750,7 +851,7 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) /* Try to unmap all the queues in this process ctx */ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { - r = amdgpu_userq_unmap_helper(uq_mgr, queue); + r = amdgpu_userq_preempt_helper(uq_mgr, queue); if (r) ret = r; } @@ -876,7 +977,10 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) cancel_delayed_work_sync(&uqm->resume_work); mutex_lock(&uqm->userq_mutex); idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - r = amdgpu_userq_unmap_helper(uqm, queue); + if (adev->in_s0ix) + r = amdgpu_userq_preempt_helper(uqm, queue); + else + r = amdgpu_userq_unmap_helper(uqm, queue); if (r) ret = r; } @@ -901,7 +1005,10 @@ int amdgpu_userq_resume(struct amdgpu_device *adev) list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { mutex_lock(&uqm->userq_mutex); idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - r = amdgpu_userq_map_helper(uqm, queue); + if (adev->in_s0ix) + r = amdgpu_userq_restore_helper(uqm, queue); + else + r = amdgpu_userq_map_helper(uqm, queue); if (r) ret = r; } @@ -935,7 +1042,7 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { - r = amdgpu_userq_unmap_helper(uqm, queue); + r = amdgpu_userq_preempt_helper(uqm, queue); if (r) ret = r; } @@ -969,7 +1076,7 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { - r = amdgpu_userq_map_helper(uqm, queue); + r = amdgpu_userq_restore_helper(uqm, queue); if (r) ret = r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h index 
1bd84f4cce78..c027dd916672 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -120,8 +120,6 @@ void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr, void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_eviction_fence *ev_fence); -int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr); - void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_eviction_fence_mgr *evf_mgr); @@ -139,4 +137,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); +int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, + u64 expected_size); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h new file mode 100644 index 000000000000..1e40ca3b1584 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef AMDGPU_UTILS_H_ +#define AMDGPU_UTILS_H_ + +/* ---------- Generic 2-bit capability attribute encoding ---------- + * 00 INVALID, 01 RO, 10 WO, 11 RW + */ +enum amdgpu_cap_attr { + AMDGPU_CAP_ATTR_INVALID = 0, + AMDGPU_CAP_ATTR_RO = 1 << 0, + AMDGPU_CAP_ATTR_WO = 1 << 1, + AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO), +}; + +#define AMDGPU_CAP_ATTR_BITS 2 +#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1) + +/* Internal helper to build helpers for a given enum NAME */ +#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \ +enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \ +struct NAME##_caps { \ + DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \ +}; \ +static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \ +{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \ +static inline void NAME##_attr_init(struct NAME##_caps *c) \ +{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \ +static inline int NAME##_attr_set(struct NAME##_caps *c, \ + enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \ +{ \ + if (!c) \ + return -EINVAL; \ + if (cap >= NAME##_COUNT) \ + return -EINVAL; \ + if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \ + return -EINVAL; \ + bitmap_write(c->bmap, (unsigned long)attr, \ + NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \ + return 0; \ +} \ +static inline int NAME##_attr_get(const struct NAME##_caps *c, \ + enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \ +{ \ + unsigned long v; \ + if (!c || !out) \ + return -EINVAL; \ + if (cap >= NAME##_COUNT) \ + return -EINVAL; \ + v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \ + *out = (enum amdgpu_cap_attr)v; \ + return 0; \ +} \ +static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \ +{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \ +static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \ +{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \ +static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \ +{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; } + +/* Element expander for enum creation */ +#define _CAP_ENUM_ELEM(x) x, + +/* Public macro: declare enum + helpers from an X-macro list */ +#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \ + enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \ + DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) + +#endif /* AMDGPU_UTILS_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 595f0df17bcc..5e0786ea911b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -257,12 +257,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i) return 0; } -int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) +void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) { int j; if (adev->vcn.harvest_config & (1 << i)) - return 0; + return; amdgpu_bo_free_kernel( &adev->vcn.inst[i].dpg_sram_bo, @@ -292,8 +292,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock); mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround); - - return 0; } bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) @@ -1159,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file
*f, char __user *buf, { struct amdgpu_vcn_inst *vcn; void *log_buf; - volatile struct amdgpu_vcn_fwlog *plog; + struct amdgpu_vcn_fwlog *plog; unsigned int read_pos, write_pos, available, i, read_bytes = 0; unsigned int read_num[2] = {0}; @@ -1172,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf, log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; - plog = (volatile struct amdgpu_vcn_fwlog *)log_buf; + plog = (struct amdgpu_vcn_fwlog *)log_buf; read_pos = plog->rptr; write_pos = plog->wptr; @@ -1239,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i, void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn) { #if defined(CONFIG_DEBUG_FS) - volatile uint32_t *flag = vcn->fw_shared.cpu_addr; + uint32_t *flag = vcn->fw_shared.cpu_addr; void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size; - volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr; - volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr + struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr; + struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr + vcn->fw_shared.log_offset; *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG); fw_log->is_enabled = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 6d9acd36041d..bebfc2b34afe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -516,7 +516,7 @@ enum vcn_ring_type { int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i); int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i); -int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i); +void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i); int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i); int amdgpu_vcn_resume(struct amdgpu_device *adev, int i); void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 13f0cdeb59c4..3328ab63376b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -828,11 +828,14 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev) { ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1); ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1); + ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1); ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs, RATELIMIT_MSG_ON_RELEASE); ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs, RATELIMIT_MSG_ON_RELEASE); + ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs, + RATELIMIT_MSG_ON_RELEASE); mutex_init(&adev->virt.ras.ras_telemetry_mutex); @@ -1501,3 +1504,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev) if (virt->ops && virt->ops->req_bad_pages) virt->ops->req_bad_pages(adev); } + +static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev, + struct amdsriov_ras_telemetry *host_telemetry, + bool *hit) +{ + struct amd_sriov_ras_chk_criti *tmp = NULL; + uint32_t checksum, used_size; + + checksum = host_telemetry->header.checksum; + used_size = host_telemetry->header.used_size; + + if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + return 0; + + tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + if (checksum != amd_sriov_msg_checksum(tmp, 
used_size, 0, 0)) + goto out; + + if (hit) + *hit = tmp->hit ? true : false; + +out: + kfree(tmp); + + return 0; +} + +int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit) +{ + struct amdgpu_virt *virt = &adev->virt; + int r = -EPERM; + + if (!virt->ops || !virt->ops->req_ras_chk_criti) + return -EOPNOTSUPP; + + /* Host allows 15 ras telemetry requests per 60 seconds. After that, the Host + * will ignore incoming guest messages. Ratelimit the guest messages to + * prevent guest self-DoS. + */ + if (__ratelimit(&virt->ras.ras_chk_criti_rs)) { + mutex_lock(&virt->ras.ras_telemetry_mutex); + if (!virt->ops->req_ras_chk_criti(adev, addr)) + r = amdgpu_virt_cache_chk_criti_hit( + adev, virt->fw_reserve.ras_telemetry, hit); + mutex_unlock(&virt->ras.ras_telemetry_mutex); + } + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 58accf2259b3..d1172c8e58c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -98,6 +98,7 @@ struct amdgpu_virt_ops { int (*req_ras_err_count)(struct amdgpu_device *adev); int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr); int (*req_bad_pages)(struct amdgpu_device *adev); + int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr); }; /* @@ -252,10 +253,15 @@ struct amdgpu_virt_ras_err_handler_data { struct amdgpu_virt_ras { struct ratelimit_state ras_error_cnt_rs; struct ratelimit_state ras_cper_dump_rs; + struct ratelimit_state ras_chk_criti_rs; struct mutex ras_telemetry_mutex; uint64_t cper_rptr; }; +#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT) + +DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST); + /* GPU virtualization */ struct amdgpu_virt { uint32_t caps; @@ -274,6 +280,7 @@ struct amdgpu_virt { const struct amdgpu_virt_ops *ops; struct amdgpu_vf_error_buffer vf_errors; struct amdgpu_virt_fw_reserve fw_reserve; + struct amdgpu_virt_caps virt_caps; uint32_t gim_feature; uint32_t reg_access_mode; int req_init_data_ver; @@ -448,4 +455,5 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev); bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev, enum amdgpu_ras_block block); void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev); +int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 155bb9891a17..79bad9cbe2ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -14,7 +14,6 @@ #include "dce_v8_0.h" #endif #include "dce_v10_0.h" -#include "dce_v11_0.h" #include "ivsrcid/ivsrcid_vislands30.h" #include "amdgpu_vkms.h" #include "amdgpu_display.h" @@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block) case CHIP_TONGA: dce_v10_0_disable_dce(adev); break; - case CHIP_CARRIZO: - case CHIP_STONEY: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_VEGAM: - dce_v11_0_disable_dce(adev); - break; case CHIP_TOPAZ: #ifdef CONFIG_DRM_AMDGPU_SI case CHIP_HAINAN: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index dbda3a38a2b0..112ce584a5ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -128,6 +128,17 @@ struct amdgpu_vm_tlb_seq_struct { }; /** + * amdgpu_vm_assert_locked - check if VM is correctly locked + * @vm:
the VM which should be tested + * + * Asserts that the VM root PD is locked. + */ +static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm) +{ + dma_resv_assert_held(vm->root.bo->tbo.base.resv); +} + +/** * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping * * @adev: amdgpu_device pointer @@ -143,6 +154,8 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, { int r; + amdgpu_vm_assert_locked(vm); + if (vm->pasid == pasid) return 0; @@ -181,12 +194,11 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) struct amdgpu_bo *bo = vm_bo->bo; vm_bo->moved = true; - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm); if (bo->tbo.type == ttm_bo_type_kernel) list_move(&vm_bo->vm_status, &vm->evicted); else list_move_tail(&vm_bo->vm_status, &vm->evicted); - spin_unlock(&vm_bo->vm->status_lock); } /** * amdgpu_vm_bo_moved - vm_bo is moved @@ -198,9 +210,8 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); list_move(&vm_bo->vm_status, &vm_bo->vm->moved); - spin_unlock(&vm_bo->vm->status_lock); } /** @@ -213,9 +224,8 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); list_move(&vm_bo->vm_status, &vm_bo->vm->idle); - spin_unlock(&vm_bo->vm->status_lock); vm_bo->moved = false; } @@ -229,9 +239,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + spin_lock(&vm_bo->vm->invalidated_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); - spin_unlock(&vm_bo->vm->status_lock); + spin_unlock(&vm_bo->vm->invalidated_lock); } /** @@ -244,10 +254,9 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo) { + amdgpu_vm_assert_locked(vm_bo->vm); vm_bo->moved = true; - spin_lock(&vm_bo->vm->status_lock); list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); - spin_unlock(&vm_bo->vm->status_lock); } /** @@ -260,13 +269,11 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) { - if (vm_bo->bo->parent) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); + if (vm_bo->bo->parent) list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); - spin_unlock(&vm_bo->vm->status_lock); - } else { + else amdgpu_vm_bo_idle(vm_bo); - } } /** @@ -279,9 +286,8 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) */ static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) { - spin_lock(&vm_bo->vm->status_lock); + amdgpu_vm_assert_locked(vm_bo->vm); list_move(&vm_bo->vm_status, &vm_bo->vm->done); - spin_unlock(&vm_bo->vm->status_lock); } /** @@ -295,10 +301,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) { struct amdgpu_vm_bo_base *vm_bo, *tmp; - spin_lock(&vm->status_lock); + spin_lock(&vm->invalidated_lock); list_splice_init(&vm->done, &vm->invalidated); list_for_each_entry(vm_bo, &vm->invalidated, vm_status) vm_bo->moved = true; + spin_unlock(&vm->invalidated_lock); + + amdgpu_vm_assert_locked(vm); list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { struct
amdgpu_bo *bo = vm_bo->bo; @@ -308,14 +317,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) else if (bo->parent) list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); } - spin_unlock(&vm->status_lock); } /** * amdgpu_vm_update_shared - helper to update shared memory stat * @base: base structure for tracking BO usage in a VM * - * Takes the vm status_lock and updates the shared memory stat. If the basic + * Takes the vm stats_lock and updates the shared memory stat. If the basic * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats needs to be called * as well. */ @@ -327,7 +335,8 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base) uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo); bool shared; - spin_lock(&vm->status_lock); + dma_resv_assert_held(bo->tbo.base.resv); + spin_lock(&vm->stats_lock); shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); if (base->shared != shared) { base->shared = shared; @@ -339,7 +348,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base) vm->stats[bo_memtype].drm.private += size; } } - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } /** @@ -364,11 +373,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo) * be bo->tbo.resource * @sign: if we should add (+1) or subtract (-1) from the stat * - * Caller need to have the vm status_lock held. Useful for when multiple update + * Callers need to hold the vm stats_lock. Useful when multiple updates * need to happen at the same time. */ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, - struct ttm_resource *res, int sign) + struct ttm_resource *res, int sign) { struct amdgpu_vm *vm = base->vm; struct amdgpu_bo *bo = base->bo; @@ -392,7 +401,8 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, */ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) vm->stats[res_memtype].drm.purgeable += size; - if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype))) + if (!(bo->preferred_domains & + amdgpu_mem_type_to_domain(res_memtype))) vm->stats[bo_memtype].evicted += size; } } @@ -411,9 +421,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base, { struct amdgpu_vm *vm = base->vm; - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); amdgpu_vm_update_stats_locked(base, res, sign); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } /** @@ -439,10 +449,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, base->next = bo->vm_bo; bo->vm_bo = base; - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); if (!amdgpu_vm_is_bo_always_valid(vm, bo)) return; @@ -485,6 +495,42 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, } /** + * amdgpu_vm_lock_done_list - lock all BOs on the done list + * @vm: vm providing the BOs + * @exec: drm execution context + * @num_fences: number of extra fences to reserve + * + * Lock the BOs on the done list in the DRM execution context.
+ */ +int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences) +{ + struct list_head *prev = &vm->done; + struct amdgpu_bo_va *bo_va; + struct amdgpu_bo *bo; + int ret; + + /* We can only trust prev->next while holding the lock */ + spin_lock(&vm->invalidated_lock); + while (!list_is_head(prev->next, &vm->done)) { + bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status); + spin_unlock(&vm->invalidated_lock); + + bo = bo_va->base.bo; + if (bo) { + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1); + if (unlikely(ret)) + return ret; + } + spin_lock(&vm->invalidated_lock); + prev = prev->next; + } + spin_unlock(&vm->invalidated_lock); + + return 0; +} + +/** * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU * * @adev: amdgpu device pointer @@ -575,7 +621,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); - struct amdgpu_vm_bo_base *bo_base; + struct amdgpu_vm_bo_base *bo_base, *tmp; struct amdgpu_bo *bo; int r; @@ -588,13 +634,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - spin_lock(&vm->status_lock); - while (!list_empty(&vm->evicted)) { - bo_base = list_first_entry(&vm->evicted, - struct amdgpu_vm_bo_base, - vm_status); - spin_unlock(&vm->status_lock); - + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { bo = bo_base->bo; r = validate(param, bo); @@ -607,37 +647,21 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); amdgpu_vm_bo_relocated(bo_base); } - spin_lock(&vm->status_lock); } - while (ticket && !list_empty(&vm->evicted_user)) { - bo_base = list_first_entry(&vm->evicted_user, - struct amdgpu_vm_bo_base, - vm_status); - spin_unlock(&vm->status_lock); - - bo = bo_base->bo; - if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) { - struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm); + if (ticket) { + list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user, + vm_status) { + bo = bo_base->bo; + dma_resv_assert_held(bo->tbo.base.resv); - pr_warn_ratelimited("Evicted user BO is not reserved\n"); - if (ti) { - pr_warn_ratelimited("pid %d\n", ti->task.pid); - amdgpu_vm_put_task_info(ti); - } + r = validate(param, bo); + if (r) + return r; - return -EINVAL; + amdgpu_vm_bo_invalidated(bo_base); } - - r = validate(param, bo); - if (r) - return r; - - amdgpu_vm_bo_invalidated(bo_base); - - spin_lock(&vm->status_lock); } - spin_unlock(&vm->status_lock); amdgpu_vm_eviction_lock(vm); vm->evicting = false; @@ -660,13 +684,13 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { bool ret; + amdgpu_vm_assert_locked(vm); + amdgpu_vm_eviction_lock(vm); ret = !vm->evicting; amdgpu_vm_eviction_unlock(vm); - spin_lock(&vm->status_lock); ret &= list_empty(&vm->evicted); - spin_unlock(&vm->status_lock); spin_lock(&vm->immediate.lock); ret &= !vm->immediate.stopped; @@ -957,16 +981,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate) { struct amdgpu_vm_update_params params; - struct amdgpu_vm_bo_base *entry; + struct amdgpu_vm_bo_base *entry, *tmp; bool flush_tlb_needed = false; - LIST_HEAD(relocated); int r, idx; - spin_lock(&vm->status_lock); - list_splice_init(&vm->relocated, &relocated); - spin_unlock(&vm->status_lock); + amdgpu_vm_assert_locked(vm); - if (list_empty(&relocated)) + if (list_empty(&vm->relocated)) return 0; if 
(!drm_dev_enter(adev_to_drm(adev), &idx)) @@ -982,7 +1003,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, if (r) goto error; - list_for_each_entry(entry, &relocated, vm_status) { + list_for_each_entry(entry, &vm->relocated, vm_status) { /* vm_flush_needed after updating moved PDEs */ flush_tlb_needed |= entry->moved; @@ -998,9 +1019,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, if (flush_tlb_needed) atomic64_inc(&vm->tlb_seq); - while (!list_empty(&relocated)) { - entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base, - vm_status); + list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) { amdgpu_vm_bo_idle(entry); } @@ -1227,9 +1246,9 @@ error_free: void amdgpu_vm_get_memory(struct amdgpu_vm *vm, struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]) { - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } /** @@ -1596,29 +1615,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) { - struct amdgpu_bo_va *bo_va; + struct amdgpu_bo_va *bo_va, *tmp; struct dma_resv *resv; bool clear, unlock; int r; - spin_lock(&vm->status_lock); - while (!list_empty(&vm->moved)) { - bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, - base.vm_status); - spin_unlock(&vm->status_lock); - + list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to be cleared in the page tables */ r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; - spin_lock(&vm->status_lock); } + spin_lock(&vm->invalidated_lock); while (!list_empty(&vm->invalidated)) { bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, base.vm_status); resv = bo_va->base.bo->tbo.base.resv; - spin_unlock(&vm->status_lock); + spin_unlock(&vm->invalidated_lock); /* Try to reserve the BO to avoid clearing its ptes */ if (!adev->debug_vm && dma_resv_trylock(resv)) { @@ -1650,9 +1664,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) amdgpu_vm_bo_evicted_user(&bo_va->base); - spin_lock(&vm->status_lock); + spin_lock(&vm->invalidated_lock); } - spin_unlock(&vm->status_lock); + spin_unlock(&vm->invalidated_lock); return 0; } @@ -2181,9 +2195,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev, } } - spin_lock(&vm->status_lock); + spin_lock(&vm->invalidated_lock); list_del(&bo_va->base.vm_status); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->invalidated_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); @@ -2291,10 +2305,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem, for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - spin_lock(&vm->status_lock); + spin_lock(&vm->stats_lock); amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1); amdgpu_vm_update_stats_locked(bo_base, new_mem, +1); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->stats_lock); } amdgpu_vm_bo_invalidate(bo, evicted); @@ -2561,11 +2575,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&vm->relocated); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->idle); + spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->invalidated); - spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->freed); INIT_LIST_HEAD(&vm->done); INIT_KFIFO(vm->faults); +
spin_lock_init(&vm->stats_lock); r = amdgpu_vm_init_entities(adev, vm); if (r) @@ -3030,7 +3045,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) unsigned int total_done_objs = 0; unsigned int id = 0; - spin_lock(&vm->status_lock); + amdgpu_vm_assert_locked(vm); + seq_puts(m, "\tIdle BOs:\n"); list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { if (!bo_va->base.bo) @@ -3068,11 +3084,13 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) id = 0; seq_puts(m, "\tInvalidated BOs:\n"); + spin_lock(&vm->invalidated_lock); list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { if (!bo_va->base.bo) continue; total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); } + spin_unlock(&vm->invalidated_lock); total_invalidated_objs = id; id = 0; @@ -3082,7 +3100,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) continue; total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); } - spin_unlock(&vm->status_lock); total_done_objs = id; seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67eaf5402e7e..988e970d9e96 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -203,11 +203,11 @@ struct amdgpu_vm_bo_base { /* protected by bo being reserved */ struct amdgpu_vm_bo_base *next; - /* protected by vm status_lock */ + /* protected by vm reservation and invalidated_lock */ struct list_head vm_status; /* if the bo is counted as shared in mem stats - * protected by vm status_lock */ + * protected by vm BO being reserved */ bool shared; /* protected by the BO being reserved */ @@ -343,18 +343,22 @@ struct amdgpu_vm { bool evicting; unsigned int saved_flags; - /* Lock to protect vm_bo add/del/move on all lists of vm */ - spinlock_t status_lock; - - /* Memory statistics for this vm, protected by status_lock */ + /* Memory statistics for this vm, protected by stats_lock */ + spinlock_t stats_lock; struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]; + /* + * The following lists contain amdgpu_vm_bo_base objects for either + * PDs, PTs or per VM BOs. The state transitions are: + * + * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle + * + * Lists are protected by the root PD dma_resv lock. + */ + /* Per-VM and PT BOs which need validation */ struct list_head evicted; - /* BOs for user mode queues that need a validation */ - struct list_head evicted_user; - /* PT BOs which relocated and their parent need an update */ struct list_head relocated; @@ -364,15 +368,32 @@ struct amdgpu_vm { /* All BOs of this VM not currently in the state machine */ struct list_head idle; + /* + * The following lists contain amdgpu_vm_bo_base objects for BOs which + * have their own dma_resv object and do not depend on the root PD. Their + * state transitions are: + * + * evicted_user or invalidated -> done + * + * Lists are protected by the invalidated_lock.
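[Editor's note] The two comment blocks above (the second closes just below) are the heart of this rework: one family of state lists is serialized by the root PD reservation, the other by the new invalidated_lock spinlock, and the statistics get their own stats_lock. A condensed sketch of the rule, mirroring the amdgpu_vm_bo_moved() and amdgpu_vm_bo_invalidated() hunks shown earlier in this patch (illustration only, not part of the series):

/* Sketch of the two-tier locking rule for the VM BO state lists. */
static void vm_state_move_examples(struct amdgpu_vm *vm,
				   struct amdgpu_vm_bo_base *vm_bo)
{
	/* evicted/relocated/moved/idle: hold the root PD reservation */
	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
	list_move(&vm_bo->vm_status, &vm->moved);

	/* evicted_user/invalidated/done: take invalidated_lock instead */
	spin_lock(&vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm->invalidated);
	spin_unlock(&vm->invalidated_lock);
}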
+ */ + spinlock_t invalidated_lock; + + /* BOs for user mode queues that need validation */ + struct list_head evicted_user; + /* regular invalidated BOs, but not yet updated in the PT */ struct list_head invalidated; - /* BO mappings freed, but not yet updated in the PT */ - struct list_head freed; - /* BOs which are invalidated and have been updated in the PTs */ struct list_head done; + /* + * This list contains amdgpu_bo_va_mapping objects which have been freed + * but not updated in the PTs + */ + struct list_head freed; + /* contains the page directory */ struct amdgpu_vm_bo_base root; struct dma_fence *last_update; @@ -491,6 +512,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, unsigned int num_fences); +int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, + unsigned int num_fences); bool amdgpu_vm_ready(struct amdgpu_vm *vm); uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index f794fb1cc06e..7a4c12ff9b18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -543,9 +543,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry) entry->bo->vm_bo = NULL; ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); - spin_lock(&entry->vm->status_lock); list_del(&entry->vm_status); - spin_unlock(&entry->vm->status_lock); amdgpu_bo_unref(&entry->bo); } @@ -589,7 +587,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, struct amdgpu_vm_pt_cursor seek; struct amdgpu_vm_bo_base *entry; - spin_lock(&params->vm->status_lock); for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) { if (entry && entry->bo) list_move(&entry->vm_status, &params->tlb_flush_waitlist); @@ -597,7 +594,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, /* enter start node now */ list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist); - spin_unlock(&params->vm->status_lock); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index e69db0a93378..a5adb2ed9b3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -425,45 +425,6 @@ out: return ret; } -static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man, - struct drm_printer *printer) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr debug\n"); -} - -static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man, - struct ttm_resource *res, - const struct ttm_place *place, - size_t size) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n"); - return false; -} - -static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man, - struct ttm_resource *res, - const struct ttm_place *place, - size_t size) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n"); - return true; -} - -static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man, - struct ttm_resource *res) -{ - DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n"); -} - -static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_resource **res) -{ -
DRM_DEBUG_DRIVER("Dummy vram mgr new\n"); - return -ENOSPC; -} - /** * amdgpu_vram_mgr_new - allocate new ranges * @@ -932,14 +893,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, mutex_unlock(&mgr->lock); } -static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = { - .alloc = amdgpu_dummy_vram_mgr_new, - .free = amdgpu_dummy_vram_mgr_del, - .intersects = amdgpu_dummy_vram_mgr_intersects, - .compatible = amdgpu_dummy_vram_mgr_compatible, - .debug = amdgpu_dummy_vram_mgr_debug -}; - static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { .alloc = amdgpu_vram_mgr_new, .free = amdgpu_vram_mgr_del, @@ -973,16 +926,10 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) INIT_LIST_HEAD(&mgr->allocated_vres_list); mgr->default_page_size = PAGE_SIZE; - if (!adev->gmc.is_app_apu) { - man->func = &amdgpu_vram_mgr_func; - - err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); - if (err) - return err; - } else { - man->func = &amdgpu_dummy_vram_mgr_func; - DRM_INFO("Setup dummy vram mgr\n"); - } + man->func = &amdgpu_vram_mgr_func; + err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); + if (err) + return err; ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); ttm_resource_manager_set_used(man, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index bba0b26fee8f..5f36aff17e79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -126,4 +126,8 @@ uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev); void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, uint16_t max_speed, uint8_t max_width); + +/* Cleanup macro for use with __free(xgmi_put_hive) */ +DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T)) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 33edad1f9dcd..3a79ed7d8031 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -405,12 +405,17 @@ struct amd_sriov_ras_cper_dump { uint32_t buf[]; }; +struct amd_sriov_ras_chk_criti { + uint32_t hit; +}; + struct amdsriov_ras_telemetry { struct amd_sriov_ras_telemetry_header header; union { struct amd_sriov_ras_telemetry_error_count error_count; struct amd_sriov_ras_cper_dump cper_dump; + struct amd_sriov_ras_chk_criti chk_criti; } body; }; diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 1c994d0cc50b..be5d67c2c7a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, ectx.last_jump_jiffies = 0; if (ws) { ectx.ws = kcalloc(4, ws, GFP_KERNEL); + if (!ectx.ws) { + ret = -ENOMEM; + goto free; + } ectx.ws_size = ws; } else { ectx.ws = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c deleted file mode 100644 index e84608891300..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ /dev/null @@ -1,3817 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <drm/drm_edid.h> -#include <drm/drm_fourcc.h> -#include <drm/drm_modeset_helper.h> -#include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_vblank.h> - -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_i2c.h" -#include "vid.h" -#include "atom.h" -#include "amdgpu_atombios.h" -#include "atombios_crtc.h" -#include "atombios_encoders.h" -#include "amdgpu_pll.h" -#include "amdgpu_connectors.h" -#include "amdgpu_display.h" -#include "dce_v11_0.h" - -#include "dce/dce_11_0_d.h" -#include "dce/dce_11_0_sh_mask.h" -#include "dce/dce_11_0_enum.h" -#include "oss/oss_3_0_d.h" -#include "oss/oss_3_0_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "gmc/gmc_8_1_sh_mask.h" - -#include "ivsrcid/ivsrcid_vislands30.h" - -static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); -static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); -static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd); - -static const u32 crtc_offsets[] = -{ - CRTC0_REGISTER_OFFSET, - CRTC1_REGISTER_OFFSET, - CRTC2_REGISTER_OFFSET, - CRTC3_REGISTER_OFFSET, - CRTC4_REGISTER_OFFSET, - CRTC5_REGISTER_OFFSET, - CRTC6_REGISTER_OFFSET -}; - -static const u32 hpd_offsets[] = -{ - HPD0_REGISTER_OFFSET, - HPD1_REGISTER_OFFSET, - HPD2_REGISTER_OFFSET, - HPD3_REGISTER_OFFSET, - HPD4_REGISTER_OFFSET, - HPD5_REGISTER_OFFSET -}; - -static const uint32_t dig_offsets[] = { - DIG0_REGISTER_OFFSET, - DIG1_REGISTER_OFFSET, - DIG2_REGISTER_OFFSET, - DIG3_REGISTER_OFFSET, - DIG4_REGISTER_OFFSET, - DIG5_REGISTER_OFFSET, - DIG6_REGISTER_OFFSET, - DIG7_REGISTER_OFFSET, - DIG8_REGISTER_OFFSET -}; - -static const struct { - uint32_t reg; - uint32_t vblank; - uint32_t vline; - uint32_t hpd; - -} interrupt_status_offsets[] = { { - .reg = mmDISP_INTERRUPT_STATUS, - .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, - .hpd = 
DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK -}, { - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, - .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, - .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, - .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK -} }; - -static const u32 cz_golden_settings_a11[] = -{ - mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, - mmFBC_MISC, 0x1f311fff, 0x14300000, -}; - -static const u32 cz_mgcg_cgcg_init[] = -{ - mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, - mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, -}; - -static const u32 stoney_golden_settings_a11[] = -{ - mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, - mmFBC_MISC, 0x1f311fff, 0x14302000, -}; - -static const u32 polaris11_golden_settings_a11[] = -{ - mmDCI_CLK_CNTL, 0x00000080, 0x00000000, - mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, - mmFBC_DEBUG1, 0xffffffff, 0x00000008, - mmFBC_MISC, 0x9f313fff, 0x14302008, - mmHDMI_CONTROL, 0x313f031f, 0x00000011, -}; - -static const u32 polaris10_golden_settings_a11[] = -{ - mmDCI_CLK_CNTL, 0x00000080, 0x00000000, - mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, - mmFBC_MISC, 0x9f313fff, 0x14302008, - mmHDMI_CONTROL, 0x313f031f, 0x00000011, -}; - -static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) -{ - switch (adev->asic_type) { - case CHIP_CARRIZO: - amdgpu_device_program_register_sequence(adev, - cz_mgcg_cgcg_init, - ARRAY_SIZE(cz_mgcg_cgcg_init)); - amdgpu_device_program_register_sequence(adev, - cz_golden_settings_a11, - ARRAY_SIZE(cz_golden_settings_a11)); - break; - case CHIP_STONEY: - amdgpu_device_program_register_sequence(adev, - stoney_golden_settings_a11, - ARRAY_SIZE(stoney_golden_settings_a11)); - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - amdgpu_device_program_register_sequence(adev, - polaris11_golden_settings_a11, - ARRAY_SIZE(polaris11_golden_settings_a11)); - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - amdgpu_device_program_register_sequence(adev, - polaris10_golden_settings_a11, - ARRAY_SIZE(polaris10_golden_settings_a11)); - break; - default: - break; - } -} - -static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev, - u32 block_offset, u32 reg) -{ - unsigned long flags; - u32 r; - - spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); - r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); - spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); - - return r; -} - -static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev, - u32 block_offset, u32 reg, u32 v) -{ - unsigned long flags; - - spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); - spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); -} - -static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) -{ - if (crtc < 
0 || crtc >= adev->mode_info.num_crtc) - return 0; - else - return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); -} - -static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) -{ - unsigned i; - - /* Enable pflip interrupts */ - for (i = 0; i < adev->mode_info.num_crtc; i++) - amdgpu_irq_get(adev, &adev->pageflip_irq, i); -} - -static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) -{ - unsigned i; - - /* Disable pflip interrupts */ - for (i = 0; i < adev->mode_info.num_crtc; i++) - amdgpu_irq_put(adev, &adev->pageflip_irq, i); -} - -/** - * dce_v11_0_page_flip - pageflip callback. - * - * @adev: amdgpu_device pointer - * @crtc_id: crtc to cleanup pageflip on - * @crtc_base: new address of the crtc (GPU MC address) - * @async: asynchronous flip - * - * Triggers the actual pageflip by updating the primary - * surface base address. - */ -static void dce_v11_0_page_flip(struct amdgpu_device *adev, - int crtc_id, u64 crtc_base, bool async) -{ - struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; - struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb; - u32 tmp; - - /* flip immediate for async, default is vsync */ - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0); - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* update pitch */ - WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, - fb->pitches[0] / fb->format->cpp[0]); - /* update the scanout addresses */ - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(crtc_base)); - /* writing to the low address triggers the update */ - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - lower_32_bits(crtc_base)); - /* post the write */ - RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); -} - -static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, - u32 *vbl, u32 *position) -{ - if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) - return -EINVAL; - - *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); - *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); - - return 0; -} - -/** - * dce_v11_0_hpd_sense - hpd sense callback. - * - * @adev: amdgpu_device pointer - * @hpd: hpd (hotplug detect) pin - * - * Checks if a digital monitor is connected (evergreen+). - * Returns true if connected, false if not connected. - */ -static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, - enum amdgpu_hpd_id hpd) -{ - bool connected = false; - - if (hpd >= adev->mode_info.num_hpd) - return connected; - - if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & - DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) - connected = true; - - return connected; -} - -/** - * dce_v11_0_hpd_set_polarity - hpd set polarity callback. - * - * @adev: amdgpu_device pointer - * @hpd: hpd (hotplug detect) pin - * - * Set the polarity of the hpd pin (evergreen+). 
- */ -static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, - enum amdgpu_hpd_id hpd) -{ - u32 tmp; - bool connected = dce_v11_0_hpd_sense(adev, hpd); - - if (hpd >= adev->mode_info.num_hpd) - return; - - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - if (connected) - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); - else - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); -} - -/** - * dce_v11_0_hpd_init - hpd setup callback. - * - * @adev: amdgpu_device pointer - * - * Setup the hpd pins used by the card (evergreen+). - * Enable the pin, set the polarity, and enable the hpd interrupts. - */ -static void dce_v11_0_hpd_init(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_connector *connector; - struct drm_connector_list_iter iter; - u32 tmp; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - - if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) - continue; - - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - continue; - } - - tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); - WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - - tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, - DC_HPD_CONNECT_INT_DELAY, - AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); - tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, - DC_HPD_DISCONNECT_INT_DELAY, - AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); - WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - - dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd); - dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); - amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); - } - drm_connector_list_iter_end(&iter); -} - -/** - * dce_v11_0_hpd_fini - hpd tear down callback. - * - * @adev: amdgpu_device pointer - * - * Tear down the hpd pins used by the card (evergreen+). - * Disable the hpd interrupts. 
- */ -static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_connector *connector; - struct drm_connector_list_iter iter; - u32 tmp; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - - if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) - continue; - - tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); - WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); - - amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); - } - drm_connector_list_iter_end(&iter); -} - -static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) -{ - return mmDC_GPIO_HPD_A; -} - -static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) -{ - u32 crtc_hung = 0; - u32 crtc_status[6]; - u32 i, j, tmp; - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { - crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); - crtc_hung |= (1 << i); - } - } - - for (j = 0; j < 10; j++) { - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (crtc_hung & (1 << i)) { - tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); - if (tmp != crtc_status[i]) - crtc_hung &= ~(1 << i); - } - } - if (crtc_hung == 0) - return false; - udelay(100); - } - - return true; -} - -static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, - bool render) -{ - u32 tmp; - - /* Lockout access through VGA aperture*/ - tmp = RREG32(mmVGA_HDP_CONTROL); - if (render) - tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); - else - tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); - WREG32(mmVGA_HDP_CONTROL, tmp); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - if (render) - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); - else - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); -} - -static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) -{ - int num_crtc = 0; - - switch (adev->asic_type) { - case CHIP_CARRIZO: - num_crtc = 3; - break; - case CHIP_STONEY: - num_crtc = 2; - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - num_crtc = 6; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - num_crtc = 5; - break; - default: - num_crtc = 0; - } - return num_crtc; -} - -void dce_v11_0_disable_dce(struct amdgpu_device *adev) -{ - /*Disable VGA render and enabled crtc, if has DCE engine*/ - if (amdgpu_atombios_has_dce_engine_info(adev)) { - u32 tmp; - int crtc_enabled, i; - - dce_v11_0_set_vga_render_state(adev, false); - - /*Disable crtc*/ - for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - } - } -} - -static void dce_v11_0_program_fmt(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder 
= to_amdgpu_encoder(encoder); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); - int bpc = 0; - u32 tmp = 0; - enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; - - if (connector) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - bpc = amdgpu_connector_get_monitor_bpc(connector); - dither = amdgpu_connector->dither; - } - - /* LVDS/eDP FMT is set up by atom */ - if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) - return; - - /* not needed for analog */ - if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || - (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) - return; - - if (bpc == 0) - return; - - switch (bpc) { - case 6: - if (dither == AMDGPU_FMT_DITHER_ENABLE) { - /* XXX sort out optimal dither settings */ - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); - } else { - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); - } - break; - case 8: - if (dither == AMDGPU_FMT_DITHER_ENABLE) { - /* XXX sort out optimal dither settings */ - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); - } else { - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); - } - break; - case 10: - if (dither == AMDGPU_FMT_DITHER_ENABLE) { - /* XXX sort out optimal dither settings */ - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); - } else { - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); - } - break; - default: - /* not needed */ - break; - } - - WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - - -/* display watermark setup */ -/** - * dce_v11_0_line_buffer_adjust - Set up the line buffer - * - * @adev: amdgpu_device pointer - * @amdgpu_crtc: the selected display controller - * @mode: the current display mode on the selected display - * controller - * - * Setup up the line buffer allocation for - * the selected display controller (CIK). - * Returns the line buffer size in pixels. - */ -static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, - struct amdgpu_crtc *amdgpu_crtc, - struct drm_display_mode *mode) -{ - u32 tmp, buffer_alloc, i, mem_cfg; - u32 pipe_offset = amdgpu_crtc->crtc_id; - /* - * Line Buffer Setup - * There are 6 line buffers, one for each display controllers. 
- * There are 3 partitions per LB. Select the number of partitions - * to enable based on the display width. For display widths larger - * than 4096, you need use to use 2 display controllers and combine - * them using the stereo blender. - */ - if (amdgpu_crtc->base.enabled && mode) { - if (mode->crtc_hdisplay < 1920) { - mem_cfg = 1; - buffer_alloc = 2; - } else if (mode->crtc_hdisplay < 2560) { - mem_cfg = 2; - buffer_alloc = 2; - } else if (mode->crtc_hdisplay < 4096) { - mem_cfg = 0; - buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; - } else { - DRM_DEBUG_KMS("Mode too big for LB!\n"); - mem_cfg = 0; - buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; - } - } else { - mem_cfg = 1; - buffer_alloc = 0; - } - - tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); - WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); - tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); - WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); - if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) - break; - udelay(1); - } - - if (amdgpu_crtc->base.enabled && mode) { - switch (mem_cfg) { - case 0: - default: - return 4096 * 2; - case 1: - return 1920 * 2; - case 2: - return 2560 * 2; - } - } - - /* controller not enabled, so no lb used */ - return 0; -} - -/** - * cik_get_number_of_dram_channels - get the number of dram channels - * - * @adev: amdgpu_device pointer - * - * Look up the number of video ram channels (CIK). - * Used for display watermark bandwidth calculations - * Returns the number of dram channels - */ -static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) -{ - u32 tmp = RREG32(mmMC_SHARED_CHMAP); - - switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { - case 0: - default: - return 1; - case 1: - return 2; - case 2: - return 4; - case 3: - return 8; - case 4: - return 3; - case 5: - return 6; - case 6: - return 10; - case 7: - return 12; - case 8: - return 16; - } -} - -struct dce10_wm_params { - u32 dram_channels; /* number of dram channels */ - u32 yclk; /* bandwidth per dram data pin in kHz */ - u32 sclk; /* engine clock in kHz */ - u32 disp_clk; /* display clock in kHz */ - u32 src_width; /* viewport width */ - u32 active_time; /* active display time in ns */ - u32 blank_time; /* blank time in ns */ - bool interlaced; /* mode is interlaced */ - fixed20_12 vsc; /* vertical scale ratio */ - u32 num_heads; /* number of active crtcs */ - u32 bytes_per_pixel; /* bytes per pixel display + overlay */ - u32 lb_size; /* line buffer allocated to pipe */ - u32 vtaps; /* vertical scaler taps */ -}; - -/** - * dce_v11_0_dram_bandwidth - get the dram bandwidth - * - * @wm: watermark calculation data - * - * Calculate the raw dram bandwidth (CIK). 
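 *
 * Effectively bandwidth = (yclk / 1000) * (dram_channels * 4 bytes) * 0.7.
 * Worked example (illustrative numbers, not from hardware): yclk =
 * 1,000,000 kHz and 8 channels give 1000 * 32 * 0.7 = 22,400 MB/s of
 * raw DRAM bandwidth. fixed20_12 is 20.12 fixed point, which is how
 * the 0.7 efficiency factor is represented without floating point.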
- * Used for display watermark bandwidth calculations - * Returns the dram bandwidth in MBytes/s - */ -static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate raw DRAM Bandwidth */ - fixed20_12 dram_efficiency; /* 0.7 */ - fixed20_12 yclk, dram_channels, bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - yclk.full = dfixed_const(wm->yclk); - yclk.full = dfixed_div(yclk, a); - dram_channels.full = dfixed_const(wm->dram_channels * 4); - a.full = dfixed_const(10); - dram_efficiency.full = dfixed_const(7); - dram_efficiency.full = dfixed_div(dram_efficiency, a); - bandwidth.full = dfixed_mul(dram_channels, yclk); - bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display - * - * @wm: watermark calculation data - * - * Calculate the dram bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the dram bandwidth for display in MBytes/s - */ -static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) -{ - /* Calculate DRAM Bandwidth and the part allocated to display. */ - fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ - fixed20_12 yclk, dram_channels, bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - yclk.full = dfixed_const(wm->yclk); - yclk.full = dfixed_div(yclk, a); - dram_channels.full = dfixed_const(wm->dram_channels * 4); - a.full = dfixed_const(10); - disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ - disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); - bandwidth.full = dfixed_mul(dram_channels, yclk); - bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_data_return_bandwidth - get the data return bandwidth - * - * @wm: watermark calculation data - * - * Calculate the data return bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the data return bandwidth in MBytes/s - */ -static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the display Data return Bandwidth */ - fixed20_12 return_efficiency; /* 0.8 */ - fixed20_12 sclk, bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - sclk.full = dfixed_const(wm->sclk); - sclk.full = dfixed_div(sclk, a); - a.full = dfixed_const(10); - return_efficiency.full = dfixed_const(8); - return_efficiency.full = dfixed_div(return_efficiency, a); - a.full = dfixed_const(32); - bandwidth.full = dfixed_mul(a, sclk); - bandwidth.full = dfixed_mul(bandwidth, return_efficiency); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth - * - * @wm: watermark calculation data - * - * Calculate the dmif bandwidth used for display (CIK). 
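 *
 * Effectively bandwidth = 32 bytes * (disp_clk / 1000) * 0.8.
 * Worked example (illustrative numbers): a 600,000 kHz display clock
 * gives 32 * 600 * 0.8 = 15,360 MB/s of DMIF request bandwidth.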
- * Used for display watermark bandwidth calculations - * Returns the dmif bandwidth in MBytes/s - */ -static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the DMIF Request Bandwidth */ - fixed20_12 disp_clk_request_efficiency; /* 0.8 */ - fixed20_12 disp_clk, bandwidth; - fixed20_12 a, b; - - a.full = dfixed_const(1000); - disp_clk.full = dfixed_const(wm->disp_clk); - disp_clk.full = dfixed_div(disp_clk, a); - a.full = dfixed_const(32); - b.full = dfixed_mul(a, disp_clk); - - a.full = dfixed_const(10); - disp_clk_request_efficiency.full = dfixed_const(8); - disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); - - bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_available_bandwidth - get the min available bandwidth - * - * @wm: watermark calculation data - * - * Calculate the min available bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the min available bandwidth in MBytes/s - */ -static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ - u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm); - u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm); - u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm); - - return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); -} - -/** - * dce_v11_0_average_bandwidth - get the average available bandwidth - * - * @wm: watermark calculation data - * - * Calculate the average available bandwidth used for display (CIK). - * Used for display watermark bandwidth calculations - * Returns the average available bandwidth in MBytes/s - */ -static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm) -{ - /* Calculate the display mode Average Bandwidth - * DisplayMode should contain the source and destination dimensions, - * timing, etc. - */ - fixed20_12 bpp; - fixed20_12 line_time; - fixed20_12 src_width; - fixed20_12 bandwidth; - fixed20_12 a; - - a.full = dfixed_const(1000); - line_time.full = dfixed_const(wm->active_time + wm->blank_time); - line_time.full = dfixed_div(line_time, a); - bpp.full = dfixed_const(wm->bytes_per_pixel); - src_width.full = dfixed_const(wm->src_width); - bandwidth.full = dfixed_mul(src_width, bpp); - bandwidth.full = dfixed_mul(bandwidth, wm->vsc); - bandwidth.full = dfixed_div(bandwidth, line_time); - - return dfixed_trunc(bandwidth); -} - -/** - * dce_v11_0_latency_watermark - get the latency watermark - * - * @wm: watermark calculation data - * - * Calculate the latency watermark (CIK). - * Used for display watermark bandwidth calculations - * Returns the latency watermark in ns - */ -static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) -{ - /* First calculate the latency in ns */ - u32 mc_latency = 2000; /* 2000 ns. 
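Fixed assumption for the memory-controller latency used as the base of the watermark.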
*/ - u32 available_bandwidth = dce_v11_0_available_bandwidth(wm); - u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; - u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; - u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ - u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + - (wm->num_heads * cursor_line_pair_return_time); - u32 latency = mc_latency + other_heads_data_return_time + dc_latency; - u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; - u32 tmp, dmif_size = 12288; - fixed20_12 a, b, c; - - if (wm->num_heads == 0) - return 0; - - a.full = dfixed_const(2); - b.full = dfixed_const(1); - if ((wm->vsc.full > a.full) || - ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || - (wm->vtaps >= 5) || - ((wm->vsc.full >= a.full) && wm->interlaced)) - max_src_lines_per_dst_line = 4; - else - max_src_lines_per_dst_line = 2; - - a.full = dfixed_const(available_bandwidth); - b.full = dfixed_const(wm->num_heads); - a.full = dfixed_div(a, b); - tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); - tmp = min(dfixed_trunc(a), tmp); - - lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); - - a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); - b.full = dfixed_const(1000); - c.full = dfixed_const(lb_fill_bw); - b.full = dfixed_div(c, b); - a.full = dfixed_div(a, b); - line_fill_time = dfixed_trunc(a); - - if (line_fill_time < wm->active_time) - return latency; - else - return latency + (line_fill_time - wm->active_time); - -} - -/** - * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check - * average and available dram bandwidth - * - * @wm: watermark calculation data - * - * Check if the display average bandwidth fits in the display - * dram bandwidth (CIK). - * Used for display watermark bandwidth calculations - * Returns true if the display fits, false if not. - */ -static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) -{ - if (dce_v11_0_average_bandwidth(wm) <= - (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads)) - return true; - else - return false; -} - -/** - * dce_v11_0_average_bandwidth_vs_available_bandwidth - check - * average and available bandwidth - * - * @wm: watermark calculation data - * - * Check if the display average bandwidth fits in the display - * available bandwidth (CIK). - * Used for display watermark bandwidth calculations - * Returns true if the display fits, false if not. - */ -static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) -{ - if (dce_v11_0_average_bandwidth(wm) <= - (dce_v11_0_available_bandwidth(wm) / wm->num_heads)) - return true; - else - return false; -} - -/** - * dce_v11_0_check_latency_hiding - check latency hiding - * - * @wm: watermark calculation data - * - * Check latency hiding (CIK). - * Used for display watermark bandwidth calculations - * Returns true if the display fits, false if not. 
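 *
 * Worked example for the latency watermark above (illustrative
 * numbers): with 22,400 MB/s available bandwidth, a 148,500 kHz
 * display clock and two heads,
 *   worst_chunk_return_time = 512*8*1000 / 22400     ~= 182 ns
 *   cursor_line_pair_return = 128*4*1000 / 22400     ~=  22 ns
 *   dc_latency              = 40,000,000 / 148,500   ~= 269 ns
 *   latency = 2000 + (3*182 + 2*22) + 269            ~= 2859 ns
 * and this is what latency hiding must cover within the line budget.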
- */ -static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm) -{ - u32 lb_partitions = wm->lb_size / wm->src_width; - u32 line_time = wm->active_time + wm->blank_time; - u32 latency_tolerant_lines; - u32 latency_hiding; - fixed20_12 a; - - a.full = dfixed_const(1); - if (wm->vsc.full > a.full) - latency_tolerant_lines = 1; - else { - if (lb_partitions <= (wm->vtaps + 1)) - latency_tolerant_lines = 1; - else - latency_tolerant_lines = 2; - } - - latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); - - if (dce_v11_0_latency_watermark(wm) <= latency_hiding) - return true; - else - return false; -} - -/** - * dce_v11_0_program_watermarks - program display watermarks - * - * @adev: amdgpu_device pointer - * @amdgpu_crtc: the selected display controller - * @lb_size: line buffer size - * @num_heads: number of display controllers in use - * - * Calculate and program the display watermarks for the - * selected display controller (CIK). - */ -static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, - struct amdgpu_crtc *amdgpu_crtc, - u32 lb_size, u32 num_heads) -{ - struct drm_display_mode *mode = &amdgpu_crtc->base.mode; - struct dce10_wm_params wm_low, wm_high; - u32 active_time; - u32 line_time = 0; - u32 latency_watermark_a = 0, latency_watermark_b = 0; - u32 tmp, wm_mask, lb_vblank_lead_lines = 0; - - if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, - (u32)mode->clock); - line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, - (u32)mode->clock); - line_time = min_t(u32, line_time, 65535); - - /* watermark for high clocks */ - if (adev->pm.dpm_enabled) { - wm_high.yclk = - amdgpu_dpm_get_mclk(adev, false) * 10; - wm_high.sclk = - amdgpu_dpm_get_sclk(adev, false) * 10; - } else { - wm_high.yclk = adev->pm.current_mclk * 10; - wm_high.sclk = adev->pm.current_sclk * 10; - } - - wm_high.disp_clk = mode->clock; - wm_high.src_width = mode->crtc_hdisplay; - wm_high.active_time = active_time; - wm_high.blank_time = line_time - wm_high.active_time; - wm_high.interlaced = false; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - wm_high.interlaced = true; - wm_high.vsc = amdgpu_crtc->vsc; - wm_high.vtaps = 1; - if (amdgpu_crtc->rmx_type != RMX_OFF) - wm_high.vtaps = 2; - wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ - wm_high.lb_size = lb_size; - wm_high.dram_channels = cik_get_number_of_dram_channels(adev); - wm_high.num_heads = num_heads; - - /* set for high clocks */ - latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535); - - /* possibly force display priority to high */ - /* should really do this at mode validation time... 
*/ - if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || - !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) || - !dce_v11_0_check_latency_hiding(&wm_high) || - (adev->mode_info.disp_priority == 2)) { - DRM_DEBUG_KMS("force priority to high\n"); - } - - /* watermark for low clocks */ - if (adev->pm.dpm_enabled) { - wm_low.yclk = - amdgpu_dpm_get_mclk(adev, true) * 10; - wm_low.sclk = - amdgpu_dpm_get_sclk(adev, true) * 10; - } else { - wm_low.yclk = adev->pm.current_mclk * 10; - wm_low.sclk = adev->pm.current_sclk * 10; - } - - wm_low.disp_clk = mode->clock; - wm_low.src_width = mode->crtc_hdisplay; - wm_low.active_time = active_time; - wm_low.blank_time = line_time - wm_low.active_time; - wm_low.interlaced = false; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - wm_low.interlaced = true; - wm_low.vsc = amdgpu_crtc->vsc; - wm_low.vtaps = 1; - if (amdgpu_crtc->rmx_type != RMX_OFF) - wm_low.vtaps = 2; - wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ - wm_low.lb_size = lb_size; - wm_low.dram_channels = cik_get_number_of_dram_channels(adev); - wm_low.num_heads = num_heads; - - /* set for low clocks */ - latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535); - - /* possibly force display priority to high */ - /* should really do this at mode validation time... */ - if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || - !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) || - !dce_v11_0_check_latency_hiding(&wm_low) || - (adev->mode_info.disp_priority == 2)) { - DRM_DEBUG_KMS("force priority to high\n"); - } - lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); - } - - /* select wm A */ - wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); - tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); - WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* select wm B */ - tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); - tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b); - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); - WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); - /* restore original selection */ - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); - - /* save values for DPM */ - amdgpu_crtc->line_time = line_time; - - /* Save number of lines the linebuffer leads before the scanout */ - amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; -} - -/** - * dce_v11_0_bandwidth_update - program display watermarks - * - * @adev: amdgpu_device pointer - * - * Calculate and program the display watermarks and line - * buffer allocation (CIK). 
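 *
 * Worked timing example for the helper above (a common 1080p60 mode,
 * illustrative): mode->clock = 148,500 kHz, crtc_hdisplay = 1920 and
 * crtc_htotal = 2200 give
 *   active_time = 1920 * 1,000,000 / 148,500 ~= 12,929 ns
 *   line_time   = 2200 * 1,000,000 / 148,500 ~= 14,814 ns
 *   blank_time  = line_time - active_time    ~=  1,885 ns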
- */ -static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev) -{ - struct drm_display_mode *mode = NULL; - u32 num_heads = 0, lb_size; - int i; - - amdgpu_display_update_priority(adev); - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->mode_info.crtcs[i]->base.enabled) - num_heads++; - } - for (i = 0; i < adev->mode_info.num_crtc; i++) { - mode = &adev->mode_info.crtcs[i]->base.mode; - lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); - dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i], - lb_size, num_heads); - } -} - -static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev) -{ - int i; - u32 offset, tmp; - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - offset = adev->mode_info.audio.pin[i].offset; - tmp = RREG32_AUDIO_ENDPT(offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); - if (((tmp & - AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> - AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) - adev->mode_info.audio.pin[i].connected = false; - else - adev->mode_info.audio.pin[i].connected = true; - } -} - -static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev) -{ - int i; - - dce_v11_0_audio_get_connected_pins(adev); - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - if (adev->mode_info.audio.pin[i].connected) - return &adev->mode_info.audio.pin[i]; - } - DRM_ERROR("No connected audio pins found!\n"); - return NULL; -} - -static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) -{ - struct amdgpu_device *adev = drm_to_adev(encoder->dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - u32 tmp; - - if (!dig || !dig->afmt || !dig->afmt->pin) - return; - - tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); - WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); -} - -static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, - struct drm_display_mode *mode) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - struct amdgpu_connector *amdgpu_connector = NULL; - u32 tmp; - int interlace = 0; - - if (!dig || !dig->afmt || !dig->afmt->pin) - return; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - if (connector->encoder == encoder) { - amdgpu_connector = to_amdgpu_connector(connector); - break; - } - } - drm_connector_list_iter_end(&iter); - - if (!amdgpu_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - interlace = 1; - if (connector->latency_present[interlace]) { - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - VIDEO_LIPSYNC, connector->video_latency[interlace]); - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - AUDIO_LIPSYNC, connector->audio_latency[interlace]); - } else { - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - VIDEO_LIPSYNC, 0); - tmp = REG_SET_FIELD(0, 
AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, - AUDIO_LIPSYNC, 0); - } - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); -} - -static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - struct amdgpu_connector *amdgpu_connector = NULL; - u32 tmp; - u8 *sadb = NULL; - int sad_count; - - if (!dig || !dig->afmt || !dig->afmt->pin) - return; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - if (connector->encoder == encoder) { - amdgpu_connector = to_amdgpu_connector(connector); - break; - } - } - drm_connector_list_iter_end(&iter); - - if (!amdgpu_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb); - if (sad_count < 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - sad_count = 0; - } - - /* program the speaker allocation */ - tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - DP_CONNECTION, 0); - /* set HDMI mode */ - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - HDMI_CONNECTION, 1); - if (sad_count) - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - SPEAKER_ALLOCATION, sadb[0]); - else - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, - SPEAKER_ALLOCATION, 5); /* stereo */ - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, - ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); - - kfree(sadb); -} - -static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - struct amdgpu_connector *amdgpu_connector = NULL; - struct cea_sad *sads; - int i, sad_count; - - static const u16 eld_reg_to_type[][2] = { - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, - }; - - if (!dig || !dig->afmt || 
!dig->afmt->pin) - return; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - if (connector->encoder == encoder) { - amdgpu_connector = to_amdgpu_connector(connector); - break; - } - } - drm_connector_list_iter_end(&iter); - - if (!amdgpu_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads); - if (sad_count < 0) - DRM_ERROR("Couldn't read SADs: %d\n", sad_count); - if (sad_count <= 0) - return; - BUG_ON(!sads); - - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { - u32 tmp = 0; - u8 stereo_freqs = 0; - int max_channels = -1; - int j; - - for (j = 0; j < sad_count; j++) { - struct cea_sad *sad = &sads[j]; - - if (sad->format == eld_reg_to_type[i][1]) { - if (sad->channels > max_channels) { - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - MAX_CHANNELS, sad->channels); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - DESCRIPTOR_BYTE_2, sad->byte2); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - SUPPORTED_FREQUENCIES, sad->freq); - max_channels = sad->channels; - } - - if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) - stereo_freqs |= sad->freq; - else - break; - } - } - - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); - } - - kfree(sads); -} - -static void dce_v11_0_audio_enable(struct amdgpu_device *adev, - struct amdgpu_audio_pin *pin, - bool enable) -{ - if (!pin) - return; - - WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, - enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); -} - -static const u32 pin_offsets[] = -{ - AUD0_REGISTER_OFFSET, - AUD1_REGISTER_OFFSET, - AUD2_REGISTER_OFFSET, - AUD3_REGISTER_OFFSET, - AUD4_REGISTER_OFFSET, - AUD5_REGISTER_OFFSET, - AUD6_REGISTER_OFFSET, - AUD7_REGISTER_OFFSET, -}; - -static int dce_v11_0_audio_init(struct amdgpu_device *adev) -{ - int i; - - if (!amdgpu_audio) - return 0; - - adev->mode_info.audio.enabled = true; - - switch (adev->asic_type) { - case CHIP_CARRIZO: - case CHIP_STONEY: - adev->mode_info.audio.num_pins = 7; - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - adev->mode_info.audio.num_pins = 8; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - adev->mode_info.audio.num_pins = 6; - break; - default: - return -EINVAL; - } - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - adev->mode_info.audio.pin[i].channels = -1; - adev->mode_info.audio.pin[i].rate = -1; - adev->mode_info.audio.pin[i].bits_per_sample = -1; - adev->mode_info.audio.pin[i].status_bits = 0; - adev->mode_info.audio.pin[i].category_code = 0; - adev->mode_info.audio.pin[i].connected = false; - adev->mode_info.audio.pin[i].offset = pin_offsets[i]; - adev->mode_info.audio.pin[i].id = i; - /* disable audio. 
it will be set up later */ - /* XXX remove once we switch to ip funcs */ - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - } - - return 0; -} - -static void dce_v11_0_audio_fini(struct amdgpu_device *adev) -{ - if (!amdgpu_audio) - return; - - if (!adev->mode_info.audio.enabled) - return; - - adev->mode_info.audio.enabled = false; -} - -/* - * update the N and CTS parameters for a given pixel clock rate - */ -static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - u32 tmp; - - tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); - WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); - tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); - WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); - WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); - tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); - WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); - WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); - tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); - WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); - -} - -/* - * build a HDMI Video Info Frame - */ -static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, - void *buffer, size_t size) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - uint8_t *frame = buffer + 3; - uint8_t *header = buffer; - - WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, - frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); - WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, - frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); - WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, - frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); - WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, - frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); -} - -static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); - u32 dto_phase = 24 * 1000; - u32 dto_modulo = clock; - u32 tmp; - - if (!dig || !dig->afmt) - return; - - /* XXX two dtos; generally use dto0 for hdmi */ - /* Express [24MHz / target pixel clock] as an exact rational - * number (coefficient of two integer numbers. 
DCCG_AUDIO_DTOx_PHASE - * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator - */ - tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); - tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, - amdgpu_crtc->crtc_id); - WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); - WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); - WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); -} - -/* - * update the info frames with the data from the current display mode - */ -static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, - struct drm_display_mode *mode) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); - u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; - struct hdmi_avi_infoframe frame; - ssize_t err; - u32 tmp; - int bpc = 8; - - if (!dig || !dig->afmt) - return; - - /* Silent, r600_hdmi_enable will raise WARN for us */ - if (!dig->afmt->enabled) - return; - - /* hdmi deep color mode general control packets setup, if bpc > 8 */ - if (encoder->crtc) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); - bpc = amdgpu_crtc->bpc; - } - - /* disable audio prior to setting up hw */ - dig->afmt->pin = dce_v11_0_audio_get_pin(adev); - dce_v11_0_audio_enable(adev, dig->afmt->pin, false); - - dce_v11_0_audio_set_dto(encoder, mode->clock); - - tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); - WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ - - WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); - - tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); - switch (bpc) { - case 0: - case 6: - case 8: - case 16: - default: - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); - DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", - connector->name, bpc); - break; - case 10: - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); - DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", - connector->name); - break; - case 12: - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); - DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", - connector->name); - break; - } - WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ - WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); - /* enable audio info frames (frames won't be set until audio is enabled) */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); - /* required for audio info values to be updated */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); - 
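	/* Audio DTO arithmetic (illustrative numbers, not from this patch):
	 * dce_v11_0_audio_set_dto() above programs PHASE = 24 * 1000 and
	 * MODULE = mode->clock, so for a 148,500 kHz pixel clock the DTO
	 * yields 148,500 * 24,000 / 148,500 = 24,000 kHz, i.e. the fixed
	 * 24 MHz audio reference independent of the pixel clock chosen. */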
WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); - /* required for audio info values to be updated */ - tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); - WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); - /* anything other than 0 */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); - WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); - - WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ - - tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); - /* set the default audio delay */ - tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); - /* should be suffient for all audio modes and small enough for all hblanks */ - tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); - WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); - /* allow 60958 channel status fields to be updated */ - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); - WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); - if (bpc > 8) - /* clear SW CTS value */ - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); - else - /* select SW CTS value */ - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); - /* allow hw to sent ACR packets when required */ - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); - WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); - - dce_v11_0_afmt_update_ACR(encoder, mode->clock); - - tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); - WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); - WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); - WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); - - dce_v11_0_audio_write_speaker_allocation(encoder); - - WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, - (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); - - dce_v11_0_afmt_audio_select_pin(encoder); - dce_v11_0_audio_write_sad_regs(encoder); - dce_v11_0_audio_write_latency_fields(encoder, mode); - - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); - if (err < 0) { - DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); - return; - } - - err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); - if (err < 0) { - DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); - return; - } - - dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); - - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + 
dig->afmt->offset); - /* enable AVI info frames */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); - /* required for audio info values to be updated */ - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); - WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); - - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); - WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); - - tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); - /* send audio packets */ - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); - WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); - - WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); - WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); - WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); - WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); - - /* enable audio after to setting up hw */ - dce_v11_0_audio_enable(adev, dig->afmt->pin, true); -} - -static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - - if (!dig || !dig->afmt) - return; - - /* Silent, r600_hdmi_enable will raise WARN for us */ - if (enable && dig->afmt->enabled) - return; - if (!enable && !dig->afmt->enabled) - return; - - if (!enable && dig->afmt->pin) { - dce_v11_0_audio_enable(adev, dig->afmt->pin, false); - dig->afmt->pin = NULL; - } - - dig->afmt->enabled = enable; - - DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", - enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); -} - -static int dce_v11_0_afmt_init(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->mode_info.num_dig; i++) - adev->mode_info.afmt[i] = NULL; - - /* DCE11 has audio blocks tied to DIG encoders */ - for (i = 0; i < adev->mode_info.num_dig; i++) { - adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); - if (adev->mode_info.afmt[i]) { - adev->mode_info.afmt[i]->offset = dig_offsets[i]; - adev->mode_info.afmt[i]->id = i; - } else { - int j; - for (j = 0; j < i; j++) { - kfree(adev->mode_info.afmt[j]); - adev->mode_info.afmt[j] = NULL; - } - return -ENOMEM; - } - } - return 0; -} - -static void dce_v11_0_afmt_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->mode_info.num_dig; i++) { - kfree(adev->mode_info.afmt[i]); - adev->mode_info.afmt[i] = NULL; - } -} - -static const u32 vga_control_regs[6] = -{ - mmD1VGA_CONTROL, - mmD2VGA_CONTROL, - mmD3VGA_CONTROL, - mmD4VGA_CONTROL, - mmD5VGA_CONTROL, - mmD6VGA_CONTROL, -}; - -static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - u32 vga_control; - - vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; - if (enable) - WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); - else - WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); -} - -static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - - if (enable) - WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); - else - WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); -} - -static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y, int atomic) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct drm_framebuffer *target_fb; - struct drm_gem_object *obj; - struct amdgpu_bo *abo; - uint64_t fb_location, tiling_flags; - uint32_t fb_format, fb_pitch_pixels; - u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); - u32 pipe_config; - u32 tmp, viewport_w, viewport_h; - int r; - bool bypass_lut = false; - - /* no fb bound */ - if (!atomic && !crtc->primary->fb) { - DRM_DEBUG_KMS("No FB bound\n"); - return 0; - } - - if (atomic) - target_fb = fb; - else - target_fb = crtc->primary->fb; - - /* If atomic, assume fb object is pinned & idle & fenced and - * just update base pointers - */ - obj = target_fb->obj[0]; - abo = gem_to_amdgpu_bo(obj); - r = amdgpu_bo_reserve(abo, false); - if (unlikely(r != 0)) - return r; - - if (!atomic) { - abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(abo); - return -EINVAL; - } - } - fb_location = amdgpu_bo_gpu_offset(abo); - - amdgpu_bo_get_tiling_flags(abo, &tiling_flags); - amdgpu_bo_unreserve(abo); - - pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); - - switch (target_fb->format->format) { - case DRM_FORMAT_C8: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); - break; - case DRM_FORMAT_XRGB4444: - case DRM_FORMAT_ARGB4444: - fb_format 
= REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_ARGB1555: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_BGRX5551: - case DRM_FORMAT_BGRA5551: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_RGB565: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN16); -#endif - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ - bypass_lut = true; - break; - case DRM_FORMAT_BGRX1010102: - case DRM_FORMAT_BGRA1010102: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ - bypass_lut = true; - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2); - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2); -#ifdef __BIG_ENDIAN - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, - ENDIAN_8IN32); -#endif - break; - default: - DRM_ERROR("Unsupported screen format %p4cc\n", - &target_fb->format->format); - return -EINVAL; - } - - if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { - unsigned bankw, bankh, mtaspect, tile_split, num_banks; - - bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); - bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); - mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); - tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); - num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); - - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, - ARRAY_2D_TILED_THIN1); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, - tile_split); - fb_format = 
REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, - mtaspect); - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, - ADDR_SURF_MICRO_TILING_DISPLAY); - } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, - ARRAY_1D_TILED_THIN1); - } - - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, - pipe_config); - - dce_v11_0_vga_enable(crtc, false); - - /* Make sure surface address is updated at vertical blank rather than - * horizontal blank - */ - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(fb_location)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(fb_location)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); - WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); - WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); - - /* - * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT - * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to - * retain the full precision throughout the pipeline. 
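 *
 * e.g. (illustrative) an XRGB2101010 framebuffer has 1024 levels per
 * component; routing it through the 256-entry legacy LUT would keep
 * only the 8 MSBs of each 10-bit value and drop the 2 LSBs.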
- */ - tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); - if (bypass_lut) - tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); - else - tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); - WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); - - if (bypass_lut) - DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); - - WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); - WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); - WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); - - fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; - WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); - - dce_v11_0_grph_enable(crtc, true); - - WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, - target_fb->height); - - x &= ~3; - y &= ~1; - WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, - (x << 16) | y); - viewport_w = crtc->mode.hdisplay; - viewport_h = (crtc->mode.vdisplay + 1) & ~1; - WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, - (viewport_w << 16) | viewport_h); - - /* set pageflip to happen anywhere in vblank interval */ - WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); - - if (!atomic && fb && fb != crtc->primary->fb) { - abo = gem_to_amdgpu_bo(fb->obj[0]); - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r != 0)) - return r; - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - - /* Bytes per pixel may have changed */ - dce_v11_0_bandwidth_update(adev); - - return 0; -} - -static void dce_v11_0_set_interleave(struct drm_crtc *crtc, - struct drm_display_mode *mode) -{ - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - u32 tmp; - - tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); - else - tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); - WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); -} - -static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - u16 *r, *g, *b; - int i; - u32 tmp; - - DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); - - tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); - WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); - WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); - WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); - - WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); - WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); - WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); - - WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + 
amdgpu_crtc->crtc_offset, 0xffff); - WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); - WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); - - WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); - WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); - - WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); - r = crtc->gamma_store; - g = r + crtc->gamma_size; - b = g + crtc->gamma_size; - for (i = 0; i < 256; i++) { - WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, - ((*r++ & 0xffc0) << 14) | - ((*g++ & 0xffc0) << 4) | - (*b++ >> 6)); - } - - tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0); - WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); - WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); - WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); - WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); - - /* XXX match this to the depth of the crtc fmt block, move to modeset? */ - WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); - /* XXX this only needs to be programmed once per crtc at startup, - * not sure where the best place for it is - */ - tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); - WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - -static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - - switch (amdgpu_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (dig->linkb) - return 1; - else - return 0; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig->linkb) - return 3; - else - return 2; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->linkb) - return 5; - else - return 4; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: - return 6; - default: - DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); - return 0; - } -} - -/** - * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc. - * - * @crtc: drm crtc - * - * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors - * a single PPLL can be used for all DP crtcs/encoders. For non-DP - * monitors a dedicated PPLL must be used. If a particular board has - * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming - * as there is no need to program the PLL itself. If we are not able to - * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to - * avoid messing up an existing monitor. 
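 *
 * On Polaris (DCE 11.2) parts the code below returns ATOM_DP_DTO for
 * DP encoders and a dedicated COMBOPHY PLL per UNIPHY link; elsewhere
 * a free PPLL is taken from the in-use mask (PPLL1 then PPLL0 on APUs,
 * PPLL2 then PPLL1 then PPLL0 otherwise).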
- * - * Asic specific PLL information - * - * DCE 10.x - * Tonga - * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) - * CI - * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC - * - */ -static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - u32 pll_in_use; - int pll; - - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12) || - (adev->asic_type == CHIP_VEGAM)) { - struct amdgpu_encoder *amdgpu_encoder = - to_amdgpu_encoder(amdgpu_crtc->encoder); - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - - if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) - return ATOM_DP_DTO; - - switch (amdgpu_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - if (dig->linkb) - return ATOM_COMBOPHY_PLL1; - else - return ATOM_COMBOPHY_PLL0; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig->linkb) - return ATOM_COMBOPHY_PLL3; - else - return ATOM_COMBOPHY_PLL2; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->linkb) - return ATOM_COMBOPHY_PLL5; - else - return ATOM_COMBOPHY_PLL4; - default: - DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); - return ATOM_PPLL_INVALID; - } - } - - if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { - if (adev->clock.dp_extclk) - /* skip PPLL programming if using ext clock */ - return ATOM_PPLL_INVALID; - else { - /* use the same PPLL for all DP monitors */ - pll = amdgpu_pll_get_shared_dp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } - } else { - /* use the same PPLL for all monitors with the same clock */ - pll = amdgpu_pll_get_shared_nondp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } - - /* XXX need to determine what plls are available on each DCE11 part */ - pll_in_use = amdgpu_pll_get_use_mask(crtc); - if (adev->flags & AMD_IS_APU) { - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - if (!(pll_in_use & (1 << ATOM_PPLL0))) - return ATOM_PPLL0; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; - } else { - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - if (!(pll_in_use & (1 << ATOM_PPLL0))) - return ATOM_PPLL0; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; - } - return ATOM_PPLL_INVALID; -} - -static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) -{ - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - uint32_t cur_lock; - - cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); - if (lock) - cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); - else - cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); - WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); -} - -static void dce_v11_0_hide_cursor(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - u32 tmp; - - tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); - WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - -static void dce_v11_0_show_cursor(struct drm_crtc *crtc) -{ - struct 
amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - u32 tmp; - - WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, - upper_32_bits(amdgpu_crtc->cursor_addr)); - WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - lower_32_bits(amdgpu_crtc->cursor_addr)); - - tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); - WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); -} - -static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, - int x, int y) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = drm_to_adev(crtc->dev); - int xorigin = 0, yorigin = 0; - - amdgpu_crtc->cursor_x = x; - amdgpu_crtc->cursor_y = y; - - /* avivo cursors are offset into the total surface */ - x += crtc->x; - y += crtc->y; - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); - - if (x < 0) { - xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); - x = 0; - } - if (y < 0) { - yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); - y = 0; - } - - WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); - WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); - - return 0; -} - -static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y) -{ - int ret; - - dce_v11_0_lock_cursor(crtc, true); - ret = dce_v11_0_cursor_move_locked(crtc, x, y); - dce_v11_0_lock_cursor(crtc, false); - - return ret; -} - -static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height, - int32_t hot_x, - int32_t hot_y) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_gem_object *obj; - struct amdgpu_bo *aobj; - int ret; - - if (!handle) { - /* turn off cursor */ - dce_v11_0_hide_cursor(crtc); - obj = NULL; - goto unpin; - } - - if ((width > amdgpu_crtc->max_cursor_width) || - (height > amdgpu_crtc->max_cursor_height)) { - DRM_ERROR("bad cursor width or height %d x %d\n", width, height); - return -EINVAL; - } - - obj = drm_gem_object_lookup(file_priv, handle); - if (!obj) { - DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); - return -ENOENT; - } - - aobj = gem_to_amdgpu_bo(obj); - ret = amdgpu_bo_reserve(aobj, false); - if (ret != 0) { - drm_gem_object_put(obj); - return ret; - } - - aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); - amdgpu_bo_unreserve(aobj); - if (ret) { - DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_put(obj); - return ret; - } - amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); - - dce_v11_0_lock_cursor(crtc, true); - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height || - hot_x != amdgpu_crtc->cursor_hot_x || - hot_y != amdgpu_crtc->cursor_hot_y) { - int x, y; - - x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; - y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; - - dce_v11_0_cursor_move_locked(crtc, x, y); - - amdgpu_crtc->cursor_width = width; - amdgpu_crtc->cursor_height = height; - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - dce_v11_0_show_cursor(crtc); - dce_v11_0_lock_cursor(crtc, 
false); - -unpin: - if (amdgpu_crtc->cursor_bo) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - ret = amdgpu_bo_reserve(aobj, true); - if (likely(ret == 0)) { - amdgpu_bo_unpin(aobj); - amdgpu_bo_unreserve(aobj); - } - drm_gem_object_put(amdgpu_crtc->cursor_bo); - } - - amdgpu_crtc->cursor_bo = obj; - return 0; -} - -static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - if (amdgpu_crtc->cursor_bo) { - dce_v11_0_lock_cursor(crtc, true); - - dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, - amdgpu_crtc->cursor_y); - - dce_v11_0_show_cursor(crtc); - - dce_v11_0_lock_cursor(crtc, false); - } -} - -static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t size, - struct drm_modeset_acquire_ctx *ctx) -{ - dce_v11_0_crtc_load_lut(crtc); - - return 0; -} - -static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - drm_crtc_cleanup(crtc); - kfree(amdgpu_crtc); -} - -static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { - .cursor_set2 = dce_v11_0_crtc_cursor_set2, - .cursor_move = dce_v11_0_crtc_cursor_move, - .gamma_set = dce_v11_0_crtc_gamma_set, - .set_config = amdgpu_display_crtc_set_config, - .destroy = dce_v11_0_crtc_destroy, - .page_flip_target = amdgpu_display_crtc_page_flip_target, - .get_vblank_counter = amdgpu_get_vblank_counter_kms, - .enable_vblank = amdgpu_enable_vblank_kms, - .disable_vblank = amdgpu_disable_vblank_kms, - .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, -}; - -static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - unsigned type; - - switch (mode) { - case DRM_MODE_DPMS_ON: - amdgpu_crtc->enabled = true; - amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); - dce_v11_0_vga_enable(crtc, true); - amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); - dce_v11_0_vga_enable(crtc, false); - /* Make sure VBLANK and PFLIP interrupts are still enabled */ - type = amdgpu_display_crtc_idx_to_irq_type(adev, - amdgpu_crtc->crtc_id); - amdgpu_irq_update(adev, &adev->crtc_irq, type); - amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_crtc_vblank_on(crtc); - dce_v11_0_crtc_load_lut(crtc); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - drm_crtc_vblank_off(crtc); - if (amdgpu_crtc->enabled) { - dce_v11_0_vga_enable(crtc, true); - amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); - dce_v11_0_vga_enable(crtc, false); - } - amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); - amdgpu_crtc->enabled = false; - break; - } - /* adjust pm to dpms */ - amdgpu_dpm_compute_clocks(adev); -} - -static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc) -{ - /* disable crtc pair power gating before programming */ - amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); - amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); - dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void dce_v11_0_crtc_commit(struct drm_crtc *crtc) -{ - dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); - amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); -} - -static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - struct amdgpu_atom_ss ss; - int i; - 
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - if (crtc->primary->fb) { - int r; - struct amdgpu_bo *abo; - - abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r)) - DRM_ERROR("failed to reserve abo before unpin\n"); - else { - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - } - /* disable the GRPH */ - dce_v11_0_grph_enable(crtc, false); - - amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->mode_info.crtcs[i] && - adev->mode_info.crtcs[i]->enabled && - i != amdgpu_crtc->crtc_id && - amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { - /* one other crtc is using this pll, don't turn - * off the pll - */ - goto done; - } - } - - switch (amdgpu_crtc->pll_id) { - case ATOM_PPLL0: - case ATOM_PPLL1: - case ATOM_PPLL2: - /* disable the ppll */ - amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); - break; - case ATOM_COMBOPHY_PLL0: - case ATOM_COMBOPHY_PLL1: - case ATOM_COMBOPHY_PLL2: - case ATOM_COMBOPHY_PLL3: - case ATOM_COMBOPHY_PLL4: - case ATOM_COMBOPHY_PLL5: - /* disable the ppll */ - amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); - break; - default: - break; - } -done: - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; - amdgpu_crtc->adjusted_clock = 0; - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; -} - -static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, struct drm_framebuffer *old_fb) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - - if (!amdgpu_crtc->adjusted_clock) - return -EINVAL; - - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12) || - (adev->asic_type == CHIP_VEGAM)) { - struct amdgpu_encoder *amdgpu_encoder = - to_amdgpu_encoder(amdgpu_crtc->encoder); - int encoder_mode = - amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); - - /* SetPixelClock calculates the plls and ss values now */ - amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, - amdgpu_crtc->pll_id, - encoder_mode, amdgpu_encoder->encoder_id, - adjusted_mode->clock, 0, 0, 0, 0, - amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss); - } else { - amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); - } - amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); - dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); - amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); - amdgpu_atombios_crtc_scaler_setup(crtc); - dce_v11_0_cursor_reset(crtc); - /* update the hw version for dpm */ - amdgpu_crtc->hw_mode = *adjusted_mode; - - return 0; -} - -static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct drm_encoder *encoder; - - /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - amdgpu_crtc->encoder = encoder; - amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); - break; - } - } - if 
((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; - return false; - } - if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) - return false; - if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) - return false; - /* pick pll */ - amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc); - /* if we can't get a PPLL for a non-DP encoder, fail */ - if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && - !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) - return false; - - return true; -} - -static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); -} - -static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y, enum mode_set_atomic state) -{ - return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); -} - -static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { - .dpms = dce_v11_0_crtc_dpms, - .mode_fixup = dce_v11_0_crtc_mode_fixup, - .mode_set = dce_v11_0_crtc_mode_set, - .mode_set_base = dce_v11_0_crtc_set_base, - .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic, - .prepare = dce_v11_0_crtc_prepare, - .commit = dce_v11_0_crtc_commit, - .disable = dce_v11_0_crtc_disable, - .get_scanout_position = amdgpu_crtc_get_scanout_position, -}; - -static void dce_v11_0_panic_flush(struct drm_plane *plane) -{ - struct drm_framebuffer *fb; - struct amdgpu_crtc *amdgpu_crtc; - struct amdgpu_device *adev; - uint32_t fb_format; - - if (!plane->fb) - return; - - fb = plane->fb; - amdgpu_crtc = to_amdgpu_crtc(plane->crtc); - adev = drm_to_adev(fb->dev); - - /* Disable DC tiling */ - fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset); - fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK; - WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); - -} - -static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = { - .get_scanout_buffer = amdgpu_display_get_scanout_buffer, - .panic_flush = dce_v11_0_panic_flush, -}; - -static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) -{ - struct amdgpu_crtc *amdgpu_crtc; - - amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + - (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); - if (amdgpu_crtc == NULL) - return -ENOMEM; - - drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); - - drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); - amdgpu_crtc->crtc_id = index; - adev->mode_info.crtcs[index] = amdgpu_crtc; - - amdgpu_crtc->max_cursor_width = 128; - amdgpu_crtc->max_cursor_height = 128; - adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; - adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; - - switch (amdgpu_crtc->crtc_id) { - case 0: - default: - amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; - break; - case 1: - amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; - break; - case 2: - amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; - break; - case 3: - amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; - break; - case 4: - amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; - break; - case 5: - amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; - break; - } - - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; - amdgpu_crtc->adjusted_clock = 0; - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; - 
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs); - drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs); - - return 0; -} - -static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - - adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; - adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; - - dce_v11_0_set_display_funcs(adev); - - adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev); - - switch (adev->asic_type) { - case CHIP_CARRIZO: - adev->mode_info.num_hpd = 6; - adev->mode_info.num_dig = 9; - break; - case CHIP_STONEY: - adev->mode_info.num_hpd = 6; - adev->mode_info.num_dig = 9; - break; - case CHIP_POLARIS10: - case CHIP_VEGAM: - adev->mode_info.num_hpd = 6; - adev->mode_info.num_dig = 6; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - adev->mode_info.num_hpd = 5; - adev->mode_info.num_dig = 5; - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; - } - - dce_v11_0_set_irq_funcs(adev); - - return 0; -} - -static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block) -{ - int r, i; - struct amdgpu_device *adev = ip_block->adev; - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); - if (r) - return r; - } - - for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); - if (r) - return r; - } - - /* HPD hotplug */ - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); - if (r) - return r; - - adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; - - adev_to_drm(adev)->mode_config.async_page_flip = true; - - adev_to_drm(adev)->mode_config.max_width = 16384; - adev_to_drm(adev)->mode_config.max_height = 16384; - - adev_to_drm(adev)->mode_config.preferred_depth = 24; - adev_to_drm(adev)->mode_config.prefer_shadow = 1; - - adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; - - r = amdgpu_display_modeset_create_props(adev); - if (r) - return r; - - adev_to_drm(adev)->mode_config.max_width = 16384; - adev_to_drm(adev)->mode_config.max_height = 16384; - - - /* allocate crtcs */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = dce_v11_0_crtc_init(adev, i); - if (r) - return r; - } - - if (amdgpu_atombios_get_connector_info_from_object_table(adev)) - amdgpu_display_print_display_setup(adev_to_drm(adev)); - else - return -EINVAL; - - /* setup afmt */ - r = dce_v11_0_afmt_init(adev); - if (r) - return r; - - r = dce_v11_0_audio_init(adev); - if (r) - return r; - - /* Disable vblank IRQs aggressively for power-saving */ - /* XXX: can this be enabled for DC? 
*/ - adev_to_drm(adev)->vblank_disable_immediate = true; - - r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); - if (r) - return r; - - INIT_DELAYED_WORK(&adev->hotplug_work, - amdgpu_display_hotplug_work_func); - - drm_kms_helper_poll_init(adev_to_drm(adev)); - - adev->mode_info.mode_config_initialized = true; - return 0; -} - -static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - - drm_edid_free(adev->mode_info.bios_hardcoded_edid); - - drm_kms_helper_poll_fini(adev_to_drm(adev)); - - dce_v11_0_audio_fini(adev); - - dce_v11_0_afmt_fini(adev); - - drm_mode_config_cleanup(adev_to_drm(adev)); - adev->mode_info.mode_config_initialized = false; - - return 0; -} - -static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block) -{ - int i; - struct amdgpu_device *adev = ip_block->adev; - - dce_v11_0_init_golden_registers(adev); - - /* disable vga render */ - dce_v11_0_set_vga_render_state(adev, false); - /* init dig PHYs, disp eng pll */ - amdgpu_atombios_crtc_powergate_init(adev); - amdgpu_atombios_encoder_init_dig(adev); - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12) || - (adev->asic_type == CHIP_VEGAM)) { - amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, - DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); - amdgpu_atombios_crtc_set_dce_clock(adev, 0, - DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS); - } else { - amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); - } - - /* initialize hpd */ - dce_v11_0_hpd_init(adev); - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - } - - dce_v11_0_pageflip_interrupt_init(adev); - - return 0; -} - -static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) -{ - int i; - struct amdgpu_device *adev = ip_block->adev; - - dce_v11_0_hpd_fini(adev); - - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); - } - - dce_v11_0_pageflip_interrupt_fini(adev); - - flush_delayed_work(&adev->hotplug_work); - - return 0; -} - -static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int r; - - r = amdgpu_display_suspend_helper(adev); - if (r) - return r; - - adev->mode_info.bl_level = - amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - - return dce_v11_0_hw_fini(ip_block); -} - -static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - int ret; - - amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, - adev->mode_info.bl_level); - - ret = dce_v11_0_hw_init(ip_block); - - /* turn on the BL */ - if (adev->mode_info.bl_encoder) { - u8 bl_level = amdgpu_display_backlight_get_level(adev, - adev->mode_info.bl_encoder); - amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, - bl_level); - } - if (ret) - return ret; - - return amdgpu_display_resume_helper(adev); -} - -static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block) -{ - return true; -} - -static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) -{ - u32 srbm_soft_reset = 0, tmp; - struct amdgpu_device *adev = ip_block->adev; - - if (dce_v11_0_is_display_hung(adev)) - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; - - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, 
"SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - udelay(50); - - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - - /* Wait a little for things to settle down */ - udelay(50); - } - return 0; -} - -static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, - int crtc, - enum amdgpu_interrupt_state state) -{ - u32 lb_interrupt_mask; - - if (crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VBLANK_INTERRUPT_MASK, 0); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - case AMDGPU_IRQ_STATE_ENABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VBLANK_INTERRUPT_MASK, 1); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - default: - break; - } -} - -static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, - int crtc, - enum amdgpu_interrupt_state state) -{ - u32 lb_interrupt_mask; - - if (crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VLINE_INTERRUPT_MASK, 0); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - case AMDGPU_IRQ_STATE_ENABLE: - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, - VLINE_INTERRUPT_MASK, 1); - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); - break; - default: - break; - } -} - -static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned hpd, - enum amdgpu_interrupt_state state) -{ - u32 tmp; - - if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hpd %d\n", hpd); - return 0; - } - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); - break; - case AMDGPU_IRQ_STATE_ENABLE: - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); - break; - default: - break; - } - - return 0; -} - -static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - switch (type) { - case AMDGPU_CRTC_IRQ_VBLANK1: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK2: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK3: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK4: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK5: - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state); - break; - case AMDGPU_CRTC_IRQ_VBLANK6: - 
dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state); - break; - case AMDGPU_CRTC_IRQ_VLINE1: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state); - break; - case AMDGPU_CRTC_IRQ_VLINE2: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state); - break; - case AMDGPU_CRTC_IRQ_VLINE3: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state); - break; - case AMDGPU_CRTC_IRQ_VLINE4: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state); - break; - case AMDGPU_CRTC_IRQ_VLINE5: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state); - break; - case AMDGPU_CRTC_IRQ_VLINE6: - dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state); - break; - default: - break; - } - return 0; -} - -static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, - unsigned type, - enum amdgpu_interrupt_state state) -{ - u32 reg; - - if (type >= adev->mode_info.num_crtc) { - DRM_ERROR("invalid pageflip crtc %d\n", type); - return -EINVAL; - } - - reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); - if (state == AMDGPU_IRQ_STATE_DISABLE) - WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], - reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); - else - WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], - reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); - - return 0; -} - -static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - unsigned long flags; - unsigned crtc_id; - struct amdgpu_crtc *amdgpu_crtc; - struct amdgpu_flip_work *works; - - crtc_id = (entry->src_id - 8) >> 1; - amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; - - if (crtc_id >= adev->mode_info.num_crtc) { - DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); - return -EINVAL; - } - - if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & - GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) - WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], - GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); - - /* IRQ could occur when in initial stage */ - if(amdgpu_crtc == NULL) - return 0; - - spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - works = amdgpu_crtc->pflip_works; - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ - DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " - "AMDGPU_FLIP_SUBMITTED(%d)\n", - amdgpu_crtc->pflip_status, - AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - return 0; - } - - /* page flip completed. 
clean up */ - amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; - amdgpu_crtc->pflip_works = NULL; - - /* wakeup userspace */ - if(works->event) - drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - - drm_crtc_vblank_put(&amdgpu_crtc->base); - schedule_work(&works->unpin_work); - - return 0; -} - -static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, - int hpd) -{ - u32 tmp; - - if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hpd %d\n", hpd); - return; - } - - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); -} - -static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev, - int crtc) -{ - u32 tmp; - - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); - tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); - WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); -} - -static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev, - int crtc) -{ - u32 tmp; - - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); - tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); - WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); -} - -static int dce_v11_0_crtc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - unsigned crtc = entry->src_id - 1; - uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); - unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, - crtc); - - switch (entry->src_data[0]) { - case 0: /* vblank */ - if (disp_int & interrupt_status_offsets[crtc].vblank) - dce_v11_0_crtc_vblank_int_ack(adev, crtc); - else - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); - - if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev_to_drm(adev), crtc); - } - DRM_DEBUG("IH: D%d vblank\n", crtc + 1); - - break; - case 1: /* vline */ - if (disp_int & interrupt_status_offsets[crtc].vline) - dce_v11_0_crtc_vline_int_ack(adev, crtc); - else - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); - - DRM_DEBUG("IH: D%d vline\n", crtc + 1); - - break; - default: - DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); - break; - } - - return 0; -} - -static int dce_v11_0_hpd_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - uint32_t disp_int, mask; - unsigned hpd; - - if (entry->src_data[0] >= adev->mode_info.num_hpd) { - DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); - return 0; - } - - hpd = entry->src_data[0]; - disp_int = RREG32(interrupt_status_offsets[hpd].reg); - mask = interrupt_status_offsets[hpd].hpd; - - if (disp_int & mask) { - dce_v11_0_hpd_int_ack(adev, hpd); - schedule_delayed_work(&adev->hotplug_work, 0); - DRM_DEBUG("IH: HPD%d\n", hpd + 1); - } - - return 0; -} - -static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, - enum amd_clockgating_state state) -{ - return 0; -} - -static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block, - enum amd_powergating_state state) -{ - return 0; -} - -static const struct amd_ip_funcs dce_v11_0_ip_funcs = { - .name = "dce_v11_0", - .early_init = 
dce_v11_0_early_init, - .sw_init = dce_v11_0_sw_init, - .sw_fini = dce_v11_0_sw_fini, - .hw_init = dce_v11_0_hw_init, - .hw_fini = dce_v11_0_hw_fini, - .suspend = dce_v11_0_suspend, - .resume = dce_v11_0_resume, - .is_idle = dce_v11_0_is_idle, - .soft_reset = dce_v11_0_soft_reset, - .set_clockgating_state = dce_v11_0_set_clockgating_state, - .set_powergating_state = dce_v11_0_set_powergating_state, -}; - -static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - - amdgpu_encoder->pixel_clock = adjusted_mode->clock; - - /* need to call this here rather than in prepare() since we need some crtc info */ - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - - /* set scaler clears this on some chips */ - dce_v11_0_set_interleave(encoder->crtc, mode); - - if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { - dce_v11_0_afmt_enable(encoder, true); - dce_v11_0_afmt_setmode(encoder, adjusted_mode); - } -} - -static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) -{ - struct amdgpu_device *adev = drm_to_adev(encoder->dev); - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); - - if ((amdgpu_encoder->active_device & - (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || - (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != - ENCODER_OBJECT_ID_NONE)) { - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; - if (dig) { - dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder); - if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) - dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; - } - } - - amdgpu_atombios_scratch_regs_lock(adev, true); - - if (connector) { - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - - /* select the clock/data port if it uses a router */ - if (amdgpu_connector->router.cd_valid) - amdgpu_i2c_router_select_cd_port(amdgpu_connector); - - /* turn eDP panel on for mode set */ - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) - amdgpu_atombios_encoder_set_edp_panel_power(connector, - ATOM_TRANSMITTER_ACTION_POWER_ON); - } - - /* this is needed for the pll/ss setup to work correctly in some cases */ - amdgpu_atombios_encoder_set_crtc_source(encoder); - /* set up the FMT blocks */ - dce_v11_0_program_fmt(encoder); -} - -static void dce_v11_0_encoder_commit(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = drm_to_adev(dev); - - /* need to call this here as we need the crtc set up */ - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); - amdgpu_atombios_scratch_regs_lock(adev, false); -} - -static void dce_v11_0_encoder_disable(struct drm_encoder *encoder) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig; - - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - - if (amdgpu_atombios_encoder_is_digital(encoder)) { - if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) - dce_v11_0_afmt_enable(encoder, false); - dig = amdgpu_encoder->enc_priv; - dig->dig_encoder = -1; - } - amdgpu_encoder->active_device = 0; -} - -/* these are handled by the primary encoders */ -static void dce_v11_0_ext_prepare(struct drm_encoder *encoder) -{ - -} - -static void 
dce_v11_0_ext_commit(struct drm_encoder *encoder) -{ - -} - -static void -dce_v11_0_ext_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - -} - -static void dce_v11_0_ext_disable(struct drm_encoder *encoder) -{ - -} - -static void -dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) -{ - -} - -static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { - .dpms = dce_v11_0_ext_dpms, - .prepare = dce_v11_0_ext_prepare, - .mode_set = dce_v11_0_ext_mode_set, - .commit = dce_v11_0_ext_commit, - .disable = dce_v11_0_ext_disable, - /* no detect for TMDS/LVDS yet */ -}; - -static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = { - .dpms = amdgpu_atombios_encoder_dpms, - .mode_fixup = amdgpu_atombios_encoder_mode_fixup, - .prepare = dce_v11_0_encoder_prepare, - .mode_set = dce_v11_0_encoder_mode_set, - .commit = dce_v11_0_encoder_commit, - .disable = dce_v11_0_encoder_disable, - .detect = amdgpu_atombios_encoder_dig_detect, -}; - -static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = { - .dpms = amdgpu_atombios_encoder_dpms, - .mode_fixup = amdgpu_atombios_encoder_mode_fixup, - .prepare = dce_v11_0_encoder_prepare, - .mode_set = dce_v11_0_encoder_mode_set, - .commit = dce_v11_0_encoder_commit, - .detect = amdgpu_atombios_encoder_dac_detect, -}; - -static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder) -{ - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); - kfree(amdgpu_encoder->enc_priv); - drm_encoder_cleanup(encoder); - kfree(amdgpu_encoder); -} - -static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = { - .destroy = dce_v11_0_encoder_destroy, -}; - -static void dce_v11_0_encoder_add(struct amdgpu_device *adev, - uint32_t encoder_enum, - uint32_t supported_device, - u16 caps) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_encoder *encoder; - struct amdgpu_encoder *amdgpu_encoder; - - /* see if we already added it */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - amdgpu_encoder = to_amdgpu_encoder(encoder); - if (amdgpu_encoder->encoder_enum == encoder_enum) { - amdgpu_encoder->devices |= supported_device; - return; - } - - } - - /* add a new one */ - amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); - if (!amdgpu_encoder) - return; - - encoder = &amdgpu_encoder->base; - switch (adev->mode_info.num_crtc) { - case 1: - encoder->possible_crtcs = 0x1; - break; - case 2: - default: - encoder->possible_crtcs = 0x3; - break; - case 3: - encoder->possible_crtcs = 0x7; - break; - case 4: - encoder->possible_crtcs = 0xf; - break; - case 5: - encoder->possible_crtcs = 0x1f; - break; - case 6: - encoder->possible_crtcs = 0x3f; - break; - } - - amdgpu_encoder->enc_priv = NULL; - - amdgpu_encoder->encoder_enum = encoder_enum; - amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - amdgpu_encoder->devices = supported_device; - amdgpu_encoder->rmx_type = RMX_OFF; - amdgpu_encoder->underscan_type = UNDERSCAN_OFF; - amdgpu_encoder->is_ext_encoder = false; - amdgpu_encoder->caps = caps; - - switch (amdgpu_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - drm_encoder_helper_add(encoder, 
&dce_v11_0_dac_helper_funcs); - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - amdgpu_encoder->rmx_type = RMX_FULL; - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); - } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); - } else { - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); - } - drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); - break; - case ENCODER_OBJECT_ID_SI170B: - case ENCODER_OBJECT_ID_CH7303: - case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: - case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: - case ENCODER_OBJECT_ID_TITFP513: - case ENCODER_OBJECT_ID_VT1623: - case ENCODER_OBJECT_ID_HDMI_SI1930: - case ENCODER_OBJECT_ID_TRAVIS: - case ENCODER_OBJECT_ID_NUTMEG: - /* these are handled by the primary encoders */ - amdgpu_encoder->is_ext_encoder = true; - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); - else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); - else - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); - break; - } -} - -static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { - .bandwidth_update = &dce_v11_0_bandwidth_update, - .vblank_get_counter = &dce_v11_0_vblank_get_counter, - .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, - .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, - .hpd_sense = &dce_v11_0_hpd_sense, - .hpd_set_polarity = &dce_v11_0_hpd_set_polarity, - .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg, - .page_flip = &dce_v11_0_page_flip, - .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, - .add_encoder = &dce_v11_0_encoder_add, - .add_connector = &amdgpu_connector_add, -}; - -static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) -{ - adev->mode_info.funcs = &dce_v11_0_display_funcs; -} - -static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = { - .set = dce_v11_0_set_crtc_irq_state, - .process = dce_v11_0_crtc_irq, -}; - -static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = { - .set = dce_v11_0_set_pageflip_irq_state, - .process = dce_v11_0_pageflip_irq, -}; - -static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = { - .set = dce_v11_0_set_hpd_irq_state, - .process = dce_v11_0_hpd_irq, -}; - -static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) -{ - if (adev->mode_info.num_crtc > 0) - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; - else - adev->crtc_irq.num_types = 0; - adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs; - - adev->pageflip_irq.num_types = adev->mode_info.num_crtc; - adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs; 
- - adev->hpd_irq.num_types = adev->mode_info.num_hpd; - adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; -} - -const struct amdgpu_ip_block_version dce_v11_0_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_DCE, - .major = 11, - .minor = 0, - .rev = 0, - .funcs = &dce_v11_0_ip_funcs, -}; - -const struct amdgpu_ip_block_version dce_v11_2_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_DCE, - .major = 11, - .minor = 2, - .rev = 0, - .funcs = &dce_v11_0_ip_funcs, -}; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h deleted file mode 100644 index 0d878ca3acba..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef __DCE_V11_0_H__ -#define __DCE_V11_0_H__ - -extern const struct amdgpu_ip_block_version dce_v11_0_ip_block; -extern const struct amdgpu_ip_block_version dce_v11_2_ip_block; - -void dce_v11_0_disable_dce(struct amdgpu_device *adev); - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 264183ab24ec..8841d7213de4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4075,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *f = NULL; unsigned int index; uint64_t gpu_addr; - volatile uint32_t *cpu_ptr; + uint32_t *cpu_ptr; long r; memset(&ib, 0, sizeof(ib)); @@ -4322,8 +4322,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; int ctx_reg_offset; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 3d9c045a8a64..66c47c466532 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -603,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *f = NULL; unsigned index; uint64_t gpu_addr; - volatile uint32_t *cpu_ptr; + uint32_t *cpu_ptr; long r; /* MES KIQ fw hasn't indirect buffer support for now */ @@ -850,8 +850,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; int ctx_reg_offset; @@ -1654,6 +1653,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) } } break; + case IP_VERSION(11, 0, 1): + case IP_VERSION(11, 0, 4): + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); + if (adev->gfx.pfp_fw_version >= 102 && + adev->gfx.mec_fw_version >= 66 && + adev->mes.fw_version[0] >= 128) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; case IP_VERSION(11, 5, 0): case IP_VERSION(11, 5, 1): adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 5dbc5dbc694a..710ec9c34e43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -497,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *f = NULL; unsigned index; uint64_t gpu_addr; - volatile uint32_t *cpu_ptr; + uint32_t *cpu_ptr; long r; /* MES KIQ fw hasn't indirect buffer support for now */ @@ -685,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0, clustercount = 0, i; const struct cs_section_def *sect = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 
70d7a1f434c4..7693b7953426 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -86,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin"); MODULE_FIRMWARE("amdgpu/hainan_rlc.bin"); static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev); -static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); +static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer); //static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev); static void gfx_v6_0_init_pg(struct amdgpu_device *adev); @@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) { const u32 *src_ptr; - volatile u32 *dst_ptr; + u32 *dst_ptr; u32 dws; u64 reg_list_mc_addr; const struct cs_section_def *cs_data; @@ -2855,8 +2855,7 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2aa323dab34e..5976ed55d9db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -883,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = { }; static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); -static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); +static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer); static void gfx_v7_0_init_pg(struct amdgpu_device *adev); static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); @@ -3882,8 +3882,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 367449d8061b..0856ff65288c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1220,8 +1220,7 @@ out: return err; } -static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index a6ff9a137a83..dd19a97436db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1648,8 +1648,7 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev) return count; } -static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, - volatile u32 *buffer) +static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) { u32 count = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index 76d3c40735b0..404cc8c2ff2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -337,7 +337,7 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, int vmid, i; if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready && - (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x81) { + (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) { 
struct mes_inv_tlbs_pasid_input input = {0}; input.pasid = pasid; input.flush_type = flush_type; @@ -521,6 +521,7 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev, *flags &= ~AMDGPU_PTE_NOALLOC; if (vm_flags & AMDGPU_VM_PAGE_PRT) { + *flags |= AMDGPU_PTE_PRT_GFX12; *flags |= AMDGPU_PTE_SNOOPED; *flags |= AMDGPU_PTE_SYSTEM; *flags |= AMDGPU_PTE_IS_PTE; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 8404695eb13f..0d1dd587db5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1834,11 +1834,19 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev) static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { + static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; + u32 vram_info; + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + + if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { + vram_info = RREG32(regBIF_BIOS_SCRATCH_4); + adev->gmc.vram_vendor = vram_info & 0xF; + } } static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index 9e428e669ada..b5bb7f4d607c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { .nop = PACKET0(0x81ff, 0), .support_64bit_ptrs = false, .no_user_fence = true, - .extra_dw = 64, + .extra_bytes = 256, .get_rptr = jpeg_v1_0_decode_ring_get_rptr, .get_wptr = jpeg_v1_0_decode_ring_get_wptr, .set_wptr = jpeg_v1_0_decode_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 58239c405fda..27c76bd424cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -23,7 +23,6 @@ #include "amdgpu.h" #include "amdgpu_jpeg.h" -#include "amdgpu_cs.h" #include "amdgpu_pm.h" #include "soc15.h" #include "soc15d.h" @@ -806,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { .get_rptr = jpeg_v2_0_dec_ring_get_rptr, .get_wptr = jpeg_v2_0_dec_ring_get_wptr, .set_wptr = jpeg_v2_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + @@ -854,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = { .rev = 0, .funcs = &jpeg_v2_0_ip_funcs, }; - -/** - * jpeg_v2_dec_ring_parse_cs - command submission parser - * - * @parser: Command submission parser context - * @job: the job to parse - * @ib: the IB to parse - * - * Parse the command stream, return -EINVAL for invalid packet, - * 0 otherwise - */ -int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, - struct amdgpu_job *job, - struct amdgpu_ib *ib) -{ - u32 i, reg, res, cond, type; - struct amdgpu_device *adev = parser->adev; - - for (i = 0; i < ib->length_dw ; i += 2) { - reg = CP_PACKETJ_GET_REG(ib->ptr[i]); - res = CP_PACKETJ_GET_RES(ib->ptr[i]); - cond = CP_PACKETJ_GET_COND(ib->ptr[i]); - type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); - - if (res) /* only support 0 at the moment */ - return -EINVAL; - - switch (type) { - case PACKETJ_TYPE0: - if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || - reg > JPEG_REG_RANGE_END) { - 
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); - return -EINVAL; - } - break; - case PACKETJ_TYPE3: - if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || - reg > JPEG_REG_RANGE_END) { - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); - return -EINVAL; - } - break; - case PACKETJ_TYPE6: - if (ib->ptr[i] == CP_PACKETJ_NOP) - continue; - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); - return -EINVAL; - default: - dev_err(adev->dev, "Unknown packet type %d !\n", type); - return -EINVAL; - } - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h index 63fadda7a673..654e43e83e2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -45,9 +45,6 @@ #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 -#define JPEG_REG_RANGE_START 0x4000 -#define JPEG_REG_RANGE_END 0x41c2 - void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, @@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count); -int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, - struct amdgpu_job *job, - struct amdgpu_ib *ib); extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 3e2c389242db..20983f126b49 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -696,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + @@ -727,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { .get_rptr = jpeg_v2_5_dec_ring_get_rptr, .get_wptr = jpeg_v2_5_dec_ring_get_wptr, .set_wptr = jpeg_v2_5_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index a44eb2667664..d1a011c40ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -597,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { .get_rptr = jpeg_v3_0_dec_ring_get_rptr, .get_wptr = jpeg_v3_0_dec_ring_get_wptr, .set_wptr = jpeg_v3_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index da3ee69f1a3b..33db2c1ae6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -762,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { .get_rptr = 
jpeg_v4_0_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index a78144773fab..aae7328973d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -1177,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 5d86e1d846eb..54fd9c800c40 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -807,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { .get_rptr = jpeg_v4_0_5_dec_ring_get_rptr, .get_wptr = jpeg_v4_0_5_dec_ring_get_wptr, .set_wptr = jpeg_v4_0_5_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index 34c70270ea1d..46bf15dce2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -683,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr, .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr, .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr, - .parse_cs = jpeg_v2_dec_ring_parse_cs, + .parse_cs = amdgpu_jpeg_dec_parse_cs, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index aee26f80bd53..2db9b2c63693 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -254,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type]; struct drm_amdgpu_userq_in *mqd_user = args_in; struct amdgpu_mqd_prop *userq_props; + struct amdgpu_gfx_shadow_info shadow_info; int r; /* Structure to initialize MQD for userqueue using generic MQD init function */ @@ -263,13 +264,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, return -ENOMEM; } - if (!mqd_user->wptr_va || !mqd_user->rptr_va || - !mqd_user->queue_va || mqd_user->queue_size == 0) { - DRM_ERROR("Invalid MQD parameters for userqueue\n"); - r = -EINVAL; - goto free_props; - } - r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size); if (r) { DRM_ERROR("Failed to create MQD object for userqueue\n"); @@ -286,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->doorbell_index = queue->doorbell_index; userq_props->fence_address = queue->fence_drv->gpu_addr; + if (adev->gfx.funcs->get_gfx_shadow_info) + adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); if (queue->queue_type == 
AMDGPU_HW_IP_COMPUTE) {
 		struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
 
@@ -302,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 			goto free_mqd;
 		}
 
+		if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
+		    max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
+			goto free_mqd;
+
 		userq_props->eop_gpu_addr = compute_mqd->eop_va;
 		userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
 		userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -329,6 +329,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 		userq_props->csa_addr = mqd_gfx_v11->csa_va;
 		userq_props->tmz_queue = mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+		if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
+		    shadow_info.shadow_size))
+			goto free_mqd;
+
 		kfree(mqd_gfx_v11);
 	} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
 		struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -346,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 			goto free_mqd;
 		}
 
+		if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
+		    shadow_info.csa_size))
+			goto free_mqd;
+
 		userq_props->csa_addr = mqd_sdma_v11->csa_va;
 		kfree(mqd_sdma_v11);
 	}
@@ -395,10 +404,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
 	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
 }
 
+static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
+			     struct amdgpu_usermode_queue *queue)
+{
+	struct amdgpu_device *adev = uq_mgr->adev;
+	struct mes_suspend_gang_input queue_input;
+	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+	signed long timeout = 2100000; /* 2100 ms */
+	u64 fence_gpu_addr;
+	u32 fence_offset;
+	u64 *fence_ptr;
+	int i, r;
+
+	if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
+		return 0;
+	r = amdgpu_device_wb_get(adev, &fence_offset);
+	if (r)
+		return r;
+
+	fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
+	fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+	*fence_ptr = 0;
+
+	memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
+	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+	queue_input.suspend_fence_addr = fence_gpu_addr;
+	queue_input.suspend_fence_value = 1;
+	amdgpu_mes_lock(&adev->mes);
+	r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
+	amdgpu_mes_unlock(&adev->mes);
+	if (r) {
+		DRM_ERROR("Failed to suspend gang: %d\n", r);
+		goto out;
+	}
+
+	for (i = 0; i < timeout; i++) {
+		if (*fence_ptr == 1)
+			goto out;
+		udelay(1);
+	}
+	r = -ETIMEDOUT;
+
+out:
+	amdgpu_device_wb_free(adev, fence_offset);
+	return r;
+}
+
+static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
+			     struct amdgpu_usermode_queue *queue)
+{
+	struct amdgpu_device *adev = uq_mgr->adev;
+	struct mes_resume_gang_input queue_input;
+	struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+	int r;
+
+	if (queue->state == AMDGPU_USERQ_STATE_HUNG)
+		return -EINVAL;
+	if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
+		return 0;
+
+	memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
+	queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+	amdgpu_mes_lock(&adev->mes);
+	r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
+	amdgpu_mes_unlock(&adev->mes);
+	if (r)
+		dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
+	return r;
+}
+
 const struct amdgpu_userq_funcs userq_mes_funcs = {
 	.mqd_create = mes_userq_mqd_create,
 	.mqd_destroy = mes_userq_mqd_destroy,
 	.unmap = mes_userq_unmap,
 	.map = mes_userq_map,
 	.detect_and_reset = 
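The suspend path in mes_userq_preempt() above is a fire-and-poll handshake: the driver hands MES a fence address and an expected value, then busy-waits (up to 2100 ms at 1 us per step) for the firmware to write that value back. Reduced to a minimal sketch with a hypothetical helper name (not amdgpu API), assuming the usual kernel udelay()/READ_ONCE() primitives:

/* Hypothetical helper, not part of amdgpu: poll a CPU-visible fence slot. */
static int poll_suspend_fence(u64 *fence_ptr, u64 expected,
			      unsigned long timeout_us)
{
	unsigned long i;

	for (i = 0; i < timeout_us; i++) {
		if (READ_ONCE(*fence_ptr) == expected)
			return 0;	/* firmware acked the suspend */
		udelay(1);
	}
	return -ETIMEDOUT;	/* gang never reported preempted */
}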
mes_userq_detect_and_reset, + .preempt = mes_userq_preempt, + .restore = mes_userq_restore, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 457972aa5632..e5282a5d05d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -202,6 +202,9 @@ send_request: case IDH_REQ_RAS_CPER_DUMP: event = IDH_RAS_CPER_DUMP_READY; break; + case IDH_REQ_RAS_CHK_CRITI: + event = IDH_REQ_RAS_CHK_CRITI_READY; + break; default: break; } @@ -556,6 +559,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev) return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES); } +static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr) +{ + uint32_t addr_hi, addr_lo; + + addr_hi = (uint32_t)(addr >> 32); + addr_lo = (uint32_t)(addr & 0xFFFFFFFF); + return xgpu_nv_send_access_requests_with_param( + adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -569,4 +582,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_ras_err_count = xgpu_nv_req_ras_err_count, .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump, .req_bad_pages = xgpu_nv_req_ras_bad_pages, + .req_ras_chk_criti = xgpu_nv_check_vf_critical_region }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 5808689562cc..c1083e5e41e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -43,6 +43,7 @@ enum idh_request { IDH_REQ_RAS_ERROR_COUNT = 203, IDH_REQ_RAS_CPER_DUMP = 204, IDH_REQ_RAS_BAD_PAGES = 205, + IDH_REQ_RAS_CHK_CRITI = 206 }; enum idh_event { @@ -62,6 +63,7 @@ enum idh_event { IDH_RAS_BAD_PAGES_READY = 15, IDH_RAS_BAD_PAGES_NOTIFICATION = 16, IDH_UNRECOV_ERR_NOTIFICATION = 17, + IDH_REQ_RAS_CHK_CRITI_READY = 18, IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index dd2d66090d23..68aef47254a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev) adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) DRM_ERROR("Failed to register hw i2c, err: %d\n", res); @@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev) void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev) { - struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus; - - i2c_del_adapter(control); adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 1e89ba153d9d..a316797875a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -193,7 +193,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode; if (amdgpu_vcnfw_log) { - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; amdgpu_vcn_fwlog_init(adev->vcn.inst); @@ -230,11 +230,11 @@ static int vcn_v1_0_sw_fini(struct 
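The hi/lo split in xgpu_nv_check_vf_critical_region() above is the usual packing of a 64-bit address into two 32-bit mailbox parameters; the kernel's generic upper_32_bits()/lower_32_bits() helpers express the same operation:

/* Equivalent formulation using the generic bit helpers. */
u32 addr_hi = upper_32_bits(addr);	/* (u32)(addr >> 32) */
u32 addr_lo = lower_32_bits(addr);	/* (u32)(addr & 0xFFFFFFFF) */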
amdgpu_ip_block *ip_block) jpeg_v1_0_sw_fini(ip_block); - r = amdgpu_vcn_sw_fini(adev, 0); + amdgpu_vcn_sw_fini(adev, 0); kfree(adev->vcn.ip_dump); - return r; + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index b115137ab2d6..8897dcc9c1a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -137,7 +137,7 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_ring *ring; int i, r; struct amdgpu_device *adev = ip_block->adev; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -252,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) { int r, idx; struct amdgpu_device *adev = ip_block->adev; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; if (drm_dev_enter(adev_to_drm(adev), &idx)) { fw_shared->present_flag_0 = 0; @@ -267,9 +267,9 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) amdgpu_vcn_sysfs_reset_mask_fini(adev); - r = amdgpu_vcn_sw_fini(adev, 0); + amdgpu_vcn_sw_fini(adev, 0); - return r; + return 0; } /** @@ -853,7 +853,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst) static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; int ret; @@ -1001,7 +1001,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; @@ -1308,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); if (!ret_code) { - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 904b94bc8693..cebee453871c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -277,7 +277,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << j)) continue; @@ -420,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) { int i, r, idx; struct amdgpu_device *adev = ip_block->adev; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { @@ -442,9 +442,7 @@ static int 
vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) r = amdgpu_vcn_suspend(adev, i); if (r) return r; - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; + amdgpu_vcn_sw_fini(adev, i); } return 0; @@ -1000,7 +998,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; int ret; @@ -1157,7 +1155,7 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared = + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; @@ -1669,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); if (!ret_code) { - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index f3085137ba08..d9cf8f0feeb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -191,7 +191,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) } for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -327,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -349,9 +349,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; + amdgpu_vcn_sw_fini(adev, i); } return 0; @@ -1031,7 +1029,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; int ret; @@ -1196,7 +1194,7 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; int j, k, r; @@ -1717,7 +1715,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t reg_data = 0; int ret_code; @@ -1836,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring) static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - volatile struct 
amdgpu_fw_shared *fw_shared; + struct amdgpu_fw_shared *fw_shared; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { /*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index bc9dfe5ffea7..3ae666522d57 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -148,7 +148,7 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block) static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); @@ -278,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -302,11 +302,8 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block) amdgpu_vcn_sysfs_reset_mask_fini(adev); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); return 0; } @@ -1000,7 +997,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; int ret; @@ -1140,7 +1137,7 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r; @@ -1357,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) struct mmsch_v4_0_cmd_end end = { {0} }; struct mmsch_v4_0_init_header header; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_fw_shared_rb_setup *rb_setup; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; @@ -1609,7 +1606,7 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; uint32_t tmp; int r = 0; @@ -1980,7 +1977,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, - .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata), + .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata), .get_rptr = vcn_v4_0_unified_ring_get_rptr, .get_wptr = vcn_v4_0_unified_ring_get_wptr, .set_wptr = vcn_v4_0_unified_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 7b93a275ec4f..eacf4e93ba2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -212,7 +212,11 @@ static int vcn_v4_0_3_sw_init(struct 
amdgpu_ip_block *ip_block) ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + + /* There are no per-instance irq source IDs on 4.0.3, the IH + * packets use a separate field to differentiate instances. + */ + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0, AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score); if (r) @@ -259,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(&adev->ddev, &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; @@ -279,11 +283,8 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) amdgpu_vcn_sysfs_reset_mask_fini(adev); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); return 0; } @@ -844,7 +845,7 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared = + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; int vcn_inst, ret; @@ -1011,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev) struct mmsch_v4_0_cmd_end end = { {0} }; struct mmsch_v4_0_3_init_header header; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_fw_shared_rb_setup *rb_setup; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; @@ -1186,7 +1187,7 @@ static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; int j, k, r, vcn_inst; uint32_t tmp; @@ -1396,7 +1397,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; int r = 0, vcn_inst; uint32_t tmp; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 6dbf33b26ee2..b107ee80e472 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -149,7 +149,7 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) int i, r; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -249,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -270,9 +270,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; + amdgpu_vcn_sw_fini(adev, i); } return 0; @@ -912,7 
+910,7 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; int ret; @@ -1049,7 +1047,7 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r; @@ -1268,7 +1266,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn4_fw_shared *fw_shared; + struct amdgpu_vcn4_fw_shared *fw_shared; uint32_t tmp; int r = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index 536f06b81706..0202df5db1e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -129,7 +129,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) int i, r; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -211,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; if (adev->vcn.harvest_config & (1 << i)) continue; @@ -232,11 +232,8 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block) amdgpu_vcn_sysfs_reset_mask_fini(adev); - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); return 0; } @@ -695,7 +692,7 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; uint32_t tmp; int ret; @@ -805,7 +802,7 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r; @@ -998,7 +995,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; uint32_t tmp; int r = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 4b01e35ad7ef..9c281ba6bced 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -226,7 +226,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared 
*fw_shared; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; @@ -245,14 +245,11 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) return r; } - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - r = amdgpu_vcn_sw_fini(adev, i); - if (r) - return r; - } - amdgpu_vcn_sysfs_reset_mask_fini(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + amdgpu_vcn_sw_fini(adev, i); + return 0; } @@ -643,7 +640,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, { struct amdgpu_device *adev = vinst->adev; int inst_idx = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared = + struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; @@ -779,8 +776,8 @@ static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev) struct mmsch_v5_0_cmd_end end = { {0} }; struct mmsch_v5_0_init_header header; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_fw_shared_rb_setup *rb_setup; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; @@ -954,7 +951,7 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; struct amdgpu_ring *ring; uint32_t tmp; int j, k, r, vcn_inst; @@ -1146,7 +1143,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst) { struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; - volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_vcn5_fw_shared *fw_shared; uint32_t tmp; int r = 0, vcn_inst; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 9b3510e53112..a611a7345125 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -67,7 +67,6 @@ #include "sdma_v2_4.h" #include "sdma_v3_0.h" #include "dce_v10_0.h" -#include "dce_v11_0.h" #include "iceland_ih.h" #include "tonga_ih.h" #include "cz_ih.h" @@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif - else - amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); break; @@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif - else - amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block); #if defined(CONFIG_DRM_AMD_ACP) @@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif - else - amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); #if defined(CONFIG_DRM_AMD_ACP) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 8535a52a62ca..0f0719528bcc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -521,15 +521,10 
@@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, cu_mask_size = sizeof(uint32_t) * (max_num_cus/32); } - minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL); - if (!minfo.cu_mask.ptr) - return -ENOMEM; - - retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size); - if (retval) { + minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size); + if (IS_ERR(minfo.cu_mask.ptr)) { pr_debug("Could not copy CU mask from userspace"); - retval = -EFAULT; - goto out; + return PTR_ERR(minfo.cu_mask.ptr); } mutex_lock(&p->mutex); @@ -538,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, mutex_unlock(&p->mutex); -out: kfree(minfo.cu_mask.ptr); return retval; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 7e749f9b6d69..349c351e242b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1550,6 +1550,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id) return ret; } +int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) +{ + struct kfd_node *node; + int i, r; + + if (!kfd->init_complete) + return 0; + + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + r = node->dqm->ops.unhalt(node->dqm); + if (r) { + dev_err(kfd_device, "Error in starting scheduler\n"); + return r; + } + } + return 0; +} + int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) { struct kfd_node *node; @@ -1567,6 +1586,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) return node->dqm->ops.halt(node->dqm); } +int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) +{ + struct kfd_node *node; + int i, r; + + if (!kfd->init_complete) + return 0; + + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + r = node->dqm->ops.halt(node->dqm); + if (r) + return r; + } + return 0; +} + bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) { struct kfd_node *node; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 86315ecb6f1d..59a5a3fea65d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -39,22 +39,22 @@ #endif #define dev_fmt(fmt) "kfd_migrate: " fmt -static uint64_t -svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr) +static u64 +svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr) { return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM); } static int -svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, - dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags) +svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages, + dma_addr_t *addr, u64 *gart_addr, u64 flags) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; unsigned int num_dw, num_bytes; struct dma_fence *fence; - uint64_t src_addr, dst_addr; - uint64_t pte_flags; + u64 src_addr, dst_addr; + u64 pte_flags; void *cpu_addr; int r; @@ -123,15 +123,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, static int svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys, - uint64_t *vram, uint64_t npages, + u64 *vram, u64 npages, enum MIGRATION_COPY_DIR direction, struct dma_fence **mfence) { - const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE; + const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - uint64_t gart_s, gart_d; + u64 gart_s, gart_d; struct 
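The kfd_ioctl_set_cu_mask() change above is the standard memdup_user() idiom, which folds the kzalloc()/copy_from_user() pair into a single call that reports failure through an ERR_PTR instead of a separate -EFAULT path. The shape of the pattern, with a hypothetical buffer (not the KFD cu_mask type):

/* Sketch of the memdup_user() idiom; demo_copy_args() is hypothetical. */
static int demo_copy_args(const void __user *uptr, size_t size)
{
	u32 *buf = memdup_user(uptr, size);	/* allocate + copy in one step */

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM, or -EFAULT from the copy */

	/* ... consume buf ... */
	kfree(buf);
	return 0;
}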
dma_fence *next; - uint64_t size; + u64 size; int r; mutex_lock(&adev->mman.gtt_window_lock); @@ -261,30 +261,42 @@ static void svm_migrate_put_sys_page(unsigned long addr) put_page(page); } -static long +static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) +{ + unsigned long mpages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->dst[i] & MIGRATE_PFN_VALID && + migrate->src[i] & MIGRATE_PFN_MIGRATE) + mpages++; + } + return mpages; +} + +static int svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, - dma_addr_t *scratch, uint64_t ttm_res_offset) + dma_addr_t *scratch, u64 ttm_res_offset) { - uint64_t npages = migrate->npages; + u64 npages = migrate->npages; struct amdgpu_device *adev = node->adev; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; - long mpages; + u64 mpages = 0; dma_addr_t *src; - uint64_t *dst; - uint64_t i, j; + u64 *dst; + u64 i, j; int r; pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start, prange->last, ttm_res_offset); src = scratch; - dst = (uint64_t *)(scratch + npages); + dst = (u64 *)(scratch + npages); amdgpu_res_first(prange->ttm_res, ttm_res_offset, npages << PAGE_SHIFT, &cursor); - mpages = 0; for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) { struct page *spage; @@ -345,14 +357,13 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, out_free_vram_pages: if (r) { pr_debug("failed %d to copy memory to vram\n", r); - while (i-- && mpages) { + for (i = 0; i < npages && mpages; i++) { if (!dst[i]) continue; svm_migrate_put_vram_page(adev, dst[i]); migrate->dst[i] = 0; mpages--; } - mpages = r; } #ifdef DEBUG_FORCE_MIXED_DOMAINS @@ -370,22 +381,22 @@ out_free_vram_pages: } #endif - return mpages; + return r; } static long svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, - struct vm_area_struct *vma, uint64_t start, - uint64_t end, uint32_t trigger, uint64_t ttm_res_offset) + struct vm_area_struct *vma, u64 start, + u64 end, uint32_t trigger, u64 ttm_res_offset) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); - uint64_t npages = (end - start) >> PAGE_SHIFT; + u64 npages = (end - start) >> PAGE_SHIFT; struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; unsigned long cpages = 0; - long mpages = 0; + unsigned long mpages = 0; dma_addr_t *scratch; void *buf; int r = -ENOMEM; @@ -398,7 +409,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev); buf = kvcalloc(npages, - 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t), + 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t), GFP_KERNEL); if (!buf) goto out; @@ -431,17 +442,15 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, else pr_debug("0x%lx pages collected\n", cpages); - mpages = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); + r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); - if (mpages >= 0) - pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n", - mpages, cpages, migrate.npages); - else - r = mpages; + mpages = svm_migrate_successful_pages(&migrate); + 
pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n", + mpages, cpages, migrate.npages); svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages); @@ -451,13 +460,14 @@ out_free: start >> PAGE_SHIFT, end >> PAGE_SHIFT, 0, node->id, trigger, r); out: - if (!r && mpages > 0) { + if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_in, pdd->page_in + mpages); - } - return r ? r : mpages; + return mpages; + } + return r; } /** @@ -481,7 +491,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, { unsigned long addr, start, end; struct vm_area_struct *vma; - uint64_t ttm_res_offset; + u64 ttm_res_offset; struct kfd_node *node; unsigned long mpages = 0; long r = 0; @@ -568,18 +578,17 @@ static void svm_migrate_page_free(struct page *page) } } -static long +static int svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, - dma_addr_t *scratch, uint64_t npages) + dma_addr_t *scratch, u64 npages) { struct device *dev = adev->dev; - uint64_t *src; + u64 *src; dma_addr_t *dst; struct page *dpage; - long mpages; - uint64_t i = 0, j; - uint64_t addr; + u64 i = 0, j; + u64 addr; int r = 0; pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, @@ -587,10 +596,9 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, addr = migrate->start; - src = (uint64_t *)(scratch + npages); + src = (u64 *)(scratch + npages); dst = scratch; - mpages = 0; for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) { struct page *spage; @@ -639,7 +647,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, dst[i] >> PAGE_SHIFT, page_to_pfn(dpage)); migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)); - mpages++; j++; } @@ -649,17 +656,13 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, out_oom: if (r) { pr_debug("failed %d copy to ram\n", r); - while (i-- && mpages) { - if (!migrate->dst[i]) - continue; + while (i--) { svm_migrate_put_sys_page(dst[i]); migrate->dst[i] = 0; - mpages--; } - mpages = r; } - return mpages; + return r; } /** @@ -681,13 +684,13 @@ out_oom: */ static long svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, - struct vm_area_struct *vma, uint64_t start, uint64_t end, + struct vm_area_struct *vma, u64 start, u64 end, uint32_t trigger, struct page *fault_page) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); - uint64_t npages = (end - start) >> PAGE_SHIFT; + u64 npages = (end - start) >> PAGE_SHIFT; unsigned long cpages = 0; - long mpages = 0; + unsigned long mpages = 0; struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; @@ -707,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; buf = kvcalloc(npages, - 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t), + 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t), GFP_KERNEL); if (!buf) goto out; @@ -741,15 +744,13 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, else pr_debug("0x%lx pages collected\n", cpages); - mpages = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, + r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, scratch, npages); migrate_vma_pages(&migrate); - if (mpages >= 0) - pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n", + mpages = 
svm_migrate_successful_pages(&migrate); + pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n", mpages, cpages, migrate.npages); - else - r = mpages; svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); @@ -762,7 +763,7 @@ out_free: start >> PAGE_SHIFT, end >> PAGE_SHIFT, node->id, 0, trigger, r); out: - if (!r && mpages > 0) { + if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_out, pdd->page_out + mpages); @@ -846,8 +847,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, if (r >= 0) { WARN_ONCE(prange->vram_pages < mpages, - "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).", - prange->vram_pages, mpages); + "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).", + prange->vram_pages, mpages); prange->vram_pages -= mpages; /* prange does not have vram page set its actual_loc to system diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 62defeccbb5c..7012b2c692b3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2081,6 +2081,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); + adev->dm.restore_backlight = true; + adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); if (!adev->dm.hpd_rx_offload_wq) { drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); @@ -2945,7 +2947,7 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev) return -ENOMEM; } - r = i2c_add_adapter(&oem_i2c->base); + r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base); if (r) { drm_info(adev_to_drm(adev), "Failed to register oem i2c\n"); kfree(oem_i2c); @@ -2957,17 +2959,6 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev) return 0; } -static void dm_oem_i2c_hw_fini(struct amdgpu_device *adev) -{ - struct amdgpu_display_manager *dm = &adev->dm; - - if (dm->oem_i2c) { - i2c_del_adapter(&dm->oem_i2c->base); - kfree(dm->oem_i2c); - dm->oem_i2c = NULL; - } -} - /** * dm_hw_init() - Initialize DC device * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. @@ -3018,8 +3009,6 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; - dm_oem_i2c_hw_fini(adev); - amdgpu_dm_hpd_fini(adev); amdgpu_dm_irq_fini(adev); @@ -3047,14 +3036,20 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); - if (enable) { - if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) - rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); - } else - rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); - - if (rc) - drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); + if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) { + if (enable) { + if (amdgpu_dm_crtc_vrr_active( + to_dm_crtc_state(acrtc->base.state))) + rc = amdgpu_dm_crtc_set_vupdate_irq( + &acrtc->base, true); + } else + rc = amdgpu_dm_crtc_set_vupdate_irq( + &acrtc->base, false); + + if (rc) + drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", + enable ? 
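The fallback handling added to create_validate_stream_for_sink() below amounts to a two-level search: every color depth is tried at the current pixel encoding before the encoding itself is downgraded (RGB, then YUV422, then YUV420). A compact sketch of that ladder, assuming a stand-in validate() predicate rather than the real DC validation call:

/* Illustrative only: enum fmt and validate() are stand-ins, not DC API. */
enum fmt { FMT_RGB, FMT_YUV422, FMT_YUV420 };
bool validate(enum fmt f, int bpc);

static bool pick_output_format(int max_bpc, enum fmt *out, int *out_bpc)
{
	enum fmt f;
	int bpc;

	for (f = FMT_RGB; f <= FMT_YUV420; f++) {
		for (bpc = max_bpc; bpc >= 6; bpc -= 2) {
			if (validate(f, bpc)) {	/* first mode that fits the link wins */
				*out = f;
				*out_bpc = bpc;
				return true;
			}
		}
	}
	return false;
}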
"en" : "dis"); + } irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; /* During gpu-reset we disable and then enable vblank irq, so @@ -3443,6 +3438,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); dc_resume(dm->dc); + adev->dm.restore_backlight = true; amdgpu_dm_irq_resume_early(adev); @@ -6427,6 +6423,10 @@ static void fill_stream_properties_from_drm_display_mode( && aconnector && aconnector->force_yuv420_output) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422) + && aconnector + && aconnector->force_yuv422_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -7384,10 +7384,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); drm_connector_unregister(connector); drm_connector_cleanup(connector); - if (aconnector->i2c) { - i2c_del_adapter(&aconnector->i2c->base); - kfree(aconnector->i2c); - } kfree(aconnector->dm_dp_aux.aux.name); kfree(connector); @@ -7687,6 +7683,7 @@ create_validate_stream_for_sink(struct drm_connector *connector, bpc_limit = 8; do { + drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc); stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, requested_bpc); @@ -7722,16 +7719,41 @@ create_validate_stream_for_sink(struct drm_connector *connector, } while (stream == NULL && requested_bpc >= bpc_limit); - if ((dc_result == DC_FAIL_ENC_VALIDATE || - dc_result == DC_EXCEED_DONGLE_CAP) && - !aconnector->force_yuv420_output) { - DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", - __func__, __LINE__); - - aconnector->force_yuv420_output = true; + switch (dc_result) { + /* + * If we failed to validate DP bandwidth stream with the requested RGB color depth, + * we try to fallback and configure in order: + * YUV422 (8bpc, 6bpc) + * YUV420 (8bpc, 6bpc) + */ + case DC_FAIL_ENC_VALIDATE: + case DC_EXCEED_DONGLE_CAP: + case DC_NO_DP_LINK_BANDWIDTH: + /* recursively entered twice and already tried both YUV422 and YUV420 */ + if (aconnector->force_yuv422_output && aconnector->force_yuv420_output) + break; + /* first failure; try YUV422 */ + if (!aconnector->force_yuv422_output) { + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n", + __func__, __LINE__, dc_result); + aconnector->force_yuv422_output = true; + /* recursively entered and YUV422 failed, try YUV420 */ + } else if (!aconnector->force_yuv420_output) { + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n", + __func__, __LINE__, dc_result); + aconnector->force_yuv420_output = true; + } stream = create_validate_stream_for_sink(connector, drm_mode, - dm_state, old_stream); + dm_state, old_stream); + aconnector->force_yuv422_output = false; aconnector->force_yuv420_output = false; + break; + case DC_OK: + break; + default: + drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n", + __func__, __LINE__, dc_result); + break; } return stream; @@ -8719,7 +8741,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, } aconnector->i2c = i2c; - res = i2c_add_adapter(&i2c->base); + res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base); if (res) { drm_err(adev_to_drm(dm->adev), "Failed to 
register hw i2c %d\n", link->link_index); @@ -8817,7 +8839,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, static void manage_dm_interrupts(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dm_crtc_state *acrtc_state) -{ +{ /* + * We cannot be sure that the frontend index maps to the same + * backend index - some even map to more than one. + * So we have to go through the CRTC to find the right IRQ. + */ + int irq_type = amdgpu_display_crtc_idx_to_irq_type( + adev, + acrtc->crtc_id); + struct drm_device *dev = adev_to_drm(adev); + struct drm_vblank_crtc_config config = {0}; struct dc_crtc_timing *timing; int offdelay; @@ -8870,7 +8901,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, drm_crtc_vblank_on_config(&acrtc->base, &config); + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 2, 0): + if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n"); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n"); +#endif + } + } else { + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/ + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 2, 0): +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n"); +#endif + if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type)) + drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n"); + } + drm_crtc_vblank_off(&acrtc->base); } } @@ -9892,7 +9951,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; - bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -10012,7 +10070,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; - set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -10069,13 +10126,16 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, * to fix a flicker issue. * It will cause the dm->actual_brightness is not the current panel brightness * level. (the dm->brightness is the correct panel level) - * So we set the backlight level with dm->brightness value after set mode + * So we set the backlight level with dm->brightness value after initial + * set mode. Use restore_backlight flag to avoid setting backlight level + * for every subsequent mode set. 
*/ - if (set_backlight_level) { + if (dm->restore_backlight) { for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i]) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } + dm->restore_backlight = false; } } @@ -10788,6 +10848,8 @@ static void get_freesync_config_for_crtc( } else { config.state = VRR_STATE_INACTIVE; } + } else { + config.state = VRR_STATE_UNSUPPORTED; } out: new_crtc_state->freesync_config = config; @@ -12689,7 +12751,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, dm_con_state = to_dm_connector_state(connector->state); - if (!adev->dm.freesync_module) + if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version)) goto update; edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 159f8ded0439..c41c0ee4687f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -631,6 +631,13 @@ struct amdgpu_display_manager { u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; /** + * @restore_backlight: + * + * Flag to indicate whether to restore backlight after modeset. + */ + bool restore_backlight; + + /** * @aux_hpd_discon_quirk: * * quirk for hpd discon while aux is on-going. @@ -799,6 +806,7 @@ struct amdgpu_dm_connector { bool fake_enable; bool force_yuv420_output; + bool force_yuv422_output; struct dsc_preferred_settings dsc_settings; union dp_downstream_port_present mst_downstream_port_present; /* Cached display modes */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 45feb404b097..466dccb355d7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -317,13 +317,17 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) dc->config.disable_ips != DMUB_IPS_DISABLE_ALL && sr_supported && vblank->config.disable_immediate) drm_crtc_vblank_restore(crtc); + } - /* vblank irq on -> Only need vupdate irq in vrr mode */ - if (amdgpu_dm_crtc_vrr_active(acrtc_state)) - rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); - } else { - /* vblank irq off -> vupdate irq off */ - rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); + if (dc_supports_vrr(dm->dc->ctx->dce_version)) { + if (enable) { + /* vblank irq on -> Only need vupdate irq in vrr mode */ + if (amdgpu_dm_crtc_vrr_active(acrtc_state)) + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); + } else { + /* vblank irq off -> vupdate irq off */ + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); + } } if (rc) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index eef51652ca35..08f629c64df3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -146,7 +146,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64 if (*cap - *size < 1) { uint64_t new_cap = *cap * 2; - uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); + uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL); if (!new_mods) { kfree(*mods); @@ -732,7 +732,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig if (adev->family < AMDGPU_FAMILY_AI) return 0; - *mods = kmalloc(capacity * sizeof(uint64_t), 
GFP_KERNEL); + *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL); if (plane_type == DRM_PLANE_TYPE_CURSOR) { amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index e5771f490f2e..11b2ea6edf95 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -98,6 +98,7 @@ bool dm_pp_apply_display_requirements( const struct dm_pp_single_disp_config *dc_cfg = &pp_display_cfg->disp_configs[i]; adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; + adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock; } amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 82ea3fe5e764..80704d709e44 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -31,7 +31,7 @@ #include "amdgpu_dm.h" #include "modules/power/power_helpers.h" #include "dmub/inc/dmub_cmd.h" -#include "dc/inc/link.h" +#include "dc/inc/link_service.h" /* * amdgpu_dm_link_supports_replay() - check if the link supports replay diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 4071851f9e86..15cf13ec5302 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -28,7 +28,7 @@ #include "dccg.h" #include "clk_mgr_internal.h" #include "dc_state_priv.h" -#include "link.h" +#include "link_service.h" #include "dce100/dce_clk_mgr.h" #include "dce110/dce110_clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index 13cf415e38e5..d50b9440210e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -164,7 +164,7 @@ void dce110_fill_display_configs( stream->link->cur_link_settings.link_rate; cfg->link_settings.link_spread = stream->link->cur_link_settings.link_spread; - cfg->sym_clock = stream->phy_pix_clk; + cfg->pixel_clock = stream->phy_pix_clk; /* Round v_refresh*/ cfg->v_refresh = stream->timing.pix_clk_100hz * 100; cfg->v_refresh /= stream->timing.h_total; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index bc123f1884da..051052bd10c9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -47,7 +47,7 @@ #include "dcn30/dcn30_clk_mgr.h" #include "dc_dmub_srv.h" -#include "link.h" +#include "link_service.h" #include "logger_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 91d872d6d392..790bbd8235b1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -48,7 +48,7 @@ #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" -#include "link.h" +#include "link_service.h" #include "dcn314_smu.h" diff --git 
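The kmalloc() to kmalloc_array() conversions in amdgpu_dm_plane.c above add an overflow check on the element-count multiplication: the allocation fails cleanly instead of silently wrapping and under-allocating. In general form:

/* Sketch: overflow-checked array allocation. */
uint64_t *mods = kmalloc_array(capacity, sizeof(*mods), GFP_KERNEL);	/* NULL if capacity * sizeof(*mods) overflows */
if (!mods)
	return -ENOMEM;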
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index e4d22f74f986..b315ed91e010 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -46,7 +46,7 @@ #define DC_LOGGER \ clk_mgr->base.base.ctx->logger -#include "link.h" +#include "link_service.h" #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 49efea0c8fcf..1769b1f26e75 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -39,7 +39,7 @@ #include "dcn316_smu.h" #include "dm_helpers.h" #include "dc_dmub_srv.h" -#include "link.h" +#include "link_service.h" // DCN316 this is CLK1 instance #define MAX_INSTANCE 7 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 8376e2b0e73d..7da7b41bd092 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -33,7 +33,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #include "atomfirmware.h" #include "dcn32_smu13_driver_if.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index bb1ac12a2b09..86edf11b8c5a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -44,7 +44,7 @@ #include "dcn31/dcn31_clk_mgr.h" #include "dc_dmub_srv.h" -#include "link.h" +#include "link_service.h" #include "logger_types.h" #undef DC_LOGGER diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index 47ff4c965d76..47461f249e83 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -13,7 +13,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #include "atomfirmware.h" diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5963019d1e74..c4dd52ed377d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -60,7 +60,7 @@ #include "link_encoder.h" #include "link_enc_cfg.h" -#include "link.h" +#include "link_service.h" #include "dm_helpers.h" #include "mem_input.h" @@ -5622,8 +5622,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe); } } - - DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", + if (!dc->caps.is_apu) + DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2], 
subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 814f68d76257..a180f68f711c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -24,7 +24,7 @@ #include "link_enc_cfg.h" #include "resource.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER dc->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index b7a5de4ecb61..9acd30019717 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -33,8 +33,9 @@ * dc.h with detail interface documentation, then add function implementation * in this file which calls link functions. */ -#include "link.h" +#include "link_service.h" #include "dce/dce_i2c.h" + struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index) { if (link_index >= MAX_LINKS) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d712548b1927..cbca3c67f439 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -40,7 +40,7 @@ #include "virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" #include "link_enc_cfg.h" -#include "link.h" +#include "link_service.h" #include "clk_mgr.h" #include "dc_state_priv.h" #include "dc_stream_priv.h" @@ -95,7 +95,6 @@ #define DC_LOGGER \ dc->ctx->logger #define DC_LOGGER_INIT(logger) - #include "dml2/dml2_wrapper.h" #define UNABLE_TO_SPLIT -1 @@ -2149,7 +2148,7 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master, h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right + - otg_master->hblank_borrow; + otg_master->dsc_padding_params.dsc_hactive_padding; width = h_active / count; if (otg_master->stream_res.tg) @@ -4267,39 +4266,33 @@ fail: return res; } +#if defined(CONFIG_DRM_AMD_DC_FP) +#endif /* CONFIG_DRM_AMD_DC_FP */ + /** - * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context. + * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding. * @pipe_ctx: Pointer to the pipe context structure. * - * This function calculates the horizontal blanking borrow value for a given pipe context based on the + * This function calculates the timing parameters for a given pipe context based on the * display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less - * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the - * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow + * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the + * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding * value to 0. 
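 * For example (illustrative numbers only): h_active = 1000 with
 * num_slices_h = 3 gives a ceiling slice width of 334, so the padding
 * would be 3 * 334 - 1000 = 2 pixels, and it is kept only if
 * h_total minus h_active minus the padding is at least 32.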
*/ -static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) +static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx) { - uint32_t hactive; - uint32_t ceil_slice_width; struct dc_stream_state *stream = NULL; if (!pipe_ctx) return; stream = pipe_ctx->stream; + pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0; + pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0; - if (stream->timing.flags.DSC) { - hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + if (stream) + pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz; - /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/ - if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) { - ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1; - pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive; - - if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32) - pipe_ctx->hblank_borrow = 0; - } - } } /** @@ -4342,7 +4335,7 @@ enum dc_status dc_validate_global_state( /* Decide whether hblank borrow is needed and save it in pipe_ctx */ if (dc->debug.enable_hblank_borrow) - decide_hblank_borrow(pipe_ctx); + calculate_timing_params_for_dsc_with_padding(pipe_ctx); if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 883054bb18e7..c61300a7cb1c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -211,7 +211,7 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p return NULL; } - if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { + if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { dc_state_release(state); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 954a2786fbe2..9d15ba591772 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -55,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.349" +#define DC_VER "3.2.350" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 51e41aed7316..5a365bd19933 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -755,3 +755,8 @@ char *dce_version_to_string(const int version) return "Unknown"; } } + +bool dc_supports_vrr(const enum dce_version v) +{ + return v >= DCE_VERSION_8_0; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 7f57661433eb..55704d4457ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -128,7 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx); // Make spl input basic out info output_size width point to stream h active spl_in->basic_out.output_size.width = - stream->timing.h_addressable + stream->timing.h_border_left + 
stream->timing.h_border_right + pipe_ctx->hblank_borrow; + stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding; // Make spl input basic out info output_size height point to v active spl_in->basic_out.output_size.height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 65b979617b0c..9e2a473a8852 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -3,7 +3,7 @@ // Copyright 2024 Advanced Micro Devices, Inc. #include "dc.h" -#include "link.h" +#include "link_service.h" #include "dc_dmub_srv.h" #include "dmub/dmub_srv.h" #include "core_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c index 22e66b375a7f..d928b4dcf6b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c @@ -28,7 +28,7 @@ #include "dcn10_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #include "dcn30/dcn30_afmt.h" diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c index 0b47aeb60e79..bec0b4aaeb2b 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn20_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c index 9a92f73d5b7f..84cc2ddc52fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c @@ -37,7 +37,7 @@ #include "link_enc_cfg.h" #include "dc_dmub_srv.h" #include "dal_asic_id.h" -#include "link.h" +#include "link_service.h" #define CTX \ enc10->base.ctx diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c index ae81451a3a72..3e85e9c3d2cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c @@ -30,7 +30,7 @@ #include "dcn314_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c index 1a9bb614c41e..3523d1cdc1a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn32_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c index 6f30b6cc3c76..fd5d1dbf9dc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c @@ -29,7 +29,7 @@ #include "dcn35_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c index d5fa551dd3c9..99aab70ef3e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c @@ -32,7 +32,7 @@ #include "dcn401_dio_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 7b9c22c45453..7b398d4f4439 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -311,4 +311,6 @@ void dm_dtn_log_end(struct dc_context *ctx, char *dce_version_to_string(const int version); +bool dc_supports_vrr(const enum dce_version v); + #endif /* __DM_SERVICES_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index bf63da266a18..3b093b8699ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -127,7 +127,7 @@ struct dm_pp_single_disp_config { uint32_t src_height; uint32_t src_width; uint32_t v_refresh; - uint32_t sym_clock; /* HDMI only */ + uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */ struct dc_link_settings link_settings; /* DP only */ }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 2a2eaf6adf26..7aaf13bbd4e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -30,8 +30,7 @@ #include "dcn20/dcn20_resource.h" #include "dcn21/dcn21_resource.h" #include "clk_mgr/dcn21/rn_clk_mgr.h" - -#include "link.h" +#include "link_service.h" #include "dcn20_fpu.h" #include "dc_state_priv.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 18388fb00be8..8a0f128722b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -31,7 +31,7 @@ // We need this includes for WATERMARKS_* defines #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" #include "dcn30/dcn30_resource.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 5d73efa2f0c9..c9dd920744c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -31,7 +31,7 @@ #include "dml/dcn31/dcn31_fpu.h" #include "dml/dml_inline_defs.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 6f516af82956..8cda18ce1a76 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -10,7 +10,7 @@ #include "dml/dcn35/dcn35_fpu.h" #include "dml/dml_inline_defs.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 715f9019a33e..4b9b2e84d381 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -6529,7 +6529,7 @@ static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mo mode_lib->ms.TotImmediateFlipBytes = 0; for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) { - mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]; + mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]); if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) { mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]); } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index f6879e622271..bf5e7f4e0416 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -84,25 +84,29 @@ static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stre static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing, struct dc_stream_state *stream, + struct pipe_ctx *pipe_ctx, struct dml2_context *dml_ctx) { unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz; + uint32_t pix_clk_100hz; - timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding; timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; timing->h_front_porch = stream->timing.h_front_porch; timing->v_front_porch = stream->timing.v_front_porch; timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10; + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) + timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) timing->pixel_clock_khz *= 2; - timing->h_total = stream->timing.h_total; + timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding; timing->v_total = stream->timing.v_total; timing->h_sync_width = stream->timing.h_sync_width; timing->interlaced = stream->timing.flags.INTERLACE; hblank_start = stream->timing.h_total - stream->timing.h_front_porch; - timing->h_blank_end = hblank_start - stream->timing.h_addressable + timing->h_blank_end = hblank_start - 
stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding - stream->timing.h_border_left - stream->timing.h_border_right; if (hblank_start < stream->timing.h_addressable) @@ -121,8 +125,13 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf /* limit min refresh rate to DC cap */ min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz; if (stream->ctx->dc->caps.max_v_total != 0) { - min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), - (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream))); + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) { + pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; + } else { + pix_clk_100hz = stream->timing.pix_clk_100hz; + } + min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL), + (timing->h_total * (long long)calc_max_hardware_v_total(stream))); } timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz); @@ -173,21 +182,6 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf timing->vblank_nom = timing->v_total - timing->v_active; } -/** - * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration - * based on the pipe context. - * @timing: Pointer to the dml2_timing_cfg structure to be adjusted. - * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value. - * - * This function modifies the horizontal active and blank end timings by adding and subtracting - * the horizontal blanking borrow value from the pipe context, respectively. - */ -static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe) -{ - timing->h_active += pipe->hblank_borrow; - timing->h_blank_end -= pipe->hblank_borrow; -} - static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output, struct dc_stream_state *stream, const struct pipe_ctx *pipe) { @@ -487,7 +481,9 @@ static const struct scaler_data *get_scaler_data_for_plane( temp_pipe->plane_state = pipe->plane_state; temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps; temp_pipe->stream_res = pipe->stream_res; - temp_pipe->hblank_borrow = pipe->hblank_borrow; + temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding; + temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding; + temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz; dml_ctx->config.callbacks.build_scaling_params(temp_pipe); break; } @@ -755,8 +751,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s disp_cfg_stream_location = dml_dispcfg->num_streams++; ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); - populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); - adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); + populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx); 
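The combined effect of the dsc_padding_params plumbing on the DML timing can be summarized in a small standalone C sketch (illustrative only, not driver code; the struct mirrors the fields added to core_types.h in this series, and the helper name is hypothetical):

#include <stdint.h>

/* Field names mirror the dsc_padding_params struct added to
 * core_types.h in this series; the helper itself is hypothetical. */
struct dsc_padding_params {
	uint8_t dsc_hactive_padding;  /* pixels borrowed from hblank to hactive */
	uint32_t dsc_htotal_padding;
	uint32_t dsc_pix_clk_100hz;   /* pixel clock while padding is in use */
};

static void apply_dsc_padding(uint32_t *h_active, uint32_t *h_total,
			      uint32_t *h_blank_end, uint32_t *pixel_clock_khz,
			      const struct dsc_padding_params *p)
{
	*h_active += p->dsc_hactive_padding;    /* active region grows */
	*h_blank_end -= p->dsc_hactive_padding; /* blanking shrinks by the same amount */
	*h_total += p->dsc_htotal_padding;
	if (p->dsc_hactive_padding != 0)        /* padded timing carries its own clock */
		*pixel_clock_khz = p->dsc_pix_clk_100hz / 10;
}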
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index 798abb2b2e67..08f7f03b1023 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -224,7 +224,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); /* Populate stream, plane mappings and other fields in display config. */ + DC_FP_START(); result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); + DC_FP_END(); if (!result) return false; @@ -279,7 +281,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); mode_support->dml2_instance = dml_init->dml2_instance; + DC_FP_START(); dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); + DC_FP_END(); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h index 7de10a95cfdb..41adb1104d0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h @@ -16,9 +16,9 @@ struct dml2_instance; enum dml2_project_id { dml2_project_invalid = 0, - dml2_project_dcn4x_stage1 = 1, - dml2_project_dcn4x_stage2 = 2, - dml2_project_dcn4x_stage2_auto_drr_svp = 3, + dml2_project_dcn4x_stage1, + dml2_project_dcn4x_stage2, + dml2_project_dcn4x_stage2_auto_drr_svp, }; enum dml2_pstate_change_support { diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index bd1b9aef6d5c..89f0d999bf35 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -406,9 +406,10 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; + // Need to find the ceiling value for the slice width + dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. 
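The added line computes the slice width with the usual integer ceiling-division idiom, the same expression the kernel's DIV_ROUND_UP() macro expands to; a minimal self-contained sketch with illustrative values:

#include <assert.h>
#include <stdint.h>

/* Ceiling division for b > 0: (a + b - 1) / b. */
static inline uint32_t ceil_div_u32(uint32_t a, uint32_t b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	/* pic_width 1000 split into 3 slices: each slice is 334 wide,
	 * not 333, so the configured slices always cover the full
	 * picture width. */
	assert(ceil_div_u32(1000, 3) == 334);
	assert(ceil_div_u32(999, 3) == 333); /* exact division unchanged */
	return 0;
}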
- dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h; dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height); diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 1313a7c5d87b..73a1e6a03719 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -28,7 +28,7 @@ #include "include/hdcp_msg_types.h" #include "include/signal_types.h" #include "core_types.h" -#include "link.h" +#include "link_service.h" #include "link_hwss.h" #include "link/protocols/link_dpcd.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 153d68375fa3..24184b4eb352 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -48,7 +48,7 @@ #include "link_encoder.h" #include "link_enc_cfg.h" #include "link_hwss.h" -#include "link.h" +#include "link_service.h" #include "dccg.h" #include "clock_source.h" #include "clk_mgr.h" @@ -1601,17 +1601,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( } if (pipe_ctx->stream_res.audio != NULL) { - build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output); + struct audio_output audio_output = {0}; - link_hwss->setup_audio_output(pipe_ctx, &pipe_ctx->stream_res.audio_output, + build_audio_output(context, pipe_ctx, &audio_output); + + link_hwss->setup_audio_output(pipe_ctx, &audio_output, pipe_ctx->stream_res.audio->inst); pipe_ctx->stream_res.audio->funcs->az_configure( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, - &pipe_ctx->stream_res.audio_output.crtc_info, + &audio_output.crtc_info, &pipe_ctx->stream->audio_info, - &pipe_ctx->stream_res.audio_output.dp_link_info); + &audio_output.dp_link_info); if (dc->config.disable_hbr_audio_dp2) if (pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio && @@ -1923,10 +1925,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) get_edp_streams(context, edp_streams, &edp_stream_num); - // Check fastboot support, disable on DCE8 because of blank screens - if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 && - dc->ctx->dce_version != DCE_VERSION_8_1 && - dc->ctx->dce_version != DCE_VERSION_8_3) { + /* Check fastboot support, disable on DCE 6-8 because of blank screens */ + if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) { for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; if (edp_link != edp_streams[0]->link) @@ -2385,7 +2385,9 @@ static void dce110_setup_audio_dto( if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) continue; if (pipe_ctx->stream_res.audio != NULL) { - build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output); + struct audio_output audio_output; + + build_audio_output(context, pipe_ctx, &audio_output); if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) { struct dtbclk_dto_params dto_params = {0}; @@ -2396,14 +2398,14 @@ static void dce110_setup_audio_dto( pipe_ctx->stream_res.audio->funcs->wall_dto_setup( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, - &pipe_ctx->stream_res.audio_output.crtc_info, - &pipe_ctx->stream_res.audio_output.pll_info); + &audio_output.crtc_info, + &audio_output.pll_info); } else 
pipe_ctx->stream_res.audio->funcs->wall_dto_setup( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, - &pipe_ctx->stream_res.audio_output.crtc_info, - &pipe_ctx->stream_res.audio_output.pll_info); + &audio_output.crtc_info, + &audio_output.pll_info); break; } } @@ -2423,15 +2425,15 @@ static void dce110_setup_audio_dto( continue; if (pipe_ctx->stream_res.audio != NULL) { - build_audio_output(context, - pipe_ctx, - &pipe_ctx->stream_res.audio_output); + struct audio_output audio_output = {0}; + + build_audio_output(context, pipe_ctx, &audio_output); pipe_ctx->stream_res.audio->funcs->wall_dto_setup( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, - &pipe_ctx->stream_res.audio_output.crtc_info, - &pipe_ctx->stream_res.audio_output.pll_info); + &audio_output.crtc_info, + &audio_output.pll_info); break; } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 506c3bbbf221..74f5e05f9cb4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -55,7 +55,7 @@ #include "dce/dmub_hw_lock_mgr.h" #include "dc_trace.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index cc377fcda6ff..417f2679723e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -54,7 +54,7 @@ #include "dpcd_defs.h" #include "inc/link_enc_cfg.h" #include "link_hwss.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 61efb15572ff..e2269211553c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -35,7 +35,7 @@ #include "hw/clk_mgr.h" #include "dc_dmub_srv.h" #include "abm.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 139a63101488..e47ed5571dfd 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -50,7 +50,7 @@ #include "dpcd_defs.h" #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_resource.h" -#include "link.h" +#include "link_service.h" #include "dc_state_priv.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 8ba934b83957..b822f2dffff0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -45,7 +45,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dcn10/dcn10_hwseq.h" #include "dcn21/dcn21_hwseq.h" #include "inc/link_enc_cfg.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 560984533950..f925f669f2a4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -46,7 +46,7 @@ 
#include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dcn10/dcn10_hwseq.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 416b1dca3dac..f39292952702 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -49,7 +49,7 @@ #include "dcn20/dcn20_optc.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn32/dcn32_resource.h" -#include "link.h" +#include "link_service.h" #include "../dcn20/dcn20_hwseq.h" #include "dc_state_priv.h" @@ -1052,7 +1052,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) } /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 764eff6a4ec6..05011061822c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -46,7 +46,7 @@ #include "link_hwss.h" #include "dpcd_defs.h" #include "dce/dmub_outbox.h" -#include "link.h" +#include "link_service.h" #include "dcn10/dcn10_hwseq.h" #include "inc/link_enc_cfg.h" #include "dcn30/dcn30_vpg.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index d5b5e2ce6ff6..1b0b772fc5dd 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -25,7 +25,7 @@ #include "dpcd_defs.h" #include "clk_mgr.h" #include "dsc.h" -#include "link.h" +#include "link_service.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn10/dcn10_cm_common.h" @@ -810,9 +810,12 @@ enum dc_status dcn401_enable_stream_timing( if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); - /* if we are borrowing from hblank, h_addressable needs to be adjusted */ - if (dc->debug.enable_hblank_borrow) - patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow; + /* if we are padding, h_addressable needs to be adjusted */ + if (dc->debug.enable_hblank_borrow) { + patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding; + patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding; + patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; + } pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d30f94c35f11..d11893f8c916 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -228,7 +228,8 @@ struct resource_funcs { enum dc_status (*update_dc_state_for_encoder_switch)(struct 
dc_link *link, struct dc_link_settings *link_setting, uint8_t pipe_count, - struct pipe_ctx *pipes); + struct pipe_ctx *pipes, + struct audio_output *audio_output); }; struct audio_support{ @@ -360,8 +361,6 @@ struct stream_resource { uint8_t gsl_group; struct test_pattern_params test_pattern_params; - - struct audio_output audio_output; }; struct plane_resource { @@ -437,6 +436,13 @@ enum p_state_switch_method { P_STATE_V_BLANK_SUB_VP, }; +struct dsc_padding_params { + /* pixels borrowed from hblank to hactive */ + uint8_t dsc_hactive_padding; + uint32_t dsc_htotal_padding; + uint32_t dsc_pix_clk_100hz; +}; + struct pipe_ctx { struct dc_plane_state *plane_state; struct dc_stream_state *stream; @@ -494,8 +500,7 @@ struct pipe_ctx { /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ uint8_t subvp_index; struct pixel_rate_divider pixel_rate_divider; - /* pixels borrowed from hblank to hactive */ - uint8_t hblank_borrow; + struct dsc_padding_params dsc_padding_params; /* next vupdate */ uint32_t next_vupdate; uint32_t wait_frame_count; diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h index 0cce49d95e26..1e34e84160aa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h @@ -42,8 +42,8 @@ * dc_link_exports.c or other dc files implement dc.h * * DC to Link: - * dc_link_exports.c or other dc files include link.h - * link_factory.c implements link.h + * dc_link_exports.c or other dc files include link_service.h + * link_factory.c implements link_service.h * * Link sub-component to Link sub-component: * link_factory.c includes --> link_xxx.h @@ -73,7 +73,7 @@ * 2. Implement your function in the suitable link_xxx.c file. * 3. Assign the function to link_service in link_factory.c * 4. NEVER include link_xxx.h headers outside link component. - * 5. NEVER include link.h on DM side. + * 5. NEVER include link_service.h on DM side. 
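 *    (Concretely: DM code reaches link functionality only through the
 *    dc_link_* entry points declared in dc.h and implemented in
 *    dc_link_exports.c, never by including this header directly.)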
*/ #include "core_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index a890f581f4e8..4e26a16a8743 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -45,6 +45,7 @@ enum dce_version resource_parse_asic_id( struct resource_caps { int num_timing_generator; int num_opp; + int num_dpp; int num_video_plane; int num_audio; int num_stream_encoder; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 23f41c99fa38..9e33bf937a69 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -75,7 +75,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link, bool is_hpo_acquired; uint8_t count; int i; - + struct audio_output audio_output[MAX_PIPES]; struct dc_stream_state *streams_on_link[MAX_PIPES]; int num_streams_on_link = 0; @@ -101,7 +101,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link, if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) { link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link, link_setting, count, - *pipes); + *pipes, &audio_output[0]); for (i = 0; i < count; i++) { pipes[i]->clock_source->funcs->program_pix_clk( pipes[i]->clock_source, @@ -113,16 +113,15 @@ static void dp_retrain_link_dp_test(struct dc_link *link, const struct link_hwss *link_hwss = get_link_hwss( link, &pipes[i]->link_res); - link_hwss->setup_audio_output(pipes[i], - &pipes[i]->stream_res.audio_output, - pipes[i]->stream_res.audio->inst); + link_hwss->setup_audio_output(pipes[i], &audio_output[i], + pipes[i]->stream_res.audio->inst); pipes[i]->stream_res.audio->funcs->az_configure( pipes[i]->stream_res.audio, pipes[i]->stream->signal, - &pipes[i]->stream_res.audio_output.crtc_info, + &audio_output[i].crtc_info, &pipes[i]->stream->audio_info, - &pipes[i]->stream_res.audio_output.dp_link_info); + &audio_output[i].dp_link_info); if (link->dc->config.disable_hbr_audio_dp2 && pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio && diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h index eae23ea7f6ec..033650cdb811 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_DP_CTS_H__ #define __LINK_DP_CTS_H__ -#include "link.h" +#include "link_service.h" void dp_handle_automated_test(struct dc_link *link); bool dp_set_test_pattern( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h index ab437a0c9101..9ff4a6c46a2b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_DP_TRACE_H__ #define __LINK_DP_TRACE_H__ -#include "link.h" +#include "link_service.h" void dp_trace_init(struct dc_link *link); void dp_trace_reset(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index 45f0e091fcb0..4a25210a344f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h +++ 
b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -27,7 +27,7 @@ #define __LINK_HWSS_DIO_H__ #include "link_hwss.h" -#include "link.h" +#include "link_service.h" const struct link_hwss *get_dio_link_hwss(void); bool can_use_dio_link_hwss(const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h index 9ac08a332540..cf578a8662a4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h @@ -25,7 +25,7 @@ #ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ #define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ -#include "link.h" +#include "link_service.h" uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link); uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h index 1d3ed8ca83b5..7c9005bc2587 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h @@ -26,7 +26,7 @@ #define __LINK_HWSS_HPO_DP_H__ #include "link_hwss.h" -#include "link.h" +#include "link_service.h" void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size); diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h index 82301187bc7c..8bf36827ecfb 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h @@ -25,7 +25,7 @@ #ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ #define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ -#include "link.h" +#include "link_service.h" bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link); const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index b717e430051a..85303167a553 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -1140,6 +1140,10 @@ static bool detect_link_and_local_sink(struct dc_link *link, if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && !sink->edid_caps.edid_hdmi) sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + else if (dc_is_dvi_signal(sink->sink_signal) && + aud_support->hdmi_audio_native && + sink->edid_caps.edid_hdmi) + sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) dp_trace_init(link); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h index 7da05078721e..1ab29476060b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h @@ -25,7 +25,7 @@ #ifndef __DC_LINK_DETECTION_H__ #define __DC_LINK_DETECTION_H__ -#include "link.h" +#include "link_service.h" bool link_detect(struct dc_link *link, enum dc_detect_reason reason); bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type); diff --git 
a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 08ee8d2f777b..83419e1a9036 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -832,7 +832,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ - dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h index 9398f9c1666a..bd6fc63064a3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DPMS_H__ #define __DC_LINK_DPMS_H__ -#include "link.h" +#include "link_service.h" void link_set_dpms_on( struct dc_state *state, struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h index e96220d48d03..aad36ca1a31c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_FACTORY_H__ #define __LINK_FACTORY_H__ -#include "link.h" +#include "link_service.h" struct dc_link *link_create(const struct link_init_data *init_params); void link_destroy(struct dc_link **link); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h index 1907bda3cb6e..f7aa3bc3a93a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_RESOURCE_H__ #define __LINK_RESOURCE_H__ -#include "link.h" +#include "link_service.h" void link_get_cur_res_map(const struct dc *dc, uint32_t *map); void link_restore_res_map(const struct dc *dc, uint32_t *map); void link_get_cur_link_res(const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index 9553c81053fe..595774e76453 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -24,7 +24,7 @@ */ #ifndef __LINK_VALIDATION_H__ #define __LINK_VALIDATION_H__ -#include "link.h" +#include "link_service.h" enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h index a3e25e55bed6..d3e6f01a6a90 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h @@ -26,7 +26,7 @@ #ifndef __DAL_DDC_SERVICE_H__ #define __DAL_DDC_SERVICE_H__ -#include "link.h" +#include "link_service.h" #define AUX_POWER_UP_WA_DELAY 500 #define I2C_OVER_AUX_DEFER_WA_DELAY 70 diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h index 7170db5a1c13..6e17f72a752f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_CAPABILITY_H__ #define __DC_LINK_DP_CAPABILITY_H__ -#include "link.h" +#include "link_service.h" bool detect_dp_sink_caps(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h index a61edfc9ca7a..7cd03fa4892b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h @@ -27,7 +27,7 @@ #ifndef __DC_LINK_DPIA_H__ #define __DC_LINK_DPIA_H__ -#include "link.h" +#include "link_service.h" /* Read tunneling device capability from DPCD and update link capability * accordingly. diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 41efcb3e44e2..30cd8e2b9d35 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -26,7 +26,7 @@ #ifndef DC_INC_LINK_DP_DPIA_BW_H_ #define DC_INC_LINK_DP_DPIA_BW_H_ -#include "link.h" +#include "link_service.h" /* diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h index ac33730fedd4..87516fb3b45a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_IRQ_HANDLER_H__ #define __DC_LINK_DP_IRQ_HANDLER_H__ -#include "link.h" +#include "link_service.h" bool dp_parse_link_loss_status( struct dc_link *link, union hpd_irq_data *hpd_irq_dpcd_data); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h index ab1c1f8f1f8b..58e154494582 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_PHY_H__ #define __DC_LINK_DP_PHY_H__ -#include "link.h" +#include "link_service.h" void dp_enable_link_phy( struct dc_link *link, const struct link_resource *link_res, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 134093ce5a8e..08e2b572e0ff 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1729,6 +1729,15 @@ bool perform_link_training_with_retries( break; } + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && + !link->dc->config.enable_dpia_pre_training) { + if (j == (attempts - 1)) + do_fallback = true; + else + do_fallback = false; + } + if (j == (attempts - 1)) { DC_LOG_WARNING( "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h index 574b083e0936..ce52de22ab7a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_DP_TRAINING_H__ 
#define __DC_LINK_DP_TRAINING_H__ -#include "link.h" +#include "link_service.h" bool perform_link_training_with_retries( const struct dc_link_settings *link_setting, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h index 08d787a1e451..c2717c678c72 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h @@ -25,7 +25,7 @@ #ifndef __LINK_DPCD_H__ #define __LINK_DPCD_H__ -#include "link.h" +#include "link_service.h" #include "dpcd_defs.h" enum dc_status core_link_read_dpcd( diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 4a475d5b9dde..62a6344e613e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -25,7 +25,7 @@ #ifndef __DC_LINK_EDP_PANEL_CONTROL_H__ #define __DC_LINK_EDP_PANEL_CONTROL_H__ -#include "link.h" +#include "link_service.h" enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h index 4fb526b264f9..af529328ba17 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h @@ -26,7 +26,7 @@ #ifndef __DC_LINK_HPD_H__ #define __DC_LINK_HPD_H__ -#include "link.h" +#include "link_service.h" enum hpd_source_id get_hpd_line(struct dc_link *link); /* diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 2f23cc6df571..540e04ec1e2d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -67,7 +67,7 @@ #include "reg_helper.h" #include "dce100/dce100_resource.h" -#include "link.h" +#include "link_service.h" #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index 53b60044653f..c164d2500c2a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -881,7 +881,16 @@ static enum dc_status dce60_validate_bandwidth( context->bw_ctx.bw.dce.dispclk_khz = 681000; context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { - context->bw_ctx.bw.dce.dispclk_khz = 0; + /* On DCE 6.0 and 6.4 the PLL0 is both the display engine clock and + * the DP clock, and shouldn't be turned off. Just select the display + * clock value from its low power mode. 
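 * (A nonzero dispclk keeps PLL0, and with it the DP reference clock,
 * running; 352000 kHz is that low-power value.)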
+ */ + if (dc->ctx->dce_version == DCE_VERSION_6_0 || + dc->ctx->dce_version == DCE_VERSION_6_4) + context->bw_ctx.bw.dce.dispclk_khz = 352000; + else + context->bw_ctx.bw.dce.dispclk_khz = 0; + context->bw_ctx.bw.dce.yclk_khz = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index f9cbdad3ef37..84b38d2d6967 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -85,7 +85,7 @@ #include "vm_helper.h" #include "link_enc_cfg.h" -#include "link.h" +#include "link_service.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index 201ed863b69e..ff63f59ff928 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -60,7 +60,7 @@ #include "dml/display_mode_vba.h" #include "dcn30/dcn30_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dce/dce_panel_cntl.h" #include "dcn30/dcn30_dwb.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 3345068a878c..61623cb518d9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -47,7 +47,8 @@ #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" + #include "dce/dce_abm.h" #include "dce/dce_audio.h" #include "dce/dce_aux.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 3479e1eab4cd..02b9a84f2db3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -47,7 +47,7 @@ #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dce/dce_abm.h" #include "dce/dce_audio.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index ca17e5d8fdc2..3ed7f50554e2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -2239,7 +2239,8 @@ struct resource_pool *dcn31_create_resource_pool( enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, struct dc_link_settings *link_setting, uint8_t pipe_count, - struct pipe_ctx *pipes) + struct pipe_ctx *pipes, + struct audio_output *audio_output) { struct dc_state *state = link->dc->current_state; int i; @@ -2254,7 +2255,7 @@ enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, // Setup audio if (pipes[i].stream_res.audio != NULL) - build_audio_output(state, &pipes[i], &pipes[i].stream_res.audio_output); + build_audio_output(state, &pipes[i], &audio_output[i]); } #else /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index 7e8fde65528f..c32c85ef0ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -69,7 +69,8 @@ unsigned int dcn31_get_det_buffer_size( enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, struct dc_link_settings *link_setting, uint8_t pipe_count, - struct pipe_ctx *pipes); + struct pipe_ctx *pipes, + struct audio_output *audio_output); /*temp: B0 specific before switch to dcn313 headers*/ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 9917b366f00c..8f80ccb846d7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -69,7 +69,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" @@ -2852,7 +2852,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx]; free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx]; free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst; - free_pipe->hblank_borrow = otg_master->hblank_borrow; + free_pipe->dsc_padding_params = otg_master->dsc_padding_params; if (free_pipe->stream->timing.flags.DSC == 1) { dcn20_acquire_dsc(free_pipe->stream->ctx->dc, &new_ctx->res_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 061c0907d802..ad214986f7ac 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -72,7 +72,7 @@ #include "dml/display_mode_vba.h" #include "dcn32/dcn32_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 8475c6eec547..07552445e424 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -61,7 +61,7 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_apg.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dcn31/dcn31_vpg.h" @@ -1900,9 +1900,6 @@ static bool dcn35_resource_construct( dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; - dc->caps.num_of_host_routers = 2; - dc->caps.num_of_dpias_per_host_router = 2; - /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. 
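A short caller-side sketch for the widened dcn31_update_dc_state_for_encoder_switch() signature above; the audio_output scratch array is now owned by the caller rather than written into pipes[i].stream_res. MAX_PIPES and the surrounding variables (link, link_settings, pipes, pipe_count) are assumed from the dc context:

    struct audio_output audio_output[MAX_PIPES] = { 0 };
    enum dc_status status;

    /* build_audio_output() fills only the entries whose pipe has an
     * audio resource attached. */
    status = dcn31_update_dc_state_for_encoder_switch(link, &link_settings,
                                                      pipe_count, pipes,
                                                      audio_output);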
* It's expected for furture ASIC to have equal or higher value, in order to diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 0971c0f74186..cb0478a9a34d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -40,7 +40,7 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_apg.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dcn31/dcn31_vpg.h" @@ -1872,9 +1872,6 @@ static bool dcn351_resource_construct( dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; - dc->caps.num_of_host_routers = 2; - dc->caps.num_of_dpias_per_host_router = 2; - /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. * It's expected for furture ASIC to have equal or higher value, in order to diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index 8bae7fcedc22..126090c9bb8a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -40,7 +40,7 @@ #include "dcn31/dcn31_hpo_dp_stream_encoder.h" #include "dcn31/dcn31_hpo_dp_link_encoder.h" #include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "link.h" +#include "link_service.h" #include "dcn31/dcn31_apg.h" #include "dcn32/dcn32_dio_link_encoder.h" #include "dcn31/dcn31_vpg.h" @@ -1873,9 +1873,6 @@ static bool dcn36_resource_construct( dc->caps.num_of_host_routers = 2; dc->caps.num_of_dpias_per_host_router = 2; - dc->caps.num_of_host_routers = 2; - dc->caps.num_of_dpias_per_host_router = 2; - /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. 
* It's expected for furture ASIC to have equal or higher value, in order to diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 068c123ea8a8..1d18807e4749 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -50,7 +50,7 @@ #include "dml/display_mode_vba.h" #include "dcn401/dcn401_dccg.h" #include "dcn10/dcn10_resource.h" -#include "link.h" +#include "link_service.h" #include "link_enc_cfg.h" #include "dcn31/dcn31_panel_cntl.h" @@ -1699,6 +1699,9 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) + pixel_clk_params->requested_pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; + if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment) link_enc = link_enc_cfg_get_link_enc(link); if (link_enc) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index e7056205b050..ce041f6239dc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -89,44 +89,50 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in, void dmub_dcn32_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 30; - uint32_t in_reset, scratch, i; + const uint32_t timeout = 100000; + uint32_t in_reset, is_enabled, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (in_reset == 0) { + if (in_reset == 0 && is_enabled != 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - /** - * Timeout covers both the ACK and the wait - * for remaining work to finish. - * - * This is mostly bound by the PHY disable sequence. - * Each register check will be greater than 1us, so - * don't bother using udelay. - */ - for (i = 0; i < timeout; ++i) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; + + udelay(1); } for (i = 0; i < timeout; ++i) { - scratch = dmub->hw_funcs.get_gpint_response(dmub); + scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; + + udelay(1); } + for (i = 0; i < timeout; ++i) { + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); + if (pwait_mode & (1 << 0)) + break; + + udelay(1); + } /* Force reset in case we timed out, DMCUB is likely hung. */ } - REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + if (is_enabled) { + REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); + udelay(1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); + } + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -135,7 +141,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); - /* Clear the GPINT command manually so we don't reset again. */ + /* Clear the GPINT command manually so we don't send anything during boot. 
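The reworked dmub_dcn32_reset() above polls three conditions in turn (the GPINT ack, the STOP_FW response in SCRATCH7, then PWAIT mode) with the same bounded-spin shape; a generic sketch of that pattern, assuming a predicate callback:

    static bool dmub_poll_us(struct dmub_srv *dmub,
                             bool (*done)(struct dmub_srv *dmub),
                             u32 timeout_us)
    {
            u32 i;

            for (i = 0; i < timeout_us; i++) {
                    if (done(dmub))
                            return true;

                    udelay(1);      /* 100000 iterations ~= 100 ms worst case */
            }

            return false;   /* caller falls through to the forced reset */
    }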
*/ cmd.all = 0; dmub->hw_funcs.set_gpint(dmub, cmd); } @@ -419,8 +425,8 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub) void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; - uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; + uint32_t is_dmub_enabled, is_soft_reset, is_pwait; + uint32_t is_traceport_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; if (!dmub) @@ -470,18 +476,15 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; - REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); - dmub->debug.is_dmcub_secure_reset = is_sec_reset; - REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); dmub->debug.is_traceport_en = is_traceport_enabled; - REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); - dmub->debug.is_cw0_enabled = is_cw0_enabled; - REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); dmub->debug.is_cw6_enabled = is_cw6_enabled; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index 1a229450c53d..daf81027d663 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -89,6 +89,9 @@ struct dmub_srv; DMUB_SR(DMCUB_REGION5_OFFSET) \ DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \ DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION6_OFFSET) \ + DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \ DMUB_SR(DMCUB_SCRATCH0) \ DMUB_SR(DMCUB_SCRATCH1) \ DMUB_SR(DMCUB_SCRATCH2) \ @@ -155,6 +158,8 @@ struct dmub_srv; DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ @@ -162,7 +167,8 @@ struct dmub_srv; DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \ DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ - DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) struct dmub_srv_dcn32_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index bfb446736ca8..75efda2969cf 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -239,18 +239,51 @@ enum amd_harvest_ip_mask { AMD_HARVEST_IP_DMU_MASK = 0x4, }; +/** + * enum DC_FEATURE_MASK - Bits that control DC feature defaults + */ enum DC_FEATURE_MASK { //Default value can be found at "uint amdgpu_dc_feature_mask" - DC_FBC_MASK = (1 << 0), //0x1, disabled by default - DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default - DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default - 
DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1 - DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default - DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default - DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default - DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default - DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default - DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4 + /** + * @DC_FBC_MASK: (0x1) disabled by default + */ + DC_FBC_MASK = (1 << 0), + /** + * @DC_MULTI_MON_PP_MCLK_SWITCH_MASK: (0x2) enabled by default + */ + DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), + /** + * @DC_DISABLE_FRACTIONAL_PWM_MASK: (0x4) disabled by default + */ + DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), + /** + * @DC_PSR_MASK: (0x8) disabled by default for DCN < 3.1 + */ + DC_PSR_MASK = (1 << 3), + /** + * @DC_EDP_NO_POWER_SEQUENCING: (0x10) disabled by default + */ + DC_EDP_NO_POWER_SEQUENCING = (1 << 4), + /** + * @DC_DISABLE_LTTPR_DP1_4A: (0x20) disabled by default + */ + DC_DISABLE_LTTPR_DP1_4A = (1 << 5), + /** + * @DC_DISABLE_LTTPR_DP2_0: (0x40) disabled by default + */ + DC_DISABLE_LTTPR_DP2_0 = (1 << 6), + /** + * @DC_PSR_ALLOW_SMU_OPT: (0x80) disabled by default + */ + DC_PSR_ALLOW_SMU_OPT = (1 << 7), + /** + * @DC_PSR_ALLOW_MULTI_DISP_OPT: (0x100) disabled by default + */ + DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), + /** + * @DC_REPLAY_MASK: (0x200) disabled by default for DCN < 3.1.4 + */ + DC_REPLAY_MASK = (1 << 9), }; /** @@ -258,64 +291,64 @@ enum DC_FEATURE_MASK { */ enum DC_DEBUG_MASK { /** - * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting + * @DC_DISABLE_PIPE_SPLIT: (0x1) If set, disable pipe-splitting */ DC_DISABLE_PIPE_SPLIT = 0x1, /** - * @DC_DISABLE_STUTTER: If set, disable memory stutter mode + * @DC_DISABLE_STUTTER: (0x2) If set, disable memory stutter mode */ DC_DISABLE_STUTTER = 0x2, /** - * @DC_DISABLE_DSC: If set, disable display stream compression + * @DC_DISABLE_DSC: (0x4) If set, disable display stream compression */ DC_DISABLE_DSC = 0x4, /** - * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations + * @DC_DISABLE_CLOCK_GATING: (0x8) If set, disable clock gating optimizations */ DC_DISABLE_CLOCK_GATING = 0x8, /** - * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU + * @DC_DISABLE_PSR: (0x10) If set, disable Panel self refresh v1 and PSR-SU */ DC_DISABLE_PSR = 0x10, /** - * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even + * @DC_FORCE_SUBVP_MCLK_SWITCH: (0x20) If set, force mclk switch in subvp, even * if mclk switch in vblank is possible */ DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, /** - * @DC_DISABLE_MPO: If set, disable multi-plane offloading + * @DC_DISABLE_MPO: (0x40) If set, disable multi-plane offloading */ DC_DISABLE_MPO = 0x40, /** - * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA + * @DC_ENABLE_DPIA_TRACE: (0x80) If set, enable trace logging for DPIA */ DC_ENABLE_DPIA_TRACE = 0x80, /** - * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version + * @DC_ENABLE_DML2: (0x100) If set, force usage of DML2, even if the DCN version * does not default to it. 
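The kernel-doc conversion above keeps the semantics unchanged: each entry is a plain bit in the module parameter named by the enum's opening comment (amdgpu.dcfeaturemask on the kernel command line). A hedged sketch of how such a bit is typically tested; the helper itself is hypothetical:

    extern uint amdgpu_dc_feature_mask;     /* set via amdgpu.dcfeaturemask= */

    static bool dc_feature_bit_set(enum DC_FEATURE_MASK bit)
    {
            /* e.g. dcfeaturemask=0x8 requests DC_PSR_MASK on pre-3.1 DCN */
            return (amdgpu_dc_feature_mask & bit) != 0;
    }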
*/ DC_ENABLE_DML2 = 0x100, /** - * @DC_DISABLE_PSR_SU: If set, disable PSR SU + * @DC_DISABLE_PSR_SU: (0x200) If set, disable PSR SU */ DC_DISABLE_PSR_SU = 0x200, /** - * @DC_DISABLE_REPLAY: If set, disable Panel Replay + * @DC_DISABLE_REPLAY: (0x400) If set, disable Panel Replay */ DC_DISABLE_REPLAY = 0x400, /** - * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time. + * @DC_DISABLE_IPS: (0x800) If set, disable all Idle Power States, all the time. * If more than one IPS debug bit is set, the lowest bit takes * precedence. For example, if DC_FORCE_IPS_ENABLE and * DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes @@ -324,56 +357,57 @@ enum DC_DEBUG_MASK { DC_DISABLE_IPS = 0x800, /** - * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time, + * @DC_DISABLE_IPS_DYNAMIC: (0x1000) If set, disable all IPS, all the time, * *except* when driver goes into suspend. */ DC_DISABLE_IPS_DYNAMIC = 0x1000, /** - * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if + * @DC_DISABLE_IPS2_DYNAMIC: (0x2000) If set, disable IPS2 (IPS1 allowed) if * there is an enabled display. Otherwise, enable all IPS. */ DC_DISABLE_IPS2_DYNAMIC = 0x2000, /** - * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time. + * @DC_FORCE_IPS_ENABLE: (0x4000) If set, force enable all IPS, all the time. */ DC_FORCE_IPS_ENABLE = 0x4000, /** - * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for + * @DC_DISABLE_ACPI_EDID: (0x8000) If set, don't attempt to fetch EDID for * eDP display from ACPI _DDC method. */ DC_DISABLE_ACPI_EDID = 0x8000, /** - * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver. + * @DC_DISABLE_HDMI_CEC: (0x10000) If set, disable HDMI-CEC feature in amdgpu driver. */ DC_DISABLE_HDMI_CEC = 0x10000, /** - * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted + * @DC_DISABLE_SUBVP_FAMS: (0x20000) If set, disable DCN Sub-Viewport & Firmware Assisted * Memory Clock Switching (FAMS) feature in amdgpu driver. */ DC_DISABLE_SUBVP_FAMS = 0x20000, /** - * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves + * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: (0x40000) If set, disable support for custom + * brightness curves */ DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000, /** - * @DC_HDCP_LC_FORCE_FW_ENABLE: If set, use HDCP Locality Check FW + * @DC_HDCP_LC_FORCE_FW_ENABLE: (0x80000) If set, use HDCP Locality Check FW * path regardless of reported HW capabilities. */ DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000, /** - * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW + * @DC_HDCP_LC_ENABLE_SW_FALLBACK: (0x100000) If set, upon HDCP Locality Check FW * path failure, retry using legacy SW path. 
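@DC_DISABLE_IPS above documents that when several IPS debug bits are set, the lowest-valued bit takes precedence; a small sketch of that rule, assuming the four IPS bits defined in this enum and the usual lowest-set-bit idiom:

    static u32 resolve_ips_debug_bit(u32 dcdebugmask)
    {
            u32 ips = dcdebugmask & (DC_DISABLE_IPS | DC_DISABLE_IPS_DYNAMIC |
                                     DC_DISABLE_IPS2_DYNAMIC | DC_FORCE_IPS_ENABLE);

            /* ips & -ips isolates the lowest set bit, so DC_DISABLE_IPS_DYNAMIC
             * (0x1000) wins over DC_FORCE_IPS_ENABLE (0x4000), as documented. */
            return ips & -ips;
    }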
*/ DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000, /** - * @DC_SKIP_DETECTION_LT: If set, skip detection link training + * @DC_SKIP_DETECTION_LT: (0x200000) If set, skip detection link training */ DC_SKIP_DETECTION_LT = 0x200000, }; diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h index acd1cef61b7c..349544504c93 100644 --- a/drivers/gpu/drm/amd/include/dm_pp_interface.h +++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h @@ -65,6 +65,7 @@ struct single_display_configuration { uint32_t view_resolution_cy; enum amd_pp_display_config_type displayconfigtype; uint32_t vertical_refresh; /* for active display */ + uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */ }; #define MAX_NUM_DISPLAY 32 diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 2f7e4b5bebf3..2b0cdb2a2775 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -162,6 +162,10 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, AMDGPU_PP_SENSOR_VCN_LOAD, + AMDGPU_PP_SENSOR_NODEPOWERLIMIT, + AMDGPU_PP_SENSOR_NODEPOWER, + AMDGPU_PP_SENSOR_GPPTRESIDENCY, + AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, }; enum amd_pp_task { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c index 2d2d2d5e6763..b5e9c3ecf703 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c @@ -27,76 +27,69 @@ #include "amdgpu_smu.h" #include "amdgpu_dpm_internal.h" -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) +void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev) { struct drm_device *ddev = adev_to_drm(adev); + struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; + struct single_display_configuration *display_cfg; struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_connector *conn; + int num_crtcs = 0; + int vrefresh; + u32 vblank_in_pixels, vblank_time_us; + + cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */ - adev->pm.dpm.new_active_crtcs = 0; - adev->pm.dpm.new_active_crtc_count = 0; if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { amdgpu_crtc = to_amdgpu_crtc(crtc); - if (amdgpu_crtc->enabled) { - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); - adev->pm.dpm.new_active_crtc_count++; - } - } - } -} -u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vblank_in_pixels; - u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ + /* The array should only contain active displays. 
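The rewritten amdgpu_dpm_get_display_cfg() keeps the legacy vblank-time formula (vblank pixels scaled by the pixel clock); a worked example, assuming a CEA 1080p60 mode with htotal 2200, 45 vblank lines, zero v_border and a 148500 kHz pixel clock:

    u32 vblank_in_pixels = 2200 * 45;       /* htotal * vblank lines = 99000 */
    u32 vblank_time_us = vblank_in_pixels * 1000 / 148500;     /* = 666 us */

    /* Displays above 120 Hz get vblank_time_us forced to 0, which drags
     * cfg->min_vblank_time to 0 and keeps the legacy path from attempting
     * an mclk switch inside vblank. */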
*/ + if (!amdgpu_crtc->enabled) + continue; + + conn = to_amdgpu_connector(amdgpu_crtc->connector); + display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++]; + + if (amdgpu_crtc->hw_mode.clock) { + vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { vblank_in_pixels = amdgpu_crtc->hw_mode.crtc_htotal * (amdgpu_crtc->hw_mode.crtc_vblank_end - amdgpu_crtc->hw_mode.crtc_vdisplay + (amdgpu_crtc->v_border * 2)); - vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; + vblank_time_us = + vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; - /* we have issues with mclk switching with - * refresh rates over 120 hz on the non-DC code. + /* The legacy (non-DC) code has issues with mclk switching + * with refresh rates over 120 Hz. Disable mclk switching. */ - if (drm_mode_vrefresh(&amdgpu_crtc->hw_mode) > 120) + if (vrefresh > 120) vblank_time_us = 0; - break; - } - } - } + /* Find minimum vblank time. */ + if (vblank_time_us < cfg->min_vblank_time) + cfg->min_vblank_time = vblank_time_us; - return vblank_time_us; -} - -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vrefresh = 0; + /* Find vertical refresh rate of first active display. */ + if (!cfg->vrefresh) + cfg->vrefresh = vrefresh; + } - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - break; + if (amdgpu_crtc->crtc_id < cfg->crtc_index) { + /* Find first active CRTC and its line time. 
*/ + cfg->crtc_index = amdgpu_crtc->crtc_id; + cfg->line_time_in_us = amdgpu_crtc->line_time; } + + display_cfg->controller_id = amdgpu_crtc->crtc_id; + display_cfg->pixel_clock = conn->pixelclock_for_modeset; } } - return vrefresh; + cfg->display_clk = adev->clock.default_dispclk; + cfg->num_display = num_crtcs; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 96590c1da553..b5fbb0fd1dc0 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1421,9 +1421,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, return -EINVAL; } -static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, - enum amd_pp_sensors sensor, - void *query) +static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev, + enum amd_pp_sensors sensor, + void *query) { int r, size = sizeof(uint32_t); @@ -1456,7 +1456,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, unsigned int value; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); if (r) return r; @@ -1480,7 +1480,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, unsigned int value; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); if (r) return r; @@ -1504,7 +1504,7 @@ static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev, unsigned int value; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); if (r) return r; @@ -1783,7 +1783,7 @@ static int amdgpu_show_powershift_percent(struct device *dev, uint32_t ss_power; int r = 0, i; - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); if (r == -EOPNOTSUPP) { /* sensor not available on dGPU, try to read from APU */ adev = NULL; @@ -1796,7 +1796,7 @@ static int amdgpu_show_powershift_percent(struct device *dev, } mutex_unlock(&mgpu_info.mutex); if (adev) - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); } if (r) @@ -1906,11 +1906,11 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (!amdgpu_device_supports_smart_shift(adev)) *states = ATTR_STATE_UNSUPPORTED; - else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, - (void *)&ss_power)) + else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, + (void *)&ss_power)) *states = ATTR_STATE_UNSUPPORTED; - else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, - (void *)&ss_power)) + else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, + (void *)&ss_power)) *states = ATTR_STATE_UNSUPPORTED; return 0; @@ -2081,8 +2081,9 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd * for user application to monitor various board reated attributes. * * The amdgpu driver provides a sysfs API for reporting board attributes. Presently, - * only two types of attributes are reported, baseboard temperature and - * gpu board temperature. Both of them are reported as binary files. + * seven types of attributes are reported. 
Baseboard temperature and + * gpu board temperature are reported as binary files. Npm status, current node power limit, + * max node power limit, node power and global ppt residency is reported as ASCII text file. * * * .. code-block:: console * @@ -2090,6 +2091,15 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd * * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp * + * hexdump /sys/bus/pci/devices/.../board/npm_status + * + * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit + * + * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit + * + * hexdump /sys/bus/pci/devices/.../board/node_power + * + * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid */ /** @@ -2168,8 +2178,129 @@ out: return size; } +/** + * DOC: cur_node_power_limit + * + * The amdgpu driver provides a sysfs API for retrieving current node power limit. + * The file cur_node_power_limit is used for this. + */ +static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 nplimit; + int r; + + /* get the current node power limit */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT, + (void *)&nplimit); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", nplimit); +} + +/** + * DOC: node_power + * + * The amdgpu driver provides a sysfs API for retrieving current node power. + * The file node_power is used for this. + */ +static ssize_t amdgpu_show_node_power(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 npower; + int r; + + /* get the node power */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, + (void *)&npower); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", npower); +} + +/** + * DOC: npm_status + * + * The amdgpu driver provides a sysfs API for retrieving current node power management status. + * The file npm_status is used for this. It shows the status as enabled or disabled based on + * current node power value. If node power is zero, status is disabled else enabled. + */ +static ssize_t amdgpu_show_npm_status(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 npower; + int r; + + /* get the node power */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, + (void *)&npower); + if (r) + return r; + + return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled"); +} + +/** + * DOC: global_ppt_resid + * + * The amdgpu driver provides a sysfs API for retrieving global ppt residency. + * The file global_ppt_resid is used for this. + */ +static ssize_t amdgpu_show_global_ppt_resid(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 gpptresid; + int r; + + /* get the global ppt residency */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY, + (void *)&gpptresid); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", gpptresid); +} + +/** + * DOC: max_node_power_limit + * + * The amdgpu driver provides a sysfs API for retrieving maximum node power limit. + * The file max_node_power_limit is used for this. 
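Every one of the five new board attributes above reduces to a single call into the generic sensor path, so each show routine has the same shape; a condensed sketch (adev and buf as in the functions above):

    u32 value;
    int r;

    r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
                                     (void *)&value);
    if (r)  /* init only creates these files when the MAXNODEPOWERLIMIT
             * probe does not return -EOPNOTSUPP */
            return r;

    return sysfs_emit(buf, "%u\n", value);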
+ */ +static ssize_t amdgpu_show_max_node_power_limit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + u32 max_nplimit; + int r; + + /* get the max node power limit */ + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, + (void *)&max_nplimit); + if (r) + return r; + + return sysfs_emit(buf, "%u\n", max_nplimit); +} + static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL); static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL); +static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL); +static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL); +static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL); +static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL); +static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL); static struct attribute *board_attrs[] = { &dev_attr_baseboard_temp.attr, @@ -2636,18 +2767,18 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, switch (channel) { case PP_TEMP_JUNCTION: /* get current junction temperature */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, - (void *)&temp); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, + (void *)&temp); break; case PP_TEMP_EDGE: /* get current edge temperature */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, - (void *)&temp); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, + (void *)&temp); break; case PP_TEMP_MEM: /* get current memory temperature */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, - (void *)&temp); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, + (void *)&temp); break; default: r = -EINVAL; @@ -2909,8 +3040,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, u32 min_rpm = 0; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, - (void *)&min_rpm); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, + (void *)&min_rpm); if (r) return r; @@ -2926,8 +3057,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, u32 max_rpm = 0; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, - (void *)&max_rpm); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + (void *)&max_rpm); if (r) return r; @@ -3060,8 +3191,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, int r; /* get the voltage */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, - (void *)&vddgfx); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, + (void *)&vddgfx); if (r) return r; @@ -3077,8 +3208,8 @@ static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev, int r; /* get the voltage */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, - (void *)&vddboard); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&vddboard); if (r) return r; @@ -3111,8 +3242,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, return -EINVAL; /* get the voltage */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, - (void *)&vddnb); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, + (void *)&vddnb); if (r) return r; @@ -3134,7 +3265,7 @@ static int 
amdgpu_hwmon_get_power(struct device *dev, u32 query = 0; int r; - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query); + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query); if (r) return r; @@ -3254,9 +3385,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, int err; u32 value; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - err = kstrtou32(buf, 10, &value); if (err) return err; @@ -3287,8 +3415,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, int r; /* get the sclk */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, - (void *)&sclk); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, + (void *)&sclk); if (r) return r; @@ -3311,8 +3439,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, int r; /* get the sclk */ - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, - (void *)&mclk); + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, + (void *)&mclk); if (r) return r; @@ -3598,6 +3726,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; } + if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr && + amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT)) + effective_mode |= S_IWUSR; + /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */ if (((adev->family == AMDGPU_FAMILY_SI) || ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) && @@ -3606,10 +3738,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* not all products support both average and instantaneous */ if (attr == &sensor_dev_attr_power1_average.dev_attr.attr && - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP) + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, + (void *)&tmp) == -EOPNOTSUPP) return 0; if (attr == &sensor_dev_attr_power1_input.dev_attr.attr && - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP) + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, + (void *)&tmp) == -EOPNOTSUPP) return 0; /* hide max/min values if we can't both query and manage the fan */ @@ -3648,8 +3782,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, /* only few boards support vddboard */ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr || attr == &sensor_dev_attr_in2_label.dev_attr.attr) && - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, - (void *)&tmp) == -EOPNOTSUPP) + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&tmp) == -EOPNOTSUPP) return 0; /* no mclk on APUs other than gc 9,4,3*/ @@ -4531,6 +4665,7 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) { enum amdgpu_sriov_vf_mode mode; uint32_t mask = 0; + uint32_t tmp; int ret; if (adev->pm.sysfs_initialized) @@ -4597,6 +4732,21 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) &amdgpu_board_attr_group); if (ret) goto err_out0; + if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, + (void *)&tmp) != -EOPNOTSUPP) { + sysfs_add_file_to_group(&adev->dev->kobj, + &dev_attr_cur_node_power_limit.attr, + amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr, + amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr, + amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, + &dev_attr_max_node_power_limit.attr, + 
amdgpu_board_attr_group.name); + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr, + amdgpu_board_attr_group.name); + } } adev->pm.sysfs_initialized = true; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 9748744133d9..65c1d98af26c 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -263,10 +263,6 @@ struct amdgpu_dpm { u32 voltage_response_time; u32 backbias_response_time; void *priv; - u32 new_active_crtcs; - int new_active_crtc_count; - u32 current_active_crtcs; - int current_active_crtc_count; struct amdgpu_dpm_dynamic_state dyn_state; struct amdgpu_dpm_fan fan; u32 tdp_limit; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h index 5c2a89f0d5d5..cc6d7ba040e9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h @@ -23,10 +23,6 @@ #ifndef __AMDGPU_DPM_INTERNAL_H__ #define __AMDGPU_DPM_INTERNAL_H__ -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); - -u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); - -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); +void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 307ebf7e3226..33eb85dd68e9 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -2299,7 +2299,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, if (pi->sys_info.nb_dpm_enable) { force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || - pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || + pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) || pi->disable_nb_ps3_in_battery; ps->dpm0_pg_nb_ps_lo = force_high ? 
0x2 : 0x3; ps->dpm0_pg_nb_ps_hi = 0x2; @@ -2358,7 +2358,7 @@ static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) return 0; force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || - (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); + (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start); if (force_high) { for (i = pi->lowest_valid; i <= pi->highest_valid; i++) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index 52dbf6d0469d..c7ed0b457129 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -771,7 +771,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, int i; struct amdgpu_ps *ps; u32 ui_class; - bool single_display = adev->pm.dpm.new_active_crtc_count < 2; + bool single_display = adev->pm.pm_display_cfg.num_display < 2; /* check if the vblank period is too short to adjust the mclk */ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { @@ -944,9 +944,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) amdgpu_dpm_post_set_power_state(adev); - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - if (pp_funcs->force_performance_level) { if (adev->pm.dpm.thermal_active) { enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; @@ -967,7 +964,8 @@ void amdgpu_legacy_dpm_compute_clocks(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_dpm_get_active_displays(adev); + if (!adev->dc_enabled) + amdgpu_dpm_get_display_cfg(adev); amdgpu_dpm_change_power_state_locked(adev); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 6595a611ce6e..cf9932e68055 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3081,7 +3081,7 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev, static bool si_dpm_vblank_too_short(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); + u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time; /* we never hit the non-gddr5 limit so disable it */ u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; @@ -3447,9 +3447,10 @@ static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) static void si_apply_state_adjust_rules(struct amdgpu_device *adev, struct amdgpu_ps *rps) { + const struct amd_pp_display_configuration *display_cfg = + &adev->pm.pm_display_cfg; struct si_ps *ps = si_get_ps(rps); struct amdgpu_clock_and_voltage_limits *max_limits; - struct amdgpu_connector *conn; bool disable_mclk_switching = false; bool disable_sclk_switching = false; u32 mclk, sclk; @@ -3488,14 +3489,9 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, * For example, 4K 60Hz and 1080p 144Hz fall into this category. * Find number of such displays connected. */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) || - !adev->mode_info.crtcs[i]->enabled) - continue; - - conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector); - - if (conn->pixelclock_for_modeset > 297000) + for (i = 0; i < display_cfg->num_display; i++) { + /* The array only contains active displays. 
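The high-pixel-clock test this loop applies just below (pixel_clock > 297000, in kHz) matches the examples quoted in the comment; hedged figures: 3840x2160@60 (CEA) runs at 594000 kHz and a typical 1920x1080@144 reduced-blanking mode at roughly 330000 kHz, while 1080p60 sits at 148500 kHz. As a hypothetical predicate:

    static bool si_is_high_pixelclock(u32 pixel_clock_khz)
    {
            /* 1080p60 = 148500 -> false; 1080p144 ~ 330000 -> true;
             * 4K60 = 594000 -> true, matching the comment's examples. */
            return pixel_clock_khz > 297000;
    }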
*/ + if (display_cfg->displays[i].pixel_clock > 297000) high_pixelclock_count++; } @@ -3523,7 +3519,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, rps->ecclk = 0; } - if ((adev->pm.dpm.new_active_crtc_count > 1) || + if ((adev->pm.pm_display_cfg.num_display > 1) || si_dpm_vblank_too_short(adev)) disable_mclk_switching = true; @@ -3671,7 +3667,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, ps->performance_levels[i].mclk, max_limits->vddc, &ps->performance_levels[i].vddc); btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, - adev->clock.current_dispclk, + display_cfg->display_clk, max_limits->vddc, &ps->performance_levels[i].vddc); } @@ -4196,16 +4192,16 @@ static void si_program_ds_registers(struct amdgpu_device *adev) static void si_program_display_gap(struct amdgpu_device *adev) { + const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; u32 tmp, pipe; - int i; tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK); - if (adev->pm.dpm.new_active_crtc_count > 0) + if (cfg->num_display > 0) tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; else tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; - if (adev->pm.dpm.new_active_crtc_count > 1) + if (cfg->num_display > 1) tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; else tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; @@ -4215,17 +4211,8 @@ static void si_program_display_gap(struct amdgpu_device *adev) tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; - if ((adev->pm.dpm.new_active_crtc_count > 0) && - (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) { - /* find the first active crtc */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->pm.dpm.new_active_crtcs & (1 << i)) - break; - } - if (i == adev->mode_info.num_crtc) - pipe = 0; - else - pipe = i; + if (cfg->num_display > 0 && pipe != cfg->crtc_index) { + pipe = cfg->crtc_index; tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; tmp |= DCCG_DISP1_SLOW_SELECT(pipe); @@ -4236,7 +4223,7 @@ static void si_program_display_gap(struct amdgpu_device *adev) * This can be a problem on PowerXpress systems or if you want to use the card * for offscreen rendering or compute if there are no crtcs enabled. */ - si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); + si_notify_smc_display_change(adev, cfg->num_display > 0); } static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) @@ -5545,7 +5532,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev, (pl->mclk <= pi->mclk_stutter_mode_threshold) && !eg_pi->uvd_enabled && (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && - (adev->pm.dpm.new_active_crtc_count <= 2)) { + (adev->pm.pm_display_cfg.num_display <= 2)) { level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; } @@ -5694,7 +5681,7 @@ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, /* XXX validate against display requirements! 
*/ for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) { - if (adev->clock.current_dispclk <= + if (adev->pm.pm_display_cfg.display_clk <= adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { if (ulv->pl.vddc < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) @@ -5848,30 +5835,22 @@ static int si_upload_ulv_state(struct amdgpu_device *adev) static int si_upload_smc_data(struct amdgpu_device *adev) { - struct amdgpu_crtc *amdgpu_crtc = NULL; - int i; + const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; u32 crtc_index = 0; u32 mclk_change_block_cp_min = 0; u32 mclk_change_block_cp_max = 0; - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->pm.dpm.new_active_crtcs & (1 << i)) { - amdgpu_crtc = adev->mode_info.crtcs[i]; - break; - } - } - /* When a display is plugged in, program these so that the SMC * performs MCLK switching when it doesn't cause flickering. * When no display is plugged in, there is no need to restrict * MCLK switching, so program them to zero. */ - if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) { - crtc_index = amdgpu_crtc->crtc_id; + if (cfg->num_display) { + crtc_index = cfg->crtc_index; - if (amdgpu_crtc->line_time) { - mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time; - mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time; + if (cfg->line_time_in_us) { + mclk_change_block_cp_min = 200 / cfg->line_time_in_us; + mclk_change_block_cp_max = 100 / cfg->line_time_in_us; } } diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index b48a031cbba0..554492dfa3c0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1554,16 +1554,7 @@ static void pp_pm_compute_clocks(void *handle) struct amdgpu_device *adev = hwmgr->adev; if (!adev->dc_enabled) { - amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we have issues with mclk switching with - * refresh rates over 120 hz on the non-DC code. 
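si_upload_smc_data() above turns the first active CRTC's line time into an MCLK-switch blocking window; worked numbers, assuming line_time_in_us really is in microseconds and the same 1080p60 mode (2200 pixels of htotal at 148500 kHz, about 14 us per line):

    u32 line_time_us = 14;                  /* ~2200 * 1000 / 148500 */
    u32 cp_min = 200 / line_time_us;        /* = 14 lines blocked around the switch */
    u32 cp_max = 100 / line_time_us;        /* = 7 lines */

    /* With no display plugged in, both stay 0 and MCLK switching is
     * unrestricted, exactly as the comment in the hunk says. */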
- */ - if (adev->pm.pm_display_cfg.vrefresh > 120) - adev->pm.pm_display_cfg.min_vblank_time = 0; - + amdgpu_dpm_get_display_cfg(adev); pp_display_configuration_change(handle, &adev->pm.pm_display_cfg); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index c5965924e7c6..fb8086859857 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -766,7 +766,6 @@ static int smu_set_funcs(struct amdgpu_device *adev) case IP_VERSION(13, 0, 14): case IP_VERSION(13, 0, 12): smu_v13_0_6_set_ppt_funcs(smu); - smu_v13_0_6_set_temp_funcs(smu); /* Enable pp_od_clk_voltage node */ smu->od_enabled = true; break; @@ -1316,6 +1315,33 @@ static void smu_init_power_profile(struct smu_context *smu) smu_power_profile_mode_get(smu, smu->power_profile_mode); } +void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id) +{ + struct smu_feature_cap *fea_cap = &smu->fea_cap; + + if (fea_id >= SMU_FEATURE_CAP_ID__COUNT) + return; + + set_bit(fea_id, fea_cap->cap_map); +} + +bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id) +{ + struct smu_feature_cap *fea_cap = &smu->fea_cap; + + if (fea_id >= SMU_FEATURE_CAP_ID__COUNT) + return false; + + return test_bit(fea_id, fea_cap->cap_map); +} + +static void smu_feature_cap_init(struct smu_context *smu) +{ + struct smu_feature_cap *fea_cap = &smu->fea_cap; + + bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT); +} + static int smu_sw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -1348,6 +1374,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) INIT_DELAYED_WORK(&smu->swctf_delayed_work, smu_swctf_delayed_work_handler); + smu_feature_cap_init(smu); + ret = smu_smc_table_sw_init(smu); if (ret) { dev_err(adev->dev, "Failed to sw init smc table!\n"); @@ -1897,7 +1925,6 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block) for (i = 0; i < adev->vcn.num_vcn_inst; i++) smu_dpm_set_vcn_enable(smu, true, i); smu_dpm_set_jpeg_enable(smu, true); - smu_dpm_set_vpe_enable(smu, true); smu_dpm_set_umsch_mm_enable(smu, true); smu_set_mall_enable(smu); smu_set_gfx_cgpg(smu, true); @@ -2105,7 +2132,6 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block) } smu_dpm_set_jpeg_enable(smu, false); adev->jpeg.cur_state = AMD_PG_STATE_GATE; - smu_dpm_set_vpe_enable(smu, false); smu_dpm_set_umsch_mm_enable(smu, false); if (!smu->pm_enabled) @@ -2237,7 +2263,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) return ret; } - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); if (ret) return ret; @@ -3508,15 +3534,10 @@ bool smu_mode1_reset_is_support(struct smu_context *smu) bool smu_link_reset_is_support(struct smu_context *smu) { - bool ret = false; - if (!smu->pm_enabled) return false; - if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support) - ret = smu->ppt_funcs->link_reset_is_support(smu); - - return ret; + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET); } int smu_mode1_reset(struct smu_context *smu) @@ -4106,12 +4127,7 @@ int smu_send_rma_reason(struct smu_context *smu) */ bool smu_reset_sdma_is_supported(struct smu_context *smu) { - bool ret = false; - - if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported) - ret = smu->ppt_funcs->reset_sdma_is_supported(smu); - - return ret; 
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); } int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) @@ -4126,12 +4142,7 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) bool smu_reset_vcn_is_supported(struct smu_context *smu) { - bool ret = false; - - if (smu->ppt_funcs && smu->ppt_funcs->reset_vcn_is_supported) - ret = smu->ppt_funcs->reset_vcn_is_supported(smu); - - return ret; + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET); } int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 5976eda80035..582c186d8b62 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -528,6 +528,17 @@ enum smu_fw_status { */ #define SMU_WBRF_EVENT_HANDLING_PACE 10 +enum smu_feature_cap_id { + SMU_FEATURE_CAP_ID__LINK_RESET = 0, + SMU_FEATURE_CAP_ID__SDMA_RESET, + SMU_FEATURE_CAP_ID__VCN_RESET, + SMU_FEATURE_CAP_ID__COUNT, +}; + +struct smu_feature_cap { + DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT); +}; + struct smu_context { struct amdgpu_device *adev; struct amdgpu_irq_src irq_source; @@ -550,6 +561,7 @@ struct smu_context { struct amd_pp_display_configuration *display_config; struct smu_baco_context smu_baco; struct smu_temperature_range thermal_range; + struct smu_feature_cap fea_cap; void *od_settings; struct smu_umd_pstate_table pstate_table; @@ -1273,11 +1285,6 @@ struct pptable_funcs { bool (*mode1_reset_is_support)(struct smu_context *smu); /** - * @link_reset_is_support: Check if GPU supports link reset. - */ - bool (*link_reset_is_support)(struct smu_context *smu); - - /** * @mode1_reset: Perform mode1 reset. * * Complete GPU reset. @@ -1427,19 +1434,11 @@ struct pptable_funcs { * @reset_sdma: message SMU to soft reset sdma instance. */ int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask); - /** - * @reset_sdma_is_supported: Check if support resets the SDMA engine. - */ - bool (*reset_sdma_is_supported)(struct smu_context *smu); /** * @reset_vcn: message SMU to soft reset vcn instance. */ int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask); - /** - * @reset_vcn_is_supported: Check if support resets vcn. - */ - bool (*reset_vcn_is_supported)(struct smu_context *smu); /** * @get_ecc_table: message SMU to get ECC INFO table. 
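The per-ASIC *_is_support callbacks removed above give way to the smu_feature_cap bitmap; a sketch of the intended split, assuming a ppt backend that learns the capability once during feature probing (the probe condition is hypothetical):

    /* in a ppt backend's setup path, once per device: */
    if (fw_reports_sdma_reset)      /* hypothetical probe result */
            smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);

    /* the generic layer then only tests the bit, as in
     * smu_reset_sdma_is_supported() above: */
    if (!smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET))
            return false;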
@@ -1788,4 +1787,7 @@ ssize_t smu_get_pm_policy_info(struct smu_context *smu, enum pp_pm_policy p_type, char *sysbuf); #endif + +void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id); +bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index 1c407a8e96ee..bf6aa9620911 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -191,7 +191,7 @@ typedef enum { #define SMU_METRICS_TABLE_VERSION 0x14 -#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x0 +#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1 typedef struct __attribute__((packed, aligned(4))) { uint64_t AccumulationCounter; @@ -304,7 +304,12 @@ typedef struct { int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius - int16_t spare[3]; + int16_t spare[7]; + + //NPM: NODE POWER MANAGEMENT + uint32_t NodePowerLimit; + uint32_t NodePower; + uint32_t GlobalPPTResidencyAcc; } SystemMetricsTable_t; #pragma pack(pop) @@ -359,6 +364,9 @@ typedef struct { // General info uint32_t pldmVersion[2]; + + //Node Power Limit + uint32_t MaxNodePowerLimit; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 599eddb5a67d..4fff78da81ff 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1745,10 +1745,10 @@ static int arcturus_i2c_control_init(struct smu_context *smu) snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -1756,27 +1756,12 @@ static int arcturus_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void arcturus_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index aac202d0c30e..0028f10ead42 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3145,10 +3145,10 @@ static int navi10_i2c_control_init(struct smu_context *smu) control->quirks = &navi10_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to 
register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -3156,27 +3156,12 @@ static int navi10_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void navi10_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index d57591509aed..31c2c0386b1f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2648,10 +2648,10 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu) control->quirks = &sienna_cichlid_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } /* assign the buses used for the FRU EEPROM and RAS EEPROM */ @@ -2660,27 +2660,12 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void sienna_cichlid_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index b067147b7c41..18d5d0704509 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1641,33 +1641,22 @@ static int aldebaran_i2c_control_init(struct smu_context *smu) control->quirks = &aldebaran_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - i2c_del_adapter(control); - - return res; } static void aldebaran_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index e084ed99ec0e..c1062e5f0393 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2825,10 +2825,10 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) control->quirks = &smu_v13_0_0_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -2838,27 +2838,12 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index 0bec12b348ce..1842a33b2bce 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -137,7 +137,7 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), - MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 0), + MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1), }; int smu_v13_0_12_tables_init(struct smu_context *smu) @@ -341,6 +341,9 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) static_metrics->pldmVersion[0] != 0xFFFFFFFF) smu->adev->firmware.pldm_version = static_metrics->pldmVersion[0]; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) + pptable->MaxNodePowerLimit = + SMUQ10_ROUND(static_metrics->MaxNodePowerLimit); smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -580,6 +583,50 @@ static bool smu_v13_0_12_is_temp_metrics_supported(struct smu_context *smu, return false; } +int smu_v13_0_12_get_npm_data(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + struct smu_table *tables = smu_table->tables; + SystemMetricsTable_t *metrics; + struct smu_table *sys_table; + int ret; + + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) + return -EOPNOTSUPP; + + if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) { + *value = pptable->MaxNodePowerLimit; + return 0; + } + + ret = smu_v13_0_12_get_system_metrics_table(smu); + if (ret) + return ret; + + sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS]; + metrics = (SystemMetricsTable_t *)sys_table->cache.buffer; + + switch (sensor) { + case AMDGPU_PP_SENSOR_NODEPOWERLIMIT: + *value = SMUQ10_ROUND(metrics->NodePowerLimit); + break; + case AMDGPU_PP_SENSOR_NODEPOWER: + *value = SMUQ10_ROUND(metrics->NodePower); + 
break; + case AMDGPU_PP_SENSOR_GPPTRESIDENCY: + *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc); + break; + default: + return -EINVAL; + } + + return ret; +} + static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu, enum smu_temp_metric_type type, void *table) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index ebee659f8a1c..349b6b8be010 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -143,7 +143,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), - MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), @@ -354,7 +354,11 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) } if (fw_ver >= 0x04560700) { - if (!amdgpu_sriov_vf(smu->adev)) + if (fw_ver >= 0x04560900) { + smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); + if (smu->adev->gmc.xgmi.physical_node_id == 0) + smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS)); + } else if (!amdgpu_sriov_vf(smu->adev)) smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); } else { smu_v13_0_12_tables_fini(smu); @@ -413,6 +417,10 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); if (amdgpu_sriov_vf(adev)) { + if (fw_ver >= 0x00558200) + amdgpu_virt_attr_set(&adev->virt.virt_caps, + AMDGPU_VIRT_CAP_POWER_LIMIT, + AMDGPU_CAP_ATTR_RW); if ((pgm == 0 && fw_ver >= 0x00558000) || (pgm == 7 && fw_ver >= 0x7551000)) { smu_v13_0_6_cap_set(smu, @@ -1795,6 +1803,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } + case AMDGPU_PP_SENSOR_NODEPOWERLIMIT: + case AMDGPU_PP_SENSOR_NODEPOWER: + case AMDGPU_PP_SENSOR_GPPTRESIDENCY: + case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT: + ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data); + if (ret) + return ret; + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; @@ -2490,10 +2507,10 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) control->quirks = &smu_v13_0_6_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -2501,27 +2518,12 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } @@ -3223,6 
+3225,20 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) } +static int smu_v13_0_6_post_init(struct smu_context *smu) +{ + if (smu_v13_0_6_is_link_reset_supported(smu)) + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET); + + if (smu_v13_0_6_reset_sdma_is_supported(smu)) + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); + + if (smu_v13_0_6_reset_vcn_is_supported(smu)) + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET); + + return 0; +} + static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -3839,6 +3855,12 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .parse_error_code = aca_smu_parse_error_code, }; +static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) +{ + smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) + == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3886,7 +3908,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, - .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, .mode1_reset = smu_v13_0_6_mode1_reset, .mode2_reset = smu_v13_0_6_mode2_reset, .link_reset = smu_v13_0_6_link_reset, @@ -3896,9 +3917,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, .send_rma_reason = smu_v13_0_6_send_rma_reason, .reset_sdma = smu_v13_0_6_reset_sdma, - .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported, .dpm_reset_vcn = smu_v13_0_6_reset_vcn, - .reset_vcn_is_supported = smu_v13_0_6_reset_vcn_is_supported, + .post_init = smu_v13_0_6_post_init, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) @@ -3913,12 +3933,8 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; smu_v13_0_set_smu_mailbox_registers(smu); + smu_v13_0_6_set_temp_funcs(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); } -void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) -{ - smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) - == IP_VERSION(13, 0, 12)) ? 
&smu_v13_0_12_temp_funcs : NULL; -} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index aae9a546a67e..7ef5f3e66c27 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -49,6 +49,7 @@ struct PPTable_t { uint32_t MaxLclkDpmRange; uint32_t MinLclkDpmRange; uint64_t PublicSerialNumber_AID; + uint32_t MaxNodePowerLimit; bool Init; }; @@ -70,11 +71,11 @@ enum smu_v13_0_6_caps { SMU_CAP(BOARD_VOLTAGE), SMU_CAP(PLDM_VERSION), SMU_CAP(TEMP_METRICS), + SMU_CAP(NPM_METRICS), SMU_CAP(ALL), }; extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); -extern void smu_v13_0_6_set_temp_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, @@ -92,6 +93,9 @@ ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, void *smu_metrics); int smu_v13_0_12_tables_init(struct smu_context *smu); void smu_v13_0_12_tables_fini(struct smu_context *smu); +int smu_v13_0_12_get_npm_data(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value); extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index f32474af90b3..086501cc5213 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -2087,10 +2087,10 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) control->quirks = &smu_v14_0_2_i2c_control_quirks; i2c_set_adapdata(control, smu_i2c); - res = i2c_add_adapter(control); + res = devm_i2c_add_adapter(adev->dev, control); if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); - goto Out_err; + return res; } } @@ -2100,27 +2100,12 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; return 0; -Out_err: - for ( ; i >= 0; i--) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } - return res; } static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - int i; - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; - struct i2c_adapter *control = &smu_i2c->adapter; - - i2c_del_adapter(control); - } adev->pm.ras_eeprom_i2c_bus = NULL; adev->pm.fru_eeprom_i2c_bus = NULL; } diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 19c04687b0fe..8e650a02c528 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -134,7 +134,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si * 3. The Delays are often longer a lot when system resume from S3/S4. 
*/ if (j) - mdelay(j + 1); + msleep(j + 1); /* Wait for EDID offset to show up in mirror register */ vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7); diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 609cdb9d371e..6f3fdcb6afdb 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2678,7 +2678,7 @@ static int anx7625_i2c_probe(struct i2c_client *client) ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq, NULL, anx7625_intr_hpd_isr, IRQF_TRIGGER_FALLING | - IRQF_ONESHOT, + IRQF_ONESHOT | IRQF_NO_AUTOEN, "anx7625-intp", platform); if (ret) { DRM_DEV_ERROR(dev, "fail to request irq\n"); @@ -2747,8 +2747,10 @@ static int anx7625_i2c_probe(struct i2c_client *client) } /* Add work function */ - if (platform->pdata.intp_irq) + if (platform->pdata.intp_irq) { + enable_irq(platform->pdata.intp_irq); queue_work(platform->workqueue, &platform->work); + } if (platform->pdata.audio_en) anx7625_register_audio(dev, platform); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index a614d1384f71..38726ae1bf15 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge, mhdp_state = to_cdns_mhdp_bridge_state(new_state); mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); - if (!mhdp_state->current_mode) - return; + if (!mhdp_state->current_mode) { + ret = -EINVAL; + goto out; + } drm_mode_set_name(mhdp_state->current_mode); diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 5d272916e200..122502968927 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int imx8qxp_ldb_runtime_suspend(struct device *dev) -{ - return 0; -} - static int imx8qxp_ldb_runtime_resume(struct device *dev) { struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); @@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qxp_ldb_pm_ops = { - RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL) + RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_ldb_dt_ids[] = { diff --git a/drivers/gpu/drm/bridge/waveshare-dsi.c b/drivers/gpu/drm/bridge/waveshare-dsi.c index 01c70e7d3d3b..43f4e7412d72 100644 --- a/drivers/gpu/drm/bridge/waveshare-dsi.c +++ b/drivers/gpu/drm/bridge/waveshare-dsi.c @@ -147,8 +147,8 @@ static int ws_bridge_probe(struct i2c_client *i2c) int ret; ws = devm_drm_bridge_alloc(dev, struct ws_bridge, bridge, &ws_bridge_bridge_funcs); - if (!ws) - return -ENOMEM; + if (IS_ERR(ws)) + return PTR_ERR(ws); ws->dev = dev; diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index baacd21e7341..a5bdd6c10643 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -137,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector, { struct drm_bridge_connector *bridge_connector = to_drm_bridge_connector(connector); - struct drm_bridge *bridge; /* Notify all bridges in the pipeline of hotplug events. 
*/ - drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) { if (bridge->funcs->hpd_notify) bridge->funcs->hpd_notify(bridge, status); } @@ -639,7 +638,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge_connector *bridge_connector; struct drm_connector *connector; struct i2c_adapter *ddc = NULL; - struct drm_bridge *bridge, *panel_bridge = NULL; + struct drm_bridge *panel_bridge = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; bool support_hdcp = false; @@ -667,7 +666,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, * detection are available, we don't support hotplug detection at all. */ connector_type = DRM_MODE_CONNECTOR_Unknown; - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { if (!bridge->interlace_allowed) connector->interlace_allowed = false; if (!bridge->ycbcr_420_allowed) @@ -818,7 +817,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_notifier_register(connector, NULL, @@ -829,7 +828,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_register(connector, &drm_bridge_connector_hdmi_cec_funcs, diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 4aaeae4fa03c..5426db21e53f 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -123,6 +123,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], } EXPORT_SYMBOL(drm_dp_clock_recovery_ok); +bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + + return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS; +} +EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress); + u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], int lane) { diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index cd15cf52f0c9..ed5359a71f7e 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1308,7 +1308,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_bridge_state *bridge_state; - struct drm_bridge *bridge; if (!encoder) return 0; @@ -1317,7 +1316,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, "Adding all bridges for [encoder:%d:%s] to %p\n", encoder->base.id, encoder->name, state); - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { /* Skip bridges that don't implement the atomic state hooks. 
*/ if (!bridge->funcs->atomic_duplicate_state) continue; } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index d031447eebc9..5f265e8b4d49 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1121,7 +1121,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_bridge_state *bridge_state, *next_bridge_state; - struct drm_bridge *next_bridge; u32 output_flags = 0; bridge_state = drm_atomic_get_new_bridge_state(state, bridge); @@ -1130,7 +1129,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, if (!bridge_state) return; - next_bridge = drm_bridge_get_next_bridge(bridge); + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); /* * Let's try to apply the most common case here, that is, propagate @@ -1480,10 +1479,9 @@ static int encoder_bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; struct drm_printer p = drm_seq_file_printer(m); - struct drm_bridge *bridge; unsigned int idx = 0; - drm_for_each_bridge_in_chain(encoder, bridge) + drm_for_each_bridge_in_chain_scoped(encoder, bridge) drm_bridge_debugfs_show_bridge(&p, bridge, idx++); return 0; } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 9c2c3b0c8c47..fc4caf7da5fc 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode) } EXPORT_SYMBOL(drm_client_modeset_dpms); +/** + * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur + * @client: DRM client + * @crtc_index: The index of the CRTC to wait on + * + * Block the caller until the given CRTC has seen a VBLANK. Do nothing + * if the CRTC is disabled. If there's another DRM master present, fail + * with -EBUSY. + * + * Returns: + * 0 on success, or negative error code otherwise. + */ +int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index) +{ + struct drm_device *dev = client->dev; + struct drm_crtc *crtc; + int ret; + + /* + * Rate-limit update frequency to vblank. If there's a DRM master + * present, it could interfere while we're waiting for the vblank + * event. Don't wait in this case. + */ + if (!drm_master_internal_acquire(dev)) + return -EBUSY; + + crtc = client->modesets[crtc_index].crtc; + + /* + * Only wait for a vblank event if the CRTC is enabled, otherwise + * just don't do anything, not even report an error.
+ */ + ret = drm_crtc_vblank_get(crtc); + if (!ret) { + drm_crtc_wait_one_vblank(crtc); + drm_crtc_vblank_put(crtc); + } + + drm_master_internal_release(dev); + + return 0; +} +EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank); + #ifdef CONFIG_DRM_KUNIT_TEST #include "tests/drm_client_modeset_test.c" #endif diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 0ac723a46a91..8e3cb08241c8 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -696,7 +696,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->clientlist_mutex); mutex_destroy(&dev->filelist_mutex); - mutex_destroy(&dev->struct_mutex); } static int drm_dev_init(struct drm_device *dev, @@ -737,7 +736,6 @@ static int drm_dev_init(struct drm_device *dev, INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->event_lock); - mutex_init(&dev->struct_mutex); mutex_init(&dev->filelist_mutex); mutex_init(&dev->clientlist_mutex); mutex_init(&dev->master_mutex); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 11a5b60cb9ce..53e9dc0543de 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -368,6 +368,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper) unsigned long flags; int ret; + mutex_lock(&helper->lock); + drm_client_modeset_wait_for_vblank(&helper->client, 0); + mutex_unlock(&helper->lock); + if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty)) return; @@ -1068,15 +1072,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; int ret = 0; - mutex_lock(&fb_helper->lock); - if (!drm_master_internal_acquire(dev)) { - ret = -EBUSY; - goto unlock; - } + guard(mutex)(&fb_helper->lock); switch (cmd) { case FBIO_WAITFORVSYNC: @@ -1096,28 +1094,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, * make. If we're not smart enough here, one should * just consider switch the userspace to KMS. */ - crtc = fb_helper->client.modesets[0].crtc; - - /* - * Only wait for a vblank event if the CRTC is - * enabled, otherwise just don't do anythintg, - * not even report an error. - */ - ret = drm_crtc_vblank_get(crtc); - if (!ret) { - drm_crtc_wait_one_vblank(crtc); - drm_crtc_vblank_put(crtc); - } - - ret = 0; + ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0); break; default: ret = -ENOTTY; } - drm_master_internal_release(dev); -unlock: - mutex_unlock(&fb_helper->lock); return ret; } EXPORT_SYMBOL(drm_fb_helper_ioctl); diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 006836554cc2..6cddf05c493b 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -1165,97 +1165,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_ } EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444); -/** - * drm_fb_blit - Copy parts of a framebuffer to display memory - * @dst: Array of display-memory addresses to copy to - * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines - * within @dst; can be NULL if scanlines are stored next to each other. 
- * @dst_format: FOURCC code of the display's color format - * @src: The framebuffer memory to copy from - * @fb: The framebuffer to copy from - * @clip: Clip rectangle area to copy - * @state: Transform and conversion state - * - * This function copies parts of a framebuffer to display memory. If the - * formats of the display and the framebuffer mismatch, the blit function - * will attempt to convert between them during the process. The parameters @dst, - * @dst_pitch and @src refer to arrays. Each array must have at least as many - * entries as there are planes in @dst_format's format. Each entry stores the - * value for the format's respective color plane at the same index. - * - * This function does not apply clipping on @dst (i.e. the destination is at the - * top-left corner). - * - * Returns: - * 0 on success, or - * -EINVAL if the color-format conversion failed, or - * a negative error code otherwise. - */ -int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, - const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, struct drm_format_conv_state *state) -{ - uint32_t fb_format = fb->format->format; - - if (fb_format == dst_format) { - drm_fb_memcpy(dst, dst_pitch, src, fb, clip); - return 0; - } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == DRM_FORMAT_XRGB8888) { - if (dst_format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB1555) { - drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB1555) { - drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGBA5551) { - drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGR888) { - drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB8888) { - drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XBGR8888) { - drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ABGR8888) { - drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB2101010) { - drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB2101010) { - drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGRX8888) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state); - return 0; - } - } - - drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n", - &fb_format, &dst_format); - - return -EINVAL; -} -EXPORT_SYMBOL(drm_fb_blit); - static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned 
int pixels) { u8 *dbuf8 = dbuf; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8d25cc65707d..cbeb76b2124f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev) vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), GFP_KERNEL); - if (!vma_offset_manager) { - DRM_ERROR("out of memory\n"); + if (!vma_offset_manager) return -ENOMEM; - } dev->vma_offset_manager = vma_offset_manager; drm_vma_offset_manager_init(vma_offset_manager, @@ -785,9 +783,10 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out) { - int ret; - u32 *handles; + struct drm_device *dev = filp->minor->dev; struct drm_gem_object **objs; + u32 *handles; + int ret; if (!count) return 0; @@ -807,7 +806,7 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { ret = -EFAULT; - DRM_DEBUG("Failed to copy in GEM handles\n"); + drm_dbg_core(dev, "Failed to copy in GEM handles\n"); goto out; } @@ -855,12 +854,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup); long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout) { - long ret; + struct drm_device *dev = filep->minor->dev; struct drm_gem_object *obj; + long ret; obj = drm_gem_object_lookup(filep, handle); if (!obj) { - DRM_DEBUG("Failed to look up GEM BO %d\n", handle); + drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 4f0320df858f..a507cf517015 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -582,7 +582,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev, ret = dma_buf_vmap_unlocked(attach->dmabuf, &map); if (ret) { - DRM_ERROR("Failed to vmap PRIME buffer\n"); + drm_err(dev, "Failed to vmap PRIME buffer\n"); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 5d1349c34afd..50594cf8e17c 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -48,28 +48,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { .vm_ops = &drm_gem_shmem_vm_ops, }; -static struct drm_gem_shmem_object * -__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, - struct vfsmount *gemfs) +static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, + size_t size, bool private, struct vfsmount *gemfs) { - struct drm_gem_shmem_object *shmem; - struct drm_gem_object *obj; + struct drm_gem_object *obj = &shmem->base; int ret = 0; - size = PAGE_ALIGN(size); - - if (dev->driver->gem_create_object) { - obj = dev->driver->gem_create_object(dev, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - shmem = to_drm_gem_shmem_obj(obj); - } else { - shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); - if (!shmem) - return ERR_PTR(-ENOMEM); - obj = &shmem->base; - } - if (!obj->funcs) obj->funcs = &drm_gem_shmem_funcs; @@ -81,7 +65,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, } if (ret) { drm_gem_private_object_fini(obj); - goto err_free; + return ret; } ret = drm_gem_create_mmap_offset(obj); @@ -102,14 +86,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t 
size, bool private, __GFP_RETRY_MAYFAIL | __GFP_NOWARN); } - return shmem; - + return 0; err_release: drm_gem_object_release(obj); -err_free: - kfree(obj); + return ret; +} - return ERR_PTR(ret); +/** + * drm_gem_shmem_init - Initialize an allocated object. + * @dev: DRM device + * @shmem: The allocated shmem GEM object. + * @size: Size of the object, in bytes. + * + * Returns: + * 0 on success, or a negative error code on failure. + */ +int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size) +{ + return __drm_gem_shmem_init(dev, shmem, size, false, NULL); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_init); + +static struct drm_gem_shmem_object * +__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, + struct vfsmount *gemfs) +{ + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj; + int ret = 0; + + size = PAGE_ALIGN(size); + + if (dev->driver->gem_create_object) { + obj = dev->driver->gem_create_object(dev, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + shmem = to_drm_gem_shmem_obj(obj); + } else { + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) + return ERR_PTR(-ENOMEM); + obj = &shmem->base; + } + + ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs); + if (ret) { + kfree(obj); + return ERR_PTR(ret); + } + + return shmem; } /** * drm_gem_shmem_create - Allocate an object with the given size @@ -150,13 +175,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); /** - * drm_gem_shmem_free - Free resources associated with a shmem GEM object - * @shmem: shmem GEM object to free + * drm_gem_shmem_release - Release resources associated with a shmem GEM object. + * @shmem: shmem GEM object * - * This function cleans up the GEM object state and frees the memory used to - * store the object itself. + * This function cleans up the GEM object state, but does not free the memory used to store the + * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings. */ -void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; @@ -183,6 +208,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } drm_gem_object_release(obj); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_release); + +/** + * drm_gem_shmem_free - Free resources associated with a shmem GEM object + * @shmem: shmem GEM object to free + * + * This function cleans up the GEM object state and frees the memory used to + * store the object itself. + */ +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +{ + drm_gem_shmem_release(shmem); kfree(shmem); } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index b04cde4a60e7..90760d0ca071 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -107,7 +107,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) { - /* We got here via ttm_bo_put(), which means that the + /* We got here via ttm_bo_fini(), which means that the * TTM buffer object in 'bo' has already been cleaned * up; only release the GEM object. */ @@ -234,11 +234,11 @@ EXPORT_SYMBOL(drm_gem_vram_create); * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object * @gbo: the GEM VRAM object * - * See ttm_bo_put() for more information.
+ * See ttm_bo_fini() for more information. */ void drm_gem_vram_put(struct drm_gem_vram_object *gbo) { - ttm_bo_put(&gbo->bo); + ttm_bo_fini(&gbo->bo); } EXPORT_SYMBOL(drm_gem_vram_put); diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index e2a9a6ae1d54..eeeeb99cfdf6 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -373,6 +373,12 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = { * * This function initializes the GPU SVM. * + * Note: If only using the simple drm_gpusvm_pages API (get/unmap/free), + * then only @gpusvm, @name, and @drm are expected. However, the same base + * @gpusvm can also be used with both modes together, in which case the full + * setup is needed; the core drm_gpusvm_pages API will simply never use + * the other fields. + * * Return: 0 on success, a negative error code on failure. */ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, @@ -383,8 +389,16 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, const struct drm_gpusvm_ops *ops, const unsigned long *chunk_sizes, int num_chunks) { - if (!ops->invalidate || !num_chunks) - return -EINVAL; + if (mm) { + if (!ops->invalidate || !num_chunks) + return -EINVAL; + mmgrab(mm); + } else { + /* No full SVM mode, only core drm_gpusvm_pages API. */ + if (ops || num_chunks || mm_range || notifier_size || + device_private_page_owner) + return -EINVAL; + } gpusvm->name = name; gpusvm->drm = drm; @@ -397,7 +411,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, gpusvm->chunk_sizes = chunk_sizes; gpusvm->num_chunks = num_chunks; - mmgrab(mm); gpusvm->root = RB_ROOT_CACHED; INIT_LIST_HEAD(&gpusvm->notifier_list); @@ -489,7 +502,8 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm) drm_gpusvm_range_remove(gpusvm, range); } - mmdrop(gpusvm->mm); + if (gpusvm->mm) + mmdrop(gpusvm->mm); WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root)); } EXPORT_SYMBOL_GPL(drm_gpusvm_fini); @@ -629,13 +643,42 @@ drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm, range->itree.start = ALIGN_DOWN(fault_addr, chunk_size); range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1; INIT_LIST_HEAD(&range->entry); - range->notifier_seq = LONG_MAX; - range->flags.migrate_devmem = migrate_devmem ? 1 : 0; + range->pages.notifier_seq = LONG_MAX; + range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0; return range; } /** + * drm_gpusvm_hmm_pfn_to_order() - Get the largest CPU mapping order. + * @hmm_pfn: The current hmm_pfn. + * @hmm_pfn_index: Index of the @hmm_pfn within the pfn array. + * @npages: Number of pages within the pfn array, i.e. the hmm range size. + * + * To allow skipping PFNs with the same flags (like when they belong to + * the same huge PTE) when looping over the pfn array, take a given hmm_pfn, + * and return the largest order that will fit inside the CPU PTE, while also + * accounting for the original hmm range boundaries. + * + * Return: The largest order that will safely fit within the size of the hmm_pfn + * CPU PTE.
+ */ +static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn, + unsigned long hmm_pfn_index, + unsigned long npages) +{ + unsigned long size; + + size = 1UL << hmm_pfn_to_map_order(hmm_pfn); + size -= (hmm_pfn & ~HMM_PFN_FLAGS) & (size - 1); + hmm_pfn_index += size; + if (hmm_pfn_index > npages) + size -= (hmm_pfn_index - npages); + + return ilog2(size); +} + +/** * drm_gpusvm_check_pages() - Check pages * @gpusvm: Pointer to the GPU SVM structure * @notifier: Pointer to the GPU SVM notifier structure @@ -693,7 +736,7 @@ static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm, err = -EFAULT; goto err_free; } - i += 0x1 << hmm_pfn_to_map_order(pfns[i]); + i += 0x1 << drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages); } err_free: @@ -951,31 +994,31 @@ err_mmunlock: EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert); /** - * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal) + * __drm_gpusvm_unmap_pages() - Unmap pages associated with GPU SVM pages (internal) * @gpusvm: Pointer to the GPU SVM structure - * @range: Pointer to the GPU SVM range structure + * @svm_pages: Pointer to the GPU SVM pages structure * @npages: Number of pages to unmap * - * This function unmap pages associated with a GPU SVM range. Assumes and + * This function unmaps pages associated with a GPU SVM pages struct. Assumes and * asserts correct locking is in place when called. */ -static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, - struct drm_gpusvm_range *range, - unsigned long npages) +static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + unsigned long npages) { - unsigned long i, j; - struct drm_pagemap *dpagemap = range->dpagemap; + struct drm_pagemap *dpagemap = svm_pages->dpagemap; struct device *dev = gpusvm->drm->dev; + unsigned long i, j; lockdep_assert_held(&gpusvm->notifier_lock); - if (range->flags.has_dma_mapping) { - struct drm_gpusvm_range_flags flags = { - .__flags = range->flags.__flags, + if (svm_pages->flags.has_dma_mapping) { + struct drm_gpusvm_pages_flags flags = { + .__flags = svm_pages->flags.__flags, }; for (i = 0, j = 0; i < npages; j++) { - struct drm_pagemap_addr *addr = &range->dma_addr[j]; + struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j]; if (addr->proto == DRM_INTERCONNECT_SYSTEM) dma_unmap_page(dev, @@ -991,31 +1034,52 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */ flags.has_devmem_pages = false; flags.has_dma_mapping = false; - WRITE_ONCE(range->flags.__flags, flags.__flags); + WRITE_ONCE(svm_pages->flags.__flags, flags.__flags); - range->dpagemap = NULL; + svm_pages->dpagemap = NULL; } } /** - * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range + * __drm_gpusvm_free_pages() - Free dma array associated with GPU SVM pages * @gpusvm: Pointer to the GPU SVM structure - * @range: Pointer to the GPU SVM range structure + * @svm_pages: Pointer to the GPU SVM pages structure * * This function frees the dma address array associated with a GPU SVM range.
*/ -static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm, - struct drm_gpusvm_range *range) +static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages) { lockdep_assert_held(&gpusvm->notifier_lock); - if (range->dma_addr) { - kvfree(range->dma_addr); - range->dma_addr = NULL; + if (svm_pages->dma_addr) { + kvfree(svm_pages->dma_addr); + svm_pages->dma_addr = NULL; } } /** + * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages + * struct + * @gpusvm: Pointer to the GPU SVM structure + * @svm_pages: Pointer to the GPU SVM pages structure + * @npages: Number of mapped pages + * + * This function unmaps and frees the dma address array associated with a GPU + * SVM pages struct. + */ +void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + unsigned long npages) +{ + drm_gpusvm_notifier_lock(gpusvm); + __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages); + __drm_gpusvm_free_pages(gpusvm, svm_pages); + drm_gpusvm_notifier_unlock(gpusvm); +} +EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages); + +/** * drm_gpusvm_range_remove() - Remove GPU SVM range * @gpusvm: Pointer to the GPU SVM structure * @range: Pointer to the GPU SVM range to be removed @@ -1040,8 +1104,8 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm, return; drm_gpusvm_notifier_lock(gpusvm); - __drm_gpusvm_range_unmap_pages(gpusvm, range, npages); - drm_gpusvm_range_free_pages(gpusvm, range); + __drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages); + __drm_gpusvm_free_pages(gpusvm, &range->pages); __drm_gpusvm_range_remove(notifier, range); drm_gpusvm_notifier_unlock(gpusvm); @@ -1107,6 +1171,28 @@ void drm_gpusvm_range_put(struct drm_gpusvm_range *range) EXPORT_SYMBOL_GPL(drm_gpusvm_range_put); /** + * drm_gpusvm_pages_valid() - GPU SVM pages valid + * @gpusvm: Pointer to the GPU SVM structure + * @svm_pages: Pointer to the GPU SVM pages structure + * + * This function determines if GPU SVM pages are valid. Expected to be + * called holding gpusvm->notifier_lock and as the last step before committing a + * GPU binding. This is akin to a notifier seqno check in the HMM documentation + * but due to wider notifiers (i.e., notifiers which span multiple ranges) this + * function is required for finer-grained checking (i.e., per range) of whether pages + * are valid.
+ * + * Return: True if the GPU SVM pages are valid, False otherwise + */ +static bool drm_gpusvm_pages_valid(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages) +{ + lockdep_assert_held(&gpusvm->notifier_lock); + + return svm_pages->flags.has_devmem_pages || svm_pages->flags.has_dma_mapping; +} + +/** * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid * @gpusvm: Pointer to the GPU SVM structure * @range: Pointer to the GPU SVM range structure @@ -1123,9 +1209,7 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_put); bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range) { - lockdep_assert_held(&gpusvm->notifier_lock); - - return range->flags.has_devmem_pages || range->flags.has_dma_mapping; + return drm_gpusvm_pages_valid(gpusvm, &range->pages); } EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid); @@ -1139,66 +1223,71 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid); * * Return: True if GPU SVM range has valid pages, False otherwise */ -static bool -drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm, - struct drm_gpusvm_range *range) +static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages) { bool pages_valid; - if (!range->dma_addr) + if (!svm_pages->dma_addr) return false; drm_gpusvm_notifier_lock(gpusvm); - pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range); + pages_valid = drm_gpusvm_pages_valid(gpusvm, svm_pages); if (!pages_valid) - drm_gpusvm_range_free_pages(gpusvm, range); + __drm_gpusvm_free_pages(gpusvm, svm_pages); drm_gpusvm_notifier_unlock(gpusvm); return pages_valid; } /** - * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range + * drm_gpusvm_get_pages() - Get pages and populate GPU SVM pages struct * @gpusvm: Pointer to the GPU SVM structure - * @range: Pointer to the GPU SVM range structure + * @svm_pages: The SVM pages to populate. This will contain the DMA addresses + * @mm: The mm corresponding to the CPU range + * @notifier: The corresponding notifier for the given CPU range + * @pages_start: Start CPU address for the pages + * @pages_end: End CPU address for the pages (exclusive) * @ctx: GPU SVM context * - * This function gets pages for a GPU SVM range and ensures they are mapped for - * DMA access. + * This function gets pages for a CPU range and ensures they are + * mapped for DMA access. * * Return: 0 on success, negative error code on failure. */ -int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm, - struct drm_gpusvm_range *range, - const struct drm_gpusvm_ctx *ctx) +int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + struct mm_struct *mm, + struct mmu_interval_notifier *notifier, + unsigned long pages_start, unsigned long pages_end, + const struct drm_gpusvm_ctx *ctx) { - struct mmu_interval_notifier *notifier = &range->notifier->notifier; struct hmm_range hmm_range = { .default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ?
0 : HMM_PFN_REQ_WRITE), .notifier = notifier, - .start = drm_gpusvm_range_start(range), - .end = drm_gpusvm_range_end(range), + .start = pages_start, + .end = pages_end, .dev_private_owner = gpusvm->device_private_page_owner, }; - struct mm_struct *mm = gpusvm->mm; void *zdd; unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); unsigned long i, j; - unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), - drm_gpusvm_range_end(range)); + unsigned long npages = npages_in_range(pages_start, pages_end); unsigned long num_dma_mapped; unsigned int order = 0; unsigned long *pfns; int err = 0; struct dev_pagemap *pagemap; struct drm_pagemap *dpagemap; - struct drm_gpusvm_range_flags flags; + struct drm_gpusvm_pages_flags flags; + enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE : + DMA_BIDIRECTIONAL; retry: hmm_range.notifier_seq = mmu_interval_read_begin(notifier); - if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range)) + if (drm_gpusvm_pages_valid_unlocked(gpusvm, svm_pages)) goto set_seqno; pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); @@ -1238,7 +1327,7 @@ map_pages: */ drm_gpusvm_notifier_lock(gpusvm); - flags.__flags = range->flags.__flags; + flags.__flags = svm_pages->flags.__flags; if (flags.unmapped) { drm_gpusvm_notifier_unlock(gpusvm); err = -EFAULT; @@ -1251,13 +1340,12 @@ map_pages: goto retry; } - if (!range->dma_addr) { + if (!svm_pages->dma_addr) { /* Unlock and restart mapping to allocate memory. */ drm_gpusvm_notifier_unlock(gpusvm); - range->dma_addr = kvmalloc_array(npages, - sizeof(*range->dma_addr), - GFP_KERNEL); - if (!range->dma_addr) { + svm_pages->dma_addr = + kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL); + if (!svm_pages->dma_addr) { err = -ENOMEM; goto err_free; } @@ -1270,7 +1358,7 @@ map_pages: for (i = 0, j = 0; i < npages; ++j) { struct page *page = hmm_pfn_to_page(pfns[i]); - order = hmm_pfn_to_map_order(pfns[i]); + order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages); if (is_device_private_page(page) || is_device_coherent_page(page)) { if (zdd != page->zone_device_data && i > 0) { @@ -1296,13 +1384,13 @@ map_pages: goto err_unmap; } } - range->dma_addr[j] = + svm_pages->dma_addr[j] = dpagemap->ops->device_map(dpagemap, gpusvm->drm->dev, page, order, - DMA_BIDIRECTIONAL); + dma_dir); if (dma_mapping_error(gpusvm->drm->dev, - range->dma_addr[j].addr)) { + svm_pages->dma_addr[j].addr)) { err = -EFAULT; goto err_unmap; } @@ -1322,15 +1410,15 @@ map_pages: addr = dma_map_page(gpusvm->drm->dev, page, 0, PAGE_SIZE << order, - DMA_BIDIRECTIONAL); + dma_dir); if (dma_mapping_error(gpusvm->drm->dev, addr)) { err = -EFAULT; goto err_unmap; } - range->dma_addr[j] = drm_pagemap_addr_encode + svm_pages->dma_addr[j] = drm_pagemap_addr_encode (addr, DRM_INTERCONNECT_SYSTEM, order, - DMA_BIDIRECTIONAL); + dma_dir); } i += 1 << order; num_dma_mapped = i; @@ -1339,21 +1427,21 @@ map_pages: if (pagemap) { flags.has_devmem_pages = true; - range->dpagemap = dpagemap; + svm_pages->dpagemap = dpagemap; } /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */ - WRITE_ONCE(range->flags.__flags, flags.__flags); + WRITE_ONCE(svm_pages->flags.__flags, flags.__flags); drm_gpusvm_notifier_unlock(gpusvm); kvfree(pfns); set_seqno: - range->notifier_seq = hmm_range.notifier_seq; + svm_pages->notifier_seq = hmm_range.notifier_seq; return 0; err_unmap: - __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped); + __drm_gpusvm_unmap_pages(gpusvm, svm_pages, num_dma_mapped); 
drm_gpusvm_notifier_unlock(gpusvm); err_free: kvfree(pfns); @@ -1361,11 +1449,62 @@ err_free: goto retry; return err; } +EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages); + +/** + * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range + * @gpusvm: Pointer to the GPU SVM structure + * @range: Pointer to the GPU SVM range structure + * @ctx: GPU SVM context + * + * This function gets pages for a GPU SVM range and ensures they are mapped for + * DMA access. + * + * Return: 0 on success, negative error code on failure. + */ +int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_range *range, + const struct drm_gpusvm_ctx *ctx) +{ + return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm, + &range->notifier->notifier, + drm_gpusvm_range_start(range), + drm_gpusvm_range_end(range), ctx); +} EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages); /** + * drm_gpusvm_unmap_pages() - Unmap GPU SVM pages + * @gpusvm: Pointer to the GPU SVM structure + * @svm_pages: Pointer to the GPU SVM pages structure + * @npages: Number of pages in @svm_pages. + * @ctx: GPU SVM context + * + * This function unmaps pages associated with a GPU SVM pages struct. If + * @in_notifier is set, it is assumed that gpusvm->notifier_lock is held in + * write mode; if it is clear, it acquires gpusvm->notifier_lock in read mode. + * Must be called in the invalidate() callback of the corresponding notifier for + * the IOMMU security model. + */ +void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + unsigned long npages, + const struct drm_gpusvm_ctx *ctx) +{ + if (ctx->in_notifier) + lockdep_assert_held_write(&gpusvm->notifier_lock); + else + drm_gpusvm_notifier_lock(gpusvm); + + __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages); + + if (!ctx->in_notifier) + drm_gpusvm_notifier_unlock(gpusvm); +} +EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages); + +/** * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range - * drm_gpusvm_range_evict() - Evict GPU SVM range * @gpusvm: Pointer to the GPU SVM structure * @range: Pointer to the GPU SVM range structure * @ctx: GPU SVM context @@ -1383,15 +1522,7 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), drm_gpusvm_range_end(range)); - if (ctx->in_notifier) - lockdep_assert_held_write(&gpusvm->notifier_lock); - else - drm_gpusvm_notifier_lock(gpusvm); - - __drm_gpusvm_range_unmap_pages(gpusvm, range, npages); - - if (!ctx->in_notifier) - drm_gpusvm_notifier_unlock(gpusvm); + return drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages, ctx); } EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages); @@ -1489,10 +1620,10 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range, { lockdep_assert_held_write(&range->gpusvm->notifier_lock); - range->flags.unmapped = true; + range->pages.flags.unmapped = true; if (drm_gpusvm_range_start(range) < mmu_range->start || drm_gpusvm_range_end(range) > mmu_range->end) - range->flags.partial_unmap = true; + range->pages.flags.partial_unmap = true; } EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped); diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index a52e95555549..af63f4d00315 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -2533,8 +2533,6 @@ static const struct drm_gpuvm_ops lock_ops = { * * The expected usage is:: * - * ..
code-block:: c - * * vm_bind { * struct drm_exec exec; * diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 1cf394369127..c0feca58511d 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -726,8 +726,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev) if (hdmi_dev) { pdev = hdmi_dev->dev; - pci_set_drvdata(pdev, NULL); oaktrail_hdmi_i2c_exit(pdev); + pci_set_drvdata(pdev, NULL); iounmap(hdmi_dev->regs); kfree(hdmi_dev); pci_dev_put(pdev); diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c index 4a15695fa933..1726a3fadff8 100644 --- a/drivers/gpu/drm/gud/gud_connector.c +++ b/drivers/gpu/drm/gud/gud_connector.c @@ -561,11 +561,11 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn continue; /* not a DRM property */ property = gud_connector_property_lookup(connector, prop); - if (WARN_ON(IS_ERR(property))) + if (drm_WARN_ON(drm, IS_ERR(property))) continue; state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state); - if (WARN_ON(IS_ERR(state_val))) + if (drm_WARN_ON(drm, IS_ERR(state_val))) continue; *state_val = val; @@ -593,7 +593,7 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state, unsigned int *state_val; state_val = gud_connector_tv_state_val(prop, &connector_state->tv); - if (WARN_ON_ONCE(IS_ERR(state_val))) + if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val))) return PTR_ERR(state_val); val = *state_val; @@ -667,7 +667,7 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index, return ret; } - if (WARN_ON(connector->index != index)) + if (drm_WARN_ON(drm, connector->index != index)) return -EINVAL; if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS) diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index 54d9aa9998e5..3a208e956dff 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -61,7 +61,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format size_t len; void *buf; - WARN_ON_ONCE(format->char_per_block[0] != 1); + drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1); /* Start on a byte boundary */ rect->x1 = ALIGN_DOWN(rect->x1, block_width); @@ -138,7 +138,7 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7); break; default: - WARN_ON_ONCE(1); + drm_WARN_ON_ONCE(fb->dev, 1); return len; } @@ -527,7 +527,7 @@ int gud_plane_atomic_check(struct drm_plane *plane, drm_connector_list_iter_end(&conn_iter); } - if (WARN_ON_ONCE(!connector_state)) + if (drm_WARN_ON_ONCE(plane->dev, !connector_state)) return -ENOENT; len = struct_size(req, properties, @@ -539,7 +539,7 @@ int gud_plane_atomic_check(struct drm_plane *plane, gud_from_display_mode(&req->mode, mode); req->format = gud_from_fourcc(format->format); - if (WARN_ON_ONCE(!req->format)) { + if (drm_WARN_ON_ONCE(plane->dev, !req->format)) { ret = -EINVAL; goto out; } @@ -561,7 +561,7 @@ int gud_plane_atomic_check(struct drm_plane *plane, val = new_plane_state->rotation; break; default: - WARN_ON_ONCE(1); + drm_WARN_ON_ONCE(plane->dev, 1); ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 853543443072..78a45a6681df 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -32,6 +32,7 @@ i915-y += \ i915_scatterlist.o \ i915_switcheroo.o 
\ i915_sysfs.o \ + i915_timer_util.o \ i915_utils.o \ intel_clock_gating.o \ intel_cpu_info.o \ @@ -280,6 +281,7 @@ i915-y += \ display/intel_modeset_setup.o \ display/intel_modeset_verify.o \ display/intel_overlay.o \ + display/intel_panic.o \ display/intel_pch.o \ display/intel_pch_display.o \ display/intel_pch_refclk.o \ @@ -298,6 +300,7 @@ i915-y += \ display/skl_scaler.o \ display/skl_universal_plane.o \ display/skl_watermark.o \ + display/vlv_clock.o \ display/vlv_sideband.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 3eb96d8abba8..407deb5dfb57 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -15,7 +15,6 @@ #include "i9xx_plane.h" #include "i9xx_plane_regs.h" #include "intel_atomic.h" -#include "intel_bo.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_regs.h" @@ -23,6 +22,7 @@ #include "intel_fb.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" +#include "intel_panic.h" #include "intel_plane.h" #include "intel_sprite.h" @@ -1178,7 +1178,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, drm_WARN_ON(display->drm, pipe != crtc->pipe); - intel_fb = intel_bo_alloc_framebuffer(); + intel_fb = intel_framebuffer_alloc(); if (!intel_fb) { drm_dbg_kms(display->drm, "failed to alloc fb\n"); return; diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c index ed7a7ed486b5..749119cc0b28 100644 --- a/drivers/gpu/drm/i915/display/intel_alpm.c +++ b/drivers/gpu/drm/i915/display/intel_alpm.c @@ -58,43 +58,32 @@ static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state) 1000 / 1000; } -static int get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state, - int *min, int *max) +static void get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state, + int *min, int *max) { if (crtc_state->port_clock < 540000) { *min = 65 * LFPS_CYCLE_COUNT; *max = 75 * LFPS_CYCLE_COUNT; - } else if (crtc_state->port_clock <= 810000) { + } else { *min = 140; *max = 800; - } else { - *min = *max = -1; - return -1; } - - return 0; } static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state) { - int tlfps_cycle_min, tlfps_cycle_max, ret; + int tlfps_cycle_min, tlfps_cycle_max; - ret = get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min, - &tlfps_cycle_max); - if (ret) - return ret; + get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min, + &tlfps_cycle_max); return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2; } static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state) { - int lfps_cycle_time = get_lfps_cycle_time(crtc_state); - - if (lfps_cycle_time < 0) - return -1; - - return lfps_cycle_time * crtc_state->port_clock / 1000 / 1000 / (2 * LFPS_CYCLE_COUNT); + return get_lfps_cycle_time(crtc_state) * crtc_state->port_clock / 1000 / + 1000 / (2 * LFPS_CYCLE_COUNT); } /* @@ -146,8 +135,6 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, silence_period = get_silence_period_symbols(crtc_state); lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state); - if (lfps_half_cycle < 0) - return false; if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK || silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK || diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c index d29c1508ccb9..6ae1374d5c2b 100644 --- 
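To make the ALPM arithmetic above concrete (assuming LFPS_CYCLE_COUNT is 10, a value not shown in this hunk, and an assumed port_clock of 270000 kHz): since 270000 < 540000, tlfps_cycle_min/max are 650/750, get_lfps_cycle_time() returns 650 + (750 - 650) / 2 = 700, and get_lfps_half_cycle_clocks() computes 700 * 270000 / 1000 / 1000 / (2 * 10) = 9 under integer division. Dropping the -1 error paths is sound because the restructured else branch now covers every port clock.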
a/drivers/gpu/drm/i915/display/intel_bo.c +++ b/drivers/gpu/drm/i915/display/intel_bo.c @@ -59,18 +59,3 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) { i915_debugfs_describe_obj(m, to_intel_bo(obj)); } - -struct intel_framebuffer *intel_bo_alloc_framebuffer(void) -{ - return i915_gem_object_alloc_framebuffer(); -} - -int intel_bo_panic_setup(struct drm_scanout_buffer *sb) -{ - return i915_gem_object_panic_setup(sb); -} - -void intel_bo_panic_finish(struct intel_framebuffer *fb) -{ - return i915_gem_object_panic_finish(fb); -} diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h index 97087a64d23b..48d87019e48a 100644 --- a/drivers/gpu/drm/i915/display/intel_bo.h +++ b/drivers/gpu/drm/i915/display/intel_bo.h @@ -25,8 +25,5 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, struct intel_frontbuffer *front); void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj); -struct intel_framebuffer *intel_bo_alloc_framebuffer(void); -int intel_bo_panic_setup(struct drm_scanout_buffer *sb); -void intel_bo_panic_finish(struct intel_framebuffer *fb); #endif /* __INTEL_BO__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 9725eebe5706..b54b1006aeb0 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -49,6 +49,7 @@ #include "intel_vdsc.h" #include "skl_watermark.h" #include "skl_watermark_regs.h" +#include "vlv_clock.h" #include "vlv_dsi.h" #include "vlv_sideband.h" @@ -563,8 +564,7 @@ static void hsw_get_cdclk(struct intel_display *display, static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? + int freq_320 = (vlv_clock_get_hpll_vco(display->drm) << 1) % 320000 != 0 ? 333333 : 320000; /* @@ -584,8 +584,6 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk) static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (display->platform.valleyview) { if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ return 2; @@ -599,7 +597,7 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk) * hardware has shown that we just need to write the desired * CCK divider into the Punit register. 
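As a worked example of that CHV divider (values assumed, not from the patch): with an HPLL VCO of 1600000 kHz, vlv_calc_cdclk() picks freq_320 = 320000 because (1600000 << 1) % 320000 == 0, and for cdclk = 400000 the value written to the Punit is DIV_ROUND_CLOSEST(1600000 << 1, 400000) - 1 = 8 - 1 = 7.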
*/ - return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; + return DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1; } } @@ -608,17 +606,12 @@ static void vlv_get_cdclk(struct intel_display *display, { u32 val; - vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); - - cdclk_config->vco = vlv_get_hpll_vco(display->drm); - cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk", - CCK_DISPLAY_CLOCK_CONTROL, - cdclk_config->vco); + cdclk_config->vco = vlv_clock_get_hpll_vco(display->drm); + cdclk_config->cdclk = vlv_clock_get_cdclk(display->drm); + vlv_punit_get(display->drm); val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM); - - vlv_iosf_sb_put(display->drm, - BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); + vlv_punit_put(display->drm); if (display->platform.valleyview) cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >> @@ -630,7 +623,6 @@ static void vlv_get_cdclk(struct intel_display *display, static void vlv_program_pfi_credits(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); unsigned int credits, default_credits; if (display->platform.cherryview) @@ -638,7 +630,7 @@ static void vlv_program_pfi_credits(struct intel_display *display) else default_credits = PFI_CREDIT(8); - if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) { + if (display->cdclk.hw.cdclk >= vlv_clock_get_czclk(display->drm)) { /* CHV suggested value is 31 or 63 */ if (display->platform.cherryview) credits = PFI_CREDIT_63; @@ -670,7 +662,6 @@ static void vlv_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; @@ -715,7 +706,7 @@ static void vlv_set_cdclk(struct intel_display *display, if (cdclk == 400000) { u32 divider; - divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, + divider = DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1; /* adjust cdclk divider */ @@ -3565,13 +3556,6 @@ static int pch_rawclk(struct intel_display *display) return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; } -static int vlv_hrawclk(struct intel_display *display) -{ - /* RAWCLK_FREQ_VLV register updated from power well code */ - return vlv_get_cck_clock_hpll(display->drm, "hrawclk", - CCK_DISPLAY_REF_CLOCK_CONTROL); -} - static int i9xx_hrawclk(struct intel_display *display) { struct drm_i915_private *i915 = to_i915(display->drm); @@ -3605,7 +3589,7 @@ u32 intel_read_rawclk(struct intel_display *display) else if (HAS_PCH_SPLIT(display)) freq = pch_rawclk(display); else if (display->platform.valleyview || display->platform.cherryview) - freq = vlv_hrawclk(display); + freq = vlv_clock_get_hrawclk(display->drm); else if (DISPLAY_VER(display) >= 3) freq = i9xx_hrawclk(display); else diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index d4d181f9dca5..c47c84935871 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -662,7 +662,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb, cntl = plane_state->ctl | i9xx_cursor_ctl_crtc(crtc_state); - if (width != height) + if (DISPLAY_VER(display) < 14 && width != height) fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1); base = plane_state->surf; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index 46017091bb0b..c09aa759f4d4 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -26,6 +26,7 @@ */ #include <linux/iopoll.h> +#include <linux/seq_buf.h> #include <linux/string_helpers.h> #include <drm/display/drm_dp_helper.h> @@ -596,8 +597,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, enum transcoder master; master = crtc_state->mst_master_transcoder; - drm_WARN_ON(display->drm, - master == INVALID_TRANSCODER); + if (drm_WARN_ON(display->drm, + master == INVALID_TRANSCODER)) + master = TRANSCODER_A; temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master); } } else { @@ -5067,11 +5069,45 @@ static bool port_in_use(struct intel_display *display, enum port port) return false; } +static const char *intel_ddi_encoder_name(struct intel_display *display, + enum port port, enum phy phy, + struct seq_buf *s) +{ + if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) { + seq_buf_printf(s, "DDI %c/PHY %c", + port_name(port - PORT_D_XELPD + PORT_D), + phy_name(phy)); + } else if (DISPLAY_VER(display) >= 12) { + enum tc_port tc_port = intel_port_to_tc(display, port); + + seq_buf_printf(s, "DDI %s%c/PHY %s%c", + port >= PORT_TC1 ? "TC" : "", + port >= PORT_TC1 ? port_tc_name(port) : port_name(port), + tc_port != TC_PORT_NONE ? "TC" : "", + tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy)); + } else if (DISPLAY_VER(display) >= 11) { + enum tc_port tc_port = intel_port_to_tc(display, port); + + seq_buf_printf(s, "DDI %c%s/PHY %s%c", + port_name(port), + port >= PORT_C ? " (TC)" : "", + tc_port != TC_PORT_NONE ? "TC" : "", + tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy)); + } else { + seq_buf_printf(s, "DDI %c/PHY %c", port_name(port), phy_name(phy)); + } + + drm_WARN_ON(display->drm, seq_buf_has_overflowed(s)); + + return seq_buf_str(s); +} + void intel_ddi_init(struct intel_display *display, const struct intel_bios_encoder_data *devdata) { struct intel_digital_port *dig_port; struct intel_encoder *encoder; + DECLARE_SEQ_BUF(encoder_name, 20); bool init_hdmi, init_dp; enum port port; enum phy phy; @@ -5156,37 +5192,9 @@ void intel_ddi_init(struct intel_display *display, encoder = &dig_port->base; encoder->devdata = devdata; - if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) { - drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, - "DDI %c/PHY %c", - port_name(port - PORT_D_XELPD + PORT_D), - phy_name(phy)); - } else if (DISPLAY_VER(display) >= 12) { - enum tc_port tc_port = intel_port_to_tc(display, port); - - drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, - "DDI %s%c/PHY %s%c", - port >= PORT_TC1 ? "TC" : "", - port >= PORT_TC1 ? port_tc_name(port) : port_name(port), - tc_port != TC_PORT_NONE ? "TC" : "", - tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy)); - } else if (DISPLAY_VER(display) >= 11) { - enum tc_port tc_port = intel_port_to_tc(display, port); - - drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, - "DDI %c%s/PHY %s%c", - port_name(port), - port >= PORT_C ? " (TC)" : "", - tc_port != TC_PORT_NONE ? "TC" : "", - tc_port != TC_PORT_NONE ? 
tc_port_name(tc_port) : phy_name(phy)); - } else { - drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, - "DDI %c/PHY %c", port_name(port), phy_name(phy)); - } + drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs, + DRM_MODE_ENCODER_TMDS, "%s", + intel_ddi_encoder_name(display, port, phy, &encoder_name)); intel_encoder_link_check_init(encoder, intel_ddi_link_check); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index c1a3a95c65f0..18b9baa96241 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -129,11 +129,9 @@ #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" -#include "vlv_dpio_phy_regs.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" #include "vlv_dsi_regs.h" -#include "vlv_sideband.h" static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); @@ -141,65 +139,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipe_misc(struct intel_dsb *dsb, const struct intel_crtc_state *crtc_state); -/* returns HPLL frequency in kHz */ -int vlv_get_hpll_vco(struct drm_device *drm) -{ - int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; - - /* Obtain SKU information */ - hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) & - CCK_FUSE_HPLL_FREQ_MASK; - - return vco_freq[hpll_freq] * 1000; -} - -int vlv_get_cck_clock(struct drm_device *drm, - const char *name, u32 reg, int ref_freq) -{ - u32 val; - int divider; - - val = vlv_cck_read(drm, reg); - divider = val & CCK_FREQUENCY_VALUES; - - drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) != - (divider << CCK_FREQUENCY_STATUS_SHIFT), - "%s change in progress\n", name); - - return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); -} - -int vlv_get_cck_clock_hpll(struct drm_device *drm, - const char *name, u32 reg) -{ - struct drm_i915_private *dev_priv = to_i915(drm); - int hpll; - - vlv_cck_get(drm); - - if (dev_priv->hpll_freq == 0) - dev_priv->hpll_freq = vlv_get_hpll_vco(drm); - - hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq); - - vlv_cck_put(drm); - - return hpll; -} - -void intel_update_czclk(struct intel_display *display) -{ - struct drm_i915_private *dev_priv = to_i915(display->drm); - - if (!display->platform.valleyview && !display->platform.cherryview) - return; - - dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk", - CCK_CZ_CLOCK_CONTROL); - - drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq); -} - static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) { return (crtc_state->active_planes & @@ -3952,6 +3891,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, intel_joiner_get_config(pipe_config); intel_dsc_get_config(pipe_config); + /* intel_vrr_get_config() depends on .framestart_delay */ + if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { + tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder)); + + pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; + } else { + /* no idea if this is correct */ + pipe_config->framestart_delay = 1; + } + + /* + * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY + * readout done by intel_get_transcoder_timings(). 
+ */ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || DISPLAY_VER(display) >= 11) intel_get_transcoder_timings(crtc, pipe_config); @@ -4003,15 +3956,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } - if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { - tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder)); - - pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; - } else { - /* no idea if this is correct */ - pipe_config->framestart_delay = 1; - } - out: intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains); @@ -7271,6 +7215,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit, state, crtc); + intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit, + new_crtc_state); + if (new_crtc_state->use_dsb) intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 37e2ab301a80..9a9a44b61f7f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -435,11 +435,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state); void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state); void i830_enable_pipe(struct intel_display *display, enum pipe pipe); void i830_disable_pipe(struct intel_display *display, enum pipe pipe); -int vlv_get_hpll_vco(struct drm_device *drm); -int vlv_get_cck_clock(struct drm_device *drm, - const char *name, u32 reg, int ref_freq); -int vlv_get_cck_clock_hpll(struct drm_device *drm, - const char *name, u32 reg); bool intel_has_pending_fb_unpin(struct intel_display *display); void intel_encoder_destroy(struct drm_encoder *encoder); struct drm_display_mode * @@ -528,7 +523,6 @@ void intel_init_display_hooks(struct intel_display *display); void intel_setup_outputs(struct intel_display *display); int intel_initial_commit(struct intel_display *display); void intel_panel_sanitize_ssc(struct intel_display *display); -void intel_update_czclk(struct intel_display *display); enum drm_mode_status intel_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode); int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 8c226406c5cd..df4da52cbdb3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -475,7 +475,21 @@ struct intel_display { struct work_struct vblank_notify_work; - u32 de_irq_mask[I915_MAX_PIPES]; + /* + * Cached value of VLV/CHV IMR to avoid reads in updating the + * bitfield. + */ + u32 vlv_imr_mask; + /* + * Cached value of gen 5-7 DE IMR to avoid reads in updating the + * bitfield. + */ + u32 ilk_de_imr_mask; + /* + * Cached value of BDW+ DE pipe IMR to avoid reads in updating + * the bitfield. 
+ */ + u32 de_pipe_imr_mask[I915_MAX_PIPES]; u32 pipestat_irq_mask[I915_MAX_PIPES]; } irq; @@ -568,6 +582,11 @@ struct intel_display { } state; struct { + unsigned int hpll_freq; + unsigned int czclk_freq; + } vlv_clock; + + struct { /* ordered wq for modesets */ struct workqueue_struct *modeset; diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index 65f0efc35bb7..a002bc6ce7b0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -1944,6 +1944,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq); } +bool intel_display_device_present(struct intel_display *display) +{ + return display && HAS_DISPLAY(display); +} + /* * Assuming the device has display hardware, should it be enabled? * diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 6e87b763fe7c..1f091fbcd0ec 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -155,7 +155,7 @@ struct intel_display_platforms { #define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0) #define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc) #define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20) -#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell) +#define HAS_DOUBLE_BUFFERED_M_N(__display) (IS_DISPLAY_VER((__display), 9, 14) || (__display)->platform.broadwell) #define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30) #define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4) #define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14) @@ -306,6 +306,7 @@ struct intel_display_device_info { } color; }; +bool intel_display_device_present(struct intel_display *display); bool intel_display_device_enabled(struct intel_display *display); struct intel_display *intel_display_device_probe(struct pci_dev *pdev); void intel_display_device_remove(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index cf1c14412abe..f84a0b26b7a6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -482,7 +482,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display) intel_dpll_init(display); intel_fdi_pll_freq_update(display); - intel_update_czclk(display); intel_display_driver_init_hw(display); intel_dpll_update_ref_clks(display); diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 123e054affbe..c6f367e6159e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -140,14 +140,14 @@ void ilk_update_display_irq(struct intel_display *display, lockdep_assert_held(&display->irq.lock); drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); - new_val = dev_priv->irq_mask; + new_val = display->irq.ilk_de_imr_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->irq_mask && + if (new_val != display->irq.ilk_de_imr_mask && !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) { - 
dev_priv->irq_mask = new_val; - intel_de_write(display, DEIMR, dev_priv->irq_mask); + display->irq.ilk_de_imr_mask = new_val; + intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask); intel_de_posting_read(display, DEIMR); } } @@ -215,13 +215,13 @@ static void bdw_update_pipe_irq(struct intel_display *display, if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; - new_val = display->irq.de_irq_mask[pipe]; + new_val = display->irq.de_pipe_imr_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != display->irq.de_irq_mask[pipe]) { - display->irq.de_irq_mask[pipe] = new_val; - intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]); + if (new_val != display->irq.de_pipe_imr_mask[pipe]) { + display->irq.de_pipe_imr_mask[pipe] = new_val; + intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]); intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe)); } } @@ -1865,8 +1865,6 @@ void vlv_display_error_irq_handler(struct intel_display *display, static void _vlv_display_irq_reset(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (display->platform.cherryview) intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else @@ -1881,7 +1879,7 @@ static void _vlv_display_irq_reset(struct intel_display *display) i9xx_pipestat_irq_reset(display); intel_display_irq_regs_reset(display, VLV_IRQ_REGS); - dev_priv->irq_mask = ~0u; + display->irq.vlv_imr_mask = ~0u; } void vlv_display_irq_reset(struct intel_display *display) @@ -1939,7 +1937,6 @@ static u32 vlv_error_mask(void) static void _vlv_display_irq_postinstall(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pipestat_mask; u32 enable_mask; enum pipe pipe; @@ -1973,11 +1970,11 @@ static void _vlv_display_irq_postinstall(struct intel_display *display) enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | I915_LPE_PIPE_C_INTERRUPT; - drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u); + drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u); - dev_priv->irq_mask = ~enable_mask; + display->irq.vlv_imr_mask = ~enable_mask; - intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask); + intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask); } void vlv_display_irq_postinstall(struct intel_display *display) @@ -1988,7 +1985,7 @@ void vlv_display_irq_postinstall(struct intel_display *display) spin_unlock_irq(&display->irq.lock); } -void ibx_display_irq_reset(struct intel_display *display) +static void ibx_display_irq_reset(struct intel_display *display) { if (HAS_PCH_NOP(display)) return; @@ -1999,6 +1996,24 @@ void ibx_display_irq_reset(struct intel_display *display) intel_de_write(display, SERR_INT, 0xffffffff); } +void ilk_display_irq_reset(struct intel_display *display) +{ + struct intel_uncore *uncore = to_intel_uncore(display->drm); + + gen2_irq_reset(uncore, DE_IRQ_REGS); + display->irq.ilk_de_imr_mask = ~0u; + + if (DISPLAY_VER(display) == 7) + intel_de_write(display, GEN7_ERR_INT, 0xffffffff); + + if (display->platform.haswell) { + intel_de_write(display, EDP_PSR_IMR, 0xffffffff); + intel_de_write(display, EDP_PSR_IIR, 0xffffffff); + } + + ibx_display_irq_reset(display); +} + void gen8_display_irq_reset(struct intel_display *display) { enum pipe pipe; @@ -2088,8 +2103,8 @@ void gen8_irq_power_well_post_enable(struct intel_display *display, 
for_each_pipe_masked(display, pipe, pipe_mask) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - display->irq.de_irq_mask[pipe], - ~display->irq.de_irq_mask[pipe] | extra_ier); + display->irq.de_pipe_imr_mask[pipe], + ~display->irq.de_pipe_imr_mask[pipe] | extra_ier); spin_unlock_irq(&display->irq.lock); } @@ -2183,8 +2198,6 @@ out: void ilk_de_irq_postinstall(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - u32 display_mask, extra_mask; if (DISPLAY_VER(display) >= 7) { @@ -2216,11 +2229,11 @@ void ilk_de_irq_postinstall(struct intel_display *display) if (display->platform.ironlake && display->platform.mobile) extra_mask |= DE_PCU_EVENT; - i915->irq_mask = ~display_mask; + display->irq.ilk_de_imr_mask = ~display_mask; ibx_irq_postinstall(display); - intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask, + intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask, display_mask | extra_mask); } @@ -2305,12 +2318,12 @@ void gen8_de_irq_postinstall(struct intel_display *display) } for_each_pipe(display, pipe) { - display->irq.de_irq_mask[pipe] = ~de_pipe_masked; + display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked; if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - display->irq.de_irq_mask[pipe], + display->irq.de_pipe_imr_mask[pipe], de_pipe_enables); } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h index c66db3851da4..cee120347064 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.h +++ b/drivers/gpu/drm/i915/display/intel_display_irq.h @@ -56,7 +56,7 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl); void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir); void i9xx_display_irq_reset(struct intel_display *display); -void ibx_display_irq_reset(struct intel_display *display); +void ilk_display_irq_reset(struct intel_display *display); void vlv_display_irq_reset(struct intel_display *display); void gen8_display_irq_reset(struct intel_display *display); void gen11_display_irq_reset(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index fd9d2527889b..358ab922d7a7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -60,6 +60,7 @@ struct intel_ddi_buf_trans; struct intel_fbc; struct intel_global_objs_state; struct intel_hdcp_shim; +struct intel_panic; struct intel_tc_port; /* @@ -149,6 +150,7 @@ struct intel_framebuffer { unsigned int vtd_guard; unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width); + struct intel_panic *panic; }; enum intel_hotplug_state { diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 77a0199f9ea5..517bebb0b4aa 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -1141,7 +1141,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, } num_entries = package_header->num_entries; - if (WARN_ON(package_header->num_entries > max_entries)) + if (WARN_ON(num_entries > max_entries)) num_entries = max_entries; fw_info = (const struct intel_fw_info *) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 
12084a542fc5..eb05ef4bd9f6 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -508,9 +508,6 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - if (panel->backlight.edp.vesa.luminance_control_support) - return; - drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info); if (!panel->backlight.edp.vesa.info.aux_enable) @@ -533,7 +530,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, luminance_range->max_luminance, panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd, ¤t_level, ¤t_mode, - false); + panel->backlight.edp.vesa.luminance_control_support); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index b210c3250501..69237dabdae8 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -20,6 +20,7 @@ #include "intel_fb.h" #include "intel_fb_bo.h" #include "intel_frontbuffer.h" +#include "intel_panic.h" #include "intel_plane.h" #define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a)) @@ -563,11 +564,11 @@ static bool plane_has_modifier(struct intel_display *display, return false; if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS && - (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx)) + (DISPLAY_VER(display) < 14 || !display->platform.dgfx)) return false; if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS && - (GRAPHICS_VER(i915) < 20 || display->platform.dgfx)) + (DISPLAY_VER(display) < 20 || display->platform.dgfx)) return false; return true; @@ -2343,6 +2344,26 @@ intel_user_framebuffer_create(struct drm_device *dev, return fb; } +struct intel_framebuffer *intel_framebuffer_alloc(void) +{ + struct intel_framebuffer *intel_fb; + struct intel_panic *panic; + + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + if (!intel_fb) + return NULL; + + panic = intel_panic_alloc(); + if (!panic) { + kfree(intel_fb); + return NULL; + } + + intel_fb->panic = panic; + + return intel_fb; +} + struct drm_framebuffer * intel_framebuffer_create(struct drm_gem_object *obj, const struct drm_format_info *info, @@ -2351,7 +2372,7 @@ intel_framebuffer_create(struct drm_gem_object *obj, struct intel_framebuffer *intel_fb; int ret; - intel_fb = intel_bo_alloc_framebuffer(); + intel_fb = intel_framebuffer_alloc(); if (!intel_fb) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 403b8b63721a..22514d5f2bb6 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -104,6 +104,9 @@ int intel_framebuffer_init(struct intel_framebuffer *ifb, struct drm_gem_object *obj, const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd); + +struct intel_framebuffer *intel_framebuffer_alloc(void); + struct drm_framebuffer * intel_framebuffer_create(struct drm_gem_object *obj, const struct drm_format_info *info, diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index d4c5deff9cbe..0d380c825791 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -98,11 +98,7 @@ struct intel_fbc { struct intel_display *display; const struct intel_fbc_funcs *funcs; - /* - * This is always the inner 
lock when overlapping with - * struct_mutex and it's the outer lock when overlapping - * with stolen_lock. - */ + /* This is always the outer lock when overlapping with stolen_lock */ struct mutex lock; unsigned int busy_bits; @@ -383,11 +379,11 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) struct drm_i915_private *i915 = to_i915(display->drm); drm_WARN_ON(display->drm, - range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), + range_end_overflows_t(u64, i915_gem_stolen_area_address(i915), i915_gem_stolen_node_offset(&fbc->compressed_fb), U32_MAX)); drm_WARN_ON(display->drm, - range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), + range_end_overflows_t(u64, i915_gem_stolen_area_address(i915), i915_gem_stolen_node_offset(&fbc->compressed_llb), U32_MAX)); intel_de_write(display, FBC_CFB_BASE, @@ -1550,14 +1546,14 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * having a Y offset that isn't divisible by 4 causes FIFO underrun * and screen flicker. */ - if (DISPLAY_VER(display) >= 9 && + if (IS_DISPLAY_VER(display, 9, 12) && plane_state->view.color_plane[0].y & 3) { plane_state->no_fbc_reason = "plane start Y offset misaligned"; return 0; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ - if (DISPLAY_VER(display) >= 11 && + if (IS_DISPLAY_VER(display, 9, 12) && (plane_state->view.color_plane[0].y + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane end Y offset misaligned"; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 7c4709d58aa3..3fbdf75415cc 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -207,6 +207,60 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_set_suspend = intelfb_set_suspend, }; +static void intel_fbdev_fill_mode_cmd(struct drm_fb_helper_surface_size *sizes, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + /* we don't do packed 24bpp */ + if (sizes->surface_bpp == 24) + sizes->surface_bpp = 32; + + mode_cmd->width = sizes->surface_width; + mode_cmd->height = sizes->surface_height; + + mode_cmd->pitches[0] = ALIGN(mode_cmd->width * DIV_ROUND_UP(sizes->surface_bpp, 8), 64); + mode_cmd->pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); +} + +static struct intel_framebuffer * +__intel_fbdev_fb_alloc(struct intel_display *display, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_mode_fb_cmd2 mode_cmd = {}; + struct drm_framebuffer *fb; + struct drm_gem_object *obj; + int size; + + intel_fbdev_fill_mode_cmd(sizes, &mode_cmd); + + size = mode_cmd.pitches[0] * mode_cmd.height; + size = PAGE_ALIGN(size); + + obj = intel_fbdev_fb_bo_create(display->drm, size); + if (IS_ERR(obj)) { + fb = ERR_CAST(obj); + goto err; + } + + fb = intel_framebuffer_create(obj, + drm_get_format_info(display->drm, + mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd); + if (IS_ERR(fb)) { + intel_fbdev_fb_bo_destroy(obj); + goto err; + } + + drm_gem_object_put(obj); + + return to_intel_framebuffer(fb); + +err: + return ERR_CAST(fb); + +} + int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { @@ -237,7 +291,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) { drm_dbg_kms(display->drm, "no BIOS fb, allocating a new one\n"); - fb = intel_fbdev_fb_alloc(helper, sizes); + + fb = 
__intel_fbdev_fb_alloc(display, sizes); if (IS_ERR(fb)) return PTR_ERR(fb); } else { @@ -277,7 +332,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, obj = intel_fb_bo(&fb->base); - ret = intel_fbdev_fb_fill_info(display, info, obj, vma); + ret = intel_fbdev_fb_fill_info(display->drm, info, obj, vma); if (ret) goto out_unpin; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c index 210aee9ae88b..56b145841473 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c @@ -3,40 +3,17 @@ * Copyright © 2023 Intel Corporation */ -#include <drm/drm_fb_helper.h> +#include <linux/fb.h> #include "gem/i915_gem_lmem.h" #include "i915_drv.h" -#include "intel_display_core.h" -#include "intel_display_types.h" -#include "intel_fb.h" #include "intel_fbdev_fb.h" -struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size) { - struct intel_display *display = to_intel_display(helper->dev); - struct drm_i915_private *dev_priv = to_i915(display->drm); - struct drm_framebuffer *fb; - struct drm_mode_fb_cmd2 mode_cmd = {}; + struct drm_i915_private *dev_priv = to_i915(drm); struct drm_i915_gem_object *obj; - int size; - - /* we don't do packed 24bpp */ - if (sizes->surface_bpp == 24) - sizes->surface_bpp = 32; - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * - DIV_ROUND_UP(sizes->surface_bpp, 8), 64); - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - size = mode_cmd.pitches[0] * mode_cmd.height; - size = PAGE_ALIGN(size); obj = ERR_PTR(-ENODEV); if (HAS_LMEM(dev_priv)) { @@ -51,31 +28,29 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, * * Also skip stolen on MTL as Wa_22018444074 mitigation. 
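To make the sizing in intel_fbdev_fill_mode_cmd()/__intel_fbdev_fb_alloc() above concrete (numbers assumed): a 1920x1080 surface at 32 bpp gives pitches[0] = ALIGN(1920 * 4, 64) = 7680, so size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which happens to be page aligned already; the stolen-memory path is then only attempted when twice that size fits within dsm.usable_size.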
*/ - if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size) + if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size) obj = i915_gem_object_create_stolen(dev_priv, size); if (IS_ERR(obj)) obj = i915_gem_object_create_shmem(dev_priv, size); } if (IS_ERR(obj)) { - drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj); + drm_err(drm, "failed to allocate framebuffer (%pe)\n", obj); return ERR_PTR(-ENOMEM); } - fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), - drm_get_format_info(display->drm, - mode_cmd.pixel_format, - mode_cmd.modifier[0]), - &mode_cmd); - i915_gem_object_put(obj); + return &obj->base; +} - return to_intel_framebuffer(fb); +void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj) +{ + drm_gem_object_put(obj); } -int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info, +int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info, struct drm_gem_object *_obj, struct i915_vma *vma) { - struct drm_i915_private *i915 = to_i915(display->drm); + struct drm_i915_private *i915 = to_i915(drm); struct drm_i915_gem_object *obj = to_intel_bo(_obj); struct i915_gem_ww_ctx ww; void __iomem *vaddr; @@ -107,7 +82,7 @@ int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info vaddr = i915_vma_pin_iomap(vma); if (IS_ERR(vaddr)) { - drm_err(display->drm, + drm_err(drm, "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); ret = PTR_ERR(vaddr); continue; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h index cb7957272715..1fa44ed28543 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h @@ -6,16 +6,15 @@ #ifndef __INTEL_FBDEV_FB_H__ #define __INTEL_FBDEV_FB_H__ -struct drm_fb_helper; -struct drm_fb_helper_surface_size; +struct drm_device; struct drm_gem_object; +struct drm_mode_fb_cmd2; struct fb_info; struct i915_vma; -struct intel_display; -struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes); -int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info, +struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size); +void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj); +int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info, struct drm_gem_object *obj, struct i915_vma *vma); #endif diff --git a/drivers/gpu/drm/i915/display/intel_panic.c b/drivers/gpu/drm/i915/display/intel_panic.c new file mode 100644 index 000000000000..7311ce4e8b6c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_panic.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include <drm/drm_panic.h> + +#include "gem/i915_gem_object.h" +#include "intel_display_types.h" +#include "intel_fb.h" +#include "intel_panic.h" + +struct intel_panic *intel_panic_alloc(void) +{ + return i915_gem_object_alloc_panic(); +} + +int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb) +{ + struct intel_framebuffer *fb = sb->private; + struct drm_gem_object *obj = intel_fb_bo(&fb->base); + + return i915_gem_object_panic_setup(panic, sb, obj, fb->panic_tiling); +} + +void intel_panic_finish(struct intel_panic *panic) +{ + return i915_gem_object_panic_finish(panic); +} diff --git a/drivers/gpu/drm/i915/display/intel_panic.h b/drivers/gpu/drm/i915/display/intel_panic.h new file mode 100644 index 
000000000000..afb472e924aa --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_panic.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __INTEL_PANIC_H__ +#define __INTEL_PANIC_H__ + +struct drm_scanout_buffer; +struct intel_panic; + +struct intel_panic *intel_panic_alloc(void); +int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb); +void intel_panic_finish(struct intel_panic *panic); + +#endif /* __INTEL_PANIC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c index 81f05ee9a21a..2329f09d413d 100644 --- a/drivers/gpu/drm/i915/display/intel_plane.c +++ b/drivers/gpu/drm/i915/display/intel_plane.c @@ -47,7 +47,6 @@ #include "gem/i915_gem_object.h" #include "i915_scheduler_types.h" #include "i9xx_plane_regs.h" -#include "intel_bo.h" #include "intel_cdclk.h" #include "intel_cursor.h" #include "intel_display_rps.h" @@ -56,6 +55,7 @@ #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_fbdev.h" +#include "intel_panic.h" #include "intel_plane.h" #include "intel_psr.h" #include "skl_scaler.h" @@ -1326,7 +1326,7 @@ static void intel_panic_flush(struct drm_plane *plane) struct drm_framebuffer *fb = plane_state->hw.fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - intel_bo_panic_finish(intel_fb); + intel_panic_finish(intel_fb->panic); if (crtc_state->enable_psr2_sel_fetch) { /* Force a full update for psr2 */ @@ -1409,7 +1409,7 @@ static int intel_get_scanout_buffer(struct drm_plane *plane, return -EOPNOTSUPP; } sb->private = intel_fb; - ret = intel_bo_panic_setup(sb); + ret = intel_panic_setup(intel_fb->panic, sb); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 22433fe2ee14..01bf304c705f 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -42,6 +42,7 @@ #include "intel_dmc.h" #include "intel_dp.h" #include "intel_dp_aux.h" +#include "intel_dsb.h" #include "intel_frontbuffer.h" #include "intel_hdmi.h" #include "intel_psr.h" @@ -494,12 +495,14 @@ static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp) { u8 su_capability = 0; - if (intel_dp->psr.sink_panel_replay_su_support) - drm_dp_dpcd_readb(&intel_dp->aux, - DP_PANEL_REPLAY_CAP_CAPABILITY, - &su_capability); - else + if (intel_dp->psr.sink_panel_replay_su_support) { + if (drm_dp_dpcd_read_byte(&intel_dp->aux, + DP_PANEL_REPLAY_CAP_CAPABILITY, + &su_capability) < 0) + return 0; + } else { su_capability = intel_dp->psr_dpcd[1]; + } return su_capability; } @@ -2997,35 +3000,57 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, } } -static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) +/* + * From bspec: Panel Self Refresh (BDW+) + * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of + * exit training time + 1.5 ms of aux channel handshake. 50 ms is + * defensive enough to cover everything. + */ +#define PSR_IDLE_TIMEOUT_MS 50 + +static int +_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state, + struct intel_dsb *dsb) { - struct intel_display *display = to_intel_display(intel_dp); - enum transcoder cpu_transcoder = intel_dp->psr.transcoder; + struct intel_display *display = to_intel_display(new_crtc_state); + enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; /* * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough. 
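Worked numbers for the DSB-based wait added in this function (PSR_IDLE_TIMEOUT_MS and the 200 us poll period appear just below): 50 * 1000 / 200 = 250 poll iterations, so the DSB path spends the same 50 ms budget as the MMIO intel_de_wait_for_clear() path it mirrors.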
* As all higher states has bit 4 of PSR2 state set we can just wait for * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. */ + if (dsb) { + intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder), + EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200, + PSR_IDLE_TIMEOUT_MS * 1000 / 200); + return true; + } + return intel_de_wait_for_clear(display, EDP_PSR2_STATUS(display, cpu_transcoder), - EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50); + EDP_PSR2_STATUS_STATE_DEEP_SLEEP, + PSR_IDLE_TIMEOUT_MS); } -static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) +static int +_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state, + struct intel_dsb *dsb) { - struct intel_display *display = to_intel_display(intel_dp); - enum transcoder cpu_transcoder = intel_dp->psr.transcoder; + struct intel_display *display = to_intel_display(new_crtc_state); + enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; + + if (dsb) { + intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder), + EDP_PSR_STATUS_STATE_MASK, 0, 200, + PSR_IDLE_TIMEOUT_MS * 1000 / 200); + return true; + } - /* - * From bspec: Panel Self Refresh (BDW+) - * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of - * exit training time + 1.5 ms of aux channel handshake. 50 ms is - * defensive enough to cover everything. - */ return intel_de_wait_for_clear(display, psr_status_reg(display, cpu_transcoder), - EDP_PSR_STATUS_STATE_MASK, 50); + EDP_PSR_STATUS_STATE_MASK, + PSR_IDLE_TIMEOUT_MS); } /** @@ -3054,9 +3079,11 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat continue; if (intel_dp->psr.sel_update_enabled) - ret = _psr2_ready_for_pipe_update_locked(intel_dp); + ret = _psr2_ready_for_pipe_update_locked(new_crtc_state, + NULL); else - ret = _psr1_ready_for_pipe_update_locked(intel_dp); + ret = _psr1_ready_for_pipe_update_locked(new_crtc_state, + NULL); if (ret) drm_err(display->drm, @@ -3064,6 +3091,18 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat } } +void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb, + const struct intel_crtc_state *new_crtc_state) +{ + if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay) + return; + + if (new_crtc_state->has_sel_update) + _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb); + else + _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb); +} + static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 9b061a22361f..077751aa599f 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -52,6 +52,8 @@ void intel_psr_get_config(struct intel_encoder *encoder, void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state); +void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb, + const struct intel_crtc_state *new_crtc_state); bool intel_psr_enabled(struct intel_dp *intel_dp); int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 3eed37f271b0..9e007aab1452 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -106,17 
+106,36 @@ int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state) intel_vrr_extra_vblank_delay(display); } -static int intel_vrr_flipline_offset(struct intel_display *display) +static int intel_vrr_vmin_flipline_offset(struct intel_display *display) { - /* ICL/TGL hardware imposes flipline>=vmin+1 */ + /* + * ICL/TGL hardware imposes flipline>=vmin+1 + * + * We reduce the vmin value to compensate when programming the + * hardware. This approach allows flipline to remain set at the + * original value, and thus the frame will have the desired + * minimum vtotal. + */ return DISPLAY_VER(display) < 13 ? 1 : 0; } static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); + return crtc_state->vrr.vmin; +} - return crtc_state->vrr.vmin + intel_vrr_flipline_offset(display); +static int intel_vrr_guardband_to_pipeline_full(const struct intel_crtc_state *crtc_state, + int guardband) +{ + /* hardware imposes one extra scanline somewhere */ + return guardband - crtc_state->framestart_delay - 1; +} + +static int intel_vrr_pipeline_full_to_guardband(const struct intel_crtc_state *crtc_state, + int pipeline_full) +{ + /* hardware imposes one extra scanline somewhere */ + return pipeline_full + crtc_state->framestart_delay + 1; } /* @@ -137,36 +156,18 @@ static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state) */ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); - - if (DISPLAY_VER(display) >= 13) - return crtc_state->vrr.guardband; - else - /* hardware imposes one extra scanline somewhere */ - return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1; + return crtc_state->vrr.guardband; } int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); - /* Min vblank actually determined by flipline */ - if (DISPLAY_VER(display) >= 13) - return intel_vrr_vmin_flipline(crtc_state); - else - return intel_vrr_vmin_flipline(crtc_state) + - intel_vrr_real_vblank_delay(crtc_state); + return intel_vrr_vmin_flipline(crtc_state); } int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); - - if (DISPLAY_VER(display) >= 13) - return crtc_state->vrr.vmax; - else - return crtc_state->vrr.vmax + - intel_vrr_real_vblank_delay(crtc_state); + return crtc_state->vrr.vmax; } int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state) @@ -250,42 +251,50 @@ void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state) crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } +static int intel_vrr_hw_value(const struct intel_crtc_state *crtc_state, + int value) +{ + struct intel_display *display = to_intel_display(crtc_state); + + /* + * On TGL vmin/vmax/flipline also need to be + * adjusted by the SCL to maintain correct vtotals. + */ + if (DISPLAY_VER(display) >= 13) + return value; + else + return value - intel_vrr_real_vblank_delay(crtc_state); +} + /* * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to * Vtotal value. 
*/ static -int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state) +int intel_vrr_fixed_rr_hw_vtotal(const struct intel_crtc_state *crtc_state) { - struct intel_display *display = to_intel_display(crtc_state); - int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal; - - if (DISPLAY_VER(display) >= 13) - return crtc_vtotal; - else - return crtc_vtotal - - intel_vrr_real_vblank_delay(crtc_state); + return intel_vrr_hw_value(crtc_state, crtc_state->hw.adjusted_mode.crtc_vtotal); } static -int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state) +int intel_vrr_fixed_rr_hw_vmax(const struct intel_crtc_state *crtc_state) { - return intel_vrr_fixed_rr_vtotal(crtc_state); + return intel_vrr_fixed_rr_hw_vtotal(crtc_state); } static -int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state) +int intel_vrr_fixed_rr_hw_vmin(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - return intel_vrr_fixed_rr_vtotal(crtc_state) - - intel_vrr_flipline_offset(display); + return intel_vrr_fixed_rr_hw_vtotal(crtc_state) - + intel_vrr_vmin_flipline_offset(display); } static -int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state) +int intel_vrr_fixed_rr_hw_flipline(const struct intel_crtc_state *crtc_state) { - return intel_vrr_fixed_rr_vtotal(crtc_state); + return intel_vrr_fixed_rr_hw_vtotal(crtc_state); } void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state) @@ -297,11 +306,11 @@ void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state) return; intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), - intel_vrr_fixed_rr_vmin(crtc_state) - 1); + intel_vrr_fixed_rr_hw_vmin(crtc_state) - 1); intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), - intel_vrr_fixed_rr_vmax(crtc_state) - 1); + intel_vrr_fixed_rr_hw_vmax(crtc_state) - 1); intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), - intel_vrr_fixed_rr_flipline(crtc_state) - 1); + intel_vrr_fixed_rr_hw_flipline(crtc_state) - 1); } static @@ -396,13 +405,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, else intel_vrr_compute_fixed_rr_timings(crtc_state); - /* - * flipline determines the min vblank length the hardware will - * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce - * vmin by one to make sure we can get the actual min vblank length. - */ - crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display); - if (HAS_AS_SDP(display)) { crtc_state->vrr.vsync_start = (crtc_state->hw.adjusted_mode.crtc_vtotal - @@ -421,22 +423,19 @@ void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state) if (!intel_vrr_possible(crtc_state)) return; - if (DISPLAY_VER(display) >= 13) { + crtc_state->vrr.guardband = + crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start - + intel_vrr_extra_vblank_delay(display); + + if (DISPLAY_VER(display) < 13) { + /* FIXME handle the limit in a proper way */ crtc_state->vrr.guardband = - crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start; - } else { - /* hardware imposes one extra scanline somewhere */ - crtc_state->vrr.pipeline_full = - min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start - - crtc_state->framestart_delay - 1); + min(crtc_state->vrr.guardband, + intel_vrr_pipeline_full_to_guardband(crtc_state, 255)); - /* - * vmin/vmax/flipline also need to be adjusted by - * the vblank delay to maintain correct vtotals. 
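A worked pass through the guardband conversion helpers introduced above (values assumed): with framestart_delay = 1 and vrr.guardband = 30, intel_vrr_guardband_to_pipeline_full() yields 30 - 1 - 1 = 28, and intel_vrr_pipeline_full_to_guardband() maps it back as 28 + 1 + 1 = 30. On pre-13 displays the guardband is first clamped to intel_vrr_pipeline_full_to_guardband(crtc_state, 255), i.e. 257 in this example, preserving the old 255 limit on PIPELINE_FULL that the FIXME notes still needs proper handling.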
- */ - crtc_state->vrr.vmin -= intel_vrr_real_vblank_delay(crtc_state); - crtc_state->vrr.vmax -= intel_vrr_real_vblank_delay(crtc_state); - crtc_state->vrr.flipline -= intel_vrr_real_vblank_delay(crtc_state); + crtc_state->vrr.pipeline_full = + intel_vrr_guardband_to_pipeline_full(crtc_state, + crtc_state->vrr.guardband); } } @@ -595,6 +594,24 @@ void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start)); } +static int intel_vrr_hw_vmin(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmin) - + intel_vrr_vmin_flipline_offset(display); +} + +static int intel_vrr_hw_vmax(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmax); +} + +static int intel_vrr_hw_flipline(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_hw_value(crtc_state, crtc_state->vrr.flipline); +} + void intel_vrr_enable(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); @@ -604,11 +621,11 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state) return; intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), - crtc_state->vrr.vmin - 1); + intel_vrr_hw_vmin(crtc_state) - 1); intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), - crtc_state->vrr.vmax - 1); + intel_vrr_hw_vmax(crtc_state) - 1); intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), - crtc_state->vrr.flipline - 1); + intel_vrr_hw_flipline(crtc_state) - 1); intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN); @@ -720,14 +737,20 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) TRANS_CMRR_M_HI(display, cpu_transcoder)); } - if (DISPLAY_VER(display) >= 13) + if (DISPLAY_VER(display) >= 13) { crtc_state->vrr.guardband = REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl); - else - if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) + } else { + if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) { crtc_state->vrr.pipeline_full = REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl); + crtc_state->vrr.guardband = + intel_vrr_pipeline_full_to_guardband(crtc_state, + crtc_state->vrr.pipeline_full); + } + } + if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) { crtc_state->vrr.flipline = intel_de_read(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1; @@ -736,6 +759,15 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) crtc_state->vrr.vmin = intel_de_read(display, TRANS_VRR_VMIN(display, cpu_transcoder)) + 1; + if (DISPLAY_VER(display) < 13) { + /* undo what intel_vrr_hw_value() does when writing the values */ + crtc_state->vrr.flipline += intel_vrr_real_vblank_delay(crtc_state); + crtc_state->vrr.vmax += intel_vrr_real_vblank_delay(crtc_state); + crtc_state->vrr.vmin += intel_vrr_real_vblank_delay(crtc_state); + + crtc_state->vrr.vmin += intel_vrr_vmin_flipline_offset(display); + } + /* * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal * bits are not filled. 
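On pre-Xe LPD hardware (DISPLAY_VER < 13) the readout path above adds back exactly what the write path subtracted: first the vblank delay applied by intel_vrr_hw_value(), then the ICL/TGL vmin flipline offset. A standalone sketch of that write/readout symmetry, with both adjustments as assumed example constants rather than values read from the hardware:

/* Write/readout symmetry for vmin on DISPLAY_VER < 13. */
#include <assert.h>

#define VBLANK_DELAY	2	/* stands in for intel_vrr_real_vblank_delay() */
#define VMIN_OFFSET	1	/* ICL/TGL: flipline >= vmin + 1 */

static int vmin_to_hw(int vmin)
{
	return vmin - VBLANK_DELAY - VMIN_OFFSET;
}

static int hw_to_vmin(int hw_vmin)
{
	/* undo what the write side did, as in intel_vrr_get_config() */
	return hw_vmin + VBLANK_DELAY + VMIN_OFFSET;
}

int main(void)
{
	assert(hw_to_vmin(vmin_to_hw(1125)) == 1125);
	return 0;
}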
Since for these platforms TRAN_VMIN is always diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 950dc79dbdd4..e13fb781e7b2 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -20,6 +20,7 @@ #include "intel_fb.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" +#include "intel_panic.h" #include "intel_plane.h" #include "intel_psr.h" #include "intel_psr_regs.h" @@ -3028,7 +3029,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, return; } - intel_fb = intel_bo_alloc_framebuffer(); + intel_fb = intel_framebuffer_alloc(); if (!intel_fb) { drm_dbg_kms(display->drm, "failed to alloc fb\n"); return; diff --git a/drivers/gpu/drm/i915/display/vlv_clock.c b/drivers/gpu/drm/i915/display/vlv_clock.c new file mode 100644 index 000000000000..1abdae453514 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_clock.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include <drm/drm_print.h> + +#include "intel_display_core.h" +#include "intel_display_types.h" +#include "vlv_clock.h" +#include "vlv_sideband.h" + +/* + * FIXME: The caching of hpll_freq and czclk_freq relies on the first calls + * occurring at a time when they can actually be read. This appears to be the + * case, but is somewhat fragile. Make the initialization explicit at a point + * where they can be reliably read. + */ + +/* returns HPLL frequency in kHz */ +int vlv_clock_get_hpll_vco(struct drm_device *drm) +{ + struct intel_display *display = to_intel_display(drm); + int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; + + if (!display->vlv_clock.hpll_freq) { + vlv_cck_get(drm); + /* Obtain SKU information */ + hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) & + CCK_FUSE_HPLL_FREQ_MASK; + vlv_cck_put(drm); + + display->vlv_clock.hpll_freq = vco_freq[hpll_freq] * 1000; + + drm_dbg_kms(drm, "HPLL frequency: %d kHz\n", display->vlv_clock.hpll_freq); + } + + return display->vlv_clock.hpll_freq; +} + +static int vlv_clock_get_cck(struct drm_device *drm, + const char *name, u32 reg, int ref_freq) +{ + u32 val; + int divider; + + vlv_cck_get(drm); + val = vlv_cck_read(drm, reg); + vlv_cck_put(drm); + + divider = val & CCK_FREQUENCY_VALUES; + + drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) != + (divider << CCK_FREQUENCY_STATUS_SHIFT), + "%s change in progress\n", name); + + return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); +} + +int vlv_clock_get_hrawclk(struct drm_device *drm) +{ + /* RAWCLK_FREQ_VLV register updated from power well code */ + return vlv_clock_get_cck(drm, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL, + vlv_clock_get_hpll_vco(drm)); +} + +int vlv_clock_get_czclk(struct drm_device *drm) +{ + struct intel_display *display = to_intel_display(drm); + + if (!display->vlv_clock.czclk_freq) { + display->vlv_clock.czclk_freq = vlv_clock_get_cck(drm, "czclk", CCK_CZ_CLOCK_CONTROL, + vlv_clock_get_hpll_vco(drm)); + drm_dbg_kms(drm, "CZ clock rate: %d kHz\n", display->vlv_clock.czclk_freq); + } + + return display->vlv_clock.czclk_freq; +} + +int vlv_clock_get_cdclk(struct drm_device *drm) +{ + return vlv_clock_get_cck(drm, "cdclk", CCK_DISPLAY_CLOCK_CONTROL, + vlv_clock_get_hpll_vco(drm)); +} + +int vlv_clock_get_gpll(struct drm_device *drm) +{ + return vlv_clock_get_cck(drm, "GPLL ref", CCK_GPLL_CLOCK_CONTROL, + vlv_clock_get_czclk(drm)); +} diff --git a/drivers/gpu/drm/i915/display/vlv_clock.h b/drivers/gpu/drm/i915/display/vlv_clock.h new 
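The CCK-based helpers above (hrawclk, czclk, cdclk, GPLL ref) all funnel through vlv_clock_get_cck(), whose divider formula is rate = 2 * ref / (divider + 1), rounded to the nearest kHz. A standalone recomputation with assumed register values (a 1.6 GHz HPLL VCO SKU and a divider field of 15):

/* The CCK divider formula, recomputed outside the driver. */
#include <stdio.h>

/* unsigned-only variant of the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int ref_freq = 1600000;	/* HPLL VCO in kHz, assumed SKU */
	unsigned int divider = 15;		/* CCK_FREQUENCY_VALUES field */

	/* rate = 2 * ref / (divider + 1) */
	printf("%u kHz\n", DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1));
	return 0;	/* prints: 200000 kHz */
}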
file mode 100644 index 000000000000..5742ed3c628d --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_clock.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __VLV_CLOCK_H__ +#define __VLV_CLOCK_H__ + +struct drm_device; + +#ifdef I915 +int vlv_clock_get_hpll_vco(struct drm_device *drm); +int vlv_clock_get_hrawclk(struct drm_device *drm); +int vlv_clock_get_czclk(struct drm_device *drm); +int vlv_clock_get_cdclk(struct drm_device *drm); +int vlv_clock_get_gpll(struct drm_device *drm); +#else +static inline int vlv_clock_get_hpll_vco(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_hrawclk(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_czclk(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_cdclk(struct drm_device *drm) +{ + return 0; +} +static inline int vlv_clock_get_gpll(struct drm_device *drm) +{ + return 0; +} +#endif + +#endif /* __VLV_CLOCK_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index f243f8a5215d..39c7c32e1e74 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -182,7 +182,7 @@ enum { * the object. Simple! ... The relocation entries are stored in user memory * and so to access them we have to copy them into a local buffer. That copy * has to avoid taking any pagefaults as they may lead back to a GEM object - * requiring the struct_mutex (i.e. recursive deadlock). So once again we split + * requiring the vm->mutex (i.e. recursive deadlock). So once again we split * the relocation into multiple passes. First we try to do everything within an * atomic context (avoid the pagefaults) which requires that we never wait. If * we detect that we may wait, or if we need to fault, then we have to fallback diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 1f38e367c60b..478011e5ecb3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -459,8 +459,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) atomic_inc(&i915->mm.free_count); /* - * Since we require blocking on struct_mutex to unbind the freed - * object from the GPU before releasing resources back to the + * Since we require blocking on drm_i915_gem_object->vma.lock to unbind + * the freed object from the GPU before releasing resources back to the * system, we can not do that directly from the RCU callback (which may * be a softirq context), but must instead then defer that work onto a * kthread. 
We use the RCU callback rather than move the freed object diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 565f8fa330db..148034ef504d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -16,9 +16,9 @@ #include "i915_gem_ww.h" #include "i915_vma_types.h" -struct drm_scanout_buffer; enum intel_region_id; -struct intel_framebuffer; +struct drm_scanout_buffer; +struct intel_panic; #define obj_to_i915(obj__) to_i915((obj__)->base.dev) @@ -693,9 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); int i915_gem_object_truncate(struct drm_i915_gem_object *obj); -struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void); -int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb); -void i915_gem_object_panic_finish(struct intel_framebuffer *fb); +struct intel_panic *i915_gem_object_alloc_panic(void); +int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb, + struct drm_gem_object *_obj, bool panic_tiling); +void i915_gem_object_panic_finish(struct intel_panic *panic); /** * i915_gem_object_pin_map - return a contiguous mapping of the entire object diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index c16a57160b26..76d2178572b6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -357,23 +357,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, return vaddr ?: ERR_PTR(-ENOMEM); } -struct i915_panic_data { +struct intel_panic { struct page **pages; int page; void *vaddr; }; -struct i915_framebuffer { - struct intel_framebuffer base; - struct i915_panic_data panic; -}; - -static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb) -{ - return &container_of_const(fb, struct i915_framebuffer, base)->panic; -} - -static void i915_panic_kunmap(struct i915_panic_data *panic) +static void i915_panic_kunmap(struct intel_panic *panic) { if (panic->vaddr) { drm_clflush_virt_range(panic->vaddr, PAGE_SIZE); @@ -420,7 +410,7 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int new_page; unsigned int offset; struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; - struct i915_panic_data *panic = to_i915_panic_data(fb); + struct intel_panic *panic = fb->panic; if (fb->panic_tiling) offset = fb->panic_tiling(sb->width, x, y); @@ -441,14 +431,13 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, } } -struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void) +struct intel_panic *i915_gem_object_alloc_panic(void) { - struct i915_framebuffer *i915_fb; + struct intel_panic *panic; + + panic = kzalloc(sizeof(*panic), GFP_KERNEL); - i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL); - if (i915_fb) - return &i915_fb->base; - return NULL; + return panic; } /* @@ -456,12 +445,11 @@ struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void) * Use current vaddr if it exists, or setup a list of pages. * pfn is not supported yet. 
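The panic-data refactor above replaces an intel_panic embedded in a wrapper framebuffer type (recovered via container_of()) with an independently allocated struct that the framebuffer merely points to. A schematic userspace sketch of the two layouts; the struct names and fields here are illustrative, not the driver's real definitions:

/* Before: embedded member, climbed back up with container_of().
 * After: a separate allocation referenced by pointer. */
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct intel_panic { int page; };

struct wrapper_fb {
	int base;			/* stands in for the framebuffer */
	struct intel_panic panic;	/* embedded: lifetime tied to the fb */
};

struct pointer_fb {
	int base;
	struct intel_panic *panic;	/* allocated separately */
};

int main(void)
{
	struct wrapper_fb old_fb = { 0 };
	struct intel_panic *p = &old_fb.panic;

	/* the old code had to recover the wrapper type */
	(void)container_of(p, struct wrapper_fb, panic);

	/* the new code allocates and frees the panic state on its own */
	struct pointer_fb new_fb = { .panic = calloc(1, sizeof(*new_fb.panic)) };

	free(new_fb.panic);
	return 0;
}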
*/ -int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb) +int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb, + struct drm_gem_object *_obj, bool panic_tiling) { enum i915_map_type has_type; - struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; - struct i915_panic_data *panic = to_i915_panic_data(fb); - struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base)); + struct drm_i915_gem_object *obj = to_intel_bo(_obj); void *ptr; ptr = page_unpack_bits(obj->mm.mapping, &has_type); @@ -471,7 +459,7 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb) else iosys_map_set_vaddr(&sb->map[0], ptr); - if (fb->panic_tiling) + if (panic_tiling) sb->set_pixel = i915_gem_object_panic_map_set_pixel; return 0; } @@ -486,10 +474,8 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb) return -EOPNOTSUPP; } -void i915_gem_object_panic_finish(struct intel_framebuffer *fb) +void i915_gem_object_panic_finish(struct intel_panic *panic) { - struct i915_panic_data *panic = to_i915_panic_data(fb); - i915_panic_kunmap(panic); panic->page = -1; kfree(panic->pages); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index b81e67504bbe..7a3e74a6676e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -170,7 +170,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, * Also note that although these lists do not hold a reference to * the object we can safely grab one here: The final object * unreferencing and the bound_list are both protected by the - * dev->struct_mutex and so we won't ever be able to observe an + * i915->mm.obj_lock and so we won't ever be able to observe an * object on the bound_list with a reference count equals 0. */ for (phase = phases; phase->list; phase++) { @@ -185,7 +185,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, /* * We serialize our access to unreferenced objects through - * the use of the struct_mutex. While the objects are not + * the use of the obj_lock. While the objects are not * yet freed (due to RCU then a workqueue) we still want * to be able to shrink their pages, so they remain on * the unbound/bound list until actually freed. diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 1f4814968868..57bb111d65da 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -1029,7 +1029,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!obj->ttm.created); - ttm_bo_put(i915_gem_to_ttm(obj)); + ttm_bo_fini(i915_gem_to_ttm(obj)); } static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) @@ -1325,7 +1325,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, * If this function fails, it will call the destructor, but * our caller still owns the object. So no freeing in the * destructor until obj->ttm.created is true. - * Similarly, in delayed_destroy, we can't call ttm_bo_put() + * Similarly, in delayed_destroy, we can't call ttm_bo_fini() * until successful initialization. 
*/ ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 991666fd9f85..54829801d3f7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -217,10 +217,10 @@ static unsigned long to_wait_timeout(s64 timeout_ns) * * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any * non-zero timeout parameter the wait ioctl will wait for the given number of - * nanoseconds on an object becoming unbusy. Since the wait itself does so - * without holding struct_mutex the object may become re-busied before this - * function completes. A similar but shorter * race condition exists in the busy - * ioctl + * nanoseconds on an object becoming unbusy. Since the wait occurs without + * holding a global or exclusive lock the object may become re-busied before + * this function completes. A similar but shorter race condition exists + * in the busy ioctl */ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index e747f5ed195e..539c620364e3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -5,7 +5,7 @@ #include "i915_selftest.h" -#include "display/intel_display_core.h" +#include "display/intel_display_device.h" #include "gt/intel_context.h" #include "gt/intel_engine_regs.h" #include "gt/intel_engine_user.h" @@ -122,7 +122,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915) if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) return false; - return HAS_DISPLAY(display); + return intel_display_device_present(display); } static bool fast_blit_ok(const struct blit_buffer *buf) diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 8116fd5987e2..8c01fb6d4e7b 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -292,15 +292,15 @@ int gen4_emit_bb_start(struct i915_request *rq, void gen2_irq_enable(struct intel_engine_cs *engine) { - engine->i915->irq_mask &= ~engine->irq_enable_mask; - intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); + engine->i915->gen2_imr_mask &= ~engine->irq_enable_mask; + intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask); intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR); } void gen2_irq_disable(struct intel_engine_cs *engine) { - engine->i915->irq_mask |= engine->irq_enable_mask; - intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask); + engine->i915->gen2_imr_mask |= engine->irq_enable_mask; + intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask); } void gen5_irq_enable(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 98c7f6052069..10070ee4d74c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -14,7 +14,6 @@ #include "i915_active_types.h" #include "i915_sw_fence.h" -#include "i915_utils.h" #include "intel_engine_types.h" #include "intel_sseu.h" #include "intel_wakeref.h" diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index
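The gen2_imr_mask rename makes the caching idiom in gen2_irq_enable()/gen2_irq_disable() easier to see: the interrupt mask is maintained in memory and written to GEN2_IMR in a single register access, so the slow register never has to be read back. A minimal model of the pattern, with a plain variable standing in for the register:

/* Cached-IMR update pattern, fake register for illustration. */
#include <assert.h>
#include <stdint.h>

static uint32_t imr_reg;			/* stands in for GEN2_IMR */
static uint32_t gen2_imr_mask = ~0u;		/* cached copy: all sources masked */

static void irq_enable(uint32_t bit)
{
	gen2_imr_mask &= ~bit;		/* unmask in the cached copy ... */
	imr_reg = gen2_imr_mask;	/* ... then write it out in one access */
}

static void irq_disable(uint32_t bit)
{
	gen2_imr_mask |= bit;
	imr_reg = gen2_imr_mask;
}

int main(void)
{
	irq_enable(1u << 3);
	assert((imr_reg & (1u << 3)) == 0);
	irq_disable(1u << 3);
	assert(imr_reg == ~0u);
	return 0;
}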
03baa7fa0a27..7f389cb0bde4 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -106,14 +106,18 @@ * preemption, but just sampling the new tail pointer). * */ + #include <linux/interrupt.h> #include <linux/string_helpers.h> +#include "gen8_engine_cs.h" #include "i915_drv.h" +#include "i915_list_util.h" #include "i915_reg.h" +#include "i915_timer_util.h" #include "i915_trace.h" #include "i915_vgpu.h" -#include "gen8_engine_cs.h" +#include "i915_wait_util.h" #include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_engine_heartbeat.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index 86b5a9ba323d..c7befc5c20d0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -7,6 +7,7 @@ #include "gem/i915_gem_object.h" #include "i915_drv.h" +#include "i915_list_util.h" #include "intel_engine_pm.h" #include "intel_gt_buffer_pool.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c index a60822e2b5d4..c3afa321fe30 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c @@ -4,6 +4,7 @@ */ #include "i915_drv.h" +#include "i915_wait_util.h" #include "intel_gt.h" #include "intel_gt_mcr.h" #include "intel_gt_print.h" diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 9ca42589da4d..932f9f1b06b2 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -6,6 +6,7 @@ #include <linux/pm_runtime.h> #include <linux/string_helpers.h> +#include "display/vlv_clock.h" #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_reg.h" @@ -341,7 +342,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6) return PTR_ERR(pctx); } - GEM_BUG_ON(range_overflows_end_t(u64, + GEM_BUG_ON(range_end_overflows_t(u64, i915->dsm.stolen.start, pctx->stolen->start, U32_MAX)); @@ -802,7 +803,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, enum intel_rc6_res_type id) /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { mul = 1000000; - div = i915->czclk_freq; + div = vlv_clock_get_czclk(&i915->drm); overflow_hw = BIT_ULL(40); time_hw = vlv_residency_raw(uncore, reg); } else { diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 4a1675dea1c7..41b5036dc538 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -9,18 +9,17 @@ #include "display/intel_display_reset.h" #include "display/intel_overlay.h" - #include "gem/i915_gem_context.h" - #include "gt/intel_gt_regs.h" - #include "gt/uc/intel_gsc_fw.h" +#include "uc/intel_guc.h" #include "i915_drv.h" #include "i915_file_private.h" #include "i915_gpu_error.h" #include "i915_irq.h" #include "i915_reg.h" +#include "i915_wait_util.h" #include "intel_breadcrumbs.h" #include "intel_engine_pm.h" #include "intel_engine_regs.h" @@ -32,8 +31,6 @@ #include "intel_pci_config.h" #include "intel_reset.h" -#include "uc/intel_guc.h" - #define RESET_MAX_RETRIES 3 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned) diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h index 4f5fd393af6f..ee4eb574a219 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset_types.h +++ 
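With czclk_freq no longer cached in drm_i915_private, the VLV/CHV residency conversion above reads the CZ clock on demand; the arithmetic itself is unchanged: counter ticks divided by the CZ clock in kHz gives milliseconds, and the 1000000 multiplier scales the result to nanoseconds. A standalone check with assumed example numbers:

/* CZ-clock residency ticks to nanoseconds, assumed example values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw = 400000;		/* residency counter, in CZ clock ticks */
	uint32_t czclk_khz = 200000;	/* 200 MHz CZ clock, as read via CCK */

	/* ticks / (ticks per ms) = ms; the 1e6 multiplier scales ms to ns */
	uint64_t ns = raw * 1000000ull / czclk_khz;

	printf("%llu ns\n", (unsigned long long)ns);	/* 2000000 ns = 2 ms */
	return 0;
}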
b/drivers/gpu/drm/i915/gt/intel_reset_types.h @@ -20,7 +20,7 @@ struct intel_reset { * FENCE registers). * * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to - * acquire the struct_mutex to reset an engine, we need an explicit + * acquire a global lock to reset an engine, we need an explicit * flag to prevent two concurrent reset attempts in the same engine. * As the number of engines continues to grow, allocate the flags from * the most significant bits. diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 2a6d79abf25b..8314a4b0505e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -15,18 +15,19 @@ #include "i915_irq.h" #include "i915_mitigations.h" #include "i915_reg.h" +#include "i915_wait_util.h" #include "intel_breadcrumbs.h" #include "intel_context.h" +#include "intel_engine_heartbeat.h" +#include "intel_engine_pm.h" #include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_irq.h" +#include "intel_gt_print.h" #include "intel_gt_regs.h" #include "intel_reset.h" #include "intel_ring.h" #include "shmem_utils.h" -#include "intel_engine_heartbeat.h" -#include "intel_engine_pm.h" -#include "intel_gt_print.h" /* Rough estimate of the typical request size, performing a flush, * set-context and then emitting the batch. diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 006042e0b229..b01c837ab646 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -7,12 +7,14 @@ #include <drm/intel/i915_drm.h> -#include "display/intel_display.h" #include "display/intel_display_rps.h" +#include "display/vlv_clock.h" #include "soc/intel_dram.h" + #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" +#include "i915_wait_util.h" #include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" @@ -1688,10 +1690,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - rps->gpll_ref_freq = - vlv_get_cck_clock(&i915->drm, "GPLL ref", - CCK_GPLL_CLOCK_CONTROL, - i915->czclk_freq); + rps->gpll_ref_freq = vlv_clock_get_gpll(&i915->drm); drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n", rps->gpll_ref_freq); @@ -1701,13 +1700,13 @@ static void vlv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + vlv_init_gpll_ref_freq(rps); + vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC) | BIT(VLV_IOSF_SB_CCK)); - vlv_init_gpll_ref_freq(rps); - rps->max_freq = vlv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", @@ -1735,13 +1734,13 @@ static void chv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + vlv_init_gpll_ref_freq(rps); + vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC) | BIT(VLV_IOSF_SB_CCK)); - vlv_init_gpll_ref_freq(rps); - rps->max_freq = chv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", @@ -1778,6 +1777,7 @@ static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) { + struct drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); const struct intel_rps_ei *prev = &rps->ei; struct intel_rps_ei now; @@ -1794,7 +1794,7 @@ static u32 
vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) time = ktime_us_delta(now.ktime, prev->ktime); - time *= rps_to_i915(rps)->czclk_freq; + time *= vlv_clock_get_czclk(&i915->drm); /* Workload can be split between render + media, * e.g. SwapBuffers being blitted in X after being rendered in diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h index 57308c4d664a..85b43f9b9d95 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -9,6 +9,7 @@ #include <linux/lockdep.h> #include "i915_active.h" +#include "i915_list_util.h" #include "i915_syncmap.h" #include "intel_timeline_types.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c index 69ed946a39e5..a5184f09d1de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_tlb.c +++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c @@ -3,17 +3,17 @@ * Copyright © 2022 Intel Corporation */ -#include "i915_selftest.h" - #include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "gen8_engine_cs.h" #include "i915_gem_ww.h" +#include "i915_selftest.h" +#include "i915_wait_util.h" +#include "intel_context.h" #include "intel_engine_regs.h" #include "intel_gpu_commands.h" -#include "intel_context.h" #include "intel_gt.h" #include "intel_ring.h" diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c index aab2759067d2..4a81bc396b21 100644 --- a/drivers/gpu/drm/i915/gt/sysfs_engines.c +++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c @@ -7,6 +7,7 @@ #include <linux/sysfs.h> #include "i915_drv.h" +#include "i915_timer_util.h" #include "intel_engine.h" #include "intel_engine_heartbeat.h" #include "sysfs_engines.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c index d8edd7c054c8..e7444ebc373e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c @@ -10,11 +10,13 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_print.h" + +#include "i915_drv.h" +#include "i915_reg.h" +#include "i915_wait_util.h" #include "intel_gsc_proxy.h" #include "intel_gsc_uc.h" #include "intel_gsc_uc_heci_cmd_submit.h" -#include "i915_drv.h" -#include "i915_reg.h" /* * GSC proxy: diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c index 2fde5c360cff..9bd29be7656f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c @@ -8,6 +8,8 @@ #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_ring.h" + +#include "i915_wait_util.h" #include "intel_gsc_uc_heci_cmd_submit.h" struct gsc_heci_pkt { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index f360f020d8f1..52ec4421a211 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -8,15 +8,17 @@ #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" #include "gt/intel_gt_regs.h" + +#include "i915_drv.h" +#include "i915_irq.h" +#include "i915_reg.h" +#include "i915_wait_util.h" #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_capture.h" #include "intel_guc_print.h" #include "intel_guc_slpc.h" #include "intel_guc_submission.h" -#include "i915_drv.h" -#include "i915_irq.h" -#include "i915_reg.h" /** * DOC: GuC diff --git 
a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 380a11c92d63..3e7e5badcc2b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -5,11 +5,12 @@ #include <linux/circ_buf.h> #include <linux/ktime.h> -#include <linux/time64.h> #include <linux/string_helpers.h> +#include <linux/time64.h> #include <linux/timekeeping.h> #include "i915_drv.h" +#include "i915_wait_util.h" #include "intel_guc_ct.h" #include "intel_guc_print.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 384d1400134d..b1bda1b84f0a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -13,9 +13,11 @@ #include "gt/intel_gt_mcr.h" #include "gt/intel_gt_regs.h" #include "gt/intel_rps.h" + +#include "i915_drv.h" +#include "i915_wait_util.h" #include "intel_guc_fw.h" #include "intel_guc_print.h" -#include "i915_drv.h" static void guc_prepare_xfer(struct intel_gt *gt) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 09a64f224c49..cdff48920ee6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -6,6 +6,8 @@ #include <linux/debugfs.h> #include <linux/string_helpers.h> +#include <drm/drm_managed.h> + #include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_irq.h" @@ -511,7 +513,11 @@ static void guc_log_relay_unmap(struct intel_guc_log *log) void intel_guc_log_init_early(struct intel_guc_log *log) { - mutex_init(&log->relay.lock); + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *i915 = guc_to_i915(guc); + + drmm_mutex_init(&i915->drm, &log->relay.lock); + drmm_mutex_init(&i915->drm, &log->guc_lock); INIT_WORK(&log->relay.flush_work, copy_debug_logs_work); log->relay.started = false; } @@ -677,7 +683,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX) return -EINVAL; - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(&log->guc_lock); if (log->level == level) goto out_unlock; @@ -695,7 +701,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) log->level = level; out_unlock: - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(&log->guc_lock); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h index 02127703be80..13cb93ad0710 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -42,6 +42,14 @@ enum { struct intel_guc_log { u32 level; + /* + * Protects concurrent access and modification of intel_guc_log->level. + * + * This lock replaces the legacy struct_mutex usage in + * the intel_guc_log system.
+ */ + struct mutex guc_lock; + /* Allocation settings */ struct { s32 bytes; /* Size in bytes */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index d5ee6e5e1443..fa9af08f9708 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -3,17 +3,20 @@ * Copyright © 2021 Intel Corporation */ -#include <drm/drm_cache.h> #include <linux/string_helpers.h> +#include <drm/drm_cache.h> + +#include "gt/intel_gt.h" +#include "gt/intel_gt_regs.h" +#include "gt/intel_rps.h" + #include "i915_drv.h" #include "i915_reg.h" -#include "intel_guc_slpc.h" +#include "i915_wait_util.h" #include "intel_guc_print.h" +#include "intel_guc_slpc.h" #include "intel_mchbar_regs.h" -#include "gt/intel_gt.h" -#include "gt/intel_gt_regs.h" -#include "gt/intel_rps.h" /** * DOC: SLPC - Dynamic Frequency management diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 127316d2c8aa..68f2b8d363ac 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -25,16 +25,16 @@ #include "gt/intel_mocs.h" #include "gt/intel_ring.h" +#include "i915_drv.h" +#include "i915_irq.h" +#include "i915_reg.h" +#include "i915_trace.h" +#include "i915_wait_util.h" #include "intel_guc_ads.h" #include "intel_guc_capture.h" #include "intel_guc_print.h" #include "intel_guc_submission.h" -#include "i915_drv.h" -#include "i915_reg.h" -#include "i915_irq.h" -#include "i915_trace.h" - /** * DOC: GuC-based command submission * diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a91e23c22ea1..d432fdd69833 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1921,7 +1921,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) if (!bb) return -ENOMEM; - bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? 
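The new guc_lock narrows protection to just the log level instead of serializing on the device-wide struct_mutex. A sketch of the resulting pattern, using a pthread mutex as a stand-in for the kernel mutex (names are illustrative and the firmware call site is elided):

/* Narrow, purpose-built lock around one field. */
#include <pthread.h>

struct guc_log {
	pthread_mutex_t guc_lock;	/* protects only 'level' */
	int level;
};

static void guc_log_set_level(struct guc_log *log, int level)
{
	pthread_mutex_lock(&log->guc_lock);
	if (log->level != level) {
		/* ... tell the firmware about the new level here ... */
		log->level = level;
	}
	pthread_mutex_unlock(&log->guc_lock);
}

int main(void)
{
	struct guc_log log = { .guc_lock = PTHREAD_MUTEX_INITIALIZER, .level = 0 };

	guc_log_set_level(&log, 2);
	return log.level == 2 ? 0 : 1;
}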
false : true; + bb->ppgtt = s->buf_addr_type != GTT_BUFFER; /* * The start_offset stores the batch buffer's start gma's diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index da1135fa7cda..db5cd65100fe 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -49,7 +49,7 @@ * @gpa: guest physical address * * Returns: - * Zero on success, negative error code if failed + * The MMIO offset of the given GPA */ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) { @@ -58,7 +58,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa) } #define reg_is_mmio(gvt, reg) \ - (reg >= 0 && reg < gvt->device_info.mmio_size) + (reg < gvt->device_info.mmio_size) #define reg_is_gtt(gvt, reg) \ (reg >= gvt->device_info.gtt_start_offset \ diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 2f7208843367..0b810baad20a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -33,14 +33,16 @@ * */ -#include "i915_drv.h" -#include "i915_reg.h" #include "gt/intel_context.h" #include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt_regs.h" #include "gt/intel_ring.h" + #include "gvt.h" +#include "i915_drv.h" +#include "i915_reg.h" +#include "i915_wait_util.h" #include "trace.h" #define GEN9_MOCS_SIZE 64 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 23fa098c4479..c2e38d4bcd01 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -26,11 +26,11 @@ * */ +#include <linux/debugfs.h> #include <linux/sched/mm.h> #include <linux/sort.h> #include <linux/string_helpers.h> -#include <linux/debugfs.h> #include <drm/drm_debugfs.h> #include "gem/i915_gem_context.h" @@ -54,6 +54,7 @@ #include "i915_irq.h" #include "i915_reg.h" #include "i915_scheduler.h" +#include "i915_wait_util.h" #include "intel_mchbar_regs.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 70f042ce8705..95165e45de74 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -51,13 +51,15 @@ #include "display/intel_bw.h" #include "display/intel_cdclk.h" #include "display/intel_crtc.h" -#include "display/intel_display_core.h" +#include "display/intel_display_device.h" #include "display/intel_display_driver.h" +#include "display/intel_display_power.h" #include "display/intel_dmc.h" #include "display/intel_dp.h" #include "display/intel_dpt.h" #include "display/intel_encoder.h" #include "display/intel_fbdev.h" +#include "display/intel_gmbus.h" #include "display/intel_hotplug.h" #include "display/intel_opregion.h" #include "display/intel_overlay.h" @@ -977,7 +979,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_power_domains_disable(display); drm_client_dev_suspend(&i915->drm, false); - if (HAS_DISPLAY(display)) { + if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(&i915->drm); intel_display_driver_disable_user_access(display); @@ -989,7 +991,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_irq_suspend(i915); intel_hpd_cancel_work(display); - if (HAS_DISPLAY(display)) + if (intel_display_device_present(display)) intel_display_driver_suspend_access(display); intel_encoder_suspend_all(display); @@ -1051,7 +1053,6 @@ static int i915_drm_suspend(struct 
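The reg_is_mmio() change drops a comparison that could never be false: the offset is unsigned, so reg >= 0 always holds (and tends to provoke compiler warnings). A standalone sketch of the equivalent bounds check, with an assumed MMIO size:

/* With an unsigned offset, 'reg >= 0' is vacuous; the upper bound suffices. */
#include <assert.h>

#define MMIO_SIZE 0x200000u	/* assumed device_info.mmio_size */

static int reg_is_mmio(unsigned int reg)
{
	return reg < MMIO_SIZE;
}

int main(void)
{
	assert(reg_is_mmio(0));
	assert(!reg_is_mmio(MMIO_SIZE));
	return 0;
}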
drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_display *display = dev_priv->display; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); pci_power_t opregion_target_state; disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -1060,19 +1061,17 @@ static int i915_drm_suspend(struct drm_device *dev) * properly. */ intel_power_domains_disable(display); drm_client_dev_suspend(dev, false); - if (HAS_DISPLAY(display)) { + if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(dev); intel_display_driver_disable_user_access(display); } - pci_save_state(pdev); - intel_display_driver_suspend(display); intel_irq_suspend(dev_priv); intel_hpd_cancel_work(display); - if (HAS_DISPLAY(display)) + if (intel_display_device_present(display)) intel_display_driver_suspend_access(display); intel_encoder_suspend_all(display); @@ -1101,7 +1100,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_display *display = dev_priv->display; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct intel_gt *gt; int ret, i; @@ -1122,11 +1120,21 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) if (ret) { drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret); intel_display_power_resume_early(display); - - goto out; } - pci_disable_device(pdev); + enable_rpm_wakeref_asserts(rpm); + + if (!dev_priv->uncore.user_forcewake_count) + intel_runtime_pm_driver_release(rpm); + + return ret; +} + +static int i915_drm_suspend_noirq(struct drm_device *dev, bool hibernation) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + /* * During hibernation on some platforms the BIOS may try to access * the device even though it's already in D3 and hang the machine. So @@ -1138,21 +1146,20 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) * Lenovo Thinkpad X301, X61s, X60, T60, X41 * Fujitsu FSC S7110 * Acer Aspire 1830T + * + * pci_save_state() prevents drivers/pci from + * automagically putting the device into D3. 
*/ - if (!(hibernation && GRAPHICS_VER(dev_priv) < 6)) - pci_set_power_state(pdev, PCI_D3hot); + if (hibernation && GRAPHICS_VER(dev_priv) < 6) + pci_save_state(pdev); -out: - enable_rpm_wakeref_asserts(rpm); - if (!dev_priv->uncore.user_forcewake_count) - intel_runtime_pm_driver_release(rpm); - - return ret; + return 0; } int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) { + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int error; if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND && @@ -1166,7 +1173,14 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, if (error) return error; - return i915_drm_suspend_late(&i915->drm, false); + error = i915_drm_suspend_late(&i915->drm, false); + if (error) + return error; + + pci_save_state(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; } static int i915_drm_resume(struct drm_device *dev) @@ -1219,7 +1233,7 @@ static int i915_drm_resume(struct drm_device *dev) */ intel_irq_resume(dev_priv); - if (HAS_DISPLAY(display)) + if (intel_display_device_present(display)) drm_mode_config_reset(dev); i915_gem_resume(dev_priv); @@ -1228,14 +1242,14 @@ static int i915_drm_resume(struct drm_device *dev) intel_clock_gating_init(dev_priv); - if (HAS_DISPLAY(display)) + if (intel_display_device_present(display)) intel_display_driver_resume_access(display); intel_hpd_init(display); intel_display_driver_resume(display); - if (HAS_DISPLAY(display)) { + if (intel_display_device_present(display)) { intel_display_driver_enable_user_access(display); drm_kms_helper_poll_enable(dev); } @@ -1258,7 +1272,6 @@ static int i915_drm_resume_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_display *display = dev_priv->display; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_gt *gt; int ret, i; @@ -1272,41 +1285,6 @@ static int i915_drm_resume_early(struct drm_device *dev) * similar so that power domains can be employed. */ - /* - * Note that we need to set the power state explicitly, since we - * powered off the device during freeze and the PCI core won't power - * it back up for us during thaw. Powering off the device during - * freeze is not a hard requirement though, and during the - * suspend/resume phases the PCI core makes sure we get here with the - * device powered on. So in case we change our freeze logic and keep - * the device powered we can also remove the following set power state - * call. - */ - ret = pci_set_power_state(pdev, PCI_D0); - if (ret) { - drm_err(&dev_priv->drm, - "failed to set PCI D0 power state (%d)\n", ret); - return ret; - } - - /* - * Note that pci_enable_device() first enables any parent bridge - * device and only then sets the power state for this device. The - * bridge enabling is a nop though, since bridge devices are resumed - * first. The order of enabling power and enabling the device is - * imposed by the PCI core as described above, so here we preserve the - * same order for the freeze/thaw phases. - * - * TODO: eventually we should remove pci_disable_device() / - * pci_enable_enable_device() from suspend/resume. Due to how they - * depend on the device enable refcount we can't anyway depend on them - * disabling/enabling the device. 
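The reworked suspend path inverts the old logic: rather than explicitly forcing D3hot everywhere except the problem cases, the driver now saves the PCI state only for pre-gen6 hibernation, which keeps the PCI core from moving those devices out of D0. A tiny decision sketch (the helper name is illustrative, not from the patch):

/* When does the driver pin the device in D0 across hibernation? */
#include <assert.h>
#include <stdbool.h>

static bool keep_in_d0(bool hibernation, int graphics_ver)
{
	/* old BIOSes may poke a D3 device during hibernation and hang */
	return hibernation && graphics_ver < 6;
}

int main(void)
{
	assert(keep_in_d0(true, 4));	/* gen4 hibernate: save state, stay D0 */
	assert(!keep_in_d0(true, 9));	/* gen9 hibernate: PCI core handles D3 */
	assert(!keep_in_d0(false, 4));	/* plain suspend: PCI core handles D3 */
	return 0;
}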
- */ - if (pci_enable_device(pdev)) - return -EIO; - - pci_set_master(pdev); - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); ret = vlv_resume_prepare(dev_priv, false); @@ -1326,11 +1304,18 @@ static int i915_drm_resume_early(struct drm_device *dev) int i915_driver_resume_switcheroo(struct drm_i915_private *i915) { + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int ret; if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + ret = pci_set_power_state(pdev, PCI_D0); + if (ret) + return ret; + + pci_restore_state(pdev); + ret = i915_drm_resume_early(&i915->drm); if (ret) return ret; @@ -1387,6 +1372,16 @@ static int i915_pm_suspend_late(struct device *kdev) return i915_drm_suspend_late(&i915->drm, false); } +static int i915_pm_suspend_noirq(struct device *kdev) +{ + struct drm_i915_private *i915 = kdev_to_i915(kdev); + + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + return i915_drm_suspend_noirq(&i915->drm, false); +} + static int i915_pm_poweroff_late(struct device *kdev) { struct drm_i915_private *i915 = kdev_to_i915(kdev); @@ -1397,6 +1392,16 @@ static int i915_pm_poweroff_late(struct device *kdev) return i915_drm_suspend_late(&i915->drm, true); } +static int i915_pm_poweroff_noirq(struct device *kdev) +{ + struct drm_i915_private *i915 = kdev_to_i915(kdev); + + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + return i915_drm_suspend_noirq(&i915->drm, true); +} + static int i915_pm_resume_early(struct device *kdev) { struct drm_i915_private *i915 = kdev_to_i915(kdev); @@ -1662,24 +1667,25 @@ const struct dev_pm_ops i915_pm_ops = { .prepare = i915_pm_prepare, .suspend = i915_pm_suspend, .suspend_late = i915_pm_suspend_late, + .suspend_noirq = i915_pm_suspend_noirq, .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, .complete = i915_pm_complete, /* * S4 event handlers - * @freeze, @freeze_late : called (1) before creating the - * hibernation image [PMSG_FREEZE] and - * (2) after rebooting, before restoring - * the image [PMSG_QUIESCE] - * @thaw, @thaw_early : called (1) after creating the hibernation - * image, before writing it [PMSG_THAW] - * and (2) after failing to create or - * restore the image [PMSG_RECOVER] - * @poweroff, @poweroff_late: called after writing the hibernation - * image, before rebooting [PMSG_HIBERNATE] - * @restore, @restore_early : called after rebooting and restoring the - * hibernation image [PMSG_RESTORE] + * @freeze* : called (1) before creating the + * hibernation image [PMSG_FREEZE] and + * (2) after rebooting, before restoring + * the image [PMSG_QUIESCE] + * @thaw* : called (1) after creating the hibernation + * image, before writing it [PMSG_THAW] + * and (2) after failing to create or + * restore the image [PMSG_RECOVER] + * @poweroff* : called after writing the hibernation + * image, before rebooting [PMSG_HIBERNATE] + * @restore* : called after rebooting and restoring the + * hibernation image [PMSG_RESTORE] */ .freeze = i915_pm_freeze, .freeze_late = i915_pm_freeze_late, @@ -1687,6 +1693,7 @@ const struct dev_pm_ops i915_pm_ops = { .thaw = i915_pm_thaw, .poweroff = i915_pm_suspend, .poweroff_late = i915_pm_poweroff_late, + .poweroff_noirq = i915_pm_poweroff_noirq, .restore_early = i915_pm_restore_early, .restore = i915_pm_restore, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2f3965feada1..03e497d2081e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -114,8 +114,7 @@ struct i915_gem_mm { 
struct intel_memory_region *stolen_region; /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; - /** Protects the usage of the GTT stolen memory allocator. This is - * always the inner lock when overlapping with struct_mutex. */ + /** Protects the usage of the GTT stolen memory allocator */ struct mutex stolen_lock; /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */ @@ -222,6 +221,9 @@ struct drm_i915_private { bool irqs_enabled; + /* LPT/WPT IOSF sideband protection */ + struct mutex sbi_lock; + /* VLV/CHV IOSF sideband */ struct { struct mutex lock; /* protect sideband access */ @@ -232,14 +234,11 @@ struct drm_i915_private { /* Sideband mailbox protection */ struct mutex sb_lock; - /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 irq_mask; + /* Cached value of gen 2-4 IMR to avoid reads in updating the bitfield */ + u32 gen2_imr_mask; bool preserve_bios_swizzle; - unsigned int hpll_freq; - unsigned int czclk_freq; - /** * wq - Driver workqueue for GEM. * diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8c8d43451f35..e14a0c3db999 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -847,8 +847,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) /* * Only called during RPM suspend. All users of the userfault_list * must be holding an RPM wakeref to ensure that this can not - * run concurrently with themselves (and use the struct_mutex for - * protection between themselves). + * run concurrently with themselves. */ list_for_each_entry_safe(obj, on, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a5fa40ab5de2..7c7c6dcbce88 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -163,11 +163,6 @@ static void ivb_parity_work(struct work_struct *work) u32 misccpctl; u8 slice = 0; - /* We must turn off DOP level clock gating to access the L3 registers. - * In order to prevent a get/put style interface, acquire struct mutex - * any time we access those registers. 
- */ - mutex_lock(&dev_priv->drm.struct_mutex); /* If we've screwed up tracking, just let the interrupt fire again */ if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice)) @@ -225,7 +220,6 @@ out: gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)); spin_unlock_irq(gt->irq_lock); - mutex_unlock(&dev_priv->drm.struct_mutex); } static irqreturn_t valleyview_irq_handler(int irq, void *arg) @@ -662,22 +656,10 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) static void ilk_irq_reset(struct drm_i915_private *dev_priv) { struct intel_display *display = dev_priv->display; - struct intel_uncore *uncore = &dev_priv->uncore; - - gen2_irq_reset(uncore, DE_IRQ_REGS); - dev_priv->irq_mask = ~0u; - - if (GRAPHICS_VER(dev_priv) == 7) - intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); - - if (IS_HASWELL(dev_priv)) { - intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); - intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); - } + /* The master interrupt enable is in DEIER, reset display irq first */ + ilk_display_irq_reset(display); gen5_gt_irq_reset(to_gt(dev_priv)); - - ibx_display_irq_reset(display); } static void valleyview_irq_reset(struct drm_i915_private *dev_priv) @@ -903,7 +885,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv) gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); - dev_priv->irq_mask = ~0u; + dev_priv->gen2_imr_mask = ~0u; } static void i915_irq_postinstall(struct drm_i915_private *dev_priv) @@ -914,7 +896,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv)); - dev_priv->irq_mask = + dev_priv->gen2_imr_mask = ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_MASTER_ERROR_INTERRUPT); @@ -926,16 +908,16 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) I915_USER_INTERRUPT; if (DISPLAY_VER(display) >= 3) { - dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT; + dev_priv->gen2_imr_mask &= ~I915_ASLE_INTERRUPT; enable_mask |= I915_ASLE_INTERRUPT; } if (HAS_HOTPLUG(display)) { - dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; + dev_priv->gen2_imr_mask &= ~I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PORT_INTERRUPT; } - gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask); i915_display_irq_postinstall(display); } @@ -1005,7 +987,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); - dev_priv->irq_mask = ~0u; + dev_priv->gen2_imr_mask = ~0u; } static u32 i965_error_mask(struct drm_i915_private *i915) @@ -1035,7 +1017,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv)); - dev_priv->irq_mask = + dev_priv->gen2_imr_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | @@ -1053,7 +1035,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) if (IS_G4X(dev_priv)) enable_mask |= I915_BSD_USER_INTERRUPT; - gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask); + gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask); i965_display_irq_postinstall(display); } diff --git a/drivers/gpu/drm/i915/i915_list_util.h b/drivers/gpu/drm/i915/i915_list_util.h new file mode 100644 index 000000000000..4e515dc8a3e0 --- 
/dev/null +++ b/drivers/gpu/drm/i915/i915_list_util.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __I915_LIST_UTIL_H__ +#define __I915_LIST_UTIL_H__ + +#include <linux/list.h> +#include <asm/rwonce.h> + +static inline void __list_del_many(struct list_head *head, + struct list_head *first) +{ + first->prev = head; + WRITE_ONCE(head->next, first); +} + +static inline int list_is_last_rcu(const struct list_head *list, + const struct list_head *head) +{ + return READ_ONCE(list->next) == head; +} + +#endif /* __I915_LIST_UTIL_H__ */ diff --git a/drivers/gpu/drm/i915/i915_ptr_util.h b/drivers/gpu/drm/i915/i915_ptr_util.h new file mode 100644 index 000000000000..9f8931d7d99b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ptr_util.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __I915_PTR_UTIL_H__ +#define __I915_PTR_UTIL_H__ + +#include <linux/types.h> + +#define ptr_mask_bits(ptr, n) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + (typeof(ptr))(__v & -BIT(n)); \ +}) + +#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1)) + +#define ptr_unpack_bits(ptr, bits, n) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + *(bits) = __v & (BIT(n) - 1); \ + (typeof(ptr))(__v & -BIT(n)); \ +}) + +#define ptr_pack_bits(ptr, bits, n) ({ \ + unsigned long __bits = (bits); \ + GEM_BUG_ON(__bits & -BIT(n)); \ + ((typeof(ptr))((unsigned long)(ptr) | __bits)); \ +}) + +#define ptr_dec(ptr) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + (typeof(ptr))(__v - 1); \ +}) + +#define ptr_inc(ptr) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + (typeof(ptr))(__v + 1); \ +}) + +#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT) +#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT) +#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT) +#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT) + +static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b) +{ + return a - b; +} + +#define u64_to_ptr(T, x) ({ \ + typecheck(u64, x); \ + (T *)(uintptr_t)(x); \ +}) + +/* + * container_of_user: Extract the superclass from a pointer to a member. + * + * Exactly like container_of() with the exception that it plays nicely + * with sparse for __user @ptr. 
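The packing macros above exploit pointer alignment: an object aligned to 2^n bytes has n zero low bits that can carry flags. A userspace demonstration of the pack/unpack round trip (GNU C statement expression, as in the header; the unpack result is simplified to void * and the GEM_BUG_ON sanity check is dropped for brevity; the flag value is arbitrary):

/* Pack flags into the low bits of an aligned pointer, then split them out. */
#include <assert.h>
#include <stdlib.h>

#define BIT(n) (1ul << (n))

#define ptr_pack_bits(ptr, bits, n) \
	((void *)((unsigned long)(ptr) | (bits)))

#define ptr_unpack_bits(ptr, bits, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	*(bits) = __v & (BIT(n) - 1); \
	(void *)(__v & -BIT(n)); \
})

int main(void)
{
	void *page;
	unsigned long flags;

	/* 4 KiB alignment leaves 12 low bits free, as page_pack_bits() assumes */
	assert(posix_memalign(&page, 4096, 4096) == 0);

	void *packed = ptr_pack_bits(page, 3ul, 12);
	void *plain = ptr_unpack_bits(packed, &flags, 12);

	assert(plain == page && flags == 3);
	free(page);
	return 0;
}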
+ */ +#define container_of_user(ptr, type, member) ({ \ + void __user *__mptr = (void __user *)(ptr); \ + BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \ + !__same_type(*(ptr), void), \ + "pointer type mismatch in container_of()"); \ + ((type __user *)(__mptr - offsetof(type, member))); }) + +#endif /* __I915_PTR_UTIL_H__ */ diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 5f7e8138ec14..b09135301f39 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -31,19 +31,20 @@ #include <linux/llist.h> #include <linux/lockdep.h> +#include <uapi/drm/i915_drm.h> + #include "gem/i915_gem_context_types.h" #include "gt/intel_context_types.h" #include "gt/intel_engine_types.h" #include "gt/intel_timeline_types.h" #include "i915_gem.h" +#include "i915_ptr_util.h" #include "i915_scheduler.h" #include "i915_selftest.h" #include "i915_sw_fence.h" #include "i915_vma_resource.h" -#include <uapi/drm/i915_drm.h> - struct drm_file; struct drm_i915_gem_object; struct drm_printer; diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index 3a95a55b2e87..d5b6d8ab31a2 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -5,7 +5,7 @@ #include <linux/vga_switcheroo.h> -#include "display/intel_display_core.h" +#include "display/intel_display_device.h" #include "i915_driver.h" #include "i915_drv.h" @@ -22,7 +22,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n"); return; } - if (!HAS_DISPLAY(display)) { + if (!intel_display_device_present(display)) { dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n"); return; } @@ -52,7 +52,8 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. */ - return i915 && HAS_DISPLAY(display) && atomic_read(&i915->drm.open_count) == 0; + return i915 && intel_display_device_present(display) && + atomic_read(&i915->drm.open_count) == 0; } static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { diff --git a/drivers/gpu/drm/i915/i915_timer_util.c b/drivers/gpu/drm/i915/i915_timer_util.c new file mode 100644 index 000000000000..ee4cfd8b3c07 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_timer_util.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include <linux/jiffies.h> + +#include "i915_timer_util.h" + +void cancel_timer(struct timer_list *t) +{ + if (!timer_active(t)) + return; + + timer_delete(t); + WRITE_ONCE(t->expires, 0); +} + +void set_timer_ms(struct timer_list *t, unsigned long timeout) +{ + if (!timeout) { + cancel_timer(t); + return; + } + + timeout = msecs_to_jiffies(timeout); + + /* + * Paranoia to make sure the compiler computes the timeout before + * loading 'jiffies' as jiffies is volatile and may be updated in + * the background by a timer tick. All to reduce the complexity + * of the addition and reduce the risk of losing a jiffy. + */ + barrier(); + + /* Keep t->expires = 0 reserved to indicate a canceled timer. 
*/ + mod_timer(t, jiffies + timeout ?: 1); +} diff --git a/drivers/gpu/drm/i915/i915_timer_util.h b/drivers/gpu/drm/i915/i915_timer_util.h new file mode 100644 index 000000000000..f35ad730820c --- /dev/null +++ b/drivers/gpu/drm/i915/i915_timer_util.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __I915_TIMER_UTIL_H__ +#define __I915_TIMER_UTIL_H__ + +#include <linux/timer.h> +#include <asm/rwonce.h> + +void cancel_timer(struct timer_list *t); +void set_timer_ms(struct timer_list *t, unsigned long timeout); + +static inline bool timer_active(const struct timer_list *t) +{ + return READ_ONCE(t->expires); +} + +static inline bool timer_expired(const struct timer_list *t) +{ + return timer_active(t) && !timer_pending(t); +} + +#endif /* __I915_TIMER_UTIL_H__ */ diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index b60c28fbd207..49f7ed413132 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -47,36 +47,6 @@ bool i915_error_injected(void) #endif -void cancel_timer(struct timer_list *t) -{ - if (!timer_active(t)) - return; - - timer_delete(t); - WRITE_ONCE(t->expires, 0); -} - -void set_timer_ms(struct timer_list *t, unsigned long timeout) -{ - if (!timeout) { - cancel_timer(t); - return; - } - - timeout = msecs_to_jiffies(timeout); - - /* - * Paranoia to make sure the compiler computes the timeout before - * loading 'jiffies' as jiffies is volatile and may be updated in - * the background by a timer tick. All to reduce the complexity - * of the addition and reduce the risk of losing a jiffy. - */ - barrier(); - - /* Keep t->expires = 0 reserved to indicate a canceled timer. */ - mod_timer(t, jiffies + timeout ?: 1); -} - bool i915_vtd_active(struct drm_i915_private *i915) { if (device_iommu_mapped(i915->drm.dev)) diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 9cb40c2c4b12..a0c892e4c40d 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -25,7 +25,6 @@ #ifndef __I915_UTILS_H #define __I915_UTILS_H -#include <linux/list.h> #include <linux/overflow.h> #include <linux/sched.h> #include <linux/string_helpers.h> @@ -38,7 +37,6 @@ #endif struct drm_i915_private; -struct timer_list; #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ __stringify(x), (long)(x)) @@ -67,88 +65,12 @@ bool i915_error_injected(void); drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \ }) -#define range_overflows(start, size, max) ({ \ - typeof(start) start__ = (start); \ - typeof(size) size__ = (size); \ - typeof(max) max__ = (max); \ - (void)(&start__ == &size__); \ - (void)(&start__ == &max__); \ - start__ >= max__ || size__ > max__ - start__; \ -}) - -#define range_overflows_t(type, start, size, max) \ - range_overflows((type)(start), (type)(size), (type)(max)) - -#define range_overflows_end(start, size, max) ({ \ - typeof(start) start__ = (start); \ - typeof(size) size__ = (size); \ - typeof(max) max__ = (max); \ - (void)(&start__ == &size__); \ - (void)(&start__ == &max__); \ - start__ > max__ || size__ > max__ - start__; \ -}) - -#define range_overflows_end_t(type, start, size, max) \ - range_overflows_end((type)(start), (type)(size), (type)(max)) - -#define ptr_mask_bits(ptr, n) ({ \ - unsigned long __v = (unsigned long)(ptr); \ - (typeof(ptr))(__v & -BIT(n)); \ -}) - -#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1)) - -#define ptr_unpack_bits(ptr, bits, n) ({ \ - unsigned long __v 
= (unsigned long)(ptr); \ - *(bits) = __v & (BIT(n) - 1); \ - (typeof(ptr))(__v & -BIT(n)); \ -}) - -#define ptr_pack_bits(ptr, bits, n) ({ \ - unsigned long __bits = (bits); \ - GEM_BUG_ON(__bits & -BIT(n)); \ - ((typeof(ptr))((unsigned long)(ptr) | __bits)); \ -}) - -#define ptr_dec(ptr) ({ \ - unsigned long __v = (unsigned long)(ptr); \ - (typeof(ptr))(__v - 1); \ -}) - -#define ptr_inc(ptr) ({ \ - unsigned long __v = (unsigned long)(ptr); \ - (typeof(ptr))(__v + 1); \ -}) - -#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT) -#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT) -#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT) -#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT) - #define fetch_and_zero(ptr) ({ \ typeof(*ptr) __T = *(ptr); \ *(ptr) = (typeof(*ptr))0; \ __T; \ }) -static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b) -{ - return a - b; -} - -/* - * container_of_user: Extract the superclass from a pointer to a member. - * - * Exactly like container_of() with the exception that it plays nicely - * with sparse for __user @ptr. - */ -#define container_of_user(ptr, type, member) ({ \ - void __user *__mptr = (void __user *)(ptr); \ - BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \ - !__same_type(*(ptr), void), \ - "pointer type mismatch in container_of()"); \ - ((type __user *)(__mptr - offsetof(type, member))); }) - /* * check_user_mbz: Check that a user value exists and is zero * @@ -167,11 +89,6 @@ static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b) get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \ }) -#define u64_to_ptr(T, x) ({ \ - typecheck(u64, x); \ - (T *)(uintptr_t)(x); \ -}) - #define __mask_next_bit(mask) ({ \ int __idx = ffs(mask) - 1; \ mask &= ~BIT(__idx); \ @@ -183,19 +100,6 @@ static inline bool is_power_of_2_u64(u64 n) return (n != 0 && ((n & (n - 1)) == 0)); } -static inline void __list_del_many(struct list_head *head, - struct list_head *first) -{ - first->prev = head; - WRITE_ONCE(head->next, first); -} - -static inline int list_is_last_rcu(const struct list_head *list, - const struct list_head *head) -{ - return READ_ONCE(list->next) == head; -} - static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) { unsigned long j = msecs_to_jiffies(m); @@ -230,112 +134,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) } } -/* - * __wait_for - magic wait macro - * - * Macro to help avoid open coding check/wait/timeout patterns. Note that it's - * important that we check the condition again after having timed out, since the - * timeout could be due to preemption or similar and we've never had a chance to - * check the condition before the timeout. 
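The wait-macro block removed here is pure code motion: __wait_for()/wait_for() and the atomic variants reappear verbatim in the new i915_wait_util.h below, and the users (intel_pcode.c, intel_uncore.c, the pxp code, vlv_suspend.c and the selftests) gain an explicit #include "i915_wait_util.h" in the hunks that follow.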
- */ -#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ - const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ - long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ - int ret__; \ - might_sleep(); \ - for (;;) { \ - const bool expired__ = ktime_after(ktime_get_raw(), end__); \ - OP; \ - /* Guarantee COND check prior to timeout */ \ - barrier(); \ - if (COND) { \ - ret__ = 0; \ - break; \ - } \ - if (expired__) { \ - ret__ = -ETIMEDOUT; \ - break; \ - } \ - usleep_range(wait__, wait__ * 2); \ - if (wait__ < (Wmax)) \ - wait__ <<= 1; \ - } \ - ret__; \ -}) - -#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ - (Wmax)) -#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) - -/* - * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. - * On PREEMPT_RT the context isn't becoming atomic because it is used in an - * interrupt handler or because a spinlock_t is acquired. This leads to - * warnings which don't occur otherwise and therefore the check is disabled. - */ -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT) -# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) -#else -# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) -#endif - -#define _wait_for_atomic(COND, US, ATOMIC) \ -({ \ - int cpu, ret, timeout = (US) * 1000; \ - u64 base; \ - _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \ - if (!(ATOMIC)) { \ - preempt_disable(); \ - cpu = smp_processor_id(); \ - } \ - base = local_clock(); \ - for (;;) { \ - u64 now = local_clock(); \ - if (!(ATOMIC)) \ - preempt_enable(); \ - /* Guarantee COND check prior to timeout */ \ - barrier(); \ - if (COND) { \ - ret = 0; \ - break; \ - } \ - if (now - base >= timeout) { \ - ret = -ETIMEDOUT; \ - break; \ - } \ - cpu_relax(); \ - if (!(ATOMIC)) { \ - preempt_disable(); \ - if (unlikely(cpu != smp_processor_id())) { \ - timeout -= now - base; \ - cpu = smp_processor_id(); \ - base = local_clock(); \ - } \ - } \ - } \ - ret; \ -}) - -#define wait_for_us(COND, US) \ -({ \ - int ret__; \ - BUILD_BUG_ON(!__builtin_constant_p(US)); \ - if ((US) > 10) \ - ret__ = _wait_for((COND), (US), 10, 10); \ - else \ - ret__ = _wait_for_atomic((COND), (US), 0); \ - ret__; \ -}) - -#define wait_for_atomic_us(COND, US) \ -({ \ - BUILD_BUG_ON(!__builtin_constant_p(US)); \ - BUILD_BUG_ON((US) > 50000); \ - _wait_for_atomic((COND), (US), 1); \ -}) - -#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000) - #define KHz(x) (1000 * (x)) #define MHz(x) KHz(1000 * (x)) @@ -351,19 +149,6 @@ static inline void __add_taint_for_CI(unsigned int taint) add_taint(taint, LOCKDEP_STILL_OK); } -void cancel_timer(struct timer_list *t); -void set_timer_ms(struct timer_list *t, unsigned long timeout); - -static inline bool timer_active(const struct timer_list *t) -{ - return READ_ONCE(t->expires); -} - -static inline bool timer_expired(const struct timer_list *t) -{ - return timer_active(t) && !timer_pending(t); -} - static inline bool i915_run_as_guest(void) { #if IS_ENABLED(CONFIG_X86) diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 0f9eee6d18d2..8054047840aa 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -30,12 +30,12 @@ #include <drm/drm_mm.h> -#include "gt/intel_ggtt_fencing.h" #include "gem/i915_gem_object.h" - -#include "i915_gem_gtt.h" +#include "gt/intel_ggtt_fencing.h" #include "i915_active.h" +#include "i915_gem_gtt.h" 
+#include "i915_ptr_util.h" #include "i915_request.h" #include "i915_vma_resource.h" #include "i915_vma_types.h" diff --git a/drivers/gpu/drm/i915/i915_wait_util.h b/drivers/gpu/drm/i915/i915_wait_util.h new file mode 100644 index 000000000000..7376898e3bf8 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_wait_util.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __I915_WAIT_UTIL_H__ +#define __I915_WAIT_UTIL_H__ + +#include <linux/compiler.h> +#include <linux/delay.h> +#include <linux/ktime.h> +#include <linux/sched/clock.h> +#include <linux/smp.h> + +/* + * __wait_for - magic wait macro + * + * Macro to help avoid open coding check/wait/timeout patterns. Note that it's + * important that we check the condition again after having timed out, since the + * timeout could be due to preemption or similar and we've never had a chance to + * check the condition before the timeout. + */ +#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ + const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ + long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ + int ret__; \ + might_sleep(); \ + for (;;) { \ + const bool expired__ = ktime_after(ktime_get_raw(), end__); \ + OP; \ + /* Guarantee COND check prior to timeout */ \ + barrier(); \ + if (COND) { \ + ret__ = 0; \ + break; \ + } \ + if (expired__) { \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + usleep_range(wait__, wait__ * 2); \ + if (wait__ < (Wmax)) \ + wait__ <<= 1; \ + } \ + ret__; \ +}) + +#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ + (Wmax)) +#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) + +/* + * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. + * On PREEMPT_RT the context isn't becoming atomic because it is used in an + * interrupt handler or because a spinlock_t is acquired. This leads to + * warnings which don't occur otherwise and therefore the check is disabled. 
+ */ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT) +# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) +#else +# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) +#endif + +#define _wait_for_atomic(COND, US, ATOMIC) \ +({ \ + int cpu, ret, timeout = (US) * 1000; \ + u64 base; \ + _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \ + if (!(ATOMIC)) { \ + preempt_disable(); \ + cpu = smp_processor_id(); \ + } \ + base = local_clock(); \ + for (;;) { \ + u64 now = local_clock(); \ + if (!(ATOMIC)) \ + preempt_enable(); \ + /* Guarantee COND check prior to timeout */ \ + barrier(); \ + if (COND) { \ + ret = 0; \ + break; \ + } \ + if (now - base >= timeout) { \ + ret = -ETIMEDOUT; \ + break; \ + } \ + cpu_relax(); \ + if (!(ATOMIC)) { \ + preempt_disable(); \ + if (unlikely(cpu != smp_processor_id())) { \ + timeout -= now - base; \ + cpu = smp_processor_id(); \ + base = local_clock(); \ + } \ + } \ + } \ + ret; \ +}) + +#define wait_for_us(COND, US) \ +({ \ + int ret__; \ + BUILD_BUG_ON(!__builtin_constant_p(US)); \ + if ((US) > 10) \ + ret__ = _wait_for((COND), (US), 10, 10); \ + else \ + ret__ = _wait_for_atomic((COND), (US), 0); \ + ret__; \ +}) + +#define wait_for_atomic_us(COND, US) \ +({ \ + BUILD_BUG_ON(!__builtin_constant_p(US)); \ + BUILD_BUG_ON((US) > 50000); \ + _wait_for_atomic((COND), (US), 1); \ +}) + +#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000) + +#endif /* __I915_WAIT_UTIL_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c index 81da75108c60..55ffedad2490 100644 --- a/drivers/gpu/drm/i915/intel_pcode.c +++ b/drivers/gpu/drm/i915/intel_pcode.c @@ -5,6 +5,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "i915_wait_util.h" #include "intel_pcode.h" static int gen6_check_mailbox_status(u32 mbox) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4ccba7c8ffb3..8cb59f8d1f4c 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -21,19 +21,20 @@ * IN THE SOFTWARE. */ -#include <drm/drm_managed.h> #include <linux/pm_runtime.h> -#include "display/intel_display_core.h" +#include <drm/drm_managed.h> -#include "gt/intel_gt.h" +#include "display/intel_display_core.h" #include "gt/intel_engine_regs.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_regs.h" #include "i915_drv.h" #include "i915_iosf_mbi.h" #include "i915_reg.h" #include "i915_vgpu.h" +#include "i915_wait_util.h" #include "intel_uncore_trace.h" #define FORCEWAKE_ACK_TIMEOUT_MS 50 diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index f8da693ad3ce..27d545c4e6a5 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -2,15 +2,15 @@ /* * Copyright(c) 2020 Intel Corporation. 
*/ + #include <linux/workqueue.h> #include "gem/i915_gem_context.h" - #include "gt/intel_context.h" #include "gt/intel_gt.h" #include "i915_drv.h" - +#include "i915_wait_util.h" #include "intel_pxp.h" #include "intel_pxp_gsccs.h" #include "intel_pxp_irq.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 2fb7a9e7efec..48cd617247d1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -22,14 +22,13 @@ * */ -#include <linux/prime_numbers.h> #include <linux/pm_qos.h> +#include <linux/prime_numbers.h> #include <linux/sort.h> #include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" - #include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_pm.h" #include "gt/intel_engine_user.h" @@ -40,11 +39,11 @@ #include "i915_random.h" #include "i915_selftest.h" +#include "i915_wait_util.h" #include "igt_flush_test.h" #include "igt_live_test.h" #include "igt_spinner.h" #include "lib_sw_fence.h" - #include "mock_drm.h" #include "mock_gem_device.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 889281819c5b..9c276c9d0a75 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -31,7 +31,7 @@ #include "i915_driver.h" #include "i915_drv.h" #include "i915_selftest.h" - +#include "i915_wait_util.h" #include "igt_flush_test.h" struct i915_selftest i915_selftest __read_mostly = { diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 8c3e1f20e5a1..820364171ebe 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -3,12 +3,13 @@ * * Copyright © 2018 Intel Corporation */ -#include "gt/intel_gpu_commands.h" -#include "gt/intel_gt.h" #include "gem/i915_gem_internal.h" #include "gem/selftests/igt_gem_utils.h" +#include "gt/intel_gpu_commands.h" +#include "gt/intel_gt.h" +#include "i915_wait_util.h" #include "igt_spinner.h" int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 149527827624..edffaed8f9a7 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -732,7 +732,7 @@ int intel_dram_detect(struct drm_i915_private *i915) struct dram_info *dram_info; int ret; - if (IS_DG2(i915) || !HAS_DISPLAY(display)) + if (IS_DG2(i915) || !intel_display_device_present(display)) return 0; dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c index fc9f311ea1db..221e4c0b2c58 100644 --- a/drivers/gpu/drm/i915/vlv_suspend.c +++ b/drivers/gpu/drm/i915/vlv_suspend.c @@ -8,16 +8,17 @@ #include <drm/drm_print.h> +#include "gt/intel_gt_regs.h" + #include "i915_drv.h" #include "i915_reg.h" #include "i915_trace.h" #include "i915_utils.h" +#include "i915_wait_util.h" #include "intel_clock_gating.h" #include "intel_uncore_trace.h" #include "vlv_suspend.h" -#include "gt/intel_gt_regs.h" - struct vlv_s0ix_state { /* GAM */ u32 wr_watermark; diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index c5629e155d25..63f23b821b0b 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -368,17 +368,20 @@ static 
unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw, return 0; } -static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int clk_tve_di_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { unsigned long div; - div = *prate / rate; + div = req->best_parent_rate / req->rate; if (div >= 4) - return *prate / 4; + req->rate = req->best_parent_rate / 4; else if (div >= 2) - return *prate / 2; - return *prate; + req->rate = req->best_parent_rate / 2; + else + req->rate = req->best_parent_rate; + + return 0; } static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate, @@ -409,7 +412,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate, } static const struct clk_ops clk_tve_di_ops = { - .round_rate = clk_tve_di_round_rate, + .determine_rate = clk_tve_di_determine_rate, .set_rate = clk_tve_di_set_rate, .recalc_rate = clk_tve_di_recalc_rate, }; diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 6d8325c76697..dfdeb926fe9c 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -134,10 +134,10 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); struct drm_display_info *di = &conn_state->connector->display_info; struct drm_bridge_state *next_bridge_state = NULL; - struct drm_bridge *next_bridge; u32 bus_flags, bus_fmt; - next_bridge = drm_bridge_get_next_bridge(bridge); + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); + if (next_bridge) next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, next_bridge); diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c index a720d8f53209..22d0eced95da 100644 --- a/drivers/gpu/drm/loongson/lsdc_gem.c +++ b/drivers/gpu/drm/loongson/lsdc_gem.c @@ -57,7 +57,7 @@ static void lsdc_gem_object_free(struct drm_gem_object *obj) struct ttm_buffer_object *tbo = to_ttm_bo(obj); if (tbo) - ttm_bo_put(tbo); + ttm_bo_fini(tbo); } static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map) diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c index 3056ac566473..8c5af2677357 100644 --- a/drivers/gpu/drm/mcde/mcde_clk_div.c +++ b/drivers/gpu/drm/mcde/mcde_clk_div.c @@ -71,12 +71,15 @@ static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, return best_div; } -static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int mcde_clk_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - int div = mcde_clk_div_choose_div(hw, rate, prate, true); + int div = mcde_clk_div_choose_div(hw, req->rate, + &req->best_parent_rate, true); - return DIV_ROUND_UP_ULL(*prate, div); + req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div); + + return 0; } static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw, @@ -132,7 +135,7 @@ static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops mcde_clk_div_ops = { .enable = mcde_clk_div_enable, .recalc_rate = mcde_clk_div_recalc_rate, - .round_rate = mcde_clk_div_round_rate, + .determine_rate = mcde_clk_div_determine_rate, .set_rate = mcde_clk_div_set_rate, }; diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index c88776d1e784..3b5757aed9c8 100644 --- 
a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -28,6 +28,7 @@ config DRM_NOUVEAU select THERMAL if ACPI && X86 select ACPI_VIDEO if ACPI && X86 select SND_HDA_COMPONENT if SND_HDA_CORE + select PM_DEVFREQ if ARCH_TEGRA help Choose this option for open-source NVIDIA support. diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h index 22f74fc88cd7..57bc542780bb 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h @@ -9,6 +9,8 @@ struct nvkm_device_tegra { struct nvkm_device device; struct platform_device *pdev; + void __iomem *regs; + struct reset_control *rst; struct clk *clk; struct clk *clk_ref; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h index d5d8877064a7..6a09d397c651 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h @@ -134,4 +134,5 @@ int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int gp10b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index d59fd12268b9..6c26beeb427f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -57,7 +57,7 @@ nouveau_bo(struct ttm_buffer_object *bo) static inline void nouveau_bo_fini(struct nouveau_bo *bo) { - ttm_bo_put(&bo->bo); + ttm_bo_fini(&bo->bo); } extern struct ttm_device_funcs nouveau_bo_driver; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 690e10fbf0bd..395d92ab6271 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -87,7 +87,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem) return; } - ttm_bo_put(&nvbo->bo); + ttm_bo_fini(&nvbo->bo); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 8d5853deeee4..9fd351273236 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -21,6 +21,8 @@ */ #include "nouveau_platform.h" +#include <nvkm/subdev/clk/gk20a_devfreq.h> + static int nouveau_platform_probe(struct platform_device *pdev) { const struct nvkm_device_tegra_func *func; @@ -40,6 +42,21 @@ static void nouveau_platform_remove(struct platform_device *pdev) nouveau_drm_device_remove(drm); } +#ifdef CONFIG_PM_SLEEP +static int nouveau_platform_suspend(struct device *dev) +{ + return gk20a_devfreq_suspend(dev); +} + +static int nouveau_platform_resume(struct device *dev) +{ + return gk20a_devfreq_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(nouveau_pm_ops, nouveau_platform_suspend, + nouveau_platform_resume); +#endif + #if IS_ENABLED(CONFIG_OF) static const struct nvkm_device_tegra_func gk20a_platform_data = { .iommu_bit = 34, @@ -81,6 +98,9 @@ struct platform_driver nouveau_platform_driver = { .driver = { .name = "nouveau", .of_match_table = of_match_ptr(nouveau_platform_match), +#ifdef CONFIG_PM_SLEEP + .pm = 
&nouveau_pm_ops, +#endif }, .probe = nouveau_platform_probe, .remove = nouveau_platform_remove, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 3375a59ebf1a..2517b65d8faa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2280,6 +2280,7 @@ nv13b_chipset = { .acr = { 0x00000001, gp10b_acr_new }, .bar = { 0x00000001, gm20b_bar_new }, .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gp10b_clk_new }, .fault = { 0x00000001, gp10b_fault_new }, .fb = { 0x00000001, gp10b_fb_new }, .fuse = { 0x00000001, gm107_fuse_new }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 114e50ca1827..03aa6f09ec89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -259,6 +259,10 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, tdev->func = func; tdev->pdev = pdev; + tdev->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(tdev->regs)) + return PTR_ERR(tdev->regs); + if (func->require_vdd) { tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); if (IS_ERR(tdev->vdd)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild index dcecd499d8df..be8f3283ee16 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild @@ -10,6 +10,8 @@ nvkm-y += nvkm/subdev/clk/gf100.o nvkm-y += nvkm/subdev/clk/gk104.o nvkm-y += nvkm/subdev/clk/gk20a.o nvkm-y += nvkm/subdev/clk/gm20b.o +nvkm-y += nvkm/subdev/clk/gp10b.o +nvkm-$(CONFIG_PM_DEVFREQ) += nvkm/subdev/clk/gk20a_devfreq.o nvkm-y += nvkm/subdev/clk/pllnv04.o nvkm-y += nvkm/subdev/clk/pllgt215.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c index d573fb0917fc..65f5d0f1f3bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c @@ -23,6 +23,7 @@ * */ #include "priv.h" +#include "gk20a_devfreq.h" #include "gk20a.h" #include <core/tegra.h> @@ -589,6 +590,10 @@ gk20a_clk_init(struct nvkm_clk *base) return ret; } + ret = gk20a_devfreq_init(base, &clk->devfreq); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h index 286413ff4a9e..ea5b0bab4cce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h @@ -118,6 +118,7 @@ struct gk20a_clk { const struct gk20a_clk_pllg_params *params; struct gk20a_pll pll; u32 parent_rate; + struct gk20a_devfreq *devfreq; u32 (*div_to_pl)(u32); u32 (*pl_to_div)(u32); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c new file mode 100644 index 000000000000..41003cbcdbfa --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: MIT +#include <linux/clk.h> +#include <linux/math64.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> + +#include <drm/drm_managed.h> + +#include <subdev/clk.h> + +#include "nouveau_drv.h" +#include "nouveau_chan.h" +#include "priv.h" +#include "gk20a_devfreq.h" +#include "gk20a.h" +#include "gp10b.h" + +#define PMU_BUSY_CYCLES_NORM_MAX 1000U + +#define 
PWR_PMU_IDLE_COUNTER_TOTAL 0U +#define PWR_PMU_IDLE_COUNTER_BUSY 4U + +#define PWR_PMU_IDLE_COUNT_REG_OFFSET 0x0010A508U +#define PWR_PMU_IDLE_COUNT_REG_SIZE 16U +#define PWR_PMU_IDLE_COUNT_MASK 0x7FFFFFFFU +#define PWR_PMU_IDLE_COUNT_RESET_VALUE (0x1U << 31U) + +#define PWR_PMU_IDLE_INTR_REG_OFFSET 0x0010A9E8U +#define PWR_PMU_IDLE_INTR_ENABLE_VALUE 0U + +#define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET 0x0010A9ECU +#define PWR_PMU_IDLE_INTR_STATUS_MASK 0x00000001U +#define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE 0x1U + +#define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET 0x0010A8A0U +#define PWR_PMU_IDLE_THRESHOLD_REG_SIZE 4U +#define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE 0x7FFFFFFFU + +#define PWR_PMU_IDLE_CTRL_REG_OFFSET 0x0010A50CU +#define PWR_PMU_IDLE_CTRL_REG_SIZE 16U +#define PWR_PMU_IDLE_CTRL_VALUE_MASK 0x3U +#define PWR_PMU_IDLE_CTRL_VALUE_BUSY 0x2U +#define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS 0x3U +#define PWR_PMU_IDLE_CTRL_FILTER_MASK (0x1U << 2) +#define PWR_PMU_IDLE_CTRL_FILTER_DISABLED 0x0U + +#define PWR_PMU_IDLE_MASK_REG_OFFSET 0x0010A504U +#define PWR_PMU_IDLE_MASK_REG_SIZE 16U +#define PWM_PMU_IDLE_MASK_GR_ENABLED 0x1U +#define PWM_PMU_IDLE_MASK_CE_2_ENABLED 0x200000U + +/** + * struct gk20a_devfreq - Device frequency management + */ +struct gk20a_devfreq { + /** @devfreq: devfreq device. */ + struct devfreq *devfreq; + + /** @regs: Device registers. */ + void __iomem *regs; + + /** @gov_data: Governor data. */ + struct devfreq_simple_ondemand_data gov_data; + + /** @busy_time: Busy time. */ + ktime_t busy_time; + + /** @total_time: Total time. */ + ktime_t total_time; + + /** @time_last_update: Last update time. */ + ktime_t time_last_update; +}; + +static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); + struct nvkm_clk *base = nvkm_clk(subdev); + + switch (drm->nvkm->chipset) { + case 0x13b: return gp10b_clk(base)->devfreq; break; + default: return gk20a_clk(base)->devfreq; break; + } +} + +static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq) +{ + u32 data; + + // Set pmu idle intr status bit on total counter overflow + writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE, + gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET); + + writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE, + gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE)); + + // Setup counter for total cycles + data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE)); + data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK); + data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED; + writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE)); + + // Setup counter for busy cycles + writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED, + gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE)); + + data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE)); + data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK); + data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED; + writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET + + (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE)); +} + +static 
u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id) +{ + u32 ret; + + ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET + + (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE)); + + return ret & PWR_PMU_IDLE_COUNT_MASK; +} + +static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id) +{ + writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET + + (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE)); +} + +static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq) +{ + u32 ret; + + ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET); + + return ret & PWR_PMU_IDLE_INTR_STATUS_MASK; +} + +static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq) +{ + writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE, + gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET); +} + +static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq) +{ + ktime_t now, last; + u64 busy_cycles, total_cycles; + u32 norm, intr_status; + + now = ktime_get(); + last = gdevfreq->time_last_update; + gdevfreq->total_time = ktime_us_delta(now, last); + + busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); + total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); + intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq); + + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); + + if (intr_status != 0UL) { + norm = PMU_BUSY_CYCLES_NORM_MAX; + gk20a_pmu_clear_idle_intr_status(gdevfreq); + } else if (total_cycles == 0ULL || busy_cycles > total_cycles) { + norm = PMU_BUSY_CYCLES_NORM_MAX; + } else { + norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX, + total_cycles); + } + + gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX); + gdevfreq->time_last_update = now; +} + +static int gk20a_devfreq_target(struct device *dev, unsigned long *freq, + u32 flags) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); + struct nvkm_clk *base = nvkm_clk(subdev); + struct nvkm_pstate *pstates = base->func->pstates; + int nr_pstates = base->func->nr_pstates; + int i, ret; + + for (i = 0; i < nr_pstates - 1; i++) + if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq) + break; + + ret = nvkm_clk_ustate(base, pstates[i].pstate, 0); + ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1); + if (ret) { + nvkm_error(subdev, "cannot update clock\n"); + return ret; + } + + *freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV; + + return 0; +} + +static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0); + struct nvkm_clk *base = nvkm_clk(subdev); + + *freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV; + + return 0; +} + +static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq) +{ + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY); + gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL); + gk20a_pmu_clear_idle_intr_status(gdevfreq); + + gdevfreq->busy_time = 0; + gdevfreq->total_time = 0; + gdevfreq->time_last_update = ktime_get(); +} + +static int gk20a_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status 
*status) +{ + struct nouveau_drm *drm = dev_get_drvdata(dev); + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); + + gk20a_devfreq_get_cur_freq(dev, &status->current_frequency); + + gk20a_devfreq_update_utilization(gdevfreq); + + status->busy_time = ktime_to_ns(gdevfreq->busy_time); + status->total_time = ktime_to_ns(gdevfreq->total_time); + + gk20a_devfreq_reset(gdevfreq); + + NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n", + status->busy_time, status->total_time, + status->busy_time / (status->total_time / 100), + status->current_frequency / 1000 / 1000); + + return 0; +} + +static struct devfreq_dev_profile gk20a_devfreq_profile = { + .timer = DEVFREQ_TIMER_DELAYED, + .polling_ms = 50, + .target = gk20a_devfreq_target, + .get_cur_freq = gk20a_devfreq_get_cur_freq, + .get_dev_status = gk20a_devfreq_get_dev_status, +}; + +int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq) +{ + struct nvkm_device *device = base->subdev.device; + struct nouveau_drm *drm = dev_get_drvdata(device->dev); + struct nvkm_device_tegra *tdev = device->func->tegra(device); + struct nvkm_pstate *pstates = base->func->pstates; + int nr_pstates = base->func->nr_pstates; + struct gk20a_devfreq *new_gdevfreq; + int i; + + new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL); + if (!new_gdevfreq) + return -ENOMEM; + + new_gdevfreq->regs = tdev->regs; + + for (i = 0; i < nr_pstates; i++) + dev_pm_opp_add(base->subdev.device->dev, + pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0); + + gk20a_pmu_init_perfmon_counter(new_gdevfreq); + gk20a_devfreq_reset(new_gdevfreq); + + gk20a_devfreq_profile.initial_freq = + nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV; + + new_gdevfreq->gov_data.upthreshold = 45; + new_gdevfreq->gov_data.downdifferential = 5; + + new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev, + &gk20a_devfreq_profile, + DEVFREQ_GOV_SIMPLE_ONDEMAND, + &new_gdevfreq->gov_data); + if (IS_ERR(new_gdevfreq->devfreq)) + return PTR_ERR(new_gdevfreq->devfreq); + + *gdevfreq = new_gdevfreq; + + return 0; +} + +int gk20a_devfreq_resume(struct device *dev) +{ + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); + + if (!gdevfreq || !gdevfreq->devfreq) + return 0; + + return devfreq_resume_device(gdevfreq->devfreq); +} + +int gk20a_devfreq_suspend(struct device *dev) +{ + struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev); + + if (!gdevfreq || !gdevfreq->devfreq) + return 0; + + return devfreq_suspend_device(gdevfreq->devfreq); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h new file mode 100644 index 000000000000..5b7ca8a7a5cd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __GK20A_DEVFREQ_H__ +#define __GK20A_DEVFREQ_H__ + +#include <linux/devfreq.h> + +struct gk20a_devfreq; + +#if defined(CONFIG_PM_DEVFREQ) +int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq); + +int gk20a_devfreq_resume(struct device *dev); +int gk20a_devfreq_suspend(struct device *dev); +#else +static inline int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq) +{ + return 0; +} + +static inline int gk20a_devfreq_resume(struct device *dev) { return 0; } +static inline int gk20a_devfreq_suspend(struct device *dev) { return 0; } +#endif /* CONFIG_PM_DEVFREQ */ + +#endif /* __GK20A_DEVFREQ_H__ */ diff --git
a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c index 7c33542f651b..fa8ca53acbd1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c @@ -27,6 +27,7 @@ #include <core/tegra.h> #include "priv.h" +#include "gk20a_devfreq.h" #include "gk20a.h" #define GPCPLL_CFG_SYNC_MODE BIT(2) @@ -869,6 +870,10 @@ gm20b_clk_init(struct nvkm_clk *base) return ret; } + ret = gk20a_devfreq_init(base, &clk->devfreq); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c new file mode 100644 index 000000000000..492b62c0ee96 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: MIT +#include <subdev/clk.h> +#include <subdev/timer.h> +#include <core/device.h> +#include <core/tegra.h> + +#include "priv.h" +#include "gk20a_devfreq.h" +#include "gk20a.h" +#include "gp10b.h" + +static int +gp10b_clk_init(struct nvkm_clk *base) +{ + struct gp10b_clk *clk = gp10b_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + int ret; + + /* Start with the highest frequency, matching the BPMP default */ + base->func->calc(base, &base->func->pstates[base->func->nr_pstates - 1].base); + ret = base->func->prog(base); + if (ret) { + nvkm_error(subdev, "cannot initialize clock\n"); + return ret; + } + + ret = gk20a_devfreq_init(base, &clk->devfreq); + if (ret) + return ret; + + return 0; +} + +static int +gp10b_clk_read(struct nvkm_clk *base, enum nv_clk_src src) +{ + struct gp10b_clk *clk = gp10b_clk(base); + struct nvkm_subdev *subdev = &clk->base.subdev; + + switch (src) { + case nv_clk_src_gpc: + return clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV; + default: + nvkm_error(subdev, "invalid clock source %d\n", src); + return -EINVAL; + } +} + +static int +gp10b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gp10b_clk *clk = gp10b_clk(base); + u32 target_rate = cstate->domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV; + + clk->new_rate = clk_round_rate(clk->clk, target_rate) / GK20A_CLK_GPC_MDIV; + + return 0; +} + +static int +gp10b_clk_prog(struct nvkm_clk *base) +{ + struct gp10b_clk *clk = gp10b_clk(base); + int ret; + + ret = clk_set_rate(clk->clk, clk->new_rate * GK20A_CLK_GPC_MDIV); + if (ret < 0) + return ret; + + clk->rate = clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV; + + return 0; +} + +static struct nvkm_pstate +gp10b_pstates[] = { + { + .base = { + .domain[nv_clk_src_gpc] = 114750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 216750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 318750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 420750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 522750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 624750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 726750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 828750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 930750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1032750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1134750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1236750, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 1300500, + }, + }, +}; + +static const struct nvkm_clk_func +gp10b_clk = { + .init = gp10b_clk_init, + .read = gp10b_clk_read, + .calc = gp10b_clk_calc, + .prog = gp10b_clk_prog, + .tidy = gk20a_clk_tidy, + .pstates = 
gp10b_pstates, + .nr_pstates = ARRAY_SIZE(gp10b_pstates), + .domains = { + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max } + } +}; + +int +gp10b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) +{ + struct nvkm_device_tegra *tdev = device->func->tegra(device); + const struct nvkm_clk_func *func = &gp10b_clk; + struct gp10b_clk *clk; + int ret, i; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) + return -ENOMEM; + *pclk = &clk->base; + clk->clk = tdev->clk; + + /* Finish initializing the pstates */ + for (i = 0; i < func->nr_pstates; i++) { + INIT_LIST_HEAD(&func->pstates[i].list); + func->pstates[i].pstate = i + 1; + } + + ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h new file mode 100644 index 000000000000..178e3bcdbbf7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_CLK_GP10B_H__ +#define __NVKM_CLK_GP10B_H__ + +struct gp10b_clk { + /* currently applied parameters */ + struct nvkm_clk base; + struct gk20a_devfreq *devfreq; + struct clk *clk; + u32 rate; + + /* new parameters to apply */ + u32 new_rate; +}; + +#define gp10b_clk(p) container_of((p), struct gp10b_clk, base) + +#endif diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs index b28b2e05cc15..91b7380f83ab 100644 --- a/drivers/gpu/drm/nova/driver.rs +++ b/drivers/gpu/drm/nova/driver.rs @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef}; +use kernel::{ + auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, sync::aref::ARef, +}; use crate::file::File; use crate::gem::NovaObject; diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs index 33b62d21400c..2760ba4f3450 100644 --- a/drivers/gpu/drm/nova/gem.rs +++ b/drivers/gpu/drm/nova/gem.rs @@ -4,7 +4,7 @@ use kernel::{ drm, drm::{gem, gem::BaseObject}, prelude::*, - types::ARef, + sync::aref::ARef, }; use crate::{ @@ -16,16 +16,14 @@ use crate::{ #[pin_data] pub(crate) struct NovaObject {} -impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject { +impl gem::DriverObject for NovaObject { + type Driver = NovaDriver; + fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> { try_pin_init!(NovaObject {}) } } -impl gem::DriverObject for NovaObject { - type Driver = NovaDriver; -} - impl NovaObject { /// Create a new DRM GEM object. pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> { diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 4dd05bc732da..195715b162e3 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -77,7 +77,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, struct omap_dss_device *output = omap_encoder->output; struct drm_device *dev = encoder->dev; struct drm_connector *connector; - struct drm_bridge *bridge; struct videomode vm = { 0 }; u32 bus_flags; @@ -97,8 +96,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, * * A better solution is to use DRM's bus-flags through the whole driver. 
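In the omap hunk below, note that both the open-coded walk and its local `struct drm_bridge *bridge` declaration are removed: drm_for_each_bridge_in_chain_from() evidently declares the iteration cursor itself, scoped to the loop (an inference from this hunk, not verified against the macro definition). A sketch of the resulting pattern:

	/* Sketch only; iterator semantics inferred from the hunk below. */
	drm_for_each_bridge_in_chain_from(output->bridge, bridge) {
		if (!bridge->timings)
			continue;
		/* ... consume bridge->timings, e.g. its input_bus_flags ... */
	}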
*/ - for (bridge = output->bridge; bridge; - bridge = drm_bridge_get_next_bridge(bridge)) { + drm_for_each_bridge_in_chain_from(output->bridge, bridge) { if (!bridge->timings) continue; diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 62435e3cd9f4..ad6b03b59b52 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1909,6 +1909,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0xb7a9, &delay_200_500_e50, "B140HAK03.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"), @@ -1974,6 +1975,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf2, &delay_200_500_e200, "NV156FHM-N4S"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"), @@ -2007,10 +2009,12 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x148f, &delay_200_500_e80, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1565, &delay_200_500_e80, "N156HCA-EAB"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"), @@ -2022,10 +2026,12 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x144b, &delay_200_500_e80, "MNE001BS1-4"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x1519, &delay_200_500_e80_d50, "MNF601BS1-3"), EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"), @@ -2046,6 +2052,8 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"), 
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"), + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0110, &delay_200_500_e50, "KD116N3730A07"), + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0397, &delay_200_500_e50, "KD116N3730A12"), EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"), EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"), EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"), diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 077525a3ad68..1e73efad02a8 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -10,11 +10,13 @@ #include <linux/pm.h> #include <linux/regulator/consumer.h> #include <linux/spinlock.h> +#include <drm/drm_auth.h> #include <drm/drm_device.h> #include <drm/drm_mm.h> #include <drm/gpu_scheduler.h> #include "panfrost_devfreq.h" +#include "panfrost_job.h" struct panfrost_device; struct panfrost_mmu; @@ -22,7 +24,6 @@ struct panfrost_job_slot; struct panfrost_job; struct panfrost_perfcnt; -#define NUM_JOB_SLOTS 3 #define MAX_PM_DOMAINS 5 enum panfrost_drv_comp_bits { @@ -206,13 +207,19 @@ struct panfrost_engine_usage { struct panfrost_file_priv { struct panfrost_device *pfdev; - struct drm_sched_entity sched_entity[NUM_JOB_SLOTS]; + struct xarray jm_ctxs; struct panfrost_mmu *mmu; struct panfrost_engine_usage engine_usage; }; +static inline bool panfrost_high_prio_allowed(struct drm_file *file) +{ + /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */ + return (capable(CAP_SYS_NICE) || drm_is_current_master(file)); +} + static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) { return ddev->dev_private; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 1ea6c509a5d5..22350ce8a08f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -109,6 +109,14 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct #endif break; + case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES: + param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) | + BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM); + + if (panfrost_high_prio_allowed(file)) + param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH); + break; + default: return -EINVAL; } @@ -279,9 +287,13 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, struct panfrost_file_priv *file_priv = file->driver_priv; struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; + struct panfrost_jm_ctx *jm_ctx; struct panfrost_job *job; int ret = 0, slot; + if (args->pad) + return -EINVAL; + if (!args->jc) return -EINVAL; @@ -294,10 +306,16 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, return -ENODEV; } + jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle); + if (!jm_ctx) { + ret = -EINVAL; + goto out_put_syncout; + } + job = kzalloc(sizeof(*job), GFP_KERNEL); if (!job) { ret = -ENOMEM; - goto out_put_syncout; + goto out_put_jm_ctx; } kref_init(&job->refcount); @@ -307,12 +325,13 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); job->mmu = file_priv->mmu; + job->ctx = panfrost_jm_ctx_get(jm_ctx); job->engine_usage = &file_priv->engine_usage; slot = panfrost_job_get_slot(job); ret = drm_sched_job_init(&job->base, - 
&file_priv->sched_entity[slot], + &jm_ctx->slot_entity[slot], 1, NULL, file->client_id); if (ret) goto out_put_job; @@ -338,6 +357,8 @@ out_cleanup_job: drm_sched_job_cleanup(&job->base); out_put_job: panfrost_job_put(job); +out_put_jm_ctx: + panfrost_jm_ctx_put(jm_ctx); out_put_syncout: if (sync_out) drm_syncobj_put(sync_out); @@ -536,6 +557,27 @@ err_put_obj: return ret; } +static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data, + struct drm_file *file) +{ + return panfrost_jm_ctx_create(file, data); +} + +static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data, + struct drm_file *file) +{ + const struct drm_panfrost_jm_ctx_destroy *args = data; + + if (args->pad) + return -EINVAL; + + /* We can't destroy the default context created when the file is opened. */ + if (!args->handle) + return -EINVAL; + + return panfrost_jm_ctx_destroy(file, args->handle); +} + int panfrost_unstable_ioctl_check(void) { if (!unstable_ioctls) @@ -564,7 +606,7 @@ panfrost_open(struct drm_device *dev, struct drm_file *file) goto err_free; } - ret = panfrost_job_open(panfrost_priv); + ret = panfrost_job_open(file); if (ret) goto err_job; @@ -583,7 +625,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) struct panfrost_file_priv *panfrost_priv = file->driver_priv; panfrost_perfcnt_close(file); - panfrost_job_close(panfrost_priv); + panfrost_job_close(file); panfrost_mmu_ctx_put(panfrost_priv->mmu); kfree(panfrost_priv); @@ -603,6 +645,8 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW), PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW), + PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW), + PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW), }; static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, @@ -672,6 +716,47 @@ static int panthor_gems_show(struct seq_file *m, void *data) return 0; } +static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle, + struct seq_file *m) +{ + struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev; + const char *prio = "UNKNOWN"; + + static const char * const prios[] = { + [DRM_SCHED_PRIORITY_HIGH] = "HIGH", + [DRM_SCHED_PRIORITY_NORMAL] = "NORMAL", + [DRM_SCHED_PRIORITY_LOW] = "LOW", + }; + + if (jm_ctx->slot_entity[0].priority != + jm_ctx->slot_entity[1].priority) + drm_warn(ddev, "Slot priorities should be the same in a single context"); + + if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios)) + prio = prios[jm_ctx->slot_entity[0].priority]; + + seq_printf(m, " JM context %u: priority %s\n", handle, prio); +} + +static int show_file_jm_ctxs(struct panfrost_file_priv *pfile, + struct seq_file *m) +{ + struct panfrost_jm_ctx *jm_ctx; + unsigned long i; + + xa_lock(&pfile->jm_ctxs); + xa_for_each(&pfile->jm_ctxs, i, jm_ctx) { + jm_ctx = panfrost_jm_ctx_get(jm_ctx); + xa_unlock(&pfile->jm_ctxs); + show_panfrost_jm_ctx(jm_ctx, i, m); + panfrost_jm_ctx_put(jm_ctx); + xa_lock(&pfile->jm_ctxs); + } + xa_unlock(&pfile->jm_ctxs); + + return 0; +} + static struct drm_info_list panthor_debugfs_list[] = { {"gems", panthor_gems_show, 0, NULL}, }; @@ -685,9 +770,64 @@ static int panthor_gems_debugfs_init(struct drm_minor *minor) return 0; } +static int show_each_file(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *ddev = node->minor->dev; 
+ int (*show)(struct panfrost_file_priv *, struct seq_file *) = + node->info_ent->data; + struct drm_file *file; + int ret; + + ret = mutex_lock_interruptible(&ddev->filelist_mutex); + if (ret) + return ret; + + list_for_each_entry(file, &ddev->filelist, lhead) { + struct task_struct *task; + struct panfrost_file_priv *pfile = file->driver_priv; + struct pid *pid; + + /* + * Although we have a valid reference on file->pid, that does + * not guarantee that the task_struct that called get_pid() is + * still alive (e.g. get_pid(current) => fork() => exit()). + * Therefore, we need to protect this ->comm access using RCU. + */ + rcu_read_lock(); + pid = rcu_dereference(file->pid); + task = pid_task(pid, PIDTYPE_TGID); + seq_printf(m, "client_id %8llu pid %8d command %s:\n", + file->client_id, pid_nr(pid), + task ? task->comm : "<unknown>"); + rcu_read_unlock(); + + ret = show(pfile, m); + if (ret < 0) + break; + + seq_puts(m, "\n"); + } + + mutex_unlock(&ddev->filelist_mutex); + return ret; +} + +static struct drm_info_list panfrost_sched_debugfs_list[] = { + { "sched_ctxs", show_each_file, 0, show_file_jm_ctxs }, +}; + +static void panfrost_sched_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(panfrost_sched_debugfs_list, + ARRAY_SIZE(panfrost_sched_debugfs_list), + minor->debugfs_root, minor); +} + static void panfrost_debugfs_init(struct drm_minor *minor) { panthor_gems_debugfs_init(minor); + panfrost_sched_debugfs_init(minor); } #endif @@ -699,6 +839,8 @@ static void panfrost_debugfs_init(struct drm_minor *minor) * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries * - 1.4 - adds SET_LABEL_BO + * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls and extends SUBMIT to allow + * context creation with configurable priorities/affinity */ static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, @@ -711,7 +853,7 @@ static const struct drm_driver panfrost_drm_driver = { .name = "panfrost", .desc = "panfrost DRM", .major = 1, - .minor = 4, + .minor = 5, .gem_create_object = panfrost_gem_create_object, .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 82acabb21b27..c47d14eabbae 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -22,6 +22,7 @@ #include "panfrost_mmu.h" #include "panfrost_dump.h" +#define MAX_JM_CTX_PER_FILE 64 #define JOB_TIMEOUT_MS 500 #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) @@ -359,6 +360,7 @@ static void panfrost_job_cleanup(struct kref *ref) kvfree(job->bos); } + panfrost_jm_ctx_put(job->ctx); kfree(job); } @@ -383,6 +385,9 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) int slot = panfrost_job_get_slot(job); struct dma_fence *fence = NULL; + if (job->ctx->destroyed) + return ERR_PTR(-ECANCELED); + if (unlikely(job->base.s_fence->finished.error)) return NULL; @@ -917,39 +922,176 @@ void panfrost_job_fini(struct panfrost_device *pfdev) destroy_workqueue(pfdev->reset.wq); } -int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) +int panfrost_job_open(struct drm_file *file) +{ + struct panfrost_file_priv *panfrost_priv = file->driver_priv; + int ret; + + struct drm_panfrost_jm_ctx_create default_jm_ctx = { + .priority = PANFROST_JM_CTX_PRIORITY_MEDIUM, + }; + + xa_init_flags(&panfrost_priv->jm_ctxs,
XA_FLAGS_ALLOC); + + ret = panfrost_jm_ctx_create(file, &default_jm_ctx); + if (ret) + return ret; + + /* We expect the default context to be assigned handle 0. */ + if (WARN_ON(default_jm_ctx.handle)) + return -EINVAL; + + return 0; +} + +void panfrost_job_close(struct drm_file *file) +{ + struct panfrost_file_priv *panfrost_priv = file->driver_priv; + struct panfrost_jm_ctx *jm_ctx; + unsigned long i; + + xa_for_each(&panfrost_priv->jm_ctxs, i, jm_ctx) + panfrost_jm_ctx_destroy(file, i); + + xa_destroy(&panfrost_priv->jm_ctxs); +} + +int panfrost_job_is_idle(struct panfrost_device *pfdev) { - struct panfrost_device *pfdev = panfrost_priv->pfdev; struct panfrost_job_slot *js = pfdev->js; - struct drm_gpu_scheduler *sched; - int ret, i; + int i; for (i = 0; i < NUM_JOB_SLOTS; i++) { - sched = &js->queue[i].sched; - ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], - DRM_SCHED_PRIORITY_NORMAL, &sched, - 1, NULL); - if (WARN_ON(ret)) - return ret; + /* If there are any jobs in the HW queue, we're not idle */ + if (atomic_read(&js->queue[i].sched.credit_count)) + return false; + } + + return true; +} + +static void panfrost_jm_ctx_release(struct kref *kref) +{ + struct panfrost_jm_ctx *jm_ctx = container_of(kref, struct panfrost_jm_ctx, refcnt); + + WARN_ON(!jm_ctx->destroyed); + + for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) + drm_sched_entity_destroy(&jm_ctx->slot_entity[i]); + + kfree(jm_ctx); +} + +void +panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx) +{ + if (jm_ctx) + kref_put(&jm_ctx->refcnt, panfrost_jm_ctx_release); +} + +struct panfrost_jm_ctx * +panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx) +{ + if (jm_ctx) + kref_get(&jm_ctx->refcnt); + + return jm_ctx; +} + +struct panfrost_jm_ctx * +panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle) +{ + struct panfrost_file_priv *priv = file->driver_priv; + struct panfrost_jm_ctx *jm_ctx; + + xa_lock(&priv->jm_ctxs); + jm_ctx = panfrost_jm_ctx_get(xa_load(&priv->jm_ctxs, handle)); + xa_unlock(&priv->jm_ctxs); + + return jm_ctx; +} + +static int jm_ctx_prio_to_drm_sched_prio(struct drm_file *file, + enum drm_panfrost_jm_ctx_priority in, + enum drm_sched_priority *out) +{ + switch (in) { + case PANFROST_JM_CTX_PRIORITY_LOW: + *out = DRM_SCHED_PRIORITY_LOW; + return 0; + case PANFROST_JM_CTX_PRIORITY_MEDIUM: + *out = DRM_SCHED_PRIORITY_NORMAL; + return 0; + case PANFROST_JM_CTX_PRIORITY_HIGH: + if (!panfrost_high_prio_allowed(file)) + return -EACCES; + + *out = DRM_SCHED_PRIORITY_HIGH; + return 0; + default: + return -EINVAL; } +} + +int panfrost_jm_ctx_create(struct drm_file *file, + struct drm_panfrost_jm_ctx_create *args) +{ + struct panfrost_file_priv *priv = file->driver_priv; + struct panfrost_device *pfdev = priv->pfdev; + enum drm_sched_priority sched_prio; + struct panfrost_jm_ctx *jm_ctx; + int ret; + + jm_ctx = kzalloc(sizeof(*jm_ctx), GFP_KERNEL); + if (!jm_ctx) + return -ENOMEM; + + kref_init(&jm_ctx->refcnt); + + ret = jm_ctx_prio_to_drm_sched_prio(file, args->priority, &sched_prio); + if (ret) + goto err_put_jm_ctx; + + for (u32 i = 0; i < NUM_JOB_SLOTS; i++) { + struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; + + ret = drm_sched_entity_init(&jm_ctx->slot_entity[i], sched_prio, + &sched, 1, NULL); + if (ret) + goto err_put_jm_ctx; + } + + ret = xa_alloc(&priv->jm_ctxs, &args->handle, jm_ctx, + XA_LIMIT(0, MAX_JM_CTX_PER_FILE), GFP_KERNEL); + if (ret) + goto err_put_jm_ctx; + return 0; + +err_put_jm_ctx: + jm_ctx->destroyed = true; + panfrost_jm_ctx_put(jm_ctx); + return 
ret; } -void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) +int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle) { - struct panfrost_device *pfdev = panfrost_priv->pfdev; - int i; + struct panfrost_file_priv *priv = file->driver_priv; + struct panfrost_device *pfdev = priv->pfdev; + struct panfrost_jm_ctx *jm_ctx; - for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]); + jm_ctx = xa_erase(&priv->jm_ctxs, handle); + if (!jm_ctx) + return -EINVAL; + + jm_ctx->destroyed = true; /* Kill in-flight jobs */ spin_lock(&pfdev->js->job_lock); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i]; - int j; + for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) { + struct drm_sched_entity *entity = &jm_ctx->slot_entity[i]; - for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { + for (int j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) { struct panfrost_job *job = pfdev->jobs[i][j]; u32 cmd; @@ -980,18 +1122,7 @@ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) } } spin_unlock(&pfdev->js->job_lock); -} - -int panfrost_job_is_idle(struct panfrost_device *pfdev) -{ - struct panfrost_job_slot *js = pfdev->js; - int i; - for (i = 0; i < NUM_JOB_SLOTS; i++) { - /* If there are any jobs in the HW queue, we're not idle */ - if (atomic_read(&js->queue[i].sched.credit_count)) - return false; - } - - return true; + panfrost_jm_ctx_put(jm_ctx); + return 0; } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index ec581b97852b..5a30ff1503c6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -18,6 +18,7 @@ struct panfrost_job { struct panfrost_device *pfdev; struct panfrost_mmu *mmu; + struct panfrost_jm_ctx *ctx; /* Fence to be signaled by IRQ handler when the job is complete. 
*/ struct dma_fence *done_fence; @@ -39,10 +40,30 @@ struct panfrost_job { u64 start_cycles; }; +struct panfrost_js_ctx { + struct drm_sched_entity sched_entity; + bool enabled; +}; + +#define NUM_JOB_SLOTS 3 + +struct panfrost_jm_ctx { + struct kref refcnt; + bool destroyed; + struct drm_sched_entity slot_entity[NUM_JOB_SLOTS]; +}; + +int panfrost_jm_ctx_create(struct drm_file *file, + struct drm_panfrost_jm_ctx_create *args); +int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle); +void panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx); +struct panfrost_jm_ctx *panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx); +struct panfrost_jm_ctx *panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle); + int panfrost_job_init(struct panfrost_device *pfdev); void panfrost_job_fini(struct panfrost_device *pfdev); -int panfrost_job_open(struct panfrost_file_priv *panfrost_priv); -void panfrost_job_close(struct panfrost_file_priv *panfrost_priv); +int panfrost_job_open(struct drm_file *file); +void panfrost_job_close(struct drm_file *file); int panfrost_job_get_slot(struct panfrost_job *job); int panfrost_job_push(struct panfrost_job *job); void panfrost_job_put(struct panfrost_job *job); diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 4c202fc5ce05..fdbe89ef7f43 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1105,7 +1105,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, if (ret) goto out; - ret = panthor_group_create(pfile, args, queue_args); + ret = panthor_group_create(pfile, args, queue_args, file->client_id); if (ret < 0) goto out; args->group_handle = ret; diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index ba5dc3e443d9..0cc9055f4ee5 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -360,6 +360,9 @@ struct panthor_queue { /** @entity: DRM scheduling entity used for this queue. */ struct drm_sched_entity entity; + /** @name: DRM scheduler name for this queue. */ + char *name; + /** * @remaining_time: Time remaining before the job timeout expires. 
* @@ -901,6 +904,8 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * if (queue->scheduler.ops) drm_sched_fini(&queue->scheduler); + kfree(queue->name); + panthor_queue_put_syncwait_obj(queue); panthor_kernel_bo_destroy(queue->ringbuf); @@ -1412,7 +1417,7 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev, fault = cs_iface->output->fault; info = cs_iface->output->fault_info; - if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) { + if (queue) { u64 cs_extract = queue->iface.output->extract; struct panthor_job *job; @@ -3308,9 +3313,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, static struct panthor_queue * group_create_queue(struct panthor_group *group, - const struct drm_panthor_queue_create *args) + const struct drm_panthor_queue_create *args, + u64 drm_client_id, u32 gid, u32 qid) { - const struct drm_sched_init_args sched_args = { + struct drm_sched_init_args sched_args = { .ops = &panthor_queue_sched_ops, .submit_wq = group->ptdev->scheduler->wq, .num_rqs = 1, @@ -3323,7 +3329,6 @@ group_create_queue(struct panthor_group *group, .credit_limit = args->ringbuf_size / sizeof(u64), .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), .timeout_wq = group->ptdev->reset.wq, - .name = "panthor-queue", .dev = group->ptdev->base.dev, }; struct drm_gpu_scheduler *drm_sched; @@ -3398,6 +3403,15 @@ group_create_queue(struct panthor_group *group, if (ret) goto err_free_queue; + /* assign a unique name */ + queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid); + if (!queue->name) { + ret = -ENOMEM; + goto err_free_queue; + } + + sched_args.name = queue->name; + ret = drm_sched_init(&queue->scheduler, &sched_args); if (ret) goto err_free_queue; @@ -3447,7 +3461,8 @@ static void add_group_kbo_sizes(struct panthor_device *ptdev, int panthor_group_create(struct panthor_file *pfile, const struct drm_panthor_group_create *group_args, - const struct drm_panthor_queue_create *queue_args) + const struct drm_panthor_queue_create *queue_args, + u64 drm_client_id) { struct panthor_device *ptdev = pfile->ptdev; struct panthor_group_pool *gpool = pfile->groups; @@ -3540,12 +3555,16 @@ int panthor_group_create(struct panthor_file *pfile, memset(group->syncobjs->kmap, 0, group_args->queues.count * sizeof(struct panthor_syncobj_64b)); + ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); + if (ret) + goto err_put_group; + for (i = 0; i < group_args->queues.count; i++) { - group->queues[i] = group_create_queue(group, &queue_args[i]); + group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i); if (IS_ERR(group->queues[i])) { ret = PTR_ERR(group->queues[i]); group->queues[i] = NULL; - goto err_put_group; + goto err_erase_gid; } group->queue_count++; @@ -3553,10 +3572,6 @@ int panthor_group_create(struct panthor_file *pfile, group->idle_queues = GENMASK(group->queue_count - 1, 0); - ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); - if (ret) - goto err_put_group; - mutex_lock(&sched->reset.lock); if (atomic_read(&sched->reset.in_progress)) { panthor_group_stop(group); @@ -3575,6 +3590,9 @@ int panthor_group_create(struct panthor_file *pfile, return gid; +err_erase_gid: + xa_erase(&gpool->xa, gid); + err_put_group: group_put(group); return ret; diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h index 742b0b4ff3a3..f4a475aa34c0 100644 --- 
a/drivers/gpu/drm/panthor/panthor_sched.h +++ b/drivers/gpu/drm/panthor/panthor_sched.h @@ -21,7 +21,8 @@ struct panthor_job; int panthor_group_create(struct panthor_file *pfile, const struct drm_panthor_group_create *group_args, - const struct drm_panthor_queue_create *queue_args); + const struct drm_panthor_queue_create *queue_args, + u64 drm_client_id); int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle); int panthor_group_get_state(struct panthor_file *pfile, struct drm_panthor_group_get_state *get_state); diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index b9fe926a49e8..6d567e5c7c6f 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -473,12 +473,15 @@ static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, return best_div; } -static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int pl111_clk_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - int div = pl111_clk_div_choose_div(hw, rate, prate, true); + int div = pl111_clk_div_choose_div(hw, req->rate, + &req->best_parent_rate, true); - return DIV_ROUND_UP_ULL(*prate, div); + req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div); + + return 0; } static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw, @@ -528,7 +531,7 @@ static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops pl111_clk_div_ops = { .recalc_rate = pl111_clk_div_recalc_rate, - .round_rate = pl111_clk_div_round_rate, + .determine_rate = pl111_clk_div_determine_rate, .set_rate = pl111_clk_div_set_rate, }; diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index fc5e3763c359..d26043424e95 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -39,7 +39,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj) qxl_surface_evict(qdev, qobj, false); tbo = &qobj->tbo; - ttm_bo_put(tbo); + ttm_bo_fini(tbo); } int qxl_gem_object_create(struct qxl_device *qdev, int size, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index f86773f3db20..18ca1bcfd2f9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -86,7 +86,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj) if (robj) { radeon_mn_unregister(robj); - ttm_bo_put(&robj->tbo); + ttm_bo_fini(&robj->tbo); } } diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h index 7f31d35780cc..553d45abd057 100644 --- a/drivers/gpu/drm/scheduler/tests/sched_tests.h +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -31,9 +31,8 @@ * * @base: DRM scheduler base class * @test: Backpointer to owning the kunit test case - * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list + * @lock: Lock to protect the simulated @hw_timeline and @job_list * @job_list: List of jobs submitted to the mock GPU - * @done_list: List of jobs completed by the mock GPU * @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and * @cur_seqno for implementing a struct dma_fence signaling the * simulated job completion. 
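The pl111 hunk above, like the stm and sun4i hunks further below, applies the same mechanical clk_ops conversion: .round_rate is replaced by .determine_rate, the requested and parent rates move into struct clk_rate_request, and the chosen rate is returned through req->rate instead of the return value, which becomes a plain error code. A minimal sketch of the recipe, using a hypothetical foo_ divider driver rather than any of the drivers touched here:

#include <linux/clk-provider.h>
#include <linux/math.h>

/* Old-style callback, shown only for comparison: the rounded rate and
 * any error share the long return value.
 */
static long foo_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	unsigned long div = DIV_ROUND_UP(*parent_rate, rate);

	return *parent_rate / div;
}

/* The same logic as a determine_rate callback: rates travel inside the
 * request structure, and the return value is free to report failures.
 */
static int foo_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	unsigned long div;

	if (!req->rate)
		return -EINVAL;

	div = DIV_ROUND_UP(req->best_parent_rate, req->rate);
	if (!div)
		return -EINVAL;

	req->rate = req->best_parent_rate / div;

	return 0;
}

static const struct clk_ops foo_clk_ops = {
	.determine_rate = foo_clk_determine_rate,
};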
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index eec43d1a5595..8368f0ffbe1e 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -1498,7 +1498,7 @@ static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc, if (ret) return ret; - ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL); + ssd130x_state->data_array = kmalloc_array(ssd130x->width, pages, GFP_KERNEL); if (!ssd130x_state->data_array) return -ENOMEM; @@ -1519,7 +1519,7 @@ static int ssd132x_crtc_atomic_check(struct drm_crtc *crtc, if (ret) return ret; - ssd130x_state->data_array = kmalloc(columns * ssd130x->height, GFP_KERNEL); + ssd130x_state->data_array = kmalloc_array(columns, ssd130x->height, GFP_KERNEL); if (!ssd130x_state->data_array) return -ENOMEM; @@ -1546,7 +1546,7 @@ static int ssd133x_crtc_atomic_check(struct drm_crtc *crtc, pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width); - ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL); + ssd130x_state->data_array = kmalloc_array(pitch, ssd130x->height, GFP_KERNEL); if (!ssd130x_state->data_array) return -ENOMEM; diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c index 2c7bc064bc66..58eae6804cc8 100644 --- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c +++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c @@ -274,8 +274,8 @@ static unsigned long dw_mipi_dsi_clk_recalc_rate(struct clk_hw *hw, return (unsigned long)pll_out_khz * 1000; } -static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int dw_mipi_dsi_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct dw_mipi_dsi_stm *dsi = clk_to_dw_mipi_dsi_stm(hw); unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz; @@ -283,14 +283,14 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate, DRM_DEBUG_DRIVER("\n"); - pll_in_khz = (unsigned int)(*parent_rate / 1000); + pll_in_khz = (unsigned int)(req->best_parent_rate / 1000); /* Compute best pll parameters */ idf = 0; ndiv = 0; odf = 0; - ret = dsi_pll_get_params(dsi, pll_in_khz, rate / 1000, + ret = dsi_pll_get_params(dsi, pll_in_khz, req->rate / 1000, &idf, &ndiv, &odf); if (ret) DRM_WARN("Warning dsi_pll_get_params(): bad params\n"); @@ -298,7 +298,9 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate, /* Get the adjusted pll out value */ pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf); - return pll_out_khz * 1000; + req->rate = pll_out_khz * 1000; + + return 0; } static int dw_mipi_dsi_clk_set_rate(struct clk_hw *hw, unsigned long rate, @@ -351,7 +353,7 @@ static const struct clk_ops dw_mipi_dsi_stm_clk_ops = { .disable = dw_mipi_dsi_clk_disable, .is_enabled = dw_mipi_dsi_clk_is_enabled, .recalc_rate = dw_mipi_dsi_clk_recalc_rate, - .round_rate = dw_mipi_dsi_clk_round_rate, + .determine_rate = dw_mipi_dsi_clk_determine_rate, .set_rate = dw_mipi_dsi_clk_set_rate, }; diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c index 07788e8d3d83..fe38c0984b2b 100644 --- a/drivers/gpu/drm/stm/lvds.c +++ b/drivers/gpu/drm/stm/lvds.c @@ -682,8 +682,8 @@ static unsigned long lvds_pixel_clk_recalc_rate(struct clk_hw *hw, return (unsigned long)lvds->pixel_clock_rate; } -static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int lvds_pixel_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct 
stm_lvds *lvds = container_of(hw, struct stm_lvds, lvds_ck_px); unsigned int pll_in_khz, bdiv = 0, mdiv = 0, ndiv = 0; @@ -703,7 +703,7 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate, mode = list_first_entry(&connector->modes, struct drm_display_mode, head); - pll_in_khz = (unsigned int)(*parent_rate / 1000); + pll_in_khz = (unsigned int)(req->best_parent_rate / 1000); if (lvds_is_dual_link(lvds->link_type)) multiplier = 2; @@ -719,14 +719,16 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate, lvds->pixel_clock_rate = (unsigned long)pll_get_clkout_khz(pll_in_khz, bdiv, mdiv, ndiv) * 1000 * multiplier / 7; - return lvds->pixel_clock_rate; + req->rate = lvds->pixel_clock_rate; + + return 0; } static const struct clk_ops lvds_pixel_clk_ops = { .enable = lvds_pixel_clk_enable, .disable = lvds_pixel_clk_disable, .recalc_rate = lvds_pixel_clk_recalc_rate, - .round_rate = lvds_pixel_clk_round_rate, + .determine_rate = lvds_pixel_clk_determine_rate, }; static const struct clk_init_data clk_data = { diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c index 12430b9d4e93..b1beadb9bb59 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c @@ -59,13 +59,15 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate, return best_rate; } -static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int sun4i_ddc_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct sun4i_ddc *ddc = hw_to_ddc(hw); - return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div, - ddc->m_offset, NULL, NULL); + req->rate = sun4i_ddc_calc_divider(req->rate, req->best_parent_rate, + ddc->pre_div, ddc->m_offset, NULL, NULL); + + return 0; } static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw, @@ -101,7 +103,7 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate, static const struct clk_ops sun4i_ddc_ops = { .recalc_rate = sun4i_ddc_recalc_rate, - .round_rate = sun4i_ddc_round_rate, + .determine_rate = sun4i_ddc_determine_rate, .set_rate = sun4i_ddc_set_rate, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c index 03d7de1911cd..4afb12bd5281 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c @@ -67,8 +67,8 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw, return parent_rate / val; } -static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int sun4i_dclk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { struct sun4i_dclk *dclk = hw_to_dclk(hw); struct sun4i_tcon *tcon = dclk->tcon; @@ -77,7 +77,7 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, int i; for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) { - u64 ideal = (u64)rate * i; + u64 ideal = (u64)req->rate * i; unsigned long rounded; /* @@ -99,17 +99,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, goto out; } - if (abs(rate - rounded / i) < - abs(rate - best_parent / best_div)) { + if (abs(req->rate - rounded / i) < + abs(req->rate - best_parent / best_div)) { best_parent = rounded; best_div = i; } } out: - *parent_rate = best_parent; + req->best_parent_rate = best_parent; - return best_parent / best_div; + req->rate = best_parent / best_div; + + return 0; } static int 
sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, @@ -155,7 +157,7 @@ static const struct clk_ops sun4i_dclk_ops = { .is_enabled = sun4i_dclk_is_enabled, .recalc_rate = sun4i_dclk_recalc_rate, - .round_rate = sun4i_dclk_round_rate, + .determine_rate = sun4i_dclk_determine_rate, .set_rate = sun4i_dclk_set_rate, .get_phase = sun4i_dclk_get_phase, diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h index 89633e30ca62..da670d7eeb2e 100644 --- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h +++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h @@ -10,12 +10,19 @@ #include <drm/drm_crtc.h> #include <drm/drm_device.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_modes.h> struct drm_format_info; struct drm_scanout_buffer; struct screen_info; +typedef void (*drm_sysfb_blit_func)(struct iosys_map *, const unsigned int *, + const struct iosys_map *, + const struct drm_framebuffer *, + const struct drm_rect *, + struct drm_format_conv_state *); + /* * Input parsing */ @@ -93,10 +100,25 @@ static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *de * Plane */ +struct drm_sysfb_plane_state { + struct drm_shadow_plane_state base; + + /* transfers framebuffer data to scanout buffer in CRTC format */ + drm_sysfb_blit_func blit_to_crtc; +}; + +static inline struct drm_sysfb_plane_state * +to_drm_sysfb_plane_state(struct drm_plane_state *base) +{ + return container_of(to_drm_shadow_plane_state(base), struct drm_sysfb_plane_state, base); +} + size_t drm_sysfb_build_fourcc_list(struct drm_device *dev, const u32 *native_fourccs, size_t native_nfourccs, u32 *fourccs_out, size_t nfourccs_out); +int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane, + struct drm_plane_state *plane_state); int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *new_state); void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, @@ -114,16 +136,24 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane, DRM_FORMAT_MOD_INVALID #define DRM_SYSFB_PLANE_HELPER_FUNCS \ - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \ + .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, \ + .end_fb_access = drm_gem_end_shadow_fb_access, \ .atomic_check = drm_sysfb_plane_helper_atomic_check, \ .atomic_update = drm_sysfb_plane_helper_atomic_update, \ .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer +void drm_sysfb_plane_reset(struct drm_plane *plane); +struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane); +void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *plane_state); + #define DRM_SYSFB_PLANE_FUNCS \ + .reset = drm_sysfb_plane_reset, \ .update_plane = drm_atomic_helper_update_plane, \ .disable_plane = drm_atomic_helper_disable_plane, \ - DRM_GEM_SHADOW_PLANE_FUNCS + .atomic_duplicate_state = drm_sysfb_plane_atomic_duplicate_state, \ + .atomic_destroy_state = drm_sysfb_plane_atomic_destroy_state /* * CRTC diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c index ddb4a7523ee6..8517c490e815 100644 --- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c +++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c @@ -11,7 +11,6 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> -#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_panic.h> #include 
<drm/drm_print.h> @@ -185,6 +184,104 @@ size_t drm_sysfb_build_fourcc_list(struct drm_device *dev, } EXPORT_SYMBOL(drm_sysfb_build_fourcc_list); +static void drm_sysfb_plane_state_destroy(struct drm_sysfb_plane_state *sysfb_plane_state) +{ + __drm_gem_destroy_shadow_plane_state(&sysfb_plane_state->base); + + kfree(sysfb_plane_state); +} + +static void drm_sysfb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state) +{ + drm_fb_memcpy(dst, dst_pitch, src, fb, clip); +} + +static drm_sysfb_blit_func drm_sysfb_get_blit_func(u32 dst_format, u32 src_format) +{ + if (src_format == dst_format) { + return drm_sysfb_memcpy; + } else if (src_format == DRM_FORMAT_XRGB8888) { + switch (dst_format) { + case DRM_FORMAT_RGB565: + return drm_fb_xrgb8888_to_rgb565; + case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN: + return drm_fb_xrgb8888_to_rgb565be; + case DRM_FORMAT_XRGB1555: + return drm_fb_xrgb8888_to_xrgb1555; + case DRM_FORMAT_ARGB1555: + return drm_fb_xrgb8888_to_argb1555; + case DRM_FORMAT_RGBA5551: + return drm_fb_xrgb8888_to_rgba5551; + case DRM_FORMAT_RGB888: + return drm_fb_xrgb8888_to_rgb888; + case DRM_FORMAT_BGR888: + return drm_fb_xrgb8888_to_bgr888; + case DRM_FORMAT_ARGB8888: + return drm_fb_xrgb8888_to_argb8888; + case DRM_FORMAT_XBGR8888: + return drm_fb_xrgb8888_to_xbgr8888; + case DRM_FORMAT_ABGR8888: + return drm_fb_xrgb8888_to_abgr8888; + case DRM_FORMAT_XRGB2101010: + return drm_fb_xrgb8888_to_xrgb2101010; + case DRM_FORMAT_ARGB2101010: + return drm_fb_xrgb8888_to_argb2101010; + case DRM_FORMAT_BGRX8888: + return drm_fb_xrgb8888_to_bgrx8888; + case DRM_FORMAT_RGB332: + return drm_fb_xrgb8888_to_rgb332; + } + } + + return NULL; +} + +int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct drm_device *dev = plane->dev; + struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_crtc_state *crtc_state; + struct drm_sysfb_crtc_state *sysfb_crtc_state; + drm_sysfb_blit_func blit_to_crtc; + int ret; + + ret = drm_gem_begin_shadow_fb_access(plane, plane_state); + if (ret) + return ret; + + if (!fb) + return 0; + + ret = -EINVAL; + + crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); + if (drm_WARN_ON_ONCE(dev, !crtc_state)) + goto err_drm_gem_end_shadow_fb_access; + sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); + + if (drm_WARN_ON_ONCE(dev, !sysfb_crtc_state->format)) + goto err_drm_gem_end_shadow_fb_access; + blit_to_crtc = drm_sysfb_get_blit_func(sysfb_crtc_state->format->format, + fb->format->format); + if (!blit_to_crtc) { + drm_warn_once(dev, "No blit helper from %p4cc to %p4cc found.\n", + &fb->format->format, &sysfb_crtc_state->format->format); + goto err_drm_gem_end_shadow_fb_access; + } + sysfb_plane_state->blit_to_crtc = blit_to_crtc; + + return 0; + +err_drm_gem_end_shadow_fb_access: + drm_gem_end_shadow_fb_access(plane, plane_state); + return ret; +} +EXPORT_SYMBOL(drm_sysfb_plane_helper_begin_fb_access); + int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *new_state) { @@ -235,12 +332,14 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev); struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); 
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); - struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); + struct drm_shadow_plane_state *shadow_plane_state = &sysfb_plane_state->base; struct drm_framebuffer *fb = plane_state->fb; unsigned int dst_pitch = sysfb->fb_pitch; struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); const struct drm_format_info *dst_format = sysfb_crtc_state->format; + drm_sysfb_blit_func blit_to_crtc = sysfb_plane_state->blit_to_crtc; struct drm_atomic_helper_damage_iter iter; struct drm_rect damage; int ret, idx; @@ -261,8 +360,8 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at continue; iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip)); - drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb, - &damage, &shadow_plane_state->fmtcnv_state); + blit_to_crtc(&dst, &dst_pitch, shadow_plane_state->data, fb, &damage, + &shadow_plane_state->fmtcnv_state); } drm_dev_exit(idx); @@ -321,6 +420,52 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane, } EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer); +void drm_sysfb_plane_reset(struct drm_plane *plane) +{ + struct drm_sysfb_plane_state *sysfb_plane_state; + + if (plane->state) + drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane->state)); + + sysfb_plane_state = kzalloc(sizeof(*sysfb_plane_state), GFP_KERNEL); + if (sysfb_plane_state) + __drm_gem_reset_shadow_plane(plane, &sysfb_plane_state->base); + else + __drm_gem_reset_shadow_plane(plane, NULL); +} +EXPORT_SYMBOL(drm_sysfb_plane_reset); + +struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct drm_plane_state *plane_state = plane->state; + struct drm_sysfb_plane_state *sysfb_plane_state; + struct drm_sysfb_plane_state *new_sysfb_plane_state; + struct drm_shadow_plane_state *new_shadow_plane_state; + + if (drm_WARN_ON(dev, !plane_state)) + return NULL; + sysfb_plane_state = to_drm_sysfb_plane_state(plane_state); + + new_sysfb_plane_state = kzalloc(sizeof(*new_sysfb_plane_state), GFP_KERNEL); + if (!new_sysfb_plane_state) + return NULL; + new_shadow_plane_state = &new_sysfb_plane_state->base; + + __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); + new_sysfb_plane_state->blit_to_crtc = sysfb_plane_state->blit_to_crtc; + + return &new_shadow_plane_state->base; +} +EXPORT_SYMBOL(drm_sysfb_plane_atomic_duplicate_state); + +void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane_state)); +} +EXPORT_SYMBOL(drm_sysfb_plane_atomic_destroy_state); + /* * CRTC */ diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c index 16a4b52d45c6..c318df0adad5 100644 --- a/drivers/gpu/drm/sysfb/vesadrm.c +++ b/drivers/gpu/drm/sysfb/vesadrm.c @@ -295,7 +295,8 @@ static int vesadrm_primary_plane_helper_atomic_check(struct drm_plane *plane, } static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = { - DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, + .end_fb_access = 
drm_gem_end_shadow_fb_access, .atomic_check = vesadrm_primary_plane_helper_atomic_check, .atomic_update = drm_sysfb_plane_helper_atomic_update, .atomic_disable = drm_sysfb_plane_helper_atomic_disable, diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 8cd2969e7d4b..c4820f5e7658 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -658,7 +658,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, { const u8 *ptr = data; unsigned long offset; - size_t i, j; + size_t i; u32 value; switch (ptr[0]) { @@ -691,7 +691,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, * - subpack_low: bytes 0 - 3 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) */ - for (i = 3, j = 0; i < size; i += 7, j += 8) { + for (i = 3; i < size; i += 7) { size_t rem = size - i, num = min_t(size_t, rem, 4); value = tegra_hdmi_subpack(&ptr[i], num); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 21f3dfdcc5c9..bc7dd562cf6b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -1864,7 +1864,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor, { const u8 *ptr = data; unsigned long offset; - size_t i, j; + size_t i; u32 value; switch (ptr[0]) { @@ -1897,7 +1897,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor, * - subpack_low: bytes 0 - 3 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) */ - for (i = 3, j = 0; i < size; i += 7, j += 8) { + for (i = 3; i < size; i += 7) { size_t rem = size - i, num = min_t(size_t, rem, 4); value = tegra_sor_hdmi_subpack(&ptr[i], num); diff --git a/drivers/gpu/drm/tests/.kunitconfig b/drivers/gpu/drm/tests/.kunitconfig index 6ec04b4c979d..5be8e71f45d5 100644 --- a/drivers/gpu/drm/tests/.kunitconfig +++ b/drivers/gpu/drm/tests/.kunitconfig @@ -1,3 +1,5 @@ CONFIG_KUNIT=y CONFIG_DRM=y +CONFIG_DRM_VKMS=y +CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_KUNIT_TEST=y diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 94a5bf61a115..7d9e85e932d7 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -86,7 +86,7 @@ config DRM_PIXPAPER tristate "DRM support for PIXPAPER display panels" depends on DRM && SPI select DRM_CLIENT_SELECTION - select DRM_GEM_DMA_HELPER + select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help DRM driver for the Mayqueen Pixpaper e-ink display panel. 
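The ssd130x conversions above from kmalloc() to kmalloc_array() are about integer-overflow safety rather than behaviour: an open-coded width * pages multiplication can wrap on 32-bit systems and silently yield an undersized buffer, while kmalloc_array() performs a checked multiplication and returns NULL on overflow. A short sketch of the pattern, with hypothetical width/pages parameters standing in for the mode-derived sizes used by the driver:

#include <linux/slab.h>

/*
 * Hypothetical shadow-buffer allocation. 'width' and 'pages' derive from
 * a user-configurable display mode, so their product is not trusted.
 */
static void *shadow_buf_alloc(size_t width, size_t pages)
{
	/*
	 * kmalloc(width * pages, GFP_KERNEL) would wrap silently if the
	 * product overflows size_t; kmalloc_array() detects the overflow
	 * and returns NULL, so callers see a clean allocation failure
	 * instead of a short buffer and out-of-bounds writes later.
	 */
	return kmalloc_array(width, pages, GFP_KERNEL);
}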
diff --git a/drivers/gpu/drm/tiny/pixpaper.c b/drivers/gpu/drm/tiny/pixpaper.c index b1379cb5f030..32598fb2fee7 100644 --- a/drivers/gpu/drm/tiny/pixpaper.c +++ b/drivers/gpu/drm/tiny/pixpaper.c @@ -968,8 +968,8 @@ static const struct drm_crtc_funcs pixpaper_crtc_funcs = { .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; -static int pixpaper_mode_valid(struct drm_crtc *crtc, - const struct drm_display_mode *mode) +static enum drm_mode_status +pixpaper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) { if (mode->hdisplay == PIXPAPER_WIDTH && mode->vdisplay == PIXPAPER_HEIGHT) { diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c index 6c77550c51af..5426b435f702 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c @@ -379,7 +379,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) dma_resv_fini(resv); } -static void ttm_bo_put_basic(struct kunit *test) +static void ttm_bo_fini_basic(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct ttm_buffer_object *bo; @@ -410,7 +410,7 @@ static void ttm_bo_put_basic(struct kunit *test) dma_resv_unlock(bo->base.resv); KUNIT_EXPECT_EQ(test, err, 0); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static const char *mock_name(struct dma_fence *f) @@ -423,7 +423,7 @@ static const struct dma_fence_ops mock_fence_ops = { .get_timeline_name = mock_name, }; -static void ttm_bo_put_shared_resv(struct kunit *test) +static void ttm_bo_fini_shared_resv(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct ttm_buffer_object *bo; @@ -463,7 +463,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test) bo->type = ttm_bo_type_device; bo->base.resv = external_resv; - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_pin_basic(struct kunit *test) @@ -616,8 +616,8 @@ static struct kunit_case ttm_bo_test_cases[] = { KUNIT_CASE(ttm_bo_unreserve_basic), KUNIT_CASE(ttm_bo_unreserve_pinned), KUNIT_CASE(ttm_bo_unreserve_bulk), - KUNIT_CASE(ttm_bo_put_basic), - KUNIT_CASE(ttm_bo_put_shared_resv), + KUNIT_CASE(ttm_bo_fini_basic), + KUNIT_CASE(ttm_bo_fini_shared_resv), KUNIT_CASE(ttm_bo_pin_basic), KUNIT_CASE(ttm_bo_pin_unpin_resource), KUNIT_CASE(ttm_bo_multiple_pin_one_unpin), diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c index 1bcc67977f48..3a1eef83190c 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c @@ -144,7 +144,7 @@ static void ttm_bo_init_reserved_sys_man(struct kunit *test) drm_mm_node_allocated(&bo->base.vma_node.vm_node)); ttm_resource_free(bo, &bo->resource); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_init_reserved_mock_man(struct kunit *test) @@ -186,7 +186,7 @@ static void ttm_bo_init_reserved_mock_man(struct kunit *test) drm_mm_node_allocated(&bo->base.vma_node.vm_node)); ttm_resource_free(bo, &bo->resource); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, mem_type); } @@ -221,7 +221,7 @@ static void ttm_bo_init_reserved_resv(struct kunit *test) KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv); ttm_resource_free(bo, &bo->resource); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_validate_basic(struct kunit *test) @@ -265,7 +265,7 @@ static void ttm_bo_validate_basic(struct kunit *test) KUNIT_EXPECT_EQ(test, bo->resource->placement, DRM_BUDDY_TOPDOWN_ALLOCATION); - ttm_bo_put(bo); + ttm_bo_fini(bo); 
ttm_mock_manager_fini(priv->ttm_dev, snd_mem); } @@ -292,7 +292,7 @@ static void ttm_bo_validate_invalid_placement(struct kunit *test) KUNIT_EXPECT_EQ(test, err, -ENOMEM); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_validate_failed_alloc(struct kunit *test) @@ -321,7 +321,7 @@ static void ttm_bo_validate_failed_alloc(struct kunit *test) KUNIT_EXPECT_EQ(test, err, -ENOMEM); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_bad_manager_fini(priv->ttm_dev, mem_type); } @@ -353,7 +353,7 @@ static void ttm_bo_validate_pinned(struct kunit *test) ttm_bo_unpin(bo); dma_resv_unlock(bo->base.resv); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = { @@ -403,7 +403,7 @@ static void ttm_bo_validate_same_placement(struct kunit *test) KUNIT_EXPECT_EQ(test, err, 0); KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0); - ttm_bo_put(bo); + ttm_bo_fini(bo); if (params->mem_type != TTM_PL_SYSTEM) ttm_mock_manager_fini(priv->ttm_dev, params->mem_type); @@ -452,7 +452,7 @@ static void ttm_bo_validate_busy_placement(struct kunit *test) KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem); KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority])); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_bad_manager_fini(priv->ttm_dev, fst_mem); ttm_mock_manager_fini(priv->ttm_dev, snd_mem); } @@ -495,7 +495,7 @@ static void ttm_bo_validate_multihop(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2); KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, fst_mem); ttm_mock_manager_fini(priv->ttm_dev, tmp_mem); @@ -567,7 +567,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test) KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC); } - ttm_bo_put(bo); + ttm_bo_fini(bo); } static int threaded_dma_resv_signal(void *arg) @@ -635,7 +635,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test) /* Make sure we have an idle object at this point */ dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT); - ttm_bo_put(bo); + ttm_bo_fini(bo); } static void ttm_bo_validate_move_fence_signaled(struct kunit *test) @@ -668,7 +668,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test) KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type); KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size); - ttm_bo_put(bo); + ttm_bo_fini(bo); dma_fence_put(man->move); } @@ -753,7 +753,7 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test) else KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem); - ttm_bo_put(bo); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, fst_mem); ttm_mock_manager_fini(priv->ttm_dev, snd_mem); } @@ -807,8 +807,8 @@ static void ttm_bo_validate_happy_evict(struct kunit *test) KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type); for (i = 0; i < bo_no; i++) - ttm_bo_put(&bos[i]); - ttm_bo_put(bo_val); + ttm_bo_fini(&bos[i]); + ttm_bo_fini(bo_val); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); @@ -852,12 +852,12 @@ static void ttm_bo_validate_all_pinned_evict(struct kunit *test) KUNIT_EXPECT_EQ(test, err, -ENOMEM); - ttm_bo_put(bo_small); + ttm_bo_fini(bo_small); ttm_bo_reserve(bo_big, false, false, NULL); ttm_bo_unpin(bo_big); dma_resv_unlock(bo_big->base.resv); - ttm_bo_put(bo_big); + ttm_bo_fini(bo_big); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); @@ 
-916,13 +916,13 @@ static void ttm_bo_validate_allowed_only_evict(struct kunit *test) KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict); KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE); - ttm_bo_put(bo); - ttm_bo_put(bo_evictable); + ttm_bo_fini(bo); + ttm_bo_fini(bo_evictable); ttm_bo_reserve(bo_pinned, false, false, NULL); ttm_bo_unpin(bo_pinned); dma_resv_unlock(bo_pinned->base.resv); - ttm_bo_put(bo_pinned); + ttm_bo_fini(bo_pinned); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); @@ -973,8 +973,8 @@ static void ttm_bo_validate_deleted_evict(struct kunit *test) KUNIT_EXPECT_NULL(test, bo_big->ttm); KUNIT_EXPECT_NULL(test, bo_big->resource); - ttm_bo_put(bo_small); - ttm_bo_put(bo_big); + ttm_bo_fini(bo_small); + ttm_bo_fini(bo_big); ttm_mock_manager_fini(priv->ttm_dev, mem_type); } @@ -1025,8 +1025,8 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test) KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type); KUNIT_EXPECT_NULL(test, bo_val->resource); - ttm_bo_put(bo_init); - ttm_bo_put(bo_val); + ttm_bo_fini(bo_init); + ttm_bo_fini(bo_val); ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict); @@ -1070,8 +1070,8 @@ static void ttm_bo_validate_evict_gutting(struct kunit *test) KUNIT_ASSERT_NULL(test, bo_evict->resource); KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC); - ttm_bo_put(bo_evict); - ttm_bo_put(bo); + ttm_bo_fini(bo_evict); + ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, mem_type); } @@ -1128,9 +1128,9 @@ static void ttm_bo_validate_recrusive_evict(struct kunit *test) ttm_mock_manager_fini(priv->ttm_dev, mem_type); ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict); - ttm_bo_put(bo_val); - ttm_bo_put(bo_tt); - ttm_bo_put(bo_mock); + ttm_bo_fini(bo_val); + ttm_bo_fini(bo_tt); + ttm_bo_fini(bo_mock); } static struct kunit_case ttm_bo_validate_test_cases[] = { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 29423ceeec5c..fba2a68a556e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -318,18 +318,17 @@ static void ttm_bo_release(struct kref *kref) bo->destroy(bo); } -/** - * ttm_bo_put - * - * @bo: The buffer object. - * - * Unreference a buffer object. - */ +/* TODO: remove! 
*/ void ttm_bo_put(struct ttm_buffer_object *bo) { kref_put(&bo->kref, ttm_bo_release); } -EXPORT_SYMBOL(ttm_bo_put); + +void ttm_bo_fini(struct ttm_buffer_object *bo) +{ + ttm_bo_put(bo); +} +EXPORT_SYMBOL(ttm_bo_fini); static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h index 9d8b747a34db..e0d48eac74b0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_internal.h +++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h @@ -55,4 +55,6 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) return bo; } +void ttm_bo_put(struct ttm_buffer_object *bo); + #endif diff --git a/drivers/gpu/drm/tyr/Kconfig b/drivers/gpu/drm/tyr/Kconfig new file mode 100644 index 000000000000..4b55308fd2eb --- /dev/null +++ b/drivers/gpu/drm/tyr/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 or MIT + +config DRM_TYR + tristate "Tyr (Rust DRM support for ARM Mali CSF-based GPUs)" + depends on DRM=y + depends on RUST + depends on ARM || ARM64 || COMPILE_TEST + depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE + default n + help + Rust DRM driver for ARM Mali CSF-based GPUs. + + This driver is for Mali (or Immortalis) Valhall Gxxx GPUs. + + Note that the Mali-G68 and Mali-G78, while Valhall architecture, will + be supported with the panfrost driver as they are not CSF GPUs. + + If M is selected, the module will be called tyr. This driver is work + in progress and may not be functional. diff --git a/drivers/gpu/drm/tyr/Makefile b/drivers/gpu/drm/tyr/Makefile new file mode 100644 index 000000000000..ba545f65f2c0 --- /dev/null +++ b/drivers/gpu/drm/tyr/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 or MIT + +obj-$(CONFIG_DRM_TYR) += tyr.o diff --git a/drivers/gpu/drm/tyr/driver.rs b/drivers/gpu/drm/tyr/driver.rs new file mode 100644 index 000000000000..d5625dd1e41c --- /dev/null +++ b/drivers/gpu/drm/tyr/driver.rs @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +use kernel::c_str; +use kernel::clk::Clk; +use kernel::clk::OptionalClk; +use kernel::device::Bound; +use kernel::device::Core; +use kernel::device::Device; +use kernel::devres::Devres; +use kernel::drm; +use kernel::drm::ioctl; +use kernel::new_mutex; +use kernel::of; +use kernel::platform; +use kernel::prelude::*; +use kernel::regulator; +use kernel::regulator::Regulator; +use kernel::sizes::SZ_2M; +use kernel::sync::Arc; +use kernel::sync::Mutex; +use kernel::time; +use kernel::types::ARef; + +use crate::file::File; +use crate::gem::TyrObject; +use crate::gpu; +use crate::gpu::GpuInfo; +use crate::regs; + +pub(crate) type IoMem = kernel::io::mem::IoMem<SZ_2M>; + +/// Convenience type alias for the DRM device type for this driver. +pub(crate) type TyrDevice = drm::Device<TyrDriver>; + +#[pin_data(PinnedDrop)] +pub(crate) struct TyrDriver { + device: ARef<TyrDevice>, +} + +#[pin_data(PinnedDrop)] +pub(crate) struct TyrData { + pub(crate) pdev: ARef<platform::Device>, + + #[pin] + clks: Mutex<Clocks>, + + #[pin] + regulators: Mutex<Regulators>, + + /// Some information on the GPU. + /// + /// This is mainly queried by userspace, i.e.: Mesa. + pub(crate) gpu_info: GpuInfo, +} + +// Neither `Clk` nor `Regulator` implements `Send` or `Sync`, but they +// should. There are patches on the mailing list to address this, but they have +// not landed yet. +// +// For now, add this workaround so that this patch compiles with the promise +// that it will be removed in a future patch.
+// +// SAFETY: This will be removed in a future patch. +unsafe impl Send for TyrData {} +// SAFETY: This will be removed in a future patch. +unsafe impl Sync for TyrData {} + +fn issue_soft_reset(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result { + regs::GPU_CMD.write(dev, iomem, regs::GPU_CMD_SOFT_RESET)?; + + // TODO: We cannot poll, as there is no support in Rust currently, so we + // sleep. Change this when read_poll_timeout() is implemented in Rust. + kernel::time::delay::fsleep(time::Delta::from_millis(100)); + + if regs::GPU_IRQ_RAWSTAT.read(dev, iomem)? & regs::GPU_IRQ_RAWSTAT_RESET_COMPLETED == 0 { + dev_err!(dev, "GPU reset failed with errno\n"); + dev_err!( + dev, + "GPU_INT_RAWSTAT is {}\n", + regs::GPU_IRQ_RAWSTAT.read(dev, iomem)? + ); + + return Err(EIO); + } + + Ok(()) +} + +kernel::of_device_table!( + OF_TABLE, + MODULE_OF_TABLE, + <TyrDriver as platform::Driver>::IdInfo, + [ + (of::DeviceId::new(c_str!("rockchip,rk3588-mali")), ()), + (of::DeviceId::new(c_str!("arm,mali-valhall-csf")), ()) + ] +); + +impl platform::Driver for TyrDriver { + type IdInfo = (); + const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE); + + fn probe( + pdev: &platform::Device<Core>, + _info: Option<&Self::IdInfo>, + ) -> Result<Pin<KBox<Self>>> { + let core_clk = Clk::get(pdev.as_ref(), Some(c_str!("core")))?; + let stacks_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("stacks")))?; + let coregroup_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("coregroup")))?; + + core_clk.prepare_enable()?; + stacks_clk.prepare_enable()?; + coregroup_clk.prepare_enable()?; + + let mali_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("mali"))?; + let sram_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("sram"))?; + + let request = pdev.io_request_by_index(0).ok_or(ENODEV)?; + let iomem = Arc::pin_init(request.iomap_sized::<SZ_2M>(), GFP_KERNEL)?; + + issue_soft_reset(pdev.as_ref(), &iomem)?; + gpu::l2_power_on(pdev.as_ref(), &iomem)?; + + let gpu_info = GpuInfo::new(pdev.as_ref(), &iomem)?; + gpu_info.log(pdev); + + let platform: ARef<platform::Device> = pdev.into(); + + let data = try_pin_init!(TyrData { + pdev: platform.clone(), + clks <- new_mutex!(Clocks { + core: core_clk, + stacks: stacks_clk, + coregroup: coregroup_clk, + }), + regulators <- new_mutex!(Regulators { + mali: mali_regulator, + sram: sram_regulator, + }), + gpu_info, + }); + + let tdev: ARef<TyrDevice> = drm::Device::new(pdev.as_ref(), data)?; + drm::driver::Registration::new_foreign_owned(&tdev, pdev.as_ref(), 0)?; + + let driver = KBox::pin_init(try_pin_init!(TyrDriver { device: tdev }), GFP_KERNEL)?; + + // We need this to be dev_info!() because dev_dbg!() does not work at + // all in Rust for now, and we need to see whether probe succeeded. + dev_info!(pdev.as_ref(), "Tyr initialized correctly.\n"); + Ok(driver) + } +} + +#[pinned_drop] +impl PinnedDrop for TyrDriver { + fn drop(self: Pin<&mut Self>) {} +} + +#[pinned_drop] +impl PinnedDrop for TyrData { + fn drop(self: Pin<&mut Self>) { + // TODO: the type-state pattern for Clks will fix this. + let clks = self.clks.lock(); + clks.core.disable_unprepare(); + clks.stacks.disable_unprepare(); + clks.coregroup.disable_unprepare(); + } +} + +// We need to retain the name "panthor" to achieve drop-in compatibility with +// the C driver in the userspace stack. 
+const INFO: drm::DriverInfo = drm::DriverInfo { + major: 1, + minor: 5, + patchlevel: 0, + name: c_str!("panthor"), + desc: c_str!("ARM Mali Tyr DRM driver"), +}; + +#[vtable] +impl drm::Driver for TyrDriver { + type Data = TyrData; + type File = File; + type Object = drm::gem::Object<TyrObject>; + + const INFO: drm::DriverInfo = INFO; + + kernel::declare_drm_ioctls! { + (PANTHOR_DEV_QUERY, drm_panthor_dev_query, ioctl::RENDER_ALLOW, File::dev_query), + } +} + +#[pin_data] +struct Clocks { + core: Clk, + stacks: OptionalClk, + coregroup: OptionalClk, +} + +#[pin_data] +struct Regulators { + mali: Regulator<regulator::Enabled>, + sram: Regulator<regulator::Enabled>, +} diff --git a/drivers/gpu/drm/tyr/file.rs b/drivers/gpu/drm/tyr/file.rs new file mode 100644 index 000000000000..0ef432947b73 --- /dev/null +++ b/drivers/gpu/drm/tyr/file.rs @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +use kernel::drm; +use kernel::prelude::*; +use kernel::uaccess::UserSlice; +use kernel::uapi; + +use crate::driver::TyrDevice; +use crate::TyrDriver; + +#[pin_data] +pub(crate) struct File {} + +/// Convenience type alias for our DRM `File` type +pub(crate) type DrmFile = drm::file::File<File>; + +impl drm::file::DriverFile for File { + type Driver = TyrDriver; + + fn open(_dev: &drm::Device<Self::Driver>) -> Result<Pin<KBox<Self>>> { + KBox::try_pin_init(try_pin_init!(Self {}), GFP_KERNEL) + } +} + +impl File { + pub(crate) fn dev_query( + tdev: &TyrDevice, + devquery: &mut uapi::drm_panthor_dev_query, + _file: &DrmFile, + ) -> Result<u32> { + if devquery.pointer == 0 { + match devquery.type_ { + uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => { + devquery.size = core::mem::size_of_val(&tdev.gpu_info) as u32; + Ok(0) + } + _ => Err(EINVAL), + } + } else { + match devquery.type_ { + uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => { + let mut writer = UserSlice::new( + UserPtr::from_addr(devquery.pointer as usize), + devquery.size as usize, + ) + .writer(); + + writer.write(&tdev.gpu_info)?; + + Ok(0) + } + _ => Err(EINVAL), + } + } + } +} diff --git a/drivers/gpu/drm/tyr/gem.rs b/drivers/gpu/drm/tyr/gem.rs new file mode 100644 index 000000000000..1273bf89dbd5 --- /dev/null +++ b/drivers/gpu/drm/tyr/gem.rs @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +use crate::driver::TyrDevice; +use crate::driver::TyrDriver; +use kernel::drm::gem; +use kernel::prelude::*; + +/// GEM Object inner driver data +#[pin_data] +pub(crate) struct TyrObject {} + +impl gem::DriverObject for TyrObject { + type Driver = TyrDriver; + + fn new(_dev: &TyrDevice, _size: usize) -> impl PinInit<Self, Error> { + try_pin_init!(TyrObject {}) + } +} diff --git a/drivers/gpu/drm/tyr/gpu.rs b/drivers/gpu/drm/tyr/gpu.rs new file mode 100644 index 000000000000..6c582910dd5d --- /dev/null +++ b/drivers/gpu/drm/tyr/gpu.rs @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +use kernel::bits::genmask_u32; +use kernel::device::Bound; +use kernel::device::Device; +use kernel::devres::Devres; +use kernel::platform; +use kernel::prelude::*; +use kernel::time; +use kernel::transmute::AsBytes; + +use crate::driver::IoMem; +use crate::regs; + +/// Struct containing information that can be queried by userspace. This is read from +/// the GPU's registers. +/// +/// # Invariants +/// +/// - The layout of this struct is identical to the C `struct drm_panthor_gpu_info`.
+#[repr(C)] +pub(crate) struct GpuInfo { + pub(crate) gpu_id: u32, + pub(crate) gpu_rev: u32, + pub(crate) csf_id: u32, + pub(crate) l2_features: u32, + pub(crate) tiler_features: u32, + pub(crate) mem_features: u32, + pub(crate) mmu_features: u32, + pub(crate) thread_features: u32, + pub(crate) max_threads: u32, + pub(crate) thread_max_workgroup_size: u32, + pub(crate) thread_max_barrier_size: u32, + pub(crate) coherency_features: u32, + pub(crate) texture_features: [u32; 4], + pub(crate) as_present: u32, + pub(crate) pad0: u32, + pub(crate) shader_present: u64, + pub(crate) l2_present: u64, + pub(crate) tiler_present: u64, + pub(crate) core_features: u32, + pub(crate) pad: u32, +} + +impl GpuInfo { + pub(crate) fn new(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<Self> { + let gpu_id = regs::GPU_ID.read(dev, iomem)?; + let csf_id = regs::GPU_CSF_ID.read(dev, iomem)?; + let gpu_rev = regs::GPU_REVID.read(dev, iomem)?; + let core_features = regs::GPU_CORE_FEATURES.read(dev, iomem)?; + let l2_features = regs::GPU_L2_FEATURES.read(dev, iomem)?; + let tiler_features = regs::GPU_TILER_FEATURES.read(dev, iomem)?; + let mem_features = regs::GPU_MEM_FEATURES.read(dev, iomem)?; + let mmu_features = regs::GPU_MMU_FEATURES.read(dev, iomem)?; + let thread_features = regs::GPU_THREAD_FEATURES.read(dev, iomem)?; + let max_threads = regs::GPU_THREAD_MAX_THREADS.read(dev, iomem)?; + let thread_max_workgroup_size = regs::GPU_THREAD_MAX_WORKGROUP_SIZE.read(dev, iomem)?; + let thread_max_barrier_size = regs::GPU_THREAD_MAX_BARRIER_SIZE.read(dev, iomem)?; + let coherency_features = regs::GPU_COHERENCY_FEATURES.read(dev, iomem)?; + + let texture_features = regs::GPU_TEXTURE_FEATURES0.read(dev, iomem)?; + + let as_present = regs::GPU_AS_PRESENT.read(dev, iomem)?; + + let shader_present = u64::from(regs::GPU_SHADER_PRESENT_LO.read(dev, iomem)?); + let shader_present = + shader_present | u64::from(regs::GPU_SHADER_PRESENT_HI.read(dev, iomem)?) << 32; + + let tiler_present = u64::from(regs::GPU_TILER_PRESENT_LO.read(dev, iomem)?); + let tiler_present = + tiler_present | u64::from(regs::GPU_TILER_PRESENT_HI.read(dev, iomem)?) << 32; + + let l2_present = u64::from(regs::GPU_L2_PRESENT_LO.read(dev, iomem)?); + let l2_present = l2_present | u64::from(regs::GPU_L2_PRESENT_HI.read(dev, iomem)?) << 32; + + Ok(Self { + gpu_id, + gpu_rev, + csf_id, + l2_features, + tiler_features, + mem_features, + mmu_features, + thread_features, + max_threads, + thread_max_workgroup_size, + thread_max_barrier_size, + coherency_features, + // TODO: Add texture_features_{1,2,3}. 
+ texture_features: [texture_features, 0, 0, 0], + as_present, + pad0: 0, + shader_present, + l2_present, + tiler_present, + core_features, + pad: 0, + }) + } + + pub(crate) fn log(&self, pdev: &platform::Device) { + let major = (self.gpu_id >> 16) & 0xff; + let minor = (self.gpu_id >> 8) & 0xff; + let status = self.gpu_id & 0xff; + + let model_name = if let Some(model) = GPU_MODELS + .iter() + .find(|&f| f.major == major && f.minor == minor) + { + model.name + } else { + "unknown" + }; + + dev_info!( + pdev.as_ref(), + "mali-{} id 0x{:x} major 0x{:x} minor 0x{:x} status 0x{:x}", + model_name, + self.gpu_id >> 16, + major, + minor, + status + ); + + dev_info!( + pdev.as_ref(), + "Features: L2:{:#x} Tiler:{:#x} Mem:{:#x} MMU:{:#x} AS:{:#x}", + self.l2_features, + self.tiler_features, + self.mem_features, + self.mmu_features, + self.as_present + ); + + dev_info!( + pdev.as_ref(), + "shader_present=0x{:016x} l2_present=0x{:016x} tiler_present=0x{:016x}", + self.shader_present, + self.l2_present, + self.tiler_present + ); + } + + /// Returns the number of virtual address bits supported by the GPU. + #[expect(dead_code)] + pub(crate) fn va_bits(&self) -> u32 { + self.mmu_features & genmask_u32(0..=7) + } + + /// Returns the number of physical address bits supported by the GPU. + #[expect(dead_code)] + pub(crate) fn pa_bits(&self) -> u32 { + (self.mmu_features >> 8) & genmask_u32(0..=7) + } +} + +// SAFETY: `GpuInfo`'s invariant guarantees that it is the same type that is +// already exposed to userspace by the C driver. This implies that it fulfills +// the requirements for `AsBytes`. +// +// This means: +// +// - No implicit padding, +// - No kernel pointers, +// - No interior mutability. +unsafe impl AsBytes for GpuInfo {} + +struct GpuModels { + name: &'static str, + major: u32, + minor: u32, +} + +const GPU_MODELS: [GpuModels; 1] = [GpuModels { + name: "g610", + major: 10, + minor: 7, +}]; + +#[allow(dead_code)] +pub(crate) struct GpuId { + pub(crate) arch_major: u32, + pub(crate) arch_minor: u32, + pub(crate) arch_rev: u32, + pub(crate) prod_major: u32, + pub(crate) ver_major: u32, + pub(crate) ver_minor: u32, + pub(crate) ver_status: u32, +} + +impl From<u32> for GpuId { + fn from(value: u32) -> Self { + GpuId { + arch_major: (value & genmask_u32(28..=31)) >> 28, + arch_minor: (value & genmask_u32(24..=27)) >> 24, + arch_rev: (value & genmask_u32(20..=23)) >> 20, + prod_major: (value & genmask_u32(16..=19)) >> 16, + ver_major: (value & genmask_u32(12..=15)) >> 12, + ver_minor: (value & genmask_u32(4..=11)) >> 4, + ver_status: value & genmask_u32(0..=3), + } + } +} + +/// Powers on the l2 block. +pub(crate) fn l2_power_on(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result { + regs::L2_PWRON_LO.write(dev, iomem, 1)?; + + // TODO: We cannot poll, as there is no support in Rust currently, so we + // sleep. Change this when read_poll_timeout() is implemented in Rust. + kernel::time::delay::fsleep(time::Delta::from_millis(100)); + + if regs::L2_READY_LO.read(dev, iomem)? != 1 { + dev_err!(dev, "Failed to power on the GPU\n"); + return Err(EIO); + } + + Ok(()) +} diff --git a/drivers/gpu/drm/tyr/regs.rs b/drivers/gpu/drm/tyr/regs.rs new file mode 100644 index 000000000000..f46933aaa221 --- /dev/null +++ b/drivers/gpu/drm/tyr/regs.rs @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +// We don't expect that all the registers and fields will be used, even in the +// future. +// +// Nevertheless, it is useful to have most of them defined, like the C driver +// does. 
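[Editor's note] To make the bitfield split in GpuId::from() above concrete, here is a worked decode in plain C. The register value 0xA8670000 is invented for the example (not captured from real hardware); the field boundaries are exactly the genmask_u32() ranges from the Rust code.

```c
#include <stdint.h>
#include <stdio.h>

/* Extract bits hi..lo of v, mirroring (v & genmask_u32(lo..=hi)) >> lo. */
#define FIELD(v, hi, lo) (((v) >> (lo)) & ((1u << ((hi) - (lo) + 1)) - 1))

int main(void)
{
	uint32_t gpu_id = 0xA8670000; /* invented example value */

	printf("arch %u.%u.%u prod_major %u ver %u.%u status %u\n",
	       FIELD(gpu_id, 31, 28),	/* arch_major = 0xA = 10 */
	       FIELD(gpu_id, 27, 24),	/* arch_minor = 8 */
	       FIELD(gpu_id, 23, 20),	/* arch_rev   = 6 */
	       FIELD(gpu_id, 19, 16),	/* prod_major = 7 */
	       FIELD(gpu_id, 15, 12),	/* ver_major  = 0 */
	       FIELD(gpu_id, 11, 4),	/* ver_minor  = 0 */
	       FIELD(gpu_id, 3, 0));	/* ver_status = 0 */
	return 0;
}
```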
+#![allow(dead_code)] + +use kernel::bits::bit_u32; +use kernel::device::Bound; +use kernel::device::Device; +use kernel::devres::Devres; +use kernel::prelude::*; + +use crate::driver::IoMem; + +/// Represents a register in the Register Set +/// +/// TODO: Replace this with the Nova `register!()` macro when it is available. +/// In particular, this will automatically give us 64bit register reads and +/// writes. +pub(crate) struct Register<const OFFSET: usize>; + +impl<const OFFSET: usize> Register<OFFSET> { + #[inline] + pub(crate) fn read(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<u32> { + let value = (*iomem).access(dev)?.read32(OFFSET); + Ok(value) + } + + #[inline] + pub(crate) fn write(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>, value: u32) -> Result { + (*iomem).access(dev)?.write32(value, OFFSET); + Ok(()) + } +} + +pub(crate) const GPU_ID: Register<0x0> = Register; +pub(crate) const GPU_L2_FEATURES: Register<0x4> = Register; +pub(crate) const GPU_CORE_FEATURES: Register<0x8> = Register; +pub(crate) const GPU_CSF_ID: Register<0x1c> = Register; +pub(crate) const GPU_REVID: Register<0x280> = Register; +pub(crate) const GPU_TILER_FEATURES: Register<0xc> = Register; +pub(crate) const GPU_MEM_FEATURES: Register<0x10> = Register; +pub(crate) const GPU_MMU_FEATURES: Register<0x14> = Register; +pub(crate) const GPU_AS_PRESENT: Register<0x18> = Register; +pub(crate) const GPU_IRQ_RAWSTAT: Register<0x20> = Register; + +pub(crate) const GPU_IRQ_RAWSTAT_FAULT: u32 = bit_u32(0); +pub(crate) const GPU_IRQ_RAWSTAT_PROTECTED_FAULT: u32 = bit_u32(1); +pub(crate) const GPU_IRQ_RAWSTAT_RESET_COMPLETED: u32 = bit_u32(8); +pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_SINGLE: u32 = bit_u32(9); +pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_ALL: u32 = bit_u32(10); +pub(crate) const GPU_IRQ_RAWSTAT_CLEAN_CACHES_COMPLETED: u32 = bit_u32(17); +pub(crate) const GPU_IRQ_RAWSTAT_DOORBELL_STATUS: u32 = bit_u32(18); +pub(crate) const GPU_IRQ_RAWSTAT_MCU_STATUS: u32 = bit_u32(19); + +pub(crate) const GPU_IRQ_CLEAR: Register<0x24> = Register; +pub(crate) const GPU_IRQ_MASK: Register<0x28> = Register; +pub(crate) const GPU_IRQ_STAT: Register<0x2c> = Register; +pub(crate) const GPU_CMD: Register<0x30> = Register; +pub(crate) const GPU_CMD_SOFT_RESET: u32 = 1 | (1 << 8); +pub(crate) const GPU_CMD_HARD_RESET: u32 = 1 | (2 << 8); +pub(crate) const GPU_THREAD_FEATURES: Register<0xac> = Register; +pub(crate) const GPU_THREAD_MAX_THREADS: Register<0xa0> = Register; +pub(crate) const GPU_THREAD_MAX_WORKGROUP_SIZE: Register<0xa4> = Register; +pub(crate) const GPU_THREAD_MAX_BARRIER_SIZE: Register<0xa8> = Register; +pub(crate) const GPU_TEXTURE_FEATURES0: Register<0xb0> = Register; +pub(crate) const GPU_SHADER_PRESENT_LO: Register<0x100> = Register; +pub(crate) const GPU_SHADER_PRESENT_HI: Register<0x104> = Register; +pub(crate) const GPU_TILER_PRESENT_LO: Register<0x110> = Register; +pub(crate) const GPU_TILER_PRESENT_HI: Register<0x114> = Register; +pub(crate) const GPU_L2_PRESENT_LO: Register<0x120> = Register; +pub(crate) const GPU_L2_PRESENT_HI: Register<0x124> = Register; +pub(crate) const L2_READY_LO: Register<0x160> = Register; +pub(crate) const L2_READY_HI: Register<0x164> = Register; +pub(crate) const L2_PWRON_LO: Register<0x1a0> = Register; +pub(crate) const L2_PWRON_HI: Register<0x1a4> = Register; +pub(crate) const L2_PWRTRANS_LO: Register<0x220> = Register; +pub(crate) const L2_PWRTRANS_HI: Register<0x204> = Register; +pub(crate) const L2_PWRACTIVE_LO: Register<0x260> = Register; 
+pub(crate) const L2_PWRACTIVE_HI: Register<0x264> = Register; + +pub(crate) const MCU_CONTROL: Register<0x700> = Register; +pub(crate) const MCU_CONTROL_ENABLE: u32 = 1; +pub(crate) const MCU_CONTROL_AUTO: u32 = 2; +pub(crate) const MCU_CONTROL_DISABLE: u32 = 0; + +pub(crate) const MCU_STATUS: Register<0x704> = Register; +pub(crate) const MCU_STATUS_DISABLED: u32 = 0; +pub(crate) const MCU_STATUS_ENABLED: u32 = 1; +pub(crate) const MCU_STATUS_HALT: u32 = 2; +pub(crate) const MCU_STATUS_FATAL: u32 = 3; + +pub(crate) const GPU_COHERENCY_FEATURES: Register<0x300> = Register; + +pub(crate) const JOB_IRQ_RAWSTAT: Register<0x1000> = Register; +pub(crate) const JOB_IRQ_CLEAR: Register<0x1004> = Register; +pub(crate) const JOB_IRQ_MASK: Register<0x1008> = Register; +pub(crate) const JOB_IRQ_STAT: Register<0x100c> = Register; + +pub(crate) const JOB_IRQ_GLOBAL_IF: u32 = bit_u32(31); + +pub(crate) const MMU_IRQ_RAWSTAT: Register<0x2000> = Register; +pub(crate) const MMU_IRQ_CLEAR: Register<0x2004> = Register; +pub(crate) const MMU_IRQ_MASK: Register<0x2008> = Register; +pub(crate) const MMU_IRQ_STAT: Register<0x200c> = Register; diff --git a/drivers/gpu/drm/tyr/tyr.rs b/drivers/gpu/drm/tyr/tyr.rs new file mode 100644 index 000000000000..861d1db43072 --- /dev/null +++ b/drivers/gpu/drm/tyr/tyr.rs @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +//! Arm Mali Tyr DRM driver. +//! +//! The name "Tyr" is inspired by Norse mythology, reflecting Arm's tradition of +//! naming their GPUs after Nordic mythological figures and places. + +use crate::driver::TyrDriver; + +mod driver; +mod file; +mod gem; +mod gpu; +mod regs; + +kernel::module_platform_driver! { + type: TyrDriver, + name: "tyr", + authors: ["The Tyr driver authors"], + description: "Arm Mali Tyr DRM driver", + license: "Dual MIT/GPL", +} diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 123ab0ce1781..bb8c40be3250 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -35,6 +35,7 @@ config DRM_VC4_HDMI_CEC bool "Broadcom VC4 HDMI CEC Support" depends on DRM_VC4 select CEC_CORE + select DRM_DISPLAY_HDMI_CEC_HELPER help Choose this option if you have a Broadcom VC4 GPU and want to use CEC. 
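[Editor's note] With regs.rs and the module glue in place, the Tyr skeleton is complete. For context, the dev_query() implementation in file.rs earlier follows the usual two-call uAPI size negotiation: a first call with pointer == 0 returns the struct size, a second call copies the data out. A hedged userspace sketch, assuming panthor's uAPI header (which Tyr deliberately reuses) and with error handling trimmed:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panthor_drm.h>

static int query_gpu_info(int fd, struct drm_panthor_gpu_info *info)
{
	struct drm_panthor_dev_query q = {
		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
		.pointer = 0,	/* first call: ask for the size only */
	};

	if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q))
		return -1;

	if (q.size > sizeof(*info))
		q.size = sizeof(*info);	/* tolerate a newer, larger kernel struct */
	q.pointer = (uint64_t)(uintptr_t)info;

	return ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);	/* second call: copy */
}
```

Because the kernel writes at most devquery.size bytes, old binaries keep working as the info struct grows.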
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 07c91b450f93..049c92dd5d27 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -32,6 +32,7 @@ */ #include <drm/display/drm_hdmi_audio_helper.h> +#include <drm/display/drm_hdmi_cec_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/display/drm_hdmi_state_helper.h> #include <drm/display/drm_scdc_helper.h> @@ -375,14 +376,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi, drm_atomic_helper_connector_hdmi_hotplug(connector, status); - if (status == connector_status_disconnected) { - cec_phys_addr_invalidate(vc4_hdmi->cec_adap); - return; - } - - cec_s_phys_addr(vc4_hdmi->cec_adap, - connector->display_info.source_physical_address, false); - if (status != connector_status_connected) return; @@ -2384,8 +2377,8 @@ static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv) struct vc4_hdmi *vc4_hdmi = priv; if (vc4_hdmi->cec_rx_msg.len) - cec_received_msg(vc4_hdmi->cec_adap, - &vc4_hdmi->cec_rx_msg); + drm_connector_hdmi_cec_received_msg(&vc4_hdmi->connector, + &vc4_hdmi->cec_rx_msg); return IRQ_HANDLED; } @@ -2395,15 +2388,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv) struct vc4_hdmi *vc4_hdmi = priv; if (vc4_hdmi->cec_tx_ok) { - cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK, - 0, 0, 0, 0); + drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector, + CEC_TX_STATUS_OK, + 0, 0, 0, 0); } else { /* * This CEC implementation makes 1 retry, so if we * get a NACK, then that means it made 2 attempts. */ - cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK, - 0, 2, 0, 0); + drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector, + CEC_TX_STATUS_NACK, + 0, 2, 0, 0); } return IRQ_HANDLED; } @@ -2560,9 +2555,9 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) return ret; } -static int vc4_hdmi_cec_enable(struct cec_adapter *adap) +static int vc4_hdmi_cec_enable(struct drm_connector *connector) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *drm = vc4_hdmi->connector.dev; /* clock period in microseconds */ const u32 usecs = 1000000 / CEC_CLOCK_FREQ; @@ -2627,9 +2622,9 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap) return 0; } -static int vc4_hdmi_cec_disable(struct cec_adapter *adap) +static int vc4_hdmi_cec_disable(struct drm_connector *connector) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *drm = vc4_hdmi->connector.dev; unsigned long flags; int idx; @@ -2663,17 +2658,17 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap) return 0; } -static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) +static int vc4_hdmi_cec_adap_enable(struct drm_connector *connector, bool enable) { if (enable) - return vc4_hdmi_cec_enable(adap); + return vc4_hdmi_cec_enable(connector); else - return vc4_hdmi_cec_disable(adap); + return vc4_hdmi_cec_disable(connector); } -static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) +static int vc4_hdmi_cec_adap_log_addr(struct drm_connector *connector, u8 log_addr) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *drm = vc4_hdmi->connector.dev; unsigned long flags; int idx; @@ -2699,10 +2694,10 @@ static int vc4_hdmi_cec_adap_log_addr(struct 
cec_adapter *adap, u8 log_addr) return 0; } -static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, +static int vc4_hdmi_cec_adap_transmit(struct drm_connector *connector, u8 attempts, u32 signal_free_time, struct cec_msg *msg) { - struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct drm_device *dev = vc4_hdmi->connector.dev; unsigned long flags; u32 val; @@ -2745,84 +2740,65 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, return 0; } -static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { - .adap_enable = vc4_hdmi_cec_adap_enable, - .adap_log_addr = vc4_hdmi_cec_adap_log_addr, - .adap_transmit = vc4_hdmi_cec_adap_transmit, -}; - -static void vc4_hdmi_cec_release(void *ptr) -{ - struct vc4_hdmi *vc4_hdmi = ptr; - - cec_unregister_adapter(vc4_hdmi->cec_adap); - vc4_hdmi->cec_adap = NULL; -} - -static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) +static int vc4_hdmi_cec_init(struct drm_connector *connector) { - struct cec_connector_info conn_info; + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); struct platform_device *pdev = vc4_hdmi->pdev; struct device *dev = &pdev->dev; int ret; - if (!of_property_present(dev->of_node, "interrupts")) { - dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); - return 0; - } - - vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, - vc4_hdmi, - vc4_hdmi->variant->card_name, - CEC_CAP_DEFAULTS | - CEC_CAP_CONNECTOR_INFO, 1); - ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap); - if (ret < 0) - return ret; - - cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); - cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); - if (vc4_hdmi->variant->external_irq_controller) { ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"), vc4_cec_irq_handler_rx_bare, vc4_cec_irq_handler_rx_thread, 0, "vc4 hdmi cec rx", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + return ret; ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"), vc4_cec_irq_handler_tx_bare, vc4_cec_irq_handler_tx_thread, 0, "vc4 hdmi cec tx", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + return ret; } else { ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), vc4_cec_irq_handler, vc4_cec_irq_handler_thread, 0, "vc4 hdmi cec", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + return ret; } - ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); - if (ret < 0) - goto err_delete_cec_adap; + return 0; +} + +static const struct drm_connector_hdmi_cec_funcs vc4_hdmi_cec_funcs = { + .init = vc4_hdmi_cec_init, + .enable = vc4_hdmi_cec_adap_enable, + .log_addr = vc4_hdmi_cec_adap_log_addr, + .transmit = vc4_hdmi_cec_adap_transmit, +}; + +static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi) +{ + struct platform_device *pdev = vc4_hdmi->pdev; + struct device *dev = &pdev->dev; + + if (!of_property_present(dev->of_node, "interrupts")) { + dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); + return 0; + } /* - * NOTE: Strictly speaking, we should probably use a DRM-managed - * registration there to avoid removing the CEC adapter by the - * time the DRM driver doesn't have any user anymore. + * NOTE: the CEC adapter will be unregistered by drmm cleanup from + * drm_managed_release(), which is called from drm_dev_release() + * during device unbind. 
* * However, the CEC framework already cleans up the CEC adapter * only when the last user has closed its file descriptor, so we * don't need to handle it in DRM. * - * By the time the device-managed hook is executed, we will give - * up our reference to the CEC adapter and therefore don't - * really care when it's actually freed. - * * There's still a problematic sequence: if we unregister our * CEC adapter, but the userspace keeps a handle on the CEC * adapter but not the DRM device for some reason. In such a @@ -2833,19 +2809,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) * the CEC framework already handles this too, by calling * cec_is_registered() in cec_ioctl() and cec_poll(). */ - ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi); - if (ret) - return ret; - - return 0; - -err_delete_cec_adap: - cec_delete_adapter(vc4_hdmi->cec_adap); - - return ret; + return drmm_connector_hdmi_cec_register(&vc4_hdmi->connector, + &vc4_hdmi_cec_funcs, + vc4_hdmi->variant->card_name, + 1, + &pdev->dev); } #else -static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) +static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi) { return 0; } @@ -3250,7 +3221,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) if (ret) goto err_put_runtime_pm; - ret = vc4_hdmi_cec_init(vc4_hdmi); + ret = vc4_hdmi_cec_register(vc4_hdmi); if (ret) goto err_put_runtime_pm; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index a31157c99bee..8d069718df00 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -147,7 +147,6 @@ struct vc4_hdmi { */ bool disable_wifi_frequencies; - struct cec_adapter *cec_adap; struct cec_msg cec_rx_msg; bool cec_tx_ok; bool cec_irq_was_rx; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index eedf1fe60be7..39f8c46550c2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -37,7 +37,7 @@ static void vmw_gem_object_free(struct drm_gem_object *gobj) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); if (bo) - ttm_bo_put(bo); + ttm_bo_fini(bo); } static int vmw_gem_object_open(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 714d5702dfd7..7219f6b884b6 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -40,12 +40,12 @@ config DRM_XE select DRM_TTM select DRM_TTM_HELPER select DRM_EXEC + select DRM_GPUSVM if !UML && DEVICE_PRIVATE select DRM_GPUVM select DRM_SCHED select MMU_NOTIFIER select WANT_DEV_COREDUMP select AUXILIARY_BUS - select HMM_MIRROR select REGMAP if I2C help Driver for Intel Xe2 series GPUs and later. 
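[Editor's note] Back to the vc4 CEC rework above: it is the reference conversion for the new shared helper. A driver fills in drm_connector_hdmi_cec_funcs and hands adapter lifetime to drmm_connector_hdmi_cec_register(); the removal of the cec_s_phys_addr() calls from the hotplug handler suggests the helper also keeps the physical address in sync. A condensed sketch under hypothetical "foo" names, with stub bodies standing in for real hardware programming:

```c
#include <drm/drm_connector.h>
#include <drm/display/drm_hdmi_cec_helper.h>
#include <media/cec.h>

struct foo_hdmi {
	struct drm_connector connector;
};

/* Callbacks now receive the drm_connector rather than a cec_adapter, so
 * cec_get_drvdata() lookups become container_of()-style helpers.
 */
static int foo_cec_init(struct drm_connector *connector)
{
	return 0;	/* request CEC IRQs here, as vc4 does */
}

static int foo_cec_enable(struct drm_connector *connector, bool enable)
{
	return 0;	/* power the CEC block up or down */
}

static int foo_cec_log_addr(struct drm_connector *connector, u8 log_addr)
{
	return 0;	/* program the logical address */
}

static int foo_cec_transmit(struct drm_connector *connector, u8 attempts,
			    u32 signal_free_time, struct cec_msg *msg)
{
	return 0;	/* queue the message to the hardware */
}

static const struct drm_connector_hdmi_cec_funcs foo_cec_funcs = {
	.init     = foo_cec_init,
	.enable   = foo_cec_enable,
	.log_addr = foo_cec_log_addr,
	.transmit = foo_cec_transmit,
};

/* One call replaces the open-coded cec_allocate_adapter() /
 * cec_s_conn_info() / cec_register_adapter() sequence, and the drmm
 * action unregisters the adapter when the DRM device goes away.
 */
static int foo_cec_register(struct foo_hdmi *foo, struct device *dev)
{
	return drmm_connector_hdmi_cec_register(&foo->connector,
						&foo_cec_funcs,
						"foo-hdmi", 1, dev);
}
```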
Experimental support diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug index 01735c6ece8b..87902b4bd6d3 100644 --- a/drivers/gpu/drm/xe/Kconfig.debug +++ b/drivers/gpu/drm/xe/Kconfig.debug @@ -104,6 +104,7 @@ config DRM_XE_DEBUG_GUC config DRM_XE_USERPTR_INVAL_INJECT bool "Inject userptr invalidation -EINVAL errors" + depends on DRM_GPUSVM default n help Choose this option when debugging error paths that diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 987e4fe10538..d9c6cf0f189e 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -84,6 +84,7 @@ xe-y += xe_bb.o \ xe_hw_error.o \ xe_hw_fence.o \ xe_irq.o \ + xe_late_bind_fw.o \ xe_lrc.o \ xe_migrate.o \ xe_mmio.o \ @@ -130,6 +131,7 @@ xe-y += xe_bb.o \ xe_tuning.o \ xe_uc.o \ xe_uc_fw.o \ + xe_validation.o \ xe_vm.o \ xe_vm_madvise.o \ xe_vram.o \ @@ -140,8 +142,8 @@ xe-y += xe_bb.o \ xe_wopcm.o xe-$(CONFIG_I2C) += xe_i2c.o -xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o +xe-$(CONFIG_DRM_GPUSVM) += xe_userptr.o # graphics hardware monitoring (HWMON) support xe-$(CONFIG_HWMON) += xe_hwmon.o @@ -210,6 +212,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/xe_dsb_buffer.o \ display/xe_fb_pin.o \ display/xe_hdcp_gsc.o \ + display/xe_panic.o \ display/xe_plane_initial.o \ display/xe_tdf.o @@ -325,6 +328,7 @@ ifeq ($(CONFIG_DEBUG_FS),y) xe_gt_stats.o \ xe_guc_debugfs.o \ xe_huc_debugfs.o \ + xe_tile_debugfs.o \ xe_uc_debugfs.o xe-$(CONFIG_PCI_IOV) += xe_gt_sriov_pf_debugfs.o diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index d8cf68a0516d..31090c69dfbe 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -117,6 +117,7 @@ enum xe_guc_action { XE_GUC_ACTION_ENTER_S_STATE = 0x501, XE_GUC_ACTION_EXIT_S_STATE = 0x502, XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506, + XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509, XE_GUC_ACTION_SCHED_CONTEXT = 0x1000, XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001, XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002, @@ -154,6 +155,8 @@ enum xe_guc_action { XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003, XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004, XE_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005, + XE_GUC_ACTION_TEST_G2G_SEND = 0xF001, + XE_GUC_ACTION_TEST_G2G_RECV = 0xF002, XE_GUC_ACTION_LIMIT }; diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h index b28c8fa061f7..ce5c59517528 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h @@ -210,6 +210,11 @@ struct slpc_shared_data { u8 reserved_mode_definition[4096]; } __packed; +enum slpc_power_profile { + SLPC_POWER_PROFILE_BASE = 0x0, + SLPC_POWER_PROFILE_POWER_SAVING = 0x1 +}; + /** * DOC: SLPC H2G MESSAGE FORMAT * diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h index 0e78351c6ef5..265a135e7061 100644 --- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h @@ -17,6 +17,7 @@ * | 0 | 31:16 | **KEY** - KLV key identifier | * | | | - `GuC Self Config KLVs`_ | * | | | - `GuC Opt In Feature KLVs`_ | + * | | | - `GuC Scheduling Policies KLVs`_ | * | | | - `GuC VGT Policy KLVs`_ | * | | | - `GuC VF Configuration KLVs`_ | * | | | | @@ -153,6 +154,30 @@ enum { #define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u /** + * DOC: GuC Scheduling Policies KLVs + * + * `GuC 
KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV. + + * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001 + Some platforms do not allow concurrent execution of RCS and CCS + workloads from different address spaces. By default, the GuC prioritizes + RCS submissions over CCS ones, which can lead to CCS workloads being + significantly (or completely) starved of execution time. This KLV allows + the driver to specify a quantum (in ms) and a ratio (percentage value + between 0 and 100), and the GuC will prioritize the CCS for that + percentage of each quantum. For example, specifying 100ms and 30% will + make the GuC prioritize the CCS for 30ms of every 100ms. + Note that this does not necessarily mean that RCS and CCS engines will + only be active for their percentage of the quantum, as the restriction + only kicks in if both classes are fully busy with non-compatible address + spaces; i.e., if one engine is idle or running the same address space, + a pending job on the other engine will still be submitted to the HW no + matter what the ratio is. + */ +#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001 +#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u + +/** * DOC: GuC VGT Policy KLVs * * `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY. diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h index 41d39d67817a..f097fc6d5127 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h @@ -8,6 +8,7 @@ #include "xe_ttm_stolen_mgr.h" #include "xe_res_cursor.h" +#include "xe_validation.h" struct xe_bo; @@ -21,7 +22,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, u32 start, u32 end) { struct xe_bo *bo; - int err; + int err = 0; u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN; if (start < SZ_4K) @@ -32,21 +33,13 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, start = ALIGN(start, align); } - bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe), - NULL, size, start, end, - ttm_bo_type_kernel, flags, 0); + bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe), + size, start, end, ttm_bo_type_kernel, flags); + if (IS_ERR(bo)) { + err = PTR_ERR(bo); + bo = NULL; + return err; + } - err = xe_bo_pin(bo); - xe_bo_unlock_vm_held(bo); - - if (err) { - xe_bo_put(fb->bo); - bo = NULL; - } fb->bo = bo; diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c index 910632f57c3d..27437c22bd70 100644 --- a/drivers/gpu/drm/xe/display/intel_bo.c +++ b/drivers/gpu/drm/xe/display/intel_bo.c @@ -1,12 +1,7 @@ // SPDX-License-Identifier: MIT /* Copyright © 2024 Intel Corporation */ -#include <drm/drm_cache.h> #include <drm/drm_gem.h> -#include <drm/drm_panic.h> - -#include "intel_fb.h" -#include "intel_display_types.h" #include "xe_bo.h" #include "intel_bo.h" @@ -64,89 +59,3 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) { /* FIXME */ } - -struct xe_panic_data { - struct page **pages; - int page; - void *vaddr; -}; - -struct xe_framebuffer { - struct intel_framebuffer base; - struct xe_panic_data panic; -}; - -static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb) -{ - return &container_of_const(fb, struct xe_framebuffer, base)->panic; -} - -static void xe_panic_kunmap(struct xe_panic_data *panic)
-{ - if (panic->vaddr) { - drm_clflush_virt_range(panic->vaddr, PAGE_SIZE); - kunmap_local(panic->vaddr); - panic->vaddr = NULL; - } -} - -/* - * The scanout buffer pages are not mapped, so for each pixel, - * use kmap_local_page_try_from_panic() to map the page, and write the pixel. - * Try to keep the map from the previous pixel, to avoid too much map/unmap. - */ -static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, - unsigned int y, u32 color) -{ - struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; - struct xe_panic_data *panic = to_xe_panic_data(fb); - struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base)); - unsigned int new_page; - unsigned int offset; - - if (fb->panic_tiling) - offset = fb->panic_tiling(sb->width, x, y); - else - offset = y * sb->pitch[0] + x * sb->format->cpp[0]; - - new_page = offset >> PAGE_SHIFT; - offset = offset % PAGE_SIZE; - if (new_page != panic->page) { - xe_panic_kunmap(panic); - panic->page = new_page; - panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm, - panic->page); - } - if (panic->vaddr) { - u32 *pix = panic->vaddr + offset; - *pix = color; - } -} - -struct intel_framebuffer *intel_bo_alloc_framebuffer(void) -{ - struct xe_framebuffer *xe_fb; - - xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL); - if (xe_fb) - return &xe_fb->base; - return NULL; -} - -int intel_bo_panic_setup(struct drm_scanout_buffer *sb) -{ - struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; - struct xe_panic_data *panic = to_xe_panic_data(fb); - - panic->page = -1; - sb->set_pixel = xe_panic_page_set_pixel; - return 0; -} - -void intel_bo_panic_finish(struct intel_framebuffer *fb) -{ - struct xe_panic_data *panic = to_xe_panic_data(fb); - - xe_panic_kunmap(panic); - panic->page = -1; -} diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c index d96ba2b51065..35a5b07eeba4 100644 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c @@ -3,11 +3,8 @@ * Copyright © 2023 Intel Corporation */ -#include <drm/drm_fb_helper.h> +#include <linux/fb.h> -#include "intel_display_core.h" -#include "intel_display_types.h" -#include "intel_fb.h" #include "intel_fbdev_fb.h" #include "xe_bo.h" #include "xe_ttm_stolen_mgr.h" @@ -15,38 +12,19 @@ #include <generated/xe_wa_oob.h> -struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) +struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size) { - struct drm_framebuffer *fb; - struct drm_device *dev = helper->dev; - struct xe_device *xe = to_xe_device(dev); - struct drm_mode_fb_cmd2 mode_cmd = {}; + struct xe_device *xe = to_xe_device(drm); struct xe_bo *obj; - int size; - /* we don't do packed 24bpp */ - if (sizes->surface_bpp == 24) - sizes->surface_bpp = 32; - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * - DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE); - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - size = mode_cmd.pitches[0] * mode_cmd.height; - size = PAGE_ALIGN(size); obj = ERR_PTR(-ENODEV); if (!IS_DGFX(xe) && !XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) { - obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), - NULL, size, - ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | - XE_BO_FLAG_STOLEN | - XE_BO_FLAG_GGTT); + obj = 
xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), + size, + ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | + XE_BO_FLAG_STOLEN | + XE_BO_FLAG_GGTT, false); if (!IS_ERR(obj)) drm_info(&xe->drm, "Allocated fbdev into stolen\n"); else @@ -54,41 +32,30 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, } if (IS_ERR(obj)) { - obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size, - ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | - XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | - XE_BO_FLAG_GGTT); + obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), size, + ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT | + XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | + XE_BO_FLAG_GGTT, false); } if (IS_ERR(obj)) { drm_err(&xe->drm, "failed to allocate framebuffer (%pe)\n", obj); - fb = ERR_PTR(-ENOMEM); - goto err; - } - - fb = intel_framebuffer_create(&obj->ttm.base, - drm_get_format_info(dev, - mode_cmd.pixel_format, - mode_cmd.modifier[0]), - &mode_cmd); - if (IS_ERR(fb)) { - xe_bo_unpin_map_no_vm(obj); - goto err; + return ERR_PTR(-ENOMEM); } - drm_gem_object_put(&obj->ttm.base); - - return to_intel_framebuffer(fb); + return &obj->ttm.base; +} -err: - return ERR_CAST(fb); +void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj) +{ + xe_bo_unpin_map_no_vm(gem_to_xe_bo(obj)); } -int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info, +int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info, struct drm_gem_object *_obj, struct i915_vma *vma) { struct xe_bo *obj = gem_to_xe_bo(_obj); - struct pci_dev *pdev = to_pci_dev(display->drm->dev); + struct pci_dev *pdev = to_pci_dev(drm->dev); if (!(obj->flags & XE_BO_FLAG_SYSTEM)) { if (obj->flags & XE_BO_FLAG_STOLEN) diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 8b68d70db6c8..19e691fccf8c 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -20,7 +20,7 @@ #include "intel_audio.h" #include "intel_bw.h" #include "intel_display.h" -#include "intel_display_core.h" +#include "intel_display_device.h" #include "intel_display_driver.h" #include "intel_display_irq.h" #include "intel_display_types.h" @@ -37,13 +37,6 @@ /* Xe device functions */ -static bool has_display(struct xe_device *xe) -{ - struct intel_display *display = xe->display; - - return HAS_DISPLAY(display); -} - /** * xe_display_driver_probe_defer - Detect if we need to wait for other drivers * early on @@ -290,7 +283,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe) intel_dmc_suspend(display); - if (has_display(xe)) + if (intel_display_device_present(display)) intel_hpd_poll_enable(display); } @@ -303,14 +296,14 @@ static void xe_display_disable_d3cold(struct xe_device *xe) intel_dmc_resume(display); - if (has_display(xe)) + if (intel_display_device_present(display)) drm_mode_config_reset(&xe->drm); intel_display_driver_init_hw(display); intel_hpd_init(display); - if (has_display(xe)) + if (intel_display_device_present(display)) intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -333,7 +326,7 @@ void xe_display_pm_suspend(struct xe_device *xe) intel_power_domains_disable(display); drm_client_dev_suspend(&xe->drm, false); - if (has_display(xe)) { + if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(&xe->drm); intel_display_driver_disable_user_access(display); intel_display_driver_suspend(display); @@ -345,7 +338,7 @@ void 
xe_display_pm_suspend(struct xe_device *xe) intel_hpd_cancel_work(display); - if (has_display(xe)) { + if (intel_display_device_present(display)) { intel_display_driver_suspend_access(display); intel_encoder_suspend_all(display); } @@ -365,7 +358,7 @@ void xe_display_pm_shutdown(struct xe_device *xe) intel_power_domains_disable(display); drm_client_dev_suspend(&xe->drm, false); - if (has_display(xe)) { + if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(&xe->drm); intel_display_driver_disable_user_access(display); intel_display_driver_suspend(display); @@ -376,7 +369,7 @@ void xe_display_pm_shutdown(struct xe_device *xe) intel_encoder_block_all_hpds(display); intel_hpd_cancel_work(display); - if (has_display(xe)) + if (intel_display_device_present(display)) intel_display_driver_suspend_access(display); intel_encoder_suspend_all(display); @@ -465,25 +458,25 @@ void xe_display_pm_resume(struct xe_device *xe) intel_dmc_resume(display); - if (has_display(xe)) + if (intel_display_device_present(display)) drm_mode_config_reset(&xe->drm); intel_display_driver_init_hw(display); - if (has_display(xe)) + if (intel_display_device_present(display)) intel_display_driver_resume_access(display); intel_hpd_init(display); intel_encoder_unblock_all_hpds(display); - if (has_display(xe)) { + if (intel_display_device_present(display)) { intel_display_driver_resume(display); drm_kms_helper_poll_enable(&xe->drm); intel_display_driver_enable_user_access(display); } - if (has_display(xe)) + if (intel_display_device_present(display)) intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -548,7 +541,7 @@ int xe_display_probe(struct xe_device *xe) xe->display = display; - if (has_display(xe)) + if (intel_display_device_present(display)) return 0; no_display: diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index 9f941fc2e36b..58581d7aaae6 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -43,11 +43,11 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d return false; /* Set scanout flag for WC mapping */ - obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), - NULL, PAGE_ALIGN(size), - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | - XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT); + obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), + PAGE_ALIGN(size), + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | + XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false); if (IS_ERR(obj)) { kfree(vma); return false; diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index f1f8b5ab53ef..1fd4a815e784 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -102,29 +102,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, XE_PAGE_SIZE); if (IS_DGFX(xe)) - dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, - dpt_size, ~0ull, - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM0 | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE, - alignment); + dpt = xe_bo_create_pin_map_at_novm(xe, tile0, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM0 | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + alignment, false); else - dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, - dpt_size, ~0ull, - ttm_bo_type_kernel, - XE_BO_FLAG_STOLEN | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE, - alignment); + dpt = 
xe_bo_create_pin_map_at_novm(xe, tile0, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_STOLEN | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + alignment, false); if (IS_ERR(dpt)) - dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, - dpt_size, ~0ull, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_PAGETABLE, - alignment); + dpt = xe_bo_create_pin_map_at_novm(xe, tile0, + dpt_size, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_PAGETABLE, + alignment, false); if (IS_ERR(dpt)) return PTR_ERR(dpt); @@ -281,7 +281,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); struct drm_gem_object *obj = intel_fb_bo(&fb->base); struct xe_bo *bo = gem_to_xe_bo(obj); - int ret; + struct xe_validation_ctx ctx; + struct drm_exec exec; + int ret = 0; if (!vma) return ERR_PTR(-ENODEV); @@ -308,17 +310,22 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, * Pin the framebuffer, we can't use xe_bo_(un)pin functions as the * assumptions are incorrect for framebuffers */ - ret = ttm_bo_reserve(&bo->ttm, false, false, NULL); - if (ret) - goto err; - - if (IS_DGFX(xe)) - ret = xe_bo_migrate(bo, XE_PL_VRAM0); - else - ret = xe_bo_validate(bo, NULL, true); - if (!ret) - ttm_bo_pin(&bo->ttm); - ttm_bo_unreserve(&bo->ttm); + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true}, + ret) { + ret = drm_exec_lock_obj(&exec, &bo->ttm.base); + drm_exec_retry_on_contention(&exec); + if (ret) + break; + + if (IS_DGFX(xe)) + ret = xe_bo_migrate(bo, XE_PL_VRAM0, NULL, &exec); + else + ret = xe_bo_validate(bo, NULL, true, &exec); + drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &ret); + if (!ret) + ttm_bo_pin(&bo->ttm); + } if (ret) goto err; diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index 30f1073141fc..4ae847b628e2 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -72,10 +72,10 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, int ret = 0; /* allocate object of two page for HDCP command memory and store it */ - bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT); + bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT, false); if (IS_ERR(bo)) { drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n"); diff --git a/drivers/gpu/drm/xe/display/xe_panic.c b/drivers/gpu/drm/xe/display/xe_panic.c new file mode 100644 index 000000000000..f32b23338331 --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_panic.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include <drm/drm_cache.h> +#include <drm/drm_panic.h> + +#include "intel_display_types.h" +#include "intel_fb.h" +#include "intel_panic.h" +#include "xe_bo.h" + +struct intel_panic { + struct page **pages; + int page; + void *vaddr; +}; + +static void xe_panic_kunmap(struct intel_panic *panic) +{ + if (panic->vaddr) { + drm_clflush_virt_range(panic->vaddr, PAGE_SIZE); + kunmap_local(panic->vaddr); + panic->vaddr = NULL; + } +} + +/* + * The scanout buffer pages are not mapped, so for each pixel, + * use kmap_local_page_try_from_panic() to map the page, and write the pixel. 
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap. + */ +static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color) +{ + struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; + struct intel_panic *panic = fb->panic; + struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base)); + unsigned int new_page; + unsigned int offset; + + if (fb->panic_tiling) + offset = fb->panic_tiling(sb->width, x, y); + else + offset = y * sb->pitch[0] + x * sb->format->cpp[0]; + + new_page = offset >> PAGE_SHIFT; + offset = offset % PAGE_SIZE; + if (new_page != panic->page) { + xe_panic_kunmap(panic); + panic->page = new_page; + panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm, + panic->page); + } + if (panic->vaddr) { + u32 *pix = panic->vaddr + offset; + *pix = color; + } +} + +struct intel_panic *intel_panic_alloc(void) +{ + struct intel_panic *panic; + + panic = kzalloc(sizeof(*panic), GFP_KERNEL); + + return panic; +} + +int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb) +{ + panic->page = -1; + sb->set_pixel = xe_panic_page_set_pixel; + return 0; +} + +void intel_panic_finish(struct intel_panic *panic) +{ + xe_panic_kunmap(panic); + panic->page = -1; +} diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index 826ac3d578b7..94f00def811b 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -140,8 +140,8 @@ initial_plane_bo(struct xe_device *xe, page_size); size -= base; - bo = xe_bo_create_pin_map_at(xe, tile0, NULL, size, phys_base, - ttm_bo_type_kernel, flags); + bo = xe_bo_create_pin_map_at_novm(xe, tile0, size, phys_base, + ttm_bo_type_kernel, flags, 0, false); if (IS_ERR(bo)) { drm_dbg(&xe->drm, "Failed to create bo phys_base=%pa size %u with flags %x: %li\n", diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index f96b2e2b3064..06cb6b02ec64 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -522,6 +522,7 @@ #define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED) #define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12) +#define EUSTALL_PERF_SAMPLING_DISABLE REG_BIT(5) #define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8) #define DISABLE_D8_D16_COASLESCE REG_BIT(30) diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h index 1b101edb838b..b5eff383902c 100644 --- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h +++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h @@ -40,7 +40,4 @@ #define INDIRECT_CTX_RING_START_UDW (0x08 + 1) #define INDIRECT_CTX_RING_CTL (0x0a + 1) -#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6) -#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd) - #endif diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 7b40cc8be1c9..2294cf89f3e1 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -23,7 +23,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo, bool clear, u64 get_val, u64 assign_val, - struct kunit *test) + struct kunit *test, struct drm_exec *exec) { struct dma_fence *fence; struct ttm_tt *ttm; @@ -35,7 +35,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo, u32 offset; /* Move bo to VRAM if not already there. 
*/ - ret = xe_bo_validate(bo, NULL, false); + ret = xe_bo_validate(bo, NULL, false, exec); if (ret) { KUNIT_FAIL(test, "Failed to validate bo.\n"); return ret; @@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo, } /* Evict to system. CCS data should be copied. */ - ret = xe_bo_evict(bo); + ret = xe_bo_evict(bo, exec); if (ret) { KUNIT_FAIL(test, "Failed to evict bo.\n"); return ret; @@ -132,14 +132,15 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, /* TODO: Sanity check */ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile); + struct drm_exec *exec = XE_VALIDATION_OPT_OUT; if (IS_DGFX(xe)) kunit_info(test, "Testing vram id %u\n", tile->id); else kunit_info(test, "Testing system memory\n"); - bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, - bo_flags); + bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, + bo_flags, exec); if (IS_ERR(bo)) { KUNIT_FAIL(test, "Failed to create bo.\n"); return; @@ -149,18 +150,18 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, kunit_info(test, "Verifying that CCS data is cleared on creation.\n"); ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL, - test); + test, exec); if (ret) goto out_unlock; kunit_info(test, "Verifying that CCS data survives migration.\n"); ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL, - 0xdeadbeefdeadbeefULL, test); + 0xdeadbeefdeadbeefULL, test, exec); if (ret) goto out_unlock; kunit_info(test, "Verifying that CCS data can be properly cleared.\n"); - ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test); + ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test, exec); out_unlock: xe_bo_unlock(bo); @@ -210,6 +211,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc struct xe_bo *bo, *external; unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile); struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate); + struct drm_exec *exec = XE_VALIDATION_OPT_OUT; struct xe_gt *__gt; int err, i, id; @@ -218,25 +220,25 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc for (i = 0; i < 2; ++i) { xe_vm_lock(vm, false); - bo = xe_bo_create_user(xe, NULL, vm, 0x10000, + bo = xe_bo_create_user(xe, vm, 0x10000, DRM_XE_GEM_CPU_CACHING_WC, - bo_flags); + bo_flags, exec); xe_vm_unlock(vm); if (IS_ERR(bo)) { KUNIT_FAIL(test, "bo create err=%pe\n", bo); break; } - external = xe_bo_create_user(xe, NULL, NULL, 0x10000, + external = xe_bo_create_user(xe, NULL, 0x10000, DRM_XE_GEM_CPU_CACHING_WC, - bo_flags); + bo_flags, NULL); if (IS_ERR(external)) { KUNIT_FAIL(test, "external bo create err=%pe\n", external); goto cleanup_bo; } xe_bo_lock(external, false); - err = xe_bo_pin_external(external, false); + err = xe_bo_pin_external(external, false, exec); xe_bo_unlock(external); if (err) { KUNIT_FAIL(test, "external bo pin err=%pe\n", @@ -294,7 +296,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc if (i) { down_read(&vm->lock); xe_vm_lock(vm, false); - err = xe_bo_validate(bo, bo->vm, false); + err = xe_bo_validate(bo, bo->vm, false, exec); xe_vm_unlock(vm); up_read(&vm->lock); if (err) { @@ -303,7 +305,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc goto cleanup_all; } xe_bo_lock(external, false); - err = xe_bo_validate(external, NULL, false); + err = xe_bo_validate(external, NULL, false, exec); xe_bo_unlock(external); if (err) { 
KUNIT_FAIL(test, "external bo valid err=%pe\n", @@ -495,9 +497,9 @@ static int shrink_test_run_device(struct xe_device *xe) INIT_LIST_HEAD(&link->link); /* We can create bos using WC caching here. But it is slower. */ - bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE, + bo = xe_bo_create_user(xe, NULL, XE_BO_SHRINK_SIZE, DRM_XE_GEM_CPU_CACHING_WB, - XE_BO_FLAG_SYSTEM); + XE_BO_FLAG_SYSTEM, NULL); if (IS_ERR(bo)) { if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) && bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS)) diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c index 5baeab6b6fb7..a7e548a2bdfb 100644 --- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -27,7 +27,8 @@ static bool is_dynamic(struct dma_buf_test_params *params) } static void check_residency(struct kunit *test, struct xe_bo *exported, - struct xe_bo *imported, struct dma_buf *dmabuf) + struct xe_bo *imported, struct dma_buf *dmabuf, + struct drm_exec *exec) { struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv); u32 mem_type; @@ -62,7 +63,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, * importer is on a different device. If they're on the same device, * the exporter and the importer should be the same bo. */ - ret = xe_bo_evict(exported); + ret = xe_bo_evict(exported, exec); if (ret) { if (ret != -EINTR && ret != -ERESTARTSYS) KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n", @@ -77,7 +78,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported, } /* Re-validate the importer. This should move also exporter in. */ - ret = xe_bo_validate(imported, NULL, false); + ret = xe_bo_validate(imported, NULL, false, exec); if (ret) { if (ret != -EINTR && ret != -ERESTARTSYS) KUNIT_FAIL(test, "Validating importer failed with err=%d.\n", @@ -113,8 +114,8 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe) size = SZ_64K; kunit_info(test, "running %s\n", __func__); - bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC, - params->mem_mask); + bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC, + params->mem_mask, NULL); if (IS_ERR(bo)) { KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n", PTR_ERR(bo)); @@ -142,11 +143,12 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe) KUNIT_FAIL(test, "xe_gem_prime_import() succeeded when it shouldn't have\n"); } else { + struct drm_exec *exec = XE_VALIDATION_OPT_OUT; int err; /* Is everything where we expect it to be? */ xe_bo_lock(import_bo, false); - err = xe_bo_validate(import_bo, NULL, false); + err = xe_bo_validate(import_bo, NULL, false, exec); /* Pinning in VRAM is not allowed. 
*/ if (!is_dynamic(params) && @@ -159,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe) err == -ERESTARTSYS); if (!err) - check_residency(test, bo, import_bo, dmabuf); + check_residency(test, bo, import_bo, dmabuf, exec); xe_bo_unlock(import_bo); } drm_gem_object_put(import); diff --git a/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c new file mode 100644 index 000000000000..3b213fcae916 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c @@ -0,0 +1,776 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <linux/delay.h> + +#include <kunit/test.h> +#include <kunit/visibility.h> + +#include "tests/xe_kunit_helpers.h" +#include "tests/xe_pci_test.h" +#include "tests/xe_test.h" + +#include "xe_bo.h" +#include "xe_device.h" +#include "xe_pm.h" + +/* + * There are different ways to allocate the G2G buffers. The plan for this test + * is to make sure that all the possible options work. The particular option + * chosen by the driver may vary from one platform to another, it may also change + * with time. So to ensure consistency of testing, the relevant driver code is + * replicated here to guarantee it won't change without the test being updated + * to keep testing the other options. + * + * In order to test the actual code being used by the driver, there is also the + * 'default' scheme. That will use the official driver routines to test whatever + * method the driver is using on the current platform at the current time. + */ +enum { + /* Driver defined allocation scheme */ + G2G_CTB_TYPE_DEFAULT, + /* Single buffer in host memory */ + G2G_CTB_TYPE_HOST, + /* Single buffer in a specific tile, loops across all tiles */ + G2G_CTB_TYPE_TILE, +}; + +/* + * Payload is opaque to GuC. So KMD can define any structure or size it wants. + */ +struct g2g_test_payload { + u32 tx_dev; + u32 tx_tile; + u32 rx_dev; + u32 rx_tile; + u32 seqno; +}; + +static void g2g_test_send(struct kunit *test, struct xe_guc *guc, + u32 far_tile, u32 far_dev, + struct g2g_test_payload *payload) +{ + struct xe_device *xe = guc_to_xe(guc); + struct xe_gt *gt = guc_to_gt(guc); + u32 *action, total; + size_t payload_len; + int ret; + + static_assert(IS_ALIGNED(sizeof(*payload), sizeof(u32))); + payload_len = sizeof(*payload) / sizeof(u32); + + total = 4 + payload_len; + action = kunit_kmalloc_array(test, total, sizeof(*action), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, action); + + action[0] = XE_GUC_ACTION_TEST_G2G_SEND; + action[1] = far_tile; + action[2] = far_dev; + action[3] = payload_len; + memcpy(action + 4, payload, payload_len * sizeof(u32)); + + atomic_inc(&xe->g2g_test_count); + + /* + * Should specify the expected response notification here. Problem is that + * the response will be coming from a different GuC. By the end, it should + * all add up as long as an equal number of messages are sent from each GuC + * and to each GuC. However, in the middle negative reservation space errors + * and such like can occur. Rather than add intrusive changes to the CT layer + * it is simpler to just not bother counting it at all. The system should be + * idle when running the selftest, and the selftest's notification total size + * is well within the G2H allocation size. So there should be no issues with + * needing to block for space, which is all the tracking code is really for. 
+ */ + ret = xe_guc_ct_send(&guc->ct, action, total, 0, 0); + kunit_kfree(test, action); + KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G send failed: %d [%d:%d -> %d:%d]\n", ret, + gt_to_tile(gt)->id, G2G_DEV(gt), far_tile, far_dev); +} + +/* + * NB: Can't use KUNIT_ASSERT and friends in here as this is called asynchronously + * from the G2H notification handler. Need that to actually complete rather than + * thread-abort in order to keep the rest of the driver alive! + */ +int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *msg, u32 len) +{ + struct xe_device *xe = guc_to_xe(guc); + struct xe_gt *rx_gt = guc_to_gt(guc), *test_gt, *tx_gt = NULL; + u32 tx_tile, tx_dev, rx_tile, rx_dev, idx, got_len; + struct g2g_test_payload *payload; + size_t payload_len; + int ret = 0, i; + + payload_len = sizeof(*payload) / sizeof(u32); + + if (unlikely(len != (G2H_LEN_DW_G2G_NOTIFY_MIN + payload_len))) { + xe_gt_err(rx_gt, "G2G test notification invalid length %u", len); + ret = -EPROTO; + goto done; + } + + tx_tile = msg[0]; + tx_dev = msg[1]; + got_len = msg[2]; + payload = (struct g2g_test_payload *)(msg + 3); + + rx_tile = gt_to_tile(rx_gt)->id; + rx_dev = G2G_DEV(rx_gt); + + if (got_len != payload_len) { + xe_gt_err(rx_gt, "G2G: Invalid payload length: %u vs %zu\n", got_len, payload_len); + ret = -EPROTO; + goto done; + } + + if (payload->tx_dev != tx_dev || payload->tx_tile != tx_tile || + payload->rx_dev != rx_dev || payload->rx_tile != rx_tile) { + xe_gt_err(rx_gt, "G2G: Invalid payload: %d:%d -> %d:%d vs %d:%d -> %d:%d! [%d]\n", + payload->tx_tile, payload->tx_dev, payload->rx_tile, payload->rx_dev, + tx_tile, tx_dev, rx_tile, rx_dev, payload->seqno); + ret = -EPROTO; + goto done; + } + + if (!xe->g2g_test_array) { + xe_gt_err(rx_gt, "G2G: Missing test array!\n"); + ret = -ENOMEM; + goto done; + } + + for_each_gt(test_gt, xe, i) { + if (gt_to_tile(test_gt)->id != tx_tile) + continue; + + if (G2G_DEV(test_gt) != tx_dev) + continue; + + if (tx_gt) { + xe_gt_err(rx_gt, "G2G: Got duplicate TX GTs: %d vs %d for %d:%d!\n", + tx_gt->info.id, test_gt->info.id, tx_tile, tx_dev); + ret = -EINVAL; + goto done; + } + + tx_gt = test_gt; + } + if (!tx_gt) { + xe_gt_err(rx_gt, "G2G: Failed to find a TX GT for %d:%d!\n", tx_tile, tx_dev); + ret = -EINVAL; + goto done; + } + + idx = (tx_gt->info.id * xe->info.gt_count) + rx_gt->info.id; + + if (xe->g2g_test_array[idx] != payload->seqno - 1) { + xe_gt_err(rx_gt, "G2G: Seqno mismatch %d vs %d for %d:%d -> %d:%d!\n", + xe->g2g_test_array[idx], payload->seqno - 1, + tx_tile, tx_dev, rx_tile, rx_dev); + ret = -EINVAL; + goto done; + } + + xe->g2g_test_array[idx] = payload->seqno; + +done: + atomic_dec(&xe->g2g_test_count); + return ret; +} + +/* + * Send the given seqno from all GuCs to all other GuCs in tile/GT order + */ +static void g2g_test_in_order(struct kunit *test, struct xe_device *xe, u32 seqno) +{ + struct xe_gt *near_gt, *far_gt; + int i, j; + + for_each_gt(near_gt, xe, i) { + u32 near_tile = gt_to_tile(near_gt)->id; + u32 near_dev = G2G_DEV(near_gt); + + for_each_gt(far_gt, xe, j) { + u32 far_tile = gt_to_tile(far_gt)->id; + u32 far_dev = G2G_DEV(far_gt); + struct g2g_test_payload payload; + + if (far_gt->info.id == near_gt->info.id) + continue; + + payload.tx_dev = near_dev; + payload.tx_tile = near_tile; + payload.rx_dev = far_dev; + payload.rx_tile = far_tile; + payload.seqno = seqno; + g2g_test_send(test, &near_gt->uc.guc, far_tile, far_dev, &payload); + } + } +} + +#define WAIT_TIME_MS 100 +#define WAIT_COUNT (1000 / WAIT_TIME_MS) + +static void 
g2g_wait_for_complete(void *_xe) +{ + struct xe_device *xe = (struct xe_device *)_xe; + struct kunit *test = kunit_get_current_test(); + int wait = 0; + + /* Wait for all G2H messages to be received */ + while (atomic_read(&xe->g2g_test_count)) { + if (++wait > WAIT_COUNT) + break; + + msleep(WAIT_TIME_MS); + } + + KUNIT_ASSERT_EQ_MSG(test, 0, atomic_read(&xe->g2g_test_count), + "Timed out waiting for notifications\n"); + kunit_info(test, "Got all notifications back\n"); +} + +#undef WAIT_TIME_MS +#undef WAIT_COUNT + +static void g2g_clean_array(void *_xe) +{ + struct xe_device *xe = (struct xe_device *)_xe; + + xe->g2g_test_array = NULL; +} + +#define NUM_LOOPS 16 + +static void g2g_run_test(struct kunit *test, struct xe_device *xe) +{ + u32 seqno, max_array; + int ret, i, j; + + max_array = xe->info.gt_count * xe->info.gt_count; + xe->g2g_test_array = kunit_kcalloc(test, max_array, sizeof(u32), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe->g2g_test_array); + + ret = kunit_add_action_or_reset(test, g2g_clean_array, xe); + KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n"); + + /* + * Send incrementing seqnos from all GuCs to all other GuCs in tile/GT order. + * Tile/GT order doesn't really mean anything to the hardware but it is going + * to be a fixed sequence every time. + * + * Verify that each one comes back having taken the correct route. + */ + ret = kunit_add_action(test, g2g_wait_for_complete, xe); + KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n"); + for (seqno = 1; seqno < NUM_LOOPS; seqno++) + g2g_test_in_order(test, xe, seqno); + seqno--; + + kunit_release_action(test, &g2g_wait_for_complete, xe); + + /* Check for the final seqno in each slot */ + for (i = 0; i < xe->info.gt_count; i++) { + for (j = 0; j < xe->info.gt_count; j++) { + u32 idx = (j * xe->info.gt_count) + i; + + if (i == j) + KUNIT_ASSERT_EQ_MSG(test, 0, xe->g2g_test_array[idx], + "identity seqno modified: %d for %dx%d!\n", + xe->g2g_test_array[idx], i, j); + else + KUNIT_ASSERT_EQ_MSG(test, seqno, xe->g2g_test_array[idx], + "invalid seqno: %d vs %d for %dx%d!\n", + xe->g2g_test_array[idx], seqno, i, j); + } + } + + kunit_kfree(test, xe->g2g_test_array); + kunit_release_action(test, &g2g_clean_array, xe); + + kunit_info(test, "Test passed\n"); +} + +#undef NUM_LOOPS + +static void g2g_ct_stop(struct xe_guc *guc) +{ + struct xe_gt *remote_gt, *gt = guc_to_gt(guc); + struct xe_device *xe = gt_to_xe(gt); + int i, t; + + for_each_gt(remote_gt, xe, i) { + u32 tile, dev; + + if (remote_gt->info.id == gt->info.id) + continue; + + tile = gt_to_tile(remote_gt)->id; + dev = G2G_DEV(remote_gt); + + for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) + guc_g2g_deregister(guc, tile, dev, t); + } +} + +/* Size of a single allocation that contains all G2G CTBs across all GTs */ +static u32 g2g_ctb_size(struct kunit *test, struct xe_device *xe) +{ + unsigned int count = xe->info.gt_count; + u32 num_channels = (count * (count - 1)) / 2; + + kunit_info(test, "Size: (%d * %d / 2) * %d * 0x%08X + 0x%08X => 0x%08X [%d]\n", + count, count - 1, XE_G2G_TYPE_LIMIT, G2G_BUFFER_SIZE, G2G_DESC_AREA_SIZE, + num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE, + num_channels * XE_G2G_TYPE_LIMIT); + + return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE; +} + +/* + * Use the driver's regular CTB allocation scheme. 
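+ * That is, whatever guc_g2g_alloc() picks for the current platform: the
+ * G2G_CTB_TYPE_DEFAULT case described at the top of this file.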
+ */
+static void g2g_alloc_default(struct kunit *test, struct xe_device *xe)
+{
+	struct xe_gt *gt;
+	int i;
+
+	kunit_info(test, "Default [tiles = %d, GTs = %d]\n",
+		   xe->info.tile_count, xe->info.gt_count);
+
+	for_each_gt(gt, xe, i) {
+		struct xe_guc *guc = &gt->uc.guc;
+		int ret;
+
+		ret = guc_g2g_alloc(guc);
+		KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G alloc failed: %pe", ERR_PTR(ret));
+	}
+}
+
+static void g2g_distribute(struct kunit *test, struct xe_device *xe, struct xe_bo *bo)
+{
+	struct xe_gt *root_gt, *gt;
+	int i;
+
+	root_gt = xe_device_get_gt(xe, 0);
+	root_gt->uc.guc.g2g.bo = bo;
+	root_gt->uc.guc.g2g.owned = true;
+	kunit_info(test, "[%d.%d] Assigned 0x%p\n", gt_to_tile(root_gt)->id, root_gt->info.id, bo);
+
+	for_each_gt(gt, xe, i) {
+		if (gt->info.id != 0) {
+			gt->uc.guc.g2g.owned = false;
+			gt->uc.guc.g2g.bo = xe_bo_get(bo);
+			kunit_info(test, "[%d.%d] Pinned 0x%p\n",
+				   gt_to_tile(gt)->id, gt->info.id, gt->uc.guc.g2g.bo);
+		}
+
+		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt->uc.guc.g2g.bo);
+	}
+}
+
+/*
+ * Allocate a single blob on the host and split between all G2G CTBs.
+ */
+static void g2g_alloc_host(struct kunit *test, struct xe_device *xe)
+{
+	struct xe_bo *bo;
+	u32 g2g_size;
+
+	kunit_info(test, "Host [tiles = %d, GTs = %d]\n", xe->info.tile_count, xe->info.gt_count);
+
+	g2g_size = g2g_ctb_size(test, xe);
+	bo = xe_managed_bo_create_pin_map(xe, xe_device_get_root_tile(xe), g2g_size,
+					  XE_BO_FLAG_SYSTEM |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_ALL |
+					  XE_BO_FLAG_GGTT_INVALIDATE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+	kunit_info(test, "[HST] G2G buffer create: 0x%p\n", bo);
+
+	xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+	g2g_distribute(test, xe, bo);
+}
+
+/*
+ * Allocate a single blob on the given tile and split between all G2G CTBs.
+ */
+static void g2g_alloc_tile(struct kunit *test, struct xe_device *xe, struct xe_tile *tile)
+{
+	struct xe_bo *bo;
+	u32 g2g_size;
+
+	KUNIT_ASSERT_TRUE(test, IS_DGFX(xe));
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tile);
+
+	kunit_info(test, "Tile %d [tiles = %d, GTs = %d]\n",
+		   tile->id, xe->info.tile_count, xe->info.gt_count);
+
+	g2g_size = g2g_ctb_size(test, xe);
+	bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_ALL |
+					  XE_BO_FLAG_GGTT_INVALIDATE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+	kunit_info(test, "[%d.*] G2G buffer create: 0x%p\n", tile->id, bo);
+
+	xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+	g2g_distribute(test, xe, bo);
+}
+
+static void g2g_free(struct kunit *test, struct xe_device *xe)
+{
+	struct xe_gt *gt;
+	struct xe_bo *bo;
+	int i;
+
+	for_each_gt(gt, xe, i) {
+		bo = gt->uc.guc.g2g.bo;
+		if (!bo)
+			continue;
+
+		if (gt->uc.guc.g2g.owned) {
+			xe_managed_bo_unpin_map_no_vm(bo);
+			kunit_info(test, "[%d.%d] Unmapped 0x%p\n",
+				   gt_to_tile(gt)->id, gt->info.id, bo);
+		} else {
+			xe_bo_put(bo);
+			kunit_info(test, "[%d.%d] Unpinned 0x%p\n",
+				   gt_to_tile(gt)->id, gt->info.id, bo);
+		}
+
+		gt->uc.guc.g2g.bo = NULL;
+	}
+}
+
+static void g2g_stop(struct kunit *test, struct xe_device *xe)
+{
+	struct xe_gt *gt;
+	int i;
+
+	for_each_gt(gt, xe, i) {
+		struct xe_guc *guc = &gt->uc.guc;
+
+		if (!guc->g2g.bo)
+			continue;
+
+		g2g_ct_stop(guc);
+	}
+
+	g2g_free(test, xe);
+}
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ *           Far <tile>.<dev>
+ *         0.0   0.1   1.0   1.1
+ * N 0.0  --/-- 00/01 02/03 04/05
+ * e 0.1  01/00 --/-- 06/07 08/09
+ * a 1.0  03/02 07/06 --/-- 10/11
+ * r 1.1  05/04 09/08 11/10 --/--
+ *
+ * Where each entry is Rx/Tx channel id.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10. Whereas
+ * GuC #2 talking to GuC #3 would read from #10 and write to #11.
+ */
+static int g2g_slot_flat(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+			 u32 type, u32 max_inst, bool have_dev)
+{
+	u32 near = near_tile, far = far_tile;
+	u32 idx = 0, x, y, direction;
+	int i;
+
+	if (have_dev) {
+		near = (near << 1) | near_dev;
+		far = (far << 1) | far_dev;
+	}
+
+	/* No need to send to one's self */
+	if (far == near)
+		return -1;
+
+	if (far > near) {
+		/* Top right table half */
+		x = far;
+		y = near;
+
+		/* T/R is 'forwards' direction */
+		direction = type;
+	} else {
+		/* Bottom left table half */
+		x = near;
+		y = far;
+
+		/* B/L is 'backwards' direction */
+		direction = (1 - type);
+	}
+
+	/* Count the rows prior to the target */
+	for (i = y; i > 0; i--)
+		idx += max_inst - i;
+
+	/* Count this row up to the target */
+	idx += (x - 1 - y);
+
+	/* Slots are in Rx/Tx pairs */
+	idx *= 2;
+
+	/* Pick Rx/Tx direction */
+	idx += direction;
+
+	return idx;
+}
+
+static int g2g_register_flat(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type, bool have_dev)
+{
+	struct xe_gt *gt = guc_to_gt(guc);
+	struct xe_device *xe = gt_to_xe(gt);
+	u32 near_tile = gt_to_tile(gt)->id;
+	u32 near_dev = G2G_DEV(gt);
+	u32 max = xe->info.gt_count;
+	int idx;
+	u32 base, desc, buf;
+
+	if (!guc->g2g.bo)
+		return -ENODEV;
+
+	idx = g2g_slot_flat(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+	xe_assert(xe, idx >= 0);
+
+	base = guc_bo_ggtt_addr(guc, guc->g2g.bo);
+	desc = base + idx * G2G_DESC_SIZE;
+	buf = base + idx * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+
+	xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+	xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(guc->g2g.bo));
+
+	return guc_action_register_g2g_buffer(guc, type, far_tile, far_dev,
+					      desc, buf, G2G_BUFFER_SIZE);
+}
+
+static void g2g_start(struct kunit *test, struct xe_guc *guc)
+{
+	struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
+	struct xe_device *xe = gt_to_xe(gt);
+	unsigned int i;
+	int t, ret;
+	bool have_dev;
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, guc->g2g.bo);
+
+	/* GuC interface will need extending if more GT device types are ever created.
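+	 * Note that the flat channel numbering above packs the device id into
+	 * a single bit (see g2g_slot_flat()), so it can only represent two GT
+	 * types per tile.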
+	 */
+	KUNIT_ASSERT_TRUE(test,
+			  (gt->info.type == XE_GT_TYPE_MAIN) ||
+			  (gt->info.type == XE_GT_TYPE_MEDIA));
+
+	/* Channel numbering depends on whether there are multiple GTs per tile */
+	have_dev = xe->info.gt_count > xe->info.tile_count;
+
+	for_each_gt(remote_gt, xe, i) {
+		u32 tile, dev;
+
+		if (remote_gt->info.id == gt->info.id)
+			continue;
+
+		tile = gt_to_tile(remote_gt)->id;
+		dev = G2G_DEV(remote_gt);
+
+		for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+			ret = g2g_register_flat(guc, tile, dev, t, have_dev);
+			KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G register failed: %pe", ERR_PTR(ret));
+		}
+	}
+}
+
+static void g2g_reinit(struct kunit *test, struct xe_device *xe, int ctb_type, struct xe_tile *tile)
+{
+	struct xe_gt *gt;
+	int i, found = 0;
+
+	g2g_stop(test, xe);
+
+	for_each_gt(gt, xe, i) {
+		struct xe_guc *guc = &gt->uc.guc;
+
+		KUNIT_ASSERT_NULL(test, guc->g2g.bo);
+	}
+
+	switch (ctb_type) {
+	case G2G_CTB_TYPE_DEFAULT:
+		g2g_alloc_default(test, xe);
+		break;
+
+	case G2G_CTB_TYPE_HOST:
+		g2g_alloc_host(test, xe);
+		break;
+
+	case G2G_CTB_TYPE_TILE:
+		g2g_alloc_tile(test, xe, tile);
+		break;
+
+	default:
+		KUNIT_ASSERT_TRUE(test, false);
+	}
+
+	for_each_gt(gt, xe, i) {
+		struct xe_guc *guc = &gt->uc.guc;
+
+		if (!guc->g2g.bo)
+			continue;
+
+		if (ctb_type == G2G_CTB_TYPE_DEFAULT)
+			guc_g2g_start(guc);
+		else
+			g2g_start(test, guc);
+		found++;
+	}
+
+	KUNIT_ASSERT_GT_MSG(test, found, 1, "insufficient G2G channels running: %d", found);
+
+	kunit_info(test, "Testing across %d GTs\n", found);
+}
+
+static void g2g_recreate_ctb(void *_xe)
+{
+	struct xe_device *xe = (struct xe_device *)_xe;
+	struct kunit *test = kunit_get_current_test();
+
+	g2g_stop(test, xe);
+
+	if (xe_guc_g2g_wanted(xe))
+		g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
+}
+
+static void g2g_pm_runtime_put(void *_xe)
+{
+	struct xe_device *xe = (struct xe_device *)_xe;
+
+	xe_pm_runtime_put(xe);
+}
+
+static void g2g_pm_runtime_get(struct kunit *test)
+{
+	struct xe_device *xe = test->priv;
+	int ret;
+
+	xe_pm_runtime_get(xe);
+	ret = kunit_add_action_or_reset(test, g2g_pm_runtime_put, xe);
+	KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register runtime PM action\n");
+}
+
+static void g2g_check_skip(struct kunit *test)
+{
+	struct xe_device *xe = test->priv;
+	struct xe_gt *gt;
+	int i;
+
+	if (IS_SRIOV_VF(xe))
+		kunit_skip(test, "not supported from a VF");
+
+	if (xe->info.gt_count <= 1)
+		kunit_skip(test, "not enough GTs");
+
+	for_each_gt(gt, xe, i) {
+		struct xe_guc *guc = &gt->uc.guc;
+
+		if (guc->fw.build_type == CSS_UKERNEL_INFO_BUILDTYPE_PROD)
+			kunit_skip(test,
+				   "G2G test interface not available in production firmware builds\n");
+	}
+}
+
+/*
+ * Simple test that does not try to recreate the CTBs.
+ * Requires that the platform already enables G2G comms
+ * but has no risk of leaving the system in a broken state
+ * afterwards.
+ */
+static void xe_live_guc_g2g_kunit_default(struct kunit *test)
+{
+	struct xe_device *xe = test->priv;
+
+	if (!xe_guc_g2g_wanted(xe))
+		kunit_skip(test, "G2G not enabled");
+
+	g2g_check_skip(test);
+
+	g2g_pm_runtime_get(test);
+
+	kunit_info(test, "Testing default CTBs\n");
+	g2g_run_test(test, xe);
+
+	kunit_release_action(test, &g2g_pm_runtime_put, xe);
+}
+
+/*
+ * More complex test that re-creates the CTBs in various locations to
+ * test access to each location from each GuC. Can be run even on
+ * systems that do not enable G2G by default. On the other hand,
+ * because it recreates the CTBs, if something goes wrong it could
+ * leave the system with broken G2G comms.
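+ *
+ * (The g2g_recreate_ctb() cleanup action registered below attempts to
+ * restore the driver's default CTB configuration on exit.)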
+ */ +static void xe_live_guc_g2g_kunit_allmem(struct kunit *test) +{ + struct xe_device *xe = test->priv; + int ret; + + g2g_check_skip(test); + + g2g_pm_runtime_get(test); + + /* Make sure to leave the system as we found it */ + ret = kunit_add_action_or_reset(test, g2g_recreate_ctb, xe); + KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register CTB re-creation action\n"); + + kunit_info(test, "Testing CTB type 'default'...\n"); + g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL); + g2g_run_test(test, xe); + + kunit_info(test, "Testing CTB type 'host'...\n"); + g2g_reinit(test, xe, G2G_CTB_TYPE_HOST, NULL); + g2g_run_test(test, xe); + + if (IS_DGFX(xe)) { + struct xe_tile *tile; + int id; + + for_each_tile(tile, xe, id) { + kunit_info(test, "Testing CTB type 'tile: #%d'...\n", id); + + g2g_reinit(test, xe, G2G_CTB_TYPE_TILE, tile); + g2g_run_test(test, xe); + } + } else { + kunit_info(test, "Skipping local memory on integrated platform\n"); + } + + kunit_release_action(test, g2g_recreate_ctb, xe); + kunit_release_action(test, g2g_pm_runtime_put, xe); +} + +static struct kunit_case xe_guc_g2g_tests[] = { + KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_default, xe_pci_live_device_gen_param), + KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_allmem, xe_pci_live_device_gen_param), + {} +}; + +VISIBLE_IF_KUNIT +struct kunit_suite xe_guc_g2g_test_suite = { + .name = "xe_guc_g2g", + .test_cases = xe_guc_g2g_tests, + .init = xe_kunit_helper_xe_device_live_test_init, +}; +EXPORT_SYMBOL_IF_KUNIT(xe_guc_g2g_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c index 81277c77016d..c55e46f1ae92 100644 --- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c @@ -10,12 +10,14 @@ extern struct kunit_suite xe_bo_shrink_test_suite; extern struct kunit_suite xe_dma_buf_test_suite; extern struct kunit_suite xe_migrate_test_suite; extern struct kunit_suite xe_mocs_test_suite; +extern struct kunit_suite xe_guc_g2g_test_suite; kunit_test_suite(xe_bo_test_suite); kunit_test_suite(xe_bo_shrink_test_suite); kunit_test_suite(xe_dma_buf_test_suite); kunit_test_suite(xe_migrate_test_suite); kunit_test_suite(xe_mocs_test_suite); +kunit_test_suite(xe_guc_g2g_test_suite); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index edd1e701aa1c..5904d658d1f2 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -70,7 +70,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe, } } while (0) static void test_copy(struct xe_migrate *m, struct xe_bo *bo, - struct kunit *test, u32 region) + struct kunit *test, u32 region, struct drm_exec *exec) { struct xe_device *xe = tile_to_xe(m->tile); u64 retval, expected = 0; @@ -84,14 +84,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo, ttm_bo_type_kernel, region | XE_BO_FLAG_NEEDS_CPU_ACCESS | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_PINNED, + exec); if (IS_ERR(remote)) { KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n", str, remote); return; } - err = xe_bo_validate(remote, NULL, false); + err = xe_bo_validate(remote, NULL, false, exec); if (err) { KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n", str, err); @@ -161,13 +162,13 @@ out_unlock: } static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo, - struct kunit *test) + struct drm_exec *exec, struct kunit *test) { - test_copy(m, bo, test, 
XE_BO_FLAG_SYSTEM); + test_copy(m, bo, test, XE_BO_FLAG_SYSTEM, exec); } static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo, - struct kunit *test) + struct drm_exec *exec, struct kunit *test) { u32 region; @@ -178,10 +179,11 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo, region = XE_BO_FLAG_VRAM1; else region = XE_BO_FLAG_VRAM0; - test_copy(m, bo, test, region); + test_copy(m, bo, test, region, exec); } -static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) +static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test, + struct drm_exec *exec) { struct xe_tile *tile = m->tile; struct xe_device *xe = tile_to_xe(tile); @@ -202,7 +204,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile)); + XE_BO_FLAG_VRAM_IF_DGFX(tile), + exec); if (IS_ERR(big)) { KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big)); goto vunmap; @@ -210,7 +213,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile)); + XE_BO_FLAG_VRAM_IF_DGFX(tile), + exec); if (IS_ERR(pt)) { KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n", PTR_ERR(pt)); @@ -220,7 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) tiny = xe_bo_create_pin_map(xe, tile, m->q->vm, 2 * SZ_4K, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile)); + XE_BO_FLAG_VRAM_IF_DGFX(tile), + exec); if (IS_ERR(tiny)) { KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n", PTR_ERR(tiny)); @@ -290,10 +295,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) check(retval, expected, "Command clear small last value", test); kunit_info(test, "Copying small buffer object to system\n"); - test_copy_sysmem(m, tiny, test); + test_copy_sysmem(m, tiny, exec, test); if (xe->info.tile_count > 1) { kunit_info(test, "Copying small buffer object to other vram\n"); - test_copy_vram(m, tiny, test); + test_copy_vram(m, tiny, exec, test); } /* Clear a big bo */ @@ -312,10 +317,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) check(retval, expected, "Command clear big last value", test); kunit_info(test, "Copying big buffer object to system\n"); - test_copy_sysmem(m, big, test); + test_copy_sysmem(m, big, exec, test); if (xe->info.tile_count > 1) { kunit_info(test, "Copying big buffer object to other vram\n"); - test_copy_vram(m, big, test); + test_copy_vram(m, big, exec, test); } out: @@ -343,10 +348,11 @@ static int migrate_test_run_device(struct xe_device *xe) for_each_tile(tile, xe, id) { struct xe_migrate *m = tile->migrate; + struct drm_exec *exec = XE_VALIDATION_OPT_OUT; kunit_info(test, "Testing tile id %d.\n", id); xe_vm_lock(m->q->vm, false); - xe_migrate_sanity_test(m, test); + xe_migrate_sanity_test(m, test, exec); xe_vm_unlock(m->q->vm); } @@ -490,7 +496,7 @@ err_sync: static void test_migrate(struct xe_device *xe, struct xe_tile *tile, struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo, - struct kunit *test) + struct drm_exec *exec, struct kunit *test) { struct dma_fence *fence; u64 expected, retval; @@ -509,7 +515,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, dma_fence_put(fence); kunit_info(test, "Evict vram buffer object\n"); - ret = xe_bo_evict(vram_bo); + ret = 
xe_bo_evict(vram_bo, exec); if (ret) { KUNIT_FAIL(test, "Failed to evict bo.\n"); return; @@ -538,7 +544,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, dma_fence_put(fence); kunit_info(test, "Restore vram buffer object\n"); - ret = xe_bo_validate(vram_bo, NULL, false); + ret = xe_bo_validate(vram_bo, NULL, false, exec); if (ret) { KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret); return; @@ -636,13 +642,14 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til { struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL; unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile); + struct drm_exec *exec; long ret; - sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, + sys_bo = xe_bo_create_user(xe, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC, XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_PINNED, NULL); if (IS_ERR(sys_bo)) { KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n", @@ -650,8 +657,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til return; } + exec = XE_VALIDATION_OPT_OUT; xe_bo_lock(sys_bo, false); - ret = xe_bo_validate(sys_bo, NULL, false); + ret = xe_bo_validate(sys_bo, NULL, false, exec); if (ret) { KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret); goto free_sysbo; @@ -664,10 +672,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til } xe_bo_unlock(sys_bo); - ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, + ccs_bo = xe_bo_create_user(xe, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC, bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_PINNED, NULL); if (IS_ERR(ccs_bo)) { KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n", @@ -676,7 +684,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til } xe_bo_lock(ccs_bo, false); - ret = xe_bo_validate(ccs_bo, NULL, false); + ret = xe_bo_validate(ccs_bo, NULL, false, exec); if (ret) { KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret); goto free_ccsbo; @@ -689,10 +697,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til } xe_bo_unlock(ccs_bo); - vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, + vram_bo = xe_bo_create_user(xe, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC, bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | - XE_BO_FLAG_PINNED); + XE_BO_FLAG_PINNED, NULL); if (IS_ERR(vram_bo)) { KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n", PTR_ERR(vram_bo)); @@ -700,7 +708,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til } xe_bo_lock(vram_bo, false); - ret = xe_bo_validate(vram_bo, NULL, false); + ret = xe_bo_validate(vram_bo, NULL, false, exec); if (ret) { KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret); goto free_vrambo; @@ -713,7 +721,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til } test_clear(xe, tile, sys_bo, vram_bo, test); - test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test); + test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, exec, test); xe_bo_unlock(vram_bo); xe_bo_lock(vram_bo, false); diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index db30c5156d0c..aa29ac759d5d 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -12,12 +12,219 @@ #include <kunit/test-bug.h> #include <kunit/visibility.h> +#define PLATFORM_CASE(platform__, graphics_step__) \ + { \ + .platform = XE_ ## platform__, \ + 
.subplatform = XE_SUBPLATFORM_NONE, \ + .step = { .graphics = STEP_ ## graphics_step__ } \ + } + +#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \ + { \ + .platform = XE_ ## platform__, \ + .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \ + .step = { .graphics = STEP_ ## graphics_step__ } \ + } + +#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \ + media_verx100__, media_step__) \ + { \ + .platform = XE_ ## platform__, \ + .subplatform = XE_SUBPLATFORM_NONE, \ + .graphics_verx100 = graphics_verx100__, \ + .media_verx100 = media_verx100__, \ + .step = { .graphics = STEP_ ## graphics_step__, \ + .media = STEP_ ## media_step__ } \ + } + +static const struct xe_pci_fake_data cases[] = { + PLATFORM_CASE(TIGERLAKE, B0), + PLATFORM_CASE(DG1, A0), + PLATFORM_CASE(DG1, B0), + PLATFORM_CASE(ALDERLAKE_S, A0), + PLATFORM_CASE(ALDERLAKE_S, B0), + PLATFORM_CASE(ALDERLAKE_S, C0), + PLATFORM_CASE(ALDERLAKE_S, D0), + PLATFORM_CASE(ALDERLAKE_P, A0), + PLATFORM_CASE(ALDERLAKE_P, B0), + PLATFORM_CASE(ALDERLAKE_P, C0), + SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0), + SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0), + SUBPLATFORM_CASE(DG2, G10, C0), + SUBPLATFORM_CASE(DG2, G11, B1), + SUBPLATFORM_CASE(DG2, G12, A1), + GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0), + GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0), + GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0), + GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0), + GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0), + GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1), + GMDID_CASE(PANTHERLAKE, 3000, A0, 3000, A0), +}; + +KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc); + +/** + * xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters + * @prev: the pointer to the previous parameter to iterate from or NULL + * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE + * + * This function prepares struct xe_pci_fake_data parameter. + * + * To be used only as a parameter generator function in &KUNIT_CASE_PARAM. + * + * Return: pointer to the next parameter or NULL if no more parameters + */ +const void *xe_pci_fake_data_gen_params(const void *prev, char *desc) +{ + return platform_gen_params(prev, desc); +} +EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_gen_params); + +static const struct xe_device_desc *lookup_desc(enum xe_platform p) +{ + const struct xe_device_desc *desc; + const struct pci_device_id *ids; + + for (ids = pciidlist; ids->driver_data; ids++) { + desc = (const void *)ids->driver_data; + if (desc->platform == p) + return desc; + } + return NULL; +} + +static const struct xe_subplatform_desc *lookup_sub_desc(enum xe_platform p, enum xe_subplatform s) +{ + const struct xe_device_desc *desc = lookup_desc(p); + const struct xe_subplatform_desc *spd; + + if (desc && desc->subplatforms) + for (spd = desc->subplatforms; spd->subplatform; spd++) + if (spd->subplatform == s) + return spd; + return NULL; +} + +static const char *lookup_platform_name(enum xe_platform p) +{ + const struct xe_device_desc *desc = lookup_desc(p); + + return desc ? desc->platform_name : "INVALID"; +} + +static const char *__lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s) +{ + const struct xe_subplatform_desc *desc = lookup_sub_desc(p, s); + + return desc ? desc->name : "INVALID"; +} + +static const char *lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s) +{ + return s == XE_SUBPLATFORM_NONE ? 
"" : __lookup_subplatform_name(p, s); +} + +static const char *subplatform_prefix(enum xe_subplatform s) +{ + return s == XE_SUBPLATFORM_NONE ? "" : " "; +} + +static const char *step_prefix(enum xe_step step) +{ + return step == STEP_NONE ? "" : " "; +} + +static const char *step_name(enum xe_step step) +{ + return step == STEP_NONE ? "" : xe_step_name(step); +} + +static const char *sriov_prefix(enum xe_sriov_mode mode) +{ + return mode <= XE_SRIOV_MODE_NONE ? "" : " "; +} + +static const char *sriov_name(enum xe_sriov_mode mode) +{ + return mode <= XE_SRIOV_MODE_NONE ? "" : xe_sriov_mode_to_string(mode); +} + +static const char *lookup_graphics_name(unsigned int verx100) +{ + const struct xe_ip *ip = find_graphics_ip(verx100); + + return ip ? ip->name : ""; +} + +static const char *lookup_media_name(unsigned int verx100) +{ + const struct xe_ip *ip = find_media_ip(verx100); + + return ip ? ip->name : ""; +} + +/** + * xe_pci_fake_data_desc - Describe struct xe_pci_fake_data parameter + * @param: the &struct xe_pci_fake_data parameter to describe + * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE + * + * This function prepares description of the struct xe_pci_fake_data parameter. + * + * It is tailored for use in parameterized KUnit tests where parameter generator + * is based on the struct xe_pci_fake_data arrays. + */ +void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc) +{ + if (param->graphics_verx100 || param->media_verx100) + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s %u.%02u(%s)%s%s %u.%02u(%s)%s%s%s%s", + lookup_platform_name(param->platform), + subplatform_prefix(param->subplatform), + lookup_subplatform_name(param->platform, param->subplatform), + param->graphics_verx100 / 100, param->graphics_verx100 % 100, + lookup_graphics_name(param->graphics_verx100), + step_prefix(param->step.graphics), step_name(param->step.graphics), + param->media_verx100 / 100, param->media_verx100 % 100, + lookup_media_name(param->media_verx100), + step_prefix(param->step.media), step_name(param->step.media), + sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode)); + else + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s%s%s%s%s", + lookup_platform_name(param->platform), + subplatform_prefix(param->subplatform), + lookup_subplatform_name(param->platform, param->subplatform), + step_prefix(param->step.graphics), step_name(param->step.graphics), + sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode)); +} +EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_desc); + static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc) { snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s", param->verx100 / 100, param->verx100 % 100, param->name); } +/* + * Pre-GMDID Graphics and Media IPs definitions. 
+ * + * Mimic the way GMDID IPs are declared so the same + * param generator can be used for both + */ +static const struct xe_ip pre_gmdid_graphics_ips[] = { + graphics_ip_xelp, + graphics_ip_xelpp, + graphics_ip_xehpg, + graphics_ip_xehpc, +}; + +static const struct xe_ip pre_gmdid_media_ips[] = { + media_ip_xem, + media_ip_xehpm, +}; + +KUNIT_ARRAY_PARAM(pre_gmdid_graphics_ip, pre_gmdid_graphics_ips, xe_ip_kunit_desc); +KUNIT_ARRAY_PARAM(pre_gmdid_media_ip, pre_gmdid_media_ips, xe_ip_kunit_desc); + KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc); KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc); @@ -46,6 +253,13 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc); */ const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc) { + const void *next = pre_gmdid_graphics_ip_gen_params(prev, desc); + + if (next) + return next; + if (is_insidevar(prev, pre_gmdid_graphics_ips)) + prev = NULL; + return graphics_ip_gen_params(prev, desc); } EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param); @@ -63,6 +277,13 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param); */ const void *xe_pci_media_ip_gen_param(const void *prev, char *desc) { + const void *next = pre_gmdid_media_ip_gen_params(prev, desc); + + if (next) + return next; + if (is_insidevar(prev, pre_gmdid_media_ips)) + prev = NULL; + return media_ip_gen_params(prev, desc); } EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param); @@ -94,10 +315,10 @@ static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, if (type == GMDID_MEDIA) { *ver = data->media_verx100; - *revid = xe_step_to_gmdid(data->media_step); + *revid = xe_step_to_gmdid(data->step.media); } else { *ver = data->graphics_verx100; - *revid = xe_step_to_gmdid(data->graphics_step); + *revid = xe_step_to_gmdid(data->step.graphics); } } diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h index ce4d2b86b778..5e9a7ffc747f 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.h +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h @@ -10,6 +10,7 @@ #include "xe_platform_types.h" #include "xe_sriov_types.h" +#include "xe_step_types.h" struct xe_device; @@ -17,13 +18,14 @@ struct xe_pci_fake_data { enum xe_sriov_mode sriov_mode; enum xe_platform platform; enum xe_subplatform subplatform; + struct xe_step_info step; u32 graphics_verx100; u32 media_verx100; - u32 graphics_step; - u32 media_step; }; int xe_pci_fake_device_init(struct xe_device *xe); +const void *xe_pci_fake_data_gen_params(const void *prev, char *desc); +void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc); const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc); const void *xe_pci_media_ip_gen_param(const void *prev, char *desc); diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c index 416258c193f6..49d191043dfa 100644 --- a/drivers/gpu/drm/xe/tests/xe_wa_test.c +++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c @@ -15,87 +15,10 @@ #include "xe_tuning.h" #include "xe_wa.h" -struct platform_test_case { - const char *name; - enum xe_platform platform; - enum xe_subplatform subplatform; - u32 graphics_verx100; - u32 media_verx100; - struct xe_step_info step; -}; - -#define PLATFORM_CASE(platform__, graphics_step__) \ - { \ - .name = #platform__ " (" #graphics_step__ ")", \ - .platform = XE_ ## platform__, \ - .subplatform = XE_SUBPLATFORM_NONE, \ - .step = { .graphics = STEP_ ## graphics_step__ } \ - } - - -#define SUBPLATFORM_CASE(platform__, subplatform__, 
graphics_step__) \ - { \ - .name = #platform__ "_" #subplatform__ " (" #graphics_step__ ")", \ - .platform = XE_ ## platform__, \ - .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \ - .step = { .graphics = STEP_ ## graphics_step__ } \ - } - -#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \ - media_verx100__, media_step__) \ - { \ - .name = #platform__ " (g:" #graphics_step__ ", m:" #media_step__ ")",\ - .platform = XE_ ## platform__, \ - .subplatform = XE_SUBPLATFORM_NONE, \ - .graphics_verx100 = graphics_verx100__, \ - .media_verx100 = media_verx100__, \ - .step = { .graphics = STEP_ ## graphics_step__, \ - .media = STEP_ ## media_step__ } \ - } - -static const struct platform_test_case cases[] = { - PLATFORM_CASE(TIGERLAKE, B0), - PLATFORM_CASE(DG1, A0), - PLATFORM_CASE(DG1, B0), - PLATFORM_CASE(ALDERLAKE_S, A0), - PLATFORM_CASE(ALDERLAKE_S, B0), - PLATFORM_CASE(ALDERLAKE_S, C0), - PLATFORM_CASE(ALDERLAKE_S, D0), - PLATFORM_CASE(ALDERLAKE_P, A0), - PLATFORM_CASE(ALDERLAKE_P, B0), - PLATFORM_CASE(ALDERLAKE_P, C0), - SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0), - SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0), - SUBPLATFORM_CASE(DG2, G10, C0), - SUBPLATFORM_CASE(DG2, G11, B1), - SUBPLATFORM_CASE(DG2, G12, A1), - GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0), - GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0), - GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0), - GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0), - GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0), - GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1), - GMDID_CASE(PANTHERLAKE, 3000, A0, 3000, A0), -}; - -static void platform_desc(const struct platform_test_case *t, char *desc) -{ - strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); -} - -KUNIT_ARRAY_PARAM(platform, cases, platform_desc); - static int xe_wa_test_init(struct kunit *test) { - const struct platform_test_case *param = test->param_value; - struct xe_pci_fake_data data = { - .platform = param->platform, - .subplatform = param->subplatform, - .graphics_verx100 = param->graphics_verx100, - .media_verx100 = param->media_verx100, - .graphics_step = param->step.graphics, - .media_step = param->step.media, - }; + const struct xe_pci_fake_data *param = test->param_value; + struct xe_pci_fake_data data = *param; struct xe_device *xe; struct device *dev; int ret; @@ -120,13 +43,6 @@ static int xe_wa_test_init(struct kunit *test) return 0; } -static void xe_wa_test_exit(struct kunit *test) -{ - struct xe_device *xe = test->priv; - - drm_kunit_helper_free_device(test, xe->drm.dev); -} - static void xe_wa_gt(struct kunit *test) { struct xe_device *xe = test->priv; @@ -144,14 +60,13 @@ static void xe_wa_gt(struct kunit *test) } static struct kunit_case xe_wa_tests[] = { - KUNIT_CASE_PARAM(xe_wa_gt, platform_gen_params), + KUNIT_CASE_PARAM(xe_wa_gt, xe_pci_fake_data_gen_params), {} }; static struct kunit_suite xe_rtp_test_suite = { .name = "xe_wa", .init = xe_wa_test_init, - .exit = xe_wa_test_exit, .test_cases = xe_wa_tests, }; diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c index feb6e013dc38..6d20229c11de 100644 --- a/drivers/gpu/drm/xe/xe_bb.c +++ b/drivers/gpu/drm/xe/xe_bb.c @@ -64,7 +64,7 @@ struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords, enum xe_sriov_vf_ccs_rw_ctxs ctx_id) { struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL); - struct xe_tile *tile = gt_to_tile(gt); + struct xe_device *xe = gt_to_xe(gt); struct xe_sa_manager *bb_pool; int err; @@ -78,7 +78,7 @@ struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords, * So, this extra DW acts as a 
guard here. */ - bb_pool = tile->sriov.vf.ccs[ctx_id].mem.ccs_bb_pool; + bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool; bb->bo = xe_sa_bo_new(bb_pool, 4 * (dwords + 1)); if (IS_ERR(bb->bo)) { diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 870f43347281..edc6e29f0cc1 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -974,11 +974,11 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, * CCS meta data is migrated from TT -> SMEM. So, let us detach the * BBs from BO as it is no longer needed. */ - if (IS_VF_CCS_BB_VALID(xe, bo) && old_mem_type == XE_PL_TT && + if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT && new_mem->mem_type == XE_PL_SYSTEM) xe_sriov_vf_ccs_detach_bo(bo); - if (IS_SRIOV_VF(xe) && + if (IS_VF_CCS_READY(xe) && ((move_lacks_source && new_mem->mem_type == XE_PL_TT) || (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) && handle_system_ccs) @@ -994,7 +994,7 @@ out: if (timeout < 0) ret = timeout; - if (IS_VF_CCS_BB_VALID(xe, bo)) + if (IS_VF_CCS_READY(xe)) xe_sriov_vf_ccs_detach_bo(bo); xe_tt_unmap_sg(xe, ttm_bo->ttm); @@ -1141,42 +1141,47 @@ out_unref: int xe_bo_notifier_prepare_pinned(struct xe_bo *bo) { struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); + struct xe_validation_ctx ctx; + struct drm_exec exec; struct xe_bo *backup; int ret = 0; - xe_bo_lock(bo, false); + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) { + ret = drm_exec_lock_obj(&exec, &bo->ttm.base); + drm_exec_retry_on_contention(&exec); + xe_assert(xe, !ret); + xe_assert(xe, !bo->backup_obj); - xe_assert(xe, !bo->backup_obj); + /* + * Since this is called from the PM notifier we might have raced with + * someone unpinning this after we dropped the pinned list lock and + * grabbing the above bo lock. + */ + if (!xe_bo_is_pinned(bo)) + break; - /* - * Since this is called from the PM notifier we might have raced with - * someone unpinning this after we dropped the pinned list lock and - * grabbing the above bo lock. - */ - if (!xe_bo_is_pinned(bo)) - goto out_unlock_bo; + if (!xe_bo_is_vram(bo)) + break; - if (!xe_bo_is_vram(bo)) - goto out_unlock_bo; + if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) + break; - if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) - goto out_unlock_bo; + backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo), + DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | + XE_BO_FLAG_PINNED, &exec); + if (IS_ERR(backup)) { + drm_exec_retry_on_contention(&exec); + ret = PTR_ERR(backup); + xe_validation_retry_on_oom(&ctx, &ret); + break; + } - backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo), - DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | - XE_BO_FLAG_PINNED); - if (IS_ERR(backup)) { - ret = PTR_ERR(backup); - goto out_unlock_bo; + backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ + ttm_bo_pin(&backup->ttm); + bo->backup_obj = backup; } - backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ - ttm_bo_pin(&backup->ttm); - bo->backup_obj = backup; - -out_unlock_bo: - xe_bo_unlock(bo); return ret; } @@ -1202,57 +1207,12 @@ int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo) return 0; } -/** - * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory - * @bo: The buffer object to move. 
- *
- * On successful completion, the object memory will be moved to system memory.
- *
- * This is needed to for special handling of pinned VRAM object during
- * suspend-resume.
- *
- * Return: 0 on success. Negative error code on failure.
- */
-int xe_bo_evict_pinned(struct xe_bo *bo)
+static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
 {
-	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
-	struct xe_bo *backup = bo->backup_obj;
-	bool backup_created = false;
+	struct xe_device *xe = xe_bo_device(bo);
 	bool unmap = false;
 	int ret = 0;
 
-	xe_bo_lock(bo, false);
-
-	if (WARN_ON(!bo->ttm.resource)) {
-		ret = -EINVAL;
-		goto out_unlock_bo;
-	}
-
-	if (WARN_ON(!xe_bo_is_pinned(bo))) {
-		ret = -EINVAL;
-		goto out_unlock_bo;
-	}
-
-	if (!xe_bo_is_vram(bo))
-		goto out_unlock_bo;
-
-	if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
-		goto out_unlock_bo;
-
-	if (!backup) {
-		backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
-						NULL, xe_bo_size(bo),
-						DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
-						XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
-						XE_BO_FLAG_PINNED);
-		if (IS_ERR(backup)) {
-			ret = PTR_ERR(backup);
-			goto out_unlock_bo;
-		}
-		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
-		backup_created = true;
-	}
-
 	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
 		struct xe_migrate *migrate;
 		struct dma_fence *fence;
@@ -1262,14 +1222,11 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 		else
 			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
 
+		xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
 		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
 		if (ret)
 			goto out_backup;
 
-		ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
-		if (ret)
-			goto out_backup;
-
 		fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
 					backup->ttm.resource, false);
 		if (IS_ERR(fence)) {
@@ -1279,8 +1236,6 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 
 		dma_resv_add_fence(bo->ttm.base.resv, fence,
 				   DMA_RESV_USAGE_KERNEL);
-		dma_resv_add_fence(backup->ttm.base.resv, fence,
-				   DMA_RESV_USAGE_KERNEL);
 		dma_fence_put(fence);
 	} else {
 		ret = xe_bo_vmap(backup);
@@ -1290,7 +1245,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 		if (iosys_map_is_null(&bo->vmap)) {
 			ret = xe_bo_vmap(bo);
 			if (ret)
-				goto out_backup;
+				goto out_vunmap;
 			unmap = true;
 		}
 
@@ -1300,15 +1255,78 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 
 	if (!bo->backup_obj)
 		bo->backup_obj = backup;
-
-out_backup:
+out_vunmap:
 	xe_bo_vunmap(backup);
-	if (ret && backup_created)
-		xe_bo_put(backup);
-out_unlock_bo:
+out_backup:
 	if (unmap)
 		xe_bo_vunmap(bo);
-	xe_bo_unlock(bo);
+
+	return ret;
+}
+
+/**
+ * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
+ * @bo: The buffer object to move.
+ *
+ * On successful completion, the object memory will be moved to system memory.
+ *
+ * This is needed for special handling of pinned VRAM objects during
+ * suspend-resume.
+ *
+ * Return: 0 on success. Negative error code on failure.
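+ *
+ * Note that the first successful eviction caches the backup object in
+ * @bo->backup_obj, allowing xe_bo_restore_pinned() to copy the contents
+ * back on resume.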
+ */ +int xe_bo_evict_pinned(struct xe_bo *bo) +{ + struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); + struct xe_validation_ctx ctx; + struct drm_exec exec; + struct xe_bo *backup = bo->backup_obj; + bool backup_created = false; + int ret = 0; + + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) { + ret = drm_exec_lock_obj(&exec, &bo->ttm.base); + drm_exec_retry_on_contention(&exec); + xe_assert(xe, !ret); + + if (WARN_ON(!bo->ttm.resource)) { + ret = -EINVAL; + break; + } + + if (WARN_ON(!xe_bo_is_pinned(bo))) { + ret = -EINVAL; + break; + } + + if (!xe_bo_is_vram(bo)) + break; + + if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) + break; + + if (!backup) { + backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, + xe_bo_size(bo), + DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | + XE_BO_FLAG_PINNED, &exec); + if (IS_ERR(backup)) { + drm_exec_retry_on_contention(&exec); + ret = PTR_ERR(backup); + xe_validation_retry_on_oom(&ctx, &ret); + break; + } + backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ + backup_created = true; + } + + ret = xe_bo_evict_pinned_copy(bo, backup); + } + + if (ret && backup_created) + xe_bo_put(backup); + return ret; } @@ -1358,10 +1376,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo) if (ret) goto out_unlock_bo; - ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1); - if (ret) - goto out_unlock_bo; - fence = xe_migrate_copy(migrate, backup, bo, backup->ttm.resource, bo->ttm.resource, false); @@ -1372,8 +1386,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo) dma_resv_add_fence(bo->ttm.base.resv, fence, DMA_RESV_USAGE_KERNEL); - dma_resv_add_fence(backup->ttm.base.resv, fence, - DMA_RESV_USAGE_KERNEL); dma_fence_put(fence); } else { ret = xe_bo_vmap(backup); @@ -1529,7 +1541,7 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo) if (!xe_bo_is_xe_bo(ttm_bo)) return; - if (IS_VF_CCS_BB_VALID(ttm_to_xe_device(ttm_bo->bdev), bo)) + if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev))) xe_sriov_vf_ccs_detach_bo(bo); /* @@ -1696,7 +1708,7 @@ static void xe_gem_object_free(struct drm_gem_object *obj) * refcount directly if needed. */ __xe_bo_vunmap(gem_to_xe_bo(obj)); - ttm_bo_put(container_of(obj, struct ttm_buffer_object, base)); + ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base)); } static void xe_gem_object_close(struct drm_gem_object *obj, @@ -1725,65 +1737,234 @@ static bool should_migrate_to_smem(struct xe_bo *bo) bo->attr.atomic_access == DRM_XE_ATOMIC_CPU; } -static vm_fault_t xe_gem_fault(struct vm_fault *vmf) +/* Populate the bo if swapped out, or migrate if the access mode requires that. */ +static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx, + struct drm_exec *exec) +{ + struct ttm_buffer_object *tbo = &bo->ttm; + int err = 0; + + if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) { + xe_assert(xe_bo_device(bo), + dma_resv_test_signaled(tbo->base.resv, DMA_RESV_USAGE_KERNEL) || + (tbo->ttm && ttm_tt_is_populated(tbo->ttm))); + err = ttm_bo_populate(&bo->ttm, ctx); + } else if (should_migrate_to_smem(bo)) { + xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM); + err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec); + } + + return err; +} + +/* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. 
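+ * BOs on the vram_userfault list are expected to have their CPU mappings
+ * zapped before VRAM is powered down, so that the next access faults in
+ * again.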
*/ +static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo) +{ + vm_fault_t ret; + + trace_xe_bo_cpu_fault(bo); + + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT); + /* + * When TTM is actually called to insert PTEs, ensure no blocking conditions + * remain, in which case TTM may drop locks and return VM_FAULT_RETRY. + */ + xe_assert(xe, ret != VM_FAULT_RETRY); + + if (ret == VM_FAULT_NOPAGE && + mem_type_is_vram(bo->ttm.resource->mem_type)) { + mutex_lock(&xe->mem_access.vram_userfault.lock); + if (list_empty(&bo->vram_userfault_link)) + list_add(&bo->vram_userfault_link, + &xe->mem_access.vram_userfault.list); + mutex_unlock(&xe->mem_access.vram_userfault.lock); + } + + return ret; +} + +static vm_fault_t xe_err_to_fault_t(int err) +{ + switch (err) { + case 0: + case -EINTR: + case -ERESTARTSYS: + case -EAGAIN: + return VM_FAULT_NOPAGE; + case -ENOMEM: + case -ENOSPC: + return VM_FAULT_OOM; + default: + break; + } + return VM_FAULT_SIGBUS; +} + +static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo) +{ + dma_resv_assert_held(tbo->base.resv); + + return tbo->ttm && + (tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) == + TTM_TT_FLAG_EXTERNAL; +} + +static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe, + struct xe_bo *bo, bool needs_rpm) +{ + struct ttm_buffer_object *tbo = &bo->ttm; + vm_fault_t ret = VM_FAULT_RETRY; + struct xe_validation_ctx ctx; + struct ttm_operation_ctx tctx = { + .interruptible = true, + .no_wait_gpu = true, + .gfp_retry_mayfail = true, + + }; + int err; + + if (needs_rpm && !xe_pm_runtime_get_if_active(xe)) + return VM_FAULT_RETRY; + + err = xe_validation_ctx_init(&ctx, &xe->val, NULL, + (struct xe_val_flags) { + .interruptible = true, + .no_block = true + }); + if (err) + goto out_pm; + + if (!dma_resv_trylock(tbo->base.resv)) + goto out_validation; + + if (xe_ttm_bo_is_imported(tbo)) { + ret = VM_FAULT_SIGBUS; + drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n"); + goto out_unlock; + } + + err = xe_bo_fault_migrate(bo, &tctx, NULL); + if (err) { + /* Return VM_FAULT_RETRY on these errors. 
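+		 * They indicate blocking conditions that the slowpath below
+		 * may be able to resolve once it is allowed to wait.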
*/ + if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY) + ret = xe_err_to_fault_t(err); + goto out_unlock; + } + + if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) + ret = __xe_bo_cpu_fault(vmf, xe, bo); + +out_unlock: + dma_resv_unlock(tbo->base.resv); +out_validation: + xe_validation_ctx_fini(&ctx); +out_pm: + if (needs_rpm) + xe_pm_runtime_put(xe); + + return ret; +} + +static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf) { struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; struct drm_device *ddev = tbo->base.dev; struct xe_device *xe = to_xe_device(ddev); struct xe_bo *bo = ttm_to_xe_bo(tbo); bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK; + bool retry_after_wait = false; + struct xe_validation_ctx ctx; + struct drm_exec exec; vm_fault_t ret; - int idx, r = 0; + int err = 0; + int idx; - if (needs_rpm) - xe_pm_runtime_get(xe); + if (!drm_dev_enter(&xe->drm, &idx)) + return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); - ret = ttm_bo_vm_reserve(tbo, vmf); - if (ret) + ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm); + if (ret != VM_FAULT_RETRY) goto out; - if (drm_dev_enter(ddev, &idx)) { - trace_xe_bo_cpu_fault(bo); + if (fault_flag_allow_retry_first(vmf->flags)) { + if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) + goto out; + retry_after_wait = true; + xe_bo_get(bo); + mmap_read_unlock(vmf->vma->vm_mm); + } else { + ret = VM_FAULT_NOPAGE; + } + + /* + * The fastpath failed and we were not required to return and retry immediately. + * We're now running in one of two modes: + * + * 1) retry_after_wait == true: The mmap_read_lock() is dropped, and we're trying + * to resolve blocking waits. But we can't resolve the fault since the + * mmap_read_lock() is dropped. After retrying the fault, the aim is that the fastpath + * should succeed. But it may fail since we drop the bo lock. + * + * 2) retry_after_wait == false: The fastpath failed, typically even after + * a retry. Do whatever's necessary to resolve the fault. + * + * This construct is recommended to avoid excessive waits under the mmap_lock. + */ + + if (needs_rpm) + xe_pm_runtime_get(xe); - if (should_migrate_to_smem(bo)) { - xe_assert(xe, bo->flags & XE_BO_FLAG_SYSTEM); + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true}, + err) { + struct ttm_operation_ctx tctx = { + .interruptible = true, + .no_wait_gpu = false, + .gfp_retry_mayfail = retry_after_wait, + }; + long lerr; + + err = drm_exec_lock_obj(&exec, &tbo->base); + drm_exec_retry_on_contention(&exec); + if (err) + break; - r = xe_bo_migrate(bo, XE_PL_TT); - if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR) - ret = VM_FAULT_NOPAGE; - else if (r) - ret = VM_FAULT_SIGBUS; + if (xe_ttm_bo_is_imported(tbo)) { + err = -EFAULT; + drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n"); + break; } - if (!ret) - ret = ttm_bo_vm_fault_reserved(vmf, - vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT); - drm_dev_exit(idx); - if (ret == VM_FAULT_RETRY && - !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) - goto out; + err = xe_bo_fault_migrate(bo, &tctx, &exec); + if (err) { + drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &err); + break; + } - /* - * ttm_bo_vm_reserve() already has dma_resv_lock. 
- */
-		if (ret == VM_FAULT_NOPAGE &&
-		    mem_type_is_vram(tbo->resource->mem_type)) {
-			mutex_lock(&xe->mem_access.vram_userfault.lock);
-			if (list_empty(&bo->vram_userfault_link))
-				list_add(&bo->vram_userfault_link,
-					 &xe->mem_access.vram_userfault.list);
-			mutex_unlock(&xe->mem_access.vram_userfault.lock);
+		lerr = dma_resv_wait_timeout(tbo->base.resv,
+					     DMA_RESV_USAGE_KERNEL, true,
+					     MAX_SCHEDULE_TIMEOUT);
+		if (lerr < 0) {
+			err = lerr;
+			break;
 		}
-	} else {
-		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+
+		if (!retry_after_wait)
+			ret = __xe_bo_cpu_fault(vmf, xe, bo);
 	}
+	/* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
+	if (err && !retry_after_wait)
+		ret = xe_err_to_fault_t(err);
 
-	dma_resv_unlock(tbo->base.resv);
-out:
 	if (needs_rpm)
 		xe_pm_runtime_put(xe);
 
+	if (retry_after_wait)
+		xe_bo_put(bo);
+out:
+	drm_dev_exit(idx);
+
 	return ret;
 }
 
@@ -1827,7 +2008,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
 }
 
 static const struct vm_operations_struct xe_gem_vm_ops = {
-	.fault = xe_gem_fault,
+	.fault = xe_bo_cpu_fault,
 	.open = ttm_bo_vm_open,
 	.close = ttm_bo_vm_close,
 	.access = xe_bo_vm_access,
@@ -1875,11 +2056,32 @@ void xe_bo_free(struct xe_bo *bo)
 	kfree(bo);
 }
 
-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
-				     struct xe_tile *tile, struct dma_resv *resv,
-				     struct ttm_lru_bulk_move *bulk, size_t size,
-				     u16 cpu_caching, enum ttm_bo_type type,
-				     u32 flags)
+/**
+ * xe_bo_init_locked() - Initialize or create an xe_bo.
+ * @xe: The xe device.
+ * @bo: An already allocated buffer object or NULL
+ * if the function should allocate a new one.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @resv: Pointer to a locked shared reservation object to use for this bo,
+ * or NULL for the xe_bo to use its own.
+ * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The cpu caching used for system memory backing store.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Initialize or create an xe buffer object. On failure, any allocated buffer
+ * object passed in @bo will have been unreferenced.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+				struct xe_tile *tile, struct dma_resv *resv,
+				struct ttm_lru_bulk_move *bulk, size_t size,
+				u16 cpu_caching, enum ttm_bo_type type,
+				u32 flags, struct drm_exec *exec)
 {
 	struct ttm_operation_ctx ctx = {
 		.interruptible = true,
@@ -1948,6 +2150,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 		ctx.resv = resv;
 	}
 
+	xe_validation_assert_exec(xe, exec, &bo->ttm.base);
 	if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
 		err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
 		if (WARN_ON(err)) {
@@ -2049,7 +2252,7 @@ __xe_bo_create_locked(struct xe_device *xe,
 		      struct xe_tile *tile, struct xe_vm *vm,
 		      size_t size, u64 start, u64 end,
 		      u16 cpu_caching, enum ttm_bo_type type, u32 flags,
-		      u64 alignment)
+		      u64 alignment, struct drm_exec *exec)
 {
 	struct xe_bo *bo = NULL;
 	int err;
@@ -2070,11 +2273,11 @@ __xe_bo_create_locked(struct xe_device *xe,
 		}
 	}
 
-	bo = ___xe_bo_create_locked(xe, bo, tile, vm ?
xe_vm_resv(vm) : NULL, - vm && !xe_vm_in_fault_mode(vm) && - flags & XE_BO_FLAG_USER ? - &vm->lru_bulk_move : NULL, size, - cpu_caching, type, flags); + bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL, + vm && !xe_vm_in_fault_mode(vm) && + flags & XE_BO_FLAG_USER ? + &vm->lru_bulk_move : NULL, size, + cpu_caching, type, flags, exec); if (IS_ERR(bo)) return bo; @@ -2108,9 +2311,10 @@ __xe_bo_create_locked(struct xe_device *xe, if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, - start + xe_bo_size(bo), U64_MAX); + start + xe_bo_size(bo), U64_MAX, + exec); } else { - err = xe_ggtt_insert_bo(t->mem.ggtt, bo); + err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec); } if (err) goto err_unlock_put_bo; @@ -2127,82 +2331,166 @@ err_unlock_put_bo: return ERR_PTR(err); } -struct xe_bo * -xe_bo_create_locked_range(struct xe_device *xe, - struct xe_tile *tile, struct xe_vm *vm, - size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags, u64 alignment) -{ - return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, - flags, alignment); -} - +/** + * xe_bo_create_locked() - Create a BO + * @xe: The xe device. + * @tile: The tile to select for migration of this bo, and the tile used for + * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos. + * @vm: The local vm or NULL for external objects. + * @size: The storage size to use for the bo. + * @type: The TTM buffer object type. + * @flags: XE_BO_FLAG_ flags. + * @exec: The drm_exec transaction to use for exhaustive eviction. + * + * Create a locked xe BO with no range- nor alignment restrictions. + * + * Return: The buffer object on success. Negative error pointer on failure. + */ struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, - enum ttm_bo_type type, u32 flags) + enum ttm_bo_type type, u32 flags, + struct drm_exec *exec) { return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, - flags, 0); + flags, 0, exec); } -struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, - struct xe_vm *vm, size_t size, - u16 cpu_caching, - u32 flags) +static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile, + size_t size, u16 cpu_caching, + enum ttm_bo_type type, u32 flags, + u64 alignment, bool intr) { - struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, - cpu_caching, ttm_bo_type_device, - flags | XE_BO_FLAG_USER, 0); - if (!IS_ERR(bo)) - xe_bo_unlock_vm_held(bo); + struct xe_validation_ctx ctx; + struct drm_exec exec; + struct xe_bo *bo; + int ret = 0; - return bo; + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr}, + ret) { + bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL, + cpu_caching, type, flags, alignment, &exec); + drm_exec_retry_on_contention(&exec); + if (IS_ERR(bo)) { + ret = PTR_ERR(bo); + xe_validation_retry_on_oom(&ctx, &ret); + } else { + xe_bo_unlock(bo); + } + } + + return ret ? ERR_PTR(ret) : bo; } -struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile, - struct xe_vm *vm, size_t size, - enum ttm_bo_type type, u32 flags) +/** + * xe_bo_create_user() - Create a user BO + * @xe: The xe device. + * @vm: The local vm or NULL for external objects. + * @size: The storage size to use for the bo. + * @cpu_caching: The caching mode to be used for system backing store. + * @flags: XE_BO_FLAG_ flags. 
+ * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
+ * if such a transaction should be initiated by the call.
+ *
+ * Create a bo on behalf of user-space.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_user(struct xe_device *xe,
+				struct xe_vm *vm, size_t size,
+				u16 cpu_caching,
+				u32 flags, struct drm_exec *exec)
 {
-	struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
+	struct xe_bo *bo;
+
+	flags |= XE_BO_FLAG_USER;
 
-	if (!IS_ERR(bo))
-		xe_bo_unlock_vm_held(bo);
+	if (vm || exec) {
+		xe_assert(xe, exec);
+		bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
+					   cpu_caching, ttm_bo_type_device,
+					   flags, 0, exec);
+		if (!IS_ERR(bo))
+			xe_bo_unlock_vm_held(bo);
+	} else {
+		bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
+				       ttm_bo_type_device, flags, 0, true);
+	}
 
 	return bo;
 }
 
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
-				      struct xe_vm *vm,
-				      size_t size, u64 offset,
-				      enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @start: Start of fixed VRAM range or 0.
+ * @end: End of fixed VRAM range or ~0ULL.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ *
+ * Create and pin an Xe BO with range options. If @start and @end indicate
+ * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
+ * only.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+					  size_t size, u64 start, u64 end,
+					  enum ttm_bo_type type, u32 flags)
 {
-	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
-					       type, flags, 0);
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
+	struct xe_bo *bo;
+	int err = 0;
+
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+		bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
+					   0, type, flags, 0, &exec);
+		if (IS_ERR(bo)) {
+			drm_exec_retry_on_contention(&exec);
+			err = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &err);
+			break;
+		}
+
+		err = xe_bo_pin(bo, &exec);
+		xe_bo_unlock(bo);
+		if (err) {
+			xe_bo_put(bo);
+			drm_exec_retry_on_contention(&exec);
+			xe_validation_retry_on_oom(&ctx, &err);
+			break;
+		}
+	}
+
+	return err ? ERR_PTR(err) : bo;
 }
 
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
-					      struct xe_tile *tile,
-					      struct xe_vm *vm,
-					      size_t size, u64 offset,
-					      enum ttm_bo_type type, u32 flags,
-					      u64 alignment)
+static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+						     struct xe_tile *tile,
+						     struct xe_vm *vm,
+						     size_t size, u64 offset,
+						     enum ttm_bo_type type, u32 flags,
+						     u64 alignment, struct drm_exec *exec)
 {
 	struct xe_bo *bo;
 	int err;
 	u64 start = offset == ~0ull ? 0 : offset;
-	u64 end = offset == ~0ull ? offset : start + size;
+	u64 end = offset == ~0ull ? ~0ull : start + size;
 
 	if (flags & XE_BO_FLAG_STOLEN &&
 	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
 		flags |= XE_BO_FLAG_GGTT;
 
-	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-				       flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
-				       alignment);
+	bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+				   flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
+				   alignment, exec);
 	if (IS_ERR(bo))
 		return bo;
 
-	err = xe_bo_pin(bo);
+	err = xe_bo_pin(bo, exec);
 	if (err)
 		goto err_put;
@@ -2222,11 +2510,100 @@ err_put:
 	return ERR_PTR(err);
 }
 
+/**
+ * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @offset: Optional VRAM offset or %~0ull for don't care.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @alignment: GGTT alignment.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and optionally mapped bo with VRAM offset and GGTT alignment
+ * options. The bo will be external and not associated with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+			     size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
+			     u64 alignment, bool intr)
+{
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
+	struct xe_bo *bo;
+	int ret = 0;
+
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+			    ret) {
+		bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
+						     type, flags, alignment, &exec);
+		if (IS_ERR(bo)) {
+			drm_exec_retry_on_contention(&exec);
+			ret = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &ret);
+		}
+	}
+
+	return ret ? ERR_PTR(ret) : bo;
+}
+
+/**
+ * xe_bo_create_pin_map() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The vm to associate the buffer object with. The vm's resv must be locked
+ * with the transaction represented by @exec.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, and
+ * previously used for locking @vm's resv.
+ *
+ * Create a pinned and mapped bo associated with @vm.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
+ * configured for interruptible locking.
+ */
 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
 				   struct xe_vm *vm, size_t size,
-				   enum ttm_bo_type type, u32 flags)
+				   enum ttm_bo_type type, u32 flags,
+				   struct drm_exec *exec)
+{
+	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
+					       0, exec);
+}
+
+/**
+ * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and mapped bo. The bo will be external and not associated
+ * with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+					size_t size, enum ttm_bo_type type, u32 flags,
+					bool intr)
 {
-	return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
+	return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
 }
 
 static void __xe_bo_unpin_map_no_vm(void *arg)
@@ -2241,8 +2618,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
 	int ret;
 
 	KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
-
-	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
+	bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
 	if (IS_ERR(bo))
 		return bo;
@@ -2253,6 +2629,11 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
 	return bo;
 }
 
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
+{
+	devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
+}
+
 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
 					     const void *data, size_t size, u32 flags)
 {
@@ -2325,6 +2706,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
  * xe_bo_pin_external - pin an external BO
  * @bo: buffer object to be pinned
  * @in_place: Pin in current placement, don't attempt to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
  *
  * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
  * BO. Unique call compared to xe_bo_pin as this function has it own set of
@@ -2332,7 +2714,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 *
 * Returns 0 for success, negative error code otherwise.
 */
-int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
 {
 	struct xe_device *xe = xe_bo_device(bo);
 	int err;
@@ -2342,7 +2724,7 @@ int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
 	if (!xe_bo_is_pinned(bo)) {
 		if (!in_place) {
-			err = xe_bo_validate(bo, NULL, false);
+			err = xe_bo_validate(bo, NULL, false, exec);
 			if (err)
 				return err;
 		}
@@ -2365,7 +2747,17 @@ int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
 	return 0;
 }
 
-int xe_bo_pin(struct xe_bo *bo)
+/**
+ * xe_bo_pin() - Pin a kernel bo after potentially migrating it
+ * @bo: The kernel bo to pin.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Attempts to migrate a bo to @bo->placement. If that succeeds,
+ * pins the bo.
+ *
+ * Return: %0 on success, negative error code on migration failure.
+ */
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
 {
 	struct ttm_place *place = &bo->placements[0];
 	struct xe_device *xe = xe_bo_device(bo);
@@ -2387,7 +2779,7 @@ int xe_bo_pin(struct xe_bo *bo)
 	/* We only expect at most 1 pin */
 	xe_assert(xe, !xe_bo_is_pinned(bo));
 
-	err = xe_bo_validate(bo, NULL, false);
+	err = xe_bo_validate(bo, NULL, false, exec);
 	if (err)
 		return err;
@@ -2480,6 +2872,7 @@ void xe_bo_unpin(struct xe_bo *bo)
  * NULL. Used together with @allow_res_evict.
  * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
  * reservation object.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
  *
  * Make sure the bo is in allowed placement, migrating it if necessary. If
  * needed, other bos will be evicted. If bos selected for eviction shares
@@ -2489,7 +2882,8 @@ void xe_bo_unpin(struct xe_bo *bo)
  * Return: 0 on success, negative error code on failure. May return
  * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
 */
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+		   struct drm_exec *exec)
 {
 	struct ttm_operation_ctx ctx = {
 		.interruptible = true,
@@ -2511,6 +2905,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
 	xe_vm_set_validating(vm, allow_res_evict);
 	trace_xe_bo_validate(bo);
+	xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
 	ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
 	xe_vm_clear_validating(vm, allow_res_evict);
@@ -2706,8 +3101,9 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct xe_device *xe = to_xe_device(dev);
 	struct xe_file *xef = to_xe_file(file);
 	struct drm_xe_gem_create *args = data;
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
 	struct xe_vm *vm = NULL;
-	ktime_t end = 0;
 	struct xe_bo *bo;
 	unsigned int bo_flags;
 	u32 handle;
@@ -2781,25 +3177,26 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
-retry:
-	if (vm) {
-		err = xe_vm_lock(vm, true);
-		if (err)
-			goto out_vm;
+	err = 0;
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+			    err) {
+		if (vm) {
+			err = xe_vm_drm_exec_lock(vm, &exec);
+			drm_exec_retry_on_contention(&exec);
+			if (err)
+				break;
+		}
+		bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
+				       bo_flags, &exec);
+		drm_exec_retry_on_contention(&exec);
+		if (IS_ERR(bo)) {
+			err = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &err);
+			break;
+		}
 	}
-
-	bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
-			       bo_flags);
-
-	if (vm)
-		xe_vm_unlock(vm);
-
-	if (IS_ERR(bo)) {
-		err = PTR_ERR(bo);
-		if (xe_vm_validate_should_retry(NULL, err, &end))
-			goto retry;
+	if (err)
 		goto out_vm;
-	}
 
 	if (args->extensions) {
 		err = gem_create_user_extensions(xe, bo, args->extensions, 0);
@@ -2948,6 +3345,9 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
  * xe_bo_migrate - Migrate an object to the desired region id
  * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
+ * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
+ * a default interruptible ctx is to be used.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
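The gem-create hunk above replaces the old open-coded xe_vm_validate_should_retry() loop with a drm_exec-based validation transaction. For reference, the caller-side shape of that pattern is sketched below. This is illustrative only: example_create_bo() and its WB caching choice are hypothetical, while xe_validation_guard(), xe_vm_drm_exec_lock(), drm_exec_retry_on_contention() and xe_validation_retry_on_oom() are the calls this patch itself introduces and uses.

/*
 * Illustrative sketch, not part of the patch: a caller running a BO
 * creation inside a validation transaction.
 */
static struct xe_bo *example_create_bo(struct xe_device *xe, struct xe_vm *vm,
				       size_t size, u32 flags)
{
	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	struct xe_bo *bo = NULL;
	int err = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    (struct xe_val_flags) {.interruptible = true}, err) {
		if (vm) {
			/* Lock the VM's reservation under the transaction. */
			err = xe_vm_drm_exec_lock(vm, &exec);
			drm_exec_retry_on_contention(&exec);
			if (err)
				break;
		}
		bo = xe_bo_create_user(xe, vm, size,
				       DRM_XE_GEM_CPU_CACHING_WB, flags, &exec);
		/* On ww-mutex contention, back off and rerun the guard body. */
		drm_exec_retry_on_contention(&exec);
		if (IS_ERR(bo)) {
			err = PTR_ERR(bo);
			/* On -ENOMEM, wait for eviction room, then retry. */
			xe_validation_retry_on_oom(&ctx, &err);
		}
	}

	return err ? ERR_PTR(err) : bo;
}

The guard body restarts on ww-mutex contention and, after waiting, on allocation failure, which is exactly what replaces the old ktime-bounded retry loop.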
@@ -2959,7 +3359,8 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place) * Return: 0 on success. Negative error code on failure. In particular may * return -EINTR or -ERESTARTSYS if signal pending. */ -int xe_bo_migrate(struct xe_bo *bo, u32 mem_type) +int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx, + struct drm_exec *exec) { struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); struct ttm_operation_ctx ctx = { @@ -2971,6 +3372,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type) struct ttm_place requested; xe_bo_assert_held(bo); + tctx = tctx ? tctx : &ctx; if (bo->ttm.resource->mem_type == mem_type) return 0; @@ -2997,19 +3399,22 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type) add_vram(xe, bo, &requested, bo->flags, mem_type, &c); } - return ttm_bo_validate(&bo->ttm, &placement, &ctx); + if (!tctx->no_wait_gpu) + xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base); + return ttm_bo_validate(&bo->ttm, &placement, tctx); } /** * xe_bo_evict - Evict an object to evict placement * @bo: The buffer object to migrate. + * @exec: The drm_exec transaction to use for exhaustive eviction. * * On successful completion, the object memory will be moved to evict * placement. This function blocks until the object has been fully moved. * * Return: 0 on success. Negative error code on failure. */ -int xe_bo_evict(struct xe_bo *bo) +int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec) { struct ttm_operation_ctx ctx = { .interruptible = false, @@ -3169,11 +3574,11 @@ int xe_bo_dumb_create(struct drm_file *file_priv, args->size = ALIGN(mul_u32_u32(args->pitch, args->height), page_size); - bo = xe_bo_create_user(xe, NULL, NULL, args->size, + bo = xe_bo_create_user(xe, NULL, args->size, DRM_XE_GEM_CPU_CACHING_WC, XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | XE_BO_FLAG_SCANOUT | - XE_BO_FLAG_NEEDS_CPU_ACCESS); + XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index cfb1ec266a6d..a77af42b5f9e 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -10,6 +10,7 @@ #include "xe_bo_types.h" #include "xe_macros.h" +#include "xe_validation.h" #include "xe_vm_types.h" #include "xe_vm.h" #include "xe_vram_types.h" @@ -88,40 +89,34 @@ struct sg_table; struct xe_bo *xe_bo_alloc(void); void xe_bo_free(struct xe_bo *bo); -struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, - struct xe_tile *tile, struct dma_resv *resv, - struct ttm_lru_bulk_move *bulk, size_t size, - u16 cpu_caching, enum ttm_bo_type type, - u32 flags); -struct xe_bo * -xe_bo_create_locked_range(struct xe_device *xe, - struct xe_tile *tile, struct xe_vm *vm, - size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags, u64 alignment); +struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo, + struct xe_tile *tile, struct dma_resv *resv, + struct ttm_lru_bulk_move *bulk, size_t size, + u16 cpu_caching, enum ttm_bo_type type, + u32 flags, struct drm_exec *exec); struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, - enum ttm_bo_type type, u32 flags); -struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile, - struct xe_vm *vm, size_t size, - enum ttm_bo_type type, u32 flags); -struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, - struct xe_vm *vm, size_t size, - u16 cpu_caching, - u32 flags); + enum ttm_bo_type type, 
u32 flags,
+				  struct drm_exec *exec);
+struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_vm *vm, size_t size,
+				u16 cpu_caching, u32 flags, struct drm_exec *exec);
 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
 				   struct xe_vm *vm, size_t size,
-				   enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
-				      struct xe_vm *vm, size_t size, u64 offset,
-				      enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
-					      struct xe_tile *tile,
-					      struct xe_vm *vm,
-					      size_t size, u64 offset,
-					      enum ttm_bo_type type, u32 flags,
-					      u64 alignment);
+				   enum ttm_bo_type type, u32 flags,
+				   struct drm_exec *exec);
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+					size_t size, enum ttm_bo_type type, u32 flags,
+					bool intr);
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+					  size_t size, u64 start, u64 end,
+					  enum ttm_bo_type type, u32 flags);
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+			     size_t size, u64 offset, enum ttm_bo_type type,
+			     u32 flags, u64 alignment, bool intr);
 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
 					   size_t size, u32 flags);
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo);
 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
 					     const void *data, size_t size, u32 flags);
 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
@@ -200,11 +195,12 @@ static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
 	}
 }
 
-int xe_bo_pin_external(struct xe_bo *bo, bool in_place);
-int xe_bo_pin(struct xe_bo *bo);
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec);
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec);
 void xe_bo_unpin_external(struct xe_bo *bo);
 void xe_bo_unpin(struct xe_bo *bo);
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+		   struct drm_exec *exec);
 
 static inline bool xe_bo_is_pinned(struct xe_bo *bo)
 {
@@ -285,8 +281,9 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
 
 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
 
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo);
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
+		  struct drm_exec *exec);
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec);
 
 int xe_bo_evict_pinned(struct xe_bo *bo);
 int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
@@ -315,6 +312,21 @@ static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
 	return PAGE_ALIGN(xe_bo_size(bo));
 }
 
+/**
+ * xe_bo_has_valid_ccs_bb - Check if the CCS's BBs were set up for the BO.
+ * @bo: the &xe_bo to check
+ *
+ * The CCS's BBs should only be set up by the VF driver, but it is safe
+ * to call this function from a non-VF driver as well.
+ *
+ * Return: true iff the CCS's BBs are set up, false otherwise.
+ */ +static inline bool xe_bo_has_valid_ccs_bb(struct xe_bo *bo) +{ + return bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] && + bo->bb_ccs[XE_SRIOV_VF_CCS_WRITE_CTX]; +} + static inline bool xe_bo_has_pages(struct xe_bo *bo) { if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) || diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index 7484ce55a303..d5dbc51e8612 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -158,8 +158,8 @@ int xe_bo_evict_all(struct xe_device *xe) if (ret) return ret; - ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, - &xe->pinned.late.evicted, xe_bo_evict_pinned); + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external, + &xe->pinned.late.external, xe_bo_evict_pinned); if (!ret) ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present, diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 314652afdca7..d4fe3c8dca5b 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -25,7 +25,9 @@ struct xe_vm; /* TODO: To be selected with VM_MADVISE */ #define XE_BO_PRIORITY_NORMAL 1 -/** @xe_bo: XE buffer object */ +/** + * struct xe_bo - Xe buffer object + */ struct xe_bo { /** @ttm: TTM base buffer object */ struct ttm_buffer_object ttm; @@ -47,7 +49,7 @@ struct xe_bo { struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE]; /** @vmap: iosys map of this buffer */ struct iosys_map vmap; - /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */ + /** @kmap: TTM bo kmap object for internal use only. Keep off. */ struct ttm_bo_kmap_obj kmap; /** @pinned_link: link to present / evicted list of pinned BO */ struct list_head pinned_link; @@ -82,10 +84,10 @@ struct xe_bo { /** @created: Whether the bo has passed initial creation */ bool created; - /** @ccs_cleared */ + /** @ccs_cleared: true means that CCS region of BO is already cleared */ bool ccs_cleared; - /** @bb_ccs_rw: BB instructions of CCS read/write. Valid only for VF */ + /** @bb_ccs: BB instructions of CCS read/write. Valid only for VF */ struct xe_bb *bb_ccs[XE_SRIOV_VF_CCS_CTX_COUNT]; /** @@ -99,9 +101,10 @@ struct xe_bo { struct drm_pagemap_devmem devmem_allocation; /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */ - struct list_head vram_userfault_link; + struct list_head vram_userfault_link; - /** @min_align: minimum alignment needed for this BO if different + /** + * @min_align: minimum alignment needed for this BO if different * from default */ u64 min_align; diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index 1025d3979b06..8a9b950e7a6d 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -4,6 +4,7 @@ */ #include <linux/bitops.h> +#include <linux/ctype.h> #include <linux/configfs.h> #include <linux/cleanup.h> #include <linux/find.h> @@ -12,6 +13,7 @@ #include <linux/pci.h> #include <linux/string.h> +#include "instructions/xe_mi_commands.h" #include "xe_configfs.h" #include "xe_hw_engine_types.h" #include "xe_module.h" @@ -21,7 +23,7 @@ * DOC: Xe Configfs * * Overview - * ========= + * ======== * * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a * configfs subsystem called ``xe`` that creates a directory in the mounted @@ -34,7 +36,7 @@ * * To create a device, the ``xe`` module should already be loaded, but some * attributes can only be set before binding the device. 
It can be accomplished
- * by blocking the driver autoprobe:
+ * by blocking the driver autoprobe::
 *
 *	# echo 0 > /sys/bus/pci/drivers_autoprobe
 *	# modprobe xe
@@ -115,6 +117,45 @@
 *
 * This attribute can only be set before binding to the device.
 *
+ * Context restore BB
+ * ------------------
+ *
+ * Allow executing a batch buffer during context switches. When the
+ * GPU is restoring the context, it executes additional commands. It's useful
+ * for testing additional workarounds and validating certain HW behaviors: it's
+ * not intended for normal execution and will taint the kernel with TAINT_TEST
+ * when used.
+ *
+ * Currently this is implemented only for post and mid context restore.
+ * Examples:
+ *
+ * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f10 after the
+ *    normal context restore::
+ *
+ *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
+ *
+ * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f10 at the
+ *    beginning of the context restore::
+ *
+ *	# echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ *		> /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
+ *
+ * #. Load certain values in a couple of registers (it can be used as a simpler
+ *    alternative to the `cmd` action)::
+ *
+ *	# cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
+ *	rcs reg 4F100 DEADBEEF
+ *	rcs reg 4F104 FFFFFFFF
+ *	EOF
+ *
+ * .. note::
+ *
+ *    When using multiple lines, make sure to use a command that is
+ *    implemented with a single write syscall, such as a heredoc.
+ *
+ * These attributes can only be set before binding to the device.
+ *
 * Remove devices
 * ==============
 *
@@ -123,17 +164,27 @@
 * # rmdir /sys/kernel/config/xe/0000:03:00.0/
 */
 
+/* Similar to struct xe_bb, but not tied to HW (yet) */
+struct wa_bb {
+	u32 *cs;
+	u32 len; /* in dwords */
+};
+
 struct xe_config_group_device {
 	struct config_group group;
 
 	struct xe_config_device {
 		u64 engines_allowed;
+		struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
+		struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
 		bool survivability_mode;
 		bool enable_psmi;
 	} config;
 
 	/* protects attributes */
 	struct mutex lock;
+	/* matching descriptor */
+	const struct xe_device_desc *desc;
 };
 
 static const struct xe_config_device device_defaults = {
@@ -150,6 +201,7 @@ static void set_device_defaults(struct xe_config_device *config)
 struct engine_info {
 	const char *cls;
 	u64 mask;
+	enum xe_engine_class engine_class;
 };
 
 /* Some helpful macros to aid on the sizing of buffer allocation when parsing */
@@ -157,12 +209,12 @@ struct engine_info {
 #define MAX_ENGINE_INSTANCE_CHARS 2
 
 static const struct engine_info engine_info[] = {
-	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
-	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
-	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
-	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
-	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
-	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+	{ .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
+	{ .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
+	{ .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
+	{ .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
+	{ .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
+	{ .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class =
XE_ENGINE_CLASS_OTHER }, }; static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item) @@ -251,7 +303,18 @@ static ssize_t engines_allowed_show(struct config_item *item, char *page) return p - page; } -static bool lookup_engine_mask(const char *pattern, u64 *mask) +/* + * Lookup engine_info. If @mask is not NULL, reduce the mask according to the + * instance in @pattern. + * + * Examples of inputs: + * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and + * mask == BIT_ULL(XE_HW_ENGINE_RCS0) + * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and + * mask == XE_HW_ENGINE_RCS_MASK + * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info + */ +static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask) { for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) { u8 instance; @@ -261,44 +324,62 @@ static bool lookup_engine_mask(const char *pattern, u64 *mask) continue; pattern += strlen(engine_info[i].cls); + if (!mask && !*pattern) + return &engine_info[i]; if (!strcmp(pattern, "*")) { *mask = engine_info[i].mask; - return true; + return &engine_info[i]; } if (kstrtou8(pattern, 10, &instance)) - return false; + return NULL; bit = __ffs64(engine_info[i].mask) + instance; if (bit >= fls64(engine_info[i].mask)) - return false; + return NULL; *mask = BIT_ULL(bit); - return true; + return &engine_info[i]; } - return false; + return NULL; +} + +static int parse_engine(const char *s, const char *end_chars, u64 *mask, + const struct engine_info **pinfo) +{ + char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1]; + const struct engine_info *info; + size_t len; + + len = strcspn(s, end_chars); + if (len >= sizeof(buf)) + return -EINVAL; + + memcpy(buf, s, len); + buf[len] = '\0'; + + info = lookup_engine_info(buf, mask); + if (!info) + return -ENOENT; + + if (pinfo) + *pinfo = info; + + return len; } static ssize_t engines_allowed_store(struct config_item *item, const char *page, size_t len) { struct xe_config_group_device *dev = to_xe_config_group_device(item); - size_t patternlen, p; + ssize_t patternlen, p; u64 mask, val = 0; for (p = 0; p < len; p += patternlen + 1) { - char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1]; - - patternlen = strcspn(page + p, ",\n"); - if (patternlen >= sizeof(buf)) - return -EINVAL; - - memcpy(buf, page + p, patternlen); - buf[patternlen] = '\0'; - - if (!lookup_engine_mask(buf, &mask)) + patternlen = parse_engine(page + p, ",\n", &mask, NULL); + if (patternlen < 0) return -EINVAL; val |= mask; @@ -339,11 +420,250 @@ static ssize_t enable_psmi_store(struct config_item *item, const char *page, siz return len; } +static bool wa_bb_read_advance(bool dereference, char **p, + const char *append, size_t len, + size_t *max_size) +{ + if (dereference) { + if (len >= *max_size) + return false; + *max_size -= len; + if (append) + memcpy(*p, append, len); + } + + *p += len; + + return true; +} + +static ssize_t wa_bb_show(struct xe_config_group_device *dev, + struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX], + char *data, size_t sz) +{ + char *p = data; + + guard(mutex)(&dev->lock); + + for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) { + enum xe_engine_class ec = engine_info[i].engine_class; + size_t len; + + if (!wa_bb[ec].len) + continue; + + len = snprintf(p, sz, "%s:", engine_info[i].cls); + if (!wa_bb_read_advance(data, &p, NULL, len, &sz)) + return -ENOBUFS; + + for (size_t j = 0; j < wa_bb[ec].len; j++) { + len = 
snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
+			if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
+				return -ENOBUFS;
+		}
+
+		if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
+			return -ENOBUFS;
+	}
+
+	if (!wa_bb_read_advance(data, &p, "", 1, &sz))
+		return -ENOBUFS;
+
+	/* Reserve one more to match check for '\0' */
+	if (!data)
+		p++;
+
+	return p - data;
+}
+
+static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
+{
+	struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+	return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
+}
+
+static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
+{
+	struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+	return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
+}
+
+static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
+{
+	if (wa_bb->cs)
+		wa_bb->cs[wa_bb->len] = val;
+
+	wa_bb->len++;
+}
+
+static ssize_t parse_hex(const char *line, u32 *pval)
+{
+	char numstr[12];
+	const char *p;
+	ssize_t numlen;
+
+	p = line + strspn(line, " \t");
+	if (!*p || *p == '\n')
+		return 0;
+
+	numlen = strcspn(p, " \t\n");
+	if (!numlen || numlen >= sizeof(numstr) - 1)
+		return -EINVAL;
+
+	memcpy(numstr, p, numlen);
+	numstr[numlen] = '\0';
+	p += numlen;
+
+	if (kstrtou32(numstr, 16, pval))
+		return -EINVAL;
+
+	return p - line;
+}
+
+/*
+ * Parse lines with the format
+ *
+ *	<engine-class> cmd <u32> <u32...>
+ *	<engine-class> reg <u32_addr> <u32_val>
+ *
+ * and optionally save them if @wa_bb[i].cs is non-NULL.
+ *
+ * Return the number of dwords parsed.
+ */
+static ssize_t parse_wa_bb_lines(const char *lines,
+				 struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
+{
+	ssize_t dwords = 0, ret;
+	const char *p;
+
+	for (p = lines; *p; p++) {
+		const struct engine_info *info = NULL;
+		u32 val, val2;
+
+		/* Also allow empty lines */
+		p += strspn(p, " \t\n");
+		if (!*p)
+			break;
+
+		ret = parse_engine(p, " \t\n", NULL, &info);
+		if (ret < 0)
+			return ret;
+
+		p += ret;
+		p += strspn(p, " \t");
+
+		if (str_has_prefix(p, "cmd")) {
+			for (p += strlen("cmd"); *p;) {
+				ret = parse_hex(p, &val);
+				if (ret < 0)
+					return -EINVAL;
+				if (!ret)
+					break;
+
+				p += ret;
+				dwords++;
+				wa_bb_append(&wa_bb[info->engine_class], val);
+			}
+		} else if (str_has_prefix(p, "reg")) {
+			p += strlen("reg");
+			ret = parse_hex(p, &val);
+			if (ret <= 0)
+				return -EINVAL;
+
+			p += ret;
+			ret = parse_hex(p, &val2);
+			if (ret <= 0)
+				return -EINVAL;
+
+			p += ret;
+			dwords += 3;
+			wa_bb_append(&wa_bb[info->engine_class],
+				     MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
+			wa_bb_append(&wa_bb[info->engine_class], val);
+			wa_bb_append(&wa_bb[info->engine_class], val2);
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	return dwords;
+}
+
+static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
+			   struct xe_config_group_device *dev,
+			   const char *page, size_t len)
+{
+	/* tmp_wa_bb must match wa_bb's size */
+	struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
+	ssize_t count, class;
+	u32 *tmp;
+
+	/* 1. Count dwords - wa_bb[i].cs is NULL for all classes */
+	count = parse_wa_bb_lines(page, tmp_wa_bb);
+	if (count < 0)
+		return count;
+
+	guard(mutex)(&dev->lock);
+
+	if (is_bound(dev))
+		return -EBUSY;
+
+	/*
+	 * 2.
Allocate a u32 array and set the pointers to the right positions + * according to the length of each class' wa_bb + */ + tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + if (!count) { + memset(wa_bb, 0, sizeof(tmp_wa_bb)); + return len; + } + + for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) { + tmp_wa_bb[class].cs = tmp + count; + count += tmp_wa_bb[class].len; + tmp_wa_bb[class].len = 0; + } + + /* 3. Parse wa_bb lines again, this time saving the values */ + count = parse_wa_bb_lines(page, tmp_wa_bb); + if (count < 0) + return count; + + memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb)); + + return len; +} + +static ssize_t ctx_restore_mid_bb_store(struct config_item *item, + const char *data, size_t sz) +{ + struct xe_config_group_device *dev = to_xe_config_group_device(item); + + return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz); +} + +static ssize_t ctx_restore_post_bb_store(struct config_item *item, + const char *data, size_t sz) +{ + struct xe_config_group_device *dev = to_xe_config_group_device(item); + + return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz); +} + +CONFIGFS_ATTR(, ctx_restore_mid_bb); +CONFIGFS_ATTR(, ctx_restore_post_bb); CONFIGFS_ATTR(, enable_psmi); CONFIGFS_ATTR(, engines_allowed); CONFIGFS_ATTR(, survivability_mode); static struct configfs_attribute *xe_config_device_attrs[] = { + &attr_ctx_restore_mid_bb, + &attr_ctx_restore_post_bb, &attr_enable_psmi, &attr_engines_allowed, &attr_survivability_mode, @@ -355,6 +675,8 @@ static void xe_config_device_release(struct config_item *item) struct xe_config_group_device *dev = to_xe_config_group_device(item); mutex_destroy(&dev->lock); + + kfree(dev->config.ctx_restore_post_bb[0].cs); kfree(dev); } @@ -362,8 +684,26 @@ static struct configfs_item_operations xe_config_device_ops = { .release = xe_config_device_release, }; +static bool xe_config_device_is_visible(struct config_item *item, + struct configfs_attribute *attr, int n) +{ + struct xe_config_group_device *dev = to_xe_config_group_device(item); + + if (attr == &attr_survivability_mode) { + if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE) + return false; + } + + return true; +} + +static struct configfs_group_operations xe_config_device_group_ops = { + .is_visible = xe_config_device_is_visible, +}; + static const struct config_item_type xe_config_device_type = { .ct_item_ops = &xe_config_device_ops, + .ct_group_ops = &xe_config_device_group_ops, .ct_attrs = xe_config_device_attrs, .ct_owner = THIS_MODULE, }; @@ -442,6 +782,7 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro if (!dev) return ERR_PTR(-ENOMEM); + dev->desc = match; set_device_defaults(&dev->config); config_group_init_type_name(&dev->group, name, &xe_config_device_type); @@ -451,12 +792,12 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro return &dev->group; } -static struct configfs_group_operations xe_config_device_group_ops = { +static struct configfs_group_operations xe_config_group_ops = { .make_group = xe_config_make_device_group, }; static const struct config_item_type xe_configfs_type = { - .ct_group_ops = &xe_config_device_group_ops, + .ct_group_ops = &xe_config_group_ops, .ct_owner = THIS_MODULE, }; @@ -543,23 +884,6 @@ bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) } /** - * xe_configfs_clear_survivability_mode - clear configfs survivability mode - * @pdev: pci device - */ -void 
xe_configfs_clear_survivability_mode(struct pci_dev *pdev) -{ - struct xe_config_group_device *dev = find_xe_config_group_device(pdev); - - if (!dev) - return; - - guard(mutex)(&dev->lock); - dev->config.survivability_mode = 0; - - config_group_put(&dev->group); -} - -/** * xe_configfs_get_engines_allowed - get engine allowed mask from configfs * @pdev: pci device * @@ -594,11 +918,63 @@ bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) return false; ret = dev->config.enable_psmi; - config_item_put(&dev->group.cg_item); + config_group_put(&dev->group); return ret; } +/** + * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting + * @pdev: pci device + * @class: hw engine class + * @cs: pointer to the bb to use - only valid during probe + * + * Return: Number of dwords used in the mid_ctx_restore setting in configfs + */ +u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, + enum xe_engine_class class, + const u32 **cs) +{ + struct xe_config_group_device *dev = find_xe_config_group_device(pdev); + u32 len; + + if (!dev) + return 0; + + if (cs) + *cs = dev->config.ctx_restore_mid_bb[class].cs; + + len = dev->config.ctx_restore_mid_bb[class].len; + config_group_put(&dev->group); + + return len; +} + +/** + * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting + * @pdev: pci device + * @class: hw engine class + * @cs: pointer to the bb to use - only valid during probe + * + * Return: Number of dwords used in the post_ctx_restore setting in configfs + */ +u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, + enum xe_engine_class class, + const u32 **cs) +{ + struct xe_config_group_device *dev = find_xe_config_group_device(pdev); + u32 len; + + if (!dev) + return 0; + + *cs = dev->config.ctx_restore_post_bb[class].cs; + len = dev->config.ctx_restore_post_bb[class].len; + config_group_put(&dev->group); + + return len; +} + int __init xe_configfs_init(void) { int ret; @@ -614,7 +990,7 @@ int __init xe_configfs_init(void) return 0; } -void __exit xe_configfs_exit(void) +void xe_configfs_exit(void) { configfs_unregister_subsystem(&xe_configfs); mutex_destroy(&xe_configfs.su_mutex); diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h index 58c8c3164000..c61e0e47ed94 100644 --- a/drivers/gpu/drm/xe/xe_configfs.h +++ b/drivers/gpu/drm/xe/xe_configfs.h @@ -8,6 +8,8 @@ #include <linux/limits.h> #include <linux/types.h> +#include <xe_hw_engine_types.h> + struct pci_dev; #if IS_ENABLED(CONFIG_CONFIGFS_FS) @@ -15,17 +17,23 @@ int xe_configfs_init(void); void xe_configfs_exit(void); void xe_configfs_check_device(struct pci_dev *pdev); bool xe_configfs_get_survivability_mode(struct pci_dev *pdev); -void xe_configfs_clear_survivability_mode(struct pci_dev *pdev); u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev); bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev); +u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, + const u32 **cs); +u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, + const u32 **cs); #else static inline int xe_configfs_init(void) { return 0; } static inline void xe_configfs_exit(void) { } static inline void xe_configfs_check_device(struct pci_dev *pdev) { } static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; } -static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { } static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return 
U64_MAX; } static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; } +static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, + const u32 **cs) { return 0; } +static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, + const u32 **cs) { return 0; } #endif #endif diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index 8d6df6bd885e..cd977dbd1ef6 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -24,7 +24,9 @@ #include "xe_pxp_debugfs.h" #include "xe_sriov.h" #include "xe_sriov_pf.h" +#include "xe_sriov_vf.h" #include "xe_step.h" +#include "xe_tile_debugfs.h" #include "xe_wa.h" #include "xe_vsec.h" @@ -38,7 +40,7 @@ DECLARE_FAULT_ATTR(gt_reset_failure); DECLARE_FAULT_ATTR(inject_csc_hw_error); static void read_residency_counter(struct xe_device *xe, struct xe_mmio *mmio, - u32 offset, char *name, struct drm_printer *p) + u32 offset, const char *name, struct drm_printer *p) { u64 residency = 0; int ret; @@ -134,9 +136,9 @@ static int dgfx_pkg_residencies_show(struct seq_file *m, void *data) p = drm_seq_file_printer(m); xe_pm_runtime_get(xe); mmio = xe_root_tile_mmio(xe); - struct { + static const struct { u32 offset; - char *name; + const char *name; } residencies[] = { {BMG_G2_RESIDENCY_OFFSET, "Package G2"}, {BMG_G6_RESIDENCY_OFFSET, "Package G6"}, @@ -163,9 +165,9 @@ static int dgfx_pcie_link_residencies_show(struct seq_file *m, void *data) xe_pm_runtime_get(xe); mmio = xe_root_tile_mmio(xe); - struct { + static const struct { u32 offset; - char *name; + const char *name; } residencies[] = { {BMG_PCIE_LINK_L0_RESIDENCY_OFFSET, "PCIE LINK L0 RESIDENCY"}, {BMG_PCIE_LINK_L1_RESIDENCY_OFFSET, "PCIE LINK L1 RESIDENCY"}, @@ -329,23 +331,44 @@ static const struct file_operations atomic_svm_timeslice_ms_fops = { .write = atomic_svm_timeslice_ms_set, }; -static void create_tile_debugfs(struct xe_tile *tile, struct dentry *root) +static ssize_t disable_late_binding_show(struct file *f, char __user *ubuf, + size_t size, loff_t *pos) { - char name[8]; + struct xe_device *xe = file_inode(f)->i_private; + struct xe_late_bind *late_bind = &xe->late_bind; + char buf[32]; + int len; - snprintf(name, sizeof(name), "tile%u", tile->id); - tile->debugfs = debugfs_create_dir(name, root); - if (IS_ERR(tile->debugfs)) - return; + len = scnprintf(buf, sizeof(buf), "%d\n", late_bind->disable); + + return simple_read_from_buffer(ubuf, size, pos, buf, len); +} + +static ssize_t disable_late_binding_set(struct file *f, const char __user *ubuf, + size_t size, loff_t *pos) +{ + struct xe_device *xe = file_inode(f)->i_private; + struct xe_late_bind *late_bind = &xe->late_bind; + u32 uval; + ssize_t ret; - /* - * Store the xe_tile pointer as private data of the tile/ directory - * node so other tile specific attributes under that directory may - * refer to it by looking at its parent node private data. 
- */ - tile->debugfs->d_inode->i_private = tile; + ret = kstrtouint_from_user(ubuf, size, sizeof(uval), &uval); + if (ret) + return ret; + + if (uval > 1) + return -EINVAL; + + late_bind->disable = !!uval; + return size; } +static const struct file_operations disable_late_binding_fops = { + .owner = THIS_MODULE, + .read = disable_late_binding_show, + .write = disable_late_binding_set, +}; + void xe_debugfs_register(struct xe_device *xe) { struct ttm_device *bdev = &xe->ttm; @@ -362,7 +385,7 @@ void xe_debugfs_register(struct xe_device *xe) ARRAY_SIZE(debugfs_list), root, minor); - if (xe->info.platform == XE_BATTLEMAGE) { + if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) { drm_debugfs_create_files(debugfs_residencies, ARRAY_SIZE(debugfs_residencies), root, minor); @@ -379,6 +402,9 @@ void xe_debugfs_register(struct xe_device *xe) debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe, &atomic_svm_timeslice_ms_fops); + debugfs_create_file("disable_late_binding", 0600, root, xe, + &disable_late_binding_fops); + for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) { man = ttm_manager_type(bdev, mem_type); @@ -398,7 +424,7 @@ void xe_debugfs_register(struct xe_device *xe) ttm_resource_manager_create_debugfs(man, root, "stolen_mm"); for_each_tile(tile, xe, tile_id) - create_tile_debugfs(tile, root); + xe_tile_debugfs_register(tile); for_each_gt(gt, xe, id) xe_gt_debugfs_register(gt); @@ -411,4 +437,6 @@ void xe_debugfs_register(struct xe_device *xe) if (IS_SRIOV_PF(xe)) xe_sriov_pf_debugfs_register(xe, root); + else if (IS_SRIOV_VF(xe)) + xe_sriov_vf_debugfs_register(xe, root); } diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 9e4773a17ef8..fdb7b7498920 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -45,6 +45,7 @@ #include "xe_hwmon.h" #include "xe_i2c.h" #include "xe_irq.h" +#include "xe_late_bind_fw.h" #include "xe_mmio.h" #include "xe_module.h" #include "xe_nvm.h" @@ -457,6 +458,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, if (err) goto err; + xe_validation_device_init(&xe->val); + init_waitqueue_head(&xe->ufence_wq); init_rwsem(&xe->usm.lock); @@ -530,7 +533,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe) * re-init and saving/restoring (or re-populating) the wiped memory. Since we * perform the FLR as the very last action before releasing access to the HW * during the driver release flow, we don't attempt recovery at all, because - * if/when a new instance of i915 is bound to the device it will do a full + * if/when a new instance of Xe is bound to the device it will do a full * re-init anyway. 
*/ static void __xe_driver_flr(struct xe_device *xe) @@ -901,6 +904,10 @@ int xe_device_probe(struct xe_device *xe) if (err) return err; + err = xe_late_bind_init(&xe->late_bind); + if (err) + return err; + err = xe_oa_init(xe); if (err) return err; @@ -950,7 +957,7 @@ int xe_device_probe(struct xe_device *xe) xe_vsec_init(xe); - err = xe_sriov_late_init(xe); + err = xe_sriov_init_late(xe); if (err) goto err_unregister_display; diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c index 6ee422594b56..c5151c86a98a 100644 --- a/drivers/gpu/drm/xe/xe_device_sysfs.c +++ b/drivers/gpu/drm/xe/xe_device_sysfs.c @@ -71,6 +71,15 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(vram_d3cold_threshold); +static struct attribute *vram_attrs[] = { + &dev_attr_vram_d3cold_threshold.attr, + NULL +}; + +static const struct attribute_group vram_attr_group = { + .attrs = vram_attrs, +}; + static ssize_t lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -149,41 +158,16 @@ out: } static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version); -static int late_bind_create_files(struct device *dev) -{ - struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); - struct xe_tile *root = xe_device_get_root_tile(xe); - u32 cap = 0; - int ret; - - xe_pm_runtime_get(xe); - - ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), - &cap, NULL); - if (ret) { - if (ret == -ENXIO) { - drm_dbg(&xe->drm, "Late binding not supported by firmware\n"); - ret = 0; - } - goto out; - } - - if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) { - ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr); - if (ret) - goto out; - } - - if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap)) - ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr); -out: - xe_pm_runtime_put(xe); - - return ret; -} +static struct attribute *late_bind_attrs[] = { + &dev_attr_lb_fan_control_version.attr, + &dev_attr_lb_voltage_regulator_version.attr, + NULL +}; -static void late_bind_remove_files(struct device *dev) +static umode_t late_bind_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) { + struct device *dev = kobj_to_dev(kobj); struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); struct xe_tile *root = xe_device_get_root_tile(xe); u32 cap = 0; @@ -193,18 +177,25 @@ static void late_bind_remove_files(struct device *dev) ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), &cap, NULL); + xe_pm_runtime_put(xe); if (ret) - goto out; + return 0; - if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) - sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr); + if (attr == &dev_attr_lb_fan_control_version.attr && + REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) + return attr->mode; + if (attr == &dev_attr_lb_voltage_regulator_version.attr && + REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap)) + return attr->mode; - if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap)) - sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr); -out: - xe_pm_runtime_put(xe); + return 0; } +static const struct attribute_group late_bind_attr_group = { + .attrs = late_bind_attrs, + .is_visible = late_bind_attr_is_visible, +}; + /** * DOC: PCIe Gen5 Limitations * @@ -278,24 +269,15 @@ auto_link_downgrade_status_show(struct device *dev, struct device_attribute *att } static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status); -static const struct 
attribute *auto_link_downgrade_attrs[] = { +static struct attribute *auto_link_downgrade_attrs[] = { &dev_attr_auto_link_downgrade_capable.attr, &dev_attr_auto_link_downgrade_status.attr, NULL }; -static void xe_device_sysfs_fini(void *arg) -{ - struct xe_device *xe = arg; - - if (xe->d3cold.capable) - sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr); - - if (xe->info.platform == XE_BATTLEMAGE) { - sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs); - late_bind_remove_files(xe->drm.dev); - } -} +static const struct attribute_group auto_link_downgrade_attr_group = { + .attrs = auto_link_downgrade_attrs, +}; int xe_device_sysfs_init(struct xe_device *xe) { @@ -303,20 +285,20 @@ int xe_device_sysfs_init(struct xe_device *xe) int ret; if (xe->d3cold.capable) { - ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr); + ret = devm_device_add_group(dev, &vram_attr_group); if (ret) return ret; } - if (xe->info.platform == XE_BATTLEMAGE) { - ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs); + if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) { + ret = devm_device_add_group(dev, &auto_link_downgrade_attr_group); if (ret) return ret; - ret = late_bind_create_files(dev); + ret = devm_device_add_group(dev, &late_bind_attr_group); if (ret) return ret; } - return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe); + return 0; } diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 1e780f8a2a8c..a6c361db11d9 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -14,6 +14,7 @@ #include "xe_devcoredump_types.h" #include "xe_heci_gsc.h" +#include "xe_late_bind_fw_types.h" #include "xe_lmtt_types.h" #include "xe_memirq_types.h" #include "xe_oa_types.h" @@ -26,6 +27,7 @@ #include "xe_sriov_vf_ccs_types.h" #include "xe_step_types.h" #include "xe_survivability_mode_types.h" +#include "xe_validation.h" #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) #define TEST_VM_OPS_ERROR @@ -183,9 +185,6 @@ struct xe_tile { struct { /** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */ struct xe_ggtt_node *ggtt_balloon[2]; - - /** @sriov.vf.ccs: CCS read and write contexts for VF. */ - struct xe_tile_vf_ccs ccs[XE_SRIOV_VF_CCS_CTX_COUNT]; } vf; } sriov; @@ -282,6 +281,8 @@ struct xe_device { u8 has_heci_cscfi:1; /** @info.has_heci_gscfi: device has heci gscfi */ u8 has_heci_gscfi:1; + /** @info.has_late_bind: Device has firmware late binding support */ + u8 has_late_bind:1; /** @info.has_llc: Device has a shared CPU+GPU last level cache */ u8 has_llc:1; /** @info.has_mbx_power_limits: Device has support to manage power limits using @@ -535,6 +536,9 @@ struct xe_device { /** @nvm: discrete graphics non-volatile memory */ struct intel_dg_nvm_dev *nvm; + /** @late_bind: xe mei late bind interface */ + struct xe_late_bind late_bind; + /** @oa: oa observation subsystem */ struct xe_oa oa; @@ -586,6 +590,8 @@ struct xe_device { */ atomic64_t global_total_pages; #endif + /** @val: The domain for exhaustive eviction, which is currently per device. 
*/ + struct xe_validation_device val; /** @psmi: GPU debugging via additional validation HW */ struct { @@ -595,6 +601,13 @@ struct xe_device { u8 region_mask; } psmi; +#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) + /** @g2g_test_array: for testing G2G communications */ + u32 *g2g_test_array; + /** @g2g_test_count: for testing G2G communications */ + atomic_t g2g_test_count; +#endif + /* private: */ #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) @@ -617,18 +630,9 @@ struct xe_device { /* To shut up runtime pm macros.. */ struct xe_runtime_pm {} runtime_pm; - /* only to allow build, not used functionally */ - u32 irq_mask; - struct intel_uncore { spinlock_t lock; } uncore; - - /* only to allow build, not used functionally */ - struct { - unsigned int hpll_freq; - unsigned int czclk_freq; - }; #endif }; diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index 95d06bd65b0f..a7d67725c3ee 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -51,6 +51,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach) struct drm_gem_object *obj = attach->dmabuf->priv; struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_device *xe = xe_bo_device(bo); + struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED; int ret; /* @@ -63,7 +64,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach) return -EINVAL; } - ret = xe_bo_migrate(bo, XE_PL_TT); + ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec); if (ret) { if (ret != -EINTR && ret != -ERESTARTSYS) drm_dbg(&xe->drm, @@ -72,7 +73,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach) return ret; } - ret = xe_bo_pin_external(bo, true); + ret = xe_bo_pin_external(bo, true, exec); xe_assert(xe, !ret); return 0; @@ -92,6 +93,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach, struct dma_buf *dma_buf = attach->dmabuf; struct drm_gem_object *obj = dma_buf->priv; struct xe_bo *bo = gem_to_xe_bo(obj); + struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED; struct sg_table *sgt; int r = 0; @@ -100,9 +102,9 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach, if (!xe_bo_is_pinned(bo)) { if (!attach->peer2peer) - r = xe_bo_migrate(bo, XE_PL_TT); + r = xe_bo_migrate(bo, XE_PL_TT, NULL, exec); else - r = xe_bo_validate(bo, NULL, false); + r = xe_bo_validate(bo, NULL, false, exec); if (r) return ERR_PTR(r); } @@ -161,15 +163,26 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, struct xe_bo *bo = gem_to_xe_bo(obj); bool reads = (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE); + struct xe_validation_ctx ctx; + struct drm_exec exec; + int ret = 0; if (!reads) return 0; /* Can we do interruptible lock here? */ - xe_bo_lock(bo, false); - (void)xe_bo_migrate(bo, XE_PL_TT); - xe_bo_unlock(bo); + xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec, (struct xe_val_flags) {}, ret) { + ret = drm_exec_lock_obj(&exec, &bo->ttm.base); + drm_exec_retry_on_contention(&exec); + if (ret) + break; + + ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec); + drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &ret); + } + /* If we failed, cpu-access takes place in current placement. 
*/ return 0; } @@ -220,32 +233,45 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage, { struct dma_resv *resv = dma_buf->resv; struct xe_device *xe = to_xe_device(dev); + struct xe_validation_ctx ctx; + struct drm_gem_object *dummy_obj; + struct drm_exec exec; struct xe_bo *bo; - int ret; - - dma_resv_lock(resv, NULL); - bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size, - 0, /* Will require 1way or 2way for vm_bind */ - ttm_bo_type_sg, XE_BO_FLAG_SYSTEM); - if (IS_ERR(bo)) { - ret = PTR_ERR(bo); - goto error; + int ret = 0; + + dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm); + if (!dummy_obj) + return ERR_PTR(-ENOMEM); + + dummy_obj->resv = resv; + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) { + ret = drm_exec_lock_obj(&exec, dummy_obj); + drm_exec_retry_on_contention(&exec); + if (ret) + break; + + bo = xe_bo_init_locked(xe, storage, NULL, resv, NULL, dma_buf->size, + 0, /* Will require 1way or 2way for vm_bind */ + ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec); + drm_exec_retry_on_contention(&exec); + if (IS_ERR(bo)) { + ret = PTR_ERR(bo); + xe_validation_retry_on_oom(&ctx, &ret); + break; + } } - dma_resv_unlock(resv); - - return &bo->ttm.base; + drm_gem_object_put(dummy_obj); -error: - dma_resv_unlock(resv); - return ERR_PTR(ret); + return ret ? ERR_PTR(ret) : &bo->ttm.base; } static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach) { struct drm_gem_object *obj = attach->importer_priv; struct xe_bo *bo = gem_to_xe_bo(obj); + struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED; - XE_WARN_ON(xe_bo_evict(bo)); + XE_WARN_ON(xe_bo_evict(bo, exec)); } static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = { diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c index fdd514fec5ef..f5cfdf29fde3 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.c +++ b/drivers/gpu/drm/xe/xe_eu_stall.c @@ -617,9 +617,8 @@ static int xe_eu_stall_data_buf_alloc(struct xe_eu_stall_data_stream *stream, size = stream->per_xecore_buf_size * last_xecore; - bo = xe_bo_create_pin_map_at_aligned(tile->xe, tile, NULL, - size, ~0ull, ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64); + bo = xe_bo_create_pin_map_at_novm(tile->xe, tile, size, ~0ull, ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64, false); if (IS_ERR(bo)) { kfree(stream->xecore_buf); return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 374c831e691b..7715e74bb945 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -19,6 +19,7 @@ #include "xe_ring_ops_types.h" #include "xe_sched_job.h" #include "xe_sync.h" +#include "xe_svm.h" #include "xe_vm.h" /** @@ -97,9 +98,13 @@ static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec) { struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm); + int ret; /* The fence slot added here is intended for the exec sched job. 
*/ - return xe_vm_validate_rebind(vm, &vm_exec->exec, 1); + xe_vm_set_validation_exec(vm, &vm_exec->exec); + ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1); + xe_vm_set_validation_exec(vm, NULL); + return ret; } int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -115,10 +120,10 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn}; struct drm_exec *exec = &vm_exec.exec; u32 i, num_syncs, num_ufence = 0; + struct xe_validation_ctx ctx; struct xe_sched_job *job; struct xe_vm *vm; bool write_locked, skip_retry = false; - ktime_t end = 0; int err = 0; struct xe_hw_engine_group *group; enum xe_hw_engine_group_execution_mode mode, previous_mode; @@ -246,17 +251,12 @@ retry: if (err) goto err_unlock_list; - vm_exec.vm = &vm->gpuvm; - vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT; - if (xe_vm_in_lr_mode(vm)) { - drm_exec_init(exec, vm_exec.flags, 0); - } else { - err = drm_gpuvm_exec_lock(&vm_exec); - if (err) { - if (xe_vm_validate_should_retry(exec, err, &end)) - err = -EAGAIN; + if (!xe_vm_in_lr_mode(vm)) { + vm_exec.vm = &vm->gpuvm; + vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT; + err = xe_validation_exec_lock(&ctx, &vm_exec, &xe->val); + if (err) goto err_unlock_list; - } } if (xe_vm_is_closed_or_banned(q->vm)) { @@ -303,7 +303,7 @@ retry: if (err) goto err_put_job; - err = down_read_interruptible(&vm->userptr.notifier_lock); + err = xe_svm_notifier_lock_interruptible(vm); if (err) goto err_put_job; @@ -345,12 +345,13 @@ retry: err_repin: if (!xe_vm_in_lr_mode(vm)) - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); err_put_job: if (err) xe_sched_job_put(job); err_exec: - drm_exec_fini(exec); + if (!xe_vm_in_lr_mode(vm)) + xe_validation_ctx_fini(&ctx); err_unlock_list: up_read(&vm->lock); if (err == -EAGAIN && !skip_retry) diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 063c89d981e5..37b2b93b73d6 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -199,6 +199,16 @@ err_lrc: return err; } +static void __xe_exec_queue_fini(struct xe_exec_queue *q) +{ + int i; + + q->ops->fini(q); + + for (i = 0; i < q->width; ++i) + xe_lrc_put(q->lrc[i]); +} + struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm, u32 logical_mask, u16 width, struct xe_hw_engine *hwe, u32 flags, @@ -229,11 +239,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v if (xe_exec_queue_uses_pxp(q)) { err = xe_pxp_exec_queue_add(xe->pxp, q); if (err) - goto err_post_alloc; + goto err_post_init; } return q; +err_post_init: + __xe_exec_queue_fini(q); err_post_alloc: __xe_exec_queue_free(q); return ERR_PTR(err); @@ -331,13 +343,11 @@ void xe_exec_queue_destroy(struct kref *ref) xe_exec_queue_put(eq); } - q->ops->fini(q); + q->ops->destroy(q); } void xe_exec_queue_fini(struct xe_exec_queue *q) { - int i; - /* * Before releasing our ref to lrc and xef, accumulate our run ticks * and wakeup any waiters. 
@@ -346,9 +356,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q) if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal)) wake_up_var(&q->xef->exec_queue.pending_removal); - for (i = 0; i < q->width; ++i) - xe_lrc_put(q->lrc[i]); - + __xe_exec_queue_fini(q); __xe_exec_queue_free(q); } diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index ba443a497b38..27b76cf9da89 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -181,8 +181,14 @@ struct xe_exec_queue_ops { int (*init)(struct xe_exec_queue *q); /** @kill: Kill inflight submissions for backend */ void (*kill)(struct xe_exec_queue *q); - /** @fini: Fini exec queue for submission backend */ + /** @fini: Undoes the init() for submission backend */ void (*fini)(struct xe_exec_queue *q); + /** + * @destroy: Destroy exec queue for submission backend. The backend + * function must call xe_exec_queue_fini() (which will in turn call the + * fini() backend function) to ensure the queue is properly cleaned up. + */ + void (*destroy)(struct xe_exec_queue *q); /** @set_priority: Set priority for exec queue */ int (*set_priority)(struct xe_exec_queue *q, enum xe_exec_queue_priority priority); diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index 788f56b066b6..f83d421ac9d3 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -385,10 +385,20 @@ err_free: return err; } -static void execlist_exec_queue_fini_async(struct work_struct *w) +static void execlist_exec_queue_fini(struct xe_exec_queue *q) +{ + struct xe_execlist_exec_queue *exl = q->execlist; + + drm_sched_entity_fini(&exl->entity); + drm_sched_fini(&exl->sched); + + kfree(exl); +} + +static void execlist_exec_queue_destroy_async(struct work_struct *w) { struct xe_execlist_exec_queue *ee = - container_of(w, struct xe_execlist_exec_queue, fini_async); + container_of(w, struct xe_execlist_exec_queue, destroy_async); struct xe_exec_queue *q = ee->q; struct xe_execlist_exec_queue *exl = q->execlist; struct xe_device *xe = gt_to_xe(q->gt); @@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w) list_del(&exl->active_link); spin_unlock_irqrestore(&exl->port->lock, flags); - drm_sched_entity_fini(&exl->entity); - drm_sched_fini(&exl->sched); - kfree(exl); - xe_exec_queue_fini(q); } @@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q) /* NIY */ } -static void execlist_exec_queue_fini(struct xe_exec_queue *q) +static void execlist_exec_queue_destroy(struct xe_exec_queue *q) { - INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async); - queue_work(system_unbound_wq, &q->execlist->fini_async); + INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async); + queue_work(system_unbound_wq, &q->execlist->destroy_async); } static int execlist_exec_queue_set_priority(struct xe_exec_queue *q, @@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = { .init = execlist_exec_queue_init, .kill = execlist_exec_queue_kill, .fini = execlist_exec_queue_fini, + .destroy = execlist_exec_queue_destroy, .set_priority = execlist_exec_queue_set_priority, .set_timeslice = execlist_exec_queue_set_timeslice, .set_preempt_timeout = execlist_exec_queue_set_preempt_timeout, diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h index 415140936f11..92c4ba52db0c 100644 --- 
a/drivers/gpu/drm/xe/xe_execlist_types.h +++ b/drivers/gpu/drm/xe/xe_execlist_types.h @@ -42,7 +42,7 @@ struct xe_execlist_exec_queue { bool has_run; - struct work_struct fini_async; + struct work_struct destroy_async; enum xe_exec_queue_priority active_priority; struct list_head active_link; diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 71c7690a92b3..7fdd0a97a628 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -28,6 +28,7 @@ #include "xe_pm.h" #include "xe_res_cursor.h" #include "xe_sriov.h" +#include "xe_tile_printk.h" #include "xe_tile_sriov_vf.h" #include "xe_tlb_inval.h" #include "xe_wa.h" @@ -269,7 +270,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) gsm_size = probe_gsm_size(pdev); if (gsm_size == 0) { - drm_err(&xe->drm, "Hardware reported no preallocated GSM\n"); + xe_tile_err(ggtt->tile, "Hardware reported no preallocated GSM\n"); return -ENOMEM; } @@ -466,8 +467,8 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt, if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf)); - xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n", - node->start, node->start + node->size, buf, description); + xe_tile_dbg(ggtt->tile, "GGTT %#llx-%#llx (%s) %s\n", + node->start, node->start + node->size, buf, description); } } @@ -499,9 +500,8 @@ int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 err = drm_mm_reserve_node(&ggtt->mm, &node->base); - if (xe_gt_WARN(ggtt->tile->primary_gt, err, - "Failed to balloon GGTT %#llx-%#llx (%pe)\n", - node->base.start, node->base.start + node->base.size, ERR_PTR(err))) + if (xe_tile_WARN(ggtt->tile, err, "Failed to balloon GGTT %#llx-%#llx (%pe)\n", + node->base.start, node->base.start + node->base.size, ERR_PTR(err))) return err; xe_ggtt_dump_node(ggtt, &node->base, "balloon"); @@ -731,7 +731,7 @@ void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo) } static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, - u64 start, u64 end) + u64 start, u64 end, struct drm_exec *exec) { u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE; u8 tile_id = ggtt->tile->id; @@ -746,7 +746,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, return 0; } - err = xe_bo_validate(bo, NULL, false); + err = xe_bo_validate(bo, NULL, false, exec); if (err) return err; @@ -788,25 +788,28 @@ out: * @bo: the &xe_bo to be inserted * @start: address where it will be inserted * @end: end of the range where it will be inserted + * @exec: The drm_exec transaction to use for exhaustive eviction. * * Return: 0 on success or a negative error code on failure. */ int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, - u64 start, u64 end) + u64 start, u64 end, struct drm_exec *exec) { - return __xe_ggtt_insert_bo_at(ggtt, bo, start, end); + return __xe_ggtt_insert_bo_at(ggtt, bo, start, end, exec); } /** * xe_ggtt_insert_bo - Insert BO into GGTT * @ggtt: the &xe_ggtt where bo will be inserted * @bo: the &xe_bo to be inserted + * @exec: The drm_exec transaction to use for exhaustive eviction. * * Return: 0 on success or a negative error code on failure. 
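 *
 * A minimal usage sketch (hypothetical caller; the surrounding
 * drm_exec_until_all_locked() loop and error handling are elided):
 *
 *	err = xe_ggtt_insert_bo(ggtt, bo, exec);
 *	if (err)
 *		return err;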
*/ -int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) +int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo, + struct drm_exec *exec) { - return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX); + return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, exec); } /** diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h index fbe1e397d05d..75fc7a1efea7 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.h +++ b/drivers/gpu/drm/xe/xe_ggtt.h @@ -10,6 +10,7 @@ struct drm_printer; struct xe_tile; +struct drm_exec; struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile); int xe_ggtt_init_early(struct xe_ggtt *ggtt); @@ -31,9 +32,9 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node); void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node, struct xe_bo *bo, u16 pat_index); void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo); -int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); +int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo, struct drm_exec *exec); int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, - u64 start, u64 end); + u64 start, u64 end, struct drm_exec *exec); void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare); diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index f5ae28af60d4..83d61bf8ec62 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -136,10 +136,10 @@ static int query_compatibility_version(struct xe_gsc *gsc) u64 ggtt_offset; int err; - bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT); + bo = xe_bo_create_pin_map_novm(xe, tile, GSC_VER_PKT_SZ * 2, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT, false); if (IS_ERR(bo)) { xe_gt_err(gt, "failed to allocate bo for GSC version query\n"); return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 34505a6d93ed..3e0ad7e5b5df 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -98,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt) * FIXME: if xe_uc_sanitize is called here, on TGL driver will not * reload */ - gt->uc.guc.submission_state.enabled = false; + xe_guc_submit_disable(>->uc.guc); } static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index bf3a67b5951c..f253e2df4907 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -31,6 +31,7 @@ #include "xe_reg_whitelist.h" #include "xe_sa.h" #include "xe_sriov.h" +#include "xe_sriov_vf_ccs.h" #include "xe_tuning.h" #include "xe_uc_debugfs.h" #include "xe_wa.h" @@ -123,45 +124,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p) return ret; } -static int sa_info(struct xe_gt *gt, struct drm_printer *p) -{ - struct xe_tile *tile = gt_to_tile(gt); - - xe_pm_runtime_get(gt_to_xe(gt)); - drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p, - xe_sa_manager_gpu_addr(tile->mem.kernel_bb_pool)); - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - -static int sa_info_vf_ccs(struct xe_gt *gt, struct drm_printer *p) -{ - struct xe_tile *tile = gt_to_tile(gt); - struct xe_sa_manager *bb_pool; - enum xe_sriov_vf_ccs_rw_ctxs ctx_id; - - if (!IS_VF_CCS_READY(gt_to_xe(gt))) - return 0; - - xe_pm_runtime_get(gt_to_xe(gt)); - - for_each_ccs_rw_ctx(ctx_id) { - bb_pool = 
tile->sriov.vf.ccs[ctx_id].mem.ccs_bb_pool; - if (!bb_pool) - break; - - drm_printf(p, "ccs %s bb suballoc info\n", ctx_id ? "write" : "read"); - drm_printf(p, "-------------------------\n"); - drm_suballoc_dump_debug_info(&bb_pool->base, p, xe_sa_manager_gpu_addr(bb_pool)); - drm_puts(p, "\n"); - } - - xe_pm_runtime_put(gt_to_xe(gt)); - - return 0; -} - static int topology(struct xe_gt *gt, struct drm_printer *p) { xe_pm_runtime_get(gt_to_xe(gt)); @@ -316,7 +278,6 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p) * - without access to the PF specific data */ static const struct drm_info_list vf_safe_debugfs_list[] = { - {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info}, {"topology", .show = xe_gt_debugfs_simple_show, .data = topology}, {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt}, {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore}, @@ -327,17 +288,9 @@ static const struct drm_info_list vf_safe_debugfs_list[] = { {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc}, {"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc}, {"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc}, - {"stats", .show = xe_gt_debugfs_simple_show, .data = xe_gt_stats_print_info}, {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig}, }; -/* - * only for GT debugfs files which are valid on VF. Not valid on PF. - */ -static const struct drm_info_list vf_only_debugfs_list[] = { - {"sa_info_vf_ccs", .show = xe_gt_debugfs_simple_show, .data = sa_info_vf_ccs}, -}; - /* everything else should be added here */ static const struct drm_info_list pf_only_debugfs_list[] = { {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines}, @@ -363,6 +316,24 @@ static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t return count; } +static ssize_t stats_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct xe_gt *gt = s->private; + + return write_to_gt_call(userbuf, count, ppos, xe_gt_stats_clear, gt); +} + +static int stats_show(struct seq_file *s, void *unused) +{ + struct drm_printer p = drm_seq_file_printer(s); + struct xe_gt *gt = s->private; + + return xe_gt_stats_print_info(gt, &p); +} +DEFINE_SHOW_STORE_ATTRIBUTE(stats); + static void force_reset(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); @@ -448,6 +419,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt) root->d_inode->i_private = gt; /* VF safe */ + debugfs_create_file("stats", 0600, root, gt, &stats_fops); debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops); debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops); @@ -459,11 +431,6 @@ void xe_gt_debugfs_register(struct xe_gt *gt) drm_debugfs_create_files(pf_only_debugfs_list, ARRAY_SIZE(pf_only_debugfs_list), root, minor); - else - drm_debugfs_create_files(vf_only_debugfs_list, - ARRAY_SIZE(vf_only_debugfs_list), - root, minor); - xe_uc_debugfs_register(>->uc, root); diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index 60d9354e7dbf..4ff1b6b58d6b 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -227,6 +227,33 @@ static ssize_t max_freq_store(struct kobject *kobj, } static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq); +static ssize_t power_profile_show(struct kobject *kobj, + struct kobj_attribute *attr, 
+ char *buff) +{ + struct device *dev = kobj_to_dev(kobj); + + xe_guc_pc_get_power_profile(dev_to_pc(dev), buff); + + return strlen(buff); +} + +static ssize_t power_profile_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buff, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct xe_guc_pc *pc = dev_to_pc(dev); + int err; + + xe_pm_runtime_get(dev_to_xe(dev)); + err = xe_guc_pc_set_power_profile(pc, buff); + xe_pm_runtime_put(dev_to_xe(dev)); + + return err ?: count; +} +static struct kobj_attribute attr_power_profile = __ATTR_RW(power_profile); + static const struct attribute *freq_attrs[] = { &attr_act_freq.attr, &attr_cur_freq.attr, @@ -236,6 +263,7 @@ static const struct attribute *freq_attrs[] = { &attr_rpn_freq.attr, &attr_min_freq.attr, &attr_max_freq.attr, + &attr_power_profile.attr, NULL }; diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 683ac021a06d..8fb1cae91724 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -362,7 +362,7 @@ fallback: * @group: pointer to storage for steering group ID * @instance: pointer to storage for steering instance ID */ -void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance) +void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance) { xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS); diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h index bc06520befab..283a1c9770e2 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.h +++ b/drivers/gpu/drm/xe/xe_gt_mcr.h @@ -31,7 +31,8 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt, u8 *group, u8 *instance); void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p); -void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance); +void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt, + unsigned int dss, u16 *group, u16 *instance); u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance); /* diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index d02d22fb3659..a054d6010ae0 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -87,10 +87,8 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma, if (!bo) return 0; - err = need_vram_move ? xe_bo_migrate(bo, vram->placement) : - xe_bo_validate(bo, vm, true); - - return err; + return need_vram_move ? 
xe_bo_migrate(bo, vram->placement, NULL, exec) : + xe_bo_validate(bo, vm, true, exec); } static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma, @@ -98,9 +96,9 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma, { struct xe_vm *vm = xe_vma_vm(vma); struct xe_tile *tile = gt_to_tile(gt); + struct xe_validation_ctx ctx; struct drm_exec exec; struct dma_fence *fence; - ktime_t end = 0; int err, needs_vram; lockdep_assert_held_write(&vm->lock); @@ -129,22 +127,22 @@ retry_userptr: } /* Lock VM and BOs dma-resv */ - drm_exec_init(&exec, 0, 0); + xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); drm_exec_until_all_locked(&exec) { err = xe_pf_begin(&exec, vma, needs_vram == 1, tile->mem.vram); drm_exec_retry_on_contention(&exec); - if (xe_vm_validate_should_retry(&exec, err, &end)) - err = -EAGAIN; + xe_validation_retry_on_oom(&ctx, &err); if (err) goto unlock_dma_resv; /* Bind VMA only to the GT that has faulted */ trace_xe_vma_pf_bind(vma); + xe_vm_set_validation_exec(vm, &exec); fence = xe_vma_rebind(vm, vma, BIT(tile->id)); + xe_vm_set_validation_exec(vm, NULL); if (IS_ERR(fence)) { err = PTR_ERR(fence); - if (xe_vm_validate_should_retry(&exec, err, &end)) - err = -EAGAIN; + xe_validation_retry_on_oom(&ctx, &err); goto unlock_dma_resv; } } @@ -153,7 +151,7 @@ retry_userptr: dma_fence_put(fence); unlock_dma_resv: - drm_exec_fini(&exec); + xe_validation_ctx_fini(&ctx); if (err == -EAGAIN) goto retry_userptr; @@ -535,6 +533,7 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc) { struct xe_device *xe = gt_to_xe(gt); struct xe_tile *tile = gt_to_tile(gt); + struct xe_validation_ctx ctx; struct drm_exec exec; struct xe_vm *vm; struct xe_vma *vma; @@ -564,15 +563,14 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc) goto unlock_vm; /* Lock VM and BOs dma-resv */ - drm_exec_init(&exec, 0, 0); + xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {}); drm_exec_until_all_locked(&exec) { ret = xe_pf_begin(&exec, vma, IS_DGFX(vm->xe), tile->mem.vram); drm_exec_retry_on_contention(&exec); - if (ret) - break; + xe_validation_retry_on_oom(&ctx, &ret); } - drm_exec_fini(&exec); + xe_validation_ctx_fini(&ctx); unlock_vm: up_read(&vm->lock); xe_vm_put(vm); diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h index 11da0228cea7..1313d32862db 100644 --- a/drivers/gpu/drm/xe/xe_gt_printk.h +++ b/drivers/gpu/drm/xe/xe_gt_printk.h @@ -6,18 +6,22 @@ #ifndef _XE_GT_PRINTK_H_ #define _XE_GT_PRINTK_H_ -#include <drm/drm_print.h> - #include "xe_gt_types.h" +#include "xe_tile_printk.h" + +#define __XE_GT_PRINTK_FMT(_gt, _fmt, _args...) "GT%u: " _fmt, (_gt)->info.id, ##_args #define xe_gt_printk(_gt, _level, _fmt, ...) \ - drm_##_level(>_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + xe_tile_printk((_gt)->tile, _level, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__)) + +#define xe_gt_err(_gt, _fmt, ...) \ + xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__) #define xe_gt_err_once(_gt, _fmt, ...) \ xe_gt_printk((_gt), err_once, _fmt, ##__VA_ARGS__) -#define xe_gt_err(_gt, _fmt, ...) \ - xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__) +#define xe_gt_err_ratelimited(_gt, _fmt, ...) \ + xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__) #define xe_gt_warn(_gt, _fmt, ...) \ xe_gt_printk((_gt), warn, _fmt, ##__VA_ARGS__) @@ -31,20 +35,20 @@ #define xe_gt_dbg(_gt, _fmt, ...) \ xe_gt_printk((_gt), dbg, _fmt, ##__VA_ARGS__) -#define xe_gt_err_ratelimited(_gt, _fmt, ...) 
\ - xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__) +#define xe_gt_WARN_type(_gt, _type, _condition, _fmt, ...) \ + xe_tile_WARN##_type((_gt)->tile, _condition, _fmt, ## __VA_ARGS__) #define xe_gt_WARN(_gt, _condition, _fmt, ...) \ - drm_WARN(>_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + xe_gt_WARN_type((_gt),, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__)) #define xe_gt_WARN_ONCE(_gt, _condition, _fmt, ...) \ - drm_WARN_ONCE(>_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + xe_gt_WARN_type((_gt), _ONCE, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__)) #define xe_gt_WARN_ON(_gt, _condition) \ - xe_gt_WARN((_gt), _condition, "%s(%s)", "gt_WARN_ON", __stringify(_condition)) + xe_gt_WARN((_gt), _condition, "%s(%s)", "WARN_ON", __stringify(_condition)) #define xe_gt_WARN_ON_ONCE(_gt, _condition) \ - xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition)) + xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition)) static inline void __xe_gt_printfn_err(struct drm_printer *p, struct va_format *vaf) { @@ -67,12 +71,12 @@ static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format * /* * The original xe_gt_dbg() callsite annotations are useless here, - * redirect to the tweaked drm_dbg_printer() instead. + * redirect to the tweaked xe_tile_dbg_printer() instead. */ - dbg = drm_dbg_printer(>_to_xe(gt)->drm, DRM_UT_DRIVER, NULL); + dbg = xe_tile_dbg_printer((gt)->tile); dbg.origin = p->origin; - drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf); + drm_printf(&dbg, __XE_GT_PRINTK_FMT(gt, "%pV", vaf)); } /** diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index c8f0320d032f..6344b5205c08 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1478,23 +1478,16 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) return 0; xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M); - bo = xe_bo_create_locked(xe, tile, NULL, - ALIGN(size, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_NEEDS_2M | - XE_BO_FLAG_PINNED | - XE_BO_FLAG_PINNED_LATE_RESTORE); + bo = xe_bo_create_pin_range_novm(xe, tile, + ALIGN(size, PAGE_SIZE), 0, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_NEEDS_2M | + XE_BO_FLAG_PINNED | + XE_BO_FLAG_PINNED_LATE_RESTORE); if (IS_ERR(bo)) return PTR_ERR(bo); - err = xe_bo_pin(bo); - xe_bo_unlock(bo); - if (unlikely(err)) { - xe_bo_put(bo); - return err; - } - config->lmem_obj = bo; if (xe_device_has_lmtt(xe)) { @@ -1636,7 +1629,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs) u64 fair; fair = div_u64(available, num_vfs); - fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */ fair = ALIGN_DOWN(fair, alignment); #ifdef MAX_FAIR_LMEM fair = min_t(u64, MAX_FAIR_LMEM, fair); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c index c712111aa30d..44cc612b0a75 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c @@ -55,12 +55,12 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid, xe_gt_assert(gt, size % sizeof(u32) == 0); xe_gt_assert(gt, size == ndwords * sizeof(u32)); - bo = xe_bo_create_pin_map(xe, tile, NULL, - ALIGN(size, PAGE_SIZE), - 
ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + bo = xe_bo_create_pin_map_novm(xe, tile, + ALIGN(size, PAGE_SIZE), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE, false); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -91,12 +91,12 @@ static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid, xe_gt_assert(gt, size % sizeof(u32) == 0); xe_gt_assert(gt, size == ndwords * sizeof(u32)); - bo = xe_bo_create_pin_map(xe, tile, NULL, - ALIGN(size, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + bo = xe_bo_create_pin_map_novm(xe, tile, + ALIGN(size, PAGE_SIZE), + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE, false); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c index 30f942671c2b..5f74706bab81 100644 --- a/drivers/gpu/drm/xe/xe_gt_stats.c +++ b/drivers/gpu/drm/xe/xe_gt_stats.c @@ -26,11 +26,46 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr) atomic64_add(incr, >->stats.counters[id]); } +#define DEF_STAT_STR(ID, name) [XE_GT_STATS_ID_##ID] = name + static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = { - "svm_pagefault_count", - "tlb_inval_count", - "vma_pagefault_count", - "vma_pagefault_kb", + DEF_STAT_STR(SVM_PAGEFAULT_COUNT, "svm_pagefault_count"), + DEF_STAT_STR(TLB_INVAL, "tlb_inval_count"), + DEF_STAT_STR(SVM_TLB_INVAL_COUNT, "svm_tlb_inval_count"), + DEF_STAT_STR(SVM_TLB_INVAL_US, "svm_tlb_inval_us"), + DEF_STAT_STR(VMA_PAGEFAULT_COUNT, "vma_pagefault_count"), + DEF_STAT_STR(VMA_PAGEFAULT_KB, "vma_pagefault_kb"), + DEF_STAT_STR(SVM_4K_PAGEFAULT_COUNT, "svm_4K_pagefault_count"), + DEF_STAT_STR(SVM_64K_PAGEFAULT_COUNT, "svm_64K_pagefault_count"), + DEF_STAT_STR(SVM_2M_PAGEFAULT_COUNT, "svm_2M_pagefault_count"), + DEF_STAT_STR(SVM_4K_VALID_PAGEFAULT_COUNT, "svm_4K_valid_pagefault_count"), + DEF_STAT_STR(SVM_64K_VALID_PAGEFAULT_COUNT, "svm_64K_valid_pagefault_count"), + DEF_STAT_STR(SVM_2M_VALID_PAGEFAULT_COUNT, "svm_2M_valid_pagefault_count"), + DEF_STAT_STR(SVM_4K_PAGEFAULT_US, "svm_4K_pagefault_us"), + DEF_STAT_STR(SVM_64K_PAGEFAULT_US, "svm_64K_pagefault_us"), + DEF_STAT_STR(SVM_2M_PAGEFAULT_US, "svm_2M_pagefault_us"), + DEF_STAT_STR(SVM_4K_MIGRATE_COUNT, "svm_4K_migrate_count"), + DEF_STAT_STR(SVM_64K_MIGRATE_COUNT, "svm_64K_migrate_count"), + DEF_STAT_STR(SVM_2M_MIGRATE_COUNT, "svm_2M_migrate_count"), + DEF_STAT_STR(SVM_4K_MIGRATE_US, "svm_4K_migrate_us"), + DEF_STAT_STR(SVM_64K_MIGRATE_US, "svm_64K_migrate_us"), + DEF_STAT_STR(SVM_2M_MIGRATE_US, "svm_2M_migrate_us"), + DEF_STAT_STR(SVM_DEVICE_COPY_US, "svm_device_copy_us"), + DEF_STAT_STR(SVM_4K_DEVICE_COPY_US, "svm_4K_device_copy_us"), + DEF_STAT_STR(SVM_64K_DEVICE_COPY_US, "svm_64K_device_copy_us"), + DEF_STAT_STR(SVM_2M_DEVICE_COPY_US, "svm_2M_device_copy_us"), + DEF_STAT_STR(SVM_CPU_COPY_US, "svm_cpu_copy_us"), + DEF_STAT_STR(SVM_4K_CPU_COPY_US, "svm_4K_cpu_copy_us"), + DEF_STAT_STR(SVM_64K_CPU_COPY_US, "svm_64K_cpu_copy_us"), + DEF_STAT_STR(SVM_2M_CPU_COPY_US, "svm_2M_cpu_copy_us"), + DEF_STAT_STR(SVM_DEVICE_COPY_KB, "svm_device_copy_kb"), + DEF_STAT_STR(SVM_CPU_COPY_KB, "svm_cpu_copy_kb"), + DEF_STAT_STR(SVM_4K_GET_PAGES_US, "svm_4K_get_pages_us"), + DEF_STAT_STR(SVM_64K_GET_PAGES_US, "svm_64K_get_pages_us"), + DEF_STAT_STR(SVM_2M_GET_PAGES_US, "svm_2M_get_pages_us"), + DEF_STAT_STR(SVM_4K_BIND_US, 
"svm_4K_bind_us"), + DEF_STAT_STR(SVM_64K_BIND_US, "svm_64K_bind_us"), + DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"), }; /** @@ -50,3 +85,17 @@ int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p) return 0; } + +/** + * xe_gt_stats_clear - Clear the GT stats + * @gt: GT structure + * + * This clear (zeros) all the available GT stats. + */ +void xe_gt_stats_clear(struct xe_gt *gt) +{ + int id; + + for (id = 0; id < ARRAY_SIZE(gt->stats.counters); ++id) + atomic64_set(>->stats.counters[id], 0); +} diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h index 38325ef53617..e8aea32bc971 100644 --- a/drivers/gpu/drm/xe/xe_gt_stats.h +++ b/drivers/gpu/drm/xe/xe_gt_stats.h @@ -13,6 +13,7 @@ struct drm_printer; #ifdef CONFIG_DEBUG_FS int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p); +void xe_gt_stats_clear(struct xe_gt *gt); void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr); #else static inline void diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h index be3244d7133c..d8348a8de2e1 100644 --- a/drivers/gpu/drm/xe/xe_gt_stats_types.h +++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h @@ -9,8 +9,41 @@ enum xe_gt_stats_id { XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, XE_GT_STATS_ID_TLB_INVAL, + XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, + XE_GT_STATS_ID_SVM_TLB_INVAL_US, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, + XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT, + XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT, + XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT, + XE_GT_STATS_ID_SVM_4K_VALID_PAGEFAULT_COUNT, + XE_GT_STATS_ID_SVM_64K_VALID_PAGEFAULT_COUNT, + XE_GT_STATS_ID_SVM_2M_VALID_PAGEFAULT_COUNT, + XE_GT_STATS_ID_SVM_4K_PAGEFAULT_US, + XE_GT_STATS_ID_SVM_64K_PAGEFAULT_US, + XE_GT_STATS_ID_SVM_2M_PAGEFAULT_US, + XE_GT_STATS_ID_SVM_4K_MIGRATE_COUNT, + XE_GT_STATS_ID_SVM_64K_MIGRATE_COUNT, + XE_GT_STATS_ID_SVM_2M_MIGRATE_COUNT, + XE_GT_STATS_ID_SVM_4K_MIGRATE_US, + XE_GT_STATS_ID_SVM_64K_MIGRATE_US, + XE_GT_STATS_ID_SVM_2M_MIGRATE_US, + XE_GT_STATS_ID_SVM_DEVICE_COPY_US, + XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US, + XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US, + XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US, + XE_GT_STATS_ID_SVM_CPU_COPY_US, + XE_GT_STATS_ID_SVM_4K_CPU_COPY_US, + XE_GT_STATS_ID_SVM_64K_CPU_COPY_US, + XE_GT_STATS_ID_SVM_2M_CPU_COPY_US, + XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, + XE_GT_STATS_ID_SVM_CPU_COPY_KB, + XE_GT_STATS_ID_SVM_4K_GET_PAGES_US, + XE_GT_STATS_ID_SVM_64K_GET_PAGES_US, + XE_GT_STATS_ID_SVM_2M_GET_PAGES_US, + XE_GT_STATS_ID_SVM_4K_BIND_US, + XE_GT_STATS_ID_SVM_64K_BIND_US, + XE_GT_STATS_ID_SVM_2M_BIND_US, /* must be the last entry */ __XE_GT_STATS_NUM_IDS, }; diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index a0baa560dd71..4e61c5e39bcb 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -12,6 +12,7 @@ #include "regs/xe_gt_regs.h" #include "xe_assert.h" #include "xe_gt.h" +#include "xe_gt_mcr.h" #include "xe_gt_printk.h" #include "xe_mmio.h" #include "xe_wa.h" @@ -122,6 +123,21 @@ gen_l3_mask_from_pattern(struct xe_device *xe, xe_l3_bank_mask_t dst, } } +bool xe_gt_topology_report_l3(struct xe_gt *gt) +{ + /* + * No known userspace needs/uses the L3 bank mask reported by + * the media GT, and the hardware itself is known to report bogus + * values on several platforms. Only report L3 bank mask as part + * of the media GT's topology on pre-Xe3 platforms since that's + * already part of our ABI. 
+ */ + if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) >= 30) + return false; + + return true; +} + static void load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask) { @@ -129,16 +145,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask) struct xe_mmio *mmio = >->mmio; u32 fuse3 = xe_mmio_read32(mmio, MIRROR_FUSE3); - /* - * PTL platforms with media version 30.00 do not provide proper values - * for the media GT's L3 bank registers. Skip the readout since we - * don't have any way to obtain real values. - * - * This may get re-described as an official workaround in the future, - * but there's no tracking number assigned yet so we use a custom - * OOB workaround descriptor. - */ - if (XE_GT_WA(gt, no_media_l3)) + if (!xe_gt_topology_report_l3(gt)) return; if (GRAPHICS_VER(xe) >= 30) { @@ -275,8 +282,9 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p) drm_printf(p, "EU type: %s\n", eu_type_to_str(gt->fuse_topo.eu_type)); - drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS, - gt->fuse_topo.l3_bank_mask); + if (xe_gt_topology_report_l3(gt)) + drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS, + gt->fuse_topo.l3_bank_mask); } /* @@ -328,3 +336,19 @@ bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss) { return test_bit(dss, gt->fuse_topo.c_dss_mask); } + +bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt) +{ + unsigned int xecore; + int last_group = -1; + u16 group, instance; + + for_each_dss_steering(xecore, gt, group, instance) { + if (last_group != group) { + if (group - last_group > 1) + return true; + last_group = group; + } + } + return false; +} diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h index c8140704ad4c..5e62f5949b7b 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.h +++ b/drivers/gpu/drm/xe/xe_gt_topology.h @@ -47,4 +47,8 @@ xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad); bool xe_gt_has_geometry_dss(struct xe_gt *gt, unsigned int dss); bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss); +bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt); + +bool xe_gt_topology_report_l3(struct xe_gt *gt); + #endif /* _XE_GT_TOPOLOGY_H_ */ diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 37d06c51180c..00789844ea4d 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -74,8 +74,7 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc) if (!GUC_LOG_LEVEL_IS_VERBOSE(level)) flags |= GUC_LOG_DISABLED; else - flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) << - GUC_LOG_VERBOSITY_SHIFT; + flags |= FIELD_PREP(GUC_LOG_VERBOSITY, GUC_LOG_LEVEL_TO_VERBOSITY(level)); return flags; } @@ -122,22 +121,14 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc) BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE); BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT)); - BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) > - (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT)); - BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) > - (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT)); - BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) > - (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT)); - flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | CAPTURE_FLAG | LOG_FLAG | - ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) | - ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) | - ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << - GUC_LOG_CAPTURE_SHIFT) | - (offset << GUC_LOG_BUF_ADDR_SHIFT); + 
FIELD_PREP(GUC_LOG_CRASH, CRASH_BUFFER_SIZE / LOG_UNIT - 1) | + FIELD_PREP(GUC_LOG_DEBUG, DEBUG_BUFFER_SIZE / LOG_UNIT - 1) | + FIELD_PREP(GUC_LOG_CAPTURE, CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) | + FIELD_PREP(GUC_LOG_BUF_ADDR, offset); #undef LOG_UNIT #undef LOG_FLAG @@ -150,7 +141,7 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc) static u32 guc_ctl_ads_flags(struct xe_guc *guc) { u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT; - u32 flags = ads << GUC_ADS_ADDR_SHIFT; + u32 flags = FIELD_PREP(GUC_ADS_ADDR, ads); return flags; } @@ -709,10 +700,6 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc) if (ret) return ret; - ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo); - if (ret) - return ret; - return 0; } @@ -847,6 +834,10 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc) if (ret) return ret; + ret = xe_guc_ct_init_post_hwconfig(&guc->ct); + if (ret) + return ret; + guc_init_params_post_hwconfig(guc); ret = xe_guc_submit_init(guc, ~0); @@ -888,9 +879,7 @@ int xe_guc_post_load_init(struct xe_guc *guc) return ret; } - guc->submission_state.enabled = true; - - return 0; + return xe_guc_submit_enable(guc); } int xe_guc_reset(struct xe_guc *guc) @@ -1066,7 +1055,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc) #endif #define GUC_LOAD_TIME_WARN_MS 200 -static void guc_wait_ucode(struct xe_guc *guc) +static int guc_wait_ucode(struct xe_guc *guc) { struct xe_gt *gt = guc_to_gt(guc); struct xe_mmio *mmio = >->mmio; @@ -1173,7 +1162,7 @@ static void guc_wait_ucode(struct xe_guc *guc) break; } - xe_device_declare_wedged(gt_to_xe(gt)); + return -EPROTO; } else if (delta_ms > GUC_LOAD_TIME_WARN_MS) { xe_gt_warn(gt, "excessive init time: %lldms! [status = 0x%08X, timeouts = %d]\n", delta_ms, status, count); @@ -1185,7 +1174,10 @@ static void guc_wait_ucode(struct xe_guc *guc) delta_ms, xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc), before_freq, status, count); } + + return 0; } +ALLOW_ERROR_INJECTION(guc_wait_ucode, ERRNO); static int __xe_guc_upload(struct xe_guc *guc) { @@ -1217,14 +1209,16 @@ static int __xe_guc_upload(struct xe_guc *guc) goto out; /* Wait for authentication */ - guc_wait_ucode(guc); + ret = guc_wait_ucode(guc); + if (ret) + goto out; xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING); return 0; out: xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL); - return 0 /* FIXME: ret, don't want to stop load currently */; + return ret; } static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc) @@ -1602,7 +1596,7 @@ void xe_guc_sanitize(struct xe_guc *guc) { xe_uc_fw_sanitize(&guc->fw); xe_guc_ct_disable(&guc->ct); - guc->submission_state.enabled = false; + xe_guc_submit_disable(guc); } int xe_guc_reset_prepare(struct xe_guc *guc) @@ -1695,3 +1689,7 @@ void xe_guc_declare_wedged(struct xe_guc *guc) xe_guc_ct_stop(&guc->ct); xe_guc_submit_wedge(guc); } + +#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) +#include "tests/xe_guc_g2g_test.c" +#endif diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index 22cf019a11bf..1cca05967e62 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -53,6 +53,10 @@ void xe_guc_stop(struct xe_guc *guc); int xe_guc_start(struct xe_guc *guc); void xe_guc_declare_wedged(struct xe_guc *guc); +#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) +int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *payload, u32 len); +#endif + static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class) { switch (class) { diff --git 
a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 5631722f34f5..58e0b0294a5b 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -339,7 +339,7 @@ static void guc_waklv_init(struct xe_guc_ads *ads) if (XE_GT_WA(gt, 13011645652)) { u32 data = 0xC40; - guc_waklv_enable(ads, &data, sizeof(data) / sizeof(u32), &offset, &remain, + guc_waklv_enable(ads, &data, 1, &offset, &remain, GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE); } @@ -355,7 +355,7 @@ static void guc_waklv_init(struct xe_guc_ads *ads) 0x0, 0xF, }; - guc_waklv_enable(ads, data, sizeof(data) / sizeof(u32), &offset, &remain, + guc_waklv_enable(ads, data, ARRAY_SIZE(data), &offset, &remain, GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG); } diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 848065a25c44..18f6327bf552 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -39,6 +39,8 @@ static void receive_g2h(struct xe_guc_ct *ct); static void g2h_worker_func(struct work_struct *w); static void safe_mode_worker_func(struct work_struct *w); static void ct_exit_safe_mode(struct xe_guc_ct *ct); +static void guc_ct_change_state(struct xe_guc_ct *ct, + enum xe_guc_ct_state state); #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) enum { @@ -252,6 +254,13 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct) } ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */ +static void guc_action_disable_ct(void *arg) +{ + struct xe_guc_ct *ct = arg; + + guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED); +} + int xe_guc_ct_init(struct xe_guc_ct *ct) { struct xe_device *xe = ct_to_xe(ct); @@ -268,10 +277,39 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) return PTR_ERR(bo); ct->bo = bo; - return 0; + + return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct); } ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */ +/** + * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM + * @ct: the &xe_guc_ct + * + * Allocate a new BO in VRAM and free the previous BO that was allocated + * in system memory (SMEM). Applicable only for DGFX products. + * + * Return: 0 on success, or a negative errno on failure. 
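+ *
+ * Expected call order, as wired up in xe_guc_init_post_hwconfig() by this
+ * patch (sketch only): xe_guc_ct_init() first allocates the CTB in SMEM
+ * early in probe, then, once hwconfig is available:
+ *
+ *	ret = xe_guc_ct_init_post_hwconfig(ct);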
+ */ +int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct) +{ + struct xe_device *xe = ct_to_xe(ct); + struct xe_gt *gt = ct_to_gt(ct); + struct xe_tile *tile = gt_to_tile(gt); + int ret; + + xe_assert(xe, !xe_guc_ct_enabled(ct)); + + if (IS_DGFX(xe)) { + ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo); + if (ret) + return ret; + } + + devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct); + return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct); +} + #define desc_read(xe_, guc_ctb__, field_) \ xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \ struct guc_ct_buffer_desc, field_) @@ -1040,11 +1078,15 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret) return true; } +#define GUC_SEND_RETRY_LIMIT 50 +#define GUC_SEND_RETRY_MSLEEP 5 + static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 *response_buffer, bool no_fail) { struct xe_gt *gt = ct_to_gt(ct); struct g2h_fence g2h_fence; + unsigned int retries = 0; int ret = 0; /* @@ -1109,6 +1151,12 @@ retry_same_fence: xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n", action[0], g2h_fence.reason); mutex_unlock(&ct->lock); + if (++retries > GUC_SEND_RETRY_LIMIT) { + xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n", + action[0], GUC_SEND_RETRY_LIMIT); + return -ELOOP; + } + msleep(GUC_SEND_RETRY_MSLEEP * retries); goto retry; } if (g2h_fence.fail) { @@ -1438,6 +1486,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) case XE_GUC_ACTION_NOTIFY_EXCEPTION: ret = guc_crash_process_msg(ct, action); break; +#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) + case XE_GUC_ACTION_TEST_G2G_RECV: + ret = xe_guc_g2g_test_notification(guc, payload, adj_len); + break; +#endif default: xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action); } diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index 18d4225e6502..cf41210ab30a 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -13,6 +13,7 @@ struct xe_device; int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct); int xe_guc_ct_init(struct xe_guc_ct *ct); +int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct); int xe_guc_ct_enable(struct xe_guc_ct *ct); void xe_guc_ct_disable(struct xe_guc_ct *ct); void xe_guc_ct_stop(struct xe_guc_ct *ct); diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c index 92e1f9f41b8c..2b99c1ebdd58 100644 --- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c +++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c @@ -94,16 +94,17 @@ static int allocate_engine_activity_buffers(struct xe_guc *guc, struct xe_tile *tile = gt_to_tile(gt); struct xe_bo *bo, *metadata_bo; - metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size), - ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM | - XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE); + metadata_bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(metadata_size), + ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM | + XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE, + false); if (IS_ERR(metadata_bo)) return PTR_ERR(metadata_bo); - bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size), - ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE); + bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(size), + ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE, false); if (IS_ERR(bo)) { xe_bo_unpin_map_no_vm(metadata_bo); diff --git 
a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h index a3f421e2adc0..c30c0e3ccbbb 100644 --- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h @@ -35,8 +35,8 @@ struct xe_guc_exec_queue { struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE]; /** @lr_tdr: long running TDR worker */ struct work_struct lr_tdr; - /** @fini_async: do final fini async from this worker */ - struct work_struct fini_async; + /** @destroy_async: do final destroy async from this worker */ + struct work_struct destroy_async; /** @resume_time: time of last resume */ u64 resume_time; /** @state: GuC specific state for this xe_exec_queue */ diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h index 0508f1064178..50c4c2406132 100644 --- a/drivers/gpu/drm/xe/xe_guc_fwif.h +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h @@ -15,6 +15,7 @@ #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4 #define G2H_LEN_DW_DEREGISTER_CONTEXT 3 #define G2H_LEN_DW_TLB_INVALIDATE 3 +#define G2H_LEN_DW_G2G_NOTIFY_MIN 3 #define GUC_ID_MAX 65535 #define GUC_ID_UNKNOWN 0xffffffff @@ -65,6 +66,7 @@ struct guc_ctxt_registration_info { u32 hwlrca_hi; }; #define CONTEXT_REGISTRATION_FLAG_KMD BIT(0) +#define CONTEXT_REGISTRATION_FLAG_TYPE GENMASK(2, 1) /* 32-bit KLV structure as used by policy updates and others */ struct guc_klv_generic_dw_t { @@ -89,13 +91,10 @@ struct guc_update_exec_queue_policy { #define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1) #define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2) #define GUC_LOG_LOG_ALLOC_UNITS BIT(3) -#define GUC_LOG_CRASH_SHIFT 4 -#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT) -#define GUC_LOG_DEBUG_SHIFT 6 -#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT) -#define GUC_LOG_CAPTURE_SHIFT 10 -#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT) -#define GUC_LOG_BUF_ADDR_SHIFT 12 +#define GUC_LOG_CRASH REG_GENMASK(5, 4) +#define GUC_LOG_DEBUG REG_GENMASK(9, 6) +#define GUC_LOG_CAPTURE REG_GENMASK(11, 10) +#define GUC_LOG_BUF_ADDR REG_GENMASK(31, 12) #define GUC_CTL_WA 1 #define GUC_WA_GAM_CREDITS BIT(10) @@ -117,21 +116,14 @@ struct guc_update_exec_queue_policy { #define GUC_CTL_DISABLE_SCHEDULER BIT(14) #define GUC_CTL_DEBUG 3 -#define GUC_LOG_VERBOSITY_SHIFT 0 -#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) -#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT) -#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT) -#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT) -#define GUC_LOG_VERBOSITY_MIN 0 +#define GUC_LOG_VERBOSITY REG_GENMASK(1, 0) #define GUC_LOG_VERBOSITY_MAX 3 -#define GUC_LOG_VERBOSITY_MASK 0x0000000f -#define GUC_LOG_DESTINATION_MASK (3 << 4) -#define GUC_LOG_DISABLED (1 << 6) -#define GUC_PROFILE_ENABLED (1 << 7) +#define GUC_LOG_DESTINATION REG_GENMASK(5, 4) +#define GUC_LOG_DISABLED BIT(6) +#define GUC_PROFILE_ENABLED BIT(7) #define GUC_CTL_ADS 4 -#define GUC_ADS_ADDR_SHIFT 1 -#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT) +#define GUC_ADS_ADDR REG_GENMASK(21, 1) #define GUC_CTL_DEVID 5 diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h index f1e2b0be90a9..98a47ac42b08 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.h +++ b/drivers/gpu/drm/xe/xe_guc_log.h @@ -17,7 +17,7 @@ struct xe_device; #define DEBUG_BUFFER_SIZE SZ_8M #define CAPTURE_BUFFER_SIZE SZ_2M #else -#define CRASH_BUFFER_SIZE SZ_8K +#define CRASH_BUFFER_SIZE SZ_16K #define DEBUG_BUFFER_SIZE SZ_64K #define CAPTURE_BUFFER_SIZE SZ_1M #endif diff --git 
a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 88557e86d637..53fdf59524c4 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -79,6 +79,11 @@
  * Xe driver enables SLPC with all of its defaults features and frequency
  * selection, which varies per platform.
  *
+ * Power profiles add another level of control to SLPC. When the power_saving
+ * profile is chosen, SLPC will use conservative thresholds to ramp frequency,
+ * thus saving power. The base profile is the default and ensures balanced
+ * performance for any workload.
+ *
  * Render-C States:
  * ================
  *
@@ -1171,6 +1176,61 @@ static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
 	return ret;
 }
 
+static const char *power_profile_to_string(struct xe_guc_pc *pc)
+{
+	switch (pc->power_profile) {
+	case SLPC_POWER_PROFILE_BASE:
+		return "base";
+	case SLPC_POWER_PROFILE_POWER_SAVING:
+		return "power_saving";
+	default:
+		return "invalid";
+	}
+}
+
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
+{
+	switch (pc->power_profile) {
+	case SLPC_POWER_PROFILE_BASE:
+		sprintf(profile, "[%s] %s\n", "base", "power_saving");
+		break;
+	case SLPC_POWER_PROFILE_POWER_SAVING:
+		sprintf(profile, "%s [%s]\n", "base", "power_saving");
+		break;
+	default:
+		sprintf(profile, "invalid");
+	}
+}
+
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
+{
+	int ret = 0;
+	u32 val;
+
+	if (strncmp("base", buf, strlen("base")) == 0)
+		val = SLPC_POWER_PROFILE_BASE;
+	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
+		val = SLPC_POWER_PROFILE_POWER_SAVING;
+	else
+		return -EINVAL;
+
+	guard(mutex)(&pc->freq_lock);
+	xe_pm_runtime_get_noresume(pc_to_xe(pc));
+
+	ret = pc_action_set_param(pc,
+				  SLPC_PARAM_POWER_PROFILE,
+				  val);
+	if (ret)
+		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
+			       val, ERR_PTR(ret));
+	else
+		pc->power_profile = val;
+
+	xe_pm_runtime_put(pc_to_xe(pc));
+
+	return ret;
+}
+
 /**
  * xe_guc_pc_start - Start GuC's Power Conservation component
  * @pc: Xe_GuC_PC instance
@@ -1249,6 +1309,11 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
 	/* Enable SLPC Optimized Strategy for compute */
 	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
 
+	/* Set cached value of power_profile */
+	ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
+	if (unlikely(ret))
+		xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
+
 out:
 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 	return ret;
@@ -1327,6 +1392,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
 
 	pc->bo = bo;
 
+	pc->power_profile = SLPC_POWER_PROFILE_BASE;
+
 	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
 }
 
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 52ecdd5ddbff..0e31396f103c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -31,6 +31,8 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
 int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq);
 int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq);
 int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq);
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf);
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile);
 
 enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc);
 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index
c02053948a57..5e4ea53fbee6 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h @@ -37,6 +37,8 @@ struct xe_guc_pc { struct mutex freq_lock; /** @freq_ready: Only handle freq changes, if they are really ready */ bool freq_ready; + /** @power_profile: Base or power_saving profile */ + u32 power_profile; }; #endif /* _XE_GUC_PC_TYPES_H_ */ diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 1185b23b1384..53024eb5670b 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -32,6 +32,7 @@ #include "xe_guc_ct.h" #include "xe_guc_exec_queue_types.h" #include "xe_guc_id_mgr.h" +#include "xe_guc_klv_helpers.h" #include "xe_guc_submit_types.h" #include "xe_hw_engine.h" #include "xe_hw_fence.h" @@ -316,6 +317,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); } +/* + * Given that we want to guarantee enough RCS throughput to avoid missing + * frames, we set the yield policy to 20% of each 80ms interval. + */ +#define RC_YIELD_DURATION 80 /* in ms */ +#define RC_YIELD_RATIO 20 /* in percent */ +static u32 *emit_render_compute_yield_klv(u32 *emit) +{ + *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD); + *emit++ = RC_YIELD_DURATION; + *emit++ = RC_YIELD_RATIO; + + return emit; +} + +#define SCHEDULING_POLICY_MAX_DWORDS 16 +static int guc_init_global_schedule_policy(struct xe_guc *guc) +{ + u32 data[SCHEDULING_POLICY_MAX_DWORDS]; + u32 *emit = data; + u32 count = 0; + int ret; + + if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0)) + return 0; + + *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV; + + if (CCS_MASK(guc_to_gt(guc))) + emit = emit_render_compute_yield_klv(emit); + + count = emit - data; + if (count > 1) { + xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS); + + ret = xe_guc_ct_send_block(&guc->ct, data, count); + if (ret < 0) { + xe_gt_err(guc_to_gt(guc), + "failed to enable GuC scheduling policies: %pe\n", + ERR_PTR(ret)); + return ret; + } + } + + return 0; +} + +int xe_guc_submit_enable(struct xe_guc *guc) +{ + int ret; + + ret = guc_init_global_schedule_policy(guc); + if (ret) + return ret; + + guc->submission_state.enabled = true; + + return 0; +} + +void xe_guc_submit_disable(struct xe_guc *guc) +{ + guc->submission_state.enabled = false; +} + static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) { int i; @@ -558,10 +624,8 @@ static void register_exec_queue(struct xe_exec_queue *q, int ctx_type) info.engine_submit_mask = q->logical_mask; info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc)); info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc)); - info.flags = CONTEXT_REGISTRATION_FLAG_KMD; - - if (ctx_type != GUC_CONTEXT_NORMAL) - info.flags |= BIT(ctx_type); + info.flags = CONTEXT_REGISTRATION_FLAG_KMD | + FIELD_PREP(CONTEXT_REGISTRATION_FLAG_TYPE, ctx_type); if (xe_exec_queue_is_parallel(q)) { u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc); @@ -1355,48 +1419,57 @@ rearm: return DRM_GPU_SCHED_STAT_NO_HANG; } -static void __guc_exec_queue_fini_async(struct work_struct *w) +static void guc_exec_queue_fini(struct xe_exec_queue *q) +{ + struct xe_guc_exec_queue *ge = q->guc; + struct xe_guc *guc = exec_queue_to_guc(q); + + release_guc_id(guc, q); + xe_sched_entity_fini(&ge->entity); + xe_sched_fini(&ge->sched); + + /* + * RCU free due sched being exported via DRM scheduler fences + * (timeline name). 
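+	 *
+	 * A hypothetical reader, sketched: a fence consumer may still
+	 * resolve the timeline name, which points into the scheduler
+	 * owned by @ge, after the queue is torn down, hence the
+	 * RCU-deferred free:
+	 *
+	 *	rcu_read_lock();
+	 *	name = fence->ops->get_timeline_name(fence);
+	 *	rcu_read_unlock();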
+ */ + kfree_rcu(ge, rcu); +} + +static void __guc_exec_queue_destroy_async(struct work_struct *w) { struct xe_guc_exec_queue *ge = - container_of(w, struct xe_guc_exec_queue, fini_async); + container_of(w, struct xe_guc_exec_queue, destroy_async); struct xe_exec_queue *q = ge->q; struct xe_guc *guc = exec_queue_to_guc(q); xe_pm_runtime_get(guc_to_xe(guc)); trace_xe_exec_queue_destroy(q); - release_guc_id(guc, q); if (xe_exec_queue_is_lr(q)) cancel_work_sync(&ge->lr_tdr); /* Confirm no work left behind accessing device structures */ cancel_delayed_work_sync(&ge->sched.base.work_tdr); - xe_sched_entity_fini(&ge->entity); - xe_sched_fini(&ge->sched); - /* - * RCU free due sched being exported via DRM scheduler fences - * (timeline name). - */ - kfree_rcu(ge, rcu); xe_exec_queue_fini(q); + xe_pm_runtime_put(guc_to_xe(guc)); } -static void guc_exec_queue_fini_async(struct xe_exec_queue *q) +static void guc_exec_queue_destroy_async(struct xe_exec_queue *q) { struct xe_guc *guc = exec_queue_to_guc(q); struct xe_device *xe = guc_to_xe(guc); - INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); + INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async); /* We must block on kernel engines so slabs are empty on driver unload */ if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) - __guc_exec_queue_fini_async(&q->guc->fini_async); + __guc_exec_queue_destroy_async(&q->guc->destroy_async); else - queue_work(xe->destroy_wq, &q->guc->fini_async); + queue_work(xe->destroy_wq, &q->guc->destroy_async); } -static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) +static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q) { /* * Might be done from within the GPU scheduler, need to do async as we @@ -1405,7 +1478,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) * this we and don't really care when everything is fini'd, just that it * is. 
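The kfree_rcu() above, rather than a plain kfree(), matters because DRM scheduler fences keep a pointer into the scheduler for their timeline name and may resolve it after the exec queue is torn down. A sketch of such a reader, which is only safe because the backing memory is RCU-deferred; the helper is illustrative, the real consumer is the fence's get_timeline_name() op:

static void log_fence_timeline(struct dma_fence *fence)
{
	rcu_read_lock();
	/* may touch scheduler memory that kfree_rcu() keeps alive */
	pr_info("timeline: %s\n", fence->ops->get_timeline_name(fence));
	rcu_read_unlock();
}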
*/ - guc_exec_queue_fini_async(q); + guc_exec_queue_destroy_async(q); } static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg) @@ -1419,7 +1492,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg) if (exec_queue_registered(q)) disable_scheduling_deregister(guc, q); else - __guc_exec_queue_fini(guc, q); + __guc_exec_queue_destroy(guc, q); } static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q) @@ -1652,14 +1725,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q, #define STATIC_MSG_CLEANUP 0 #define STATIC_MSG_SUSPEND 1 #define STATIC_MSG_RESUME 2 -static void guc_exec_queue_fini(struct xe_exec_queue *q) +static void guc_exec_queue_destroy(struct xe_exec_queue *q) { struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q)) guc_exec_queue_add_msg(q, msg, CLEANUP); else - __guc_exec_queue_fini(exec_queue_to_guc(q), q); + __guc_exec_queue_destroy(exec_queue_to_guc(q), q); } static int guc_exec_queue_set_priority(struct xe_exec_queue *q, @@ -1789,6 +1862,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = { .init = guc_exec_queue_init, .kill = guc_exec_queue_kill, .fini = guc_exec_queue_fini, + .destroy = guc_exec_queue_destroy, .set_priority = guc_exec_queue_set_priority, .set_timeslice = guc_exec_queue_set_timeslice, .set_preempt_timeout = guc_exec_queue_set_preempt_timeout, @@ -1810,7 +1884,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) xe_exec_queue_put(q); else if (exec_queue_destroyed(q)) - __guc_exec_queue_fini(guc, q); + __guc_exec_queue_destroy(guc, q); } if (q->guc->suspend_pending) { set_exec_queue_suspended(q); @@ -2029,7 +2103,7 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id) q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); if (unlikely(!q)) { - xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id); + xe_gt_err(gt, "No exec queue found for guc_id %u\n", guc_id); return NULL; } @@ -2139,7 +2213,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q) if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) xe_exec_queue_put(q); else - __guc_exec_queue_fini(guc, q); + __guc_exec_queue_destroy(guc, q); } int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len) @@ -2528,7 +2602,7 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) } /** - * xe_guc_register_exec_queue - Register exec queue for a given context type. + * xe_guc_register_vf_exec_queue - Register exec queue for a given context type. * @q: Execution queue * @ctx_type: Type of the context * @@ -2539,15 +2613,17 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) * * Returns - None. 
*/ -void xe_guc_register_exec_queue(struct xe_exec_queue *q, int ctx_type) +void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type) { struct xe_guc *guc = exec_queue_to_guc(q); struct xe_device *xe = guc_to_xe(guc); + struct xe_gt *gt = guc_to_gt(guc); - xe_assert(xe, IS_SRIOV_VF(xe)); - xe_assert(xe, !IS_DGFX(xe)); - xe_assert(xe, (ctx_type > GUC_CONTEXT_NORMAL && - ctx_type < GUC_CONTEXT_COUNT)); + xe_gt_assert(gt, IS_SRIOV_VF(xe)); + xe_gt_assert(gt, !IS_DGFX(xe)); + xe_gt_assert(gt, ctx_type == GUC_CONTEXT_COMPRESSION_SAVE || + ctx_type == GUC_CONTEXT_COMPRESSION_RESTORE); + xe_gt_assert(gt, GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 23, 0)); register_exec_queue(q, ctx_type); enable_scheduling(q); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h index 6b5df5d0956b..78c3f07e31a0 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.h +++ b/drivers/gpu/drm/xe/xe_guc_submit.h @@ -13,6 +13,8 @@ struct xe_exec_queue; struct xe_guc; int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids); +int xe_guc_submit_enable(struct xe_guc *guc); +void xe_guc_submit_disable(struct xe_guc *guc); int xe_guc_submit_reset_prepare(struct xe_guc *guc); void xe_guc_submit_reset_wait(struct xe_guc *guc); @@ -46,7 +48,7 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot); void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p); -void xe_guc_register_exec_queue(struct xe_exec_queue *q, int ctx_type); +void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type); int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch); diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c deleted file mode 100644 index 57b71956ddf4..000000000000 --- a/drivers/gpu/drm/xe/xe_hmm.c +++ /dev/null @@ -1,325 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2024 Intel Corporation - */ - -#include <linux/scatterlist.h> -#include <linux/mmu_notifier.h> -#include <linux/dma-mapping.h> -#include <linux/memremap.h> -#include <linux/swap.h> -#include <linux/hmm.h> -#include <linux/mm.h> -#include "xe_hmm.h" -#include "xe_vm.h" -#include "xe_bo.h" - -static u64 xe_npages_in_range(unsigned long start, unsigned long end) -{ - return (end - start) >> PAGE_SHIFT; -} - -static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st, - struct hmm_range *range, struct rw_semaphore *notifier_sem) -{ - unsigned long i, npages, hmm_pfn; - unsigned long num_chunks = 0; - int ret; - - /* HMM docs says this is needed. */ - ret = down_read_interruptible(notifier_sem); - if (ret) - return ret; - - if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) { - up_read(notifier_sem); - return -EAGAIN; - } - - npages = xe_npages_in_range(range->start, range->end); - for (i = 0; i < npages;) { - unsigned long len; - - hmm_pfn = range->hmm_pfns[i]; - xe_assert(xe, hmm_pfn & HMM_PFN_VALID); - - len = 1UL << hmm_pfn_to_map_order(hmm_pfn); - - /* If order > 0 the page may extend beyond range->start */ - len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1); - i += len; - num_chunks++; - } - up_read(notifier_sem); - - return sg_alloc_table(st, num_chunks, GFP_KERNEL); -} - -/** - * xe_build_sg() - build a scatter gather table for all the physical pages/pfn - * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table - * and will be used to program GPU page table later. 
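The tightened asserts also pin down the firmware contract: compression save/restore contexts are only defined from GuC submission version 1.23.0 onwards. A caller probing for the feature could gate on the same check (illustrative helper, not part of the patch):

static bool guc_has_vf_ctx_compression(struct xe_guc *guc)
{
	return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 23, 0);
}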
- * @xe: the xe device who will access the dma-address in sg table - * @range: the hmm range that we build the sg table from. range->hmm_pfns[] - * has the pfn numbers of pages that back up this hmm address range. - * @st: pointer to the sg table. - * @notifier_sem: The xe notifier lock. - * @write: whether we write to this range. This decides dma map direction - * for system pages. If write we map it bi-diretional; otherwise - * DMA_TO_DEVICE - * - * All the contiguous pfns will be collapsed into one entry in - * the scatter gather table. This is for the purpose of efficiently - * programming GPU page table. - * - * The dma_address in the sg table will later be used by GPU to - * access memory. So if the memory is system memory, we need to - * do a dma-mapping so it can be accessed by GPU/DMA. - * - * FIXME: This function currently only support pages in system - * memory. If the memory is GPU local memory (of the GPU who - * is going to access memory), we need gpu dpa (device physical - * address), and there is no need of dma-mapping. This is TBD. - * - * FIXME: dma-mapping for peer gpu device to access remote gpu's - * memory. Add this when you support p2p - * - * This function allocates the storage of the sg table. It is - * caller's responsibility to free it calling sg_free_table. - * - * Returns 0 if successful; -ENOMEM if fails to allocate memory - */ -static int xe_build_sg(struct xe_device *xe, struct hmm_range *range, - struct sg_table *st, - struct rw_semaphore *notifier_sem, - bool write) -{ - unsigned long npages = xe_npages_in_range(range->start, range->end); - struct device *dev = xe->drm.dev; - struct scatterlist *sgl; - struct page *page; - unsigned long i, j; - - lockdep_assert_held(notifier_sem); - - i = 0; - for_each_sg(st->sgl, sgl, st->nents, j) { - unsigned long hmm_pfn, size; - - hmm_pfn = range->hmm_pfns[i]; - page = hmm_pfn_to_page(hmm_pfn); - xe_assert(xe, !is_device_private_page(page)); - - size = 1UL << hmm_pfn_to_map_order(hmm_pfn); - size -= page_to_pfn(page) & (size - 1); - i += size; - - if (unlikely(j == st->nents - 1)) { - xe_assert(xe, i >= npages); - if (i > npages) - size -= (i - npages); - - sg_mark_end(sgl); - } else { - xe_assert(xe, i < npages); - } - - sg_set_page(sgl, page, size << PAGE_SHIFT, 0); - } - - return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); -} - -static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma) -{ - struct xe_userptr *userptr = &uvma->userptr; - struct xe_vm *vm = xe_vma_vm(&uvma->vma); - - lockdep_assert_held_write(&vm->lock); - lockdep_assert_held(&vm->userptr.notifier_lock); - - mutex_lock(&userptr->unmap_mutex); - xe_assert(vm->xe, !userptr->mapped); - userptr->mapped = true; - mutex_unlock(&userptr->unmap_mutex); -} - -void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma) -{ - struct xe_userptr *userptr = &uvma->userptr; - struct xe_vma *vma = &uvma->vma; - bool write = !xe_vma_read_only(vma); - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_device *xe = vm->xe; - - if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) && - !lockdep_is_held_type(&vm->lock, 0) && - !(vma->gpuva.flags & XE_VMA_DESTROYED)) { - /* Don't unmap in exec critical section. */ - xe_vm_assert_held(vm); - /* Don't unmap while mapping the sg. */ - lockdep_assert_held(&vm->lock); - } - - mutex_lock(&userptr->unmap_mutex); - if (userptr->sg && userptr->mapped) - dma_unmap_sgtable(xe->drm.dev, userptr->sg, - write ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0); - userptr->mapped = false; - mutex_unlock(&userptr->unmap_mutex); -} - -/** - * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr - * @uvma: the userptr vma which hold the scatter gather table - * - * With function xe_userptr_populate_range, we allocate storage of - * the userptr sg table. This is a helper function to free this - * sg table, and dma unmap the address in the table. - */ -void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma) -{ - struct xe_userptr *userptr = &uvma->userptr; - - xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg); - xe_hmm_userptr_unmap(uvma); - sg_free_table(userptr->sg); - userptr->sg = NULL; -} - -/** - * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual - * address range - * - * @uvma: userptr vma which has information of the range to populate. - * @is_mm_mmap_locked: True if mmap_read_lock is already acquired by caller. - * - * This function populate the physical pages of a virtual - * address range. The populated physical pages is saved in - * userptr's sg table. It is similar to get_user_pages but call - * hmm_range_fault. - * - * This function also read mmu notifier sequence # ( - * mmu_interval_read_begin), for the purpose of later - * comparison (through mmu_interval_read_retry). - * - * This must be called with mmap read or write lock held. - * - * This function allocates the storage of the userptr sg table. - * It is caller's responsibility to free it calling sg_free_table. - * - * returns: 0 for success; negative error no on failure - */ -int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, - bool is_mm_mmap_locked) -{ - unsigned long timeout = - jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); - unsigned long *pfns; - struct xe_userptr *userptr; - struct xe_vma *vma = &uvma->vma; - u64 userptr_start = xe_vma_userptr(vma); - u64 userptr_end = userptr_start + xe_vma_size(vma); - struct xe_vm *vm = xe_vma_vm(vma); - struct hmm_range hmm_range = { - .pfn_flags_mask = 0, /* ignore pfns */ - .default_flags = HMM_PFN_REQ_FAULT, - .start = userptr_start, - .end = userptr_end, - .notifier = &uvma->userptr.notifier, - .dev_private_owner = vm->xe, - }; - bool write = !xe_vma_read_only(vma); - unsigned long notifier_seq; - u64 npages; - int ret; - - userptr = &uvma->userptr; - - if (is_mm_mmap_locked) - mmap_assert_locked(userptr->notifier.mm); - - if (vma->gpuva.flags & XE_VMA_DESTROYED) - return 0; - - notifier_seq = mmu_interval_read_begin(&userptr->notifier); - if (notifier_seq == userptr->notifier_seq) - return 0; - - if (userptr->sg) - xe_hmm_userptr_free_sg(uvma); - - npages = xe_npages_in_range(userptr_start, userptr_end); - pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); - if (unlikely(!pfns)) - return -ENOMEM; - - if (write) - hmm_range.default_flags |= HMM_PFN_REQ_WRITE; - - if (!mmget_not_zero(userptr->notifier.mm)) { - ret = -EFAULT; - goto free_pfns; - } - - hmm_range.hmm_pfns = pfns; - - while (true) { - hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier); - - if (!is_mm_mmap_locked) - mmap_read_lock(userptr->notifier.mm); - - ret = hmm_range_fault(&hmm_range); - - if (!is_mm_mmap_locked) - mmap_read_unlock(userptr->notifier.mm); - - if (ret == -EBUSY) { - if (time_after(jiffies, timeout)) - break; - - continue; - } - break; - } - - mmput(userptr->notifier.mm); - - if (ret) - goto free_pfns; - - ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock); - if (ret) - goto free_pfns; - - ret = 
down_read_interruptible(&vm->userptr.notifier_lock); - if (ret) - goto free_st; - - if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) { - ret = -EAGAIN; - goto out_unlock; - } - - ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, - &vm->userptr.notifier_lock, write); - if (ret) - goto out_unlock; - - userptr->sg = &userptr->sgt; - xe_hmm_userptr_set_mapped(uvma); - userptr->notifier_seq = hmm_range.notifier_seq; - up_read(&vm->userptr.notifier_lock); - kvfree(pfns); - return 0; - -out_unlock: - up_read(&vm->userptr.notifier_lock); -free_st: - sg_free_table(&userptr->sgt); -free_pfns: - kvfree(pfns); - return ret; -} diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h deleted file mode 100644 index 0ea98d8e7bbc..000000000000 --- a/drivers/gpu/drm/xe/xe_hmm.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: MIT - * - * Copyright © 2024 Intel Corporation - */ - -#ifndef _XE_HMM_H_ -#define _XE_HMM_H_ - -#include <linux/types.h> - -struct xe_userptr_vma; - -int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked); - -void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma); - -void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma); -#endif diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 32a76ae6e9dc..b6790589e623 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -286,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg */ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value) { - u64 reg_val = 0, min, max; + u32 reg_val = 0; struct xe_device *xe = hwmon->xe; struct xe_reg rapl_limit, pkg_power_sku; struct xe_mmio *mmio = xe_root_tile_mmio(xe); @@ -294,7 +294,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe mutex_lock(&hwmon->hwmon_lock); if (hwmon->xe->info.has_mbx_power_limits) { - xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)®_val); + xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, ®_val); } else { rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); @@ -304,19 +304,21 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe /* Check if PL limits are disabled. */ if (!(reg_val & PWR_LIM_EN)) { *value = PL_DISABLE; - drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n", + drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n", PWR_ATTR_TO_STR(attr), channel, reg_val); goto unlock; } reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val); - *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power); + *value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power; /* For platforms with mailbox power limit support clamping would be done by pcode. 
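The narrowed u32 read above also makes the scaling step easier to follow: the register field holds the limit in units of 1/2^scl_shift_power W, and multiplying by SF_POWER converts to the microwatts hwmon expects. A worked example, assuming a shift of 3 (1/8 W units) and SF_POWER of 1000000; the real values come from hardware:

/*
 * reg_val field = 400  =>  400 * (1/8) W = 50 W programmed limit
 * value = mul_u32_u32(400, 1000000) >> 3 = 50000000 uW = 50 W
 */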
*/ if (!hwmon->xe->info.has_mbx_power_limits) { - reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku); - min = REG_FIELD_GET(PKG_MIN_PWR, reg_val); - max = REG_FIELD_GET(PKG_MAX_PWR, reg_val); + u64 pkg_pwr, min, max; + + pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku); + min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr); + max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr); min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power); max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power); if (min && max) @@ -493,8 +495,8 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at { struct xe_hwmon *hwmon = dev_get_drvdata(dev); struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); - u32 x, y, x_w = 2; /* 2 bits */ - u64 r, tau4, out; + u32 reg_val, x, y, x_w = 2; /* 2 bits */ + u64 tau4, out; int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD; u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR; @@ -505,23 +507,24 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at mutex_lock(&hwmon->hwmon_lock); if (hwmon->xe->info.has_mbx_power_limits) { - ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r); + ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, ®_val); if (ret) { drm_err(&hwmon->xe->drm, - "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n", - channel, power_attr, r, ret); - r = 0; + "power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n", + channel, power_attr, reg_val, ret); + reg_val = 0; } } else { - r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel)); + reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, + channel)); } mutex_unlock(&hwmon->hwmon_lock); xe_pm_runtime_put(hwmon->xe); - x = REG_FIELD_GET(PWR_LIM_TIME_X, r); - y = REG_FIELD_GET(PWR_LIM_TIME_Y, r); + x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val); + y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val); /* * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17) @@ -1294,13 +1297,6 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon) xe_hwmon_fan_input_read(hwmon, channel, &fan_speed); } -static void xe_hwmon_mutex_destroy(void *arg) -{ - struct xe_hwmon *hwmon = arg; - - mutex_destroy(&hwmon->hwmon_lock); -} - int xe_hwmon_register(struct xe_device *xe) { struct device *dev = xe->drm.dev; @@ -1319,8 +1315,7 @@ int xe_hwmon_register(struct xe_device *xe) if (!hwmon) return -ENOMEM; - mutex_init(&hwmon->hwmon_lock); - ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon); + ret = devm_mutex_init(dev, &hwmon->hwmon_lock); if (ret) return ret; diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c index 044dda517b7c..48dfcb41fa08 100644 --- a/drivers/gpu/drm/xe/xe_i2c.c +++ b/drivers/gpu/drm/xe/xe_i2c.c @@ -259,7 +259,7 @@ void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) return; if (d3cold) - xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY); + xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0); drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR)); diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.c b/drivers/gpu/drm/xe/xe_late_bind_fw.c new file mode 100644 index 000000000000..38f3feb2aecd --- /dev/null +++ b/drivers/gpu/drm/xe/xe_late_bind_fw.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 
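The tau encoding noted in the comment above, tau = (1 + x/4) * 2^y, can be decoded by hand. A worked example, assuming 1/1024 s time units; the actual unit shift is read from hardware, so the numbers are illustrative:

/*
 * x = 1 (bits 23:22), y = 10 (bits 21:17):
 *   tau = (1 + 1/4) * 2^10 = 1280 units = 1280/1024 s = 1.25 s = 1250 ms
 */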
Intel Corporation + */ + +#include <linux/component.h> +#include <linux/delay.h> +#include <linux/firmware.h> + +#include <drm/drm_managed.h> +#include <drm/intel/i915_component.h> +#include <drm/intel/intel_lb_mei_interface.h> +#include <drm/drm_print.h> + +#include "xe_device.h" +#include "xe_late_bind_fw.h" +#include "xe_pcode.h" +#include "xe_pcode_api.h" +#include "xe_pm.h" + +/* + * The component should load quite quickly in most cases, but it could take + * a bit. Using a very big timeout just to cover the worst case scenario + */ +#define LB_INIT_TIMEOUT_MS 20000 + +/* + * Retry interval set to 6 seconds, in steps of 200 ms, to allow time for + * other OS components to release the MEI CL handle + */ +#define LB_FW_LOAD_RETRY_MAXCOUNT 30 +#define LB_FW_LOAD_RETRY_PAUSE_MS 200 + +static const u32 fw_id_to_type[] = { + [XE_LB_FW_FAN_CONTROL] = INTEL_LB_TYPE_FAN_CONTROL, +}; + +static const char * const fw_id_to_name[] = { + [XE_LB_FW_FAN_CONTROL] = "fan_control", +}; + +static struct xe_device * +late_bind_to_xe(struct xe_late_bind *late_bind) +{ + return container_of(late_bind, struct xe_device, late_bind); +} + +static struct xe_device * +late_bind_fw_to_xe(struct xe_late_bind_fw *lb_fw) +{ + return container_of(lb_fw, struct xe_device, late_bind.late_bind_fw[lb_fw->id]); +} + +/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */ +static int parse_cpd_header(struct xe_late_bind_fw *lb_fw, + const void *data, size_t size, const char *manifest_entry) +{ + struct xe_device *xe = late_bind_fw_to_xe(lb_fw); + const struct gsc_cpd_header_v2 *header = data; + const struct gsc_manifest_header *manifest; + const struct gsc_cpd_entry *entry; + size_t min_size = sizeof(*header); + u32 offset = 0; + int i; + + /* manifest_entry is mandatory */ + xe_assert(xe, manifest_entry); + + if (size < min_size || header->header_marker != GSC_CPD_HEADER_MARKER) + return -ENOENT; + + if (header->header_length < sizeof(struct gsc_cpd_header_v2)) { + drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n", + fw_id_to_name[lb_fw->id], header->header_length); + return -EINVAL; + } + + min_size = header->header_length + sizeof(struct gsc_cpd_entry) * header->num_of_entries; + if (size < min_size) { + drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", + fw_id_to_name[lb_fw->id], size, min_size); + return -ENODATA; + } + + /* Look for the manifest first */ + entry = (void *)header + header->header_length; + for (i = 0; i < header->num_of_entries; i++, entry++) + if (strcmp(entry->name, manifest_entry) == 0) + offset = entry->offset & GSC_CPD_ENTRY_OFFSET_MASK; + + if (!offset) { + drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n", + fw_id_to_name[lb_fw->id]); + return -ENODATA; + } + + min_size = offset + sizeof(struct gsc_manifest_header); + if (size < min_size) { + drm_err(&xe->drm, "%s late binding fw: too small! 
%zu < %zu\n", + fw_id_to_name[lb_fw->id], size, min_size); + return -ENODATA; + } + + manifest = data + offset; + + lb_fw->version = manifest->fw_version; + + return 0; +} + +/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */ +static int parse_lb_layout(struct xe_late_bind_fw *lb_fw, + const void *data, size_t size, const char *fpt_entry) +{ + struct xe_device *xe = late_bind_fw_to_xe(lb_fw); + const struct csc_fpt_header *header = data; + const struct csc_fpt_entry *entry; + size_t min_size = sizeof(*header); + u32 offset = 0; + int i; + + /* fpt_entry is mandatory */ + xe_assert(xe, fpt_entry); + + if (size < min_size || header->header_marker != CSC_FPT_HEADER_MARKER) + return -ENOENT; + + if (header->header_length < sizeof(struct csc_fpt_header)) { + drm_err(&xe->drm, "%s late binding fw: Invalid FPT header length %u!\n", + fw_id_to_name[lb_fw->id], header->header_length); + return -EINVAL; + } + + min_size = header->header_length + sizeof(struct csc_fpt_entry) * header->num_of_entries; + if (size < min_size) { + drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", + fw_id_to_name[lb_fw->id], size, min_size); + return -ENODATA; + } + + /* Look for the cpd header first */ + entry = (void *)header + header->header_length; + for (i = 0; i < header->num_of_entries; i++, entry++) + if (strcmp(entry->name, fpt_entry) == 0) + offset = entry->offset; + + if (!offset) { + drm_err(&xe->drm, "%s late binding fw: Failed to find fpt_entry\n", + fw_id_to_name[lb_fw->id]); + return -ENODATA; + } + + min_size = offset + sizeof(struct gsc_cpd_header_v2); + if (size < min_size) { + drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n", + fw_id_to_name[lb_fw->id], size, min_size); + return -ENODATA; + } + + return parse_cpd_header(lb_fw, data + offset, size - offset, "LTES.man"); +} + +static const char *xe_late_bind_parse_status(u32 status) +{ + switch (status) { + case INTEL_LB_STATUS_SUCCESS: + return "success"; + case INTEL_LB_STATUS_4ID_MISMATCH: + return "4Id Mismatch"; + case INTEL_LB_STATUS_ARB_FAILURE: + return "ARB Failure"; + case INTEL_LB_STATUS_GENERAL_ERROR: + return "General Error"; + case INTEL_LB_STATUS_INVALID_PARAMS: + return "Invalid Params"; + case INTEL_LB_STATUS_INVALID_SIGNATURE: + return "Invalid Signature"; + case INTEL_LB_STATUS_INVALID_PAYLOAD: + return "Invalid Payload"; + case INTEL_LB_STATUS_TIMEOUT: + return "Timeout"; + default: + return "Unknown error"; + } +} + +static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind) +{ + struct xe_device *xe = late_bind_to_xe(late_bind); + struct xe_tile *root_tile = xe_device_get_root_tile(xe); + u32 uval; + + if (!xe_pcode_read(root_tile, + PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), &uval, NULL)) + return uval; + else + return 0; +} + +void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind) +{ + struct xe_device *xe = late_bind_to_xe(late_bind); + struct xe_late_bind_fw *lbfw; + int fw_id; + + for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) { + lbfw = &late_bind->late_bind_fw[fw_id]; + if (lbfw->payload && late_bind->wq) { + drm_dbg(&xe->drm, "Flush work: load %s firmware\n", + fw_id_to_name[lbfw->id]); + flush_work(&lbfw->work); + } + } +} + +static void xe_late_bind_work(struct work_struct *work) +{ + struct xe_late_bind_fw *lbfw = container_of(work, struct xe_late_bind_fw, work); + struct xe_late_bind *late_bind = container_of(lbfw, struct xe_late_bind, + late_bind_fw[lbfw->id]); + struct xe_device *xe = late_bind_to_xe(late_bind); + 
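Taken together, parse_lb_layout() and parse_cpd_header() walk a two-level container before anything from the blob is trusted. A sketch of that layout, reconstructed from the structures used above (field layout simplified):

/*
 * FPT header -> FPT entries[] -- find "LTES"     --> offset of CPD container
 * CPD header -> CPD entries[] -- find "LTES.man" --> offset of manifest
 * manifest header -> fw_version (stashed in lb_fw->version)
 */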
int retry = LB_FW_LOAD_RETRY_MAXCOUNT; + int ret; + int slept; + + xe_device_assert_mem_access(xe); + + /* we can queue this before the component is bound */ + for (slept = 0; slept < LB_INIT_TIMEOUT_MS; slept += 100) { + if (late_bind->component.ops) + break; + msleep(100); + } + + if (!late_bind->component.ops) { + drm_err(&xe->drm, "Late bind component not bound\n"); + /* Do not re-attempt fw load */ + drmm_kfree(&xe->drm, (void *)lbfw->payload); + lbfw->payload = NULL; + goto out; + } + + drm_dbg(&xe->drm, "Load %s firmware\n", fw_id_to_name[lbfw->id]); + + do { + ret = late_bind->component.ops->push_payload(late_bind->component.mei_dev, + lbfw->type, + lbfw->flags, + lbfw->payload, + lbfw->payload_size); + if (!ret) + break; + msleep(LB_FW_LOAD_RETRY_PAUSE_MS); + } while (--retry && ret == -EBUSY); + + if (!ret) { + drm_dbg(&xe->drm, "Load %s firmware successful\n", + fw_id_to_name[lbfw->id]); + goto out; + } + + if (ret > 0) + drm_err(&xe->drm, "Load %s firmware failed with err %d, %s\n", + fw_id_to_name[lbfw->id], ret, xe_late_bind_parse_status(ret)); + else + drm_err(&xe->drm, "Load %s firmware failed with err %d\n", + fw_id_to_name[lbfw->id], ret); + /* Do not re-attempt fw load */ + drmm_kfree(&xe->drm, (void *)lbfw->payload); + lbfw->payload = NULL; + +out: + xe_pm_runtime_put(xe); +} + +int xe_late_bind_fw_load(struct xe_late_bind *late_bind) +{ + struct xe_device *xe = late_bind_to_xe(late_bind); + struct xe_late_bind_fw *lbfw; + int fw_id; + + if (!late_bind->component_added) + return -ENODEV; + + if (late_bind->disable) + return 0; + + for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) { + lbfw = &late_bind->late_bind_fw[fw_id]; + if (lbfw->payload) { + xe_pm_runtime_get_noresume(xe); + queue_work(late_bind->wq, &lbfw->work); + } + } + return 0; +} + +static int __xe_late_bind_fw_init(struct xe_late_bind *late_bind, u32 fw_id) +{ + struct xe_device *xe = late_bind_to_xe(late_bind); + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + struct xe_late_bind_fw *lb_fw; + const struct firmware *fw; + u32 num_fans; + int ret; + + if (fw_id >= XE_LB_FW_MAX_ID) + return -EINVAL; + + lb_fw = &late_bind->late_bind_fw[fw_id]; + + lb_fw->id = fw_id; + lb_fw->type = fw_id_to_type[lb_fw->id]; + lb_fw->flags &= ~INTEL_LB_FLAG_IS_PERSISTENT; + + if (lb_fw->type == INTEL_LB_TYPE_FAN_CONTROL) { + num_fans = xe_late_bind_fw_num_fans(late_bind); + drm_dbg(&xe->drm, "Number of Fans: %d\n", num_fans); + if (!num_fans) + return 0; + } + + snprintf(lb_fw->blob_path, sizeof(lb_fw->blob_path), "xe/%s_8086_%04x_%04x_%04x.bin", + fw_id_to_name[lb_fw->id], pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + drm_dbg(&xe->drm, "Request late binding firmware %s\n", lb_fw->blob_path); + ret = firmware_request_nowarn(&fw, lb_fw->blob_path, xe->drm.dev); + if (ret) { + drm_dbg(&xe->drm, "%s late binding fw not available for current device\n", + fw_id_to_name[lb_fw->id]); + return 0; + } + + if (fw->size > XE_LB_MAX_PAYLOAD_SIZE) { + drm_err(&xe->drm, "Firmware %s size %zu is larger than max payload size %u\n", + lb_fw->blob_path, fw->size, XE_LB_MAX_PAYLOAD_SIZE); + release_firmware(fw); + return -ENODATA; + } + + ret = parse_lb_layout(lb_fw, fw->data, fw->size, "LTES"); + if (ret) { + release_firmware(fw); + return ret; + } + + lb_fw->payload_size = fw->size; + lb_fw->payload = drmm_kzalloc(&xe->drm, lb_fw->payload_size, GFP_KERNEL); + if (!lb_fw->payload) { + release_firmware(fw); + return -ENOMEM; + } + + drm_info(&xe->drm, "Using %s firmware from %s version %u.%u.%u.%u\n", + fw_id_to_name[lb_fw->id], lb_fw->blob_path, + 
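The blob name built above is derived entirely from PCI identity, so each board variant can carry its own payload. A worked example with hypothetical IDs; the real values come from the PCI device:

/*
 * device 0xe20b, subsystem vendor 0x8086, subsystem device 0x1234:
 *   xe/fan_control_8086_e20b_8086_1234.bin
 */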
lb_fw->version.major, lb_fw->version.minor, + lb_fw->version.hotfix, lb_fw->version.build); + + memcpy((void *)lb_fw->payload, fw->data, lb_fw->payload_size); + release_firmware(fw); + INIT_WORK(&lb_fw->work, xe_late_bind_work); + + return 0; +} + +static int xe_late_bind_fw_init(struct xe_late_bind *late_bind) +{ + int ret; + int fw_id; + + late_bind->wq = alloc_ordered_workqueue("late-bind-ordered-wq", 0); + if (!late_bind->wq) + return -ENOMEM; + + for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) { + ret = __xe_late_bind_fw_init(late_bind, fw_id); + if (ret) + return ret; + } + + return 0; +} + +static int xe_late_bind_component_bind(struct device *xe_kdev, + struct device *mei_kdev, void *data) +{ + struct xe_device *xe = kdev_to_xe_device(xe_kdev); + struct xe_late_bind *late_bind = &xe->late_bind; + + late_bind->component.ops = data; + late_bind->component.mei_dev = mei_kdev; + + return 0; +} + +static void xe_late_bind_component_unbind(struct device *xe_kdev, + struct device *mei_kdev, void *data) +{ + struct xe_device *xe = kdev_to_xe_device(xe_kdev); + struct xe_late_bind *late_bind = &xe->late_bind; + + xe_late_bind_wait_for_worker_completion(late_bind); + + late_bind->component.ops = NULL; +} + +static const struct component_ops xe_late_bind_component_ops = { + .bind = xe_late_bind_component_bind, + .unbind = xe_late_bind_component_unbind, +}; + +static void xe_late_bind_remove(void *arg) +{ + struct xe_late_bind *late_bind = arg; + struct xe_device *xe = late_bind_to_xe(late_bind); + + xe_late_bind_wait_for_worker_completion(late_bind); + + late_bind->component_added = false; + + component_del(xe->drm.dev, &xe_late_bind_component_ops); + if (late_bind->wq) { + destroy_workqueue(late_bind->wq); + late_bind->wq = NULL; + } +} + +/** + * xe_late_bind_init() - add xe mei late binding component + * @late_bind: pointer to late bind structure. + * + * Return: 0 if the initialization was successful, a negative errno otherwise. 
+ */ +int xe_late_bind_init(struct xe_late_bind *late_bind) +{ + struct xe_device *xe = late_bind_to_xe(late_bind); + int err; + + if (!xe->info.has_late_bind) + return 0; + + if (!IS_ENABLED(CONFIG_INTEL_MEI_LB) || !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) { + drm_info(&xe->drm, "Can't init xe mei late bind: missing mei component\n"); + return 0; + } + + err = component_add_typed(xe->drm.dev, &xe_late_bind_component_ops, + INTEL_COMPONENT_LB); + if (err < 0) { + drm_err(&xe->drm, "Failed to add mei late bind component (%pe)\n", ERR_PTR(err)); + return err; + } + + late_bind->component_added = true; + + err = devm_add_action_or_reset(xe->drm.dev, xe_late_bind_remove, late_bind); + if (err) + return err; + + err = xe_late_bind_fw_init(late_bind); + if (err) + return err; + + return xe_late_bind_fw_load(late_bind); +} diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.h b/drivers/gpu/drm/xe/xe_late_bind_fw.h new file mode 100644 index 000000000000..07e437390539 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_late_bind_fw.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_LATE_BIND_FW_H_ +#define _XE_LATE_BIND_FW_H_ + +#include <linux/types.h> + +struct xe_late_bind; + +int xe_late_bind_init(struct xe_late_bind *late_bind); +int xe_late_bind_fw_load(struct xe_late_bind *late_bind); +void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind); + +#endif diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h new file mode 100644 index 000000000000..0f5da89ce98b --- /dev/null +++ b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_LATE_BIND_TYPES_H_ +#define _XE_LATE_BIND_TYPES_H_ + +#include <linux/iosys-map.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/workqueue.h> +#include "xe_uc_fw_abi.h" + +#define XE_LB_MAX_PAYLOAD_SIZE SZ_4K + +/** + * xe_late_bind_fw_id - enum to determine late binding fw index + */ +enum xe_late_bind_fw_id { + XE_LB_FW_FAN_CONTROL = 0, + XE_LB_FW_MAX_ID +}; + +/** + * struct xe_late_bind_fw - a single late binding firmware blob + */ +struct xe_late_bind_fw { + /** @id: firmware index */ + u32 id; + /** @blob_path: firmware binary path */ + char blob_path[PATH_MAX]; + /** @type: firmware type */ + u32 type; + /** @flags: firmware flags */ + u32 flags; + /** @payload: to store the late binding blob */ + const u8 *payload; + /** @payload_size: size of the late binding blob */ + size_t payload_size; + /** @work: worker to upload the late bind blob */ + struct work_struct work; + /** @version: late binding blob manifest version */ + struct gsc_version version; +}; + +/** + * struct xe_late_bind_component - Late Binding services component + * @mei_dev: device that provides Late Binding service. + * @ops: Ops implemented by Late Binding driver, used by Xe driver. 
+ * + * Communication between Xe and MEI drivers for Late Binding services + */ +struct xe_late_bind_component { + struct device *mei_dev; + const struct intel_lb_component_ops *ops; +}; + +/** + * struct xe_late_bind + */ +struct xe_late_bind { + /** @component: struct for communication with mei component */ + struct xe_late_bind_component component; + /** @late_bind_fw: late binding firmware array */ + struct xe_late_bind_fw late_bind_fw[XE_LB_FW_MAX_ID]; + /** @wq: workqueue to submit request to download late bind blob */ + struct workqueue_struct *wq; + /** @component_added: whether the component has been added */ + bool component_added; + /** @disable: to block late binding reload during pm resume flow*/ + bool disable; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index f2bfbfa3efa1..62fc5a1a332d 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -67,12 +67,12 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level goto out; } - bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL, - PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) * - lmtt->ops->lmtt_pte_num(level)), - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) | - XE_BO_FLAG_NEEDS_64K); + bo = xe_bo_create_pin_map_novm(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), + PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) * + lmtt->ops->lmtt_pte_num(level)), + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) | + XE_BO_FLAG_NEEDS_64K, false); if (IS_ERR(bo)) { err = PTR_ERR(bo); goto out_free_pt; diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 8f6c3ba47882..47e9df775072 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -8,6 +8,7 @@ #include <generated/xe_wa_oob.h> #include <linux/ascii85.h> +#include <linux/panic.h> #include "instructions/xe_mi_commands.h" #include "instructions/xe_gfxpipe_commands.h" @@ -16,6 +17,7 @@ #include "regs/xe_lrc_layout.h" #include "xe_bb.h" #include "xe_bo.h" +#include "xe_configfs.h" #include "xe_device.h" #include "xe_drm_client.h" #include "xe_exec_queue_types.h" @@ -75,11 +77,17 @@ lrc_to_xe(struct xe_lrc *lrc) static bool gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class) { + struct xe_device *xe = gt_to_xe(gt); + if (XE_GT_WA(gt, 16010904313) && (class == XE_ENGINE_CLASS_RENDER || class == XE_ENGINE_CLASS_COMPUTE)) return true; + if (xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev), + class, NULL)) + return true; + return false; } @@ -1102,6 +1110,64 @@ static ssize_t setup_timestamp_wa(struct xe_lrc *lrc, struct xe_hw_engine *hwe, return cmd - batch; } +static ssize_t setup_configfs_post_ctx_restore_bb(struct xe_lrc *lrc, + struct xe_hw_engine *hwe, + u32 *batch, size_t max_len) +{ + struct xe_device *xe = gt_to_xe(lrc->gt); + const u32 *user_batch; + u32 *cmd = batch; + u32 count; + + count = xe_configfs_get_ctx_restore_post_bb(to_pci_dev(xe->drm.dev), + hwe->class, &user_batch); + if (!count) + return 0; + + if (count > max_len) + return -ENOSPC; + + /* + * This should be used only for tests and validation. 
Taint the kernel + * as anything could be submitted directly in context switches + */ + add_taint(TAINT_TEST, LOCKDEP_STILL_OK); + + memcpy(cmd, user_batch, count * sizeof(u32)); + cmd += count; + + return cmd - batch; +} + +static ssize_t setup_configfs_mid_ctx_restore_bb(struct xe_lrc *lrc, + struct xe_hw_engine *hwe, + u32 *batch, size_t max_len) +{ + struct xe_device *xe = gt_to_xe(lrc->gt); + const u32 *user_batch; + u32 *cmd = batch; + u32 count; + + count = xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev), + hwe->class, &user_batch); + if (!count) + return 0; + + if (count > max_len) + return -ENOSPC; + + /* + * This should be used only for tests and validation. Taint the kernel + * as anything could be submitted directly in context switches + */ + add_taint(TAINT_TEST, LOCKDEP_STILL_OK); + + memcpy(cmd, user_batch, count * sizeof(u32)); + cmd += count; + + return cmd - batch; +} + static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc, struct xe_hw_engine *hwe, u32 *batch, size_t max_len) @@ -1203,6 +1269,7 @@ int xe_lrc_setup_wa_bb_with_scratch(struct xe_lrc *lrc, struct xe_hw_engine *hwe { .setup = setup_timestamp_wa }, { .setup = setup_invalidate_state_cache_wa }, { .setup = setup_utilization_wa }, + { .setup = setup_configfs_post_ctx_restore_bb }, }; struct bo_setup_state state = { .lrc = lrc, @@ -1249,8 +1316,12 @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe) static int setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe) { - static struct bo_setup rcs_funcs[] = { + static const struct bo_setup rcs_funcs[] = { { .setup = setup_timestamp_wa }, + { .setup = setup_configfs_mid_ctx_restore_bb }, + }; + static const struct bo_setup xcs_funcs[] = { + { .setup = setup_configfs_mid_ctx_restore_bb }, }; struct bo_setup_state state = { .lrc = lrc, @@ -1268,6 +1339,9 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe) hwe->class == XE_ENGINE_CLASS_COMPUTE) { state.funcs = rcs_funcs; state.num_funcs = ARRAY_SIZE(rcs_funcs); + } else { + state.funcs = xcs_funcs; + state.num_funcs = ARRAY_SIZE(xcs_funcs); } if (xe_gt_WARN_ON(lrc->gt, !state.funcs)) @@ -1294,14 +1368,15 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe) finish_bo(&state); kfree(state.buffer); + /* + * Enable INDIRECT_CTX leaving INDIRECT_CTX_OFFSET at its default: it + * varies per engine class, but the default is good enough + */ xe_lrc_write_ctx_reg(lrc, CTX_CS_INDIRECT_CTX, (xe_bo_ggtt_addr(lrc->bo) + state.offset) | /* Size in CLs. 
*/ (state.written * sizeof(u32) / 64)); - xe_lrc_write_ctx_reg(lrc, - CTX_CS_INDIRECT_CTX_OFFSET, - CTX_INDIRECT_CTX_OFFSET_DEFAULT); return 0; } @@ -1340,9 +1415,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, if (vm && vm->xef) /* userspace */ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE; - lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size, - ttm_bo_type_kernel, - bo_flags); + lrc->bo = xe_bo_create_pin_map_novm(xe, tile, + bo_size, + ttm_bo_type_kernel, + bo_flags, false); if (IS_ERR(lrc->bo)) return PTR_ERR(lrc->bo); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 9643442ef101..1d667fa36cf3 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -35,6 +35,7 @@ #include "xe_sched_job.h" #include "xe_sync.h" #include "xe_trace_bo.h" +#include "xe_validation.h" #include "xe_vm.h" #include "xe_vram.h" @@ -173,7 +174,7 @@ static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, } static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, - struct xe_vm *vm) + struct xe_vm *vm, struct drm_exec *exec) { struct xe_device *xe = tile_to_xe(tile); u16 pat_index = xe->pat.idx[XE_CACHE_WB]; @@ -200,7 +201,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, num_entries * XE_PAGE_SIZE, ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_PAGETABLE); + XE_BO_FLAG_PAGETABLE, exec); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -393,6 +394,24 @@ struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile) return m; } +static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm) +{ + struct xe_device *xe = tile_to_xe(tile); + struct xe_validation_ctx ctx; + struct drm_exec exec; + int err = 0; + + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) { + err = xe_vm_drm_exec_lock(vm, &exec); + drm_exec_retry_on_contention(&exec); + err = xe_migrate_prepare_vm(tile, m, vm, &exec); + drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &err); + } + + return err; +} + /** * xe_migrate_init() - Initialize a migrate context * @m: The migration context @@ -413,11 +432,9 @@ int xe_migrate_init(struct xe_migrate *m) if (IS_ERR(vm)) return PTR_ERR(vm); - xe_vm_lock(vm, false); - err = xe_migrate_prepare_vm(tile, m, vm); - xe_vm_unlock(vm); + err = xe_migrate_lock_prepare_vm(tile, m, vm); if (err) - goto err_out; + return err; if (xe->info.has_usm) { struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt, @@ -842,11 +859,15 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0, &src_L0_ofs, &src_L0_pt, 0, 0, avail_pts); - - pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; - batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0, - &dst_L0_ofs, &dst_L0_pt, 0, - avail_pts, avail_pts); + if (copy_only_ccs) { + dst_L0_ofs = src_L0_ofs; + } else { + pte_flags = dst_is_vram ? 
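One detail worth spelling out from the CTX_CS_INDIRECT_CTX write just above: the low bits carry the batch size in 64-byte cachelines, not bytes. Worked example, not from the patch:

/*
 * A 32-dword batch occupies 32 * sizeof(u32) = 128 bytes,
 * i.e. 128 / 64 = 2 cachelines (CLs) in the size field.
 */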
PTE_UPDATE_FLAG_IS_VRAM : 0; + batch_size += pte_update_size(m, pte_flags, dst, + &dst_it, &src_L0, + &dst_L0_ofs, &dst_L0_pt, + 0, avail_pts, avail_pts); + } if (copy_system_ccs) { xe_assert(xe, type_device); @@ -876,7 +897,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) xe_res_next(&dst_it, src_L0); - else + else if (!copy_only_ccs) emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs, &dst_it, src_L0, dst); @@ -908,7 +929,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (!fence) { err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv, DMA_RESV_USAGE_BOOKKEEP); - if (!err && src_bo != dst_bo) + if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv) err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv, DMA_RESV_USAGE_BOOKKEEP); if (err) diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c index 9ca4a5ae1693..33f4ac82fc80 100644 --- a/drivers/gpu/drm/xe/xe_nvm.c +++ b/drivers/gpu/drm/xe/xe_nvm.c @@ -35,6 +35,10 @@ static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = { static void xe_nvm_release_dev(struct device *dev) { + struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev); + struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev); + + kfree(nvm); } static bool xe_nvm_non_posted_erase(struct xe_device *xe) @@ -162,6 +166,5 @@ void xe_nvm_fini(struct xe_device *xe) auxiliary_device_delete(&nvm->aux_dev); auxiliary_device_uninit(&nvm->aux_dev); - kfree(nvm); xe->nvm = NULL; } diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index a188bad172ad..a4894eb0d7f3 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -883,9 +883,9 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size) { struct xe_bo *bo; - bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL, - size, ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT); + bo = xe_bo_create_pin_map_novm(stream->oa->xe, stream->gt->tile, + size, ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, false); if (IS_ERR(bo)) return PTR_ERR(bo); diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 046d330bad34..be91343829dd 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -334,6 +334,7 @@ static const struct xe_device_desc bmg_desc = { .has_mbx_power_limits = true, .has_gsc_nvm = 1, .has_heci_cscfi = 1, + .has_late_bind = true, .has_sriov = true, .max_gt_per_tile = 2, .needs_scratch = true, @@ -510,6 +511,26 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, *revid = REG_FIELD_GET(GMD_ID_REVID, val); } +static const struct xe_ip *find_graphics_ip(unsigned int verx100) +{ + KUNIT_STATIC_STUB_REDIRECT(find_graphics_ip, verx100); + + for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) + if (graphics_ips[i].verx100 == verx100) + return &graphics_ips[i]; + return NULL; +} + +static const struct xe_ip *find_media_ip(unsigned int verx100) +{ + KUNIT_STATIC_STUB_REDIRECT(find_media_ip, verx100); + + for (int i = 0; i < ARRAY_SIZE(media_ips); i++) + if (media_ips[i].verx100 == verx100) + return &media_ips[i]; + return NULL; +} + /* * Read IP version from hardware and select graphics/media IP descriptors * based on the result. 
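The KUNIT_STATIC_STUB_REDIRECT() hooks added to find_graphics_ip()/find_media_ip() make the IP lookup replaceable from a KUnit test via the static-stub API in <kunit/static_stub.h>. A sketch of the test side; the test body is illustrative, not from the patch:

#include <kunit/static_stub.h>

static const struct xe_ip *fake_find_graphics_ip(unsigned int verx100)
{
	return NULL;	/* force the "unknown graphics version" path */
}

static void handle_gmdid_unknown_ip_test(struct kunit *test)
{
	kunit_activate_static_stub(test, find_graphics_ip, fake_find_graphics_ip);
	/* ... call into handle_gmdid() and assert the error is reported ... */
}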
@@ -527,14 +548,7 @@ static void handle_gmdid(struct xe_device *xe, read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid); - for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) { - if (ver == graphics_ips[i].verx100) { - *graphics_ip = &graphics_ips[i]; - - break; - } - } - + *graphics_ip = find_graphics_ip(ver); if (!*graphics_ip) { drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n", ver / 100, ver % 100); @@ -545,14 +559,7 @@ static void handle_gmdid(struct xe_device *xe, if (ver == 0) return; - for (int i = 0; i < ARRAY_SIZE(media_ips); i++) { - if (ver == media_ips[i].verx100) { - *media_ip = &media_ips[i]; - - break; - } - } - + *media_ip = find_media_ip(ver); if (!*media_ip) { drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n", ver / 100, ver % 100); @@ -581,6 +588,7 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.has_gsc_nvm = desc->has_gsc_nvm; xe->info.has_heci_gscfi = desc->has_heci_gscfi; xe->info.has_heci_cscfi = desc->has_heci_cscfi; + xe->info.has_late_bind = desc->has_late_bind; xe->info.has_llc = desc->has_llc; xe->info.has_pxp = desc->has_pxp; xe->info.has_sriov = desc->has_sriov; diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h index b63002fc0f67..9b9766a3baa3 100644 --- a/drivers/gpu/drm/xe/xe_pci_types.h +++ b/drivers/gpu/drm/xe/xe_pci_types.h @@ -39,6 +39,7 @@ struct xe_device_desc { u8 has_gsc_nvm:1; u8 has_heci_gscfi:1; u8 has_heci_cscfi:1; + u8 has_late_bind:1; u8 has_llc:1; u8 has_mbx_power_limits:1; u8 has_pxp:1; diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 6eea4190bbd2..d6625c71115b 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -21,6 +21,7 @@ #include "xe_gt_idle.h" #include "xe_i2c.h" #include "xe_irq.h" +#include "xe_late_bind_fw.h" #include "xe_pcode.h" #include "xe_pxp.h" #include "xe_sriov_vf_ccs.h" @@ -129,6 +130,8 @@ int xe_pm_suspend(struct xe_device *xe) if (err) goto err; + xe_late_bind_wait_for_worker_completion(&xe->late_bind); + for_each_gt(gt, xe, id) xe_gt_suspend_prepare(gt); @@ -213,9 +216,11 @@ int xe_pm_resume(struct xe_device *xe) xe_pxp_pm_resume(xe->pxp); - if (IS_SRIOV_VF(xe)) + if (IS_VF_CCS_READY(xe)) xe_sriov_vf_ccs_register_context(xe); + xe_late_bind_fw_load(&xe->late_bind); + drm_dbg(&xe->drm, "Device resumed\n"); return 0; err: @@ -598,9 +603,12 @@ int xe_pm_runtime_resume(struct xe_device *xe) xe_pxp_pm_resume(xe->pxp); - if (IS_SRIOV_VF(xe)) + if (IS_VF_CCS_READY(xe)) xe_sriov_vf_ccs_register_context(xe); + if (xe->d3cold.allowed) + xe_late_bind_fw_load(&xe->late_bind); + out: xe_rpm_lockmap_release(xe); xe_pm_write_callback_task(xe, NULL); diff --git a/drivers/gpu/drm/xe/xe_printk.h b/drivers/gpu/drm/xe/xe_printk.h new file mode 100644 index 000000000000..c5be2385aa95 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_printk.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_PRINTK_H_ +#define _XE_PRINTK_H_ + +#include <drm/drm_print.h> + +#include "xe_device_types.h" + +#define __XE_PRINTK_FMT(_xe, _fmt, _args...) _fmt, ##_args + +#define xe_printk(_xe, _level, _fmt, ...) \ + drm_##_level(&(_xe)->drm, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__)) + +#define xe_err(_xe, _fmt, ...) \ + xe_printk((_xe), err, _fmt, ##__VA_ARGS__) + +#define xe_err_once(_xe, _fmt, ...) \ + xe_printk((_xe), err_once, _fmt, ##__VA_ARGS__) + +#define xe_err_ratelimited(_xe, _fmt, ...) 
\ + xe_printk((_xe), err_ratelimited, _fmt, ##__VA_ARGS__) + +#define xe_warn(_xe, _fmt, ...) \ + xe_printk((_xe), warn, _fmt, ##__VA_ARGS__) + +#define xe_notice(_xe, _fmt, ...) \ + xe_printk((_xe), notice, _fmt, ##__VA_ARGS__) + +#define xe_info(_xe, _fmt, ...) \ + xe_printk((_xe), info, _fmt, ##__VA_ARGS__) + +#define xe_dbg(_xe, _fmt, ...) \ + xe_printk((_xe), dbg, _fmt, ##__VA_ARGS__) + +#define xe_WARN_type(_xe, _type, _condition, _fmt, ...) \ + drm_WARN##_type(&(_xe)->drm, _condition, _fmt, ## __VA_ARGS__) + +#define xe_WARN(_xe, _condition, _fmt, ...) \ + xe_WARN_type((_xe),, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__)) + +#define xe_WARN_ONCE(_xe, _condition, _fmt, ...) \ + xe_WARN_type((_xe), _ONCE, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__)) + +#define xe_WARN_ON(_xe, _condition) \ + xe_WARN((_xe), _condition, "%s(%s)", "WARN_ON", __stringify(_condition)) + +#define xe_WARN_ON_ONCE(_xe, _condition) \ + xe_WARN_ONCE((_xe), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition)) + +static inline void __xe_printfn_err(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_device *xe = p->arg; + + xe_err(xe, "%pV", vaf); +} + +static inline void __xe_printfn_info(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_device *xe = p->arg; + + xe_info(xe, "%pV", vaf); +} + +static inline void __xe_printfn_dbg(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_device *xe = p->arg; + struct drm_printer ddp; + + /* + * The original xe_dbg() callsite annotations are useless here, + * redirect to the tweaked drm_dbg_printer() instead. + */ + ddp = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL); + ddp.origin = p->origin; + + drm_printf(&ddp, __XE_PRINTK_FMT(xe, "%pV", vaf)); +} + +/** + * xe_err_printer - Construct a &drm_printer that outputs to xe_err() + * @xe: the &xe_device pointer to use in xe_err() + * + * Return: The &drm_printer object. + */ +static inline struct drm_printer xe_err_printer(struct xe_device *xe) +{ + struct drm_printer p = { + .printfn = __xe_printfn_err, + .arg = xe, + }; + return p; +} + +/** + * xe_info_printer - Construct a &drm_printer that outputs to xe_info() + * @xe: the &xe_device pointer to use in xe_info() + * + * Return: The &drm_printer object. + */ +static inline struct drm_printer xe_info_printer(struct xe_device *xe) +{ + struct drm_printer p = { + .printfn = __xe_printfn_info, + .arg = xe, + }; + return p; +} + +/** + * xe_dbg_printer - Construct a &drm_printer that outputs like xe_dbg() + * @xe: the &xe_device pointer to use in xe_dbg() + * + * Return: The &drm_printer object. 
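Usage of the new wrappers and printer constructors, for illustration; both routes end up in the same drm device log with the usual device prefix:

static void example_report(struct xe_device *xe)
{
	struct drm_printer p = xe_info_printer(xe);

	xe_dbg(xe, "probe step done\n");
	drm_printf(&p, "engines initialized: %d\n", 4);	/* value illustrative */
}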
+ */ +static inline struct drm_printer xe_dbg_printer(struct xe_device *xe) +{ + struct drm_printer p = { + .printfn = __xe_printfn_dbg, + .arg = xe, + .origin = (const void *)_THIS_IP_, + }; + return p; +} + +#endif diff --git a/drivers/gpu/drm/xe/xe_psmi.c b/drivers/gpu/drm/xe/xe_psmi.c index a2c9ff5bfd59..45d142191d60 100644 --- a/drivers/gpu/drm/xe/xe_psmi.c +++ b/drivers/gpu/drm/xe/xe_psmi.c @@ -68,9 +68,7 @@ static void psmi_cleanup(struct xe_device *xe) static struct xe_bo *psmi_alloc_object(struct xe_device *xe, unsigned int id, size_t bo_size) { - struct xe_bo *bo = NULL; struct xe_tile *tile; - int err; if (!id || !bo_size) return NULL; @@ -78,22 +76,12 @@ static struct xe_bo *psmi_alloc_object(struct xe_device *xe, tile = &xe->tiles[id - 1]; /* VRAM: Allocate GEM object for the capture buffer */ - bo = xe_bo_create_locked(xe, tile, NULL, bo_size, - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_PINNED | - XE_BO_FLAG_PINNED_LATE_RESTORE | - XE_BO_FLAG_NEEDS_CPU_ACCESS); - - if (!IS_ERR(bo)) { - /* Buffer written by HW, ensure stays resident */ - err = xe_bo_pin(bo); - if (err) - bo = ERR_PTR(err); - xe_bo_unlock(bo); - } - - return bo; + return xe_bo_create_pin_range_novm(xe, tile, bo_size, 0, ~0ull, + ttm_bo_type_kernel, + XE_BO_FLAG_VRAM_IF_DGFX(tile) | + XE_BO_FLAG_PINNED | + XE_BO_FLAG_PINNED_LATE_RESTORE | + XE_BO_FLAG_NEEDS_CPU_ACCESS); } /* diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index c129048a9a09..a1c88f9a6c76 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -13,17 +13,17 @@ #include "xe_drm_client.h" #include "xe_exec_queue.h" #include "xe_gt.h" -#include "xe_tlb_inval_job.h" #include "xe_migrate.h" #include "xe_pt_types.h" #include "xe_pt_walk.h" #include "xe_res_cursor.h" #include "xe_sched_job.h" -#include "xe_sync.h" #include "xe_svm.h" +#include "xe_sync.h" #include "xe_tlb_inval_job.h" #include "xe_trace.h" #include "xe_ttm_stolen_mgr.h" +#include "xe_userptr.h" #include "xe_vm.h" struct xe_pt_dir { @@ -89,6 +89,7 @@ static void xe_pt_free(struct xe_pt *pt) * @vm: The vm to create for. * @tile: The tile to create for. * @level: The page-table level. + * @exec: The drm_exec object used to lock the vm. * * Allocate and initialize a single struct xe_pt metadata structure. Also * create the corresponding page-table bo, but don't initialize it. If the @@ -100,7 +101,7 @@ static void xe_pt_free(struct xe_pt *pt) * error. 
*/ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, - unsigned int level) + unsigned int level, struct drm_exec *exec) { struct xe_pt *pt; struct xe_bo *bo; @@ -124,9 +125,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE; pt->level = level; + + drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec)); bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K, ttm_bo_type_kernel, - bo_flags); + bo_flags, exec); if (IS_ERR(bo)) { err = PTR_ERR(bo); goto err_kfree; @@ -590,7 +593,8 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset, if (covers || !*child) { u64 flags = 0; - xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1); + xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1, + xe_vm_validation_exec(vm)); if (IS_ERR(xe_child)) return PTR_ERR(xe_child); @@ -729,7 +733,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, return -EAGAIN; } if (xe_svm_range_has_dma_mapping(range)) { - xe_res_first_dma(range->base.dma_addr, 0, + xe_res_first_dma(range->base.pages.dma_addr, 0, range->base.itree.last + 1 - range->base.itree.start, &curs); xe_svm_range_debug(range, "BIND PREPARE - MIXED"); @@ -760,8 +764,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, if (!xe_vma_is_null(vma) && !range) { if (xe_vma_is_userptr(vma)) - xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0, - xe_vma_size(vma), &curs); + xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0, + xe_vma_size(vma), &curs); else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo)) xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma), xe_vma_size(vma), &curs); @@ -914,7 +918,7 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma) if (xe_vma_bo(vma)) xe_bo_assert_held(xe_vma_bo(vma)); else if (xe_vma_is_userptr(vma)) - lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock); + lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock); if (!(pt_mask & BIT(tile->id))) return false; @@ -1049,7 +1053,7 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma) xe_pt_commit_prepare_locks_assert(vma); if (xe_vma_is_userptr(vma)) - lockdep_assert_held_read(&vm->userptr.notifier_lock); + xe_svm_assert_held_read(vm); } static void xe_pt_commit(struct xe_vma *vma, @@ -1376,6 +1380,7 @@ static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update) pt_update_ops, rftree); } +#if IS_ENABLED(CONFIG_DRM_GPUSVM) #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma) @@ -1406,7 +1411,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, struct xe_userptr_vma *uvma; unsigned long notifier_seq; - lockdep_assert_held_read(&vm->userptr.notifier_lock); + xe_svm_assert_held_read(vm); if (!xe_vma_is_userptr(vma)) return 0; @@ -1415,7 +1420,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, if (xe_pt_userptr_inject_eagain(uvma)) xe_vma_userptr_force_invalidate(uvma); - notifier_seq = uvma->userptr.notifier_seq; + notifier_seq = uvma->userptr.pages.notifier_seq; if (!mmu_interval_read_retry(&uvma->userptr.notifier, notifier_seq)) @@ -1431,12 +1436,12 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, return 0; } -static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op, - struct xe_vm_pgtable_update_ops *pt_update) +static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op, + struct xe_vm_pgtable_update_ops *pt_update) { int err = 0; - 
lockdep_assert_held_read(&vm->userptr.notifier_lock); + xe_svm_assert_held_read(vm); switch (op->base.op) { case DRM_GPUVA_OP_MAP: @@ -1454,9 +1459,40 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op, case DRM_GPUVA_OP_UNMAP: break; case DRM_GPUVA_OP_PREFETCH: - err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), - pt_update); + if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) { + struct xe_svm_range *range = op->map_range.range; + unsigned long i; + + xe_assert(vm->xe, + xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))); + xa_for_each(&op->prefetch_range.range, i, range) { + xe_svm_range_debug(range, "PRE-COMMIT"); + + if (!xe_svm_range_pages_valid(range)) { + xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); + return -ENODATA; + } + } + } else { + err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update); + } + break; +#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) + case DRM_GPUVA_OP_DRIVER: + if (op->subop == XE_VMA_SUBOP_MAP_RANGE) { + struct xe_svm_range *range = op->map_range.range; + + xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma)); + + xe_svm_range_debug(range, "PRE-COMMIT"); + + if (!xe_svm_range_pages_valid(range)) { + xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); + return -EAGAIN; + } + } break; +#endif default: drm_warn(&vm->xe->drm, "NOT POSSIBLE"); } @@ -1464,7 +1500,7 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op, return err; } -static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) +static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) { struct xe_vm *vm = pt_update->vops->vm; struct xe_vma_ops *vops = pt_update->vops; @@ -1477,69 +1513,18 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update) if (err) return err; - down_read(&vm->userptr.notifier_lock); + xe_svm_notifier_lock(vm); list_for_each_entry(op, &vops->list, link) { - err = op_check_userptr(vm, op, pt_update_ops); + err = op_check_svm_userptr(vm, op, pt_update_ops); if (err) { - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); break; } } return err; } - -#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) -static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update) -{ - struct xe_vm *vm = pt_update->vops->vm; - struct xe_vma_ops *vops = pt_update->vops; - struct xe_vma_op *op; - unsigned long i; - int err; - - err = xe_pt_pre_commit(pt_update); - if (err) - return err; - - xe_svm_notifier_lock(vm); - - list_for_each_entry(op, &vops->list, link) { - struct xe_svm_range *range = NULL; - - if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) - continue; - - if (op->base.op == DRM_GPUVA_OP_PREFETCH) { - xe_assert(vm->xe, - xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))); - xa_for_each(&op->prefetch_range.range, i, range) { - xe_svm_range_debug(range, "PRE-COMMIT"); - - if (!xe_svm_range_pages_valid(range)) { - xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); - xe_svm_notifier_unlock(vm); - return -ENODATA; - } - } - } else { - xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma)); - xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE); - range = op->map_range.range; - - xe_svm_range_debug(range, "PRE-COMMIT"); - - if (!xe_svm_range_pages_valid(range)) { - xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); - xe_svm_notifier_unlock(vm); - return -EAGAIN; - } - } - } - - return 0; -} #endif struct xe_pt_stage_unbind_walk { @@ -1843,7 +1828,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile 
*tile, xe_vma_start(vma), xe_vma_end(vma)); ++pt_update_ops->current_op; - pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma); + pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma); /* * If rebind, we have to invalidate TLB on !LR vms to invalidate @@ -1951,7 +1936,7 @@ static int unbind_op_prepare(struct xe_tile *tile, xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma), xe_vma_end(vma)); ++pt_update_ops->current_op; - pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma); + pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma); pt_update_ops->needs_invalidation = true; xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries); @@ -2199,7 +2184,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile, vma->tile_invalidated & ~BIT(tile->id)); vma->tile_staged &= ~BIT(tile->id); if (xe_vma_is_userptr(vma)) { - lockdep_assert_held_read(&vm->userptr.notifier_lock); + xe_svm_assert_held_read(vm); to_userptr_vma(vma)->userptr.initial_bind = true; } @@ -2235,7 +2220,7 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile, if (!vma->tile_present) { list_del_init(&vma->combined_links.rebind); if (xe_vma_is_userptr(vma)) { - lockdep_assert_held_read(&vm->userptr.notifier_lock); + xe_svm_assert_held_read(vm); spin_lock(&vm->userptr.invalidated_lock); list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link); @@ -2338,20 +2323,14 @@ static const struct xe_migrate_pt_update_ops migrate_ops = { .pre_commit = xe_pt_pre_commit, }; -static const struct xe_migrate_pt_update_ops userptr_migrate_ops = { - .populate = xe_vm_populate_pgtable, - .clear = xe_migrate_clear_pgtable_callback, - .pre_commit = xe_pt_userptr_pre_commit, -}; - -#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) -static const struct xe_migrate_pt_update_ops svm_migrate_ops = { +#if IS_ENABLED(CONFIG_DRM_GPUSVM) +static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = { .populate = xe_vm_populate_pgtable, .clear = xe_migrate_clear_pgtable_callback, - .pre_commit = xe_pt_svm_pre_commit, + .pre_commit = xe_pt_svm_userptr_pre_commit, }; #else -static const struct xe_migrate_pt_update_ops svm_migrate_ops; +static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops; #endif static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q, @@ -2389,9 +2368,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops) int err = 0, i; struct xe_migrate_pt_update update = { .ops = pt_update_ops->needs_svm_lock ? - &svm_migrate_ops : - pt_update_ops->needs_userptr_lock ? 
- &userptr_migrate_ops :
+ &svm_userptr_migrate_ops :
 &migrate_ops,
 .vops = vops,
 .tile_id = tile->id,
@@ -2533,8 +2510,6 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 if (pt_update_ops->needs_svm_lock)
 xe_svm_notifier_unlock(vm);
- if (pt_update_ops->needs_userptr_lock)
- up_read(&vm->userptr.notifier_lock);
 xe_tlb_inval_job_put(mjob);
 xe_tlb_inval_job_put(ijob);
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 5ecf003d513c..4daeebaab5a1 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -10,6 +10,7 @@
 #include "xe_pt_types.h"
 struct dma_fence;
+struct drm_exec;
 struct xe_bo;
 struct xe_device;
 struct xe_exec_queue;
@@ -29,7 +30,7 @@ struct xe_vma_ops;
 unsigned int xe_pt_shift(unsigned int level);
 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
- unsigned int level);
+ unsigned int level, struct drm_exec *exec);
 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
 struct xe_pt *pt);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 17cdd7c7e9f5..881f01e14db8 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -105,8 +105,6 @@ struct xe_vm_pgtable_update_ops {
 u32 current_op;
 /** @needs_svm_lock: Needs SVM lock */
 bool needs_svm_lock;
- /** @needs_userptr_lock: Needs userptr lock */
- bool needs_userptr_lock;
 /** @needs_invalidation: Needs invalidation */
 bool needs_invalidation;
 /**
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 3d62008c99f1..bdbdbbf6a678 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -688,6 +688,7 @@ start:
 return ret;
 }
+ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO);
 static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
 {
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
index ca95f2a4d4ef..e60526e30030 100644
--- a/drivers/gpu/drm/xe/xe_pxp_submit.c
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -54,8 +54,9 @@ static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
 * Each termination is 16 DWORDS, so 4K is enough to contain a
 * termination for each session.
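 * (Editorial note, worked through for clarity: 16 DWORDS x 4 bytes = 64
 * bytes per termination, so a 4K buffer holds 4096 / 64 = 64 terminations.)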
*/ - bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT); + bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT, + false); if (IS_ERR(bo)) { err = PTR_ERR(bo); goto out_queue; @@ -87,7 +88,9 @@ static int allocate_gsc_client_resources(struct xe_gt *gt, { struct xe_tile *tile = gt_to_tile(gt); struct xe_device *xe = tile_to_xe(tile); + struct xe_validation_ctx ctx; struct xe_hw_engine *hwe; + struct drm_exec exec; struct xe_vm *vm; struct xe_bo *bo; struct xe_exec_queue *q; @@ -106,15 +109,26 @@ static int allocate_gsc_client_resources(struct xe_gt *gt, return PTR_ERR(vm); /* We allocate a single object for the batch and the in/out memory */ - xe_vm_lock(vm, false); - bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2, - ttm_bo_type_kernel, - XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC); - xe_vm_unlock(vm); - if (IS_ERR(bo)) { - err = PTR_ERR(bo); - goto vm_out; + + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags){}, err) { + err = xe_vm_drm_exec_lock(vm, &exec); + drm_exec_retry_on_contention(&exec); + if (err) + break; + + bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2, + ttm_bo_type_kernel, + XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | + XE_BO_FLAG_NEEDS_UC, &exec); + drm_exec_retry_on_contention(&exec); + if (IS_ERR(bo)) { + err = PTR_ERR(bo); + xe_validation_retry_on_oom(&ctx, &err); + break; + } } + if (err) + goto vm_out; fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB); if (IS_ERR(fence)) { diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 4dbe5732cb7f..e1b603aba61b 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -21,6 +21,7 @@ #include "xe_force_wake.h" #include "xe_ggtt.h" #include "xe_gt.h" +#include "xe_gt_topology.h" #include "xe_guc_hwconfig.h" #include "xe_macros.h" #include "xe_mmio.h" @@ -477,7 +478,7 @@ static size_t calc_topo_query_size(struct xe_device *xe) sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss); /* L3bank mask may not be available for some GTs */ - if (!XE_GT_WA(gt, no_media_l3)) + if (xe_gt_topology_report_l3(gt)) query_size += sizeof(struct drm_xe_query_topology_mask) + sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask); } @@ -540,7 +541,7 @@ static int query_gt_topology(struct xe_device *xe, * mask, then it's better to omit L3 from the query rather than * reporting bogus or zeroed information to userspace. 
*/ - if (!XE_GT_WA(gt, no_media_l3)) { + if (xe_gt_topology_report_l3(gt)) { topo.type = DRM_XE_TOPO_L3_BANK; err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask, sizeof(gt->fuse_topo.l3_bank_mask)); diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 47ea1521dc80..b5f430d59f80 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -370,3 +370,9 @@ bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt, { return xe_configfs_get_psmi_enabled(to_pci_dev(gt_to_xe(gt)->drm.dev)); } + +bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt, + const struct xe_hw_engine *hwe) +{ + return xe_gt_has_discontiguous_dss_groups(gt); +} diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h index 7951fefdbe04..ac12ddf6cde6 100644 --- a/drivers/gpu/drm/xe/xe_rtp.h +++ b/drivers/gpu/drm/xe/xe_rtp.h @@ -480,4 +480,7 @@ bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt, bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt, const struct xe_hw_engine *hwe); +bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt, + const struct xe_hw_engine *hwe); + #endif diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c index 87911fb4eea7..7d2d6de2aabf 100644 --- a/drivers/gpu/drm/xe/xe_sriov.c +++ b/drivers/gpu/drm/xe/xe_sriov.c @@ -160,19 +160,15 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size) } /** - * xe_sriov_late_init() - SR-IOV late initialization functions. + * xe_sriov_init_late() - SR-IOV late initialization functions. * @xe: the &xe_device to initialize * - * On VF this function will initialize code for CCS migration. - * * Return: 0 on success or a negative error code on failure. */ -int xe_sriov_late_init(struct xe_device *xe) +int xe_sriov_init_late(struct xe_device *xe) { - int err = 0; - - if (IS_VF_CCS_INIT_NEEDED(xe)) - err = xe_sriov_vf_ccs_init(xe); + if (IS_SRIOV_VF(xe)) + return xe_sriov_vf_init_late(xe); - return err; + return 0; } diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h index 0e0c1abf2d14..6db45df55615 100644 --- a/drivers/gpu/drm/xe/xe_sriov.h +++ b/drivers/gpu/drm/xe/xe_sriov.h @@ -18,7 +18,7 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t len); void xe_sriov_probe_early(struct xe_device *xe); void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p); int xe_sriov_init(struct xe_device *xe); -int xe_sriov_late_init(struct xe_device *xe); +int xe_sriov_init_late(struct xe_device *xe); static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe) { diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c index 5de81f213d83..cdd9f8e78b2a 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf.c @@ -3,6 +3,7 @@ * Copyright © 2023-2024 Intel Corporation */ +#include <drm/drm_debugfs.h> #include <drm/drm_managed.h> #include "xe_assert.h" @@ -10,6 +11,7 @@ #include "xe_gt.h" #include "xe_gt_sriov_printk.h" #include "xe_gt_sriov_vf.h" +#include "xe_guc.h" #include "xe_guc_ct.h" #include "xe_guc_submit.h" #include "xe_irq.h" @@ -18,6 +20,7 @@ #include "xe_sriov.h" #include "xe_sriov_printk.h" #include "xe_sriov_vf.h" +#include "xe_sriov_vf_ccs.h" #include "xe_tile_sriov_vf.h" /** @@ -127,16 +130,66 @@ * | | | */ -static bool vf_migration_supported(struct xe_device *xe) +/** + * xe_sriov_vf_migration_supported - Report whether SR-IOV VF migration is + * supported or not. 
+ * @xe: the &xe_device to check + * + * Returns: true if VF migration is supported, false otherwise. + */ +bool xe_sriov_vf_migration_supported(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_VF(xe)); + return xe->sriov.vf.migration.enabled; +} + +static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...) +{ + struct va_format vaf; + va_list va_args; + + xe_assert(xe, IS_SRIOV_VF(xe)); + + va_start(va_args, fmt); + vaf.fmt = fmt; + vaf.va = &va_args; + xe_sriov_notice(xe, "migration disabled: %pV\n", &vaf); + va_end(va_args); + + xe->sriov.vf.migration.enabled = false; +} + +static void migration_worker_func(struct work_struct *w); + +static void vf_migration_init_early(struct xe_device *xe) { /* * TODO: Add conditions to allow specific platforms, when they're * supported at production quality. */ - return IS_ENABLED(CONFIG_DRM_XE_DEBUG); -} + if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG)) + return vf_disable_migration(xe, + "experimental feature not available on production builds"); + + if (GRAPHICS_VER(xe) < 20) + return vf_disable_migration(xe, "requires gfx version >= 20, but only %u found", + GRAPHICS_VER(xe)); + + if (!IS_DGFX(xe)) { + struct xe_uc_fw_version guc_version; + + xe_gt_sriov_vf_guc_versions(xe_device_get_gt(xe, 0), NULL, &guc_version); + if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0)) + return vf_disable_migration(xe, + "CCS migration requires GuC ABI >= 1.23 but only %u.%u found", + guc_version.major, guc_version.minor); + } -static void migration_worker_func(struct work_struct *w); + INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func); + + xe->sriov.vf.migration.enabled = true; + xe_sriov_dbg(xe, "migration support enabled\n"); +} /** * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data. @@ -144,10 +197,7 @@ static void migration_worker_func(struct work_struct *w); */ void xe_sriov_vf_init_early(struct xe_device *xe) { - INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func); - - if (!vf_migration_supported(xe)) - xe_sriov_info(xe, "migration not supported by this module version\n"); + vf_migration_init_early(xe); } /** @@ -302,8 +352,8 @@ static void vf_post_migration_recovery(struct xe_device *xe) xe_pm_runtime_get(xe); vf_post_migration_shutdown(xe); - if (!vf_migration_supported(xe)) { - xe_sriov_err(xe, "migration not supported by this module version\n"); + if (!xe_sriov_vf_migration_supported(xe)) { + xe_sriov_err(xe, "migration is not supported\n"); err = -ENOTRECOVERABLE; goto fail; } @@ -378,3 +428,48 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe) drm_info(&xe->drm, "VF migration recovery %s\n", started ? "scheduled" : "already in progress"); } + +/** + * xe_sriov_vf_init_late() - SR-IOV VF late initialization functions. + * @xe: the &xe_device to initialize + * + * This function initializes code for CCS migration. + * + * Return: 0 on success or a negative error code on failure. 
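+ *
+ * Example (sketch of the actual call site in xe_sriov_init_late(), shown
+ * here for illustration)::
+ *
+ *	if (IS_SRIOV_VF(xe))
+ *		return xe_sriov_vf_init_late(xe);
+ *	return 0;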
+ */ +int xe_sriov_vf_init_late(struct xe_device *xe) +{ + int err = 0; + + if (xe_sriov_vf_migration_supported(xe)) + err = xe_sriov_vf_ccs_init(xe); + + return err; +} + +static int sa_info_vf_ccs(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct xe_device *xe = to_xe_device(node->minor->dev); + struct drm_printer p = drm_seq_file_printer(m); + + xe_sriov_vf_ccs_print(xe, &p); + return 0; +} + +static const struct drm_info_list debugfs_list[] = { + { .name = "sa_info_vf_ccs", .show = sa_info_vf_ccs }, +}; + +/** + * xe_sriov_vf_debugfs_register - Register VF debugfs attributes. + * @xe: the &xe_device + * @root: the root &dentry + * + * Prepare debugfs attributes exposed by the VF. + */ +void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root) +{ + drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), + root, xe->drm.primary); +} diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h index 7b8622cff2b7..9e752105ec2a 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf.h @@ -6,9 +6,15 @@ #ifndef _XE_SRIOV_VF_H_ #define _XE_SRIOV_VF_H_ +#include <linux/types.h> + +struct dentry; struct xe_device; void xe_sriov_vf_init_early(struct xe_device *xe); +int xe_sriov_vf_init_late(struct xe_device *xe); void xe_sriov_vf_start_migration_recovery(struct xe_device *xe); +bool xe_sriov_vf_migration_supported(struct xe_device *xe); +void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root); #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c index 4872e43eb440..8dec616c37c9 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c @@ -13,8 +13,10 @@ #include "xe_guc_submit.h" #include "xe_lrc.h" #include "xe_migrate.h" +#include "xe_pm.h" #include "xe_sa.h" #include "xe_sriov_printk.h" +#include "xe_sriov_vf.h" #include "xe_sriov_vf_ccs.h" #include "xe_sriov_vf_ccs_types.h" @@ -135,7 +137,7 @@ static u64 get_ccs_bb_pool_size(struct xe_device *xe) return round_up(bb_pool_size * 2, SZ_1M); } -static int alloc_bb_pool(struct xe_tile *tile, struct xe_tile_vf_ccs *ctx) +static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx) { struct xe_device *xe = tile_to_xe(tile); struct xe_sa_manager *sa_manager; @@ -167,7 +169,7 @@ static int alloc_bb_pool(struct xe_tile *tile, struct xe_tile_vf_ccs *ctx) return 0; } -static void ccs_rw_update_ring(struct xe_tile_vf_ccs *ctx) +static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx) { u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool); struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q); @@ -184,9 +186,8 @@ static void ccs_rw_update_ring(struct xe_tile_vf_ccs *ctx) xe_lrc_set_ring_tail(lrc, lrc->ring.tail); } -static int register_save_restore_context(struct xe_tile_vf_ccs *ctx) +static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx) { - int err = -EINVAL; int ctx_type; switch (ctx->ctx_id) { @@ -197,10 +198,10 @@ static int register_save_restore_context(struct xe_tile_vf_ccs *ctx) ctx_type = GUC_CONTEXT_COMPRESSION_RESTORE; break; default: - return err; + return -EINVAL; } - xe_guc_register_exec_queue(ctx->mig_q, ctx_type); + xe_guc_register_vf_exec_queue(ctx->mig_q, ctx_type); return 0; } @@ -215,16 +216,14 @@ static int register_save_restore_context(struct xe_tile_vf_ccs *ctx) */ int xe_sriov_vf_ccs_register_context(struct xe_device *xe) { - struct xe_tile *tile = xe_device_get_root_tile(xe); 
enum xe_sriov_vf_ccs_rw_ctxs ctx_id; - struct xe_tile_vf_ccs *ctx; + struct xe_sriov_vf_ccs_ctx *ctx; int err; - if (!IS_VF_CCS_READY(xe)) - return 0; + xe_assert(xe, IS_VF_CCS_READY(xe)); for_each_ccs_rw_ctx(ctx_id) { - ctx = &tile->sriov.vf.ccs[ctx_id]; + ctx = &xe->sriov.vf.ccs.contexts[ctx_id]; err = register_save_restore_context(ctx); if (err) return err; @@ -235,7 +234,7 @@ int xe_sriov_vf_ccs_register_context(struct xe_device *xe) static void xe_sriov_vf_ccs_fini(void *arg) { - struct xe_tile_vf_ccs *ctx = arg; + struct xe_sriov_vf_ccs_ctx *ctx = arg; struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q); /* @@ -259,17 +258,19 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe) { struct xe_tile *tile = xe_device_get_root_tile(xe); enum xe_sriov_vf_ccs_rw_ctxs ctx_id; - struct xe_tile_vf_ccs *ctx; + struct xe_sriov_vf_ccs_ctx *ctx; struct xe_exec_queue *q; u32 flags; int err; xe_assert(xe, IS_SRIOV_VF(xe)); - xe_assert(xe, !IS_DGFX(xe)); - xe_assert(xe, xe_device_has_flat_ccs(xe)); + xe_assert(xe, xe_sriov_vf_migration_supported(xe)); + + if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)) + return 0; for_each_ccs_rw_ctx(ctx_id) { - ctx = &tile->sriov.vf.ccs[ctx_id]; + ctx = &xe->sriov.vf.ccs.contexts[ctx_id]; ctx->ctx_id = ctx_id; flags = EXEC_QUEUE_FLAG_KERNEL | @@ -324,13 +325,12 @@ int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo) { struct xe_device *xe = xe_bo_device(bo); enum xe_sriov_vf_ccs_rw_ctxs ctx_id; - struct xe_tile_vf_ccs *ctx; + struct xe_sriov_vf_ccs_ctx *ctx; struct xe_tile *tile; struct xe_bb *bb; int err = 0; - if (!IS_VF_CCS_READY(xe)) - return 0; + xe_assert(xe, IS_VF_CCS_READY(xe)); tile = xe_device_get_root_tile(xe); @@ -339,7 +339,7 @@ int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo) /* bb should be NULL here. Assert if not NULL */ xe_assert(xe, !bb); - ctx = &tile->sriov.vf.ccs[ctx_id]; + ctx = &xe->sriov.vf.ccs.contexts[ctx_id]; err = xe_migrate_ccs_rw_copy(tile, ctx->mig_q, bo, ctx_id); } return err; @@ -361,7 +361,9 @@ int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo) enum xe_sriov_vf_ccs_rw_ctxs ctx_id; struct xe_bb *bb; - if (!IS_VF_CCS_READY(xe)) + xe_assert(xe, IS_VF_CCS_READY(xe)); + + if (!xe_bo_has_valid_ccs_bb(bo)) return 0; for_each_ccs_rw_ctx(ctx_id) { @@ -375,3 +377,34 @@ int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo) } return 0; } + +/** + * xe_sriov_vf_ccs_print - Print VF CCS details. + * @xe: the &xe_device + * @p: the &drm_printer + * + * This function is for VF use only. + */ +void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p) +{ + struct xe_sa_manager *bb_pool; + enum xe_sriov_vf_ccs_rw_ctxs ctx_id; + + if (!IS_VF_CCS_READY(xe)) + return; + + xe_pm_runtime_get(xe); + + for_each_ccs_rw_ctx(ctx_id) { + bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool; + if (!bb_pool) + break; + + drm_printf(p, "ccs %s bb suballoc info\n", ctx_id ? 
"write" : "read"); + drm_printf(p, "-------------------------\n"); + drm_suballoc_dump_debug_info(&bb_pool->base, p, xe_sa_manager_gpu_addr(bb_pool)); + drm_puts(p, "\n"); + } + + xe_pm_runtime_put(xe); +} diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h index 1f1baf685fec..0745c0ff0228 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h @@ -6,6 +6,11 @@ #ifndef _XE_SRIOV_VF_CCS_H_ #define _XE_SRIOV_VF_CCS_H_ +#include "xe_device_types.h" +#include "xe_sriov.h" +#include "xe_sriov_vf_ccs_types.h" + +struct drm_printer; struct xe_device; struct xe_bo; @@ -13,5 +18,17 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe); int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo); int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo); int xe_sriov_vf_ccs_register_context(struct xe_device *xe); +void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p); + +static inline bool xe_sriov_vf_ccs_ready(struct xe_device *xe) +{ + xe_assert(xe, IS_SRIOV_VF(xe)); + return xe->sriov.vf.ccs.initialized; +} + +#define IS_VF_CCS_READY(xe) ({ \ + struct xe_device *xe__ = (xe); \ + IS_SRIOV_VF(xe__) && xe_sriov_vf_ccs_ready(xe__); \ + }) #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h index 93435a6f4cb6..22c499943d2a 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h @@ -6,48 +6,46 @@ #ifndef _XE_SRIOV_VF_CCS_TYPES_H_ #define _XE_SRIOV_VF_CCS_TYPES_H_ +#include <linux/types.h> + #define for_each_ccs_rw_ctx(id__) \ for ((id__) = 0; (id__) < XE_SRIOV_VF_CCS_CTX_COUNT; (id__)++) -#define IS_VF_CCS_READY(xe) ({ \ - struct xe_device *___xe = (xe); \ - xe_assert(___xe, IS_SRIOV_VF(___xe)); \ - ___xe->sriov.vf.ccs.initialized; \ - }) - -#define IS_VF_CCS_INIT_NEEDED(xe) ({\ - struct xe_device *___xe = (xe); \ - IS_SRIOV_VF(___xe) && !IS_DGFX(___xe) && \ - xe_device_has_flat_ccs(___xe) && GRAPHICS_VER(___xe) >= 20; \ - }) - enum xe_sriov_vf_ccs_rw_ctxs { XE_SRIOV_VF_CCS_READ_CTX, XE_SRIOV_VF_CCS_WRITE_CTX, XE_SRIOV_VF_CCS_CTX_COUNT }; -#define IS_VF_CCS_BB_VALID(xe, bo) ({ \ - struct xe_device *___xe = (xe); \ - struct xe_bo *___bo = (bo); \ - IS_SRIOV_VF(___xe) && \ - ___bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] && \ - ___bo->bb_ccs[XE_SRIOV_VF_CCS_WRITE_CTX]; \ - }) - struct xe_migrate; struct xe_sa_manager; -struct xe_tile_vf_ccs { - /** @id: Id to which context it belongs to */ +/** + * struct xe_sriov_vf_ccs_ctx - VF CCS migration context data. + */ +struct xe_sriov_vf_ccs_ctx { + /** @ctx_id: Id to which context it belongs to */ enum xe_sriov_vf_ccs_rw_ctxs ctx_id; + /** @mig_q: exec queues used for migration */ struct xe_exec_queue *mig_q; + /** @mem: memory data */ struct { - /** @ccs_bb_pool: Pool from which batch buffers are allocated. */ + /** @mem.ccs_bb_pool: Pool from which batch buffers are allocated. */ struct xe_sa_manager *ccs_bb_pool; } mem; }; +/** + * struct xe_sriov_vf_ccs - The VF CCS migration support data. + */ +struct xe_sriov_vf_ccs { + /** @contexts: CCS read and write contexts for VF. */ + struct xe_sriov_vf_ccs_ctx contexts[XE_SRIOV_VF_CCS_CTX_COUNT]; + + /** @initialized: Initialization of VF CCS is completed or not. 
*/ + bool initialized; +}; + #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h index 24a873c50c49..426cc5841958 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h @@ -9,6 +9,8 @@ #include <linux/types.h> #include <linux/workqueue_types.h> +#include "xe_sriov_vf_ccs_types.h" + /** * struct xe_sriov_vf_relay_version - PF ABI version details. */ @@ -35,13 +37,15 @@ struct xe_device_vf { struct work_struct worker; /** @migration.gt_flags: Per-GT request flags for VF migration recovery */ unsigned long gt_flags; + /** + * @migration.enabled: flag indicating if migration support + * was enabled or not due to missing prerequisites + */ + bool enabled; } migration; /** @ccs: VF CCS state data */ - struct { - /** @ccs.initialized: Initilalization of VF CCS is completed or not */ - bool initialized; - } ccs; + struct xe_sriov_vf_ccs ccs; }; #endif diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c index 7999cc5262a5..1662bfddd4bc 100644 --- a/drivers/gpu/drm/xe/xe_survivability_mode.c +++ b/drivers/gpu/drm/xe/xe_survivability_mode.c @@ -289,19 +289,10 @@ bool xe_survivability_mode_is_requested(struct xe_device *xe) u32 data; bool survivability_mode; - if (!IS_DGFX(xe) || IS_SRIOV_VF(xe)) + if (!IS_DGFX(xe) || IS_SRIOV_VF(xe) || xe->info.platform < XE_BATTLEMAGE) return false; survivability_mode = xe_configfs_get_survivability_mode(pdev); - - if (xe->info.platform < XE_BATTLEMAGE) { - if (survivability_mode) { - dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n"); - xe_configfs_clear_survivability_mode(pdev); - } - return false; - } - /* Enable survivability mode if set via configfs */ if (survivability_mode) return true; diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 76c6d74c1208..7f2f1f041f1d 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -6,6 +6,7 @@ #include <drm/drm_drv.h> #include "xe_bo.h" +#include "xe_exec_queue_types.h" #include "xe_gt_stats.h" #include "xe_migrate.h" #include "xe_module.h" @@ -25,9 +26,9 @@ static bool xe_svm_range_in_vram(struct xe_svm_range *range) * memory. */ - struct drm_gpusvm_range_flags flags = { + struct drm_gpusvm_pages_flags flags = { /* Pairs with WRITE_ONCE in drm_gpusvm.c */ - .__flags = READ_ONCE(range->base.flags.__flags), + .__flags = READ_ONCE(range->base.pages.flags.__flags), }; return flags.has_devmem_pages; @@ -49,15 +50,15 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r) return gpusvm_to_vm(r->gpusvm); } -#define range_debug(r__, operaton__) \ +#define range_debug(r__, operation__) \ vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \ "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \ "start=0x%014lx, end=0x%014lx, size=%lu", \ - (operaton__), range_to_vm(&(r__)->base)->usm.asid, \ + (operation__), range_to_vm(&(r__)->base)->usm.asid, \ (r__)->base.gpusvm, \ xe_svm_range_in_vram((r__)) ? 1 : 0, \ xe_svm_range_has_vram_binding((r__)) ? 
1 : 0, \ - (r__)->base.notifier_seq, \ + (r__)->base.pages.notifier_seq, \ xe_svm_range_start((r__)), xe_svm_range_end((r__)), \ xe_svm_range_size((r__))) @@ -112,6 +113,11 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, &vm->svm.garbage_collector.work); } +static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt) +{ + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1); +} + static u8 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, const struct mmu_notifier_range *mmu_range, @@ -128,7 +134,7 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, range_debug(range, "NOTIFIER"); /* Skip if already unmapped or if no binding exist */ - if (range->base.flags.unmapped || !range->tile_present) + if (range->base.pages.flags.unmapped || !range->tile_present) return 0; range_debug(range, "NOTIFIER - EXECUTE"); @@ -144,13 +150,19 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, */ for_each_tile(tile, xe, id) if (xe_pt_zap_ptes_range(tile, vm, range)) { - tile_mask |= BIT(id); /* * WRITE_ONCE pairs with READ_ONCE in * xe_vm_has_valid_gpu_mapping() */ WRITE_ONCE(range->tile_invalidated, range->tile_invalidated | BIT(id)); + + if (!(tile_mask & BIT(id))) { + xe_svm_tlb_inval_count_stats_incr(tile->primary_gt); + if (tile->media_gt) + xe_svm_tlb_inval_count_stats_incr(tile->media_gt); + tile_mask |= BIT(id); + } } return tile_mask; @@ -170,6 +182,24 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r, mmu_range); } +static s64 xe_svm_stats_ktime_us_delta(ktime_t start) +{ + return IS_ENABLED(CONFIG_DEBUG_FS) ? + ktime_us_delta(ktime_get(), start) : 0; +} + +static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start) +{ + s64 us_delta = xe_svm_stats_ktime_us_delta(start); + + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta); +} + +static ktime_t xe_svm_stats_ktime_get(void) +{ + return IS_ENABLED(CONFIG_DEBUG_FS) ? 
ktime_get() : 0; +} + static void xe_svm_invalidate(struct drm_gpusvm *gpusvm, struct drm_gpusvm_notifier *notifier, const struct mmu_notifier_range *mmu_range) @@ -177,8 +207,10 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm, struct xe_vm *vm = gpusvm_to_vm(gpusvm); struct xe_device *xe = vm->xe; struct drm_gpusvm_range *r, *first; + struct xe_tile *tile; + ktime_t start = xe_svm_stats_ktime_get(); u64 adj_start = mmu_range->start, adj_end = mmu_range->end; - u8 tile_mask = 0; + u8 tile_mask = 0, id; long err; xe_svm_assert_in_notifier(vm); @@ -231,6 +263,13 @@ range_notifier_event_end: r = first; drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) xe_svm_range_notifier_event_end(vm, r, mmu_range); + for_each_tile(tile, xe, id) { + if (tile_mask & BIT(id)) { + xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start); + if (tile->media_gt) + xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start); + } + } } static int __xe_svm_garbage_collector(struct xe_vm *vm, @@ -308,8 +347,8 @@ static int xe_svm_garbage_collector(struct xe_vm *vm) if (xe_vm_is_closed_or_banned(vm)) return -ENOENT; - spin_lock(&vm->svm.garbage_collector.lock); for (;;) { + spin_lock(&vm->svm.garbage_collector.lock); range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list, typeof(*range), garbage_collector_link); @@ -338,8 +377,6 @@ static int xe_svm_garbage_collector(struct xe_vm *vm) else return err; } - - spin_lock(&vm->svm.garbage_collector.lock); } spin_unlock(&vm->svm.garbage_collector.lock); @@ -384,11 +421,66 @@ enum xe_svm_copy_dir { XE_SVM_COPY_TO_SRAM, }; +static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt, + const enum xe_svm_copy_dir dir, + int kb) +{ + if (dir == XE_SVM_COPY_TO_VRAM) + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb); + else + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb); +} + +static void xe_svm_copy_us_stats_incr(struct xe_gt *gt, + const enum xe_svm_copy_dir dir, + unsigned long npages, + ktime_t start) +{ + s64 us_delta = xe_svm_stats_ktime_us_delta(start); + + if (dir == XE_SVM_COPY_TO_VRAM) { + switch (npages) { + case 1: + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US, + us_delta); + break; + case 16: + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US, + us_delta); + break; + case 512: + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US, + us_delta); + break; + } + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US, + us_delta); + } else { + switch (npages) { + case 1: + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US, + us_delta); + break; + case 16: + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US, + us_delta); + break; + case 512: + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US, + us_delta); + break; + } + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US, + us_delta); + } +} + static int xe_svm_copy(struct page **pages, struct drm_pagemap_addr *pagemap_addr, unsigned long npages, const enum xe_svm_copy_dir dir) { struct xe_vram_region *vr = NULL; + struct xe_gt *gt = NULL; struct xe_device *xe; struct dma_fence *fence = NULL; unsigned long i; @@ -396,6 +488,7 @@ static int xe_svm_copy(struct page **pages, u64 vram_addr = XE_VRAM_ADDR_INVALID; int err = 0, pos = 0; bool sram = dir == XE_SVM_COPY_TO_SRAM; + ktime_t start = xe_svm_stats_ktime_get(); /* * This flow is complex: it locates physically contiguous device pages, @@ -422,6 +515,7 @@ static int xe_svm_copy(struct page **pages, if (!vr && spage) { vr = page_to_vr(spage); + gt = 
xe_migrate_exec_queue(vr->migrate)->gt;
 xe = vr->xe;
 }
 XE_WARN_ON(spage && page_to_vr(spage) != vr);
@@ -461,6 +555,9 @@ static int xe_svm_copy(struct page **pages,
 int incr = (match && last) ? 1 : 0;
 if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (i - pos + incr) *
+ (PAGE_SIZE / SZ_1K));
 if (sram) {
 vm_dbg(&xe->drm,
 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
@@ -499,6 +596,8 @@ static int xe_svm_copy(struct page **pages,
 /* Extra mismatched device page, copy it */
 if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (PAGE_SIZE / SZ_1K));
 if (sram) {
 vm_dbg(&xe->drm,
 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
@@ -532,6 +631,14 @@ err_out:
 dma_fence_put(fence);
 }
+ /*
+ * XXX: We can't derive the GT here (or anywhere in this function), but
+ * compute always uses the primary GT, so accumulate stats on the likely
+ * GT of the fault.
+ */
+ if (gt)
+ xe_svm_copy_us_stats_incr(gt, dir, npages, start);
+
 return err;
 #undef XE_MIGRATE_CHUNK_SIZE
 #undef XE_VRAM_ADDR_INVALID
@@ -630,22 +737,26 @@ int xe_svm_init(struct xe_vm *vm)
 {
 int err;
- spin_lock_init(&vm->svm.garbage_collector.lock);
- INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
- INIT_WORK(&vm->svm.garbage_collector.work,
- xe_svm_garbage_collector_work_func);
-
- err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
- current->mm, xe_svm_devm_owner(vm->xe), 0,
- vm->size, xe_modparam.svm_notifier_size * SZ_1M,
- &gpusvm_ops, fault_chunk_sizes,
- ARRAY_SIZE(fault_chunk_sizes));
- if (err)
- return err;
-
- drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, xe_svm_devm_owner(vm->xe), 0,
+ vm->size,
+ xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ } else {
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
+ &vm->xe->drm, NULL, NULL, 0, 0, 0, NULL,
+ NULL, 0);
+ }
- return 0;
+ return err;
 }
 /**
@@ -716,7 +827,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
 xe_svm_notifier_lock(vm);
 ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
- (devmem_preferred == range->base.flags.has_devmem_pages);
+ (devmem_preferred == range->base.pages.flags.has_devmem_pages);
 xe_svm_notifier_unlock(vm);
@@ -755,49 +866,48 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
 struct xe_device *xe = vr->xe;
 struct device *dev = xe->drm.dev;
 struct drm_buddy_block *block;
+ struct xe_validation_ctx vctx;
 struct list_head *blocks;
+ struct drm_exec exec;
 struct xe_bo *bo;
- ktime_t time_end = 0;
- int err, idx;
+ int err = 0, idx;
 if (!drm_dev_enter(&xe->drm, &idx))
 return -ENODEV;
 xe_pm_runtime_get(xe);
-retry:
- bo = xe_bo_create_locked(vr->xe, NULL, NULL, end - start,
- ttm_bo_type_device,
- (IS_DGFX(xe) ?
XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) | - XE_BO_FLAG_CPU_ADDR_MIRROR); - if (IS_ERR(bo)) { - err = PTR_ERR(bo); - if (xe_vm_validate_should_retry(NULL, err, &time_end)) - goto retry; - goto out_pm_put; - } - - drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm, - &dpagemap_devmem_ops, dpagemap, end - start); - - blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks; - list_for_each_entry(block, blocks, link) - block->private = vr; + xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) { + bo = xe_bo_create_locked(xe, NULL, NULL, end - start, + ttm_bo_type_device, + (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) | + XE_BO_FLAG_CPU_ADDR_MIRROR, &exec); + drm_exec_retry_on_contention(&exec); + if (IS_ERR(bo)) { + err = PTR_ERR(bo); + xe_validation_retry_on_oom(&vctx, &err); + break; + } - xe_bo_get(bo); + drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm, + &dpagemap_devmem_ops, dpagemap, end - start); - /* Ensure the device has a pm ref while there are device pages active. */ - xe_pm_runtime_get_noresume(xe); - err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm, - start, end, timeslice_ms, - xe_svm_devm_owner(xe)); - if (err) - xe_svm_devmem_release(&bo->devmem_allocation); + blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks; + list_for_each_entry(block, blocks, link) + block->private = vr; - xe_bo_unlock(bo); - xe_bo_put(bo); + xe_bo_get(bo); -out_pm_put: + /* Ensure the device has a pm ref while there are device pages active. */ + xe_pm_runtime_get_noresume(xe); + err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm, + start, end, timeslice_ms, + xe_svm_devm_owner(xe)); + if (err) + xe_svm_devmem_release(&bo->devmem_allocation); + xe_bo_unlock(bo); + xe_bo_put(bo); + } xe_pm_runtime_put(xe); drm_dev_exit(idx); @@ -827,17 +937,17 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm struct xe_vm *vm = range_to_vm(&range->base); u64 range_size = xe_svm_range_size(range); - if (!range->base.flags.migrate_devmem || !preferred_region_is_vram) + if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram) return false; xe_assert(vm->xe, IS_DGFX(vm->xe)); - if (preferred_region_is_vram && xe_svm_range_in_vram(range)) { + if (xe_svm_range_in_vram(range)) { drm_info(&vm->xe->drm, "Range is already in VRAM\n"); return false; } - if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) { + if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) { drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); return false; } @@ -845,27 +955,77 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm return true; } +#define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \ +static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \ + struct xe_svm_range *range) \ +{ \ + switch (xe_svm_range_size(range)) { \ + case SZ_4K: \ + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \ + break; \ + case SZ_64K: \ + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \ + break; \ + case SZ_2M: \ + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \ + break; \ + } \ +} \ + +DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT) +DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT) +DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE) + +#define DECL_SVM_RANGE_US_STATS(elem, stat) \ +static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \ + struct xe_svm_range *range, \ + 
ktime_t start) \ +{ \ + s64 us_delta = xe_svm_stats_ktime_us_delta(start); \ +\ + switch (xe_svm_range_size(range)) { \ + case SZ_4K: \ + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \ + us_delta); \ + break; \ + case SZ_64K: \ + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \ + us_delta); \ + break; \ + case SZ_2M: \ + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \ + us_delta); \ + break; \ + } \ +} \ + +DECL_SVM_RANGE_US_STATS(migrate, MIGRATE) +DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES) +DECL_SVM_RANGE_US_STATS(bind, BIND) +DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT) + static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, struct xe_gt *gt, u64 fault_addr, bool need_vram) { + int devmem_possible = IS_DGFX(vm->xe) && + IS_ENABLED(CONFIG_DRM_XE_PAGEMAP); struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma), - .devmem_possible = IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_PAGEMAP), - .check_pages_threshold = IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0, - .devmem_only = need_vram && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP), - .timeslice_ms = need_vram && IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? + .devmem_possible = devmem_possible, + .check_pages_threshold = devmem_possible ? SZ_64K : 0, + .devmem_only = need_vram && devmem_possible, + .timeslice_ms = need_vram && devmem_possible ? vm->xe->atomic_svm_timeslice_ms : 0, }; + struct xe_validation_ctx vctx; + struct drm_exec exec; struct xe_svm_range *range; struct dma_fence *fence; struct drm_pagemap *dpagemap; struct xe_tile *tile = gt_to_tile(gt); int migrate_try_count = ctx.devmem_only ? 3 : 1; - ktime_t end = 0; + ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start; int err; lockdep_assert_held_write(&vm->lock); @@ -884,23 +1044,34 @@ retry: if (IS_ERR(range)) return PTR_ERR(range); - if (ctx.devmem_only && !range->base.flags.migrate_devmem) - return -EACCES; + xe_svm_range_fault_count_stats_incr(gt, range); - if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) - return 0; + if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) { + err = -EACCES; + goto out; + } + + if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) { + xe_svm_range_valid_fault_count_stats_incr(gt, range); + range_debug(range, "PAGE FAULT - VALID"); + goto out; + } range_debug(range, "PAGE FAULT"); dpagemap = xe_vma_resolve_pagemap(vma, tile); if (--migrate_try_count >= 0 && xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) { + ktime_t migrate_start = xe_svm_stats_ktime_get(); + /* TODO : For multi-device dpagemap will be used to find the * remote tile and remote device. Will need to modify * xe_svm_alloc_vram to use dpagemap for future multi-device * support. 
*/ + xe_svm_range_migrate_count_stats_incr(gt, range); err = xe_svm_alloc_vram(tile, range, &ctx); + xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start); ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ if (err) { if (migrate_try_count || !ctx.devmem_only) { @@ -917,6 +1088,8 @@ retry: } } + get_pages_start = xe_svm_stats_ktime_get(); + range_debug(range, "GET PAGES"); err = xe_svm_range_get_pages(vm, range, &ctx); /* Corner where CPU mappings have changed */ @@ -936,32 +1109,45 @@ retry: } if (err) { range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT"); - goto err_out; + goto out; } + xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start); range_debug(range, "PAGE FAULT - BIND"); -retry_bind: - xe_vm_lock(vm, false); - fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id)); - if (IS_ERR(fence)) { - xe_vm_unlock(vm); - err = PTR_ERR(fence); - if (err == -EAGAIN) { - ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ - range_debug(range, "PAGE FAULT - RETRY BIND"); - goto retry; + bind_start = xe_svm_stats_ktime_get(); + xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) { + err = xe_vm_drm_exec_lock(vm, &exec); + drm_exec_retry_on_contention(&exec); + + xe_vm_set_validation_exec(vm, &exec); + fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id)); + xe_vm_set_validation_exec(vm, NULL); + if (IS_ERR(fence)) { + drm_exec_retry_on_contention(&exec); + err = PTR_ERR(fence); + xe_validation_retry_on_oom(&vctx, &err); + xe_svm_range_bind_us_stats_incr(gt, range, bind_start); + break; } - if (xe_vm_validate_should_retry(NULL, err, &end)) - goto retry_bind; - goto err_out; } - xe_vm_unlock(vm); + if (err) + goto err_out; dma_fence_wait(fence, false); dma_fence_put(fence); + xe_svm_range_bind_us_stats_incr(gt, range, bind_start); + +out: + xe_svm_range_fault_us_stats_incr(gt, range, start); + return 0; err_out: + if (err == -EAGAIN) { + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ + range_debug(range, "PAGE FAULT - RETRY BIND"); + goto retry; + } return err; } @@ -1089,7 +1275,7 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr, r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)), xe_vma_start(vma), xe_vma_end(vma), ctx); if (IS_ERR(r)) - return ERR_PTR(PTR_ERR(r)); + return ERR_CAST(r); return to_xe_range(r); } @@ -1221,7 +1407,7 @@ int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range, { struct drm_pagemap *dpagemap; - xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem); + xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem); range_debug(range, "ALLOCATE VRAM"); dpagemap = tile_local_pagemap(tile); diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index 9d6a8840a8b7..cef6ee7d6fe3 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -105,7 +105,7 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range) { lockdep_assert_held(&range->base.gpusvm->notifier_lock); - return range->base.flags.has_dma_mapping; + return range->base.pages.flags.has_dma_mapping; } /** @@ -155,19 +155,11 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range) return drm_gpusvm_range_size(&range->base); } -#define xe_svm_assert_in_notifier(vm__) \ - lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock) - -#define xe_svm_notifier_lock(vm__) \ - 
drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_lock_interruptible(vm__) \
+ down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+#else
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+
+static inline void xe_svm_assert_held_read(struct xe_vm *vm)
+{
+}
 static inline void xe_svm_notifier_lock(struct xe_vm *vm)
 {
 }
-static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
+static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
 {
+ return 0;
 }
-static inline void xe_svm_flush(struct xe_vm *vm)
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
 {
 }
-#endif
+#endif /* CONFIG_DRM_GPUSVM */
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.c b/drivers/gpu/drm/xe/xe_tile_debugfs.c
new file mode 100644
index 000000000000..5523874cba7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_pm.h"
+#include "xe_sa.h"
+#include "xe_tile_debugfs.h"
+
+static struct xe_tile *node_to_tile(struct drm_info_node *node)
+{
+ return node->dent->d_parent->d_inode->i_private;
+}
+
+/**
+ * tile_debugfs_simple_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_tile specific.
+ *
+ * It is assumed that those debugfs files will be created on a directory entry
+ * whose struct dentry d_inode->i_private points to &xe_tile.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── tile0/ # tile = dentry->d_inode->i_private
+ * │ ├── id # tile = dentry->d_parent->d_inode->i_private
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_tile *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int tile_id(struct xe_tile *tile, struct drm_printer *p)
+ * {
+ * drm_printf(p, "%u\n", tile->id);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list info[] = {
+ * { .name = "id", .show = tile_debugfs_simple_show, .data = tile_id },
+ * };
+ *
+ * dir = debugfs_create_dir("tile0", parent);
+ * dir->d_inode->i_private = tile;
+ * drm_debugfs_create_files(info, ARRAY_SIZE(info), dir, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int tile_debugfs_simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ int (*print)(struct xe_tile *, struct drm_printer *) = node->info_ent->data;
+
+ return print(tile, &p);
+}
+
+/**
+ * tile_debugfs_show_with_rpm - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * Similar to tile_debugfs_simple_show() but implicitly takes an RPM ref.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int tile_debugfs_show_with_rpm(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ struct xe_device *xe = tile_to_xe(tile);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = tile_debugfs_simple_show(m, data);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static int sa_info(struct xe_tile *tile, struct drm_printer *p)
+{
+ drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
+ xe_sa_manager_gpu_addr(tile->mem.kernel_bb_pool));
+
+ return 0;
+}
+
+/* only for debugfs files which can be safely used on the VF */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "sa_info", .show = tile_debugfs_show_with_rpm, .data = sa_info },
+};
+
+/**
+ * xe_tile_debugfs_register - Register tile's debugfs attributes
+ * @tile: the &xe_tile to register
+ *
+ * Create a debugfs sub-directory with a name that includes the tile ID and
+ * then create a set of debugfs files (attributes) specific to this tile.
+ */
+void xe_tile_debugfs_register(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[8];
+
+ snprintf(name, sizeof(name), "tile%u", tile->id);
+ tile->debugfs = debugfs_create_dir(name, root);
+ if (IS_ERR(tile->debugfs))
+ return;
+
+ /*
+ * Store the xe_tile pointer as private data of the tile/ directory
+ * node so other tile specific attributes under that directory may
+ * refer to it by looking at its parent node private data.
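+ * (Editorial note: node_to_tile() above follows this pointer back,
+ * reading node->dent->d_parent->d_inode->i_private to recover the tile.)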
+ */ + tile->debugfs->d_inode->i_private = tile; + + drm_debugfs_create_files(vf_safe_debugfs_list, + ARRAY_SIZE(vf_safe_debugfs_list), + tile->debugfs, minor); +} diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.h b/drivers/gpu/drm/xe/xe_tile_debugfs.h new file mode 100644 index 000000000000..0e5f724de37f --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_TILE_DEBUGFS_H_ +#define _XE_TILE_DEBUGFS_H_ + +struct xe_tile; + +void xe_tile_debugfs_register(struct xe_tile *tile); + +#endif diff --git a/drivers/gpu/drm/xe/xe_tile_printk.h b/drivers/gpu/drm/xe/xe_tile_printk.h new file mode 100644 index 000000000000..63640a42685d --- /dev/null +++ b/drivers/gpu/drm/xe/xe_tile_printk.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _xe_tile_printk_H_ +#define _xe_tile_printk_H_ + +#include "xe_printk.h" + +#define __XE_TILE_PRINTK_FMT(_tile, _fmt, _args...) "Tile%u: " _fmt, (_tile)->id, ##_args + +#define xe_tile_printk(_tile, _level, _fmt, ...) \ + xe_printk((_tile)->xe, _level, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__)) + +#define xe_tile_err(_tile, _fmt, ...) \ + xe_tile_printk((_tile), err, _fmt, ##__VA_ARGS__) + +#define xe_tile_err_once(_tile, _fmt, ...) \ + xe_tile_printk((_tile), err_once, _fmt, ##__VA_ARGS__) + +#define xe_tile_err_ratelimited(_tile, _fmt, ...) \ + xe_tile_printk((_tile), err_ratelimited, _fmt, ##__VA_ARGS__) + +#define xe_tile_warn(_tile, _fmt, ...) \ + xe_tile_printk((_tile), warn, _fmt, ##__VA_ARGS__) + +#define xe_tile_notice(_tile, _fmt, ...) \ + xe_tile_printk((_tile), notice, _fmt, ##__VA_ARGS__) + +#define xe_tile_info(_tile, _fmt, ...) \ + xe_tile_printk((_tile), info, _fmt, ##__VA_ARGS__) + +#define xe_tile_dbg(_tile, _fmt, ...) \ + xe_tile_printk((_tile), dbg, _fmt, ##__VA_ARGS__) + +#define xe_tile_WARN_type(_tile, _type, _condition, _fmt, ...) \ + xe_WARN##_type((_tile)->xe, _condition, _fmt, ## __VA_ARGS__) + +#define xe_tile_WARN(_tile, _condition, _fmt, ...) \ + xe_tile_WARN_type((_tile),, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__)) + +#define xe_tile_WARN_ONCE(_tile, _condition, _fmt, ...) \ + xe_tile_WARN_type((_tile), _ONCE, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__)) + +#define xe_tile_WARN_ON(_tile, _condition) \ + xe_tile_WARN((_tile), _condition, "%s(%s)", "WARN_ON", __stringify(_condition)) + +#define xe_tile_WARN_ON_ONCE(_tile, _condition) \ + xe_tile_WARN_ONCE((_tile), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition)) + +static inline void __xe_tile_printfn_err(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_tile *tile = p->arg; + + xe_tile_err(tile, "%pV", vaf); +} + +static inline void __xe_tile_printfn_info(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_tile *tile = p->arg; + + xe_tile_info(tile, "%pV", vaf); +} + +static inline void __xe_tile_printfn_dbg(struct drm_printer *p, struct va_format *vaf) +{ + struct xe_tile *tile = p->arg; + struct drm_printer dbg; + + /* + * The original xe_tile_dbg() callsite annotations are useless here, + * redirect to the tweaked xe_dbg_printer() instead. 
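	 * Copying p->origin below keeps the debug output attributed to the
	 * original callsite rather than to this helper.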
+ */ + dbg = xe_dbg_printer(tile->xe); + dbg.origin = p->origin; + + drm_printf(&dbg, __XE_TILE_PRINTK_FMT(tile, "%pV", vaf)); +} + +/** + * xe_tile_err_printer - Construct a &drm_printer that outputs to xe_tile_err() + * @tile: the &xe_tile pointer to use in xe_tile_err() + * + * Return: The &drm_printer object. + */ +static inline struct drm_printer xe_tile_err_printer(struct xe_tile *tile) +{ + struct drm_printer p = { + .printfn = __xe_tile_printfn_err, + .arg = tile, + }; + return p; +} + +/** + * xe_tile_info_printer - Construct a &drm_printer that outputs to xe_tile_info() + * @tile: the &xe_tile pointer to use in xe_tile_info() + * + * Return: The &drm_printer object. + */ +static inline struct drm_printer xe_tile_info_printer(struct xe_tile *tile) +{ + struct drm_printer p = { + .printfn = __xe_tile_printfn_info, + .arg = tile, + }; + return p; +} + +/** + * xe_tile_dbg_printer - Construct a &drm_printer that outputs like xe_tile_dbg() + * @tile: the &xe_tile pointer to use in xe_tile_dbg() + * + * Return: The &drm_printer object. + */ +static inline struct drm_printer xe_tile_dbg_printer(struct xe_tile *tile) +{ + struct drm_printer p = { + .printfn = __xe_tile_printfn_dbg, + .arg = tile, + .origin = (const void *)_THIS_IP_, + }; + return p; +} + +#endif diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c index b804234a6551..9e1236a9ec67 100644 --- a/drivers/gpu/drm/xe/xe_tile_sysfs.c +++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c @@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile) kt->tile = tile; err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id); - if (err) { - kobject_put(&kt->base); - return err; - } + if (err) + goto err_object; tile->sysfs = &kt->base; err = xe_vram_freq_sysfs_init(tile); if (err) - return err; + goto err_object; return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile); + +err_object: + kobject_put(&kt->base); + return err; } diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c index e6e97b5a7b5c..918a59e686ea 100644 --- a/drivers/gpu/drm/xe/xe_tlb_inval.c +++ b/drivers/gpu/drm/xe/xe_tlb_inval.c @@ -10,11 +10,10 @@ #include "xe_force_wake.h" #include "xe_gt.h" #include "xe_gt_printk.h" +#include "xe_gt_stats.h" #include "xe_guc.h" #include "xe_guc_ct.h" #include "xe_guc_tlb_inval.h" -#include "xe_gt_stats.h" -#include "xe_tlb_inval.h" #include "xe_mmio.h" #include "xe_pm.h" #include "xe_tlb_inval.h" diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 9bbdde604923..622b76078567 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -115,8 +115,8 @@ struct fw_blobs_by_type { #define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED #define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \ - fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \ + fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 49, 4)) \ + fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 49, 4)) \ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \ fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \ @@ -328,7 +328,7 @@ static void uc_fw_fini(struct drm_device *drm, void *arg) xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_SELECTED); } -static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css) +static int 
guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_guc_info *guc_info) { struct xe_gt *gt = uc_fw_to_gt(uc_fw); struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE]; @@ -343,11 +343,12 @@ static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css) return -EINVAL; } - compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->submission_version); - compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->submission_version); - compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->submission_version); + compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, guc_info->submission_version); + compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, guc_info->submission_version); + compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, guc_info->submission_version); - uc_fw->private_data_size = css->private_data_size; + uc_fw->build_type = FIELD_GET(CSS_UKERNEL_INFO_BUILDTYPE, guc_info->ukernel_info); + uc_fw->private_data_size = guc_info->private_data_size; return 0; } @@ -416,8 +417,8 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t css = (struct uc_css_header *)fw_data; /* Check integrity of size values inside CSS header */ - size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - - css->exponent_size_dw) * sizeof(u32); + size = (css->header_size_dw - css->rsa_info.key_size_dw - css->rsa_info.modulus_size_dw - + css->rsa_info.exponent_size_dw) * sizeof(u32); if (unlikely(size != sizeof(struct uc_css_header))) { drm_warn(&xe->drm, "%s firmware %s: unexpected header size: %zu != %zu\n", @@ -430,7 +431,7 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); /* now RSA */ - uc_fw->rsa_size = css->key_size_dw * sizeof(u32); + uc_fw->rsa_size = css->rsa_info.key_size_dw * sizeof(u32); /* At least, it should have header, uCode and RSA. Size of all three. */ size = sizeof(struct uc_css_header) + uc_fw->ucode_size + @@ -443,12 +444,12 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t } /* Get version numbers from the CSS header */ - release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->sw_version); - release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->sw_version); - release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->sw_version); + release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->guc_info.sw_version); + release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->guc_info.sw_version); + release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->guc_info.sw_version); if (uc_fw->type == XE_UC_FW_TYPE_GUC) - return guc_read_css_info(uc_fw, css); + return guc_read_css_info(uc_fw, &css->guc_info); return 0; } diff --git a/drivers/gpu/drm/xe/xe_uc_fw_abi.h b/drivers/gpu/drm/xe/xe_uc_fw_abi.h index 87ade41209d0..3c9a63d13032 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw_abi.h +++ b/drivers/gpu/drm/xe/xe_uc_fw_abi.h @@ -44,6 +44,39 @@ * in fw. So driver will load a truncated firmware in this case. 
 */
 
+struct uc_css_rsa_info {
+	u32 key_size_dw;
+	u32 modulus_size_dw;
+	u32 exponent_size_dw;
+} __packed;
+
+struct uc_css_guc_info {
+	u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_TIME_MIN (0xFF << 8)
+#define CSS_TIME_SEC (0xFFFF << 16)
+	u32 reserved0[5];
+	u32 sw_version;
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
+	u32 submission_version;
+	u32 reserved1[11];
+	u32 header_info;
+#define CSS_HEADER_INFO_SVN (0xFF)
+#define CSS_HEADER_INFO_COPY_VALID (0x1 << 31)
+	u32 private_data_size;
+	u32 ukernel_info;
+#define CSS_UKERNEL_INFO_DEVICEID (0xFFFF << 16)
+#define CSS_UKERNEL_INFO_PRODKEY (0xFF << 8)
+#define CSS_UKERNEL_INFO_BUILDTYPE (0x3 << 2)
+#define CSS_UKERNEL_INFO_BUILDTYPE_PROD 0
+#define CSS_UKERNEL_INFO_BUILDTYPE_PREPROD 1
+#define CSS_UKERNEL_INFO_BUILDTYPE_DEBUG 2
+#define CSS_UKERNEL_INFO_ENCSTATUS (0x1 << 1)
+#define CSS_UKERNEL_INFO_COPY_VALID (0x1 << 0)
+} __packed;
+
 struct uc_css_header {
 	u32 module_type;
 	/*
@@ -52,36 +85,21 @@ struct uc_css_header {
 	 */
 	u32 header_size_dw;
 	u32 header_version;
-	u32 module_id;
+	u32 reserved0;
 	u32 module_vendor;
 	u32 date;
-#define CSS_DATE_DAY (0xFF << 0)
-#define CSS_DATE_MONTH (0xFF << 8)
-#define CSS_DATE_YEAR (0xFFFF << 16)
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
 	u32 size_dw; /* uCode plus header_size_dw */
-	u32 key_size_dw;
-	u32 modulus_size_dw;
-	u32 exponent_size_dw;
-	u32 time;
-#define CSS_TIME_HOUR (0xFF << 0)
-#define CSS_DATE_MIN (0xFF << 8)
-#define CSS_DATE_SEC (0xFFFF << 16)
-	char username[8];
-	char buildnumber[12];
-	u32 sw_version;
-#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
 	union {
-		u32 submission_version; /* only applies to GuC */
-		u32 reserved2;
+		u32 reserved1[3];
+		struct uc_css_rsa_info rsa_info;
 	};
-	u32 reserved0[12];
 	union {
-		u32 private_data_size; /* only applies to GuC */
-		u32 reserved1;
+		u32 reserved2[22];
+		struct uc_css_guc_info guc_info;
 	};
-	u32 header_info;
 } __packed;
 static_assert(sizeof(struct uc_css_header) == 128);
@@ -318,4 +336,70 @@ struct gsc_manifest_header {
 	u32 exponent_size; /* in dwords */
 } __packed;
 
+/**
+ * DOC: Late binding Firmware Layout
+ *
+ * The late binding binary starts with an FPT header, which contains the
+ * locations of the various partitions of the binary. Here we're interested
+ * in finding the manifest version. To get to it, we first need to locate
+ * the CPD header; one of the entries in the CPD header points to the
+ * manifest header, which contains the version.
+ *
+ * +================================================+
+ * |  FPT Header                                    |
+ * +================================================+
+ * |  FPT entries[]                                 |
+ * |      entry1                                    |
+ * |      ...                                       |
+ * |      entryX                                    |
+ * |      "LTES"                                    |
+ * |      ...                                       |
+ * |      offset  >---------------------------------|------o
+ * +================================================+      |
+ *                                                         |
+ * +================================================+      |
+ * |  CPD Header                                    |<-----o
+ * +================================================+
+ * |  CPD entries[]                                 |
+ * |      entry1                                    |
+ * |      ...                                       |
+ * |      entryX                                    |
+ * |      "LTES.man"                                |
+ * |      ...                                       |
+ * |      offset  >---------------------------------|------o
+ * +================================================+      |
+ *                                                         |
+ * +================================================+      |
+ * |  Manifest Header                               |<-----o
+ * |      ...                                       |
+ * |      FW version                                |
+ * |      ...                                       |
+ * +================================================+
+ */
+
+/* FPT Headers */
+struct csc_fpt_header {
+	u32 header_marker;
+#define CSC_FPT_HEADER_MARKER 0x54504624
+	u32 num_of_entries;
+	u8 header_version;
+	u8 entry_version;
+	u8 header_length; /* in bytes */
+	u8 flags;
+	u16 ticks_to_add;
+	u16 tokens_to_add;
+	u32 uma_size;
+	u32 crc32;
+	struct gsc_version fitc_version;
+} __packed;
+
+struct csc_fpt_entry {
+	u8 name[4]; /* partition name */
+	u32 reserved1;
+	u32 offset; /* offset from beginning of CSE region */
+	u32 length; /* partition length in bytes */
+	u32 reserved2[3];
+	u32 partition_flags;
+} __packed;
+
 #endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 914026015019..77a1dcf8b4ed 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -147,6 +147,9 @@ struct xe_uc_fw {
 
 	/** @private_data_size: size of private data found in uC css header */
 	u32 private_data_size;
+
+	/** @build_type: Firmware build type (see CSS_UKERNEL_INFO_BUILDTYPE for definitions) */
+	u32 build_type;
 };
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c
new file mode 100644
index 000000000000..91d09af71ced
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_userptr.h"
+
+#include <linux/mm.h>
+
+#include "xe_trace_bo.h"
+
+/**
+ * xe_vma_userptr_check_repin() - Advisory check for repin needed
+ * @uvma: The userptr vma
+ *
+ * Check if the userptr vma has been invalidated since the last successful
+ * repin. The check is advisory only and the function can be called
+ * without the vm->svm.gpusvm.notifier_lock held. There is no guarantee that the
+ * vma userptr will remain valid after a lockless check, so typically
+ * the call needs to be followed by a proper check under the notifier_lock.
+ *
+ * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ */
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+{
+	return mmu_interval_check_retry(&uvma->userptr.notifier,
+					uvma->userptr.pages.notifier_seq) ?
+		-EAGAIN : 0;
+}
+
+/**
+ * __xe_vm_userptr_needs_repin() - Check whether the VM has userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function checks for whether the VM has userptrs that need repinning,
+ * and provides a release-type barrier on the svm.gpusvm.notifier_lock after
+ * checking.
+ *
+ * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
+ */
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
+{
+	lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);
+
+	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ?
0 : -EAGAIN; +} + +int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) +{ + struct xe_vma *vma = &uvma->vma; + struct xe_vm *vm = xe_vma_vm(vma); + struct xe_device *xe = vm->xe; + struct drm_gpusvm_ctx ctx = { + .read_only = xe_vma_read_only(vma), + }; + + lockdep_assert_held(&vm->lock); + xe_assert(xe, xe_vma_is_userptr(vma)); + + if (vma->gpuva.flags & XE_VMA_DESTROYED) + return 0; + + return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages, + uvma->userptr.notifier.mm, + &uvma->userptr.notifier, + xe_vma_userptr(vma), + xe_vma_userptr(vma) + xe_vma_size(vma), + &ctx); +} + +static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma) +{ + struct xe_userptr *userptr = &uvma->userptr; + struct xe_vma *vma = &uvma->vma; + struct dma_resv_iter cursor; + struct dma_fence *fence; + struct drm_gpusvm_ctx ctx = { + .in_notifier = true, + .read_only = xe_vma_read_only(vma), + }; + long err; + + /* + * Tell exec and rebind worker they need to repin and rebind this + * userptr. + */ + if (!xe_vm_in_fault_mode(vm) && + !(vma->gpuva.flags & XE_VMA_DESTROYED)) { + spin_lock(&vm->userptr.invalidated_lock); + list_move_tail(&userptr->invalidate_link, + &vm->userptr.invalidated); + spin_unlock(&vm->userptr.invalidated_lock); + } + + /* + * Preempt fences turn into schedule disables, pipeline these. + * Note that even in fault mode, we need to wait for binds and + * unbinds to complete, and those are attached as BOOKMARK fences + * to the vm. + */ + dma_resv_iter_begin(&cursor, xe_vm_resv(vm), + DMA_RESV_USAGE_BOOKKEEP); + dma_resv_for_each_fence_unlocked(&cursor, fence) + dma_fence_enable_sw_signaling(fence); + dma_resv_iter_end(&cursor); + + err = dma_resv_wait_timeout(xe_vm_resv(vm), + DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); + XE_WARN_ON(err <= 0); + + if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { + err = xe_vm_invalidate_vma(vma); + XE_WARN_ON(err); + } + + drm_gpusvm_unmap_pages(&vm->svm.gpusvm, &uvma->userptr.pages, + xe_vma_size(vma) >> PAGE_SHIFT, &ctx); +} + +static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier); + struct xe_vma *vma = &uvma->vma; + struct xe_vm *vm = xe_vma_vm(vma); + + xe_assert(vm->xe, xe_vma_is_userptr(vma)); + trace_xe_vma_userptr_invalidate(vma); + + if (!mmu_notifier_range_blockable(range)) + return false; + + vm_dbg(&xe_vma_vm(vma)->xe->drm, + "NOTIFIER: addr=0x%016llx, range=0x%016llx", + xe_vma_start(vma), xe_vma_size(vma)); + + down_write(&vm->svm.gpusvm.notifier_lock); + mmu_interval_set_seq(mni, cur_seq); + + __vma_userptr_invalidate(vm, uvma); + up_write(&vm->svm.gpusvm.notifier_lock); + trace_xe_vma_userptr_invalidate_complete(vma); + + return true; +} + +static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = { + .invalidate = vma_userptr_invalidate, +}; + +#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) +/** + * xe_vma_userptr_force_invalidate() - force invalidate a userptr + * @uvma: The userptr vma to invalidate + * + * Perform a forced userptr invalidation for testing purposes. 
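+ *
+ * The caller must hold the vm lock, the vm's dma-resv and the gpusvm
+ * notifier lock, as asserted by the lockdep checks below.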
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+	/* Protect against concurrent userptr pinning */
+	lockdep_assert_held(&vm->lock);
+	/* Protect against concurrent notifiers */
+	lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+	/*
+	 * Protect against concurrent instances of this function and
+	 * the critical exec sections
+	 */
+	xe_vm_assert_held(vm);
+
+	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+				     uvma->userptr.pages.notifier_seq))
+		uvma->userptr.pages.notifier_seq -= 2;
+	__vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
+int xe_vm_userptr_pin(struct xe_vm *vm)
+{
+	struct xe_userptr_vma *uvma, *next;
+	int err = 0;
+
+	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
+	lockdep_assert_held_write(&vm->lock);
+
+	/* Collect invalidated userptrs */
+	spin_lock(&vm->userptr.invalidated_lock);
+	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
+	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
+				 userptr.invalidate_link) {
+		list_del_init(&uvma->userptr.invalidate_link);
+		list_add_tail(&uvma->userptr.repin_link,
+			      &vm->userptr.repin_list);
+	}
+	spin_unlock(&vm->userptr.invalidated_lock);
+
+	/* Pin and move to bind list */
+	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+				 userptr.repin_link) {
+		err = xe_vma_userptr_pin_pages(uvma);
+		if (err == -EFAULT) {
+			list_del_init(&uvma->userptr.repin_link);
+			/*
+			 * We might have already done the pin once, but then
+			 * had to retry before the re-bind happened due to
+			 * some other condition in the caller. In the
+			 * meantime the userptr got dinged by the notifier
+			 * such that we need to revalidate here, but this
+			 * time we hit the EFAULT. In such a case make sure
+			 * we remove ourselves from the rebind list to avoid
+			 * going down in flames.
+			 */
+			if (!list_empty(&uvma->vma.combined_links.rebind))
+				list_del_init(&uvma->vma.combined_links.rebind);
+
+			/* Wait for pending binds */
+			xe_vm_lock(vm, false);
+			dma_resv_wait_timeout(xe_vm_resv(vm),
+					      DMA_RESV_USAGE_BOOKKEEP,
+					      false, MAX_SCHEDULE_TIMEOUT);
+
+			down_read(&vm->svm.gpusvm.notifier_lock);
+			err = xe_vm_invalidate_vma(&uvma->vma);
+			up_read(&vm->svm.gpusvm.notifier_lock);
+			xe_vm_unlock(vm);
+			if (err)
+				break;
+		} else {
+			if (err)
+				break;
+
+			list_del_init(&uvma->userptr.repin_link);
+			list_move_tail(&uvma->vma.combined_links.rebind,
+				       &vm->rebind_list);
+		}
+	}
+
+	if (err) {
+		down_write(&vm->svm.gpusvm.notifier_lock);
+		spin_lock(&vm->userptr.invalidated_lock);
+		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+					 userptr.repin_link) {
+			list_del_init(&uvma->userptr.repin_link);
+			list_move_tail(&uvma->userptr.invalidate_link,
+				       &vm->userptr.invalidated);
+		}
+		spin_unlock(&vm->userptr.invalidated_lock);
+		up_write(&vm->svm.gpusvm.notifier_lock);
+	}
+	return err;
+}
+
+/**
+ * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function does an advisory check for whether the VM has userptrs that
+ * need repinning.
+ *
+ * Return: 0 if there are no indications of userptrs needing repinning,
+ * -EAGAIN if there are.
+ */
+int xe_vm_userptr_check_repin(struct xe_vm *vm)
+{
+	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ?
0 : -EAGAIN;
+}
+
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+		     unsigned long range)
+{
+	struct xe_userptr *userptr = &uvma->userptr;
+	int err;
+
+	INIT_LIST_HEAD(&userptr->invalidate_link);
+	INIT_LIST_HEAD(&userptr->repin_link);
+
+	err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
+					   start, range,
+					   &vma_userptr_notifier_ops);
+	if (err)
+		return err;
+
+	userptr->pages.notifier_seq = LONG_MAX;
+
+	return 0;
+}
+
+void xe_userptr_remove(struct xe_userptr_vma *uvma)
+{
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+	struct xe_userptr *userptr = &uvma->userptr;
+
+	drm_gpusvm_free_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+			      xe_vma_size(&uvma->vma) >> PAGE_SHIFT);
+
+	/*
+	 * Since userptr pages are not pinned, we can't remove
+	 * the notifier until we're sure the GPU is not accessing
+	 * them anymore
+	 */
+	mmu_interval_notifier_remove(&userptr->notifier);
+}
+
+void xe_userptr_destroy(struct xe_userptr_vma *uvma)
+{
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+	spin_lock(&vm->userptr.invalidated_lock);
+	xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
+	list_del(&uvma->userptr.invalidate_link);
+	spin_unlock(&vm->userptr.invalidated_lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_userptr.h b/drivers/gpu/drm/xe/xe_userptr.h
new file mode 100644
index 000000000000..ef801234991e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_USERPTR_H_
+#define _XE_USERPTR_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_gpusvm.h>
+
+struct xe_vm;
+struct xe_vma;
+struct xe_userptr_vma;
+
+/** struct xe_userptr_vm - User pointer VM level state */
+struct xe_userptr_vm {
+	/**
+	 * @userptr.repin_list: list of VMAs which are user pointers,
+	 * and need repinning. Protected by @lock.
+	 */
+	struct list_head repin_list;
+	/**
+	 * @userptr.invalidated_lock: Protects the
+	 * @userptr.invalidated list.
+	 */
+	spinlock_t invalidated_lock;
+	/**
+	 * @userptr.invalidated: List of invalidated userptrs, not yet
+	 * picked up for revalidation. Protected from access with the
+	 * @invalidated_lock. Removing items from the list
+	 * additionally requires @lock in write mode, and adding
+	 * items to the list requires either the @svm.gpusvm.notifier_lock in
+	 * write mode, OR @lock in write mode.
+	 */
+	struct list_head invalidated;
+};
+
+/** struct xe_userptr - User pointer */
+struct xe_userptr {
+	/** @invalidate_link: Link for the vm::userptr.invalidated list */
+	struct list_head invalidate_link;
+	/** @repin_link: link into VM repin list if userptr. */
+	struct list_head repin_link;
+	/**
+	 * @pages: gpusvm pages for this user pointer.
+	 */
+	struct drm_gpusvm_pages pages;
+	/**
+	 * @notifier: MMU notifier for user pointer (invalidation call back)
+	 */
+	struct mmu_interval_notifier notifier;
+
+	/**
+	 * @initial_bind: user pointer has been bound at least once.
+	 * write: vm->svm.gpusvm.notifier_lock in read mode and vm->resv held.
+	 * read: vm->svm.gpusvm.notifier_lock in write mode or vm->resv held.
+	 */
+	bool initial_bind;
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+	u32 divisor;
+#endif
+};
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+void xe_userptr_remove(struct xe_userptr_vma *uvma);
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+		     unsigned long range);
+void xe_userptr_destroy(struct xe_userptr_vma *uvma);
+
+int xe_vm_userptr_pin(struct xe_vm *vm);
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
+int xe_vm_userptr_check_repin(struct xe_vm *vm);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_userptr_remove(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_userptr_setup(struct xe_userptr_vma *uvma,
+				   unsigned long start, unsigned long range)
+{
+	return -ENODEV;
+}
+
+static inline void xe_userptr_destroy(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_vm_userptr_pin(struct xe_vm *vm) { return 0; }
+static inline int __xe_vm_userptr_needs_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vm_userptr_check_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) { return -ENODEV; }
+static inline int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma) { return -ENODEV; }
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_validation.c b/drivers/gpu/drm/xe/xe_validation.c
new file mode 100644
index 000000000000..826cd09966ef
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_validation.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#include "xe_bo.h"
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
+
+#include "xe_assert.h"
+#include "xe_validation.h"
+
+#ifdef CONFIG_DRM_XE_DEBUG
+/**
+ * xe_validation_assert_exec() - Assert that the drm_exec pointer is suitable
+ * for validation.
+ * @xe: Pointer to the xe device.
+ * @exec: The drm_exec pointer to check.
+ * @obj: Pointer to the object subject to validation.
+ *
+ * NULL exec pointers are not allowed.
+ * For XE_VALIDATION_UNIMPLEMENTED, no checking is done.
+ * For XE_VALIDATION_OPT_OUT, check that the caller is a kunit test.
+ * For XE_VALIDATION_UNSUPPORTED, check that the object subject to
+ * validation is a dma-buf, for which support for ww locking is
+ * not in place in the dma-buf layer.
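+ * For a regular (non-tagged) exec pointer, no additional checking is done.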
+ */
+void xe_validation_assert_exec(const struct xe_device *xe,
+			       const struct drm_exec *exec,
+			       const struct drm_gem_object *obj)
+{
+	xe_assert(xe, exec);
+	if (IS_ERR(exec)) {
+		switch (PTR_ERR(exec)) {
+		case __XE_VAL_UNIMPLEMENTED:
+			break;
+		case __XE_VAL_UNSUPPORTED:
+			xe_assert(xe, !!obj->dma_buf);
+			break;
+#if IS_ENABLED(CONFIG_KUNIT)
+		case __XE_VAL_OPT_OUT:
+			xe_assert(xe, current->kunit_test);
+			break;
+#endif
+		default:
+			xe_assert(xe, false);
+		}
+	}
+}
+#endif
+
+static int xe_validation_lock(struct xe_validation_ctx *ctx)
+{
+	struct xe_validation_device *val = ctx->val;
+	int ret = 0;
+
+	if (ctx->val_flags.interruptible) {
+		if (ctx->request_exclusive)
+			ret = down_write_killable(&val->lock);
+		else
+			ret = down_read_interruptible(&val->lock);
+	} else {
+		if (ctx->request_exclusive)
+			down_write(&val->lock);
+		else
+			down_read(&val->lock);
+	}
+
+	if (!ret) {
+		ctx->lock_held = true;
+		ctx->lock_held_exclusive = ctx->request_exclusive;
+	}
+
+	return ret;
+}
+
+static int xe_validation_trylock(struct xe_validation_ctx *ctx)
+{
+	struct xe_validation_device *val = ctx->val;
+	bool locked;
+
+	if (ctx->request_exclusive)
+		locked = down_write_trylock(&val->lock);
+	else
+		locked = down_read_trylock(&val->lock);
+
+	if (locked) {
+		ctx->lock_held = true;
+		ctx->lock_held_exclusive = ctx->request_exclusive;
+	}
+
+	return locked ? 0 : -EWOULDBLOCK;
+}
+
+static void xe_validation_unlock(struct xe_validation_ctx *ctx)
+{
+	if (!ctx->lock_held)
+		return;
+
+	if (ctx->lock_held_exclusive)
+		up_write(&ctx->val->lock);
+	else
+		up_read(&ctx->val->lock);
+
+	ctx->lock_held = false;
+}
+
+/**
+ * xe_validation_ctx_init() - Initialize an xe_validation_ctx
+ * @ctx: The xe_validation_ctx to initialize.
+ * @val: The xe_validation_device representing the validation domain.
+ * @exec: The struct drm_exec to use for the transaction. May be NULL.
+ * @flags: The flags to use for initialization.
+ *
+ * Initialize and lock an xe_validation transaction using the validation domain
+ * represented by @val. Also initialize the drm_exec object forwarding parts of
+ * @flags to the drm_exec initialization. The @flags.exclusive flag should
+ * typically be set to false to avoid locking out other validators from the
+ * domain until an OOM is hit. For testing or final-attempt purposes it can,
+ * however, be set to true.
+ *
+ * Return: %0 on success, %-EINTR if interruptible initial locking failed with a
+ * signal pending. If @flags.no_block is set to true, a failed trylock
+ * returns %-EWOULDBLOCK.
+ */
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+			   struct drm_exec *exec, const struct xe_val_flags flags)
+{
+	int ret;
+
+	ctx->exec = exec;
+	ctx->val = val;
+	ctx->lock_held = false;
+	ctx->lock_held_exclusive = false;
+	ctx->request_exclusive = flags.exclusive;
+	ctx->val_flags = flags;
+	ctx->exec_flags = 0;
+	ctx->nr = 0;
+
+	if (flags.no_block)
+		ret = xe_validation_trylock(ctx);
+	else
+		ret = xe_validation_lock(ctx);
+	if (ret)
+		return ret;
+
+	if (exec) {
+		if (flags.interruptible)
+			ctx->exec_flags |= DRM_EXEC_INTERRUPTIBLE_WAIT;
+		if (flags.exec_ignore_duplicates)
+			ctx->exec_flags |= DRM_EXEC_IGNORE_DUPLICATES;
+		drm_exec_init(exec, ctx->exec_flags, ctx->nr);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+/*
+ * This abuses both drm_exec and ww_mutex internals and should be
+ * replaced by checking for -EDEADLK when we can make TTM
+ * stop converting -EDEADLK to -ENOMEM.
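+ * A contending lock left in the drm_exec ww_acquire ticket is what
+ * indicates injected ww_mutex contention rather than a genuine
+ * allocation failure.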
+ * An alternative is to not have exhaustive eviction with
+ * CONFIG_DEBUG_WW_MUTEX_SLOWPATH until that happens.
+ */
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+	return !!exec->ticket.contending_lock;
+}
+
+#else
+
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+	return false;
+}
+
+#endif
+
+static bool __xe_validation_should_retry(struct xe_validation_ctx *ctx, int ret)
+{
+	if (ret == -ENOMEM &&
+	    ((ctx->request_exclusive &&
+	      xe_validation_contention_injected(ctx->exec)) ||
+	     !ctx->request_exclusive)) {
+		ctx->request_exclusive = true;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * xe_validation_exec_lock() - Perform drm_gpuvm_exec_lock within a validation
+ * transaction.
+ * @ctx: An uninitialized xe_validation_ctx.
+ * @vm_exec: An initialized struct vm_exec.
+ * @val: The validation domain.
+ *
+ * The drm_gpuvm_exec_lock() function internally initializes its drm_exec
+ * transaction and therefore doesn't lend itself very well to using
+ * xe_validation_ctx_init(). Provide a helper that takes an uninitialized
+ * xe_validation_ctx and calls drm_gpuvm_exec_lock() with OOM retry.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx,
+			    struct drm_gpuvm_exec *vm_exec,
+			    struct xe_validation_device *val)
+{
+	int ret;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->exec = &vm_exec->exec;
+	ctx->exec_flags = vm_exec->flags;
+	ctx->val = val;
+	if (ctx->exec_flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
+		ctx->val_flags.interruptible = 1;
+	if (ctx->exec_flags & DRM_EXEC_IGNORE_DUPLICATES)
+		ctx->val_flags.exec_ignore_duplicates = 1;
+retry:
+	ret = xe_validation_lock(ctx);
+	if (ret)
+		return ret;
+
+	ret = drm_gpuvm_exec_lock(vm_exec);
+	if (ret) {
+		xe_validation_unlock(ctx);
+		if (__xe_validation_should_retry(ctx, ret))
+			goto retry;
+	}
+
+	return ret;
+}
+
+/**
+ * xe_validation_ctx_fini() - Finalize a validation transaction
+ * @ctx: The validation transaction to finalize.
+ *
+ * Finalize a validation transaction and its related drm_exec transaction.
+ */
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx)
+{
+	if (ctx->exec)
+		drm_exec_fini(ctx->exec);
+	xe_validation_unlock(ctx);
+}
+
+/**
+ * xe_validation_should_retry() - Determine if a validation transaction should retry
+ * @ctx: The validation transaction.
+ * @ret: Pointer to a return value variable.
+ *
+ * Determines whether a validation transaction should retry based on the
+ * internal transaction state and the return value pointed to by @ret.
+ * If a validation should be retried, the transaction is prepared for that,
+ * the validation lock might be re-taken in exclusive mode, and *@ret
+ * is set to %0. If the re-locking fails, typically due to interruptible
+ * locking with a signal pending, *@ret is instead set to %-EINTR and the
+ * function returns %false.
+ *
+ * Return: %true if validation should be retried, %false otherwise.
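+ *
+ * A minimal usage sketch via the xe_validation_retry_on_oom() wrapper,
+ * with object locking and error propagation abbreviated (@xe, @bo and
+ * @vm are placeholders)::
+ *
+ *	struct xe_validation_ctx ctx;
+ *	struct drm_exec exec;
+ *	int err;
+ *
+ *	err = xe_validation_ctx_init(&ctx, &xe->val, &exec,
+ *				     (struct xe_val_flags) {.interruptible = true});
+ *	if (err)
+ *		return err;
+ *
+ *	drm_exec_until_all_locked(&exec) {
+ *		err = xe_bo_validate(bo, vm, false, &exec);
+ *		drm_exec_retry_on_contention(&exec);
+ *		xe_validation_retry_on_oom(&ctx, &err);
+ *	}
+ *	xe_validation_ctx_fini(&ctx);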
+ */ +bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret) +{ + if (__xe_validation_should_retry(ctx, *ret)) { + drm_exec_fini(ctx->exec); + *ret = 0; + if (ctx->request_exclusive != ctx->lock_held_exclusive) { + xe_validation_unlock(ctx); + *ret = xe_validation_lock(ctx); + } + drm_exec_init(ctx->exec, ctx->exec_flags, ctx->nr); + return !*ret; + } + + return false; +} diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h new file mode 100644 index 000000000000..fec331d791e7 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_validation.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ +#ifndef _XE_VALIDATION_H_ +#define _XE_VALIDATION_H_ + +#include <linux/dma-resv.h> +#include <linux/types.h> +#include <linux/rwsem.h> + +struct drm_exec; +struct drm_gem_object; +struct drm_gpuvm_exec; +struct xe_device; + +#ifdef CONFIG_PROVE_LOCKING +/** + * xe_validation_lockdep() - Assert that a drm_exec locking transaction can + * be initialized at this point. + */ +static inline void xe_validation_lockdep(void) +{ + struct ww_acquire_ctx ticket; + + ww_acquire_init(&ticket, &reservation_ww_class); + ww_acquire_fini(&ticket); +} +#else +static inline void xe_validation_lockdep(void) +{ +} +#endif + +/* + * Various values of the drm_exec pointer where we've not (yet) + * implemented full ww locking. + * + * XE_VALIDATION_UNIMPLEMENTED means implementation is pending. + * A lockdep check is made to assure that a drm_exec locking + * transaction can actually take place where the macro is + * used. If this asserts, the exec pointer needs to be assigned + * higher up in the callchain and passed down. + * + * XE_VALIDATION_UNSUPPORTED is for dma-buf code only where + * the dma-buf layer doesn't support WW locking. + * + * XE_VALIDATION_OPT_OUT is for simplification of kunit tests where + * exhaustive eviction isn't necessary. + */ +#define __XE_VAL_UNIMPLEMENTED -EINVAL +#define XE_VALIDATION_UNIMPLEMENTED (xe_validation_lockdep(), \ + (struct drm_exec *)ERR_PTR(__XE_VAL_UNIMPLEMENTED)) + +#define __XE_VAL_UNSUPPORTED -EOPNOTSUPP +#define XE_VALIDATION_UNSUPPORTED ((struct drm_exec *)ERR_PTR(__XE_VAL_UNSUPPORTED)) + +#define __XE_VAL_OPT_OUT -ENOMEM +#define XE_VALIDATION_OPT_OUT (xe_validation_lockdep(), \ + (struct drm_exec *)ERR_PTR(__XE_VAL_OPT_OUT)) +#ifdef CONFIG_DRM_XE_DEBUG +void xe_validation_assert_exec(const struct xe_device *xe, const struct drm_exec *exec, + const struct drm_gem_object *obj); +#else +#define xe_validation_assert_exec(_xe, _exec, _obj) \ + do { \ + (void)_xe; (void)_exec; (void)_obj; \ + } while (0) +#endif + +/** + * struct xe_validation_device - The domain for exhaustive eviction + * @lock: The lock used to exclude other processes from allocating graphics memory + * + * The struct xe_validation_device represents the domain for which we want to use + * exhaustive eviction. The @lock is typically grabbed in read mode for allocations + * but when graphics memory allocation fails, it is retried with the write mode held. + */ +struct xe_validation_device { + struct rw_semaphore lock; +}; + +/** + * struct xe_val_flags - Flags for xe_validation_ctx_init(). + * @exclusive: Start the validation transaction by locking out all other validators. + * @no_block: Don't block on initialization. + * @interruptible: Block interruptible if blocking. Implies initializing the drm_exec + * context with the DRM_EXEC_INTERRUPTIBLE_WAIT flag. 
+ * @exec_ignore_duplicates: Initialize the drm_exec context with the
+ * DRM_EXEC_IGNORE_DUPLICATES flag.
+ */
+struct xe_val_flags {
+	u32 exclusive :1;
+	u32 no_block :1;
+	u32 interruptible :1;
+	u32 exec_ignore_duplicates :1;
+};
+
+/**
+ * struct xe_validation_ctx - A struct drm_exec subclass with support for
+ * exhaustive eviction
+ * @exec: The drm_exec object base class. Note that we use a pointer instead of
+ * embedding to avoid diamond inheritance.
+ * @val: The exhaustive eviction domain.
+ * @val_flags: Copy of the struct xe_val_flags passed to xe_validation_ctx_init.
+ * @lock_held: Whether the domain lock is currently held.
+ * @lock_held_exclusive: Whether the domain lock is held in exclusive mode.
+ * @request_exclusive: Whether to lock exclusively (write mode) the next time
+ * the domain lock is locked.
+ * @exec_flags: The drm_exec flags used for drm_exec (re-)initialization.
+ * @nr: The drm_exec nr parameter used for drm_exec (re-)initialization.
+ */
+struct xe_validation_ctx {
+	struct drm_exec *exec;
+	struct xe_validation_device *val;
+	struct xe_val_flags val_flags;
+	bool lock_held;
+	bool lock_held_exclusive;
+	bool request_exclusive;
+	u32 exec_flags;
+	unsigned int nr;
+};
+
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+			   struct drm_exec *exec, const struct xe_val_flags flags);
+
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx, struct drm_gpuvm_exec *vm_exec,
+			    struct xe_validation_device *val);
+
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx);
+
+bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret);
+
+/**
+ * xe_validation_retry_on_oom() - Retry on oom in an xe_validation transaction
+ * @_ctx: Pointer to the xe_validation_ctx
+ * @_ret: The current error value possibly holding -ENOMEM
+ *
+ * Use this in a way similar to drm_exec_retry_on_contention().
+ * If @_ret contains -ENOMEM the transaction is restarted once in a way that
+ * blocks other transactions and allows exhaustive eviction. If the transaction
+ * was already restarted once, just return the -ENOMEM. May also set
+ * @_ret to -EINTR if not retrying and waits are interruptible.
+ * May only be used within a drm_exec_until_all_locked() loop.
+ */
+#define xe_validation_retry_on_oom(_ctx, _ret)				\
+	do {								\
+		if (xe_validation_should_retry(_ctx, _ret))		\
+			goto *__drm_exec_retry_ptr;			\
+	} while (0)
+
+/**
+ * xe_validation_device_init - Initialize a struct xe_validation_device
+ * @val: The xe_validation_device to init.
+ */
+static inline void
+xe_validation_device_init(struct xe_validation_device *val)
+{
+	init_rwsem(&val->lock);
+}
+
+/*
+ * Make guard() and scoped_guard() work with xe_validation_ctx
+ * so that we can exit transactions without caring about the
+ * cleanup.
+ */
+DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
+	     if (_T) xe_validation_ctx_fini(_T);,
+	     ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+	       _ret ? NULL : _ctx; }),
+	     struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
+	     struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
+static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
+{return *_T; }
+#define class_xe_validation_is_conditional true
+
+/**
+ * xe_validation_guard() - An auto-cleanup xe_validation_ctx transaction
+ * @_ctx: The xe_validation_ctx.
+ * @_val: The xe_validation_device.
+ * @_exec: The struct drm_exec object.
+ * @_flags: Flags for the xe_validation_ctx initialization.
+ * @_ret: Return in/out parameter. May be set by this macro. Typically %0 when called.
+ *
+ * This macro will initiate a drm_exec transaction with additional support for
+ * exhaustive eviction.
+ */
+#define xe_validation_guard(_ctx, _val, _exec, _flags, _ret)		\
+	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret)	\
+	drm_exec_until_all_locked(_exec)
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c00a5ff31817..0cacab20ff85 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -41,7 +41,6 @@
 #include "xe_tlb_inval.h"
 #include "xe_trace_bo.h"
 #include "xe_wa.h"
-#include "xe_hmm.h"
 
 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 {
@@ -49,34 +48,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 }
 
 /**
- * xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @uvma: The userptr vma
+ * xe_vm_drm_exec_lock() - Lock the vm's resv with a drm_exec transaction
+ * @vm: The vm whose resv is to be locked.
+ * @exec: The drm_exec transaction.
 *
- * Check if the userptr vma has been invalidated since last successful
- * repin. The check is advisory only and can the function can be called
- * without the vm->userptr.notifier_lock held. There is no guarantee that the
- * vma userptr will remain valid after a lockless check, so typically
- * the call needs to be followed by a proper check under the notifier_lock.
+ * Helper to lock the vm's resv as part of a drm_exec transaction.
 *
- * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ * Return: %0 on success. See drm_exec_lock_obj() for error codes.
 */
-int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec)
 {
-	return mmu_interval_check_retry(&uvma->userptr.notifier,
-					uvma->userptr.notifier_seq) ?
- -EAGAIN : 0; -} - -int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) -{ - struct xe_vma *vma = &uvma->vma; - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_device *xe = vm->xe; - - lockdep_assert_held(&vm->lock); - xe_assert(xe, xe_vma_is_userptr(vma)); - - return xe_hmm_userptr_populate_range(uvma, false); + return drm_exec_lock_obj(exec, xe_vm_obj(vm)); } static bool preempt_fences_waiting(struct xe_vm *vm) @@ -228,6 +210,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) .num_fences = 1, }; struct drm_exec *exec = &vm_exec.exec; + struct xe_validation_ctx ctx; struct dma_fence *pfence; int err; bool wait; @@ -235,14 +218,14 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); down_write(&vm->lock); - err = drm_gpuvm_exec_lock(&vm_exec); + err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val); if (err) goto out_up_write; pfence = xe_preempt_fence_create(q, q->lr.context, ++q->lr.seqno); - if (!pfence) { - err = -ENOMEM; + if (IS_ERR(pfence)) { + err = PTR_ERR(pfence); goto out_fini; } @@ -250,7 +233,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) ++vm->preempt.num_exec_queues; q->lr.pfence = pfence; - down_read(&vm->userptr.notifier_lock); + xe_svm_notifier_lock(vm); drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP); @@ -264,10 +247,10 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) if (wait) dma_fence_enable_sw_signaling(pfence); - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); out_fini: - drm_exec_fini(exec); + xe_validation_ctx_fini(&ctx); out_up_write: up_write(&vm->lock); @@ -300,25 +283,6 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) up_write(&vm->lock); } -/** - * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs - * that need repinning. - * @vm: The VM. - * - * This function checks for whether the VM has userptrs that need repinning, - * and provides a release-type barrier on the userptr.notifier_lock after - * checking. - * - * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are. - */ -int __xe_vm_userptr_needs_repin(struct xe_vm *vm) -{ - lockdep_assert_held_read(&vm->userptr.notifier_lock); - - return (list_empty(&vm->userptr.repin_list) && - list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN; -} - #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000 /** @@ -350,39 +314,6 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked) /* TODO: Inform user the VM is banned */ } -/** - * xe_vm_validate_should_retry() - Whether to retry after a validate error. - * @exec: The drm_exec object used for locking before validation. - * @err: The error returned from ttm_bo_validate(). - * @end: A ktime_t cookie that should be set to 0 before first use and - * that should be reused on subsequent calls. - * - * With multiple active VMs, under memory pressure, it is possible that - * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM. - * Until ttm properly handles locking in such scenarios, best thing the - * driver can do is retry with a timeout. Check if that is necessary, and - * if so unlock the drm_exec's objects while keeping the ticket to prepare - * for a rerun. - * - * Return: true if a retry after drm_exec_init() is recommended; - * false otherwise. 
- */ -bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end) -{ - ktime_t cur; - - if (err != -ENOMEM) - return false; - - cur = ktime_get(); - *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS); - if (!ktime_before(cur, *end)) - return false; - - msleep(20); - return true; -} - static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) { struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); @@ -397,7 +328,7 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) if (!try_wait_for_completion(&vm->xe->pm_block)) return -EAGAIN; - ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false); + ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false, exec); if (ret) return ret; @@ -513,10 +444,10 @@ void xe_vm_resume_rebind_worker(struct xe_vm *vm) static void preempt_rebind_work_func(struct work_struct *w) { struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); + struct xe_validation_ctx ctx; struct drm_exec exec; unsigned int fence_count = 0; LIST_HEAD(preempt_fences); - ktime_t end = 0; int err = 0; long wait; int __maybe_unused tries = 0; @@ -544,18 +475,19 @@ retry: goto out_unlock_outer; } - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + err = xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, + (struct xe_val_flags) {.interruptible = true}); + if (err) + goto out_unlock_outer; drm_exec_until_all_locked(&exec) { bool done = false; err = xe_preempt_work_begin(&exec, vm, &done); drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &err); if (err || done) { - drm_exec_fini(&exec); - if (err && xe_vm_validate_should_retry(&exec, err, &end)) - err = -EAGAIN; - + xe_validation_ctx_fini(&ctx); goto out_unlock_outer; } } @@ -564,7 +496,9 @@ retry: if (err) goto out_unlock; + xe_vm_set_validation_exec(vm, &exec); err = xe_vm_rebind(vm, true); + xe_vm_set_validation_exec(vm, NULL); if (err) goto out_unlock; @@ -582,9 +516,9 @@ retry: (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \ __xe_vm_userptr_needs_repin(__vm)) - down_read(&vm->userptr.notifier_lock); + xe_svm_notifier_lock(vm); if (retry_required(tries, vm)) { - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); err = -EAGAIN; goto out_unlock; } @@ -598,10 +532,10 @@ retry: /* Point of no return. */ arm_preempt_fences(vm, &preempt_fences); resume_and_reinstall_preempt_fences(vm, &exec); - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); out_unlock: - drm_exec_fini(&exec); + xe_validation_ctx_fini(&ctx); out_unlock_outer: if (err == -EAGAIN) { trace_xe_vm_rebind_worker_retry(vm); @@ -619,203 +553,6 @@ out_unlock_outer: trace_xe_vm_rebind_worker_exit(vm); } -static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma) -{ - struct xe_userptr *userptr = &uvma->userptr; - struct xe_vma *vma = &uvma->vma; - struct dma_resv_iter cursor; - struct dma_fence *fence; - long err; - - /* - * Tell exec and rebind worker they need to repin and rebind this - * userptr. - */ - if (!xe_vm_in_fault_mode(vm) && - !(vma->gpuva.flags & XE_VMA_DESTROYED)) { - spin_lock(&vm->userptr.invalidated_lock); - list_move_tail(&userptr->invalidate_link, - &vm->userptr.invalidated); - spin_unlock(&vm->userptr.invalidated_lock); - } - - /* - * Preempt fences turn into schedule disables, pipeline these. - * Note that even in fault mode, we need to wait for binds and - * unbinds to complete, and those are attached as BOOKMARK fences - * to the vm. 
- */ - dma_resv_iter_begin(&cursor, xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP); - dma_resv_for_each_fence_unlocked(&cursor, fence) - dma_fence_enable_sw_signaling(fence); - dma_resv_iter_end(&cursor); - - err = dma_resv_wait_timeout(xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP, - false, MAX_SCHEDULE_TIMEOUT); - XE_WARN_ON(err <= 0); - - if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { - err = xe_vm_invalidate_vma(vma); - XE_WARN_ON(err); - } - - xe_hmm_userptr_unmap(uvma); -} - -static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, - const struct mmu_notifier_range *range, - unsigned long cur_seq) -{ - struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier); - struct xe_vma *vma = &uvma->vma; - struct xe_vm *vm = xe_vma_vm(vma); - - xe_assert(vm->xe, xe_vma_is_userptr(vma)); - trace_xe_vma_userptr_invalidate(vma); - - if (!mmu_notifier_range_blockable(range)) - return false; - - vm_dbg(&xe_vma_vm(vma)->xe->drm, - "NOTIFIER: addr=0x%016llx, range=0x%016llx", - xe_vma_start(vma), xe_vma_size(vma)); - - down_write(&vm->userptr.notifier_lock); - mmu_interval_set_seq(mni, cur_seq); - - __vma_userptr_invalidate(vm, uvma); - up_write(&vm->userptr.notifier_lock); - trace_xe_vma_userptr_invalidate_complete(vma); - - return true; -} - -static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = { - .invalidate = vma_userptr_invalidate, -}; - -#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) -/** - * xe_vma_userptr_force_invalidate() - force invalidate a userptr - * @uvma: The userptr vma to invalidate - * - * Perform a forced userptr invalidation for testing purposes. - */ -void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma) -{ - struct xe_vm *vm = xe_vma_vm(&uvma->vma); - - /* Protect against concurrent userptr pinning */ - lockdep_assert_held(&vm->lock); - /* Protect against concurrent notifiers */ - lockdep_assert_held(&vm->userptr.notifier_lock); - /* - * Protect against concurrent instances of this function and - * the critical exec sections - */ - xe_vm_assert_held(vm); - - if (!mmu_interval_read_retry(&uvma->userptr.notifier, - uvma->userptr.notifier_seq)) - uvma->userptr.notifier_seq -= 2; - __vma_userptr_invalidate(vm, uvma); -} -#endif - -int xe_vm_userptr_pin(struct xe_vm *vm) -{ - struct xe_userptr_vma *uvma, *next; - int err = 0; - - xe_assert(vm->xe, !xe_vm_in_fault_mode(vm)); - lockdep_assert_held_write(&vm->lock); - - /* Collect invalidated userptrs */ - spin_lock(&vm->userptr.invalidated_lock); - xe_assert(vm->xe, list_empty(&vm->userptr.repin_list)); - list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, - userptr.invalidate_link) { - list_del_init(&uvma->userptr.invalidate_link); - list_add_tail(&uvma->userptr.repin_link, - &vm->userptr.repin_list); - } - spin_unlock(&vm->userptr.invalidated_lock); - - /* Pin and move to bind list */ - list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, - userptr.repin_link) { - err = xe_vma_userptr_pin_pages(uvma); - if (err == -EFAULT) { - list_del_init(&uvma->userptr.repin_link); - /* - * We might have already done the pin once already, but - * then had to retry before the re-bind happened, due - * some other condition in the caller, but in the - * meantime the userptr got dinged by the notifier such - * that we need to revalidate here, but this time we hit - * the EFAULT. In such a case make sure we remove - * ourselves from the rebind list to avoid going down in - * flames. 
- */ - if (!list_empty(&uvma->vma.combined_links.rebind)) - list_del_init(&uvma->vma.combined_links.rebind); - - /* Wait for pending binds */ - xe_vm_lock(vm, false); - dma_resv_wait_timeout(xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP, - false, MAX_SCHEDULE_TIMEOUT); - - down_read(&vm->userptr.notifier_lock); - err = xe_vm_invalidate_vma(&uvma->vma); - up_read(&vm->userptr.notifier_lock); - xe_vm_unlock(vm); - if (err) - break; - } else { - if (err) - break; - - list_del_init(&uvma->userptr.repin_link); - list_move_tail(&uvma->vma.combined_links.rebind, - &vm->rebind_list); - } - } - - if (err) { - down_write(&vm->userptr.notifier_lock); - spin_lock(&vm->userptr.invalidated_lock); - list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, - userptr.repin_link) { - list_del_init(&uvma->userptr.repin_link); - list_move_tail(&uvma->userptr.invalidate_link, - &vm->userptr.invalidated); - } - spin_unlock(&vm->userptr.invalidated_lock); - up_write(&vm->userptr.notifier_lock); - } - return err; -} - -/** - * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs - * that need repinning. - * @vm: The VM. - * - * This function does an advisory check for whether the VM has userptrs that - * need repinning. - * - * Return: 0 if there are no indications of userptrs needing repinning, - * -EAGAIN if there are. - */ -int xe_vm_userptr_check_repin(struct xe_vm *vm) -{ - return (list_empty_careful(&vm->userptr.repin_list) && - list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; -} - static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds) { int i; @@ -1280,25 +1017,17 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, drm_gpuvm_bo_put(vm_bo); } else /* userptr or null */ { if (!is_null && !is_cpu_addr_mirror) { - struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr; + struct xe_userptr_vma *uvma = to_userptr_vma(vma); u64 size = end - start + 1; int err; - INIT_LIST_HEAD(&userptr->invalidate_link); - INIT_LIST_HEAD(&userptr->repin_link); vma->gpuva.gem.offset = bo_offset_or_userptr; - mutex_init(&userptr->unmap_mutex); - err = mmu_interval_notifier_insert(&userptr->notifier, - current->mm, - xe_vma_userptr(vma), size, - &vma_userptr_notifier_ops); + err = xe_userptr_setup(uvma, xe_vma_userptr(vma), size); if (err) { xe_vma_free(vma); return ERR_PTR(err); } - - userptr->notifier_seq = LONG_MAX; } xe_vm_get(vm); @@ -1318,18 +1047,8 @@ static void xe_vma_destroy_late(struct xe_vma *vma) if (xe_vma_is_userptr(vma)) { struct xe_userptr_vma *uvma = to_userptr_vma(vma); - struct xe_userptr *userptr = &uvma->userptr; - - if (userptr->sg) - xe_hmm_userptr_free_sg(uvma); - /* - * Since userptr pages are not pinned, we can't remove - * the notifier until we're sure the GPU is not accessing - * them anymore - */ - mmu_interval_notifier_remove(&userptr->notifier); - mutex_destroy(&userptr->unmap_mutex); + xe_userptr_remove(uvma); xe_vm_put(vm); } else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) { xe_vm_put(vm); @@ -1366,11 +1085,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) if (xe_vma_is_userptr(vma)) { xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); - - spin_lock(&vm->userptr.invalidated_lock); - xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link)); - list_del(&to_userptr_vma(vma)->userptr.invalidate_link); - spin_unlock(&vm->userptr.invalidated_lock); + xe_userptr_destroy(to_userptr_vma(vma)); } else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) { 
xe_bo_assert_held(xe_vma_bo(vma)); @@ -1418,20 +1133,19 @@ int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma) static void xe_vma_destroy_unlocked(struct xe_vma *vma) { + struct xe_device *xe = xe_vma_vm(vma)->xe; + struct xe_validation_ctx ctx; struct drm_exec exec; - int err; + int err = 0; - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked(&exec) { + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) { err = xe_vm_lock_vma(&exec, vma); drm_exec_retry_on_contention(&exec); if (XE_WARN_ON(err)) break; + xe_vma_destroy(vma, NULL); } - - xe_vma_destroy(vma, NULL); - - drm_exec_fini(&exec); + xe_assert(xe, !err); } struct xe_vma * @@ -1656,6 +1370,7 @@ static void vm_destroy_work_func(struct work_struct *w); * @xe: xe device. * @tile: tile to set up for. * @vm: vm to set up for. + * @exec: The struct drm_exec object used to lock the vm resv. * * Sets up a pagetable tree with one page-table per level and a single * leaf PTE. All pagetable entries point to the single page-table or, @@ -1665,20 +1380,19 @@ static void vm_destroy_work_func(struct work_struct *w); * Return: 0 on success, negative error code on error. */ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile, - struct xe_vm *vm) + struct xe_vm *vm, struct drm_exec *exec) { u8 id = tile->id; int i; for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { - vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); + vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec); if (IS_ERR(vm->scratch_pt[id][i])) { int err = PTR_ERR(vm->scratch_pt[id][i]); vm->scratch_pt[id][i] = NULL; return err; } - xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); } @@ -1706,9 +1420,26 @@ static void xe_vm_free_scratch(struct xe_vm *vm) } } +static void xe_vm_pt_destroy(struct xe_vm *vm) +{ + struct xe_tile *tile; + u8 id; + + xe_vm_assert_held(vm); + + for_each_tile(tile, vm->xe, id) { + if (vm->pt_root[id]) { + xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); + vm->pt_root[id] = NULL; + } + } +} + struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) { struct drm_gem_object *vm_resv_obj; + struct xe_validation_ctx ctx; + struct drm_exec exec; struct xe_vm *vm; int err, number_tiles = 0; struct xe_tile *tile; @@ -1752,7 +1483,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) INIT_LIST_HEAD(&vm->userptr.repin_list); INIT_LIST_HEAD(&vm->userptr.invalidated); - init_rwsem(&vm->userptr.notifier_lock); spin_lock_init(&vm->userptr.invalidated_lock); ttm_lru_bulk_move_init(&vm->lru_bulk_move); @@ -1779,11 +1509,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) INIT_LIST_HEAD(&vm->preempt.pm_activate_link); } - if (flags & XE_VM_FLAG_FAULT_MODE) { - err = xe_svm_init(vm); - if (err) - goto err_no_resv; - } + err = xe_svm_init(vm); + if (err) + goto err_no_resv; vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm); if (!vm_resv_obj) { @@ -1796,49 +1524,68 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) drm_gem_object_put(vm_resv_obj); - err = xe_vm_lock(vm, true); - if (err) - goto err_close; - - if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) - vm->flags |= XE_VM_FLAG_64K; - - for_each_tile(tile, xe, id) { - if (flags & XE_VM_FLAG_MIGRATION && - tile->id != XE_VM_FLAG_TILE_ID(flags)) - continue; + err = 0; + xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true}, + err) { + err = xe_vm_drm_exec_lock(vm, &exec); + 
drm_exec_retry_on_contention(&exec); - vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level); - if (IS_ERR(vm->pt_root[id])) { - err = PTR_ERR(vm->pt_root[id]); - vm->pt_root[id] = NULL; - goto err_unlock_close; - } - } + if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) + vm->flags |= XE_VM_FLAG_64K; - if (xe_vm_has_scratch(vm)) { for_each_tile(tile, xe, id) { - if (!vm->pt_root[id]) + if (flags & XE_VM_FLAG_MIGRATION && + tile->id != XE_VM_FLAG_TILE_ID(flags)) continue; - err = xe_vm_create_scratch(xe, tile, vm); + vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level, + &exec); + if (IS_ERR(vm->pt_root[id])) { + err = PTR_ERR(vm->pt_root[id]); + vm->pt_root[id] = NULL; + xe_vm_pt_destroy(vm); + drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &err); + break; + } + } + if (err) + break; + + if (xe_vm_has_scratch(vm)) { + for_each_tile(tile, xe, id) { + if (!vm->pt_root[id]) + continue; + + err = xe_vm_create_scratch(xe, tile, vm, &exec); + if (err) { + xe_vm_free_scratch(vm); + xe_vm_pt_destroy(vm); + drm_exec_retry_on_contention(&exec); + xe_validation_retry_on_oom(&ctx, &err); + break; + } + } if (err) - goto err_unlock_close; + break; + vm->batch_invalidate_tlb = true; } - vm->batch_invalidate_tlb = true; - } - if (vm->flags & XE_VM_FLAG_LR_MODE) - vm->batch_invalidate_tlb = false; + if (vm->flags & XE_VM_FLAG_LR_MODE) { + INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); + vm->batch_invalidate_tlb = false; + } - /* Fill pt_root after allocating scratch tables */ - for_each_tile(tile, xe, id) { - if (!vm->pt_root[id]) - continue; + /* Fill pt_root after allocating scratch tables */ + for_each_tile(tile, xe, id) { + if (!vm->pt_root[id]) + continue; - xe_pt_populate_empty(tile, vm, vm->pt_root[id]); + xe_pt_populate_empty(tile, vm, vm->pt_root[id]); + } } - xe_vm_unlock(vm); + if (err) + goto err_close; /* Kernel migration VM shouldn't have a circular loop.. */ if (!(flags & XE_VM_FLAG_MIGRATION)) { @@ -1871,7 +1618,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) &xe->usm.next_asid, GFP_KERNEL); up_write(&xe->usm.lock); if (err < 0) - goto err_unlock_close; + goto err_close; vm->usm.asid = asid; } @@ -1880,8 +1627,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) return vm; -err_unlock_close: - xe_vm_unlock(vm); err_close: xe_vm_close_and_put(vm); return ERR_PTR(err); @@ -1988,9 +1733,9 @@ void xe_vm_close_and_put(struct xe_vm *vm) vma = gpuva_to_vma(gpuva); if (xe_vma_has_no_bo(vma)) { - down_read(&vm->userptr.notifier_lock); + xe_svm_notifier_lock(vm); vma->gpuva.flags |= XE_VMA_DESTROYED; - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); } xe_vm_remove_vma(vm, vma); @@ -2014,13 +1759,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) * destroy the pagetables immediately. 
*/ xe_vm_free_scratch(vm); - - for_each_tile(tile, xe, id) { - if (vm->pt_root[id]) { - xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); - vm->pt_root[id] = NULL; - } - } + xe_vm_pt_destroy(vm); xe_vm_unlock(vm); /* @@ -2034,8 +1773,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) xe_vma_destroy_unlocked(vma); } - if (xe_vm_in_fault_mode(vm)) - xe_svm_fini(vm); + xe_svm_fini(vm); up_write(&vm->lock); @@ -2328,6 +2066,8 @@ int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_ err = copy_to_user(attrs_user, mem_attrs, args->sizeof_mem_range_attr * args->num_mem_ranges); + if (err) + err = -EFAULT; free_mem_attrs: kvfree(mem_attrs); @@ -2376,9 +2116,9 @@ static const u32 region_to_mem_type[] = { static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, bool post_commit) { - down_read(&vm->userptr.notifier_lock); + xe_svm_notifier_lock(vm); vma->gpuva.flags |= XE_VMA_DESTROYED; - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); if (post_commit) xe_vm_remove_vma(vm, vma); } @@ -2639,6 +2379,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, struct xe_vma_mem_attr *attr, unsigned int flags) { struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL; + struct xe_validation_ctx ctx; struct drm_exec exec; struct xe_vma *vma; int err = 0; @@ -2646,9 +2387,9 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, lockdep_assert_held_write(&vm->lock); if (bo) { - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); - drm_exec_until_all_locked(&exec) { - err = 0; + err = 0; + xe_validation_guard(&ctx, &vm->xe->val, &exec, + (struct xe_val_flags) {.interruptible = true}, err) { if (!bo->vm) { err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); drm_exec_retry_on_contention(&exec); @@ -2657,27 +2398,35 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, err = drm_exec_lock_obj(&exec, &bo->ttm.base); drm_exec_retry_on_contention(&exec); } - if (err) { - drm_exec_fini(&exec); + if (err) return ERR_PTR(err); - } - } - } - vma = xe_vma_create(vm, bo, op->gem.offset, - op->va.addr, op->va.addr + - op->va.range - 1, attr, flags); - if (IS_ERR(vma)) - goto err_unlock; - if (xe_vma_is_userptr(vma)) - err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); - else if (!xe_vma_has_no_bo(vma) && !bo->vm) - err = add_preempt_fences(vm, bo); + vma = xe_vma_create(vm, bo, op->gem.offset, + op->va.addr, op->va.addr + + op->va.range - 1, attr, flags); + if (IS_ERR(vma)) + return vma; -err_unlock: - if (bo) - drm_exec_fini(&exec); + if (!bo->vm) { + err = add_preempt_fences(vm, bo); + if (err) { + prep_vma_destroy(vm, vma, false); + xe_vma_destroy(vma, NULL); + } + } + } + if (err) + return ERR_PTR(err); + } else { + vma = xe_vma_create(vm, NULL, op->gem.offset, + op->va.addr, op->va.addr + + op->va.range - 1, attr, flags); + if (IS_ERR(vma)) + return vma; + if (xe_vma_is_userptr(vma)) + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); + } if (err) { prep_vma_destroy(vm, vma, false); xe_vma_destroy_unlocked(vma); @@ -3021,9 +2770,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va); if (vma) { - down_read(&vm->userptr.notifier_lock); + xe_svm_notifier_lock(vm); vma->gpuva.flags &= ~XE_VMA_DESTROYED; - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); if (post_commit) xe_vm_insert_vma(vm, vma); } @@ -3042,9 +2791,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, 
xe_vma_destroy_unlocked(op->remap.next); } if (vma) { - down_read(&vm->userptr.notifier_lock); + xe_svm_notifier_lock(vm); vma->gpuva.flags &= ~XE_VMA_DESTROYED; - up_read(&vm->userptr.notifier_lock); + xe_svm_notifier_unlock(vm); if (post_commit) xe_vm_insert_vma(vm, vma); } @@ -3094,7 +2843,7 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma, err = drm_exec_lock_obj(exec, &bo->ttm.base); if (!err && validate) err = xe_bo_validate(bo, vm, - !xe_vm_in_preempt_fence_mode(vm)); + !xe_vm_in_preempt_fence_mode(vm), exec); } return err; @@ -3212,7 +2961,9 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, false); if (!err && !xe_vma_has_no_bo(vma)) err = xe_bo_migrate(xe_vma_bo(vma), - region_to_mem_type[region]); + region_to_mem_type[region], + NULL, + exec); break; } default: @@ -3475,35 +3226,37 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm, struct xe_vma_ops *vops) { + struct xe_validation_ctx ctx; struct drm_exec exec; struct dma_fence *fence; - int err; + int err = 0; lockdep_assert_held_write(&vm->lock); - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES, 0); - drm_exec_until_all_locked(&exec) { + xe_validation_guard(&ctx, &vm->xe->val, &exec, + ((struct xe_val_flags) { + .interruptible = true, + .exec_ignore_duplicates = true, + }), err) { err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); drm_exec_retry_on_contention(&exec); - if (err) { - fence = ERR_PTR(err); - goto unlock; - } + xe_validation_retry_on_oom(&ctx, &err); + if (err) + return ERR_PTR(err); + xe_vm_set_validation_exec(vm, &exec); fence = ops_execute(vm, vops); + xe_vm_set_validation_exec(vm, NULL); if (IS_ERR(fence)) { if (PTR_ERR(fence) == -ENODATA) vm_bind_ioctl_ops_fini(vm, vops, NULL); - goto unlock; + return fence; } vm_bind_ioctl_ops_fini(vm, vops, fence); } -unlock: - drm_exec_fini(&exec); - return fence; + return err ? 
ERR_PTR(err) : fence; } ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO); @@ -3619,6 +3372,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE && op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || + XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR && + !IS_ENABLED(CONFIG_DRM_GPUSVM)) || XE_IOCTL_DBG(xe, obj && op == DRM_XE_VM_BIND_OP_PREFETCH) || XE_IOCTL_DBG(xe, prefetch_region && @@ -4054,10 +3809,14 @@ release_vm_lock: */ int xe_vm_lock(struct xe_vm *vm, bool intr) { + int ret; + if (intr) - return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); + ret = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); + else + ret = dma_resv_lock(xe_vm_resv(vm), NULL); - return dma_resv_lock(xe_vm_resv(vm), NULL); + return ret; } /** @@ -4164,13 +3923,13 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) */ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { if (xe_vma_is_userptr(vma)) { - lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) || - (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) && + lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) || + (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) && lockdep_is_held(&xe_vm_resv(vm)->lock.base))); WARN_ON_ONCE(!mmu_interval_check_retry (&to_userptr_vma(vma)->userptr.notifier, - to_userptr_vma(vma)->userptr.notifier_seq)); + to_userptr_vma(vma)->userptr.pages.notifier_seq)); WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP)); diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index d631c4b25c51..ef8a5019574e 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -220,12 +220,6 @@ static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm) int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q); void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q); -int xe_vm_userptr_pin(struct xe_vm *vm); - -int __xe_vm_userptr_needs_repin(struct xe_vm *vm); - -int xe_vm_userptr_check_repin(struct xe_vm *vm); - int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker); struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask); @@ -266,12 +260,6 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm) } } -int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma); - -int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma); - -bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end); - int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma); int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, @@ -302,6 +290,8 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked); */ #define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm)) +int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec); + #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) #define vm_dbg drm_dbg #else @@ -331,7 +321,7 @@ static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict) if (vm && !allow_res_evict) { xe_vm_assert_held(vm); /* Pairs with READ_ONCE in xe_vm_is_validating() */ - WRITE_ONCE(vm->validating, current); + WRITE_ONCE(vm->validation.validating, current); } } @@ -349,7 +339,7 @@ static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict { if (vm && !allow_res_evict) { /* Pairs with READ_ONCE in xe_vm_is_validating() */ - WRITE_ONCE(vm->validating, NULL); + WRITE_ONCE(vm->validation.validating, NULL); } } @@ -367,7 
+357,7 @@ static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict static inline bool xe_vm_is_validating(struct xe_vm *vm) { /* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */ - if (READ_ONCE(vm->validating) == current) { + if (READ_ONCE(vm->validation.validating) == current) { xe_vm_assert_held(vm); return true; } @@ -375,6 +365,34 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm) } /** + * xe_vm_set_validation_exec() - Accessor to set the drm_exec object + * @vm: The vm we want to register a drm_exec object with. + * @exec: The exec object we want to register. + * + * Set the drm_exec object used to lock the vm's resv. + */ +static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec) +{ + xe_vm_assert_held(vm); + xe_assert(vm->xe, !!exec ^ !!vm->validation._exec); + vm->validation._exec = exec; +} + +/** + * xe_vm_validation_exec() - Accessor to read the drm_exec object + * @vm: The vm whose drm_exec object we want to read. + * + * Return: The drm_exec object used to lock the vm's resv. The value + * is a valid pointer, %NULL, or one of the special values defined in + * xe_validation.h. + */ +static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm) +{ + xe_vm_assert_held(vm); + return vm->validation._exec; +} + +/** * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has * a valid GPU mapping * @tile: The tile which the GPU mapping belongs to @@ -393,11 +411,4 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm) #define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \ ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id)) -#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) -void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma); -#else -static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma) -{ -} -#endif #endif diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c index 09c5783ee523..cad3cf627c3f 100644 --- a/drivers/gpu/drm/xe/xe_vm_madvise.c +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c @@ -18,9 +18,8 @@ struct xe_vmas_in_madvise_range { u64 range; struct xe_vma **vmas; int num_vmas; - bool has_svm_vmas; bool has_bo_vmas; - bool has_userptr_vmas; + bool has_svm_userptr_vmas; }; static int get_vmas(struct xe_vm *vm, struct xe_vmas_in_madvise_range *madvise_range) @@ -46,10 +45,8 @@ static int get_vmas(struct xe_vm *vm, struct xe_vmas_in_madvise_r if (xe_vma_bo(vma)) madvise_range->has_bo_vmas = true; - else if (xe_vma_is_cpu_addr_mirror(vma)) - madvise_range->has_svm_vmas = true; - else if (xe_vma_is_userptr(vma)) - madvise_range->has_userptr_vmas = true; + else if (xe_vma_is_cpu_addr_mirror(vma) || xe_vma_is_userptr(vma)) + madvise_range->has_svm_userptr_vmas = true; if (madvise_range->num_vmas == max_vmas) { max_vmas <<= 1; @@ -127,8 +124,6 @@ static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm, vmas[i]->attr.atomic_access = op->atomic.val; } - vmas[i]->attr.atomic_access = op->atomic.val; - bo = xe_vma_bo(vmas[i]); if (!bo || bo->attr.atomic_access == op->atomic.val) continue; @@ -201,12 +196,12 @@ static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end) if (xe_pt_zap_ptes(tile, vma)) { tile_mask |= BIT(id); - /* - * WRITE_ONCE pairs with READ_ONCE - * in xe_vm_has_valid_gpu_mapping() - */ - WRITE_ONCE(vma->tile_invalidated, - vma->tile_invalidated | BIT(id)); + /* + * WRITE_ONCE pairs with READ_ONCE + * in
xe_vm_has_valid_gpu_mapping() + */ + WRITE_ONCE(vma->tile_invalidated, + vma->tile_invalidated | BIT(id)); } } } @@ -256,7 +251,7 @@ static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madv if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.pad)) return false; - if (XE_IOCTL_DBG(xe, args->atomic.reserved)) + if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.reserved)) return false; break; } @@ -409,29 +404,20 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil } } - if (madvise_range.has_userptr_vmas) { - err = down_read_interruptible(&vm->userptr.notifier_lock); + if (madvise_range.has_svm_userptr_vmas) { + err = xe_svm_notifier_lock_interruptible(vm); if (err) goto err_fini; } - if (madvise_range.has_svm_vmas) { - err = down_read_interruptible(&vm->svm.gpusvm.notifier_lock); - if (err) - goto unlock_userptr; - } - attr_type = array_index_nospec(args->type, ARRAY_SIZE(madvise_funcs)); madvise_funcs[attr_type](xe, vm, madvise_range.vmas, madvise_range.num_vmas, args); err = xe_vm_invalidate_madvise_range(vm, args->start, args->start + args->range); - if (madvise_range.has_svm_vmas) + if (madvise_range.has_svm_userptr_vmas) xe_svm_notifier_unlock(vm); -unlock_userptr: - if (madvise_range.has_userptr_vmas) - up_read(&vm->userptr.notifier_lock); err_fini: if (madvise_range.has_bo_vmas) drm_exec_fini(&exec); diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index e1a786db5f89..da39940501d8 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -17,6 +17,7 @@ #include "xe_device_types.h" #include "xe_pt_types.h" #include "xe_range_fence.h" +#include "xe_userptr.h" struct xe_bo; struct xe_svm_range; @@ -46,37 +47,6 @@ struct xe_vm_pgtable_update_op; #define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8) #define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9) -/** struct xe_userptr - User pointer */ -struct xe_userptr { - /** @invalidate_link: Link for the vm::userptr.invalidated list */ - struct list_head invalidate_link; - /** @userptr: link into VM repin list if userptr. */ - struct list_head repin_link; - /** - * @notifier: MMU notifier for user pointer (invalidation call back) - */ - struct mmu_interval_notifier notifier; - /** @sgt: storage for a scatter gather table */ - struct sg_table sgt; - /** @sg: allocated scatter gather table */ - struct sg_table *sg; - /** @notifier_seq: notifier sequence number */ - unsigned long notifier_seq; - /** @unmap_mutex: Mutex protecting dma-unmapping */ - struct mutex unmap_mutex; - /** - * @initial_bind: user pointer has been bound at least once. - * write: vm->userptr.notifier_lock in read mode and vm->resv held. - * read: vm->userptr.notifier_lock in write mode or vm->resv held. - */ - bool initial_bind; - /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */ - bool mapped; -#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) - u32 divisor; -#endif -}; - /** * struct xe_vma_mem_attr - memory attributes associated with vma */ @@ -140,10 +110,10 @@ struct xe_vma { /** * @tile_invalidated: Tile mask of binding are invalidated for this VMA. - * protected by BO's resv and for userptrs, vm->userptr.notifier_lock in - * write mode for writing or vm->userptr.notifier_lock in read mode and + * protected by BO's resv and for userptrs, vm->svm.gpusvm.notifier_lock in + * write mode for writing or vm->svm.gpusvm.notifier_lock in read mode and * the vm->resv. 
For stable reading, BO's resv or userptr - * vm->userptr.notifier_lock in read mode is required. Can be + * vm->svm.gpusvm.notifier_lock in read mode is required. Can be * opportunistically read with READ_ONCE outside of locks. */ u8 tile_invalidated; @@ -154,7 +124,7 @@ struct xe_vma { /** * @tile_present: Tile mask of binding are present for this VMA. * protected by vm->lock, vm->resv and for userptrs, - * vm->userptr.notifier_lock for writing. Needs either for reading, + * vm->svm.gpusvm.notifier_lock for writing. Needs either for reading, * but if reading is done under the vm->lock only, it needs to be held * in write mode. */ @@ -289,33 +259,7 @@ struct xe_vm { const struct xe_pt_ops *pt_ops; /** @userptr: user pointer state */ - struct { - /** - * @userptr.repin_list: list of VMAs which are user pointers, - * and needs repinning. Protected by @lock. - */ - struct list_head repin_list; - /** - * @notifier_lock: protects notifier in write mode and - * submission in read mode. - */ - struct rw_semaphore notifier_lock; - /** - * @userptr.invalidated_lock: Protects the - * @userptr.invalidated list. - */ - spinlock_t invalidated_lock; - /** - * @userptr.invalidated: List of invalidated userptrs, not yet - * picked - * up for revalidation. Protected from access with the - * @invalidated_lock. Removing items from the list - * additionally requires @lock in write mode, and adding - * items to the list requires either the @userptr.notifier_lock in - * write mode, OR @lock in write mode. - */ - struct list_head invalidated; - } userptr; + struct xe_userptr_vm userptr; /** @preempt: preempt state */ struct { @@ -363,18 +307,34 @@ struct xe_vm { } error_capture; /** + * @validation: Validation data only valid with the vm resv held. + * Note: This is really task state of the task holding the vm resv, + * and moving forward we should + * come up with a better way of passing this down the call + * chain. + */ + struct { + /** + * @validation.validating: The task that is currently making bos resident + * for this vm. + * Protected by the VM's resv for writing. Opportunistic reading can be done + * using READ_ONCE. Note: This is a workaround for the + * TTM eviction_valuable() callback not being passed a struct + * ttm_operation_context(). Future work might want to address this. + */ + struct task_struct *validating; + /** + * @validation._exec: The drm_exec context used when locking the vm resv. + * Protected by the vm's resv. + */ + struct drm_exec *_exec; + } validation; + + /** * @tlb_flush_seqno: Required TLB flush seqno for the next exec. * protected by the vm resv. */ u64 tlb_flush_seqno; - /** - * @validating: The task that is currently making bos resident for this vm. - * Protected by the VM's resv for writing. Opportunistic reading can be done - * using READ_ONCE. Note: This is a workaround for the - * TTM eviction_valuable() callback not being passed a struct - * ttm_operation_context(). Future work might want to address this. - */ - struct task_struct *validating; /** @batch_invalidate_tlb: Always invalidate TLB before batch start */ bool batch_invalidate_tlb; /** @xef: XE file handle for tracking this VM's drm client */ diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 52c7df4c3afd..cd03891654a1 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -39,7 +39,8 @@ * Register Immediate commands) once when initializing the device and saved in
the default context. That default context is then used on every context * creation to have a "primed golden context", i.e. a context image that - * already contains the changes needed to all the registers. + * already contains the changes needed to all the registers. See + * drivers/gpu/drm/xe/xe_lrc.c for default context handling. * * - Engine workarounds: the list of these WAs is applied whenever the specific * engine is reset. It's also possible that a set of engine classes share a @@ -48,10 +49,10 @@ * them need to keep the workaround programming: the approach taken in the * driver is to tie those workarounds to the first compute/render engine that * is registered. When executing with GuC submission, engine resets are - * outside of kernel driver control, hence the list of registers involved in + * outside of kernel driver control, hence the list of registers involved is * written once, on engine initialization, and then passed to GuC, that * saves/restores their values before/after the reset takes place. See - * ``drivers/gpu/drm/xe/xe_guc_ads.c`` for reference. + * drivers/gpu/drm/xe/xe_guc_ads.c for reference. * * - GT workarounds: the list of these WAs is applied whenever these registers * revert to their default values: on GPU reset, suspend/resume [1]_, etc. @@ -66,21 +67,39 @@ * hardware on every HW context restore. These buffers are created and * programmed in the default context so the hardware always goes through those * programming sequences when switching contexts. The support for workaround - * batchbuffers is enabled these hardware mechanisms: + * batchbuffers is enabled via these hardware mechanisms: * - * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default - * context, pointing the hardware to jump to that location when that offset - * is reached in the context restore. Workaround batchbuffer in the driver - * currently uses this mechanism for all platforms. + * #. INDIRECT_CTX (also known as **mid context restore bb**): A batchbuffer + * and an offset are provided in the default context, pointing the hardware + * to jump to that location when that offset is reached in the context + * restore. When a context is being restored, this is executed after the + * ring context, in the middle (or beginning) of the engine context image. * - * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context, - * pointing the hardware to a buffer to continue executing after the - * engine registers are restored in a context restore sequence. This is - * currently not used in the driver. + * #. BB_PER_CTX_PTR (also known as **post context restore bb**): A + * batchbuffer is provided in the default context, pointing the hardware to + * a buffer to continue executing after the engine registers are restored + * in a context restore sequence. + * + * Below is the timeline for a context restore sequence: + * + * .. code:: + * + * INDIRECT_CTX_OFFSET + * |----------->| + * .------------.------------.-------------.------------.--------------.-----------. + * |Ring | Engine | Mid-context | Engine | Post-context | Ring | + * |Restore | Restore (1)| BB Restore | Restore (2)| BB Restore | Execution | + * `------------'------------'-------------'------------'--------------'-----------' * * - Other/OOB: There are WAs that, due to their nature, cannot be applied from * a central place. Those are peppered around the rest of the code, as needed.
+ * There's a central place to control which workarounds are enabled: + * drivers/gpu/drm/xe/xe_wa_oob.rules for GT workarounds and + * drivers/gpu/drm/xe/xe_device_wa_oob.rules for device/SoC workarounds. + * These files only record which workarounds are enabled: during early device + * initialization those rules are evaluated and recorded by the driver. Then + * later the driver checks with ``XE_GT_WA()`` and ``XE_DEVICE_WA()`` to + * implement them. * * .. [1] Technically, some registers are powercontext saved & restored, so they * survive a suspend/resume. In practice, writing them again is not too @@ -612,6 +631,13 @@ static const struct xe_rtp_entry_sr engine_was[] = { FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS)) }, + { XE_RTP_NAME("18041344222"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), + FUNC(xe_rtp_match_first_render_or_compute), + FUNC(xe_rtp_match_not_sriov_vf), + FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)), + XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE)) + }, /* Xe2_LPM */ @@ -672,6 +698,13 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) }, + { XE_RTP_NAME("18041344222"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), + FUNC(xe_rtp_match_first_render_or_compute), + FUNC(xe_rtp_match_not_sriov_vf), + FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)), + XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE)) + }, }; static const struct xe_rtp_entry_sr lrc_was[] = { @@ -879,6 +912,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { DIS_PARTIAL_AUTOSTRIP | DIS_AUTOSTRIP)) }, + { XE_RTP_NAME("22021007897"), + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE)) + }, }; static __maybe_unused const struct xe_rtp_entry oob_was[] = { diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 338c344dcd7d..f3a6d5d239ce 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -49,7 +49,6 @@ 16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf) 14019789679 GRAPHICS_VERSION(1255) GRAPHICS_VERSION_RANGE(1270, 2004) -no_media_l3 MEDIA_VERSION_RANGE(3000, 3002) 14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0) MEDIA_VERSION(3000), MEDIA_STEP(A0, B0) 16021333562 GRAPHICS_VERSION_RANGE(1200, 1274) diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 344cc9e741c1..723a80895cd4 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -471,6 +471,18 @@ static int host1x_device_add(struct host1x *host1x, mutex_unlock(&clients_lock); + /* + * Add device even if there are no subdevs to ensure syncpoint functionality + * is available regardless of whether any engine subdevices are present + */ + if (list_empty(&device->subdevs)) { + err = device_add(&device->dev); + if (err < 0) + dev_err(&device->dev, "failed to add device: %d\n", err); + else + device->registered = true; + } + return 0; } diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 1f93e5e276c0..e365df6af353 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -585,14 +585,8 @@ static int host1x_probe(struct platform_device *pdev) } host->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(host->clk)) { - err = 
PTR_ERR(host->clk); - - if (err != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to get clock: %d\n", err); - - return err; - } + if (IS_ERR(host->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(host->clk), "failed to get clock\n"); err = host1x_get_resets(host); if (err) @@ -821,6 +815,7 @@ u64 host1x_get_dma_mask(struct host1x *host1x) } EXPORT_SYMBOL(host1x_get_dma_mask); +MODULE_SOFTDEP("post: tegra-drm"); MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>"); MODULE_DESCRIPTION("Host1x driver for Tegra products"); diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index d44b8de890be..2df6a16d484e 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c @@ -47,24 +47,11 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo, } } -static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, - u32 next_class) +static void submit_wait(struct host1x_job *job, u32 id, u32 threshold) { struct host1x_cdma *cdma = &job->channel->cdma; -#if HOST1X_HW >= 6 - u32 stream_id; - - /* - * If a memory context has been set, use it. Otherwise - * (if context isolation is disabled) use the engine's - * firmware stream ID. - */ - if (job->memory_context) - stream_id = job->memory_context->stream_id; - else - stream_id = job->engine_fallback_streamid; - +#if HOST1X_HW >= 2 host1x_cdma_push_wide(cdma, host1x_opcode_setclass( HOST1X_CLASS_HOST1X, @@ -76,23 +63,6 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, id, HOST1X_OPCODE_NOP ); - host1x_cdma_push_wide(&job->channel->cdma, - host1x_opcode_setclass(job->class, 0, 0), - host1x_opcode_setpayload(stream_id), - host1x_opcode_setstreamid(job->engine_streamid_offset / 4), - HOST1X_OPCODE_NOP); -#elif HOST1X_HW >= 2 - host1x_cdma_push_wide(cdma, - host1x_opcode_setclass( - HOST1X_CLASS_HOST1X, - HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32, - /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */ - BIT(0) | BIT(2) - ), - threshold, - id, - host1x_opcode_setclass(next_class, 0, 0) - ); #else /* TODO add waitchk or use waitbases or other mitigation */ host1x_cdma_push(cdma, @@ -103,6 +73,32 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, ), host1x_class_host_wait_syncpt(id, threshold) ); +#endif +} + +static void submit_setclass(struct host1x_job *job, u32 next_class) +{ + struct host1x_cdma *cdma = &job->channel->cdma; + +#if HOST1X_HW >= 6 + u32 stream_id; + + /* + * If a memory context has been set, use it. Otherwise + * (if context isolation is disabled) use the engine's + * firmware stream ID. 
+ */ + if (job->memory_context) + stream_id = job->memory_context->stream_id; + else + stream_id = job->engine_fallback_streamid; + + host1x_cdma_push_wide(cdma, + host1x_opcode_setclass(next_class, 0, 0), + host1x_opcode_setpayload(stream_id), + host1x_opcode_setstreamid(job->engine_streamid_offset / 4), + HOST1X_OPCODE_NOP); +#else host1x_cdma_push(cdma, host1x_opcode_setclass(next_class, 0, 0), HOST1X_OPCODE_NOP @@ -110,7 +106,8 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, #endif } -static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base) +static void submit_gathers(struct host1x_job *job, struct host1x_job_cmd *cmds, u32 num_cmds, + u32 job_syncpt_base) { struct host1x_cdma *cdma = &job->channel->cdma; #if HOST1X_HW < 6 @@ -119,8 +116,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base) unsigned int i; u32 threshold; - for (i = 0; i < job->num_cmds; i++) { - struct host1x_job_cmd *cmd = &job->cmds[i]; + for (i = 0; i < num_cmds; i++) { + struct host1x_job_cmd *cmd = &cmds[i]; if (cmd->is_wait) { if (cmd->wait.relative) @@ -128,7 +125,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base) else threshold = cmd->wait.threshold; - submit_wait(job, cmd->wait.id, threshold, cmd->wait.next_class); + submit_wait(job, cmd->wait.id, threshold); + submit_setclass(job, cmd->wait.next_class); } else { struct host1x_job_gather *g = &cmd->gather; @@ -216,7 +214,34 @@ static void channel_program_cdma(struct host1x_job *job) #if HOST1X_HW >= 6 u32 fence; + int i = 0; + + if (job->num_cmds == 0) + goto prefences_done; + if (!job->cmds[0].is_wait || job->cmds[0].wait.relative) + goto prefences_done; + + /* Enter host1x class with invalid stream ID for prefence waits. */ + host1x_cdma_push_wide(cdma, + host1x_opcode_acquire_mlock(1), + host1x_opcode_setclass(1, 0, 0), + host1x_opcode_setpayload(0), + host1x_opcode_setstreamid(0x1fffff)); + + for (i = 0; i < job->num_cmds; i++) { + struct host1x_job_cmd *cmd = &job->cmds[i]; + + if (!cmd->is_wait || cmd->wait.relative) + break; + + submit_wait(job, cmd->wait.id, cmd->wait.threshold); + } + + host1x_cdma_push(cdma, + HOST1X_OPCODE_NOP, + host1x_opcode_release_mlock(1)); +prefences_done: /* Enter engine class with invalid stream ID. */ host1x_cdma_push_wide(cdma, host1x_opcode_acquire_mlock(job->class), @@ -230,11 +255,12 @@ static void channel_program_cdma(struct host1x_job *job) host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1), HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) | HOST1X_UCLASS_INCR_SYNCPT_COND_F(4)); - submit_wait(job, job->syncpt->id, fence, job->class); + submit_wait(job, job->syncpt->id, fence); + submit_setclass(job, job->class); /* Submit work. */ job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs); - submit_gathers(job, job->syncpt_end - job->syncpt_incrs); + submit_gathers(job, job->cmds + i, job->num_cmds - i, job->syncpt_end - job->syncpt_incrs); /* Before releasing MLOCK, ensure engine is idle again. */ fence = host1x_syncpt_incr_max(sp, 1); @@ -242,7 +268,7 @@ static void channel_program_cdma(struct host1x_job *job) host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1), HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) | HOST1X_UCLASS_INCR_SYNCPT_COND_F(4)); - submit_wait(job, job->syncpt->id, fence, job->class); + submit_wait(job, job->syncpt->id, fence); /* Release MLOCK. 
*/ host1x_cdma_push(cdma, @@ -272,7 +298,7 @@ static void channel_program_cdma(struct host1x_job *job) job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs); - submit_gathers(job, job->syncpt_end - job->syncpt_incrs); + submit_gathers(job, job->cmds, job->num_cmds, job->syncpt_end - job->syncpt_incrs); #endif } diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index f63d14a57a1d..acc7d82e0585 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -345,8 +345,6 @@ static void syncpt_release(struct kref *ref) sp->locked = false; - mutex_lock(&sp->host->syncpt_mutex); - host1x_syncpt_base_free(sp->base); kfree(sp->name); sp->base = NULL; @@ -369,7 +367,7 @@ void host1x_syncpt_put(struct host1x_syncpt *sp) if (!sp) return; - kref_put(&sp->ref, syncpt_release); + kref_put_mutex(&sp->ref, syncpt_release, &sp->host->syncpt_mutex); } EXPORT_SYMBOL(host1x_syncpt_put); diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs index 274989ea1fb4..1380b47617f7 100644 --- a/drivers/gpu/nova-core/driver.rs +++ b/drivers/gpu/nova-core/driver.rs @@ -34,14 +34,19 @@ impl pci::Driver for NovaCore { pdev.enable_device_mem()?; pdev.set_master(); - let bar = Arc::pin_init( + let devres_bar = Arc::pin_init( pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")), GFP_KERNEL, )?; + // Used to provide a `&Bar0` to `Gpu::new` without tying it to the lifetime of + // `devres_bar`. + let bar_clone = Arc::clone(&devres_bar); + let bar = bar_clone.access(pdev.as_ref())?; + let this = KBox::pin_init( try_pin_init!(Self { - gpu <- Gpu::new(pdev, bar)?, + gpu <- Gpu::new(pdev, devres_bar, bar), _reg: auxiliary::Registration::new( pdev.as_ref(), c_str!("nova-drm"), @@ -54,4 +59,8 @@ impl pci::Driver for NovaCore { Ok(this) } + + fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) { + this.gpu.unbind(pdev.as_ref()); + } } diff --git a/drivers/gpu/nova-core/falcon.rs b/drivers/gpu/nova-core/falcon.rs index 50437c67c14a..37e6298195e4 100644 --- a/drivers/gpu/nova-core/falcon.rs +++ b/drivers/gpu/nova-core/falcon.rs @@ -4,16 +4,17 @@ use core::ops::Deref; use hal::FalconHal; -use kernel::bindings; use kernel::device; +use kernel::dma::DmaAddress; use kernel::prelude::*; +use kernel::sync::aref::ARef; use kernel::time::Delta; -use kernel::types::ARef; use crate::dma::DmaObject; use crate::driver::Bar0; use crate::gpu::Chipset; use crate::regs; +use crate::regs::macros::RegisterBase; use crate::util; pub(crate) mod gsp; @@ -274,14 +275,25 @@ impl From<bool> for FalconFbifMemType { } } -/// Trait defining the parameters of a given Falcon instance. -pub(crate) trait FalconEngine: Sync { - /// Base I/O address for the falcon, relative from which its registers are accessed. - const BASE: usize; +/// Type used to represent the `PFALCON` registers address base for a given falcon engine. +pub(crate) struct PFalconBase(()); + +/// Type used to represent the `PFALCON2` registers address base for a given falcon engine. +pub(crate) struct PFalcon2Base(()); + +/// Trait defining the parameters of a given Falcon engine. +/// +/// Each engine provides one base for `PFALCON` and `PFALCON2` registers. The `ID` constant is used +/// to identify a given Falcon instance with register I/O methods. +pub(crate) trait FalconEngine: + Send + Sync + RegisterBase<PFalconBase> + RegisterBase<PFalcon2Base> + Sized +{ + /// Singleton of the engine, used to identify it with register I/O methods.
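    /// Illustrative implementor (editor's sketch, not part of this patch; it mirrors the
    /// `Gsp` and `Sec2` implementations later in this series, with hypothetical base
    /// addresses):
    ///
    /// ```ignore
    /// struct MyFalcon(());
    /// impl RegisterBase<PFalconBase> for MyFalcon {
    ///     const BASE: usize = 0x00aa0000; // hypothetical PFALCON base
    /// }
    /// impl RegisterBase<PFalcon2Base> for MyFalcon {
    ///     const BASE: usize = 0x00aa1000; // hypothetical PFALCON2 base
    /// }
    /// impl FalconEngine for MyFalcon {
    ///     const ID: Self = MyFalcon(());
    /// }
    /// ```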
+ const ID: Self; } /// Represents a portion of the firmware to be loaded into a particular memory (e.g. IMEM or DMEM). -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct FalconLoadTarget { /// Offset from the start of the source object to copy from. pub(crate) src_start: u32, @@ -292,7 +304,7 @@ pub(crate) struct FalconLoadTarget { } /// Parameters for the falcon boot ROM. -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct FalconBromParams { /// Offset in `DMEM` of the firmware's signature. pub(crate) pkc_data_offset: u32, @@ -343,13 +355,13 @@ impl<E: FalconEngine + 'static> Falcon<E> { bar: &Bar0, need_riscv: bool, ) -> Result<Self> { - let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, E::BASE); + let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, &E::ID); // Check that the revision and security model contain valid values. let _ = hwcfg1.core_rev()?; let _ = hwcfg1.security_model()?; if need_riscv { - let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE); + let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID); if !hwcfg2.riscv() { dev_err!( dev, @@ -369,7 +381,7 @@ impl<E: FalconEngine + 'static> Falcon<E> { fn reset_wait_mem_scrubbing(&self, bar: &Bar0) -> Result { // TIMEOUT: memory scrubbing should complete in less than 20ms. util::wait_on(Delta::from_millis(20), || { - if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE).mem_scrubbing_done() { + if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID).mem_scrubbing_done() { Some(()) } else { None @@ -379,12 +391,12 @@ impl<E: FalconEngine + 'static> Falcon<E> { /// Reset the falcon engine. fn reset_eng(&self, bar: &Bar0) -> Result { - let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE); + let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID); // According to OpenRM's `kflcnPreResetWait_GA102` documentation, HW sometimes does not set // RESET_READY so a non-failing timeout is used. let _ = util::wait_on(Delta::from_micros(150), || { - let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE); + let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID); if r.reset_ready() { Some(()) } else { @@ -392,13 +404,13 @@ impl<E: FalconEngine + 'static> Falcon<E> { } }); - regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(true)); + regs::NV_PFALCON_FALCON_ENGINE::alter(bar, &E::ID, |v| v.set_reset(true)); // TODO[DLAY]: replace with udelay() or equivalent once available. // TIMEOUT: falcon engine should not take more than 10us to reset.
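        // Editor's note: the closure below always returns `None`, so `wait_on` runs to
        // its full 10us timeout; it is being used as a stand-in for `udelay()`, per the
        // TODO[DLAY] above.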
let _: Result = util::wait_on(Delta::from_micros(10), || None); - regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(false)); + regs::NV_PFALCON_FALCON_ENGINE::alter(bar, &E::ID, |v| v.set_reset(false)); self.reset_wait_mem_scrubbing(bar)?; @@ -413,7 +425,7 @@ impl<E: FalconEngine + 'static> Falcon<E> { regs::NV_PFALCON_FALCON_RM::default() .set_value(regs::NV_PMC_BOOT_0::read(bar).into()) - .write(bar, E::BASE); + .write(bar, &E::ID); Ok(()) } @@ -443,7 +455,7 @@ impl<E: FalconEngine + 'static> Falcon<E> { fw.dma_handle_with_offset(load_offsets.src_start as usize)?, ), }; - if dma_start % bindings::dma_addr_t::from(DMA_LEN) > 0 { + if dma_start % DmaAddress::from(DMA_LEN) > 0 { dev_err!( self.dev, "DMA transfer start addresses must be a multiple of {}", @@ -451,44 +463,57 @@ impl<E: FalconEngine + 'static> Falcon<E> { ); return Err(EINVAL); } - if load_offsets.len % DMA_LEN > 0 { - dev_err!( - self.dev, - "DMA transfer length must be a multiple of {}", - DMA_LEN - ); - return Err(EINVAL); - } + + // DMA transfers can only be done in units of 256 bytes. Compute how many such transfers we + // need to perform. + let num_transfers = load_offsets.len.div_ceil(DMA_LEN); + + // Check that the area we are about to transfer is within the bounds of the DMA object. + // Upper limit of transfer is `(num_transfers * DMA_LEN) + load_offsets.src_start`. + match num_transfers + .checked_mul(DMA_LEN) + .and_then(|size| size.checked_add(load_offsets.src_start)) + { + None => { + dev_err!(self.dev, "DMA transfer length overflow"); + return Err(EOVERFLOW); + } + Some(upper_bound) if upper_bound as usize > fw.size() => { + dev_err!(self.dev, "DMA transfer goes beyond range of DMA object"); + return Err(EINVAL); + } + Some(_) => (), + }; // Set up the base source DMA address. regs::NV_PFALCON_FALCON_DMATRFBASE::default() .set_base((dma_start >> 8) as u32) - .write(bar, E::BASE); + .write(bar, &E::ID); regs::NV_PFALCON_FALCON_DMATRFBASE1::default() .set_base((dma_start >> 40) as u16) - .write(bar, E::BASE); + .write(bar, &E::ID); let cmd = regs::NV_PFALCON_FALCON_DMATRFCMD::default() .set_size(DmaTrfCmdSize::Size256B) .set_imem(target_mem == FalconMem::Imem) .set_sec(if sec { 1 } else { 0 }); - for pos in (0..load_offsets.len).step_by(DMA_LEN as usize) { + for pos in (0..num_transfers).map(|i| i * DMA_LEN) { // Perform a transfer of size `DMA_LEN`. regs::NV_PFALCON_FALCON_DMATRFMOFFS::default() .set_offs(load_offsets.dst_start + pos) - .write(bar, E::BASE); + .write(bar, &E::ID); regs::NV_PFALCON_FALCON_DMATRFFBOFFS::default() .set_offs(src_start + pos) - .write(bar, E::BASE); - cmd.write(bar, E::BASE); + .write(bar, &E::ID); + cmd.write(bar, &E::ID); // Wait for the transfer to complete. // TIMEOUT: arbitrarily large value, no DMA transfer to the falcon's small memories // should ever take that long. util::wait_on(Delta::from_secs(2), || { - let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, E::BASE); + let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, &E::ID); if r.idle() { Some(()) } else { @@ -502,9 +527,9 @@ impl<E: FalconEngine + 'static> Falcon<E> { /// Perform a DMA load into `IMEM` and `DMEM` of `fw`, and prepare the falcon to run it. 
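    // Editor's sketch (not part of the patch): the bounds check introduced in the DMA
    // write helper above reduces to the following, with both arithmetic steps checked
    // for overflow:
    //
    //     let num_transfers = load_offsets.len.div_ceil(DMA_LEN);
    //     let upper_bound = num_transfers
    //         .checked_mul(DMA_LEN)
    //         .and_then(|size| size.checked_add(load_offsets.src_start))
    //         .ok_or(EOVERFLOW)?;
    //     if upper_bound as usize > fw.size() {
    //         return Err(EINVAL); // transfer would run past the DMA object
    //     }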
pub(crate) fn dma_load<F: FalconFirmware<Target = E>>(&self, bar: &Bar0, fw: &F) -> Result { - regs::NV_PFALCON_FBIF_CTL::alter(bar, E::BASE, |v| v.set_allow_phys_no_ctx(true)); - regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, E::BASE); - regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, E::BASE, |v| { + regs::NV_PFALCON_FBIF_CTL::alter(bar, &E::ID, |v| v.set_allow_phys_no_ctx(true)); + regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, &E::ID); + regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, &E::ID, 0, |v| { v.set_target(FalconFbifTarget::CoherentSysmem) .set_mem_type(FalconFbifMemType::Physical) }); @@ -517,7 +542,7 @@ impl<E: FalconEngine + 'static> Falcon<E> { // Set `BootVec` to start of non-secure code. regs::NV_PFALCON_FALCON_BOOTVEC::default() .set_value(fw.boot_addr()) - .write(bar, E::BASE); + .write(bar, &E::ID); Ok(()) } @@ -538,27 +563,27 @@ impl<E: FalconEngine + 'static> Falcon<E> { if let Some(mbox0) = mbox0 { regs::NV_PFALCON_FALCON_MAILBOX0::default() .set_value(mbox0) - .write(bar, E::BASE); + .write(bar, &E::ID); } if let Some(mbox1) = mbox1 { regs::NV_PFALCON_FALCON_MAILBOX1::default() .set_value(mbox1) - .write(bar, E::BASE); + .write(bar, &E::ID); } - match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE).alias_en() { + match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID).alias_en() { true => regs::NV_PFALCON_FALCON_CPUCTL_ALIAS::default() .set_startcpu(true) - .write(bar, E::BASE), + .write(bar, &E::ID), false => regs::NV_PFALCON_FALCON_CPUCTL::default() .set_startcpu(true) - .write(bar, E::BASE), + .write(bar, &E::ID), } // TIMEOUT: arbitrarily large value, firmwares should complete in less than 2 seconds. util::wait_on(Delta::from_secs(2), || { - let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE); + let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID); if r.halted() { Some(()) } else { @@ -567,8 +592,8 @@ impl<E: FalconEngine + 'static> Falcon<E> { })?; let (mbox0, mbox1) = ( - regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, E::BASE).value(), - regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, E::BASE).value(), + regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, &E::ID).value(), + regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, &E::ID).value(), ); Ok((mbox0, mbox1)) diff --git a/drivers/gpu/nova-core/falcon/gsp.rs b/drivers/gpu/nova-core/falcon/gsp.rs index d622e9a64470..f17599cb49fa 100644 --- a/drivers/gpu/nova-core/falcon/gsp.rs +++ b/drivers/gpu/nova-core/falcon/gsp.rs @@ -2,23 +2,31 @@ use crate::{ driver::Bar0, - falcon::{Falcon, FalconEngine}, - regs, + falcon::{Falcon, FalconEngine, PFalcon2Base, PFalconBase}, + regs::{self, macros::RegisterBase}, }; /// Type specifying the `Gsp` falcon engine. Cannot be instantiated. pub(crate) struct Gsp(()); -impl FalconEngine for Gsp { +impl RegisterBase<PFalconBase> for Gsp { const BASE: usize = 0x00110000; } +impl RegisterBase<PFalcon2Base> for Gsp { + const BASE: usize = 0x00111000; +} + +impl FalconEngine for Gsp { + const ID: Self = Gsp(()); +} + impl Falcon<Gsp> { /// Clears the SWGEN0 bit in the Falcon's IRQ status clear register to /// allow GSP to signal CPU for processing new messages in message queue. 
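    /// Illustrative call site (editor's sketch; the actual caller lives in the GSP
    /// message-queue handling code, outside this hunk):
    ///
    /// ```ignore
    /// // After the CPU has drained the message queue, re-arm the interrupt so GSP
    /// // can signal again:
    /// gsp_falcon.clear_swgen0_intr(bar);
    /// ```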
pub(crate) fn clear_swgen0_intr(&self, bar: &Bar0) { regs::NV_PFALCON_FALCON_IRQSCLR::default() .set_swgen0(true) - .write(bar, Gsp::BASE); + .write(bar, &Gsp::ID); } } diff --git a/drivers/gpu/nova-core/falcon/hal.rs b/drivers/gpu/nova-core/falcon/hal.rs index b233bc365882..bba288455617 100644 --- a/drivers/gpu/nova-core/falcon/hal.rs +++ b/drivers/gpu/nova-core/falcon/hal.rs @@ -13,7 +13,7 @@ mod ga102; /// Implements chipset-specific low-level operations. The trait is generic against [`FalconEngine`] /// so its `BASE` parameter can be used in order to avoid runtime bound checks when accessing /// registers. -pub(crate) trait FalconHal<E: FalconEngine>: Sync { +pub(crate) trait FalconHal<E: FalconEngine>: Send + Sync { /// Activates the Falcon core if the engine is a riscv/falcon dual engine. fn select_core(&self, _falcon: &Falcon<E>, _bar: &Bar0) -> Result { Ok(()) } diff --git a/drivers/gpu/nova-core/falcon/hal/ga102.rs b/drivers/gpu/nova-core/falcon/hal/ga102.rs index 52c33d3f22a8..0b1cbe7853b3 100644 --- a/drivers/gpu/nova-core/falcon/hal/ga102.rs +++ b/drivers/gpu/nova-core/falcon/hal/ga102.rs @@ -16,15 +16,15 @@ use crate::util; use super::FalconHal; fn select_core_ga102<E: FalconEngine>(bar: &Bar0) -> Result { - let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE); + let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID); if bcr_ctrl.core_select() != PeregrineCoreSelect::Falcon { regs::NV_PRISCV_RISCV_BCR_CTRL::default() .set_core_select(PeregrineCoreSelect::Falcon) .write(bar, &E::ID); // TIMEOUT: falcon core should take less than 10ms to report being enabled. util::wait_on(Delta::from_millis(10), || { - let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE); + let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID); if r.valid() { Some(()) } else { @@ -42,50 +42,47 @@ fn signature_reg_fuse_version_ga102( engine_id_mask: u16, ucode_id: u8, ) -> Result<u32> { - // TODO[REGA]: The ucode fuse versions are contained in the - // FUSE_OPT_FPF_<ENGINE>_UCODE<X>_VERSION registers, which are an array. Our register - // definition macros do not allow us to manage them properly, so we need to hardcode their - // addresses for now. Clean this up once we support register arrays. + const NV_FUSE_OPT_FPF_SIZE: u8 = regs::NV_FUSE_OPT_FPF_SIZE as u8; // Each engine has 16 ucode version registers numbered from 1 to 16. - if ucode_id == 0 || ucode_id > 16 { - dev_err!(dev, "invalid ucode id {:#x}", ucode_id); - return Err(EINVAL); - } + let ucode_idx = match ucode_id { + 1..=NV_FUSE_OPT_FPF_SIZE => (ucode_id - 1) as usize, + _ => { + dev_err!(dev, "invalid ucode id {:#x}", ucode_id); + return Err(EINVAL); + } + }; - // Base address of the FUSE registers array corresponding to the engine. - let reg_fuse_base = if engine_id_mask & 0x0001 != 0 { - regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::OFFSET + // `ucode_idx` is guaranteed to be in the range [0..15], making the `read` calls provably valid + // at build-time.
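    // Editor's note (an assumption inferred from the computation below): each
    // UCODE<n>_VERSION fuse appears to encode its version as a mask of set bits, so
    // the version number is recovered as the index of the highest set bit
    // (`BITS - leading_zeros()`; see the TODO[NUMM] further down).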
+ let reg_fuse_version = if engine_id_mask & 0x0001 != 0 { + regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::read(bar, ucode_idx).data() } else if engine_id_mask & 0x0004 != 0 { - regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::OFFSET + regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::read(bar, ucode_idx).data() } else if engine_id_mask & 0x0400 != 0 { - regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::OFFSET + regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::read(bar, ucode_idx).data() } else { dev_err!(dev, "unexpected engine_id_mask {:#x}", engine_id_mask); return Err(EINVAL); }; - // Read `reg_fuse_base[ucode_id - 1]`. - let reg_fuse_version = - bar.read32(reg_fuse_base + ((ucode_id - 1) as usize * core::mem::size_of::<u32>())); - // TODO[NUMM]: replace with `last_set_bit` once it lands. - Ok(u32::BITS - reg_fuse_version.leading_zeros()) + Ok(u16::BITS - reg_fuse_version.leading_zeros()) } fn program_brom_ga102<E: FalconEngine>(bar: &Bar0, params: &FalconBromParams) -> Result { regs::NV_PFALCON2_FALCON_BROM_PARAADDR::default() .set_value(params.pkc_data_offset) - .write(bar, E::BASE); + .write(bar, &E::ID, 0); regs::NV_PFALCON2_FALCON_BROM_ENGIDMASK::default() .set_value(u32::from(params.engine_id_mask)) - .write(bar, E::BASE); + .write(bar, &E::ID); regs::NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID::default() .set_ucode_id(params.ucode_id) - .write(bar, E::BASE); + .write(bar, &E::ID); regs::NV_PFALCON2_FALCON_MOD_SEL::default() .set_algo(FalconModSelAlgo::Rsa3k) - .write(bar, E::BASE); + .write(bar, &E::ID); Ok(()) } diff --git a/drivers/gpu/nova-core/falcon/sec2.rs b/drivers/gpu/nova-core/falcon/sec2.rs index 5147d9e2a7fe..815786c8480d 100644 --- a/drivers/gpu/nova-core/falcon/sec2.rs +++ b/drivers/gpu/nova-core/falcon/sec2.rs @@ -1,10 +1,19 @@ // SPDX-License-Identifier: GPL-2.0 -use crate::falcon::FalconEngine; +use crate::falcon::{FalconEngine, PFalcon2Base, PFalconBase}; +use crate::regs::macros::RegisterBase; /// Type specifying the `Sec2` falcon engine. Cannot be instantiated. pub(crate) struct Sec2(()); -impl FalconEngine for Sec2 { +impl RegisterBase<PFalconBase> for Sec2 { const BASE: usize = 0x00840000; } + +impl RegisterBase<PFalcon2Base> for Sec2 { + const BASE: usize = 0x00841000; +} + +impl FalconEngine for Sec2 { + const ID: Self = Sec2(()); +} diff --git a/drivers/gpu/nova-core/fb.rs b/drivers/gpu/nova-core/fb.rs index 4a702525fff4..68559902ae78 100644 --- a/drivers/gpu/nova-core/fb.rs +++ b/drivers/gpu/nova-core/fb.rs @@ -4,7 +4,7 @@ use core::ops::Range; use kernel::prelude::*; use kernel::sizes::*; -use kernel::types::ARef; +use kernel::sync::aref::ARef; use kernel::{dev_warn, device}; use crate::dma::DmaObject; diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs index 2931912ddba0..4179a74a2342 100644 --- a/drivers/gpu/nova-core/firmware.rs +++ b/drivers/gpu/nova-core/firmware.rs @@ -4,48 +4,36 @@ //! to be loaded into a given execution unit. use core::marker::PhantomData; +use core::mem::size_of; use kernel::device; use kernel::firmware; use kernel::prelude::*; use kernel::str::CString; +use kernel::transmute::FromBytes; use crate::dma::DmaObject; use crate::falcon::FalconFirmware; use crate::gpu; -use crate::gpu::Chipset; +pub(crate) mod booter; pub(crate) mod fwsec; - -pub(crate) const FIRMWARE_VERSION: &str = "535.113.01"; - -/// Structure encapsulating the firmware blobs required for the GPU to operate. 
-#[expect(dead_code)]
-pub(crate) struct Firmware {
-    booter_load: firmware::Firmware,
-    booter_unload: firmware::Firmware,
-    bootloader: firmware::Firmware,
-    gsp: firmware::Firmware,
-}
-
-impl Firmware {
-    pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
-        let mut chip_name = CString::try_from_fmt(fmt!("{chipset}"))?;
-        chip_name.make_ascii_lowercase();
-        let chip_name = &*chip_name;
-
-        let request = |name_| {
-            CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name_}-{ver}.bin"))
-                .and_then(|path| firmware::Firmware::request(&path, dev))
-        };
-
-        Ok(Firmware {
-            booter_load: request("booter_load")?,
-            booter_unload: request("booter_unload")?,
-            bootloader: request("bootloader")?,
-            gsp: request("gsp")?,
-        })
-    }
+pub(crate) mod gsp;
+pub(crate) mod riscv;
+
+pub(crate) const FIRMWARE_VERSION: &str = "570.144";
+
+/// Requests the GPU firmware `name` suitable for `chipset`, with version `ver`.
+fn request_firmware(
+    dev: &device::Device,
+    chipset: gpu::Chipset,
+    name: &str,
+    ver: &str,
+) -> Result<firmware::Firmware> {
+    let chip_name = chipset.name();
+
+    CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name}-{ver}.bin"))
+        .and_then(|path| firmware::Firmware::request(&path, dev))
 }
 
 /// Structure used to describe some firmwares, notably FWSEC-FRTS.
@@ -150,6 +138,65 @@ impl<F: FalconFirmware> FirmwareDmaObject<F, Unsigned> {
     }
 }
 
+/// Header common to most firmware files.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct BinHdr {
+    /// Magic number, must be `0x10de`.
+    bin_magic: u32,
+    /// Version of the header.
+    bin_ver: u32,
+    /// Size in bytes of the binary (to be ignored).
+    bin_size: u32,
+    /// Offset of the start of the application-specific header.
+    header_offset: u32,
+    /// Offset of the start of the data payload.
+    data_offset: u32,
+    /// Size in bytes of the data payload.
+    data_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for BinHdr {}
+
+// A firmware blob starting with a `BinHdr`.
+struct BinFirmware<'a> {
+    hdr: BinHdr,
+    fw: &'a [u8],
+}
+
+impl<'a> BinFirmware<'a> {
+    /// Interprets `fw` as a firmware image starting with a [`BinHdr`], and returns the
+    /// corresponding [`BinFirmware`] that can be used to extract its payload.
+    fn new(fw: &'a firmware::Firmware) -> Result<Self> {
+        const BIN_MAGIC: u32 = 0x10de;
+        let fw = fw.data();
+
+        fw.get(0..size_of::<BinHdr>())
+            // Extract header.
+            .and_then(BinHdr::from_bytes_copy)
+            // Validate header.
+            .and_then(|hdr| {
+                if hdr.bin_magic == BIN_MAGIC {
+                    Some(hdr)
+                } else {
+                    None
+                }
+            })
+            .map(|hdr| Self { hdr, fw })
+            .ok_or(EINVAL)
+    }
+
+    /// Returns the data payload of the firmware, or `None` if the data range is out of bounds of
+    /// the firmware image.
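+    ///
+    /// For example (hypothetical header values), a `data_offset` of 0x100 with a `data_size` of
+    /// 0x40 selects the byte range 0x100..0x140 of the firmware image.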
+    fn data(&self) -> Option<&[u8]> {
+        let fw_start = self.hdr.data_offset as usize;
+        let fw_size = self.hdr.data_size as usize;
+
+        self.fw.get(fw_start..fw_start + fw_size)
+    }
+}
+
 pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
 
 impl<const N: usize> ModInfoBuilder<N> {
@@ -180,8 +227,8 @@ impl<const N: usize> ModInfoBuilder<N> {
         let mut this = Self(firmware::ModInfoBuilder::new(module_name));
         let mut i = 0;
 
-        while i < gpu::Chipset::NAMES.len() {
-            this = this.make_entry_chipset(gpu::Chipset::NAMES[i]);
+        while i < gpu::Chipset::ALL.len() {
+            this = this.make_entry_chipset(gpu::Chipset::ALL[i].name());
             i += 1;
         }
 
diff --git a/drivers/gpu/nova-core/firmware/booter.rs b/drivers/gpu/nova-core/firmware/booter.rs
new file mode 100644
index 000000000000..b4ff1b17e4a0
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/booter.rs
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for loading and patching the `Booter` firmware. `Booter` is a Heavy-Secured firmware
+//! running on [`Sec2`], which is used on Turing/Ampere to load the GSP firmware into the GSP falcon
+//! (and optionally unload it through a separate firmware image).
+
+use core::marker::PhantomData;
+use core::mem::size_of;
+use core::ops::Deref;
+
+use kernel::device;
+use kernel::prelude::*;
+use kernel::transmute::FromBytes;
+
+use crate::dma::DmaObject;
+use crate::driver::Bar0;
+use crate::falcon::sec2::Sec2;
+use crate::falcon::{Falcon, FalconBromParams, FalconFirmware, FalconLoadParams, FalconLoadTarget};
+use crate::firmware::{BinFirmware, FirmwareDmaObject, FirmwareSignature, Signed, Unsigned};
+use crate::gpu::Chipset;
+
+/// Local convenience function to return a copy of `S` by reinterpreting the bytes starting at
+/// `offset` in `slice`.
+fn frombytes_at<S: FromBytes + Sized>(slice: &[u8], offset: usize) -> Result<S> {
+    slice
+        .get(offset..offset + size_of::<S>())
+        .and_then(S::from_bytes_copy)
+        .ok_or(EINVAL)
+}
+
+/// Heavy-Secured firmware header.
+///
+/// Such firmwares have an application-specific payload that needs to be patched with a given
+/// signature.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsHeaderV2 {
+    /// Offset to the start of the signatures.
+    sig_prod_offset: u32,
+    /// Size in bytes of the signatures.
+    sig_prod_size: u32,
+    /// Offset to a `u32` containing the location at which to patch the signature in the microcode
+    /// image.
+    patch_loc_offset: u32,
+    /// Offset to a `u32` containing the index of the signature to patch.
+    patch_sig_offset: u32,
+    /// Start offset to the signature metadata.
+    meta_data_offset: u32,
+    /// Size in bytes of the signature metadata.
+    meta_data_size: u32,
+    /// Offset to a `u32` containing the number of signatures in the signatures section.
+    num_sig_offset: u32,
+    /// Offset of the application-specific header.
+    header_offset: u32,
+    /// Size in bytes of the application-specific header.
+    header_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsHeaderV2 {}
+
+/// Heavy-Secured Firmware image container.
+///
+/// This provides convenient access to the fields of [`HsHeaderV2`] that are actually indices to
+/// read from in the firmware data.
+struct HsFirmwareV2<'a> {
+    hdr: HsHeaderV2,
+    fw: &'a [u8],
+}
+
+impl<'a> HsFirmwareV2<'a> {
+    /// Interprets the header of `bin_fw` as a [`HsHeaderV2`] and returns an instance of
+    /// `HsFirmwareV2` for further parsing.
+ /// + /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image. + fn new(bin_fw: &BinFirmware<'a>) -> Result<Self> { + frombytes_at::<HsHeaderV2>(bin_fw.fw, bin_fw.hdr.header_offset as usize) + .map(|hdr| Self { hdr, fw: bin_fw.fw }) + } + + /// Returns the location at which the signatures should be patched in the microcode image. + /// + /// Fails if the offset of the patch location is outside the bounds of the firmware + /// image. + fn patch_location(&self) -> Result<u32> { + frombytes_at::<u32>(self.fw, self.hdr.patch_loc_offset as usize) + } + + /// Returns an iterator to the signatures of the firmware. The iterator can be empty if the + /// firmware is unsigned. + /// + /// Fails if the pointed signatures are outside the bounds of the firmware image. + fn signatures_iter(&'a self) -> Result<impl Iterator<Item = BooterSignature<'a>>> { + let num_sig = frombytes_at::<u32>(self.fw, self.hdr.num_sig_offset as usize)?; + let iter = match self.hdr.sig_prod_size.checked_div(num_sig) { + // If there are no signatures, return an iterator that will yield zero elements. + None => (&[] as &[u8]).chunks_exact(1), + Some(sig_size) => { + let patch_sig = frombytes_at::<u32>(self.fw, self.hdr.patch_sig_offset as usize)?; + let signatures_start = (self.hdr.sig_prod_offset + patch_sig) as usize; + + self.fw + // Get signatures range. + .get(signatures_start..signatures_start + self.hdr.sig_prod_size as usize) + .ok_or(EINVAL)? + .chunks_exact(sig_size as usize) + } + }; + + // Map the byte slices into signatures. + Ok(iter.map(BooterSignature)) + } +} + +/// Signature parameters, as defined in the firmware. +#[repr(C)] +struct HsSignatureParams { + /// Fuse version to use. + fuse_ver: u32, + /// Mask of engine IDs this firmware applies to. + engine_id_mask: u32, + /// ID of the microcode. + ucode_id: u32, +} + +// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability. +unsafe impl FromBytes for HsSignatureParams {} + +impl HsSignatureParams { + /// Returns the signature parameters contained in `hs_fw`. + /// + /// Fails if the meta data parameter of `hs_fw` is outside the bounds of the firmware image, or + /// if its size doesn't match that of [`HsSignatureParams`]. + fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> { + let start = hs_fw.hdr.meta_data_offset as usize; + let end = start + .checked_add(hs_fw.hdr.meta_data_size as usize) + .ok_or(EINVAL)?; + + hs_fw + .fw + .get(start..end) + .and_then(Self::from_bytes_copy) + .ok_or(EINVAL) + } +} + +/// Header for code and data load offsets. +#[repr(C)] +#[derive(Debug, Clone)] +struct HsLoadHeaderV2 { + // Offset at which the code starts. + os_code_offset: u32, + // Total size of the code, for all apps. + os_code_size: u32, + // Offset at which the data starts. + os_data_offset: u32, + // Size of the data. + os_data_size: u32, + // Number of apps following this header. Each app is described by a [`HsLoadHeaderV2App`]. + num_apps: u32, +} + +// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability. +unsafe impl FromBytes for HsLoadHeaderV2 {} + +impl HsLoadHeaderV2 { + /// Returns the load header contained in `hs_fw`. + /// + /// Fails if the header pointed at by `hs_fw` is not within the bounds of the firmware image. + fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> { + frombytes_at::<Self>(hs_fw.fw, hs_fw.hdr.header_offset as usize) + } +} + +/// Header for app code loader. 
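+///
+/// App headers immediately follow the [`HsLoadHeaderV2`] in the image, so app `idx` is read from
+/// offset `header_offset + size_of::<HsLoadHeaderV2>() + idx * size_of::<HsLoadHeaderV2App>()`,
+/// as computed in [`HsLoadHeaderV2App::new`].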
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsLoadHeaderV2App {
+    /// Offset at which to load the app code.
+    offset: u32,
+    /// Length in bytes of the app code.
+    len: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsLoadHeaderV2App {}
+
+impl HsLoadHeaderV2App {
+    /// Returns the [`HsLoadHeaderV2App`] for app `idx` of `hs_fw`.
+    ///
+    /// Fails if `idx` is larger than the number of apps declared in `hs_fw`, or if the header is
+    /// not within the bounds of the firmware image.
+    fn new(hs_fw: &HsFirmwareV2<'_>, idx: u32) -> Result<Self> {
+        let load_hdr = HsLoadHeaderV2::new(hs_fw)?;
+        if idx >= load_hdr.num_apps {
+            Err(EINVAL)
+        } else {
+            frombytes_at::<Self>(
+                hs_fw.fw,
+                (hs_fw.hdr.header_offset as usize)
+                    // Skip the load header...
+                    .checked_add(size_of::<HsLoadHeaderV2>())
+                    // ... and jump to app header `idx`.
+                    .and_then(|offset| {
+                        offset.checked_add((idx as usize).checked_mul(size_of::<Self>())?)
+                    })
+                    .ok_or(EINVAL)?,
+            )
+        }
+    }
+}
+
+/// Signature for Booter firmware. Signature sizes are encoded into the header and not known at
+/// compile time, so we just wrap byte slices on which we can implement [`FirmwareSignature`].
+struct BooterSignature<'a>(&'a [u8]);
+
+impl<'a> AsRef<[u8]> for BooterSignature<'a> {
+    fn as_ref(&self) -> &[u8] {
+        self.0
+    }
+}
+
+impl<'a> FirmwareSignature<BooterFirmware> for BooterSignature<'a> {}
+
+/// The `Booter` loader firmware, responsible for loading the GSP.
+pub(crate) struct BooterFirmware {
+    // Load parameters for `IMEM` falcon memory.
+    imem_load_target: FalconLoadTarget,
+    // Load parameters for `DMEM` falcon memory.
+    dmem_load_target: FalconLoadTarget,
+    // BROM falcon parameters.
+    brom_params: FalconBromParams,
+    // Device-mapped firmware image.
+    ucode: FirmwareDmaObject<Self, Signed>,
+}
+
+impl FirmwareDmaObject<BooterFirmware, Unsigned> {
+    fn new_booter(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
+        DmaObject::from_data(dev, data).map(|ucode| Self(ucode, PhantomData))
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub(crate) enum BooterKind {
+    Loader,
+    #[expect(unused)]
+    Unloader,
+}
+
+impl BooterFirmware {
+    /// Requests and parses the Booter firmware of `kind` for `chipset`, and patches the correct
+    /// signature so it is ready to be loaded and run on `falcon`.
+    pub(crate) fn new(
+        dev: &device::Device<device::Bound>,
+        kind: BooterKind,
+        chipset: Chipset,
+        ver: &str,
+        falcon: &Falcon<<Self as FalconFirmware>::Target>,
+        bar: &Bar0,
+    ) -> Result<Self> {
+        let fw_name = match kind {
+            BooterKind::Loader => "booter_load",
+            BooterKind::Unloader => "booter_unload",
+        };
+        let fw = super::request_firmware(dev, chipset, fw_name, ver)?;
+        let bin_fw = BinFirmware::new(&fw)?;
+
+        // The binary firmware embeds a Heavy-Secured firmware.
+        let hs_fw = HsFirmwareV2::new(&bin_fw)?;
+
+        // The Heavy-Secured firmware embeds a firmware load descriptor.
+        let load_hdr = HsLoadHeaderV2::new(&hs_fw)?;
+
+        // Offset in `ucode` where to patch the signature.
+        let patch_loc = hs_fw.patch_location()?;
+
+        let sig_params = HsSignatureParams::new(&hs_fw)?;
+        let brom_params = FalconBromParams {
+            // `patch_loc` is an absolute offset into the firmware image, but `pkc_data_offset` is
+            // expected relative to the start of the data section, hence the subtraction of
+            // `load_hdr.os_data_offset`.
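+            // E.g. (hypothetical values): a `patch_loc` of 0x8100 with an `os_data_offset` of
+            // 0x8000 yields a `pkc_data_offset` of 0x100.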
+ pkc_data_offset: patch_loc + .checked_sub(load_hdr.os_data_offset) + .ok_or(EINVAL)?, + engine_id_mask: u16::try_from(sig_params.engine_id_mask).map_err(|_| EINVAL)?, + ucode_id: u8::try_from(sig_params.ucode_id).map_err(|_| EINVAL)?, + }; + let app0 = HsLoadHeaderV2App::new(&hs_fw, 0)?; + + // Object containing the firmware microcode to be signature-patched. + let ucode = bin_fw + .data() + .ok_or(EINVAL) + .and_then(|data| FirmwareDmaObject::<Self, _>::new_booter(dev, data))?; + + let ucode_signed = { + let mut signatures = hs_fw.signatures_iter()?.peekable(); + + if signatures.peek().is_none() { + // If there are no signatures, then the firmware is unsigned. + ucode.no_patch_signature() + } else { + // Obtain the version from the fuse register, and extract the corresponding + // signature. + let reg_fuse_version = falcon.signature_reg_fuse_version( + bar, + brom_params.engine_id_mask, + brom_params.ucode_id, + )?; + + // `0` means the last signature should be used. + const FUSE_VERSION_USE_LAST_SIG: u32 = 0; + let signature = match reg_fuse_version { + FUSE_VERSION_USE_LAST_SIG => signatures.last(), + // Otherwise hardware fuse version needs to be subtracted to obtain the index. + reg_fuse_version => { + let Some(idx) = sig_params.fuse_ver.checked_sub(reg_fuse_version) else { + dev_err!(dev, "invalid fuse version for Booter firmware\n"); + return Err(EINVAL); + }; + signatures.nth(idx as usize) + } + } + .ok_or(EINVAL)?; + + ucode.patch_signature(&signature, patch_loc as usize)? + } + }; + + Ok(Self { + imem_load_target: FalconLoadTarget { + src_start: app0.offset, + dst_start: 0, + len: app0.len, + }, + dmem_load_target: FalconLoadTarget { + src_start: load_hdr.os_data_offset, + dst_start: 0, + len: load_hdr.os_data_size, + }, + brom_params, + ucode: ucode_signed, + }) + } +} + +impl FalconLoadParams for BooterFirmware { + fn imem_load_params(&self) -> FalconLoadTarget { + self.imem_load_target.clone() + } + + fn dmem_load_params(&self) -> FalconLoadTarget { + self.dmem_load_target.clone() + } + + fn brom_params(&self) -> FalconBromParams { + self.brom_params.clone() + } + + fn boot_addr(&self) -> u32 { + self.imem_load_target.src_start + } +} + +impl Deref for BooterFirmware { + type Target = DmaObject; + + fn deref(&self) -> &Self::Target { + &self.ucode.0 + } +} + +impl FalconFirmware for BooterFirmware { + type Target = Sec2; +} diff --git a/drivers/gpu/nova-core/firmware/fwsec.rs b/drivers/gpu/nova-core/firmware/fwsec.rs index 0dff3cfa90af..8edbb5c0572c 100644 --- a/drivers/gpu/nova-core/firmware/fwsec.rs +++ b/drivers/gpu/nova-core/firmware/fwsec.rs @@ -202,9 +202,6 @@ pub(crate) struct FwsecFirmware { ucode: FirmwareDmaObject<Self, Signed>, } -// We need to load full DMEM pages. -const DMEM_LOAD_SIZE_ALIGN: u32 = 256; - impl FalconLoadParams for FwsecFirmware { fn imem_load_params(&self) -> FalconLoadTarget { FalconLoadTarget { @@ -218,11 +215,7 @@ impl FalconLoadParams for FwsecFirmware { FalconLoadTarget { src_start: self.desc.imem_load_size, dst_start: self.desc.dmem_phys_base, - // TODO[NUMM]: replace with `align_up` once it lands. 
-            len: self
-                .desc
-                .dmem_load_size
-                .next_multiple_of(DMEM_LOAD_SIZE_ALIGN),
+            len: self.desc.dmem_load_size,
         }
     }
 
@@ -253,8 +246,8 @@ impl FalconFirmware for FwsecFirmware {
 
 impl FirmwareDmaObject<FwsecFirmware, Unsigned> {
     fn new_fwsec(dev: &Device<device::Bound>, bios: &Vbios, cmd: FwsecCommand) -> Result<Self> {
-        let desc = bios.fwsec_image().header(dev)?;
-        let ucode = bios.fwsec_image().ucode(dev, desc)?;
+        let desc = bios.fwsec_image().header()?;
+        let ucode = bios.fwsec_image().ucode(desc)?;
         let mut dma_object = DmaObject::from_data(dev, ucode)?;
 
         let hdr_offset = (desc.imem_load_size + desc.interface_offset) as usize;
@@ -343,7 +336,7 @@ impl FwsecFirmware {
         let ucode_dma = FirmwareDmaObject::<Self, _>::new_fwsec(dev, bios, cmd)?;
 
         // Patch signature if needed.
-        let desc = bios.fwsec_image().header(dev)?;
+        let desc = bios.fwsec_image().header()?;
         let ucode_signed = if desc.signature_count != 0 {
             let sig_base_img = (desc.imem_load_size + desc.pkc_data_offset) as usize;
             let desc_sig_versions = u32::from(desc.signature_versions);
@@ -382,7 +375,7 @@ impl FwsecFirmware {
             dev_dbg!(dev, "patching signature with index {}\n", signature_idx);
             let signature = bios
                 .fwsec_image()
-                .sigs(dev, desc)
+                .sigs(desc)
                 .and_then(|sigs| sigs.get(signature_idx).ok_or(EINVAL))?;
 
             ucode_dma.patch_signature(signature, sig_base_img)?
diff --git a/drivers/gpu/nova-core/firmware/gsp.rs b/drivers/gpu/nova-core/firmware/gsp.rs
new file mode 100644
index 000000000000..9b70095434c6
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/gsp.rs
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::mem::size_of_val;
+
+use kernel::device;
+use kernel::dma::{DataDirection, DmaAddress};
+use kernel::kvec;
+use kernel::prelude::*;
+use kernel::scatterlist::{Owned, SGTable};
+
+use crate::dma::DmaObject;
+use crate::firmware::riscv::RiscvFirmware;
+use crate::gpu::{Architecture, Chipset};
+use crate::gsp::GSP_PAGE_SIZE;
+
+/// Ad-hoc and temporary module to extract sections from ELF images.
+///
+/// Some firmware images are currently packaged as ELF files, where section names are used as keys
+/// to specific and related bits of data. Future firmware versions are scheduled to move away from
+/// that scheme before nova-core becomes stable, which means this module will eventually be
+/// removed.
+mod elf {
+    use core::mem::size_of;
+
+    use kernel::bindings;
+    use kernel::str::CStr;
+    use kernel::transmute::FromBytes;
+
+    /// Newtype to provide a [`FromBytes`] implementation.
+    #[repr(transparent)]
+    struct Elf64Hdr(bindings::elf64_hdr);
+    // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+    unsafe impl FromBytes for Elf64Hdr {}
+
+    #[repr(transparent)]
+    struct Elf64SHdr(bindings::elf64_shdr);
+    // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+    unsafe impl FromBytes for Elf64SHdr {}
+
+    /// Tries to extract the section named `name` from the ELF64 image `elf`, and returns it.
+    pub(super) fn elf64_section<'a, 'b>(elf: &'a [u8], name: &'b str) -> Option<&'a [u8]> {
+        let hdr = &elf
+            .get(0..size_of::<bindings::elf64_hdr>())
+            .and_then(Elf64Hdr::from_bytes)?
+            .0;
+
+        // Get all the section headers.
+        let mut shdr = {
+            let shdr_num = usize::from(hdr.e_shnum);
+            let shdr_start = usize::try_from(hdr.e_shoff).ok()?;
+            let shdr_end = shdr_num
+                .checked_mul(size_of::<Elf64SHdr>())
+                .and_then(|v| v.checked_add(shdr_start))?;
+
+            elf.get(shdr_start..shdr_end)
+                .map(|slice| slice.chunks_exact(size_of::<Elf64SHdr>()))?
+        };
+
+        // Get the strings table.
+        let strhdr = shdr
+            .clone()
+            .nth(usize::from(hdr.e_shstrndx))
+            .and_then(Elf64SHdr::from_bytes)?;
+
+        // Find the section whose name matches `name` and return it.
+        shdr.find(|&sh| {
+            let Some(hdr) = Elf64SHdr::from_bytes(sh) else {
+                return false;
+            };
+
+            let Some(name_idx) = strhdr
+                .0
+                .sh_offset
+                .checked_add(u64::from(hdr.0.sh_name))
+                .and_then(|idx| usize::try_from(idx).ok())
+            else {
+                return false;
+            };
+
+            // Get the start of the name.
+            elf.get(name_idx..)
+                // Stop at the first `0`.
+                .and_then(|nstr| nstr.get(0..=nstr.iter().position(|b| *b == 0)?))
+                // Convert into CStr. This should never fail because of the line above.
+                .and_then(|nstr| CStr::from_bytes_with_nul(nstr).ok())
+                // Convert into str.
+                .and_then(|c_str| c_str.to_str().ok())
+                // Check that the name matches.
+                .map(|str| str == name)
+                .unwrap_or(false)
+        })
+        // Return the slice containing the section.
+        .and_then(|sh| {
+            let hdr = Elf64SHdr::from_bytes(sh)?;
+            let start = usize::try_from(hdr.0.sh_offset).ok()?;
+            let end = usize::try_from(hdr.0.sh_size)
+                .ok()
+                .and_then(|sh_size| start.checked_add(sh_size))?;
+
+            elf.get(start..end)
+        })
+    }
+}
+
+/// GSP firmware with 3-level radix page tables for the GSP bootloader.
+///
+/// The bootloader expects firmware to be mapped starting at address 0 in GSP's virtual address
+/// space:
+///
+/// ```text
+/// Level 0: 1 page, 1 entry -> points to first level 1 page
+/// Level 1: Multiple pages/entries -> each entry points to a level 2 page
+/// Level 2: Multiple pages/entries -> each entry points to a firmware page
+/// ```
+///
+/// Each page is 4KB, each entry is 8 bytes (64-bit DMA address).
+/// Also known as "Radix3" firmware.
+#[pin_data]
+pub(crate) struct GspFirmware {
+    /// The GSP firmware inside a [`VVec`], device-mapped via a SG table.
+    #[pin]
+    fw: SGTable<Owned<VVec<u8>>>,
+    /// Level 2 page table whose entries contain DMA addresses of firmware pages.
+    #[pin]
+    level2: SGTable<Owned<VVec<u8>>>,
+    /// Level 1 page table whose entries contain DMA addresses of level 2 pages.
+    #[pin]
+    level1: SGTable<Owned<VVec<u8>>>,
+    /// Level 0 page table (single 4KB page) with one entry: DMA address of first level 1 page.
+    level0: DmaObject,
+    /// Size in bytes of the firmware contained in [`Self::fw`].
+    size: usize,
+    /// Device-mapped GSP signatures matching the GPU's [`Chipset`].
+    signatures: DmaObject,
+    /// GSP bootloader, verifies the GSP firmware before loading and running it.
+    bootloader: RiscvFirmware,
+}
+
+impl GspFirmware {
+    /// Loads the GSP firmware binaries, maps them into `dev`'s address space, and creates the
+    /// page tables expected by the GSP bootloader to load it.
+    pub(crate) fn new<'a, 'b>(
+        dev: &'a device::Device<device::Bound>,
+        chipset: Chipset,
+        ver: &'b str,
+    ) -> Result<impl PinInit<Self, Error> + 'a> {
+        let fw = super::request_firmware(dev, chipset, "gsp", ver)?;
+
+        let fw_section = elf::elf64_section(fw.data(), ".fwimage").ok_or(EINVAL)?;
+
+        let sigs_section = match chipset.arch() {
+            Architecture::Ampere => ".fwsignature_ga10x",
+            _ => return Err(ENOTSUPP),
+        };
+        let signatures = elf::elf64_section(fw.data(), sigs_section)
+            .ok_or(EINVAL)
+            .and_then(|data| DmaObject::from_data(dev, data))?;
+
+        let size = fw_section.len();
+
+        // Move the firmware into a vmalloc'd vector and map it into the device address
+        // space.
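+        // (A vmalloc'd buffer is assumed preferable here to a single physically-contiguous
+        // allocation since the GSP firmware is large; the SG table built from it below provides
+        // the per-page device mapping.)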
+        let fw_vvec = VVec::with_capacity(fw_section.len(), GFP_KERNEL)
+            .and_then(|mut v| {
+                v.extend_from_slice(fw_section, GFP_KERNEL)?;
+                Ok(v)
+            })
+            .map_err(|_| ENOMEM)?;
+
+        let bl = super::request_firmware(dev, chipset, "bootloader", ver)?;
+        let bootloader = RiscvFirmware::new(dev, &bl)?;
+
+        Ok(try_pin_init!(Self {
+            fw <- SGTable::new(dev, fw_vvec, DataDirection::ToDevice, GFP_KERNEL),
+            level2 <- {
+                // Allocate the level 2 page table, map the firmware onto it, and map it into the
+                // device address space.
+                VVec::<u8>::with_capacity(
+                    fw.iter().count() * core::mem::size_of::<u64>(),
+                    GFP_KERNEL,
+                )
+                .map_err(|_| ENOMEM)
+                .and_then(|level2| map_into_lvl(&fw, level2))
+                .map(|level2| SGTable::new(dev, level2, DataDirection::ToDevice, GFP_KERNEL))?
+            },
+            level1 <- {
+                // Allocate the level 1 page table, map the level 2 page table onto it, and map it
+                // into the device address space.
+                VVec::<u8>::with_capacity(
+                    level2.iter().count() * core::mem::size_of::<u64>(),
+                    GFP_KERNEL,
+                )
+                .map_err(|_| ENOMEM)
+                .and_then(|level1| map_into_lvl(&level2, level1))
+                .map(|level1| SGTable::new(dev, level1, DataDirection::ToDevice, GFP_KERNEL))?
+            },
+            level0: {
+                // Allocate the level 0 page table as a device-visible DMA object, and map the
+                // level 1 page table onto it.
+
+                // Level 0 page table data.
+                let mut level0_data = kvec![0u8; GSP_PAGE_SIZE]?;
+
+                // Fill level 1 page entry.
+                #[allow(clippy::useless_conversion)]
+                let level1_entry = u64::from(level1.iter().next().unwrap().dma_address());
+                let dst = &mut level0_data[..size_of_val(&level1_entry)];
+                dst.copy_from_slice(&level1_entry.to_le_bytes());
+
+                // Turn the level0 page table into a [`DmaObject`].
+                DmaObject::from_data(dev, &level0_data)?
+            },
+            size,
+            signatures,
+            bootloader,
+        }))
+    }
+
+    #[expect(unused)]
+    /// Returns the DMA handle of the radix3 level 0 page table.
+    pub(crate) fn radix3_dma_handle(&self) -> DmaAddress {
+        self.level0.dma_handle()
+    }
+}
+
+/// Build a page table from a scatter-gather list.
+///
+/// Takes each DMA-mapped region from `sg_table` and writes page table entries
+/// for all 4KB pages within that region. For example, a 16KB SG entry becomes
+/// 4 consecutive page table entries.
+fn map_into_lvl(sg_table: &SGTable<Owned<VVec<u8>>>, mut dst: VVec<u8>) -> Result<VVec<u8>> {
+    for sg_entry in sg_table.iter() {
+        // Number of pages we need to map.
+        let num_pages = (sg_entry.dma_len() as usize).div_ceil(GSP_PAGE_SIZE);
+
+        for i in 0..num_pages {
+            let entry = sg_entry.dma_address() + (i as u64 * GSP_PAGE_SIZE as u64);
+            dst.extend_from_slice(&entry.to_le_bytes(), GFP_KERNEL)?;
+        }
+    }
+
+    Ok(dst)
+}
diff --git a/drivers/gpu/nova-core/firmware/riscv.rs b/drivers/gpu/nova-core/firmware/riscv.rs
new file mode 100644
index 000000000000..afb08f5bc4ba
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/riscv.rs
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for firmware binaries designed to run on a RISC-V core. Such firmware files have a
+//! dedicated header.
+
+use core::mem::size_of;
+
+use kernel::device;
+use kernel::firmware::Firmware;
+use kernel::prelude::*;
+use kernel::transmute::FromBytes;
+
+use crate::dma::DmaObject;
+use crate::firmware::BinFirmware;
+
+/// Descriptor for microcode running on a RISC-V core.
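+///
+/// Only the monitor code/data offsets, the manifest offset and the application version are
+/// currently consumed (see [`RiscvFirmware::new`]); the other fields are kept so that the struct
+/// matches the on-disk layout.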
+#[repr(C)] +#[derive(Debug)] +struct RmRiscvUCodeDesc { + version: u32, + bootloader_offset: u32, + bootloader_size: u32, + bootloader_param_offset: u32, + bootloader_param_size: u32, + riscv_elf_offset: u32, + riscv_elf_size: u32, + app_version: u32, + manifest_offset: u32, + manifest_size: u32, + monitor_data_offset: u32, + monitor_data_size: u32, + monitor_code_offset: u32, + monitor_code_size: u32, +} + +// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability. +unsafe impl FromBytes for RmRiscvUCodeDesc {} + +impl RmRiscvUCodeDesc { + /// Interprets the header of `bin_fw` as a [`RmRiscvUCodeDesc`] and returns it. + /// + /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image. + fn new(bin_fw: &BinFirmware<'_>) -> Result<Self> { + let offset = bin_fw.hdr.header_offset as usize; + + bin_fw + .fw + .get(offset..offset + size_of::<Self>()) + .and_then(Self::from_bytes_copy) + .ok_or(EINVAL) + } +} + +/// A parsed firmware for a RISC-V core, ready to be loaded and run. +#[expect(unused)] +pub(crate) struct RiscvFirmware { + /// Offset at which the code starts in the firmware image. + code_offset: u32, + /// Offset at which the data starts in the firmware image. + data_offset: u32, + /// Offset at which the manifest starts in the firmware image. + manifest_offset: u32, + /// Application version. + app_version: u32, + /// Device-mapped firmware image. + ucode: DmaObject, +} + +impl RiscvFirmware { + /// Parses the RISC-V firmware image contained in `fw`. + pub(crate) fn new(dev: &device::Device<device::Bound>, fw: &Firmware) -> Result<Self> { + let bin_fw = BinFirmware::new(fw)?; + + let riscv_desc = RmRiscvUCodeDesc::new(&bin_fw)?; + + let ucode = { + let start = bin_fw.hdr.data_offset as usize; + let len = bin_fw.hdr.data_size as usize; + + DmaObject::from_data(dev, fw.data().get(start..start + len).ok_or(EINVAL)?)? + }; + + Ok(Self { + ucode, + code_offset: riscv_desc.monitor_code_offset, + data_offset: riscv_desc.monitor_data_offset, + manifest_offset: riscv_desc.manifest_offset, + app_version: riscv_desc.app_version, + }) + } +} diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs index b5c9786619a9..5da9ad726483 100644 --- a/drivers/gpu/nova-core/gpu.rs +++ b/drivers/gpu/nova-core/gpu.rs @@ -3,15 +3,11 @@ use kernel::{device, devres::Devres, error::code::*, pci, prelude::*, sync::Arc}; use crate::driver::Bar0; -use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon}; -use crate::fb::FbLayout; +use crate::falcon::{gsp::Gsp as GspFalcon, sec2::Sec2 as Sec2Falcon, Falcon}; use crate::fb::SysmemFlush; -use crate::firmware::fwsec::{FwsecCommand, FwsecFirmware}; -use crate::firmware::{Firmware, FIRMWARE_VERSION}; use crate::gfw; +use crate::gsp::Gsp; use crate::regs; -use crate::util; -use crate::vbios::Vbios; use core::fmt; macro_rules! define_chipset { @@ -28,13 +24,23 @@ macro_rules! define_chipset { $( Chipset::$variant, )* ]; - pub(crate) const NAMES: [&'static str; Self::ALL.len()] = [ - $( util::const_bytes_to_str( - util::to_lowercase_bytes::<{ stringify!($variant).len() }>( - stringify!($variant) - ).as_slice() - ), )* - ]; + ::kernel::macros::paste!( + /// Returns the name of this chipset, in lowercase. 
+ /// + /// # Examples + /// + /// ``` + /// let chipset = Chipset::GA102; + /// assert_eq!(chipset.name(), "ga102"); + /// ``` + pub(crate) const fn name(&self) -> &'static str { + match *self { + $( + Chipset::$variant => stringify!([<$variant:lower>]), + )* + } + } + ); } // TODO[FPRI]: replace with something like derive(FromPrimitive) @@ -163,150 +169,74 @@ impl Spec { } /// Structure holding the resources required to operate the GPU. -#[pin_data(PinnedDrop)] +#[pin_data] pub(crate) struct Gpu { spec: Spec, /// MMIO mapping of PCI BAR 0 bar: Arc<Devres<Bar0>>, - fw: Firmware, /// System memory page required for flushing all pending GPU-side memory writes done through /// PCIE into system memory, via sysmembar (A GPU-initiated HW memory-barrier operation). sysmem_flush: SysmemFlush, -} - -#[pinned_drop] -impl PinnedDrop for Gpu { - fn drop(self: Pin<&mut Self>) { - // Unregister the sysmem flush page before we release it. - self.bar - .try_access_with(|b| self.sysmem_flush.unregister(b)); - } + /// GSP falcon instance, used for GSP boot up and cleanup. + gsp_falcon: Falcon<GspFalcon>, + /// SEC2 falcon instance, used for GSP boot up and cleanup. + sec2_falcon: Falcon<Sec2Falcon>, + /// GSP runtime data. Temporarily an empty placeholder. + #[pin] + gsp: Gsp, } impl Gpu { - /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly - /// created the WPR2 region. - /// - /// TODO: this needs to be moved into a larger type responsible for booting the whole GSP - /// (`GspBooter`?). - fn run_fwsec_frts( - dev: &device::Device<device::Bound>, - falcon: &Falcon<Gsp>, - bar: &Bar0, - bios: &Vbios, - fb_layout: &FbLayout, - ) -> Result<()> { - // Check that the WPR2 region does not already exists - if it does, we cannot run - // FWSEC-FRTS until the GPU is reset. - if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 { - dev_err!( - dev, - "WPR2 region already exists - GPU needs to be reset to proceed\n" - ); - return Err(EBUSY); - } + pub(crate) fn new<'a>( + pdev: &'a pci::Device<device::Bound>, + devres_bar: Arc<Devres<Bar0>>, + bar: &'a Bar0, + ) -> impl PinInit<Self, Error> + 'a { + try_pin_init!(Self { + spec: Spec::new(bar).inspect(|spec| { + dev_info!( + pdev.as_ref(), + "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n", + spec.chipset, + spec.chipset.arch(), + spec.revision + ); + })?, - let fwsec_frts = FwsecFirmware::new( - dev, - falcon, - bar, - bios, - FwsecCommand::Frts { - frts_addr: fb_layout.frts.start, - frts_size: fb_layout.frts.end - fb_layout.frts.start, + // We must wait for GFW_BOOT completion before doing any significant setup on the GPU. + _: { + gfw::wait_gfw_boot_completion(bar) + .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?; }, - )?; - // Run FWSEC-FRTS to create the WPR2 region. - fwsec_frts.run(dev, falcon, bar)?; + sysmem_flush: SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?, - // SCRATCH_E contains the error code for FWSEC-FRTS. - let frts_status = regs::NV_PBUS_SW_SCRATCH_0E::read(bar).frts_err_code(); - if frts_status != 0 { - dev_err!( - dev, - "FWSEC-FRTS returned with error code {:#x}", - frts_status - ); + gsp_falcon: Falcon::new( + pdev.as_ref(), + spec.chipset, + bar, + spec.chipset > Chipset::GA100, + ) + .inspect(|falcon| falcon.clear_swgen0_intr(bar))?, - return Err(EIO); - } + sec2_falcon: Falcon::new(pdev.as_ref(), spec.chipset, bar, true)?, - // Check that the WPR2 region has been created as we requested. 
- let (wpr2_lo, wpr2_hi) = ( - regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(), - regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(), - ); + gsp <- Gsp::new(), - match (wpr2_lo, wpr2_hi) { - (_, 0) => { - dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n"); + _: { gsp.boot(pdev, bar, spec.chipset, gsp_falcon, sec2_falcon)? }, - Err(EIO) - } - (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => { - dev_err!( - dev, - "WPR2 region created at unexpected address {:#x}; expected {:#x}\n", - wpr2_lo, - fb_layout.frts.start, - ); - - Err(EIO) - } - (wpr2_lo, wpr2_hi) => { - dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi); - dev_dbg!(dev, "GPU instance built\n"); - - Ok(()) - } - } + bar: devres_bar, + }) } - pub(crate) fn new( - pdev: &pci::Device<device::Bound>, - devres_bar: Arc<Devres<Bar0>>, - ) -> Result<impl PinInit<Self>> { - let bar = devres_bar.access(pdev.as_ref())?; - let spec = Spec::new(bar)?; - let fw = Firmware::new(pdev.as_ref(), spec.chipset, FIRMWARE_VERSION)?; - - dev_info!( - pdev.as_ref(), - "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n", - spec.chipset, - spec.chipset.arch(), - spec.revision - ); - - // We must wait for GFW_BOOT completion before doing any significant setup on the GPU. - gfw::wait_gfw_boot_completion(bar) - .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?; - - let sysmem_flush = SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?; - - let gsp_falcon = Falcon::<Gsp>::new( - pdev.as_ref(), - spec.chipset, - bar, - spec.chipset > Chipset::GA100, - )?; - gsp_falcon.clear_swgen0_intr(bar); - - let _sec2_falcon = Falcon::<Sec2>::new(pdev.as_ref(), spec.chipset, bar, true)?; - - let fb_layout = FbLayout::new(spec.chipset, bar)?; - dev_dbg!(pdev.as_ref(), "{:#x?}\n", fb_layout); - - let bios = Vbios::new(pdev, bar)?; - - Self::run_fwsec_frts(pdev.as_ref(), &gsp_falcon, bar, &bios, &fb_layout)?; - - Ok(pin_init!(Self { - spec, - bar: devres_bar, - fw, - sysmem_flush, - })) + /// Called when the corresponding [`Device`](device::Device) is unbound. + /// + /// Note: This method must only be called from `Driver::unbind`. + pub(crate) fn unbind(&self, dev: &device::Device<device::Core>) { + kernel::warn_on!(self + .bar + .access(dev) + .inspect(|bar| self.sysmem_flush.unregister(bar)) + .is_err()); } } diff --git a/drivers/gpu/nova-core/gsp.rs b/drivers/gpu/nova-core/gsp.rs new file mode 100644 index 000000000000..64e472e7a9d3 --- /dev/null +++ b/drivers/gpu/nova-core/gsp.rs @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 + +mod boot; + +use kernel::prelude::*; + +mod fw; + +pub(crate) const GSP_PAGE_SHIFT: usize = 12; +pub(crate) const GSP_PAGE_SIZE: usize = 1 << GSP_PAGE_SHIFT; + +/// GSP runtime data. +/// +/// This is an empty pinned placeholder for now. 
+#[pin_data]
+pub(crate) struct Gsp {}
+
+impl Gsp {
+    pub(crate) fn new() -> impl PinInit<Self> {
+        pin_init!(Self {})
+    }
+}
diff --git a/drivers/gpu/nova-core/gsp/boot.rs b/drivers/gpu/nova-core/gsp/boot.rs
new file mode 100644
index 000000000000..2800f3aee37d
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/boot.rs
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::device;
+use kernel::pci;
+use kernel::prelude::*;
+
+use crate::driver::Bar0;
+use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
+use crate::fb::FbLayout;
+use crate::firmware::{
+    booter::{BooterFirmware, BooterKind},
+    fwsec::{FwsecCommand, FwsecFirmware},
+    gsp::GspFirmware,
+    FIRMWARE_VERSION,
+};
+use crate::gpu::Chipset;
+use crate::regs;
+use crate::vbios::Vbios;
+
+impl super::Gsp {
+    /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
+    /// created the WPR2 region.
+    fn run_fwsec_frts(
+        dev: &device::Device<device::Bound>,
+        falcon: &Falcon<Gsp>,
+        bar: &Bar0,
+        bios: &Vbios,
+        fb_layout: &FbLayout,
+    ) -> Result<()> {
+        // Check that the WPR2 region does not already exist - if it does, we cannot run
+        // FWSEC-FRTS until the GPU is reset.
+        if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
+            dev_err!(
+                dev,
+                "WPR2 region already exists - GPU needs to be reset to proceed\n"
+            );
+            return Err(EBUSY);
+        }
+
+        let fwsec_frts = FwsecFirmware::new(
+            dev,
+            falcon,
+            bar,
+            bios,
+            FwsecCommand::Frts {
+                frts_addr: fb_layout.frts.start,
+                frts_size: fb_layout.frts.end - fb_layout.frts.start,
+            },
+        )?;
+
+        // Run FWSEC-FRTS to create the WPR2 region.
+        fwsec_frts.run(dev, falcon, bar)?;
+
+        // SCRATCH_E contains the error code for FWSEC-FRTS.
+        let frts_status = regs::NV_PBUS_SW_SCRATCH_0E_FRTS_ERR::read(bar).frts_err_code();
+        if frts_status != 0 {
+            dev_err!(
+                dev,
+                "FWSEC-FRTS returned with error code {:#x}",
+                frts_status
+            );
+
+            return Err(EIO);
+        }
+
+        // Check that the WPR2 region has been created as we requested.
+        let (wpr2_lo, wpr2_hi) = (
+            regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
+            regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
+        );
+
+        match (wpr2_lo, wpr2_hi) {
+            (_, 0) => {
+                dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
+
+                Err(EIO)
+            }
+            (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
+                dev_err!(
+                    dev,
+                    "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
+                    wpr2_lo,
+                    fb_layout.frts.start,
+                );
+
+                Err(EIO)
+            }
+            (wpr2_lo, wpr2_hi) => {
+                dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
+                dev_dbg!(dev, "GPU instance built\n");
+
+                Ok(())
+            }
+        }
+    }
+
+    /// Attempts to boot the GSP.
+    ///
+    /// This is a GPU-dependent and complex procedure that involves loading firmware files from
+    /// user-space, patching them with signatures, and building firmware-specific intricate data
+    /// structures that the GSP will use at runtime.
+    ///
+    /// Upon successful return, the GSP is up and running.
+ pub(crate) fn boot( + self: Pin<&mut Self>, + pdev: &pci::Device<device::Bound>, + bar: &Bar0, + chipset: Chipset, + gsp_falcon: &Falcon<Gsp>, + sec2_falcon: &Falcon<Sec2>, + ) -> Result { + let dev = pdev.as_ref(); + + let bios = Vbios::new(dev, bar)?; + + let _gsp_fw = KBox::pin_init( + GspFirmware::new(dev, chipset, FIRMWARE_VERSION)?, + GFP_KERNEL, + )?; + + let fb_layout = FbLayout::new(chipset, bar)?; + dev_dbg!(dev, "{:#x?}\n", fb_layout); + + Self::run_fwsec_frts(dev, gsp_falcon, bar, &bios, &fb_layout)?; + + let _booter_loader = BooterFirmware::new( + dev, + BooterKind::Loader, + chipset, + FIRMWARE_VERSION, + sec2_falcon, + bar, + )?; + + Ok(()) + } +} diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs new file mode 100644 index 000000000000..34226dd00982 --- /dev/null +++ b/drivers/gpu/nova-core/gsp/fw.rs @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 + +mod r570_144; + +// Alias to avoid repeating the version number with every use. +#[expect(unused)] +use r570_144 as bindings; diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144.rs b/drivers/gpu/nova-core/gsp/fw/r570_144.rs new file mode 100644 index 000000000000..35cb0370a7c9 --- /dev/null +++ b/drivers/gpu/nova-core/gsp/fw/r570_144.rs @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Firmware bindings. +//! +//! Imports the generated bindings by `bindgen`. +//! +//! This module may not be directly used. Please abstract or re-export the needed symbols in the +//! parent module instead. + +#![cfg_attr(test, allow(deref_nullptr))] +#![cfg_attr(test, allow(unaligned_references))] +#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))] +#![allow( + dead_code, + unused_imports, + clippy::all, + clippy::undocumented_unsafe_blocks, + clippy::ptr_as_ptr, + clippy::ref_as_ptr, + missing_docs, + non_camel_case_types, + non_upper_case_globals, + non_snake_case, + improper_ctypes, + unreachable_pub, + unsafe_op_in_unsafe_fn +)] +use kernel::ffi; +include!("r570_144/bindings.rs"); diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs new file mode 100644 index 000000000000..cec594032515 --- /dev/null +++ b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs @@ -0,0 +1 @@ +// SPDX-License-Identifier: GPL-2.0 diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs index cb2bbb30cba1..fffcaee2249f 100644 --- a/drivers/gpu/nova-core/nova_core.rs +++ b/drivers/gpu/nova-core/nova_core.rs @@ -9,6 +9,7 @@ mod fb; mod firmware; mod gfw; mod gpu; +mod gsp; mod regs; mod util; mod vbios; diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs index d49fddf6a3c6..206dab2e1335 100644 --- a/drivers/gpu/nova-core/regs.rs +++ b/drivers/gpu/nova-core/regs.rs @@ -5,11 +5,11 @@ #![allow(non_camel_case_types)] #[macro_use] -mod macros; +pub(crate) mod macros; use crate::falcon::{ DmaTrfCmdSize, FalconCoreRev, FalconCoreRevSubversion, FalconFbifMemType, FalconFbifTarget, - FalconModSelAlgo, FalconSecurityModel, PeregrineCoreSelect, + FalconModSelAlgo, FalconSecurityModel, PFalcon2Base, PFalconBase, PeregrineCoreSelect, }; use crate::gpu::{Architecture, Chipset}; use kernel::prelude::*; @@ -28,7 +28,7 @@ impl NV_PMC_BOOT_0 { /// Combines `architecture_0` and `architecture_1` to obtain the architecture of the chip. 
pub(crate) fn architecture(self) -> Result<Architecture> { Architecture::try_from( - self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0.len()), + self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0_RANGE.len()), ) } @@ -36,7 +36,8 @@ impl NV_PMC_BOOT_0 { pub(crate) fn chipset(self) -> Result<Chipset> { self.architecture() .map(|arch| { - ((arch as u32) << Self::IMPLEMENTATION.len()) | u32::from(self.implementation()) + ((arch as u32) << Self::IMPLEMENTATION_RANGE.len()) + | u32::from(self.implementation()) }) .and_then(Chipset::try_from) } @@ -44,8 +45,10 @@ impl NV_PMC_BOOT_0 { // PBUS -// TODO[REGA]: this is an array of registers. -register!(NV_PBUS_SW_SCRATCH_0E@0x00001438 { +register!(NV_PBUS_SW_SCRATCH @ 0x00001400[64] {}); + +register!(NV_PBUS_SW_SCRATCH_0E_FRTS_ERR => NV_PBUS_SW_SCRATCH[0xe], + "scratch register 0xe used as FRTS firmware error code" { 31:16 frts_err_code as u16; }); @@ -123,13 +126,12 @@ register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK @ 0x00118128, 0:0 read_protection_level0 as bool, "Set after FWSEC lowers its protection level"; }); -// TODO[REGA]: This is an array of registers. -register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234 { - 31:0 value as u32; -}); +// OpenRM defines this as a register array, but doesn't specify its size and only uses its first +// element. Be conservative until we know the actual size or need to use more registers. +register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234[1] {}); register!( - NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05, + NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05[0], "Scratch group 05 register 0 used as GFW boot progress indicator" { 7:0 progress as u8, "Progress of GFW boot (0xff means completed)"; } @@ -180,38 +182,40 @@ impl NV_PDISP_VGA_WORKSPACE_BASE { // FUSE -register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100 { +pub(crate) const NV_FUSE_OPT_FPF_SIZE: usize = 16; + +register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100[NV_FUSE_OPT_FPF_SIZE] { 15:0 data as u16; }); -register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140 { +register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140[NV_FUSE_OPT_FPF_SIZE] { 15:0 data as u16; }); -register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0 { +register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0[NV_FUSE_OPT_FPF_SIZE] { 15:0 data as u16; }); // PFALCON -register!(NV_PFALCON_FALCON_IRQSCLR @ +0x00000004 { +register!(NV_PFALCON_FALCON_IRQSCLR @ PFalconBase[0x00000004] { 4:4 halt as bool; 6:6 swgen0 as bool; }); -register!(NV_PFALCON_FALCON_MAILBOX0 @ +0x00000040 { +register!(NV_PFALCON_FALCON_MAILBOX0 @ PFalconBase[0x00000040] { 31:0 value as u32; }); -register!(NV_PFALCON_FALCON_MAILBOX1 @ +0x00000044 { +register!(NV_PFALCON_FALCON_MAILBOX1 @ PFalconBase[0x00000044] { 31:0 value as u32; }); -register!(NV_PFALCON_FALCON_RM @ +0x00000084 { +register!(NV_PFALCON_FALCON_RM @ PFalconBase[0x00000084] { 31:0 value as u32; }); -register!(NV_PFALCON_FALCON_HWCFG2 @ +0x000000f4 { +register!(NV_PFALCON_FALCON_HWCFG2 @ PFalconBase[0x000000f4] { 10:10 riscv as bool; 12:12 mem_scrubbing as bool, "Set to 0 after memory scrubbing is completed"; 31:31 reset_ready as bool, "Signal indicating that reset is completed (GA102+)"; @@ -224,17 +228,17 @@ impl NV_PFALCON_FALCON_HWCFG2 { } } -register!(NV_PFALCON_FALCON_CPUCTL @ +0x00000100 { +register!(NV_PFALCON_FALCON_CPUCTL @ PFalconBase[0x00000100] { 1:1 startcpu as bool; 4:4 
halted as bool; 6:6 alias_en as bool; }); -register!(NV_PFALCON_FALCON_BOOTVEC @ +0x00000104 { +register!(NV_PFALCON_FALCON_BOOTVEC @ PFalconBase[0x00000104] { 31:0 value as u32; }); -register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c { +register!(NV_PFALCON_FALCON_DMACTL @ PFalconBase[0x0000010c] { 0:0 require_ctx as bool; 1:1 dmem_scrubbing as bool; 2:2 imem_scrubbing as bool; @@ -242,15 +246,15 @@ register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c { 7:7 secure_stat as bool; }); -register!(NV_PFALCON_FALCON_DMATRFBASE @ +0x00000110 { +register!(NV_PFALCON_FALCON_DMATRFBASE @ PFalconBase[0x00000110] { 31:0 base as u32; }); -register!(NV_PFALCON_FALCON_DMATRFMOFFS @ +0x00000114 { +register!(NV_PFALCON_FALCON_DMATRFMOFFS @ PFalconBase[0x00000114] { 23:0 offs as u32; }); -register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 { +register!(NV_PFALCON_FALCON_DMATRFCMD @ PFalconBase[0x00000118] { 0:0 full as bool; 1:1 idle as bool; 3:2 sec as u8; @@ -261,60 +265,62 @@ register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 { 16:16 set_dmtag as u8; }); -register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ +0x0000011c { +register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ PFalconBase[0x0000011c] { 31:0 offs as u32; }); -register!(NV_PFALCON_FALCON_DMATRFBASE1 @ +0x00000128 { +register!(NV_PFALCON_FALCON_DMATRFBASE1 @ PFalconBase[0x00000128] { 8:0 base as u16; }); -register!(NV_PFALCON_FALCON_HWCFG1 @ +0x0000012c { +register!(NV_PFALCON_FALCON_HWCFG1 @ PFalconBase[0x0000012c] { 3:0 core_rev as u8 ?=> FalconCoreRev, "Core revision"; 5:4 security_model as u8 ?=> FalconSecurityModel, "Security model"; 7:6 core_rev_subversion as u8 ?=> FalconCoreRevSubversion, "Core revision subversion"; }); -register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ +0x00000130 { +register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ PFalconBase[0x00000130] { 1:1 startcpu as bool; }); // Actually known as `NV_PSEC_FALCON_ENGINE` and `NV_PGSP_FALCON_ENGINE` depending on the falcon // instance. -register!(NV_PFALCON_FALCON_ENGINE @ +0x000003c0 { +register!(NV_PFALCON_FALCON_ENGINE @ PFalconBase[0x000003c0] { 0:0 reset as bool; }); -// TODO[REGA]: this is an array of registers. -register!(NV_PFALCON_FBIF_TRANSCFG @ +0x00000600 { +register!(NV_PFALCON_FBIF_TRANSCFG @ PFalconBase[0x00000600[8]] { 1:0 target as u8 ?=> FalconFbifTarget; 2:2 mem_type as bool => FalconFbifMemType; }); -register!(NV_PFALCON_FBIF_CTL @ +0x00000624 { +register!(NV_PFALCON_FBIF_CTL @ PFalconBase[0x00000624] { 7:7 allow_phys_no_ctx as bool; }); -register!(NV_PFALCON2_FALCON_MOD_SEL @ +0x00001180 { +/* PFALCON2 */ + +register!(NV_PFALCON2_FALCON_MOD_SEL @ PFalcon2Base[0x00000180] { 7:0 algo as u8 ?=> FalconModSelAlgo; }); -register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ +0x00001198 { +register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ PFalcon2Base[0x00000198] { 7:0 ucode_id as u8; }); -register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ +0x0000119c { +register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ PFalcon2Base[0x0000019c] { 31:0 value as u32; }); -// TODO[REGA]: this is an array of registers. -register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ +0x00001210 { +// OpenRM defines this as a register array, but doesn't specify its size and only uses its first +// element. Be conservative until we know the actual size or need to use more registers. 
+register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ PFalcon2Base[0x00000210[1]] { 31:0 value as u32; }); // PRISCV -register!(NV_PRISCV_RISCV_BCR_CTRL @ +0x00001668 { +register!(NV_PRISCV_RISCV_BCR_CTRL @ PFalconBase[0x00001668] { 0:0 valid as bool; 4:4 core_select as bool => PeregrineCoreSelect; 8:8 br_fetch as bool; diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs index a3e6de1779d4..754c14ee7f40 100644 --- a/drivers/gpu/nova-core/regs/macros.rs +++ b/drivers/gpu/nova-core/regs/macros.rs @@ -1,17 +1,27 @@ // SPDX-License-Identifier: GPL-2.0 -//! Macro to define register layout and accessors. +//! `register!` macro to define register layout and accessors. //! //! A single register typically includes several fields, which are accessed through a combination //! of bit-shift and mask operations that introduce a class of potential mistakes, notably because //! not all possible field values are necessarily valid. //! -//! The macro in this module allow to define, using an intruitive and readable syntax, a dedicated -//! type for each register with its own field accessors that can return an error is a field's value -//! is invalid. +//! The `register!` macro in this module provides an intuitive and readable syntax for defining a +//! dedicated type for each register. Each such type comes with its own field accessors that can +//! return an error if a field's value is invalid. -/// Defines a dedicated type for a register with an absolute offset, alongside with getter and -/// setter methods for its fields and methods to read and write it from an `Io` region. +/// Trait providing a base address to be added to the offset of a relative register to obtain +/// its actual offset. +/// +/// The `T` generic argument is used to distinguish which base to use, in case a type provides +/// several bases. It is given to the `register!` macro to restrict the use of the register to +/// implementors of this particular variant. +pub(crate) trait RegisterBase<T> { + const BASE: usize; +} + +/// Defines a dedicated type for a register with an absolute offset, including getter and setter +/// methods for its fields and methods to read and write it from an `Io` region. /// /// Example: /// @@ -24,7 +34,7 @@ /// ``` /// /// This defines a `BOOT_0` type which can be read or written from offset `0x100` of an `Io` -/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 less +/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 least /// significant bits of the register. Each field can be accessed and modified using accessor /// methods: /// @@ -33,130 +43,344 @@ /// let boot0 = BOOT_0::read(&bar); /// pr_info!("chip revision: {}.{}", boot0.major_revision(), boot0.minor_revision()); /// -/// // `Chipset::try_from` will be called with the value of the field and returns an error if the -/// // value is invalid. +/// // `Chipset::try_from` is called with the value of the `chipset` field and returns an +/// // error if it is invalid. /// let chipset = boot0.chipset()?; /// /// // Update some fields and write the value back. 
 /// boot0.set_major_revision(3).set_minor_revision(10).write(&bar);
 ///
-/// // Or just read and update the register in a single step:
+/// // Or, just read and update the register in a single step:
 /// BOOT_0::alter(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
 /// ```
 ///
-/// Fields can be defined as follows:
+/// Fields are defined as follows:
 ///
-/// - `as <type>` simply returns the field value casted as the requested integer type, typically
-///   `u32`, `u16`, `u8` or `bool`. Note that `bool` fields must have a range of 1 bit.
+/// - `as <type>` simply returns the field value cast to `<type>`, typically `u32`, `u16`, `u8` or
+///   `bool`. Note that `bool` fields must have a range of 1 bit.
 /// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
 ///   the result.
 /// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
-///   and returns the result. This is useful on fields for which not all values are value.
+///   and returns the result. This is useful with fields for which not all values are valid.
 ///
 /// The documentation strings are optional. If present, they will be added to the type's
 /// definition, or the field getter and setter methods they are attached to.
 ///
-/// Putting a `+` before the address of the register makes it relative to a base: the `read` and
-/// `write` methods take a `base` argument that is added to the specified address before access,
-/// and `try_read` and `try_write` methods are also created, allowing access with offsets unknown
-/// at compile-time:
+/// It is also possible to create an alias register by using the `=> ALIAS` syntax. This is useful
+/// for cases where a register's interpretation depends on the context:
 ///
 /// ```no_run
-/// register!(CPU_CTL @ +0x0000010, "CPU core control" {
-///     0:0     start as bool, "Start the CPU core";
+/// register!(SCRATCH @ 0x00000200, "Scratch register" {
+///     31:0     value as u32, "Raw value";
 /// });
 ///
-/// // Flip the `start` switch for the CPU core which base address is at `CPU_BASE`.
-/// let cpuctl = CPU_CTL::read(&bar, CPU_BASE);
-/// pr_info!("CPU CTL: {:#x}", cpuctl);
-/// cpuctl.set_start(true).write(&bar, CPU_BASE);
+/// register!(SCRATCH_BOOT_STATUS => SCRATCH, "Boot status of the firmware" {
+///     0:0     completed as bool, "Whether the firmware has completed booting";
 /// });
 /// ```
 ///
-/// It is also possible to create a alias register by using the `=> ALIAS` syntax. This is useful
-/// for cases where a register's interpretation depends on the context:
+/// In this example, `SCRATCH_BOOT_STATUS` uses the same I/O address as `SCRATCH`, while also
+/// providing its own `completed` field.
+///
+/// ## Relative registers
+///
+/// A register can be defined as being accessible from a fixed offset of a provided base. For
+/// instance, imagine the following I/O space:
+///
+/// ```text
+///          +-----------------------------+
+///          |             ...             |
+///          |                             |
+/// 0x100--->+------------CPU0-------------+
+///          |                             |
+/// 0x110--->+-----------------------------+
+///          |           CPU_CTL           |
+///          +-----------------------------+
+///          |             ...             |
+///          |                             |
+///          |                             |
+/// 0x200--->+------------CPU1-------------+
+///          |                             |
+/// 0x210--->+-----------------------------+
+///          |           CPU_CTL           |
+///          +-----------------------------+
+///          |             ...             |
+///          +-----------------------------+
+/// ```
+///
+/// `CPU0` and `CPU1` both have a `CPU_CTL` register that starts at offset `0x10` of their I/O
+/// space segment.
Since both instances of `CPU_CTL` share the same layout, we don't want to define
+/// them twice and would prefer a way to select which one to use from a single definition.
+///
+/// This can be done using the `Base[Offset]` syntax when specifying the register's address.
+///
+/// `Base` is an arbitrary type (typically a ZST) to be used as a generic parameter of the
+/// [`RegisterBase`] trait to provide the base as a constant, i.e. each type providing a base for
+/// this register needs to implement `RegisterBase<Base>`. Here is the above example translated
+/// into code:
 ///
 /// ```no_run
-/// register!(SCRATCH_0 @ 0x0000100, "Scratch register 0" {
-///     31:0     value as u32, "Raw value";
+/// // Type used to identify the base.
+/// pub(crate) struct CpuCtlBase;
 ///
-/// register!(SCRATCH_0_BOOT_STATUS => SCRATCH_0, "Boot status of the firmware" {
-///     0:0     completed as bool, "Whether the firmware has completed booting";
+/// // ZST describing `CPU0`.
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+///     const BASE: usize = 0x100;
+/// }
+/// // Singleton of `CPU0` used to identify it.
+/// const CPU0: Cpu0 = Cpu0;
+///
+/// // ZST describing `CPU1`.
+/// struct Cpu1;
+/// impl RegisterBase<CpuCtlBase> for Cpu1 {
+///     const BASE: usize = 0x200;
+/// }
+/// // Singleton of `CPU1` used to identify it.
+/// const CPU1: Cpu1 = Cpu1;
+///
+/// // This makes `CPU_CTL` accessible from all implementors of `RegisterBase<CpuCtlBase>`.
+/// register!(CPU_CTL @ CpuCtlBase[0x10], "CPU core control" {
+///     0:0     start as bool, "Start the CPU core";
+/// });
+///
+/// // The `read`, `write` and `alter` methods of relative registers take an extra `base` argument
+/// // that is used to resolve its final address by adding its `BASE` to the offset of the
+/// // register.
+///
+/// // Start `CPU0`.
+/// CPU_CTL::alter(bar, &CPU0, |r| r.set_start(true));
+///
+/// // Start `CPU1`.
+/// CPU_CTL::alter(bar, &CPU1, |r| r.set_start(true));
+///
+/// // Aliases can also be defined for relative registers.
+/// register!(CPU_CTL_ALIAS => CpuCtlBase[CPU_CTL], "Alias to CPU core control" {
+///     1:1     alias_start as bool, "Start the aliased CPU core";
+/// });
+///
+/// // Start the aliased `CPU0`.
+/// CPU_CTL_ALIAS::alter(bar, &CPU0, |r| r.set_alias_start(true));
+/// ```
+///
+/// ## Arrays of registers
+///
+/// Some I/O areas contain consecutive values that can be interpreted in the same way. These areas
+/// can be defined as an array of identical registers, allowing them to be accessed by index with
+/// compile-time or runtime bound checking. Simply define their address as `Address[Size]`, and add
+/// an `idx` parameter to their `read`, `write` and `alter` methods:
+///
+/// ```no_run
+/// # fn no_run() -> Result<(), Error> {
+/// # fn get_scratch_idx() -> usize {
+/// #     0x15
+/// # }
+/// // Array of 64 consecutive registers with the same layout starting at offset `0x80`.
+/// register!(SCRATCH @ 0x00000080[64], "Scratch registers" {
+///     31:0     value as u32;
+/// });
+///
+/// // Read scratch register 0, i.e. I/O address `0x80`.
+/// let scratch_0 = SCRATCH::read(bar, 0).value();
+/// // Read scratch register 15, i.e. I/O address `0x80 + (15 * 4)`.
+/// let scratch_15 = SCRATCH::read(bar, 15).value();
+///
+/// // This is out of bounds and won't build.
+/// // let scratch_128 = SCRATCH::read(bar, 128).value();
+///
+/// // Runtime-obtained array index.
+/// let scratch_idx = get_scratch_idx();
+/// // Access on a runtime index returns an error if it is out of bounds.
+/// let some_scratch = SCRATCH::try_read(bar, scratch_idx)?.value(); +/// +/// // Alias to a particular register in an array. +/// // Here `SCRATCH[8]` is used to convey the firmware exit code. +/// register!(FIRMWARE_STATUS => SCRATCH[8], "Firmware exit status code" { +/// 7:0 status as u8; +/// }); +/// +/// let status = FIRMWARE_STATUS::read(bar).status(); +/// +/// // Non-contiguous register arrays can be defined by adding a stride parameter. +/// // Here, the 16 registers of the array are separated by 8 bytes, meaning that the +/// // registers of the two declarations below are interleaved. +/// register!(SCRATCH_INTERLEAVED_0 @ 0x000000c0[16 ; 8], "Scratch registers bank 0" { +/// 31:0 value as u32; +/// }); +/// register!(SCRATCH_INTERLEAVED_1 @ 0x000000c4[16 ; 8], "Scratch registers bank 1" { +/// 31:0 value as u32; +/// }); +/// # Ok(()) +/// # } /// ``` /// -/// In this example, `SCRATCH_0_BOOT_STATUS` uses the same I/O address as `SCRATCH_0`, while also -/// providing its own `completed` method. +/// ## Relative arrays of registers +/// +/// Combining the two features described in the sections above, arrays of registers accessible from +/// a base can also be defined: +/// +/// ```no_run +/// # fn no_run() -> Result<(), Error> { +/// # fn get_scratch_idx() -> usize { +/// # 0x15 +/// # } +/// // Type used as parameter of `RegisterBase` to specify the base. +/// pub(crate) struct CpuCtlBase; +/// +/// // ZST describing `CPU0`. +/// struct Cpu0; +/// impl RegisterBase<CpuCtlBase> for Cpu0 { +/// const BASE: usize = 0x100; +/// } +/// // Singleton of `CPU0` used to identify it. +/// const CPU0: Cpu0 = Cpu0; +/// +/// // ZST describing `CPU1`. +/// struct Cpu1; +/// impl RegisterBase<CpuCtlBase> for Cpu1 { +/// const BASE: usize = 0x200; +/// } +/// // Singleton of `CPU1` used to identify it. +/// const CPU1: Cpu1 = Cpu1; +/// +/// // 64 per-cpu scratch registers, arranged as a contiguous array. +/// register!(CPU_SCRATCH @ CpuCtlBase[0x00000080[64]], "Per-CPU scratch registers" { +/// 31:0 value as u32; +/// }); +/// +/// let cpu0_scratch_0 = CPU_SCRATCH::read(bar, &Cpu0, 0).value(); +/// let cpu1_scratch_15 = CPU_SCRATCH::read(bar, &Cpu1, 15).value(); +/// +/// // This is out of bounds and won't build. +/// // let cpu0_scratch_128 = CPU_SCRATCH::read(bar, &Cpu0, 128).value(); +/// +/// // Runtime-obtained array index. +/// let scratch_idx = get_scratch_idx(); +/// // Access on a runtime value returns an error if it is out-of-bounds. +/// let cpu0_some_scratch = CPU_SCRATCH::try_read(bar, &Cpu0, scratch_idx)?.value(); +/// +/// // `CPU_SCRATCH[8]` is used to convey the firmware exit code. +/// register!(CPU_FIRMWARE_STATUS => CpuCtlBase[CPU_SCRATCH[8]], +/// "Per-CPU firmware exit status code" { +/// 7:0 status as u8; +/// }); +/// +/// let cpu0_status = CPU_FIRMWARE_STATUS::read(bar, &Cpu0).status(); +/// +/// // Non-contiguous register arrays can be defined by adding a stride parameter. +/// // Here, the 16 registers of the array are separated by 8 bytes, meaning that the +/// // registers of the two declarations below are interleaved. +/// register!(CPU_SCRATCH_INTERLEAVED_0 @ CpuCtlBase[0x00000d00[16 ; 8]], +/// "Scratch registers bank 0" { +/// 31:0 value as u32; +/// }); +/// register!(CPU_SCRATCH_INTERLEAVED_1 @ CpuCtlBase[0x00000d04[16 ; 8]], +/// "Scratch registers bank 1" { +/// 31:0 value as u32; +/// }); +/// # Ok(()) +/// # } +/// ``` macro_rules! register { // Creates a register at a fixed offset of the MMIO space. + ($name:ident @ $offset:literal $(, $comment:literal)?
{ $($fields:tt)* } ) => { + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_fixed $name @ $offset); + }; + + // Creates an alias register of fixed offset register `alias` with its own fields. + ($name:ident => $alias:ident $(, $comment:literal)? { $($fields:tt)* } ) => { + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_fixed $name @ $alias::OFFSET); + }; + + // Creates a register at a relative offset from a base address provider. + ($name:ident @ $base:ty [ $offset:literal ] $(, $comment:literal)? { $($fields:tt)* } ) => { + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_relative $name @ $base [ $offset ]); + }; + + // Creates an alias register of relative offset register `alias` with its own fields. + ($name:ident => $base:ty [ $alias:ident ] $(, $comment:literal)? { $($fields:tt)* }) => { + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_relative $name @ $base [ $alias::OFFSET ]); + }; + + // Creates an array of registers at a fixed offset of the MMIO space. ( - $name:ident @ $offset:literal $(, $comment:literal)? { + $name:ident @ $offset:literal [ $size:expr ; $stride:expr ] $(, $comment:literal)? { $($fields:tt)* } ) => { - register!(@common $name @ $offset $(, $comment)?); - register!(@field_accessors $name { $($fields)* }); - register!(@io $name @ $offset); + static_assert!(::core::mem::size_of::<u32>() <= $stride); + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_array $name @ $offset [ $size ; $stride ]); }; - // Creates a alias register of fixed offset register `alias` with its own fields. + // Shortcut for contiguous array of registers (stride == size of element). ( - $name:ident => $alias:ident $(, $comment:literal)? { + $name:ident @ $offset:literal [ $size:expr ] $(, $comment:literal)? { $($fields:tt)* } ) => { - register!(@common $name @ $alias::OFFSET $(, $comment)?); - register!(@field_accessors $name { $($fields)* }); - register!(@io $name @ $alias::OFFSET); + register!($name @ $offset [ $size ; ::core::mem::size_of::<u32>() ] $(, $comment)? { + $($fields)* + } ); + }; + + // Creates an array of registers at a relative offset from a base address provider. + ( + $name:ident @ $base:ty [ $offset:literal [ $size:expr ; $stride:expr ] ] + $(, $comment:literal)? { $($fields:tt)* } + ) => { + static_assert!(::core::mem::size_of::<u32>() <= $stride); + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_relative_array $name @ $base [ $offset [ $size ; $stride ] ]); }; - // Creates a register at a relative offset from a base address. + // Shortcut for contiguous array of relative registers (stride == size of element). ( - $name:ident @ + $offset:literal $(, $comment:literal)? { + $name:ident @ $base:ty [ $offset:literal [ $size:expr ] ] $(, $comment:literal)? { $($fields:tt)* } ) => { - register!(@common $name @ $offset $(, $comment)?); - register!(@field_accessors $name { $($fields)* }); - register!(@io$name @ + $offset); + register!($name @ $base [ $offset [ $size ; ::core::mem::size_of::<u32>() ] ] + $(, $comment)? { $($fields)* } ); }; - // Creates a alias register of relative offset register `alias` with its own fields. + // Creates an alias of register `idx` of relative array of registers `alias` with its own + // fields. ( - $name:ident => + $alias:ident $(, $comment:literal)? { + $name:ident => $base:ty [ $alias:ident [ $idx:expr ] ] $(, $comment:literal)? 
{ $($fields:tt)* } ) => { - register!(@common $name @ $alias::OFFSET $(, $comment)?); - register!(@field_accessors $name { $($fields)* }); - register!(@io $name @ + $alias::OFFSET); + static_assert!($idx < $alias::SIZE); + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_relative $name @ $base [ $alias::OFFSET + $idx * $alias::STRIDE ] ); + }; + + // Creates an alias of register `idx` of array of registers `alias` with its own fields. + // This rule belongs to the (non-relative) register arrays set, but needs to be put last + // to avoid it being interpreted in place of the relative register array alias rule. + ($name:ident => $alias:ident [ $idx:expr ] $(, $comment:literal)? { $($fields:tt)* }) => { + static_assert!($idx < $alias::SIZE); + register!(@core $name $(, $comment)? { $($fields)* } ); + register!(@io_fixed $name @ $alias::OFFSET + $idx * $alias::STRIDE ); }; // All rules below are helpers. - // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`, - // and conversion to regular `u32`). - (@common $name:ident @ $offset:expr $(, $comment:literal)?) => { + // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, + // `Default`, `BitOr`, and conversion to the value type) and field accessor methods. + (@core $name:ident $(, $comment:literal)? { $($fields:tt)* }) => { $( #[doc=$comment] )? #[repr(transparent)] - #[derive(Clone, Copy, Default)] + #[derive(Clone, Copy)] pub(crate) struct $name(u32); - #[allow(dead_code)] - impl $name { - pub(crate) const OFFSET: usize = $offset; - } - - // TODO[REGA]: display the raw hex value, then the value of all the fields. This requires - // matching the fields, which will complexify the syntax considerably... - impl ::core::fmt::Debug for $name { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - f.debug_tuple(stringify!($name)) - .field(&format_args!("0x{0:x}", &self.0)) - .finish() - } - } - impl ::core::ops::BitOr for $name { type Output = Self; @@ -170,6 +394,34 @@ macro_rules! register { reg.0 } } + + register!(@fields_dispatcher $name { $($fields)* }); + }; + + // Captures the fields and passes them to all the implementers that require field information. + // + // Used to simplify the matching rules for implementers, so they don't need to match the entire + // complex fields rule even though they only make use of part of it. + (@fields_dispatcher $name:ident { + $($hi:tt:$lo:tt $field:ident as $type:tt + $(?=> $try_into_type:ty)? + $(=> $into_type:ty)? + $(, $comment:literal)? + ; + )* + } + ) => { + register!(@field_accessors $name { + $( + $hi:$lo $field as $type + $(?=> $try_into_type)? + $(=> $into_type)? + $(, $comment)? + ; + )* + }); + register!(@debug $name { $($field;)* }); + register!(@default $name { $($field;)* }); }; // Defines all the field getter/methods methods for `$name`. @@ -228,7 +480,7 @@ macro_rules! register { $(, $comment:literal)?; ) => { register!( - @leaf_accessor $name $hi:$lo $field as bool + @leaf_accessor $name $hi:$lo $field { |f| <$into_type>::from(if f != 0 { true } else { false }) } $into_type => $into_type $(, $comment)?; ); @@ -246,7 +498,7 @@ macro_rules! 
register { @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt ?=> $try_into_type:ty $(, $comment:literal)?; ) => { - register!(@leaf_accessor $name $hi:$lo $field as $type + register!(@leaf_accessor $name $hi:$lo $field { |f| <$try_into_type>::try_from(f as $type) } $try_into_type => ::core::result::Result< $try_into_type, @@ -260,11 +512,11 @@ macro_rules! register { @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt => $into_type:ty $(, $comment:literal)?; ) => { - register!(@leaf_accessor $name $hi:$lo $field as $type + register!(@leaf_accessor $name $hi:$lo $field { |f| <$into_type>::from(f as $type) } $into_type => $into_type $(, $comment)?;); }; - // Shortcut for fields defined as non-`bool` without the `=>` or `?=>` syntax. + // Shortcut for non-boolean fields defined without the `=>` or `?=>` syntax. ( @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt $(, $comment:literal)?; @@ -274,11 +526,11 @@ macro_rules! register { // Generates the accessor methods for a single field. ( - @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty + @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident { $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?; ) => { ::kernel::macros::paste!( - const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi; + const [<$field:upper _RANGE>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi; const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1); const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros(); ); @@ -287,7 +539,7 @@ macro_rules! register { #[doc="Returns the value of this field:"] #[doc=$comment] )? - #[inline] + #[inline(always)] pub(crate) fn $field(self) -> $res_type { ::kernel::macros::paste!( const MASK: u32 = $name::[<$field:upper _MASK>]; @@ -303,7 +555,7 @@ macro_rules! register { #[doc="Sets the value of this field:"] #[doc=$comment] )? - #[inline] + #[inline(always)] pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self { const MASK: u32 = $name::[<$field:upper _MASK>]; const SHIFT: u32 = $name::[<$field:upper _SHIFT>]; @@ -315,25 +567,64 @@ macro_rules! register { ); }; - // Creates the IO accessors for a fixed offset register. - (@io $name:ident @ $offset:expr) => { + // Generates the `Debug` implementation for `$name`. + (@debug $name:ident { $($field:ident;)* }) => { + impl ::core::fmt::Debug for $name { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_struct(stringify!($name)) + .field("<raw>", &format_args!("{:#x}", &self.0)) + $( + .field(stringify!($field), &self.$field()) + )* + .finish() + } + } + }; + + // Generates the `Default` implementation for `$name`. + (@default $name:ident { $($field:ident;)* }) => { + /// Returns a value for the register where all fields are set to their default value. + impl ::core::default::Default for $name { + fn default() -> Self { + #[allow(unused_mut)] + let mut value = Self(Default::default()); + + ::kernel::macros::paste!( + $( + value.[<set_ $field>](Default::default()); + )* + ); + + value + } + } + }; + + // Generates the IO accessors for a fixed offset register. + (@io_fixed $name:ident @ $offset:expr) => { #[allow(dead_code)] impl $name { - #[inline] + pub(crate) const OFFSET: usize = $offset; + + /// Read the register from its address in `io`. 
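+ /// + /// This expands to a single 32-bit MMIO read (`read32`) at `Self::OFFSET`.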
+ #[inline(always)] pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, { Self(io.read32($offset)) } - #[inline] + /// Write the value contained in `self` to the register address in `io`. + #[inline(always)] pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, { io.write32(self.0, $offset) } - #[inline] + /// Read the register from its address in `io` and run `f` on its value to obtain a new + /// value to write back. + #[inline(always)] pub(crate) fn alter<const SIZE: usize, T, F>( io: &T, f: F, @@ -347,76 +638,322 @@ macro_rules! register { } }; - // Create the IO accessors for a relative offset register. - (@io $name:ident @ + $offset:literal) => { + // Generates the IO accessors for a relative offset register. + (@io_relative $name:ident @ $base:ty [ $offset:expr ]) => { #[allow(dead_code)] impl $name { - #[inline] + pub(crate) const OFFSET: usize = $offset; + + /// Read the register from `io`, using the base address provided by `base` and adding + /// the register's offset to it. + #[inline(always)] + pub(crate) fn read<const SIZE: usize, T, B>( + io: &T, + #[allow(unused_variables)] + base: &B, + ) -> Self where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + { + const OFFSET: usize = $name::OFFSET; + + let value = io.read32( + <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET + ); + + Self(value) + } + + /// Write the value contained in `self` to `io`, using the base address provided by + /// `base` and adding the register's offset to it. + #[inline(always)] + pub(crate) fn write<const SIZE: usize, T, B>( + self, + io: &T, + #[allow(unused_variables)] + base: &B, + ) where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + { + const OFFSET: usize = $name::OFFSET; + + io.write32( + self.0, + <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET + ); + } + + /// Read the register from `io`, using the base address provided by `base` and adding + /// the register's offset to it, then run `f` on its value to obtain a new value to + /// write back. + #[inline(always)] + pub(crate) fn alter<const SIZE: usize, T, B, F>( + io: &T, + base: &B, + f: F, + ) where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + F: ::core::ops::FnOnce(Self) -> Self, + { + let reg = f(Self::read(io, base)); + reg.write(io, base); + } + } + }; + + // Generates the IO accessors for an array of registers. + (@io_array $name:ident @ $offset:literal [ $size:expr ; $stride:expr ]) => { + #[allow(dead_code)] + impl $name { + pub(crate) const OFFSET: usize = $offset; + pub(crate) const SIZE: usize = $size; + pub(crate) const STRIDE: usize = $stride; + + /// Read the array register at index `idx` from its address in `io`. + #[inline(always)] pub(crate) fn read<const SIZE: usize, T>( io: &T, - base: usize, + idx: usize, ) -> Self where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, { - Self(io.read32(base + $offset)) + build_assert!(idx < Self::SIZE); + + let offset = Self::OFFSET + (idx * Self::STRIDE); + let value = io.read32(offset); + + Self(value) } - #[inline] + /// Write the value contained in `self` to the array register with index `idx` in `io`. 
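+ /// + /// Out-of-bounds values of `idx` are rejected at build time via `build_assert!`.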
+ #[inline(always)] pub(crate) fn write<const SIZE: usize, T>( self, io: &T, - base: usize, + idx: usize ) where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, { - io.write32(self.0, base + $offset) + build_assert!(idx < Self::SIZE); + + let offset = Self::OFFSET + (idx * Self::STRIDE); + + io.write32(self.0, offset); } - #[inline] + /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a + /// new value to write back. + #[inline(always)] pub(crate) fn alter<const SIZE: usize, T, F>( io: &T, - base: usize, + idx: usize, f: F, ) where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, F: ::core::ops::FnOnce(Self) -> Self, { - let reg = f(Self::read(io, base)); - reg.write(io, base); + let reg = f(Self::read(io, idx)); + reg.write(io, idx); } - #[inline] + /// Read the array register at index `idx` from its address in `io`. + /// + /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the + /// access was out-of-bounds. + #[inline(always)] pub(crate) fn try_read<const SIZE: usize, T>( io: &T, - base: usize, + idx: usize, ) -> ::kernel::error::Result<Self> where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, { - io.try_read32(base + $offset).map(Self) + if idx < Self::SIZE { + Ok(Self::read(io, idx)) + } else { + Err(EINVAL) + } } - #[inline] + /// Write the value contained in `self` to the array register with index `idx` in `io`. + /// + /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the + /// access was out-of-bounds. + #[inline(always)] pub(crate) fn try_write<const SIZE: usize, T>( self, io: &T, - base: usize, - ) -> ::kernel::error::Result<()> where + idx: usize, + ) -> ::kernel::error::Result where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, { - io.try_write32(self.0, base + $offset) + if idx < Self::SIZE { + Ok(self.write(io, idx)) + } else { + Err(EINVAL) + } } - #[inline] + /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a + /// new value to write back. + /// + /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the + /// access was out-of-bounds. + #[inline(always)] pub(crate) fn try_alter<const SIZE: usize, T, F>( io: &T, - base: usize, + idx: usize, + f: F, + ) -> ::kernel::error::Result where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + F: ::core::ops::FnOnce(Self) -> Self, + { + if idx < Self::SIZE { + Ok(Self::alter(io, idx, f)) + } else { + Err(EINVAL) + } + } + } + }; + + // Generates the IO accessors for an array of relative registers. + ( + @io_relative_array $name:ident @ $base:ty + [ $offset:literal [ $size:expr ; $stride:expr ] ] + ) => { + #[allow(dead_code)] + impl $name { + pub(crate) const OFFSET: usize = $offset; + pub(crate) const SIZE: usize = $size; + pub(crate) const STRIDE: usize = $stride; + + /// Read the array register at index `idx` from `io`, using the base address provided + /// by `base` and adding the register's offset to it.
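+ /// + /// The effective address is `B::BASE + Self::OFFSET + (idx * Self::STRIDE)`.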
+ #[inline(always)] + pub(crate) fn read<const SIZE: usize, T, B>( + io: &T, + #[allow(unused_variables)] + base: &B, + idx: usize, + ) -> Self where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + { + build_assert!(idx < Self::SIZE); + + let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE + + Self::OFFSET + (idx * Self::STRIDE); + let value = io.read32(offset); + + Self(value) + } + + /// Write the value contained in `self` to `io`, using the base address provided by + /// `base` and adding the offset of array register `idx` to it. + #[inline(always)] + pub(crate) fn write<const SIZE: usize, T, B>( + self, + io: &T, + #[allow(unused_variables)] + base: &B, + idx: usize + ) where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + { + build_assert!(idx < Self::SIZE); + + let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE + + Self::OFFSET + (idx * Self::STRIDE); + + io.write32(self.0, offset); + } + + /// Read the array register at index `idx` from `io`, using the base address provided + /// by `base` and adding the register's offset to it, then run `f` on its value to + /// obtain a new value to write back. + #[inline(always)] + pub(crate) fn alter<const SIZE: usize, T, B, F>( + io: &T, + base: &B, + idx: usize, + f: F, + ) where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + F: ::core::ops::FnOnce(Self) -> Self, + { + let reg = f(Self::read(io, base, idx)); + reg.write(io, base, idx); + } + + /// Read the array register at index `idx` from `io`, using the base address provided + /// by `base` and adding the register's offset to it. + /// + /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the + /// access was out-of-bounds. + #[inline(always)] + pub(crate) fn try_read<const SIZE: usize, T, B>( + io: &T, + base: &B, + idx: usize, + ) -> ::kernel::error::Result<Self> where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + { + if idx < Self::SIZE { + Ok(Self::read(io, base, idx)) + } else { + Err(EINVAL) + } + } + + /// Write the value contained in `self` to `io`, using the base address provided by + /// `base` and adding the offset of array register `idx` to it. + /// + /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the + /// access was out-of-bounds. + #[inline(always)] + pub(crate) fn try_write<const SIZE: usize, T, B>( + self, + io: &T, + base: &B, + idx: usize, + ) -> ::kernel::error::Result where + T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, + { + if idx < Self::SIZE { + Ok(self.write(io, base, idx)) + } else { + Err(EINVAL) + } + } + + /// Read the array register at index `idx` from `io`, using the base address provided + /// by `base` and adding the register's offset to it, then run `f` on its value to + /// obtain a new value to write back. + /// + /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the + /// access was out-of-bounds.
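+ /// + /// On success, the value returned by `f` is written back to the register.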
+ #[inline(always)] + pub(crate) fn try_alter<const SIZE: usize, T, B, F>( + io: &T, + base: &B, + idx: usize, f: F, - ) -> ::kernel::error::Result<()> where + ) -> ::kernel::error::Result where T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>, + B: crate::regs::macros::RegisterBase<$base>, F: ::core::ops::FnOnce(Self) -> Self, { - let reg = f(Self::try_read(io, base)?); - reg.try_write(io, base) + if idx < Self::SIZE { + Ok(Self::alter(io, base, idx, f)) + } else { + Err(EINVAL) + } } } }; diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs index 76cedf3710d7..bf35f00cb732 100644 --- a/drivers/gpu/nova-core/util.rs +++ b/drivers/gpu/nova-core/util.rs @@ -3,26 +3,6 @@ use kernel::prelude::*; use kernel::time::{Delta, Instant, Monotonic}; -pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] { - let src = s.as_bytes(); - let mut dst = [0; N]; - let mut i = 0; - - while i < src.len() && i < N { - dst[i] = (src[i] as char).to_ascii_lowercase() as u8; - i += 1; - } - - dst -} - -pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str { - match core::str::from_utf8(bytes) { - Ok(string) => string, - Err(_) => kernel::build_error!("Bytes are not valid UTF-8."), - } -} - /// Wait until `cond` is true or `timeout` elapsed. /// /// When `cond` evaluates to `Some`, its return value is returned. diff --git a/drivers/gpu/nova-core/vbios.rs b/drivers/gpu/nova-core/vbios.rs index 5b5d9f38cbb3..e6a060714205 100644 --- a/drivers/gpu/nova-core/vbios.rs +++ b/drivers/gpu/nova-core/vbios.rs @@ -8,8 +8,8 @@ use crate::firmware::FalconUCodeDescV3; use core::convert::TryFrom; use kernel::device; use kernel::error::Result; -use kernel::pci; use kernel::prelude::*; +use kernel::types::ARef; /// The offset of the VBIOS ROM in the BAR0 space. const ROM_OFFSET: usize = 0x300000; @@ -31,7 +31,7 @@ const FALCON_UCODE_ENTRY_APPID_FWSEC_PROD: u8 = 0x85; /// Vbios Reader for constructing the VBIOS data. struct VbiosIterator<'a> { - pdev: &'a pci::Device, + dev: &'a device::Device, bar0: &'a Bar0, /// VBIOS data vector: As BIOS images are scanned, they are added to this vector for reference /// or copying into other data structures. It is the entire scanned contents of the VBIOS which @@ -46,9 +46,9 @@ struct VbiosIterator<'a> { } impl<'a> VbiosIterator<'a> { - fn new(pdev: &'a pci::Device, bar0: &'a Bar0) -> Result<Self> { + fn new(dev: &'a device::Device, bar0: &'a Bar0) -> Result<Self> { Ok(Self { - pdev, + dev, bar0, data: KVec::new(), current_offset: 0, @@ -64,7 +64,7 @@ impl<'a> VbiosIterator<'a> { // Ensure length is a multiple of 4 for 32-bit reads if len % core::mem::size_of::<u32>() != 0 { dev_err!( - self.pdev.as_ref(), + self.dev, "VBIOS read length {} is not a multiple of 4\n", len ); @@ -89,7 +89,7 @@ impl<'a> VbiosIterator<'a> { /// Read bytes at a specific offset, filling any gap. 
fn read_more_at_offset(&mut self, offset: usize, len: usize) -> Result { if offset > BIOS_MAX_SCAN_LEN { - dev_err!(self.pdev.as_ref(), "Error: exceeded BIOS scan limit.\n"); + dev_err!(self.dev, "Error: exceeded BIOS scan limit.\n"); return Err(EINVAL); } @@ -115,7 +115,7 @@ impl<'a> VbiosIterator<'a> { if offset + len > data_len { self.read_more_at_offset(offset, len).inspect_err(|e| { dev_err!( - self.pdev.as_ref(), + self.dev, "Failed to read more at offset {:#x}: {:?}\n", offset, e @@ -123,9 +123,9 @@ impl<'a> VbiosIterator<'a> { })?; } - BiosImage::new(self.pdev, &self.data[offset..offset + len]).inspect_err(|err| { + BiosImage::new(self.dev, &self.data[offset..offset + len]).inspect_err(|err| { dev_err!( - self.pdev.as_ref(), + self.dev, "Failed to {} at offset {:#x}: {:?}\n", context, offset, @@ -146,10 +146,7 @@ impl<'a> Iterator for VbiosIterator<'a> { } if self.current_offset > BIOS_MAX_SCAN_LEN { - dev_err!( - self.pdev.as_ref(), - "Error: exceeded BIOS scan limit, stopping scan\n" - ); + dev_err!(self.dev, "Error: exceeded BIOS scan limit, stopping scan\n"); return None; } @@ -192,18 +189,18 @@ impl Vbios { /// Probe for VBIOS extraction. /// /// Once the VBIOS object is built, `bar0` is not read for [`Vbios`] purposes anymore. - pub(crate) fn new(pdev: &pci::Device, bar0: &Bar0) -> Result<Vbios> { + pub(crate) fn new(dev: &device::Device, bar0: &Bar0) -> Result<Vbios> { // Images to extract from iteration let mut pci_at_image: Option<PciAtBiosImage> = None; let mut first_fwsec_image: Option<FwSecBiosBuilder> = None; let mut second_fwsec_image: Option<FwSecBiosBuilder> = None; // Parse all VBIOS images in the ROM - for image_result in VbiosIterator::new(pdev, bar0)? { + for image_result in VbiosIterator::new(dev, bar0)? { let full_image = image_result?; dev_dbg!( - pdev.as_ref(), + dev, "Found BIOS image: size: {:#x}, type: {}, last: {}\n", full_image.image_size_bytes(), full_image.image_type_str(), @@ -234,14 +231,14 @@ impl Vbios { (second_fwsec_image, first_fwsec_image, pci_at_image) { second - .setup_falcon_data(pdev, &pci_at, &first) - .inspect_err(|e| dev_err!(pdev.as_ref(), "Falcon data setup failed: {:?}\n", e))?; + .setup_falcon_data(&pci_at, &first) + .inspect_err(|e| dev_err!(dev, "Falcon data setup failed: {:?}\n", e))?; Ok(Vbios { - fwsec_image: second.build(pdev)?, + fwsec_image: second.build()?, }) } else { dev_err!( - pdev.as_ref(), + dev, "Missing required images for falcon data setup, skipping\n" ); Err(EINVAL) @@ -284,9 +281,9 @@ struct PcirStruct { } impl PcirStruct { - fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> { + fn new(dev: &device::Device, data: &[u8]) -> Result<Self> { if data.len() < core::mem::size_of::<PcirStruct>() { - dev_err!(pdev.as_ref(), "Not enough data for PcirStruct\n"); + dev_err!(dev, "Not enough data for PcirStruct\n"); return Err(EINVAL); } @@ -295,11 +292,7 @@ impl PcirStruct { // Signature should be "PCIR" (0x52494350) or "NPDS" (0x5344504e). 
if &signature != b"PCIR" && &signature != b"NPDS" { - dev_err!( - pdev.as_ref(), - "Invalid signature for PcirStruct: {:?}\n", - signature - ); + dev_err!(dev, "Invalid signature for PcirStruct: {:?}\n", signature); return Err(EINVAL); } @@ -308,7 +301,7 @@ impl PcirStruct { let image_len = u16::from_le_bytes([data[16], data[17]]); if image_len == 0 { - dev_err!(pdev.as_ref(), "Invalid image length: 0\n"); + dev_err!(dev, "Invalid image length: 0\n"); return Err(EINVAL); } @@ -345,7 +338,7 @@ impl PcirStruct { /// its header) is in the [`PciAtBiosImage`] and the falcon data it is pointing to is in the /// [`FwSecBiosImage`]. #[derive(Debug, Clone, Copy)] -#[expect(dead_code)] +#[repr(C)] struct BitHeader { /// 0h: BIT Header Identifier (BMP=0x7FFF/BIT=0xB8FF) id: u16, @@ -365,7 +358,7 @@ struct BitHeader { impl BitHeader { fn new(data: &[u8]) -> Result<Self> { - if data.len() < 12 { + if data.len() < core::mem::size_of::<Self>() { return Err(EINVAL); } @@ -467,7 +460,7 @@ struct PciRomHeader { } impl PciRomHeader { - fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> { + fn new(dev: &device::Device, data: &[u8]) -> Result<Self> { if data.len() < 26 { // Need at least 26 bytes to read pciDataStrucPtr and sizeOfBlock. return Err(EINVAL); @@ -479,7 +472,7 @@ impl PciRomHeader { match signature { 0xAA55 | 0xBB77 | 0x4E56 => {} _ => { - dev_err!(pdev.as_ref(), "ROM signature unknown {:#x}\n", signature); + dev_err!(dev, "ROM signature unknown {:#x}\n", signature); return Err(EINVAL); } } @@ -538,9 +531,9 @@ struct NpdeStruct { } impl NpdeStruct { - fn new(pdev: &pci::Device, data: &[u8]) -> Option<Self> { + fn new(dev: &device::Device, data: &[u8]) -> Option<Self> { if data.len() < core::mem::size_of::<Self>() { - dev_dbg!(pdev.as_ref(), "Not enough data for NpdeStruct\n"); + dev_dbg!(dev, "Not enough data for NpdeStruct\n"); return None; } @@ -549,17 +542,13 @@ impl NpdeStruct { // Signature should be "NPDE" (0x4544504E). if &signature != b"NPDE" { - dev_dbg!( - pdev.as_ref(), - "Invalid signature for NpdeStruct: {:?}\n", - signature - ); + dev_dbg!(dev, "Invalid signature for NpdeStruct: {:?}\n", signature); return None; } let subimage_len = u16::from_le_bytes([data[8], data[9]]); if subimage_len == 0 { - dev_dbg!(pdev.as_ref(), "Invalid subimage length: 0\n"); + dev_dbg!(dev, "Invalid subimage length: 0\n"); return None; } @@ -584,7 +573,7 @@ impl NpdeStruct { /// Try to find NPDE in the data, the NPDE is right after the PCIR. fn find_in_data( - pdev: &pci::Device, + dev: &device::Device, data: &[u8], rom_header: &PciRomHeader, pcir: &PcirStruct, @@ -596,12 +585,12 @@ impl NpdeStruct { // Check if we have enough data if npde_start + core::mem::size_of::<Self>() > data.len() { - dev_dbg!(pdev.as_ref(), "Not enough data for NPDE\n"); + dev_dbg!(dev, "Not enough data for NPDE\n"); return None; } // Try to create NPDE from the data - NpdeStruct::new(pdev, &data[npde_start..]) + NpdeStruct::new(dev, &data[npde_start..]) } } @@ -669,10 +658,10 @@ impl BiosImage { /// Create a [`BiosImageBase`] from a byte slice and convert it to a [`BiosImage`] which /// triggers the constructor of the specific BiosImage enum variant. 
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> { - let base = BiosImageBase::new(pdev, data)?; + fn new(dev: &device::Device, data: &[u8]) -> Result<Self> { + let base = BiosImageBase::new(dev, data)?; let image = base.into_image().inspect_err(|e| { - dev_err!(pdev.as_ref(), "Failed to create BiosImage: {:?}\n", e); + dev_err!(dev, "Failed to create BiosImage: {:?}\n", e); })?; Ok(image) @@ -754,9 +743,10 @@ impl TryFrom<BiosImageBase> for BiosImage { /// /// Each BiosImage type has a BiosImageBase type along with other image-specific fields. Note that /// Rust favors composition of types over inheritance. -#[derive(Debug)] #[expect(dead_code)] struct BiosImageBase { + /// Used for logging. + dev: ARef<device::Device>, /// PCI ROM Expansion Header rom_header: PciRomHeader, /// PCI Data Structure @@ -773,16 +763,16 @@ impl BiosImageBase { } /// Creates a new BiosImageBase from raw byte data. - fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> { + fn new(dev: &device::Device, data: &[u8]) -> Result<Self> { // Ensure we have enough data for the ROM header. if data.len() < 26 { - dev_err!(pdev.as_ref(), "Not enough data for ROM header\n"); + dev_err!(dev, "Not enough data for ROM header\n"); return Err(EINVAL); } // Parse the ROM header. - let rom_header = PciRomHeader::new(pdev, &data[0..26]) - .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PciRomHeader: {:?}\n", e))?; + let rom_header = PciRomHeader::new(dev, &data[0..26]) + .inspect_err(|e| dev_err!(dev, "Failed to create PciRomHeader: {:?}\n", e))?; // Get the PCI Data Structure using the pointer from the ROM header. let pcir_offset = rom_header.pci_data_struct_offset as usize; @@ -791,28 +781,29 @@ impl BiosImageBase { .ok_or(EINVAL) .inspect_err(|_| { dev_err!( - pdev.as_ref(), + dev, "PCIR offset {:#x} out of bounds (data length: {})\n", pcir_offset, data.len() ); dev_err!( - pdev.as_ref(), + dev, "Consider reading more data for construction of BiosImage\n" ); })?; - let pcir = PcirStruct::new(pdev, pcir_data) - .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PcirStruct: {:?}\n", e))?; + let pcir = PcirStruct::new(dev, pcir_data) + .inspect_err(|e| dev_err!(dev, "Failed to create PcirStruct: {:?}\n", e))?; // Look for NPDE structure if this is not an NBSI image (type != 0x70). - let npde = NpdeStruct::find_in_data(pdev, data, &rom_header, &pcir); + let npde = NpdeStruct::find_in_data(dev, data, &rom_header, &pcir); // Create a copy of the data. let mut data_copy = KVec::new(); data_copy.extend_from_slice(data, GFP_KERNEL)?; Ok(BiosImageBase { + dev: dev.into(), rom_header, pcir, npde, @@ -848,7 +839,7 @@ impl PciAtBiosImage { /// /// This is just a 4 byte structure that contains a pointer to the Falcon data in the FWSEC /// image. 
- fn falcon_data_ptr(&self, pdev: &pci::Device) -> Result<u32> { + fn falcon_data_ptr(&self) -> Result<u32> { let token = self.get_bit_token(BIT_TOKEN_ID_FALCON_DATA)?; // Make sure we don't go out of bounds @@ -859,14 +850,14 @@ impl PciAtBiosImage { // read the 4 bytes at the offset specified in the token let offset = token.data_offset as usize; let bytes: [u8; 4] = self.base.data[offset..offset + 4].try_into().map_err(|_| { - dev_err!(pdev.as_ref(), "Failed to convert data slice to array"); + dev_err!(self.base.dev, "Failed to convert data slice to array"); EINVAL })?; let data_ptr = u32::from_le_bytes(bytes); if (data_ptr as usize) < self.base.data.len() { - dev_err!(pdev.as_ref(), "Falcon data pointer out of bounds\n"); + dev_err!(self.base.dev, "Falcon data pointer out of bounds\n"); return Err(EINVAL); } @@ -892,7 +883,7 @@ impl TryFrom<BiosImageBase> for PciAtBiosImage { /// The [`PmuLookupTableEntry`] structure is a single entry in the [`PmuLookupTable`]. /// /// See the [`PmuLookupTable`] description for more information. -#[expect(dead_code)] +#[repr(C, packed)] struct PmuLookupTableEntry { application_id: u8, target_id: u8, @@ -901,7 +892,7 @@ struct PmuLookupTableEntry { impl PmuLookupTableEntry { fn new(data: &[u8]) -> Result<Self> { - if data.len() < 6 { + if data.len() < core::mem::size_of::<Self>() { return Err(EINVAL); } @@ -928,7 +919,7 @@ struct PmuLookupTable { } impl PmuLookupTable { - fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> { + fn new(dev: &device::Device, data: &[u8]) -> Result<Self> { if data.len() < 4 { return Err(EINVAL); } @@ -940,10 +931,7 @@ impl PmuLookupTable { let required_bytes = header_len + (entry_count * entry_len); if data.len() < required_bytes { - dev_err!( - pdev.as_ref(), - "PmuLookupTable data length less than required\n" - ); + dev_err!(dev, "PmuLookupTable data length less than required\n"); return Err(EINVAL); } @@ -956,11 +944,7 @@ impl PmuLookupTable { // Debug logging of entries (dumps the table data to dmesg) for i in (header_len..required_bytes).step_by(entry_len) { - dev_dbg!( - pdev.as_ref(), - "PMU entry: {:02x?}\n", - &data[i..][..entry_len] - ); + dev_dbg!(dev, "PMU entry: {:02x?}\n", &data[i..][..entry_len]); } Ok(PmuLookupTable { @@ -997,11 +981,10 @@ impl PmuLookupTable { impl FwSecBiosBuilder { fn setup_falcon_data( &mut self, - pdev: &pci::Device, pci_at_image: &PciAtBiosImage, first_fwsec: &FwSecBiosBuilder, ) -> Result { - let mut offset = pci_at_image.falcon_data_ptr(pdev)? as usize; + let mut offset = pci_at_image.falcon_data_ptr()? 
as usize; let mut pmu_in_first_fwsec = false; // The falcon data pointer assumes that the PciAt and FWSEC images @@ -1024,10 +1007,15 @@ impl FwSecBiosBuilder { self.falcon_data_offset = Some(offset); if pmu_in_first_fwsec { - self.pmu_lookup_table = - Some(PmuLookupTable::new(pdev, &first_fwsec.base.data[offset..])?); + self.pmu_lookup_table = Some(PmuLookupTable::new( + &self.base.dev, + &first_fwsec.base.data[offset..], + )?); } else { - self.pmu_lookup_table = Some(PmuLookupTable::new(pdev, &self.base.data[offset..])?); + self.pmu_lookup_table = Some(PmuLookupTable::new( + &self.base.dev, + &self.base.data[offset..], + )?); } match self @@ -1040,7 +1028,7 @@ impl FwSecBiosBuilder { let mut ucode_offset = entry.data as usize; ucode_offset -= pci_at_image.base.data.len(); if ucode_offset < first_fwsec.base.data.len() { - dev_err!(pdev.as_ref(), "Falcon Ucode offset not in second Fwsec.\n"); + dev_err!(self.base.dev, "Falcon Ucode offset not in second Fwsec.\n"); return Err(EINVAL); } ucode_offset -= first_fwsec.base.data.len(); @@ -1048,7 +1036,7 @@ impl FwSecBiosBuilder { } Err(e) => { dev_err!( - pdev.as_ref(), + self.base.dev, "PmuLookupTableEntry not found, error: {:?}\n", e ); @@ -1059,7 +1047,7 @@ impl FwSecBiosBuilder { } /// Build the final FwSecBiosImage from this builder - fn build(self, pdev: &pci::Device) -> Result<FwSecBiosImage> { + fn build(self) -> Result<FwSecBiosImage> { let ret = FwSecBiosImage { base: self.base, falcon_ucode_offset: self.falcon_ucode_offset.ok_or(EINVAL)?, @@ -1067,8 +1055,8 @@ impl FwSecBiosBuilder { if cfg!(debug_assertions) { // Print the desc header for debugging - let desc = ret.header(pdev.as_ref())?; - dev_dbg!(pdev.as_ref(), "PmuLookupTableEntry desc: {:#?}\n", desc); + let desc = ret.header()?; + dev_dbg!(ret.base.dev, "PmuLookupTableEntry desc: {:#?}\n", desc); } Ok(ret) @@ -1077,13 +1065,16 @@ impl FwSecBiosBuilder { impl FwSecBiosImage { /// Get the FwSec header ([`FalconUCodeDescV3`]). - pub(crate) fn header(&self, dev: &device::Device) -> Result<&FalconUCodeDescV3> { + pub(crate) fn header(&self) -> Result<&FalconUCodeDescV3> { // Get the falcon ucode offset that was found in setup_falcon_data. let falcon_ucode_offset = self.falcon_ucode_offset; // Make sure the offset is within the data bounds. if falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>() > self.base.data.len() { - dev_err!(dev, "fwsec-frts header not contained within BIOS bounds\n"); + dev_err!( + self.base.dev, + "fwsec-frts header not contained within BIOS bounds\n" + ); return Err(ERANGE); } @@ -1095,7 +1086,7 @@ impl FwSecBiosImage { let ver = (hdr & 0xff00) >> 8; if ver != 3 { - dev_err!(dev, "invalid fwsec firmware version: {:?}\n", ver); + dev_err!(self.base.dev, "invalid fwsec firmware version: {:?}\n", ver); return Err(EINVAL); } @@ -1115,7 +1106,7 @@ impl FwSecBiosImage { } /// Get the ucode data as a byte slice - pub(crate) fn ucode(&self, dev: &device::Device, desc: &FalconUCodeDescV3) -> Result<&[u8]> { + pub(crate) fn ucode(&self, desc: &FalconUCodeDescV3) -> Result<&[u8]> { let falcon_ucode_offset = self.falcon_ucode_offset; // The ucode data follows the descriptor. 
@@ -1127,15 +1118,16 @@ impl FwSecBiosImage { .data .get(ucode_data_offset..ucode_data_offset + size) .ok_or(ERANGE) - .inspect_err(|_| dev_err!(dev, "fwsec ucode data not contained within BIOS bounds\n")) + .inspect_err(|_| { + dev_err!( + self.base.dev, + "fwsec ucode data not contained within BIOS bounds\n" + ) + }) } /// Get the signatures as a byte slice - pub(crate) fn sigs( - &self, - dev: &device::Device, - desc: &FalconUCodeDescV3, - ) -> Result<&[Bcrt30Rsa3kSignature]> { + pub(crate) fn sigs(&self, desc: &FalconUCodeDescV3) -> Result<&[Bcrt30Rsa3kSignature]> { // The signatures data follows the descriptor. let sigs_data_offset = self.falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>(); let sigs_size = @@ -1144,7 +1136,7 @@ impl FwSecBiosImage { // Make sure the data is within bounds. if sigs_data_offset + sigs_size > self.base.data.len() { dev_err!( - dev, + self.base.dev, "fwsec signatures data not contained within BIOS bounds\n" ); return Err(ERANGE); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 5219d7ddfdaa..95f63c5f6159 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -555,6 +555,7 @@ struct gcr3_tbl_info { }; struct amd_io_pgtable { + seqcount_t seqcount; /* Protects root/mode update */ struct io_pgtable pgtbl; int mode; u64 *root; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 8de689b2c5ed..ba9e582a8bbe 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1455,12 +1455,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, PCI_FUNC(e->devid)); devid = e->devid; - for (dev_i = devid_start; dev_i <= devid; ++dev_i) { - if (alias) + if (alias) { + for (dev_i = devid_start; dev_i <= devid; ++dev_i) pci_seg->alias_table[dev_i] = devid_to; + set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); } set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags); - set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); break; case IVHD_DEV_SPECIAL: { u8 handle, type; @@ -3067,7 +3067,8 @@ static int __init early_amd_iommu_init(void) if (!boot_cpu_has(X86_FEATURE_CX16)) { pr_err("Failed to initialize. 
The CMPXCHG16B feature is required.\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } /* diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index a91e71f981ef..70c2f5b1631b 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/dma-mapping.h> +#include <linux/seqlock.h> #include <asm/barrier.h> @@ -130,8 +131,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable, *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root)); + write_seqcount_begin(&pgtable->seqcount); pgtable->root = pte; pgtable->mode += 1; + write_seqcount_end(&pgtable->seqcount); + amd_iommu_update_and_flush_device_table(domain); pte = NULL; @@ -153,6 +157,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable, { unsigned long last_addr = address + (page_size - 1); struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg; + unsigned int seqcount; int level, end_lvl; u64 *pte, *page; @@ -170,8 +175,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable, } - level = pgtable->mode - 1; - pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; + do { + seqcount = read_seqcount_begin(&pgtable->seqcount); + + level = pgtable->mode - 1; + pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; + } while (read_seqcount_retry(&pgtable->seqcount, seqcount)); + + address = PAGE_SIZE_ALIGN(address, page_size); end_lvl = PAGE_SIZE_LEVEL(page_size); @@ -249,6 +260,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable, unsigned long *page_size) { int level; + unsigned int seqcount; u64 *pte; *page_size = 0; @@ -256,8 +268,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable, if (address > PM_LEVEL_SIZE(pgtable->mode)) return NULL; - level = pgtable->mode - 1; - pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; + do { + seqcount = read_seqcount_begin(&pgtable->seqcount); + level = pgtable->mode - 1; + pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; + } while (read_seqcount_retry(&pgtable->seqcount, seqcount)); + *page_size = PTE_LEVEL_PAGE_SIZE(level); while (level > 0) { @@ -541,6 +557,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo if (!pgtable->root) return NULL; pgtable->mode = PAGE_MODE_3_LEVEL; + seqcount_init(&pgtable->seqcount); cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap; cfg->ias = IOMMU_IN_ADDR_BIT_SIZE; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 9c3ab9d9f69a..dff2d895b8ab 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1575,6 +1575,10 @@ static void switch_to_super_page(struct dmar_domain *domain, unsigned long lvl_pages = lvl_to_nr_pages(level); struct dma_pte *pte = NULL; + if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) || + !IS_ALIGNED(end_pfn + 1, lvl_pages))) + return; + while (start_pfn <= end_pfn) { if (!pte) pte = pfn_to_dma_pte(domain, start_pfn, &level, @@ -1650,7 +1654,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, unsigned long pages_to_remove; pteval |= DMA_PTE_LARGE_PAGE; - pages_to_remove = min_t(unsigned long, nr_pages, + pages_to_remove = min_t(unsigned long, + round_down(nr_pages, lvl_pages), nr_pte_to_next_page(pte) * lvl_pages); end_pfn = iov_pfn + pages_to_remove - 1; switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl); diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 9c80d61deb2c..aa576736d60b 100644 --- a/drivers/iommu/s390-iommu.c +++ 
b/drivers/iommu/s390-iommu.c @@ -612,6 +612,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain) } } +static bool reg_ioat_propagate_error(int cc, u8 status) +{ + /* + * If the device is in the error state the reset routine + * will register the IOAT of the newly set domain on re-enable + */ + if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL) + return false; + /* + * If the device was removed treat registration as success + * and let the subsequent error event trigger tear down. + */ + if (cc == ZPCI_CC_INVAL_HANDLE) + return false; + return cc != ZPCI_CC_OK; +} + static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev, struct iommu_domain *domain, u8 *status) { @@ -696,7 +713,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, /* If we fail now DMA remains blocked via blocking domain */ cc = s390_iommu_domain_reg_ioat(zdev, domain, &status); - if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) + if (reg_ioat_propagate_error(cc, status)) return -EIO; zdev->dma_table = s390_domain->dma_table; zdev_s390_domain_update(zdev, domain); @@ -1032,7 +1049,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev) lockdep_assert_held(&zdev->dom_lock); - if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED) + if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED || + zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) return NULL; s390_domain = to_s390_domain(zdev->s390_domain); @@ -1123,12 +1141,7 @@ static int s390_attach_dev_identity(struct iommu_domain *domain, /* If we fail now DMA remains blocked via blocking domain */ cc = s390_iommu_domain_reg_ioat(zdev, domain, &status); - - /* - * If the device is undergoing error recovery the reset code - * will re-establish the new domain. - */ - if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL) + if (reg_ioat_propagate_error(cc, status)) return -EIO; zdev_s390_domain_update(zdev, domain); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index efeee0a873c0..ab96b692e5a3 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -133,7 +133,7 @@ struct journal_sector { commit_id_t commit_id; }; -#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) +#define MAX_TAG_SIZE 255 #define METADATA_PADDING_SECTORS 8 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 79ea85d18e24..f4b904e24328 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3813,8 +3813,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) struct raid_set *rs = ti->private; unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors); - limits->io_min = chunk_size_bytes; - limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs); + if (chunk_size_bytes) { + limits->io_min = chunk_size_bytes; + limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs); + } } static void raid_presuspend(struct dm_target *ti) diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 58902091bf79..1461dc740dae 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -456,11 +456,15 @@ static void stripe_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct stripe_c *sc = ti->private; - unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT; + unsigned int io_min, io_opt; limits->chunk_sectors = sc->chunk_size; - limits->io_min = chunk_size; - limits->io_opt = chunk_size * sc->stripes; + + if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) && + 
!check_mul_overflow(io_min, sc->stripes, &io_opt)) { + limits->io_min = io_min; + limits->io_opt = io_opt; + } } static struct target_type stripe_target = { diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 5d9b08115375..3e1f165c2d20 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev) md_init_stacking_limits(&lim); lim.max_hw_sectors = mddev->chunk_sectors; lim.max_write_zeroes_sectors = mddev->chunk_sectors; + lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors; lim.io_min = mddev->chunk_sectors << 9; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); if (err) diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index f1d8811a542a..419139ad7663 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev) md_init_stacking_limits(&lim); lim.max_hw_sectors = mddev->chunk_sectors; lim.max_write_zeroes_sectors = mddev->chunk_sectors; + lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors; lim.io_min = mddev->chunk_sectors << 9; lim.io_opt = lim.io_min * mddev->raid_disks; lim.chunk_sectors = mddev->chunk_sectors; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index bf44878ec640..d30b82beeb92 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev) md_init_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; + lim.max_hw_wzeroes_unmap_sectors = 0; lim.features |= BLK_FEAT_ATOMIC_WRITES; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); if (err) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index b60c30bfb6c7..9832eefb2f15 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4008,6 +4008,7 @@ static int raid10_set_queue_limits(struct mddev *mddev) md_init_stacking_limits(&lim); lim.max_write_zeroes_sectors = 0; + lim.max_hw_wzeroes_unmap_sectors = 0; lim.io_min = mddev->chunk_sectors << 9; lim.chunk_sectors = mddev->chunk_sectors; lim.io_opt = lim.io_min * raid10_nr_stripes(conf); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 023649fe2476..e385ef1355e8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7732,6 +7732,7 @@ static int raid5_set_limits(struct mddev *mddev) lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE; lim.discard_granularity = stripe; lim.max_write_zeroes_sectors = 0; + lim.max_hw_wzeroes_unmap_sectors = 0; mddev_stack_rdev_limits(mddev, &lim, 0); rdev_for_each(rdev, mddev) queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset, diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index 7575fee96cc6..f8b04e49e4ba 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -81,6 +81,19 @@ config INTEL_MEI_VSC This driver can also be built as a module. If so, the module will be called mei-vsc. +config INTEL_MEI_LB + tristate "Intel Late Binding (LB) support on ME Interface" + depends on INTEL_MEI_ME + depends on DRM_XE + help + Enable support for Intel Late Binding (LB) via the MEI interface. + + Late Binding is a method for applying firmware updates at runtime, + allowing the Intel Xe driver to load payloads such as fan controller + or voltage regulator firmware. These firmware updates are + authenticated and versioned, and do not require firmware flashing + or system reboot.
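+ + If unsure, say N.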
+ source "drivers/misc/mei/hdcp/Kconfig" source "drivers/misc/mei/pxp/Kconfig" source "drivers/misc/mei/gsc_proxy/Kconfig" diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 6f9fdbf1a495..a203ed766b33 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -31,6 +31,7 @@ CFLAGS_mei-trace.o = -I$(src) obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/ obj-$(CONFIG_INTEL_MEI_PXP) += pxp/ obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/ +obj-$(CONFIG_INTEL_MEI_LB) += mei_lb.o obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o mei-vsc-hw-y := vsc-tp.o diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 5cc3ad07d5be..09aae8f9d225 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -615,6 +615,19 @@ u8 mei_cldev_ver(const struct mei_cl_device *cldev) EXPORT_SYMBOL_GPL(mei_cldev_ver); /** + * mei_cldev_mtu - max message that client can send and receive + * + * @cldev: mei client device + * + * Return: mtu or 0 if client is not connected + */ +size_t mei_cldev_mtu(const struct mei_cl_device *cldev) +{ + return mei_cl_mtu(cldev->cl); +} +EXPORT_SYMBOL_GPL(mei_cldev_mtu); + +/** * mei_cldev_enabled - check whether the device is enabled * * @cldev: mei client device diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c new file mode 100644 index 000000000000..77686b108d3c --- /dev/null +++ b/drivers/misc/mei/mei_lb.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025 Intel Corporation + */ + +#include <linux/component.h> +#include <linux/mei_cl_bus.h> +#include <linux/module.h> +#include <linux/overflow.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/uuid.h> + +#include <drm/intel/i915_component.h> +#include <drm/intel/intel_lb_mei_interface.h> + +#include "mkhi.h" + +/** + * DOC: Late Binding Firmware Update/Upload + * + * Late Binding is a firmware update/upload mechanism that allows configuration + * payloads to be securely delivered and applied at runtime, rather than + * being embedded in the system firmware image (e.g., IFWI or SPI flash). + * + * This mechanism is used to update device-level configuration such as: + * - Fan controller + * - Voltage regulator (VR) + * + * Key Characteristics: + * --------------------- + * - Runtime Delivery: + * Firmware blobs are loaded by the host driver (e.g., Xe KMD) + * after the GPU or SoC has booted. + * + * - Secure and Authenticated: + * All payloads are signed and verified by the authentication firmware. + * + * - No Firmware Flashing Required: + * Updates are applied in volatile memory and do not require SPI flash + * modification or system reboot. + * + * - Re-entrant: + * Multiple updates of the same or different types can be applied + * sequentially within a single boot session. + * + * - Version Controlled: + * Each payload includes version and security version number (SVN) + * metadata to support anti-rollback enforcement. + * + * Upload Flow: + * ------------ + * 1. Host driver (KMD or user-space tool) loads the late binding firmware. + * 2. Firmware is passed to the MEI interface and forwarded to + * authentication firmware. + * 3. Authentication firmware authenticates the payload and extracts + * command and data arrays. + * 4. Authentication firmware delivers the configuration to PUnit/PCODE. + * 5. Status is returned back to the host via MEI. 
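+ * + * Illustrative sketch (not part of this file): a GPU driver bound through + * the component framework receives the ops published below and is expected + * to push a payload roughly as: + * + * ret = ops->push_payload(lb_dev, type, flags, payload, payload_size); + * + * where `ops` is the &struct intel_lb_component_ops handed over at bind + * time and `lb_dev` is the late binding MEI device. A return value of 0 is + * assumed to mean the payload was authenticated and applied; negative + * values are kernel errors, positive ones authentication firmware status + * codes.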
+ */ + +#define INTEL_LB_CMD 0x12 +#define INTEL_LB_RSP (INTEL_LB_CMD | 0x80) + +#define INTEL_LB_SEND_TIMEOUT_MSEC 3000 +#define INTEL_LB_RECV_TIMEOUT_MSEC 3000 + +/** + * struct mei_lb_req - Late Binding request structure + * @header: MKHI message header (see struct mkhi_msg_hdr) + * @type: Type of the Late Binding payload + * @flags: Flags to be passed to the authentication firmware (e.g. %INTEL_LB_FLAGS_IS_PERSISTENT) + * @reserved: Reserved for future use by authentication firmware, must be set to 0 + * @payload_size: Size of the payload data in bytes + * @payload: Payload data to be sent to the authentication firmware + */ +struct mei_lb_req { + struct mkhi_msg_hdr header; + __le32 type; + __le32 flags; + __le32 reserved[2]; + __le32 payload_size; + u8 payload[] __counted_by(payload_size); +} __packed; + +/** + * struct mei_lb_rsp - Late Binding response structure + * @header: MKHI message header (see struct mkhi_msg_hdr) + * @type: Type of the Late Binding payload + * @reserved: Reserved for future use by authentication firmware, must be set to 0 + * @status: Status returned by authentication firmware (see &enum intel_lb_status) + */ +struct mei_lb_rsp { + struct mkhi_msg_hdr header; + __le32 type; + __le32 reserved[2]; + __le32 status; +} __packed; + +static bool mei_lb_check_response(const struct device *dev, ssize_t bytes, + struct mei_lb_rsp *rsp) +{ + /* + * The received message may be smaller than the full message size when + * the reply contains only the MKHI header with its result field set to + * an error code. Check the header size and content first to output an + * exact error if needed, and then proceed to the whole message. + */ + if (bytes < sizeof(rsp->header)) { + dev_err(dev, "Received less than header size from the firmware: %zd < %zu\n", + bytes, sizeof(rsp->header)); + return false; + } + if (rsp->header.group_id != MKHI_GROUP_ID_GFX) { + dev_err(dev, "Mismatch group id: 0x%x instead of 0x%x\n", + rsp->header.group_id, MKHI_GROUP_ID_GFX); + return false; + } + if (rsp->header.command != INTEL_LB_RSP) { + dev_err(dev, "Mismatch command: 0x%x instead of 0x%x\n", + rsp->header.command, INTEL_LB_RSP); + return false; + } + if (rsp->header.result) { + dev_err(dev, "Error in result: 0x%x\n", rsp->header.result); + return false; + } + if (bytes < sizeof(*rsp)) { + dev_err(dev, "Received less than message size from the firmware: %zd < %zu\n", + bytes, sizeof(*rsp)); + return false; + } + + return true; +} + +static int mei_lb_push_payload(struct device *dev, + enum intel_lb_type type, u32 flags, + const void *payload, size_t payload_size) +{ + struct mei_cl_device *cldev; + struct mei_lb_req *req = NULL; + struct mei_lb_rsp rsp; + size_t req_size; + ssize_t bytes; + int ret; + + cldev = to_mei_cl_device(dev); + + ret = mei_cldev_enable(cldev); + if (ret) { + dev_dbg(dev, "Failed to enable firmware client.
%d\n", ret); + return ret; + } + + req_size = struct_size(req, payload, payload_size); + if (req_size > mei_cldev_mtu(cldev)) { + dev_err(dev, "Payload is too big: %zu\n", payload_size); + ret = -EMSGSIZE; + goto end; + } + + req = kmalloc(req_size, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto end; + } + + req->header.group_id = MKHI_GROUP_ID_GFX; + req->header.command = INTEL_LB_CMD; + req->type = cpu_to_le32(type); + req->flags = cpu_to_le32(flags); + req->reserved[0] = 0; + req->reserved[1] = 0; + req->payload_size = cpu_to_le32(payload_size); + memcpy(req->payload, payload, payload_size); + + bytes = mei_cldev_send_timeout(cldev, (u8 *)req, req_size, + INTEL_LB_SEND_TIMEOUT_MSEC); + if (bytes < 0) { + dev_err(dev, "Failed to send late binding request to firmware. %zd\n", bytes); + ret = bytes; + goto end; + } + + bytes = mei_cldev_recv_timeout(cldev, (u8 *)&rsp, sizeof(rsp), + INTEL_LB_RECV_TIMEOUT_MSEC); + if (bytes < 0) { + dev_err(dev, "Failed to receive late binding reply from MEI firmware. %zd\n", + bytes); + ret = bytes; + goto end; + } + if (!mei_lb_check_response(dev, bytes, &rsp)) { + dev_err(dev, "Bad response from the firmware. header: %02x %02x %02x %02x\n", + rsp.header.group_id, rsp.header.command, + rsp.header.reserved, rsp.header.result); + ret = -EPROTO; + goto end; + } + + dev_dbg(dev, "status = %u\n", le32_to_cpu(rsp.status)); + ret = (int)le32_to_cpu(rsp.status); +end: + mei_cldev_disable(cldev); + kfree(req); + return ret; +} + +static const struct intel_lb_component_ops mei_lb_ops = { + .push_payload = mei_lb_push_payload, +}; + +static int mei_lb_component_master_bind(struct device *dev) +{ + return component_bind_all(dev, (void *)&mei_lb_ops); +} + +static void mei_lb_component_master_unbind(struct device *dev) +{ + component_unbind_all(dev, (void *)&mei_lb_ops); +} + +static const struct component_master_ops mei_lb_component_master_ops = { + .bind = mei_lb_component_master_bind, + .unbind = mei_lb_component_master_unbind, +}; + +static int mei_lb_component_match(struct device *dev, int subcomponent, + void *data) +{ + /* + * This function checks if requester is Intel %PCI_CLASS_DISPLAY_VGA or + * %PCI_CLASS_DISPLAY_OTHER device, and checks if the requester is the + * grand parent of mei_if i.e. late bind MEI device + */ + struct device *base = data; + struct pci_dev *pdev; + + if (!dev) + return 0; + + if (!dev_is_pci(dev)) + return 0; + + pdev = to_pci_dev(dev); + + if (pdev->vendor != PCI_VENDOR_ID_INTEL) + return 0; + + if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) && + pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8)) + return 0; + + if (subcomponent != INTEL_COMPONENT_LB) + return 0; + + base = base->parent; + if (!base) /* mei device */ + return 0; + + base = base->parent; /* pci device */ + + return !!base && dev == base; +} + +static int mei_lb_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct component_match *master_match = NULL; + int ret; + + component_match_add_typed(&cldev->dev, &master_match, + mei_lb_component_match, &cldev->dev); + if (IS_ERR_OR_NULL(master_match)) + return -ENOMEM; + + ret = component_master_add_with_match(&cldev->dev, + &mei_lb_component_master_ops, + master_match); + if (ret < 0) + dev_err(&cldev->dev, "Failed to add late binding master component. 
%d\n", ret); + + return ret; +} + +static void mei_lb_remove(struct mei_cl_device *cldev) +{ + component_master_del(&cldev->dev, &mei_lb_component_master_ops); +} + +#define MEI_GUID_MKHI UUID_LE(0xe2c2afa2, 0x3817, 0x4d19, \ + 0x9d, 0x95, 0x6, 0xb1, 0x6b, 0x58, 0x8a, 0x5d) + +static const struct mei_cl_device_id mei_lb_tbl[] = { + { .uuid = MEI_GUID_MKHI, .version = MEI_CL_VERSION_ANY }, + { } +}; +MODULE_DEVICE_TABLE(mei, mei_lb_tbl); + +static struct mei_cl_driver mei_lb_driver = { + .id_table = mei_lb_tbl, + .name = "mei_lb", + .probe = mei_lb_probe, + .remove = mei_lb_remove, +}; + +module_mei_cl_driver(mei_lb_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MEI Late Binding Firmware Update/Upload"); diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index a9e6277789ba..79df2fa89a3f 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data, host->pio_ptr = NULL; host->pio_size = 0; } else { - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags, + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, mmc_get_dma_dir(data)); } diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 3a1de477e9af..b0f91cc9e40e 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -283,6 +283,8 @@ #define PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE 0xb #define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL BIT(6) #define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE 0x1 +#define PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN BIT(13) +#define PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE BIT(14) #define GLI_MAX_TUNING_LOOP 40 @@ -1179,6 +1181,65 @@ static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable) gl9767_vhs_read(pdev); } +static void sdhci_gl9767_uhs2_phy_reset(struct sdhci_host *host, bool assert) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct pci_dev *pdev = slot->chip->pdev; + u32 value, set, clr; + + if (assert) { + /* Assert reset, set RESETN and clean RESETN_VALUE */ + set = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN; + clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE; + } else { + /* De-assert reset, clean RESETN and set RESETN_VALUE */ + set = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE; + clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN; + } + + gl9767_vhs_write(pdev); + pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value); + value |= set; + pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value); + value &= ~clr; + pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value); + gl9767_vhs_read(pdev); +} + +static void __gl9767_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) +{ + u8 pwr = 0; + + if (mode != MMC_POWER_OFF) { + pwr = sdhci_get_vdd_value(vdd); + if (!pwr) + WARN(1, "%s: Invalid vdd %#x\n", + mmc_hostname(host->mmc), vdd); + pwr |= SDHCI_VDD2_POWER_180; + } + + if (host->pwr == pwr) + return; + + host->pwr = pwr; + + if (pwr == 0) { + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + } else { + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + + pwr |= SDHCI_POWER_ON; + sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL); + usleep_range(5000, 6250); + + /* Assert reset */ + sdhci_gl9767_uhs2_phy_reset(host, true); + pwr |= SDHCI_VDD2_POWER_ON; + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + usleep_range(5000, 6250); + } +} + static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock) { struct 
sdhci_pci_slot *slot = sdhci_priv(host); @@ -1205,6 +1266,11 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock) } sdhci_enable_clk(host, clk); + + if (mmc_card_uhs2(host->mmc)) + /* De-assert reset */ + sdhci_gl9767_uhs2_phy_reset(host, false); + gl9767_set_low_power_negotiation(pdev, true); } @@ -1476,7 +1542,7 @@ static void sdhci_gl9767_set_power(struct sdhci_host *host, unsigned char mode, gl9767_vhs_read(pdev); sdhci_gli_overcurrent_event_enable(host, false); - sdhci_uhs2_set_power(host, mode, vdd); + __gl9767_uhs2_set_power(host, mode, vdd); sdhci_gli_overcurrent_event_enable(host, true); } else { gl9767_vhs_write(pdev); diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c index 0efeb9d0c376..c459a08d01da 100644 --- a/drivers/mmc/host/sdhci-uhs2.c +++ b/drivers/mmc/host/sdhci-uhs2.c @@ -295,7 +295,8 @@ static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) else sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd); - sdhci_set_clock(host, host->clock); + host->ops->set_clock(host, ios->clock); + host->clock = ios->clock; } static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3a17821efa5c..ac7e11f37af7 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2367,23 +2367,6 @@ void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios) (ios->power_mode == MMC_POWER_UP) && !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) sdhci_enable_preset_value(host, false); - - if (!ios->clock || ios->clock != host->clock) { - host->ops->set_clock(host, ios->clock); - host->clock = ios->clock; - - if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && - host->clock) { - host->timeout_clk = mmc->actual_clock ? - mmc->actual_clock / 1000 : - host->clock / 1000; - mmc->max_busy_timeout = - host->ops->get_max_timeout_count ? - host->ops->get_max_timeout_count(host) : - 1 << 27; - mmc->max_busy_timeout /= host->timeout_clk; - } - } } EXPORT_SYMBOL_GPL(sdhci_set_ios_common); @@ -2410,6 +2393,23 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sdhci_set_ios_common(mmc, ios); + if (!ios->clock || ios->clock != host->clock) { + host->ops->set_clock(host, ios->clock); + host->clock = ios->clock; + + if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && + host->clock) { + host->timeout_clk = mmc->actual_clock ? + mmc->actual_clock / 1000 : + host->clock / 1000; + mmc->max_busy_timeout = + host->ops->get_max_timeout_count ? + host->ops->get_max_timeout_count(host) : + 1 << 27; + mmc->max_busy_timeout /= host->timeout_clk; + } + } + if (host->ops->set_power) host->ops->set_power(host, ios->power_mode, ios->vdd); else diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 257333c88710..57be04f6cb11 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2132,6 +2132,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW && BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && + bond_has_slaves(bond) && memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) { /* Set slave to random address to avoid duplicate mac * address in later fail over. 
@@ -3355,7 +3356,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave) /* Find out through which dev should the packet go */ memset(&fl6, 0, sizeof(struct flowi6)); fl6.daddr = targets[i]; - fl6.flowi6_oif = bond->dev->ifindex; dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); if (dst->error) { diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index a9040c42d2ff..6e97a5a7daaf 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) cnic_bnx2x_delete_wait(dev, 0); - cancel_delayed_work(&cp->delete_task); - flush_workqueue(cnic_wq); + cancel_delayed_work_sync(&cp->delete_task); if (atomic_read(&cp->iscsi_conn) != 0) netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index de8a6ce86ad7..12105ffb5dac 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, oct->io_qmask.iq |= BIT_ULL(iq_no); /* Set the 32B/64B mode for each input queue */ - oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); + oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no); iq->iqcmd_64B = (conf->instr_type == 64); oct->fn_list.setup_iq_regs(oct, iq_no); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 4643a3380618..b1e1ad9e4b48 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -2736,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); goto err_get_attr; } - ethsw->bpid = dpbp_attrs.id; + ethsw->bpid = dpbp_attrs.bpid; return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 048c33039130..b194eae03208 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -948,9 +948,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (!eop_desc) break; - /* prevent any other reads prior to eop_desc */ - smp_rmb(); - i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ if (tx_head == tx_desc) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index d2871757ec94..41e7e29879a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -894,10 +894,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, rx_buf->page_offset, size); sinfo->xdp_frags_size += size; - /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail() - * can pop off frags but driver has to handle it on its own - */ - rx_ring->nr_frags = sinfo->nr_frags; if (page_is_pfmemalloc(rx_buf->page)) xdp_buff_set_frag_pfmemalloc(xdp); @@ -968,20 +964,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, /** * ice_get_pgcnts - grab page_count() for gathered fragments * @rx_ring: Rx descriptor ring to store the page counts on + * @ntc: the next to clean element (not included in this frame!) 
* * This function is intended to be called right before running XDP * program so that the page recycling mechanism will be able to take * a correct decision regarding underlying pages; this is done in such * way as XDP program can change the refcount of page */ -static void ice_get_pgcnts(struct ice_rx_ring *rx_ring) +static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc) { - u32 nr_frags = rx_ring->nr_frags + 1; u32 idx = rx_ring->first_desc; struct ice_rx_buf *rx_buf; u32 cnt = rx_ring->count; - for (int i = 0; i < nr_frags; i++) { + while (idx != ntc) { rx_buf = &rx_ring->rx_buf[idx]; rx_buf->pgcnt = page_count(rx_buf->page); @@ -1154,62 +1150,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) } /** - * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags + * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame * @rx_ring: Rx ring with all the auxiliary data * @xdp: XDP buffer carrying linear + frags part - * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage - * @ntc: a current next_to_clean value to be stored at rx_ring + * @ntc: the next to clean element (not included in this frame!) * @verdict: return code from XDP program execution * - * Walk through gathered fragments and satisfy internal page - * recycle mechanism; we take here an action related to verdict - * returned by XDP program; + * Called after XDP program is completed, or on error with verdict set to + * ICE_XDP_CONSUMED. + * + * Walk through buffers from first_desc to the end of the frame, releasing + * buffers and satisfying internal page recycle mechanism. The action depends + * on verdict from XDP program. */ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - u32 *xdp_xmit, u32 ntc, u32 verdict) + u32 ntc, u32 verdict) { - u32 nr_frags = rx_ring->nr_frags + 1; u32 idx = rx_ring->first_desc; u32 cnt = rx_ring->count; - u32 post_xdp_frags = 1; struct ice_rx_buf *buf; - int i; + u32 xdp_frags = 0; + int i = 0; if (unlikely(xdp_buff_has_frags(xdp))) - post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags; + xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; - for (i = 0; i < post_xdp_frags; i++) { + while (idx != ntc) { buf = &rx_ring->rx_buf[idx]; + if (++idx == cnt) + idx = 0; - if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) { + /* An XDP program could release fragments from the end of the + * buffer. For these, we need to keep the pagecnt_bias as-is. + * To do this, only adjust pagecnt_bias for fragments up to + * the total remaining after the XDP program has run. 
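+		 * Buffer 0 is the head buffer and buffers 1..xdp_frags are the
+		 * fragments still attached after the program ran, hence the
+		 * inclusive comparison below; buffers past that point were
+		 * already released by the program itself.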
+ */ + if (verdict != ICE_XDP_CONSUMED) ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - *xdp_xmit |= verdict; - } else if (verdict & ICE_XDP_CONSUMED) { + else if (i++ <= xdp_frags) buf->pagecnt_bias++; - } else if (verdict == ICE_XDP_PASS) { - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - } ice_put_rx_buf(rx_ring, buf); - - if (++idx == cnt) - idx = 0; - } - /* handle buffers that represented frags released by XDP prog; - * for these we keep pagecnt_bias as-is; refcount from struct page - * has been decremented within XDP prog and we do not have to increase - * the biased refcnt - */ - for (; i < nr_frags; i++) { - buf = &rx_ring->rx_buf[idx]; - ice_put_rx_buf(rx_ring, buf); - if (++idx == cnt) - idx = 0; } xdp->data = NULL; rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; } /** @@ -1317,6 +1302,10 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) /* retrieve a buffer from the ring */ rx_buf = ice_get_rx_buf(rx_ring, size, ntc); + /* Increment ntc before calls to ice_put_rx_mbuf() */ + if (++ntc == cnt) + ntc = 0; + if (!xdp->data) { void *hard_start; @@ -1325,24 +1314,23 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); xdp_buff_clear_frags_flag(xdp); } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { - ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); + ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED); break; } - if (++ntc == cnt) - ntc = 0; /* skip if it is NOP desc */ if (ice_is_non_eop(rx_ring, rx_desc)) continue; - ice_get_pgcnts(rx_ring); + ice_get_pgcnts(rx_ring, ntc); xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); if (xdp_verdict == ICE_XDP_PASS) goto construct_skb; total_rx_bytes += xdp_get_buff_len(xdp); total_rx_pkts++; - ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); + xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR); continue; construct_skb: @@ -1355,7 +1343,7 @@ construct_skb: rx_ring->ring_stats->rx_stats.alloc_buf_failed++; xdp_verdict = ICE_XDP_CONSUMED; } - ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); if (!skb) break; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index fef750c5f288..2fd8e78178a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -358,7 +358,6 @@ struct ice_rx_ring { struct ice_tx_ring *xdp_ring; struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; - u32 nr_frags; u16 max_frame; u16 rx_buf_len; dma_addr_t dma; /* physical address of ring */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 266bfcf2a28f..a427f05814c1 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -345,6 +345,7 @@ struct igc_adapter { /* LEDs */ struct mutex led_mutex; struct igc_led_classdev *leds; + bool leds_available; }; void igc_up(struct igc_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index e79b14d50b24..728d7ca5338b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -7335,8 +7335,14 @@ static int igc_probe(struct pci_dev *pdev, if (IS_ENABLED(CONFIG_IGC_LEDS)) { err = igc_led_setup(adapter); - if (err) - goto err_register; + 
if (err) { + netdev_warn_once(netdev, + "LED init failed (%d); continuing without LED support\n", + err); + adapter->leds_available = false; + } else { + adapter->leds_available = true; + } } return 0; @@ -7392,7 +7398,7 @@ static void igc_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->watchdog_task); hrtimer_cancel(&adapter->hrtimer); - if (IS_ENABLED(CONFIG_IGC_LEDS)) + if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available) igc_led_free(adapter); /* Release control of h/w to f/w. If f/w is AMT enabled, this diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 80e6a2ef1350..6218bdb7f941 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6973,6 +6973,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, break; } + /* Make sure the SWFW semaphore is in a valid state */ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); + + if (hw->mac.type == ixgbe_mac_e610) + mutex_init(&hw->aci.lock); + #ifdef IXGBE_FCOE /* FCoE support exists, always init the FCoE lock */ spin_lock_init(&adapter->fcoe.lock); @@ -11643,10 +11650,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; - /* Make sure the SWFW semaphore is in a valid state */ - if (hw->mac.ops.init_swfw_sync) - hw->mac.ops.init_swfw_sync(hw); - if (ixgbe_check_fw_error(adapter)) return ixgbe_recovery_probe(adapter); @@ -11850,8 +11853,6 @@ skip_sriov: ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ixgbe_mac_set_default_filter(adapter); - if (hw->mac.type == ixgbe_mac_e610) - mutex_init(&hw->aci.lock); timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); if (ixgbe_removed(hw->hw_addr)) { @@ -12007,9 +12008,9 @@ err_register: devl_unlock(adapter->devlink); ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); +err_sw_init: if (hw->mac.type == ixgbe_mac_e610) mutex_destroy(&adapter->hw.aci.lock); -err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; iounmap(adapter->io_addr); @@ -12060,10 +12061,8 @@ static void ixgbe_remove(struct pci_dev *pdev) set_bit(__IXGBE_REMOVING, &adapter->state); cancel_work_sync(&adapter->service_task); - if (adapter->hw.mac.type == ixgbe_mac_e610) { + if (adapter->hw.mac.type == ixgbe_mac_e610) ixgbe_disable_link_status_events(adapter); - mutex_destroy(&adapter->hw.aci.lock); - } if (adapter->mii_bus) mdiobus_unregister(adapter->mii_bus); @@ -12123,6 +12122,9 @@ static void ixgbe_remove(struct pci_dev *pdev) disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); + if (adapter->hw.mac.type == ixgbe_mac_e610) + mutex_destroy(&adapter->hw.aci.lock); + if (disable_dev) pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 24499bb36c00..bcea3fc26a8c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1124,11 +1124,24 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features return err; } +static bool octep_is_vf_valid(struct octep_device *oct, int vf) +{ + if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) { + netdev_err(oct->netdev, "Invalid VF ID %d\n", vf); + return false; + } + + return true; +} + static int octep_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) { struct octep_device *oct = 
netdev_priv(dev); + if (!octep_is_vf_valid(oct, vf)) + return -EINVAL; + ivi->vf = vf; ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr); ivi->spoofchk = true; @@ -1143,6 +1156,9 @@ static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac) struct octep_device *oct = netdev_priv(dev); int err; + if (!octep_is_vf_valid(oct, vf)) + return -EINVAL; + if (!is_valid_ether_addr(mac)) { dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac); return -EADDRNOTAVAIL; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c index ebecdd29f3bd..0867fab61b19 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c @@ -196,6 +196,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id, vf_id); return; } + ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr); rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; } @@ -205,6 +206,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id, { int err; + /* Reset VF-specific information maintained by the PF */ + memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info)); err = octep_ctrl_net_dev_remove(oct, vf_id); if (err) { rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index e52cc6b1a26c..dedd586ed310 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf) if (!ptp) return; - cancel_delayed_work(&pfvf->ptp->synctstamp_work); + cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work); ptp_clock_unregister(ptp->ptp_clock); kfree(ptp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 9560fcba643f..ac65e3191480 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -92,6 +92,7 @@ enum { MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL, MLX5E_ACCEL_FS_POL_FT_LEVEL, + MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, #endif }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index ffcd0cdeb775..23703f28386a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -185,6 +185,7 @@ struct mlx5e_ipsec_rx_create_attr { u32 family; int prio; int pol_level; + int pol_miss_level; int sa_level; int status_level; enum mlx5_flow_namespace_type chains_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 98b6a3a623f9..65dc3529283b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -747,6 +747,7 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, attr->family = family; attr->prio = MLX5E_NIC_PRIO; attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL; + attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL; attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL; attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL; @@ -833,7 +834,7 @@ 
static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec, ft_attr.max_fte = 1; ft_attr.autogroup.max_num_groups = 1; - ft_attr.level = attr->pol_level; + ft_attr.level = attr->pol_miss_level; ft_attr.prio = attr->prio; ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e680673ffb72..15eded36b872 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -139,8 +139,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv) if (up) { netdev_info(priv->netdev, "Link up\n"); netif_carrier_on(priv->netdev); - mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu, - NULL, NULL, NULL); } else { netdev_info(priv->netdev, "Link down\n"); netif_carrier_off(priv->netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 63a7a788fb0d..cd0242eb008c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1506,12 +1506,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { static int mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev)); struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct net_device *netdev; + struct mlx5e_priv *priv; + int err; + + netdev = mlx5_uplink_netdev_get(dev); + if (!netdev) + return 0; + priv = netdev_priv(netdev); rpriv->netdev = priv->netdev; - return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, - rpriv); + err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile, + rpriv); + mlx5_uplink_netdev_put(dev, netdev); + return err; } static void @@ -1638,8 +1647,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) { struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); struct net_device *netdev = rpriv->netdev; - struct mlx5e_priv *priv = netdev_priv(netdev); - void *ppriv = priv->ppriv; + struct mlx5e_priv *priv; + void *ppriv; + + if (!netdev) { + ppriv = rpriv; + goto free_ppriv; + } + + priv = netdev_priv(netdev); + ppriv = priv->ppriv; if (rep->vport == MLX5_VPORT_UPLINK) { mlx5e_vport_uplink_rep_unload(rpriv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 8b4977650183..5f2d6c35f1ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -1515,6 +1515,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) speed = lksettings.base.speed; out: + mlx5_uplink_netdev_put(mdev, slave); return speed; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index cb165085a4c1..db552c012b4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -114,9 +114,9 @@ #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) /* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, - * {IPsec RoCE MPV,Alias table},IPsec RoCE policy + * IPsec policy miss, {IPsec RoCE MPV,Alias table},IPsec RoCE policy */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 10 +#define KERNEL_NIC_PRIO_NUM_LEVELS 11 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc, and one 
more for promisc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index b111ccd03b02..74ea5da58b7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -47,7 +47,20 @@ int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data); static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev) { - return mdev->mlx5e_res.uplink_netdev; + struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res; + struct net_device *netdev; + + mutex_lock(&mlx5e_res->uplink_netdev_lock); + netdev = mlx5e_res->uplink_netdev; + netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL); + mutex_unlock(&mlx5e_res->uplink_netdev_lock); + return netdev; +} + +static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev, + struct net_device *netdev) +{ + netdev_put(netdev, &mdev->mlx5e_res.tracker); } struct mlx5_sd; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 2d7adf7444ba..aa9f2b0a77d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -1170,7 +1170,11 @@ const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev, mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, force_legacy); i = find_first_bit(&temp, max_size); - if (i < max_size) + + /* mlx5e_link_info has holes. Check speed + * is not zero as indication of one. + */ + if (i < max_size && table[i].speed) return &table[i]; return NULL; diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 56d5464222d9..cdbf82affa7b 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev) struct ns83820 *dev = PRIV(ndev); struct rx_info *info = &dev->rx_info; unsigned next_rx; - int rx_rc, len; + int len; u32 cmdsts; __le32 *desc; unsigned long flags; @@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev) if (likely(CMDSTS_OK & cmdsts)) { #endif skb_put(skb, len); - if (unlikely(!skb)) + if (unlikely(!skb)) { + ndev->stats.rx_dropped++; goto netdev_mangle_me_harder_failed; + } if (cmdsts & CMDSTS_DEST_MULTI) ndev->stats.multicast++; ndev->stats.rx_packets++; @@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev) __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag); } #endif - rx_rc = netif_rx(skb); - if (NET_RX_DROP == rx_rc) { -netdev_mangle_me_harder_failed: - ndev->stats.rx_dropped++; - } + netif_rx(skb); } else { dev_kfree_skb_irq(skb); } +netdev_mangle_me_harder_failed: nr++; next_rx = info->next_rx; desc = info->descs + (DESC_SIZE * next_rx); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 9c3d3dd2f847..1f0cea3cae92 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -4462,10 +4462,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, goto out; } - /* Add override window info to buffer */ + /* Add override window info to buffer, preventing buffer overflow */ override_window_dwords = - qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * - PROTECTION_OVERRIDE_ELEMENT_DWORDS; + min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * + 
PROTECTION_OVERRIDE_ELEMENT_DWORDS, + PROTECTION_OVERRIDE_DEPTH_DWORDS); if (override_window_dwords) { addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); offset += qed_grc_dump_addr_range(p_hwfn, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c index d912e709a92c..bb03dad4a300 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c @@ -2092,7 +2092,7 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, break; } - if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_7000 && trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) len = DIV_ROUND_UP(len, 4); diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 18fb44139de2..4d0a38e06f08 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -239,6 +239,14 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), } }, + { + .ident = "MECHREVO Yilong15Pro Series GM5HG7A", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MECHREVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "Yilong15Pro Series GM5HG7A"), + } + }, /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */ { .ident = "PCSpecialist Lafite Pro V 14M", diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index ef988605c4da..bc544a4a5266 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -403,6 +403,7 @@ static const struct acpi_device_id amd_pmf_acpi_ids[] = { {"AMDI0103", 0}, {"AMDI0105", 0}, {"AMDI0107", 0}, + {"AMDI0108", 0}, { } }; MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids); diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 3a488cf9ca06..6a62bc5b02fd 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -673,6 +673,8 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code, if (atkbd_reports_vol_keys) *code = ASUS_WMI_KEY_IGNORE; break; + case 0x5D: /* Wireless console Toggle */ + case 0x5E: /* Wireless console Enable / Keyboard Attach, Detach */ case 0x5F: /* Wireless console Disable / Special Key */ if (quirks->key_wlan_event) *code = quirks->key_wlan_event; diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c index eb076bb4099b..54377b282ff8 100644 --- a/drivers/platform/x86/oxpec.c +++ b/drivers/platform/x86/oxpec.c @@ -126,6 +126,13 @@ static const struct dmi_system_id dmi_table[] = { }, { .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1X"), + }, + .driver_data = (void *)oxp_fly, + }, + { + .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"), }, @@ -306,6 +313,13 @@ static const struct dmi_system_id dmi_table[] = { }, .driver_data = (void *)oxp_x1, }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Pro EVA-02"), + }, + .driver_data = (void *)oxp_x1, + }, {}, }; diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 0006ab3d0789..61c2277c9ce3 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -187,6 +187,7 @@ static const struct genpd_lock_ops genpd_raw_spin_ops = { #define genpd_is_opp_table_fw(genpd) 
(genpd->flags & GENPD_FLAG_OPP_TABLE_FW) #define genpd_is_dev_name_fw(genpd) (genpd->flags & GENPD_FLAG_DEV_NAME_FW) #define genpd_is_no_sync_state(genpd) (genpd->flags & GENPD_FLAG_NO_SYNC_STATE) +#define genpd_is_no_stay_on(genpd) (genpd->flags & GENPD_FLAG_NO_STAY_ON) static inline bool irq_safe_dev_in_sleep_domain(struct device *dev, const struct generic_pm_domain *genpd) @@ -1357,7 +1358,6 @@ err_poweroff: return ret; } -#ifndef CONFIG_PM_GENERIC_DOMAINS_OF static bool pd_ignore_unused; static int __init pd_ignore_unused_setup(char *__unused) { @@ -1382,9 +1382,6 @@ static int __init genpd_power_off_unused(void) mutex_lock(&gpd_list_lock); list_for_each_entry(genpd, &gpd_list, gpd_list_node) { - genpd_lock(genpd); - genpd->stay_on = false; - genpd_unlock(genpd); genpd_queue_power_off_work(genpd); } @@ -1393,7 +1390,6 @@ static int __init genpd_power_off_unused(void) return 0; } late_initcall_sync(genpd_power_off_unused); -#endif #ifdef CONFIG_PM_SLEEP @@ -2367,6 +2363,18 @@ static void genpd_lock_init(struct generic_pm_domain *genpd) } } +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off) +{ + genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off; +} +#else +static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off) +{ + genpd->stay_on = false; +} +#endif + /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. @@ -2392,7 +2400,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; - genpd->stay_on = !is_off; + genpd_set_stay_on(genpd, is_off); genpd->sync_state = GENPD_SYNC_STATE_OFF; genpd->device_count = 0; genpd->provider = NULL; diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c index 5aa7fa1df8fe..7434bf42d215 100644 --- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c +++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c @@ -251,6 +251,7 @@ static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd) genpd->detach_dev = cpg_mssr_detach_dev; } + genpd->flags |= GENPD_FLAG_NO_STAY_ON; genpd->power_off = rcar_gen4_sysc_pd_power_off; genpd->power_on = rcar_gen4_sysc_pd_power_on; diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c index 4b310c1d35fa..d8a8ffcde38d 100644 --- a/drivers/pmdomain/renesas/rcar-sysc.c +++ b/drivers/pmdomain/renesas/rcar-sysc.c @@ -241,6 +241,7 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) } } + genpd->flags |= GENPD_FLAG_NO_STAY_ON; genpd->power_off = rcar_sysc_pd_power_off; genpd->power_on = rcar_sysc_pd_power_on; @@ -342,7 +343,7 @@ struct rcar_pm_domains { }; static struct genpd_onecell_data *rcar_sysc_onecell_data; -static struct device_node *rcar_sysc_onecell_np; +static struct device_node *rcar_sysc_onecell_np __initdata = NULL; static int __init rcar_sysc_pd_init(void) { diff --git a/drivers/pmdomain/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c index 8eedc9a1d825..a6bf7295e909 100644 --- a/drivers/pmdomain/renesas/rmobile-sysc.c +++ b/drivers/pmdomain/renesas/rmobile-sysc.c @@ -100,7 +100,8 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) struct generic_pm_domain *genpd = &rmobile_pd->genpd; struct dev_power_governor *gov = rmobile_pd->gov; - genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + genpd->flags 
|= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP | + GENPD_FLAG_NO_STAY_ON; genpd->attach_dev = cpg_mstp_attach_dev; genpd->detach_dev = cpg_mstp_detach_dev; diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c index 242570c505fb..1955c6d453e4 100644 --- a/drivers/pmdomain/rockchip/pm-domains.c +++ b/drivers/pmdomain/rockchip/pm-domains.c @@ -865,7 +865,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, pd->genpd.power_on = rockchip_pd_power_on; pd->genpd.attach_dev = rockchip_pd_attach_dev; pd->genpd.detach_dev = rockchip_pd_detach_dev; - pd->genpd.flags = GENPD_FLAG_PM_CLK; + pd->genpd.flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_NO_STAY_ON; if (pd_info->active_wakeup) pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP; pm_genpd_init(&pd->genpd, NULL, diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 93dcebbe1141..ad2d9ecf32a5 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1919,8 +1919,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); - if ((cache.flags & 0xff) == 0xff) - cache.flags = -1; /* read error */ + if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff) + cache.flags = -ENODEV; /* bq27000 hdq read error */ if (cache.flags >= 0) { cache.capacity = bq27xxx_battery_read_soc(di); diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 1e50675772fe..cc88aaa106da 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -243,7 +243,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, &hwq->sqe_dma_addr, GFP_KERNEL); - if (!hwq->sqe_dma_addr) { + if (!hwq->sqe_base_addr) { dev_err(hba->dev, "SQE allocation failed\n"); return -ENOMEM; } @@ -252,7 +252,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, &hwq->cqe_dma_addr, GFP_KERNEL); - if (!hwq->cqe_dma_addr) { + if (!hwq->cqe_base_addr) { dev_err(hba->dev, "CQE allocation failed\n"); return -ENOMEM; } diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index f9475c14f733..08cfcd81c6b4 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -236,10 +236,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable, int fg, int bg) { struct fb_cursor cursor; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = DIV_ROUND_UP(vc->vc_font.width, 8), c; - int y = real_y(ops->p, vc->state.y); + int y = real_y(par->p, vc->state.y); int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1; char *src; @@ -253,10 +253,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable, attribute = get_attribute(info, c); src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); - if (ops->cursor_state.image.data != src || - ops->cursor_reset) { - ops->cursor_state.image.data = src; - cursor.set |= FB_CUR_SETIMAGE; + if (par->cursor_state.image.data != src || + par->cursor_reset) { + par->cursor_state.image.data = src; + cursor.set |= FB_CUR_SETIMAGE; } if (attribute) { @@ -265,46 +265,46 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable, dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); if (!dst) return; - kfree(ops->cursor_data); - ops->cursor_data = dst; + kfree(par->cursor_data); + par->cursor_data = dst; update_attr(dst, src, attribute, vc); src = dst; } - if (ops->cursor_state.image.fg_color != fg || - ops->cursor_state.image.bg_color != bg || - ops->cursor_reset) { - ops->cursor_state.image.fg_color = fg; - ops->cursor_state.image.bg_color = bg; + if (par->cursor_state.image.fg_color != fg || + par->cursor_state.image.bg_color != bg || + par->cursor_reset) { + par->cursor_state.image.fg_color = fg; + par->cursor_state.image.bg_color = bg; cursor.set |= FB_CUR_SETCMAP; } - if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) || - (ops->cursor_state.image.dy != (vc->vc_font.height * y)) || - ops->cursor_reset) { - ops->cursor_state.image.dx = vc->vc_font.width * vc->state.x; - ops->cursor_state.image.dy = vc->vc_font.height * y; + if ((par->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) || + (par->cursor_state.image.dy != (vc->vc_font.height * y)) || + par->cursor_reset) { + par->cursor_state.image.dx = vc->vc_font.width * vc->state.x; + par->cursor_state.image.dy = vc->vc_font.height * y; cursor.set |= FB_CUR_SETPOS; } - if (ops->cursor_state.image.height != vc->vc_font.height || - ops->cursor_state.image.width != vc->vc_font.width || - ops->cursor_reset) { - ops->cursor_state.image.height = vc->vc_font.height; - ops->cursor_state.image.width = vc->vc_font.width; + if (par->cursor_state.image.height != vc->vc_font.height || + par->cursor_state.image.width != vc->vc_font.width || + par->cursor_reset) { + par->cursor_state.image.height = vc->vc_font.height; + par->cursor_state.image.width = vc->vc_font.width; cursor.set |= FB_CUR_SETSIZE; } - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || - ops->cursor_reset) { - ops->cursor_state.hot.x = cursor.hot.y = 0; + if (par->cursor_state.hot.x || par->cursor_state.hot.y || + par->cursor_reset) { + par->cursor_state.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } if (cursor.set & FB_CUR_SETSIZE || - vc->vc_cursor_type != ops->p->cursor_shape || - ops->cursor_state.mask == NULL || - ops->cursor_reset) { + vc->vc_cursor_type != par->p->cursor_shape || + par->cursor_state.mask == NULL || + par->cursor_reset) { char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); int cur_height, size, i = 0; u8 msk = 0xff; @@ -312,13 +312,13 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable, if (!mask) return; - kfree(ops->cursor_state.mask); - ops->cursor_state.mask = mask; + kfree(par->cursor_state.mask); + par->cursor_state.mask = mask; - ops->p->cursor_shape = vc->vc_cursor_type; + 
par->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (CUR_SIZE(ops->p->cursor_shape)) { + switch (CUR_SIZE(par->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; @@ -347,19 +347,19 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable, mask[i++] = msk; } - ops->cursor_state.enable = enable && !use_sw; + par->cursor_state.enable = enable && !use_sw; cursor.image.data = src; - cursor.image.fg_color = ops->cursor_state.image.fg_color; - cursor.image.bg_color = ops->cursor_state.image.bg_color; - cursor.image.dx = ops->cursor_state.image.dx; - cursor.image.dy = ops->cursor_state.image.dy; - cursor.image.height = ops->cursor_state.image.height; - cursor.image.width = ops->cursor_state.image.width; - cursor.hot.x = ops->cursor_state.hot.x; - cursor.hot.y = ops->cursor_state.hot.y; - cursor.mask = ops->cursor_state.mask; - cursor.enable = ops->cursor_state.enable; + cursor.image.fg_color = par->cursor_state.image.fg_color; + cursor.image.bg_color = par->cursor_state.image.bg_color; + cursor.image.dx = par->cursor_state.image.dx; + cursor.image.dy = par->cursor_state.image.dy; + cursor.image.height = par->cursor_state.image.height; + cursor.image.width = par->cursor_state.image.width; + cursor.hot.x = par->cursor_state.hot.x; + cursor.hot.y = par->cursor_state.hot.y; + cursor.mask = par->cursor_state.mask; + cursor.enable = par->cursor_state.enable; cursor.image.depth = 1; cursor.rop = ROP_XOR; @@ -369,31 +369,31 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable, if (err) soft_cursor(info, &cursor); - ops->cursor_reset = 0; + par->cursor_reset = 0; } static int bit_update_start(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int err; - err = fb_pan_display(info, &ops->var); - ops->var.xoffset = info->var.xoffset; - ops->var.yoffset = info->var.yoffset; - ops->var.vmode = info->var.vmode; + err = fb_pan_display(info, &par->var); + par->var.xoffset = info->var.xoffset; + par->var.yoffset = info->var.yoffset; + par->var.vmode = info->var.vmode; return err; } -void fbcon_set_bitops(struct fbcon_ops *ops) +static const struct fbcon_bitops bit_fbcon_bitops = { + .bmove = bit_bmove, + .clear = bit_clear, + .putcs = bit_putcs, + .clear_margins = bit_clear_margins, + .cursor = bit_cursor, + .update_start = bit_update_start, +}; + +void fbcon_set_bitops_ur(struct fbcon_par *par) { - ops->bmove = bit_bmove; - ops->clear = bit_clear; - ops->putcs = bit_putcs; - ops->clear_margins = bit_clear_margins; - ops->cursor = bit_cursor; - ops->update_start = bit_update_start; - ops->rotate_font = NULL; - - if (ops->rotate) - fbcon_set_rotate(ops); + par->bitops = &bit_fbcon_bitops; } diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 55f5731e94c3..c1c0cdd7597c 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -81,6 +81,7 @@ #include <asm/irq.h> #include "fbcon.h" +#include "fbcon_rotate.h" #include "fb_internal.h" /* @@ -198,27 +199,27 @@ static struct device *fbcon_device; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION static inline void fbcon_set_rotation(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; if (!(info->flags & FBINFO_MISC_TILEBLITTING) && - ops->p->con_rotate < 4) - ops->rotate = ops->p->con_rotate; + par->p->con_rotate < 4) + par->rotate = par->p->con_rotate; else - ops->rotate = 0; + par->rotate = 0; } static void 
fbcon_rotate(struct fb_info *info, u32 rotate) { - struct fbcon_ops *ops= info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fb_info *fb_info; - if (!ops || ops->currcon == -1) + if (!par || par->currcon == -1) return; - fb_info = fbcon_info_from_console(ops->currcon); + fb_info = fbcon_info_from_console(par->currcon); if (info == fb_info) { - struct fbcon_display *p = &fb_display[ops->currcon]; + struct fbcon_display *p = &fb_display[par->currcon]; if (rotate < 4) p->con_rotate = rotate; @@ -231,12 +232,12 @@ static void fbcon_rotate(struct fb_info *info, u32 rotate) static void fbcon_rotate_all(struct fb_info *info, u32 rotate) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct vc_data *vc; struct fbcon_display *p; int i; - if (!ops || ops->currcon < 0 || rotate > 3) + if (!par || par->currcon < 0 || rotate > 3) return; for (i = first_fb_vc; i <= last_fb_vc; i++) { @@ -254,9 +255,9 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate) #else static inline void fbcon_set_rotation(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - ops->rotate = FB_ROTATE_UR; + par->rotate = FB_ROTATE_UR; } static void fbcon_rotate(struct fb_info *info, u32 rotate) @@ -270,11 +271,31 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate) } #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ +static void fbcon_set_bitops(struct fbcon_par *par) +{ + switch (par->rotate) { + default: + fallthrough; + case FB_ROTATE_UR: + fbcon_set_bitops_ur(par); + break; + case FB_ROTATE_CW: + fbcon_set_bitops_cw(par); + break; + case FB_ROTATE_UD: + fbcon_set_bitops_ud(par); + break; + case FB_ROTATE_CCW: + fbcon_set_bitops_ccw(par); + break; + } +} + static int fbcon_get_rotate(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - return (ops) ? ops->rotate : 0; + return (par) ? 
par->rotate : 0; } static bool fbcon_skip_panic(struct fb_info *info) @@ -291,10 +312,10 @@ static bool fbcon_skip_panic(struct fb_info *info) static inline bool fbcon_is_active(struct vc_data *vc, struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; return info->state == FBINFO_STATE_RUNNING && - vc->vc_mode == KD_TEXT && !ops->graphics && !fbcon_skip_panic(info); + vc->vc_mode == KD_TEXT && !par->graphics && !fbcon_skip_panic(info); } static int get_color(struct vc_data *vc, struct fb_info *info, @@ -376,7 +397,7 @@ static int get_bg_color(struct vc_data *vc, struct fb_info *info, u16 c) static void fb_flashcursor(struct work_struct *work) { - struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work); + struct fbcon_par *par = container_of(work, struct fbcon_par, cursor_work.work); struct fb_info *info; struct vc_data *vc = NULL; int c; @@ -391,10 +412,10 @@ static void fb_flashcursor(struct work_struct *work) return; /* protected by console_lock */ - info = ops->info; + info = par->info; - if (ops->currcon != -1) - vc = vc_cons[ops->currcon].d; + if (par->currcon != -1) + vc = vc_cons[par->currcon].d; if (!vc || !con_is_visible(vc) || fbcon_info_from_console(vc->vc_num) != info || @@ -404,30 +425,30 @@ static void fb_flashcursor(struct work_struct *work) } c = scr_readw((u16 *) vc->vc_pos); - enable = ops->cursor_flash && !ops->cursor_state.enable; - ops->cursor(vc, info, enable, - get_fg_color(vc, info, c), - get_bg_color(vc, info, c)); + enable = par->cursor_flash && !par->cursor_state.enable; + par->bitops->cursor(vc, info, enable, + get_fg_color(vc, info, c), + get_bg_color(vc, info, c)); console_unlock(); - queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, - ops->cur_blink_jiffies); + queue_delayed_work(system_power_efficient_wq, &par->cursor_work, + par->cur_blink_jiffies); } static void fbcon_add_cursor_work(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; if (fbcon_cursor_blink) - queue_delayed_work(system_power_efficient_wq, &ops->cursor_work, - ops->cur_blink_jiffies); + queue_delayed_work(system_power_efficient_wq, &par->cursor_work, + par->cur_blink_jiffies); } static void fbcon_del_cursor_work(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - cancel_delayed_work_sync(&ops->cursor_work); + cancel_delayed_work_sync(&par->cursor_work); } #ifndef MODULE @@ -587,7 +608,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, int cols, int rows, int new_cols, int new_rows) { /* Need to make room for the logo */ - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int cnt, erase = vc->vc_video_erase_char, step; unsigned short *save = NULL, *r, *q; int logo_height; @@ -603,7 +624,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, */ if (fb_get_color_depth(&info->var, &info->fix) == 1) erase &= ~0x400; - logo_height = fb_prepare_logo(info, ops->rotate); + logo_height = fb_prepare_logo(info, par->rotate); logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height); q = (unsigned short *) (vc->vc_origin + vc->vc_size_row * rows); @@ -675,15 +696,15 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, #ifdef CONFIG_FB_TILEBLITTING static void set_blitting_type(struct vc_data *vc, struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = 
info->fbcon_par; - ops->p = &fb_display[vc->vc_num]; + par->p = &fb_display[vc->vc_num]; if ((info->flags & FBINFO_MISC_TILEBLITTING)) fbcon_set_tileops(vc, info); else { fbcon_set_rotation(info); - fbcon_set_bitops(ops); + fbcon_set_bitops(par); } } @@ -700,12 +721,12 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) #else static void set_blitting_type(struct vc_data *vc, struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; info->flags &= ~FBINFO_MISC_TILEBLITTING; - ops->p = &fb_display[vc->vc_num]; + par->p = &fb_display[vc->vc_num]; fbcon_set_rotation(info); - fbcon_set_bitops(ops); + fbcon_set_bitops(par); } static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount) @@ -725,13 +746,13 @@ static void fbcon_release(struct fb_info *info) module_put(info->fbops->owner); if (info->fbcon_par) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; fbcon_del_cursor_work(info); - kfree(ops->cursor_state.mask); - kfree(ops->cursor_data); - kfree(ops->cursor_src); - kfree(ops->fontbuffer); + kfree(par->cursor_state.mask); + kfree(par->cursor_data); + kfree(par->cursor_src); + kfree(par->fontbuffer); kfree(info->fbcon_par); info->fbcon_par = NULL; } @@ -739,7 +760,7 @@ static void fbcon_release(struct fb_info *info) static int fbcon_open(struct fb_info *info) { - struct fbcon_ops *ops; + struct fbcon_par *par; if (!try_module_get(info->fbops->owner)) return -ENODEV; @@ -753,16 +774,16 @@ static int fbcon_open(struct fb_info *info) } unlock_fb_info(info); - ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL); - if (!ops) { + par = kzalloc(sizeof(*par), GFP_KERNEL); + if (!par) { fbcon_release(info); return -ENOMEM; } - INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor); - ops->info = info; - info->fbcon_par = ops; - ops->cur_blink_jiffies = HZ / 5; + INIT_DELAYED_WORK(&par->cursor_work, fb_flashcursor); + par->info = info; + info->fbcon_par = par; + par->cur_blink_jiffies = HZ / 5; return 0; } @@ -809,12 +830,12 @@ static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo, static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, int unit, int show_logo) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int ret; - ops->currcon = fg_console; + par->currcon = fg_console; - if (info->fbops->fb_set_par && !ops->initialized) { + if (info->fbops->fb_set_par && !par->initialized) { ret = info->fbops->fb_set_par(info); if (ret) @@ -823,8 +844,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, "error code %d\n", ret); } - ops->initialized = true; - ops->graphics = 0; + par->initialized = true; + par->graphics = 0; fbcon_set_disp(info, &info->var, unit); if (show_logo) { @@ -961,7 +982,7 @@ static const char *fbcon_startup(void) struct vc_data *vc = vc_cons[fg_console].d; const struct font_desc *font = NULL; struct fb_info *info = NULL; - struct fbcon_ops *ops; + struct fbcon_par *par; int rows, cols; /* @@ -981,10 +1002,10 @@ static const char *fbcon_startup(void) if (fbcon_open(info)) return NULL; - ops = info->fbcon_par; - ops->currcon = -1; - ops->graphics = 1; - ops->cur_rotate = -1; + par = info->fbcon_par; + par->currcon = -1; + par->graphics = 1; + par->cur_rotate = -1; p->con_rotate = initial_rotation; if (p->con_rotate == -1) @@ -1007,8 +1028,8 @@ static const char *fbcon_startup(void) vc->vc_font.charcount = font->charcount; } - cols = 
FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); @@ -1026,7 +1047,7 @@ static const char *fbcon_startup(void) static void fbcon_init(struct vc_data *vc, bool init) { struct fb_info *info; - struct fbcon_ops *ops; + struct fbcon_par *par; struct vc_data **default_mode = vc->vc_display_fg; struct vc_data *svc = *default_mode; struct fbcon_display *t, *p = &fb_display[vc->vc_num]; @@ -1100,8 +1121,8 @@ static void fbcon_init(struct vc_data *vc, bool init) if (!*vc->uni_pagedict_loc) con_copy_unimap(vc, svc); - ops = info->fbcon_par; - ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); + par = info->fbcon_par; + par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); p->con_rotate = initial_rotation; if (p->con_rotate == -1) @@ -1113,8 +1134,8 @@ static void fbcon_init(struct vc_data *vc, bool init) cols = vc->vc_cols; rows = vc->vc_rows; - new_cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); - new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + new_cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); + new_rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); new_cols /= vc->vc_font.width; new_rows /= vc->vc_font.height; @@ -1126,7 +1147,7 @@ static void fbcon_init(struct vc_data *vc, bool init) * We need to do it in fbcon_init() to prevent screen corruption. */ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { - if (info->fbops->fb_set_par && !ops->initialized) { + if (info->fbops->fb_set_par && !par->initialized) { ret = info->fbops->fb_set_par(info); if (ret) @@ -1135,10 +1156,10 @@ static void fbcon_init(struct vc_data *vc, bool init) "error code %d\n", ret); } - ops->initialized = true; + par->initialized = true; } - ops->graphics = 0; + par->graphics = 0; #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION if ((info->flags & FBINFO_HWACCEL_COPYAREA) && @@ -1162,12 +1183,12 @@ static void fbcon_init(struct vc_data *vc, bool init) if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); - if (ops->rotate_font && ops->rotate_font(info, vc)) { - ops->rotate = FB_ROTATE_UR; + if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) { + par->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); } - ops->p = &fb_display[fg_console]; + par->p = &fb_display[fg_console]; } static void fbcon_free_font(struct fbcon_display *p) @@ -1205,7 +1226,7 @@ static void fbcon_deinit(struct vc_data *vc) { struct fbcon_display *p = &fb_display[vc->vc_num]; struct fb_info *info; - struct fbcon_ops *ops; + struct fbcon_par *par; int idx; fbcon_free_font(p); @@ -1219,15 +1240,15 @@ static void fbcon_deinit(struct vc_data *vc) if (!info) goto finished; - ops = info->fbcon_par; + par = info->fbcon_par; - if (!ops) + if (!par) goto finished; if (con_is_visible(vc)) fbcon_del_cursor_work(info); - ops->initialized = false; + par->initialized = false; finished: fbcon_free_font(p); @@ -1274,7 +1295,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, unsigned int height, unsigned int width) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int fg, bg; struct fbcon_display *p = &fb_display[vc->vc_num]; u_int 
y_break; @@ -1289,7 +1310,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, vc->vc_top = 0; /* * If the font dimensions are not an integral of the display - * dimensions then the ops->clear below won't end up clearing + * dimensions then the par->bitops->clear below won't end up clearing * the margins. Call clear_margins here in case the logo * bitmap stretched into the margin area. */ @@ -1303,11 +1324,11 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, y_break = p->vrows - p->yscroll; if (sy < y_break && sy + height - 1 >= y_break) { u_int b = y_break - sy; - ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg); - ops->clear(vc, info, real_y(p, sy + b), sx, height - b, - width, fg, bg); + par->bitops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg); + par->bitops->clear(vc, info, real_y(p, sy + b), sx, height - b, + width, fg, bg); } else - ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); + par->bitops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); } static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, @@ -1321,30 +1342,30 @@ static void fbcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count, { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; if (fbcon_is_active(vc, info)) - ops->putcs(vc, info, s, count, real_y(p, ypos), xpos, - get_fg_color(vc, info, scr_readw(s)), - get_bg_color(vc, info, scr_readw(s))); + par->bitops->putcs(vc, info, s, count, real_y(p, ypos), xpos, + get_fg_color(vc, info, scr_readw(s)), + get_bg_color(vc, info, scr_readw(s))); } static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; if (fbcon_is_active(vc, info)) - ops->clear_margins(vc, info, margin_color, bottom_only); + par->bitops->clear_margins(vc, info, margin_color, bottom_only); } static void fbcon_cursor(struct vc_data *vc, bool enable) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int c = scr_readw((u16 *) vc->vc_pos); - ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); + par->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); if (!fbcon_is_active(vc, info) || vc->vc_deccm != 1) return; @@ -1354,14 +1375,14 @@ static void fbcon_cursor(struct vc_data *vc, bool enable) else fbcon_add_cursor_work(info); - ops->cursor_flash = enable; + par->cursor_flash = enable; - if (!ops->cursor) + if (!par->bitops->cursor) return; - ops->cursor(vc, info, enable, - get_fg_color(vc, info, c), - get_bg_color(vc, info, c)); + par->bitops->cursor(vc, info, enable, + get_fg_color(vc, info, c), + get_bg_color(vc, info, c)); } static int scrollback_phys_max = 0; @@ -1374,7 +1395,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, struct fbcon_display *p, *t; struct vc_data **default_mode, *vc; struct vc_data *svc; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int rows, cols; unsigned long ret = 0; @@ -1407,7 +1428,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, var->yoffset = info->var.yoffset; var->xoffset = info->var.xoffset; fb_set_var(info, var); - ops->var = 
info->var; + par->var = info->var; vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1); vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; if (vc->vc_font.charcount == 256) { @@ -1423,8 +1444,8 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, if (!*vc->uni_pagedict_loc) con_copy_unimap(vc, svc); - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; ret = vc_resize(vc, cols, rows); @@ -1436,16 +1457,16 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, static __inline__ void ywrap_up(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll += count; if (p->yscroll >= p->vrows) /* Deal with wrap */ p->yscroll -= p->vrows; - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode |= FB_VMODE_YWRAP; - ops->update_start(info); + par->var.xoffset = 0; + par->var.yoffset = p->yscroll * vc->vc_font.height; + par->var.vmode |= FB_VMODE_YWRAP; + par->bitops->update_start(info); scrollback_max += count; if (scrollback_max > scrollback_phys_max) scrollback_max = scrollback_phys_max; @@ -1455,16 +1476,16 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count) static __inline__ void ywrap_down(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll -= count; if (p->yscroll < 0) /* Deal with wrap */ p->yscroll += p->vrows; - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode |= FB_VMODE_YWRAP; - ops->update_start(info); + par->var.xoffset = 0; + par->var.yoffset = p->yscroll * vc->vc_font.height; + par->var.vmode |= FB_VMODE_YWRAP; + par->bitops->update_start(info); scrollback_max -= count; if (scrollback_max < 0) scrollback_max = 0; @@ -1475,19 +1496,19 @@ static __inline__ void ypan_up(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; p->yscroll += count; if (p->yscroll > p->vrows - vc->vc_rows) { - ops->bmove(vc, info, p->vrows - vc->vc_rows, - 0, 0, 0, vc->vc_rows, vc->vc_cols); + par->bitops->bmove(vc, info, p->vrows - vc->vc_rows, + 0, 0, 0, vc->vc_rows, vc->vc_cols); p->yscroll -= p->vrows - vc->vc_rows; } - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); + par->var.xoffset = 0; + par->var.yoffset = p->yscroll * vc->vc_font.height; + par->var.vmode &= ~FB_VMODE_YWRAP; + par->bitops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max += count; if (scrollback_max > scrollback_phys_max) @@ -1498,7 +1519,7 @@ static __inline__ void ypan_up(struct vc_data *vc, int count) static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct 
fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll += count; @@ -1508,10 +1529,10 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t); } - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); + par->var.xoffset = 0; + par->var.yoffset = p->yscroll * vc->vc_font.height; + par->var.vmode &= ~FB_VMODE_YWRAP; + par->bitops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max += count; if (scrollback_max > scrollback_phys_max) @@ -1523,19 +1544,19 @@ static __inline__ void ypan_down(struct vc_data *vc, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_display *p = &fb_display[vc->vc_num]; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; p->yscroll -= count; if (p->yscroll < 0) { - ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, - 0, vc->vc_rows, vc->vc_cols); + par->bitops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, + 0, vc->vc_rows, vc->vc_cols); p->yscroll += p->vrows - vc->vc_rows; } - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); + par->var.xoffset = 0; + par->var.yoffset = p->yscroll * vc->vc_font.height; + par->var.vmode &= ~FB_VMODE_YWRAP; + par->bitops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max -= count; if (scrollback_max < 0) @@ -1546,7 +1567,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count) static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; p->yscroll -= count; @@ -1556,10 +1577,10 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count); } - ops->var.xoffset = 0; - ops->var.yoffset = p->yscroll * vc->vc_font.height; - ops->var.vmode &= ~FB_VMODE_YWRAP; - ops->update_start(info); + par->var.xoffset = 0; + par->var.yoffset = p->yscroll * vc->vc_font.height; + par->var.vmode &= ~FB_VMODE_YWRAP; + par->bitops->update_start(info); fbcon_clear_margins(vc, 1); scrollback_max -= count; if (scrollback_max < 0) @@ -1608,7 +1629,7 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, unsigned short *d = (unsigned short *) (vc->vc_origin + vc->vc_size_row * line); unsigned short *s = d + offset; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; while (count--) { unsigned short *start = s; @@ -1621,8 +1642,8 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, if (c == scr_readw(d)) { if (s > start) { - ops->bmove(vc, info, line + ycount, x, - line, x, 1, s-start); + par->bitops->bmove(vc, info, line + ycount, x, + line, x, 1, s - start); x += s - start + 1; start = s + 1; } else { @@ -1637,8 +1658,8 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, d++; } while (s < le); if (s > start) - ops->bmove(vc, info, line + ycount, x, line, x, 1, - s-start); + par->bitops->bmove(vc, info, line + ycount, x, line, x, 1, + s - start); console_conditional_schedule(); if (ycount > 0) line++; @@ -1709,7 +1730,7 @@ static void 
fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int dy, int dx, int height, int width, u_int y_break) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u_int b; if (sy < y_break && sy + height > y_break) { @@ -1743,8 +1764,8 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, } return; } - ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, - height, width); + par->bitops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, + height, width); } static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, @@ -1971,15 +1992,13 @@ static void updatescrollmode_accel(struct fbcon_display *p, struct vc_data *vc) { #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int cap = info->flags; u16 t = 0; - int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, - info->fix.xpanstep); - int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); - int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); - int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, - info->var.xres_virtual); + int ypan = FBCON_SWAP(par->rotate, info->fix.ypanstep, info->fix.xpanstep); + int ywrap = FBCON_SWAP(par->rotate, info->fix.ywrapstep, t); + int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); + int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual); int good_pan = (cap & FBINFO_HWACCEL_YPAN) && divides(ypan, vc->vc_font.height) && vyres > yres; int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) && @@ -2012,11 +2031,10 @@ static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int fh = vc->vc_font.height; - int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); - int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, - info->var.xres_virtual); + int yres = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); + int vyres = FBCON_SWAP(par->rotate, info->var.yres_virtual, info->var.xres_virtual); p->vrows = vyres/fh; if (yres > (fh * (vc->vc_rows + 1))) @@ -2035,7 +2053,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, bool from_user) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; @@ -2058,12 +2076,10 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, return -EINVAL; } - virt_w = FBCON_SWAP(ops->rotate, width, height); - virt_h = FBCON_SWAP(ops->rotate, height, width); - virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, - vc->vc_font.height); - virt_fh = FBCON_SWAP(ops->rotate, vc->vc_font.height, - vc->vc_font.width); + virt_w = FBCON_SWAP(par->rotate, width, height); + virt_h = FBCON_SWAP(par->rotate, height, width); + virt_fw = FBCON_SWAP(par->rotate, vc->vc_font.width, vc->vc_font.height); + virt_fh = FBCON_SWAP(par->rotate, vc->vc_font.height, vc->vc_font.width); var.xres = virt_w * virt_fw; var.yres = virt_h * virt_fh; x_diff = info->var.xres - var.xres; @@ -2089,7 +2105,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, fb_set_var(info, 
&var); } var_to_display(p, &info->var, info); - ops->var = info->var; + par->var = info->var; } updatescrollmode(p, info, vc); return 0; @@ -2098,13 +2114,13 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, static bool fbcon_switch(struct vc_data *vc) { struct fb_info *info, *old_info = NULL; - struct fbcon_ops *ops; + struct fbcon_par *par; struct fbcon_display *p = &fb_display[vc->vc_num]; struct fb_var_screeninfo var; int i, ret, prev_console; info = fbcon_info_from_console(vc->vc_num); - ops = info->fbcon_par; + par = info->fbcon_par; if (logo_shown >= 0) { struct vc_data *conp2 = vc_cons[logo_shown].d; @@ -2115,7 +2131,7 @@ static bool fbcon_switch(struct vc_data *vc) logo_shown = FBCON_LOGO_CANSHOW; } - prev_console = ops->currcon; + prev_console = par->currcon; if (prev_console != -1) old_info = fbcon_info_from_console(prev_console); /* @@ -2128,9 +2144,9 @@ static bool fbcon_switch(struct vc_data *vc) */ fbcon_for_each_registered_fb(i) { if (fbcon_registered_fb[i]->fbcon_par) { - struct fbcon_ops *o = fbcon_registered_fb[i]->fbcon_par; + struct fbcon_par *par = fbcon_registered_fb[i]->fbcon_par; - o->currcon = vc->vc_num; + par->currcon = vc->vc_num; } } memset(&var, 0, sizeof(struct fb_var_screeninfo)); @@ -2144,7 +2160,7 @@ static bool fbcon_switch(struct vc_data *vc) info->var.activate = var.activate; var.vmode |= info->var.vmode & ~FB_VMODE_MASK; fb_set_var(info, &var); - ops->var = info->var; + par->var = info->var; if (old_info != NULL && (old_info != info || info->flags & FBINFO_MISC_ALWAYS_SETPAR)) { @@ -2161,17 +2177,16 @@ static bool fbcon_switch(struct vc_data *vc) fbcon_del_cursor_work(old_info); } - if (!fbcon_is_active(vc, info) || - ops->blank_state != FB_BLANK_UNBLANK) + if (!fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK) fbcon_del_cursor_work(info); else fbcon_add_cursor_work(info); set_blitting_type(vc, info); - ops->cursor_reset = 1; + par->cursor_reset = 1; - if (ops->rotate_font && ops->rotate_font(info, vc)) { - ops->rotate = FB_ROTATE_UR; + if (par->bitops->rotate_font && par->bitops->rotate_font(info, vc)) { + par->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); } @@ -2202,8 +2217,8 @@ static bool fbcon_switch(struct vc_data *vc) scrollback_current = 0; if (fbcon_is_active(vc, info)) { - ops->var.xoffset = ops->var.yoffset = p->yscroll = 0; - ops->update_start(info); + par->var.xoffset = par->var.yoffset = p->yscroll = 0; + par->bitops->update_start(info); } fbcon_set_palette(vc, color_table); @@ -2212,7 +2227,7 @@ static bool fbcon_switch(struct vc_data *vc) if (logo_shown == FBCON_LOGO_DRAW) { logo_shown = fg_console; - fb_show_logo(info, ops->rotate); + fb_show_logo(info, par->rotate); update_region(vc, vc->vc_origin + vc->vc_size_row * vc->vc_top, vc->vc_size_row * (vc->vc_bottom - @@ -2241,27 +2256,27 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank, bool mode_switch) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; if (mode_switch) { struct fb_var_screeninfo var = info->var; - ops->graphics = 1; + par->graphics = 1; if (!blank) { var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE | FB_ACTIVATE_KD_TEXT; fb_set_var(info, &var); - ops->graphics = 0; - ops->var = info->var; + par->graphics = 0; + par->var = info->var; } } if (fbcon_is_active(vc, info)) { - if (ops->blank_state != blank) { - ops->blank_state = blank; + if (par->blank_state != blank) { + par->blank_state = blank; 
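/*
 * [Editor's sketch, not part of the patch] fbcon_blank() above pairs a
 * cached blank_state with the cursor-blink delayed work: the work is
 * (re-)armed only while the console is text-mode and unblanked, and
 * cancelled otherwise. A minimal standalone version of that pattern,
 * with hypothetical names (struct demo_par, demo_blink_fn), might be:
 */
#include <linux/workqueue.h>

struct demo_par {
	struct delayed_work cursor_work;
	unsigned long cur_blink_jiffies;	/* blink period in jiffies */
	bool active;				/* text mode and unblanked */
};

static void demo_blink_fn(struct work_struct *work)
{
	struct demo_par *par = container_of(work, struct demo_par,
					    cursor_work.work);

	/* ... toggle the cursor image here ... */

	/* re-arm only while blinking is still wanted */
	if (par->active)
		queue_delayed_work(system_power_efficient_wq,
				   &par->cursor_work, par->cur_blink_jiffies);
}

static void demo_set_active(struct demo_par *par, bool active)
{
	par->active = active;
	if (active)
		queue_delayed_work(system_power_efficient_wq,
				   &par->cursor_work, par->cur_blink_jiffies);
	else	/* synchronously wait out a blink that is already running */
		cancel_delayed_work_sync(&par->cursor_work);
}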
fbcon_cursor(vc, !blank); - ops->cursor_flash = (!blank); + par->cursor_flash = (!blank); if (fb_blank(info, blank)) fbcon_generic_blank(vc, info, blank); @@ -2271,8 +2286,7 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank, update_screen(vc); } - if (mode_switch || !fbcon_is_active(vc, info) || - ops->blank_state != FB_BLANK_UNBLANK) + if (mode_switch || !fbcon_is_active(vc, info) || par->blank_state != FB_BLANK_UNBLANK) fbcon_del_cursor_work(info); else fbcon_add_cursor_work(info); @@ -2283,10 +2297,10 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank, static void fbcon_debug_enter(struct vc_data *vc) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - ops->save_graphics = ops->graphics; - ops->graphics = 0; + par->save_graphics = par->graphics; + par->graphics = 0; if (info->fbops->fb_debug_enter) info->fbops->fb_debug_enter(info); fbcon_set_palette(vc, color_table); @@ -2295,9 +2309,9 @@ static void fbcon_debug_enter(struct vc_data *vc) static void fbcon_debug_leave(struct vc_data *vc) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - ops->graphics = ops->save_graphics; + par->graphics = par->save_graphics; if (info->fbops->fb_debug_leave) info->fbops->fb_debug_leave(info); } @@ -2432,7 +2446,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount, const u8 * data, int userfont) { struct fb_info *info = fbcon_info_from_console(vc->vc_num); - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; int resize, ret, old_userfont, old_width, old_height, old_charcount; u8 *old_data = vc->vc_font.data; @@ -2458,8 +2472,8 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount, if (resize) { int cols, rows; - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); cols /= w; rows /= h; ret = vc_resize(vc, cols, rows); @@ -2504,7 +2518,7 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font, unsigned charcount = font->charcount; int w = font->width; int h = font->height; - int size; + int size, alloc_size; int i, csum; u8 *new_data, *data = font->data; int pitch = PITCH(font->width); @@ -2531,9 +2545,16 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = CALC_FONTSZ(h, pitch, charcount); + /* Check for integer overflow in font size calculation */ + if (check_mul_overflow(h, pitch, &size) || + check_mul_overflow(size, charcount, &size)) + return -EINVAL; + + /* Check for overflow in allocation size calculation */ + if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size)) + return -EINVAL; - new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); + new_data = kmalloc(alloc_size, GFP_USER); if (!new_data) return -ENOMEM; @@ -2651,11 +2672,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) void fbcon_suspended(struct fb_info *info) { struct vc_data *vc = NULL; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - 
if (!ops || ops->currcon < 0) + if (!par || par->currcon < 0) return; - vc = vc_cons[ops->currcon].d; + vc = vc_cons[par->currcon].d; /* Clear cursor, restore saved data */ fbcon_cursor(vc, false); @@ -2664,27 +2685,27 @@ void fbcon_suspended(struct fb_info *info) void fbcon_resumed(struct fb_info *info) { struct vc_data *vc; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - if (!ops || ops->currcon < 0) + if (!par || par->currcon < 0) return; - vc = vc_cons[ops->currcon].d; + vc = vc_cons[par->currcon].d; update_screen(vc); } static void fbcon_modechanged(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct vc_data *vc; struct fbcon_display *p; int rows, cols; - if (!ops || ops->currcon < 0) + if (!par || par->currcon < 0) return; - vc = vc_cons[ops->currcon].d; + vc = vc_cons[par->currcon].d; if (vc->vc_mode != KD_TEXT || - fbcon_info_from_console(ops->currcon) != info) + fbcon_info_from_console(par->currcon) != info) return; p = &fb_display[vc->vc_num]; @@ -2692,8 +2713,8 @@ static void fbcon_modechanged(struct fb_info *info) if (con_is_visible(vc)) { var_to_display(p, &info->var, info); - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); @@ -2702,8 +2723,8 @@ static void fbcon_modechanged(struct fb_info *info) scrollback_current = 0; if (fbcon_is_active(vc, info)) { - ops->var.xoffset = ops->var.yoffset = p->yscroll = 0; - ops->update_start(info); + par->var.xoffset = par->var.yoffset = p->yscroll = 0; + par->bitops->update_start(info); } fbcon_set_palette(vc, color_table); @@ -2713,12 +2734,12 @@ static void fbcon_modechanged(struct fb_info *info) static void fbcon_set_all_vcs(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct vc_data *vc; struct fbcon_display *p; int i, rows, cols, fg = -1; - if (!ops || ops->currcon < 0) + if (!par || par->currcon < 0) return; for (i = first_fb_vc; i <= last_fb_vc; i++) { @@ -2735,8 +2756,8 @@ static void fbcon_set_all_vcs(struct fb_info *info) p = &fb_display[vc->vc_num]; set_blitting_type(vc, info); var_to_display(p, &info->var, info); - cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); - rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + cols = FBCON_SWAP(par->rotate, info->var.xres, info->var.yres); + rows = FBCON_SWAP(par->rotate, info->var.yres, info->var.xres); cols /= vc->vc_font.width; rows /= vc->vc_font.height; vc_resize(vc, cols, rows); @@ -2759,13 +2780,13 @@ EXPORT_SYMBOL(fbcon_update_vcs); /* let fbcon check if it supports a new screen resolution */ int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct vc_data *vc; unsigned int i; WARN_CONSOLE_UNLOCKED(); - if (!ops) + if (!par) return 0; /* prevent setting a screen size which is smaller than font size */ @@ -3037,15 +3058,14 @@ int fbcon_fb_registered(struct fb_info *info) void fbcon_fb_blanked(struct fb_info *info, int blank) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct vc_data *vc; - if (!ops || ops->currcon < 0) + if (!par || 
par->currcon < 0) return; - vc = vc_cons[ops->currcon].d; - if (vc->vc_mode != KD_TEXT || - fbcon_info_from_console(ops->currcon) != info) + vc = vc_cons[par->currcon].d; + if (vc->vc_mode != KD_TEXT || fbcon_info_from_console(par->currcon) != info) return; if (con_is_visible(vc)) { @@ -3054,7 +3074,7 @@ void fbcon_fb_blanked(struct fb_info *info, int blank) else do_unblank_screen(0); } - ops->blank_state = blank; + par->blank_state = blank; } void fbcon_new_modelist(struct fb_info *info) @@ -3244,7 +3264,7 @@ static ssize_t cursor_blink_show(struct device *device, struct device_attribute *attr, char *buf) { struct fb_info *info; - struct fbcon_ops *ops; + struct fbcon_par *par; int idx, blink = -1; console_lock(); @@ -3254,12 +3274,12 @@ static ssize_t cursor_blink_show(struct device *device, goto err; info = fbcon_registered_fb[idx]; - ops = info->fbcon_par; + par = info->fbcon_par; - if (!ops) + if (!par) goto err; - blink = delayed_work_pending(&ops->cursor_work); + blink = delayed_work_pending(&par->cursor_work); err: console_unlock(); return sysfs_emit(buf, "%d\n", blink); diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 4d97e6d8a16a..44ea4ae4bba0 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -51,7 +51,7 @@ struct fbcon_display { const struct fb_videomode *mode; }; -struct fbcon_ops { +struct fbcon_bitops { void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, @@ -65,6 +65,9 @@ struct fbcon_ops { bool enable, int fg, int bg); int (*update_start)(struct fb_info *info); int (*rotate_font)(struct fb_info *info, struct vc_data *vc); +}; + +struct fbcon_par { struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ struct delayed_work cursor_work; /* Cursor timer */ struct fb_cursor cursor_state; @@ -86,7 +89,10 @@ struct fbcon_ops { u8 *cursor_src; u32 cursor_size; u32 fd_size; + + const struct fbcon_bitops *bitops; }; + /* * Attribute Decoding */ @@ -106,7 +112,6 @@ struct fbcon_ops { ((s) & 0x400) #define attr_blink(s) \ ((s) & 0x8000) - static inline int mono_col(const struct fb_info *info) { @@ -186,7 +191,7 @@ static inline u_short fb_scrollmode(struct fbcon_display *fb) #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif -extern void fbcon_set_bitops(struct fbcon_ops *ops); +extern void fbcon_set_bitops_ur(struct fbcon_par *par); extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor); #define FBCON_ATTRIBUTE_UNDERLINE 1 @@ -224,10 +229,4 @@ static inline int get_attribute(struct fb_info *info, u16 c) (void) (&_r == &_v); \ (i == FB_ROTATE_UR || i == FB_ROTATE_UD) ? 
_r : _v; }) -#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION -extern void fbcon_set_rotate(struct fbcon_ops *ops); -#else -#define fbcon_set_rotate(x) do {} while(0) -#endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ - #endif /* _VIDEO_FBCON_H */ diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 89ef4ba7e867..2f394b5a17f7 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -63,9 +63,9 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute, static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); area.sx = sy * vc->vc_font.height; area.sy = vyres - ((sx + width) * vc->vc_font.width); @@ -80,9 +80,9 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width, int fg, int bg) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fb_fillrect region; - u32 vyres = GETVYRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); region.color = bg; region.dx = sy * vc->vc_font.height; @@ -99,13 +99,13 @@ static inline void ccw_putcs_aligned(struct vc_data *vc, struct fb_info *info, u32 d_pitch, u32 s_pitch, u32 cellsize, struct fb_image *image, u8 *buf, u8 *dst) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 idx = (vc->vc_font.height + 7) >> 3; u8 *src; while (cnt--) { - src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; + src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize; if (attr) { ccw_update_attr(buf, src, attr, vc); @@ -130,7 +130,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, int fg, int bg) { struct fb_image image; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u32 width = (vc->vc_font.height + 7)/8; u32 cellsize = width * vc->vc_font.width; u32 maxcnt = info->pixmap.size/cellsize; @@ -139,9 +139,9 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); - if (!ops->fontbuffer) + if (!par->fontbuffer) return; image.fg_color = fg; @@ -221,28 +221,28 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, int fg, int bg) { struct fb_cursor cursor; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; - int y = real_y(ops->p, vc->state.y); + int y = real_y(par->p, vc->state.y); int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); - if (!ops->fontbuffer) + if (!par->fontbuffer) return; cursor.set = 0; c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); - src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); + src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); - if (ops->cursor_state.image.data != src || - ops->cursor_reset) { - ops->cursor_state.image.data = src; - cursor.set |= FB_CUR_SETIMAGE; + if (par->cursor_state.image.data != src || + par->cursor_reset) { + par->cursor_state.image.data = src; + cursor.set |= FB_CUR_SETIMAGE; } if (attribute) { @@ -251,49 +251,49 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC); if (!dst) return; - kfree(ops->cursor_data); - ops->cursor_data = dst; + kfree(par->cursor_data); + par->cursor_data = dst; ccw_update_attr(dst, src, attribute, vc); src = dst; } - if (ops->cursor_state.image.fg_color != fg || - ops->cursor_state.image.bg_color != bg || - ops->cursor_reset) { - ops->cursor_state.image.fg_color = fg; - ops->cursor_state.image.bg_color = bg; + if (par->cursor_state.image.fg_color != fg || + par->cursor_state.image.bg_color != bg || + par->cursor_reset) { + par->cursor_state.image.fg_color = fg; + par->cursor_state.image.bg_color = bg; cursor.set |= FB_CUR_SETCMAP; } - if (ops->cursor_state.image.height != vc->vc_font.width || - ops->cursor_state.image.width != vc->vc_font.height || - ops->cursor_reset) { - ops->cursor_state.image.height = vc->vc_font.width; - ops->cursor_state.image.width = vc->vc_font.height; + if (par->cursor_state.image.height != vc->vc_font.width || + par->cursor_state.image.width != vc->vc_font.height || + par->cursor_reset) { + par->cursor_state.image.height = vc->vc_font.width; + par->cursor_state.image.width = vc->vc_font.height; cursor.set |= FB_CUR_SETSIZE; } dx = y * vc->vc_font.height; dy = vyres - ((vc->state.x + 1) * vc->vc_font.width); - if (ops->cursor_state.image.dx != dx || - ops->cursor_state.image.dy != dy || - ops->cursor_reset) { - ops->cursor_state.image.dx = dx; - ops->cursor_state.image.dy = dy; + if (par->cursor_state.image.dx != dx || + par->cursor_state.image.dy != dy || + par->cursor_reset) { + par->cursor_state.image.dx = dx; + par->cursor_state.image.dy = dy; cursor.set |= FB_CUR_SETPOS; } - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || - ops->cursor_reset) { - ops->cursor_state.hot.x = cursor.hot.y = 0; + if (par->cursor_state.hot.x || par->cursor_state.hot.y || + par->cursor_reset) { + par->cursor_state.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } if (cursor.set & FB_CUR_SETSIZE || - vc->vc_cursor_type != ops->p->cursor_shape || - ops->cursor_state.mask == NULL || - ops->cursor_reset) { + vc->vc_cursor_type != par->p->cursor_shape || + par->cursor_state.mask == NULL || + par->cursor_reset) { char *tmp, *mask = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC); int cur_height, size, i = 0; @@ -309,13 +309,13 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, return; } - kfree(ops->cursor_state.mask); - ops->cursor_state.mask = mask; + kfree(par->cursor_state.mask); + par->cursor_state.mask = mask; - ops->p->cursor_shape = vc->vc_cursor_type; + 
par->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (CUR_SIZE(ops->p->cursor_shape)) { + switch (CUR_SIZE(par->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; @@ -348,19 +348,19 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, kfree(tmp); } - ops->cursor_state.enable = enable && !use_sw; + par->cursor_state.enable = enable && !use_sw; cursor.image.data = src; - cursor.image.fg_color = ops->cursor_state.image.fg_color; - cursor.image.bg_color = ops->cursor_state.image.bg_color; - cursor.image.dx = ops->cursor_state.image.dx; - cursor.image.dy = ops->cursor_state.image.dy; - cursor.image.height = ops->cursor_state.image.height; - cursor.image.width = ops->cursor_state.image.width; - cursor.hot.x = ops->cursor_state.hot.x; - cursor.hot.y = ops->cursor_state.hot.y; - cursor.mask = ops->cursor_state.mask; - cursor.enable = ops->cursor_state.enable; + cursor.image.fg_color = par->cursor_state.image.fg_color; + cursor.image.bg_color = par->cursor_state.image.bg_color; + cursor.image.dx = par->cursor_state.image.dx; + cursor.image.dy = par->cursor_state.image.dy; + cursor.image.height = par->cursor_state.image.height; + cursor.image.width = par->cursor_state.image.width; + cursor.hot.x = par->cursor_state.hot.x; + cursor.hot.y = par->cursor_state.hot.y; + cursor.mask = par->cursor_state.mask; + cursor.enable = par->cursor_state.enable; cursor.image.depth = 1; cursor.rop = ROP_XOR; @@ -370,32 +370,37 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, if (err) soft_cursor(info, &cursor); - ops->cursor_reset = 0; + par->cursor_reset = 0; } static int ccw_update_start(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u32 yoffset; - u32 vyres = GETVYRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); int err; - yoffset = (vyres - info->var.yres) - ops->var.xoffset; - ops->var.xoffset = ops->var.yoffset; - ops->var.yoffset = yoffset; - err = fb_pan_display(info, &ops->var); - ops->var.xoffset = info->var.xoffset; - ops->var.yoffset = info->var.yoffset; - ops->var.vmode = info->var.vmode; + yoffset = (vyres - info->var.yres) - par->var.xoffset; + par->var.xoffset = par->var.yoffset; + par->var.yoffset = yoffset; + err = fb_pan_display(info, &par->var); + par->var.xoffset = info->var.xoffset; + par->var.yoffset = info->var.yoffset; + par->var.vmode = info->var.vmode; return err; } -void fbcon_rotate_ccw(struct fbcon_ops *ops) +static const struct fbcon_bitops ccw_fbcon_bitops = { + .bmove = ccw_bmove, + .clear = ccw_clear, + .putcs = ccw_putcs, + .clear_margins = ccw_clear_margins, + .cursor = ccw_cursor, + .update_start = ccw_update_start, + .rotate_font = fbcon_rotate_font, +}; + +void fbcon_set_bitops_ccw(struct fbcon_par *par) { - ops->bmove = ccw_bmove; - ops->clear = ccw_clear; - ops->putcs = ccw_putcs; - ops->clear_margins = ccw_clear_margins; - ops->cursor = ccw_cursor; - ops->update_start = ccw_update_start; + par->bitops = &ccw_fbcon_bitops; } diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index b9dac7940fb7..3c3ad3471ec4 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -48,9 +48,9 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute, static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; 
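/*
 * [Editor's sketch, not from the patch] The ccw_fbcon_bitops conversion
 * above replaces the per-instance function pointers of struct fbcon_ops
 * with one pointer to a shared, read-only struct fbcon_bitops table: the
 * table lives in rodata, cannot be scribbled over at runtime, and shrinks
 * the per-device state. Reduced to its core (demo_* names hypothetical):
 */
struct demo_bitops {
	void (*clear)(void *ctx, int sy, int sx, int h, int w);
	int (*update_start)(void *ctx);
};

struct demo_par {
	const struct demo_bitops *bitops;	/* selected per rotation */
};

static void demo_ccw_clear(void *ctx, int sy, int sx, int h, int w) { }
static int demo_ccw_update_start(void *ctx) { return 0; }

/* one const table per blitting variant, shared by every device using it */
static const struct demo_bitops demo_ccw_bitops = {
	.clear		= demo_ccw_clear,
	.update_start	= demo_ccw_update_start,
};

static void demo_set_bitops_ccw(struct demo_par *par)
{
	/* a single pointer assignment swaps in the whole operation set */
	par->bitops = &demo_ccw_bitops;
}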
struct fb_copyarea area; - u32 vxres = GETVXRES(ops->p, info); + u32 vxres = GETVXRES(par->p, info); area.sx = vxres - ((sy + height) * vc->vc_font.height); area.sy = sx * vc->vc_font.width; @@ -65,9 +65,9 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width, int fg, int bg) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fb_fillrect region; - u32 vxres = GETVXRES(ops->p, info); + u32 vxres = GETVXRES(par->p, info); region.color = bg; region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -84,13 +84,13 @@ static inline void cw_putcs_aligned(struct vc_data *vc, struct fb_info *info, u32 d_pitch, u32 s_pitch, u32 cellsize, struct fb_image *image, u8 *buf, u8 *dst) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 idx = (vc->vc_font.height + 7) >> 3; u8 *src; while (cnt--) { - src = ops->fontbuffer + (scr_readw(s++) & charmask)*cellsize; + src = par->fontbuffer + (scr_readw(s++) & charmask) * cellsize; if (attr) { cw_update_attr(buf, src, attr, vc); @@ -115,7 +115,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, int fg, int bg) { struct fb_image image; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u32 width = (vc->vc_font.height + 7)/8; u32 cellsize = width * vc->vc_font.width; u32 maxcnt = info->pixmap.size/cellsize; @@ -124,9 +124,9 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = GETVXRES(ops->p, info); + u32 vxres = GETVXRES(par->p, info); - if (!ops->fontbuffer) + if (!par->fontbuffer) return; image.fg_color = fg; @@ -204,28 +204,28 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, int fg, int bg) { struct fb_cursor cursor; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; - int y = real_y(ops->p, vc->state.y); + int y = real_y(par->p, vc->state.y); int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = GETVXRES(ops->p, info); + u32 vxres = GETVXRES(par->p, info); - if (!ops->fontbuffer) + if (!par->fontbuffer) return; cursor.set = 0; c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); - src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); + src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); - if (ops->cursor_state.image.data != src || - ops->cursor_reset) { - ops->cursor_state.image.data = src; - cursor.set |= FB_CUR_SETIMAGE; + if (par->cursor_state.image.data != src || + par->cursor_reset) { + par->cursor_state.image.data = src; + cursor.set |= FB_CUR_SETIMAGE; } if (attribute) { @@ -234,49 +234,49 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC); if (!dst) return; - kfree(ops->cursor_data); - ops->cursor_data = dst; + kfree(par->cursor_data); + par->cursor_data = dst; cw_update_attr(dst, src, attribute, vc); src = dst; } - if (ops->cursor_state.image.fg_color != fg || - ops->cursor_state.image.bg_color != bg || - ops->cursor_reset) { - ops->cursor_state.image.fg_color = fg; - ops->cursor_state.image.bg_color = bg; + if (par->cursor_state.image.fg_color != fg || + par->cursor_state.image.bg_color != bg || + par->cursor_reset) { + par->cursor_state.image.fg_color = fg; + par->cursor_state.image.bg_color = bg; cursor.set |= FB_CUR_SETCMAP; } - if (ops->cursor_state.image.height != vc->vc_font.width || - ops->cursor_state.image.width != vc->vc_font.height || - ops->cursor_reset) { - ops->cursor_state.image.height = vc->vc_font.width; - ops->cursor_state.image.width = vc->vc_font.height; + if (par->cursor_state.image.height != vc->vc_font.width || + par->cursor_state.image.width != vc->vc_font.height || + par->cursor_reset) { + par->cursor_state.image.height = vc->vc_font.width; + par->cursor_state.image.width = vc->vc_font.height; cursor.set |= FB_CUR_SETSIZE; } dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height); dy = vc->state.x * vc->vc_font.width; - if (ops->cursor_state.image.dx != dx || - ops->cursor_state.image.dy != dy || - ops->cursor_reset) { - ops->cursor_state.image.dx = dx; - ops->cursor_state.image.dy = dy; + if (par->cursor_state.image.dx != dx || + par->cursor_state.image.dy != dy || + par->cursor_reset) { + par->cursor_state.image.dx = dx; + par->cursor_state.image.dy = dy; cursor.set |= FB_CUR_SETPOS; } - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || - ops->cursor_reset) { - ops->cursor_state.hot.x = cursor.hot.y = 0; + if (par->cursor_state.hot.x || par->cursor_state.hot.y || + par->cursor_reset) { + par->cursor_state.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } if (cursor.set & FB_CUR_SETSIZE || - vc->vc_cursor_type != ops->p->cursor_shape || - ops->cursor_state.mask == NULL || - ops->cursor_reset) { + vc->vc_cursor_type != par->p->cursor_shape || + par->cursor_state.mask == NULL || + par->cursor_reset) { char *tmp, *mask = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC); int cur_height, size, i = 0; @@ -292,13 +292,13 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, return; } - kfree(ops->cursor_state.mask); - ops->cursor_state.mask = mask; + kfree(par->cursor_state.mask); + par->cursor_state.mask = mask; - ops->p->cursor_shape = 
vc->vc_cursor_type; + par->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (CUR_SIZE(ops->p->cursor_shape)) { + switch (CUR_SIZE(par->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; @@ -331,19 +331,19 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, kfree(tmp); } - ops->cursor_state.enable = enable && !use_sw; + par->cursor_state.enable = enable && !use_sw; cursor.image.data = src; - cursor.image.fg_color = ops->cursor_state.image.fg_color; - cursor.image.bg_color = ops->cursor_state.image.bg_color; - cursor.image.dx = ops->cursor_state.image.dx; - cursor.image.dy = ops->cursor_state.image.dy; - cursor.image.height = ops->cursor_state.image.height; - cursor.image.width = ops->cursor_state.image.width; - cursor.hot.x = ops->cursor_state.hot.x; - cursor.hot.y = ops->cursor_state.hot.y; - cursor.mask = ops->cursor_state.mask; - cursor.enable = ops->cursor_state.enable; + cursor.image.fg_color = par->cursor_state.image.fg_color; + cursor.image.bg_color = par->cursor_state.image.bg_color; + cursor.image.dx = par->cursor_state.image.dx; + cursor.image.dy = par->cursor_state.image.dy; + cursor.image.height = par->cursor_state.image.height; + cursor.image.width = par->cursor_state.image.width; + cursor.hot.x = par->cursor_state.hot.x; + cursor.hot.y = par->cursor_state.hot.y; + cursor.mask = par->cursor_state.mask; + cursor.enable = par->cursor_state.enable; cursor.image.depth = 1; cursor.rop = ROP_XOR; @@ -353,32 +353,37 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable, if (err) soft_cursor(info, &cursor); - ops->cursor_reset = 0; + par->cursor_reset = 0; } static int cw_update_start(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = GETVXRES(ops->p, info); + struct fbcon_par *par = info->fbcon_par; + u32 vxres = GETVXRES(par->p, info); u32 xoffset; int err; - xoffset = vxres - (info->var.xres + ops->var.yoffset); - ops->var.yoffset = ops->var.xoffset; - ops->var.xoffset = xoffset; - err = fb_pan_display(info, &ops->var); - ops->var.xoffset = info->var.xoffset; - ops->var.yoffset = info->var.yoffset; - ops->var.vmode = info->var.vmode; + xoffset = vxres - (info->var.xres + par->var.yoffset); + par->var.yoffset = par->var.xoffset; + par->var.xoffset = xoffset; + err = fb_pan_display(info, &par->var); + par->var.xoffset = info->var.xoffset; + par->var.yoffset = info->var.yoffset; + par->var.vmode = info->var.vmode; return err; } -void fbcon_rotate_cw(struct fbcon_ops *ops) +static const struct fbcon_bitops cw_fbcon_bitops = { + .bmove = cw_bmove, + .clear = cw_clear, + .putcs = cw_putcs, + .clear_margins = cw_clear_margins, + .cursor = cw_cursor, + .update_start = cw_update_start, + .rotate_font = fbcon_rotate_font, +}; + +void fbcon_set_bitops_cw(struct fbcon_par *par) { - ops->bmove = cw_bmove; - ops->clear = cw_clear; - ops->putcs = cw_putcs; - ops->clear_margins = cw_clear_margins; - ops->cursor = cw_cursor; - ops->update_start = cw_update_start; + par->bitops = &cw_fbcon_bitops; } diff --git a/drivers/video/fbdev/core/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c index ec3c883400f7..1562a8f20b4f 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.c +++ b/drivers/video/fbdev/core/fbcon_rotate.c @@ -18,34 +18,34 @@ #include "fbcon.h" #include "fbcon_rotate.h" -static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc) +int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par 
*par = info->fbcon_par; int len, err = 0; int s_cellsize, d_cellsize, i; const u8 *src; u8 *dst; - if (vc->vc_font.data == ops->fontdata && - ops->p->con_rotate == ops->cur_rotate) + if (vc->vc_font.data == par->fontdata && + par->p->con_rotate == par->cur_rotate) goto finished; - src = ops->fontdata = vc->vc_font.data; - ops->cur_rotate = ops->p->con_rotate; + src = par->fontdata = vc->vc_font.data; + par->cur_rotate = par->p->con_rotate; len = vc->vc_font.charcount; s_cellsize = ((vc->vc_font.width + 7)/8) * vc->vc_font.height; d_cellsize = s_cellsize; - if (ops->rotate == FB_ROTATE_CW || - ops->rotate == FB_ROTATE_CCW) + if (par->rotate == FB_ROTATE_CW || + par->rotate == FB_ROTATE_CCW) d_cellsize = ((vc->vc_font.height + 7)/8) * vc->vc_font.width; if (info->fbops->fb_sync) info->fbops->fb_sync(info); - if (ops->fd_size < d_cellsize * len) { + if (par->fd_size < d_cellsize * len) { dst = kmalloc_array(len, d_cellsize, GFP_KERNEL); if (dst == NULL) { @@ -53,15 +53,15 @@ static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc) goto finished; } - ops->fd_size = d_cellsize * len; - kfree(ops->fontbuffer); - ops->fontbuffer = dst; + par->fd_size = d_cellsize * len; + kfree(par->fontbuffer); + par->fontbuffer = dst; } - dst = ops->fontbuffer; - memset(dst, 0, ops->fd_size); + dst = par->fontbuffer; + memset(dst, 0, par->fd_size); - switch (ops->rotate) { + switch (par->rotate) { case FB_ROTATE_UD: for (i = len; i--; ) { rotate_ud(src, dst, vc->vc_font.width, @@ -92,20 +92,3 @@ static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc) finished: return err; } - -void fbcon_set_rotate(struct fbcon_ops *ops) -{ - ops->rotate_font = fbcon_rotate_font; - - switch(ops->rotate) { - case FB_ROTATE_CW: - fbcon_rotate_cw(ops); - break; - case FB_ROTATE_UD: - fbcon_rotate_ud(ops); - break; - case FB_ROTATE_CCW: - fbcon_rotate_ccw(ops); - break; - } -} diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index 01cbe303b8a2..8cb019e8a9c0 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -90,7 +90,19 @@ static inline void rotate_ccw(const char *in, char *out, u32 width, u32 height) } } -extern void fbcon_rotate_cw(struct fbcon_ops *ops); -extern void fbcon_rotate_ud(struct fbcon_ops *ops); -extern void fbcon_rotate_ccw(struct fbcon_ops *ops); +int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc); + +#if defined(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION) +void fbcon_set_bitops_cw(struct fbcon_par *par); +void fbcon_set_bitops_ud(struct fbcon_par *par); +void fbcon_set_bitops_ccw(struct fbcon_par *par); +#else +static inline void fbcon_set_bitops_cw(struct fbcon_par *par) +{ } +static inline void fbcon_set_bitops_ud(struct fbcon_par *par) +{ } +static inline void fbcon_set_bitops_ccw(struct fbcon_par *par) +{ } +#endif + #endif diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 0af7913a2abd..6fc30cad5b19 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -48,10 +48,10 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute, static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p, info); - u32 vxres = GETVXRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); + u32 vxres = GETVXRES(par->p, 
info); area.sy = vyres - ((sy + height) * vc->vc_font.height); area.sx = vxres - ((sx + width) * vc->vc_font.width); @@ -66,10 +66,10 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width, int fg, int bg) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; struct fb_fillrect region; - u32 vyres = GETVYRES(ops->p, info); - u32 vxres = GETVXRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); + u32 vxres = GETVXRES(par->p, info); region.color = bg; region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -86,13 +86,13 @@ static inline void ud_putcs_aligned(struct vc_data *vc, struct fb_info *info, u32 d_pitch, u32 s_pitch, u32 cellsize, struct fb_image *image, u8 *buf, u8 *dst) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 idx = vc->vc_font.width >> 3; u8 *src; while (cnt--) { - src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; + src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize; if (attr) { ud_update_attr(buf, src, attr, vc); @@ -119,7 +119,7 @@ static inline void ud_putcs_unaligned(struct vc_data *vc, struct fb_image *image, u8 *buf, u8 *dst) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 shift_low = 0, mod = vc->vc_font.width % 8; u32 shift_high = 8; @@ -127,7 +127,7 @@ static inline void ud_putcs_unaligned(struct vc_data *vc, u8 *src; while (cnt--) { - src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; + src = par->fontbuffer + (scr_readw(s--) & charmask) * cellsize; if (attr) { ud_update_attr(buf, src, attr, vc); @@ -152,7 +152,7 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, int fg, int bg) { struct fb_image image; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; u32 width = (vc->vc_font.width + 7)/8; u32 cellsize = width * vc->vc_font.height; u32 maxcnt = info->pixmap.size/cellsize; @@ -161,10 +161,10 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p, info); - u32 vxres = GETVXRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); + u32 vxres = GETVXRES(par->p, info); - if (!ops->fontbuffer) + if (!par->fontbuffer) return; image.fg_color = fg; @@ -251,29 +251,29 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable, int fg, int bg) { struct fb_cursor cursor; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = (vc->vc_font.width + 7) >> 3, c; - int y = real_y(ops->p, vc->state.y); + int y = real_y(par->p, vc->state.y); int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p, info); - u32 vxres = GETVXRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); + u32 vxres = GETVXRES(par->p, info); - if (!ops->fontbuffer) + if (!par->fontbuffer) return; cursor.set = 0; c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); - src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); + src = par->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); - if (ops->cursor_state.image.data != src || - ops->cursor_reset) { - ops->cursor_state.image.data = src; - cursor.set |= FB_CUR_SETIMAGE; + if (par->cursor_state.image.data != src || + par->cursor_reset) { + par->cursor_state.image.data = src; + cursor.set |= FB_CUR_SETIMAGE; } if (attribute) { @@ -282,49 +282,49 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable, dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); if (!dst) return; - kfree(ops->cursor_data); - ops->cursor_data = dst; + kfree(par->cursor_data); + par->cursor_data = dst; ud_update_attr(dst, src, attribute, vc); src = dst; } - if (ops->cursor_state.image.fg_color != fg || - ops->cursor_state.image.bg_color != bg || - ops->cursor_reset) { - ops->cursor_state.image.fg_color = fg; - ops->cursor_state.image.bg_color = bg; + if (par->cursor_state.image.fg_color != fg || + par->cursor_state.image.bg_color != bg || + par->cursor_reset) { + par->cursor_state.image.fg_color = fg; + par->cursor_state.image.bg_color = bg; cursor.set |= FB_CUR_SETCMAP; } - if (ops->cursor_state.image.height != vc->vc_font.height || - ops->cursor_state.image.width != vc->vc_font.width || - ops->cursor_reset) { - ops->cursor_state.image.height = vc->vc_font.height; - ops->cursor_state.image.width = vc->vc_font.width; + if (par->cursor_state.image.height != vc->vc_font.height || + par->cursor_state.image.width != vc->vc_font.width || + par->cursor_reset) { + par->cursor_state.image.height = vc->vc_font.height; + par->cursor_state.image.width = vc->vc_font.width; cursor.set |= FB_CUR_SETSIZE; } dy = vyres - ((y * vc->vc_font.height) + vc->vc_font.height); dx = vxres - ((vc->state.x * vc->vc_font.width) + vc->vc_font.width); - if (ops->cursor_state.image.dx != dx || - ops->cursor_state.image.dy != dy || - ops->cursor_reset) { - ops->cursor_state.image.dx = dx; - ops->cursor_state.image.dy = dy; + if (par->cursor_state.image.dx != dx || + par->cursor_state.image.dy != dy || + par->cursor_reset) { + par->cursor_state.image.dx = dx; + par->cursor_state.image.dy = dy; cursor.set |= FB_CUR_SETPOS; } - if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || - ops->cursor_reset) { - ops->cursor_state.hot.x = cursor.hot.y = 0; + if (par->cursor_state.hot.x || par->cursor_state.hot.y || + par->cursor_reset) { + par->cursor_state.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } if (cursor.set & FB_CUR_SETSIZE || - vc->vc_cursor_type != ops->p->cursor_shape || - ops->cursor_state.mask == NULL || - ops->cursor_reset) { + vc->vc_cursor_type != par->p->cursor_shape || + par->cursor_state.mask == NULL || + par->cursor_reset) { char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC); int cur_height, size, i = 0; u8 msk = 0xff; @@ -332,13 +332,13 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable, if (!mask) return; - kfree(ops->cursor_state.mask); - 
ops->cursor_state.mask = mask; + kfree(par->cursor_state.mask); + par->cursor_state.mask = mask; - ops->p->cursor_shape = vc->vc_cursor_type; + par->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (CUR_SIZE(ops->p->cursor_shape)) { + switch (CUR_SIZE(par->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; @@ -371,19 +371,19 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable, mask[i++] = ~msk; } - ops->cursor_state.enable = enable && !use_sw; + par->cursor_state.enable = enable && !use_sw; cursor.image.data = src; - cursor.image.fg_color = ops->cursor_state.image.fg_color; - cursor.image.bg_color = ops->cursor_state.image.bg_color; - cursor.image.dx = ops->cursor_state.image.dx; - cursor.image.dy = ops->cursor_state.image.dy; - cursor.image.height = ops->cursor_state.image.height; - cursor.image.width = ops->cursor_state.image.width; - cursor.hot.x = ops->cursor_state.hot.x; - cursor.hot.y = ops->cursor_state.hot.y; - cursor.mask = ops->cursor_state.mask; - cursor.enable = ops->cursor_state.enable; + cursor.image.fg_color = par->cursor_state.image.fg_color; + cursor.image.bg_color = par->cursor_state.image.bg_color; + cursor.image.dx = par->cursor_state.image.dx; + cursor.image.dy = par->cursor_state.image.dy; + cursor.image.height = par->cursor_state.image.height; + cursor.image.width = par->cursor_state.image.width; + cursor.hot.x = par->cursor_state.hot.x; + cursor.hot.y = par->cursor_state.hot.y; + cursor.mask = par->cursor_state.mask; + cursor.enable = par->cursor_state.enable; cursor.image.depth = 1; cursor.rop = ROP_XOR; @@ -393,36 +393,41 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable, if (err) soft_cursor(info, &cursor); - ops->cursor_reset = 0; + par->cursor_reset = 0; } static int ud_update_start(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int xoffset, yoffset; - u32 vyres = GETVYRES(ops->p, info); - u32 vxres = GETVXRES(ops->p, info); + u32 vyres = GETVYRES(par->p, info); + u32 vxres = GETVXRES(par->p, info); int err; - xoffset = vxres - info->var.xres - ops->var.xoffset; - yoffset = vyres - info->var.yres - ops->var.yoffset; + xoffset = vxres - info->var.xres - par->var.xoffset; + yoffset = vyres - info->var.yres - par->var.yoffset; if (yoffset < 0) yoffset += vyres; - ops->var.xoffset = xoffset; - ops->var.yoffset = yoffset; - err = fb_pan_display(info, &ops->var); - ops->var.xoffset = info->var.xoffset; - ops->var.yoffset = info->var.yoffset; - ops->var.vmode = info->var.vmode; + par->var.xoffset = xoffset; + par->var.yoffset = yoffset; + err = fb_pan_display(info, &par->var); + par->var.xoffset = info->var.xoffset; + par->var.yoffset = info->var.yoffset; + par->var.vmode = info->var.vmode; return err; } -void fbcon_rotate_ud(struct fbcon_ops *ops) +static const struct fbcon_bitops ud_fbcon_bitops = { + .bmove = ud_bmove, + .clear = ud_clear, + .putcs = ud_putcs, + .clear_margins = ud_clear_margins, + .cursor = ud_cursor, + .update_start = ud_update_start, + .rotate_font = fbcon_rotate_font, +}; + +void fbcon_set_bitops_ud(struct fbcon_par *par) { - ops->bmove = ud_bmove; - ops->clear = ud_clear; - ops->putcs = ud_putcs; - ops->clear_margins = ud_clear_margins; - ops->cursor = ud_cursor; - ops->update_start = ud_update_start; + par->bitops = &ud_fbcon_bitops; } diff --git a/drivers/video/fbdev/core/softcursor.c b/drivers/video/fbdev/core/softcursor.c index 29e5b21cf373..900788c05915 100644 --- 
a/drivers/video/fbdev/core/softcursor.c +++ b/drivers/video/fbdev/core/softcursor.c @@ -21,7 +21,7 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; unsigned int scan_align = info->pixmap.scan_align - 1; unsigned int buf_align = info->pixmap.buf_align - 1; unsigned int i, size, dsize, s_pitch, d_pitch; @@ -34,19 +34,19 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor) s_pitch = (cursor->image.width + 7) >> 3; dsize = s_pitch * cursor->image.height; - if (dsize + sizeof(struct fb_image) != ops->cursor_size) { - kfree(ops->cursor_src); - ops->cursor_size = dsize + sizeof(struct fb_image); + if (dsize + sizeof(struct fb_image) != par->cursor_size) { + kfree(par->cursor_src); + par->cursor_size = dsize + sizeof(struct fb_image); - ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC); - if (!ops->cursor_src) { - ops->cursor_size = 0; + par->cursor_src = kmalloc(par->cursor_size, GFP_ATOMIC); + if (!par->cursor_src) { + par->cursor_size = 0; return -ENOMEM; } } - src = ops->cursor_src + sizeof(struct fb_image); - image = (struct fb_image *)ops->cursor_src; + src = par->cursor_src + sizeof(struct fb_image); + image = (struct fb_image *)par->cursor_src; *image = cursor->image; d_pitch = (s_pitch + scan_align) & ~scan_align; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index d342b90c42b7..a9db668caf72 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -151,34 +151,38 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable, static int tile_update_start(struct fb_info *info) { - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; int err; - err = fb_pan_display(info, &ops->var); - ops->var.xoffset = info->var.xoffset; - ops->var.yoffset = info->var.yoffset; - ops->var.vmode = info->var.vmode; + err = fb_pan_display(info, &par->var); + par->var.xoffset = info->var.xoffset; + par->var.yoffset = info->var.yoffset; + par->var.vmode = info->var.vmode; return err; } +static const struct fbcon_bitops tile_fbcon_bitops = { + .bmove = tile_bmove, + .clear = tile_clear, + .putcs = tile_putcs, + .clear_margins = tile_clear_margins, + .cursor = tile_cursor, + .update_start = tile_update_start, +}; + void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) { struct fb_tilemap map; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_par *par = info->fbcon_par; - ops->bmove = tile_bmove; - ops->clear = tile_clear; - ops->putcs = tile_putcs; - ops->clear_margins = tile_clear_margins; - ops->cursor = tile_cursor; - ops->update_start = tile_update_start; + par->bitops = &tile_fbcon_bitops; - if (ops->p) { + if (par->p) { map.width = vc->vc_font.width; map.height = vc->vc_font.height; map.depth = 1; map.length = vc->vc_font.charcount; - map.data = ops->p->fontdata; + map.data = par->p->fontdata; info->tileops->fb_settile(info, &map); } } diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 9bf282d2453c..499a9edf0ca3 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1795,7 +1795,14 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a, bg1 = list_entry(a, struct btrfs_block_group, bg_list); bg2 = list_entry(b, struct btrfs_block_group, bg_list); - return bg1->used > bg2->used; + /* + * Some other task may be updating the ->used field concurrently, but it + * is not serious if we get a stale value 
or load/store tearing issues, + * as sorting the list of block groups to reclaim is not critical and an + * occasional imperfect order is ok. So silence KCSAN and avoid the + * overhead of locking or any other synchronization. + */ + return data_race(bg1->used > bg2->used); } static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d09d622016ef..35e3071cec06 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1616,25 +1616,29 @@ out: } /* - * Convert the compression suffix (eg. after "zlib" starting with ":") to - * level, unrecognized string will set the default level. Negative level - * numbers are allowed. + * Convert the compression suffix (eg. after "zlib" starting with ":") to level. + * + * If the resulting level exceeds the algo's supported levels, it will be clamped. + * + * Return <0 if no valid string can be found. + * Return 0 if everything is fine. */ -int btrfs_compress_str2level(unsigned int type, const char *str) +int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret) { int level = 0; int ret; - if (!type) + if (!type) { + *level_ret = btrfs_compress_set_level(type, level); return 0; + } if (str[0] == ':') { ret = kstrtoint(str + 1, 10, &level); if (ret) - level = 0; + return ret; } - level = btrfs_compress_set_level(type, level); - - return level; + *level_ret = btrfs_compress_set_level(type, level); + return 0; } diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 1b38e707bbd9..7b41b2b5ff44 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -102,7 +102,7 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered, bool writeback); void btrfs_submit_compressed_read(struct btrfs_bio *bbio); -int btrfs_compress_str2level(unsigned int type, const char *str); +int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret); struct folio *btrfs_alloc_compr_folio(void); void btrfs_free_compr_folio(struct folio *folio); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 0f8d8e275143..c0c1ddd46b67 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1843,7 +1843,6 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, int btrfs_fill_inode(struct btrfs_inode *inode, u32 *rdev) { - struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_delayed_node *delayed_node; struct btrfs_inode_item *inode_item; struct inode *vfs_inode = &inode->vfs_inode; @@ -1864,8 +1863,6 @@ int btrfs_fill_inode(struct btrfs_inode *inode, u32 *rdev) i_uid_write(vfs_inode, btrfs_stack_inode_uid(inode_item)); i_gid_write(vfs_inode, btrfs_stack_inode_gid(inode_item)); btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); - btrfs_inode_set_file_extent_range(inode, 0, - round_up(i_size_read(vfs_inode), fs_info->sectorsize)); vfs_inode->i_mode = btrfs_stack_inode_mode(inode_item); set_nlink(vfs_inode, btrfs_stack_inode_nlink(inode_item)); inode_set_bytes(vfs_inode, btrfs_stack_inode_nbytes(inode_item)); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e7218e78bff4..18db1053cdf0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3885,10 +3885,6 @@ static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path bool filled = false; int first_xattr_slot; - ret = btrfs_init_file_extent_tree(inode); - if (ret) - goto out; - ret = btrfs_fill_inode(inode, &rdev); if (!ret) filled = true; @@ -3920,8 +3916,6 @@ static int 
btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item)); i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item)); btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); - btrfs_inode_set_file_extent_range(inode, 0, - round_up(i_size_read(vfs_inode), fs_info->sectorsize)); inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime), btrfs_timespec_nsec(leaf, &inode_item->atime)); @@ -3953,6 +3947,11 @@ static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path btrfs_set_inode_mapping_order(inode); cache_index: + ret = btrfs_init_file_extent_tree(inode); + if (ret) + goto out; + btrfs_inode_set_file_extent_range(inode, 0, + round_up(i_size_read(vfs_inode), fs_info->sectorsize)); /* * If we were modified in the current generation and evicted from memory * and then re-read we need to do a full sync since we don't have any diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index 3871c3a6c743..9f1858b42c0e 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -980,11 +980,18 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info) if (!btrfs_test_opt(fs_info, REF_VERIFY)) return 0; + extent_root = btrfs_extent_root(fs_info, 0); + /* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */ + if (IS_ERR(extent_root)) { + btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling"); + btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); + return 0; + } + path = btrfs_alloc_path(); if (!path) return -ENOMEM; - extent_root = btrfs_extent_root(fs_info, 0); eb = btrfs_read_lock_root_node(extent_root); level = btrfs_header_level(eb); path->nodes[level] = eb; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index df1f6cc3fe21..b06b8f325537 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -276,6 +276,7 @@ static int btrfs_parse_compress(struct btrfs_fs_context *ctx, const struct fs_parameter *param, int opt) { const char *string = param->string; + int ret; /* * Provide the same semantics as older kernels that don't use fs @@ -294,15 +295,19 @@ static int btrfs_parse_compress(struct btrfs_fs_context *ctx, btrfs_clear_opt(ctx->mount_opt, NODATASUM); } else if (btrfs_match_compress_type(string, "zlib", true)) { ctx->compress_type = BTRFS_COMPRESS_ZLIB; - ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, - string + 4); + ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, string + 4, + &ctx->compress_level); + if (ret < 0) + goto error; btrfs_set_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, NODATACOW); btrfs_clear_opt(ctx->mount_opt, NODATASUM); } else if (btrfs_match_compress_type(string, "lzo", true)) { ctx->compress_type = BTRFS_COMPRESS_LZO; - ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_LZO, - string + 3); + ret = btrfs_compress_str2level(BTRFS_COMPRESS_LZO, string + 3, + &ctx->compress_level); + if (ret < 0) + goto error; if (string[3] == ':' && string[4]) btrfs_warn(NULL, "Compression level ignored for LZO"); btrfs_set_opt(ctx->mount_opt, COMPRESS); @@ -310,8 +315,10 @@ static int btrfs_parse_compress(struct btrfs_fs_context *ctx, btrfs_clear_opt(ctx->mount_opt, NODATASUM); } else if (btrfs_match_compress_type(string, "zstd", true)) { ctx->compress_type = BTRFS_COMPRESS_ZSTD; - ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, - string + 4); + ret = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, string + 4, + &ctx->compress_level); + if (ret < 0) + goto error; 
btrfs_set_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, NODATACOW); btrfs_clear_opt(ctx->mount_opt, NODATASUM); @@ -322,10 +329,14 @@ static int btrfs_parse_compress(struct btrfs_fs_context *ctx, btrfs_clear_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); } else { - btrfs_err(NULL, "unrecognized compression value %s", string); - return -EINVAL; + ret = -EINVAL; + goto error; } return 0; +error: + btrfs_err(NULL, "failed to parse compression option '%s'", string); + return ret; + } static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 0f556f4de3f9..a997c7cc35a2 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1756,10 +1756,10 @@ static int check_inode_ref(struct extent_buffer *leaf, while (ptr < end) { u16 namelen; - if (unlikely(ptr + sizeof(iref) > end)) { + if (unlikely(ptr + sizeof(*iref) > end)) { inode_ref_err(leaf, slot, "inode ref overflow, ptr %lu end %lu inode_ref_size %zu", - ptr, end, sizeof(iref)); + ptr, end, sizeof(*iref)); return -EUCLEAN; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7d5d90845ca9..7a63afedd01e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1964,7 +1964,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, search_key.objectid = log_key.objectid; search_key.type = BTRFS_INODE_EXTREF_KEY; - search_key.offset = key->objectid; + search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len); ret = backref_in_log(root->log_root, &search_key, key->objectid, &name); if (ret < 0) { goto out; diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index ea662036f441..efc2a81f50e5 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -2582,9 +2582,9 @@ again: spin_lock(&space_info->lock); space_info->total_bytes -= bg->length; space_info->disk_total -= bg->length * factor; + space_info->disk_total -= bg->zone_unusable; /* There is no allocation ever happened. */ ASSERT(bg->used == 0); - ASSERT(bg->zone_unusable == 0); /* No super block in a block group on the zoned setup. 
*/ ASSERT(bg->bytes_super == 0); spin_unlock(&space_info->lock); diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index 14868a3dd592..bc52afbfc5c7 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c @@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs) ************************************************************************/ static ssize_t nilfs_feature_revision_show(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d.%d\n", NILFS_CURRENT_REV, NILFS_MINOR_REV); @@ -1087,7 +1087,7 @@ static const char features_readme_str[] = "(1) revision\n\tshow current revision of NILFS file system driver.\n"; static ssize_t nilfs_feature_README_show(struct kobject *kobj, - struct attribute *attr, + struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, features_readme_str); diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h index 78a87a016928..d370cd5cce3f 100644 --- a/fs/nilfs2/sysfs.h +++ b/fs/nilfs2/sysfs.h @@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups { struct completion sg_segments_kobj_unregister; }; -#define NILFS_COMMON_ATTR_STRUCT(name) \ +#define NILFS_KOBJ_ATTR_STRUCT(name) \ struct nilfs_##name##_attr { \ struct attribute attr; \ - ssize_t (*show)(struct kobject *, struct attribute *, \ + ssize_t (*show)(struct kobject *, struct kobj_attribute *, \ char *); \ - ssize_t (*store)(struct kobject *, struct attribute *, \ + ssize_t (*store)(struct kobject *, struct kobj_attribute *, \ const char *, size_t); \ } -NILFS_COMMON_ATTR_STRUCT(feature); +NILFS_KOBJ_ATTR_STRUCT(feature); #define NILFS_DEV_ATTR_STRUCT(name) \ struct nilfs_##name##_attr { \ diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index c34c533b2efa..e8fba98690ce 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -312,8 +312,8 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode); extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon); -extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon, - const char *path); +void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon, + struct dentry *dentry); extern void cifs_mark_open_handles_for_deleted_file(struct inode *inode, const char *path); diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index 11d442e8b3d6..0f0d2dae6283 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -1984,7 +1984,7 @@ static int __cifs_unlink(struct inode *dir, struct dentry *dentry, bool sillyren } netfs_wait_for_outstanding_io(inode); - cifs_close_deferred_file_under_dentry(tcon, full_path); + cifs_close_deferred_file_under_dentry(tcon, dentry); #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { @@ -2003,8 +2003,21 @@ retry_std_delete: goto psx_del_no_retry; } - if (sillyrename || (server->vals->protocol_id > SMB10_PROT_ID && - d_is_positive(dentry) && d_count(dentry) > 2)) + /* For SMB2+, if the file is open, we always perform a silly rename. + * + * We check for d_count() right after calling + * cifs_close_deferred_file_under_dentry() to make sure that the + * dentry's refcount gets dropped in case the file had any deferred + * close. 
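+ * + * Roughly (an illustration inferred from the comment above, not text from the change): the unlink caller itself accounts for one dentry reference and a pending deferred close holds another, so once the deferred close is cancelled the count drops back to 1 and the unlink can proceed, while any other live open keeps d_count() above 1 and routes the operation through the silly-rename path.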
+ */ + if (!sillyrename && server->vals->protocol_id > SMB10_PROT_ID) { + spin_lock(&dentry->d_lock); + if (d_count(dentry) > 1) + sillyrename = true; + spin_unlock(&dentry->d_lock); + } + + if (sillyrename) rc = -EBUSY; else rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry); @@ -2538,10 +2551,10 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, goto cifs_rename_exit; } - cifs_close_deferred_file_under_dentry(tcon, from_name); + cifs_close_deferred_file_under_dentry(tcon, source_dentry); if (d_inode(target_dentry) != NULL) { netfs_wait_for_outstanding_io(d_inode(target_dentry)); - cifs_close_deferred_file_under_dentry(tcon, to_name); + cifs_close_deferred_file_under_dentry(tcon, target_dentry); } rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c index da23cc12a52c..dda6dece802a 100644 --- a/fs/smb/client/misc.c +++ b/fs/smb/client/misc.c @@ -832,33 +832,28 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon) kfree(tmp_list); } } -void -cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) + +void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, + struct dentry *dentry) { - struct cifsFileInfo *cfile; struct file_list *tmp_list, *tmp_next_list; - void *page; - const char *full_path; + struct cifsFileInfo *cfile; LIST_HEAD(file_head); - page = alloc_dentry_path(); spin_lock(&tcon->open_file_lock); list_for_each_entry(cfile, &tcon->openFileList, tlist) { - full_path = build_path_from_dentry(cfile->dentry, page); - if (strstr(full_path, path)) { - if (delayed_work_pending(&cfile->deferred)) { - if (cancel_delayed_work(&cfile->deferred)) { - spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); - cifs_del_deferred_close(cfile); - spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); - - tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); - if (tmp_list == NULL) - break; - tmp_list->cfile = cfile; - list_add_tail(&tmp_list->list, &file_head); - } - } + if ((cfile->dentry == dentry) && + delayed_work_pending(&cfile->deferred) && + cancel_delayed_work(&cfile->deferred)) { + spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); + cifs_del_deferred_close(cfile); + spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); + + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); + if (tmp_list == NULL) + break; + tmp_list->cfile = cfile; + list_add_tail(&tmp_list->list, &file_head); } } spin_unlock(&tcon->open_file_lock); @@ -868,7 +863,6 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) list_del(&tmp_list->list); kfree(tmp_list); } - free_dentry_path(page); } /* diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c index 02d6db431fd4..e0fce5033004 100644 --- a/fs/smb/client/smbdirect.c +++ b/fs/smb/client/smbdirect.c @@ -453,9 +453,12 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) struct smbdirect_recv_io *response = container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe); struct smbdirect_socket *sc = response->socket; + struct smbdirect_socket_parameters *sp = &sc->parameters; struct smbd_connection *info = container_of(sc, struct smbd_connection, socket); - int data_length = 0; + u32 data_offset = 0; + u32 data_length = 0; + u32 remaining_data_length = 0; log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", response, sc->recv_io.expected, wc->status, wc->opcode, @@ -487,7 +490,22 @@ static void recv_done(struct ib_cq 
*cq, struct ib_wc *wc) /* SMBD data transfer packet */ case SMBDIRECT_EXPECT_DATA_TRANSFER: data_transfer = smbdirect_recv_io_payload(response); + + if (wc->byte_len < + offsetof(struct smbdirect_data_transfer, padding)) + goto error; + + remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); + data_offset = le32_to_cpu(data_transfer->data_offset); data_length = le32_to_cpu(data_transfer->data_length); + if (wc->byte_len < data_offset || + (u64)wc->byte_len < (u64)data_offset + data_length) + goto error; + + if (remaining_data_length > sp->max_fragmented_recv_size || + data_length > sp->max_fragmented_recv_size || + (u64)remaining_data_length + (u64)data_length > (u64)sp->max_fragmented_recv_size) + goto error; if (data_length) { if (sc->recv_io.reassembly.full_packet_received) @@ -1090,8 +1108,10 @@ static int smbd_negotiate(struct smbd_connection *info) log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n", rc, response->sge.addr, response->sge.length, response->sge.lkey); - if (rc) + if (rc) { + put_receive_buffer(info, response); return rc; + } init_completion(&info->negotiate_completion); info->negotiate_done = false; @@ -1329,13 +1349,16 @@ void smbd_destroy(struct TCP_Server_Info *server) sc->status == SMBDIRECT_SOCKET_DISCONNECTED); } + log_rdma_event(INFO, "cancelling post_send_credits_work\n"); + disable_work_sync(&info->post_send_credits_work); + log_rdma_event(INFO, "destroying qp\n"); ib_drain_qp(sc->ib.qp); rdma_destroy_qp(sc->rdma.cm_id); sc->ib.qp = NULL; log_rdma_event(INFO, "cancelling idle timer\n"); - cancel_delayed_work_sync(&info->idle_timer_work); + disable_delayed_work_sync(&info->idle_timer_work); /* It's not possible for upper layer to get to reassembly */ log_rdma_event(INFO, "drain the reassembly queue\n"); @@ -1708,7 +1731,7 @@ allocate_mr_failed: return NULL; negotiation_failed: - cancel_delayed_work_sync(&info->idle_timer_work); + disable_delayed_work_sync(&info->idle_timer_work); destroy_caches_and_workqueue(info); sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; rdma_disconnect(sc->rdma.cm_id); @@ -2067,7 +2090,7 @@ static void destroy_mr_list(struct smbd_connection *info) struct smbdirect_socket *sc = &info->socket; struct smbd_mr *mr, *tmp; - cancel_work_sync(&info->mr_recovery_work); + disable_work_sync(&info->mr_recovery_work); list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { if (mr->state == MR_INVALIDATED) ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index 5466aa8c39b1..6550bd9f002c 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -554,7 +554,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) case SMB_DIRECT_MSG_DATA_TRANSFER: { struct smb_direct_data_transfer *data_transfer = (struct smb_direct_data_transfer *)recvmsg->packet; - unsigned int data_length; + u32 remaining_data_length, data_offset, data_length; int avail_recvmsg_count, receive_credits; if (wc->byte_len < @@ -564,15 +564,25 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } + remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); data_length = le32_to_cpu(data_transfer->data_length); - if (data_length) { - if (wc->byte_len < sizeof(struct smb_direct_data_transfer) + - (u64)data_length) { - put_recvmsg(t, recvmsg); - smb_direct_disconnect_rdma_connection(t); - return; - } + data_offset = le32_to_cpu(data_transfer->data_offset); + if (wc->byte_len < data_offset || + 
wc->byte_len < (u64)data_offset + data_length) { + put_recvmsg(t, recvmsg); + smb_direct_disconnect_rdma_connection(t); + return; + } + if (remaining_data_length > t->max_fragmented_recv_size || + data_length > t->max_fragmented_recv_size || + (u64)remaining_data_length + (u64)data_length > + (u64)t->max_fragmented_recv_size) { + put_recvmsg(t, recvmsg); + smb_direct_disconnect_rdma_connection(t); + return; + } + if (data_length) { if (t->full_packet_received) recvmsg->first_segment = true; @@ -1209,78 +1219,130 @@ static int smb_direct_writev(struct ksmbd_transport *t, bool need_invalidate, unsigned int remote_key) { struct smb_direct_transport *st = smb_trans_direct_transfort(t); - int remaining_data_length; - int start, i, j; - int max_iov_size = st->max_send_size - + size_t remaining_data_length; + size_t iov_idx; + size_t iov_ofs; + size_t max_iov_size = st->max_send_size - sizeof(struct smb_direct_data_transfer); int ret; - struct kvec vec; struct smb_direct_send_ctx send_ctx; + int error = 0; if (st->status != SMB_DIRECT_CS_CONNECTED) return -ENOTCONN; //FIXME: skip RFC1002 header.. + if (WARN_ON_ONCE(niovs <= 1 || iov[0].iov_len != 4)) + return -EINVAL; buflen -= 4; + iov_idx = 1; + iov_ofs = 0; remaining_data_length = buflen; ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen); smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key); - start = i = 1; - buflen = 0; - while (true) { - buflen += iov[i].iov_len; - if (buflen > max_iov_size) { - if (i > start) { - remaining_data_length -= - (buflen - iov[i].iov_len); - ret = smb_direct_post_send_data(st, &send_ctx, - &iov[start], i - start, - remaining_data_length); - if (ret) + while (remaining_data_length) { + struct kvec vecs[SMB_DIRECT_MAX_SEND_SGES - 1]; /* minus smbdirect hdr */ + size_t possible_bytes = max_iov_size; + size_t possible_vecs; + size_t bytes = 0; + size_t nvecs = 0; + + /* + * For the last message remaining_data_length should + * have been 0 already! + */ + if (WARN_ON_ONCE(iov_idx >= niovs)) { + error = -EINVAL; + goto done; + } + + /* + * We have 2 factors which limit the arguments we pass + * to smb_direct_post_send_data(): + * + * 1. The number of supported sges for the send, + * while one is reserved for the smbdirect header. + * And we currently need one SGE per page. + * 2. The number of negotiated payload bytes per send. + */ + possible_vecs = min_t(size_t, ARRAY_SIZE(vecs), niovs - iov_idx); + + while (iov_idx < niovs && possible_vecs && possible_bytes) { + struct kvec *v = &vecs[nvecs]; + int page_count; + + v->iov_base = ((u8 *)iov[iov_idx].iov_base) + iov_ofs; + v->iov_len = min_t(size_t, + iov[iov_idx].iov_len - iov_ofs, + possible_bytes); + page_count = get_buf_page_count(v->iov_base, v->iov_len); + if (page_count > possible_vecs) { + /* + * If the number of pages in the buffer + * is too much (because we currently require + * one SGE per page), we need to limit the + * length. + * + * We know possible_vecs is at least 1, + * so we always keep the first page. + * + * We need to calculate the number of extra + * pages (epages) we can also keep. + * + * We calculate the number of bytes in the + * first page (fplen), this should never be + * larger than v->iov_len because page_count is + * at least 2, but adding a limitation feels + * better. + * + * Then we calculate the number of bytes (elen) + * we can keep for the extra pages. 
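+ * + * A worked example with hypothetical numbers, not part of + * the change itself: with PAGE_SIZE 4096, possible_vecs 3 + * and a 16 KiB buffer starting at offset 512 into its first + * page, page_count is 5, so fplen becomes 3584, epages 2 and + * elen 8192; v->iov_len is clamped to 11776 and the remaining + * 4608 bytes go out with a later send.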
+ */ + size_t epages = possible_vecs - 1; + size_t fpofs = offset_in_page(v->iov_base); + size_t fplen = min_t(size_t, PAGE_SIZE - fpofs, v->iov_len); + size_t elen = min_t(size_t, v->iov_len - fplen, epages*PAGE_SIZE); + + v->iov_len = fplen + elen; + page_count = get_buf_page_count(v->iov_base, v->iov_len); + if (WARN_ON_ONCE(page_count > possible_vecs)) { + /* + * Something went wrong in the above + * logic... + */ + error = -EINVAL; goto done; - } else { - /* iov[start] is too big, break it */ - int nvec = (buflen + max_iov_size - 1) / - max_iov_size; - - for (j = 0; j < nvec; j++) { - vec.iov_base = - (char *)iov[start].iov_base + - j * max_iov_size; - vec.iov_len = - min_t(int, max_iov_size, - buflen - max_iov_size * j); - remaining_data_length -= vec.iov_len; - ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1, - remaining_data_length); - if (ret) - goto done; } - i++; - if (i == niovs) - break; } - start = i; - buflen = 0; - } else { - i++; - if (i == niovs) { - /* send out all remaining vecs */ - remaining_data_length -= buflen; - ret = smb_direct_post_send_data(st, &send_ctx, - &iov[start], i - start, - remaining_data_length); - if (ret) - goto done; - break; + possible_vecs -= page_count; + nvecs += 1; + possible_bytes -= v->iov_len; + bytes += v->iov_len; + + iov_ofs += v->iov_len; + if (iov_ofs >= iov[iov_idx].iov_len) { + iov_idx += 1; + iov_ofs = 0; } } + + remaining_data_length -= bytes; + + ret = smb_direct_post_send_data(st, &send_ctx, + vecs, nvecs, + remaining_data_length); + if (unlikely(ret)) { + error = ret; + goto done; + } } done: ret = smb_direct_flush_send_list(st, &send_ctx, true); + if (unlikely(!ret && error)) + ret = error; /* * As an optimization, we don't wait for individual I/O to finish @@ -1744,6 +1806,11 @@ static int smb_direct_init_params(struct smb_direct_transport *t, return -EINVAL; } + if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) { + pr_err("warning: device max_send_sge = %d too small\n", + device->attrs.max_send_sge); + return -EINVAL; + } if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) { pr_err("warning: device max_recv_sge = %d too small\n", device->attrs.max_recv_sge); @@ -1767,7 +1834,7 @@ static int smb_direct_init_params(struct smb_direct_transport *t, cap->max_send_wr = max_send_wrs; cap->max_recv_wr = t->recv_credit_max; - cap->max_send_sge = max_sge_per_wr; + cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES; cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES; cap->max_inline_data = 0; cap->max_rdma_ctxs = t->max_rw_credits; diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index f7b3b93f3a49..0c70f3a55575 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -135,6 +135,7 @@ struct af_alg_async_req { * SG? * @enc: Cryptographic operation to be performed when * recvmsg is invoked. + * @write: True if we are in the middle of a write. * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. * @inflight: Non-zero when AIO requests are in flight. 
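The af_alg_ctx hunk just below replaces four bools with five single-bit fields of one u32, adding the new @write flag without growing the structure. A minimal standalone sketch of the space saving, using hypothetical struct names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Five separate bools: one byte each on common ABIs. */
struct flags_bools {
	_Bool more, merge, enc, write, init;
};

/* The same five flags packed into single-bit fields of one word. */
struct flags_packed {
	uint32_t more:1, merge:1, enc:1, write:1, init:1;
};

int main(void)
{
	/* Typically prints "5 4". */
	printf("%zu %zu\n", sizeof(struct flags_bools),
	       sizeof(struct flags_packed));
	return 0;
}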
@@ -151,10 +152,11 @@ struct af_alg_ctx { size_t used; atomic_t rcvused; - bool more; - bool merge; - bool enc; - bool init; + u32 more:1, + merge:1, + enc:1, + write:1, + init:1; unsigned int len; diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 811e9238a77c..cf318e3ddb5c 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -115,6 +115,7 @@ #define DP_MAX_LANE_COUNT 0x002 # define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_POST_LT_ADJ_REQ_SUPPORTED (1 << 5) /* 1.3 */ # define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ # define DP_ENHANCED_FRAME_CAP (1 << 7) @@ -583,6 +584,7 @@ #define DP_LANE_COUNT_SET 0x101 # define DP_LANE_COUNT_MASK 0x0f +# define DP_POST_LT_ADJ_REQ_GRANTED (1 << 5) /* 1.3 */ # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) #define DP_TRAINING_PATTERN_SET 0x102 @@ -800,6 +802,7 @@ #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 #define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_POST_LT_ADJ_REQ_IN_PROGRESS (1 << 1) /* 1.3 */ #define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */ #define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */ #define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */ diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 87caa4f1fdb8..52ce28097015 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -37,6 +37,7 @@ bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); +bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]); u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], int lane); u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], @@ -156,6 +157,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } static inline bool +drm_dp_post_lt_adj_req_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x13 && + (dpcd[DP_MAX_LANE_COUNT] & DP_POST_LT_ADJ_REQ_SUPPORTED); +} + +static inline bool drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { return dpcd[DP_DPCD_REV] >= 0x11 && diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 76e05930f50e..0ff7ab4aa868 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -1362,6 +1362,13 @@ drm_bridge_get_current_state(struct drm_bridge *bridge) * drm_bridge_get_next_bridge() - Get the next bridge in the chain * @bridge: bridge object * + * The caller is responsible for having a reference to @bridge via + * drm_bridge_get() or equivalent. This function leaves the refcount of + * @bridge unmodified. + * + * The refcount of the returned bridge is incremented. Use drm_bridge_put() + * when done with it. + * * RETURNS: * the next bridge in the chain after @bridge, or NULL if @bridge is the last. 
*/ @@ -1371,7 +1378,7 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge) if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain)) return NULL; - return list_next_entry(bridge, chain_node); + return drm_bridge_get(list_next_entry(bridge, chain_node)); } /** @@ -1434,15 +1441,61 @@ drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder) } /** - * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain + * drm_bridge_get_next_bridge_and_put - Get the next bridge in the chain + * and put the previous + * @bridge: bridge object + * + * Same as drm_bridge_get_next_bridge() but additionally puts the @bridge. + * + * RETURNS: + * the next bridge in the chain after @bridge, or NULL if @bridge is the last. + */ +static inline struct drm_bridge * +drm_bridge_get_next_bridge_and_put(struct drm_bridge *bridge) +{ + struct drm_bridge *next = drm_bridge_get_next_bridge(bridge); + + drm_bridge_put(bridge); + + return next; +} + +/** + * drm_for_each_bridge_in_chain_scoped - iterate over all bridges attached + * to an encoder * @encoder: the encoder to iterate bridges on * @bridge: a bridge pointer updated to point to the current bridge at each * iteration * * Iterate over all bridges present in the bridge chain attached to @encoder. + * + * Automatically gets/puts the bridge reference while iterating, and puts + * the reference even if returning or breaking in the middle of the loop. + */ +#define drm_for_each_bridge_in_chain_scoped(encoder, bridge) \ + for (struct drm_bridge *bridge __free(drm_bridge_put) = \ + drm_bridge_chain_get_first_bridge(encoder); \ + bridge; \ + bridge = drm_bridge_get_next_bridge_and_put(bridge)) + +/** + * drm_for_each_bridge_in_chain_from - iterate over all bridges starting + * from the given bridge + * @first_bridge: the bridge to start from + * @bridge: a bridge pointer updated to point to the current bridge at each + * iteration + * + * Iterate over all bridges in the encoder chain starting from + * @first_bridge, included. + * + * Automatically gets/puts the bridge reference while iterating, and puts + * the reference even if returning or breaking in the middle of the loop. 
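A usage sketch for the scoped iterator added above (the _from variant's definition continues below); the helper and its HPD stop condition are hypothetical, chosen to show that an early return does not leak the per-iteration reference:

#include <drm/drm_bridge.h>

/* Hypothetical helper: find the first bridge in an encoder's chain
 * that advertises hot-plug detection. The scoped iterator drops its
 * own reference automatically, so the early return is safe; the
 * explicit drm_bridge_get() hands one reference to the caller.
 */
static struct drm_bridge *first_hpd_bridge(struct drm_encoder *encoder)
{
	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		if (bridge->ops & DRM_BRIDGE_OP_HPD)
			return drm_bridge_get(bridge);
	}

	return NULL;
}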
*/ -#define drm_for_each_bridge_in_chain(encoder, bridge) \ - list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node) +#define drm_for_each_bridge_in_chain_from(first_bridge, bridge) \ + for (struct drm_bridge *bridge __free(drm_bridge_put) = \ + drm_bridge_get(first_bridge); \ + bridge; \ + bridge = drm_bridge_get_next_bridge_and_put(bridge)) enum drm_mode_status drm_bridge_chain_mode_valid(struct drm_bridge *bridge, diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index 513837632b7d..04afd7c21a82 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -13,15 +13,6 @@ #include <drm/drm_print.h> -#define range_overflows(start, size, max) ({ \ - typeof(start) start__ = (start); \ - typeof(size) size__ = (size); \ - typeof(max) max__ = (max); \ - (void)(&start__ == &size__); \ - (void)(&start__ == &max__); \ - start__ >= max__ || size__ > max__ - start__; \ -}) - #define DRM_BUDDY_RANGE_ALLOCATION BIT(0) #define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) #define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index 146ca80e35db..bdd845e383ef 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -220,6 +220,7 @@ int drm_client_modeset_check(struct drm_client_dev *client); int drm_client_modeset_commit_locked(struct drm_client_dev *client); int drm_client_modeset_commit(struct drm_client_dev *client); int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); +int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index); /** * drm_client_for_each_modeset() - Iterate over client modesets diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 59fd3f4d5995..778b2cca6c49 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -193,16 +193,6 @@ struct drm_device { char *unique; /** - * @struct_mutex: - * - * Lock for others (not &drm_minor.master and &drm_file.is_master) - * - * TODO: This lock used to be the BKL of the DRM subsystem. Move the - * lock into i915, which is the only remaining user. 
- */ - struct mutex struct_mutex; - - /** * @master_mutex: * * Lock for &drm_minor.master and &drm_file.is_master diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 32d57d6c5327..2b5c1aef80b0 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -128,10 +128,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_ const struct iosys_map *src, const struct drm_framebuffer *fb, const struct drm_rect *clip, struct drm_format_conv_state *state); -int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, - const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, struct drm_format_conv_state *state); - void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, const struct drm_rect *clip, struct drm_format_conv_state *state); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 92f5db84b9c2..589f7bfe7506 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -107,10 +107,12 @@ struct drm_gem_shmem_object { #define to_drm_gem_shmem_obj(obj) \ container_of(obj, struct drm_gem_shmem_object, base) +int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size); struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, size_t size, struct vfsmount *gemfs); +void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem); diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h index 0e336148309d..5434048a2ca4 100644 --- a/include/drm/drm_gpusvm.h +++ b/include/drm/drm_gpusvm.h @@ -106,16 +106,16 @@ struct drm_gpusvm_notifier { }; /** - * struct drm_gpusvm_range_flags - Structure representing a GPU SVM range flags + * struct drm_gpusvm_pages_flags - Structure representing GPU SVM pages flags * - * @migrate_devmem: Flag indicating whether the range can be migrated to device memory - * @unmapped: Flag indicating if the range has been unmapped - * @partial_unmap: Flag indicating if the range has been partially unmapped - * @has_devmem_pages: Flag indicating if the range has devmem pages - * @has_dma_mapping: Flag indicating if the range has a DMA mapping - * @__flags: Flags for range in u16 form (used for READ_ONCE) + * @migrate_devmem: Flag indicating whether the pages can be migrated to device memory + * @unmapped: Flag indicating if the pages have been unmapped + * @partial_unmap: Flag indicating if the pages have been partially unmapped + * @has_devmem_pages: Flag indicating if the pages have devmem pages + * @has_dma_mapping: Flag indicating if the pages have a DMA mapping + * @__flags: Flags for pages in u16 form (used for READ_ONCE) */ -struct drm_gpusvm_range_flags { +struct drm_gpusvm_pages_flags { union { struct { /* All flags below must be set upon creation */ @@ -131,6 +131,27 @@ struct drm_gpusvm_range_flags { }; /** + * struct drm_gpusvm_pages - Structure representing GPU SVM mapped pages + * + * @dma_addr: Device address array + * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping. + * Note this is assuming only one drm_pagemap per range is allowed. 
+ * @notifier_seq: Notifier sequence number of the range's pages + * @flags: Flags for range + * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory + * @flags.unmapped: Flag indicating if the range has been unmapped + * @flags.partial_unmap: Flag indicating if the range has been partially unmapped + * @flags.has_devmem_pages: Flag indicating if the range has devmem pages + * @flags.has_dma_mapping: Flag indicating if the range has a DMA mapping + */ +struct drm_gpusvm_pages { + struct drm_pagemap_addr *dma_addr; + struct drm_pagemap *dpagemap; + unsigned long notifier_seq; + struct drm_gpusvm_pages_flags flags; +}; + +/** * struct drm_gpusvm_range - Structure representing a GPU SVM range * * @gpusvm: Pointer to the GPU SVM structure @@ -138,11 +159,7 @@ struct drm_gpusvm_range_flags { * @refcount: Reference count for the range * @itree: Interval tree node for the range (inserted in GPU SVM notifier) * @entry: List entry to fast interval tree traversal - * @notifier_seq: Notifier sequence number of the range's pages - * @dma_addr: Device address array - * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping. - * Note this is assuming only one drm_pagemap per range is allowed. - * @flags: Flags for range + * @pages: The pages for this range. * * This structure represents a GPU SVM range used for tracking memory ranges * mapped in a DRM device. @@ -153,10 +170,7 @@ struct drm_gpusvm_range { struct kref refcount; struct interval_tree_node itree; struct list_head entry; - unsigned long notifier_seq; - struct drm_pagemap_addr *dma_addr; - struct drm_pagemap *dpagemap; - struct drm_gpusvm_range_flags flags; + struct drm_gpusvm_pages pages; }; /** @@ -293,6 +307,22 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range, const struct mmu_notifier_range *mmu_range); +int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + struct mm_struct *mm, + struct mmu_interval_notifier *notifier, + unsigned long pages_start, unsigned long pages_end, + const struct drm_gpusvm_ctx *ctx); + +void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + unsigned long npages, + const struct drm_gpusvm_ctx *ctx); + +void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm, + struct drm_gpusvm_pages *svm_pages, + unsigned long npages); + #ifdef CONFIG_LOCKDEP /** * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 323a505e6e6a..fb88301b3c45 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -546,7 +546,7 @@ struct drm_sched_backend_ops { * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, * as there's usually one run-queue per priority, but could be less. * @sched_rq: An allocated array of run-queues of size @num_rqs; - * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler + * @job_scheduled: once drm_sched_entity_flush() is called the scheduler * waits on this wait queue until all the scheduled jobs are * finished. * @job_id_count: used to assign unique id to the each job. 
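The drm_gpusvm.h hunk above adds three prototypes that split page tracking out of the range object. A hedged sketch of the expected call order, derived only from those prototypes; the driver-side objects, locking, notifier-sequence retry loop and the page-table step are assumptions:

#include <drm/drm_gpusvm.h>

/* Hypothetical driver path: populate a struct drm_gpusvm_pages,
 * consume the DMA addresses, then tear everything down.
 */
static int example_map_range(struct drm_gpusvm *gpusvm,
			     struct drm_gpusvm_pages *svm_pages,
			     struct mm_struct *mm,
			     struct mmu_interval_notifier *notifier,
			     unsigned long start, unsigned long end,
			     const struct drm_gpusvm_ctx *ctx)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	int err;

	err = drm_gpusvm_get_pages(gpusvm, svm_pages, mm, notifier,
				   start, end, ctx);
	if (err)
		return err;

	/* ... program GPU page tables from svm_pages->dma_addr ... */

	drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages, ctx);
	drm_gpusvm_free_pages(gpusvm, svm_pages, npages);

	return 0;
}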
diff --git a/include/drm/intel/i915_component.h b/include/drm/intel/i915_component.h index 4ea3b17aa143..8082db222e00 100644 --- a/include/drm/intel/i915_component.h +++ b/include/drm/intel/i915_component.h @@ -31,6 +31,7 @@ enum i915_component_type { I915_COMPONENT_HDCP, I915_COMPONENT_PXP, I915_COMPONENT_GSC_PROXY, + INTEL_COMPONENT_LB, }; /* MAX_PORT is the number of port diff --git a/include/drm/intel/intel_lb_mei_interface.h b/include/drm/intel/intel_lb_mei_interface.h new file mode 100644 index 000000000000..d65be2cba2ab --- /dev/null +++ b/include/drm/intel/intel_lb_mei_interface.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (c) 2025 Intel Corporation + */ + +#ifndef _INTEL_LB_MEI_INTERFACE_H_ +#define _INTEL_LB_MEI_INTERFACE_H_ + +#include <linux/types.h> + +struct device; + +/** + * define INTEL_LB_FLAG_IS_PERSISTENT - Mark the payload as persistent + * + * This flag indicates that the late binding payload should be stored + * persistently in flash across warm resets. + */ +#define INTEL_LB_FLAG_IS_PERSISTENT BIT(0) + +/** + * enum intel_lb_type - enum to determine late binding payload type + * @INTEL_LB_TYPE_FAN_CONTROL: Fan controller configuration + */ +enum intel_lb_type { + INTEL_LB_TYPE_FAN_CONTROL = 1, +}; + +/** + * enum intel_lb_status - Status codes returned on late binding transmissions + * @INTEL_LB_STATUS_SUCCESS: Operation completed successfully + * @INTEL_LB_STATUS_4ID_MISMATCH: Mismatch in the expected 4ID (firmware identity/token) + * @INTEL_LB_STATUS_ARB_FAILURE: Arbitration failure (e.g. conflicting access or state) + * @INTEL_LB_STATUS_GENERAL_ERROR: General firmware error not covered by other codes + * @INTEL_LB_STATUS_INVALID_PARAMS: One or more input parameters are invalid + * @INTEL_LB_STATUS_INVALID_SIGNATURE: Payload has an invalid or untrusted signature + * @INTEL_LB_STATUS_INVALID_PAYLOAD: Payload contents are not accepted by firmware + * @INTEL_LB_STATUS_TIMEOUT: Operation timed out before completion + */ +enum intel_lb_status { + INTEL_LB_STATUS_SUCCESS = 0, + INTEL_LB_STATUS_4ID_MISMATCH = 1, + INTEL_LB_STATUS_ARB_FAILURE = 2, + INTEL_LB_STATUS_GENERAL_ERROR = 3, + INTEL_LB_STATUS_INVALID_PARAMS = 4, + INTEL_LB_STATUS_INVALID_SIGNATURE = 5, + INTEL_LB_STATUS_INVALID_PAYLOAD = 6, + INTEL_LB_STATUS_TIMEOUT = 7, +}; + +/** + * struct intel_lb_component_ops - Ops for late binding services + */ +struct intel_lb_component_ops { + /** + * push_payload - Sends a payload to the authentication firmware + * @dev: Device struct corresponding to the mei device + * @type: Payload type (see &enum intel_lb_type) + * @flags: Payload flags bitmap (e.g. 
%INTEL_LB_FLAGS_IS_PERSISTENT) + * @payload: Pointer to payload buffer + * @payload_size: Payload buffer size in bytes + * + * Return: 0 success, negative errno value on transport failure, + * positive status returned by firmware + */ + int (*push_payload)(struct device *dev, u32 type, u32 flags, + const void *payload, size_t payload_size); +}; + +#endif /* _INTEL_LB_MEI_INTERFACE_H_ */ diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index e664a96540eb..bca3a8849d47 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -391,7 +391,7 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_operation_ctx *ctx); -void ttm_bo_put(struct ttm_buffer_object *bo); +void ttm_bo_fini(struct ttm_buffer_object *bo); void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, struct ttm_lru_bulk_move *bulk); bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index e52bba15012f..f49daa504c36 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -36,7 +36,7 @@ #include <drm/ttm/ttm_kmap_iter.h> #define TTM_MAX_BO_PRIORITY 4U -#define TTM_NUM_MEM_TYPES 8 +#define TTM_NUM_MEM_TYPES 9 struct dmem_cgroup_device; struct ttm_device; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 404883c7af6e..4000ff16f295 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -8,8 +8,8 @@ #include <linux/bits.h> #include <linux/kvm.h> #include <linux/irqreturn.h> -#include <linux/kref.h> #include <linux/mutex.h> +#include <linux/refcount.h> #include <linux/spinlock.h> #include <linux/static_key.h> #include <linux/types.h> @@ -139,10 +139,13 @@ struct vgic_irq { bool pending_latch; /* The pending latch state used to calculate * the pending state for both level * and edge triggered IRQs. */ - bool active; /* not used for LPIs */ + bool active; + bool pending_release; /* Used for LPIs only, unreferenced IRQ + * pending a release */ + bool enabled; bool hw; /* Tied to HW IRQ */ - struct kref refcount; /* Used for LPIs */ + refcount_t refcount; /* Used for LPIs */ u32 hwintid; /* HW INTID number */ unsigned int host_irq; /* linux irq corresponding to hwintid */ union { diff --git a/include/linux/damon.h b/include/linux/damon.h index f13664c62ddd..9e62b2a85538 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -636,6 +636,7 @@ struct damon_operations { * @data: Data that will be passed to @fn. * @repeat: Repeat invocations. * @return_code: Return code from @fn invocation. + * @dealloc_on_cancel: De-allocate when canceled. * * Control damon_call(), which requests specific kdamond to invoke a given * function. Refer to damon_call() for more details. 
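For the damon_call_control documented above, a sketch of a one-shot request; the callback signature and the damon_call() call site are assumptions based on this kernel-doc rather than code from the patch:

#include <linux/damon.h>

/* Hypothetical callback, run by the target kdamond. */
static int example_fn(void *data)
{
	/* ... inspect or adjust DAMON state here ... */
	return 0;
}

/* Issue a single (non-repeating) call. dealloc_on_cancel must stay
 * false for an on-stack control like this one; it only makes sense
 * for dynamically allocated controls.
 */
static int example_call(struct damon_ctx *ctx)
{
	struct damon_call_control control = {
		.fn = example_fn,
		.repeat = false,
		.dealloc_on_cancel = false,
	};
	int err;

	err = damon_call(ctx, &control);

	return err ? err : control.return_code;
}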
@@ -645,6 +646,7 @@ struct damon_call_control { void *data; bool repeat; int return_code; + bool dealloc_on_cancel; /* private: internal use only */ /* informs if the kdamond finished handling of the request */ struct completion completion; diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 80a178f3d896..12f5ee43850e 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -420,9 +420,6 @@ struct io_ring_ctx { struct list_head defer_list; unsigned nr_drained; - struct io_alloc_cache msg_cache; - spinlock_t msg_lock; - #ifdef CONFIG_NET_RX_BUSY_POLL struct list_head napi_list; /* track busy poll napi_id */ spinlock_t napi_lock; /* napi_list lock */ diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 725fd7727422..a82755e1fc40 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -113,6 +113,7 @@ int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, mei_cldev_cb_t notif_cb); u8 mei_cldev_ver(const struct mei_cl_device *cldev); +size_t mei_cldev_mtu(const struct mei_cl_device *cldev); void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev); void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 8c5fbfb85749..10fe492e1fed 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -663,6 +663,7 @@ struct mlx5e_resources { bool tisn_valid; } hw_objs; struct net_device *uplink_netdev; + netdevice_tracker tracker; struct mutex uplink_netdev_lock; struct mlx5_crypto_dek_priv *dek_priv; }; diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 154ed0dbb43f..725f95f7e416 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -239,6 +239,76 @@ static inline bool __must_check __must_check_overflow(bool overflow) __overflows_type(n, T)) /** + * range_overflows() - Check if a range is out of bounds + * @start: Start of the range. + * @size: Size of the range. + * @max: Exclusive upper boundary. + * + * A strict check to determine if the range [@start, @start + @size) is + * invalid with respect to the allowable range [0, @max). Any range + * starting at or beyond @max is considered an overflow, even if @size is 0. + * + * Returns: true if the range is out of bounds. + */ +#define range_overflows(start, size, max) ({ \ + typeof(start) start__ = (start); \ + typeof(size) size__ = (size); \ + typeof(max) max__ = (max); \ + (void)(&start__ == &size__); \ + (void)(&start__ == &max__); \ + start__ >= max__ || size__ > max__ - start__; \ +}) + +/** + * range_overflows_t() - Check if a range is out of bounds + * @type: Data type to use. + * @start: Start of the range. + * @size: Size of the range. + * @max: Exclusive upper boundary. + * + * Same as range_overflows() but forcing the parameters to @type. + * + * Returns: true if the range is out of bounds. + */ +#define range_overflows_t(type, start, size, max) \ + range_overflows((type)(start), (type)(size), (type)(max)) + +/** + * range_end_overflows() - Check if a range's endpoint is out of bounds + * @start: Start of the range. + * @size: Size of the range. + * @max: Exclusive upper boundary. + * + * Checks only if the endpoint of a range (@start + @size) exceeds @max. + * Unlike range_overflows(), a zero-sized range at the boundary (@start == @max) + * is not considered an overflow. Useful for iterator-style checks. + * + * Returns: true if the endpoint exceeds the boundary. 
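The range_end_overflows() definition follows below; as a worked contrast of the strict and end-only checks, here they are restated in standalone C (the kernel macros additionally type-check their arguments via the pointer-comparison trick):

#include <stdio.h>

/* Userspace restatements, for illustration only. */
#define range_overflows(start, size, max) \
	((start) >= (max) || (size) > (max) - (start))
#define range_end_overflows(start, size, max) \
	((start) > (max) || (size) > (max) - (start))

int main(void)
{
	unsigned int max = 16;

	/* Zero-sized range exactly at the boundary: rejected by the
	 * strict check, accepted by the end-only check.
	 */
	printf("%d %d\n",
	       range_overflows(16u, 0u, max),      /* 1 */
	       range_end_overflows(16u, 0u, max)); /* 0 */

	/* Endpoint past the boundary (12 + 8 > 16): both reject. */
	printf("%d %d\n",
	       range_overflows(12u, 8u, max),      /* 1 */
	       range_end_overflows(12u, 8u, max)); /* 1 */

	return 0;
}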
+ */ +#define range_end_overflows(start, size, max) ({ \ + typeof(start) start__ = (start); \ + typeof(size) size__ = (size); \ + typeof(max) max__ = (max); \ + (void)(&start__ == &size__); \ + (void)(&start__ == &max__); \ + start__ > max__ || size__ > max__ - start__; \ +}) + +/** + * range_end_overflows_t() - Check if a range's endpoint is out of bounds + * @type: Data type to use. + * @start: Start of the range. + * @size: Size of the range. + * @max: Exclusive upper boundary. + * + * Same as range_end_overflows() but forcing the parameters to @type. + * + * Returns: true if the endpoint exceeds the boundary. + */ +#define range_end_overflows_t(type, start, size, max) \ + range_end_overflows((type)(start), (type)(size), (type)(max)) + +/** * castable_to_type - like __same_type(), but also allows for casted literals * * @n: variable or constant value diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index c84edf217819..f67a2cb7d781 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -115,6 +115,12 @@ struct dev_pm_domain_list { * genpd provider specific way, likely through a * parent device node. This flag makes genpd * skip its internal support for this. + * + * GENPD_FLAG_NO_STAY_ON: For genpd OF providers a powered-on PM domain at + * initialization is prevented from being + * powered-off until the ->sync_state() callback is + * invoked. This flag informs genpd to allow a + * power-off without waiting for ->sync_state(). */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) @@ -126,6 +132,7 @@ struct dev_pm_domain_list { #define GENPD_FLAG_OPP_TABLE_FW (1U << 7) #define GENPD_FLAG_DEV_NAME_FW (1U << 8) #define GENPD_FLAG_NO_SYNC_STATE (1U << 9) +#define GENPD_FLAG_NO_STAY_ON (1U << 10) enum gpd_status { GENPD_STATE_ON = 0, /* PM domain is on */ diff --git a/include/linux/rv.h b/include/linux/rv.h index 14410a42faef..9520aab34bcb 100644 --- a/include/linux/rv.h +++ b/include/linux/rv.h @@ -7,16 +7,14 @@ #ifndef _LINUX_RV_H #define _LINUX_RV_H -#include <linux/types.h> -#include <linux/list.h> - #define MAX_DA_NAME_LEN 32 #define MAX_DA_RETRY_RACING_EVENTS 3 #ifdef CONFIG_RV +#include <linux/array_size.h> #include <linux/bitops.h> +#include <linux/list.h> #include <linux/types.h> -#include <linux/array_size.h> /* * Deterministic automaton per-object variables. diff --git a/include/linux/swap.h b/include/linux/swap.h index 2fe6ed2cc3fd..7012a0f758d8 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -385,6 +385,16 @@ void folio_add_lru_vma(struct folio *, struct vm_area_struct *); void mark_page_accessed(struct page *); void folio_mark_accessed(struct folio *); +static inline bool folio_may_be_lru_cached(struct folio *folio) +{ + /* + * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting. + * Holding small numbers of low-order mTHP folios in per-CPU LRU cache + * will be sensible, but nobody has implemented and tested that yet.
+ */ + return !folio_test_large(folio); +} + extern atomic_t lru_disable_count; static inline bool lru_cache_disabled(void) diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 4160731dcb6e..1fc2fb03ce3f 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -3,6 +3,7 @@ #define __NET_DST_METADATA_H 1 #include <linux/skbuff.h> +#include <net/ip.h> #include <net/ip_tunnels.h> #include <net/macsec.h> #include <net/dst.h> @@ -220,9 +221,15 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, int md_size) { const struct iphdr *iph = ip_hdr(skb); + struct metadata_dst *tun_dst; + + tun_dst = __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl, + 0, flags, tunnel_id, md_size); - return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl, - 0, flags, tunnel_id, md_size); + if (tun_dst && (iph->frag_off & htons(IP_DF))) + __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, + tun_dst->u.tun_info.key.tun_flags); + return tun_dst; } static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr, diff --git a/include/net/sock.h b/include/net/sock.h index fb13322a11fc..2e14283c5be1 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2061,6 +2061,9 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock) if (sock) { WRITE_ONCE(sk->sk_uid, SOCK_INODE(sock)->i_uid); WRITE_ONCE(sk->sk_ino, SOCK_INODE(sock)->i_ino); + } else { + /* Note: sk_uid is unchanged. */ + WRITE_ONCE(sk->sk_ino, 0); } } @@ -2082,8 +2085,6 @@ static inline void sock_orphan(struct sock *sk) sock_set_flag(sk, SOCK_DEAD); sk_set_socket(sk, NULL); sk->sk_wq = NULL; - /* Note: sk_uid is unchanged. */ - WRITE_ONCE(sk->sk_ino, 0); write_unlock_bh(&sk->sk_callback_lock); } diff --git a/include/sound/sdca.h b/include/sound/sdca.h index 5a5d6de78d72..9c6a351c9d47 100644 --- a/include/sound/sdca.h +++ b/include/sound/sdca.h @@ -46,6 +46,7 @@ struct sdca_device_data { enum sdca_quirk { SDCA_QUIRKS_RT712_VB, + SDCA_QUIRKS_SKIP_FUNC_TYPE_PATCHING, }; #if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_SOC_SDCA) diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h index 06ec126cdcc3..ea68856e4c8c 100644 --- a/include/sound/sdca_function.h +++ b/include/sound/sdca_function.h @@ -1063,27 +1063,30 @@ struct sdca_entity_ge { /** * struct sdca_entity_hide - information specific to HIDE Entities * @hid: HID device structure - * @hidtx_ids: HIDTx Report ID * @num_hidtx_ids: number of HIDTx Report ID - * @hidrx_ids: HIDRx Report ID * @num_hidrx_ids: number of HIDRx Report ID - * @hide_reside_function_num: indicating which Audio Function Numbers within this Device - * @max_delay: the maximum time in microseconds allowed for the Device to change the ownership from Device to Host - * @af_number_list: which Audio Function Numbers within this Device are sending/receiving the messages in this HIDE - * @hid_desc: HID descriptor for the HIDE Entity + * @hidtx_ids: HIDTx Report ID + * @hidrx_ids: HIDRx Report ID + * @af_number_list: which Audio Function Numbers within this Device are + * sending/receiving the messages in this HIDE + * @hide_reside_function_num: indicating which Audio Function Numbers + * within this Device + * @max_delay: the maximum time in microseconds allowed for the Device + * to change the ownership from Device to Host * @hid_report_desc: HID Report Descriptor for the HIDE Entity + * @hid_desc: HID descriptor for the HIDE Entity */ struct sdca_entity_hide { struct hid_device *hid; unsigned int *hidtx_ids; - int 
num_hidtx_ids; unsigned int *hidrx_ids; + int num_hidtx_ids; int num_hidrx_ids; + unsigned int af_number_list[SDCA_MAX_FUNCTION_COUNT]; unsigned int hide_reside_function_num; unsigned int max_delay; - unsigned int af_number_list[SDCA_MAX_FUNCTION_COUNT]; - struct hid_descriptor hid_desc; unsigned char *hid_report_desc; + struct hid_descriptor hid_desc; }; /** diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 9cebd072a042..cd7402e36b6d 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -105,6 +105,8 @@ extern "C" { * * %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for * signalling user mode queues. + * + * %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing). */ #define AMDGPU_GEM_DOMAIN_CPU 0x1 #define AMDGPU_GEM_DOMAIN_GTT 0x2 @@ -113,13 +115,15 @@ extern "C" { #define AMDGPU_GEM_DOMAIN_GWS 0x10 #define AMDGPU_GEM_DOMAIN_OA 0x20 #define AMDGPU_GEM_DOMAIN_DOORBELL 0x40 +#define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80 #define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \ AMDGPU_GEM_DOMAIN_GTT | \ AMDGPU_GEM_DOMAIN_VRAM | \ AMDGPU_GEM_DOMAIN_GDS | \ AMDGPU_GEM_DOMAIN_GWS | \ - AMDGPU_GEM_DOMAIN_OA | \ - AMDGPU_GEM_DOMAIN_DOORBELL) + AMDGPU_GEM_DOMAIN_OA | \ + AMDGPU_GEM_DOMAIN_DOORBELL | \ + AMDGPU_GEM_DOMAIN_MMIO_REMAP) /* Flag that CPU access will be required for the case of VRAM domain */ #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0) @@ -1084,10 +1088,11 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow { * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU * */ -#define AMDGPU_IDS_FLAGS_FUSION 0x1 -#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2 -#define AMDGPU_IDS_FLAGS_TMZ 0x4 -#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8 +#define AMDGPU_IDS_FLAGS_FUSION 0x01 +#define AMDGPU_IDS_FLAGS_PREEMPTION 0x02 +#define AMDGPU_IDS_FLAGS_TMZ 0x04 +#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x08 +#define AMDGPU_IDS_FLAGS_GANG_SUBMIT 0x10 /* * Query h/w info: Flag identifying VF/PF/PT mode diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index 160ee1411d4a..e470b0221e02 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -90,6 +90,7 @@ extern "C" { #define DRM_IVPU_PARAM_TILE_CONFIG 11 #define DRM_IVPU_PARAM_SKU 12 #define DRM_IVPU_PARAM_CAPABILITIES 13 +#define DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE 14 #define DRM_IVPU_PLATFORM_TYPE_SILICON 0 @@ -176,6 +177,9 @@ struct drm_ivpu_param { * * %DRM_IVPU_PARAM_CAPABILITIES: * Supported capabilities (read-only) + * + * %DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE: + * Size of the preemption buffer (read-only) */ __u32 param; @@ -371,6 +375,13 @@ struct drm_ivpu_cmdq_submit { * to be executed. The offset has to be 8-byte aligned. */ __u32 commands_offset; + /** + * @preempt_buffer_index: + * + * Index of the preemption buffer in the buffers_ptr array.
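Userspace is expected to size its preemption buffer from the new parameter before submitting. An illustrative query, assuming the existing GET_PARAM flow and its value field:

	struct drm_ivpu_param args = {
		.param = DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE,
	};

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
		preempt_bo_size = args.value;	/* allocate the preemption BO at least this big */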
+ */ + __u32 preempt_buffer_index; + + __u32 reserved; }; /* drm_ivpu_bo_wait job status codes */ diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index ed67510395bd..e8b47c9f6976 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -22,6 +22,8 @@ extern "C" { #define DRM_PANFROST_PERFCNT_DUMP 0x07 #define DRM_PANFROST_MADVISE 0x08 #define DRM_PANFROST_SET_LABEL_BO 0x09 +#define DRM_PANFROST_JM_CTX_CREATE 0x0a +#define DRM_PANFROST_JM_CTX_DESTROY 0x0b #define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit) #define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo) @@ -31,6 +33,8 @@ extern "C" { #define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset) #define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise) #define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo) +#define DRM_IOCTL_PANFROST_JM_CTX_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create) +#define DRM_IOCTL_PANFROST_JM_CTX_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy) /* * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module @@ -71,6 +75,12 @@ struct drm_panfrost_submit { /** A combination of PANFROST_JD_REQ_* */ __u32 requirements; + + /** JM context handle. Zero if you want to use the default context. */ + __u32 jm_ctx_handle; + + /** Padding field. MBZ. */ + __u32 pad; }; /** @@ -177,6 +187,7 @@ enum drm_panfrost_param { DRM_PANFROST_PARAM_AFBC_FEATURES, DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP, DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY, + DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES, }; struct drm_panfrost_get_param { @@ -299,6 +310,45 @@ struct panfrost_dump_registers { __u32 value; }; +enum drm_panfrost_jm_ctx_priority { + /** + * @PANFROST_JM_CTX_PRIORITY_LOW: Low priority context. + */ + PANFROST_JM_CTX_PRIORITY_LOW = 0, + + /** + * @PANFROST_JM_CTX_PRIORITY_MEDIUM: Medium priority context. + */ + PANFROST_JM_CTX_PRIORITY_MEDIUM, + + /** + * @PANFROST_JM_CTX_PRIORITY_HIGH: High priority context. + * + * Requires CAP_SYS_NICE or DRM_MASTER. + */ + PANFROST_JM_CTX_PRIORITY_HIGH, +}; + +struct drm_panfrost_jm_ctx_create { + /** @handle: Handle of the created JM context */ + __u32 handle; + + /** @priority: Context priority (see enum drm_panfrost_jm_ctx_priority). */ + __u32 priority; +}; + +struct drm_panfrost_jm_ctx_destroy { + /** + * @handle: Handle of the JM context to destroy. + * + * Must be a valid context handle returned by DRM_IOCTL_PANFROST_JM_CTX_CREATE. + */ + __u32 handle; + + /** @pad: Padding field, MBZ.
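Together, the two ioctls let a client run jobs under per-context priorities. An illustrative userspace flow using only the uapi declared above (error handling elided, job setup fields omitted):

	struct drm_panfrost_jm_ctx_create ctx_args = {
		.priority = PANFROST_JM_CTX_PRIORITY_HIGH,	/* needs CAP_SYS_NICE or DRM_MASTER */
	};
	struct drm_panfrost_submit submit = {0};

	ioctl(fd, DRM_IOCTL_PANFROST_JM_CTX_CREATE, &ctx_args);
	submit.jm_ctx_handle = ctx_args.handle;	/* zero would select the default context */
	/* ... fill in jc, in_syncs, bo_handles, requirements ... */
	ioctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);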
*/ + __u32 pad; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 67d015df8893..5fd5b4cf75ca 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -31,6 +31,8 @@ #define MPTCP_INFO_FLAG_FALLBACK _BITUL(0) #define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1) +#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 _BITUL(0) + #define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h index 6ac84b2f636c..7359d34da446 100644 --- a/include/uapi/linux/mptcp_pm.h +++ b/include/uapi/linux/mptcp_pm.h @@ -16,10 +16,10 @@ * good time to allocate memory and send ADD_ADDR if needed. Depending on the * traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED * is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, - * sport, dport, server-side. + * sport, dport, server-side, [flags]. * @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new * subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, - * sport, dport, server-side. + * sport, dport, server-side, [flags]. * @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token. * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer. * Attributes: token, rem_id, family, daddr4 | daddr6 [, dport]. diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 17dfaa0395c4..1d03b2fc4b25 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -352,16 +352,16 @@ static void create_worker_cb(struct callback_head *cb) struct io_wq *wq; struct io_wq_acct *acct; - bool do_create = false; + bool activated_free_worker, do_create = false; worker = container_of(cb, struct io_worker, create_work); wq = worker->wq; acct = worker->acct; rcu_read_lock(); - do_create = !io_acct_activate_free_worker(acct); + activated_free_worker = io_acct_activate_free_worker(acct); rcu_read_unlock(); - if (!do_create) + if (activated_free_worker) goto no_need_create; raw_spin_lock(&acct->workers_lock); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 93633613a165..93665cebe9bd 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -290,7 +290,6 @@ static void io_free_alloc_caches(struct io_ring_ctx *ctx) io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free); - io_alloc_cache_free(&ctx->msg_cache, kfree); io_futex_cache_free(ctx); io_rsrc_cache_free(ctx); } @@ -337,9 +336,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_cmd), sizeof(struct io_async_cmd)); - spin_lock_init(&ctx->msg_lock); - ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX, - sizeof(struct io_kiocb), 0); ret |= io_futex_cache_init(ctx); ret |= io_rsrc_cache_init(ctx); if (ret) @@ -1406,8 +1402,10 @@ static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw) void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw) { - io_tw_lock(req->ctx, tw); - if (unlikely(io_should_terminate_tw())) + struct io_ring_ctx *ctx = req->ctx; + + io_tw_lock(ctx, tw); + if (unlikely(io_should_terminate_tw(ctx))) io_req_defer_failed(req, -EFAULT); else if (req->flags & REQ_F_FORCE_ASYNC) io_queue_iowq(req); diff --git a/io_uring/io_uring.h 
b/io_uring/io_uring.h index abc6de227f74..1880902be6fd 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -476,9 +476,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx) * 2) PF_KTHREAD is set, in which case the invoker of the task_work is * our fallback task_work. */ -static inline bool io_should_terminate_tw(void) +static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx) { - return current->flags & (PF_KTHREAD | PF_EXITING); + return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs); } static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res) diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 4c2578f2efcb..5e5b94236d72 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -11,7 +11,6 @@ #include "io_uring.h" #include "rsrc.h" #include "filetable.h" -#include "alloc_cache.h" #include "msg_ring.h" /* All valid masks for MSG_RING */ @@ -76,13 +75,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw) struct io_ring_ctx *ctx = req->ctx; io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags); - if (spin_trylock(&ctx->msg_lock)) { - if (io_alloc_cache_put(&ctx->msg_cache, req)) - req = NULL; - spin_unlock(&ctx->msg_lock); - } - if (req) - kfree_rcu(req, rcu_head); + kfree_rcu(req, rcu_head); percpu_ref_put(&ctx->refs); } @@ -104,26 +97,13 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req, return 0; } -static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx) -{ - struct io_kiocb *req = NULL; - - if (spin_trylock(&ctx->msg_lock)) { - req = io_alloc_cache_get(&ctx->msg_cache); - spin_unlock(&ctx->msg_lock); - if (req) - return req; - } - return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); -} - static int io_msg_data_remote(struct io_ring_ctx *target_ctx, struct io_msg *msg) { struct io_kiocb *target; u32 flags = 0; - target = io_msg_get_kiocb(target_ctx); + target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (unlikely(!target)) return -ENOMEM; diff --git a/io_uring/notif.c b/io_uring/notif.c index 9a6f6e92d742..ea9c0116cec2 100644 --- a/io_uring/notif.c +++ b/io_uring/notif.c @@ -85,7 +85,7 @@ static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg) return -EEXIST; prev_nd = container_of(prev_uarg, struct io_notif_data, uarg); - prev_notif = cmd_to_io_kiocb(nd); + prev_notif = cmd_to_io_kiocb(prev_nd); /* make sure all notifications can be finished in the same task_work */ if (unlikely(notif->ctx != prev_notif->ctx || diff --git a/io_uring/poll.c b/io_uring/poll.c index c786e587563b..6090a26975d4 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -224,7 +224,7 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw) { int v; - if (unlikely(io_should_terminate_tw())) + if (unlikely(io_should_terminate_tw(req->ctx))) return -ECANCELED; do { diff --git a/io_uring/timeout.c b/io_uring/timeout.c index 7f13bfa9f2b6..17e3aab0af36 100644 --- a/io_uring/timeout.c +++ b/io_uring/timeout.c @@ -324,7 +324,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw) int ret; if (prev) { - if (!io_should_terminate_tw()) { + if (!io_should_terminate_tw(req->ctx)) { struct io_cancel_data cd = { .ctx = req->ctx, .data = prev->cqe.user_data, diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 053bac89b6c0..213716e10d70 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -118,7 +118,7 @@ static void
io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw) { struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); unsigned int flags = IO_URING_F_COMPLETE_DEFER; - if (io_should_terminate_tw()) + if (io_should_terminate_tw(req->ctx)) flags |= IO_URING_F_TASK_DEAD; /* task_work executor checks the deferred list completion */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 312c6a8b55bb..77d02f87f3f1 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -126,8 +126,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem); * of concurrent destructions. Use a separate workqueue so that cgroup * destruction work items don't end up filling up max_active of system_wq * which may lead to deadlock. + * + * A cgroup destruction should enqueue work sequentially to: + * cgroup_offline_wq: use for css offline work + * cgroup_release_wq: use for css release work + * cgroup_free_wq: use for free work + * + * Rationale for using separate workqueues: + * The cgroup root free work may depend on completion of other css offline + * operations. If all tasks were enqueued to a single workqueue, this could + * create a deadlock scenario where: + * - Free work waits for other css offline work to complete. + * - But other css offline work is queued after free work in the same queue. + * + * Example deadlock scenario with single workqueue (cgroup_destroy_wq): + * 1. umount net_prio + * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx) + * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx) + * 4. net_prio root destruction reaches cgroup_destroy_root->cgroup_lock_and_drain_offline. + * 5. net_prio root destruction blocks waiting for perf_event CSS A offline, + * which can never complete as it's behind in the same queue and + * workqueue's max_active is 1.
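The failure pattern being avoided here is the classic self-wait on an ordered workqueue. A self-contained sketch of that pattern (names illustrative, not from the patch):

	static struct workqueue_struct *wq;	/* alloc_workqueue("demo", 0, 1) */
	static struct work_struct work_a, work_b;
	static DECLARE_COMPLETION(b_done);

	static void b_fn(struct work_struct *work)
	{
		complete(&b_done);
	}

	static void a_fn(struct work_struct *work)
	{
		queue_work(wq, &work_b);
		/* deadlocks: with max_active == 1, work_b sits behind us forever */
		wait_for_completion(&b_done);
	}

Splitting offline, release and free onto three queues guarantees that no work item ever waits on another item queued behind it on the same queue.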
*/ -static struct workqueue_struct *cgroup_destroy_wq; +static struct workqueue_struct *cgroup_offline_wq; +static struct workqueue_struct *cgroup_release_wq; +static struct workqueue_struct *cgroup_free_wq; /* generate an array of cgroup subsystem pointers */ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, @@ -4159,6 +4182,7 @@ static void cgroup_file_release(struct kernfs_open_file *of) cft->release(of); put_cgroup_ns(ctx->ns); kfree(ctx); + of->priv = NULL; } static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, @@ -5558,7 +5582,7 @@ static void css_release_work_fn(struct work_struct *work) cgroup_unlock(); INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); - queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); + queue_rcu_work(cgroup_free_wq, &css->destroy_rwork); } static void css_release(struct percpu_ref *ref) @@ -5567,7 +5591,7 @@ static void css_release(struct percpu_ref *ref) container_of(ref, struct cgroup_subsys_state, refcnt); INIT_WORK(&css->destroy_work, css_release_work_fn); - queue_work(cgroup_destroy_wq, &css->destroy_work); + queue_work(cgroup_release_wq, &css->destroy_work); } static void init_and_link_css(struct cgroup_subsys_state *css, @@ -5701,7 +5725,7 @@ err_list_del: list_del_rcu(&css->sibling); err_free_css: INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); - queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); + queue_rcu_work(cgroup_free_wq, &css->destroy_rwork); return ERR_PTR(err); } @@ -5939,7 +5963,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref) if (atomic_dec_and_test(&css->online_cnt)) { INIT_WORK(&css->destroy_work, css_killed_work_fn); - queue_work(cgroup_destroy_wq, &css->destroy_work); + queue_work(cgroup_offline_wq, &css->destroy_work); } } @@ -6325,8 +6349,14 @@ static int __init cgroup_wq_init(void) * We would prefer to do this in cgroup_init() above, but that * is called before init_workqueues(): so leave this until after. */ - cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); - BUG_ON(!cgroup_destroy_wq); + cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1); + BUG_ON(!cgroup_offline_wq); + + cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1); + BUG_ON(!cgroup_release_wq); + + cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1); + BUG_ON(!cgroup_free_wq); return 0; } core_initcall(cgroup_wq_init); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index be00629f0ba4..ccba6fc3c3fe 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9551,7 +9551,7 @@ static unsigned long tg_weight(struct task_group *tg) #ifdef CONFIG_FAIR_GROUP_SCHED return scale_load_down(tg->shares); #else - return sched_weight_from_cgroup(tg->scx_weight); + return sched_weight_from_cgroup(tg->scx.weight); #endif } diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 4ae32ef179dd..088ceff38c8a 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -6788,12 +6788,8 @@ __bpf_kfunc u32 scx_bpf_reenqueue_local(void) * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to * the current local DSQ for running tasks and thus are not * visible to the BPF scheduler. - * - * Also skip re-enqueueing tasks that can only run on this - * CPU, as they would just be re-added to the same local - * DSQ without any benefit. 
*/ - if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1) + if (p->migration_pending) continue; dispatch_dequeue(rq, p); diff --git a/kernel/trace/rv/monitors/sleep/sleep.c b/kernel/trace/rv/monitors/sleep/sleep.c index eea447b06907..c1347da69e9d 100644 --- a/kernel/trace/rv/monitors/sleep/sleep.c +++ b/kernel/trace/rv/monitors/sleep/sleep.c @@ -127,7 +127,9 @@ static void handle_sys_enter(void *data, struct pt_regs *regs, long id) mon = ltl_get_monitor(current); switch (id) { +#ifdef __NR_clock_nanosleep case __NR_clock_nanosleep: +#endif #ifdef __NR_clock_nanosleep_time64 case __NR_clock_nanosleep_time64: #endif @@ -138,7 +140,9 @@ static void handle_sys_enter(void *data, struct pt_regs *regs, long id) ltl_atom_update(current, LTL_CLOCK_NANOSLEEP, true); break; +#ifdef __NR_futex case __NR_futex: +#endif #ifdef __NR_futex_time64 case __NR_futex_time64: #endif diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c index 1482e91c39f4..48338520376f 100644 --- a/kernel/trace/rv/rv.c +++ b/kernel/trace/rv/rv.c @@ -495,7 +495,7 @@ static void *available_monitors_next(struct seq_file *m, void *p, loff_t *pos) */ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos) { - struct rv_monitor *mon = p; + struct rv_monitor *mon = container_of(p, struct rv_monitor, list); (*pos)++; @@ -805,7 +805,7 @@ int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent) retval = create_monitor_dir(monitor, parent); if (retval) - return retval; + goto out_unlock; /* keep children close to the parent for easier visualisation */ if (parent) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index ccae62d4fb91..fa60362a3f31 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -908,6 +908,8 @@ static int trace_kprobe_create_internal(int argc, const char *argv[], return -EINVAL; } buf = kmemdup(&argv[0][1], len + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; buf[len] = '\0'; ret = kstrtouint(buf, 0, &maxactive); if (ret || !maxactive) { diff --git a/mm/damon/core.c b/mm/damon/core.c index c2e0b469fd43..08065b363972 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -2479,10 +2479,14 @@ static void kdamond_call(struct damon_ctx *ctx, bool cancel) mutex_lock(&ctx->call_controls_lock); list_del(&control->list); mutex_unlock(&ctx->call_controls_lock); - if (!control->repeat) + if (!control->repeat) { complete(&control->completion); - else + } else if (control->canceled && control->dealloc_on_cancel) { + kfree(control); + continue; + } else { list_add(&control->list, &repeat_controls); + } } control = list_first_entry_or_null(&repeat_controls, struct damon_call_control, list); diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 7b9254cadd5f..c96c2154128f 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1534,14 +1534,10 @@ static int damon_sysfs_repeat_call_fn(void *data) return 0; } -static struct damon_call_control damon_sysfs_repeat_call_control = { - .fn = damon_sysfs_repeat_call_fn, - .repeat = true, -}; - static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond) { struct damon_ctx *ctx; + struct damon_call_control *repeat_call_control; int err; if (damon_sysfs_kdamond_running(kdamond)) @@ -1554,18 +1550,29 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond) damon_destroy_ctx(kdamond->damon_ctx); kdamond->damon_ctx = NULL; + repeat_call_control = kmalloc(sizeof(*repeat_call_control), + GFP_KERNEL); + if (!repeat_call_control) + return -ENOMEM; + ctx 
= damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]); - if (IS_ERR(ctx)) + if (IS_ERR(ctx)) { + kfree(repeat_call_control); return PTR_ERR(ctx); + } err = damon_start(&ctx, 1, false); if (err) { + kfree(repeat_call_control); damon_destroy_ctx(ctx); return err; } kdamond->damon_ctx = ctx; - damon_sysfs_repeat_call_control.data = kdamond; - damon_call(ctx, &damon_sysfs_repeat_call_control); + repeat_call_control->fn = damon_sysfs_repeat_call_fn; + repeat_call_control->data = kdamond; + repeat_call_control->repeat = true; + repeat_call_control->dealloc_on_cancel = true; + damon_call(ctx, repeat_call_control); return err; } @@ -2287,8 +2287,8 @@ static unsigned long collect_longterm_unpinnable_folios( struct pages_or_folios *pofs) { unsigned long collected = 0; - bool drain_allow = true; struct folio *folio; + int drained = 0; long i = 0; for (folio = pofs_get_folio(pofs, i); folio; @@ -2307,9 +2307,17 @@ static unsigned long collect_longterm_unpinnable_folios( continue; } - if (!folio_test_lru(folio) && drain_allow) { + if (drained == 0 && folio_may_be_lru_cached(folio) && + folio_ref_count(folio) != + folio_expected_ref_count(folio) + 1) { + lru_add_drain(); + drained = 1; + } + if (drained == 1 && folio_may_be_lru_cached(folio) && + folio_ref_count(folio) != + folio_expected_ref_count(folio) + 1) { lru_add_drain_all(); - drain_allow = false; + drained = 2; } if (!folio_isolate_lru(folio)) diff --git a/mm/mlock.c b/mm/mlock.c index a1d93ad33c6d..bb0776f5ef7c 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -255,7 +255,7 @@ void mlock_folio(struct folio *folio) folio_get(folio); if (!folio_batch_add(fbatch, mlock_lru(folio)) || - folio_test_large(folio) || lru_cache_disabled()) + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } @@ -278,7 +278,7 @@ void mlock_new_folio(struct folio *folio) folio_get(folio); if (!folio_batch_add(fbatch, mlock_new(folio)) || - folio_test_large(folio) || lru_cache_disabled()) + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } @@ -299,7 +299,7 @@ void munlock_folio(struct folio *folio) */ folio_get(folio); if (!folio_batch_add(fbatch, folio) || - folio_test_large(folio) || lru_cache_disabled()) + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } diff --git a/mm/swap.c b/mm/swap.c index 3632dd061beb..b74ebe865dd9 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -164,6 +164,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) for (i = 0; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; + /* block memcg migration while the folio moves between lru */ + if (move_fn != lru_add && !folio_test_clear_lru(folio)) + continue; + folio_lruvec_relock_irqsave(folio, &lruvec, &flags); move_fn(lruvec, folio); @@ -176,14 +180,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) } static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, - struct folio *folio, move_fn_t move_fn, - bool on_lru, bool disable_irq) + struct folio *folio, move_fn_t move_fn, bool disable_irq) { unsigned long flags; - if (on_lru && !folio_test_clear_lru(folio)) - return; - folio_get(folio); if (disable_irq) @@ -191,8 +191,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, else local_lock(&cpu_fbatches.lock); - if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || 
folio_test_large(folio) || - lru_cache_disabled()) + if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || + !folio_may_be_lru_cached(folio) || lru_cache_disabled()) folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn); if (disable_irq) @@ -201,13 +201,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch, local_unlock(&cpu_fbatches.lock); } -#define folio_batch_add_and_move(folio, op, on_lru) \ - __folio_batch_add_and_move( \ - &cpu_fbatches.op, \ - folio, \ - op, \ - on_lru, \ - offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \ +#define folio_batch_add_and_move(folio, op) \ + __folio_batch_add_and_move( \ + &cpu_fbatches.op, \ + folio, \ + op, \ + offsetof(struct cpu_fbatches, op) >= \ + offsetof(struct cpu_fbatches, lock_irq) \ ) static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) @@ -231,10 +231,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) void folio_rotate_reclaimable(struct folio *folio) { if (folio_test_locked(folio) || folio_test_dirty(folio) || - folio_test_unevictable(folio)) + folio_test_unevictable(folio) || !folio_test_lru(folio)) return; - folio_batch_add_and_move(folio, lru_move_tail, true); + folio_batch_add_and_move(folio, lru_move_tail); } void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file, @@ -328,10 +328,11 @@ static void folio_activate_drain(int cpu) void folio_activate(struct folio *folio) { - if (folio_test_active(folio) || folio_test_unevictable(folio)) + if (folio_test_active(folio) || folio_test_unevictable(folio) || + !folio_test_lru(folio)) return; - folio_batch_add_and_move(folio, lru_activate, true); + folio_batch_add_and_move(folio, lru_activate); } #else @@ -507,7 +508,7 @@ void folio_add_lru(struct folio *folio) lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) folio_set_active(folio); - folio_batch_add_and_move(folio, lru_add, false); + folio_batch_add_and_move(folio, lru_add); } EXPORT_SYMBOL(folio_add_lru); @@ -685,13 +686,13 @@ void lru_add_drain_cpu(int cpu) void deactivate_file_folio(struct folio *folio) { /* Deactivating an unevictable folio will not accelerate reclaim */ - if (folio_test_unevictable(folio)) + if (folio_test_unevictable(folio) || !folio_test_lru(folio)) return; if (lru_gen_enabled() && lru_gen_clear_refs(folio)) return; - folio_batch_add_and_move(folio, lru_deactivate_file, true); + folio_batch_add_and_move(folio, lru_deactivate_file); } /* @@ -704,13 +705,13 @@ void deactivate_file_folio(struct folio *folio) */ void folio_deactivate(struct folio *folio) { - if (folio_test_unevictable(folio)) + if (folio_test_unevictable(folio) || !folio_test_lru(folio)) return; if (lru_gen_enabled() ? 
lru_gen_clear_refs(folio) : !folio_test_active(folio)) return; - folio_batch_add_and_move(folio, lru_deactivate, true); + folio_batch_add_and_move(folio, lru_deactivate); } /** @@ -723,10 +724,11 @@ void folio_deactivate(struct folio *folio) void folio_mark_lazyfree(struct folio *folio) { if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || + !folio_test_lru(folio) || folio_test_swapcache(folio) || folio_test_unevictable(folio)) return; - folio_batch_add_and_move(folio, lru_lazyfree, true); + folio_batch_add_and_move(folio, lru_lazyfree); } void lru_add_drain(void) diff --git a/mm/vmscan.c b/mm/vmscan.c index a48aec8bfd92..674999999cd0 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4507,7 +4507,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c } /* ineligible */ - if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { + if (zone > sc->reclaim_idx) { gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); return true; diff --git a/net/core/dev.c b/net/core/dev.c index 93a25d87b86b..8d49b2198d07 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6965,7 +6965,7 @@ static void napi_stop_kthread(struct napi_struct *napi) * the kthread. */ while (true) { - if (!test_bit(NAPIF_STATE_SCHED_THREADED, &napi->state)) + if (!test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) break; msleep(20); diff --git a/net/devlink/rate.c b/net/devlink/rate.c index 110b3fa8a0b1..264fb82cba19 100644 --- a/net/devlink/rate.c +++ b/net/devlink/rate.c @@ -34,7 +34,7 @@ devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info) static struct devlink_rate * devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name) { - static struct devlink_rate *devlink_rate; + struct devlink_rate *devlink_rate; list_for_each_entry(devlink_rate, &devlink->rate_list, list) { if (devlink_rate_is_node(devlink_rate) && @@ -819,8 +819,8 @@ EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy); */ void devl_rate_nodes_destroy(struct devlink *devlink) { - static struct devlink_rate *devlink_rate, *tmp; const struct devlink_ops *ops = devlink->ops; + struct devlink_rate *devlink_rate, *tmp; devl_assert_locked(devlink); diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 4f58648a27ad..92e6a681c797 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -905,7 +905,7 @@ int ethtool_net_get_ts_info_by_phc(struct net_device *dev, int err; if (!ops->get_ts_info) - return -ENODEV; + return -EOPNOTSUPP; /* Does ptp comes from netdev */ ethtool_init_tsinfo(info); @@ -973,7 +973,7 @@ int ethtool_get_ts_info_by_phc(struct net_device *dev, int err; err = ethtool_net_get_ts_info_by_phc(dev, info, hwprov_desc); - if (err == -ENODEV) { + if (err == -ENODEV || err == -EOPNOTSUPP) { struct phy_device *phy; phy = ethtool_phy_get_ts_info_by_phc(dev, info, hwprov_desc); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 71a956fbfc55..ad76556800f2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3327,6 +3327,7 @@ int tcp_disconnect(struct sock *sk, int flags) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int old_state = sk->sk_state; + struct request_sock *req; u32 seq; if (old_state != TCP_CLOSE) @@ -3442,6 +3443,10 @@ int tcp_disconnect(struct sock *sk, int flags) /* Clean up fastopen related fields */ + req = rcu_dereference_protected(tp->fastopen_rsk, + lockdep_sock_is_held(sk)); + if (req) + reqsk_fastopen_remove(sk, req, false); tcp_free_fastopen_req(tp); 
inet_clear_bit(DEFER_CONNECT, sk); tp->fastopen_client_fail = 0; diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c index bbb8d5f0eae7..3338b6cc85c4 100644 --- a/net/ipv4/tcp_ao.c +++ b/net/ipv4/tcp_ao.c @@ -1178,7 +1178,9 @@ void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb) if (!ao) return; - WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq); + /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */ + if (skb) + WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq); ao->rcv_sne = 0; hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk)) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 2a8ea28442b2..1103b3341a70 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -985,13 +985,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, return false; } - if (mp_opt->deny_join_id0) - WRITE_ONCE(msk->pm.remote_deny_join_id0, true); - if (unlikely(!READ_ONCE(msk->pm.server_side))) pr_warn_once("bogus mpc option on established client sk"); set_fully_established: + if (mp_opt->deny_join_id0) + WRITE_ONCE(msk->pm.remote_deny_join_id0, true); + mptcp_data_lock((struct sock *)msk); __mptcp_subflow_fully_established(msk, subflow, mp_opt); mptcp_data_unlock((struct sock *)msk); diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 50aaf259959a..ce7d42d3bd00 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -408,6 +408,7 @@ static int mptcp_event_created(struct sk_buff *skb, const struct sock *ssk) { int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)); + u16 flags = 0; if (err) return err; @@ -415,6 +416,12 @@ static int mptcp_event_created(struct sk_buff *skb, if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side))) return -EMSGSIZE; + if (READ_ONCE(msk->pm.remote_deny_join_id0)) + flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0; + + if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags)) + return -EMSGSIZE; + return mptcp_event_add_subflow(skb, ssk); } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index e6fd97b21e9e..5e497a83e967 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -371,6 +371,20 @@ static void mptcp_close_wake_up(struct sock *sk) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } +static void mptcp_shutdown_subflows(struct mptcp_sock *msk) +{ + struct mptcp_subflow_context *subflow; + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow; + + slow = lock_sock_fast(ssk); + tcp_shutdown(ssk, SEND_SHUTDOWN); + unlock_sock_fast(ssk, slow); + } +} + /* called under the msk socket lock */ static bool mptcp_pending_data_fin_ack(struct sock *sk) { @@ -395,6 +409,7 @@ static void mptcp_check_data_fin_ack(struct sock *sk) break; case TCP_CLOSING: case TCP_LAST_ACK: + mptcp_shutdown_subflows(msk); mptcp_set_state(sk, TCP_CLOSE); break; } @@ -563,6 +578,7 @@ static bool mptcp_check_data_fin(struct sock *sk) mptcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: + mptcp_shutdown_subflows(msk); mptcp_set_state(sk, TCP_CLOSE); break; default: diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 3f1b62a9fe88..f31a3a79531a 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -883,6 +883,10 @@ create_child: ctx->subflow_id = 1; owner = mptcp_sk(ctx->conn); + + if (mp_opt.deny_join_id0) + WRITE_ONCE(owner->pm.remote_deny_join_id0, true); + mptcp_pm_new_connection(owner, child, 1); /* with OoO packets we can reach here without ingress diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c 
index 28c1b0022178..bd861191157b 100644 --- a/net/rds/ib_frmr.c +++ b/net/rds/ib_frmr.c @@ -133,12 +133,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, &off, PAGE_SIZE); - if (unlikely(ret != ibmr->sg_dma_len)) - return ret < 0 ? ret : -EINVAL; + if (unlikely(ret != ibmr->sg_dma_len)) { + ret = ret < 0 ? ret : -EINVAL; + goto out_inc; + } - if (cmpxchg(&frmr->fr_state, - FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) - return -EBUSY; + if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) { + ret = -EBUSY; + goto out_inc; + } atomic_inc(&ibmr->ic->i_fastreg_inuse_count); @@ -166,11 +169,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) /* Failure here can be because of -ENOMEM as well */ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE); - atomic_inc(&ibmr->ic->i_fastreg_wrs); if (printk_ratelimit()) pr_warn("RDS/IB: %s returned error(%d)\n", __func__, ret); - goto out; + goto out_inc; } /* Wait for the registration to complete in order to prevent an invalid @@ -179,8 +181,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) */ wait_event(frmr->fr_reg_done, !frmr->fr_reg); -out: + return ret; +out_inc: + atomic_inc(&ibmr->ic->i_fastreg_wrs); return ret; } diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 41e657e97761..cf2dcec6ce5a 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -94,10 +94,10 @@ static const struct dmi_system_id rfkill_gpio_deny_table[] = { static int rfkill_gpio_probe(struct platform_device *pdev) { struct rfkill_gpio_data *rfkill; - struct gpio_desc *gpio; + const char *type_name = NULL; const char *name_property; const char *type_property; - const char *type_name; + struct gpio_desc *gpio; int ret; if (dmi_check_system(rfkill_gpio_deny_table)) diff --git a/net/rxrpc/rxgk.c b/net/rxrpc/rxgk.c index 1e19c605bcc8..dce5a3d8a964 100644 --- a/net/rxrpc/rxgk.c +++ b/net/rxrpc/rxgk.c @@ -475,7 +475,7 @@ static int rxgk_verify_packet_integrity(struct rxrpc_call *call, struct krb5_buffer metadata; unsigned int offset = sp->offset, len = sp->len; size_t data_offset = 0, data_len = len; - u32 ac; + u32 ac = 0; int ret = -ENOMEM; _enter(""); @@ -499,9 +499,10 @@ static int rxgk_verify_packet_integrity(struct rxrpc_call *call, ret = rxgk_verify_mic_skb(gk->krb5, gk->rx_Kc, &metadata, skb, &offset, &len, &ac); kfree(hdr); - if (ret == -EPROTO) { - rxrpc_abort_eproto(call, skb, ac, - rxgk_abort_1_verify_mic_eproto); + if (ret < 0) { + if (ret != -ENOMEM) + rxrpc_abort_eproto(call, skb, ac, + rxgk_abort_1_verify_mic_eproto); } else { sp->offset = offset; sp->len = len; @@ -524,15 +525,16 @@ static int rxgk_verify_packet_encrypted(struct rxrpc_call *call, struct rxgk_header hdr; unsigned int offset = sp->offset, len = sp->len; int ret; - u32 ac; + u32 ac = 0; _enter(""); ret = rxgk_decrypt_skb(gk->krb5, gk->rx_enc, skb, &offset, &len, &ac); - if (ret == -EPROTO) - rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto); - if (ret < 0) + if (ret < 0) { + if (ret != -ENOMEM) + rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto); goto error; + } if (len < sizeof(hdr)) { ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT, diff --git a/net/rxrpc/rxgk_app.c b/net/rxrpc/rxgk_app.c index b94b77a1c317..30275cb5ba3e 100644 --- a/net/rxrpc/rxgk_app.c +++ b/net/rxrpc/rxgk_app.c @@ -54,6 +54,10 @@ int rxgk_yfs_decode_ticket(struct rxrpc_connection *conn, struct sk_buff *skb, _enter(""); + if (ticket_len < 10 * 
sizeof(__be32)) + return rxrpc_abort_conn(conn, skb, RXGK_INCONSISTENCY, -EPROTO, + rxgk_abort_resp_short_yfs_tkt); + /* Get the session key length */ ret = skb_copy_bits(skb, ticket_offset, tmp, sizeof(tmp)); if (ret < 0) @@ -187,7 +191,7 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb, struct key *server_key; unsigned int ticket_offset, ticket_len; u32 kvno, enctype; - int ret, ec; + int ret, ec = 0; struct { __be32 kvno; @@ -195,22 +199,23 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb, __be32 token_len; } container; + if (token_len < sizeof(container)) + goto short_packet; + /* Decode the RXGK_TokenContainer object. This tells us which server * key we should be using. We can then fetch the key, get the secret * and set up the crypto to extract the token. */ if (skb_copy_bits(skb, token_offset, &container, sizeof(container)) < 0) - return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO, - rxgk_abort_resp_tok_short); + goto short_packet; kvno = ntohl(container.kvno); enctype = ntohl(container.enctype); ticket_len = ntohl(container.token_len); ticket_offset = token_offset + sizeof(container); - if (xdr_round_up(ticket_len) > token_len - 3 * 4) - return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO, - rxgk_abort_resp_tok_short); + if (xdr_round_up(ticket_len) > token_len - sizeof(container)) + goto short_packet; _debug("KVNO %u", kvno); _debug("ENC %u", enctype); @@ -236,9 +241,11 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb, &ticket_offset, &ticket_len, &ec); crypto_free_aead(token_enc); token_enc = NULL; - if (ret < 0) - return rxrpc_abort_conn(conn, skb, ec, ret, - rxgk_abort_resp_tok_dec); + if (ret < 0) { + if (ret != -ENOMEM) + return rxrpc_abort_conn(conn, skb, ec, ret, + rxgk_abort_resp_tok_dec); + } ret = conn->security->default_decode_ticket(conn, skb, ticket_offset, ticket_len, _key); @@ -283,4 +290,8 @@ temporary_error: * also come out this way if the ticket decryption fails. */ return ret; + +short_packet: + return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO, + rxgk_abort_resp_tok_short); } diff --git a/net/rxrpc/rxgk_common.h b/net/rxrpc/rxgk_common.h index 7370a5655985..80164d89e19c 100644 --- a/net/rxrpc/rxgk_common.h +++ b/net/rxrpc/rxgk_common.h @@ -88,11 +88,16 @@ int rxgk_decrypt_skb(const struct krb5_enctype *krb5, *_offset += offset; *_len = len; break; + case -EBADMSG: /* Checksum mismatch. */ case -EPROTO: - case -EBADMSG: *_error_code = RXGK_SEALEDINCON; break; + case -EMSGSIZE: + *_error_code = RXGK_PACKETSHORT; + break; + case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. */ default: + *_error_code = RXGK_INCONSISTENCY; break; } @@ -127,11 +132,16 @@ int rxgk_verify_mic_skb(const struct krb5_enctype *krb5, *_offset += offset; *_len = len; break; + case -EBADMSG: /* Checksum mismatch */ case -EPROTO: - case -EBADMSG: *_error_code = RXGK_SEALEDINCON; break; + case -EMSGSIZE: + *_error_code = RXGK_PACKETSHORT; + break; + case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. 
*/ default: + *_error_code = RXGK_INCONSISTENCY; break; } diff --git a/net/tls/tls.h b/net/tls/tls.h index 4e077068e6d9..e4c42731ce39 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -141,6 +141,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx); int wait_on_pending_writer(struct sock *sk, long *timeo); void tls_err_abort(struct sock *sk, int err); +void tls_strp_abort_strp(struct tls_strparser *strp, int err); int init_prot_info(struct tls_prot_info *prot, const struct tls_crypto_info *crypto_info, diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index d71643b494a1..98e12f0ff57e 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -13,7 +13,7 @@ static struct workqueue_struct *tls_strp_wq; -static void tls_strp_abort_strp(struct tls_strparser *strp, int err) +void tls_strp_abort_strp(struct tls_strparser *strp, int err) { if (strp->stopped) return; @@ -211,11 +211,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb, struct sk_buff *in_skb, unsigned int offset, size_t in_len) { + unsigned int nfrag = skb->len / PAGE_SIZE; size_t len, chunk; skb_frag_t *frag; int sz; - frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; + if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) { + DEBUG_NET_WARN_ON_ONCE(1); + return -EMSGSIZE; + } + + frag = &skb_shinfo(skb)->frags[nfrag]; len = in_len; /* First make sure we got the header */ @@ -520,10 +526,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp) tls_strp_load_anchor_with_queue(strp, inq); if (!strp->stm.full_len) { sz = tls_rx_msg_size(strp, strp->anchor); - if (sz < 0) { - tls_strp_abort_strp(strp, sz); + if (sz < 0) return sz; - } strp->stm.full_len = sz; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index bac65d0d4e3e..daac9fd4be7e 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -2474,8 +2474,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb) return data_len + TLS_HEADER_SIZE; read_failure: - tls_err_abort(strp->sk, ret); - + tls_strp_abort_strp(strp, ret); return ret; } diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 84d60635e8a9..c2cc52ee9945 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -47,6 +47,7 @@ #include <linux/cpumask.h> #include <linux/cred.h> #include <linux/device/faux.h> +#include <linux/dma-direction.h> #include <linux/dma-mapping.h> #include <linux/errname.h> #include <linux/ethtool.h> @@ -57,6 +58,7 @@ #include <linux/jiffies.h> #include <linux/jump_label.h> #include <linux/mdio.h> +#include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/of_device.h> #include <linux/pci.h> diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index 7cf7fe95e41d..e94542bf6ea7 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -39,6 +39,7 @@ #include "rcu.c" #include "refcount.c" #include "regulator.c" +#include "scatterlist.c" #include "security.c" #include "signal.c" #include "slab.c" diff --git a/rust/helpers/scatterlist.c b/rust/helpers/scatterlist.c new file mode 100644 index 000000000000..80c956ee09ab --- /dev/null +++ b/rust/helpers/scatterlist.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/dma-direction.h> + +dma_addr_t rust_helper_sg_dma_address(struct scatterlist *sg) +{ + return sg_dma_address(sg); +} + +unsigned int rust_helper_sg_dma_len(struct scatterlist *sg) +{ + return sg_dma_len(sg); +} + +struct scatterlist *rust_helper_sg_next(struct scatterlist *sg) +{ + return sg_next(sg); +} + 
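These one-line wrappers exist because the primitives they wrap are not plain exported functions; a clarifying comment for the pattern (general to rust/helpers, not specific to this file):

	/*
	 * sg_dma_address() and sg_dma_len() are macros and sg_next() is a
	 * static inline, so bindgen cannot emit callable bindings for them;
	 * Rust code links against these out-of-line wrappers instead.
	 */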
+void rust_helper_dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + return dma_unmap_sgtable(dev, sgt, dir, attrs); +} diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs index 2692cf90c948..84ee7e9d7b0e 100644 --- a/rust/kernel/alloc/allocator.rs +++ b/rust/kernel/alloc/allocator.rs @@ -15,8 +15,12 @@ use core::ptr::NonNull; use crate::alloc::{AllocError, Allocator}; use crate::bindings; +use crate::page; use crate::pr_warn; +mod iter; +pub use self::iter::VmallocPageIter; + /// The contiguous kernel allocator. /// /// `Kmalloc` is typically used for physically contiguous allocations up to page size, but also @@ -142,6 +146,54 @@ unsafe impl Allocator for Kmalloc { } } +impl Vmalloc { + /// Convert a pointer to a [`Vmalloc`] allocation to a [`page::BorrowedPage`]. + /// + /// # Examples + /// + /// ``` + /// # use core::ptr::{NonNull, from_mut}; + /// # use kernel::{page, prelude::*}; + /// use kernel::alloc::allocator::Vmalloc; + /// + /// let mut vbox = VBox::<[u8; page::PAGE_SIZE]>::new_uninit(GFP_KERNEL)?; + /// + /// { + /// // SAFETY: By the type invariant of `Box` the inner pointer of `vbox` is non-null. + /// let ptr = unsafe { NonNull::new_unchecked(from_mut(&mut *vbox)) }; + /// + /// // SAFETY: + /// // `ptr` is a valid pointer to a `Vmalloc` allocation. + /// // `ptr` is valid for the entire lifetime of `page`. + /// let page = unsafe { Vmalloc::to_page(ptr.cast()) }; + /// + /// // SAFETY: There is no concurrent read or write to the same page. + /// unsafe { page.fill_zero_raw(0, page::PAGE_SIZE)? }; + /// } + /// # Ok::<(), Error>(()) + /// ``` + /// + /// # Safety + /// + /// - `ptr` must be a valid pointer to a [`Vmalloc`] allocation. + /// - `ptr` must remain valid for the entire duration of `'a`. + pub unsafe fn to_page<'a>(ptr: NonNull<u8>) -> page::BorrowedPage<'a> { + // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory. + let page = unsafe { bindings::vmalloc_to_page(ptr.as_ptr().cast()) }; + + // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid pointer + // to `Vmalloc` memory. + let page = unsafe { NonNull::new_unchecked(page) }; + + // SAFETY: + // - `page` is a valid pointer to a `struct page`, given that by the safety requirements of + // this function `ptr` is a valid pointer to a `Vmalloc` allocation. + // - By the safety requirements of this function `ptr` is valid for the entire lifetime of + // `'a`. + unsafe { page::BorrowedPage::from_raw(page) } + } +} + // SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that // - memory remains valid until it is explicitly freed, // - passing a pointer to a valid memory allocation is OK, diff --git a/rust/kernel/alloc/allocator/iter.rs b/rust/kernel/alloc/allocator/iter.rs new file mode 100644 index 000000000000..5759f86029b7 --- /dev/null +++ b/rust/kernel/alloc/allocator/iter.rs @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 + +use super::Vmalloc; +use crate::page; +use core::marker::PhantomData; +use core::ptr::NonNull; + +/// An [`Iterator`] of [`page::BorrowedPage`] items owned by a [`Vmalloc`] allocation. +/// +/// # Guarantees +/// +/// The pages yielded by the [`Iterator`] appear in ascending order of their mapping in the +/// CPU's virtual address space. +/// +/// # Invariants +/// +/// - `buf` is a valid and [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation.
+/// - `size` is the number of bytes from `buf` until the end of the [`Vmalloc`] allocation `buf` +/// points to. +pub struct VmallocPageIter<'a> { + /// The base address of the [`Vmalloc`] buffer. + buf: NonNull<u8>, + /// The size of the buffer pointed to by `buf` in bytes. + size: usize, + /// The current page index of the [`Iterator`]. + index: usize, + _p: PhantomData<page::BorrowedPage<'a>>, +} + +impl<'a> Iterator for VmallocPageIter<'a> { + type Item = page::BorrowedPage<'a>; + + fn next(&mut self) -> Option<Self::Item> { + let offset = self.index.checked_mul(page::PAGE_SIZE)?; + + // Even though `self.size()` may be smaller than `Self::page_count() * page::PAGE_SIZE`, it + // is always a number between `(Self::page_count() - 1) * page::PAGE_SIZE` and + // `Self::page_count() * page::PAGE_SIZE`, hence the check below is sufficient. + if offset < self.size() { + self.index += 1; + } else { + return None; + } + + // TODO: Use `NonNull::add()` instead, once the minimum supported compiler version is + // bumped to 1.80 or later. + // + // SAFETY: `offset` is in the interval `[0, (self.page_count() - 1) * page::PAGE_SIZE]`, + // hence the resulting pointer is guaranteed to be within the same allocation. + let ptr = unsafe { self.buf.as_ptr().add(offset) }; + + // SAFETY: `ptr` is guaranteed to be non-null given that it is derived from `self.buf`. + let ptr = unsafe { NonNull::new_unchecked(ptr) }; + + // SAFETY: + // - `ptr` is a valid pointer to a `Vmalloc` allocation. + // - `ptr` is valid for the duration of `'a`. + Some(unsafe { Vmalloc::to_page(ptr) }) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let remaining = self.page_count().saturating_sub(self.index); + + (remaining, Some(remaining)) + } +} + +impl<'a> VmallocPageIter<'a> { + /// Creates a new [`VmallocPageIter`] instance. + /// + /// # Safety + /// + /// - `buf` must be a [`page::PAGE_SIZE`] aligned pointer into a [`Vmalloc`] allocation. + /// - `buf` must be valid for at least the lifetime of `'a`. + /// - `size` must be the number of bytes from `buf` until the end of the [`Vmalloc`] allocation + /// `buf` points to. + pub unsafe fn new(buf: NonNull<u8>, size: usize) -> Self { + // INVARIANT: By the safety requirements, `buf` is a valid and `page::PAGE_SIZE` aligned + // pointer into a [`Vmalloc`] allocation. + Self { + buf, + size, + index: 0, + _p: PhantomData, + } + } + + /// Returns the size of the backing [`Vmalloc`] allocation in bytes. + /// + /// Note that this is the size the [`Vmalloc`] allocation has been allocated with. Hence, this + /// number may be smaller than `[`Self::page_count`] * [`page::PAGE_SIZE`]`. + #[inline] + pub fn size(&self) -> usize { + self.size + } + + /// Returns the number of pages owned by the backing [`Vmalloc`] allocation. + #[inline] + pub fn page_count(&self) -> usize { + self.size().div_ceil(page::PAGE_SIZE) + } +} diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs index 90dd987d40e4..7b10e276f621 100644 --- a/rust/kernel/alloc/allocator_test.rs +++ b/rust/kernel/alloc/allocator_test.rs @@ -12,8 +12,10 @@ use super::{flags::*, AllocError, Allocator, Flags}; use core::alloc::Layout; use core::cmp; +use core::marker::PhantomData; use core::ptr; use core::ptr::NonNull; +use kernel::page; /// The userspace allocator based on libc. 
pub struct Cmalloc; @@ -33,6 +35,33 @@ impl Cmalloc { } } +pub struct VmallocPageIter<'a> { + _p: PhantomData<page::BorrowedPage<'a>>, +} + +impl<'a> Iterator for VmallocPageIter<'a> { + type Item = page::BorrowedPage<'a>; + + fn next(&mut self) -> Option<Self::Item> { + None + } +} + +impl<'a> VmallocPageIter<'a> { + #[allow(clippy::missing_safety_doc)] + pub unsafe fn new(_buf: NonNull<u8>, _size: usize) -> Self { + Self { _p: PhantomData } + } + + pub fn size(&self) -> usize { + 0 + } + + pub fn page_count(&self) -> usize { + 0 + } +} + extern "C" { #[link_name = "aligned_alloc"] fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void; diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs index 856d05aa60f1..b69ff4a1d748 100644 --- a/rust/kernel/alloc/kbox.rs +++ b/rust/kernel/alloc/kbox.rs @@ -3,7 +3,7 @@ //! Implementation of [`Box`]. #[allow(unused_imports)] // Used in doc comments. -use super::allocator::{KVmalloc, Kmalloc, Vmalloc}; +use super::allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter}; use super::{AllocError, Allocator, Flags}; use core::alloc::Layout; use core::borrow::{Borrow, BorrowMut}; @@ -18,6 +18,7 @@ use core::result::Result; use crate::ffi::c_void; use crate::init::InPlaceInit; +use crate::page::AsPageIter; use crate::types::ForeignOwnable; use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption}; @@ -598,3 +599,40 @@ where unsafe { A::free(self.0.cast(), layout) }; } } + +/// # Examples +/// +/// ``` +/// # use kernel::prelude::*; +/// use kernel::alloc::allocator::VmallocPageIter; +/// use kernel::page::{AsPageIter, PAGE_SIZE}; +/// +/// let mut vbox = VBox::new((), GFP_KERNEL)?; +/// +/// assert!(vbox.page_iter().next().is_none()); +/// +/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?; +/// +/// let page = vbox.page_iter().next().expect("At least one page should be available.\n"); +/// +/// // SAFETY: There is no concurrent read or write to the same page. +/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? }; +/// # Ok::<(), Error>(()) +/// ``` +impl<T> AsPageIter for VBox<T> { + type Iter<'a> + = VmallocPageIter<'a> + where + T: 'a; + + fn page_iter(&mut self) -> Self::Iter<'_> { + let ptr = self.0.cast(); + let size = core::mem::size_of::<T>(); + + // SAFETY: + // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation. + // - `ptr` is guaranteed to be valid for the lifetime of `'a`. + // - `size` is the size of the `Vmalloc` allocation `ptr` points to. + unsafe { VmallocPageIter::new(ptr, size) } + } +} diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs index 3c72e0bdddb8..ac438e70a1ed 100644 --- a/rust/kernel/alloc/kvec.rs +++ b/rust/kernel/alloc/kvec.rs @@ -3,10 +3,11 @@ //! Implementation of [`Vec`]. use super::{ - allocator::{KVmalloc, Kmalloc, Vmalloc}, + allocator::{KVmalloc, Kmalloc, Vmalloc, VmallocPageIter}, layout::ArrayLayout, AllocError, Allocator, Box, Flags, }; +use crate::page::AsPageIter; use core::{ borrow::{Borrow, BorrowMut}, fmt, @@ -1017,6 +1018,43 @@ where } } +/// # Examples +/// +/// ``` +/// # use kernel::prelude::*; +/// use kernel::alloc::allocator::VmallocPageIter; +/// use kernel::page::{AsPageIter, PAGE_SIZE}; +/// +/// let mut vec = VVec::<u8>::new(); +/// +/// assert!(vec.page_iter().next().is_none()); +/// +/// vec.reserve(PAGE_SIZE, GFP_KERNEL)?; +/// +/// let page = vec.page_iter().next().expect("At least one page should be available.\n"); +/// +/// // SAFETY: There is no concurrent read or write to the same page. 
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? }; +/// # Ok::<(), Error>(()) +/// ``` +impl<T> AsPageIter for VVec<T> { + type Iter<'a> + = VmallocPageIter<'a> + where + T: 'a; + + fn page_iter(&mut self) -> Self::Iter<'_> { + let ptr = self.ptr.cast(); + let size = self.layout.size(); + + // SAFETY: + // - `ptr` is a valid pointer to the beginning of a `Vmalloc` allocation. + // - `ptr` is guaranteed to be valid for the lifetime of `'a`. + // - `size` is the size of the `Vmalloc` allocation `ptr` points to. + unsafe { VmallocPageIter::new(ptr, size) } + } +} + /// An [`Iterator`] implementation for [`Vec`] that moves elements out of a vector. /// /// This structure is created by the [`Vec::into_iter`] method on [`Vec`] (provided by the diff --git a/rust/kernel/alloc/layout.rs b/rust/kernel/alloc/layout.rs index 93ed514f7cc7..666accb7859c 100644 --- a/rust/kernel/alloc/layout.rs +++ b/rust/kernel/alloc/layout.rs @@ -98,6 +98,11 @@ impl<T> ArrayLayout<T> { pub const fn is_empty(&self) -> bool { self.len == 0 } + + /// Returns the size of the [`ArrayLayout`] in bytes. + pub const fn size(&self) -> usize { + self.len() * core::mem::size_of::<T>() + } } impl<T> From<ArrayLayout<T>> for Layout { diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs index d04e3fcebafb..aea6b7e31170 100644 --- a/rust/kernel/devres.rs +++ b/rust/kernel/devres.rs @@ -135,11 +135,9 @@ impl<T: Send> Devres<T> { T: 'a, Error: From<E>, { - let callback = Self::devres_callback; - try_pin_init!(&this in Self { dev: dev.into(), - callback, + callback: Self::devres_callback, // INVARIANT: `inner` is properly initialized. inner <- Opaque::pin_init(try_pin_init!(Inner { devm <- Completion::new(), @@ -160,7 +158,7 @@ impl<T: Send> Devres<T> { // properly initialized, because we require `dev` (i.e. the *bound* device) to // live at least as long as the returned `impl PinInit<Self, Error>`. to_result(unsafe { - bindings::devm_add_action(dev.as_raw(), Some(callback), inner.cast()) + bindings::devm_add_action(dev.as_raw(), Some(*callback), inner.cast()) }).inspect_err(|_| { let inner = Opaque::cast_into(inner); diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs index 2bc8ab51ec28..b2a6282876da 100644 --- a/rust/kernel/dma.rs +++ b/rust/kernel/dma.rs @@ -13,6 +13,16 @@ use crate::{ types::ARef, }; +/// DMA address type. +/// +/// Represents a bus address used for Direct Memory Access (DMA) operations. +/// +/// This is an alias of the kernel's `dma_addr_t`, which may be `u32` or `u64` depending on +/// `CONFIG_ARCH_DMA_ADDR_T_64BIT`. +/// +/// Note that this may be `u64` even on 32-bit architectures. +pub type DmaAddress = bindings::dma_addr_t; + /// Trait to be implemented by DMA capable bus devices. /// /// The [`dma::Device`](Device) trait should be implemented by bus specific device representations, @@ -244,6 +254,74 @@ pub mod attrs { pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED); } +/// DMA data direction. +/// +/// Corresponds to the C [`enum dma_data_direction`]. +/// +/// [`enum dma_data_direction`]: srctree/include/linux/dma-direction.h +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +#[repr(u32)] +pub enum DataDirection { + /// The DMA mapping is for bidirectional data transfer. + /// + /// This is used when the buffer can be both read from and written to by the device. + /// The cache for the corresponding memory region is both flushed and invalidated. 
+ Bidirectional = Self::const_cast(bindings::dma_data_direction_DMA_BIDIRECTIONAL), + + /// The DMA mapping is for data transfer from memory to the device (write). + /// + /// The CPU has prepared data in the buffer, and the device will read it. + /// The cache for the corresponding memory region is flushed before device access. + ToDevice = Self::const_cast(bindings::dma_data_direction_DMA_TO_DEVICE), + + /// The DMA mapping is for data transfer from the device to memory (read). + /// + /// The device will write data into the buffer for the CPU to read. + /// The cache for the corresponding memory region is invalidated before CPU access. + FromDevice = Self::const_cast(bindings::dma_data_direction_DMA_FROM_DEVICE), + + /// The DMA mapping is not for data transfer. + /// + /// This is primarily for debugging purposes. With this direction, the DMA mapping API + /// will not perform any cache coherency operations. + None = Self::const_cast(bindings::dma_data_direction_DMA_NONE), +} + +impl DataDirection { + /// Casts the bindgen-generated enum type to a `u32` at compile time. + /// + /// This function will cause a compile-time error if the underlying value of the + /// C enum is out of bounds for `u32`. + const fn const_cast(val: bindings::dma_data_direction) -> u32 { + // CAST: The C standard allows compilers to choose different integer types for enums. + // To safely check the value, we cast it to a wide signed integer type (`i128`) + // which can hold any standard C integer enum type without truncation. + let wide_val = val as i128; + + // Check if the value is outside the valid range for the target type `u32`. + // CAST: `u32::MAX` is cast to `i128` to match the type of `wide_val` for the comparison. + if wide_val < 0 || wide_val > u32::MAX as i128 { + // Trigger a compile-time error in a const context. + build_error!("C enum value is out of bounds for the target type `u32`."); + } + + // CAST: This cast is valid because the check above guarantees that `wide_val` + // is within the representable range of `u32`. + wide_val as u32 + } +} + +impl From<DataDirection> for bindings::dma_data_direction { + /// Returns the raw representation of [`enum dma_data_direction`]. + fn from(direction: DataDirection) -> Self { + // CAST: `direction as u32` gets the underlying representation of our `#[repr(u32)]` enum. + // The subsequent cast to `Self` (the bindgen type) assumes the C enum is compatible + // with the enum variants of `DataDirection`, which is a valid assumption given our + // compile-time checks. + direction as u32 as Self + } +} + /// An abstraction of the `dma_alloc_coherent` API. /// /// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map @@ -275,7 +353,7 @@ pub mod attrs { // entire `CoherentAllocation` including the allocated memory itself. pub struct CoherentAllocation<T: AsBytes + FromBytes> { dev: ARef<device::Device>, - dma_handle: bindings::dma_addr_t, + dma_handle: DmaAddress, count: usize, cpu_addr: *mut T, dma_attrs: Attrs, @@ -376,7 +454,7 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> { /// Returns a DMA handle which may be given to the device as the DMA address base of /// the region. - pub fn dma_handle(&self) -> bindings::dma_addr_t { + pub fn dma_handle(&self) -> DmaAddress { self.dma_handle } @@ -384,13 +462,13 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> { /// device as the DMA address base of the region. /// /// Returns `EINVAL` if `offset` is not within the bounds of the allocation. 
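+    ///
+    /// # Examples
+    ///
+    /// A hedged sketch of the address arithmetic (assuming `alloc` is an existing
+    /// `CoherentAllocation<u32>` with at least two elements; illustrative only):
+    ///
+    /// ```ignore
+    /// // `offset` counts elements of `T`, not bytes: element 1 lives one
+    /// // `size_of::<u32>()` past the base DMA handle.
+    /// let addr = alloc.dma_handle_with_offset(1)?;
+    /// assert_eq!(addr, alloc.dma_handle() + core::mem::size_of::<u32>() as DmaAddress);
+    /// ```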
-    pub fn dma_handle_with_offset(&self, offset: usize) -> Result<bindings::dma_addr_t> {
+    pub fn dma_handle_with_offset(&self, offset: usize) -> Result<DmaAddress> {
        if offset >= self.count {
            Err(EINVAL)
        } else {
            // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T>() * count`
            // fits into a `usize`, and `offset` is less than `count`.
-            Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as bindings::dma_addr_t)
+            Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as DmaAddress)
        }
    }
diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs
index 8fefae41bcc6..91e13b6ca26a 100644
--- a/rust/kernel/drm/driver.rs
+++ b/rust/kernel/drm/driver.rs
@@ -86,6 +86,9 @@ pub struct AllocOps {

/// Trait for memory manager implementations. Implemented internally.
pub trait AllocImpl: super::private::Sealed + drm::gem::IntoGEMObject {
+    /// The [`Driver`] implementation for this [`AllocImpl`].
+    type Driver: drm::Driver;
+
    /// The C callback operations for this memory manager.
    const ALLOC_OPS: AllocOps;
}
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
index a822aedee949..6ccbb25628a1 100644
--- a/rust/kernel/drm/gem/mod.rs
+++ b/rust/kernel/drm/gem/mod.rs
@@ -13,34 +13,34 @@ use crate::{
    sync::aref::{ARef, AlwaysRefCounted},
    types::Opaque,
};
-use core::{mem, ops::Deref, ptr::NonNull};
+use core::{ops::Deref, ptr::NonNull};
+
+/// A type alias for retrieving a [`Driver`]'s [`DriverFile`] implementation from its
+/// [`DriverObject`] implementation.
+///
+/// [`Driver`]: drm::Driver
+/// [`DriverFile`]: drm::file::DriverFile
+pub type DriverFile<T> = drm::File<<<T as DriverObject>::Driver as drm::Driver>::File>;

/// GEM object functions, which must be implemented by drivers.
-pub trait BaseDriverObject<T: BaseObject>: Sync + Send + Sized {
+pub trait DriverObject: Sync + Send + Sized {
+    /// Parent `Driver` for this object.
+    type Driver: drm::Driver;
+
    /// Create a new driver data object for a GEM object of a given size.
-    fn new(dev: &drm::Device<T::Driver>, size: usize) -> impl PinInit<Self, Error>;
+    fn new(dev: &drm::Device<Self::Driver>, size: usize) -> impl PinInit<Self, Error>;

    /// Open a new handle to an existing object, associated with a File.
-    fn open(
-        _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
-        _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
-    ) -> Result {
+    fn open(_obj: &<Self::Driver as drm::Driver>::Object, _file: &DriverFile<Self>) -> Result {
        Ok(())
    }

    /// Close a handle to an existing object, associated with a File.
-    fn close(
-        _obj: &<<T as IntoGEMObject>::Driver as drm::Driver>::Object,
-        _file: &drm::File<<<T as IntoGEMObject>::Driver as drm::Driver>::File>,
-    ) {
-    }
+    fn close(_obj: &<Self::Driver as drm::Driver>::Object, _file: &DriverFile<Self>) {}
}

/// Trait that represents a GEM object subtype.
pub trait IntoGEMObject: Sized + super::private::Sealed + AlwaysRefCounted {
-    /// Owning driver for this type
-    type Driver: drm::Driver;
-
    /// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
    /// this owning object is valid.
    fn as_raw(&self) -> *mut bindings::drm_gem_object;
@@ -75,25 +75,16 @@ unsafe impl<T: IntoGEMObject> AlwaysRefCounted for T {
    }
}

-/// Trait which must be implemented by drivers using base GEM objects.
-pub trait DriverObject: BaseDriverObject<Object<Self>> {
-    /// Parent `Driver` for this object.
-    type Driver: drm::Driver;
-}
-
-extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
+extern "C" fn open_callback<T: DriverObject>(
    raw_obj: *mut bindings::drm_gem_object,
    raw_file: *mut bindings::drm_file,
) -> core::ffi::c_int {
    // SAFETY: `open_callback` is only ever called with a valid pointer to a `struct drm_file`.
-    let file = unsafe {
-        drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::from_raw(raw_file)
-    };
-    // SAFETY: `open_callback` is specified in the AllocOps structure for `Object<T>`, ensuring that
-    // `raw_obj` is indeed contained within a `Object<T>`.
-    let obj = unsafe {
-        <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj)
-    };
+    let file = unsafe { DriverFile::<T>::from_raw(raw_file) };
+
+    // SAFETY: `open_callback` is specified in the AllocOps structure for `Object<T>`,
+    // ensuring that `raw_obj` is contained within an `Object<T>`.
+    let obj = unsafe { <<T::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) };

    match T::open(obj, file) {
        Err(e) => e.to_errno(),
@@ -101,26 +92,21 @@
    }
}

-extern "C" fn close_callback<T: BaseDriverObject<U>, U: BaseObject>(
+extern "C" fn close_callback<T: DriverObject>(
    raw_obj: *mut bindings::drm_gem_object,
    raw_file: *mut bindings::drm_file,
) {
    // SAFETY: `close_callback` is only ever called with a valid pointer to a `struct drm_file`.
-    let file = unsafe {
-        drm::File::<<<U as IntoGEMObject>::Driver as drm::Driver>::File>::from_raw(raw_file)
-    };
+    let file = unsafe { DriverFile::<T>::from_raw(raw_file) };
+
    // SAFETY: `close_callback` is specified in the AllocOps structure for `Object<T>`, ensuring
    // that `raw_obj` is indeed contained within a `Object<T>`.
-    let obj = unsafe {
-        <<<U as IntoGEMObject>::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj)
-    };
+    let obj = unsafe { <<T::Driver as drm::Driver>::Object as IntoGEMObject>::from_raw(raw_obj) };

    T::close(obj, file);
}

impl<T: DriverObject> IntoGEMObject for Object<T> {
-    type Driver = T::Driver;
-
    fn as_raw(&self) -> *mut bindings::drm_gem_object {
        self.obj.get()
    }
@@ -142,10 +128,12 @@
    /// Creates a new handle for the object associated with a given `File`
    /// (or returns an existing one).
-    fn create_handle(
-        &self,
-        file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
-    ) -> Result<u32> {
+    fn create_handle<D, F>(&self, file: &drm::File<F>) -> Result<u32>
+    where
+        Self: AllocImpl<Driver = D>,
+        D: drm::Driver<Object = Self, File = F>,
+        F: drm::file::DriverFile<Driver = D>,
+    {
        let mut handle: u32 = 0;
        // SAFETY: The arguments are all valid per the type invariants.
        to_result(unsafe {
@@ -155,10 +143,12 @@
    }

    /// Looks up an object by its handle for a given `File`.
-    fn lookup_handle(
-        file: &drm::File<<<Self as IntoGEMObject>::Driver as drm::Driver>::File>,
-        handle: u32,
-    ) -> Result<ARef<Self>> {
+    fn lookup_handle<D, F>(file: &drm::File<F>, handle: u32) -> Result<ARef<Self>>
+    where
+        Self: AllocImpl<Driver = D>,
+        D: drm::Driver<Object = Self, File = F>,
+        F: drm::file::DriverFile<Driver = D>,
+    {
        // SAFETY: The arguments are all valid per the type invariants.
        let ptr = unsafe { bindings::drm_gem_object_lookup(file.as_raw().cast(), handle) };

        if ptr.is_null() {
@@ -208,13 +198,10 @@ pub struct Object<T: DriverObject + Send + Sync> {
}

impl<T: DriverObject> Object<T> {
-    /// The size of this object's structure.
-    pub const SIZE: usize = mem::size_of::<Self>();
-
    const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
        free: Some(Self::free_callback),
-        open: Some(open_callback::<T, Object<T>>),
-        close: Some(close_callback::<T, Object<T>>),
+        open: Some(open_callback::<T>),
+        close: Some(close_callback::<T>),
        print_info: None,
        export: None,
        pin: None,
@@ -297,6 +284,8 @@ impl<T: DriverObject> Deref for Object<T> {
}

impl<T: DriverObject> AllocImpl for Object<T> {
+    type Driver = T::Driver;
+
    const ALLOC_OPS: AllocOps = AllocOps {
        gem_create_object: None,
        prime_handle_to_fd: None,
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index fef97f2a5098..44e4b8853ff3 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -18,6 +18,7 @@
//
// Stable since Rust 1.79.0.
#![feature(inline_const)]
+#![feature(pointer_is_aligned)]
//
// Stable since Rust 1.81.0.
#![feature(lint_reasons)]
@@ -113,6 +114,7 @@ pub mod print;
pub mod rbtree;
pub mod regulator;
pub mod revocable;
+pub mod scatterlist;
pub mod security;
pub mod seq_file;
pub mod sizes;
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
index 7c1b17246ed5..75ef096075cb 100644
--- a/rust/kernel/page.rs
+++ b/rust/kernel/page.rs
@@ -9,7 +9,12 @@ use crate::{
    error::Result,
    uaccess::UserSliceReader,
};
-use core::ptr::{self, NonNull};
+use core::{
+    marker::PhantomData,
+    mem::ManuallyDrop,
+    ops::Deref,
+    ptr::{self, NonNull},
+};

/// A bitwise shift for the page size.
pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
@@ -30,6 +35,86 @@ pub const fn page_align(addr: usize) -> usize {
    (addr + (PAGE_SIZE - 1)) & PAGE_MASK
}

+/// Representation of a non-owning reference to a [`Page`].
+///
+/// This type provides a borrowed version of a [`Page`] that is owned by some other entity, e.g. a
+/// [`Vmalloc`] allocation such as [`VBox`].
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{bindings, prelude::*};
+/// use kernel::page::{BorrowedPage, Page, PAGE_SIZE};
+/// # use core::{mem::MaybeUninit, ptr, ptr::NonNull};
+///
+/// fn borrow_page<'a>(vbox: &'a mut VBox<MaybeUninit<[u8; PAGE_SIZE]>>) -> BorrowedPage<'a> {
+///     let ptr = ptr::from_ref(&**vbox);
+///
+///     // SAFETY: `ptr` is a valid pointer to `Vmalloc` memory.
+///     let page = unsafe { bindings::vmalloc_to_page(ptr.cast()) };
+///
+///     // SAFETY: `vmalloc_to_page` returns a valid pointer to a `struct page` for a valid
+///     // pointer to `Vmalloc` memory.
+///     let page = unsafe { NonNull::new_unchecked(page) };
+///
+///     // SAFETY:
+///     // - `page` is a valid pointer to a `struct page`.
+///     // - `page` is valid for the entire lifetime `'a`.
+///     unsafe { BorrowedPage::from_raw(page) }
+/// }
+///
+/// let mut vbox = VBox::<[u8; PAGE_SIZE]>::new_uninit(GFP_KERNEL)?;
+/// let page = borrow_page(&mut vbox);
+///
+/// // SAFETY: There is no concurrent read or write to this page.
+/// unsafe { page.fill_zero_raw(0, PAGE_SIZE)? };
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// The borrowed underlying pointer to a `struct page` is valid for the entire lifetime `'a`.
+/// +/// [`VBox`]: kernel::alloc::VBox +/// [`Vmalloc`]: kernel::alloc::allocator::Vmalloc +pub struct BorrowedPage<'a>(ManuallyDrop<Page>, PhantomData<&'a Page>); + +impl<'a> BorrowedPage<'a> { + /// Constructs a [`BorrowedPage`] from a raw pointer to a `struct page`. + /// + /// # Safety + /// + /// - `ptr` must point to a valid `bindings::page`. + /// - `ptr` must remain valid for the entire lifetime `'a`. + pub unsafe fn from_raw(ptr: NonNull<bindings::page>) -> Self { + let page = Page { page: ptr }; + + // INVARIANT: The safety requirements guarantee that `ptr` is valid for the entire lifetime + // `'a`. + Self(ManuallyDrop::new(page), PhantomData) + } +} + +impl<'a> Deref for BorrowedPage<'a> { + type Target = Page; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Trait to be implemented by types which provide an [`Iterator`] implementation of +/// [`BorrowedPage`] items, such as [`VmallocPageIter`](kernel::alloc::allocator::VmallocPageIter). +pub trait AsPageIter { + /// The [`Iterator`] type, e.g. [`VmallocPageIter`](kernel::alloc::allocator::VmallocPageIter). + type Iter<'a>: Iterator<Item = BorrowedPage<'a>> + where + Self: 'a; + + /// Returns an [`Iterator`] of [`BorrowedPage`] items over all pages owned by `self`. + fn page_iter(&mut self) -> Self::Iter<'_>; +} + /// A pointer to a page that owns the page allocation. /// /// # Invariants diff --git a/rust/kernel/scatterlist.rs b/rust/kernel/scatterlist.rs new file mode 100644 index 000000000000..9709dff60b5a --- /dev/null +++ b/rust/kernel/scatterlist.rs @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Abstractions for scatter-gather lists. +//! +//! C header: [`include/linux/scatterlist.h`](srctree/include/linux/scatterlist.h) +//! +//! Scatter-gather (SG) I/O is a memory access technique that allows devices to perform DMA +//! operations on data buffers that are not physically contiguous in memory. It works by creating a +//! "scatter-gather list", an array where each entry specifies the address and length of a +//! physically contiguous memory segment. +//! +//! The device's DMA controller can then read this list and process the segments sequentially as +//! part of one logical I/O request. This avoids the need for a single, large, physically contiguous +//! memory buffer, which can be difficult or impossible to allocate. +//! +//! This module provides safe Rust abstractions over the kernel's `struct scatterlist` and +//! `struct sg_table` types. +//! +//! The main entry point is the [`SGTable`] type, which represents a complete scatter-gather table. +//! It can be either: +//! +//! - An owned table ([`SGTable<Owned<P>>`]), created from a Rust memory buffer (e.g., [`VVec`]). +//! This type manages the allocation of the `struct sg_table`, the DMA mapping of the buffer, and +//! the automatic cleanup of all resources. +//! - A borrowed reference (&[`SGTable`]), which provides safe, read-only access to a table that was +//! allocated by other (e.g., C) code. +//! +//! Individual entries in the table are represented by [`SGEntry`], which can be accessed by +//! iterating over an [`SGTable`]. + +use crate::{ + alloc, + alloc::allocator::VmallocPageIter, + bindings, + device::{Bound, Device}, + devres::Devres, + dma, error, + io::resource::ResourceSize, + page, + prelude::*, + types::{ARef, Opaque}, +}; +use core::{ops::Deref, ptr::NonNull}; + +/// A single entry in a scatter-gather list. 
+///
+/// An `SGEntry` represents a single, physically contiguous segment of memory that has been mapped
+/// for DMA.
+///
+/// Instances of this struct are obtained by iterating over an [`SGTable`]. Drivers do not create
+/// or own [`SGEntry`] objects directly.
+#[repr(transparent)]
+pub struct SGEntry(Opaque<bindings::scatterlist>);
+
+// SAFETY: `SGEntry` can be sent to any task.
+unsafe impl Send for SGEntry {}
+
+// SAFETY: `SGEntry` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for SGEntry {}
+
+impl SGEntry {
+    /// Convert a raw `struct scatterlist *` to a `&'a SGEntry`.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the `struct scatterlist` pointed to by `ptr` is valid for the
+    /// lifetime `'a`.
+    #[inline]
+    unsafe fn from_raw<'a>(ptr: *mut bindings::scatterlist) -> &'a Self {
+        // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer
+        // to a `struct scatterlist` for the duration of `'a`.
+        unsafe { &*ptr.cast() }
+    }
+
+    /// Obtain the raw `struct scatterlist *`.
+    #[inline]
+    fn as_raw(&self) -> *mut bindings::scatterlist {
+        self.0.get()
+    }
+
+    /// Returns the DMA address of this SG entry.
+    ///
+    /// This is the address that the device should use to access the memory segment.
+    #[inline]
+    pub fn dma_address(&self) -> dma::DmaAddress {
+        // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
+        unsafe { bindings::sg_dma_address(self.as_raw()) }
+    }
+
+    /// Returns the length of this SG entry in bytes.
+    #[inline]
+    pub fn dma_len(&self) -> ResourceSize {
+        #[allow(clippy::useless_conversion)]
+        // SAFETY: `self.as_raw()` is a valid pointer to a `struct scatterlist`.
+        unsafe { bindings::sg_dma_len(self.as_raw()) }.into()
+    }
+}
+
+/// The borrowed generic type of an [`SGTable`], representing a borrowed or externally managed
+/// table.
+#[repr(transparent)]
+pub struct Borrowed(Opaque<bindings::sg_table>);
+
+// SAFETY: `Borrowed` can be sent to any task.
+unsafe impl Send for Borrowed {}
+
+// SAFETY: `Borrowed` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for Borrowed {}
+
+/// A scatter-gather table.
+///
+/// This struct is a wrapper around the kernel's `struct sg_table`. It manages a list of DMA-mapped
+/// memory segments that can be passed to a device for I/O operations.
+///
+/// The generic parameter `T` distinguishes between owned and borrowed tables:
+///
+/// - [`SGTable<Owned>`]: An owned table created and managed entirely by Rust code. It handles
+///   allocation, DMA mapping, and cleanup of all associated resources. See [`SGTable::new`].
+/// - [`SGTable<Borrowed>`] (or simply [`SGTable`]): Represents a table whose lifetime is managed
+///   externally. It can be used safely via a borrowed reference `&'a SGTable`, where `'a` is the
+///   external lifetime.
+///
+/// Both [`SGTable`] variants can be iterated over, yielding the individual [`SGEntry`] items.
+#[repr(transparent)]
+#[pin_data]
+pub struct SGTable<T: private::Sealed = Borrowed> {
+    #[pin]
+    inner: T,
+}
+
+impl SGTable {
+    /// Creates a borrowed `&'a SGTable` from a raw `struct sg_table` pointer.
+    ///
+    /// This allows safe access to an `sg_table` that is managed elsewhere (for example, in C code).
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that:
+    ///
+    /// - the `struct sg_table` pointed to by `ptr` is valid for the entire lifetime of `'a`,
+    /// - the data behind `ptr` is not modified concurrently for the duration of `'a`.
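+    ///
+    /// # Examples
+    ///
+    /// A hedged sketch (`raw` is a hypothetical, externally managed `*mut bindings::sg_table`
+    /// that stays valid and unmodified for the duration of `'a`):
+    ///
+    /// ```ignore
+    /// // SAFETY: `raw` is valid and not modified concurrently for the duration of `'a`.
+    /// let sgt = unsafe { SGTable::from_raw(raw) };
+    ///
+    /// for entry in sgt.iter() {
+    ///     // Inspect each DMA mapped segment.
+    ///     let _ = entry.dma_address();
+    /// }
+    /// ```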
+ #[inline] + pub unsafe fn from_raw<'a>(ptr: *mut bindings::sg_table) -> &'a Self { + // SAFETY: The safety requirements of this function guarantee that `ptr` is a valid pointer + // to a `struct sg_table` for the duration of `'a`. + unsafe { &*ptr.cast() } + } + + #[inline] + fn as_raw(&self) -> *mut bindings::sg_table { + self.inner.0.get() + } + + /// Returns an [`SGTableIter`] bound to the lifetime of `self`. + pub fn iter(&self) -> SGTableIter<'_> { + // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`. + let nents = unsafe { (*self.as_raw()).nents }; + + let pos = if nents > 0 { + // SAFETY: `self.as_raw()` is a valid pointer to a `struct sg_table`. + let ptr = unsafe { (*self.as_raw()).sgl }; + + // SAFETY: `ptr` is guaranteed to be a valid pointer to a `struct scatterlist`. + Some(unsafe { SGEntry::from_raw(ptr) }) + } else { + None + }; + + SGTableIter { pos, nents } + } +} + +/// Represents the DMA mapping state of a `struct sg_table`. +/// +/// This is used as an inner type of [`Owned`] to manage the DMA mapping lifecycle. +/// +/// # Invariants +/// +/// - `sgt` is a valid pointer to a `struct sg_table` for the entire lifetime of the +/// [`DmaMappedSgt`]. +/// - `sgt` is always DMA mapped. +struct DmaMappedSgt { + sgt: NonNull<bindings::sg_table>, + dev: ARef<Device>, + dir: dma::DataDirection, +} + +// SAFETY: `DmaMappedSgt` can be sent to any task. +unsafe impl Send for DmaMappedSgt {} + +// SAFETY: `DmaMappedSgt` has no interior mutability and can be accessed concurrently. +unsafe impl Sync for DmaMappedSgt {} + +impl DmaMappedSgt { + /// # Safety + /// + /// - `sgt` must be a valid pointer to a `struct sg_table` for the entire lifetime of the + /// returned [`DmaMappedSgt`]. + /// - The caller must guarantee that `sgt` remains DMA mapped for the entire lifetime of + /// [`DmaMappedSgt`]. + unsafe fn new( + sgt: NonNull<bindings::sg_table>, + dev: &Device<Bound>, + dir: dma::DataDirection, + ) -> Result<Self> { + // SAFETY: + // - `dev.as_raw()` is a valid pointer to a `struct device`, which is guaranteed to be + // bound to a driver for the duration of this call. + // - `sgt` is a valid pointer to a `struct sg_table`. + error::to_result(unsafe { + bindings::dma_map_sgtable(dev.as_raw(), sgt.as_ptr(), dir.into(), 0) + })?; + + // INVARIANT: By the safety requirements of this function it is guaranteed that `sgt` is + // valid for the entire lifetime of this object instance. + Ok(Self { + sgt, + dev: dev.into(), + dir, + }) + } +} + +impl Drop for DmaMappedSgt { + #[inline] + fn drop(&mut self) { + // SAFETY: + // - `self.dev.as_raw()` is a pointer to a valid `struct device`. + // - `self.dev` is the same device the mapping has been created for in `Self::new()`. + // - `self.sgt.as_ptr()` is a valid pointer to a `struct sg_table` by the type invariants + // of `Self`. + // - `self.dir` is the same `dma::DataDirection` the mapping has been created with in + // `Self::new()`. + unsafe { + bindings::dma_unmap_sgtable(self.dev.as_raw(), self.sgt.as_ptr(), self.dir.into(), 0) + }; + } +} + +/// A transparent wrapper around a `struct sg_table`. +/// +/// While we could also create the `struct sg_table` in the constructor of [`Owned`], we can't tear +/// down the `struct sg_table` in [`Owned::drop`]; the drop order in [`Owned`] matters. +#[repr(transparent)] +struct RawSGTable(Opaque<bindings::sg_table>); + +// SAFETY: `RawSGTable` can be sent to any task. 
+unsafe impl Send for RawSGTable {}
+
+// SAFETY: `RawSGTable` has no interior mutability and can be accessed concurrently.
+unsafe impl Sync for RawSGTable {}
+
+impl RawSGTable {
+    /// # Safety
+    ///
+    /// - `pages` must be a slice of valid `struct page *`.
+    /// - The pages pointed to by `pages` must remain valid for the entire lifetime of the returned
+    ///   [`RawSGTable`].
+    unsafe fn new(
+        pages: &mut [*mut bindings::page],
+        size: usize,
+        max_segment: u32,
+        flags: alloc::Flags,
+    ) -> Result<Self> {
+        // `sg_alloc_table_from_pages_segment()` expects at least one page, otherwise it
+        // results in a NULL pointer dereference.
+        if pages.is_empty() {
+            return Err(EINVAL);
+        }
+
+        let sgt = Opaque::zeroed();
+        // SAFETY:
+        // - `sgt.get()` is a valid pointer to uninitialized memory.
+        // - As by the check above, `pages` is not empty.
+        error::to_result(unsafe {
+            bindings::sg_alloc_table_from_pages_segment(
+                sgt.get(),
+                pages.as_mut_ptr(),
+                pages.len().try_into()?,
+                0,
+                size,
+                max_segment,
+                flags.as_raw(),
+            )
+        })?;
+
+        Ok(Self(sgt))
+    }
+
+    #[inline]
+    fn as_raw(&self) -> *mut bindings::sg_table {
+        self.0.get()
+    }
+}
+
+impl Drop for RawSGTable {
+    #[inline]
+    fn drop(&mut self) {
+        // SAFETY: `sgt` is a valid and initialized `struct sg_table`.
+        unsafe { bindings::sg_free_table(self.0.get()) };
+    }
+}
+
+/// The [`Owned`] generic type of an [`SGTable`].
+///
+/// A [`SGTable<Owned>`] signifies that the [`SGTable`] owns all associated resources:
+///
+/// - The backing memory pages.
+/// - The `struct sg_table` allocation (`sgt`).
+/// - The DMA mapping, managed through a [`Devres`]-managed `DmaMappedSgt`.
+///
+/// Users interact with this type through the [`SGTable`] handle and do not need to manage
+/// [`Owned`] directly.
+#[pin_data]
+pub struct Owned<P> {
+    // Note: The drop order is relevant; we first have to unmap the `struct sg_table`, then free the
+    // `struct sg_table` and finally free the backing pages.
+    #[pin]
+    dma: Devres<DmaMappedSgt>,
+    sgt: RawSGTable,
+    _pages: P,
+}
+
+// SAFETY: `Owned` can be sent to any task if `P` can be sent to any task.
+unsafe impl<P: Send> Send for Owned<P> {}
+
+// SAFETY: `Owned` has no interior mutability and can be accessed concurrently if `P` can be
+// accessed concurrently.
+unsafe impl<P: Sync> Sync for Owned<P> {}
+
+impl<P> Owned<P>
+where
+    for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static,
+{
+    fn new(
+        dev: &Device<Bound>,
+        mut pages: P,
+        dir: dma::DataDirection,
+        flags: alloc::Flags,
+    ) -> Result<impl PinInit<Self, Error> + '_> {
+        let page_iter = pages.page_iter();
+        let size = page_iter.size();
+
+        let mut page_vec: KVec<*mut bindings::page> =
+            KVec::with_capacity(page_iter.page_count(), flags)?;
+
+        for page in page_iter {
+            page_vec.push(page.as_ptr(), flags)?;
+        }
+
+        // `dma_max_mapping_size` returns `size_t`, but `sg_alloc_table_from_pages_segment()` takes
+        // an `unsigned int`.
+        //
+        // SAFETY: `dev.as_raw()` is a valid pointer to a `struct device`.
+        let max_segment = match unsafe { bindings::dma_max_mapping_size(dev.as_raw()) } {
+            0 => u32::MAX,
+            max_segment => u32::try_from(max_segment).unwrap_or(u32::MAX),
+        };
+
+        Ok(try_pin_init!(&this in Self {
+            // SAFETY:
+            // - `page_vec` is a `KVec` of valid `struct page *` obtained from `pages`.
+            // - The pages contained in `pages` remain valid for the entire lifetime of the
+            //   `RawSGTable`.
+            sgt: unsafe { RawSGTable::new(&mut page_vec, size, max_segment, flags) }?,
+            dma <- {
+                // SAFETY: `this` is a valid pointer to uninitialized memory.
+ let sgt = unsafe { &raw mut (*this.as_ptr()).sgt }.cast(); + + // SAFETY: `sgt` is guaranteed to be non-null. + let sgt = unsafe { NonNull::new_unchecked(sgt) }; + + // SAFETY: + // - It is guaranteed that the object returned by `DmaMappedSgt::new` won't out-live + // `sgt`. + // - `sgt` is never DMA unmapped manually. + Devres::new(dev, unsafe { DmaMappedSgt::new(sgt, dev, dir) }) + }, + _pages: pages, + })) + } +} + +impl<P> SGTable<Owned<P>> +where + for<'a> P: page::AsPageIter<Iter<'a> = VmallocPageIter<'a>> + 'static, +{ + /// Allocates a new scatter-gather table from the given pages and maps it for DMA. + /// + /// This constructor creates a new [`SGTable<Owned>`] that takes ownership of `P`. + /// It allocates a `struct sg_table`, populates it with entries corresponding to the physical + /// pages of `P`, and maps the table for DMA with the specified [`Device`] and + /// [`dma::DataDirection`]. + /// + /// The DMA mapping is managed through [`Devres`], ensuring that the DMA mapping is unmapped + /// once the associated [`Device`] is unbound, or when the [`SGTable<Owned>`] is dropped. + /// + /// # Parameters + /// + /// * `dev`: The [`Device`] that will be performing the DMA. + /// * `pages`: The entity providing the backing pages. It must implement [`page::AsPageIter`]. + /// The ownership of this entity is moved into the new [`SGTable<Owned>`]. + /// * `dir`: The [`dma::DataDirection`] of the DMA transfer. + /// * `flags`: Allocation flags for internal allocations (e.g., [`GFP_KERNEL`]). + /// + /// # Examples + /// + /// ``` + /// use kernel::{ + /// device::{Bound, Device}, + /// dma, page, + /// prelude::*, + /// scatterlist::{SGTable, Owned}, + /// }; + /// + /// fn test(dev: &Device<Bound>) -> Result { + /// let size = 4 * page::PAGE_SIZE; + /// let pages = VVec::<u8>::with_capacity(size, GFP_KERNEL)?; + /// + /// let sgt = KBox::pin_init(SGTable::new( + /// dev, + /// pages, + /// dma::DataDirection::ToDevice, + /// GFP_KERNEL, + /// ), GFP_KERNEL)?; + /// + /// Ok(()) + /// } + /// ``` + pub fn new( + dev: &Device<Bound>, + pages: P, + dir: dma::DataDirection, + flags: alloc::Flags, + ) -> impl PinInit<Self, Error> + '_ { + try_pin_init!(Self { + inner <- Owned::new(dev, pages, dir, flags)? + }) + } +} + +impl<P> Deref for SGTable<Owned<P>> { + type Target = SGTable; + + #[inline] + fn deref(&self) -> &Self::Target { + // SAFETY: + // - `self.inner.sgt.as_raw()` is a valid pointer to a `struct sg_table` for the entire + // lifetime of `self`. + // - The backing `struct sg_table` is not modified for the entire lifetime of `self`. + unsafe { SGTable::from_raw(self.inner.sgt.as_raw()) } + } +} + +mod private { + pub trait Sealed {} + + impl Sealed for super::Borrowed {} + impl<P> Sealed for super::Owned<P> {} +} + +/// An [`Iterator`] over the DMA mapped [`SGEntry`] items of an [`SGTable`]. +/// +/// Note that the existence of an [`SGTableIter`] does not guarantee that the [`SGEntry`] items +/// actually remain DMA mapped; they are prone to be unmapped on device unbind. +pub struct SGTableIter<'a> { + pos: Option<&'a SGEntry>, + /// The number of DMA mapped entries in a `struct sg_table`. + nents: c_uint, +} + +impl<'a> Iterator for SGTableIter<'a> { + type Item = &'a SGEntry; + + fn next(&mut self) -> Option<Self::Item> { + let entry = self.pos?; + self.nents = self.nents.saturating_sub(1); + + // SAFETY: `entry.as_raw()` is a valid pointer to a `struct scatterlist`. 
+        let next = unsafe { bindings::sg_next(entry.as_raw()) };
+
+        self.pos = (!next.is_null() && self.nents > 0).then(|| {
+            // SAFETY: If `next` is not NULL, `sg_next()` guarantees to return a valid pointer to
+            // the next `struct scatterlist`.
+            unsafe { SGEntry::from_raw(next) }
+        });
+
+        Some(entry)
+    }
+}
diff --git a/rust/kernel/transmute.rs b/rust/kernel/transmute.rs
index 1c7d43771a37..cfc37d81adf2 100644
--- a/rust/kernel/transmute.rs
+++ b/rust/kernel/transmute.rs
@@ -2,6 +2,8 @@

//! Traits for transmuting types.

+use core::mem::size_of;
+
/// Types for which any bit pattern is valid.
///
/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so
@@ -9,10 +11,93 @@
///
/// It's okay for the type to have padding, as initializing those bytes has no effect.
///
+/// # Examples
+///
+/// ```
+/// use kernel::transmute::FromBytes;
+///
+/// # fn test() -> Option<()> {
+/// let raw = [1, 2, 3, 4];
+///
+/// let result = u32::from_bytes(&raw)?;
+///
+/// #[cfg(target_endian = "little")]
+/// assert_eq!(*result, 0x4030201);
+///
+/// #[cfg(target_endian = "big")]
+/// assert_eq!(*result, 0x1020304);
+///
+/// # Some(()) }
+/// # test().ok_or(EINVAL)?;
+/// # Ok::<(), Error>(())
+/// ```
+///
/// # Safety
///
/// All bit-patterns must be valid for this type. This type must not have interior mutability.
-pub unsafe trait FromBytes {}
+pub unsafe trait FromBytes {
+    /// Converts a slice of bytes to a reference to `Self`.
+    ///
+    /// Succeeds if `bytes` is properly aligned for `Self` and its length is equal to the size of
+    /// `Self`.
+    ///
+    /// Otherwise, returns [`None`].
+    fn from_bytes(bytes: &[u8]) -> Option<&Self>
+    where
+        Self: Sized,
+    {
+        let slice_ptr = bytes.as_ptr().cast::<Self>();
+        let size = size_of::<Self>();
+
+        #[allow(clippy::incompatible_msrv)]
+        if bytes.len() == size && slice_ptr.is_aligned() {
+            // SAFETY: Size and alignment were just checked.
+            unsafe { Some(&*slice_ptr) }
+        } else {
+            None
+        }
+    }
+
+    /// Converts a mutable slice of bytes to a reference to `Self`.
+    ///
+    /// Succeeds if `bytes` is properly aligned for `Self` and its length is equal to the size of
+    /// `Self`.
+    ///
+    /// Otherwise, returns [`None`].
+    fn from_bytes_mut(bytes: &mut [u8]) -> Option<&mut Self>
+    where
+        Self: AsBytes + Sized,
+    {
+        let slice_ptr = bytes.as_mut_ptr().cast::<Self>();
+        let size = size_of::<Self>();
+
+        #[allow(clippy::incompatible_msrv)]
+        if bytes.len() == size && slice_ptr.is_aligned() {
+            // SAFETY: Size and alignment were just checked.
+            unsafe { Some(&mut *slice_ptr) }
+        } else {
+            None
+        }
+    }
+
+    /// Creates an owned instance of `Self` by copying `bytes`.
+    ///
+    /// Unlike [`FromBytes::from_bytes`], which requires aligned input, this method can be used on
+    /// non-aligned data at the cost of a copy.
+    fn from_bytes_copy(bytes: &[u8]) -> Option<Self>
+    where
+        Self: Sized,
+    {
+        if bytes.len() == size_of::<Self>() {
+            // SAFETY: we just verified that `bytes` has the same size as `Self`, and per the
+            // invariants of `FromBytes`, any byte sequence of the correct length is a valid value
+            // for `Self`.
+            Some(unsafe { core::ptr::read_unaligned(bytes.as_ptr().cast::<Self>()) })
+        } else {
+            None
+        }
+    }
+}

macro_rules! impl_frombytes {
    ($($({$($generics:tt)*})? $t:ty, )*) => {
@@ -47,7 +132,32 @@ impl_frombytes! {
///
/// Values of this type may not contain any uninitialized bytes. This type must not have interior
/// mutability.
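+///
+/// # Examples
+///
+/// A small sketch of the byte-level round trip these traits enable (illustrative only; it uses
+/// nothing beyond the methods added in this patch):
+///
+/// ```
+/// use kernel::transmute::{AsBytes, FromBytes};
+///
+/// let n: u32 = 0xdeadbeef;
+/// let bytes = n.as_bytes();
+///
+/// // `from_bytes_copy` tolerates unaligned input, so a matching length is all that is needed
+/// // for the round trip to succeed.
+/// let m = u32::from_bytes_copy(bytes).ok_or(EINVAL)?;
+/// assert_eq!(m, n);
+/// # Ok::<(), Error>(())
+/// ```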
-pub unsafe trait AsBytes {}
+pub unsafe trait AsBytes {
+    /// Returns `self` as a slice of bytes.
+    fn as_bytes(&self) -> &[u8] {
+        // CAST: `Self` implements `AsBytes` thus all bytes of `self` are initialized.
+        let data = core::ptr::from_ref(self).cast::<u8>();
+        let len = core::mem::size_of_val(self);
+
+        // SAFETY: `data` is non-null and valid for reads of `len * size_of::<u8>()` bytes.
+        unsafe { core::slice::from_raw_parts(data, len) }
+    }
+
+    /// Returns `self` as a mutable slice of bytes.
+    fn as_bytes_mut(&mut self) -> &mut [u8]
+    where
+        Self: FromBytes,
+    {
+        // CAST: `Self` implements both `AsBytes` and `FromBytes` thus making `Self`
+        // bi-directionally transmutable to `[u8; size_of_val(self)]`.
+        let data = core::ptr::from_mut(self).cast::<u8>();
+        let len = core::mem::size_of_val(self);
+
+        // SAFETY: `data` is non-null and valid for reads and writes of `len * size_of::<u8>()`
+        // bytes.
+        unsafe { core::slice::from_raw_parts_mut(data, len) }
+    }
+}

macro_rules! impl_asbytes {
    ($($({$($generics:tt)*})? $t:ty, )*) => {
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index b9343d5bc00f..706e833e9702 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -356,18 +356,11 @@ struct ClosureWork<T> {
    func: Option<T>,
}

-impl<T> ClosureWork<T> {
-    fn project(self: Pin<&mut Self>) -> &mut Option<T> {
-        // SAFETY: The `func` field is not structurally pinned.
-        unsafe { &mut self.get_unchecked_mut().func }
-    }
-}
-
impl<T: FnOnce()> WorkItem for ClosureWork<T> {
    type Pointer = Pin<KBox<Self>>;

    fn run(mut this: Pin<KBox<Self>>) {
-        if let Some(func) = this.as_mut().project().take() {
+        if let Some(func) = this.as_mut().project().func.take() {
            (func)()
        }
    }
diff --git a/rust/pin-init/README.md b/rust/pin-init/README.md
index a4c01a8d78b2..723e275445d4 100644
--- a/rust/pin-init/README.md
+++ b/rust/pin-init/README.md
@@ -6,6 +6,18 @@

# `pin-init`

+> [!NOTE]
+>
+> This crate was originally named [`pinned-init`], but the migration to
+> `pin-init` is not yet complete. The `legacy` branch contains the current
+> version of the `pinned-init` crate and the `main` branch already incorporates
+> the rename to `pin-init`.
+>
+> There are still some changes needed on the kernel side before the migration
+> can be completed.
+
+[`pinned-init`]: https://crates.io/crates/pinned-init
+
<!-- cargo-rdme start -->

Library to safely and fallibly initialize pinned `struct`s using in-place constructors.
diff --git a/rust/pin-init/examples/error.rs b/rust/pin-init/examples/error.rs
index e0cc258746ce..8f4e135eb8ba 100644
--- a/rust/pin-init/examples/error.rs
+++ b/rust/pin-init/examples/error.rs
@@ -24,4 +24,6 @@ impl From<AllocError> for Error {
}

#[allow(dead_code)]
-fn main() {}
+fn main() {
+    let _ = Error;
+}
diff --git a/rust/pin-init/src/lib.rs b/rust/pin-init/src/lib.rs
index 62e013a5cc20..dd553212836e 100644
--- a/rust/pin-init/src/lib.rs
+++ b/rust/pin-init/src/lib.rs
@@ -740,6 +740,8 @@ macro_rules! stack_try_pin_init {
/// As already mentioned in the examples above, inside of `pin_init!` a `struct` initializer with
/// the following modifications is expected:
/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
+/// - You can use `_: { /* run any user-code here */ },` anywhere you can place fields, in order
+///   to run arbitrary code.
/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
///   pointer named `this` inside of the initializer.
/// - Using struct update syntax one can place `..Zeroable::init_zeroed()` at the very end of the @@ -994,7 +996,7 @@ macro_rules! try_init { /// } /// /// impl<T> Foo<T> { -/// fn project(self: Pin<&mut Self>) -> Pin<&mut T> { +/// fn project_this(self: Pin<&mut Self>) -> Pin<&mut T> { /// assert_pinned!(Foo<T>, elem, T, inline); /// /// // SAFETY: The field is structurally pinned. diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs index 9ced630737b8..d6acf2cd291e 100644 --- a/rust/pin-init/src/macros.rs +++ b/rust/pin-init/src/macros.rs @@ -831,6 +831,17 @@ macro_rules! __pin_data { $($fields)* } + $crate::__pin_data!(make_pin_projections: + @vis($vis), + @name($name), + @impl_generics($($impl_generics)*), + @ty_generics($($ty_generics)*), + @decl_generics($($decl_generics)*), + @where($($whr)*), + @pinned($($pinned)*), + @not_pinned($($not_pinned)*), + ); + // We put the rest into this const item, because it then will not be accessible to anything // outside. const _: () = { @@ -980,6 +991,56 @@ macro_rules! __pin_data { stringify!($($rest)*), ); }; + (make_pin_projections: + @vis($vis:vis), + @name($name:ident), + @impl_generics($($impl_generics:tt)*), + @ty_generics($($ty_generics:tt)*), + @decl_generics($($decl_generics:tt)*), + @where($($whr:tt)*), + @pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?), + @not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?), + ) => { + $crate::macros::paste! { + #[doc(hidden)] + $vis struct [< $name Projection >] <'__pin, $($decl_generics)*> { + $($(#[$($p_attr)*])* $pvis $p_field : ::core::pin::Pin<&'__pin mut $p_type>,)* + $($(#[$($attr)*])* $fvis $field : &'__pin mut $type,)* + ___pin_phantom_data: ::core::marker::PhantomData<&'__pin mut ()>, + } + + impl<$($impl_generics)*> $name<$($ty_generics)*> + where $($whr)* + { + /// Pin-projects all fields of `Self`. + /// + /// These fields are structurally pinned: + $(#[doc = ::core::concat!(" - `", ::core::stringify!($p_field), "`")])* + /// + /// These fields are **not** structurally pinned: + $(#[doc = ::core::concat!(" - `", ::core::stringify!($field), "`")])* + #[inline] + $vis fn project<'__pin>( + self: ::core::pin::Pin<&'__pin mut Self>, + ) -> [< $name Projection >] <'__pin, $($ty_generics)*> { + // SAFETY: we only give access to `&mut` for fields not structurally pinned. + let this = unsafe { ::core::pin::Pin::get_unchecked_mut(self) }; + [< $name Projection >] { + $( + // SAFETY: `$p_field` is structurally pinned. + $(#[$($p_attr)*])* + $p_field : unsafe { ::core::pin::Pin::new_unchecked(&mut this.$p_field) }, + )* + $( + $(#[$($attr)*])* + $field : &mut this.$field, + )* + ___pin_phantom_data: ::core::marker::PhantomData, + } + } + } + } + }; (make_pin_data: @pin_data($pin_data:ident), @impl_generics($($impl_generics:tt)*), @@ -988,38 +1049,56 @@ macro_rules! __pin_data { @pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?), @not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?), ) => { - // For every field, we create a projection function according to its projection type. If a - // field is structurally pinned, then it must be initialized via `PinInit`, if it is not - // structurally pinned, then it can be initialized via `Init`. - // - // The functions are `unsafe` to prevent accidentally calling them. 
- #[allow(dead_code)] - #[expect(clippy::missing_safety_doc)] - impl<$($impl_generics)*> $pin_data<$($ty_generics)*> - where $($whr)* - { - $( - $(#[$($p_attr)*])* - $pvis unsafe fn $p_field<E>( - self, - slot: *mut $p_type, - init: impl $crate::PinInit<$p_type, E>, - ) -> ::core::result::Result<(), E> { - // SAFETY: TODO. - unsafe { $crate::PinInit::__pinned_init(init, slot) } - } - )* - $( - $(#[$($attr)*])* - $fvis unsafe fn $field<E>( - self, - slot: *mut $type, - init: impl $crate::Init<$type, E>, - ) -> ::core::result::Result<(), E> { - // SAFETY: TODO. - unsafe { $crate::Init::__init(init, slot) } - } - )* + $crate::macros::paste! { + // For every field, we create a projection function according to its projection type. If a + // field is structurally pinned, then it must be initialized via `PinInit`, if it is not + // structurally pinned, then it can be initialized via `Init`. + // + // The functions are `unsafe` to prevent accidentally calling them. + #[allow(dead_code)] + #[expect(clippy::missing_safety_doc)] + impl<$($impl_generics)*> $pin_data<$($ty_generics)*> + where $($whr)* + { + $( + $(#[$($p_attr)*])* + $pvis unsafe fn $p_field<E>( + self, + slot: *mut $p_type, + init: impl $crate::PinInit<$p_type, E>, + ) -> ::core::result::Result<(), E> { + // SAFETY: TODO. + unsafe { $crate::PinInit::__pinned_init(init, slot) } + } + + $(#[$($p_attr)*])* + $pvis unsafe fn [<__project_ $p_field>]<'__slot>( + self, + slot: &'__slot mut $p_type, + ) -> ::core::pin::Pin<&'__slot mut $p_type> { + ::core::pin::Pin::new_unchecked(slot) + } + )* + $( + $(#[$($attr)*])* + $fvis unsafe fn $field<E>( + self, + slot: *mut $type, + init: impl $crate::Init<$type, E>, + ) -> ::core::result::Result<(), E> { + // SAFETY: TODO. + unsafe { $crate::Init::__init(init, slot) } + } + + $(#[$($attr)*])* + $fvis unsafe fn [<__project_ $field>]<'__slot>( + self, + slot: &'__slot mut $type, + ) -> &'__slot mut $type { + slot + } + )* + } } }; } @@ -1202,6 +1281,21 @@ macro_rules! __init_internal { // have been initialized. Therefore we can now dismiss the guards by forgetting them. $(::core::mem::forget($guards);)* }; + (init_slot($($use_data:ident)?): + @data($data:ident), + @slot($slot:ident), + @guards($($guards:ident,)*), + // arbitrary code block + @munch_fields(_: { $($code:tt)* }, $($rest:tt)*), + ) => { + { $($code)* } + $crate::__init_internal!(init_slot($($use_data)?): + @data($data), + @slot($slot), + @guards($($guards,)*), + @munch_fields($($rest)*), + ); + }; (init_slot($use_data:ident): // `use_data` is present, so we use the `data` to init fields. @data($data:ident), @slot($slot:ident), @@ -1216,6 +1310,13 @@ macro_rules! __init_internal { // return when an error/panic occurs. // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`. unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? }; + // SAFETY: + // - the project function does the correct field projection, + // - the field has been initialized, + // - the reference is only valid until the end of the initializer. + #[allow(unused_variables)] + let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) }); + // Create the drop guard: // // We rely on macro hygiene to make it impossible for users to access this local variable. @@ -1247,6 +1348,14 @@ macro_rules! __init_internal { // SAFETY: `slot` is valid, because we are inside of an initializer closure, we // return when an error/panic occurs. 
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? }; + + // SAFETY: + // - the field is not structurally pinned, since the line above must compile, + // - the field has been initialized, + // - the reference is only valid until the end of the initializer. + #[allow(unused_variables)] + let $field = unsafe { &mut (*$slot).$field }; + // Create the drop guard: // // We rely on macro hygiene to make it impossible for users to access this local variable. @@ -1265,7 +1374,48 @@ macro_rules! __init_internal { ); } }; - (init_slot($($use_data:ident)?): + (init_slot(): // No `use_data`, so all fields are not structurally pinned + @data($data:ident), + @slot($slot:ident), + @guards($($guards:ident,)*), + // Init by-value. + @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*), + ) => { + { + $(let $field = $val;)? + // Initialize the field. + // + // SAFETY: The memory at `slot` is uninitialized. + unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) }; + } + + #[allow(unused_variables)] + // SAFETY: + // - the field is not structurally pinned, since no `use_data` was required to create this + // initializer, + // - the field has been initialized, + // - the reference is only valid until the end of the initializer. + let $field = unsafe { &mut (*$slot).$field }; + + // Create the drop guard: + // + // We rely on macro hygiene to make it impossible for users to access this local variable. + // We use `paste!` to create new hygiene for `$field`. + $crate::macros::paste! { + // SAFETY: We forget the guard later when initialization has succeeded. + let [< __ $field _guard >] = unsafe { + $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field)) + }; + + $crate::__init_internal!(init_slot(): + @data($data), + @slot($slot), + @guards([< __ $field _guard >], $($guards,)*), + @munch_fields($($rest)*), + ); + } + }; + (init_slot($use_data:ident): @data($data:ident), @slot($slot:ident), @guards($($guards:ident,)*), @@ -1279,6 +1429,13 @@ macro_rules! __init_internal { // SAFETY: The memory at `slot` is uninitialized. unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) }; } + // SAFETY: + // - the project function does the correct field projection, + // - the field has been initialized, + // - the reference is only valid until the end of the initializer. + #[allow(unused_variables)] + let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) }); + // Create the drop guard: // // We rely on macro hygiene to make it impossible for users to access this local variable. @@ -1289,7 +1446,7 @@ macro_rules! __init_internal { $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field)) }; - $crate::__init_internal!(init_slot($($use_data)?): + $crate::__init_internal!(init_slot($use_data): @data($data), @slot($slot), @guards([< __ $field _guard >], $($guards,)*), @@ -1300,6 +1457,20 @@ macro_rules! 
__init_internal { (make_initializer: @slot($slot:ident), @type_name($t:path), + @munch_fields(_: { $($code:tt)* }, $($rest:tt)*), + @acc($($acc:tt)*), + ) => { + // code blocks are ignored for the initializer check + $crate::__init_internal!(make_initializer: + @slot($slot), + @type_name($t), + @munch_fields($($rest)*), + @acc($($acc)*), + ); + }; + (make_initializer: + @slot($slot:ident), + @type_name($t:path), @munch_fields(..Zeroable::init_zeroed() $(,)?), @acc($($acc:tt)*), ) => { diff --git a/rust/uapi/uapi_helper.h b/rust/uapi/uapi_helper.h index 1409441359f5..d4a239cf2a64 100644 --- a/rust/uapi/uapi_helper.h +++ b/rust/uapi/uapi_helper.h @@ -9,6 +9,7 @@ #include <uapi/asm-generic/ioctl.h> #include <uapi/drm/drm.h> #include <uapi/drm/nova_drm.h> +#include <uapi/drm/panthor_drm.h> #include <uapi/linux/mdio.h> #include <uapi/linux/mii.h> #include <uapi/linux/ethtool.h> diff --git a/samples/damon/mtier.c b/samples/damon/mtier.c index 7ebd352138e4..beaf36657dea 100644 --- a/samples/damon/mtier.c +++ b/samples/damon/mtier.c @@ -208,6 +208,9 @@ static int damon_sample_mtier_enable_store( if (enabled == is_enabled) return 0; + if (!init_called) + return 0; + if (enabled) { err = damon_sample_mtier_start(); if (err) diff --git a/samples/damon/prcl.c b/samples/damon/prcl.c index 1b839c06a612..0226652f94d5 100644 --- a/samples/damon/prcl.c +++ b/samples/damon/prcl.c @@ -137,6 +137,9 @@ static int damon_sample_prcl_enable_store( if (enabled == is_enabled) return 0; + if (!init_called) + return 0; + if (enabled) { err = damon_sample_prcl_start(); if (err) diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c index da052023b099..21eaf15f987d 100644 --- a/samples/damon/wsse.c +++ b/samples/damon/wsse.c @@ -118,6 +118,9 @@ static int damon_sample_wsse_enable_store( return 0; if (enabled) { + if (!init_called) + return 0; + err = damon_sample_wsse_start(); if (err) enabled = false; diff --git a/samples/rust/rust_dma.rs b/samples/rust/rust_dma.rs index c5e7cce68654..04007e29fd85 100644 --- a/samples/rust/rust_dma.rs +++ b/samples/rust/rust_dma.rs @@ -7,15 +7,19 @@ use kernel::{ bindings, device::Core, - dma::{CoherentAllocation, Device, DmaMask}, - pci, + dma::{CoherentAllocation, DataDirection, Device, DmaMask}, + page, pci, prelude::*, + scatterlist::{Owned, SGTable}, types::ARef, }; +#[pin_data(PinnedDrop)] struct DmaSampleDriver { pdev: ARef<pci::Device>, ca: CoherentAllocation<MyStruct>, + #[pin] + sgt: SGTable<Owned<VVec<u8>>>, } const TEST_VALUES: [(u32, u32); 5] = [ @@ -70,21 +74,30 @@ impl pci::Driver for DmaSampleDriver { kernel::dma_write!(ca[i] = MyStruct::new(value.0, value.1))?; } - let drvdata = KBox::new( - Self { + let size = 4 * page::PAGE_SIZE; + let pages = VVec::with_capacity(size, GFP_KERNEL)?; + + let sgt = SGTable::new(pdev.as_ref(), pages, DataDirection::ToDevice, GFP_KERNEL); + + let drvdata = KBox::pin_init( + try_pin_init!(Self { pdev: pdev.into(), ca, - }, + sgt <- sgt, + }), GFP_KERNEL, )?; - Ok(drvdata.into()) + Ok(drvdata) } } -impl Drop for DmaSampleDriver { - fn drop(&mut self) { - dev_info!(self.pdev.as_ref(), "Unload DMA test driver.\n"); +#[pinned_drop] +impl PinnedDrop for DmaSampleDriver { + fn drop(self: Pin<&mut Self>) { + let dev = self.pdev.as_ref(); + + dev_info!(dev, "Unload DMA test driver.\n"); for (i, value) in TEST_VALUES.into_iter().enumerate() { let val0 = kernel::dma_read!(self.ca[i].h); @@ -99,6 +112,10 @@ impl Drop for DmaSampleDriver { assert_eq!(val1, value.1); } } + + for (i, entry) in self.sgt.iter().enumerate() { + dev_info!(dev, 
"Entry[{}]: DMA address: {:#x}", i, entry.dma_address()); + } } } diff --git a/samples/rust/rust_driver_pci.rs b/samples/rust/rust_driver_pci.rs index 606946ff4d7f..1ac0b06fa3b3 100644 --- a/samples/rust/rust_driver_pci.rs +++ b/samples/rust/rust_driver_pci.rs @@ -78,8 +78,8 @@ impl pci::Driver for SampleDriver { let drvdata = KBox::pin_init( try_pin_init!(Self { - pdev: pdev.into(), bar <- pdev.iomap_region_sized::<{ Regs::END }>(0, c_str!("rust_driver_pci")), + pdev: pdev.into(), index: *info, }), GFP_KERNEL, diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index 85bb8c4d3b17..f267437c9698 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -3702,6 +3702,7 @@ enum { ALC236_FIXUP_DELL_DUAL_CODECS, ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI, ALC287_FIXUP_TAS2781_I2C, + ALC295_FIXUP_DELL_TAS2781_I2C, ALC245_FIXUP_TAS2781_SPI_2, ALC287_FIXUP_TXNW2781_I2C, ALC287_FIXUP_YOGA7_14ARB7_I2C, @@ -5167,6 +5168,12 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc294_fixup_gx502_hp, }, + [ALC295_FIXUP_DELL_TAS2781_I2C] = { + .type = HDA_FIXUP_FUNC, + .v.func = tas2781_fixup_tias_i2c, + .chained = true, + .chain_id = ALC289_FIXUP_DUAL_SPK + }, [ALC294_FIXUP_ASUS_GU502_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -6289,8 +6296,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), SND_PCI_QUIRK(0x1028, 0x0c28, "Dell Inspiron 16 Plus 7630", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), SND_PCI_QUIRK(0x1028, 0x0c4d, "Dell", ALC287_FIXUP_CS35L41_I2C_4), - SND_PCI_QUIRK(0x1028, 0x0c94, "Dell Polaris 3 metal", ALC287_FIXUP_TAS2781_I2C), - SND_PCI_QUIRK(0x1028, 0x0c96, "Dell Polaris 2in1", ALC287_FIXUP_TAS2781_I2C), + SND_PCI_QUIRK(0x1028, 0x0c94, "Dell Polaris 3 metal", ALC295_FIXUP_DELL_TAS2781_I2C), + SND_PCI_QUIRK(0x1028, 0x0c96, "Dell Polaris 2in1", ALC295_FIXUP_DELL_TAS2781_I2C), SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2), SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2), @@ -6469,6 +6476,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x89a0, "HP Laptop 15-dw4xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED), @@ -7071,8 +7079,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x38be, "Yoga S980-14.5 proX YC Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38bf, "Yoga S980-14.5 proX LX Dual", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38c3, "Y980 DUAL", ALC287_FIXUP_TAS2781_I2C), - SND_PCI_QUIRK(0x17aa, 0x38c7, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4), - SND_PCI_QUIRK(0x17aa, 0x38c8, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4), + SND_PCI_QUIRK(0x17aa, 0x38c7, "Thinkbook 13x Gen 4", 
ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x38c8, "Thinkbook 13x Gen 4", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN), @@ -7093,6 +7101,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), SND_PCI_QUIRK(0x17aa, 0x391f, "Yoga S990-16 pro Quad YC Quad", ALC287_FIXUP_TXNW2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3920, "Yoga S990-16 pro Quad VECO Quad", ALC287_FIXUP_TXNW2781_I2C), + SND_PCI_QUIRK(0x17aa, 0x3929, "Thinkbook 13x Gen 5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x392b, "Thinkbook 13x Gen 5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), diff --git a/sound/hda/codecs/side-codecs/cs35l41_hda_property.c b/sound/hda/codecs/side-codecs/cs35l41_hda_property.c index d8249d997c2a..16d5ea77192f 100644 --- a/sound/hda/codecs/side-codecs/cs35l41_hda_property.c +++ b/sound/hda/codecs/side-codecs/cs35l41_hda_property.c @@ -135,6 +135,8 @@ static const struct cs35l41_config cs35l41_config_table[] = { { "17AA38C8", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 }, { "17AA38F9", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 }, { "17AA38FA", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 }, + { "17AA3929", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 }, + { "17AA392B", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 }, {} }; @@ -558,6 +560,8 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = { { "CSC3551", "17AA38C8", generic_dsd_config }, { "CSC3551", "17AA38F9", generic_dsd_config }, { "CSC3551", "17AA38FA", generic_dsd_config }, + { "CSC3551", "17AA3929", generic_dsd_config }, + { "CSC3551", "17AA392B", generic_dsd_config }, {} }; diff --git a/sound/hda/codecs/side-codecs/tas2781_hda.c b/sound/hda/codecs/side-codecs/tas2781_hda.c index f46d2e06c64f..96e6d82dc69e 100644 --- a/sound/hda/codecs/side-codecs/tas2781_hda.c +++ b/sound/hda/codecs/side-codecs/tas2781_hda.c @@ -33,6 +33,23 @@ const efi_guid_t tasdev_fct_efi_guid[] = { }; EXPORT_SYMBOL_NS_GPL(tasdev_fct_efi_guid, "SND_HDA_SCODEC_TAS2781"); +/* + * The order of calibrated-data writing function is a bit different from the + * order in UEFI. Here is the conversion to match the order of calibrated-data + * writing function. 
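 * Concretely: cali_cnv() copies TASDEV_CALIB_N 32-bit words from the
 * UEFI blob at data[base] into a struct cali_reg, swaps r0_low_reg with
 * invr0_reg to get the order the write path expects, and stores the
 * result big-endian at data[offset + 1], leaving data[offset] free for
 * the device number.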
+ */ +static void cali_cnv(unsigned char *data, unsigned int base, int offset) +{ + struct cali_reg reg_data; + + memcpy(®_data, &data[base], sizeof(reg_data)); + /* the data order has to be swapped between r0_low_reg and inv0_reg */ + swap(reg_data.r0_low_reg, reg_data.invr0_reg); + + cpu_to_be32_array((__force __be32 *)(data + offset + 1), + (u32 *)®_data, TASDEV_CALIB_N); +} + static void tas2781_apply_calib(struct tasdevice_priv *p) { struct calidata *cali_data = &p->cali_data; @@ -103,8 +120,7 @@ static void tas2781_apply_calib(struct tasdevice_priv *p) data[l] = k; oft++; - for (i = 0; i < TASDEV_CALIB_N * 4; i++) - data[l + i + 1] = data[4 * oft + i]; + cali_cnv(data, 4 * oft, l); k++; } } @@ -130,9 +146,8 @@ static void tas2781_apply_calib(struct tasdevice_priv *p) for (j = p->ndev - 1; j >= 0; j--) { l = j * (cali_data->cali_dat_sz_per_dev + 1); - for (i = TASDEV_CALIB_N * 4; i > 0 ; i--) - data[l + i] = data[p->index * 5 + i]; - data[l+i] = j; + cali_cnv(data, cali_data->cali_dat_sz_per_dev * j, l); + data[l] = j; } } @@ -178,6 +193,11 @@ int tas2781_save_calibration(struct tas2781_hda *hda) efi_status_t status; int i; + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) { + dev_err(p->dev, "%s: NO EFI FOUND!\n", __func__); + return -EINVAL; + } + if (hda->catlog_id < LENOVO) efi_guid = tasdev_fct_efi_guid[hda->catlog_id]; diff --git a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c index 45a70fbf6205..b5b7a1e82b75 100644 --- a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c +++ b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c @@ -315,6 +315,11 @@ static int tas2563_save_calibration(struct tas2781_hda *h) unsigned int attr; int ret, i, j, k; + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) { + dev_err(p->dev, "%s: NO EFI FOUND!\n", __func__); + return -EINVAL; + } + cd->cali_dat_sz_per_dev = TAS2563_CAL_DATA_SIZE * TASDEV_CALIB_N; /* extra byte for each device is the device number */ diff --git a/sound/hda/core/intel-dsp-config.c b/sound/hda/core/intel-dsp-config.c index c15284742899..2a9e35cddcf7 100644 --- a/sound/hda/core/intel-dsp-config.c +++ b/sound/hda/core/intel-dsp-config.c @@ -650,6 +650,8 @@ static int snd_intel_dsp_check_soundwire(struct pci_dev *pci) int ret; handle = ACPI_HANDLE(&pci->dev); + if (!handle) + return -ENODEV; ret = sdw_intel_acpi_scan(handle, &info); if (ret < 0) diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c index 617690362ad7..4ba0a66981ea 100644 --- a/sound/soc/amd/acp/acp-i2s.c +++ b/sound/soc/amd/acp/acp-i2s.c @@ -73,7 +73,7 @@ static int acp_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct device *dev = cpu_dai->component->dev; - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); int mode; mode = fmt & SND_SOC_DAIFMT_FORMAT_MASK; @@ -199,7 +199,7 @@ static int acp_i2s_hwparams(struct snd_pcm_substream *substream, struct snd_pcm_ u32 reg_val, fmt_reg, tdm_fmt; u32 lrclk_div_val, bclk_div_val; - chip = dev_get_platdata(dev); + chip = dev_get_drvdata(dev->parent); rsrc = chip->rsrc; /* These values are as per Hardware Spec */ @@ -386,7 +386,7 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct { struct acp_stream *stream = substream->runtime->private_data; struct device *dev = dai->component->dev; - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); struct acp_resource *rsrc = 
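/*
 * (Behavioural summary of this hunk: a missing SMART_MIC ACPI function
 * on an RT712 VB part is downgraded from a hard error with a "goto
 * suspend" bail-out to a plain warning, so probing continues into the
 * multi-lane and DMIC setup that rt712vb supports.)
 */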
chip->rsrc; u32 val, period_bytes, reg_val, ier_val, water_val, buf_size, buf_reg; @@ -516,14 +516,13 @@ static int acp_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct static int acp_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct device *dev = dai->component->dev; - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); struct acp_resource *rsrc = chip->rsrc; struct acp_stream *stream = substream->runtime->private_data; u32 reg_dma_size = 0, reg_fifo_size = 0, reg_fifo_addr = 0; u32 phy_addr = 0, acp_fifo_addr = 0, ext_int_ctrl; unsigned int dir = substream->stream; - chip = dev_get_platdata(dev); switch (dai->driver->id) { case I2S_SP_INSTANCE: if (dir == SNDRV_PCM_STREAM_PLAYBACK) { @@ -632,7 +631,7 @@ static int acp_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_d { struct acp_stream *stream = substream->runtime->private_data; struct device *dev = dai->component->dev; - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); struct acp_resource *rsrc = chip->rsrc; unsigned int dir = substream->stream; unsigned int irq_bit = 0; diff --git a/sound/soc/amd/acp/acp-sdw-legacy-mach.c b/sound/soc/amd/acp/acp-sdw-legacy-mach.c index c2197b75a7dd..5a3cfedacbaf 100644 --- a/sound/soc/amd/acp/acp-sdw-legacy-mach.c +++ b/sound/soc/amd/acp/acp-sdw-legacy-mach.c @@ -79,6 +79,22 @@ static const struct dmi_system_id soc_sdw_quirk_table[] = { }, .driver_data = (void *)(ASOC_SDW_CODEC_SPKR), }, + { + .callback = soc_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0DD3"), + }, + .driver_data = (void *)(ASOC_SDW_CODEC_SPKR), + }, + { + .callback = soc_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0DD4"), + }, + .driver_data = (void *)(ASOC_SDW_CODEC_SPKR), + }, {} }; diff --git a/sound/soc/amd/acp/amd.h b/sound/soc/amd/acp/amd.h index cb8d97122f95..73a028e67246 100644 --- a/sound/soc/amd/acp/amd.h +++ b/sound/soc/amd/acp/amd.h @@ -130,7 +130,7 @@ #define PDM_DMA_INTR_MASK 0x10000 #define PDM_DEC_64 0x2 #define PDM_CLK_FREQ_MASK 0x07 -#define PDM_MISC_CTRL_MASK 0x10 +#define PDM_MISC_CTRL_MASK 0x18 #define PDM_ENABLE 0x01 #define PDM_DISABLE 0x00 #define DMA_EN_MASK 0x02 diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c index 238dbdb46c18..a8fc842cc94e 100644 --- a/sound/soc/codecs/lpass-rx-macro.c +++ b/sound/soc/codecs/lpass-rx-macro.c @@ -618,6 +618,7 @@ static struct interp_sample_rate sr_val_tbl[] = { {176400, 0xB}, {352800, 0xC}, }; +/* Matches also rx_macro_mux_text */ enum { RX_MACRO_AIF1_PB, RX_MACRO_AIF2_PB, @@ -722,6 +723,7 @@ static const char * const rx_int2_2_interp_mux_text[] = { "ZERO", "RX INT2_2 MUX", }; +/* Order must match RX_MACRO_MAX_DAIS enum (offset by 1) */ static const char *const rx_macro_mux_text[] = { "ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB", "AIF4_PB" }; @@ -2474,6 +2476,7 @@ static int rx_macro_mux_put(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; struct snd_soc_dapm_update *update = NULL; u32 rx_port_value = ucontrol->value.enumerated.item[0]; + unsigned int dai_id; u32 aif_rst; struct rx_macro *rx = snd_soc_component_get_drvdata(component); @@ -2490,19 +2493,24 @@ static int rx_macro_mux_put(struct snd_kcontrol *kcontrol, switch (rx_port_value) { case 0: - if (rx->active_ch_cnt[aif_rst]) { - 
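/*
 * (Pattern repeated throughout this file: the DAI component device is a
 * child of the ACP platform device, and acp_chip_info now lives in the
 * parent's drvdata, so every dev_get_platdata(dev) lookup becomes
 * dev_get_drvdata(dev->parent).)
 */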
clear_bit(widget->shift, - &rx->active_ch_mask[aif_rst]); - rx->active_ch_cnt[aif_rst]--; + /* + * active_ch_cnt and active_ch_mask use DAI IDs (RX_MACRO_MAX_DAIS). + * active_ch_cnt == 0 was tested in if() above. + */ + dai_id = aif_rst - 1; + if (rx->active_ch_cnt[dai_id]) { + clear_bit(widget->shift, &rx->active_ch_mask[dai_id]); + rx->active_ch_cnt[dai_id]--; } break; case 1: case 2: case 3: case 4: - set_bit(widget->shift, - &rx->active_ch_mask[rx_port_value]); - rx->active_ch_cnt[rx_port_value]++; + /* active_ch_cnt and active_ch_mask use DAI IDs (WSA_MACRO_MAX_DAIS). */ + dai_id = rx_port_value - 1; + set_bit(widget->shift, &rx->active_ch_mask[dai_id]); + rx->active_ch_cnt[dai_id]++; break; default: dev_err(component->dev, diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c index da6adb3de21d..d7eec9fdaf9c 100644 --- a/sound/soc/codecs/lpass-wsa-macro.c +++ b/sound/soc/codecs/lpass-wsa-macro.c @@ -368,6 +368,7 @@ static struct interp_sample_rate int_mix_sample_rate_val[] = { {192000, 0x6}, /* 192K */ }; +/* Matches also rx_mux_text */ enum { WSA_MACRO_AIF1_PB, WSA_MACRO_AIF_MIX1_PB, @@ -465,6 +466,7 @@ static const char *const rx_mix_ec_text[] = { "ZERO", "RX_MIX_TX0", "RX_MIX_TX1" }; +/* Order must match WSA_MACRO_MAX_DAIS enum (offset by 1) */ static const char *const rx_mux_text[] = { "ZERO", "AIF1_PB", "AIF_MIX1_PB" }; @@ -2207,6 +2209,7 @@ static int wsa_macro_rx_mux_put(struct snd_kcontrol *kcontrol, u32 rx_port_value = ucontrol->value.integer.value[0]; u32 bit_input; u32 aif_rst; + unsigned int dai_id; struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); aif_rst = wsa->rx_port_value[widget->shift]; @@ -2224,17 +2227,22 @@ static int wsa_macro_rx_mux_put(struct snd_kcontrol *kcontrol, switch (rx_port_value) { case 0: - if (wsa->active_ch_cnt[aif_rst]) { - clear_bit(bit_input, - &wsa->active_ch_mask[aif_rst]); - wsa->active_ch_cnt[aif_rst]--; + /* + * active_ch_cnt and active_ch_mask use DAI IDs (WSA_MACRO_MAX_DAIS). + * active_ch_cnt == 0 was tested in if() above. + */ + dai_id = aif_rst - 1; + if (wsa->active_ch_cnt[dai_id]) { + clear_bit(bit_input, &wsa->active_ch_mask[dai_id]); + wsa->active_ch_cnt[dai_id]--; } break; case 1: case 2: - set_bit(bit_input, - &wsa->active_ch_mask[rx_port_value]); - wsa->active_ch_cnt[rx_port_value]++; + /* active_ch_cnt and active_ch_mask use DAI IDs (WSA_MACRO_MAX_DAIS). 
*/ + dai_id = rx_port_value - 1; + set_bit(bit_input, &wsa->active_ch_mask[dai_id]); + wsa->active_ch_cnt[dai_id]++; break; default: dev_err(component->dev, diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c index 80b921695e7d..1d80a4b862e2 100644 --- a/sound/soc/codecs/rt5682s.c +++ b/sound/soc/codecs/rt5682s.c @@ -653,14 +653,15 @@ static void rt5682s_sar_power_mode(struct snd_soc_component *component, int mode switch (mode) { case SAR_PWR_SAVING: snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_3, - RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_DIS); + RT5682S_CBJ_IN_BUF_MASK, RT5682S_CBJ_IN_BUF_EN); snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1, - RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK, - RT5682S_CTRL_MB1_REG | RT5682S_CTRL_MB2_REG); + RT5682S_MB1_PATH_MASK | RT5682S_MB2_PATH_MASK | + RT5682S_VREF_POW_MASK, RT5682S_CTRL_MB1_FSM | + RT5682S_CTRL_MB2_FSM | RT5682S_VREF_POW_FSM); snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK | RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS | - RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU); + RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU); usleep_range(5000, 5500); snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK, RT5682S_SAR_BUTDET_EN); @@ -688,7 +689,7 @@ static void rt5682s_sar_power_mode(struct snd_soc_component *component, int mode snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK | RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS | - RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU); + RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU); break; default: dev_err(component->dev, "Invalid SAR Power mode: %d\n", mode); @@ -725,7 +726,7 @@ static void rt5682s_disable_push_button_irq(struct snd_soc_component *component) snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_BUTDET_MASK | RT5682S_SAR_BUTDET_POW_MASK | RT5682S_SAR_SEL_MB1_2_CTL_MASK, RT5682S_SAR_BUTDET_DIS | - RT5682S_SAR_BUTDET_POW_SAV | RT5682S_SAR_SEL_MB1_2_MANU); + RT5682S_SAR_BUTDET_POW_NORM | RT5682S_SAR_SEL_MB1_2_MANU); } /** @@ -786,7 +787,7 @@ static int rt5682s_headset_detect(struct snd_soc_component *component, int jack_ jack_type = SND_JACK_HEADSET; snd_soc_component_write(component, RT5682S_SAR_IL_CMD_3, 0x024c); snd_soc_component_update_bits(component, RT5682S_CBJ_CTRL_1, - RT5682S_FAST_OFF_MASK, RT5682S_FAST_OFF_EN); + RT5682S_FAST_OFF_MASK, RT5682S_FAST_OFF_DIS); snd_soc_component_update_bits(component, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_SEL_MB1_2_MASK, val << RT5682S_SAR_SEL_MB1_2_SFT); rt5682s_enable_push_button_irq(component); @@ -966,7 +967,7 @@ static int rt5682s_set_jack_detect(struct snd_soc_component *component, RT5682S_EMB_JD_MASK | RT5682S_DET_TYPE | RT5682S_POL_FAST_OFF_MASK | RT5682S_MIC_CAP_MASK, RT5682S_EMB_JD_EN | RT5682S_DET_TYPE | - RT5682S_POL_FAST_OFF_HIGH | RT5682S_MIC_CAP_HS); + RT5682S_POL_FAST_OFF_LOW | RT5682S_MIC_CAP_HS); regmap_update_bits(rt5682s->regmap, RT5682S_SAR_IL_CMD_1, RT5682S_SAR_POW_MASK, RT5682S_SAR_POW_EN); regmap_update_bits(rt5682s->regmap, RT5682S_GPIO_CTRL_1, diff --git a/sound/soc/codecs/rt712-sdca.c b/sound/soc/codecs/rt712-sdca.c index 5b298db5f0f6..0ebaae426e73 100644 --- a/sound/soc/codecs/rt712-sdca.c +++ b/sound/soc/codecs/rt712-sdca.c @@ -1890,11 +1890,9 @@ int rt712_sdca_io_init(struct device *dev, struct sdw_slave *slave) 
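 * Put concretely: rx_mux_text[] = { "ZERO", "AIF1_PB", "AIF_MIX1_PB" }
 * places the first real DAI at index 1 while the DAI enum starts at 0,
 * so mux value N maps to DAI N - 1; indexing the active-channel arrays
 * with the raw mux value, as the removed lines did, shifted every
 * update by one slot.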
rt712_sdca_va_io_init(rt712); } else { - if (!rt712->dmic_function_found) { - dev_err(&slave->dev, "%s RT712 VB detected but no SMART_MIC function exposed in ACPI\n", + if (!rt712->dmic_function_found) + dev_warn(&slave->dev, "%s RT712 VB detected but no SMART_MIC function exposed in ACPI\n", __func__); - goto suspend; - } /* multilanes and DMIC are supported by rt712vb */ prop->lane_control_support = true; diff --git a/sound/soc/codecs/sma1307.c b/sound/soc/codecs/sma1307.c index 6a601e7134ea..b683e676640d 100644 --- a/sound/soc/codecs/sma1307.c +++ b/sound/soc/codecs/sma1307.c @@ -1737,9 +1737,10 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil sma1307->set.checksum = data[sma1307->set.header_size - 2]; sma1307->set.num_mode = data[sma1307->set.header_size - 1]; num_mode = sma1307->set.num_mode; - sma1307->set.header = devm_kzalloc(sma1307->dev, - sma1307->set.header_size, - GFP_KERNEL); + sma1307->set.header = devm_kmalloc_array(sma1307->dev, + sma1307->set.header_size, + sizeof(int), + GFP_KERNEL); if (!sma1307->set.header) { sma1307->set.status = false; return; diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c index 401ee20897b1..94873ea63014 100644 --- a/sound/soc/codecs/wm8940.c +++ b/sound/soc/codecs/wm8940.c @@ -220,7 +220,7 @@ static const struct snd_kcontrol_new wm8940_snd_controls[] = { SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL, 0, 255, 0, wm8940_adc_tlv), SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum), - SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST, + SOC_SINGLE_TLV("Capture Boost Volume", WM8940_ADCBOOST, 8, 1, 0, wm8940_capture_boost_vol_tlv), SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL, 0, 63, 0, wm8940_spk_vol_tlv), @@ -693,7 +693,12 @@ static int wm8940_update_clocks(struct snd_soc_dai *dai) f = wm8940_get_mclkdiv(priv->mclk, fs256, &mclkdiv); if (f != priv->mclk) { /* The PLL performs best around 90MHz */ - fpll = wm8940_get_mclkdiv(22500000, fs256, &mclkdiv); + if (fs256 % 8000) + f = 22579200; + else + f = 24576000; + + fpll = wm8940_get_mclkdiv(f, fs256, &mclkdiv); } wm8940_set_dai_pll(dai, 0, 0, priv->mclk, fpll); diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c index bdf437a5403f..db16d893a235 100644 --- a/sound/soc/codecs/wm8974.c +++ b/sound/soc/codecs/wm8974.c @@ -419,10 +419,14 @@ static int wm8974_update_clocks(struct snd_soc_dai *dai) fs256 = 256 * priv->fs; f = wm8974_get_mclkdiv(priv->mclk, fs256, &mclkdiv); - if (f != priv->mclk) { /* The PLL performs best around 90MHz */ - fpll = wm8974_get_mclkdiv(22500000, fs256, &mclkdiv); + if (fs256 % 8000) + f = 22579200; + else + f = 24576000; + + fpll = wm8974_get_mclkdiv(f, fs256, &mclkdiv); } wm8974_set_dai_pll(dai, 0, 0, priv->mclk, fpll); diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index f997b2dc221b..28f03a5f29f7 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -761,7 +761,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { .callback = sof_sdw_quirk_cb, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), - DMI_MATCH(DMI_PRODUCT_NAME, "Fatcat"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Fatcat"), }, .driver_data = (void *)(SOC_SDW_PCH_DMIC | SOF_BT_OFFLOAD_SSP(2) | diff --git a/sound/soc/intel/boards/sof_ssp_amp.c b/sound/soc/intel/boards/sof_ssp_amp.c index 48ee5353bdf1..729c0cd7c19c 100644 --- a/sound/soc/intel/boards/sof_ssp_amp.c +++ b/sound/soc/intel/boards/sof_ssp_amp.c @@ -216,6 +216,12 @@ static const struct 
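/*
 * (Arithmetic behind the wm8940/wm8974 hunks above: fs256 is 256 times
 * the sample rate, so fs256 % 8000 is non-zero exactly for the
 * 44.1 kHz rate family; the two replacement PLL references are clean
 * multiples of each family, 22579200 = 512 * 44100 and
 * 24576000 = 512 * 48000, unlike the old fixed 22500000 which divides
 * evenly into neither.)
 */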
platform_device_id board_ids[] = { /* SSP 0 and SSP 2 are used for HDMI IN */ SOF_HDMI_PLAYBACK_PRESENT), }, + { + .name = "ptl_lt6911_hdmi_ssp", + .driver_data = (kernel_ulong_t)(SOF_SSP_MASK_HDMI_CAPTURE(0x5) | + /* SSP 0 and SSP 2 are used for HDMI IN */ + SOF_HDMI_PLAYBACK_PRESENT), + }, { } }; MODULE_DEVICE_TABLE(platform, board_ids); diff --git a/sound/soc/intel/catpt/pcm.c b/sound/soc/intel/catpt/pcm.c index 46acb7fdc547..bf734c69c4e0 100644 --- a/sound/soc/intel/catpt/pcm.c +++ b/sound/soc/intel/catpt/pcm.c @@ -568,8 +568,9 @@ static const struct snd_pcm_hardware catpt_pcm_hardware = { SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, .formats = SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, + .subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 | + SNDRV_PCM_SUBFMTBIT_MSBITS_MAX, .period_bytes_min = PAGE_SIZE, .period_bytes_max = CATPT_BUFFER_MAX_SIZE / CATPT_PCM_PERIODS_MIN, .periods_min = CATPT_PCM_PERIODS_MIN, @@ -698,14 +699,18 @@ static struct snd_soc_dai_driver dai_drivers[] = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, + .subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 | + SNDRV_PCM_SUBFMTBIT_MSBITS_MAX, }, .capture = { .stream_name = "Analog Capture", .channels_min = 2, .channels_max = 4, .rates = SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, + .subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 | + SNDRV_PCM_SUBFMTBIT_MSBITS_MAX, }, }, { @@ -717,7 +722,9 @@ static struct snd_soc_dai_driver dai_drivers[] = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, + .subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 | + SNDRV_PCM_SUBFMTBIT_MSBITS_MAX, }, }, { @@ -729,7 +736,9 @@ static struct snd_soc_dai_driver dai_drivers[] = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, + .subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 | + SNDRV_PCM_SUBFMTBIT_MSBITS_MAX, }, }, { @@ -741,7 +750,9 @@ static struct snd_soc_dai_driver dai_drivers[] = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE, + .subformats = SNDRV_PCM_SUBFMTBIT_MSBITS_24 | + SNDRV_PCM_SUBFMTBIT_MSBITS_MAX, }, }, { diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c index e292701dfcfe..3c8b10e21ceb 100644 --- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c @@ -61,6 +61,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_machines[] = { SND_SOC_ACPI_TPLG_INTEL_SSP_MSB | SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER, }, + /* place amp-only boards in the end of table */ + { + .id = "INTC10B0", + .drv_name = "ptl_lt6911_hdmi_ssp", + .sof_tplg_filename = "sof-ptl-hdmi-ssp02.tplg", + }, {}, }; EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_ptl_machines); diff --git a/sound/soc/qcom/qdsp6/audioreach.c b/sound/soc/qcom/qdsp6/audioreach.c index 4ebaaf736fb9..3f5eed5afce5 100644 --- a/sound/soc/qcom/qdsp6/audioreach.c +++ 
b/sound/soc/qcom/qdsp6/audioreach.c @@ -971,6 +971,7 @@ static int audioreach_i2s_set_media_format(struct q6apm_graph *graph, param_data->param_id = PARAM_ID_I2S_INTF_CFG; param_data->param_size = ic_sz - APM_MODULE_PARAM_DATA_SIZE; + intf_cfg->cfg.lpaif_type = module->hw_interface_type; intf_cfg->cfg.intf_idx = module->hw_interface_idx; intf_cfg->cfg.sd_line_idx = module->sd_line_idx; diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c index a0d90462fd6a..528756f1332b 100644 --- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c +++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c @@ -213,8 +213,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s return 0; err: - q6apm_graph_close(dai_data->graph[dai->id]); - dai_data->graph[dai->id] = NULL; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + q6apm_graph_close(dai_data->graph[dai->id]); + dai_data->graph[dai->id] = NULL; + } return rc; } @@ -260,6 +262,7 @@ static const struct snd_soc_dai_ops q6i2s_ops = { .shutdown = q6apm_lpass_dai_shutdown, .set_channel_map = q6dma_set_channel_map, .hw_params = q6dma_hw_params, + .set_fmt = q6i2s_set_fmt, }; static const struct snd_soc_dai_ops q6hdmi_ops = { diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c index 73f9f82c4e25..288ccd7f8866 100644 --- a/sound/soc/qcom/sc8280xp.c +++ b/sound/soc/qcom/sc8280xp.c @@ -32,6 +32,10 @@ static int sc8280xp_snd_init(struct snd_soc_pcm_runtime *rtd) int dp_pcm_id = 0; switch (cpu_dai->id) { + case PRIMARY_MI2S_RX...QUATERNARY_MI2S_TX: + case QUINARY_MI2S_RX...QUINARY_MI2S_TX: + snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_BP_FP); + break; case WSA_CODEC_DMA_RX_0: case WSA_CODEC_DMA_RX_1: /* @@ -186,7 +190,7 @@ static int sc8280xp_platform_probe(struct platform_device *pdev) static const struct of_device_id snd_sc8280xp_dt_match[] = { {.compatible = "qcom,qcm6490-idp-sndcard", "qcm6490"}, {.compatible = "qcom,qcs6490-rb3gen2-sndcard", "qcs6490"}, - {.compatible = "qcom,qcs8275-sndcard", "qcs8275"}, + {.compatible = "qcom,qcs8275-sndcard", "qcs8300"}, {.compatible = "qcom,qcs9075-sndcard", "qcs9075"}, {.compatible = "qcom,qcs9100-sndcard", "qcs9100"}, {.compatible = "qcom,sc8280xp-sndcard", "sc8280xp"}, diff --git a/sound/soc/sdca/sdca_device.c b/sound/soc/sdca/sdca_device.c index 0244cdcdd109..4798ce2c8f0b 100644 --- a/sound/soc/sdca/sdca_device.c +++ b/sound/soc/sdca/sdca_device.c @@ -7,6 +7,7 @@ */ #include <linux/acpi.h> +#include <linux/dmi.h> #include <linux/module.h> #include <linux/property.h> #include <linux/soundwire/sdw.h> @@ -55,11 +56,30 @@ static bool sdca_device_quirk_rt712_vb(struct sdw_slave *slave) return false; } +static bool sdca_device_quirk_skip_func_type_patching(struct sdw_slave *slave) +{ + const char *vendor, *sku; + + vendor = dmi_get_system_info(DMI_SYS_VENDOR); + sku = dmi_get_system_info(DMI_PRODUCT_SKU); + + if (vendor && sku && + !strcmp(vendor, "Dell Inc.") && + (!strcmp(sku, "0C62") || !strcmp(sku, "0C63") || !strcmp(sku, "0C6B")) && + slave->sdca_data.interface_revision == 0x061c && + slave->id.mfg_id == 0x01fa && slave->id.part_id == 0x4243) + return true; + + return false; +} + bool sdca_device_quirk_match(struct sdw_slave *slave, enum sdca_quirk quirk) { switch (quirk) { case SDCA_QUIRKS_RT712_VB: return sdca_device_quirk_rt712_vb(slave); + case SDCA_QUIRKS_SKIP_FUNC_TYPE_PATCHING: + return sdca_device_quirk_skip_func_type_patching(slave); default: break; } diff --git a/sound/soc/sdca/sdca_functions.c b/sound/soc/sdca/sdca_functions.c index 
f26f597dca9e..13f68f7b6dd6 100644 --- a/sound/soc/sdca/sdca_functions.c +++ b/sound/soc/sdca/sdca_functions.c @@ -90,6 +90,7 @@ static int find_sdca_function(struct acpi_device *adev, void *data) { struct fwnode_handle *function_node = acpi_fwnode_handle(adev); struct sdca_device_data *sdca_data = data; + struct sdw_slave *slave = container_of(sdca_data, struct sdw_slave, sdca_data); struct device *dev = &adev->dev; struct fwnode_handle *control5; /* used to identify function type */ const char *function_name; @@ -137,11 +138,13 @@ static int find_sdca_function(struct acpi_device *adev, void *data) return ret; } - ret = patch_sdca_function_type(sdca_data->interface_revision, &function_type); - if (ret < 0) { - dev_err(dev, "SDCA version %#x invalid function type %d\n", - sdca_data->interface_revision, function_type); - return ret; + if (!sdca_device_quirk_match(slave, SDCA_QUIRKS_SKIP_FUNC_TYPE_PATCHING)) { + ret = patch_sdca_function_type(sdca_data->interface_revision, &function_type); + if (ret < 0) { + dev_err(dev, "SDCA version %#x invalid function type %d\n", + sdca_data->interface_revision, function_type); + return ret; + } } function_name = get_sdca_function_name(function_type); diff --git a/sound/soc/sdca/sdca_interrupts.c b/sound/soc/sdca/sdca_interrupts.c index 8018773ee426..79bf3042f57d 100644 --- a/sound/soc/sdca/sdca_interrupts.c +++ b/sound/soc/sdca/sdca_interrupts.c @@ -155,7 +155,7 @@ static irqreturn_t detected_mode_handler(int irq, void *data) SDCA_CTL_SELECTED_MODE_NAME); if (!name) - return -ENOMEM; + return IRQ_NONE; kctl = snd_soc_component_get_kcontrol(component, name); if (!kctl) { diff --git a/sound/soc/sdca/sdca_regmap.c b/sound/soc/sdca/sdca_regmap.c index 5cb3048ea8cf..72f893e00ff5 100644 --- a/sound/soc/sdca/sdca_regmap.c +++ b/sound/soc/sdca/sdca_regmap.c @@ -196,7 +196,7 @@ int sdca_regmap_mbq_size(struct sdca_function_data *function, unsigned int reg) control = function_find_control(function, reg); if (!control) - return false; + return -EINVAL; return clamp_val(control->nbits / BITS_PER_BYTE, sizeof(u8), sizeof(u32)); } diff --git a/sound/soc/sof/imx/imx-common.c b/sound/soc/sof/imx/imx-common.c index f00b381cec3b..d66c198b861a 100644 --- a/sound/soc/sof/imx/imx-common.c +++ b/sound/soc/sof/imx/imx-common.c @@ -316,9 +316,9 @@ static int imx_parse_ioremap_memory(struct snd_sof_dev *sdev) } sdev->bar[blk_type] = devm_ioremap_resource(sdev->dev, res); - if (!sdev->bar[blk_type]) + if (IS_ERR(sdev->bar[blk_type])) return dev_err_probe(sdev->dev, - -ENOMEM, + PTR_ERR(sdev->bar[blk_type]), "failed to ioremap %s region\n", chip_info->memory[i].name); } diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c index aa6b0247d5c9..a34f472ef175 100644 --- a/sound/soc/sof/intel/hda-stream.c +++ b/sound/soc/sof/intel/hda-stream.c @@ -890,7 +890,7 @@ int hda_dsp_stream_init(struct snd_sof_dev *sdev) if (num_capture >= SOF_HDA_CAPTURE_STREAMS) { dev_err(sdev->dev, "error: too many capture streams %d\n", - num_playback); + num_capture); return -EINVAL; } diff --git a/sound/usb/qcom/qc_audio_offload.c b/sound/usb/qcom/qc_audio_offload.c index a25c5a531690..9ad76fff741b 100644 --- a/sound/usb/qcom/qc_audio_offload.c +++ b/sound/usb/qcom/qc_audio_offload.c @@ -538,38 +538,33 @@ static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long iova, umap_size, iova, mapped_iova_size); } +static int uaudio_iommu_map_prot(bool dma_coherent) +{ + int prot = IOMMU_READ | IOMMU_WRITE; + + if (dma_coherent) + prot |= IOMMU_CACHE; + return prot; +} + /** - * 
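 * (Shape of the refactor below: the old catch-all mapper is split so
 * uaudio_iommu_map_pa() covers the physically contiguous event and
 * transfer rings while uaudio_iommu_map_xfer_buf() walks an sg_table
 * for the transfer buffer, with uaudio_iommu_map_prot() supplying the
 * shared IOMMU_READ | IOMMU_WRITE (| IOMMU_CACHE) flags.)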
uaudio_iommu_map() - maps iommu memory for adsp + * uaudio_iommu_map_pa() - maps iommu memory for adsp * @mtype: ring type * @dma_coherent: dma coherent * @pa: physical address for ring/buffer * @size: size of memory region - * @sgt: sg table for memory region * * Maps the XHCI related resources to a memory region that is assigned to be * used by the adsp. This will be mapped to the domain, which is created by * the ASoC USB backend driver. * */ -static unsigned long uaudio_iommu_map(enum mem_type mtype, bool dma_coherent, - phys_addr_t pa, size_t size, - struct sg_table *sgt) +static unsigned long uaudio_iommu_map_pa(enum mem_type mtype, bool dma_coherent, + phys_addr_t pa, size_t size) { - struct scatterlist *sg; unsigned long iova = 0; - size_t total_len = 0; - unsigned long iova_sg; - phys_addr_t pa_sg; bool map = true; - size_t sg_len; - int prot; - int ret; - int i; - - prot = IOMMU_READ | IOMMU_WRITE; - - if (dma_coherent) - prot |= IOMMU_CACHE; + int prot = uaudio_iommu_map_prot(dma_coherent); switch (mtype) { case MEM_EVENT_RING: @@ -583,20 +578,41 @@ static unsigned long uaudio_iommu_map(enum mem_type mtype, bool dma_coherent, &uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list, size); break; - case MEM_XFER_BUF: - iova = uaudio_get_iova(&uaudio_qdev->curr_xfer_buf_iova, - &uaudio_qdev->xfer_buf_iova_size, - &uaudio_qdev->xfer_buf_list, size); - break; default: dev_err(uaudio_qdev->data->dev, "unknown mem type %d\n", mtype); } if (!iova || !map) - goto done; + return 0; + + iommu_map(uaudio_qdev->data->domain, iova, pa, size, prot, GFP_KERNEL); - if (!sgt) - goto skip_sgt_map; + return iova; +} + +static unsigned long uaudio_iommu_map_xfer_buf(bool dma_coherent, size_t size, + struct sg_table *sgt) +{ + struct scatterlist *sg; + unsigned long iova = 0; + size_t total_len = 0; + unsigned long iova_sg; + phys_addr_t pa_sg; + size_t sg_len; + int prot = uaudio_iommu_map_prot(dma_coherent); + int ret; + int i; + + prot = IOMMU_READ | IOMMU_WRITE; + + if (dma_coherent) + prot |= IOMMU_CACHE; + + iova = uaudio_get_iova(&uaudio_qdev->curr_xfer_buf_iova, + &uaudio_qdev->xfer_buf_iova_size, + &uaudio_qdev->xfer_buf_list, size); + if (!iova) + goto done; iova_sg = iova; for_each_sg(sgt->sgl, sg, sgt->nents, i) { @@ -618,11 +634,6 @@ static unsigned long uaudio_iommu_map(enum mem_type mtype, bool dma_coherent, uaudio_iommu_unmap(MEM_XFER_BUF, iova, size, total_len); iova = 0; } - return iova; - -skip_sgt_map: - iommu_map(uaudio_qdev->data->domain, iova, pa, size, prot, GFP_KERNEL); - done: return iova; } @@ -1020,7 +1031,6 @@ static int uaudio_transfer_buffer_setup(struct snd_usb_substream *subs, struct sg_table xfer_buf_sgt; dma_addr_t xfer_buf_dma; void *xfer_buf; - phys_addr_t xfer_buf_pa; u32 len = xfer_buf_len; bool dma_coherent; dma_addr_t xfer_buf_dma_sysdev; @@ -1051,18 +1061,12 @@ static int uaudio_transfer_buffer_setup(struct snd_usb_substream *subs, if (!xfer_buf) return -ENOMEM; - /* Remapping is not possible if xfer_buf is outside of linear map */ - xfer_buf_pa = virt_to_phys(xfer_buf); - if (WARN_ON(!page_is_ram(PFN_DOWN(xfer_buf_pa)))) { - ret = -ENXIO; - goto unmap_sync; - } dma_get_sgtable(subs->dev->bus->sysdev, &xfer_buf_sgt, xfer_buf, xfer_buf_dma, len); /* map the physical buffer into sysdev as well */ - xfer_buf_dma_sysdev = uaudio_iommu_map(MEM_XFER_BUF, dma_coherent, - xfer_buf_pa, len, &xfer_buf_sgt); + xfer_buf_dma_sysdev = uaudio_iommu_map_xfer_buf(dma_coherent, + len, &xfer_buf_sgt); if (!xfer_buf_dma_sysdev) { ret = -ENOMEM; goto unmap_sync; @@ 
-1143,8 +1147,8 @@ uaudio_endpoint_setup(struct snd_usb_substream *subs, sg_free_table(sgt); /* data transfer ring */ - iova = uaudio_iommu_map(MEM_XFER_RING, dma_coherent, tr_pa, - PAGE_SIZE, NULL); + iova = uaudio_iommu_map_pa(MEM_XFER_RING, dma_coherent, tr_pa, + PAGE_SIZE); if (!iova) { ret = -ENOMEM; goto clear_pa; @@ -1207,8 +1211,8 @@ static int uaudio_event_ring_setup(struct snd_usb_substream *subs, mem_info->dma = sg_dma_address(sgt->sgl); sg_free_table(sgt); - iova = uaudio_iommu_map(MEM_EVENT_RING, dma_coherent, er_pa, - PAGE_SIZE, NULL); + iova = uaudio_iommu_map_pa(MEM_EVENT_RING, dma_coherent, er_pa, + PAGE_SIZE); if (!iova) { ret = -ENOMEM; goto clear_pa; diff --git a/tools/arch/loongarch/include/asm/inst.h b/tools/arch/loongarch/include/asm/inst.h index c25b5853181d..d68fad63c8b7 100644 --- a/tools/arch/loongarch/include/asm/inst.h +++ b/tools/arch/loongarch/include/asm/inst.h @@ -51,6 +51,10 @@ enum reg2i16_op { bgeu_op = 0x1b, }; +enum reg3_op { + amswapw_op = 0x70c0, +}; + struct reg0i15_format { unsigned int immediate : 15; unsigned int opcode : 17; @@ -96,6 +100,13 @@ struct reg2i16_format { unsigned int opcode : 6; }; +struct reg3_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int rk : 5; + unsigned int opcode : 17; +}; + union loongarch_instruction { unsigned int word; struct reg0i15_format reg0i15_format; @@ -105,6 +116,7 @@ union loongarch_instruction { struct reg2i12_format reg2i12_format; struct reg2i14_format reg2i14_format; struct reg2i16_format reg2i16_format; + struct reg3_format reg3_format; }; #define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction) diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c index 9ef569492560..ddaeb4eb3e24 100644 --- a/tools/lib/subcmd/help.c +++ b/tools/lib/subcmd/help.c @@ -75,6 +75,9 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) size_t ci, cj, ei; int cmp; + if (!excludes->cnt) + return; + ci = cj = ei = 0; while (ci < cmds->cnt && ei < excludes->cnt) { cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c index b6fdc68053cc..2e555c4060c5 100644 --- a/tools/objtool/arch/loongarch/decode.c +++ b/tools/objtool/arch/loongarch/decode.c @@ -278,6 +278,25 @@ static bool decode_insn_reg2i16_fomat(union loongarch_instruction inst, return true; } +static bool decode_insn_reg3_fomat(union loongarch_instruction inst, + struct instruction *insn) +{ + switch (inst.reg3_format.opcode) { + case amswapw_op: + if (inst.reg3_format.rd == LOONGARCH_GPR_ZERO && + inst.reg3_format.rk == LOONGARCH_GPR_RA && + inst.reg3_format.rj == LOONGARCH_GPR_ZERO) { + /* amswap.w $zero, $ra, $zero */ + insn->type = INSN_BUG; + } + break; + default: + return false; + } + + return true; +} + int arch_decode_instruction(struct objtool_file *file, const struct section *sec, unsigned long offset, unsigned int maxlen, struct instruction *insn) @@ -309,11 +328,19 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec return 0; if (decode_insn_reg2i16_fomat(inst, insn)) return 0; + if (decode_insn_reg3_fomat(inst, insn)) + return 0; - if (inst.word == 0) + if (inst.word == 0) { + /* andi $zero, $zero, 0x0 */ insn->type = INSN_NOP; - else if (inst.reg0i15_format.opcode == break_op) { - /* break */ + } else if (inst.reg0i15_format.opcode == break_op && + inst.reg0i15_format.immediate == 0x0) { + /* break 0x0 */ + insn->type = INSN_TRAP; + } else if (inst.reg0i15_format.opcode == 
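/*
 * (Decode map made explicit: an all-zero word is "andi $zero, $zero,
 * 0x0" and stays INSN_NOP, "break 0x0" becomes INSN_TRAP, "break 0x1"
 * becomes INSN_BUG, and the "amswap.w $zero, $ra, $zero" pattern
 * matched in decode_insn_reg3_fomat() above is likewise treated as a
 * BUG marker.)
 */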
break_op && + inst.reg0i15_format.immediate == 0x1) { + /* break 0x1 */ insn->type = INSN_BUG; } else if (inst.reg2_format.opcode == ertn_op) { /* ertn */ diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index fd49703021fd..078634461df2 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -2009,6 +2009,7 @@ static int __cmd_contention(int argc, const char **argv) .owner = show_lock_owner, .cgroups = RB_ROOT, }; + struct perf_env host_env; lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table)); if (!lockhash_table) @@ -2024,7 +2025,10 @@ static int __cmd_contention(int argc, const char **argv) eops.mmap = perf_event__process_mmap; eops.tracing_data = perf_event__process_tracing_data; - session = perf_session__new(use_bpf ? NULL : &data, &eops); + perf_env__init(&host_env); + session = __perf_session__new(use_bpf ? NULL : &data, &eops, + /*trace_event_repipe=*/false, &host_env); + if (IS_ERR(session)) { pr_err("Initializing perf session failed\n"); err = PTR_ERR(session); @@ -2142,6 +2146,7 @@ out_delete: evlist__delete(con.evlist); lock_contention_finish(&con); perf_session__delete(session); + perf_env__exit(&host_env); zfree(&lockhash_table); return err; } diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 85b2a93a59ac..779f6230130a 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -477,6 +477,7 @@ static int __maps__insert(struct maps *maps, struct map *new) } /* Insert the value at the end. */ maps_by_address[nr_maps] = map__get(new); + map__set_kmap_maps(new, maps); if (maps_by_name) maps_by_name[nr_maps] = map__get(new); @@ -502,8 +503,6 @@ static int __maps__insert(struct maps *maps, struct map *new) if (map__end(new) < map__start(new)) RC_CHK_ACCESS(maps)->ends_broken = true; - map__set_kmap_maps(new, maps); - return 0; } @@ -891,6 +890,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) if (before) { map__put(maps_by_address[i]); maps_by_address[i] = before; + map__set_kmap_maps(before, maps); if (maps_by_name) { map__put(maps_by_name[ni]); @@ -918,6 +918,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) */ map__put(maps_by_address[i]); maps_by_address[i] = map__get(new); + map__set_kmap_maps(new, maps); if (maps_by_name) { map__put(maps_by_name[ni]); @@ -942,14 +943,13 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) */ map__put(maps_by_address[i]); maps_by_address[i] = map__get(new); + map__set_kmap_maps(new, maps); if (maps_by_name) { map__put(maps_by_name[ni]); maps_by_name[ni] = map__get(new); } - map__set_kmap_maps(new, maps); - check_invariants(maps); return err; } @@ -1019,6 +1019,7 @@ int maps__copy_from(struct maps *dest, struct maps *parent) err = unwind__prepare_access(dest, new, NULL); if (!err) { dest_maps_by_address[i] = new; + map__set_kmap_maps(new, dest); if (dest_maps_by_name) dest_maps_by_name[i] = map__get(new); RC_CHK_ACCESS(dest)->nr_maps = i + 1; diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh index 7bc148889ca7..187b478d0ddf 100755 --- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh @@ -7,6 +7,8 @@ ALL_TESTS=" prio arp_validate num_grat_arp + fail_over_mac + vlan_over_bond " lib_dir=$(dirname "$0") @@ -352,8 +354,8 @@ garp_test() exp_num=$(echo "${param}" | cut -f6 -d ' ') active_slave=$(cmd_jq "ip -n ${s_ns} -d 
-j link show bond0" ".[].linkinfo.info_data.active_slave") - slowwait_for_counter $((exp_num + 5)) $exp_num \ - tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}" + slowwait_for_counter $((exp_num + 5)) $exp_num tc_rule_handle_stats_get \ + "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}" &> /dev/null # check result real_num=$(tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}") @@ -376,6 +378,197 @@ num_grat_arp() done } +check_all_mac_same() +{ + RET=0 + # all slaves should have same mac address (with the first port's mac) + local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]') + local eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]') + local eth1_mac=$(ip -n "$s_ns" -j link show eth1 | jq -r '.[]["address"]') + local eth2_mac=$(ip -n "$s_ns" -j link show eth2 | jq -r '.[]["address"]') + if [ "$bond_mac" != "${mac[0]}" ] || [ "$eth0_mac" != "$bond_mac" ] || \ + [ "$eth1_mac" != "$bond_mac" ] || [ "$eth2_mac" != "$bond_mac" ]; then + RET=1 + fi +} + +check_bond_mac_same_with_first() +{ + RET=0 + # bond mac address should be same with the first added slave + local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]') + if [ "$bond_mac" != "${mac[0]}" ]; then + RET=1 + fi +} + +check_bond_mac_same_with_active() +{ + RET=0 + # bond mac address should be same with active slave + local bond_mac=$(ip -n "$s_ns" -j link show bond0 | jq -r '.[]["address"]') + local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave") + local active_slave_mac=$(ip -n "$s_ns" -j link show "$active_slave" | jq -r '.[]["address"]') + if [ "$bond_mac" != "$active_slave_mac" ]; then + RET=1 + fi +} + +check_backup_slave_mac_not_change() +{ + RET=0 + # backup slave's mac address is not changed + if ip -n "$s_ns" -d -j link show type bond_slave | jq -e '.[] + | select(.linkinfo.info_slave_data.state=="BACKUP") + | select(.address != .linkinfo.info_slave_data.perm_hwaddr)' &> /dev/null; then + RET=1 + fi +} + +check_backup_slave_mac_inherit() +{ + local backup_mac + RET=0 + + # backup slaves should use mac[1] or mac[2] + local backup_macs=$(ip -n "$s_ns" -d -j link show type bond_slave | \ + jq -r '.[] | select(.linkinfo.info_slave_data.state=="BACKUP") | .address') + for backup_mac in $backup_macs; do + if [ "$backup_mac" != "${mac[1]}" ] && [ "$backup_mac" != "${mac[2]}" ]; then + RET=1 + fi + done +} + +check_first_slave_random_mac() +{ + RET=0 + # remove the first added slave and added it back + ip -n "$s_ns" link set eth0 nomaster + ip -n "$s_ns" link set eth0 master bond0 + + # the first slave should use random mac address + eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]') + [ "$eth0_mac" = "${mac[0]}" ] && RET=1 + log_test "bond fail_over_mac follow" "random first slave mac" + + # remove the first slave, the permanent MAC address should be restored back + ip -n "$s_ns" link set eth0 nomaster + eth0_mac=$(ip -n "$s_ns" -j link show eth0 | jq -r '.[]["address"]') + [ "$eth0_mac" != "${mac[0]}" ] && RET=1 +} + +do_active_backup_failover() +{ + local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave") + ip -n ${s_ns} link set ${active_slave} down + slowwait 2 active_slave_changed $active_slave + ip -n ${s_ns} link set ${active_slave} up +} + +fail_over_mac() +{ + # Bring down the first interface on the switch to force the bond to + # select another active interface 
instead of the first one that joined. + ip -n "$g_ns" link set s0 down + + # fail_over_mac none + bond_reset "mode active-backup miimon 100 fail_over_mac 0" + check_all_mac_same + log_test "fail_over_mac 0" "all slaves have same mac" + do_active_backup_failover + check_all_mac_same + log_test "fail_over_mac 0" "failover: all slaves have same mac" + + # fail_over_mac active + bond_reset "mode active-backup miimon 100 fail_over_mac 1" + check_bond_mac_same_with_active + log_test "fail_over_mac 1" "bond mac is same with active slave mac" + check_backup_slave_mac_not_change + log_test "fail_over_mac 1" "backup slave mac is not changed" + do_active_backup_failover + check_bond_mac_same_with_active + log_test "fail_over_mac 1" "failover: bond mac is same with active slave mac" + check_backup_slave_mac_not_change + log_test "fail_over_mac 1" "failover: backup slave mac is not changed" + + # fail_over_mac follow + bond_reset "mode active-backup miimon 100 fail_over_mac 2" + check_bond_mac_same_with_first + log_test "fail_over_mac 2" "bond mac is same with first slave mac" + check_bond_mac_same_with_active + log_test "fail_over_mac 2" "bond mac is same with active slave mac" + check_backup_slave_mac_inherit + log_test "fail_over_mac 2" "backup slave mac inherit" + do_active_backup_failover + check_bond_mac_same_with_first + log_test "fail_over_mac 2" "failover: bond mac is same with first slave mac" + check_bond_mac_same_with_active + log_test "fail_over_mac 2" "failover: bond mac is same with active slave mac" + check_backup_slave_mac_inherit + log_test "fail_over_mac 2" "failover: backup slave mac inherit" + check_first_slave_random_mac + log_test "fail_over_mac 2" "first slave mac random" +} + +vlan_over_bond_arp() +{ + local mode="$1" + RET=0 + + bond_reset "mode $mode arp_interval 100 arp_ip_target 192.0.3.10" + ip -n "${s_ns}" link add bond0.3 link bond0 type vlan id 3 + ip -n "${s_ns}" link set bond0.3 up + ip -n "${s_ns}" addr add 192.0.3.1/24 dev bond0.3 + ip -n "${s_ns}" addr add 2001:db8::3:1/64 dev bond0.3 + + slowwait_for_counter 5 5 tc_rule_handle_stats_get \ + "dev eth0.3 ingress" 101 ".packets" "-n ${c_ns}" &> /dev/null || RET=1 + log_test "vlan over bond arp" "$mode" +} + +vlan_over_bond_ns() +{ + local mode="$1" + RET=0 + + if skip_ns; then + log_test_skip "vlan_over_bond ns" "$mode" + return 0 + fi + + bond_reset "mode $mode arp_interval 100 ns_ip6_target 2001:db8::3:10" + ip -n "${s_ns}" link add bond0.3 link bond0 type vlan id 3 + ip -n "${s_ns}" link set bond0.3 up + ip -n "${s_ns}" addr add 192.0.3.1/24 dev bond0.3 + ip -n "${s_ns}" addr add 2001:db8::3:1/64 dev bond0.3 + + slowwait_for_counter 5 5 tc_rule_handle_stats_get \ + "dev eth0.3 ingress" 102 ".packets" "-n ${c_ns}" &> /dev/null || RET=1 + log_test "vlan over bond ns" "$mode" +} + +vlan_over_bond() +{ + # add vlan 3 for client + ip -n "${c_ns}" link add eth0.3 link eth0 type vlan id 3 + ip -n "${c_ns}" link set eth0.3 up + ip -n "${c_ns}" addr add 192.0.3.10/24 dev eth0.3 + ip -n "${c_ns}" addr add 2001:db8::3:10/64 dev eth0.3 + + # Add tc rule to check the vlan pkts + tc -n "${c_ns}" qdisc add dev eth0.3 clsact + tc -n "${c_ns}" filter add dev eth0.3 ingress protocol arp \ + handle 101 flower skip_hw arp_op request \ + arp_sip 192.0.3.1 arp_tip 192.0.3.10 action pass + tc -n "${c_ns}" filter add dev eth0.3 ingress protocol ipv6 \ + handle 102 flower skip_hw ip_proto icmpv6 \ + type 135 src_ip 2001:db8::3:1 action pass + + vlan_over_bond_arp "active-backup" + vlan_over_bond_ns "active-backup" +} + trap cleanup EXIT 
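# (How the vlan_over_bond checks work: the bond's ARP/NS monitor targets
# sit on VLAN 3, so the tc flower counters installed on the client's
# eth0.3 - handle 101 for ARP requests, handle 102 for ICMPv6 neighbour
# solicitations - only advance if the monitor probes keep their VLAN tag
# across the bond.)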
setup_prepare diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh index 195ef83cfbf1..167aa4a4a12a 100644 --- a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh @@ -39,6 +39,8 @@ g_ip4="192.0.2.254" s_ip6="2001:db8::1" c_ip6="2001:db8::10" g_ip6="2001:db8::254" +mac[0]="00:0a:0b:0c:0d:01" +mac[1]="00:0a:0b:0c:0d:02" gateway_create() { @@ -62,6 +64,7 @@ server_create() for i in $(seq 0 1); do ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} + ip -n "${s_ns}" link set "eth${i}" addr "${mac[$i]}" ip -n ${g_ns} link set s${i} up ip -n ${g_ns} link set s${i} master br0 diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh index 3a1333d9a85b..23a2932301cc 100644 --- a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh @@ -26,6 +26,7 @@ # +-------------------------------------+ source bond_topo_2d1c.sh +mac[2]="00:0a:0b:0c:0d:03" setup_prepare() { @@ -36,6 +37,7 @@ setup_prepare() # Add the extra device as we use 3 down links for bond0 local i=2 ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} + ip -n "${s_ns}" link set "eth${i}" addr "${mac[$i]}" ip -n ${g_ns} link set s${i} up ip -n ${g_ns} link set s${i} master br0 ip -n ${s_ns} link set eth${i} master bond0 diff --git a/tools/testing/selftests/drivers/net/bonding/config b/tools/testing/selftests/drivers/net/bonding/config index 4d16a69ffc65..832fa1caeb66 100644 --- a/tools/testing/selftests/drivers/net/bonding/config +++ b/tools/testing/selftests/drivers/net/bonding/config @@ -10,3 +10,4 @@ CONFIG_NET_CLS_MATCHALL=m CONFIG_NET_SCH_INGRESS=y CONFIG_NLMON=y CONFIG_VETH=y +CONFIG_VLAN_8021Q=m diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index 4f07ac9fa207..b148cadb96d0 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -1093,6 +1093,7 @@ int main_loop_s(int listensock) struct pollfd polls; socklen_t salen; int remotesock; + int err = 0; int fd = 0; again: @@ -1125,7 +1126,7 @@ again: SOCK_TEST_TCPULP(remotesock, 0); memset(&winfo, 0, sizeof(winfo)); - copyfd_io(fd, remotesock, 1, true, &winfo); + err = copyfd_io(fd, remotesock, 1, true, &winfo); } else { perror("accept"); return 1; @@ -1134,10 +1135,10 @@ again: if (cfg_input) close(fd); - if (--cfg_repeat > 0) + if (!err && --cfg_repeat > 0) goto again; - return 0; + return err; } static void init_rng(void) @@ -1247,7 +1248,7 @@ void xdisconnect(int fd) else xerror("bad family"); - strcpy(cmd, "ss -M | grep -q "); + strcpy(cmd, "ss -Mnt | grep -q "); cmdlen = strlen(cmd); if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen], sizeof(cmd) - cmdlen)) @@ -1257,7 +1258,7 @@ void xdisconnect(int fd) /* * wait until the pending data is completely flushed and all - * the MPTCP sockets reached the closed status. + * the sockets reached the closed status. * disconnect will bypass/ignore/drop any pending data. 
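 * (The switch from "ss -M" to "ss -Mnt" above widens the wait to cover
 * the plain TCP subflows as well as the MPTCP sockets, with numeric
 * output, which is why the comment now says "the sockets" rather than
 * "the MPTCP sockets".)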
*/ for (i = 0; ; i += msec_sleep) { diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index c2ab9f7f0d21..47ecb5b3836e 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -211,6 +211,11 @@ if $checksum; then done fi +if $capture; then + rndh="${ns1:4}" + mptcp_lib_pr_info "Packet capture files will have this prefix: ${rndh}-" +fi + set_ethtool_flags() { local ns="$1" local dev="$2" @@ -361,7 +366,6 @@ do_transfer() if $capture; then local capuser - local rndh="${connector_ns:4}" if [ -z $SUDO_USER ] ; then capuser="" else diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh index 09cd24b2ae46..d62e653d48b0 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh @@ -384,7 +384,7 @@ mptcp_lib_make_file() { mptcp_lib_print_file_err() { ls -l "${1}" 1>&2 echo "Trailing bytes are: " - tail -c 27 "${1}" + tail -c 32 "${1}" | od -x | head -n2 } # $1: input file ; $2: output file ; $3: what kind of file diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c index e934dd26a59d..112c07c4c37a 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c @@ -667,22 +667,26 @@ static void process_one_client(int fd, int pipefd) do_getsockopts(&s, fd, ret, ret2); if (s.mptcpi_rcv_delta != (uint64_t)ret + 1) - xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret); + xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64, + s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1)); /* be nice when running on top of older kernel */ if (s.pkt_stats_avail) { if (s.last_sample.mptcpi_bytes_sent != ret2) - xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64, + xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64 + ", diff %" PRId64, s.last_sample.mptcpi_bytes_sent, ret2, s.last_sample.mptcpi_bytes_sent - ret2); if (s.last_sample.mptcpi_bytes_received != ret) - xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64, + xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64 + ", diff %" PRId64, s.last_sample.mptcpi_bytes_received, ret, s.last_sample.mptcpi_bytes_received - ret); if (s.last_sample.mptcpi_bytes_acked != ret) - xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64, - s.last_sample.mptcpi_bytes_acked, ret2, - s.last_sample.mptcpi_bytes_acked - ret2); + xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64 + ", diff %" PRId64, + s.last_sample.mptcpi_bytes_acked, ret, + s.last_sample.mptcpi_bytes_acked - ret); } close(fd); diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c index 994a556f46c1..93fea3442216 100644 --- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c +++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c @@ -188,6 +188,13 @@ static int capture_events(int fd, int event_group) fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs)); else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE) fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs)); + else if (attrs->rta_type == MPTCP_ATTR_FLAGS) { + __u16 flags = *(__u16 *)RTA_DATA(attrs); + + /* only print when present, easier */ + if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0) + fprintf(stderr, ",deny_join_id0:1"); + } attrs = 
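/*
 * (Walk sketch: RTA_NEXT() advances to the next attribute and shrinks
 * msg_len, so the new MPTCP_ATTR_FLAGS branch is visited like any
 * other rtattr and prints ",deny_join_id0:1" only when the
 * MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 bit is set.)
 */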
RTA_NEXT(attrs, msg_len); } diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh index 970c329735ff..3d45991f24ed 100755 --- a/tools/testing/selftests/net/mptcp/userspace_pm.sh +++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh @@ -201,6 +201,9 @@ make_connection() is_v6="v4" fi + # set this on the client side only: will not affect the rest + ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0 + :>"$client_evts" :>"$server_evts" @@ -223,23 +226,28 @@ make_connection() local client_token local client_port local client_serverside + local client_nojoin local server_token local server_serverside + local server_nojoin client_token=$(mptcp_lib_evts_get_info token "$client_evts") client_port=$(mptcp_lib_evts_get_info sport "$client_evts") client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts") + client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts") server_token=$(mptcp_lib_evts_get_info token "$server_evts") server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts") + server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts") print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1" - if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] && - [ "$server_serverside" = 1 ] + if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] && + [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] && + [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ] then test_pass print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}" else - test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})" + test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})" mptcp_lib_result_print_all_tap exit ${KSFT_FAIL} fi diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh index 3c8d3455d8e7..b327d3061ed5 100755 --- a/tools/testing/selftests/net/openvswitch/openvswitch.sh +++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh @@ -25,6 +25,7 @@ tests=" nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT netlink_checks ovsnl: validate netlink attrs and settings upcall_interfaces ovs: test the upcall interfaces + tunnel_metadata ovs: test extraction of tunnel metadata drop_reason drop: test drop reasons are emitted psample psample: Sampling packets with psample" @@ -113,13 +114,13 @@ ovs_add_dp () { } ovs_add_if () { - info "Adding IF to DP: br:$2 if:$3" - if [ "$4" != "-u" ]; then - ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if "$2" "$3" \ - || return 1 + info "Adding IF to DP: br:$3 if:$4 ($2)" + if [ "$5" != "-u" ]; then + ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if \ + -t "$2" "$3" "$4" || return 1 else python3 $ovs_base/ovs-dpctl.py add-if \ - -u "$2" "$3" >$ovs_dir/$3.out 2>$ovs_dir/$3.err & + -u -t "$2" "$3" "$4" >$ovs_dir/$4.out 2>$ovs_dir/$4.err & pid=$! 
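
[Aside] For context on the deny_join_id0 values the userspace_pm.sh checks above read back: setting net.mptcp.allow_join_initial_addr_port=0 on the client makes it advertise the RFC 8684 MP_CAPABLE "C" bit, which the peer then reports through the new event flag (hence client_nojoin=0 but server_nojoin=1). A tiny illustrative parser; the constant follows RFC 8684 section 3.1 and this is not kernel code:

#include <stdbool.h>
#include <stdint.h>

#define MP_CAPABLE_FLAG_DENY_JOIN_ID0	0x20	/* "C" bit of the flags byte */

static bool mp_capable_denies_join_id0(uint8_t flags)
{
	return flags & MP_CAPABLE_FLAG_DENY_JOIN_ID0;
}
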
on_exit "ovs_sbx $1 kill -TERM $pid 2>/dev/null" fi @@ -166,9 +167,9 @@ ovs_add_netns_and_veths () { fi if [ "$7" != "-u" ]; then - ovs_add_if "$1" "$2" "$4" || return 1 + ovs_add_if "$1" "netdev" "$2" "$4" || return 1 else - ovs_add_if "$1" "$2" "$4" -u || return 1 + ovs_add_if "$1" "netdev" "$2" "$4" -u || return 1 fi if [ $TRACING -eq 1 ]; then @@ -756,6 +757,79 @@ test_upcall_interfaces() { return 0 } +ovs_add_kernel_tunnel() { + local sbxname=$1; shift + local ns=$1; shift + local tnl_type=$1; shift + local name=$1; shift + local addr=$1; shift + + info "setting up kernel ${tnl_type} tunnel ${name}" + ovs_sbx "${sbxname}" ip -netns ${ns} link add dev ${name} type ${tnl_type} $* || return 1 + on_exit "ovs_sbx ${sbxname} ip -netns ${ns} link del ${name} >/dev/null 2>&1" + ovs_sbx "${sbxname}" ip -netns ${ns} addr add dev ${name} ${addr} || return 1 + ovs_sbx "${sbxname}" ip -netns ${ns} link set dev ${name} mtu 1450 up || return 1 +} + +test_tunnel_metadata() { + which arping >/dev/null 2>&1 || return $ksft_skip + + sbxname="test_tunnel_metadata" + sbx_add "${sbxname}" || return 1 + + info "setting up new DP" + ovs_add_dp "${sbxname}" tdp0 -V 2:1 || return 1 + + ovs_add_netns_and_veths "${sbxname}" tdp0 tns left0 l0 \ + 172.31.110.1/24 || return 1 + + info "removing veth interface from openvswitch and setting IP" + ovs_del_if "${sbxname}" tdp0 left0 || return 1 + ovs_sbx "${sbxname}" ip addr add 172.31.110.2/24 dev left0 || return 1 + ovs_sbx "${sbxname}" ip link set left0 up || return 1 + + info "setting up tunnel port in openvswitch" + ovs_add_if "${sbxname}" "vxlan" tdp0 ovs-vxlan0 -u || return 1 + on_exit "ovs_sbx ${sbxname} ip link del ovs-vxlan0" + ovs_wait ip link show ovs-vxlan0 &>/dev/null || return 1 + ovs_sbx "${sbxname}" ip link set ovs-vxlan0 up || return 1 + + configs=$(echo ' + 1 172.31.221.1/24 1155332 32 set udpcsum flags\(df\|csum\) + 2 172.31.222.1/24 1234567 45 set noudpcsum flags\(df\) + 3 172.31.223.1/24 1020304 23 unset udpcsum flags\(csum\) + 4 172.31.224.1/24 1357986 15 unset noudpcsum' | sed '/^$/d') + + while read -r i addr id ttl df csum flags; do + ovs_add_kernel_tunnel "${sbxname}" tns vxlan vxlan${i} ${addr} \ + remote 172.31.110.2 id ${id} dstport 4789 \ + ttl ${ttl} df ${df} ${csum} || return 1 + done <<< "${configs}" + + ovs_wait grep -q 'listening on upcall packet handler' \ + ${ovs_dir}/ovs-vxlan0.out || return 1 + + info "sending arping" + for i in 1 2 3 4; do + ovs_sbx "${sbxname}" ip netns exec tns \ + arping -I vxlan${i} 172.31.22${i}.2 -c 1 \ + >${ovs_dir}/arping.stdout 2>${ovs_dir}/arping.stderr + done + + info "checking that received decapsulated packets carry correct metadata" + while read -r i addr id ttl df csum flags; do + arp_hdr="arp\\(sip=172.31.22${i}.1,tip=172.31.22${i}.2,op=1,sha=" + addrs="src=172.31.110.1,dst=172.31.110.2" + ports="tp_src=[0-9]*,tp_dst=4789" + tnl_md="tunnel\\(tun_id=${id},${addrs},ttl=${ttl},${ports},${flags}\\)" + + ovs_sbx "${sbxname}" grep -qE "MISS upcall.*${tnl_md}.*${arp_hdr}" \ + ${ovs_dir}/ovs-vxlan0.out || return 1 + done <<< "${configs}" + + return 0 +} + run_test() { ( tname="$1" diff --git a/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-disconnect.pkt b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-disconnect.pkt new file mode 100644 index 000000000000..26794e7ddfd5 --- /dev/null +++ b/tools/testing/selftests/net/packetdrill/tcp_fastopen_server_reset-after-disconnect.pkt @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +`./defaults.sh + 
./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen=0x602 /proc/sys/net/ipv4/tcp_timestamps=0` + + 0 socket(..., SOCK_STREAM|SOCK_NONBLOCK, IPPROTO_TCP) = 3 + +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 + +0 bind(3, ..., ...) = 0 + +0 listen(3, 1) = 0 + + +0 < S 0:10(10) win 32792 <mss 1460,nop,nop,sackOK> + +0 > S. 0:0(0) ack 11 win 65535 <mss 1460,nop,nop,sackOK> + +// sk->sk_state is TCP_SYN_RECV + +.1 accept(3, ..., ...) = 4 + +// tcp_disconnect() sets sk->sk_state to TCP_CLOSE + +0 connect(4, AF_UNSPEC, ...) = 0 + +0 > R. 1:1(0) ack 11 win 65535 + +// connect() sets sk->sk_state to TCP_SYN_SENT + +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0 + +0 connect(4, ..., ...) = -1 EINPROGRESS (Operation is now in progress) + +0 > S 0:0(0) win 65535 <mss 1460,nop,nop,sackOK,nop,wscale 8> + +// tp->fastopen_rsk must be NULL + +1 > S 0:0(0) win 65535 <mss 1460,nop,nop,sackOK,nop,wscale 8> diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 0f5640d8dc7f..dd093f9df6f1 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -2770,6 +2770,22 @@ TEST_F(tls_err, poll_partial_rec_async) } } +/* Use OOB+large send to trigger copy mode due to memory pressure. + * OOB causes a short read. + */ +TEST_F(tls_err, oob_pressure) +{ + char buf[1<<16]; + int i; + + memrnd(buf, sizeof(buf)); + + EXPECT_EQ(send(self->fd2, buf, 5, MSG_OOB), 5); + EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf)); + for (i = 0; i < 64; i++) + EXPECT_EQ(send(self->fd2, buf, 5, MSG_OOB), 5); +} + TEST(non_established) { struct tls12_crypto_info_aes_gcm_256 tls12; struct sockaddr_in addr; |
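
[Aside] The packetdrill script above exercises connect(AF_UNSPEC) on an accepted fastopen socket, which runs tcp_disconnect() and returns the socket to TCP_CLOSE before a fresh non-blocking connect() is issued. A minimal sketch of that disconnect idiom in plain C:

#include <string.h>
#include <sys/socket.h>

/* Disconnecting an established TCP socket: connect(AF_UNSPEC) dissolves
 * the association and drops the socket back to TCP_CLOSE, after which a
 * normal connect() can be issued again, as the script does.
 */
static int tcp_disconnect_fd(int fd)
{
	struct sockaddr sa = { .sa_family = AF_UNSPEC };

	return connect(fd, &sa, sizeof(sa));
}
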
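
[Aside] The new tls_err.oob_pressure case relies on MSG_OOB causing a short read on the receiver while the large follow-up send pushes the TLS record machinery into copy mode under memory pressure. On the receiving side, a loop like the following illustrative recv_full() helper (not part of tls.c) is the usual way to tolerate such short reads:

#include <sys/types.h>
#include <sys/socket.h>

/* Read exactly len bytes, tolerating the short reads that an
 * interleaved urgent byte can cause.
 */
static ssize_t recv_full(int fd, char *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		ssize_t n = recv(fd, buf + off, len - off, 0);

		if (n <= 0)
			return n;	/* error or peer close */
		off += (size_t)n;
	}
	return (ssize_t)off;
}
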