author	Ben Skeggs <bskeggs@redhat.com>	2022-10-13 08:06:38 +1000
committer	Ben Skeggs <bskeggs@redhat.com>	2022-10-13 08:06:38 +1000
commit	9fcbca599ef693b7580fc9ce6b930fac7a077b6b (patch)
tree	c0d75abcfc0f85e6fcba03aa94c54584679e5e30
parent	2428d9aef24a6a497b8740afadbb028c17b5e697 (diff)
mmu
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h	131
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h	11
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0000.h	39
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0080.h	51
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl2080.h	33
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/ctrl/ctrl90f1.h	100
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/nvos.h	184
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_rpc-structures.h	34
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_sdk-structures.h	45
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/kernel/inc/vgpu/rpc_headers.h	10
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild	1
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/bar/r515.c	213
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/fb/r515.c	17
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c	38
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h	9
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r515.c	213
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c	76
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c	10
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c	7
28 files changed, 1239 insertions, 9 deletions
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index d4a495cc1d4c..4287e00e3dd3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -5,6 +5,28 @@
#include <core/falcon.h>
#include <core/firmware.h>
+struct nvkm_gsp_device {
+ struct nvkm_gsp_client *client;
+
+ u32 object;
+ u32 subdevice;
+};
+
+void nvkm_gsp_device_dtor(struct nvkm_gsp_device *);
+
+struct nvkm_gsp_client {
+ const struct nvkm_gsp_client_func {
+ int (*ctor)(struct nvkm_gsp_client *);
+ int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+ } *func;
+ struct nvkm_gsp *gsp;
+
+ u32 object;
+};
+
+int nvkm_gsp_client_ctor(struct nvkm_gsp *, struct nvkm_gsp_client *);
+void nvkm_gsp_client_dtor(struct nvkm_gsp_client *);
+
struct nvkm_gsp_mem {
u32 size;
void *data;
@@ -106,6 +128,28 @@ struct nvkm_gsp {
u32 client;
u32 device;
u32 subdevice;
+
+ struct {
+ u64 rm_bar1_pdb;
+ u64 rm_bar2_pdb;
+ } bar;
+
+ const struct nvkm_gsp_rpc {
+ int (*update_bar_pde)(struct nvkm_gsp *, int bar, u64 addr);
+
+ void *(*rm_alloc_get)(struct nvkm_gsp *, u32 client, u32 parent, u32 object,
+ u32 oclass, u32 argc);
+ void *(*rm_alloc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
+ void (*rm_alloc_done)(struct nvkm_gsp *, void *repv);
+
+ int (*rm_free)(struct nvkm_gsp *, u32 client, u32 parent, u32 object);
+
+ void *(*rm_ctrl_get)(struct nvkm_gsp *, u32 client, u32 object, u32 cmd, u32 argc);
+ void *(*rm_ctrl_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
+ void (*rm_ctrl_done)(struct nvkm_gsp *, void *repv);
+ } *rpc;
+
+ atomic_t client_id; /*XXX: allocator */
};
static inline bool
@@ -114,6 +158,93 @@ nvkm_gsp_rm(struct nvkm_gsp *gsp)
return gsp && gsp->fw.img != NULL;
}
+static inline void *
+nvkm_gsp_rm_alloc_get(struct nvkm_gsp *gsp, u32 client, u32 parent, u32 object,
+ u32 oclass, u32 argc)
+{
+ return gsp->rpc->rm_alloc_get(gsp, client, parent, object, oclass, argc);
+}
+
+static inline void *
+nvkm_gsp_rm_alloc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+ return gsp->rpc->rm_alloc_push(gsp, argv, wait, repc);
+}
+
+static inline int
+nvkm_gsp_rm_alloc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+{
+ void *repv = gsp->rpc->rm_alloc_push(gsp, argv, wait, 0);
+
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static inline int
+nvkm_gsp_rm_free(struct nvkm_gsp *gsp, u32 client, u32 parent, u32 object)
+{
+ return gsp->rpc->rm_free(gsp, client, parent, object);
+}
+
+static inline void
+nvkm_gsp_rm_alloc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ gsp->rpc->rm_alloc_done(gsp, repv);
+}
+
+static inline int
+nvkm_gsp_rm_alloc(struct nvkm_gsp *gsp, u32 client, u32 parent, u32 object, u32 oclass, u32 argc)
+{
+ void *argv = gsp->rpc->rm_alloc_get(gsp, client, parent, object, oclass, argc);
+
+ if (IS_ERR_OR_NULL(argv))
+ return argv ? PTR_ERR(argv) : -EIO;
+
+ return nvkm_gsp_rm_alloc_wr(gsp, argv, true);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_get(struct nvkm_gsp *gsp, u32 client, u32 object, u32 cmd, u32 argc)
+{
+ return gsp->rpc->rm_ctrl_get(gsp, client, object, cmd, argc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+ return gsp->rpc->rm_ctrl_push(gsp, argv, wait, repc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp *gsp, u32 client, u32 object, u32 cmd, u32 repc)
+{
+ void *argv = nvkm_gsp_rm_ctrl_get(gsp, client, object, cmd, repc);
+
+ if (IS_ERR(argv))
+ return argv;
+
+ return nvkm_gsp_rm_ctrl_push(gsp, argv, true, repc);
+}
+
+static inline int
+nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+{
+ void *repv = gsp->rpc->rm_ctrl_push(gsp, argv, wait, 0);
+
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static inline void
+nvkm_gsp_rm_ctrl_done(struct nvkm_gsp *gsp, void *repv)
+{
+ gsp->rpc->rm_ctrl_done(gsp, repv);
+}
+
int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
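
The RPC wrappers above follow a get/fill/push pattern: rm_alloc_get()/rm_ctrl_get() reserve a message and return a pointer to its params, the caller fills those in, and the _push()/_wr() variants send it, with reply buffers released via _done(). A minimal sketch of a control call built on these helpers; the params layout and cmd are illustrative, not from this commit:

	static int
	example_rm_ctrl(struct nvkm_gsp *gsp, u32 client, u32 object, u32 cmd)
	{
		struct {
			NvU32 value;
		} *ctrl;	/* hypothetical params layout for the chosen cmd */

		ctrl = nvkm_gsp_rm_ctrl_get(gsp, client, object, cmd, sizeof(*ctrl));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		ctrl->value = 1;

		/* send and wait; reply payload not needed, so use the _wr() form */
		return nvkm_gsp_rm_ctrl_wr(gsp, ctrl, true);
	}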
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 70e7887ef4b4..a5c70c749648 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
+#include <subdev/gsp.h>
struct nvkm_vma {
struct list_head head;
@@ -46,6 +47,16 @@ struct nvkm_vmm {
void *nullp;
bool replay;
+
+ struct {
+ u64 bar2_pdb;
+
+ struct nvkm_gsp_client client;
+ struct nvkm_gsp_device device;
+ u32 object;
+
+ struct nvkm_vma *rsvd;
+ } rm;
};
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0000.h
new file mode 100644
index 000000000000..927ae39a6db7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0000.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl0000_h_
+#define _cl0000_h_
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+#define NV01_ROOT (0x00000000)
+
+/* NvAlloc parameters */
+typedef struct {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+
+#endif /* _cl0000_h_ */
+
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0080.h
new file mode 100644
index 000000000000..ec081dfea66f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0080.h
@@ -0,0 +1,51 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl0080_h_
+#define _cl0080_h_
+
+#define NV01_DEVICE_0 (0x00000080)
+
+/**
+ * @brief Alloc param
+ *
+ * @param vaMode mode for virtual address space allocation
+ * Three modes:
+ * NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES
+ * NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE
+ * NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES
+ * Detailed description of these modes is in nvos.h
+ **/
+typedef struct {
+ NvU32 deviceId;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvV32 flags;
+ NvU64 vaSpaceSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvV32 vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#endif /* _cl0080_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl2080.h
new file mode 100644
index 000000000000..94ca1b15cf6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/class/cl2080.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2002-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl2080_h_
+#define _cl2080_h_
+
+#define NV20_SUBDEVICE_0 (0x00002080)
+
+typedef struct {
+ NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+
+#endif /* _cl2080_h_ */
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
new file mode 100644
index 000000000000..b54019bd99f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
@@ -0,0 +1,100 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#define GMMU_FMT_MAX_LEVELS 6
+
+/*!
+ * Pin PDEs for a given VA range on the server RM and then mirror the client's page
+ * directory/tables in the server.
+ *
+ * @ref
+ */
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+
+#define NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+ /*!
+ * [in] GPU sub-device handle - this API only supports unicast.
+ * Pass 0 to use subDeviceId instead.
+ */
+ NvHandle hSubDevice;
+
+ /*!
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+ */
+ NvU32 subDeviceId;
+
+ /*!
+ * [in] Page size (VA coverage) of the level to reserve.
+ * This need not be a leaf (page table) page size - it can be
+ * the coverage of an arbitrary level (including root page directory).
+ */
+ NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+ /*!
+ * [in] First GPU virtual address of the range to reserve.
+ * This must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+ /*!
+ * [in] Last GPU virtual address of the range to reserve.
+ * This (+1) must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+ /*!
+ * [in] Number of PDE levels to copy.
+ */
+ NvU32 numLevelsToCopy;
+
+ /*!
+ * [in] Per-level information.
+ */
+ struct {
+ /*!
+ * Physical address of this page level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+ /*!
+ * Size in bytes allocated for this level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+ /*!
+ * Aperture in which this page level instance resides.
+ */
+ NvU32 aperture;
+
+ /*!
+ * Page shift corresponding to the level
+ */
+ NvU8 pageShift;
+ } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
+
+/* _ctrl90f1_h_ */
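
In the uvmm.c hunk later in this commit, pageSize is 0x20000000 (1 << 0x1d, matching levels[2].pageShift) and three levels are copied. A short sketch of the alignment invariants documented above, with rsvd_addr/rsvd_size standing in for the reserved VMA:

	NvU64 pageSize   = 0x20000000;			/* 1 << 0x1d */
	NvU64 virtAddrLo = rsvd_addr;			/* must be pageSize-aligned */
	NvU64 virtAddrHi = rsvd_addr + rsvd_size - 1;	/* +1 must be pageSize-aligned */

	WARN_ON(virtAddrLo & (pageSize - 1));
	WARN_ON((virtAddrHi + 1) & (pageSize - 1));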
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/nvos.h
new file mode 100644
index 000000000000..ffb8850c8027
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/common/sdk/nvidia/inc/nvos.h
@@ -0,0 +1,184 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+ /***************************************************************************\
+|* *|
+|* NV Architecture Interface *|
+|* *|
+|* <nvos.h> defines the Operating System function and ioctl interfaces to *|
+|* NVIDIA's Unified Media Architecture (TM). *|
+|* *|
+ \***************************************************************************/
+
+#ifndef NVOS_INCLUDED
+#define NVOS_INCLUDED
+
+/**
+ * @brief NvAlloc parameters for VASPACE classes
+ *
+ * Used to create a new private virtual address space.
+ *
+ * index
+ * Tegra: With TEGRA_VASPACE_A, index specifies the IOMMU
+ * virtual address space to be created. Based on the
+ * index, RM/NVMEM will decide the HW ASID to be used with
+ * this VA Space. "index" takes values from the
+ * NVMEM_CLIENT_* defines in
+ * "drivers/common/inc/tegra/memory/ioctl.h".
+ *
+ * Big GPU: With FERMI_VASPACE_A, see NV_VASPACE_ALLOCATION_INDEX_GPU_*.
+ *
+ * flags
+ * MINIMIZE_PTETABLE_SIZE Pass hint to DMA HAL to use partial page tables.
+ * Depending on allocation pattern this may actually
+ * use more instance memory.
+ *
+ * RETRY_PTE_ALLOC_IN_SYS Fall back to PTE allocation in sysmem. This is now
+ * enabled by default.
+ *
+ * SHARED_MANAGEMENT
+ * Indicates management of the VA space is shared with another
+ * component (e.g. driver layer, OS, etc.).
+ *
+ * The initial VA range from vaBase (inclusive) through vaSize (exclusive)
+ * is managed by RM. The range must be aligned to a top-level PDE's VA
+ * coverage since backing page table levels for this range are managed by RM.
+ * All normal RM virtual memory management APIs work within this range.
+ *
+ * An external component can manage the remaining VA ranges,
+ * from 0 (inclusive) to vaBase (exclusive) and from vaSize (inclusive) up to the
+ * maximum VA limit supported by HW.
+ * Management of these ranges includes VA sub-allocation and the
+ * backing lower page table levels.
+ *
+ * The top-level page directory is special since it is a shared resource.
+ * Management of the page directory is as follows:
+ * 1. Initially RM allocates a page directory for RM-managed PDEs.
+ * 2. The external component may create a full page directory and commit it
+ * with NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY.
+ * This will copy the RM-managed PDEs from the RM-managed page directory
+ * into the external page directory and commit channels to the external page directory.
+ * After this point RM will update the external page directory directly for
+ * operations that modify RM-managed PDEs.
+ * 3. The external component may use NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY repeatedly
+ * if it needs to update the page directory again (e.g. to resize or migrate).
+ * This will copy the RM-managed PDEs from the old external page directory
+ * into the new external page directory and commit channels to the new page directory.
+ * 4. The external component may restore management of the page directory back to
+ * RM with NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY.
+ * This will copy the RM-managed PDEs from the external page directory
+ * into the RM-managed page directory and commit channels to the RM-managed page directory.
+ * After this point RM will update the RM-managed page directory for
+ * operations that modify RM-managed PDEs.
+ * Note that operations (2) and (4) are symmetric - the RM perspective of management is identical
+ * before and after a sequence of SET => ... => UNSET.
+ *
+ * IS_MIRRORED <to be deprecated once CUDA uses EXTERNALLY_MANAGED>
+ * This flag will tell RM to create a mirrored
+ * kernel PDB for the address space associated
+ * with this device. When this flag is set
+ * the address space covered by the top PDE
+ * is restricted and cannot be allocated out of.
+ * ENABLE_PAGE_FAULTING
+ * Enable page faulting if the architecture supports it.
+ * As of now page faulting is only supported for compute on pascal+.
+ * IS_EXTERNALLY_OWNED
+ * The allocated vaspace will be managed by an external
+ * driver. RM will not own the pagetables for this vaspace.
+ *
+ * ENABLE_NVLINK_ATS
+ * Enables VA translation for this address space using NVLINK ATS.
+ * Note, the GMMU page tables still exist and take priority over NVLINK ATS.
+ * VA space object creation will fail if:
+ * - hardware support is not available (NV_ERR_NOT_SUPPORTED)
+ * - incompatible options IS_MIRRORED or IS_EXTERNALLY_OWNED are set (NV_ERR_INVALID_ARGUMENT)
+ * IS_FLA
+ * Sets FLA flag for this VASPACE
+ *
+ * ALLOW_ZERO_ADDRESS
+ * Allows VASPACE Range to start from zero
+ * SKIP_SCRUB_MEMPOOL
+ * Skip scrubbing in MemPool
+ *
+ * vaBase [in, out]
+ * On input, the lowest usable base address of the VA space.
+ * If 0, RM will pick a default value - 0 is always reserved to represent NULL pointers.
+ * The value must be aligned to the largest page size of the VA space.
+ * Larger values aid in debug since offsets added to NULL pointers will still fault.
+ *
+ * On output, the actual usable base address is returned.
+ *
+ * vaSize [in,out]
+ * On input, requested size of the virtual address space in bytes.
+ * Requesting a smaller size reduces the memory required for the initial
+ * page directory, but the VAS may be resized later (NV0080_CTRL_DMA_SET_VA_SPACE_SIZE).
+ * If 0, the default VA space size will be used.
+ *
+ * On output, the actual size of the VAS in bytes.
+ * NOTE: This corresponds to the VA_LIMIT + 1, so the usable size is (vaSize - vaBase).
+ *
+ * bigPageSize
+ * Set the size of the big page in this address space object. Current HW supports
+ * either 64k or 128k as the big page size. HW that supports multiple big page
+ * sizes per address space will use this size. HW that does not support this
+ * feature will override it with the default big page size supported by the system.
+ * If the big page size value is set to ZERO, the default big page size of the
+ * system is used.
+ **/
+typedef struct
+{
+ NvU32 index;
+ NvV32 flags;
+ NvU64 vaSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvU32 bigPageSize;
+ NvU64 vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_FLAGS_NONE (0x00000000)
+#define NV_VASPACE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE BIT(0)
+#define NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS BIT(1)
+#define NV_VASPACE_ALLOCATION_FLAGS_SHARED_MANAGEMENT BIT(2)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3)
+#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_NVLINK_ATS BIT(4)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_MIRRORED BIT(5)
+#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_PAGE_FAULTING BIT(6)
+#define NV_VASPACE_ALLOCATION_FLAGS_VA_INTERNAL_LIMIT BIT(7)
+#define NV_VASPACE_ALLOCATION_FLAGS_ALLOW_ZERO_ADDRESS BIT(8)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_FLA BIT(9)
+#define NV_VASPACE_ALLOCATION_FLAGS_SKIP_SCRUB_MEMPOOL BIT(10)
+#define NV_VASPACE_ALLOCATION_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE BIT(11)
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_HOST 0x01 //<! Acquire reference to BAR1 VAS.
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_GLOBAL 0x02 //<! Acquire reference to global VAS.
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_DEVICE 0x03 //<! Acquire reference to device vaspace
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_FLA 0x04 //<! Acquire reference to FLA VAS.
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_MAX 0x05 //<! Increment this on adding index entries
+
+
+#define NV_VASPACE_BIG_PAGE_SIZE_64K (64 * 1024)
+#define NV_VASPACE_BIG_PAGE_SIZE_128K (128 * 1024)
+
+#endif /* NVOS_INCLUDED */
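
These parameters feed the FERMI_VASPACE_A (0x90f1) allocation added to uvmm.c later in this commit; condensed into the gsp.h helpers, with an illustrative object handle:

	NV_VASPACE_ALLOCATION_PARAMETERS *args;

	args = nvkm_gsp_rm_alloc_get(gsp, client, device, 0x90f10000 /* handle */,
				     0x90f1, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);

	args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;

	return nvkm_gsp_rm_alloc_wr(gsp, args, true);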
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_rpc-structures.h
index 4ce99d2749df..e5b98a644a10 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_rpc-structures.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_rpc-structures.h
@@ -27,6 +27,12 @@
* definition file: kernel/inc/vgpu/rpc-structures.def
*/
+#include <nvrm/515.48.07/nvidia/generated/g_sdk-structures.h>
+
+typedef struct rpc_free_v03_00
+{
+ NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
typedef struct rpc_unloading_guest_driver_v1F_07
{
@@ -35,6 +41,34 @@ typedef struct rpc_unloading_guest_driver_v1F_07
NvU32 newLevel;
} rpc_unloading_guest_driver_v1F_07;
+typedef struct rpc_update_bar_pde_v15_00
+{
+ UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+ NvHandle hClient;
+ NvHandle hParent;
+ NvHandle hObject;
+ NvU32 hClass;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU8 params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+ NvHandle hClient;
+ NvHandle hObject;
+ NvU32 cmd;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvBool serialized;
+ NvU8 reserved[3];
+ NvU8 params[];
+} rpc_gsp_rm_control_v03_00;
+
typedef struct rpc_run_cpu_sequencer_v17_00
{
NvU32 bufferSizeDWord;
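
Both GSP_RM message types end in a flexible params[] array; the sender allocates sizeof(header) + paramsSize from the RPC queue, as the gsp/r515.c hunk below does with function numbers 103 (alloc) and 76 (control). Illustrative sizing for an alloc carrying NV0080 params, error handling elided:

	u32 argc = sizeof(NV0080_ALLOC_PARAMETERS);
	rpc_gsp_rm_alloc_v03_00 *rpc;

	rpc = r515_gsp_rpc_get(gsp, 103, sizeof(*rpc) + argc);	/* header + params[] */
	rpc->paramsSize = argc;		/* params[] holds the NV0080 struct */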
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_sdk-structures.h
new file mode 100644
index 000000000000..0439f4e0550c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/generated/g_sdk-structures.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * WARNING: This is an autogenerated file. DO NOT EDIT.
+ * This file is generated using below files:
+ * template file: kernel/inc/vgpu/gt_sdk-structures.h
+ * definition file: kernel/inc/vgpu/sdk-structures.def
+ */
+
+#include <nvrm/515.48.07/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+ NvHandle hRoot;
+ NvHandle hObjectParent;
+ NvHandle hObjectOld;
+ NvV32 status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct UpdateBarPde_v15_00
+{
+ NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+ NvU64 entryValue NV_ALIGN_BYTES(8);
+ NvU64 entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/kernel/inc/vgpu/rpc_headers.h
index aae570004153..93278463eff2 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/kernel/inc/vgpu/rpc_headers.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/515.48.07/nvidia/kernel/inc/vgpu/rpc_headers.h
@@ -26,6 +26,16 @@
#define MAX_GPC_COUNT 32
+/*
+ * Enum specifying which BAR's PDE is being updated
+ */
+typedef enum
+{
+ NV_RPC_UPDATE_PDE_BAR_1,
+ NV_RPC_UPDATE_PDE_BAR_2,
+ NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
{
NvU32 headIndex;
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
index 9e44ffbeab97..d4801f1b48f6 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
@@ -2,8 +2,11 @@
#ifndef __NVRM_NVTYPES_H__
#define __NVRM_NVTYPES_H__
+#define NV_ALIGN_BYTES(a) __attribute__ ((__aligned__(a)))
#define NV_DECLARE_ALIGNED(f,a) f __attribute__ ((__aligned__(a)))
+typedef u32 NvV32;
+
typedef u8 NvU8;
typedef u16 NvU16;
typedef u32 NvU32;
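
NV_ALIGN_BYTES attaches the alignment attribute after the declarator, so a field such as vaSize in nvos.h expands as:

	NvU64 vaSize NV_ALIGN_BYTES(8);
	/* => NvU64 vaSize __attribute__ ((__aligned__(8))); */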
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 8faee3317a74..21c5fc474140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -7,3 +7,4 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
nvkm-y += nvkm/subdev/bar/tu102.o
+nvkm-y += nvkm/subdev/bar/r515.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index daebfc991c76..dae12f70535d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -4,6 +4,9 @@
#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
#include <subdev/bar.h>
+int r515_bar_new_(const struct nvkm_bar_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+
void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_bar *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r515.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r515.c
new file mode 100644
index 000000000000..7653a0573471
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r515.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/mm.h>
+#include <subdev/gsp.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu/vmm.h>
+
+static void
+r515_bar_bar2_wait(struct nvkm_bar *base)
+{
+}
+
+static void
+r515_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ struct nvkm_gsp *gsp = bar->subdev.device->gsp;
+
+ WARN_ON(gsp->rpc->update_bar_pde(gsp, 2, 0));
+}
+
+static void
+r515_bar_bar2_init(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
+ struct nvkm_gsp *gsp = device->gsp;
+
+ WARN_ON(gsp->rpc->update_bar_pde(gsp, 2, vmm->pd->pde[0]->pt[0]->addr));
+ vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
+}
+
+static void
+r515_bar_bar1_wait(struct nvkm_bar *base)
+{
+}
+
+static void
+r515_bar_bar1_fini(struct nvkm_bar *base)
+{
+}
+
+#define wrap(p) container_of((p), struct wrap, memory)
+
+struct wrap {
+ struct nvkm_memory memory;
+ struct nvkm_device *device;
+ u64 addr;
+ u64 size;
+};
+
+static int
+wrap_kmap(struct nvkm_memory *memory, struct nvkm_memory **pmemory)
+{
+ return nvkm_instobj_wrap(wrap(memory)->device, memory, pmemory);
+}
+
+static int
+wrap_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct wrap *wrap = wrap(memory);
+ struct nvkm_mm_node mn = { .offset = wrap->addr >> 12, .length = wrap->size >> 12 };
+ struct nvkm_vmm_map map = {
+ .memory = &wrap->memory,
+ .offset = offset,
+ .mem = &mn,
+ };
+
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static u64
+wrap_size(struct nvkm_memory *memory)
+{
+ return wrap(memory)->size;
+}
+
+static u64
+wrap_addr(struct nvkm_memory *memory)
+{
+ return wrap(memory)->addr;
+}
+
+static u8
+wrap_page(struct nvkm_memory *memory)
+{
+ return 12;
+}
+
+static enum nvkm_memory_target
+wrap_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_VRAM;
+}
+
+static void *
+wrap_dtor(struct nvkm_memory *memory)
+{
+ return wrap(memory);
+}
+
+static const struct nvkm_memory_func
+wrap_func = {
+ .dtor = wrap_dtor,
+ .target = wrap_target,
+ .page = wrap_page,
+ .addr = wrap_addr,
+ .size = wrap_size,
+ .map = wrap_map,
+ .kmap = wrap_kmap,
+};
+
+static int
+wrap_vram(struct nvkm_device *device, u64 addr, u64 size, struct nvkm_memory **pmemory)
+{
+ struct wrap *wrap;
+
+ if (!(wrap = kzalloc(sizeof(*wrap), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_memory_ctor(&wrap_func, &wrap->memory);
+ wrap->device = device;
+ wrap->addr = addr;
+ wrap->size = size;
+
+ *pmemory = &wrap->memory;
+ return 0;
+}
+
+static void
+r515_bar_bar1_init(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
+ struct nvkm_memory *pd3;
+ int ret;
+
+ ret = wrap_vram(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
+ if (WARN_ON(ret))
+ return;
+
+ nvkm_memory_unref(&vmm->pd->pt[0]->memory);
+
+ ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
+ nvkm_memory_unref(&pd3);
+ if (WARN_ON(ret))
+ return;
+
+ vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
+}
+
+static void *
+r515_bar_dtor(struct nvkm_bar *bar)
+{
+ void *data = gf100_bar_dtor(bar);
+
+ kfree(bar->func);
+ return data;
+}
+
+int
+r515_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
+{
+ struct nvkm_bar_func *rm;
+ int ret;
+
+ if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ rm->dtor = r515_bar_dtor;
+ rm->oneinit = hw->oneinit;
+ rm->bar1.init = r515_bar_bar1_init;
+ rm->bar1.fini = r515_bar_bar1_fini;
+ rm->bar1.wait = r515_bar_bar1_wait;
+ rm->bar1.vmm = hw->bar1.vmm;
+ rm->bar2.init = r515_bar_bar2_init;
+ rm->bar2.fini = r515_bar_bar2_fini;
+ rm->bar2.wait = r515_bar_bar2_wait;
+ rm->bar2.vmm = hw->bar2.vmm;
+ rm->flush = NULL;
+
+ ret = gf100_bar_new_(rm, device, type, inst, pbar);
+ if (ret) {
+ kfree(rm);
+ return ret;
+ }
+
+ gf100_bar(*pbar)->bar2_halve = true;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
index 8041bb632835..97ed49f45a3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
@@ -97,7 +97,7 @@ tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bar **pbar)
{
if (nvkm_gsp_rm(device->gsp))
- return -ENODEV;
+ return r515_bar_new_(&tu102_bar, device, type, inst, pbar);
return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r515.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r515.c
index 084148074a65..4ac02f49622c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r515.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r515.c
@@ -20,6 +20,22 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
+#include "ram.h"
+
+#include <subdev/gsp.h>
+
+static const struct nvkm_ram_func
+r515_fb_ram = {
+};
+
+static int
+r515_fb_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_gsp *gsp = fb->subdev.device->gsp;
+ u32 tail = (gsp->fb.size - gsp->fb.heap.addr);
+
+ return nvkm_ram_new_(&r515_fb_ram, fb, 0, gsp->fb.size - tail, pram);
+}
static void *
r515_fb_dtor(struct nvkm_fb *fb)
@@ -42,6 +58,7 @@ r515_fb_new(const struct nvkm_fb_func *hw,
rm->sysmem.flush_page_init = hw->sysmem.flush_page_init;
rm->vidmem.type = hw->vidmem.type;
rm->vidmem.size = hw->vidmem.size;
+ rm->ram_new = r515_fb_ram_new;
ret = nvkm_fb_new_(rm, device, type, inst, pfb);
if (ret)
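
The RAM object exposed by r515_fb_ram_new() covers only the VRAM below the GSP heap: tail is everything from heap.addr up (the heap itself plus the WPR2 region above it), so the size passed to nvkm_ram_new_() reduces to gsp->fb.heap.addr:

	u32 tail = gsp->fb.size - gsp->fb.heap.addr;	/* GSP heap + WPR2 at the top */
	u64 size = gsp->fb.size - tail;			/* == gsp->fb.heap.addr */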
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index f3e388fd1cc9..8ebf9bc172fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -21,6 +21,43 @@
*/
#include "priv.h"
+void
+nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ if (device->client) {
+ nvkm_gsp_rm_free(device->client->gsp, device->client->object, 0, device->object);
+ device->client = NULL;
+ }
+}
+
+void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+ if (client->gsp) {
+ nvkm_gsp_rm_free(client->gsp, client->object, 0, client->object);
+ client->gsp = NULL;
+ }
+}
+
+int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ int ret;
+
+ if (!gsp->func->client)
+ return -ENODEV;
+
+ client->func = gsp->func->client;
+ client->gsp = gsp;
+ client->object = atomic_inc_return(&gsp->client_id);
+
+ ret = gsp->func->client->ctor(client);
+ if (ret)
+ client->gsp = NULL;
+
+ return ret;
+}
+
static int
nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
{
@@ -97,6 +134,7 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
+ gsp->rpc = gsp->func->rpc;
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
}
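
A client/device pair is then constructed as in the uvmm.c hunk later in this commit; a condensed sketch with error handling simplified:

	struct nvkm_gsp_client client = {};
	struct nvkm_gsp_device device = {};
	int ret;

	ret = nvkm_gsp_client_ctor(gsp, &client);
	if (ret)
		return ret;

	ret = client.func->device_ctor(&client, &device);
	if (ret)
		nvkm_gsp_client_dtor(&client);

	return ret;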
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index c3de3a31912a..712215d80734 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -41,6 +41,9 @@ ga100_gsp_r515_48_07 = {
.init = r515_gsp_init,
.fini = r515_gsp_fini,
.reset = tu102_gsp_reset,
+
+ .rpc = &r515_gsp_rpc,
+ .client = &r515_gsp_client,
};
static struct nvkm_gsp_fwif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 7b8b17a85c98..ee2247a0c365 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -96,6 +96,9 @@ ga102_gsp_r515_48_07 = {
.init = r515_gsp_init,
.fini = r515_gsp_fini,
.reset = ga102_gsp_reset,
+
+ .rpc = &r515_gsp_rpc,
+ .client = &r515_gsp_client,
};
static const struct nvkm_gsp_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 06cda7c23aa7..8c40c403ebd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -3,6 +3,10 @@
#define __NVKM_GSP_PRIV_H__
#include <subdev/gsp.h>
enum nvkm_acr_lsf_id;
+struct nvkm_gsp_client;
+
+int nvkm_gsp_client_new_(const struct nvkm_gsp_client_func *, struct nvkm_gsp *,
+ struct nvkm_gsp_client **);
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
@@ -65,6 +69,9 @@ struct nvkm_gsp_func {
int (*init)(struct nvkm_gsp *);
int (*fini)(struct nvkm_gsp *);
int (*reset)(struct nvkm_gsp *);
+
+ const struct nvkm_gsp_rpc *rpc;
+ const struct nvkm_gsp_client_func *client;
};
extern const struct nvkm_gsp_func gv100_gsp;
@@ -80,6 +87,8 @@ void r515_gsp_dtor(struct nvkm_gsp *);
int r515_gsp_oneinit(struct nvkm_gsp *);
int r515_gsp_init(struct nvkm_gsp *);
int r515_gsp_fini(struct nvkm_gsp *);
+extern const struct nvkm_gsp_rpc r515_gsp_rpc;
+extern const struct nvkm_gsp_client_func r515_gsp_client;
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r515.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r515.c
index a5c47e63f1a8..2baa7bbdcf04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r515.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r515.c
@@ -27,6 +27,9 @@
#include <engine/sec2.h>
#include <nvrm/nvtypes.h>
+#include <nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0000.h>
+#include <nvrm/515.48.07/common/sdk/nvidia/inc/class/cl0080.h>
+#include <nvrm/515.48.07/common/sdk/nvidia/inc/class/cl2080.h>
#include <nvrm/515.48.07/common/shared/msgq/inc/msgq/msgq_priv.h>
#include <nvrm/515.48.07/common/uproc/os/libos-v2.0.0/include/gsp_fw_wpr_meta.h>
#include <nvrm/515.48.07/common/uproc/os/libos-v2.0.0/include/libos_init_args.h>
@@ -403,6 +406,199 @@ r515_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
}
static int
+r515_gsp_subdevice_ctor(struct nvkm_gsp_device *device, u32 *pobject)
+{
+ NV2080_ALLOC_PARAMETERS *args;
+
+ *pobject = 0x5d1d0000;
+
+ return nvkm_gsp_rm_alloc(device->client->gsp, device->client->object, device->object,
+ *pobject, NV20_SUBDEVICE_0, sizeof(*args));
+}
+
+static int
+r515_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ NV0080_ALLOC_PARAMETERS *args;
+ int ret;
+
+ device->object = 0xde1d0000;
+
+ args = nvkm_gsp_rm_alloc_get(client->gsp, client->object, client->object, device->object,
+ NV01_DEVICE_0, sizeof(*args));
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClientShare = client->object;
+
+ ret = nvkm_gsp_rm_alloc_wr(client->gsp, args, true);
+ if (ret)
+ return ret;
+
+ device->client = client;
+
+ ret = r515_gsp_subdevice_ctor(device, &device->subdevice);
+ if (ret)
+ nvkm_gsp_device_dtor(device);
+
+ return ret;
+}
+
+static int
+r515_gsp_client_ctor(struct nvkm_gsp_client *client)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(client->gsp, client->object, 0, 0, NV01_ROOT, sizeof(*args));
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClient = client->object;
+ args->processID = ~0;
+
+ return nvkm_gsp_rm_alloc_wr(client->gsp, args, true);
+}
+
+const struct nvkm_gsp_client_func
+r515_gsp_client = {
+ .ctor = r515_gsp_client_ctor,
+ .device_ctor = r515_gsp_device_ctor,
+};
+
+void
+r515_gsp_rpc_rm_ctrl_done(struct nvkm_gsp *gsp, void *repv)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+
+ r515_gsp_rpc_done(gsp, rpc);
+}
+
+void *
+r515_gsp_rpc_rm_ctrl_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+
+ rpc = r515_gsp_rpc_push(gsp, rpc, wait, repc);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ nvkm_error(&gsp->subdev, "RM_CTRL: 0x%x\n", rpc->status);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rpc->params;
+}
+
+void *
+r515_gsp_rpc_rm_ctrl_get(struct nvkm_gsp *gsp, u32 client, u32 object, u32 cmd, u32 argc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ rpc = r515_gsp_rpc_get(gsp, 76, sizeof(*rpc) + argc);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client;
+ rpc->hObject = object;
+ rpc->cmd = cmd;
+ rpc->status = 0;
+ rpc->paramsSize = argc;
+ rpc->serialized = 0;
+ return rpc->params;
+}
+
+int
+r515_gsp_rpc_rm_free(struct nvkm_gsp *gsp, u32 client, u32 parent, u32 object)
+{
+ rpc_free_v03_00 *rpc;
+
+ rpc = r515_gsp_rpc_get(gsp, 10, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->params.hRoot = client;
+ rpc->params.hObjectParent = parent;
+ rpc->params.hObjectOld = object;
+ return r515_gsp_rpc_wr(gsp, rpc, true);
+}
+
+void
+r515_gsp_rpc_rm_alloc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+
+ r515_gsp_rpc_done(gsp, rpc);
+}
+
+void *
+r515_gsp_rpc_rm_alloc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+
+ rpc = r515_gsp_rpc_push(gsp, rpc, wait, sizeof(*rpc) + repc);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return rpc->params;
+}
+
+void *
+r515_gsp_rpc_rm_alloc_get(struct nvkm_gsp *gsp, u32 client, u32 parent, u32 object,
+ u32 oclass, u32 argc)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc;
+
+ rpc = r515_gsp_rpc_get(gsp, 103, sizeof(*rpc) + argc);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client;
+ rpc->hParent = parent;
+ rpc->hObject = object;
+ rpc->hClass = oclass;
+ rpc->status = 0;
+ rpc->paramsSize = argc;
+ return rpc->params;
+}
+
+static int
+r515_gsp_rpc_update_bar_pde(struct nvkm_gsp *gsp, int id, u64 addr)
+{
+ rpc_update_bar_pde_v15_00 *rpc;
+
+ if (WARN_ON(id != 2))
+ return -EINVAL;
+
+ rpc = r515_gsp_rpc_get(gsp, 70, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
+ rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
+ rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+
+ return r515_gsp_rpc_wr(gsp, rpc, true);
+}
+
+const struct nvkm_gsp_rpc
+r515_gsp_rpc = {
+ .update_bar_pde = r515_gsp_rpc_update_bar_pde,
+ .rm_alloc_get = r515_gsp_rpc_rm_alloc_get,
+ .rm_alloc_push = r515_gsp_rpc_rm_alloc_push,
+ .rm_alloc_done = r515_gsp_rpc_rm_alloc_done,
+ .rm_free = r515_gsp_rpc_rm_free,
+ .rm_ctrl_get = r515_gsp_rpc_rm_ctrl_get,
+ .rm_ctrl_push = r515_gsp_rpc_rm_ctrl_push,
+ .rm_ctrl_done = r515_gsp_rpc_rm_ctrl_done,
+};
+
+static int
r515_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp)
{
rpc_unloading_guest_driver_v1F_07 *rpc;
@@ -430,6 +626,8 @@ r515_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
gsp->client = rpc->hInternalClient;
gsp->device = rpc->hInternalDevice;
gsp->subdevice = rpc->hInternalSubdevice;
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
r515_gsp_rpc_done(gsp, rpc);
return 0;
@@ -497,6 +695,18 @@ r515_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
}
static int
+r515_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+
+ WARN_ON(repc != 0);
+
+ nvkm_error(subdev, "mmu fault queued\n");
+ return 0;
+}
+
+static int
r515_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{
struct nvkm_gsp *gsp = priv;
@@ -1062,6 +1272,8 @@ r515_gsp_oneinit(struct nvkm_gsp *gsp)
gsp->fb.heap.size = 0x100000;
gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+ atomic_set(&gsp->client_id, 0xc1d00000);
return 0;
}
@@ -1151,6 +1363,7 @@ r515_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
}
r515_gsp_msg_ntfy_add(gsp, 0x00001002, r515_gsp_msg_run_cpu_sequencer, gsp);
+ r515_gsp_msg_ntfy_add(gsp, 0x00001005, r515_gsp_msg_mmu_fault_queued, gsp);
r515_gsp_msg_ntfy_add(gsp, 0x00001006, r515_gsp_msg_os_error_log, gsp);
return 0;
}
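
Note the PDE encoding in r515_gsp_rpc_update_bar_pde(): a non-zero PD3 address is packed as (addr >> 4) | 2, i.e. the address shifted into the upper bits with an aperture selector in the low nibble (the value 2 is assumed here to select video memory). Illustrative:

	u64 addr  = 0x1faf000000ULL;			/* hypothetical BAR2 PD3 address */
	u64 entry = addr ? ((addr >> 4) | 2) : 0;	/* PD3 entry format, per the code above */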
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 63780dce1ecc..f1403edbc336 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -158,6 +158,9 @@ tu102_gsp_r515_48_07 = {
.init = r515_gsp_init,
.fini = r515_gsp_fini,
.reset = tu102_gsp_reset,
+
+ .rpc = &r515_gsp_rpc,
+ .client = &r515_gsp_client,
};
static struct nvkm_gsp_fwif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 27f668f4cdb7..4838523fc89a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -41,6 +41,9 @@ tu116_gsp_r515_48_07 = {
.init = r515_gsp_init,
.fini = r515_gsp_fini,
.reset = tu102_gsp_reset,
+
+ .rpc = &r515_gsp_rpc,
+ .client = &r515_gsp_client,
};
static struct nvkm_gsp_fwif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 4bbdad9d1db1..ba826b0406fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -409,9 +409,6 @@ nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
{
struct nv50_instmem *imem;
- if (nvkm_gsp_rm(device->gsp))
- return -ENODEV;
-
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index 87979952df4b..d1fafba6ef3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -55,8 +55,5 @@ int
tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
- if (nvkm_gsp_rm(device->gsp))
- return -ENODEV;
-
return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 524cd3c0e3fe..2b98272799d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -29,6 +29,10 @@
#include <nvif/if000c.h>
#include <nvif/unpack.h>
+#include <nvrm/nvtypes.h>
+#include <nvrm/515.48.07/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/515.48.07/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
+
static const struct nvkm_object_func nvkm_uvmm;
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
@@ -343,6 +347,7 @@ static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+
nvkm_vmm_unref(&uvmm->vmm);
return uvmm;
}
@@ -353,11 +358,50 @@ nvkm_uvmm = {
.mthd = nvkm_uvmm_mthd,
};
+static int
+NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES(struct nvkm_gsp *gsp, u32 client, u32 object, struct nvkm_vmm *vmm)
+{
+ NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+ int ret;
+
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000, &vmm->rm.rsvd);
+ mutex_unlock(&vmm->mutex);
+ if (WARN_ON(ret))
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(gsp, client, object,
+ NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ ctrl->pageSize = 0x20000000;
+ ctrl->virtAddrLo = vmm->rm.rsvd->addr;
+ ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
+ ctrl->numLevelsToCopy = 3;
+ ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
+ ctrl->levels[0].size = 0x20;
+ ctrl->levels[0].aperture = 1;
+ ctrl->levels[0].pageShift = 0x2f;
+ ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
+ ctrl->levels[1].size = 0x1000;
+ ctrl->levels[1].aperture = 1;
+ ctrl->levels[1].pageShift = 0x26;
+ ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
+ ctrl->levels[2].size = 0x1000;
+ ctrl->levels[2].aperture = 1;
+ ctrl->levels[2].pageShift = 0x1d;
+
+ return nvkm_gsp_rm_ctrl_wr(gsp, ctrl, true);
+}
+
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+ struct nvkm_gsp *gsp = mmu->subdev.device->gsp;
const bool more = oclass->base.maxver >= 0;
union {
struct nvif_vmm_v0 v0;
@@ -394,6 +438,38 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
}
+ if (nvkm_gsp_rm(gsp)) {
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ NV_VASPACE_ALLOCATION_PARAMETERS *args;
+
+ ret = nvkm_gsp_client_ctor(gsp, &vmm->rm.client);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = vmm->rm.client.func->device_ctor(&vmm->rm.client, &vmm->rm.device);
+ if (WARN_ON(ret))
+ return ret;
+
+ vmm->rm.object = 0x90f10000;
+
+ args = nvkm_gsp_rm_alloc_get(gsp, vmm->rm.client.object, vmm->rm.device.object,
+ vmm->rm.object, 0x90f1, sizeof(*args));
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+
+ args = nvkm_gsp_rm_alloc_push(gsp, args, true, sizeof(*args));
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ nvkm_gsp_rm_alloc_done(gsp, args);
+
+ ret = NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES(gsp, vmm->rm.client.object, vmm->rm.object, vmm);
+ if (WARN_ON(ret))
+ return ret;
+ }
+
page = uvmm->vmm->func->page;
args->v0.page_nr = 0;
while (page && (page++)->shift)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index ae793f400ba1..d45c62842521 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -978,6 +978,16 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
struct nvkm_vma *vma;
struct rb_node *node;
+ if (vmm->rm.client.gsp) {
+ struct nvkm_gsp *gsp = vmm->rm.client.gsp;
+
+ nvkm_gsp_rm_free(gsp, vmm->rm.client.object, 0, vmm->rm.object);
+ nvkm_gsp_device_dtor(&vmm->rm.device);
+ nvkm_gsp_client_dtor(&vmm->rm.client);
+
+ nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+ }
+
if (0)
nvkm_vmm_dump(vmm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 6cb5eefa45e9..51f1de8744d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -35,9 +35,12 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
mutex_lock(&vmm->mmu->mutex);
- nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+ if (!vmm->rm.bar2_pdb)
+ nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+ else
+ nvkm_wr32(device, 0xb830a0, vmm->rm.bar2_pdb >> 8);
nvkm_wr32(device, 0xb830a4, 0x00000000);
- nvkm_wr32(device, 0x100e68, 0x00000000);
+// nvkm_wr32(device, 0x100e68, 0x00000000);
nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
nvkm_msec(device, 2000,