diff options
Diffstat (limited to 'include/rdma/ib_umem_odp.h')
-rw-r--r-- | include/rdma/ib_umem_odp.h | 48 |
1 files changed, 18 insertions, 30 deletions
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index dadc96dea39c..ae34ad019dd7 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -36,6 +36,7 @@ #include <rdma/ib_umem.h> #include <rdma/ib_verbs.h> #include <linux/interval_tree.h> +#include <linux/hmm.h> struct umem_odp_node { u64 __subtree_last; @@ -47,11 +48,11 @@ struct ib_umem_odp { struct ib_ucontext_per_mm *per_mm; /* - * An array of the pages included in the on-demand paging umem. - * Indices of pages that are currently not mapped into the device will - * contain NULL. + * An array of the pages included in the on-demand paging umem. Indices + * of pages that are currently not mapped into the device will contain + * 0. */ - struct page **page_list; + uint64_t *pfns; /* * An array of the same size as page_list, with DMA addresses mapped * for pages the pages in page_list. The lower two bits designate @@ -67,13 +68,9 @@ struct ib_umem_odp { struct mutex umem_mutex; void *private; /* for the HW driver to use. 
*/ - int notifiers_seq; - int notifiers_count; - /* Tree tracking */ struct umem_odp_node interval_tree; - struct completion notifier_completion; int dying; struct work_struct work; }; @@ -108,11 +105,10 @@ struct ib_ucontext_per_mm { /* Protects umem_tree */ struct rw_semaphore umem_rwsem; - struct mmu_notifier mn; + struct hmm_mirror mirror; unsigned int odp_mrs_count; struct list_head ucontext_list; - struct rcu_head rcu; }; int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access); @@ -120,9 +116,18 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem, unsigned long addr, size_t size); void ib_umem_odp_release(struct ib_umem_odp *umem_odp); -int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, - u64 bcnt, u64 access_mask, - unsigned long current_seq); +#define ODP_READ_BIT (1<<0ULL) +#define ODP_WRITE_BIT (1<<1ULL) +/* + * The device bit is not used by ODP but is there to fulfill the HMM API, which + * also supports devices with device memory (like GPU). So from the ODP/RDMA POV + * this can be ignored. + */ +#define ODP_DEVICE_BIT (1<<2ULL) +#define ODP_FLAGS_BITS 3 + +long ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, + struct hmm_range *range); void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, u64 bound); @@ -145,23 +150,6 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root, struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length); -static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp, - unsigned long mmu_seq) -{ - /* - * This code is strongly based on the KVM code from - * mmu_notifier_retry. Should be called with - * the relevant locks taken (umem_odp->umem_mutex - * and the ucontext umem_mutex semaphore locked for read). 
- */ - - if (unlikely(umem_odp->notifiers_count)) - return 1; - if (umem_odp->notifiers_seq != mmu_seq) - return 1; - return 0; -} - #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access) |