author    Jason Gunthorpe <jgg@mellanox.com>    2019-05-23 12:34:30 -0300
committer Jérôme Glisse <jglisse@redhat.com>    2019-05-23 16:34:08 -0400
commit    72ea94afad8b51351bed3d2282052c44f0f09132 (patch)
tree      f9f6820a069166da5c59c80fa6c9f240612aefd8
parent    31e1c6180a84272920adacd3fc040e82e11b55dd (diff)
mm/hmm: improve locking around hmm->dead v2
The hmm->dead value is being read without any locking, so it is just an
unreliable hint; however, in many cases we need certainty that code is not
racing with mmput()/hmm_release(). Instead, have hmm_range_register() hold
a reference on the mm so we do not race with mm destruction.

Changes since v1:
- have the range hold a reference on the mm

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
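
A minimal sketch of the mm-pinning pattern this change adopts, shown outside
the context of the patch: mmget_not_zero() only succeeds while mm_users is
still non-zero, and the matching mmput() releases the reference. The helper
names below are hypothetical illustrations; only mmget_not_zero() and mmput()
(declared in <linux/sched/mm.h>) are real kernel APIs.

#include <linux/sched/mm.h>
#include <linux/mm_types.h>
#include <linux/errno.h>

/* Hypothetical helper: pin the mm before relying on its page tables. */
static int example_pin_mm(struct mm_struct *mm)
{
	/*
	 * mmget_not_zero() fails once mm_users has already dropped to zero,
	 * i.e. once the mm is being torn down, so a successful call
	 * guarantees the mm stays alive until the matching mmput().
	 */
	if (!mmget_not_zero(mm))
		return -EFAULT;
	return 0;
}

/* Hypothetical helper: drop the reference taken by example_pin_mm(). */
static void example_unpin_mm(struct mm_struct *mm)
{
	mmput(mm);
}
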
-rw-r--r--  mm/hmm.c  27
1 file changed, 15 insertions, 12 deletions
diff --git a/mm/hmm.c b/mm/hmm.c
index cc8ded733d29..9809435f1df8 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -921,16 +921,16 @@ int hmm_range_register(struct hmm_range *range,
range->hmm = mm->hmm;
kref_get(&range->hmm->kref);
+ /*
+ * We cannot set range->valid to true if hmm_release() has already run.
+ */
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+
range->page_shift = page_shift;
range->start = start;
range->end = end;
- /* Check if hmm_mm_destroy() was call. */
- if (range->hmm->mm == NULL || range->hmm->dead) {
- hmm_put(range->hmm);
- return -EFAULT;
- }
-
/* Initialize range to track CPU page table updates. */
mutex_lock(&range->hmm->lock);
@@ -969,6 +969,7 @@ void hmm_range_unregister(struct hmm_range *range)
/* Drop reference taken by hmm_range_register() */
range->valid = false;
+ mmput(hmm->mm);
hmm_put(hmm);
range->hmm = NULL;
}
@@ -996,9 +997,10 @@ long hmm_range_snapshot(struct hmm_range *range)
struct vm_area_struct *vma;
struct mm_walk mm_walk;
- /* Check if hmm_mm_destroy() was call. */
- if (hmm->mm == NULL || hmm->dead)
- return -EFAULT;
+ /* Caller must hold the mmap_sem, and the range must hold a reference on the mm. */
+ lockdep_assert_held(&hmm->mm->mmap_sem);
+ if (WARN_ON(!atomic_read(&hmm->mm->mm_users)))
+ return -EINVAL;
do {
/* If range is no longer valid force retry. */
@@ -1095,9 +1097,10 @@ long hmm_range_fault(struct hmm_range *range, bool block)
struct mm_walk mm_walk;
int ret;
- /* Check if hmm_mm_destroy() was call. */
- if (hmm->mm == NULL || hmm->dead)
- return -EFAULT;
+ /* Caller must hold the mmap_sem, and the range must hold a reference on the mm. */
+ lockdep_assert_held(&hmm->mm->mmap_sem);
+ if (WARN_ON(!atomic_read(&hmm->mm->mm_users)))
+ return -EINVAL;
do {
/* If range is no longer valid force retry. */
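
A hedged caller-side sketch of the rule the new assertions encode: the range
is registered first (which now takes the mm reference via mmget_not_zero()),
mmap_sem is held for read around the fault, and hmm_range_unregister() drops
the reference. The hmm_range_register() argument order is assumed from the
kernel this patch targets, and the surrounding driver helper is hypothetical.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/sched/mm.h>

/* Hypothetical driver helper faulting in [start, end) of a user mm. */
static long example_fault_range(struct mm_struct *mm, struct hmm_range *range,
				unsigned long start, unsigned long end)
{
	long ret;

	/* Fails with -EFAULT if hmm_release() has already run. */
	ret = hmm_range_register(range, mm, start, end, PAGE_SHIFT);
	if (ret)
		return ret;

	/* Satisfies the lockdep_assert_held() added in this patch. */
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(range, true);
	up_read(&mm->mmap_sem);

	/* Drops the mm reference taken by hmm_range_register(). */
	hmm_range_unregister(range);
	return ret;
}
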