author    Jérôme Glisse <jglisse@redhat.com>  2018-05-21 14:43:08 -0400
committer Jérôme Glisse <jglisse@redhat.com>  2018-05-21 14:43:08 -0400
commit    303da883654fc7746a552cb51236337a120ccd96
tree      23a764db850a49dde4081897c72d0de4e5be49f0
parent    f442841e44e262fba4596caea19c8696728222fd
mm/hmm: invalidate device page table at start of invalidation (hmm-4.19)
Invalidate the device page table at the start of an invalidation, and
invalidate any in-progress CPU page table snapshotting at both the start
and the end of an invalidation. This matters when the device needs to
dirty pages because its device page table reports them as dirty: dirtying
pages must happen in the start mmu notifier callback, not in the end one.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: stable@vger.kernel.org
 mm/hmm.c | 31 ++++++++++++-------------------
 1 file changed, 12 insertions(+), 19 deletions(-)
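To illustrate why the mirrors must be notified from the start callback, here is a
minimal, hypothetical sketch of a driver-side sync_cpu_device_pagetables()
implementation. Only the callback signature comes from the diff below; the
dummy_* names, helpers, and struct are illustrative and do not correspond to a
real driver.

/* Hypothetical driver state wrapping an HMM mirror; illustrative only. */
struct dummy_mirror {
	struct hmm_mirror	mirror;
	/* ... device page table state would live here ... */
};

static void dummy_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					     enum hmm_update_type update,
					     unsigned long start,
					     unsigned long end)
{
	struct dummy_mirror *dmirror;
	unsigned long addr;

	dmirror = container_of(mirror, struct dummy_mirror, mirror);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/* Hypothetical lookup into the device page table. */
		struct page *page = dummy_dev_pte_to_page(dmirror, addr);

		if (!page)
			continue;

		/*
		 * The device page table may be the only place the dirty
		 * state is recorded. It has to be transferred to the CPU
		 * side before core mm frees or writes back the page, which
		 * is only safe from invalidate_range_start(). That is why
		 * this patch calls the mirrors from the start callback.
		 */
		if (dummy_dev_pte_dirty(dmirror, addr))
			set_page_dirty(page);

		/* Tear down the device mapping for this address. */
		dummy_dev_pte_clear(dmirror, addr);
	}
}

static const struct hmm_mirror_ops dummy_mirror_ops = {
	.sync_cpu_device_pagetables = dummy_sync_cpu_device_pagetables,
};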
diff --git a/mm/hmm.c b/mm/hmm.c
index 670163cb42fb..de1cc6088226 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -52,7 +52,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
*
* @mm: mm struct this HMM struct is bound to
* @lock: lock protecting ranges list
- * @sequence: we track updates to the CPU page table with a sequence number
* @ranges: list of range being snapshotted
* @mirrors: list of mirrors for this mm
* @mmu_notifier: mmu notifier to track updates to CPU page table
@@ -61,7 +60,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
struct hmm {
struct mm_struct *mm;
spinlock_t lock;
- atomic_t sequence;
struct list_head ranges;
struct list_head mirrors;
struct mmu_notifier mmu_notifier;
@@ -94,7 +92,6 @@ static struct hmm *hmm_register(struct mm_struct *mm)
return NULL;
INIT_LIST_HEAD(&hmm->mirrors);
init_rwsem(&hmm->mirrors_sem);
- atomic_set(&hmm->sequence, 0);
hmm->mmu_notifier.ops = NULL;
INIT_LIST_HEAD(&hmm->ranges);
spin_lock_init(&hmm->lock);
@@ -138,7 +135,8 @@ void hmm_mm_destroy(struct mm_struct *mm)
static void hmm_invalidate_range(struct hmm *hmm,
enum hmm_update_type action,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool device)
{
struct hmm_mirror *mirror;
struct hmm_range *range;
@@ -158,11 +156,13 @@ static void hmm_invalidate_range(struct hmm *hmm,
}
spin_unlock(&hmm->lock);
- down_read(&hmm->mirrors_sem);
- list_for_each_entry(mirror, &hmm->mirrors, list)
- mirror->ops->sync_cpu_device_pagetables(mirror, action,
- start, end);
- up_read(&hmm->mirrors_sem);
+ if (device) {
+ down_read(&hmm->mirrors_sem);
+ list_for_each_entry(mirror, &hmm->mirrors, list)
+ mirror->ops->sync_cpu_device_pagetables(mirror, action,
+ start, end);
+ up_read(&hmm->mirrors_sem);
+ }
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -196,11 +196,7 @@ static void hmm_invalidate_range_start(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct hmm *hmm = mm->hmm;
-
- VM_BUG_ON(!hmm);
-
- atomic_inc(&hmm->sequence);
+ hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end, true);
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
@@ -208,11 +204,8 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct hmm *hmm = mm->hmm;
-
- VM_BUG_ON(!hmm);
-
- hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
+ hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE,
+ start, end, false);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
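For context (not part of this diff), the notifier ops table that these callbacks
plug into looks roughly like this in mm/hmm.c:

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};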