author    Jérôme Glisse <jglisse@redhat.com>  2019-04-17 13:24:18 -0400
committer Jérôme Glisse <jglisse@redhat.com>  2019-05-22 17:19:11 -0400
commit    e7fe07f1dd5909e16ce6219c45704b07fa15d0ac (patch)
tree      bcb338a550f9292fb3fddc68191d92c3b8e9fd17
parent    f66327493a574f7c100c7c8834d4b96dafa733fa (diff)
block/dev: differentiate GUPed pages from iov_iter_get_pages*()
We want to handle pages coming from GUP (get_user_pages*()) differently,
as they will need to be released through the new put_user_page() function
rather than with put_page().

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-block@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Boaz Harrosh <boaz@plexistor.com>
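For background, put_user_page() pairs with get_user_pages*() the way
put_page() pairs with an ordinary page reference; it was introduced so the
mm layer can account for GUP-pinned pages separately. A minimal sketch of
the intended pairing, under the two-argument put_user_pages_dirty_lock()
API this series uses (example_pin_and_release() is a hypothetical
illustration, not part of the patch):

#include <linux/mm.h>

static int example_pin_and_release(unsigned long uaddr, bool dirty)
{
	struct page *page;
	int ret;

	/* Pin one writable user page via GUP. */
	ret = get_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... direct I/O or DMA into the page ... */

	/*
	 * A GUPed page must not be released with put_page(); the
	 * put_user_page*() helpers keep pinned pages accountable
	 * to the mm layer.
	 */
	if (dirty)
		put_user_pages_dirty_lock(&page, 1);
	else
		put_user_page(page);

	return 0;
}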
 fs/block_dev.c | 51 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7657b6924d44..e4a384d4c26c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -259,11 +259,25 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
}
__set_current_state(TASK_RUNNING);
- bio_for_each_segment_all(bvec, &bio, iter_all) {
- if (should_dirty && !PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
- if (!bio_flagged(&bio, BIO_NO_PAGE_REF))
- put_page(bvec->bv_page);
+ if (iov_iter_get_pages_use_gup(iter)) {
+ /*
+ * This should never happen! An iter that uses GUP takes page
+ * references, which must be released with put_user_page().
+ */
+ BUG_ON(bio_flagged(&bio, BIO_NO_PAGE_REF));
+ bio_for_each_segment_all(bvec, &bio, iter_all) {
+ if (should_dirty && !PageCompound(bvec->bv_page))
+ put_user_pages_dirty_lock(&bvec->bv_page, 1);
+ else
+ put_user_page(bvec->bv_page);
+ }
+ } else {
+ bio_for_each_segment_all(bvec, &bio, iter_all) {
+ if (should_dirty && !PageCompound(bvec->bv_page))
+ set_page_dirty_lock(bvec->bv_page);
+ if (!bio_flagged(&bio, BIO_NO_PAGE_REF))
+ put_page(bvec->bv_page);
+ }
}
if (unlikely(bio.bi_status))
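The hunk above is the heart of the change for the simple single-bio path:
the same bvec walk as before, but pages that came from GUP are released
through put_user_page*() while ordinary pages keep the existing
set_page_dirty_lock()/put_page() path. Condensed into one hypothetical
helper for clarity (bio_release_bvec_pages() is illustrative only, not
part of the patch):

#include <linux/bio.h>
#include <linux/mm.h>

static void bio_release_bvec_pages(struct bio *bio, bool from_gup,
				   bool should_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		/* Compound pages are never dirtied here, as above. */
		bool dirty = should_dirty && !PageCompound(bvec->bv_page);

		if (from_gup) {
			/* GUP reference: only put_user_page*() is valid. */
			if (dirty)
				put_user_pages_dirty_lock(&bvec->bv_page, 1);
			else
				put_user_page(bvec->bv_page);
		} else {
			/* Plain reference from iov_iter_get_pages(). */
			if (dirty)
				set_page_dirty_lock(bvec->bv_page);
			if (!bio_flagged(bio, BIO_NO_PAGE_REF))
				put_page(bvec->bv_page);
		}
	}
}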
@@ -301,7 +315,7 @@ static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}
-static void blkdev_bio_end_io(struct bio *bio)
+static void _blkdev_bio_end_io(struct bio *bio, bool from_gup)
{
struct blkdev_dio *dio = bio->bi_private;
bool should_dirty = dio->should_dirty;
@@ -333,19 +347,24 @@ static void blkdev_bio_end_io(struct bio *bio)
}
if (should_dirty) {
- bio_check_pages_dirty(bio, false);
+ bio_check_pages_dirty(bio, from_gup);
} else {
- if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
- struct bvec_iter_all iter_all;
- struct bio_vec *bvec;
-
- bio_for_each_segment_all(bvec, bio, iter_all)
- put_page(bvec->bv_page);
- }
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ bio_release_pages(bio, from_gup);
bio_put(bio);
}
}
+static void blkdev_bio_end_io(struct bio *bio)
+{
+ _blkdev_bio_end_io(bio, false);
+}
+
+static void blkdev_bio_from_gup_end_io(struct bio *bio)
+{
+ _blkdev_bio_end_io(bio, true);
+}
+
static ssize_t
__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
{
@@ -392,7 +411,9 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio->bi_iter.bi_sector = pos >> 9;
bio->bi_write_hint = iocb->ki_hint;
bio->bi_private = dio;
- bio->bi_end_io = blkdev_bio_end_io;
+ bio->bi_end_io = iov_iter_get_pages_use_gup(iter) ?
+ blkdev_bio_from_gup_end_io :
+ blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
ret = bio_iov_iter_get_pages(bio, iter);
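Viewed from the submission side, the patch encodes the page origin in the
choice of completion callback rather than in a new bio flag. A condensed
sketch of that dispatch (example_submit() is hypothetical;
iov_iter_get_pages_use_gup() comes from earlier in this series):

#include <linux/bio.h>
#include <linux/uio.h>

static void example_submit(struct bio *bio, struct iov_iter *iter)
{
	/*
	 * Decide once, at submission time, how the pages will be
	 * released on completion; no extra per-bio state is needed
	 * because the origin is encoded in the callback itself.
	 */
	bio->bi_end_io = iov_iter_get_pages_use_gup(iter) ?
			 blkdev_bio_from_gup_end_io :	/* put_user_page() */
			 blkdev_bio_end_io;		/* put_page() */
	submit_bio(bio);
}

Using two thin wrappers around _blkdev_bio_end_io() keeps struct bio
unchanged and leaves the shared completion logic in one place.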