author	Pi-Hsun Shih <pihsun@chromium.org>	2019-11-10 07:29:10 +0100
committer	Mauro Carvalho Chehab <mchehab@kernel.org>	2019-11-10 07:29:10 +0100
commit	df4a3e7f88e3b0d7ae46d70b9ff8e3c0ea730785 (patch)
tree	a5d2f4308b8536a97e704d63af4addfbdee99d26 /drivers/media
parent	2df200ab234a86836a8879a05a8007d6b884eb14 (diff)
media: v4l2-ctrl: Lock main_hdl on operations of requests_queued.
There's a race condition between the list_del_init() in v4l2_ctrl_request_complete() and the list_add_tail() in v4l2_ctrl_request_queue(), since they can be called from different threads and the requests_queued list is not protected by a lock. This can leave the v4l2_ctrl_handler on the requests_queued list while request_is_queued is already set to false, which would cause a use-after-free if the v4l2_ctrl_handler is later released.

Fix this by taking the ->lock of main_hdl (the owner of the requests_queued list) around all list operations on the ->requests_queued list.

Signed-off-by: Pi-Hsun Shih <pihsun@chromium.org>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
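For readers unfamiliar with the pattern, the following is a minimal, self-contained userspace sketch of the same idea: the owner's mutex is held around both the list insertion and the list removal, so the "queued" flag can never disagree with the actual list membership. It is purely illustrative and not kernel code; the names (struct owner, struct entry, entry_queue, entry_complete) are made up for the example.

/*
 * Illustrative userspace sketch of the locking pattern applied by this
 * patch: a mutex owned by the list owner protects both the list and the
 * per-entry "is_queued" flag, so they are always updated together.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	struct entry *prev, *next;	/* doubly linked, list-head style */
	bool is_queued;			/* mirrors hdl->request_is_queued */
};

struct owner {
	pthread_mutex_t lock;		/* mirrors main_hdl->lock */
	struct entry queued;		/* list head, mirrors requests_queued */
};

static void list_init(struct entry *e)
{
	e->prev = e->next = e;
}

static void list_add_tail(struct entry *e, struct entry *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_del_init(struct entry *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);
}

/* Queue: insertion and flag update are atomic w.r.t. other threads. */
static void entry_queue(struct owner *o, struct entry *e)
{
	pthread_mutex_lock(&o->lock);
	list_add_tail(e, &o->queued);
	e->is_queued = true;
	pthread_mutex_unlock(&o->lock);
}

/*
 * Complete: removal and flag clear happen under the same lock, so no
 * other thread can observe is_queued == false while the entry is still
 * linked (the window that led to the use-after-free in the driver).
 */
static void entry_complete(struct owner *o, struct entry *e)
{
	pthread_mutex_lock(&o->lock);
	if (e->is_queued) {
		list_del_init(e);
		e->is_queued = false;
	}
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct entry e;

	list_init(&o.queued);
	list_init(&e);
	e.is_queued = false;

	entry_queue(&o, &e);
	entry_complete(&o, &e);
	printf("queued after complete: %d\n", e.is_queued);
	return 0;
}

In the driver the queue and complete paths run from different threads; the important property shown here is that the list and the flag are only ever touched with the owner's lock held, which is exactly what the patch below establishes for requests_queued and request_is_queued.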
Diffstat (limited to 'drivers/media')
-rw-r--r--	drivers/media/v4l2-core/v4l2-ctrls.c	7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index a565ae3ba7e4..2928c5e0a73d 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -3302,6 +3302,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
 	struct v4l2_ctrl_handler *prev_hdl = NULL;
 	struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
 
+	mutex_lock(main_hdl->lock);
 	if (list_empty(&main_hdl->requests_queued))
 		goto queue;
 
@@ -3333,18 +3334,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
 queue:
 	list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
 	hdl->request_is_queued = true;
+	mutex_unlock(main_hdl->lock);
 }
 
 static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
 {
 	struct v4l2_ctrl_handler *hdl =
 		container_of(obj, struct v4l2_ctrl_handler, req_obj);
+	struct v4l2_ctrl_handler *main_hdl = obj->priv;
 
 	list_del_init(&hdl->requests);
+	mutex_lock(main_hdl->lock);
 	if (hdl->request_is_queued) {
 		list_del_init(&hdl->requests_queued);
 		hdl->request_is_queued = false;
 	}
+	mutex_unlock(main_hdl->lock);
 }
 
 static void v4l2_ctrl_request_release(struct media_request_object *obj)
@@ -4298,9 +4303,11 @@ void v4l2_ctrl_request_complete(struct media_request *req,
 		v4l2_ctrl_unlock(ctrl);
 	}
 
+	mutex_lock(main_hdl->lock);
 	WARN_ON(!hdl->request_is_queued);
 	list_del_init(&hdl->requests_queued);
 	hdl->request_is_queued = false;
+	mutex_unlock(main_hdl->lock);
 	media_request_object_complete(obj);
 	media_request_object_put(obj);
 }