diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2013-08-15 00:02:49 +0200 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2013-08-21 13:05:03 +1000 |
commit | d0b2c5334f41bdd18adaa3fbc1f7b5f1daab7eac (patch) | |
tree | 0b62c0062f0571ecaa526cd3e0ff09221ea82726 | |
parent | de9564d8b9e69bf6603521e810d3cb46fa98ad81 (diff) |
drm/prime: Always add exported buffers to the handle cache
... not only when the dma-buf is freshly created. In contrived
examples someone else could have exported/imported the dma-buf already
and handed us the gem object with a flink name. If such an object gets
reexported as a dma_buf we won't have it in the handle cache already,
which breaks the guarantee that for dma-buf imports we always hand
back an existing handle if there is one.
This is exercised by igt/prime_self_import/with_one_bo_two_files
Now if we extend the locked sections just a notch more we can also
plug the racy buf/handle cache setup in handle_to_fd:
If evil userspace races a concurrent gem close against a prime export
operation we can end up tearing down the gem handle before the dma buf
handle cache is set up. When handle_to_fd gets around to adding the
handle to the cache there will be no one left to clean it up,
effectively leaking the bo (and the dma-buf, since the handle cache
holds a ref on the dma-buf):
Thread A Thread B
handle_to_fd:
lookup gem object from handle
creates new dma_buf
gem_close on the same handle
obj->dma_buf is set, but file priv buf
handle cache has no entry
obj->handle_count drops to 0
drm_prime_add_buf_handle sets up the handle cache
-> We have a dma-buf reference in the handle cache, but since the
handle_count of the gem object already dropped to 0 no one will clean
it up. When closing the drm device fd we'll hit the WARN_ON in
drm_prime_destroy_file_private.
The important change is to extend the critical section of the
filp->prime.lock to cover the gem handle lookup. This serializes with
a concurrent gem handle close.
This leak is exercised by igt/prime_self_import/export-vs-gem_close-race
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r-- | drivers/gpu/drm/drm_gem.c | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/drm_prime.c | 81 | ||||
-rw-r--r-- | include/drm/drmP.h | 2 |
3 files changed, 53 insertions, 36 deletions
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 0a5a0ca0a52e..1ce88c3301a1 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -195,10 +195,12 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) | |||
195 | * Note: obj->dma_buf can't disappear as long as we still hold a | 195 | * Note: obj->dma_buf can't disappear as long as we still hold a |
196 | * handle reference in obj->handle_count. | 196 | * handle reference in obj->handle_count. |
197 | */ | 197 | */ |
198 | mutex_lock(&filp->prime.lock); | ||
198 | if (obj->dma_buf) { | 199 | if (obj->dma_buf) { |
199 | drm_prime_remove_buf_handle(&filp->prime, | 200 | drm_prime_remove_buf_handle_locked(&filp->prime, |
200 | obj->dma_buf); | 201 | obj->dma_buf); |
201 | } | 202 | } |
203 | mutex_unlock(&filp->prime.lock); | ||
202 | } | 204 | } |
203 | 205 | ||
204 | static void drm_gem_object_ref_bug(struct kref *list_kref) | 206 | static void drm_gem_object_ref_bug(struct kref *list_kref) |
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index ed1ea5c1a9ca..7ae2bfcab70e 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c | |||
@@ -83,6 +83,19 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, | |||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, | ||
87 | uint32_t handle) | ||
88 | { | ||
89 | struct drm_prime_member *member; | ||
90 | |||
91 | list_for_each_entry(member, &prime_fpriv->head, entry) { | ||
92 | if (member->handle == handle) | ||
93 | return member->dma_buf; | ||
94 | } | ||
95 | |||
96 | return NULL; | ||
97 | } | ||
98 | |||
86 | static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, | 99 | static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, |
87 | struct dma_buf *dma_buf, | 100 | struct dma_buf *dma_buf, |
88 | uint32_t *handle) | 101 | uint32_t *handle) |
@@ -146,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf, | |||
146 | attach->priv = NULL; | 159 | attach->priv = NULL; |
147 | } | 160 | } |
148 | 161 | ||
149 | static void drm_prime_remove_buf_handle_locked( | 162 | void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, |
150 | struct drm_prime_file_private *prime_fpriv, | 163 | struct dma_buf *dma_buf) |
151 | struct dma_buf *dma_buf) | ||
152 | { | 164 | { |
153 | struct drm_prime_member *member, *safe; | 165 | struct drm_prime_member *member, *safe; |
154 | 166 | ||
@@ -337,6 +349,8 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, | |||
337 | */ | 349 | */ |
338 | obj->dma_buf = dmabuf; | 350 | obj->dma_buf = dmabuf; |
339 | get_dma_buf(obj->dma_buf); | 351 | get_dma_buf(obj->dma_buf); |
352 | /* Grab a new ref since the callers is now used by the dma-buf */ | ||
353 | drm_gem_object_reference(obj); | ||
340 | 354 | ||
341 | return dmabuf; | 355 | return dmabuf; |
342 | } | 356 | } |
@@ -349,10 +363,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, | |||
349 | int ret = 0; | 363 | int ret = 0; |
350 | struct dma_buf *dmabuf; | 364 | struct dma_buf *dmabuf; |
351 | 365 | ||
366 | mutex_lock(&file_priv->prime.lock); | ||
352 | obj = drm_gem_object_lookup(dev, file_priv, handle); | 367 | obj = drm_gem_object_lookup(dev, file_priv, handle); |
353 | if (!obj) | 368 | if (!obj) { |
354 | return -ENOENT; | 369 | ret = -ENOENT; |
370 | goto out_unlock; | ||
371 | } | ||
372 | |||
373 | dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); | ||
374 | if (dmabuf) { | ||
375 | get_dma_buf(dmabuf); | ||
376 | goto out_have_handle; | ||
377 | } | ||
355 | 378 | ||
379 | mutex_lock(&dev->object_name_lock); | ||
356 | /* re-export the original imported object */ | 380 | /* re-export the original imported object */ |
357 | if (obj->import_attach) { | 381 | if (obj->import_attach) { |
358 | dmabuf = obj->import_attach->dmabuf; | 382 | dmabuf = obj->import_attach->dmabuf; |
@@ -360,45 +384,45 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, | |||
360 | goto out_have_obj; | 384 | goto out_have_obj; |
361 | } | 385 | } |
362 | 386 | ||
363 | mutex_lock(&dev->object_name_lock); | ||
364 | if (obj->dma_buf) { | 387 | if (obj->dma_buf) { |
365 | get_dma_buf(obj->dma_buf); | 388 | get_dma_buf(obj->dma_buf); |
366 | dmabuf = obj->dma_buf; | 389 | dmabuf = obj->dma_buf; |
367 | mutex_unlock(&dev->object_name_lock); | ||
368 | goto out_have_obj; | 390 | goto out_have_obj; |
369 | } | 391 | } |
370 | 392 | ||
371 | dmabuf = export_and_register_object(dev, obj, flags); | 393 | dmabuf = export_and_register_object(dev, obj, flags); |
372 | mutex_unlock(&dev->object_name_lock); | ||
373 | if (IS_ERR(dmabuf)) { | 394 | if (IS_ERR(dmabuf)) { |
374 | /* normally the created dma-buf takes ownership of the ref, | 395 | /* normally the created dma-buf takes ownership of the ref, |
375 | * but if that fails then drop the ref | 396 | * but if that fails then drop the ref |
376 | */ | 397 | */ |
377 | ret = PTR_ERR(dmabuf); | 398 | ret = PTR_ERR(dmabuf); |
399 | mutex_unlock(&dev->object_name_lock); | ||
378 | goto out; | 400 | goto out; |
379 | } | 401 | } |
380 | 402 | ||
381 | mutex_lock(&file_priv->prime.lock); | 403 | out_have_obj: |
382 | /* if we've exported this buffer the cheat and add it to the import list | 404 | /* |
383 | * so we get the correct handle back | 405 | * If we've exported this buffer then cheat and add it to the import list |
406 | * so we get the correct handle back. We must do this under the | ||
407 | * protection of dev->object_name_lock to ensure that a racing gem close | ||
408 | * ioctl doesn't miss to remove this buffer handle from the cache. | ||
384 | */ | 409 | */ |
385 | ret = drm_prime_add_buf_handle(&file_priv->prime, | 410 | ret = drm_prime_add_buf_handle(&file_priv->prime, |
386 | dmabuf, handle); | 411 | dmabuf, handle); |
412 | mutex_unlock(&dev->object_name_lock); | ||
387 | if (ret) | 413 | if (ret) |
388 | goto fail_put_dmabuf; | 414 | goto fail_put_dmabuf; |
389 | 415 | ||
416 | out_have_handle: | ||
390 | ret = dma_buf_fd(dmabuf, flags); | 417 | ret = dma_buf_fd(dmabuf, flags); |
391 | if (ret < 0) | 418 | /* |
392 | goto fail_rm_handle; | 419 | * We must _not_ remove the buffer from the handle cache since the newly |
393 | 420 | * created dma buf is already linked in the global obj->dma_buf pointer, | |
394 | *prime_fd = ret; | 421 | * and that is invariant as long as a userspace gem handle exists. |
395 | mutex_unlock(&file_priv->prime.lock); | 422 | * Closing the handle will clean out the cache anyway, so we don't leak. |
396 | return 0; | 423 | */ |
397 | |||
398 | out_have_obj: | ||
399 | ret = dma_buf_fd(dmabuf, flags); | ||
400 | if (ret < 0) { | 424 | if (ret < 0) { |
401 | dma_buf_put(dmabuf); | 425 | goto fail_put_dmabuf; |
402 | } else { | 426 | } else { |
403 | *prime_fd = ret; | 427 | *prime_fd = ret; |
404 | ret = 0; | 428 | ret = 0; |
@@ -406,14 +430,13 @@ out_have_obj: | |||
406 | 430 | ||
407 | goto out; | 431 | goto out; |
408 | 432 | ||
409 | fail_rm_handle: | ||
410 | drm_prime_remove_buf_handle_locked(&file_priv->prime, | ||
411 | dmabuf); | ||
412 | mutex_unlock(&file_priv->prime.lock); | ||
413 | fail_put_dmabuf: | 433 | fail_put_dmabuf: |
414 | dma_buf_put(dmabuf); | 434 | dma_buf_put(dmabuf); |
415 | out: | 435 | out: |
416 | drm_gem_object_unreference_unlocked(obj); | 436 | drm_gem_object_unreference_unlocked(obj); |
437 | out_unlock: | ||
438 | mutex_unlock(&file_priv->prime.lock); | ||
439 | |||
417 | return ret; | 440 | return ret; |
418 | } | 441 | } |
419 | EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); | 442 | EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); |
@@ -669,11 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) | |||
669 | WARN_ON(!list_empty(&prime_fpriv->head)); | 692 | WARN_ON(!list_empty(&prime_fpriv->head)); |
670 | } | 693 | } |
671 | EXPORT_SYMBOL(drm_prime_destroy_file_private); | 694 | EXPORT_SYMBOL(drm_prime_destroy_file_private); |
672 | |||
673 | void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) | ||
674 | { | ||
675 | mutex_lock(&prime_fpriv->lock); | ||
676 | drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf); | ||
677 | mutex_unlock(&prime_fpriv->lock); | ||
678 | } | ||
679 | EXPORT_SYMBOL(drm_prime_remove_buf_handle); | ||
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5914cc5c3fa6..90833dccc919 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -1508,7 +1508,7 @@ int drm_gem_dumb_destroy(struct drm_file *file, | |||
1508 | 1508 | ||
1509 | void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); | 1509 | void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); |
1510 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); | 1510 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); |
1511 | void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); | 1511 | void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); |
1512 | 1512 | ||
1513 | #if DRM_DEBUG_CODE | 1513 | #if DRM_DEBUG_CODE |
1514 | extern int drm_vma_info(struct seq_file *m, void *data); | 1514 | extern int drm_vma_info(struct seq_file *m, void *data); |