diff options
author | Michel Dänzer <michel.daenzer@amd.com> | 2016-04-01 15:29:26 +0900 |
---|---|---|
committer | Michel Dänzer <michel.daenzer@amd.com> | 2016-04-01 15:29:26 +0900 |
commit | 5ba95c3abeb8df82aa8d33a47596eae6403ea7af (patch) | |
tree | 15c5bc7bbdb7263e893cbb061c51c083496b4d06 | |
parent | 8ecfa69b5a833bd4c39e773a6acfd7eef9144d13 (diff) |
Identify DRM event queue entries by sequence number instead of by pointer
If the memory for an entry was allocated at the same address as that for
a previously cancelled entry, the handler could theoretically be called
prematurely, triggered by the DRM event which was submitted for the
cancelled entry.
(Ported from radeon commit 4693b1bd5b5c381e8b7b68a6f7f0c6696d6a68df)
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | src/amdgpu_dri2.c | 47 | ||||
-rw-r--r-- | src/amdgpu_drm_queue.c | 24 | ||||
-rw-r--r-- | src/amdgpu_drm_queue.h | 12 | ||||
-rw-r--r-- | src/amdgpu_kms.c | 36 | ||||
-rw-r--r-- | src/amdgpu_present.c | 17 | ||||
-rw-r--r-- | src/drmmode_display.c | 20 |
6 files changed, 84 insertions(+), 72 deletions(-)
diff --git a/src/amdgpu_dri2.c b/src/amdgpu_dri2.c index 4478b16..29f60ba 100644 --- a/src/amdgpu_dri2.c +++ b/src/amdgpu_dri2.c @@ -395,7 +395,7 @@ typedef struct _DRI2FrameEvent { unsigned frame; xf86CrtcPtr crtc; OsTimerPtr timer; - struct amdgpu_drm_queue_entry *drm_queue; + uintptr_t drm_queue_seq; /* for swaps & flips only */ DRI2SwapEventPtr event_complete; @@ -961,8 +961,8 @@ CARD32 amdgpu_dri2_deferred_event(OsTimerPtr timer, CARD32 now, pointer data) */ if (!event_info->crtc) { ErrorF("%s no crtc\n", __func__); - if (event_info->drm_queue) - amdgpu_drm_abort_entry(event_info->drm_queue); + if (event_info->drm_queue_seq) + amdgpu_drm_abort_entry(event_info->drm_queue_seq); else amdgpu_dri2_frame_event_abort(NULL, data); return 0; @@ -974,9 +974,9 @@ CARD32 amdgpu_dri2_deferred_event(OsTimerPtr timer, CARD32 now, pointer data) if (ret) { xf86DrvMsg(scrn->scrnIndex, X_ERROR, "%s cannot get current time\n", __func__); - if (event_info->drm_queue) + if (event_info->drm_queue_seq) amdgpu_drm_queue_handler(pAMDGPUEnt->fd, 0, 0, 0, - event_info->drm_queue); + (void*)event_info->drm_queue_seq); else amdgpu_dri2_frame_event_handler(crtc, 0, 0, data); return 0; @@ -990,9 +990,10 @@ CARD32 amdgpu_dri2_deferred_event(OsTimerPtr timer, CARD32 now, pointer data) delta_seq = delta_t * drmmode_crtc->dpms_last_fps; delta_seq /= 1000000; frame = (CARD64) drmmode_crtc->dpms_last_seq + delta_seq; - if (event_info->drm_queue) + if (event_info->drm_queue_seq) amdgpu_drm_queue_handler(pAMDGPUEnt->fd, frame, drm_now / 1000000, - drm_now % 1000000, event_info->drm_queue); + drm_now % 1000000, + (void*)event_info->drm_queue_seq); else amdgpu_dri2_frame_event_handler(crtc, frame, drm_now, data); return 0; @@ -1023,7 +1024,7 @@ static int amdgpu_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw, ScrnInfoPtr scrn = xf86ScreenToScrn(screen); AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn); DRI2FrameEventPtr wait_info = NULL; - struct amdgpu_drm_queue_entry *wait = NULL; + 
uintptr_t drm_queue_seq = 0; xf86CrtcPtr crtc = amdgpu_dri2_drawable_crtc(draw, TRUE); uint32_t msc_delta; drmVBlank vbl; @@ -1079,15 +1080,15 @@ static int amdgpu_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw, current_msc = vbl.reply.sequence + msc_delta; current_msc &= 0xffffffff; - wait = amdgpu_drm_queue_alloc(crtc, client, AMDGPU_DRM_QUEUE_ID_DEFAULT, - wait_info, amdgpu_dri2_frame_event_handler, - amdgpu_dri2_frame_event_abort); - if (!wait) { + drm_queue_seq = amdgpu_drm_queue_alloc(crtc, client, AMDGPU_DRM_QUEUE_ID_DEFAULT, + wait_info, amdgpu_dri2_frame_event_handler, + amdgpu_dri2_frame_event_abort); + if (!drm_queue_seq) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Allocating DRM queue event entry failed.\n"); goto out_complete; } - wait_info->drm_queue = wait; + wait_info->drm_queue_seq = drm_queue_seq; /* * If divisor is zero, or current_msc is smaller than target_msc, @@ -1106,7 +1107,7 @@ static int amdgpu_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw, vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT; vbl.request.type |= amdgpu_populate_vbl_request_type(crtc); vbl.request.sequence = target_msc - msc_delta; - vbl.request.signal = (unsigned long)wait; + vbl.request.signal = drm_queue_seq; ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl); if (ret) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, @@ -1138,7 +1139,7 @@ static int amdgpu_dri2_schedule_wait_msc(ClientPtr client, DrawablePtr draw, if ((current_msc % divisor) >= remainder) vbl.request.sequence += divisor; - vbl.request.signal = (unsigned long)wait; + vbl.request.signal = drm_queue_seq; ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl); if (ret) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, @@ -1190,7 +1191,7 @@ static int amdgpu_dri2_schedule_swap(ClientPtr client, DrawablePtr draw, drmVBlank vbl; int ret, flip = 0; DRI2FrameEventPtr swap_info = NULL; - struct amdgpu_drm_queue_entry *swap; + uintptr_t drm_queue_seq; CARD64 current_msc; BoxRec box; RegionRec region; @@ -1227,15 +1228,15 @@ 
static int amdgpu_dri2_schedule_swap(ClientPtr client, DrawablePtr draw, swap_info->back = back; swap_info->crtc = crtc; - swap = amdgpu_drm_queue_alloc(crtc, client, AMDGPU_DRM_QUEUE_ID_DEFAULT, - swap_info, amdgpu_dri2_frame_event_handler, - amdgpu_dri2_frame_event_abort); - if (!swap) { + drm_queue_seq = amdgpu_drm_queue_alloc(crtc, client, AMDGPU_DRM_QUEUE_ID_DEFAULT, + swap_info, amdgpu_dri2_frame_event_handler, + amdgpu_dri2_frame_event_abort); + if (!drm_queue_seq) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Allocating DRM queue entry failed.\n"); goto blit_fallback; } - swap_info->drm_queue = swap; + swap_info->drm_queue_seq = drm_queue_seq; /* * CRTC is in DPMS off state, fallback to blit, but calculate @@ -1304,7 +1305,7 @@ static int amdgpu_dri2_schedule_swap(ClientPtr client, DrawablePtr draw, *target_msc = current_msc; vbl.request.sequence = *target_msc - msc_delta; - vbl.request.signal = (unsigned long)swap; + vbl.request.signal = drm_queue_seq; ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl); if (ret) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, @@ -1350,7 +1351,7 @@ static int amdgpu_dri2_schedule_swap(ClientPtr client, DrawablePtr draw, /* Account for 1 frame extra pageflip delay if flip > 0 */ vbl.request.sequence -= flip; - vbl.request.signal = (unsigned long)swap; + vbl.request.signal = drm_queue_seq; ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl); if (ret) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, diff --git a/src/amdgpu_drm_queue.c b/src/amdgpu_drm_queue.c index 11b74a0..562a11a 100644 --- a/src/amdgpu_drm_queue.c +++ b/src/amdgpu_drm_queue.c @@ -40,6 +40,7 @@ struct amdgpu_drm_queue_entry { struct xorg_list list; uint64_t id; + uintptr_t seq; void *data; ClientPtr client; xf86CrtcPtr crtc; @@ -49,6 +50,7 @@ struct amdgpu_drm_queue_entry { static int amdgpu_drm_queue_refcnt; static struct xorg_list amdgpu_drm_queue; +static uintptr_t amdgpu_drm_queue_seq; /* @@ -58,11 +60,11 @@ void amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec, unsigned 
int usec, void *user_ptr) { - struct amdgpu_drm_queue_entry *user_data = user_ptr; + uintptr_t seq = (uintptr_t)user_ptr; struct amdgpu_drm_queue_entry *e, *tmp; xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) { - if (e == user_data) { + if (e->seq == seq) { xorg_list_del(&e->list); if (e->handler) e->handler(e->crtc, frame, @@ -80,7 +82,7 @@ amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec, * Enqueue a potential drm response; when the associated response * appears, we've got data to pass to the handler from here */ -struct amdgpu_drm_queue_entry * +uintptr_t amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client, uint64_t id, void *data, amdgpu_drm_handler_proc handler, @@ -92,6 +94,9 @@ amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client, if (!e) return NULL; + if (!amdgpu_drm_queue_seq) + amdgpu_drm_queue_seq = 1; + e->seq = amdgpu_drm_queue_seq++; e->client = client; e->crtc = crtc; e->id = id; @@ -101,7 +106,7 @@ amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client, xorg_list_add(&e->list, &amdgpu_drm_queue); - return e; + return e->seq; } /* @@ -139,9 +144,16 @@ amdgpu_drm_abort_client(ClientPtr client) * Abort specific drm queue entry */ void -amdgpu_drm_abort_entry(struct amdgpu_drm_queue_entry *entry) +amdgpu_drm_abort_entry(uintptr_t seq) { - amdgpu_drm_abort_one(entry); + struct amdgpu_drm_queue_entry *e, *tmp; + + xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) { + if (e->seq == seq) { + amdgpu_drm_abort_one(e); + break; + } + } } /* diff --git a/src/amdgpu_drm_queue.h b/src/amdgpu_drm_queue.h index 892ba13..b4d4009 100644 --- a/src/amdgpu_drm_queue.h +++ b/src/amdgpu_drm_queue.h @@ -43,14 +43,12 @@ typedef void (*amdgpu_drm_abort_proc)(xf86CrtcPtr crtc, void *data); void amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int tv_sec, unsigned int tv_usec, void *user_ptr); -struct amdgpu_drm_queue_entry *amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, - ClientPtr client, - uint64_t id, 
- void *data, - amdgpu_drm_handler_proc handler, - amdgpu_drm_abort_proc abort); +uintptr_t amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client, + uint64_t id, void *data, + amdgpu_drm_handler_proc handler, + amdgpu_drm_abort_proc abort); void amdgpu_drm_abort_client(ClientPtr client); -void amdgpu_drm_abort_entry(struct amdgpu_drm_queue_entry *entry); +void amdgpu_drm_abort_entry(uintptr_t seq); void amdgpu_drm_abort_id(uint64_t id); void amdgpu_drm_queue_init(); void amdgpu_drm_queue_close(ScrnInfoPtr scrn); diff --git a/src/amdgpu_kms.c b/src/amdgpu_kms.c index 6dcec69..ae98cf1 100644 --- a/src/amdgpu_kms.c +++ b/src/amdgpu_kms.c @@ -397,7 +397,7 @@ static void amdgpu_scanout_update(xf86CrtcPtr xf86_crtc) { drmmode_crtc_private_ptr drmmode_crtc = xf86_crtc->driver_private; - struct amdgpu_drm_queue_entry *drm_queue_entry; + uintptr_t drm_queue_seq; ScrnInfoPtr scrn; AMDGPUEntPtr pAMDGPUEnt; drmVBlank vbl; @@ -427,13 +427,13 @@ amdgpu_scanout_update(xf86CrtcPtr xf86_crtc) return; scrn = xf86_crtc->scrn; - drm_queue_entry = amdgpu_drm_queue_alloc(xf86_crtc, - AMDGPU_DRM_QUEUE_CLIENT_DEFAULT, - AMDGPU_DRM_QUEUE_ID_DEFAULT, - drmmode_crtc, - amdgpu_scanout_update_handler, - amdgpu_scanout_update_abort); - if (!drm_queue_entry) { + drm_queue_seq = amdgpu_drm_queue_alloc(xf86_crtc, + AMDGPU_DRM_QUEUE_CLIENT_DEFAULT, + AMDGPU_DRM_QUEUE_ID_DEFAULT, + drmmode_crtc, + amdgpu_scanout_update_handler, + amdgpu_scanout_update_abort); + if (!drm_queue_seq) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "amdgpu_drm_queue_alloc failed for scanout update\n"); return; @@ -443,12 +443,12 @@ amdgpu_scanout_update(xf86CrtcPtr xf86_crtc) vbl.request.type = DRM_VBLANK_RELATIVE | DRM_VBLANK_EVENT; vbl.request.type |= amdgpu_populate_vbl_request_type(xf86_crtc); vbl.request.sequence = 1; - vbl.request.signal = (unsigned long)drm_queue_entry; + vbl.request.signal = drm_queue_seq; if (drmWaitVBlank(pAMDGPUEnt->fd, &vbl)) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "drmWaitVBlank failed for 
scanout update: %s\n", strerror(errno)); - amdgpu_drm_abort_entry(drm_queue_entry); + amdgpu_drm_abort_entry(drm_queue_seq); return; } @@ -471,7 +471,7 @@ amdgpu_scanout_flip(ScreenPtr pScreen, AMDGPUInfoPtr info, drmmode_crtc_private_ptr drmmode_crtc = xf86_crtc->driver_private; ScrnInfoPtr scrn; AMDGPUEntPtr pAMDGPUEnt; - struct amdgpu_drm_queue_entry *drm_queue_entry; + uintptr_t drm_queue_seq; unsigned scanout_id; if (drmmode_crtc->scanout_update_pending) @@ -482,12 +482,12 @@ amdgpu_scanout_flip(ScreenPtr pScreen, AMDGPUInfoPtr info, return; scrn = xf86_crtc->scrn; - drm_queue_entry = amdgpu_drm_queue_alloc(xf86_crtc, - AMDGPU_DRM_QUEUE_CLIENT_DEFAULT, - AMDGPU_DRM_QUEUE_ID_DEFAULT, - drmmode_crtc, NULL, - amdgpu_scanout_flip_abort); - if (!drm_queue_entry) { + drm_queue_seq = amdgpu_drm_queue_alloc(xf86_crtc, + AMDGPU_DRM_QUEUE_CLIENT_DEFAULT, + AMDGPU_DRM_QUEUE_ID_DEFAULT, + drmmode_crtc, NULL, + amdgpu_scanout_flip_abort); + if (!drm_queue_seq) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Allocating DRM event queue entry failed.\n"); return; @@ -496,7 +496,7 @@ amdgpu_scanout_flip(ScreenPtr pScreen, AMDGPUInfoPtr info, pAMDGPUEnt = AMDGPUEntPriv(scrn); if (drmModePageFlip(pAMDGPUEnt->fd, drmmode_crtc->mode_crtc->crtc_id, drmmode_crtc->scanout[scanout_id].fb_id, - DRM_MODE_PAGE_FLIP_EVENT, drm_queue_entry)) { + DRM_MODE_PAGE_FLIP_EVENT, (void*)drm_queue_seq)) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "flip queue failed in %s: %s\n", __func__, strerror(errno)); return; diff --git a/src/amdgpu_present.c b/src/amdgpu_present.c index 4b33ce2..4aa0708 100644 --- a/src/amdgpu_present.c +++ b/src/amdgpu_present.c @@ -157,7 +157,7 @@ amdgpu_present_queue_vblank(RRCrtcPtr crtc, uint64_t event_id, uint64_t msc) AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(scrn); int crtc_id = drmmode_get_crtc_id(xf86_crtc); struct amdgpu_present_vblank_event *event; - struct amdgpu_drm_queue_entry *queue; + uintptr_t drm_queue_seq; drmVBlank vbl; int ret; @@ -165,24 +165,25 @@ 
amdgpu_present_queue_vblank(RRCrtcPtr crtc, uint64_t event_id, uint64_t msc) if (!event) return BadAlloc; event->event_id = event_id; - queue = amdgpu_drm_queue_alloc(xf86_crtc, AMDGPU_DRM_QUEUE_CLIENT_DEFAULT, - event_id, event, - amdgpu_present_vblank_handler, - amdgpu_present_vblank_abort); - if (!queue) { + drm_queue_seq = amdgpu_drm_queue_alloc(xf86_crtc, + AMDGPU_DRM_QUEUE_CLIENT_DEFAULT, + event_id, event, + amdgpu_present_vblank_handler, + amdgpu_present_vblank_abort); + if (!drm_queue_seq) { free(event); return BadAlloc; } vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | crtc_select(crtc_id); vbl.request.sequence = msc; - vbl.request.signal = (unsigned long)queue; + vbl.request.signal = drm_queue_seq; for (;;) { ret = drmWaitVBlank(pAMDGPUEnt->fd, &vbl); if (!ret) break; if (errno != EBUSY || !amdgpu_present_flush_drm_events(screen)) { - amdgpu_drm_abort_entry(queue); + amdgpu_drm_abort_entry(drm_queue_seq); return BadAlloc; } } diff --git a/src/drmmode_display.c b/src/drmmode_display.c index 2aea542..07ae9b2 100644 --- a/src/drmmode_display.c +++ b/src/drmmode_display.c @@ -2504,7 +2504,7 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client, drmmode_ptr drmmode = drmmode_crtc->drmmode; int i; drmmode_flipdata_ptr flipdata; - struct amdgpu_drm_queue_entry *drm_queue = NULL; + uintptr_t drm_queue_seq = 0; uint32_t new_front_handle; if (!amdgpu_pixmap_get_handle(new_front, &new_front_handle)) { @@ -2559,11 +2559,11 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client, if (drmmode_crtc->hw_id == ref_crtc_hw_id) flipdata->fe_crtc = crtc; - drm_queue = amdgpu_drm_queue_alloc(crtc, client, id, - flipdata, - drmmode_flip_handler, - drmmode_flip_abort); - if (!drm_queue) { + drm_queue_seq = amdgpu_drm_queue_alloc(crtc, client, id, + flipdata, + drmmode_flip_handler, + drmmode_flip_abort); + if (!drm_queue_seq) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Allocating DRM queue event entry failed.\n"); goto error; @@ -2571,13 +2571,13 @@ 
Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client, if (drmModePageFlip(pAMDGPUEnt->fd, drmmode_crtc->mode_crtc->crtc_id, drmmode->fb_id, DRM_MODE_PAGE_FLIP_EVENT, - drm_queue)) { + (void*)drm_queue_seq)) { xf86DrvMsg(scrn->scrnIndex, X_WARNING, "flip queue failed: %s\n", strerror(errno)); goto error; } drmmode_crtc->flip_pending = TRUE; - drm_queue = NULL; + drm_queue_seq = 0; } if (flipdata->flip_count > 0) @@ -2589,8 +2589,8 @@ error: drmmode->fb_id = flipdata->old_fb_id; } - if (drm_queue) - amdgpu_drm_abort_entry(drm_queue); + if (drm_queue_seq) + amdgpu_drm_abort_entry(drm_queue_seq); else if (crtc) drmmode_flip_abort(crtc, flipdata); else if (flipdata && flipdata->flip_count <= 1) |