author     Jamey Sharp <jamey@minilop.net>    2010-04-16 20:18:28 -0700
committer  Jamey Sharp <jamey@minilop.net>    2010-04-18 01:28:38 -0700
commit     933aee1d5c53b0cc7d608011a29188b594c8d70b (patch)
tree       2ac0431b4b79b909c8e0564a0b8d134f0d44c325
parent     aab43278ae619eb57d2dd9c7396f460f078588fc (diff)
Fix Xlib/XCB for multi-threaded applications (with caveats).
Rather than trying to group all response processing in one monolithic
process_responses function, let _XEventsQueued, _XReadEvents, and _XReply
each do their own thing with a minimum of code that can all be reasoned
about independently.

Tested with `ico -threads 20`, which seems to be able to make many
icosahedrons dance at once quite nicely now.

Caveats:

- Anything that was not thread-safe in Xlib before XCB probably still
  isn't. XListFontsWithInfo, for instance.

- If one thread is waiting for events and another thread tries to read a
  reply, both will hang until an event arrives. Previously, if this
  happened it might work sometimes, but otherwise would trigger either an
  assertion failure or a permanent hang.

- Versions of libxcb up to and including 1.6 have a bug that can cause
  xcb_wait_for_event or xcb_wait_for_reply to hang if they run
  concurrently with xcb_writev or other writers. So you'll want that fix
  as well.

Signed-off-by: Jamey Sharp <jamey@minilop.net>
Reviewed-by: Josh Triplett <josh@freedesktop.org>
-rw-r--r--    src/Xxcbint.h      6
-rw-r--r--    src/xcb_disp.c     5
-rw-r--r--    src/xcb_io.c     344
3 files changed, 233 insertions, 122 deletions
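
The heart of the change is a hand-off pattern: for each kind of response, at most one thread blocks inside XCB at a time, and every other thread sleeps on a condition variable until that waiter returns, clears its flag, and broadcasts. Below is a minimal standalone sketch of that pattern in plain pthreads; the names queue_lock, response_ready, waiter_active, and fetch_response are illustrative only and are not Xlib or XCB identifiers.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  response_ready = PTHREAD_COND_INITIALIZER;
static bool waiter_active;   /* plays the role of event_waiter / reply_waiter */

/* Stand-in for a blocking XCB call such as xcb_wait_for_event(). */
static void *fetch_response(void)
{
    return NULL;
}

void *wait_for_response(void)
{
    void *r;

    pthread_mutex_lock(&queue_lock);
    while (waiter_active) {
        /* Another thread owns the blocking read; sleep until it hands off. */
        pthread_cond_wait(&response_ready, &queue_lock);
    }
    waiter_active = true;

    /* Drop the lock while blocked in I/O so other threads can keep going. */
    pthread_mutex_unlock(&queue_lock);
    r = fetch_response();
    pthread_mutex_lock(&queue_lock);

    waiter_active = false;
    pthread_cond_broadcast(&response_ready);   /* wake the threads parked above */
    pthread_mutex_unlock(&queue_lock);
    return r;
}

In the patch itself the mutex is the Display lock (UnlockDisplay / InternalLockDisplay), the flags are event_waiter and the new per-request reply_waiter, and the condition variables are dpy->xcb->event_notify and the new dpy->xcb->reply_notify; a woken thread re-checks whether the response it wanted has already been handled before deciding to block again.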
diff --git a/src/Xxcbint.h b/src/Xxcbint.h
index 8b6a3619..c8191620 100644
--- a/src/Xxcbint.h
+++ b/src/Xxcbint.h
@@ -16,6 +16,7 @@ typedef struct PendingRequest PendingRequest;
struct PendingRequest {
PendingRequest *next;
unsigned long sequence;
+ unsigned reply_waiter;
};
typedef struct _X11XCBPrivate {
@@ -31,11 +32,10 @@ typedef struct _X11XCBPrivate {
enum XEventQueueOwner event_owner;
XID next_xid;
- /* handle simultaneous threads waiting for events,
- * used in wait_or_poll_for_event
- */
+ /* handle simultaneous threads waiting for responses */
xcondition_t event_notify;
int event_waiter;
+ xcondition_t reply_notify;
} _X11XCBPrivate;
/* xcb_disp.c */
diff --git a/src/xcb_disp.c b/src/xcb_disp.c
index 622afe76..fea1326c 100644
--- a/src/xcb_disp.c
+++ b/src/xcb_disp.c
@@ -98,9 +98,11 @@ int _XConnectXCB(Display *dpy, _Xconst char *display, char **fullnamep, int *scr
dpy->xcb->next_xid = xcb_generate_id(dpy->xcb->connection);
dpy->xcb->event_notify = xcondition_malloc();
- if (!dpy->xcb->event_notify)
+ dpy->xcb->reply_notify = xcondition_malloc();
+ if (!dpy->xcb->event_notify || !dpy->xcb->reply_notify)
return 0;
xcondition_init(dpy->xcb->event_notify);
+ xcondition_init(dpy->xcb->reply_notify);
return !xcb_connection_has_error(c);
}
@@ -115,5 +117,6 @@ void _XFreeX11XCBStructure(Display *dpy)
free(tmp);
}
xcondition_free(dpy->xcb->event_notify);
+ xcondition_free(dpy->xcb->reply_notify);
Xfree(dpy->xcb);
}
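
The xcondition_* calls above are Xlib's portability wrappers around a heap-allocated condition variable. As a rough sketch, with pthreads standing in for the wrapper layer (not the actual wrapper implementation), the lifecycle this hunk gives the new reply_notify alongside event_notify looks like this:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for dpy->xcb->event_notify and reply_notify. */
static pthread_cond_t *event_notify;
static pthread_cond_t *reply_notify;

/* Mirrors the _XConnectXCB hunk: allocate both, fail if either allocation
 * failed, then initialize both. */
static int setup_notifications(void)
{
    event_notify = malloc(sizeof *event_notify);
    reply_notify = malloc(sizeof *reply_notify);
    if (!event_notify || !reply_notify)
        return 0;
    pthread_cond_init(event_notify, NULL);
    pthread_cond_init(reply_notify, NULL);
    return 1;
}

/* Mirrors the _XFreeX11XCBStructure hunk: release both conditions. */
static void teardown_notifications(void)
{
    pthread_cond_destroy(event_notify);
    pthread_cond_destroy(reply_notify);
    free(event_notify);
    free(reply_notify);
}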
diff --git a/src/xcb_io.c b/src/xcb_io.c
index 19dc6a97..dac7622c 100644
--- a/src/xcb_io.c
+++ b/src/xcb_io.c
@@ -122,6 +122,7 @@ static PendingRequest *append_pending_request(Display *dpy, unsigned long sequen
assert(node);
node->next = NULL;
node->sequence = sequence;
+ node->reply_waiter = 0;
if(dpy->xcb->pending_requests_tail)
{
assert(XLIB_SEQUENCE_COMPARE(dpy->xcb->pending_requests_tail->sequence, <, node->sequence));
@@ -166,43 +167,6 @@ static int handle_error(Display *dpy, xError *err, Bool in_XReply)
return 0;
}
-static void call_handlers(Display *dpy, xcb_generic_reply_t *buf)
-{
- _XAsyncHandler *async, *next;
- for(async = dpy->async_handlers; async; async = next)
- {
- next = async->next;
- if(async->handler(dpy, (xReply *) buf, (char *) buf, sizeof(xReply) + (buf->length << 2), async->data))
- return;
- }
-}
-
-static xcb_generic_event_t * wait_or_poll_for_event(Display *dpy, int wait)
-{
- xcb_connection_t *c = dpy->xcb->connection;
- xcb_generic_event_t *event;
- if(wait)
- {
- if(dpy->xcb->event_waiter)
- {
- ConditionWait(dpy, dpy->xcb->event_notify);
- event = xcb_poll_for_event(c);
- }
- else
- {
- dpy->xcb->event_waiter = 1;
- UnlockDisplay(dpy);
- event = xcb_wait_for_event(c);
- InternalLockDisplay(dpy, /* don't skip user locks */ 0);
- dpy->xcb->event_waiter = 0;
- ConditionBroadcast(dpy, dpy->xcb->event_notify);
- }
- }
- else
- event = xcb_poll_for_event(c);
- return event;
-}
-
/* Widen a 32-bit sequence number into a native-word-size (unsigned long)
* sequence number. Treating the comparison as a 1 and shifting it avoids a
* conditional branch, and shifting by 16 twice avoids a compiler warning when
@@ -213,93 +177,114 @@ static void widen(unsigned long *wide, unsigned int narrow)
*wide = new + ((unsigned long) (new < *wide) << 16 << 16);
}
-static void process_responses(Display *dpy, int wait_for_first_event, xcb_generic_error_t **current_error, unsigned long current_request)
-{
- void *reply;
- xcb_generic_event_t *event = dpy->xcb->next_event;
- xcb_generic_error_t *error;
- xcb_connection_t *c = dpy->xcb->connection;
- if(!event && dpy->xcb->event_owner == XlibOwnsEventQueue)
- event = wait_or_poll_for_event(dpy, wait_for_first_event);
+/* Thread-safety rules:
+ *
+ * At most one thread can be reading from XCB's event queue at a time.
+ * If you are not the current event-reading thread and you need to find
+ * out if an event is available, you must wait.
+ *
+ * The same rule applies for reading replies.
+ *
+ * A single thread cannot be both the event-reading and the
+ * reply-reading thread at the same time.
+ *
+ * We always look at both the current event and the first pending reply
+ * to decide which to process next.
+ *
+ * We always process all responses in sequence-number order, which may
+ * mean waiting for another thread (either the event_waiter or the
+ * reply_waiter) to handle an earlier response before we can process or
+ * return a later one. If so, we wait on the corresponding condition
+ * variable for that thread to process the response and wake us up.
+ */
+static xcb_generic_reply_t *poll_for_event(Display *dpy)
+{
+ /* Make sure the Display's sequence numbers are valid */
require_socket(dpy);
- while(1)
+ /* Precondition: This thread can safely get events from XCB. */
+ assert(dpy->xcb->event_owner == XlibOwnsEventQueue && !dpy->xcb->event_waiter);
+
+ if(!dpy->xcb->next_event)
+ dpy->xcb->next_event = xcb_poll_for_event(dpy->xcb->connection);
+
+ if(dpy->xcb->next_event)
{
PendingRequest *req = dpy->xcb->pending_requests;
+ xcb_generic_event_t *event = dpy->xcb->next_event;
unsigned long event_sequence = dpy->last_request_read;
- if(event)
- widen(&event_sequence, event->full_sequence);
- assert(!(req && current_request && !XLIB_SEQUENCE_COMPARE(req->sequence, <=, current_request)));
- if(event && (!req || XLIB_SEQUENCE_COMPARE(event_sequence, <=, req->sequence)))
+ widen(&event_sequence, event->full_sequence);
+ if(!req || XLIB_SEQUENCE_COMPARE(event_sequence, <, req->sequence)
+ || (event->response_type != X_Error && event_sequence == req->sequence))
{
+ assert(XLIB_SEQUENCE_COMPARE(event_sequence, <=, dpy->request));
dpy->last_request_read = event_sequence;
- if(event->response_type != X_Error)
- {
- /* GenericEvents may be > 32 bytes. In this
- * case, the event struct is trailed by the
- * additional bytes. the xcb_generic_event_t
- * struct uses 4 bytes for internal numbering,
- * so we need to shift the trailing data to be
- * after the first 32 bytes. */
- if (event->response_type == GenericEvent &&
- ((xcb_ge_event_t*)event)->length)
- {
- memmove(&event->full_sequence,
- &event[1],
- ((xcb_ge_event_t*)event)->length * 4);
- }
- _XEnq(dpy, (xEvent *) event);
- wait_for_first_event = 0;
- }
- else if(current_error && event_sequence == current_request)
- {
- /* This can only occur when called from
- * _XReply, which doesn't need a new event. */
- *current_error = (xcb_generic_error_t *) event;
- event = NULL;
- break;
- }
- else
- handle_error(dpy, (xError *) event, current_error != NULL);
- free(event);
- event = wait_or_poll_for_event(dpy, wait_for_first_event);
+ dpy->xcb->next_event = NULL;
+ return (xcb_generic_reply_t *) event;
}
- else if(req && req->sequence == current_request)
+ }
+ return NULL;
+}
+
+static xcb_generic_reply_t *poll_for_response(Display *dpy)
+{
+ void *response;
+ xcb_generic_error_t *error;
+ PendingRequest *req;
+ while(!(response = poll_for_event(dpy)) &&
+ (req = dpy->xcb->pending_requests) &&
+ !req->reply_waiter &&
+ xcb_poll_for_reply(dpy->xcb->connection, req->sequence, &response, &error))
+ {
+ assert(XLIB_SEQUENCE_COMPARE(req->sequence, <=, dpy->request));
+ dpy->last_request_read = req->sequence;
+ if(!response)
+ dequeue_pending_request(dpy, req);
+ if(error)
+ return (xcb_generic_reply_t *) error;
+ }
+ return response;
+}
+
+static void handle_response(Display *dpy, xcb_generic_reply_t *response, Bool in_XReply)
+{
+ _XAsyncHandler *async, *next;
+ switch(response->response_type)
+ {
+ case X_Reply:
+ for(async = dpy->async_handlers; async; async = next)
{
- break;
+ next = async->next;
+ if(async->handler(dpy, (xReply *) response, (char *) response, sizeof(xReply) + (response->length << 2), async->data))
+ break;
}
- else if(req && xcb_poll_for_reply(dpy->xcb->connection, req->sequence, &reply, &error))
+ break;
+
+ case X_Error:
+ handle_error(dpy, (xError *) response, in_XReply);
+ break;
+
+ default: /* event */
+ /* GenericEvents may be > 32 bytes. In this case, the
+ * event struct is trailed by the additional bytes. the
+ * xcb_generic_event_t struct uses 4 bytes for internal
+ * numbering, so we need to shift the trailing data to
+ * be after the first 32 bytes. */
+ if(response->response_type == GenericEvent && ((xcb_ge_event_t *) response)->length)
{
- if(reply || error)
- dpy->last_request_read = req->sequence;
- if(reply)
- {
- call_handlers(dpy, reply);
- free(reply);
- }
- if(error)
- {
- handle_error(dpy, (xError *) error, current_error != NULL);
- free(error);
- }
- if(!reply)
- dequeue_pending_request(dpy, req);
+ xcb_ge_event_t *event = (xcb_ge_event_t *) response;
+ memmove(&event->full_sequence, &event[1], event->length * 4);
}
- else
- break;
+ _XEnq(dpy, (xEvent *) response);
+ break;
}
-
- dpy->xcb->next_event = event;
-
- if(xcb_connection_has_error(c))
- _XIOError(dpy);
-
- assert(XLIB_SEQUENCE_COMPARE(dpy->last_request_read, <=, dpy->request));
+ free(response);
}
int _XEventsQueued(Display *dpy, int mode)
{
+ xcb_generic_reply_t *response;
if(dpy->flags & XlibDisplayIOError)
return 0;
if(dpy->xcb->event_owner != XlibOwnsEventQueue)
@@ -309,7 +294,17 @@ int _XEventsQueued(Display *dpy, int mode)
_XSend(dpy, NULL, 0);
else
check_internal_connections(dpy);
- process_responses(dpy, 0, NULL, 0);
+
+ /* If another thread is blocked waiting for events, then we must
+ * let that thread pick up the next event. Since it blocked, we
+ * can reasonably claim there are no new events right now. */
+ if(!dpy->xcb->event_waiter)
+ {
+ while((response = poll_for_response(dpy)))
+ handle_response(dpy, response, False);
+ if(xcb_connection_has_error(dpy->xcb->connection))
+ _XIOError(dpy);
+ }
return dpy->qlen;
}
@@ -318,15 +313,66 @@ int _XEventsQueued(Display *dpy, int mode)
*/
void _XReadEvents(Display *dpy)
{
+ xcb_generic_reply_t *response;
+ unsigned long serial;
+
if(dpy->flags & XlibDisplayIOError)
return;
_XSend(dpy, NULL, 0);
if(dpy->xcb->event_owner != XlibOwnsEventQueue)
return;
check_internal_connections(dpy);
- do {
- process_responses(dpy, 1, NULL, 0);
- } while (dpy->qlen == 0);
+
+ serial = dpy->next_event_serial_num;
+ while(serial == dpy->next_event_serial_num || dpy->qlen == 0)
+ {
+ if(dpy->xcb->event_waiter)
+ {
+ ConditionWait(dpy, dpy->xcb->event_notify);
+ /* Maybe the other thread got us an event. */
+ continue;
+ }
+
+ if(!dpy->xcb->next_event)
+ {
+ xcb_generic_event_t *event;
+ dpy->xcb->event_waiter = 1;
+ UnlockDisplay(dpy);
+ event = xcb_wait_for_event(dpy->xcb->connection);
+ InternalLockDisplay(dpy, /* don't skip user locks */ 0);
+ dpy->xcb->event_waiter = 0;
+ ConditionBroadcast(dpy, dpy->xcb->event_notify);
+ if(!event)
+ _XIOError(dpy);
+ dpy->xcb->next_event = event;
+ }
+
+ /* We've established most of the conditions for
+ * poll_for_response to return non-NULL. The exceptions
+ * are connection shutdown, and finding that another
+ * thread is waiting for the next reply we'd like to
+ * process. */
+
+ response = poll_for_response(dpy);
+ if(response)
+ handle_response(dpy, response, False);
+ else if(dpy->xcb->pending_requests->reply_waiter)
+ { /* need braces around ConditionWait */
+ ConditionWait(dpy, dpy->xcb->reply_notify);
+ }
+ else
+ _XIOError(dpy);
+ }
+
+ /* The preceding loop established that there is no
+ * event_waiter--unless we just called ConditionWait because of
+ * a reply_waiter, in which case another thread may have become
+ * the event_waiter while we slept unlocked. */
+ if(!dpy->xcb->event_waiter)
+ while((response = poll_for_response(dpy)))
+ handle_response(dpy, response, False);
+ if(xcb_connection_has_error(dpy->xcb->connection))
+ _XIOError(dpy);
}
/*
@@ -467,19 +513,83 @@ Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
current = dpy->xcb->pending_requests_tail;
else
current = append_pending_request(dpy, dpy->request);
- /* FIXME: drop the Display lock while waiting?
- * Complicates process_responses. */
- reply = xcb_wait_for_reply(c, current->sequence, &error);
+ /* Don't let any other thread get this reply. */
+ current->reply_waiter = 1;
+
+ while(1)
+ {
+ PendingRequest *req = dpy->xcb->pending_requests;
+ xcb_generic_reply_t *response;
+
+ if(req != current && req->reply_waiter)
+ {
+ ConditionWait(dpy, dpy->xcb->reply_notify);
+ /* Another thread got this reply. */
+ continue;
+ }
+ req->reply_waiter = 1;
+ UnlockDisplay(dpy);
+ response = xcb_wait_for_reply(c, req->sequence, &error);
+ InternalLockDisplay(dpy, /* don't skip user locks */ 0);
+
+ /* We have the response we're looking for. Now, before
+ * letting anyone else process this sequence number, we
+ * need to process any events that should have come
+ * earlier. */
+
+ if(dpy->xcb->event_owner == XlibOwnsEventQueue)
+ {
+ xcb_generic_reply_t *event;
+ /* If some thread is already waiting for events,
+ * it will get the first one. That thread must
+ * process that event before we can continue. */
+ /* FIXME: That event might be after this reply,
+ * and might never even come--or there might be
+ * multiple threads trying to get events. */
+ while(dpy->xcb->event_waiter)
+ { /* need braces around ConditionWait */
+ ConditionWait(dpy, dpy->xcb->event_notify);
+ }
+ while((event = poll_for_event(dpy)))
+ handle_response(dpy, event, True);
+ }
+ req->reply_waiter = 0;
+ ConditionBroadcast(dpy, dpy->xcb->reply_notify);
+ assert(XLIB_SEQUENCE_COMPARE(req->sequence, <=, dpy->request));
+ dpy->last_request_read = req->sequence;
+ if(!response)
+ dequeue_pending_request(dpy, req);
+
+ if(req == current)
+ {
+ reply = (char *) response;
+ break;
+ }
+
+ if(error)
+ handle_response(dpy, (xcb_generic_reply_t *) error, True);
+ else if(response)
+ handle_response(dpy, response, True);
+ }
check_internal_connections(dpy);
- process_responses(dpy, 0, &error, current->sequence);
+
+ if(dpy->xcb->next_event && dpy->xcb->next_event->response_type == X_Error)
+ {
+ xcb_generic_event_t *event = dpy->xcb->next_event;
+ unsigned long event_sequence = dpy->last_request_read;
+ widen(&event_sequence, event->full_sequence);
+ if(event_sequence == current->sequence)
+ {
+ error = (xcb_generic_error_t *) event;
+ dpy->xcb->next_event = NULL;
+ }
+ }
if(error)
{
int ret_code;
- dpy->last_request_read = error->full_sequence;
-
/* Xlib is evil and assumes that even errors will be
* copied into rep. */
memcpy(rep, error, 32);
@@ -522,8 +632,6 @@ Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
return 0;
}
- dpy->last_request_read = current->sequence;
-
/* there's no error and we have a reply. */
dpy->xcb->reply_data = reply;
dpy->xcb->reply_consumed = sizeof(xReply) + (extra * 4);
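
Taken together, the new _XReadEvents and _XReply mean a reply cannot be handed back to its caller while another thread is still blocked in xcb_wait_for_event: _XReply waits on event_notify until the event waiter wakes up, which only happens once an event arrives. That is the second caveat in the commit message. The following small demonstration assumes a libxcb with the concurrent-writer fix mentioned above; the window parameters are arbitrary.

#include <X11/Xlib.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static Display *dpy;

/* Blocks in XNextEvent, making this thread the event waiter. */
static void *event_thread(void *arg)
{
    XEvent ev;
    (void) arg;
    XNextEvent(dpy, &ev);
    printf("event thread: got event type %d\n", ev.type);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    Window win;
    Atom atom;

    XInitThreads();   /* must be the first Xlib call in a threaded client */
    dpy = XOpenDisplay(NULL);
    if (!dpy)
        return 1;

    win = XCreateSimpleWindow(dpy, DefaultRootWindow(dpy), 0, 0, 100, 100, 0,
                              BlackPixel(dpy, DefaultScreen(dpy)),
                              WhitePixel(dpy, DefaultScreen(dpy)));
    XSelectInput(dpy, win, StructureNotifyMask);

    pthread_create(&tid, NULL, event_thread, NULL);
    sleep(1);   /* let the event thread block in XNextEvent first */

    /* Generate a MapNotify so the event waiter is guaranteed to wake up;
     * if no event ever arrived, the round trip below would block as well. */
    XMapWindow(dpy, win);
    XFlush(dpy);

    /* Round trip through _XReply: the reply is only returned to this thread
     * after the event waiter has been woken by an incoming event. */
    atom = XInternAtom(dpy, "WM_PROTOCOLS", False);
    printf("main thread: reply received, atom %lu\n", (unsigned long) atom);

    pthread_join(tid, NULL);
    XCloseDisplay(dpy);
    return 0;
}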