1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
26 /* Stuff that reads stuff from the server. */
41 #include <sys/select.h>
45 #include "xcb_windefs.h"
50 #define XCB_XGE_EVENT 35
/* Queue-node types for the input (read) side of a connection.
 * NOTE(review): the numeric prefixes are original line numbers from an
 * external listing; the gaps show that struct openings/closings and some
 * fields are elided here. */
/* Node in the FIFO of events handed out by xcb_wait_for_event/xcb_poll_for_event. */
53 xcb_generic_event_t *event;
54 struct event_list *next;
/* Node in a singly linked list of reply buffers for a single request. */
59 struct reply_list *next;
/* Bookkeeping for a request whose reply has not been fully processed yet;
 * covers the sequence-number range [first_request, last_request]. */
62 typedef struct pending_reply {
63 uint64_t first_request;
64 uint64_t last_request;
65 enum workarounds workaround;
67 struct pending_reply *next;
/* One blocked xcb_wait_for_reply() caller, woken via its condition variable
 * ("data" member, elided here — TODO confirm against full source). */
70 typedef struct reader_list {
73 struct reader_list *next;
/* Wake whichever thread should take over reading from the socket next:
 * signal the first queued reader's condvar when readers exist, otherwise
 * the shared event condvar.  (The if/else and the pthreadret declaration
 * are elided from this listing.) */
76 static void wake_up_next_reader(xcb_connection_t *c)
80         pthreadret = pthread_cond_signal(c->in.readers->data);
82         pthreadret = pthread_cond_signal(&c->in.event_cond);
/* pthread_cond_signal only fails on invalid condvars; treat as a bug. */
83     assert(pthreadret == 0);
/* Parse exactly one X11 packet (reply, event, or error) from c->in.queue.
 * Returns 1 when a packet was consumed and queued for a consumer, 0 when
 * not enough data is buffered yet (or on allocation failure after shutdown).
 * NOTE(review): this listing elides braces and several statements (the
 * numeric prefixes jump); comments below only describe what is visible. */
86 static int read_packet(xcb_connection_t *c)
88     xcb_generic_reply_t genrep;
90     int eventlength = 0; /* length after first 32 bytes for GenericEvents */
92     pending_reply *pend = 0;
93     struct event_list *event;
95     /* Wait for there to be enough data for us to read a whole packet */
96     if(c->in.queue_len < length)
99     /* Get the response type, length, and sequence number. */
100     memcpy(&genrep, c->in.queue, sizeof(genrep));
102     /* Compute 32-bit sequence number of this packet. */
/* KeymapNotify is the one event that carries no sequence number, so it
 * must not update the sequence tracking below. */
103     if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
105         uint64_t lastread = c->in.request_read;
/* Widen the wire's 16-bit sequence into the connection's 64-bit counter,
 * bumping by 0x10000 if it appears to have wrapped backwards. */
106         c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
107         if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
108             c->in.request_read += 0x10000;
109         if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
110             c->in.request_expected = c->in.request_read;
/* Sequence advanced: archive the previous request's reply chain into the
 * replies map and reset the current-reply accumulator. */
112         if(c->in.request_read != lastread)
114             if(c->in.current_reply)
116                 _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
117                 c->in.current_reply = 0;
118                 c->in.current_reply_tail = &c->in.current_reply;
120             c->in.request_completed = c->in.request_read - 1;
/* Retire pending_reply records that can no longer receive responses
 * (external-socket-owner records are kept — their range is still open). */
123         while(c->in.pending_replies &&
124               c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
125               XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
127             pending_reply *oldpend = c->in.pending_replies;
128             c->in.pending_replies = oldpend->next;
130                 c->in.pending_replies_tail = &c->in.pending_replies;
/* An error terminates its request: no more responses will follow it. */
134         if(genrep.response_type == XCB_ERROR)
135             c->in.request_completed = c->in.request_read;
/* Find the pending_reply record (if any) covering this sequence number. */
138     if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
140         pend = c->in.pending_replies;
142            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
143              (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
144               XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
148     /* For reply packets, check that the entire packet is available. */
149     if(genrep.response_type == XCB_REPLY)
/* Buggy GLX servers lie about GetFBConfigs reply length; recompute it
 * from the property/config counts in the raw buffer. */
151         if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
153             uint32_t *p = (uint32_t *) c->in.queue;
154             genrep.length = p[2] * p[3] * 2;
156         length += genrep.length * 4;
159     /* XGE events may have sizes > 32 */
160     if (genrep.response_type == XCB_XGE_EVENT)
162         eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
/* Events/errors get an extra uint32_t so full_sequence can be appended. */
165     buf = malloc(length + eventlength +
166             (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
169         _xcb_conn_shutdown(c);
173     if(_xcb_in_read_block(c, buf, length) <= 0)
179     /* pull in XGE event data if available, append after event struct */
182         if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
/* Caller asked to discard this reply: drop it instead of queuing it
 * (the free/return are elided from this listing). */
189     if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
/* Stamp events and errors with the widened 32-bit sequence number. */
195     if(genrep.response_type != XCB_REPLY)
196         ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;
198     /* reply, or checked error */
199     if( genrep.response_type == XCB_REPLY ||
200         (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
203         struct reply_list *cur = malloc(sizeof(struct reply_list));
206             _xcb_conn_shutdown(c);
/* Append to the current request's reply chain, then wake the reader (if
 * any) that is blocked waiting for exactly this sequence number. */
212         *c->in.current_reply_tail = cur;
213         c->in.current_reply_tail = &cur->next;
214         for(reader = c->in.readers; 
216                 XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
217                 reader = reader->next)
219             if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
221                 pthread_cond_signal(reader->data);
228     /* event, or unchecked error */
229     event = malloc(sizeof(struct event_list));
232         _xcb_conn_shutdown(c);
/* Append to the event FIFO and wake any xcb_wait_for_event() caller. */
238     *c->in.events_tail = event;
239     c->in.events_tail = &event->next;
240     pthread_cond_signal(&c->in.event_cond);
241     return 1; /* I have something for you... */
/* Pop and return the head of the event FIFO; presumably returns 0 when the
 * queue is empty (the guard and return are elided from this listing).
 * Caller must hold the iolock. */
244 static xcb_generic_event_t *get_event(xcb_connection_t *c)
246     struct event_list *cur = c->in.events;
247     xcb_generic_event_t *ret;
251     c->in.events = cur->next;
/* Removed the last node: reset the tail pointer to the list head. */
253         c->in.events_tail = &c->in.events;
/* Free an entire reply_list chain, including each node's reply buffer
 * (the loop body is elided from this listing). */
258 static void free_reply_list(struct reply_list *head)
262         struct reply_list *cur = head;
/* Read exactly len bytes from fd into buf, blocking as needed.
 * On EAGAIN/WSAEWOULDBLOCK it waits for readability with poll() (or
 * select() when USE_POLL is unset) and retries; the accumulation loop and
 * #ifdef structure are partially elided from this listing. */
269 static int read_block(const int fd, void *buf, const ssize_t len)
275         int ret = read(fd, ((char *) buf) + done, len - done);
/* Windows variant: recv() with no flags instead of read(). */
277         int ret = recv(fd, ((char *) buf) + done, len - done,0);
283         if(ret < 0 && errno == EAGAIN)
285         if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
/* Retry the wait when interrupted by a signal (EINTR). */
294             ret = poll(&pfd, 1, -1);
295         } while (ret == -1 && errno == EINTR);
302             ret = select(fd + 1, &fds, 0, 0, 0);
303         } while (ret == -1 && errno == EINTR);
305     /* the do while loop used for the non-windows version isn't required*/
306     /* for windows since there are no signals in Windows hence no EINTR*/
307         ret = select(fd + 1, &fds, 0, 0, 0);
309 #endif /* USE_POLL */
/* Non-blocking check for a reply/error to `request`.  Presumably returns
 * nonzero when the question is settled (reply, error, or definitely none)
 * and 0 when the caller should keep waiting — TODO confirm; the return
 * statements are elided from this listing.  Caller must hold the iolock. */
317 static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
319     struct reply_list *head;
321     /* If an error occurred when issuing the request, fail immediately. */
324     /* We've read requests past the one we want, so if it has replies we have
325      * them all and they're in the replies map. */
326     else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
328         head = _xcb_map_remove(c->in.replies, request);
/* More replies remain for this request: put the rest back in the map. */
329         if(head && head->next)
330             _xcb_map_put(c->in.replies, request, head->next);
332     /* We're currently processing the responses to the request we want, and we
333      * have a reply ready to return. So just return it without blocking. */
334     else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
336         head = c->in.current_reply;
337         c->in.current_reply = head->next;
/* Consumed the last queued reply: reset the tail pointer. */
339             c->in.current_reply_tail = &c->in.current_reply;
341     /* We know this request can't have any more replies, and we've already
342      * established it doesn't have a reply now. Don't bother blocking. */
343     else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
345     /* We may have more replies on the way for this request: block until we're
/* Route the found packet: errors go out via *error, replies via *reply. */
356     if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
359         *error = head->reply;
364         *reply = head->reply;
372 /* Public interface */
/* Public: block until the reply (or error) for `request` arrives.
 * Registers this thread in the sorted readers list, waits on a per-call
 * condvar, then unregisters and wakes the next reader.  Several lines
 * (locals, loop bodies, the final return) are elided from this listing. */
374 void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
376     uint64_t widened_request;
383     pthread_mutex_lock(&c->iolock);
/* Widen the caller's 32-bit sequence against the 64-bit request counter;
 * if it lands in the future, it must be from the previous 2^32 epoch. */
385     widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
386     if(widened_request > c->out.request)
387         widened_request -= UINT64_C(1) << 32;
389     /* If this request has not been written yet, write it. */
390     if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
392         pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
394         reader_list **prev_reader;
/* Insert ourselves in sequence order among the waiting readers. */
396         for(prev_reader = &c->in.readers; 
398                 XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
399                 prev_reader = &(*prev_reader)->next)
403         reader.request = request;
405         reader.next = *prev_reader;
406         *prev_reader = &reader;
/* Sleep until poll_for_reply says the question is settled. */
408         while(!poll_for_reply(c, request, &ret, e))
409             if(!_xcb_conn_wait(c, &cond, 0, 0))
/* Unlink our stack-allocated reader node before the frame goes away. */
412         for(prev_reader = &c->in.readers; 
414                 XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
415                 prev_reader = &(*prev_reader)->next)
417             if(*prev_reader == &reader)
419                 *prev_reader = (*prev_reader)->next;
423         pthread_cond_destroy(&cond);
426     wake_up_next_reader(c);
427     pthread_mutex_unlock(&c->iolock);
/* Allocate a pending_reply covering exactly `seq`, flagged DISCARD_REPLY,
 * and splice it into the pending list at *prev_next.  On malloc failure
 * the connection is shut down (early return elided from this listing). */
431 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
434     pend = malloc(sizeof(*pend));
437         _xcb_conn_shutdown(c);
441     pend->first_request = seq;
442     pend->last_request = seq;
443     pend->workaround = 0;
444     pend->flags = XCB_REQUEST_DISCARD_REPLY;
445     pend->next = *prev_next;
/* Inserted at the tail: keep the tail pointer current. */
449         c->in.pending_replies_tail = &pend->next;
/* Drop any present or future reply for `request` without handing it to the
 * application: frees replies already queued, or marks/creates a pending
 * record so later replies are discarded on arrival.  Caller must hold the
 * iolock.  Several frees/returns are elided from this listing. */
452 static void discard_reply(xcb_connection_t *c, unsigned int request)
454     pending_reply *pend = 0;
455     pending_reply **prev_pend;
456     uint64_t widened_request;
458     /* We've read requests past the one we want, so if it has replies we have
459      * them all and they're in the replies map. */
460     if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
462         struct reply_list *head;
463         head = _xcb_map_remove(c->in.replies, request);
466             struct reply_list *next = head->next;
474     /* We're currently processing the responses to the request we want, and we
475      * have a reply ready to return. Free it, and mark the pend to free any further
477     if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
479         struct reply_list *head;
/* Detach and free the whole current reply chain. */
480         head = c->in.current_reply;
481         c->in.current_reply = NULL;
482         c->in.current_reply_tail = &c->in.current_reply;
485             struct reply_list *next = head->next;
/* If a pending record covers the in-flight request, flag it for discard;
 * otherwise insert a fresh discard record at the list head. */
491         pend = c->in.pending_replies;
493            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
494              (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
495               XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
498             pend->flags |= XCB_REQUEST_DISCARD_REPLY;
500             insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);
505     /* Walk the list of pending requests. Mark the first match for deletion. */
506     for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
508         if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
511         if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
513             /* Pending reply found. Mark for discard: */
514             (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
519     /* Pending reply not found (likely due to _unchecked request). Create one: */
/* Same 32->64 bit widening as in xcb_wait_for_reply. */
520     widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
521     if(widened_request > c->out.request)
522         widened_request -= UINT64_C(1) << 32;
524     insert_pending_discard(c, prev_pend, widened_request);
/* Public: tell XCB the application will never read the reply for
 * `sequence`, so it can be freed as soon as it arrives.  Thin locking
 * wrapper around discard_reply() (error-guard lines elided). */
527 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
532     /* If an error occurred when issuing the request, fail immediately. */
536     pthread_mutex_lock(&c->iolock);
537     discard_reply(c, sequence);
538     pthread_mutex_unlock(&c->iolock);
/* Public: non-blocking poll for the reply to `request`.  Locking wrapper
 * around poll_for_reply(); the error-connection guard and final return are
 * elided from this listing. */
541 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
549         return 1; /* would not block */
552     pthread_mutex_lock(&c->iolock);
553     ret = poll_for_reply(c, request, reply, error);
554     pthread_mutex_unlock(&c->iolock);
/* Public: block until an event is available and return it (ownership
 * transfers to the caller).  Waits on the shared event condvar whenever
 * the queue is empty; wakes the next reader before returning. */
558 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
560     xcb_generic_event_t *ret;
563     pthread_mutex_lock(&c->iolock);
564     /* get_event returns 0 on empty list. */
565     while(!(ret = get_event(c)))
/* _xcb_conn_wait failing means the connection is dead; stop waiting. */
566         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
569     wake_up_next_reader(c);
570     pthread_mutex_unlock(&c->iolock);
/* Public: return the next queued event, or 0 immediately if none.  Tries
 * one non-blocking socket read before giving up (guard lines elided). */
574 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
576     xcb_generic_event_t *ret = 0;
579         pthread_mutex_lock(&c->iolock);
580         /* FIXME: follow X meets Z architecture changes. */
582         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
584         pthread_mutex_unlock(&c->iolock);
/* Public: synchronously check a void (no-reply) request for an error.
 * If responses up to the cookie haven't been seen yet, force a round trip
 * with GetInputFocus so the server has definitely processed it, then
 * collect any error via xcb_wait_for_reply. */
589 xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
591     /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
592      * that would require factoring the locking out of xcb_get_input_focus,
593      * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
594     xcb_generic_error_t *ret;
598     if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
599        && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
/* Sync: the GetInputFocus reply is discarded; only ordering matters. */
601         free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
604     reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
609 /* Private interface */
/* Initialize the input side of a connection: condvar, sequence counters,
 * replies map, and the three tail pointers for the singly linked queues.
 * Failure returns are elided from this listing. */
611 int _xcb_in_init(_xcb_in *in)
613     if(pthread_cond_init(&in->event_cond, 0))
619     in->request_read = 0;
620     in->request_completed = 0;
622     in->replies = _xcb_map_new();
/* Empty-list invariant: each tail points at its own head pointer. */
626     in->current_reply_tail = &in->current_reply;
627     in->events_tail = &in->events;
628     in->pending_replies_tail = &in->pending_replies;
/* Tear down the input side: destroy the condvar, free the current reply
 * chain, the replies map, every queued event, and every pending_reply
 * (individual free() calls are elided from this listing). */
633 void _xcb_in_destroy(_xcb_in *in)
635     pthread_cond_destroy(&in->event_cond);
636     free_reply_list(in->current_reply);
/* free_reply_list doubles as the destructor for each map value. */
637     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
640         struct event_list *e = in->events;
641         in->events = e->next;
645     while(in->pending_replies)
647         pending_reply *pend = in->pending_replies;
648         in->pending_replies = pend->next;
/* Record that request `request` will produce a reply (or checked error):
 * append a pending_reply for it to the tail of the pending list.  Returns
 * via elided lines; on malloc failure the connection is shut down. */
653 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
655     pending_reply *pend = malloc(sizeof(pending_reply));
/* A record is only worth creating for a workaround or nonzero flags. */
656     assert(workaround != WORKAROUND_NONE || flags != 0);
659         _xcb_conn_shutdown(c);
662     pend->first_request = pend->last_request = request;
663     pend->workaround = workaround;
666     *c->in.pending_replies_tail = pend;
667     c->in.pending_replies_tail = &pend->next;
/* Close out an external-socket-owner pending record: the external owner is
 * done issuing requests, so cap its open-ended range at the last request
 * written and clear the workaround flag. */
671 void _xcb_in_replies_done(xcb_connection_t *c)
673     struct pending_reply *pend;
674     if (c->in.pending_replies_tail != &c->in.pending_replies)
/* Recover the tail node from the address of its `next` field. */
676         pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
677         if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
679             pend->last_request = c->out.request;
680             pend->workaround = WORKAROUND_NONE;
/* Pull whatever data the socket has into c->in.queue, then parse as many
 * complete packets as possible.  Returns nonzero on success or would-block;
 * shuts the connection down and returns 0 on a hard error (return lines
 * elided from this listing). */
685 int _xcb_in_read(xcb_connection_t *c)
688     int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
690     int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len,0);
693         c->in.queue_len += n;
/* Drain the buffer one packet at a time until it runs dry. */
694     while(read_packet(c))
/* Partial read or EAGAIN is fine; anything else (incl. EOF, n == 0
 * presumably excluded by the n > 0 test) kills the connection. */
697     if((n > 0) || (n < 0 && errno == EAGAIN))
699     if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
702     _xcb_conn_shutdown(c);
706 int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
708 int done = c->in.queue_len;
712 memcpy(buf, c->in.queue, done);
713 c->in.queue_len -= done;
714 memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
718 int ret = read_block(c->fd, (char *) buf + done, len - done);
721 _xcb_conn_shutdown(c);