Switch to using the CMSG_* macros for FD passing
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include <assert.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <stdio.h>
37 #include <errno.h>
38
39 #include "xcb.h"
40 #include "xcbext.h"
41 #include "xcbint.h"
42 #if USE_POLL
43 #include <poll.h>
44 #endif
45 #ifndef _WIN32
46 #include <sys/select.h>
47 #include <sys/socket.h>
48 #endif
49
50 #ifdef _WIN32
51 #include "xcb_windefs.h"
52 #endif /* _WIN32 */
53
54 #define XCB_ERROR 0
55 #define XCB_REPLY 1
56 #define XCB_XGE_EVENT 35
57
/* Singly-linked FIFO node owning one generic event. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};
62
/* Registration record for a "special event": XGE events matching the
 * (extension, eid) pair bypass the main event queue and are delivered
 * through this structure's private FIFO instead. */
struct xcb_special_event {

    struct xcb_special_event *next;

    /* Match XGE events for the specific extension and event ID (the
     * first 32 bit word after evtype)
     */
    uint8_t     extension;
    uint32_t    eid;
    uint32_t    *stamp;     /* if non-NULL, incremented for each queued event */

    struct event_list   *events;        /* FIFO of matched events */
    struct event_list   **events_tail;  /* tail pointer for O(1) append */

    pthread_cond_t special_event_cond;  /* signaled when an event is queued */
};
79
/* Singly-linked list node owning one reply (or error) buffer. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};
84
/* Bookkeeping for a range of outstanding requests whose responses need
 * special handling (workarounds, checked errors, discarded replies). */
typedef struct pending_reply {
    uint64_t first_request;   /* first sequence number covered */
    uint64_t last_request;    /* last sequence number covered */
    enum workarounds workaround;
    int flags;                /* XCB_REQUEST_* flags */
    struct pending_reply *next;
} pending_reply;
92
/* One thread blocked waiting for a reply; the list is kept sorted by
 * request sequence number. */
typedef struct reader_list {
    uint64_t request;          /* sequence number being waited for */
    pthread_cond_t *data;      /* condition to signal when the reader can proceed */
    struct reader_list *next;
} reader_list;
98
99 static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
100 {
101     while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
102     {
103         /* If you don't have what you're looking for now, you never
104          * will. Wake up and leave me alone. */
105         pthread_cond_signal((*prev_reader)->data);
106         *prev_reader = (*prev_reader)->next;
107     }
108 }
109
#if HAVE_SENDMSG
/* Copy the next nfd received file descriptors out of the connection's
 * FD queue into fds.  Returns 1 on success, or 0 if fewer than nfd
 * descriptors are currently queued (nothing is consumed in that case). */
static int read_fds(xcb_connection_t *c, int *fds, int nfd)
{
    int available = c->in.in_fd.nfd - c->in.in_fd.ifd;

    if (available < nfd)
        return 0;
    memcpy(fds, &c->in.in_fd.fd[c->in.in_fd.ifd], nfd * sizeof (int));
    c->in.in_fd.ifd += nfd;
    return 1;
}
#endif
123
/* Local view of the fixed 32-byte head of an XGE (GenericEvent) packet,
 * laid out so the extension, evtype and eid fields used for special
 * event matching can be read directly. */
typedef struct xcb_ge_special_event_t {
    uint8_t  response_type; /**<  */
    uint8_t  extension; /**<  */
    uint16_t sequence; /**<  */
    uint32_t length; /**<  */
    uint16_t evtype; /**<  */
    uint8_t  pad0[2]; /**< */
    uint32_t eid; /**< */
    uint8_t  pad1[16]; /**<  */
} xcb_ge_special_event_t;
134
135 static int event_special(xcb_connection_t *c,
136                          struct event_list *event)
137 {
138     struct xcb_special_event *special_event;
139     struct xcb_ge_special_event_t *ges = (void *) event->event;
140
141     /* Special events are always XGE events */
142     if ((ges->response_type & 0x7f) != XCB_XGE_EVENT)
143         return 0;
144
145     for (special_event = c->in.special_events;
146          special_event;
147          special_event = special_event->next)
148     {
149         if (ges->extension == special_event->extension &&
150             ges->eid == special_event->eid)
151         {
152             *special_event->events_tail = event;
153             special_event->events_tail = &event->next;
154             if (special_event->stamp)
155                 ++(*special_event->stamp);
156             pthread_cond_signal(&special_event->special_event_cond);
157             return 1;
158         }
159     }
160
161     return 0;
162 }
163
/* Parse one complete X11 packet (reply, error, or event) out of the
 * buffered input queue and route it: replies and checked errors go to
 * the current reply list (waking a matching reader), events and
 * unchecked errors go to the event queue or a special event queue.
 * Returns 1 if a packet was consumed, 0 if not enough data is buffered
 * yet or on failure (after shutting the connection down).  Must be
 * called with the connection lock held. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    uint64_t length = 32;
    uint64_t eventlength = 0; /* length after first 32 bytes for GenericEvents */
    int nfd = 0;         /* Number of file descriptors attached to the reply */
    uint64_t bufsize;
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet.  KeymapNotify
     * events carry no sequence number, so they cannot advance the
     * sequence tracking. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Splice the packet's 16-bit sequence into the widened counter,
         * bumping the high bits on wraparound. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* The previous request's replies are complete: move them to
             * the replies map and reset the current list. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Drop pending_reply records that can no longer match. */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates the request's response stream. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        /* Find the pending_reply record covering this sequence, if any. */
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;

        /* XXX a bit of a hack -- we "know" that all FD replys place
         * the number of fds in the pad0 byte */
        if (pend && pend->flags & XCB_REQUEST_REPLY_FDS)
            nfd = genrep.pad0;
    }

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    /* Allocate: packet body, XGE tail, received FDs, and (for events and
     * errors) room for the full_sequence field appended by libxcb. */
    bufsize = length + eventlength + nfd * sizeof(int)  +
        (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t));
    if (bufsize < INT32_MAX)
        buf = malloc((size_t) bufsize);
    else
        buf = NULL;
    if(!buf)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

#if HAVE_SENDMSG
    /* Attach the passed file descriptors directly after the reply data,
     * where xcb_get_reply_fds() will look for them. */
    if (nfd)
    {
        if (!read_fds(c, (int *) &((char *) buf)[length], nfd))
        {
            free(buf);
            return 0;
        }
    }
#endif

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader waiting on exactly this request, if any. */
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;

    if (!event_special(c, event)) {
        *c->in.events_tail = event;
        c->in.events_tail = &event->next;
        pthread_cond_signal(&c->in.event_cond);
    }
    return 1; /* I have something for you... */
}
336
337 static xcb_generic_event_t *get_event(xcb_connection_t *c)
338 {
339     struct event_list *cur = c->in.events;
340     xcb_generic_event_t *ret;
341     if(!c->in.events)
342         return 0;
343     ret = cur->event;
344     c->in.events = cur->next;
345     if(!cur->next)
346         c->in.events_tail = &c->in.events;
347     free(cur);
348     return ret;
349 }
350
351 static void free_reply_list(struct reply_list *head)
352 {
353     while(head)
354     {
355         struct reply_list *cur = head;
356         head = cur->next;
357         free(cur->reply);
358         free(cur);
359     }
360 }
361
/* Read exactly len bytes from fd into buf, waiting in poll()/select()
 * whenever the non-blocking socket has no data available.  Returns len
 * on success, or the failing recv()/poll()/select() result (0 on EOF,
 * negative on error). */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int have = 0;
    while(have < len)
    {
        int ret = recv(fd, (char *) buf + have, len - have, 0);
        if(ret > 0)
            have += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
            /* No data yet: block until the socket is readable, then
             * retry the recv. */
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set rfds;
            FD_ZERO(&rfds);
            FD_SET(fd, &rfds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &rfds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}
401
/* Check, without blocking, whether a response for the given request is
 * available.  Returns 1 when the question is settled — *reply and
 * (optionally) *error filled in, both possibly NULL — or 0 when only
 * waiting for more data can decide.  Caller must hold the connection
 * lock. */
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Errors go to *error when the caller wants them, otherwise they
         * are silently dropped. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
456
457 static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
458 {
459     while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
460         prev_reader = &(*prev_reader)->next;
461     reader->request = request;
462     reader->data = cond;
463     reader->next = *prev_reader;
464     *prev_reader = reader;
465 }
466
467 static void remove_reader(reader_list **prev_reader, reader_list *reader)
468 {
469     while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
470         if(*prev_reader == reader)
471         {
472             *prev_reader = (*prev_reader)->next;
473             break;
474         }
475 }
476
/* Block until the response for the given request arrives, registering
 * this thread as a reader so read_packet can wake it.  Returns the
 * reply buffer (caller owns it), or NULL — with *e set if the response
 * was an error.  Caller must hold the connection lock. */
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        /* Register ourselves so read_packet knows whom to signal when a
         * response for this sequence number arrives. */
        insert_reader(&c->in.readers, &reader, request, &cond);

        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    /* Hand the socket off to the next waiting thread regardless of
     * how we got here. */
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
500
501 static uint64_t widen(xcb_connection_t *c, unsigned int request)
502 {
503     uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
504     if(widened_request > c->out.request)
505         widened_request -= UINT64_C(1) << 32;
506     return widened_request;
507 }
508
509 /* Public interface */
510
511 void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
512 {
513     void *ret;
514     if(e)
515         *e = 0;
516     if(c->has_error)
517         return 0;
518
519     pthread_mutex_lock(&c->iolock);
520     ret = wait_for_reply(c, widen(c, request), e);
521     pthread_mutex_unlock(&c->iolock);
522     return ret;
523 }
524
525 int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t reply_size)
526 {
527     return (int *) (&((char *) reply)[reply_size]);
528 }
529
530 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
531 {
532     pending_reply *pend;
533     pend = malloc(sizeof(*pend));
534     if(!pend)
535     {
536         _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
537         return;
538     }
539
540     pend->first_request = seq;
541     pend->last_request = seq;
542     pend->workaround = 0;
543     pend->flags = XCB_REQUEST_DISCARD_REPLY;
544     pend->next = *prev_next;
545     *prev_next = pend;
546
547     if(!pend->next)
548         c->in.pending_replies_tail = &pend->next;
549 }
550
/* Throw away any responses for the given request: free those already
 * read, and mark (or create) a pending_reply record so future ones are
 * dropped by read_packet.  Caller must hold the connection lock. */
static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}
582
583 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
584 {
585     if(c->has_error)
586         return;
587
588     /* If an error occurred when issuing the request, fail immediately. */
589     if(!sequence)
590         return;
591
592     pthread_mutex_lock(&c->iolock);
593     discard_reply(c, widen(c, sequence));
594     pthread_mutex_unlock(&c->iolock);
595 }
596
597 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
598 {
599     int ret;
600     if(c->has_error)
601     {
602         *reply = 0;
603         if(error)
604             *error = 0;
605         return 1; /* would not block */
606     }
607     assert(reply != 0);
608     pthread_mutex_lock(&c->iolock);
609     ret = poll_for_reply(c, widen(c, request), reply, error);
610     pthread_mutex_unlock(&c->iolock);
611     return ret;
612 }
613
614 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
615 {
616     xcb_generic_event_t *ret;
617     if(c->has_error)
618         return 0;
619     pthread_mutex_lock(&c->iolock);
620     /* get_event returns 0 on empty list. */
621     while(!(ret = get_event(c)))
622         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
623             break;
624
625     _xcb_in_wake_up_next_reader(c);
626     pthread_mutex_unlock(&c->iolock);
627     return ret;
628 }
629
630 static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
631 {
632     xcb_generic_event_t *ret = 0;
633     if(!c->has_error)
634     {
635         pthread_mutex_lock(&c->iolock);
636         /* FIXME: follow X meets Z architecture changes. */
637         ret = get_event(c);
638         if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
639             ret = get_event(c);
640         pthread_mutex_unlock(&c->iolock);
641     }
642     return ret;
643 }
644
645 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
646 {
647     return poll_for_next_event(c, 0);
648 }
649
650 xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
651 {
652     return poll_for_next_event(c, 1);
653 }
654
/* Public: force a checked void request to complete and return its
 * error, if any (caller frees), or NULL on success.  If no response
 * for this request can have been seen yet, a sync request is sent
 * first so the wait below is guaranteed to terminate. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    /* Only sync when nothing at or past this request has produced a
     * response and the request has not already completed. */
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    reply = wait_for_reply(c, request, &ret);
    /* A void request never has a reply — only (possibly) an error. */
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
675
676 static xcb_generic_event_t *get_special_event(xcb_connection_t *c,
677                                               xcb_special_event_t *se)
678 {
679     xcb_generic_event_t *event = NULL;
680     struct event_list *events;
681
682     if ((events = se->events) != NULL) {
683         event = events->event;
684         if (!(se->events = events->next))
685             se->events_tail = &se->events;
686         free (events);
687     }
688     return event;
689 }
690
691 xcb_generic_event_t *xcb_poll_for_special_event(xcb_connection_t *c,
692                                                 xcb_special_event_t *se)
693 {
694     xcb_generic_event_t *event;
695
696     if(c->has_error)
697         return 0;
698     pthread_mutex_lock(&c->iolock);
699     event = get_special_event(c, se);
700     pthread_mutex_unlock(&c->iolock);
701     return event;
702 }
703
704 xcb_generic_event_t *xcb_wait_for_special_event(xcb_connection_t *c,
705                                                 xcb_special_event_t *se)
706 {
707     xcb_generic_event_t *event;
708
709     if(c->has_error)
710         return 0;
711     pthread_mutex_lock(&c->iolock);
712
713     /* get_special_event returns 0 on empty list. */
714     while(!(event = get_special_event(c, se)))
715         if(!_xcb_conn_wait(c, &se->special_event_cond, 0, 0))
716             break;
717
718     pthread_mutex_unlock(&c->iolock);
719     return event;
720 }
721
722 xcb_special_event_t *
723 xcb_register_for_special_xge(xcb_connection_t *c,
724                              xcb_extension_t *ext,
725                              uint32_t eid,
726                              uint32_t *stamp)
727 {
728     xcb_special_event_t *se;
729     const xcb_query_extension_reply_t   *ext_reply;
730
731     if(c->has_error)
732         return NULL;
733     ext_reply = xcb_get_extension_data(c, ext);
734     if (!ext_reply)
735         return NULL;
736     pthread_mutex_lock(&c->iolock);
737     for (se = c->in.special_events; se; se = se->next) {
738         if (se->extension == ext_reply->major_opcode &&
739             se->eid == eid) {
740             pthread_mutex_unlock(&c->iolock);
741             return NULL;
742         }
743     }
744     se = calloc(1, sizeof(xcb_special_event_t));
745     if (!se) {
746         pthread_mutex_unlock(&c->iolock);
747         return NULL;
748     }
749             
750     se->extension = ext_reply->major_opcode;
751     se->eid = eid;
752
753     se->events = NULL;
754     se->events_tail = &se->events;
755     se->stamp = stamp;
756
757     pthread_cond_init(&se->special_event_cond, 0);
758
759     se->next = c->in.special_events;
760     c->in.special_events = se;
761     pthread_mutex_unlock(&c->iolock);
762     return se;
763 }
764
765 void
766 xcb_unregister_for_special_event(xcb_connection_t *c,
767                                  xcb_special_event_t *se)
768 {
769     xcb_special_event_t *s, **prev;
770     struct event_list   *events, *next;
771
772     if (!se)
773         return;
774
775     if (c->has_error)
776         return;
777
778     pthread_mutex_lock(&c->iolock);
779
780     for (prev = &c->in.special_events; (s = *prev) != NULL; prev = &(s->next)) {
781         if (s == se) {
782             *prev = se->next;
783             for (events = se->events; events; events = next) {
784                 next = events->next;
785                 free (events->event);
786                 free (events);
787             }
788             pthread_cond_destroy(&se->special_event_cond);
789             free (se);
790             break;
791         }
792     }
793     pthread_mutex_unlock(&c->iolock);
794 }
795
796 /* Private interface */
797
/* Initialize the input side of a connection.  Returns 1 on success, 0
 * on failure (the caller is responsible for tearing down any partial
 * state).  NOTE(review): fields not set here (request_expected,
 * current_reply, events, pending_replies, readers, special_events, the
 * FD queue) appear to rely on the enclosing structure being
 * zero-initialized by its allocator — confirm against the caller. */
int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    /* Map from sequence number to reply lists for requests whose
     * replies arrived before anyone asked for them. */
    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    /* Empty-list invariant: each tail pointer points at its head. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}
819
820 void _xcb_in_destroy(_xcb_in *in)
821 {
822     pthread_cond_destroy(&in->event_cond);
823     free_reply_list(in->current_reply);
824     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
825     while(in->events)
826     {
827         struct event_list *e = in->events;
828         in->events = e->next;
829         free(e->event);
830         free(e);
831     }
832     while(in->pending_replies)
833     {
834         pending_reply *pend = in->pending_replies;
835         in->pending_replies = pend->next;
836         free(pend);
837     }
838 }
839
840 void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
841 {
842     int pthreadret;
843     if(c->in.readers)
844         pthreadret = pthread_cond_signal(c->in.readers->data);
845     else
846         pthreadret = pthread_cond_signal(&c->in.event_cond);
847     assert(pthreadret == 0);
848 }
849
850 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
851 {
852     pending_reply *pend = malloc(sizeof(pending_reply));
853     assert(workaround != WORKAROUND_NONE || flags != 0);
854     if(!pend)
855     {
856         _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
857         return 0;
858     }
859     pend->first_request = pend->last_request = request;
860     pend->workaround = workaround;
861     pend->flags = flags;
862     pend->next = 0;
863     *c->in.pending_replies_tail = pend;
864     c->in.pending_replies_tail = &pend->next;
865     return 1;
866 }
867
/* Close off the trailing external-socket-owner pending-reply record,
 * if any, so that it covers exactly the requests written so far and is
 * processed normally from now on. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* Recover the record that owns the tail 'next' pointer. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
881
/* Pull whatever data is available from the socket into the input queue
 * — collecting any SCM_RIGHTS file descriptors when FD passing is
 * compiled in — then process as many complete packets as possible.
 * Returns 1 if the connection is still usable, 0 after shutting it
 * down.  Caller must hold the connection lock. */
int _xcb_in_read(xcb_connection_t *c)
{
    int n;

#if HAVE_SENDMSG
    struct iovec    iov = {
        .iov_base = c->in.queue + c->in.queue_len,
        .iov_len = sizeof(c->in.queue) - c->in.queue_len,
    };
    /* Ancillary buffer sized for the most FDs we are willing to hold;
     * controllen is shrunk by the FDs already queued so the total can
     * never exceed the fd array. */
    char cmsgbuf[CMSG_SPACE(sizeof(int) * XCB_MAX_PASS_FD)];
    struct msghdr msg = {
        .msg_name = NULL,
        .msg_namelen = 0,
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = cmsgbuf,
        .msg_controllen = CMSG_SPACE(sizeof(int) * (XCB_MAX_PASS_FD - c->in.in_fd.nfd)),
    };
    n = recvmsg(c->fd, &msg, 0);

    /* Check for truncation errors. Only MSG_CTRUNC is
     * probably possible here, which would indicate that
     * the sender tried to transmit more than XCB_MAX_PASS_FD
     * file descriptors.
     */
    if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
        return 0;
    }
#else
    n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0);
#endif
    if(n > 0) {
#if HAVE_SENDMSG
        struct cmsghdr *hdr;

        /* Append any passed file descriptors to the connection's FD
         * queue, where read_fds() hands them out to replies. */
        for (hdr = CMSG_FIRSTHDR(&msg); hdr; hdr = CMSG_NXTHDR(&msg, hdr)) {
            if (hdr->cmsg_level == SOL_SOCKET && hdr->cmsg_type == SCM_RIGHTS) {
                int nfd = (hdr->cmsg_len - CMSG_LEN(0)) / sizeof (int);
                memcpy(&c->in.in_fd.fd[c->in.in_fd.nfd], CMSG_DATA(hdr), nfd * sizeof (int));
                c->in.in_fd.nfd += nfd;
            }
        }
#endif
        c->in.queue_len += n;
    }
    while(read_packet(c))
        /* empty */;
#if HAVE_SENDMSG
    if (c->in.in_fd.nfd) {
        /* Compact unconsumed descriptors to the front of the queue. */
        c->in.in_fd.nfd -= c->in.in_fd.ifd;
        memmove(&c->in.in_fd.fd[0],
                &c->in.in_fd.fd[c->in.in_fd.ifd],
                c->in.in_fd.nfd * sizeof (int));
        c->in.in_fd.ifd = 0;

        /* If we have any left-over file descriptors after emptying
         * the input buffer, then the server sent some that we weren't
         * expecting.  Close them and mark the connection as broken;
         */
        if (c->in.queue_len == 0 && c->in.in_fd.nfd != 0) {
            int i;
            for (i = 0; i < c->in.in_fd.nfd; i++)
                close(c->in.in_fd.fd[i]);
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
            return 0;
        }
    }
#endif
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);
    return 0;
}
960
961 int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
962 {
963     int done = c->in.queue_len;
964     if(len < done)
965         done = len;
966
967     memcpy(buf, c->in.queue, done);
968     c->in.queue_len -= done;
969     memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
970
971     if(len > done)
972     {
973         int ret = read_block(c->fd, (char *) buf + done, len - done);
974         if(ret <= 0)
975         {
976             _xcb_conn_shutdown(c, XCB_CONN_ERROR);
977             return ret;
978         }
979     }
980
981     return len;
982 }