generator: support listelement-ref
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include <assert.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <stdio.h>
37 #include <errno.h>
38
39 #if USE_POLL
40 #include <poll.h>
41 #endif
42 #ifndef _WIN32
43 #include <sys/select.h>
44 #include <sys/socket.h>
45 #endif
46
47 #ifdef _WIN32
48 #include "xcb_windefs.h"
49 #endif /* _WIN32 */
50
51 #include "xcb.h"
52 #include "xcbext.h"
53 #include "xcbint.h"
54
#define XCB_ERROR 0
#define XCB_REPLY 1
#define XCB_XGE_EVENT 35

/* FIFO node holding one event read from the server. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

/* One registered listener for XGE "special events", which are delivered
 * on a private per-listener queue instead of the normal event queue. */
struct xcb_special_event {

    struct xcb_special_event *next;

    /* Match XGE events for the specific extension and event ID (the
     * first 32 bit word after evtype)
     */
    uint8_t     extension;  /* extension's major opcode */
    uint32_t    eid;
    uint32_t    *stamp;     /* if non-NULL, incremented on each delivery */

    /* Queue of matched events; new events are appended at events_tail. */
    struct event_list   *events;
    struct event_list   **events_tail;

    /* Signalled each time a matching event is queued. */
    pthread_cond_t special_event_cond;
};

/* FIFO node holding one reply (or checked-error) buffer. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};

/* Record for a sent request whose responses need non-default handling
 * (server workarounds, discard-on-arrival, checked errors, fd-passing). */
typedef struct pending_reply {
    uint64_t first_request;   /* covers sequence range [first_request, last_request] */
    uint64_t last_request;
    enum workarounds workaround;
    int flags;                /* XCB_REQUEST_* flags */
    struct pending_reply *next;
} pending_reply;

/* One thread blocked waiting for a reply; list is sorted by sequence. */
typedef struct reader_list {
    uint64_t request;
    pthread_cond_t *data;     /* condition the waiting thread sleeps on */
    struct reader_list *next;
} reader_list;
99
100 static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
101 {
102     while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
103     {
104         /* If you don't have what you're looking for now, you never
105          * will. Wake up and leave me alone. */
106         pthread_cond_signal((*prev_reader)->data);
107         *prev_reader = (*prev_reader)->next;
108     }
109 }
110
#if HAVE_SENDMSG
/* Copy the next nfd received file descriptors out of the connection's
 * input fd queue into fds.  Returns 1 on success, 0 if fewer than nfd
 * descriptors are currently queued (nothing is consumed in that case). */
static int read_fds(xcb_connection_t *c, int *fds, int nfd)
{
    int available = c->in.in_fd.nfd - c->in.in_fd.ifd;

    if (available < nfd)
        return 0;

    memcpy(fds, &c->in.in_fd.fd[c->in.in_fd.ifd], nfd * sizeof (int));
    c->in.in_fd.ifd += nfd;
    return 1;
}
#endif
124
/* View of the fixed 32-byte header of an XGE event, exposing the fields
 * (extension, evtype, eid) used to match registered special events.
 * NOTE(review): assumes every special event places its event ID in the
 * 32-bit word at offset 12 -- confirm for each extension using this API. */
typedef struct xcb_ge_special_event_t {
    uint8_t  response_type; /**< XCB_XGE_EVENT for events handled here */
    uint8_t  extension; /**< major opcode of the generating extension */
    uint16_t sequence; /**<  */
    uint32_t length; /**< extra length in 4-byte units beyond 32 bytes */
    uint16_t evtype; /**<  */
    uint8_t  pad0[2]; /**< */
    uint32_t eid; /**< extension-defined event ID used for matching */
    uint8_t  pad1[16]; /**<  */
} xcb_ge_special_event_t;
135
/* If this event matches a registered special-event listener, hand the
 * node to that listener's private queue (transferring ownership) and
 * wake it.  Returns 1 if the event was claimed, 0 otherwise. */
static int event_special(xcb_connection_t *c,
                         struct event_list *event)
{
    struct xcb_special_event *special_event;
    struct xcb_ge_special_event_t *ges = (void *) event->event;

    /* Special events are always XGE events */
    if ((ges->response_type & 0x7f) != XCB_XGE_EVENT)
        return 0;

    for (special_event = c->in.special_events;
         special_event;
         special_event = special_event->next)
    {
        if (ges->extension == special_event->extension &&
            ges->eid == special_event->eid)
        {
            /* Append to the listener's queue; bump its stamp so pollers
             * can detect that a new event arrived. */
            *special_event->events_tail = event;
            special_event->events_tail = &event->next;
            if (special_event->stamp)
                ++(*special_event->stamp);
            pthread_cond_signal(&special_event->special_event_cond);
            return 1;
        }
    }

    return 0;
}
164
/* Try to consume one complete packet (error, reply, or event) from the
 * head of the input queue.  Returns 1 if a packet was dispatched, 0 when
 * not enough data is buffered yet or a failure shut the connection down.
 * Caller holds the iolock. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    uint64_t length = 32;     /* fixed-size part common to all packets */
    uint64_t eventlength = 0; /* length after first 32 bytes for GenericEvents */
    int nfd = 0;         /* Number of file descriptors attached to the reply */
    uint64_t bufsize;
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet.
     * (KeymapNotify is skipped here: it carries no sequence number.) */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        /* Widen the 16-bit wire sequence into the 64-bit counter; an
         * apparent step backwards means we crossed a 16-bit wrap. */
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* A new sequence number means the previous request's replies
             * are complete: file them in the replies map where
             * poll_for_reply will find them. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Discard pending_reply records for requests that can no longer
         * receive any responses. */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates its request: nothing more will follow. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
    }

    /* Look up any special handling recorded for this request when it was
     * sent; keep it only if this packet's sequence falls in its range. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Buggy server reports a wrong reply length; recompute it
             * from two count fields in the reply body. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;

        /* XXX a bit of a hack -- we "know" that all FD replys place
         * the number of fds in the pad0 byte */
        if (pend && pend->flags & XCB_REQUEST_REPLY_FDS)
            nfd = genrep.pad0;
    }

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    /* Non-reply packets get an extra trailing uint32_t for
     * full_sequence; received fds are stored after the packet body. */
    bufsize = length + eventlength + nfd * sizeof(int)  +
        (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t));
    if (bufsize < INT32_MAX)
        buf = malloc((size_t) bufsize);
    else
        buf = NULL; /* implausibly large: refuse the allocation */
    if(!buf)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

#if HAVE_SENDMSG
    if (nfd)
    {
        if (!read_fds(c, (int *) &((char *) buf)[length], nfd))
        {
            free(buf);
            return 0;
        }
    }
#endif

    /* The caller asked to throw this response away (xcb_discard_reply). */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        /* Queue on current_reply and wake the oldest reader if it is
         * waiting on exactly this sequence number. */
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;

    /* Offer the event to special-event listeners first; otherwise queue
     * it on the general event list and wake event waiters. */
    if (!event_special(c, event)) {
        *c->in.events_tail = event;
        c->in.events_tail = &event->next;
        pthread_cond_signal(&c->in.event_cond);
    }
    return 1; /* I have something for you... */
}
337
338 static xcb_generic_event_t *get_event(xcb_connection_t *c)
339 {
340     struct event_list *cur = c->in.events;
341     xcb_generic_event_t *ret;
342     if(!c->in.events)
343         return 0;
344     ret = cur->event;
345     c->in.events = cur->next;
346     if(!cur->next)
347         c->in.events_tail = &c->in.events;
348     free(cur);
349     return ret;
350 }
351
352 static void free_reply_list(struct reply_list *head)
353 {
354     while(head)
355     {
356         struct reply_list *cur = head;
357         head = cur->next;
358         free(cur->reply);
359         free(cur);
360     }
361 }
362
/* Read exactly `len` bytes from `fd` into `buf`.  The socket is
 * non-blocking, so EAGAIN/EWOULDBLOCK is handled by sleeping in poll()
 * or select() until the fd is readable.  Returns `len` on success, or
 * the failing recv/poll/select result (0 on EOF, negative on error).
 * NOTE(review): `done` is an int while `len` is ssize_t; presumably
 * callers only pass packet-sized lengths well below INT_MAX -- confirm. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done, 0);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_Win32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            /* Retry the wait if a signal interrupts it. */
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}
402
/* Non-blocking check for a response to `request`.  Returns 1 when the
 * request's fate is known now -- *reply and (optionally) *error are set,
 * both NULL meaning no (more) responses exist -- or 0 when the caller
 * must keep waiting because more responses may still arrive.
 * Caller holds the iolock; ownership of returned buffers passes out. */
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Re-file the rest of a multi-reply chain for the next poll. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* An error buffer goes to *error (or is freed if the caller
         * doesn't want it); anything else is a reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
457
458 static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
459 {
460     while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
461         prev_reader = &(*prev_reader)->next;
462     reader->request = request;
463     reader->data = cond;
464     reader->next = *prev_reader;
465     *prev_reader = reader;
466 }
467
468 static void remove_reader(reader_list **prev_reader, reader_list *reader)
469 {
470     while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
471         if(*prev_reader == reader)
472         {
473             *prev_reader = (*prev_reader)->next;
474             break;
475         }
476 }
477
/* Block until all responses to `request` are in, returning the next
 * reply buffer (NULL when none; errors come back via *e).  Flushes the
 * output queue first so the request is actually on the wire, and
 * registers on the readers list so read_packet can wake this thread.
 * Caller holds the iolock. */
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        insert_reader(&c->in.readers, &reader, request, &cond);

        /* Sleep until poll_for_reply can give a definitive answer, or
         * the connection breaks (_xcb_conn_wait returns 0). */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    /* Pass the read baton to the next waiting reader or event waiter. */
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
501
502 static uint64_t widen(xcb_connection_t *c, unsigned int request)
503 {
504     uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
505     if(widened_request > c->out.request)
506         widened_request -= UINT64_C(1) << 32;
507     return widened_request;
508 }
509
510 /* Public interface */
511
512 void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
513 {
514     void *ret;
515     if(e)
516         *e = 0;
517     if(c->has_error)
518         return 0;
519
520     pthread_mutex_lock(&c->iolock);
521     ret = wait_for_reply(c, widen(c, request), e);
522     pthread_mutex_unlock(&c->iolock);
523     return ret;
524 }
525
526 int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t reply_size)
527 {
528     return (int *) (&((char *) reply)[reply_size]);
529 }
530
531 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
532 {
533     pending_reply *pend;
534     pend = malloc(sizeof(*pend));
535     if(!pend)
536     {
537         _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
538         return;
539     }
540
541     pend->first_request = seq;
542     pend->last_request = seq;
543     pend->workaround = 0;
544     pend->flags = XCB_REQUEST_DISCARD_REPLY;
545     pend->next = *prev_next;
546     *prev_next = pend;
547
548     if(!pend->next)
549         c->in.pending_replies_tail = &pend->next;
550 }
551
/* Drop all responses to `request`: free any already read, and if more
 * may still arrive, mark (or create) a pending_reply so read_packet
 * frees them on arrival.  Caller holds the iolock. */
static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        /* List is sorted by first_request: past this point there is no match. */
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}
583
584 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
585 {
586     if(c->has_error)
587         return;
588
589     /* If an error occurred when issuing the request, fail immediately. */
590     if(!sequence)
591         return;
592
593     pthread_mutex_lock(&c->iolock);
594     discard_reply(c, widen(c, sequence));
595     pthread_mutex_unlock(&c->iolock);
596 }
597
598 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
599 {
600     int ret;
601     if(c->has_error)
602     {
603         *reply = 0;
604         if(error)
605             *error = 0;
606         return 1; /* would not block */
607     }
608     assert(reply != 0);
609     pthread_mutex_lock(&c->iolock);
610     ret = poll_for_reply(c, widen(c, request), reply, error);
611     pthread_mutex_unlock(&c->iolock);
612     return ret;
613 }
614
615 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
616 {
617     xcb_generic_event_t *ret;
618     if(c->has_error)
619         return 0;
620     pthread_mutex_lock(&c->iolock);
621     /* get_event returns 0 on empty list. */
622     while(!(ret = get_event(c)))
623         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
624             break;
625
626     _xcb_in_wake_up_next_reader(c);
627     pthread_mutex_unlock(&c->iolock);
628     return ret;
629 }
630
631 static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
632 {
633     xcb_generic_event_t *ret = 0;
634     if(!c->has_error)
635     {
636         pthread_mutex_lock(&c->iolock);
637         /* FIXME: follow X meets Z architecture changes. */
638         ret = get_event(c);
639         if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
640             ret = get_event(c);
641         pthread_mutex_unlock(&c->iolock);
642     }
643     return ret;
644 }
645
/* Public: non-blocking event fetch; may read once from the socket if
 * the queue is empty. */
xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 0);
}
650
/* Public: non-blocking fetch of an already-queued event; never touches
 * the socket. */
xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 1);
}
655
/* Public: block until it is known whether the void request in `cookie`
 * failed; returns the error (caller frees) or NULL on success. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    /* If the server may not yet have told us this request's fate (no
     * response expected for it, and we haven't read past it), force a
     * round trip so any error is flushed out before we wait. */
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    reply = wait_for_reply(c, request, &ret);
    /* A void request can only produce an error, never a reply. */
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
676
677 static xcb_generic_event_t *get_special_event(xcb_connection_t *c,
678                                               xcb_special_event_t *se)
679 {
680     xcb_generic_event_t *event = NULL;
681     struct event_list *events;
682
683     if ((events = se->events) != NULL) {
684         event = events->event;
685         if (!(se->events = events->next))
686             se->events_tail = &se->events;
687         free (events);
688     }
689     return event;
690 }
691
692 xcb_generic_event_t *xcb_poll_for_special_event(xcb_connection_t *c,
693                                                 xcb_special_event_t *se)
694 {
695     xcb_generic_event_t *event;
696
697     if(c->has_error)
698         return 0;
699     pthread_mutex_lock(&c->iolock);
700     event = get_special_event(c, se);
701     pthread_mutex_unlock(&c->iolock);
702     return event;
703 }
704
705 xcb_generic_event_t *xcb_wait_for_special_event(xcb_connection_t *c,
706                                                 xcb_special_event_t *se)
707 {
708     xcb_generic_event_t *event;
709
710     if(c->has_error)
711         return 0;
712     pthread_mutex_lock(&c->iolock);
713
714     /* get_special_event returns 0 on empty list. */
715     while(!(event = get_special_event(c, se)))
716         if(!_xcb_conn_wait(c, &se->special_event_cond, 0, 0))
717             break;
718
719     pthread_mutex_unlock(&c->iolock);
720     return event;
721 }
722
723 xcb_special_event_t *
724 xcb_register_for_special_xge(xcb_connection_t *c,
725                              xcb_extension_t *ext,
726                              uint32_t eid,
727                              uint32_t *stamp)
728 {
729     xcb_special_event_t *se;
730     const xcb_query_extension_reply_t   *ext_reply;
731
732     if(c->has_error)
733         return NULL;
734     ext_reply = xcb_get_extension_data(c, ext);
735     if (!ext_reply)
736         return NULL;
737     pthread_mutex_lock(&c->iolock);
738     for (se = c->in.special_events; se; se = se->next) {
739         if (se->extension == ext_reply->major_opcode &&
740             se->eid == eid) {
741             pthread_mutex_unlock(&c->iolock);
742             return NULL;
743         }
744     }
745     se = calloc(1, sizeof(xcb_special_event_t));
746     if (!se) {
747         pthread_mutex_unlock(&c->iolock);
748         return NULL;
749     }
750
751     se->extension = ext_reply->major_opcode;
752     se->eid = eid;
753
754     se->events = NULL;
755     se->events_tail = &se->events;
756     se->stamp = stamp;
757
758     pthread_cond_init(&se->special_event_cond, 0);
759
760     se->next = c->in.special_events;
761     c->in.special_events = se;
762     pthread_mutex_unlock(&c->iolock);
763     return se;
764 }
765
766 void
767 xcb_unregister_for_special_event(xcb_connection_t *c,
768                                  xcb_special_event_t *se)
769 {
770     xcb_special_event_t *s, **prev;
771     struct event_list   *events, *next;
772
773     if (!se)
774         return;
775
776     if (c->has_error)
777         return;
778
779     pthread_mutex_lock(&c->iolock);
780
781     for (prev = &c->in.special_events; (s = *prev) != NULL; prev = &(s->next)) {
782         if (s == se) {
783             *prev = se->next;
784             for (events = se->events; events; events = next) {
785                 next = events->next;
786                 free (events->event);
787                 free (events);
788             }
789             pthread_cond_destroy(&se->special_event_cond);
790             free (se);
791             break;
792         }
793     }
794     pthread_mutex_unlock(&c->iolock);
795 }
796
797 /* Private interface */
798
/* Initialize the input side of a connection.  Returns 1 on success, 0 if
 * the condition variable or replies map cannot be created.
 * NOTE(review): fields not set here (request_expected, events, readers,
 * special_events, ...) are presumably zeroed by the caller's allocation
 * of the containing struct -- confirm against xcb_conn.c. */
int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    /* Empty lists: each tail pointer refers back to its head pointer. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}
820
821 void _xcb_in_destroy(_xcb_in *in)
822 {
823     pthread_cond_destroy(&in->event_cond);
824     free_reply_list(in->current_reply);
825     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
826     while(in->events)
827     {
828         struct event_list *e = in->events;
829         in->events = e->next;
830         free(e->event);
831         free(e);
832     }
833     while(in->pending_replies)
834     {
835         pending_reply *pend = in->pending_replies;
836         in->pending_replies = pend->next;
837         free(pend);
838     }
839 }
840
841 void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
842 {
843     int pthreadret;
844     if(c->in.readers)
845         pthreadret = pthread_cond_signal(c->in.readers->data);
846     else
847         pthreadret = pthread_cond_signal(&c->in.event_cond);
848     assert(pthreadret == 0);
849 }
850
851 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
852 {
853     pending_reply *pend = malloc(sizeof(pending_reply));
854     assert(workaround != WORKAROUND_NONE || flags != 0);
855     if(!pend)
856     {
857         _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
858         return 0;
859     }
860     pend->first_request = pend->last_request = request;
861     pend->workaround = workaround;
862     pend->flags = flags;
863     pend->next = 0;
864     *c->in.pending_replies_tail = pend;
865     c->in.pending_replies_tail = &pend->next;
866     return 1;
867 }
868
/* Called when an external socket owner hands the socket back: cap the
 * open-ended pending_reply covering its requests at the newest request
 * and clear the workaround so normal processing resumes.  container_of
 * recovers the last record from the tail pointer, which points at that
 * record's `next` field. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    /* Tail pointing at the head means the list is empty. */
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
882
/* Do one non-blocking read from the socket into the input queue, then
 * consume as many complete packets as possible.  With HAVE_SENDMSG this
 * also receives file descriptors passed via SCM_RIGHTS ancillary data.
 * Returns 1 if the connection is still usable, 0 after shutting it down.
 * Caller holds the iolock. */
int _xcb_in_read(xcb_connection_t *c)
{
    int n;

#if HAVE_SENDMSG
    struct iovec    iov = {
        .iov_base = c->in.queue + c->in.queue_len,
        .iov_len = sizeof(c->in.queue) - c->in.queue_len,
    };
    union {
        struct cmsghdr cmsghdr;
        char buf[CMSG_SPACE(XCB_MAX_PASS_FD * sizeof(int))];
    } cmsgbuf;
    struct msghdr msg = {
        .msg_name = NULL,
        .msg_namelen = 0,
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = cmsgbuf.buf,
        /* Accept only as many fds as there is room left for. */
        .msg_controllen = CMSG_SPACE(sizeof(int) * (XCB_MAX_PASS_FD - c->in.in_fd.nfd)),
    };
    n = recvmsg(c->fd, &msg, 0);

    /* Check for truncation errors. Only MSG_CTRUNC is
     * probably possible here, which would indicate that
     * the sender tried to transmit more than XCB_MAX_PASS_FD
     * file descriptors.
     */
    if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
        return 0;
    }
#else
    n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0);
#endif
    if(n > 0) {
#if HAVE_SENDMSG
        struct cmsghdr *hdr;

        /* Harvest any SCM_RIGHTS descriptors into the in_fd array. */
        if (msg.msg_controllen >= sizeof (struct cmsghdr)) {
            for (hdr = CMSG_FIRSTHDR(&msg); hdr; hdr = CMSG_NXTHDR(&msg, hdr)) {
                if (hdr->cmsg_level == SOL_SOCKET && hdr->cmsg_type == SCM_RIGHTS) {
                    int nfd = (hdr->cmsg_len - CMSG_LEN(0)) / sizeof (int);
                    memcpy(&c->in.in_fd.fd[c->in.in_fd.nfd], CMSG_DATA(hdr), nfd * sizeof (int));
                    c->in.in_fd.nfd += nfd;
                }
            }
        }
#endif
        c->in.queue_len += n;
    }
    /* Dispatch every complete packet now buffered. */
    while(read_packet(c))
        /* empty */;
#if HAVE_SENDMSG
    if (c->in.in_fd.nfd) {
        /* Compact unconsumed descriptors to the front of the array. */
        c->in.in_fd.nfd -= c->in.in_fd.ifd;
        memmove(&c->in.in_fd.fd[0],
                &c->in.in_fd.fd[c->in.in_fd.ifd],
                c->in.in_fd.nfd * sizeof (int));
        c->in.in_fd.ifd = 0;

        /* If we have any left-over file descriptors after emptying
         * the input buffer, then the server sent some that we weren't
         * expecting.  Close them and mark the connection as broken;
         */
        if (c->in.queue_len == 0 && c->in.in_fd.nfd != 0) {
            int i;
            for (i = 0; i < c->in.in_fd.nfd; i++)
                close(c->in.in_fd.fd[i]);
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
            return 0;
        }
    }
#endif
    /* EAGAIN/EWOULDBLOCK just means no more data right now: still OK. */
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);
    return 0;
}
966
967 int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
968 {
969     int done = c->in.queue_len;
970     if(len < done)
971         done = len;
972
973     memcpy(buf, c->in.queue, done);
974     c->in.queue_len -= done;
975     memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
976
977     if(len > done)
978     {
979         int ret = read_block(c->fd, (char *) buf + done, len - done);
980         if(ret <= 0)
981         {
982             _xcb_conn_shutdown(c, XCB_CONN_ERROR);
983             return ret;
984         }
985     }
986
987     return len;
988 }