1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
/* Input side of the connection: reads replies, events, and errors from the server. */
46 #include <sys/select.h>
47 #include <sys/socket.h>
51 #include "xcb_windefs.h"
56 #define XCB_XGE_EVENT 35
59 xcb_generic_event_t *event;
60 struct event_list *next;
/* A registered "special event" listener: XGE events matching this
 * listener are diverted onto its private queue (see event_special)
 * instead of the main event queue. */
struct xcb_special_event {
    struct xcb_special_event *next;     /* singly-linked list of listeners */

    /* Match XGE events for the specific extension and event ID (the
     * first 32 bit word after evtype) */

    struct event_list *events;          /* queue of diverted events */
    struct event_list **events_tail;    /* append point for the queue */

    pthread_cond_t special_event_cond;  /* signalled when an event is queued */
82 struct reply_list *next;
/* Bookkeeping for a range of requests whose responses need special
 * handling (protocol workarounds, checked errors, discarded replies). */
typedef struct pending_reply {
    uint64_t first_request;        /* first sequence number covered */
    uint64_t last_request;         /* last sequence number covered */
    enum workarounds workaround;   /* protocol workaround to apply, if any */
    struct pending_reply *next;
93 typedef struct reader_list {
96 struct reader_list *next;
99 static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
101 while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
103 /* If you don't have what you're looking for now, you never
104 * will. Wake up and leave me alone. */
105 pthread_cond_signal((*prev_reader)->data);
106 *prev_reader = (*prev_reader)->next;
111 static int read_fds(xcb_connection_t *c, int *fds, int nfd)
113 int *ifds = &c->in.in_fd.fd[c->in.in_fd.ifd];
114 int infd = c->in.in_fd.nfd - c->in.in_fd.ifd;
118 memcpy(fds, ifds, nfd * sizeof (int));
119 c->in.in_fd.ifd += nfd;
/* Wire layout of an X Generic Event (XGE) as needed for special-event
 * matching: exposes the extension opcode, the event type, and the event
 * ID (eid) -- the first 32-bit word after evtype. */
typedef struct xcb_ge_special_event_t {
    uint8_t response_type; /**< XCB_XGE_EVENT for events matched here */
    uint8_t extension; /**< major opcode of the generating extension */
    uint16_t sequence; /**< low bits of the sequence number */
    uint32_t length; /**< extra data length in 4-byte units */
    uint16_t evtype; /**< extension-relative event type */
    uint8_t pad0[2]; /**< padding */
    uint32_t eid; /**< event ID used to match registered listeners */
    uint8_t pad1[16]; /**< padding */
} xcb_ge_special_event_t;
/* Try to divert an event to a registered special-event listener.
 * If the event is an XGE event matching a listener's (extension, eid),
 * it is appended to that listener's queue, the listener's stamp (if
 * set) is bumped, and any waiter is woken. */
static int event_special(xcb_connection_t *c,
                         struct event_list *event)
    struct xcb_special_event *special_event;
    struct xcb_ge_special_event_t *ges = (void *) event->event;

    /* Special events are always XGE events */
    if ((ges->response_type & 0x7f) != XCB_XGE_EVENT)

    /* Scan registered listeners for a match on (extension, eid). */
    for (special_event = c->in.special_events;
         special_event = special_event->next)

        if (ges->extension == special_event->extension &&
            ges->eid == special_event->eid)

            /* Match: append to the listener's private queue and wake
             * any thread blocked in xcb_wait_for_special_event. */
            *special_event->events_tail = event;
            special_event->events_tail = &event->next;
            if (special_event->stamp)
                ++(*special_event->stamp);
            pthread_cond_signal(&special_event->special_event_cond);
/* Parse and consume one complete packet (reply, error, or event) from
 * the input queue. Returns 1 when a packet was consumed, 0 when more
 * data is needed. Must run with the connection's iolock held. */
static int read_packet(xcb_connection_t *c)
    xcb_generic_reply_t genrep;
    uint64_t length = 32;     /* every X11 packet is at least 32 bytes */
    uint64_t eventlength = 0; /* length after first 32 bytes for GenericEvents */
    int nfd = 0; /* Number of file descriptors attached to the reply */
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    /* (KeymapNotify carries no sequence number, so it is skipped.) */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        /* The 16-bit wire sequence wrapped: move to the next epoch. */
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        /* A new request's responses are starting: archive the previous
         * request's accumulated replies in the replies map. */
        if(c->in.request_read != lastread)
            if(c->in.current_reply)
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
            c->in.current_reply = 0;
            c->in.current_reply_tail = &c->in.current_reply;
            c->in.request_completed = c->in.request_read - 1;

        /* Retire pending_reply records that can no longer match. */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            c->in.pending_replies_tail = &c->in.pending_replies;

        /* An error finishes its request: nothing more will follow. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);

    /* Find the pending_reply record (if any) covering this response. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
        pend = c->in.pending_replies;
        !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
          (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
           XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
            /* Buggy GLX reply: recompute length from the payload. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        length += genrep.length * 4;

    /* XXX a bit of a hack -- we "know" that all FD replys place
     * the number of fds in the pad0 byte */
    if (pend && pend->flags & XCB_REQUEST_REPLY_FDS)

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    /* Non-replies get an extra trailing word for full_sequence. */
    bufsize = length + eventlength + nfd * sizeof(int) +
        (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t));
    if (bufsize < INT32_MAX)
        buf = malloc((size_t) bufsize);
    _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);

    if(_xcb_in_read_block(c, buf, length) <= 0)

    /* pull in XGE event data if available, append after event struct */
    if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)

    /* Collect any file descriptors attached to the reply. */
    if (!read_fds(c, (int *) &((char *) buf)[length], nfd))

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))

    /* Record the widened sequence number in the packet. */
    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
        (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        /* Append to the current request's reply list; wake the reader
         * waiting on exactly this request, if there is one. */
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
    /* Not claimed by a special-event listener: queue on the main list. */
    if (!event_special(c, event)) {
        *c->in.events_tail = event;
        c->in.events_tail = &event->next;
        pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
/* Pop the first event from the main event queue, or return 0 if the
 * queue is empty. Caller must hold the iolock. */
static xcb_generic_event_t *get_event(xcb_connection_t *c)
    struct event_list *cur = c->in.events;
    xcb_generic_event_t *ret;
    c->in.events = cur->next;
    /* Queue drained: point the tail back at the head. */
    c->in.events_tail = &c->in.events;
/* Free every node of a reply_list, including each node's reply buffer. */
static void free_reply_list(struct reply_list *head)
    struct reply_list *cur = head;
/* Read exactly len bytes from fd into buf. The socket is non-blocking,
 * so on EAGAIN/EWOULDBLOCK we wait for readability (poll on POSIX,
 * select otherwise) and retry, restarting interrupted waits. */
static int read_block(const int fd, void *buf, const ssize_t len)
    int ret = recv(fd, ((char *) buf) + done, len - done, 0);
    /* No data available yet: block until the socket is readable. */
    if(ret < 0 && errno == EAGAIN)
    if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
        ret = poll(&pfd, 1, -1);
    } while (ret == -1 && errno == EINTR);
    /* Initializing errno here makes sure that for Win32 this loop will execute only once */
    ret = select(fd + 1, &fds, 0, 0, 0);
    } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
/* Non-blocking check for the response to `request`. Returns nonzero
 * when the outcome is settled -- a reply (via *reply), an error (via
 * *error), or provably nothing coming. Caller holds the iolock. */
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
        head = _xcb_map_remove(c->in.replies, request);
        /* More replies remain for this request: put them back. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        c->in.current_reply_tail = &c->in.current_reply;
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
    /* We may have more replies on the way for this request: block until we're
     * sure either way. */
    /* Route errors through *error and replies through *reply. */
    if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        *error = head->reply;
    *reply = head->reply;
457 static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
459 while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
460 prev_reader = &(*prev_reader)->next;
461 reader->request = request;
463 reader->next = *prev_reader;
464 *prev_reader = reader;
467 static void remove_reader(reader_list **prev_reader, reader_list *reader)
469 while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
470 if(*prev_reader == reader)
472 *prev_reader = (*prev_reader)->next;
/* Block until the reply or error for `request` is available, flushing
 * the request to the server first if necessary. Errors come back via
 * *e. Caller holds the iolock; the wait releases it. */
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        /* Queue ourselves as a reader for this sequence number. */
        insert_reader(&c->in.readers, &reader, request, &cond);

        /* Wait until poll_for_reply reports a definitive answer. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    /* Hand the socket to whichever waiter should read next. */
    _xcb_in_wake_up_next_reader(c);
501 static uint64_t widen(xcb_connection_t *c, unsigned int request)
503 uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
504 if(widened_request > c->out.request)
505 widened_request -= UINT64_C(1) << 32;
506 return widened_request;
509 /* Public interface */
/* Public: block until the reply for the 32-bit cookie `request`
 * arrives, widening it to a 64-bit sequence under the iolock. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
    pthread_mutex_lock(&c->iolock);
    ret = wait_for_reply(c, widen(c, request), e);
    pthread_mutex_unlock(&c->iolock);
525 int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t reply_size)
527 return (int *) (&((char *) reply)[reply_size]);
/* Create a pending_reply marked DISCARD for sequence `seq` and link it
 * at *prev_next; used when a discard is requested for a request that
 * has no pending_reply record yet (e.g. an _unchecked request). */
static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
    pend = malloc(sizeof(*pend));
    /* Allocation failure is fatal for the connection. */
    _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);

    /* The record covers exactly this one request. */
    pend->first_request = seq;
    pend->last_request = seq;
    pend->workaround = 0;
    pend->flags = XCB_REQUEST_DISCARD_REPLY;
    pend->next = *prev_next;
    c->in.pending_replies_tail = &pend->next;
/* Drop any already-received and all future responses for `request`
 * without handing them to the caller. Caller holds the iolock. */
static void discard_reply(xcb_connection_t *c, uint64_t request)
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
        /* Walked past the target sequence: no record matches
         * (the list appears ordered by first_request). */
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))

        if((*prev_pend)->first_request == request)
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
/* Public: discard the response for `sequence`, freeing it on arrival
 * rather than queueing it for a reader. */
void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
    /* If an error occurred when issuing the request, fail immediately. */
    pthread_mutex_lock(&c->iolock);
    discard_reply(c, widen(c, sequence));
    pthread_mutex_unlock(&c->iolock);
/* Public: non-blocking check for the reply to `request`. Returns
 * nonzero when waiting would not block (answer or error available). */
int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
    return 1; /* would not block */
    pthread_mutex_lock(&c->iolock);
    ret = poll_for_reply(c, widen(c, request), reply, error);
    pthread_mutex_unlock(&c->iolock);
/* Public: block until the next event arrives on the connection,
 * reading from the server as necessary. */
xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
    xcb_generic_event_t *ret;
    pthread_mutex_lock(&c->iolock);
    /* get_event returns 0 on empty list. */
    while(!(ret = get_event(c)))
        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))

    /* Let the next blocked thread take over reading. */
    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
/* Shared implementation for the two event polls: with queued == 0 it
 * may read fresh data from the socket (when no other thread is already
 * reading); with queued != 0 it only inspects already-queued events. */
static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
    xcb_generic_event_t *ret = 0;
    pthread_mutex_lock(&c->iolock);
    /* FIXME: follow X meets Z architecture changes. */
    if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
    pthread_mutex_unlock(&c->iolock);
645 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
647 return poll_for_next_event(c, 0);
650 xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
652 return poll_for_next_event(c, 1);
/* Public: block until the void request identified by `cookie` has
 * completed, returning its error if one occurred (0 on success). */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
    xcb_generic_error_t *ret = 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    /* Nothing will report this request's completion on its own: send a
     * sync so a response arrives that bounds it, then flush. */
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    reply = wait_for_reply(c, request, &ret);
    pthread_mutex_unlock(&c->iolock);
/* Pop the first event from a special-event listener's private queue,
 * or return NULL if it is empty. Caller holds the iolock. */
static xcb_generic_event_t *get_special_event(xcb_connection_t *c,
                                              xcb_special_event_t *se)
    xcb_generic_event_t *event = NULL;
    struct event_list *events;

    if ((events = se->events) != NULL) {
        event = events->event;
        /* Queue drained: point the tail back at the head. */
        if (!(se->events = events->next))
            se->events_tail = &se->events;
/* Public: non-blocking fetch of the next queued special event for
 * listener `se`; NULL when none is queued. */
xcb_generic_event_t *xcb_poll_for_special_event(xcb_connection_t *c,
                                                xcb_special_event_t *se)
    xcb_generic_event_t *event;
    pthread_mutex_lock(&c->iolock);
    event = get_special_event(c, se);
    pthread_mutex_unlock(&c->iolock);
/* Public: block until the next special event for listener `se`
 * arrives (event_special signals special_event_cond on delivery). */
xcb_generic_event_t *xcb_wait_for_special_event(xcb_connection_t *c,
                                                xcb_special_event_t *se)
    xcb_generic_event_t *event;
    pthread_mutex_lock(&c->iolock);
    /* get_special_event returns 0 on empty list. */
    while(!(event = get_special_event(c, se)))
        if(!_xcb_conn_wait(c, &se->special_event_cond, 0, 0))
    pthread_mutex_unlock(&c->iolock);
/* Public: register interest in XGE events from extension `ext`.
 * Matching events are diverted to the returned xcb_special_event_t
 * instead of the main event queue. Returns NULL when the extension is
 * unavailable, on a conflicting registration, or on alloc failure. */
xcb_special_event_t *
xcb_register_for_special_xge(xcb_connection_t *c,
                             xcb_extension_t *ext,
    xcb_special_event_t *se;
    const xcb_query_extension_reply_t *ext_reply;

    ext_reply = xcb_get_extension_data(c, ext);
    pthread_mutex_lock(&c->iolock);
    /* Scan existing registrations for a conflicting entry. */
    for (se = c->in.special_events; se; se = se->next) {
        if (se->extension == ext_reply->major_opcode &&
            pthread_mutex_unlock(&c->iolock);

    se = calloc(1, sizeof(xcb_special_event_t));
        pthread_mutex_unlock(&c->iolock);

    se->extension = ext_reply->major_opcode;
    /* Empty queue: tail points back at the head. */
    se->events_tail = &se->events;
    pthread_cond_init(&se->special_event_cond, 0);

    /* Push onto the head of the listener list. */
    se->next = c->in.special_events;
    c->in.special_events = se;
    pthread_mutex_unlock(&c->iolock);
/* Public: remove a special-event registration, freeing any queued
 * events that were never collected by the application. */
xcb_unregister_for_special_event(xcb_connection_t *c,
                                 xcb_special_event_t *se)
    xcb_special_event_t *s, **prev;
    struct event_list *events, *next;

    pthread_mutex_lock(&c->iolock);

    /* Find and unlink the registration in the listener list. */
    for (prev = &c->in.special_events; (s = *prev) != NULL; prev = &(s->next)) {
        /* Free all undelivered events queued for this listener. */
        for (events = se->events; events; events = next) {
            free (events->event);

        pthread_cond_destroy(&se->special_event_cond);
        pthread_mutex_unlock(&c->iolock);
796 /* Private interface */
/* Private: initialize the input side of a connection: the event
 * condition variable, sequence counters, the replies map, and the
 * three list tails. Returns nonzero on success. */
int _xcb_in_init(_xcb_in *in)
    if(pthread_cond_init(&in->event_cond, 0))
    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();

    /* Empty lists: each tail points back at its head. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;
/* Private: release all input-side state -- the event condition
 * variable, the current and archived reply lists, queued events, and
 * pending_reply records. */
void _xcb_in_destroy(_xcb_in *in)
    pthread_cond_destroy(&in->event_cond);
    free_reply_list(in->current_reply);
    /* free_reply_list doubles as the destructor for each map entry. */
    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
    struct event_list *e = in->events;
    in->events = e->next;
    while(in->pending_replies)
        pending_reply *pend = in->pending_replies;
        in->pending_replies = pend->next;
/* Private: wake whichever thread should read from the socket next --
 * the first waiting reply reader if any, otherwise an event waiter. */
void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
    pthreadret = pthread_cond_signal(c->in.readers->data);
    pthreadret = pthread_cond_signal(&c->in.event_cond);
    assert(pthreadret == 0);
/* Private: record that `request` expects a response needing special
 * treatment (a protocol workaround and/or request flags). Returns
 * nonzero on success. */
int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
    pending_reply *pend = malloc(sizeof(pending_reply));
    /* A record with neither a workaround nor flags would be useless. */
    assert(workaround != WORKAROUND_NONE || flags != 0);
    /* Allocation failure is fatal for the connection. */
    _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
    pend->first_request = pend->last_request = request;
    pend->workaround = workaround;
    /* Append to the pending-replies list. */
    *c->in.pending_replies_tail = pend;
    c->in.pending_replies_tail = &pend->next;
/* Private: close out the most recent pending_reply if it is an
 * open-ended EXTERNAL_SOCKET_OWNER record, bounding it at the newest
 * request issued. NOTE(review): appears to be called when an external
 * socket owner returns the socket to XCB -- confirm against callers. */
void _xcb_in_replies_done(xcb_connection_t *c)
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
        /* The tail pointer aims at the last record's `next` field. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
/* Private: read whatever is available from the socket into the input
 * queue -- via recvmsg (collecting passed file descriptors) or plain
 * recv -- then parse all complete packets. Returns nonzero while the
 * connection remains usable. */
int _xcb_in_read(xcb_connection_t *c)
        .iov_base = c->in.queue + c->in.queue_len,
        .iov_len = sizeof(c->in.queue) - c->in.queue_len,
    /* Control-message buffer sized for the maximum fd payload. */
    struct cmsghdr cmsghdr;
    char buf[CMSG_SPACE(XCB_MAX_PASS_FD * sizeof(int))];

    struct msghdr msg = {
        .msg_control = cmsgbuf.buf,
        .msg_controllen = CMSG_SPACE(sizeof(int) * (XCB_MAX_PASS_FD - c->in.in_fd.nfd)),
    n = recvmsg(c->fd, &msg, 0);

    /* Check for truncation errors. Only MSG_CTRUNC is
     * probably possible here, which would indicate that
     * the sender tried to transmit more than XCB_MAX_PASS_FD
     * file descriptors. */
    if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
    n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0);

    /* Harvest SCM_RIGHTS control messages into the connection's
     * file-descriptor queue. */
    if (msg.msg_controllen >= sizeof (struct cmsghdr)) {
        for (hdr = CMSG_FIRSTHDR(&msg); hdr; hdr = CMSG_NXTHDR(&msg, hdr)) {
            if (hdr->cmsg_level == SOL_SOCKET && hdr->cmsg_type == SCM_RIGHTS) {
                int nfd = (hdr->cmsg_len - CMSG_LEN(0)) / sizeof (int);
                memcpy(&c->in.in_fd.fd[c->in.in_fd.nfd], CMSG_DATA(hdr), nfd * sizeof (int));
                c->in.in_fd.nfd += nfd;

    c->in.queue_len += n;

    /* Parse every complete packet that is now queued. */
    while(read_packet(c))

    /* Compact the fd queue, dropping descriptors already handed out. */
    if (c->in.in_fd.nfd) {
        c->in.in_fd.nfd -= c->in.in_fd.ifd;
        memmove(&c->in.in_fd.fd[0],
                &c->in.in_fd.fd[c->in.in_fd.ifd],
                c->in.in_fd.nfd * sizeof (int));

    /* If we have any left-over file descriptors after emptying
     * the input buffer, then the server sent some that we weren't
     * expecting. Close them and mark the connection as broken;
     */
    if (c->in.queue_len == 0 && c->in.in_fd.nfd != 0) {
        for (i = 0; i < c->in.in_fd.nfd; i++)
            close(c->in.in_fd.fd[i]);
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);

    /* EAGAIN / EWOULDBLOCK simply means no more data right now. */
    if((n > 0) || (n < 0 && errno == EAGAIN))
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);
/* Private: copy `len` bytes into `buf`, draining bytes already queued
 * in c->in.queue first and reading the remainder from the socket via
 * read_block (which blocks until the data arrives). */
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
    int done = c->in.queue_len;

    /* Take queued bytes first, then shift the queue down. */
    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    int ret = read_block(c->fd, (char *) buf + done, len - done);
    /* A short or failed read breaks the connection. */
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);