Handle XGE events with the "send event" flag
[free-sw/xcb/libxcb] / src / xcb_in.c
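XGE (Generic Event extension) events are the only X11 events larger than 32 bytes, so read_packet() below has to recognize them in order to read the extra data. An event delivered with SendEvent arrives with the "send event" flag (the 0x80 bit) set in response_type, so the opcode must be compared with that bit masked off. A minimal sketch of the check, for illustration only (the value below is made up; XCB_XGE_EVENT is defined later in this file):

    uint8_t response_type = 0x80 | XCB_XGE_EVENT;  /* a GenericEvent delivered via SendEvent */
    if ((response_type & 0x7f) == XCB_XGE_EVENT)
        ;  /* still recognized: genrep.length * 4 extra bytes follow the first 32 */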
/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the names of the authors or their
 * institutions shall not be used in advertising or otherwise to promote the
 * sale, use or other dealings in this Software without prior written
 * authorization from the authors.
 */

/* Stuff that reads stuff from the server. */

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

#include "xcb.h"
#include "xcbext.h"
#include "xcbint.h"
#if USE_POLL
#include <poll.h>
#endif
#ifndef _WIN32
#include <sys/select.h>
#include <sys/socket.h>
#endif

#ifdef _WIN32
#include "xcb_windefs.h"
#endif /* _WIN32 */

#define XCB_ERROR 0
#define XCB_REPLY 1
#define XCB_XGE_EVENT 35

/* required for compiling for Win32 using MinGW */
#ifndef MSG_WAITALL
#define MSG_WAITALL 0
#endif

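/* Events read from the server, kept in arrival order until the application
 * collects them with xcb_wait_for_event/xcb_poll_for_event. */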
struct event_list {
    uint64_t sequence;
    xcb_generic_event_t *event;
    struct event_list *next;
};

struct reply_list {
    void *reply;
    struct reply_list *next;
};

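/* A range of outstanding requests whose responses need special handling:
 * a protocol workaround, a checked error, or a reply to be discarded. */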
typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;

typedef struct reader_list {
    uint64_t request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;

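/* Signal every reader whose request is now known to be complete and drop it
 * from the list; it will not get any further responses. */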
static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
    {
        /* If you don't have what you're looking for now, you never
         * will. Wake up and leave me alone. */
        pthread_cond_signal((*prev_reader)->data);
        *prev_reader = (*prev_reader)->next;
    }
}

static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute the full sequence number of this packet. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.event_responses_completed = c->in.request_read - 1;
        }

        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.event_responses_completed = c->in.request_read;
        else if(genrep.response_type == XCB_REPLY)
            c->in.event_responses_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
        remove_finished_readers(&c->in.event_readers, c->in.event_responses_completed);
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may be longer than 32 bytes; mask off the "send event"
     * flag so synthetic XGE events are recognized too. */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->sequence = c->in.request_read;
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    if(c->in.event_readers)
        pthread_cond_signal(c->in.event_readers->data);
    else
        pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}

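/* Dequeue the oldest queued event, or return 0 if none are queued. */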
static xcb_generic_event_t *get_event(xcb_connection_t *c)
{
    struct event_list *cur = c->in.events;
    xcb_generic_event_t *ret;
    if(!c->in.events)
        return 0;
    ret = cur->event;
    c->in.events = cur->next;
    if(!cur->next)
        c->in.events_tail = &c->in.events;
    free(cur);
    return ret;
}

static void free_reply_list(struct reply_list *head)
{
    while(head)
    {
        struct reply_list *cur = head;
        head = cur->next;
        free(cur->reply);
        free(cur);
    }
}

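/* Read exactly len bytes from fd.  On a non-blocking socket, wait in
 * poll()/select() whenever no data is available yet. */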
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done, MSG_WAITALL);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}

static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}

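/* The reader lists are kept sorted by sequence number, so the waiter with
 * the oldest request is always at the head and gets signaled first. */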
static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
        prev_reader = &(*prev_reader)->next;
    reader->request = request;
    reader->data = cond;
    reader->next = *prev_reader;
    *prev_reader = reader;
}

static void remove_reader(reader_list **prev_reader, reader_list *reader)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
        if(*prev_reader == reader)
        {
            *prev_reader = (*prev_reader)->next;
            break;
        }
        else
            prev_reader = &(*prev_reader)->next;
}

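/* Block until the reply or error for 'request' has arrived, flushing the
 * request out to the server first if it has not been written yet. */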
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        insert_reader(&c->in.readers, &reader, request, &cond);

        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    _xcb_in_wake_up_next_reader(c);
    return ret;
}

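/* Reconstruct the full 64-bit sequence number from the 32-bit value stored
 * in a cookie; correct as long as the request is fewer than 2^32 requests
 * old. */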
static uint64_t widen(xcb_connection_t *c, unsigned int request)
{
    uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;
    return widened_request;
}

/* Public interface */

void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    void *ret;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);
    ret = wait_for_reply(c, widen(c, request), e);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
{
    pending_reply *pend;
    pend = malloc(sizeof(*pend));
    if(!pend)
    {
        _xcb_conn_shutdown(c);
        return;
    }

    pend->first_request = seq;
    pend->last_request = seq;
    pend->workaround = 0;
    pend->flags = XCB_REQUEST_DISCARD_REPLY;
    pend->next = *prev_next;
    *prev_next = pend;

    if(!pend->next)
        c->in.pending_replies_tail = &pend->next;
}

static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}

void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
{
    if(c->has_error)
        return;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!sequence)
        return;

    pthread_mutex_lock(&c->iolock);
    discard_reply(c, widen(c, sequence));
    pthread_mutex_unlock(&c->iolock);
}

int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    int ret;
    if(c->has_error)
    {
        *reply = 0;
        if(error)
            *error = 0;
        return 1; /* would not block */
    }
    assert(reply != 0);
    pthread_mutex_lock(&c->iolock);
    ret = poll_for_reply(c, widen(c, request), reply, error);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
{
    xcb_generic_event_t *ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    /* get_event returns 0 on empty list. */
    while(!(ret = get_event(c)))
        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
            break;

    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
{
    xcb_generic_event_t *ret = 0;
    if(!c->has_error)
    {
        pthread_mutex_lock(&c->iolock);
        /* FIXME: follow X meets Z architecture changes. */
        ret = get_event(c);
        if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
            ret = get_event(c);
        pthread_mutex_unlock(&c->iolock);
    }
    return ret;
}

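/* Return the next queued event only if it was generated at or before the
 * given request; later events stay queued. */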
static xcb_generic_event_t *get_event_until(xcb_connection_t *c, uint64_t request)
{
    if(c->in.events && XCB_SEQUENCE_COMPARE(c->in.events->sequence, <=, request))
        return get_event(c);
    return 0;
}

xcb_generic_event_t *xcb_wait_for_event_until(xcb_connection_t *c, unsigned int request)
{
    pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    reader_list reader;
    xcb_generic_event_t *ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);

    insert_reader(&c->in.event_readers, &reader, widen(c, request), &cond);

    while(!(ret = get_event_until(c, reader.request)) && XCB_SEQUENCE_COMPARE(c->in.event_responses_completed, <, reader.request))
        if(!_xcb_conn_wait(c, &cond, 0, 0))
            break;

    remove_reader(&c->in.event_readers, &reader);
    pthread_cond_destroy(&cond);
    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    reply = wait_for_reply(c, request, &ret);
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

/* Private interface */

int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}

void _xcb_in_destroy(_xcb_in *in)
{
    pthread_cond_destroy(&in->event_cond);
    free_reply_list(in->current_reply);
    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
    while(in->events)
    {
        struct event_list *e = in->events;
        in->events = e->next;
        free(e->event);
        free(e);
    }
    while(in->pending_replies)
    {
        pending_reply *pend = in->pending_replies;
        in->pending_replies = pend->next;
        free(pend);
    }
}

void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
{
    int pthreadret;
    if(c->in.readers)
        pthreadret = pthread_cond_signal(c->in.readers->data);
    else if(c->in.event_readers)
        pthreadret = pthread_cond_signal(c->in.event_readers->data);
    else
        pthreadret = pthread_cond_signal(&c->in.event_cond);
    assert(pthreadret == 0);
}

int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
{
    pending_reply *pend = malloc(sizeof(pending_reply));
    assert(workaround != WORKAROUND_NONE || flags != 0);
    if(!pend)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }
    pend->first_request = pend->last_request = request;
    pend->workaround = workaround;
    pend->flags = flags;
    pend->next = 0;
    *c->in.pending_replies_tail = pend;
    c->in.pending_replies_tail = &pend->next;
    return 1;
}

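/* Close off the open-ended pending_reply range that was created while an
 * external owner held the socket, now that its requests are accounted for. */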
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}

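/* Pull whatever the socket has into the fixed-size input queue, then parse
 * every complete packet currently buffered. */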
int _xcb_in_read(xcb_connection_t *c)
{
    int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, MSG_WAITALL);
    if(n > 0)
        c->in.queue_len += n;
    while(read_packet(c))
        /* empty */;
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c);
    return 0;
}

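/* Read len bytes into buf, consuming data already buffered in the input
 * queue before reading the remainder directly from the socket. */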
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c);
            return ret;
        }
    }

    return len;
}