Replaced read() in read_block() and _xcb_in_read() with recv(), so the same code path works for sockets on all platforms, including Win32.
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #include <assert.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <stdio.h>
33 #include <errno.h>
34
35 #include "xcb.h"
36 #include "xcbext.h"
37 #include "xcbint.h"
38 #if USE_POLL
39 #include <poll.h>
40 #elif !defined _WIN32
41 #include <sys/select.h>
42 #include <sys/socket.h>
43 #endif
44
45 #ifdef _WIN32
46 #include "xcb_windefs.h"
47 #endif /* _WIN32 */
48
49 #define XCB_ERROR 0
50 #define XCB_REPLY 1
51 #define XCB_XGE_EVENT 35
52
53 /* required for compiling for Win32 using MinGW */
54 #ifndef MSG_WAITALL
55 #define MSG_WAITALL 0
56 #endif
57
/* One queued event awaiting xcb_wait_for_event()/xcb_poll_for_event();
 * nodes are appended at c->in.events_tail in arrival order. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};
62
/* One reply buffer; multiple replies to the same request are chained
 * in arrival order. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};
67
/* Bookkeeping for a range of requests whose responses need special
 * handling: a server-bug workaround, checked-error delivery, or
 * discard-on-arrival (see the flags checks in read_packet()). */
typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;
75
/* A thread blocked in xcb_wait_for_reply(); the list is kept sorted by
 * request number, and the waiter is signalled through `data` when its
 * reply arrives (see read_packet()). */
typedef struct reader_list {
    unsigned int request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;
81
82 static void wake_up_next_reader(xcb_connection_t *c)
83 {
84     int pthreadret;
85     if(c->in.readers)
86         pthreadret = pthread_cond_signal(c->in.readers->data);
87     else
88         pthreadret = pthread_cond_signal(&c->in.event_cond);
89     assert(pthreadret == 0);
90 }
91
/* Consume one complete packet (reply, error, or event) from the head of
 * the input queue and route it: replies and checked errors go onto the
 * current-reply list (waking the matching reader), events and unchecked
 * errors go onto the event queue.
 *
 * Returns 1 if a packet was consumed, 0 if there is not yet enough
 * buffered data for a whole packet or a failure occurred (in which case
 * the connection may have been shut down). */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32; /* fixed size of the initial response unit */
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    /* KeymapNotify carries no usable sequence number, so skip all
     * sequence bookkeeping for it. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Widen the 16-bit wire sequence using the previous 64-bit
         * value, bumping the high bits on wraparound. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* Sequence advanced: archive the replies accumulated for
             * the previous request into the replies map. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Discard pending_reply records for requests that can produce
         * no further responses. */
        while(c->in.pending_replies && 
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates its request: nothing more will follow. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        /* Find the pending_reply record covering this sequence number,
         * if one exists. */
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Server bug: recompute the real length from the reply's
             * own count fields rather than the length field. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
    {
        eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
    }

    /* Non-replies get an extra uint32_t of space so full_sequence can
     * be stored (assigned below). */
    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    /* The caller asked to discard this response: free it now that it
     * has been consumed from the queue. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader waiting on exactly this sequence number, if
         * one is queued (list is sorted, so we can stop early). */
        for(reader = c->in.readers; 
            reader && 
            XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
            reader = reader->next)
        {
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
        }
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}
249
250 static xcb_generic_event_t *get_event(xcb_connection_t *c)
251 {
252     struct event_list *cur = c->in.events;
253     xcb_generic_event_t *ret;
254     if(!c->in.events)
255         return 0;
256     ret = cur->event;
257     c->in.events = cur->next;
258     if(!cur->next)
259         c->in.events_tail = &c->in.events;
260     free(cur);
261     return ret;
262 }
263
264 static void free_reply_list(struct reply_list *head)
265 {
266     while(head)
267     {
268         struct reply_list *cur = head;
269         head = cur->next;
270         free(cur->reply);
271         free(cur);
272     }
273 }
274
/* Read exactly `len` bytes from `fd` into `buf`, blocking as needed.
 *
 * recv() is used instead of read() so the same code works for sockets
 * on Win32 as well. When the non-blocking socket has no data ready
 * (EAGAIN / WSAEWOULDBLOCK), wait for readability via poll()/select()
 * and retry.
 *
 * Returns `len` on success, 0 on EOF, or a negative value on error.
 *
 * Fix: the progress counter and recv() result were previously `int`
 * while `len` is `ssize_t` and recv() returns ssize_t — use ssize_t
 * throughout to avoid truncation on large transfers. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    ssize_t done = 0;
    while(done < len)
    {
        ssize_t ret = recv(fd, ((char *) buf) + done, len - done, MSG_WAITALL);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
            /* No data ready yet: block until the fd becomes readable. */
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return (int) ret;
    }
    return (int) len;
}
314
/* Non-blocking check for a response to `request`.
 *
 * Returns 1 when the request's fate is known — *reply/*error are filled
 * in, possibly both null if the request finished without producing a
 * response — or 0 when more data must be read from the server first.
 * Caller must hold the iolock. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Put any remaining replies for this request back in the map
         * so later calls can retrieve them one at a time. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Errors go to *error (or are freed if the caller passed no
         * error slot); real replies go to *reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
369
370 /* Public interface */
371
/* Block until the reply or error for `request` arrives, reading from
 * the server as needed, and return the reply (0 on error or failure).
 * When e is non-null it is cleared and receives any error; the caller
 * owns whichever buffer is returned. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

    /* Widen the caller's 32-bit sequence number to 64 bits relative to
     * the most recently issued request. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the readers list, keeping it sorted by
         * request number. */
        for(prev_reader = &c->in.readers; 
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until our response shows up or the connection dies. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink our stack-allocated node from the readers list. */
        for(prev_reader = &c->in.readers;
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
428
429 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
430 {
431     pending_reply *pend;
432     pend = malloc(sizeof(*pend));
433     if(!pend)
434     {
435         _xcb_conn_shutdown(c);
436         return;
437     }
438
439     pend->first_request = seq;
440     pend->last_request = seq;
441     pend->workaround = 0;
442     pend->flags = XCB_REQUEST_DISCARD_REPLY;
443     pend->next = *prev_next;
444     *prev_next = pend;
445
446     if(!pend->next)
447         c->in.pending_replies_tail = &pend->next;
448 }
449
450 static void discard_reply(xcb_connection_t *c, unsigned int request)
451 {
452     pending_reply *pend = 0;
453     pending_reply **prev_pend;
454     uint64_t widened_request;
455
456     /* We've read requests past the one we want, so if it has replies we have
457      * them all and they're in the replies map. */
458     if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
459     {
460         struct reply_list *head;
461         head = _xcb_map_remove(c->in.replies, request);
462         while (head)
463         {
464             struct reply_list *next = head->next;
465             free(head->reply);
466             free(head);
467             head = next;
468         }
469         return;
470     }
471
472     /* We're currently processing the responses to the request we want, and we
473      * have a reply ready to return. Free it, and mark the pend to free any further
474      * replies. */
475     if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
476     {
477         struct reply_list *head;
478         head = c->in.current_reply;
479         c->in.current_reply = NULL;
480         c->in.current_reply_tail = &c->in.current_reply;
481         while (head)
482         {
483             struct reply_list *next = head->next;
484             free(head->reply);
485             free(head);
486             head = next;
487         }
488
489         pend = c->in.pending_replies;
490         if(pend &&
491             !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
492              (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
493               XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
494             pend = 0;
495         if(pend)
496             pend->flags |= XCB_REQUEST_DISCARD_REPLY;
497         else
498             insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);
499
500         return;
501     }
502
503     /* Walk the list of pending requests. Mark the first match for deletion. */
504     for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
505     {
506         if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
507             break;
508
509         if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
510         {
511             /* Pending reply found. Mark for discard: */
512             (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
513             return;
514         }
515     }
516
517     /* Pending reply not found (likely due to _unchecked request). Create one: */
518     widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
519     if(widened_request > c->out.request)
520         widened_request -= UINT64_C(1) << 32;
521
522     insert_pending_discard(c, prev_pend, widened_request);
523 }
524
525 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
526 {
527     if(c->has_error)
528         return;
529
530     /* If an error occurred when issuing the request, fail immediately. */
531     if(!sequence)
532         return;
533
534     pthread_mutex_lock(&c->iolock);
535     discard_reply(c, sequence);
536     pthread_mutex_unlock(&c->iolock);
537 }
538
539 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
540 {
541     int ret;
542     if(c->has_error)
543     {
544         *reply = 0;
545         if(error)
546             *error = 0;
547         return 1; /* would not block */
548     }
549     assert(reply != 0);
550     pthread_mutex_lock(&c->iolock);
551     ret = poll_for_reply(c, request, reply, error);
552     pthread_mutex_unlock(&c->iolock);
553     return ret;
554 }
555
556 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
557 {
558     xcb_generic_event_t *ret;
559     if(c->has_error)
560         return 0;
561     pthread_mutex_lock(&c->iolock);
562     /* get_event returns 0 on empty list. */
563     while(!(ret = get_event(c)))
564         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
565             break;
566
567     wake_up_next_reader(c);
568     pthread_mutex_unlock(&c->iolock);
569     return ret;
570 }
571
572 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
573 {
574     xcb_generic_event_t *ret = 0;
575     if(!c->has_error)
576     {
577         pthread_mutex_lock(&c->iolock);
578         /* FIXME: follow X meets Z architecture changes. */
579         ret = get_event(c);
580         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
581             ret = get_event(c);
582         pthread_mutex_unlock(&c->iolock);
583     }
584     return ret;
585 }
586
/* Wait for a checked void request to complete and return its error, or
 * 0 when it succeeded (also 0 on a dead connection). Caller frees the
 * returned error. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
     * that would require factoring the locking out of xcb_get_input_focus,
     * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
    xcb_generic_error_t *ret;
    void *reply;
    if(c->has_error)
        return 0;
    /* If the server has not yet responded past this request, force a
     * round trip (GetInputFocus) so we learn whether it errored. */
    if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
       && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
    {
        free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
        assert(!ret);
    }
    /* A void request produces no reply, so anything returned must be 0;
     * any error comes back through ret. */
    reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
    assert(!reply);
    return ret;
}
606
607 /* Private interface */
608
609 int _xcb_in_init(_xcb_in *in)
610 {
611     if(pthread_cond_init(&in->event_cond, 0))
612         return 0;
613     in->reading = 0;
614
615     in->queue_len = 0;
616
617     in->request_read = 0;
618     in->request_completed = 0;
619
620     in->replies = _xcb_map_new();
621     if(!in->replies)
622         return 0;
623
624     in->current_reply_tail = &in->current_reply;
625     in->events_tail = &in->events;
626     in->pending_replies_tail = &in->pending_replies;
627
628     return 1;
629 }
630
631 void _xcb_in_destroy(_xcb_in *in)
632 {
633     pthread_cond_destroy(&in->event_cond);
634     free_reply_list(in->current_reply);
635     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
636     while(in->events)
637     {
638         struct event_list *e = in->events;
639         in->events = e->next;
640         free(e->event);
641         free(e);
642     }
643     while(in->pending_replies)
644     {
645         pending_reply *pend = in->pending_replies;
646         in->pending_replies = pend->next;
647         free(pend);
648     }
649 }
650
651 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
652 {
653     pending_reply *pend = malloc(sizeof(pending_reply));
654     assert(workaround != WORKAROUND_NONE || flags != 0);
655     if(!pend)
656     {
657         _xcb_conn_shutdown(c);
658         return 0;
659     }
660     pend->first_request = pend->last_request = request;
661     pend->workaround = workaround;
662     pend->flags = flags;
663     pend->next = 0;
664     *c->in.pending_replies_tail = pend;
665     c->in.pending_replies_tail = &pend->next;
666     return 1;
667 }
668
/* Close out an open-ended WORKAROUND_EXTERNAL_SOCKET_OWNER record at
 * the tail of the pending-replies list by bounding it at the last
 * request issued.
 * NOTE(review): presumably invoked when an external socket owner
 * returns the socket to XCB — confirm against the xcb_out.c callers. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* Recover the last list node from its embedded `next` field. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
682
683 int _xcb_in_read(xcb_connection_t *c)
684 {
685     int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len,MSG_WAITALL);
686     if(n > 0)
687         c->in.queue_len += n;
688     while(read_packet(c))
689         /* empty */;
690 #ifndef _WIN32
691     if((n > 0) || (n < 0 && errno == EAGAIN))
692 #else
693     if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
694 #endif /* !_WIN32 */
695         return 1;
696     _xcb_conn_shutdown(c);
697     return 0;
698 }
699
700 int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
701 {
702     int done = c->in.queue_len;
703     if(len < done)
704         done = len;
705
706     memcpy(buf, c->in.queue, done);
707     c->in.queue_len -= done;
708     memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
709
710     if(len > done)
711     {
712         int ret = read_block(c->fd, (char *) buf + done, len - done);
713         if(ret <= 0)
714         {
715             _xcb_conn_shutdown(c);
716             return ret;
717         }
718     }
719
720     return len;
721 }