Fix a busy loop on BSD and Mac OS
[free-sw/xcb/libxcb] / src / xcb_in.c
/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the names of the authors or their
 * institutions shall not be used in advertising or otherwise to promote the
 * sale, use or other dealings in this Software without prior written
 * authorization from the authors.
 */

/* Stuff that reads stuff from the server. */

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

#include "xcb.h"
#include "xcbext.h"
#include "xcbint.h"
#if USE_POLL
#include <poll.h>
#endif
#ifndef _WIN32
#include <sys/select.h>
#include <sys/socket.h>
#endif

#ifdef _WIN32
#include "xcb_windefs.h"
#endif /* _WIN32 */

#define XCB_ERROR 0
#define XCB_REPLY 1
#define XCB_XGE_EVENT 35

struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

struct reply_list {
    void *reply;
    struct reply_list *next;
};

typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;

typedef struct reader_list {
    uint64_t request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;

static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
    {
        /* If you don't have what you're looking for now, you never
         * will. Wake up and leave me alone. */
        pthread_cond_signal((*prev_reader)->data);
        *prev_reader = (*prev_reader)->next;
    }
}

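/* Try to parse one complete packet from the head of c->in.queue.  Returns 1
 * if a packet was consumed (a reply, checked error or event was queued on
 * the appropriate list and any waiting reader or event consumer signalled),
 * 0 if not enough data has arrived yet or an allocation failed.  For packets
 * longer than 32 bytes the remainder is pulled in via _xcb_in_read_block,
 * which may block. */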
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}

static xcb_generic_event_t *get_event(xcb_connection_t *c)
{
    struct event_list *cur = c->in.events;
    xcb_generic_event_t *ret;
    if(!c->in.events)
        return 0;
    ret = cur->event;
    c->in.events = cur->next;
    if(!cur->next)
        c->in.events_tail = &c->in.events;
    free(cur);
    return ret;
}

static void free_reply_list(struct reply_list *head)
{
    while(head)
    {
        struct reply_list *cur = head;
        head = cur->next;
        free(cur->reply);
        free(cur);
    }
}

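/* Read exactly len bytes from fd into buf.  When recv() reports that it
 * would block (EAGAIN/EWOULDBLOCK), wait for the descriptor to become
 * readable with poll() or select() before retrying rather than calling
 * recv() again in a tight loop; waits interrupted by EINTR are restarted.
 * Spinning on recv() here is presumably the busy loop the commit title
 * refers to.  Returns len on success, or the failing call's return
 * value (<= 0) otherwise. */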
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done, 0);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}

static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}

static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
        prev_reader = &(*prev_reader)->next;
    reader->request = request;
    reader->data = cond;
    reader->next = *prev_reader;
    *prev_reader = reader;
}

static void remove_reader(reader_list **prev_reader, reader_list *reader)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
        if(*prev_reader == reader)
        {
            *prev_reader = (*prev_reader)->next;
            break;
        }
}

static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        insert_reader(&c->in.readers, &reader, request, &cond);

        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    _xcb_in_wake_up_next_reader(c);
    return ret;
}

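/* Widen a 32-bit sequence number from the public API to the 64-bit sequence
 * numbers used internally, by choosing the largest 64-bit value not greater
 * than c->out.request whose low 32 bits match the given request. */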
static uint64_t widen(xcb_connection_t *c, unsigned int request)
{
    uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;
    return widened_request;
}

/* Public interface */

void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    void *ret;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);
    ret = wait_for_reply(c, widen(c, request), e);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
{
    pending_reply *pend;
    pend = malloc(sizeof(*pend));
    if(!pend)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return;
    }

    pend->first_request = seq;
    pend->last_request = seq;
    pend->workaround = 0;
    pend->flags = XCB_REQUEST_DISCARD_REPLY;
    pend->next = *prev_next;
    *prev_next = pend;

    if(!pend->next)
        c->in.pending_replies_tail = &pend->next;
}

static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}

void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
{
    if(c->has_error)
        return;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!sequence)
        return;

    pthread_mutex_lock(&c->iolock);
    discard_reply(c, widen(c, sequence));
    pthread_mutex_unlock(&c->iolock);
}

int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    int ret;
    if(c->has_error)
    {
        *reply = 0;
        if(error)
            *error = 0;
        return 1; /* would not block */
    }
    assert(reply != 0);
    pthread_mutex_lock(&c->iolock);
    ret = poll_for_reply(c, widen(c, request), reply, error);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
{
    xcb_generic_event_t *ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    /* get_event returns 0 on empty list. */
    while(!(ret = get_event(c)))
        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
            break;

    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
{
    xcb_generic_event_t *ret = 0;
    if(!c->has_error)
    {
        pthread_mutex_lock(&c->iolock);
        /* FIXME: follow X meets Z architecture changes. */
        ret = get_event(c);
        if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
            ret = get_event(c);
        pthread_mutex_unlock(&c->iolock);
    }
    return ret;
}

xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 0);
}

xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 1);
}

xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    reply = wait_for_reply(c, request, &ret);
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

/* Private interface */

int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}

void _xcb_in_destroy(_xcb_in *in)
{
    pthread_cond_destroy(&in->event_cond);
    free_reply_list(in->current_reply);
    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
    while(in->events)
    {
        struct event_list *e = in->events;
        in->events = e->next;
        free(e->event);
        free(e);
    }
    while(in->pending_replies)
    {
        pending_reply *pend = in->pending_replies;
        in->pending_replies = pend->next;
        free(pend);
    }
}

void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
{
    int pthreadret;
    if(c->in.readers)
        pthreadret = pthread_cond_signal(c->in.readers->data);
    else
        pthreadret = pthread_cond_signal(&c->in.event_cond);
    assert(pthreadret == 0);
}

int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
{
    pending_reply *pend = malloc(sizeof(pending_reply));
    assert(workaround != WORKAROUND_NONE || flags != 0);
    if(!pend)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }
    pend->first_request = pend->last_request = request;
    pend->workaround = workaround;
    pend->flags = flags;
    pend->next = 0;
    *c->in.pending_replies_tail = pend;
    c->in.pending_replies_tail = &pend->next;
    return 1;
}

void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}

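/* Read whatever is currently available on the socket into c->in.queue and
 * process any complete packets.  Returns 1 if the read made progress or
 * merely would have blocked, 0 after shutting the connection down on a real
 * error or EOF. */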
int _xcb_in_read(xcb_connection_t *c)
{
    int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0);
    if(n > 0)
        c->in.queue_len += n;
    while(read_packet(c))
        /* empty */;
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);
    return 0;
}

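/* Fill buf with len bytes, taking what is already buffered in c->in.queue
 * first and blocking in read_block() for the rest.  Returns len on success;
 * on failure the connection is shut down and read_block()'s return value
 * (<= 0) is passed through. */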
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c, XCB_CONN_ERROR);
            return ret;
        }
    }

    return len;
}