Revert "Introduce xcb_wait_for_event_until, for consuming responses in wire-order."
[free-sw/xcb/libxcb] / src / xcb_in.c
/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the names of the authors or their
 * institutions shall not be used in advertising or otherwise to promote the
 * sale, use or other dealings in this Software without prior written
 * authorization from the authors.
 */

/* Stuff that reads stuff from the server. */

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

#include "xcb.h"
#include "xcbext.h"
#include "xcbint.h"
#if USE_POLL
#include <poll.h>
#endif
#ifndef _WIN32
#include <sys/select.h>
#include <sys/socket.h>
#endif

#ifdef _WIN32
#include "xcb_windefs.h"
#endif /* _WIN32 */

#define XCB_ERROR 0
#define XCB_REPLY 1
#define XCB_XGE_EVENT 35

/* required for compiling for Win32 using MinGW */
#ifndef MSG_WAITALL
#define MSG_WAITALL 0
#endif

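/* Bookkeeping for responses that have been read but not yet handed to the
 * caller: events (and unchecked errors) are queued on an event_list, replies
 * and checked errors on a reply_list, requests whose responses need special
 * handling (workarounds or DISCARD/CHECKED flags) on a pending_reply list,
 * and threads blocked in wait_for_reply on a reader_list kept sorted by
 * sequence number. */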
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

struct reply_list {
    void *reply;
    struct reply_list *next;
};

typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;

typedef struct reader_list {
    uint64_t request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;

static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
    {
        /* If you don't have what you're looking for now, you never
         * will. Wake up and leave me alone. */
        pthread_cond_signal((*prev_reader)->data);
        *prev_reader = (*prev_reader)->next;
    }
}

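/* Consume one complete X11 response from c->in.queue, if one is available.
 * Widens the 16-bit sequence number on the wire, updates request_read and
 * request_completed, and then files the packet either on the current reply
 * list (replies and checked errors) or on the event queue (events and
 * unchecked errors), waking any thread that is waiting for it. Returns 1 if
 * a packet was consumed, 0 otherwise. */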
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute the full 64-bit sequence number of this packet. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE(c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}

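/* Pop the oldest queued event, or return 0 if the event queue is empty.
 * Callers in this file invoke it with the I/O lock held. */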
static xcb_generic_event_t *get_event(xcb_connection_t *c)
{
    struct event_list *cur = c->in.events;
    xcb_generic_event_t *ret;
    if(!c->in.events)
        return 0;
    ret = cur->event;
    c->in.events = cur->next;
    if(!cur->next)
        c->in.events_tail = &c->in.events;
    free(cur);
    return ret;
}

static void free_reply_list(struct reply_list *head)
{
    while(head)
    {
        struct reply_list *cur = head;
        head = cur->next;
        free(cur->reply);
        free(cur);
    }
}

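/* Read exactly len bytes from fd into buf. The socket is non-blocking, so
 * when no data is available this waits for readability with poll() or
 * select() and retries; returns len on success, or the failing
 * recv()/poll()/select() result (<= 0) otherwise. */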
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done, MSG_WAITALL);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}

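/* Non-blocking check for the response to `request`. Returns 1 when the
 * answer is already known: *reply (and *error, if requested) are filled in,
 * or left null if the request produced no response. Returns 0 when the
 * response could still arrive, in which case the caller has to wait. */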
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}

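/* The readers list is kept sorted by sequence number, so the thread waiting
 * on the oldest outstanding request is always woken first. */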
static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
        prev_reader = &(*prev_reader)->next;
    reader->request = request;
    reader->data = cond;
    reader->next = *prev_reader;
    *prev_reader = reader;
}

static void remove_reader(reader_list **prev_reader, reader_list *reader)
{
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
        if(*prev_reader == reader)
        {
            *prev_reader = (*prev_reader)->next;
            break;
        }
}

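/* Block until the response for `request` is available (or provably will
 * never arrive). Flushes the request if it has not been written yet,
 * registers this thread on the readers list, and sleeps on a per-call
 * condition variable until poll_for_reply gives a definite answer. */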
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        insert_reader(&c->in.readers, &reader, request, &cond);

        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    _xcb_in_wake_up_next_reader(c);
    return ret;
}

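/* Widen a 32-bit sequence number from the public API to the full 64-bit
 * sequence, choosing the value closest to, and not greater than, the last
 * request issued (c->out.request). Illustrative example (values assumed,
 * not taken from this file): with c->out.request == 0x100000005 and
 * request == 0xfffffffe, the initial guess 0x1fffffffe lies in the future,
 * so 1 << 32 is subtracted, yielding 0xfffffffe. */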
static uint64_t widen(xcb_connection_t *c, unsigned int request)
{
    uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;
    return widened_request;
}

/* Public interface */

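/* Block for the reply to a request issued earlier. Generated reply
 * accessors funnel into this function; a typical caller (illustrative
 * sketch, not part of this file) looks like:
 *
 *     xcb_intern_atom_cookie_t ck = xcb_intern_atom(c, 0, 4, "_FOO");
 *     xcb_generic_error_t *err = NULL;
 *     xcb_intern_atom_reply_t *r = xcb_intern_atom_reply(c, ck, &err);
 *
 * where xcb_intern_atom_reply() passes the cookie's 32-bit sequence number
 * down to xcb_wait_for_reply(). */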
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    void *ret;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);
    ret = wait_for_reply(c, widen(c, request), e);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

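/* Record a pending_reply marked XCB_REQUEST_DISCARD_REPLY for a request
 * that has no entry yet, so that read_packet frees its response on arrival
 * instead of queueing it. Used by discard_reply below. */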
static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
{
    pending_reply *pend;
    pend = malloc(sizeof(*pend));
    if(!pend)
    {
        _xcb_conn_shutdown(c);
        return;
    }

    pend->first_request = seq;
    pend->last_request = seq;
    pend->workaround = 0;
    pend->flags = XCB_REQUEST_DISCARD_REPLY;
    pend->next = *prev_next;
    *prev_next = pend;

    if(!pend->next)
        c->in.pending_replies_tail = &pend->next;
}

static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}

void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
{
    if(c->has_error)
        return;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!sequence)
        return;

    pthread_mutex_lock(&c->iolock);
    discard_reply(c, widen(c, sequence));
    pthread_mutex_unlock(&c->iolock);
}

int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    int ret;
    if(c->has_error)
    {
        *reply = 0;
        if(error)
            *error = 0;
        return 1; /* would not block */
    }
    assert(reply != 0);
    pthread_mutex_lock(&c->iolock);
    ret = poll_for_reply(c, widen(c, request), reply, error);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
{
    xcb_generic_event_t *ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    /* get_event returns 0 on empty list. */
    while(!(ret = get_event(c)))
        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
            break;

    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

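/* Shared body of xcb_poll_for_event and xcb_poll_for_queued_event. With
 * queued set, only events already in the queue are returned; otherwise the
 * socket is read once if the queue is empty. */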
static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
{
    xcb_generic_event_t *ret = 0;
    if(!c->has_error)
    {
        pthread_mutex_lock(&c->iolock);
        /* FIXME: follow X meets Z architecture changes. */
        ret = get_event(c);
        if(!ret && !queued && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
            ret = get_event(c);
        pthread_mutex_unlock(&c->iolock);
    }
    return ret;
}

xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 0);
}

xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 1);
}

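/* Check a void (reply-less) request for errors. If no response for it can
 * have arrived yet, a sync request is sent and flushed to force the server
 * to catch up; wait_for_reply then returns either an error or nothing. */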
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    reply = wait_for_reply(c, request, &ret);
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

/* Private interface */

int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}

void _xcb_in_destroy(_xcb_in *in)
{
    pthread_cond_destroy(&in->event_cond);
    free_reply_list(in->current_reply);
    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
    while(in->events)
    {
        struct event_list *e = in->events;
        in->events = e->next;
        free(e->event);
        free(e);
    }
    while(in->pending_replies)
    {
        pending_reply *pend = in->pending_replies;
        in->pending_replies = pend->next;
        free(pend);
    }
}

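/* Hand the connection to whoever should read next: the reader waiting on
 * the oldest outstanding request if there is one, otherwise a thread
 * waiting for events. */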
void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
{
    int pthreadret;
    if(c->in.readers)
        pthreadret = pthread_cond_signal(c->in.readers->data);
    else
        pthreadret = pthread_cond_signal(&c->in.event_cond);
    assert(pthreadret == 0);
}

int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
{
    pending_reply *pend = malloc(sizeof(pending_reply));
    assert(workaround != WORKAROUND_NONE || flags != 0);
    if(!pend)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }
    pend->first_request = pend->last_request = request;
    pend->workaround = workaround;
    pend->flags = flags;
    pend->next = 0;
    *c->in.pending_replies_tail = pend;
    c->in.pending_replies_tail = &pend->next;
    return 1;
}

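/* Close off the open-ended pending_reply created for an external socket
 * owner (see WORKAROUND_EXTERNAL_SOCKET_OWNER), presumably once xcb has
 * taken the socket back, by bounding it at the current request. */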
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}

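/* Read whatever is currently available from the socket into c->in.queue and
 * process any complete packets. Returns 1 on success or if the read would
 * merely block, 0 (after shutting the connection down) on a hard error. */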
int _xcb_in_read(xcb_connection_t *c)
{
    int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, MSG_WAITALL);
    if(n > 0)
        c->in.queue_len += n;
    while(read_packet(c))
        /* empty */;
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c);
    return 0;
}

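/* Read len bytes into buf, taking already-buffered data from c->in.queue
 * first and then blocking on the socket for the remainder. Returns len on
 * success; shuts the connection down and returns <= 0 on failure. */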
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c);
            return ret;
        }
    }

    return len;
}