xcb_request_check: Hold the I/O lock while deciding to sync.
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #include <assert.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <stdio.h>
33 #include <errno.h>
34
35 #include "xcb.h"
36 #include "xcbext.h"
37 #include "xcbint.h"
38 #if USE_POLL
39 #include <poll.h>
40 #endif
41 #ifndef _WIN32
42 #include <sys/select.h>
43 #include <sys/socket.h>
44 #endif
45
46 #ifdef _WIN32
47 #include "xcb_windefs.h"
48 #endif /* _WIN32 */
49
50 #define XCB_ERROR 0
51 #define XCB_REPLY 1
52 #define XCB_XGE_EVENT 35
53
54 /* required for compiling for Win32 using MinGW */
55 #ifndef MSG_WAITALL
56 #define MSG_WAITALL 0
57 #endif
58
/* Singly-linked FIFO node holding one event until the application
 * retrieves it via xcb_wait_for_event/xcb_poll_for_event. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};
63
/* Singly-linked node holding one reply (or checked error) buffer for a
 * request; the node owns the `reply` allocation. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};
68
/* Record for a request whose response needs special handling (a workaround
 * and/or XCB_REQUEST_CHECKED / XCB_REQUEST_DISCARD_REPLY flags).  Covers the
 * inclusive 64-bit sequence range [first_request, last_request]. */
typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;  /* XCB_REQUEST_* flags */
    struct pending_reply *next;
} pending_reply;
76
/* One thread blocked waiting for the reply to `request`; the list is kept
 * sorted by sequence number, and `data` is the per-thread condition variable
 * signalled when a matching response arrives (see read_packet). */
typedef struct reader_list {
    unsigned int request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;
82
/* Consume one complete response (reply, error, or event) from the head of
 * the buffered input queue and file it in the appropriate place: the
 * current-reply list (waking a matching reader), or the event queue.
 * Returns 1 if a packet was consumed, 0 if fewer than 32 bytes are buffered
 * or an allocation/read failed (failure shuts the connection down). */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    /* KeymapNotify carries no sequence number, so it must not disturb the
     * sequence bookkeeping below. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Widen the 16-bit wire sequence using the previous 64-bit value;
         * if the result went backwards we crossed a 16-bit wrap. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* A new request's responses are starting: archive the previous
             * request's accumulated replies into the replies map. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Retire pending_reply records wholly below request_completed
         * (open-ended EXTERNAL_SOCKET_OWNER records are kept). */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates the request's responses. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    /* See whether the head pending_reply record covers this response. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Buggy servers lie about the length; recompute it from the
             * reply's own count fields. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    /* Events/errors get an extra trailing uint32_t for full_sequence. */
    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    /* The application asked to discard this reply: drop it on the floor. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader (if any) waiting on exactly this sequence number;
         * the list is sorted, so stop once we pass it. */
        for(reader = c->in.readers;
            reader &&
            XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
            reader = reader->next)
        {
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
        }
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}
238
239 static xcb_generic_event_t *get_event(xcb_connection_t *c)
240 {
241     struct event_list *cur = c->in.events;
242     xcb_generic_event_t *ret;
243     if(!c->in.events)
244         return 0;
245     ret = cur->event;
246     c->in.events = cur->next;
247     if(!cur->next)
248         c->in.events_tail = &c->in.events;
249     free(cur);
250     return ret;
251 }
252
253 static void free_reply_list(struct reply_list *head)
254 {
255     while(head)
256     {
257         struct reply_list *cur = head;
258         head = cur->next;
259         free(cur->reply);
260         free(cur);
261     }
262 }
263
/* recv() exactly `len` bytes from fd into buf, parking in poll()/select()
 * whenever the non-blocking socket has no data yet.  Returns len on success,
 * or the failing recv/poll/select result (<= 0) on error or EOF. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done,MSG_WAITALL);
        if(ret > 0)
            done += ret;
        /* Would-block: wait for readability, then retry the recv. */
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        /* Hard error or EOF: propagate the failing result. */
        if(ret <= 0)
            return ret;
    }
    return len;
}
303
/* Core of xcb_poll_for_reply: decide without blocking whether the response
 * to `request` is already known.  Returns 1 with *reply / *error filled in
 * (both may be 0, meaning no more responses are coming), or 0 if the caller
 * would have to block for more data.  Callers hold the I/O lock. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* More than one reply stored: put the rest back for the next poll. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Route an error packet to *error (or free it if the caller doesn't
         * want errors); anything else is a genuine reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
358
/* Block until the response to `request` is known, returning the reply (or 0)
 * and storing any error through *e.  Flushes the request if it hasn't been
 * written, registers this thread in the sorted readers list, and sleeps on a
 * stack-allocated condition variable until poll_for_reply succeeds or the
 * connection dies.  Called with the I/O lock held. */
static void *wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    void *ret = 0;
    /* Widen the 32-bit sequence to 64 bits relative to the newest request
     * written; subtract one wrap if that lands in the future. */
    uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the readers list, keeping it sorted by
         * sequence number. */
        for(prev_reader = &c->in.readers;
            *prev_reader &&
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until our response arrives (read_packet signals `cond`) or
         * _xcb_conn_wait reports the connection is unusable. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink ourselves from the readers list again. */
        for(prev_reader = &c->in.readers;
            *prev_reader &&
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    /* Hand responsibility for reading the socket to the next waiter. */
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
406
407 /* Public interface */
408
409 void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
410 {
411     void *ret;
412     if(e)
413         *e = 0;
414     if(c->has_error)
415         return 0;
416
417     pthread_mutex_lock(&c->iolock);
418     ret = wait_for_reply(c, request, e);
419     pthread_mutex_unlock(&c->iolock);
420     return ret;
421 }
422
423 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
424 {
425     pending_reply *pend;
426     pend = malloc(sizeof(*pend));
427     if(!pend)
428     {
429         _xcb_conn_shutdown(c);
430         return;
431     }
432
433     pend->first_request = seq;
434     pend->last_request = seq;
435     pend->workaround = 0;
436     pend->flags = XCB_REQUEST_DISCARD_REPLY;
437     pend->next = *prev_next;
438     *prev_next = pend;
439
440     if(!pend->next)
441         c->in.pending_replies_tail = &pend->next;
442 }
443
/* Implementation of xcb_discard_reply: free any responses already stored
 * for `request` and arrange for any still in flight to be dropped on
 * arrival.  Called with the I/O lock held. */
static void discard_reply(xcb_connection_t *c, unsigned int request)
{
    pending_reply *pend = 0;
    pending_reply **prev_pend;
    uint64_t widened_request;

    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        struct reply_list *head;
        head = _xcb_map_remove(c->in.replies, request);
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }
        return;
    }

    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. Free it, and mark the pend to free any further
     * replies. */
    if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        struct reply_list *head;
        head = c->in.current_reply;
        c->in.current_reply = NULL;
        c->in.current_reply_tail = &c->in.current_reply;
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }

        /* Reuse the head pending_reply if it covers this request
         * (same coverage test as read_packet); otherwise insert one. */
        pend = c->in.pending_replies;
        if(pend &&
            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
        if(pend)
            pend->flags |= XCB_REQUEST_DISCARD_REPLY;
        else
            insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);

        return;
    }

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
            break;

        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    /* Widen the sequence relative to the newest request written, subtracting
     * one wrap if that lands in the future. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    insert_pending_discard(c, prev_pend, widened_request);
}
518
519 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
520 {
521     if(c->has_error)
522         return;
523
524     /* If an error occurred when issuing the request, fail immediately. */
525     if(!sequence)
526         return;
527
528     pthread_mutex_lock(&c->iolock);
529     discard_reply(c, sequence);
530     pthread_mutex_unlock(&c->iolock);
531 }
532
533 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
534 {
535     int ret;
536     if(c->has_error)
537     {
538         *reply = 0;
539         if(error)
540             *error = 0;
541         return 1; /* would not block */
542     }
543     assert(reply != 0);
544     pthread_mutex_lock(&c->iolock);
545     ret = poll_for_reply(c, request, reply, error);
546     pthread_mutex_unlock(&c->iolock);
547     return ret;
548 }
549
550 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
551 {
552     xcb_generic_event_t *ret;
553     if(c->has_error)
554         return 0;
555     pthread_mutex_lock(&c->iolock);
556     /* get_event returns 0 on empty list. */
557     while(!(ret = get_event(c)))
558         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
559             break;
560
561     _xcb_in_wake_up_next_reader(c);
562     pthread_mutex_unlock(&c->iolock);
563     return ret;
564 }
565
566 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
567 {
568     xcb_generic_event_t *ret = 0;
569     if(!c->has_error)
570     {
571         pthread_mutex_lock(&c->iolock);
572         /* FIXME: follow X meets Z architecture changes. */
573         ret = get_event(c);
574         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
575             ret = get_event(c);
576         pthread_mutex_unlock(&c->iolock);
577     }
578     return ret;
579 }
580
/* Wait for the result of a void (reply-less) checked request: returns the
 * error if the request failed, 0 otherwise.  The sequence-number comparison
 * and any resulting sync are performed with the I/O lock held, so another
 * thread cannot advance request_expected/request_completed in between. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    /* If no response at or past this request is expected or completed yet,
     * force a round-trip so the server's verdict becomes observable. */
    if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>=,c->in.request_expected)
       && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
    {
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    /* A void request can only produce an error, never a reply. */
    reply = wait_for_reply(c, cookie.sequence, &ret);
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
599
600 /* Private interface */
601
602 int _xcb_in_init(_xcb_in *in)
603 {
604     if(pthread_cond_init(&in->event_cond, 0))
605         return 0;
606     in->reading = 0;
607
608     in->queue_len = 0;
609
610     in->request_read = 0;
611     in->request_completed = 0;
612
613     in->replies = _xcb_map_new();
614     if(!in->replies)
615         return 0;
616
617     in->current_reply_tail = &in->current_reply;
618     in->events_tail = &in->events;
619     in->pending_replies_tail = &in->pending_replies;
620
621     return 1;
622 }
623
624 void _xcb_in_destroy(_xcb_in *in)
625 {
626     pthread_cond_destroy(&in->event_cond);
627     free_reply_list(in->current_reply);
628     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
629     while(in->events)
630     {
631         struct event_list *e = in->events;
632         in->events = e->next;
633         free(e->event);
634         free(e);
635     }
636     while(in->pending_replies)
637     {
638         pending_reply *pend = in->pending_replies;
639         in->pending_replies = pend->next;
640         free(pend);
641     }
642 }
643
644 void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
645 {
646     int pthreadret;
647     if(c->in.readers)
648         pthreadret = pthread_cond_signal(c->in.readers->data);
649     else
650         pthreadret = pthread_cond_signal(&c->in.event_cond);
651     assert(pthreadret == 0);
652 }
653
654 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
655 {
656     pending_reply *pend = malloc(sizeof(pending_reply));
657     assert(workaround != WORKAROUND_NONE || flags != 0);
658     if(!pend)
659     {
660         _xcb_conn_shutdown(c);
661         return 0;
662     }
663     pend->first_request = pend->last_request = request;
664     pend->workaround = workaround;
665     pend->flags = flags;
666     pend->next = 0;
667     *c->in.pending_replies_tail = pend;
668     c->in.pending_replies_tail = &pend->next;
669     return 1;
670 }
671
/* If the newest pending_reply record is an open-ended
 * WORKAROUND_EXTERNAL_SOCKET_OWNER range, close it off at the current
 * request number and clear the workaround so normal retirement (see
 * read_packet) applies to it again. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* Non-empty list: recover the last node from the tail link, which
         * points at that node's `next` field. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
685
/* Pull whatever is available on the socket into the input queue with one
 * recv(), then process every complete packet.  Returns 1 while the
 * connection remains usable (data was read, or the socket merely had
 * nothing to give); returns 0 after shutting the connection down on a real
 * error or EOF (n == 0). */
int _xcb_in_read(xcb_connection_t *c)
{
    int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len,MSG_WAITALL);
    if(n > 0)
        c->in.queue_len += n;
    while(read_packet(c))
        /* empty */;
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c);
    return 0;
}
702
/* Read exactly `len` bytes into buf: drain the buffered input queue first,
 * then block on the socket for whatever remains.  Returns len on success,
 * or <= 0 after shutting the connection down on failure. */
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    /* Take up to `len` bytes from the already-buffered queue... */
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    /* ...and read the rest straight from the socket. */
    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c);
            return ret;
        }
    }

    return len;
}