/* libxcb: src/xcb_in.c
 * Note: windefs.h is now called xcb_windefs.h — all includes were changed
 * to reflect that. */
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #include <assert.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <stdio.h>
33 #include <errno.h>
34
35 #include "xcb.h"
36 #include "xcbext.h"
37 #include "xcbint.h"
38 #if USE_POLL
39 #include <poll.h>
40 #elif !defined _WIN32
41 #include <sys/select.h>
42 #endif
43
44 #ifdef _WIN32
45 #include "xcb_windefs.h"
46 #endif /* _WIN32 */
47
/* Values of xcb_generic_reply_t.response_type that this file dispatches on. */
#define XCB_ERROR 0       /* packet is an error */
#define XCB_REPLY 1       /* packet is a reply to a request */
#define XCB_XGE_EVENT 35  /* Generic Event; may carry more than 32 bytes (see read_packet) */
51
/* FIFO of received events, rooted at c->in.events with c->in.events_tail
 * pointing at the terminating next pointer for O(1) append. */
struct event_list {
    xcb_generic_event_t *event; /* heap-allocated; ownership passes to the caller of get_event */
    struct event_list *next;
};
56
/* Chain of replies (or checked errors) accumulated for a single request;
 * stored either as c->in.current_reply or in the c->in.replies map. */
struct reply_list {
    void *reply; /* heap-allocated packet; freed by the consumer or free_reply_list */
    struct reply_list *next;
};
61
/* A range of outstanding requests whose responses need special handling
 * (workarounds, checked errors, or reply discarding). */
typedef struct pending_reply {
    uint64_t first_request; /* full 64-bit sequence number of the first covered request */
    uint64_t last_request;  /* inclusive end of the covered range */
    enum workarounds workaround;
    int flags;              /* XCB_REQUEST_* flags, e.g. CHECKED or DISCARD_REPLY */
    struct pending_reply *next;
} pending_reply;
69
/* One thread blocked in xcb_wait_for_reply. The list rooted at
 * c->in.readers is kept sorted by 32-bit request sequence number. */
typedef struct reader_list {
    unsigned int request;  /* 32-bit sequence number this reader waits for */
    pthread_cond_t *data;  /* condition variable to signal when its response arrives */
    struct reader_list *next;
} reader_list;
75
76 static void wake_up_next_reader(xcb_connection_t *c)
77 {
78     int pthreadret;
79     if(c->in.readers)
80         pthreadret = pthread_cond_signal(c->in.readers->data);
81     else
82         pthreadret = pthread_cond_signal(&c->in.event_cond);
83     assert(pthreadret == 0);
84 }
85
/* Consume one complete packet (reply, error, or event) from the buffered
 * input and route it to the appropriate consumer.  Returns 1 if a packet
 * was processed (caller should loop), 0 if less than a full 32-byte header
 * is buffered or an error occurred (allocation failure shuts the
 * connection down).  Runs with the iolock held; may block inside
 * _xcb_in_read_block for the tail of a long reply. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32; /* every X11 response starts with a 32-byte fixed part */
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    /* KeymapNotify is the one event that carries no sequence number, so it
     * must not participate in sequence tracking. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Widen the wire's 16-bit sequence into our 64-bit counter,
         * bumping the high part if the low 16 bits wrapped. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* A new request's responses are starting: archive the previous
             * request's reply chain into the replies map. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Retire pending_reply records wholly before the completion point;
         * external-socket-owner records are kept until explicitly closed. */
        while(c->in.pending_replies && 
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates its request: no further replies can follow. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    /* Find the pending_reply record (if any) covering this response. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Buggy servers report a wrong length field here; recompute it
             * from the reply's own counts (see the workaround's users). */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        /* NOTE(review): genrep.length * 4 is computed in (signed) int; a
         * reply longer than ~512MB would overflow — TODO confirm whether a
         * bounds check is warranted here. */
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
    {
        eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
    }

    /* Non-reply packets get an extra trailing uint32_t to hold the widened
     * full_sequence (written below). */
    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    /* The caller asked (via xcb_discard_reply) to drop this response. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    /* Stamp events/errors with the widened sequence number. */
    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader (if any) waiting on exactly this sequence number;
         * the list is sorted, so stop scanning once we pass it. */
        for(reader = c->in.readers; 
            reader && 
            XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
            reader = reader->next)
        {
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
        }
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}
243
244 static xcb_generic_event_t *get_event(xcb_connection_t *c)
245 {
246     struct event_list *cur = c->in.events;
247     xcb_generic_event_t *ret;
248     if(!c->in.events)
249         return 0;
250     ret = cur->event;
251     c->in.events = cur->next;
252     if(!cur->next)
253         c->in.events_tail = &c->in.events;
254     free(cur);
255     return ret;
256 }
257
258 static void free_reply_list(struct reply_list *head)
259 {
260     while(head)
261     {
262         struct reply_list *cur = head;
263         head = cur->next;
264         free(cur->reply);
265         free(cur);
266     }
267 }
268
/* Read exactly len bytes from fd into buf, blocking (via poll/select) when
 * the nonblocking descriptor has no data yet.  Returns len on success, or
 * the failing read's return value (0 on EOF, negative on error).
 * NOTE(review): done and ret are int while len is ssize_t, so len values
 * above INT_MAX would misbehave — in practice lengths here are bounded by
 * packet sizes; TODO confirm. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
#ifndef _WIN32
        int ret = read(fd, ((char *) buf) + done, len - done);
#else
        int ret = recv(fd, ((char *) buf) + done, len - done,0);
#endif /* !_WIN32 */

        if(ret > 0)
            done += ret;
        /* Would-block: wait until the descriptor is readable, then retry. */
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
#if USE_POLL
#ifndef _WIN32
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#endif /* !_WIN32 */
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);
#ifndef _WIN32
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#else
            /* the do while loop used for the non-windows version isn't required*/
            /* for windows since there are no signals in Windows hence no EINTR*/
            ret = select(fd + 1, &fds, 0, 0, 0);
#endif /* !_WIN32 */
#endif /* USE_POLL */
        }
        /* EOF, hard error, or failed wait: propagate to the caller. */
        if(ret <= 0)
            return ret;
    }
    return len;
}
318
/* Non-blocking check for the response to `request`.  Returns 1 when the
 * outcome is known now (storing the reply in *reply, or the error in
 * *error, or neither if the request produced nothing), and 0 when the
 * caller must block because more responses may still arrive.  Must be
 * called with the iolock held. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Multi-reply request: put the remainder back for later polls. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Route an error packet to *error (or drop it if the caller passed
         * no error slot); anything else is a genuine reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
373
374 /* Public interface */
375
/* Public: block until the response to `request` arrives, returning the
 * reply (caller frees) or NULL.  If e is non-NULL, *e receives any error
 * packet (caller frees); otherwise errors are discarded by poll_for_reply. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

    /* Reconstruct the full 64-bit sequence from the caller's 32-bit one,
     * using the current output counter as the reference point. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the reader list, kept sorted by sequence. */
        for(prev_reader = &c->in.readers; 
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until read_packet signals our condvar, then re-poll; stop
         * early if the connection dies. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink our stack-allocated reader node before it goes out of scope. */
        for(prev_reader = &c->in.readers;
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
432
433 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
434 {
435     pending_reply *pend;
436     pend = malloc(sizeof(*pend));
437     if(!pend)
438     {
439         _xcb_conn_shutdown(c);
440         return;
441     }
442
443     pend->first_request = seq;
444     pend->last_request = seq;
445     pend->workaround = 0;
446     pend->flags = XCB_REQUEST_DISCARD_REPLY;
447     pend->next = *prev_next;
448     *prev_next = pend;
449
450     if(!pend->next)
451         c->in.pending_replies_tail = &pend->next;
452 }
453
/* Drop all responses (current and future) for `request`, freeing any that
 * have already been received and arranging for read_packet to free the
 * rest on arrival.  Must be called with the iolock held. */
static void discard_reply(xcb_connection_t *c, unsigned int request)
{
    pending_reply *pend = 0;
    pending_reply **prev_pend;
    uint64_t widened_request;

    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        struct reply_list *head;
        head = _xcb_map_remove(c->in.replies, request);
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }
        return;
    }

    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. Free it, and mark the pend to free any further
     * replies. */
    if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        struct reply_list *head;
        head = c->in.current_reply;
        c->in.current_reply = NULL;
        c->in.current_reply_tail = &c->in.current_reply;
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }

        /* Find the pending_reply covering this request (same test as in
         * read_packet); flag it, or create a discard record if none exists. */
        pend = c->in.pending_replies;
        if(pend &&
            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
        if(pend)
            pend->flags |= XCB_REQUEST_DISCARD_REPLY;
        else
            insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);

        return;
    }

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
            break;

        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    /* Widen the 32-bit sequence exactly as xcb_wait_for_reply does. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    insert_pending_discard(c, prev_pend, widened_request);
}
528
529 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
530 {
531     if(c->has_error)
532         return;
533
534     /* If an error occurred when issuing the request, fail immediately. */
535     if(!sequence)
536         return;
537
538     pthread_mutex_lock(&c->iolock);
539     discard_reply(c, sequence);
540     pthread_mutex_unlock(&c->iolock);
541 }
542
543 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
544 {
545     int ret;
546     if(c->has_error)
547     {
548         *reply = 0;
549         if(error)
550             *error = 0;
551         return 1; /* would not block */
552     }
553     assert(reply != 0);
554     pthread_mutex_lock(&c->iolock);
555     ret = poll_for_reply(c, request, reply, error);
556     pthread_mutex_unlock(&c->iolock);
557     return ret;
558 }
559
560 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
561 {
562     xcb_generic_event_t *ret;
563     if(c->has_error)
564         return 0;
565     pthread_mutex_lock(&c->iolock);
566     /* get_event returns 0 on empty list. */
567     while(!(ret = get_event(c)))
568         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
569             break;
570
571     wake_up_next_reader(c);
572     pthread_mutex_unlock(&c->iolock);
573     return ret;
574 }
575
576 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
577 {
578     xcb_generic_event_t *ret = 0;
579     if(!c->has_error)
580     {
581         pthread_mutex_lock(&c->iolock);
582         /* FIXME: follow X meets Z architecture changes. */
583         ret = get_event(c);
584         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
585             ret = get_event(c);
586         pthread_mutex_unlock(&c->iolock);
587     }
588     return ret;
589 }
590
/* Public: synchronously check whether the void request behind `cookie`
 * produced an error.  Returns the error (caller frees) or NULL on success. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
     * that would require factoring the locking out of xcb_get_input_focus,
     * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
    xcb_generic_error_t *ret;
    void *reply;
    if(c->has_error)
        return 0;
    /* If the server hasn't yet responded past this request, force a round
     * trip (GetInputFocus is the traditional cheap sync request) so that
     * any error for `cookie` is guaranteed to have arrived. */
    if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
       && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
    {
        free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
        assert(!ret);
    }
    /* A void request never has a reply, so this yields only the error. */
    reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
    assert(!reply);
    return ret;
}
610
611 /* Private interface */
612
/* Initialize the input-side state of a connection.  Returns 1 on success,
 * 0 on failure (condvar init or map allocation failed).
 * NOTE(review): fields such as current_reply, events, pending_replies and
 * readers are never assigned here, yet the tail pointers reference them —
 * presumably the caller zero-initializes the struct; TODO confirm. */
int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    /* Empty lists: each tail pointer points at its own list root. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}
634
635 void _xcb_in_destroy(_xcb_in *in)
636 {
637     pthread_cond_destroy(&in->event_cond);
638     free_reply_list(in->current_reply);
639     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
640     while(in->events)
641     {
642         struct event_list *e = in->events;
643         in->events = e->next;
644         free(e->event);
645         free(e);
646     }
647     while(in->pending_replies)
648     {
649         pending_reply *pend = in->pending_replies;
650         in->pending_replies = pend->next;
651         free(pend);
652     }
653 }
654
655 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
656 {
657     pending_reply *pend = malloc(sizeof(pending_reply));
658     assert(workaround != WORKAROUND_NONE || flags != 0);
659     if(!pend)
660     {
661         _xcb_conn_shutdown(c);
662         return 0;
663     }
664     pend->first_request = pend->last_request = request;
665     pend->workaround = workaround;
666     pend->flags = flags;
667     pend->next = 0;
668     *c->in.pending_replies_tail = pend;
669     c->in.pending_replies_tail = &pend->next;
670     return 1;
671 }
672
673 void _xcb_in_replies_done(xcb_connection_t *c)
674 {
675     struct pending_reply *pend;
676     if (c->in.pending_replies_tail != &c->in.pending_replies)
677     {
678         pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
679         if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
680         {
681             pend->last_request = c->out.request;
682             pend->workaround = WORKAROUND_NONE;
683         }
684     }
685 }
686
/* Perform one nonblocking read from the server socket into the input queue
 * and process every complete packet now buffered.  Returns 1 if the
 * connection is still healthy (data read, or read would merely block),
 * 0 after shutting the connection down on EOF or a hard error. */
int _xcb_in_read(xcb_connection_t *c)
{
#ifndef _WIN32
    int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
#else
    int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len,0);
#endif /* !_WIN32 */
    if(n > 0)
        c->in.queue_len += n;
    /* Dispatch every full packet currently in the queue. */
    while(read_packet(c))
        /* empty */;
    /* n == 0 means EOF; a would-block "error" is not fatal. */
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c);
    return 0;
}
707
/* Copy exactly len bytes into buf: first drain whatever is already in the
 * input queue, then block in read_block for the remainder.  Returns len on
 * success; on failure shuts the connection down and returns read_block's
 * result (<= 0). */
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    /* Take up to len bytes from the already-buffered queue. */
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    /* Shift the remaining buffered bytes to the front of the queue. */
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c);
            return ret;
        }
    }

    return len;
}