Fix strict-aliasing warning when getting generic event length.
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #include <assert.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <stdio.h>
33 #include <errno.h>
34
35 #include "xcb.h"
36 #include "xcbext.h"
37 #include "xcbint.h"
38 #if USE_POLL
39 #include <poll.h>
40 #else
41 #include <sys/select.h>
42 #endif
43
#define XCB_ERROR 0      /* response_type of an error packet */
#define XCB_REPLY 1      /* response_type of a reply packet */
#define XCB_XGE_EVENT 35 /* response_type of a GenericEvent (XGE) packet */

/* FIFO node for events not yet handed to the application. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

/* Singly-linked list of reply buffers for a single request. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};

/* Bookkeeping for a range of requests whose responses need special
 * handling (workarounds, discarded replies, checked errors). */
typedef struct pending_reply {
    uint64_t first_request;    /* first sequence number covered */
    uint64_t last_request;     /* last sequence number covered */
    enum workarounds workaround;
    int flags;                 /* XCB_REQUEST_* flags */
    struct pending_reply *next;
} pending_reply;

/* One thread blocked in xcb_wait_for_reply; kept sorted by request
 * number and woken through its condition variable. */
typedef struct reader_list {
    unsigned int request;      /* 32-bit sequence number awaited */
    pthread_cond_t *data;      /* cond the waiter sleeps on */
    struct reader_list *next;
} reader_list;
71
72 static void wake_up_next_reader(xcb_connection_t *c)
73 {
74     int pthreadret;
75     if(c->in.readers)
76         pthreadret = pthread_cond_signal(c->in.readers->data);
77     else
78         pthreadret = pthread_cond_signal(&c->in.event_cond);
79     assert(pthreadret == 0);
80 }
81
82 static int read_packet(xcb_connection_t *c)
83 {
84     xcb_generic_reply_t genrep;
85     int length = 32;
86     int eventlength = 0; /* length after first 32 bytes for GenericEvents */
87     void *buf;
88     pending_reply *pend = 0;
89     struct event_list *event;
90
91     /* Wait for there to be enough data for us to read a whole packet */
92     if(c->in.queue_len < length)
93         return 0;
94
95     /* Get the response type, length, and sequence number. */
96     memcpy(&genrep, c->in.queue, sizeof(genrep));
97
98     /* Compute 32-bit sequence number of this packet. */
99     if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
100     {
101         uint64_t lastread = c->in.request_read;
102         c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
103         if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
104             c->in.request_read += 0x10000;
105         if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
106             c->in.request_expected = c->in.request_read;
107
108         if(c->in.request_read != lastread)
109         {
110             if(c->in.current_reply)
111             {
112                 _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
113                 c->in.current_reply = 0;
114                 c->in.current_reply_tail = &c->in.current_reply;
115             }
116             c->in.request_completed = c->in.request_read - 1;
117         }
118
119         while(c->in.pending_replies && 
120               c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
121               XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
122         {
123             pending_reply *oldpend = c->in.pending_replies;
124             c->in.pending_replies = oldpend->next;
125             if(!oldpend->next)
126                 c->in.pending_replies_tail = &c->in.pending_replies;
127             free(oldpend);
128         }
129
130         if(genrep.response_type == XCB_ERROR)
131             c->in.request_completed = c->in.request_read;
132     }
133
134     if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
135     {
136         pend = c->in.pending_replies;
137         if(pend &&
138            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
139              (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
140               XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
141             pend = 0;
142     }
143
144     /* For reply packets, check that the entire packet is available. */
145     if(genrep.response_type == XCB_REPLY)
146     {
147         if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
148         {
149             uint32_t *p = (uint32_t *) c->in.queue;
150             genrep.length = p[2] * p[3] * 2;
151         }
152         length += genrep.length * 4;
153     }
154
155     /* XGE events may have sizes > 32 */
156     if (genrep.response_type == XCB_XGE_EVENT)
157         eventlength = genrep.length * 4;
158
159     buf = malloc(length + eventlength +
160             (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
161     if(!buf)
162     {
163         _xcb_conn_shutdown(c);
164         return 0;
165     }
166
167     if(_xcb_in_read_block(c, buf, length) <= 0)
168     {
169         free(buf);
170         return 0;
171     }
172
173     /* pull in XGE event data if available, append after event struct */
174     if (eventlength)
175     {
176         if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
177         {
178             free(buf);
179             return 0;
180         }
181     }
182
183     if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
184     {
185         free(buf);
186         return 1;
187     }
188
189     if(genrep.response_type != XCB_REPLY)
190         ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;
191
192     /* reply, or checked error */
193     if( genrep.response_type == XCB_REPLY ||
194        (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
195     {
196         reader_list *reader;
197         struct reply_list *cur = malloc(sizeof(struct reply_list));
198         if(!cur)
199         {
200             _xcb_conn_shutdown(c);
201             free(buf);
202             return 0;
203         }
204         cur->reply = buf;
205         cur->next = 0;
206         *c->in.current_reply_tail = cur;
207         c->in.current_reply_tail = &cur->next;
208         for(reader = c->in.readers; 
209             reader && 
210             XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
211             reader = reader->next)
212         {
213             if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
214             {
215                 pthread_cond_signal(reader->data);
216                 break;
217             }
218         }
219         return 1;
220     }
221
222     /* event, or unchecked error */
223     event = malloc(sizeof(struct event_list));
224     if(!event)
225     {
226         _xcb_conn_shutdown(c);
227         free(buf);
228         return 0;
229     }
230     event->event = buf;
231     event->next = 0;
232     *c->in.events_tail = event;
233     c->in.events_tail = &event->next;
234     pthread_cond_signal(&c->in.event_cond);
235     return 1; /* I have something for you... */
236 }
237
238 static xcb_generic_event_t *get_event(xcb_connection_t *c)
239 {
240     struct event_list *cur = c->in.events;
241     xcb_generic_event_t *ret;
242     if(!c->in.events)
243         return 0;
244     ret = cur->event;
245     c->in.events = cur->next;
246     if(!cur->next)
247         c->in.events_tail = &c->in.events;
248     free(cur);
249     return ret;
250 }
251
252 static void free_reply_list(struct reply_list *head)
253 {
254     while(head)
255     {
256         struct reply_list *cur = head;
257         head = cur->next;
258         free(cur->reply);
259         free(cur);
260     }
261 }
262
/* Read exactly len bytes from fd into buf, blocking (via poll/select) when
 * the descriptor is non-blocking and would return EAGAIN.  Returns len on
 * success, or the failing read/poll/select result (<= 0) otherwise. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int have = 0;
    while(have < len)
    {
        int n = read(fd, ((char *) buf) + have, len - have);
        if(n > 0)
            have += n;
        if(n < 0 && errno == EAGAIN)
        {
            /* No data yet: wait for readability, retrying on EINTR. */
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                n = poll(&pfd, 1, -1);
            } while (n == -1 && errno == EINTR);
#else
            fd_set readable;
            FD_ZERO(&readable);
            FD_SET(fd, &readable);
            do {
                n = select(fd + 1, &readable, 0, 0, 0);
            } while (n == -1 && errno == EINTR);
#endif
        }
        if(n <= 0)
            return n;
    }
    return len;
}
295
/* Without blocking, decide whether the response to `request` is settled.
 * Returns 1 when the answer is known (and fills *reply, and *error if
 * non-NULL, possibly with 0 for "no response"), or 0 when more data must
 * be read from the server first.  Must be called with the iolock held. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Re-insert any remaining replies for this request. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* An error packet is routed to *error if the caller wants it,
         * otherwise silently dropped. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
350
351 /* Public interface */
352
/* Block until the response to `request` is settled.  Returns the reply
 * buffer (caller frees), or 0; a generated error is stored in *e when e is
 * non-NULL.  Takes and releases the connection's iolock. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

    /* Widen the 32-bit sequence number to 64 bits using the current full
     * request count, correcting for wraparound. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the readers list, which is kept sorted by
         * request number (stack-allocated node; removed before return). */
        for(prev_reader = &c->in.readers; 
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until poll_for_reply can settle the question, or the
         * connection wait fails. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink our stack-allocated reader node again. */
        for(prev_reader = &c->in.readers;
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
409
410 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
411 {
412     pending_reply *pend;
413     pend = malloc(sizeof(*pend));
414     if(!pend)
415     {
416         _xcb_conn_shutdown(c);
417         return;
418     }
419
420     pend->first_request = seq;
421     pend->last_request = seq;
422     pend->workaround = 0;
423     pend->flags = XCB_REQUEST_DISCARD_REPLY;
424     pend->next = *prev_next;
425     *prev_next = pend;
426
427     if(!pend->next)
428         c->in.pending_replies_tail = &pend->next;
429 }
430
/* Arrange for any response(s) to `request` to be freed instead of being
 * delivered: frees replies already received and marks (or creates) a
 * pending_reply for those still in flight.  Must be called with the
 * iolock held. */
static void discard_reply(xcb_connection_t *c, unsigned int request)
{
    pending_reply *pend = 0;
    pending_reply **prev_pend;
    uint64_t widened_request;

    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        struct reply_list *head;
        head = _xcb_map_remove(c->in.replies, request);
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }
        return;
    }

    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. Free it, and mark the pend to free any further
     * replies. */
    if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        struct reply_list *head;
        head = c->in.current_reply;
        c->in.current_reply = NULL;
        c->in.current_reply_tail = &c->in.current_reply;
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }

        /* Find the pending_reply covering the current request, if any. */
        pend = c->in.pending_replies;
        if(pend &&
            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
        if(pend)
            pend->flags |= XCB_REQUEST_DISCARD_REPLY;
        else
            insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);

        return;
    }

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        /* The list is sorted, so passing `request` means no match exists. */
        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
            break;

        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    /* Widen the 32-bit sequence number, correcting for wraparound. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    insert_pending_discard(c, prev_pend, widened_request);
}
505
506 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
507 {
508     if(c->has_error)
509         return;
510
511     /* If an error occurred when issuing the request, fail immediately. */
512     if(!sequence)
513         return;
514
515     pthread_mutex_lock(&c->iolock);
516     discard_reply(c, sequence);
517     pthread_mutex_unlock(&c->iolock);
518 }
519
520 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
521 {
522     int ret;
523     if(c->has_error)
524     {
525         *reply = 0;
526         if(error)
527             *error = 0;
528         return 1; /* would not block */
529     }
530     assert(reply != 0);
531     pthread_mutex_lock(&c->iolock);
532     ret = poll_for_reply(c, request, reply, error);
533     pthread_mutex_unlock(&c->iolock);
534     return ret;
535 }
536
537 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
538 {
539     xcb_generic_event_t *ret;
540     if(c->has_error)
541         return 0;
542     pthread_mutex_lock(&c->iolock);
543     /* get_event returns 0 on empty list. */
544     while(!(ret = get_event(c)))
545         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
546             break;
547
548     wake_up_next_reader(c);
549     pthread_mutex_unlock(&c->iolock);
550     return ret;
551 }
552
553 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
554 {
555     xcb_generic_event_t *ret = 0;
556     if(!c->has_error)
557     {
558         pthread_mutex_lock(&c->iolock);
559         /* FIXME: follow X meets Z architecture changes. */
560         ret = get_event(c);
561         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
562             ret = get_event(c);
563         pthread_mutex_unlock(&c->iolock);
564     }
565     return ret;
566 }
567
568 xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
569 {
570     /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
571      * that would require factoring the locking out of xcb_get_input_focus,
572      * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
573     xcb_generic_error_t *ret;
574     void *reply;
575     if(c->has_error)
576         return 0;
577     if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
578        && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
579     {
580         free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
581         assert(!ret);
582     }
583     reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
584     assert(!reply);
585     return ret;
586 }
587
588 /* Private interface */
589
590 int _xcb_in_init(_xcb_in *in)
591 {
592     if(pthread_cond_init(&in->event_cond, 0))
593         return 0;
594     in->reading = 0;
595
596     in->queue_len = 0;
597
598     in->request_read = 0;
599     in->request_completed = 0;
600
601     in->replies = _xcb_map_new();
602     if(!in->replies)
603         return 0;
604
605     in->current_reply_tail = &in->current_reply;
606     in->events_tail = &in->events;
607     in->pending_replies_tail = &in->pending_replies;
608
609     return 1;
610 }
611
612 void _xcb_in_destroy(_xcb_in *in)
613 {
614     pthread_cond_destroy(&in->event_cond);
615     free_reply_list(in->current_reply);
616     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
617     while(in->events)
618     {
619         struct event_list *e = in->events;
620         in->events = e->next;
621         free(e->event);
622         free(e);
623     }
624     while(in->pending_replies)
625     {
626         pending_reply *pend = in->pending_replies;
627         in->pending_replies = pend->next;
628         free(pend);
629     }
630 }
631
632 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
633 {
634     pending_reply *pend = malloc(sizeof(pending_reply));
635     assert(workaround != WORKAROUND_NONE || flags != 0);
636     if(!pend)
637     {
638         _xcb_conn_shutdown(c);
639         return 0;
640     }
641     pend->first_request = pend->last_request = request;
642     pend->workaround = workaround;
643     pend->flags = flags;
644     pend->next = 0;
645     *c->in.pending_replies_tail = pend;
646     c->in.pending_replies_tail = &pend->next;
647     return 1;
648 }
649
/* Close out an open WORKAROUND_EXTERNAL_SOCKET_OWNER pending_reply (the
 * last record in the list, if it is one) by stamping it with the latest
 * request number and clearing its workaround.  NOTE(review): presumably
 * invoked when an external socket owner returns the socket — confirm
 * against callers. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    /* Non-empty list check: tail no longer points at the head. */
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* tail points at the last node's `next` field; recover the node. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
663
664 int _xcb_in_read(xcb_connection_t *c)
665 {
666     int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
667     if(n > 0)
668         c->in.queue_len += n;
669     while(read_packet(c))
670         /* empty */;
671     if((n > 0) || (n < 0 && errno == EAGAIN))
672         return 1;
673     _xcb_conn_shutdown(c);
674     return 0;
675 }
676
677 int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
678 {
679     int done = c->in.queue_len;
680     if(len < done)
681         done = len;
682
683     memcpy(buf, c->in.queue, done);
684     c->in.queue_len -= done;
685     memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
686
687     if(len > done)
688     {
689         int ret = read_block(c->fd, (char *) buf + done, len - done);
690         if(ret <= 0)
691         {
692             _xcb_conn_shutdown(c);
693             return ret;
694         }
695     }
696
697     return len;
698 }