Track 64-bit sequence numbers internally.
[free-sw/xcb/libxcb] / src / xcb_in.c
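The X11 wire protocol carries only the low 16 bits of each request's sequence number, and the public cookie API exposes at most 32 bits, so this file reconstructs full 64-bit values by splicing the truncated number into the last known 64-bit counter and correcting for wraparound. A minimal standalone sketch of that arithmetic follows; the helper names are illustrative only and not part of libxcb.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring read_packet(): the server sends only the
 * low 16 bits of a sequence number, so splice them into the 64-bit counter
 * of the last response read and bump one 2^16 epoch on wraparound. */
static uint64_t widen_wire_sequence(uint64_t last_read, uint16_t wire)
{
    uint64_t seq = (last_read & UINT64_C(0xffffffffffff0000)) | wire;
    if(seq < last_read)
        seq += UINT64_C(0x10000);
    return seq;
}

/* Hypothetical helper mirroring xcb_wait_for_reply(): cookies expose only
 * 32 bits, so widen against the 64-bit sequence of the last request written,
 * stepping back one 2^32 epoch if the naive splice lands in the future. */
static uint64_t widen_cookie_sequence(uint64_t last_written, uint32_t cookie)
{
    uint64_t seq = (last_written & UINT64_C(0xffffffff00000000)) | cookie;
    if(seq > last_written)
        seq -= UINT64_C(1) << 32;
    return seq;
}

int main(void)
{
    /* Low 16 bits wrapped from 0xffff back to 2: widened result is 0x10002. */
    printf("%llu\n", (unsigned long long) widen_wire_sequence(0xffff, 2));
    /* Cookie 3 issued in the current 2^32 epoch: widened result is 0x100000003. */
    printf("%llu\n", (unsigned long long) widen_cookie_sequence(UINT64_C(0x100000005), 3));
    return 0;
}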
/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the names of the authors or their
 * institutions shall not be used in advertising or otherwise to promote the
 * sale, use or other dealings in this Software without prior written
 * authorization from the authors.
 */

/* Stuff that reads stuff from the server. */

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/select.h>
#include <errno.h>

#include "xcb.h"
#include "xcbext.h"
#include "xcbint.h"

#define XCB_ERROR 0
#define XCB_REPLY 1
#define XCB_XGE_EVENT 35

struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

struct reply_list {
    void *reply;
    struct reply_list *next;
};

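/* Sequence numbers are tracked internally as full 64-bit values: a
 * pending_reply spans a range of 64-bit request numbers, while a reader_list
 * entry keeps the 32-bit sequence taken from the caller's cookie and is
 * compared with XCB_SEQUENCE_COMPARE_32. */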
typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;

typedef struct reader_list {
    unsigned int request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;

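/* Hand the connection off: signal the first queued reader if there is one,
 * otherwise wake anyone blocked waiting for events. */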
static void wake_up_next_reader(xcb_connection_t *c)
{
    int pthreadret;
    if(c->in.readers)
        pthreadret = pthread_cond_signal(c->in.readers->data);
    else
        pthreadret = pthread_cond_signal(&c->in.event_cond);
    assert(pthreadret == 0);
}

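/* Parse one complete packet from the input queue, widening its 16-bit wire
 * sequence number into the 64-bit counters, and queue it as a reply, a
 * checked error, or an event. (KeymapNotify events carry no sequence number
 * and are skipped for the sequence bookkeeping.) Returns 1 when a packet was
 * consumed; 0 when a full packet is not yet available or an error shut the
 * connection down. */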
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute the full 64-bit sequence number of this packet. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        while(c->in.pending_replies &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           (XCB_SEQUENCE_COMPARE(c->in.request_read, <, pend->first_request) ||
            XCB_SEQUENCE_COMPARE(c->in.request_read, >, pend->last_request)))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
    {
        eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
    }

    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        for(reader = c->in.readers;
            reader &&
            XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
            reader = reader->next)
        {
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
        }
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}

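/* Pop the oldest queued event, or return 0 if the queue is empty.
 * Caller must hold the I/O lock. */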
static xcb_generic_event_t *get_event(xcb_connection_t *c)
{
    struct event_list *cur = c->in.events;
    xcb_generic_event_t *ret;
    if(!c->in.events)
        return 0;
    ret = cur->event;
    c->in.events = cur->next;
    if(!cur->next)
        c->in.events_tail = &c->in.events;
    free(cur);
    return ret;
}

static void free_reply_list(struct reply_list *head)
{
    while(head)
    {
        struct reply_list *cur = head;
        head = cur->next;
        free(cur->reply);
        free(cur);
    }
}

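/* Read exactly len bytes from fd, using select() to wait out EAGAIN on the
 * non-blocking socket. Returns len on success, 0 or -1 on EOF or error. */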
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = read(fd, ((char *) buf) + done, len - done);
        if(ret > 0)
            done += ret;
        if(ret < 0 && errno == EAGAIN)
        {
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}

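/* Look up the current answer for a request without blocking. Returns 0 when
 * more replies may still arrive and the caller should wait; returns 1 once
 * the outcome is known, storing the reply or error (either may be null). */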
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}

/* Public interface */

void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

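    /* The cookie carries only 32 bits of sequence number; widen it against
     * the 64-bit sequence of the last request written, stepping back one
     * 2^32 epoch if the naive splice would name a request from the future. */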
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(_xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        for(prev_reader = &c->in.readers;
            *prev_reader &&
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        for(prev_reader = &c->in.readers;
            *prev_reader &&
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    int ret;
    if(c->has_error)
    {
        *reply = 0;
        if(error)
            *error = 0;
        return 1; /* would not block */
    }
    assert(reply != 0);
    pthread_mutex_lock(&c->iolock);
    ret = poll_for_reply(c, request, reply, error);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
{
    xcb_generic_event_t *ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    /* get_event returns 0 on empty list. */
    while(!(ret = get_event(c)))
        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
            break;

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
{
    xcb_generic_event_t *ret = 0;
    if(!c->has_error)
    {
        pthread_mutex_lock(&c->iolock);
        /* FIXME: follow X meets Z architecture changes. */
        ret = get_event(c);
        if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
            ret = get_event(c);
        pthread_mutex_unlock(&c->iolock);
    }
    return ret;
}

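/* When no response at or past this cookie has been seen yet (both
 * request_expected and request_completed are still behind it), force a round
 * trip with GetInputFocus so any error for the checked request is guaranteed
 * to have arrived before we wait for it. */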
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
     * that would require factoring the locking out of xcb_get_input_focus,
     * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
    xcb_generic_error_t *ret;
    void *reply;
    if(c->has_error)
        return 0;
    if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
       && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
    {
        free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
        assert(!ret);
    }
    reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
    assert(!reply);
    return ret;
}

/* Private interface */

int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}

void _xcb_in_destroy(_xcb_in *in)
{
    pthread_cond_destroy(&in->event_cond);
    free_reply_list(in->current_reply);
    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
    while(in->events)
    {
        struct event_list *e = in->events;
        in->events = e->next;
        free(e->event);
        free(e);
    }
    while(in->pending_replies)
    {
        pending_reply *pend = in->pending_replies;
        in->pending_replies = pend->next;
        free(pend);
    }
}

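/* Record that a reply (or checked error) is expected for the given 64-bit
 * request number, along with any workaround or routing flags read_packet
 * will need when the response arrives. */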
int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
{
    pending_reply *pend = malloc(sizeof(pending_reply));
    assert(workaround != WORKAROUND_NONE || flags != 0);
    if(!pend)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }
    pend->first_request = pend->last_request = request;
    pend->workaround = workaround;
    pend->flags = flags;
    pend->next = 0;
    *c->in.pending_replies_tail = pend;
    c->in.pending_replies_tail = &pend->next;
    return 1;
}

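/* Do one non-blocking read into the fixed-size input queue and parse as many
 * complete packets as possible. Returns 1 if the connection is still usable
 * (EAGAIN is not an error); shuts the connection down and returns 0 otherwise. */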
int _xcb_in_read(xcb_connection_t *c)
{
    int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
    if(n > 0)
        c->in.queue_len += n;
    while(read_packet(c))
        /* empty */;
    if((n > 0) || (n < 0 && errno == EAGAIN))
        return 1;
    _xcb_conn_shutdown(c);
    return 0;
}

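/* Fill buf with len bytes: drain whatever is already buffered in the input
 * queue, then read the remainder straight from the socket with read_block. */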
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c);
            return ret;
        }
    }

    return len;
}