use poll() instead of select() when available
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #include <assert.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <stdio.h>
33 #include <errno.h>
34
35 #include "xcb.h"
36 #include "xcbext.h"
37 #include "xcbint.h"
38 #if USE_POLL
39 #include <poll.h>
40 #else
41 #include <sys/select.h>
42 #endif
43
44 #define XCB_ERROR 0
45 #define XCB_REPLY 1
46 #define XCB_XGE_EVENT 35
47
/* Singly-linked FIFO node holding one event awaiting delivery to
 * xcb_wait_for_event()/xcb_poll_for_event(); the list head/tail live
 * in c->in.events / c->in.events_tail. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};
52
/* Singly-linked list node owning one reply (or error) packet buffer for a
 * request; freed by free_reply_list() or handed to the caller. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};
57
/* Record covering the inclusive request range [first_request, last_request]
 * that needs special handling: a server-bug workaround and/or request flags
 * such as XCB_REQUEST_CHECKED / XCB_REQUEST_DISCARD_REPLY. */
typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;
65
/* One thread blocked in xcb_wait_for_reply(), kept in a list sorted by
 * request number; `data` is signalled when a response for `request`
 * arrives (see read_packet). */
typedef struct reader_list {
    unsigned int request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;
71
72 static void wake_up_next_reader(xcb_connection_t *c)
73 {
74     int pthreadret;
75     if(c->in.readers)
76         pthreadret = pthread_cond_signal(c->in.readers->data);
77     else
78         pthreadret = pthread_cond_signal(&c->in.event_cond);
79     assert(pthreadret == 0);
80 }
81
/* Read one complete packet (reply, error, or event) from the head of the
 * buffered input queue, allocate a copy, and route it to the right consumer:
 * replies and checked errors go onto the current-reply list (waking any
 * reader blocked on that sequence number), everything else onto the event
 * queue.  Returns 1 if a packet was consumed, 0 if not enough data is
 * buffered yet or on allocation/read failure (after shutting the
 * connection down).  Must be called with the iolock held. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;     /* every X11 response starts with 32 bytes */
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        /* The wire carries only the low 16 bits of the sequence number;
         * widen it against the last sequence read, accounting for wrap. */
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* A new sequence number means the previous request's replies
             * are complete: archive them in the replies map. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Discard pending_reply records whose requests can no longer get a
         * response (open-ended external-socket-owner ranges excepted). */
        while(c->in.pending_replies && 
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates its request: no further replies will come. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    /* Find the pending_reply record covering this sequence number, if any. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Server bug workaround: recompute the reply length from the
             * reply's own count fields instead of its length field. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
    {
        eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
    }

    /* Events get an extra uint32_t of storage for the full_sequence field. */
    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    /* The requester asked us to throw this reply away; do so now that it
     * has been consumed from the wire. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader (if any) blocked on exactly this request; the
         * list is sorted, so stop at the first request past this one. */
        for(reader = c->in.readers; 
            reader && 
            XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
            reader = reader->next)
        {
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
        }
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}
239
240 static xcb_generic_event_t *get_event(xcb_connection_t *c)
241 {
242     struct event_list *cur = c->in.events;
243     xcb_generic_event_t *ret;
244     if(!c->in.events)
245         return 0;
246     ret = cur->event;
247     c->in.events = cur->next;
248     if(!cur->next)
249         c->in.events_tail = &c->in.events;
250     free(cur);
251     return ret;
252 }
253
254 static void free_reply_list(struct reply_list *head)
255 {
256     while(head)
257     {
258         struct reply_list *cur = head;
259         head = cur->next;
260         free(cur->reply);
261         free(cur);
262     }
263 }
264
/* Read exactly `len` bytes from `fd` into `buf`.  The socket is
 * non-blocking, so when no data is available we block in poll() (or
 * select() where poll is unavailable) until it becomes readable.
 * Returns `len` on success, 0 on EOF, or the failing negative result of
 * read/poll/select.
 * Fixes over the previous version: byte counts are kept in ssize_t (no
 * int truncation of read()'s result), and a read() interrupted by a
 * signal (EINTR) is retried instead of being reported as a fatal error —
 * the poll/select wait already retried on EINTR, but the read itself
 * did not. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    ssize_t done = 0;
    while(done < len)
    {
        ssize_t ret = read(fd, ((char *) buf) + done, len - done);
        if(ret > 0)
            done += ret;
        if(ret < 0 && errno == EINTR)
            continue; /* interrupted by a signal: just retry the read */
        if(ret < 0 && errno == EAGAIN)
        {
            /* No data yet: wait until the descriptor is readable. */
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}
297
/* Core of xcb_poll_for_reply: decide, without blocking, whether the fate
 * of `request` is already known.  Returns 1 with *reply/*error filled in
 * (both may end up NULL when the request completed with no reply) if the
 * answer is known, or 0 if more data must arrive first.
 * Must be called with the iolock held. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Multi-reply request: put the remaining replies back for the
         * next call to find. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* An error packet goes to *error if the caller wants it, otherwise
         * it is dropped; a real reply goes to *reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
352
353 /* Public interface */
354
/* Block until the response for `request` has been read from the server,
 * then return its reply (caller frees), or NULL if the request failed or
 * produced no reply.  If `e` is non-NULL it receives any error packet
 * (caller frees); with e == NULL, errors take the unchecked path. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

    /* Widen the caller's 32-bit sequence number to 64 bits relative to the
     * last request written; a value "ahead" of out.request must really be
     * from the previous 2^32 epoch. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the reader list, kept sorted by request
         * number, so read_packet can wake exactly the right thread. */
        for(prev_reader = &c->in.readers; 
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until our condition variable is signalled (or the
         * connection dies) and the reply becomes available. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink ourselves from the reader list again. */
        for(prev_reader = &c->in.readers;
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
411
412 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
413 {
414     int ret;
415     if(c->has_error)
416     {
417         *reply = 0;
418         if(error)
419             *error = 0;
420         return 1; /* would not block */
421     }
422     assert(reply != 0);
423     pthread_mutex_lock(&c->iolock);
424     ret = poll_for_reply(c, request, reply, error);
425     pthread_mutex_unlock(&c->iolock);
426     return ret;
427 }
428
429 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
430 {
431     xcb_generic_event_t *ret;
432     if(c->has_error)
433         return 0;
434     pthread_mutex_lock(&c->iolock);
435     /* get_event returns 0 on empty list. */
436     while(!(ret = get_event(c)))
437         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
438             break;
439
440     wake_up_next_reader(c);
441     pthread_mutex_unlock(&c->iolock);
442     return ret;
443 }
444
445 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
446 {
447     xcb_generic_event_t *ret = 0;
448     if(!c->has_error)
449     {
450         pthread_mutex_lock(&c->iolock);
451         /* FIXME: follow X meets Z architecture changes. */
452         ret = get_event(c);
453         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
454             ret = get_event(c);
455         pthread_mutex_unlock(&c->iolock);
456     }
457     return ret;
458 }
459
460 xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
461 {
462     /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
463      * that would require factoring the locking out of xcb_get_input_focus,
464      * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
465     xcb_generic_error_t *ret;
466     void *reply;
467     if(c->has_error)
468         return 0;
469     if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
470        && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
471     {
472         free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
473         assert(!ret);
474     }
475     reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
476     assert(!reply);
477     return ret;
478 }
479
480 /* Private interface */
481
482 int _xcb_in_init(_xcb_in *in)
483 {
484     if(pthread_cond_init(&in->event_cond, 0))
485         return 0;
486     in->reading = 0;
487
488     in->queue_len = 0;
489
490     in->request_read = 0;
491     in->request_completed = 0;
492
493     in->replies = _xcb_map_new();
494     if(!in->replies)
495         return 0;
496
497     in->current_reply_tail = &in->current_reply;
498     in->events_tail = &in->events;
499     in->pending_replies_tail = &in->pending_replies;
500
501     return 1;
502 }
503
504 void _xcb_in_destroy(_xcb_in *in)
505 {
506     pthread_cond_destroy(&in->event_cond);
507     free_reply_list(in->current_reply);
508     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
509     while(in->events)
510     {
511         struct event_list *e = in->events;
512         in->events = e->next;
513         free(e->event);
514         free(e);
515     }
516     while(in->pending_replies)
517     {
518         pending_reply *pend = in->pending_replies;
519         in->pending_replies = pend->next;
520         free(pend);
521     }
522 }
523
524 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
525 {
526     pending_reply *pend = malloc(sizeof(pending_reply));
527     assert(workaround != WORKAROUND_NONE || flags != 0);
528     if(!pend)
529     {
530         _xcb_conn_shutdown(c);
531         return 0;
532     }
533     pend->first_request = pend->last_request = request;
534     pend->workaround = workaround;
535     pend->flags = flags;
536     pend->next = 0;
537     *c->in.pending_replies_tail = pend;
538     c->in.pending_replies_tail = &pend->next;
539     return 1;
540 }
541
542 void _xcb_in_replies_done(xcb_connection_t *c)
543 {
544     struct pending_reply *pend;
545     if (c->in.pending_replies_tail != &c->in.pending_replies)
546     {
547         pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
548         if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
549         {
550             pend->last_request = c->out.request;
551             pend->workaround = WORKAROUND_NONE;
552         }
553     }
554 }
555
556 int _xcb_in_read(xcb_connection_t *c)
557 {
558     int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
559     if(n > 0)
560         c->in.queue_len += n;
561     while(read_packet(c))
562         /* empty */;
563     if((n > 0) || (n < 0 && errno == EAGAIN))
564         return 1;
565     _xcb_conn_shutdown(c);
566     return 0;
567 }
568
569 int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
570 {
571     int done = c->in.queue_len;
572     if(len < done)
573         done = len;
574
575     memcpy(buf, c->in.queue, done);
576     c->in.queue_len -= done;
577     memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
578
579     if(len > done)
580     {
581         int ret = read_block(c->fd, (char *) buf + done, len - done);
582         if(ret <= 0)
583         {
584             _xcb_conn_shutdown(c);
585             return ret;
586         }
587     }
588
589     return len;
590 }