Support handing off socket write permission to external code.
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #include <assert.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <stdio.h>
33 #include <sys/select.h>
34 #include <errno.h>
35
36 #include "xcb.h"
37 #include "xcbext.h"
38 #include "xcbint.h"
39
#define XCB_ERROR 0      /* response_type of an error packet */
#define XCB_REPLY 1      /* response_type of a reply packet */
#define XCB_XGE_EVENT 35 /* response_type of a GenericEvent; may carry >32 bytes */
43
/* FIFO node for events queued until xcb_wait_for_event /
 * xcb_poll_for_event hands them to the application. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};
48
/* List node holding one reply (or checked-error) buffer for a request;
 * a request with multiple replies chains several of these. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};
53
/* Record of a request range whose responses need special handling:
 * a protocol workaround and/or XCB_REQUEST_* flags. */
typedef struct pending_reply {
    uint64_t first_request;      /* first sequence number covered */
    uint64_t last_request;       /* last sequence number covered */
    enum workarounds workaround;
    int flags;                   /* XCB_REQUEST_CHECKED / XCB_REQUEST_DISCARD_REPLY */
    struct pending_reply *next;
} pending_reply;
61
/* One blocked xcb_wait_for_reply caller; the list is kept sorted by
 * sequence number and `data` is signalled when the reply for `request`
 * arrives (see read_packet). */
typedef struct reader_list {
    unsigned int request;   /* 32-bit sequence number being waited on */
    pthread_cond_t *data;   /* condition the waiting thread sleeps on */
    struct reader_list *next;
} reader_list;
67
68 static void wake_up_next_reader(xcb_connection_t *c)
69 {
70     int pthreadret;
71     if(c->in.readers)
72         pthreadret = pthread_cond_signal(c->in.readers->data);
73     else
74         pthreadret = pthread_cond_signal(&c->in.event_cond);
75     assert(pthreadret == 0);
76 }
77
/* Parse one complete packet (reply, event, or error) out of the input
 * queue, update sequence-number bookkeeping, and route the packet
 * either onto a reply list (waking a waiting reader) or onto the event
 * queue.  Returns 1 if a packet was consumed, 0 if not enough data is
 * buffered yet (or on allocation failure, after shutting down). */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    /* KeymapNotify carries no sequence number, so it must not advance
     * the sequence tracking below. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Widen the 16-bit wire sequence using the last seen 64-bit
         * value; bump by 0x10000 when the low bits wrapped around. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* Responses for a new request have started: archive the
             * replies accumulated for the previous request. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Discard pending_reply records for requests that can produce
         * no further responses (external-socket-owner records stay
         * open-ended until _xcb_in_replies_done closes them). */
        while(c->in.pending_replies && 
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error ends the response stream for its request. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    /* Find the pending_reply record covering this response, if any. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Buggy server reports the wrong length: recompute it from
             * the counts inside the reply itself. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
    {
        eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
    }

    /* Events and errors get one extra uint32_t for full_sequence. */
    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    /* The issuer asked for this reply to be thrown away unread. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader (if any) waiting on exactly this request;
         * the list is sorted, so stop once we pass request_read. */
        for(reader = c->in.readers; 
            reader && 
            XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
            reader = reader->next)
        {
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
        }
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}
235
236 static xcb_generic_event_t *get_event(xcb_connection_t *c)
237 {
238     struct event_list *cur = c->in.events;
239     xcb_generic_event_t *ret;
240     if(!c->in.events)
241         return 0;
242     ret = cur->event;
243     c->in.events = cur->next;
244     if(!cur->next)
245         c->in.events_tail = &c->in.events;
246     free(cur);
247     return ret;
248 }
249
250 static void free_reply_list(struct reply_list *head)
251 {
252     while(head)
253     {
254         struct reply_list *cur = head;
255         head = cur->next;
256         free(cur->reply);
257         free(cur);
258     }
259 }
260
/* Read exactly len bytes from fd into buf, waiting in select(2)
 * whenever the descriptor momentarily has no data (EAGAIN).
 * Returns len on success, 0 on EOF, or a negative value on error.
 *
 * Fixes: a read(2) interrupted by a signal (EINTR) previously fell
 * into the `ret <= 0` failure path and aborted the transfer even
 * though EINTR in select() was already retried; it is now retried
 * too.  The byte counter is also widened to ssize_t to match len. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    ssize_t done = 0;
    while(done < len)
    {
        ssize_t ret = read(fd, ((char *) buf) + done, len - done);
        if(ret > 0)
            done += ret;
        if(ret < 0 && errno == EINTR)
            continue; /* signal during read: just retry */
        if(ret < 0 && errno == EAGAIN)
        {
            /* Nothing buffered right now: wait for readability. */
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
        }
        if(ret <= 0) /* EOF, hard error, or select failure */
            return ret;
    }
    return len;
}
283
/* Non-blocking check for the reply to `request`.  Returns 1 when the
 * outcome is known (with *reply and, optionally, *error filled in —
 * both 0 means the request finished without a reply); returns 0 when
 * more data must be read from the server before we can tell.
 * Caller must hold the iolock. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Multiple replies: put the rest back for the next poll. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Route an error packet to *error (or drop it if the caller
         * passed no error slot); anything else is the reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
338
339 /* Public interface */
340
/* Block until the reply (or error) for `request` is available.
 * Returns a malloc'd reply buffer the caller must free, or 0; a
 * checked error is handed back through *e when e is non-NULL. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

    /* Widen the 32-bit sequence number to 64 bits, picking the value
     * at or below the last request actually sent. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the readers list, which is kept
         * sorted by sequence number. */
        for(prev_reader = &c->in.readers; 
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until our reply arrives or the connection dies. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink ourselves from the readers list again. */
        for(prev_reader = &c->in.readers;
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
397
398 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
399 {
400     int ret;
401     if(c->has_error)
402     {
403         *reply = 0;
404         if(error)
405             *error = 0;
406         return 1; /* would not block */
407     }
408     assert(reply != 0);
409     pthread_mutex_lock(&c->iolock);
410     ret = poll_for_reply(c, request, reply, error);
411     pthread_mutex_unlock(&c->iolock);
412     return ret;
413 }
414
415 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
416 {
417     xcb_generic_event_t *ret;
418     if(c->has_error)
419         return 0;
420     pthread_mutex_lock(&c->iolock);
421     /* get_event returns 0 on empty list. */
422     while(!(ret = get_event(c)))
423         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
424             break;
425
426     wake_up_next_reader(c);
427     pthread_mutex_unlock(&c->iolock);
428     return ret;
429 }
430
431 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
432 {
433     xcb_generic_event_t *ret = 0;
434     if(!c->has_error)
435     {
436         pthread_mutex_lock(&c->iolock);
437         /* FIXME: follow X meets Z architecture changes. */
438         ret = get_event(c);
439         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
440             ret = get_event(c);
441         pthread_mutex_unlock(&c->iolock);
442     }
443     return ret;
444 }
445
/* Wait for a void (reply-less) request to complete and return its
 * error, or 0 on success.  Forces a round trip when the request's fate
 * is not yet known. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
     * that would require factoring the locking out of xcb_get_input_focus,
     * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
    xcb_generic_error_t *ret;
    void *reply;
    if(c->has_error)
        return 0;
    /* No response for this request has been seen yet: sync with the
     * server so that its error (if any) is guaranteed to arrive. */
    if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
       && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
    {
        free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
        assert(!ret);
    }
    /* A void request never produces a reply; only an error can come
     * back, through ret. */
    reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
    assert(!reply);
    return ret;
}
465
466 /* Private interface */
467
/* Initialize the input-side state of a connection.
 * Returns 1 on success, 0 on failure (cond-var init or map allocation).
 * NOTE(review): list head pointers (current_reply, events,
 * pending_replies) are not explicitly zeroed here — presumably the
 * caller zero-initializes the struct first; verify against the
 * connection setup code. */
int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    /* Empty lists: each tail pointer points at its own head pointer. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}
489
490 void _xcb_in_destroy(_xcb_in *in)
491 {
492     pthread_cond_destroy(&in->event_cond);
493     free_reply_list(in->current_reply);
494     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
495     while(in->events)
496     {
497         struct event_list *e = in->events;
498         in->events = e->next;
499         free(e->event);
500         free(e);
501     }
502     while(in->pending_replies)
503     {
504         pending_reply *pend = in->pending_replies;
505         in->pending_replies = pend->next;
506         free(pend);
507     }
508 }
509
510 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
511 {
512     pending_reply *pend = malloc(sizeof(pending_reply));
513     assert(workaround != WORKAROUND_NONE || flags != 0);
514     if(!pend)
515     {
516         _xcb_conn_shutdown(c);
517         return 0;
518     }
519     pend->first_request = pend->last_request = request;
520     pend->workaround = workaround;
521     pend->flags = flags;
522     pend->next = 0;
523     *c->in.pending_replies_tail = pend;
524     c->in.pending_replies_tail = &pend->next;
525     return 1;
526 }
527
/* Close off the trailing WORKAROUND_EXTERNAL_SOCKET_OWNER record when
 * an external socket owner returns the write side: the record's range
 * is capped at the last request sent, and the workaround cleared so
 * normal reply processing resumes. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* Recover the last list element from the tail pointer. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
541
542 int _xcb_in_read(xcb_connection_t *c)
543 {
544     int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
545     if(n > 0)
546         c->in.queue_len += n;
547     while(read_packet(c))
548         /* empty */;
549     if((n > 0) || (n < 0 && errno == EAGAIN))
550         return 1;
551     _xcb_conn_shutdown(c);
552     return 0;
553 }
554
/* Read exactly len bytes into buf: drain the in-memory queue first,
 * then fetch the remainder from the file descriptor via read_block.
 * Returns len on success; on failure shuts the connection down and
 * returns read_block's <= 0 result. */
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    /* Consume `done` bytes from the front of the queue. */
    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c);
            return ret;
        }
    }

    return len;
}