Set errno=0 in read_block. On Win32 there is no errno, so initializing it ensures the select() retry loop executes only once.
[free-sw/xcb/libxcb] / src / xcb_in.c
1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  * 
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  * 
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19  * 
20  * Except as contained in this notice, the names of the authors or their
21  * institutions shall not be used in advertising or otherwise to promote the
22  * sale, use or other dealings in this Software without prior written
23  * authorization from the authors.
24  */
25
26 /* Stuff that reads stuff from the server. */
27
28 #include <assert.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <unistd.h>
32 #include <stdio.h>
33 #include <errno.h>
34
35 #include "xcb.h"
36 #include "xcbext.h"
37 #include "xcbint.h"
38 #if USE_POLL
39 #include <poll.h>
40 #elif !defined _WIN32
41 #include <sys/select.h>
42 #endif
43
44 #ifdef _WIN32
45 #include "xcb_windefs.h"
46 #endif /* _WIN32 */
47
#define XCB_ERROR 0      /* response_type of an error packet */
#define XCB_REPLY 1      /* response_type of a reply packet */
#define XCB_XGE_EVENT 35 /* response_type of a GenericEvent; may carry >32 bytes */

/* FIFO node for events waiting to be returned by xcb_wait/poll_for_event. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

/* FIFO node for reply buffers accumulated for a single request. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};

/* Bookkeeping for a range of requests whose responses need special
 * handling: protocol workarounds, checked errors, or discarded replies. */
typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;

/* One thread blocked in xcb_wait_for_reply, keyed by its 32-bit sequence
 * number; the list is kept sorted by request (see xcb_wait_for_reply). */
typedef struct reader_list {
    unsigned int request;
    pthread_cond_t *data;
    struct reader_list *next;
} reader_list;
76 static void wake_up_next_reader(xcb_connection_t *c)
77 {
78     int pthreadret;
79     if(c->in.readers)
80         pthreadret = pthread_cond_signal(c->in.readers->data);
81     else
82         pthreadret = pthread_cond_signal(&c->in.event_cond);
83     assert(pthreadret == 0);
84 }
85
/* Consume one complete X protocol packet (reply, error, or event) from the
 * head of c->in.queue and dispatch it: replies and checked errors go to the
 * current-reply list (waking a matching reader), everything else goes to the
 * event queue.  Returns 1 if a packet was consumed, 0 if more data is still
 * needed (or on allocation failure, which also shuts the connection down). */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32; /* every fixed-size X packet is 32 bytes */
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    /* KeymapNotify is special: it carries no sequence-number field. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Widen the wire's 16-bit sequence against the last one seen;
         * if the result went backwards, the 16-bit counter wrapped. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* Responses to a new request have started: file the previous
             * request's accumulated replies away in the replies map. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Free pending_reply records for requests that can receive no more
         * responses (EXTERNAL_SOCKET_OWNER records must be kept). */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error is the final response to its request. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    /* Find the pending_reply record covering this response, if any. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* GLX GetFBConfigs workaround: recompute the reply length from
             * the reply's own count fields instead of trusting the header. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        /* NOTE(review): genrep.length * 4 is 32-bit int arithmetic and could
         * overflow for very large replies -- confirm upstream hardening. */
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
    {
        eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
    }

    /* Non-reply packets get an extra uint32_t of space so full_sequence can
     * be appended after the 32-byte wire event. */
    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    /* Nobody wants this reply: drop it on the floor. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader waiting on exactly this sequence number, if any;
         * the list is sorted, so stop as soon as we've passed it. */
        for(reader = c->in.readers;
            reader &&
            XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
            reader = reader->next)
        {
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
        }
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}
243
244 static xcb_generic_event_t *get_event(xcb_connection_t *c)
245 {
246     struct event_list *cur = c->in.events;
247     xcb_generic_event_t *ret;
248     if(!c->in.events)
249         return 0;
250     ret = cur->event;
251     c->in.events = cur->next;
252     if(!cur->next)
253         c->in.events_tail = &c->in.events;
254     free(cur);
255     return ret;
256 }
257
258 static void free_reply_list(struct reply_list *head)
259 {
260     while(head)
261     {
262         struct reply_list *cur = head;
263         head = cur->next;
264         free(cur->reply);
265         free(cur);
266     }
267 }
268
/* Read exactly len bytes from fd into buf.  If the (non-blocking) socket
 * has no data yet, wait for readability with poll/select and retry.
 * Returns len on success, or the failing read/wait result (<= 0) on EOF
 * or error. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int filled = 0;
    while(filled < len)
    {
        int ret;
#ifndef _WIN32
        ret = read(fd, ((char *) buf) + filled, len - filled);
#else
        ret = recv(fd, ((char *) buf) + filled, len - filled,0);
#endif /* !_WIN32 */
        if(ret > 0)
            filled += ret;

        /* Would-block: sleep until the descriptor is readable again. */
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_WIN32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}
313
/* Check, without blocking, whether a definitive answer exists for `request`.
 * Returns 1 with *reply (and *error, when non-NULL) filled in -- both may be
 * 0, meaning the request finished with no responses -- or 0 if the caller
 * must wait for more data.  Ownership of the returned buffer passes to the
 * caller.  Must be called with the iolock held. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* More than one reply remains: put the rest back for next time. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Errors go out through *error (or are dropped if the caller didn't
         * ask); real replies go out through *reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
368
369 /* Public interface */
370
/* Public API: block until the reply or error for sequence number `request`
 * arrives.  Returns the reply buffer (caller frees), or 0 with *e set to the
 * error when e is non-NULL.  Returns 0 immediately on a broken connection. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

    /* Reconstruct the full 64-bit sequence from the 32-bit cookie; it cannot
     * be newer than the last request actually sent, so step back one epoch
     * if the naive widening lands in the future. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the readers list, which is kept sorted by
         * request number. */
        for(prev_reader = &c->in.readers; 
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            /* empty */;
        }
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until poll_for_reply produces a definitive answer (or the
         * connection dies in _xcb_conn_wait). */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink ourselves from the readers list again. */
        for(prev_reader = &c->in.readers;
            *prev_reader && 
            XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
            prev_reader = &(*prev_reader)->next)
        {
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
        }
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
427
428 static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
429 {
430     pending_reply *pend;
431     pend = malloc(sizeof(*pend));
432     if(!pend)
433     {
434         _xcb_conn_shutdown(c);
435         return;
436     }
437
438     pend->first_request = seq;
439     pend->last_request = seq;
440     pend->workaround = 0;
441     pend->flags = XCB_REQUEST_DISCARD_REPLY;
442     pend->next = *prev_next;
443     *prev_next = pend;
444
445     if(!pend->next)
446         c->in.pending_replies_tail = &pend->next;
447 }
448
/* Arrange for the responses to `request` to be thrown away, wherever they
 * currently are: already filed in the replies map, partially accumulated in
 * current_reply, or still in flight (handled via a pending_reply record).
 * Must be called with the iolock held. */
static void discard_reply(xcb_connection_t *c, unsigned int request)
{
    pending_reply *pend = 0;
    pending_reply **prev_pend;
    uint64_t widened_request;

    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        struct reply_list *head;
        head = _xcb_map_remove(c->in.replies, request);
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }
        return;
    }

    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. Free it, and mark the pend to free any further
     * replies. */
    if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        struct reply_list *head;
        head = c->in.current_reply;
        c->in.current_reply = NULL;
        c->in.current_reply_tail = &c->in.current_reply;
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }

        /* Same pend-matching test as read_packet: does the head record
         * cover the request we're currently reading responses for? */
        pend = c->in.pending_replies;
        if(pend &&
            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
        if(pend)
            pend->flags |= XCB_REQUEST_DISCARD_REPLY;
        else
            insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);

        return;
    }

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        /* The list is sorted by sequence: stop once we've passed `request`. */
        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
            break;

        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    /* Widen the 32-bit sequence; it cannot be newer than the last request sent. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    insert_pending_discard(c, prev_pend, widened_request);
}
523
524 void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
525 {
526     if(c->has_error)
527         return;
528
529     /* If an error occurred when issuing the request, fail immediately. */
530     if(!sequence)
531         return;
532
533     pthread_mutex_lock(&c->iolock);
534     discard_reply(c, sequence);
535     pthread_mutex_unlock(&c->iolock);
536 }
537
538 int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
539 {
540     int ret;
541     if(c->has_error)
542     {
543         *reply = 0;
544         if(error)
545             *error = 0;
546         return 1; /* would not block */
547     }
548     assert(reply != 0);
549     pthread_mutex_lock(&c->iolock);
550     ret = poll_for_reply(c, request, reply, error);
551     pthread_mutex_unlock(&c->iolock);
552     return ret;
553 }
554
555 xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
556 {
557     xcb_generic_event_t *ret;
558     if(c->has_error)
559         return 0;
560     pthread_mutex_lock(&c->iolock);
561     /* get_event returns 0 on empty list. */
562     while(!(ret = get_event(c)))
563         if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
564             break;
565
566     wake_up_next_reader(c);
567     pthread_mutex_unlock(&c->iolock);
568     return ret;
569 }
570
571 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
572 {
573     xcb_generic_event_t *ret = 0;
574     if(!c->has_error)
575     {
576         pthread_mutex_lock(&c->iolock);
577         /* FIXME: follow X meets Z architecture changes. */
578         ret = get_event(c);
579         if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
580             ret = get_event(c);
581         pthread_mutex_unlock(&c->iolock);
582     }
583     return ret;
584 }
585
/* Public API: report whether the void request behind `cookie` failed.
 * Returns the error (caller frees) or 0 on success / broken connection. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
     * that would require factoring the locking out of xcb_get_input_focus,
     * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
    xcb_generic_error_t *ret;
    void *reply;
    if(c->has_error)
        return 0;
    /* If the server may not have answered past this request yet, force a
     * GetInputFocus round-trip so any error must have arrived by the time
     * we look for it. */
    if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
       && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
    {
        free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
        assert(!ret);
    }
    /* A void request has no reply, so this wait yields only the error. */
    reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
    assert(!reply);
    return ret;
}
605
606 /* Private interface */
607
608 int _xcb_in_init(_xcb_in *in)
609 {
610     if(pthread_cond_init(&in->event_cond, 0))
611         return 0;
612     in->reading = 0;
613
614     in->queue_len = 0;
615
616     in->request_read = 0;
617     in->request_completed = 0;
618
619     in->replies = _xcb_map_new();
620     if(!in->replies)
621         return 0;
622
623     in->current_reply_tail = &in->current_reply;
624     in->events_tail = &in->events;
625     in->pending_replies_tail = &in->pending_replies;
626
627     return 1;
628 }
629
630 void _xcb_in_destroy(_xcb_in *in)
631 {
632     pthread_cond_destroy(&in->event_cond);
633     free_reply_list(in->current_reply);
634     _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
635     while(in->events)
636     {
637         struct event_list *e = in->events;
638         in->events = e->next;
639         free(e->event);
640         free(e);
641     }
642     while(in->pending_replies)
643     {
644         pending_reply *pend = in->pending_replies;
645         in->pending_replies = pend->next;
646         free(pend);
647     }
648 }
649
650 int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
651 {
652     pending_reply *pend = malloc(sizeof(pending_reply));
653     assert(workaround != WORKAROUND_NONE || flags != 0);
654     if(!pend)
655     {
656         _xcb_conn_shutdown(c);
657         return 0;
658     }
659     pend->first_request = pend->last_request = request;
660     pend->workaround = workaround;
661     pend->flags = flags;
662     pend->next = 0;
663     *c->in.pending_replies_tail = pend;
664     c->in.pending_replies_tail = &pend->next;
665     return 1;
666 }
667
/* Close out the trailing EXTERNAL_SOCKET_OWNER pending_reply record, bounding
 * it at the last request written so its replies get normal processing.
 * NOTE(review): presumably invoked when an external socket owner returns the
 * socket -- confirm against the xcb_out.c callers. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* Non-empty list: the tail pointer addresses the last node's `next`
         * field, so recover the node itself via container_of. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
681
/* Do one nonblocking read from the server into the input queue, then process
 * every complete packet now buffered.  Returns 1 on success or would-block;
 * returns 0 after shutting the connection down on EOF or a hard error. */
int _xcb_in_read(xcb_connection_t *c)
{
#ifndef _WIN32
    int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
#else
    int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len,0);
#endif /* !_WIN32 */
    if(n > 0)
        c->in.queue_len += n;
    /* Dispatch all complete packets sitting in the queue. */
    while(read_packet(c))
        /* empty */;
#ifndef _WIN32
    if((n > 0) || (n < 0 && errno == EAGAIN))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    /* n == 0 means EOF; any other failure is fatal for the connection. */
    _xcb_conn_shutdown(c);
    return 0;
}
702
703 int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
704 {
705     int done = c->in.queue_len;
706     if(len < done)
707         done = len;
708
709     memcpy(buf, c->in.queue, done);
710     c->in.queue_len -= done;
711     memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
712
713     if(len > done)
714     {
715         int ret = read_block(c->fd, (char *) buf + done, len - done);
716         if(ret <= 0)
717         {
718             _xcb_conn_shutdown(c);
719             return ret;
720         }
721     }
722
723     return len;
724 }