1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
26 /* Stuff that sends stuff to the server. */
/* Append one fully-marshalled request to the connection's outgoing queue,
 * handing the data to _xcb_out_send when it does not fit in the buffer.
 * Caller must hold c->iolock.
 * NOTE(review): this view elides several original lines (braces, the
 * has_error guard, the sequence-number increment, and the guard on the
 * line below — presumably `if(!isvoid)`); comments cover only what is
 * visible here. */
42 static inline void send_request(xcb_connection_t *c, int isvoid, enum workarounds workaround, int flags, struct iovec *vector, int count)
/* Remember which sequence number the next reply is expected to carry. */
49 c->in.request_expected = c->out.request;
/* Non-default reply handling (server workarounds, checked/discard flags)
 * must be registered with the input side before the bytes go out. */
50 if(workaround != WORKAROUND_NONE || flags != 0)
51 _xcb_in_expect_reply(c, c->out.request, workaround, flags);
/* Copy leading iovecs into out.queue for as long as each one fits. */
53 while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
55 memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
56 c->out.queue_len += vector[0].iov_len;
/* Mark this iovec fully consumed (advance base, zero length). NOTE(review):
 * the vector/count adjustment between iterations is elided from this view. */
57 vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
58 vector[0].iov_len = 0;
/* Data remains that did not fit: prepend the buffered queue as the first
 * iovec (NOTE(review): the `--vector, ++count` style adjustment that makes
 * this slot valid is elided here — confirm against the full source) and
 * flush everything to the socket. */
65 vector[0].iov_base = c->out.queue;
66 vector[0].iov_len = c->out.queue_len;
68 _xcb_out_send(c, vector, count);
/* Queue a GetInputFocus request purely as a synchronization point; its
 * reply is discarded (XCB_REQUEST_DISCARD_REPLY).  Used to keep sequence
 * numbers flowing — see the callers in xcb_send_request.
 * NOTE(review): the declaration of sync_req (original lines preceding the
 * `} sync_req = ...` below — presumably a packed struct/union holding the
 * 4-byte request) is elided from this view. */
71 static void send_sync(xcb_connection_t *c)
80 } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
/* Two slots: vector[1] carries the request; the spare leading slot is
 * presumably there for send_request to prepend buffered queue data —
 * confirm against send_request's iovec handling. */
81 struct iovec vector[2];
82 vector[1].iov_base = (char *) &sync_req;
83 vector[1].iov_len = sizeof(sync_req);
/* isvoid=0: a reply is generated (and discarded); no workaround needed. */
84 send_request(c, 0, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY, vector + 1, 1);
/* Reclaim the write socket from an external owner (see xcb_take_socket).
 * Caller must hold c->iolock; the lock is dropped around the owner's
 * return_socket callback, so other connection state may change meanwhile.
 * NOTE(review): braces and an early-return path are elided from this view. */
87 static void get_socket_back(xcb_connection_t *c)
/* If another thread is already recalling the socket, wait for it. */
89 while(c->out.return_socket && c->out.socket_moving)
90 pthread_cond_wait(&c->out.socket_cond, &c->iolock);
/* No external owner (any more): nothing to reclaim.  NOTE(review): the
 * body of this if — presumably `return` — is elided here. */
91 if(!c->out.return_socket)
/* Flag the hand-back in progress so concurrent callers block above,
 * then invoke the owner's callback without holding iolock (the callback
 * may itself call back into XCB). */
94 c->out.socket_moving = 1;
95 pthread_mutex_unlock(&c->iolock);
96 c->out.return_socket(c->out.socket_closure);
97 pthread_mutex_lock(&c->iolock);
98 c->out.socket_moving = 0;
/* Wake any threads parked in the while loop above. */
100 pthread_cond_broadcast(&c->out.socket_cond);
/* Clear ownership state and tell the input side that replies queued by
 * the external owner are now complete. */
101 c->out.return_socket = 0;
102 c->out.socket_closure = 0;
103 _xcb_in_replies_done(c);
/* Ensure the calling thread may safely append to out.queue: XCB must own
 * the socket AND no other thread may be mid-write.  Caller holds c->iolock.
 * NOTE(review): the surrounding loop structure (and the get_socket_back
 * call described by the comment below) is elided from this view; only the
 * condition wait is visible. */
106 static void prepare_socket_request(xcb_connection_t *c)
108 /* We're about to append data to out.queue, so we need to
109 * atomically test for an external socket owner *and* some other
110 * thread currently writing.
112 * If we have an external socket owner, we have to get the socket back
113 * before we can use it again.
115 * If some other thread is writing to the socket, we assume it's
116 * writing from out.queue, and so we can't stick data there.
118 * We satisfy this condition by first calling get_socket_back
119 * (which may drop the lock, but will return when XCB owns the
120 * socket again) and then checking for another writing thread and
121 * escaping the loop if we're ready to go.
/* Another thread is writing; sleep until it signals out.cond. */
129 pthread_cond_wait(&c->out.cond, &c->iolock);
133 /* Public interface */
/* Lazily start discovery of the server's maximum request length.
 * If BIG-REQUESTS is available, issue a BigRequestsEnable request and
 * stash the cookie (LAZY_COOKIE) for xcb_get_maximum_request_length to
 * resolve later; otherwise fall back to the core setup limit (LAZY_FORCED).
 * Idempotent: does nothing once a tag other than LAZY_NONE is set.
 * NOTE(review): braces and a has_error guard appear elided from this view. */
135 void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
/* reqlenlock serializes the tag/value pair against concurrent callers. */
139 pthread_mutex_lock(&c->out.reqlenlock);
140 if(c->out.maximum_request_length_tag == LAZY_NONE)
142 const xcb_query_extension_reply_t *ext;
143 ext = xcb_get_extension_data(c, &xcb_big_requests_id);
144 if(ext && ext->present)
/* Extension present: request the extended limit asynchronously. */
146 c->out.maximum_request_length_tag = LAZY_COOKIE;
147 c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
/* No BIG-REQUESTS (NOTE(review): the `else` introducing this branch is
 * elided): use the limit advertised in the connection setup. */
151 c->out.maximum_request_length_tag = LAZY_FORCED;
152 c->out.maximum_request_length.value = c->setup->maximum_request_length;
155 pthread_mutex_unlock(&c->out.reqlenlock);
/* Return the server's maximum request length in 4-byte units, blocking to
 * resolve the BigRequestsEnable reply if one is outstanding.  After the
 * first call the value is cached (tag becomes LAZY_FORCED). */
158 uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
/* Make sure discovery has at least been started. */
162 xcb_prefetch_maximum_request_length(c);
163 pthread_mutex_lock(&c->out.reqlenlock);
164 if(c->out.maximum_request_length_tag == LAZY_COOKIE)
/* Wait for the BigRequestsEnable reply issued by the prefetch. */
166 xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
167 c->out.maximum_request_length_tag = LAZY_FORCED;
/* NOTE(review): the null-check on r and fallback branch are elided from
 * this view — the assignment below presumably runs only when r != NULL,
 * with the setup limit used otherwise (and free(r) afterwards). */
170 c->out.maximum_request_length.value = r->maximum_request_length;
174 c->out.maximum_request_length.value = c->setup->maximum_request_length;
176 pthread_mutex_unlock(&c->out.reqlenlock);
177 return c->out.maximum_request_length.value;
/* Public entry point: marshal headers onto a caller-built request and
 * queue it for transmission.
 * flags: XCB_REQUEST_* bits (RAW = header already filled in, CHECKED,
 * DISCARD_REPLY).  vector: iovecs making up the request payload; req
 * describes opcode, extension, reply-ness and iovec count.
 * Returns the request's sequence number, or 0 on error.
 * NOTE(review): this view elides many original lines (braces, local
 * declarations such as `request`/`longlen`/`prefix`, else branches and
 * returns); comments below describe only the visible statements. */
180 unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
184 int veclen = req->count;
185 enum workarounds workaround = WORKAROUND_NONE;
192 assert(req->count > 0);
/* Unless the caller prepared a raw wire-ready header, fill in opcode and
 * length fields ourselves. */
194 if(!(flags & XCB_REQUEST_RAW))
/* Shared zero padding for iovecs whose base is NULL (see loop below). */
196 static const char pad[3];
198 uint16_t shortlen = 0;
200 assert(vector[0].iov_len >= 4);
201 /* set the major opcode, and the minor opcode for extensions */
204 const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
205 if(!(extension && extension->present))
/* Extension not on this server: shut down and fail the request.
 * NOTE(review): the `return 0` after this shutdown is elided here. */
207 _xcb_conn_shutdown(c, XCB_CONN_CLOSED_EXT_NOTSUPPORTED);
210 ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
211 ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
/* Core request (NOTE(review): the `else` introducing this is elided). */
214 ((uint8_t *) vector[0].iov_base)[0] = req->opcode;
216 /* put together the length field, possibly using BIGREQUESTS */
217 for(i = 0; i < req->count; ++i)
219 longlen += vector[i].iov_len;
/* NULL iov_base means "pad bytes": point it at the shared zero buffer. */
220 if(!vector[i].iov_base)
222 vector[i].iov_base = (char *) pad;
223 assert(vector[i].iov_len <= sizeof(pad));
/* X requests are always a whole number of 4-byte words. */
226 assert((longlen & 3) == 0);
229 if(longlen <= c->setup->maximum_request_length)
231 /* we don't need BIGREQUESTS. */
235 else if(longlen > xcb_get_maximum_request_length(c))
237 _xcb_conn_shutdown(c, XCB_CONN_CLOSED_REQ_LEN_EXCEED);
238 return 0; /* server can't take this; maybe need BIGREQUESTS? */
241 /* set the length field. */
242 ((uint16_t *) vector[0].iov_base)[1] = shortlen;
/* BIG-REQUESTS path (NOTE(review): the guard — presumably on shortlen
 * being 0 — is elided): build an 8-byte prefix of header word + extended
 * 32-bit length (+1 word for the prefix itself) and splice it in as a
 * new first iovec ahead of the original header. */
245 prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
246 prefix[1] = ++longlen;
247 vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
248 vector[0].iov_len -= sizeof(uint32_t);
250 vector[0].iov_base = prefix;
251 vector[0].iov_len = sizeof(prefix);
/* Header is now filled in, so treat the request as raw from here on. */
254 flags &= ~XCB_REQUEST_RAW;
256 /* do we need to work around the X server bug described in glx.xml? */
257 /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
258 * configuration, but that should be handled here anyway. */
259 if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
260 ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
262 workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;
264 /* get a sequence number and arrange for delivery. */
265 pthread_mutex_lock(&c->iolock);
267 prepare_socket_request(c);
269 /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
271 * Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
272 * applications see sequence 0 as that is used to indicate
273 * an error in sending the request
/* NOTE(review): the loop body — presumably send_sync(c) — is elided; the
 * prepare_socket_request below re-establishes write access afterwards. */
276 while ((req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2) ||
277 (unsigned int) (c->out.request + 1) == 0)
280 prepare_socket_request(c);
/* Queue the request and capture its sequence number (0 if the connection
 * has entered an error state). */
283 send_request(c, req->isvoid, workaround, flags, vector, veclen);
284 request = c->has_error ? 0 : c->out.request;
285 pthread_mutex_unlock(&c->iolock);
/* Queue a file descriptor for passing with the next request sent.
 * NOTE(review): the return type and error-handling lines (has_error
 * guards, close(fd) on failure paths) are elided from this view. */
290 xcb_send_fd(xcb_connection_t *c, int fd)
295 pthread_mutex_lock(&c->iolock);
/* The pending-fd array is full: flush outstanding requests to drain it
 * before appending. */
296 while (c->out.out_fd.nfd == XCB_MAX_PASS_FD) {
297 _xcb_out_flush_to(c, c->out.request);
302 c->out.out_fd.fd[c->out.out_fd.nfd++] = fd;
303 pthread_mutex_unlock(&c->iolock);
/* Hand ownership of the write side of the socket to an external user
 * (e.g. Xlib).  return_socket/closure will be invoked when XCB needs the
 * socket back (see get_socket_back); *sent receives the last sequence
 * number written.  Returns nonzero on success.
 * NOTE(review): braces, the flush-loop opener and the return statement
 * are elided from this view. */
307 int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
312 pthread_mutex_lock(&c->iolock);
315 /* _xcb_out_flush may drop the iolock allowing other threads to
316 * write requests, so keep flushing until we're done
/* do/while: repeat until everything queued so far is on the wire. */
319 ret = _xcb_out_flush_to(c, c->out.request);
320 while (ret && c->out.request != c->out.request_written);
/* Record the external owner and tell the input side to expect replies
 * it did not see requests for. */
323 c->out.return_socket = return_socket;
324 c->out.socket_closure = closure;
326 _xcb_in_expect_reply(c, c->out.request, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
327 assert(c->out.request == c->out.request_written);
328 *sent = c->out.request;
330 pthread_mutex_unlock(&c->iolock);
/* Write raw data on behalf of an external socket owner (xcb_take_socket).
 * `requests` is the number of X requests contained in the data, used to
 * advance the sequence counter.  Returns _xcb_out_send's status.
 * NOTE(review): braces and the return statement are elided from this view. */
334 int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
339 pthread_mutex_lock(&c->iolock);
/* Account for the sequence numbers the caller consumed. */
340 c->out.request += requests;
341 ret = _xcb_out_send(c, vector, count);
342 pthread_mutex_unlock(&c->iolock);
/* Flush all buffered requests to the server.  Returns _xcb_out_flush_to's
 * status (nonzero on success).
 * NOTE(review): braces, a has_error guard and the return are elided here. */
346 int xcb_flush(xcb_connection_t *c)
351 pthread_mutex_lock(&c->iolock);
352 ret = _xcb_out_flush_to(c, c->out.request);
353 pthread_mutex_unlock(&c->iolock);
357 /* Private interface */
/* Initialize the output-side state of a connection: condition variables,
 * socket-ownership fields, sequence counters and the request-length lock.
 * Returns nonzero on success.
 * NOTE(review): the early-return bodies of the pthread_*_init failure
 * checks, other zero-initializations and the final return are elided. */
359 int _xcb_out_init(_xcb_out *out)
361 if(pthread_cond_init(&out->socket_cond, 0))
/* No external socket owner initially. */
363 out->return_socket = 0;
364 out->socket_closure = 0;
365 out->socket_moving = 0;
367 if(pthread_cond_init(&out->cond, 0))
374 out->request_written = 0;
376 if(pthread_mutex_init(&out->reqlenlock, 0))
/* Maximum request length is discovered lazily (see the prefetch/get pair). */
378 out->maximum_request_length_tag = LAZY_NONE;
/* Release the synchronization primitives created by _xcb_out_init.
 * NOTE(review): socket_cond's destruction is not visible in this view —
 * confirm it is handled in the elided lines. */
383 void _xcb_out_destroy(_xcb_out *out)
385 pthread_cond_destroy(&out->cond);
386 pthread_mutex_destroy(&out->reqlenlock);
/* Push the given iovecs to the server via _xcb_conn_wait (which performs
 * the actual socket I/O and may block/drop the iolock).  On completion,
 * everything queued so far counts as written; wake writers and readers.
 * Returns the wait's status.
 * NOTE(review): braces, `ret`'s declaration/initialization and the return
 * statement are elided from this view. */
389 int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
393 ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
394 c->out.request_written = c->out.request;
/* Writers blocked in prepare_socket_request / flush can proceed now. */
395 pthread_cond_broadcast(&c->out.cond);
396 _xcb_in_wake_up_next_reader(c);
/* Queue a synchronization request on behalf of the input side.
 * NOTE(review): only the socket-preparation call is visible; the actual
 * send_sync(c) call is presumably in the elided lines — confirm. */
400 void _xcb_out_send_sync(xcb_connection_t *c)
402 prepare_socket_request(c);
406 int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
408 assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
409 if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
414 vec.iov_base = c->out.queue;
415 vec.iov_len = c->out.queue_len;
416 c->out.queue_len = 0;
417 return _xcb_out_send(c, &vec, 1);
419 while(c->out.writing)
420 pthread_cond_wait(&c->out.cond, &c->iolock);
421 assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));