/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the names of the authors or their
 * institutions shall not be used in advertising or otherwise to promote the
 * sale, use or other dealings in this Software without prior written
 * authorization from the authors.
 */
/* Stuff that sends stuff to the server. */

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "xcb.h"
#include "xcbext.h"
#include "xcbint.h"
#include "bigreq.h"

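/* Buffer as much of a request as fits in out.queue and hand anything left
 * over to _xcb_out_send in a single writev. Callers must leave one unused
 * iovec slot in front of vector: when the queue spills, send_request backs
 * up one slot and splices the queued bytes in ahead of the remaining
 * fragments. */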
static inline void send_request(xcb_connection_t *c, int isvoid, enum workarounds workaround, int flags, struct iovec *vector, int count)
{
    if(c->has_error)
        return;

    ++c->out.request;
    if(!isvoid)
        c->in.request_expected = c->out.request;
    if(workaround != WORKAROUND_NONE || flags != 0)
        _xcb_in_expect_reply(c, c->out.request, workaround, flags);

    while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
    {
        memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
        c->out.queue_len += vector[0].iov_len;
        vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
        vector[0].iov_len = 0;
        ++vector, --count;
    }
    if(!count)
        return;
    --vector, ++count;
    vector[0].iov_base = c->out.queue;
    vector[0].iov_len = c->out.queue_len;
    c->out.queue_len = 0;
    _xcb_out_send(c, vector, count);
}

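/* Queue a sync request whose reply will be thrown away. The request is a
 * bare GetInputFocus header: major opcode 43, a pad byte, and a length of
 * 1 in 4-byte units. Its only purpose is to make the server emit a packet
 * carrying a fresh sequence number; XCB_REQUEST_DISCARD_REPLY tells the
 * reply machinery to drop the answer when it arrives. */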
static void send_sync(xcb_connection_t *c)
{
    static const union {
        struct {
            uint8_t major;
            uint8_t pad;
            uint16_t len;
        } fields;
        uint32_t packet;
    } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
    struct iovec vector[2];
    vector[1].iov_base = (char *) &sync_req;
    vector[1].iov_len = sizeof(sync_req);
    send_request(c, 0, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY, vector + 1, 1);
}

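/* Reclaim the write side of the connection from an external socket owner
 * (see xcb_take_socket below). The return_socket callback runs with the
 * I/O lock dropped so the owner can flush its own buffers; socket_moving
 * keeps other threads from starting a second handoff in the meantime. */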
static void get_socket_back(xcb_connection_t *c)
{
    while(c->out.return_socket && c->out.socket_moving)
        pthread_cond_wait(&c->out.socket_cond, &c->iolock);
    if(!c->out.return_socket)
        return;

    c->out.socket_moving = 1;
    pthread_mutex_unlock(&c->iolock);
    c->out.return_socket(c->out.socket_closure);
    pthread_mutex_lock(&c->iolock);
    c->out.socket_moving = 0;

    pthread_cond_broadcast(&c->out.socket_cond);
    c->out.return_socket = 0;
    c->out.socket_closure = 0;
    _xcb_in_replies_done(c);
}

/* Public interface */

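/* The maximum request length is negotiated lazily: the prefetch variant
 * just issues a BIG-REQUESTS Enable request and stashes the cookie
 * (LAZY_COOKIE), while xcb_get_maximum_request_length forces the reply
 * and caches the final value (LAZY_FORCED). */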
void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
{
    if(c->has_error)
        return;
    pthread_mutex_lock(&c->out.reqlenlock);
    if(c->out.maximum_request_length_tag == LAZY_NONE)
    {
        const xcb_query_extension_reply_t *ext;
        ext = xcb_get_extension_data(c, &xcb_big_requests_id);
        if(ext && ext->present)
        {
            c->out.maximum_request_length_tag = LAZY_COOKIE;
            c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
        }
        else
        {
            c->out.maximum_request_length_tag = LAZY_FORCED;
            c->out.maximum_request_length.value = c->setup->maximum_request_length;
        }
    }
    pthread_mutex_unlock(&c->out.reqlenlock);
}

uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
{
    if(c->has_error)
        return 0;
    xcb_prefetch_maximum_request_length(c);
    pthread_mutex_lock(&c->out.reqlenlock);
    if(c->out.maximum_request_length_tag == LAZY_COOKIE)
    {
        xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
        c->out.maximum_request_length_tag = LAZY_FORCED;
        if(r)
        {
            c->out.maximum_request_length.value = r->maximum_request_length;
            free(r);
        }
        else
            c->out.maximum_request_length.value = c->setup->maximum_request_length;
    }
    pthread_mutex_unlock(&c->out.reqlenlock);
    return c->out.maximum_request_length.value;
}

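/* Example (an illustrative sketch, not part of the library): a client
 * streaming image data can size its uploads from this limit, e.g.
 *
 *     uint32_t units = xcb_get_maximum_request_length(c);
 *     size_t max_bytes = (size_t) units * 4 - sizeof(xcb_put_image_request_t);
 *     // split the image into spans of at most max_bytes of pixel data,
 *     // issuing one xcb_put_image per span
 */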
unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
{
    uint64_t request;
    uint32_t prefix[2];
    int veclen = req->count;
    enum workarounds workaround = WORKAROUND_NONE;

    if(c->has_error)
        return 0;

    assert(c != 0);
    assert(vector != 0);
    assert(req->count > 0);

    if(!(flags & XCB_REQUEST_RAW))
    {
        static const char pad[3];
        unsigned int i;
        uint16_t shortlen = 0;
        size_t longlen = 0;
        assert(vector[0].iov_len >= 4);
        /* set the major opcode, and the minor opcode for extensions */
        if(req->ext)
        {
            const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
            if(!(extension && extension->present))
            {
                _xcb_conn_shutdown(c, XCB_CONN_CLOSED_EXT_NOTSUPPORTED);
                return 0;
            }
            ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
            ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
        }
        else
            ((uint8_t *) vector[0].iov_base)[0] = req->opcode;

        /* put together the length field, possibly using BIGREQUESTS */
        for(i = 0; i < req->count; ++i)
        {
            longlen += vector[i].iov_len;
            if(!vector[i].iov_base)
            {
                vector[i].iov_base = (char *) pad;
                assert(vector[i].iov_len <= sizeof(pad));
            }
        }
        assert((longlen & 3) == 0);
        longlen >>= 2;
        if(longlen <= c->setup->maximum_request_length)
        {
            /* we don't need BIGREQUESTS. */
            shortlen = longlen;
            longlen = 0;
        }
        else if(longlen > xcb_get_maximum_request_length(c))
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_REQ_LEN_EXCEED);
            return 0; /* server can't take this; maybe need BIGREQUESTS? */
        }

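        /* Core requests carry their total length as a 16-bit count of
         * 4-byte units. Under BIG-REQUESTS that field is set to 0 and a
         * 32-bit count (which includes the extra length word itself,
         * hence the ++longlen below) is spliced in directly after the
         * first 4 bytes of the request. */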
        /* set the length field. */
        ((uint16_t *) vector[0].iov_base)[1] = shortlen;
        if(!shortlen)
        {
            prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
            prefix[1] = ++longlen;
            vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
            vector[0].iov_len -= sizeof(uint32_t);
            --vector, ++veclen;
            vector[0].iov_base = prefix;
            vector[0].iov_len = sizeof(prefix);
        }
    }
    flags &= ~XCB_REQUEST_RAW;

    /* do we need to work around the X server bug described in glx.xml? */
    /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
     * configuration, but that should be handled here anyway. */
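    /* (Opcode 21 is GetFBConfigs; opcode 17 is VendorPrivateWithReply,
     * with 0x10004 as the GetFBConfigsSGIX vendor opcode.) */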
    if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
            ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
             req->opcode == 21))
        workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;

    /* get a sequence number and arrange for delivery. */
    pthread_mutex_lock(&c->iolock);
    /* wait for other writing threads to get out of my way. */
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    get_socket_back(c);

    /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
     * a reply. */
    if(req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2)
        send_sync(c);
    /* Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
     * applications see sequence 0 as that is used to indicate
     * an error in sending the request */
    if((unsigned int) (c->out.request + 1) == 0)
        send_sync(c);

    /* The above send_sync calls could drop the I/O lock, but this
     * thread will still exclude any other thread that tries to write,
     * so the sequence number postconditions still hold. */
    send_request(c, req->isvoid, workaround, flags, vector, veclen);
    request = c->has_error ? 0 : c->out.request;
    pthread_mutex_unlock(&c->iolock);
    return request;
}

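/* Example (an illustrative sketch of what the generated protocol stubs do):
 * a request is packaged as iovecs with two spare slots in front for the
 * BIG-REQUESTS prefix; xcb_get_input_focus looks roughly like
 *
 *     static const xcb_protocol_request_t xcb_req = {
 *         .count = 2, .ext = 0, .opcode = 43, .isvoid = 0
 *     };
 *     struct iovec xcb_parts[4];
 *     xcb_parts[2].iov_base = (char *) &xcb_out;        // fixed-length body
 *     xcb_parts[2].iov_len = sizeof(xcb_out);
 *     xcb_parts[3].iov_base = 0;                        // NULL base: pad bytes
 *     xcb_parts[3].iov_len = -xcb_parts[2].iov_len & 3;
 *     cookie.sequence = xcb_send_request(c, XCB_REQUEST_CHECKED, xcb_parts + 2, &xcb_req);
 */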
int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    get_socket_back(c);

    /* _xcb_out_flush may drop the iolock allowing other threads to
     * write requests, so keep flushing until we're done */
    do
        ret = _xcb_out_flush_to(c, c->out.request);
    while (ret && c->out.request != c->out.request_written);
    if(ret)
    {
        c->out.return_socket = return_socket;
        c->out.socket_closure = closure;
        if(flags)
            _xcb_in_expect_reply(c, c->out.request, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
        assert(c->out.request == c->out.request_written);
        *sent = c->out.request;
    }
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

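/* Example (an illustrative sketch; give_back and ctx are hypothetical): an
 * external owner such as Xlib takes the socket, writes requests itself, and
 * accounts for them with xcb_writev:
 *
 *     uint64_t sent;
 *     if(xcb_take_socket(c, give_back, ctx, 0, &sent))
 *     {
 *         // write n requests directly to the fd, then record them:
 *         xcb_writev(c, iov, iovcnt, n);
 *     }
 *
 * give_back() is called when XCB needs the socket again and must flush any
 * bytes the owner has buffered before returning. */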
int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    c->out.request += requests;
    ret = _xcb_out_send(c, vector, count);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

int xcb_flush(xcb_connection_t *c)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    ret = _xcb_out_flush_to(c, c->out.request);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

/* Private interface */

int _xcb_out_init(_xcb_out *out)
{
    if(pthread_cond_init(&out->socket_cond, 0))
        return 0;
    out->return_socket = 0;
    out->socket_closure = 0;
    out->socket_moving = 0;

    if(pthread_cond_init(&out->cond, 0))
        return 0;
    out->writing = 0;
    out->queue_len = 0;
    out->request = 0;
    out->request_written = 0;

    if(pthread_mutex_init(&out->reqlenlock, 0))
        return 0;
    out->maximum_request_length_tag = LAZY_NONE;

    return 1;
}

void _xcb_out_destroy(_xcb_out *out)
{
    pthread_cond_destroy(&out->cond);
    pthread_mutex_destroy(&out->reqlenlock);
}

int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
{
    int ret = 1;
    while(ret && count)
        ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
    c->out.request_written = c->out.request;
    pthread_cond_broadcast(&c->out.cond);
    _xcb_in_wake_up_next_reader(c);
    return ret;
}

void _xcb_out_send_sync(xcb_connection_t *c)
{
    /* wait for other writing threads to get out of my way. */
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    get_socket_back(c);
    send_sync(c);
}

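/* Flush until the given request has hit the wire. Three cases: it was
 * already written (return immediately), bytes are still sitting in
 * out.queue (send them), or another thread is mid-write (wait for it;
 * its send covers our request, as the final assert checks). */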
int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
{
    assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
    if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
        return 1;
    if(c->out.queue_len)
    {
        struct iovec vec;
        vec.iov_base = c->out.queue;
        vec.iov_len = c->out.queue_len;
        c->out.queue_len = 0;
        return _xcb_out_send(c, &vec, 1);
    }
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
    return 1;
}