1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
26 /* Stuff that sends stuff to the server. */
/* Copy as many leading iovecs as will still fit into the connection's
 * fixed-size output queue, then describe the queued bytes in an iovec slot
 * and hand everything to _xcb_out_send() for the actual write.
 * Returns _xcb_out_send()'s result (nonzero on success).
 * NOTE(review): this view elides lines (original numbering skips 39, 41,
 * 46-51, 54) — braces, the loop's vector/count advance, and the
 * "--vector, ++count" prepend step are presumably among them; confirm
 * against the full file. */
38 static int write_block(xcb_connection_t *c, struct iovec *vector, int count)
/* Greedily absorb whole iovecs into the queue while they fit. */
40     while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
42         memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
43         c->out.queue_len += vector[0].iov_len;
/* Mark this iovec fully consumed; iov_base is advanced past the copied
 * bytes so _xcb_out_send() restarts at the right place on partial writes. */
44         vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
45         vector[0].iov_len = 0;
/* Point an iovec at the accumulated queue so queue + remaining caller
 * vectors go out in one writev-style submission. */
52     vector[0].iov_base = c->out.queue;
53     vector[0].iov_len = c->out.queue_len;
55     return _xcb_out_send(c, &vector, &count);
58 /* Public interface */
/* Lazily kick off discovery of the server's maximum request length.
 * If nothing has been decided yet (LAZY_NONE) and the BIG-REQUESTS
 * extension is present, fire a BigRequestsEnable request and stash its
 * cookie (LAZY_COOKIE) without waiting for the reply; otherwise fall back
 * to the core-protocol limit from the connection setup (LAZY_FORCED).
 * Does not block on the server; xcb_get_maximum_request_length() resolves
 * the cookie later.  State is guarded by out.reqlenlock.
 * NOTE(review): brace/else lines are elided from this view (original
 * numbering skips 66, 70, 73-75, 78-79). */
60 void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
64     pthread_mutex_lock(&c->out.reqlenlock);
65     if(c->out.maximum_request_length_tag == LAZY_NONE)
67         const xcb_query_extension_reply_t *ext;
68         ext = xcb_get_extension_data(c, &xcb_big_requests_id);
69         if(ext && ext->present)
/* Extension available: issue the enable request now, resolve it lazily. */
71             c->out.maximum_request_length_tag = LAZY_COOKIE;
72             c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
/* (elided else branch) No BIG-REQUESTS: the core setup limit is final. */
76             c->out.maximum_request_length_tag = LAZY_FORCED;
77             c->out.maximum_request_length.value = c->setup->maximum_request_length;
80     pthread_mutex_unlock(&c->out.reqlenlock);
/* Return the server's maximum request length in 4-byte units, blocking if
 * necessary to collect the BigRequestsEnable reply that
 * xcb_prefetch_maximum_request_length() started.  The resolved value is
 * cached (tag becomes LAZY_FORCED), so only the first call can block.
 * State is guarded by out.reqlenlock. */
83 uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
87     xcb_prefetch_maximum_request_length(c);
88     pthread_mutex_lock(&c->out.reqlenlock);
89     if(c->out.maximum_request_length_tag == LAZY_COOKIE)
/* Resolve the outstanding BigRequestsEnable cookie into a concrete value. */
91         xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
92         c->out.maximum_request_length_tag = LAZY_FORCED;
/* NOTE(review): lines 93-94 and 96-98 are elided from this view — they
 * presumably hold the `if(r)` NULL-check and `free(r)`; as shown, a failed
 * reply (r == NULL) would be dereferenced here.  TODO confirm against the
 * full file. */
95             c->out.maximum_request_length.value = r->maximum_request_length;
/* (elided else branch) Reply failed: fall back to the core setup limit. */
99             c->out.maximum_request_length.value = c->setup->maximum_request_length;
101     pthread_mutex_unlock(&c->out.reqlenlock);
102     return c->out.maximum_request_length.value;
/* Assemble and enqueue one X request described by `req` over the iovecs in
 * `vector`, returning its sequence number, or 0 on failure (connection is
 * shut down via _xcb_conn_shutdown on error paths).
 * Unless XCB_REQUEST_RAW is set, this fills in the opcode byte(s), computes
 * and stores the 16-bit length field, and switches to the BIG-REQUESTS
 * 32-bit length encoding (via prefix[]) when the request exceeds the core
 * limit.  It also injects a sync request (GetInputFocus) to keep 16-bit
 * sequence-number wrap detection working, applies the GLX GetFBConfigs
 * server-bug workaround, and finally writes via write_block() under iolock.
 * NOTE(review): many lines are elided from this view (original numbering
 * has gaps, e.g. 106-113, 119-124, 141-142, 157-178, 203-204, 217-220,
 * 227-228, 230-237) — declarations of `i`, `longlen`, `request`, the
 * sync_req struct body, several braces/else arms, and the function tail
 * that returns `request` are not visible.  Comments below hedge where the
 * elisions matter. */
105 unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
/* Pre-built 4-byte GetInputFocus packet used as a sequence-sync request. */
114     } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
/* prefix[] may hold up to 3 words prepended before the caller's data:
 * [0] the sync packet, [1] the request's first word, [2] the BIG-REQUESTS
 * 32-bit length. */
116     uint32_t prefix[3] = { 0 };
117     int veclen = req->count;
118     enum workarounds workaround = WORKAROUND_NONE;
125     assert(req->count > 0);
127     if(!(flags & XCB_REQUEST_RAW))
/* Shared zero padding substituted for NULL iovec bases below. */
129         static const char pad[3];
131         uint16_t shortlen = 0;
133         assert(vector[0].iov_len >= 4);
134         /* set the major opcode, and the minor opcode for extensions */
137             const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
138             if(!(extension && extension->present))
/* Extension request against an absent extension: fatal for this
 * connection.  (The `return 0` presumably follows in an elided line.) */
140                 _xcb_conn_shutdown(c);
143             ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
144             ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
/* (elided else branch) Core request: opcode goes in byte 0 directly. */
147             ((uint8_t *) vector[0].iov_base)[0] = req->opcode;
149         /* put together the length field, possibly using BIGREQUESTS */
150         for(i = 0; i < req->count; ++i)
152             longlen += vector[i].iov_len;
153             if(!vector[i].iov_base)
/* NULL base means "padding": point it at the static zero buffer. */
155                 vector[i].iov_base = (char *) pad;
156                 assert(vector[i].iov_len <= sizeof(pad));
/* X requests are always a whole number of 4-byte words. */
159         assert((longlen & 3) == 0);
162         if(longlen <= c->setup->maximum_request_length)
164             /* we don't need BIGREQUESTS. */
168         else if(longlen > xcb_get_maximum_request_length(c))
170             _xcb_conn_shutdown(c);
171             return 0; /* server can't take this; maybe need BIGREQUESTS? */
174         /* set the length field. */
/* shortlen is 0 when BIG-REQUESTS encoding is in use, per the extension's
 * wire format. */
175         ((uint16_t *) vector[0].iov_base)[1] = shortlen;
/* BIG-REQUESTS path: the inserted 32-bit length word itself adds one word
 * to the request length, hence the pre-increment. */
177             prefix[2] = ++longlen;
179     flags &= ~XCB_REQUEST_RAW;
181     /* do we need to work around the X server bug described in glx.xml? */
182     /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
183      * configuration, but that should be handled here anyway. */
184     if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
185        ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
187         workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;
189     /* get a sequence number and arrange for delivery. */
190     pthread_mutex_lock(&c->iolock);
191     /* wait for other writing threads to get out of my way. */
192     while(c->out.writing)
193         pthread_cond_wait(&c->out.cond, &c->iolock);
195     request = ++c->out.request;
196     /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
198      * Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
199      * applications see sequence 0 as that is used to indicate
200      * an error in sending the request */
201     while((req->isvoid &&
202            c->out.request == c->in.request_expected + (1 << 16) - 1) ||
/* Slip the sync packet in ahead of this request and account for its
 * (discarded) reply, then take a fresh sequence number for the real one. */
205         prefix[0] = sync_req.packet;
206         _xcb_in_expect_reply(c, request, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY);
207         c->in.request_expected = c->out.request;
208         request = ++c->out.request;
211     if(workaround != WORKAROUND_NONE || flags != 0)
212         _xcb_in_expect_reply(c, request, workaround, flags);
/* NOTE(review): an elided line (213) presumably guards this with
 * `if(!req->isvoid)` — only reply-bearing requests move
 * request_expected; confirm against the full file. */
214         c->in.request_expected = c->out.request;
/* If any prefix words are pending, splice them in before vector[0]. */
216     if(prefix[0] || prefix[2])
/* BIG-REQUESTS: the request's first word moves into prefix[1] so the
 * 32-bit length (prefix[2]) can sit between it and the rest. */
221             prefix[1] = ((uint32_t *) vector[1].iov_base)[0];
222             vector[1].iov_base = (uint32_t *) vector[1].iov_base + 1;
223             vector[1].iov_len -= sizeof(uint32_t);
/* NOTE(review): precedence bug suspected — `|` binds tighter than `?:`, so
 * this parses as `prefix[0] ? 1 : ((0 | prefix[2]) ? 2 : 0)` and yields
 * only 1 word when BOTH a sync packet and a BIG-REQUESTS length are
 * pending (3 words are needed).  Upstream libxcb reads
 * `((prefix[0] ? 1 : 0) + (prefix[2] ? 2 : 0))`; likely a transcription
 * corruption in this view — confirm and fix against the full file. */
225         vector[0].iov_len = sizeof(uint32_t) * (prefix[0] ? 1 : 0 | prefix[2] ? 2 : 0);
/* Skip prefix[0] when no sync packet is pending. */
226         vector[0].iov_base = prefix + !prefix[0];
229     if(!write_block(c, vector, veclen))
/* Write failed: poison the connection.  (The `request = 0` error return
 * presumably follows in elided lines 230-237.) */
231         _xcb_conn_shutdown(c);
234     pthread_mutex_unlock(&c->iolock);
/* Flush all buffered output through the most recently generated request.
 * Takes iolock for the duration; returns _xcb_out_flush_to()'s result
 * (nonzero on success).  (The `return ret;` line is elided from this
 * view — original numbering skips 246-248.) */
238 int xcb_flush(xcb_connection_t *c)
243     pthread_mutex_lock(&c->iolock);
244     ret = _xcb_out_flush_to(c, c->out.request);
245     pthread_mutex_unlock(&c->iolock);
249 /* Private interface */
/* Initialize the output-side state of a connection: the writer condition
 * variable, the request-length mutex, and the lazy request-length cache.
 * Returns nonzero on success (the early-return-0 error paths and the final
 * `return 1;` are among the lines elided from this view — original
 * numbering skips 252, 254-259, 261, 263, 265-267). */
251 int _xcb_out_init(_xcb_out *out)
253     if(pthread_cond_init(&out->cond, 0))
260     out->request_written = 0;
262     if(pthread_mutex_init(&out->reqlenlock, 0))
/* Maximum request length is resolved lazily; see
 * xcb_prefetch_maximum_request_length(). */
264     out->maximum_request_length_tag = LAZY_NONE;
/* Tear down the synchronization objects created by _xcb_out_init(). */
269 void _xcb_out_destroy(_xcb_out *out)
271     pthread_cond_destroy(&out->cond);
272     pthread_mutex_destroy(&out->reqlenlock);
/* Drive the actual socket write via _xcb_conn_wait(), which updates
 * *vector/*count as data drains.  On return, everything up to the current
 * request has been handed to the OS, so request_written is advanced and
 * waiting writer threads are woken.  Returns _xcb_conn_wait()'s result.
 * (The declaration of `ret`, the out.writing bookkeeping, and the
 * `return ret;` are among the elided lines — original numbering skips
 * 276-278 and 282-283.)  Caller must hold iolock. */
275 int _xcb_out_send(xcb_connection_t *c, struct iovec **vector, int *count)
279     ret = _xcb_conn_wait(c, &c->out.cond, vector, count);
280     c->out.request_written = c->out.request;
281     pthread_cond_broadcast(&c->out.cond);
/* Ensure everything up to and including sequence number `request` has been
 * written.  Fast path: already written, nothing to do.  If this thread can
 * write (queue path visible below), it sends the queue contents itself;
 * otherwise it waits for the thread currently writing to finish.
 * Caller must hold iolock; `request` must not exceed out.request.
 * (Braces, the `return 1;` fast-path body, the writer/waiter branch
 * condition, and the final return are among the elided lines — original
 * numbering skips 286, 289-291, 293, 298, 302-303.) */
285 int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
287     assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
288     if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
/* Send the buffered queue as a single iovec and reset it. */
292         struct iovec vec, *vec_ptr = &vec;
294         vec.iov_base = c->out.queue;
295         vec.iov_len = c->out.queue_len;
296         c->out.queue_len = 0;
297         return _xcb_out_send(c, &vec_ptr, &count);
/* Another thread is writing: wait until it has flushed far enough. */
299     while(c->out.writing)
300         pthread_cond_wait(&c->out.cond, &c->iolock);
301     assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));