/* Stuff that sends stuff to the server. */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
-#include <errno.h>
-
-#ifndef __GNUC__
-# if HAVE_ALLOCA_H
-# include <alloca.h>
-# else
-# ifdef _AIX
- #pragma alloca
-# endif
-# endif
-#endif
#include "xcb.h"
#include "xcbext.h"
#include "xcbint.h"
-#include "extensions/bigreq.h"
+#include "bigreq.h"
-static int force_sequence_wrap(XCBConnection *c)
+static inline void send_request(xcb_connection_t *c, int isvoid, enum workarounds workaround, int flags, struct iovec *vector, int count)
{
- int ret = 1;
- if((c->out.request - c->in.request_read) > 65530)
+ if(c->has_error)
+ return;
+
+ ++c->out.request;
+ if(!isvoid)
+ c->in.request_expected = c->out.request;
+ if(workaround != WORKAROUND_NONE || flags != 0)
+ _xcb_in_expect_reply(c, c->out.request, workaround, flags);
+
+ while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
{
- pthread_mutex_unlock(&c->iolock);
- ret = XCBSync(c, 0);
- pthread_mutex_lock(&c->iolock);
+ memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
+ c->out.queue_len += vector[0].iov_len;
+ vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
+ vector[0].iov_len = 0;
+ ++vector, --count;
}
- return ret;
+ if(!count)
+ return;
+
+ --vector, ++count;
+ vector[0].iov_base = c->out.queue;
+ vector[0].iov_len = c->out.queue_len;
+ c->out.queue_len = 0;
+ _xcb_out_send(c, vector, count);
}
-static int _xcb_write(const int fd, char (*buf)[], int *count)
+static void send_sync(xcb_connection_t *c)
{
- int n = write(fd, *buf, *count);
- if(n > 0)
- {
- *count -= n;
- if(*count)
- memmove(*buf, *buf + n, *count);
- }
- return n;
+ static const union {
+ struct {
+ uint8_t major;
+ uint8_t pad;
+ uint16_t len;
+ } fields;
+ uint32_t packet;
+ } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
+ struct iovec vector[2];
+ vector[1].iov_base = (char *) &sync_req;
+ vector[1].iov_len = sizeof(sync_req);
+ send_request(c, 0, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY, vector + 1, 1);
+}
+
+static void get_socket_back(xcb_connection_t *c)
+{
+ while(c->out.return_socket && c->out.socket_moving)
+ pthread_cond_wait(&c->out.socket_cond, &c->iolock);
+ if(!c->out.return_socket)
+ return;
+
+ c->out.socket_moving = 1;
+ pthread_mutex_unlock(&c->iolock);
+ c->out.return_socket(c->out.socket_closure);
+ pthread_mutex_lock(&c->iolock);
+ c->out.socket_moving = 0;
+
+ pthread_cond_broadcast(&c->out.socket_cond);
+ c->out.return_socket = 0;
+ c->out.socket_closure = 0;
+ _xcb_in_replies_done(c);
}
-static int _xcb_writev(const int fd, struct iovec *vec, int count)
+/* Public interface */
+
+void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
{
- int n = writev(fd, vec, count);
- if(n > 0)
+ if(c->has_error)
+ return;
+ pthread_mutex_lock(&c->out.reqlenlock);
+ if(c->out.maximum_request_length_tag == LAZY_NONE)
{
- int rem = n;
- for(; count; --count, ++vec)
+ const xcb_query_extension_reply_t *ext;
+ ext = xcb_get_extension_data(c, &xcb_big_requests_id);
+ if(ext && ext->present)
{
- int cur = vec->iov_len;
- if(cur > rem)
- cur = rem;
- vec->iov_len -= cur;
- vec->iov_base = (char *) vec->iov_base + cur;
- rem -= cur;
- if(vec->iov_len)
- break;
+ c->out.maximum_request_length_tag = LAZY_COOKIE;
+ c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
+ }
+ else
+ {
+ c->out.maximum_request_length_tag = LAZY_FORCED;
+ c->out.maximum_request_length.value = c->setup->maximum_request_length;
}
- assert(rem == 0);
}
- return n;
+ pthread_mutex_unlock(&c->out.reqlenlock);
}
-/* Public interface */
-
-CARD32 XCBGetMaximumRequestLength(XCBConnection *c)
+uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
{
+ if(c->has_error)
+ return 0;
+ xcb_prefetch_maximum_request_length(c);
pthread_mutex_lock(&c->out.reqlenlock);
- if(!c->out.maximum_request_length)
+ if(c->out.maximum_request_length_tag == LAZY_COOKIE)
{
- const XCBQueryExtensionRep *ext;
- c->out.maximum_request_length = c->setup->maximum_request_length;
- ext = XCBGetExtensionData(c, &XCBBigRequestsId);
- if(ext && ext->present)
+ xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
+ c->out.maximum_request_length_tag = LAZY_FORCED;
+ if(r)
{
- XCBBigRequestsEnableRep *r = XCBBigRequestsEnableReply(c, XCBBigRequestsEnable(c), 0);
- c->out.maximum_request_length = r->maximum_request_length;
+ c->out.maximum_request_length.value = r->maximum_request_length;
free(r);
}
+ else
+ c->out.maximum_request_length.value = c->setup->maximum_request_length;
}
pthread_mutex_unlock(&c->out.reqlenlock);
- return c->out.maximum_request_length;
+ return c->out.maximum_request_length.value;
}
-int XCBSendRequest(XCBConnection *c, unsigned int *request, struct iovec *vector, const XCBProtocolRequest *req)
+unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
{
- static const char pad[3];
- int ret;
- int i;
- struct iovec *padded;
- int padlen = 0;
- CARD16 shortlen = 0;
- CARD32 longlen = 0;
+ uint64_t request;
+ uint32_t prefix[2];
+ int veclen = req->count;
enum workarounds workaround = WORKAROUND_NONE;
+ if(c->has_error)
+ return 0;
+
assert(c != 0);
- assert(request != 0);
assert(vector != 0);
assert(req->count > 0);
- /* set the major opcode, and the minor opcode for extensions */
- if(req->ext)
+ if(!(flags & XCB_REQUEST_RAW))
{
- const XCBQueryExtensionRep *extension = XCBGetExtensionData(c, req->ext);
- /* TODO: better error handling here, please! */
- assert(extension && extension->present);
- ((CARD8 *) vector[0].iov_base)[0] = extension->major_opcode;
- ((CARD8 *) vector[0].iov_base)[1] = req->opcode;
-
- /* do we need to work around the X server bug described in glx.xml? */
- if(strcmp(req->ext->name, "GLX") &&
- ((req->opcode == 17 && ((CARD32 *) vector[0].iov_base)[0] == 0x10004) ||
- req->opcode == 21))
- workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;
- }
- else
- ((CARD8 *) vector[0].iov_base)[0] = req->opcode;
+ static const char pad[3];
+ unsigned int i;
+ uint16_t shortlen = 0;
+ size_t longlen = 0;
+ assert(vector[0].iov_len >= 4);
+ /* set the major opcode, and the minor opcode for extensions */
+ if(req->ext)
+ {
+ const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
+ if(!(extension && extension->present))
+ {
+ _xcb_conn_shutdown(c, XCB_CONN_CLOSED_EXT_NOTSUPPORTED);
+ return 0;
+ }
+ ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
+ ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
+ }
+ else
+ ((uint8_t *) vector[0].iov_base)[0] = req->opcode;
- /* put together the length field, possibly using BIGREQUESTS */
- for(i = 0; i < req->count; ++i)
- longlen += XCB_CEIL(vector[i].iov_len) >> 2;
+ /* put together the length field, possibly using BIGREQUESTS */
+ for(i = 0; i < req->count; ++i)
+ {
+ longlen += vector[i].iov_len;
+ if(!vector[i].iov_base)
+ {
+ vector[i].iov_base = (char *) pad;
+ assert(vector[i].iov_len <= sizeof(pad));
+ }
+ }
+ assert((longlen & 3) == 0);
+ longlen >>= 2;
- if(longlen > c->setup->maximum_request_length)
- {
- if(longlen > XCBGetMaximumRequestLength(c))
+ if(longlen <= c->setup->maximum_request_length)
+ {
+ /* we don't need BIGREQUESTS. */
+ shortlen = longlen;
+ longlen = 0;
+ }
+ else if(longlen > xcb_get_maximum_request_length(c))
+ {
+ _xcb_conn_shutdown(c, XCB_CONN_CLOSED_REQ_LEN_EXCEED);
return 0; /* server can't take this; maybe need BIGREQUESTS? */
- }
- else
- {
- /* we don't need BIGREQUESTS. */
- shortlen = longlen;
- longlen = 0;
- }
+ }
- padded =
-#ifdef HAVE_ALLOCA
- alloca
-#else
- malloc
-#endif
- ((req->count * 2 + 3) * sizeof(struct iovec));
- /* set the length field. */
- ((CARD16 *) vector[0].iov_base)[1] = shortlen;
- if(!shortlen)
- {
- padded[0].iov_base = vector[0].iov_base;
- padded[0].iov_len = sizeof(CARD32);
- vector[0].iov_base = ((char *) vector[0].iov_base) + sizeof(CARD32);
- vector[0].iov_len -= sizeof(CARD32);
- ++longlen;
- padded[1].iov_base = &longlen;
- padded[1].iov_len = sizeof(CARD32);
- padlen = 2;
+ /* set the length field. */
+ ((uint16_t *) vector[0].iov_base)[1] = shortlen;
+ if(!shortlen)
+ {
+ prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
+ prefix[1] = ++longlen;
+ vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
+ vector[0].iov_len -= sizeof(uint32_t);
+ --vector, ++veclen;
+ vector[0].iov_base = prefix;
+ vector[0].iov_len = sizeof(prefix);
+ }
}
+ flags &= ~XCB_REQUEST_RAW;
- for(i = 0; i < req->count; ++i)
- {
- if(!vector[i].iov_len)
- continue;
- padded[padlen].iov_base = vector[i].iov_base;
- padded[padlen++].iov_len = vector[i].iov_len;
- if(!XCB_PAD(vector[i].iov_len))
- continue;
- padded[padlen].iov_base = (caddr_t) pad;
- padded[padlen++].iov_len = XCB_PAD(vector[i].iov_len);
- }
+ /* do we need to work around the X server bug described in glx.xml? */
+ /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
+ * configuration, but that should be handled here anyway. */
+ if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
+ ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
+ req->opcode == 21))
+ workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;
/* get a sequence number and arrange for delivery. */
pthread_mutex_lock(&c->iolock);
- if(req->isvoid && !force_sequence_wrap(c))
- {
- pthread_mutex_unlock(&c->iolock);
-#ifndef HAVE_ALLOCA
- free(padded);
-#endif
- return -1;
- }
-
- *request = ++c->out.request;
-
- if(!req->isvoid)
- _xcb_in_expect_reply(c, *request, workaround);
+ /* wait for other writing threads to get out of my way. */
+ while(c->out.writing)
+ pthread_cond_wait(&c->out.cond, &c->iolock);
+ get_socket_back(c);
+
+ /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
+ * a reply. */
+ if(req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2)
+ send_sync(c);
+ /* Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
+ * applications see sequence 0 as that is used to indicate
+ * an error in sending the request */
+ if((unsigned int) (c->out.request + 1) == 0)
+ send_sync(c);
+
+ /* The above send_sync calls could drop the I/O lock, but this
+ * thread will still exclude any other thread that tries to write,
+ * so the sequence number postconditions still hold. */
+ send_request(c, req->isvoid, workaround, flags, vector, veclen);
+ request = c->has_error ? 0 : c->out.request;
+ pthread_mutex_unlock(&c->iolock);
+ return request;
+}
- ret = _xcb_out_write_block(c, padded, padlen);
+void
+xcb_send_fd(xcb_connection_t *c, int fd)
+{
+#if HAVE_SENDMSG
+ if (c->has_error)
+ return;
+ pthread_mutex_lock(&c->iolock);
+ while (c->out.out_fd.nfd == XCB_MAX_PASS_FD) {
+ _xcb_out_flush_to(c, c->out.request);
+ if (c->has_error)
+ break;
+ }
+ if (!c->has_error)
+ c->out.out_fd.fd[c->out.out_fd.nfd++] = fd;
pthread_mutex_unlock(&c->iolock);
-#ifndef HAVE_ALLOCA
- free(padded);
#endif
+}
+int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
+{
+ int ret;
+ if(c->has_error)
+ return 0;
+ pthread_mutex_lock(&c->iolock);
+ get_socket_back(c);
+
+ /* _xcb_out_flush may drop the iolock allowing other threads to
+ * write requests, so keep flushing until we're done
+ */
+ do
+ ret = _xcb_out_flush_to(c, c->out.request);
+ while (ret && c->out.request != c->out.request_written);
+ if(ret)
+ {
+ c->out.return_socket = return_socket;
+ c->out.socket_closure = closure;
+ if(flags)
+ _xcb_in_expect_reply(c, c->out.request, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
+ assert(c->out.request == c->out.request_written);
+ *sent = c->out.request;
+ }
+ pthread_mutex_unlock(&c->iolock);
+ return ret;
+}
+
+int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
+{
+ int ret;
+ if(c->has_error)
+ return 0;
+ pthread_mutex_lock(&c->iolock);
+ c->out.request += requests;
+ ret = _xcb_out_send(c, vector, count);
+ pthread_mutex_unlock(&c->iolock);
return ret;
}
-int XCBFlush(XCBConnection *c)
+int xcb_flush(xcb_connection_t *c)
{
int ret;
+ if(c->has_error)
+ return 0;
pthread_mutex_lock(&c->iolock);
- ret = _xcb_out_flush(c);
+ ret = _xcb_out_flush_to(c, c->out.request);
pthread_mutex_unlock(&c->iolock);
return ret;
}
/* Initialize the output-side state of a connection: socket-ownership
 * tracking, writer condition variable, queue, sequence counters, and the
 * lazily-resolved maximum-request-length state.  Returns 1 on success,
 * 0 if any pthread primitive fails to initialize. */
int _xcb_out_init(_xcb_out *out)
{
    if(pthread_cond_init(&out->socket_cond, 0))
        return 0;
    out->return_socket = 0;
    out->socket_closure = 0;
    out->socket_moving = 0;

    if(pthread_cond_init(&out->cond, 0))
        return 0;
    out->writing = 0;

    out->queue_len = 0;

    out->request = 0;
    out->request_written = 0;

    if(pthread_mutex_init(&out->reqlenlock, 0))
        return 0;
    out->maximum_request_length_tag = LAZY_NONE;

    return 1;
}
{
pthread_cond_destroy(&out->cond);
pthread_mutex_destroy(&out->reqlenlock);
- free(out->vec);
}
-int _xcb_out_write(XCBConnection *c)
+int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
{
- int n;
- if(c->out.vec_len)
- n = _xcb_writev(c->fd, c->out.vec, c->out.vec_len);
- else
- n = _xcb_write(c->fd, &c->out.queue, &c->out.queue_len);
-
- /* XXX: should "nothing was written" be considered failure or
- * success for this function? it's not an I/O error, but... */
- n = (n > 0) || (n < 0 && errno == EAGAIN);
-
- if(c->out.vec_len)
- {
- int i;
- for(i = 0; i < c->out.vec_len; ++i)
- if(c->out.vec[i].iov_len)
- return n;
- c->out.vec = 0;
- c->out.vec_len = 0;
- }
- return n;
+ int ret = 1;
+ while(ret && count)
+ ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
+ c->out.request_written = c->out.request;
+ pthread_cond_broadcast(&c->out.cond);
+ _xcb_in_wake_up_next_reader(c);
+ return ret;
}
-int _xcb_out_write_block(XCBConnection *c, struct iovec *vector, size_t count)
+void _xcb_out_send_sync(xcb_connection_t *c)
{
- while(count && c->out.queue_len + vector[0].iov_len < sizeof(c->out.queue))
- {
- memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
- c->out.queue_len += vector[0].iov_len;
- ++vector, --count;
- }
- if(!count)
- return 1;
-
- memmove(vector + 1, vector, count++ * sizeof(struct iovec));
- vector[0].iov_base = c->out.queue;
- vector[0].iov_len = c->out.queue_len;
- c->out.queue_len = 0;
-
- assert(!c->out.vec_len);
- assert(!c->out.vec);
- c->out.vec_len = count;
- c->out.vec = vector;
- return _xcb_out_flush(c);
+ /* wait for other writing threads to get out of my way. */
+ while(c->out.writing)
+ pthread_cond_wait(&c->out.cond, &c->iolock);
+ get_socket_back(c);
+ send_sync(c);
}
-int _xcb_out_flush(XCBConnection *c)
+int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
{
- int ret = 1;
- while(ret && (c->out.queue_len || c->out.vec_len))
- ret = _xcb_conn_wait(c, /*should_write*/ 1, &c->out.cond);
- c->out.request_written = c->out.request;
- pthread_cond_broadcast(&c->out.cond);
- return ret;
+ assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
+ if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
+ return 1;
+ if(c->out.queue_len)
+ {
+ struct iovec vec;
+ vec.iov_base = c->out.queue;
+ vec.iov_len = c->out.queue_len;
+ c->out.queue_len = 0;
+ return _xcb_out_send(c, &vec, 1);
+ }
+ while(c->out.writing)
+ pthread_cond_wait(&c->out.cond, &c->iolock);
+ assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
+ return 1;
}