}
assert(count <= (int) (sizeof(parts) / sizeof(*parts)));
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
{
struct iovec *parts_ptr = parts;
ret = _xcb_out_send(c, &parts_ptr, &count);
}
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
return ret;
}
c->has_error = 1;
}
-void _xcb_lock_io(xcb_connection_t *c)
-{
- pthread_mutex_lock(&c->iolock);
-}
-
-void _xcb_unlock_io(xcb_connection_t *c)
-{
- pthread_mutex_unlock(&c->iolock);
-}
-
-void _xcb_wait_io(xcb_connection_t *c, pthread_cond_t *cond)
-{
- pthread_cond_wait(cond, &c->iolock);
-}
-
int _xcb_conn_wait(xcb_connection_t *c, pthread_cond_t *cond, struct iovec **vector, int *count)
{
int ret;
/* If the thing I should be doing is already being done, wait for it. */
if(count ? c->out.writing : c->in.reading)
{
- _xcb_wait_io(c, cond);
+ pthread_cond_wait(cond, &c->iolock);
return 1;
}
++c->out.writing;
}
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
do {
ret = select(c->fd + 1, &rfds, &wfds, 0, 0);
} while (ret == -1 && errno == EINTR);
_xcb_conn_shutdown(c);
ret = 0;
}
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
if(ret)
{
if(c->has_error)
return 0;
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
/* If this request has not been written yet, write it. */
if(_xcb_out_flush_to(c, request))
}
wake_up_next_reader(c);
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
return ret;
}
return 1; /* would not block */
}
assert(reply != 0);
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
ret = poll_for_reply(c, request, reply, error);
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
return ret;
}
xcb_generic_event_t *ret;
if(c->has_error)
return 0;
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
/* get_event returns 0 on empty list. */
while(!(ret = get_event(c)))
if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
break;
wake_up_next_reader(c);
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
return ret;
}
xcb_generic_event_t *ret = 0;
if(!c->has_error)
{
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
/* FIXME: follow X meets Z architecture changes. */
ret = get_event(c);
if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
ret = get_event(c);
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
}
return ret;
}
workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;
/* get a sequence number and arrange for delivery. */
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
/* wait for other writing threads to get out of my way. */
while(c->out.writing)
- _xcb_wait_io(c, &c->out.cond);
+ pthread_cond_wait(&c->out.cond, &c->iolock);
request = ++c->out.request;
/* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
_xcb_conn_shutdown(c);
request = 0;
}
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
return request;
}
int ret;
if(c->has_error)
return 0;
- _xcb_lock_io(c);
+ pthread_mutex_lock(&c->iolock);
ret = _xcb_out_flush_to(c, c->out.request);
- _xcb_unlock_io(c);
+ pthread_mutex_unlock(&c->iolock);
return ret;
}
return _xcb_out_send(c, &vec_ptr, &count);
}
while(c->out.writing)
- _xcb_wait_io(c, &c->out.cond);
+ pthread_cond_wait(&c->out.cond, &c->iolock);
assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
return 1;
}
};
void _xcb_conn_shutdown(xcb_connection_t *c);
-void _xcb_lock_io(xcb_connection_t *c);
-void _xcb_unlock_io(xcb_connection_t *c);
-void _xcb_wait_io(xcb_connection_t *c, pthread_cond_t *cond);
int _xcb_conn_wait(xcb_connection_t *c, pthread_cond_t *cond, struct iovec **vector, int *count);
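A minimal sketch of the locking pattern this patch inlines: callers now take the connection's iolock directly with pthread_mutex_lock/pthread_mutex_unlock and block on a condition variable with pthread_cond_wait, instead of going through the removed _xcb_lock_io/_xcb_unlock_io/_xcb_wait_io wrappers. The struct and function below are illustrative assumptions only, not the real xcb_connection_t or libxcb API.

/* Illustrative only: a stand-in connection with the same locking shape.
 * Field and function names here are hypothetical, not libxcb's. */
#include <pthread.h>

struct conn {
    pthread_mutex_t iolock;   /* protects the I/O state, as c->iolock does */
    pthread_cond_t  out_cond; /* signalled when a writer finishes */
    int             writing;  /* number of threads currently writing */
};

static void send_request(struct conn *c)
{
    pthread_mutex_lock(&c->iolock);            /* was _xcb_lock_io(c) */

    /* Wait for other writing threads to get out of the way;
     * pthread_cond_wait releases iolock while blocked and
     * reacquires it before returning (was _xcb_wait_io(c, ...)). */
    while (c->writing)
        pthread_cond_wait(&c->out_cond, &c->iolock);

    ++c->writing;
    /* ... write the request on the socket ... */
    --c->writing;

    pthread_cond_broadcast(&c->out_cond);
    pthread_mutex_unlock(&c->iolock);          /* was _xcb_unlock_io(c) */
}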