xwayland/xwm: implement somewhat asynchronous request flushing

Instead of calling xcb_flush() directly after queueing requests, wait
until the connection FD is writable before flushing.

Ideally we'd use a non-blocking variant of xcb_flush(), but libxcb
doesn't provide one. libxcb also blocks when its internal buffer is
full; there isn't much we can do about that here.
Simon Ser, 2023-08-21 12:16:24 +02:00 (committed by Kirill Primak)
parent c9fe96102d
commit 6ada67da9b
6 changed files with 39 additions and 23 deletions
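
For reference, below is a condensed sketch of the flush-scheduling pattern this commit introduces, assembled from the hunks that follow. It is an illustration rather than the complete implementation: struct wlr_xwm is reduced to the two fields the pattern touches, the event-source registration (wl_event_loop_add_fd) is assumed to have hooked x11_event_handler up to the X11 connection FD, and the readable/event-dispatch path is omitted.

#include <wayland-server-core.h>
#include <xcb/xcb.h>

/* Reduced view of struct wlr_xwm: only the fields used by the pattern. */
struct wlr_xwm {
	xcb_connection_t *xcb_conn;
	struct wl_event_source *event_source;
};

/* Request paths call this instead of xcb_flush(): ask the event loop to
 * wake us up once the X11 connection FD becomes writable. */
void xwm_schedule_flush(struct wlr_xwm *xwm) {
	wl_event_source_fd_update(xwm->event_source,
		WL_EVENT_READABLE | WL_EVENT_WRITABLE);
}

/* FD handler: when the FD is writable, flush the pending requests and drop
 * the writable bit so an idle, writable socket doesn't keep waking us up. */
static int x11_event_handler(int fd, uint32_t mask, void *data) {
	struct wlr_xwm *xwm = data;
	(void)fd;
	if (mask & WL_EVENT_WRITABLE) {
		xcb_flush(xwm->xcb_conn); /* can still block if libxcb's buffer is full */
		wl_event_source_fd_update(xwm->event_source, WL_EVENT_READABLE);
	}
	return 0;
}

The write is thus decoupled from request generation: any number of requests can be queued per loop iteration and at most one flush happens once the socket is ready, at the cost that xcb_flush() itself can still block, as noted above.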

@@ -172,4 +172,6 @@ xcb_void_cookie_t xwm_send_event_with_size(xcb_connection_t *c,
 		uint8_t propagate, xcb_window_t destination,
 		uint32_t event_mask, const void *event, uint32_t length);
+void xwm_schedule_flush(struct wlr_xwm *xwm);
 #endif

@@ -55,7 +55,7 @@ static void xwm_dnd_send_event(struct wlr_xwm *xwm, xcb_atom_t type,
 		XCB_EVENT_MASK_NO_EVENT,
 		&event,
 		sizeof(event));
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 }
 static void xwm_dnd_send_enter(struct wlr_xwm *xwm) {

@@ -37,7 +37,7 @@ xwm_selection_transfer_create_incoming(struct wlr_xwm_selection *selection) {
 			XCB_EVENT_MASK_SUBSTRUCTURE_NOTIFY | XCB_EVENT_MASK_PROPERTY_CHANGE
 		}
 	);
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 	return transfer;
 }
@@ -89,7 +89,7 @@ static void xwm_notify_ready_for_next_incr_chunk(
 	wlr_log(WLR_DEBUG, "deleting property");
 	xcb_delete_property(xwm->xcb_conn, transfer->incoming_window,
 		xwm->atoms[WL_SELECTION]);
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 	xwm_selection_transfer_remove_event_source(transfer);
 	xwm_selection_transfer_destroy_property_reply(transfer);
@@ -234,7 +234,7 @@ static void source_send(struct wlr_xwm_selection *selection,
 		xwm->atoms[WL_SELECTION],
 		XCB_TIME_CURRENT_TIME);
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 	fcntl(fd, F_SETFL, O_WRONLY | O_NONBLOCK);
 	transfer->wl_client_fd = fd;
@@ -533,7 +533,7 @@ int xwm_handle_xfixes_selection_notify(struct wlr_xwm *xwm,
 		xwm->atoms[WL_SELECTION],
 		event->timestamp
 	);
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 	return 1;
 }

@@ -33,7 +33,7 @@ static void xwm_selection_send_notify(struct wlr_xwm *xwm,
 		XCB_EVENT_MASK_NO_EVENT,
 		&selection_notify,
 		sizeof(selection_notify));
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 }
 static int xwm_selection_flush_source_data(
@@ -46,7 +46,7 @@ static int xwm_selection_flush_source_data(
 		8, // format
 		transfer->source_data.size,
 		transfer->source_data.data);
-	xcb_flush(transfer->selection->xwm->xcb_conn);
+	xwm_schedule_flush(transfer->selection->xwm);
 	transfer->property_set = true;
 	size_t length = transfer->source_data.size;
 	transfer->source_data.size = 0;

@@ -52,7 +52,7 @@ void xwm_selection_transfer_destroy(
 	if (transfer->incoming_window) {
 		struct wlr_xwm *xwm = transfer->selection->xwm;
 		xcb_destroy_window(xwm->xcb_conn, transfer->incoming_window);
-		xcb_flush(xwm->xcb_conn);
+		xwm_schedule_flush(xwm);
 	}
 	wl_list_remove(&transfer->link);
@@ -269,14 +269,14 @@ static void xwm_selection_set_owner(struct wlr_xwm_selection *selection,
 			selection->window,
 			selection->atom,
 			XCB_TIME_CURRENT_TIME);
-		xcb_flush(selection->xwm->xcb_conn);
+		xwm_schedule_flush(selection->xwm);
 	} else {
 		if (selection->owner == selection->window) {
 			xcb_set_selection_owner(selection->xwm->xcb_conn,
 				XCB_WINDOW_NONE,
 				selection->atom,
 				selection->timestamp);
-			xcb_flush(selection->xwm->xcb_conn);
+			xwm_schedule_flush(selection->xwm);
 		}
 	}
 }

@@ -302,7 +302,7 @@ static void xwm_send_wm_message(struct wlr_xwayland_surface *surface,
 		event_mask,
 		&event,
 		sizeof(event));
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 }
 static void xwm_set_net_client_list(struct wlr_xwm *xwm) {
@@ -460,7 +460,7 @@ static void xwm_surface_activate(struct wlr_xwm *xwm,
 	}
 	xwm_set_focused_window(xwm, xsurface);
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 }
 static void xsurface_set_net_wm_state(struct wlr_xwayland_surface *xsurface) {
@@ -1187,7 +1187,7 @@ void wlr_xwayland_surface_restack(struct wlr_xwayland_surface *xsurface,
 	wl_list_insert(node, &xsurface->stack_link);
 	xwm_set_net_client_list_stacking(xwm);
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 }
 static void xwm_handle_map_request(struct wlr_xwm *xwm,
@@ -1809,9 +1809,19 @@ static int x11_event_handler(int fd, uint32_t mask, void *data) {
 		return 0;
 	}
-	int count = read_x11_events(xwm);
+	int count = 0;
+	if (mask & WL_EVENT_READABLE) {
+		count = read_x11_events(xwm);
+		if (count) {
+			xwm_schedule_flush(xwm);
+		}
+	}
+	if (mask & WL_EVENT_WRITABLE) {
+		// xcb_flush() always blocks until it's written all pending requests,
+		// but it's the only thing we have
+		xcb_flush(xwm->xcb_conn);
+		wl_event_source_fd_update(xwm->event_source, WL_EVENT_READABLE);
+	}
 	return count;
@@ -1835,7 +1845,7 @@ static void handle_compositor_new_surface(struct wl_listener *listener,
 	wl_list_for_each(xsurface, &xwm->unpaired_surfaces, unpaired_link) {
 		if (xsurface->surface_id == surface_id) {
 			xwayland_surface_associate(xwm, xsurface, surface);
-			xcb_flush(xwm->xcb_conn);
+			xwm_schedule_flush(xwm);
 			return;
 		}
 	}
@@ -1924,7 +1934,7 @@ void wlr_xwayland_surface_configure(struct wlr_xwayland_surface *xsurface,
 			sizeof(configure_notify));
 	}
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 }
 void wlr_xwayland_surface_close(struct wlr_xwayland_surface *xsurface) {
@@ -1945,7 +1955,7 @@ void wlr_xwayland_surface_close(struct wlr_xwayland_surface *xsurface) {
 		xwm_send_wm_message(xsurface, &message_data, XCB_EVENT_MASK_NO_EVENT);
 	} else {
 		xcb_kill_client(xwm->xcb_conn, xsurface->window_id);
-		xcb_flush(xwm->xcb_conn);
+		xwm_schedule_flush(xwm);
 	}
 }
@@ -2238,7 +2248,7 @@ void xwm_set_cursor(struct wlr_xwm *xwm, const uint8_t *pixels, uint32_t stride,
 	uint32_t values[] = {xwm->cursor};
 	xcb_change_window_attributes(xwm->xcb_conn, xwm->screen->root,
 		XCB_CW_CURSOR, values);
-	xcb_flush(xwm->xcb_conn);
+	xwm_schedule_flush(xwm);
 }
 struct wlr_xwm *xwm_create(struct wlr_xwayland *xwayland, int wm_fd) {
@@ -2364,7 +2374,7 @@ void wlr_xwayland_surface_set_withdrawn(struct wlr_xwayland_surface *surface,
 	surface->withdrawn = withdrawn;
 	xsurface_set_wm_state(surface);
 	xsurface_set_net_wm_state(surface);
-	xcb_flush(surface->xwm->xcb_conn);
+	xwm_schedule_flush(surface->xwm);
 }
 void wlr_xwayland_surface_set_minimized(struct wlr_xwayland_surface *surface,
@@ -2372,7 +2382,7 @@ void wlr_xwayland_surface_set_minimized(struct wlr_xwayland_surface *surface,
 	surface->minimized = minimized;
 	xsurface_set_wm_state(surface);
 	xsurface_set_net_wm_state(surface);
-	xcb_flush(surface->xwm->xcb_conn);
+	xwm_schedule_flush(surface->xwm);
 }
 void wlr_xwayland_surface_set_maximized(struct wlr_xwayland_surface *surface,
@@ -2380,14 +2390,14 @@ void wlr_xwayland_surface_set_maximized(struct wlr_xwayland_surface *surface,
 	surface->maximized_horz = maximized_horz;
 	surface->maximized_vert = maximized_vert;
 	xsurface_set_net_wm_state(surface);
-	xcb_flush(surface->xwm->xcb_conn);
+	xwm_schedule_flush(surface->xwm);
 }
 void wlr_xwayland_surface_set_fullscreen(struct wlr_xwayland_surface *surface,
 		bool fullscreen) {
 	surface->fullscreen = fullscreen;
 	xsurface_set_net_wm_state(surface);
-	xcb_flush(surface->xwm->xcb_conn);
+	xwm_schedule_flush(surface->xwm);
 }
 bool xwm_atoms_contains(struct wlr_xwm *xwm, xcb_atom_t *atoms,
@@ -2513,3 +2523,7 @@ xcb_connection_t *wlr_xwayland_get_xwm_connection(
 		struct wlr_xwayland *wlr_xwayland) {
 	return wlr_xwayland->xwm ? wlr_xwayland->xwm->xcb_conn : NULL;
 }
+
+void xwm_schedule_flush(struct wlr_xwm *xwm) {
+	wl_event_source_fd_update(xwm->event_source, WL_EVENT_READABLE | WL_EVENT_WRITABLE);
+}