My understanding is that the TTY code should be doing the same checking of
dispatch_event_queue and quit_check_signal_tick_count as the event-Xt code,
since emacs_tty_next_event and the event-stream code pay attention to the
former, and the signal handler to the latter. The patch below makes my
TTY-only build react more crisply (though it doesn't make VM startup on
decent-size mailboxes as fast as on a build with the Xt event loop, even on
a TTY -- I need to look further into why that is).
http://mid.gmane.org/17597.11630.343389.787670@parhasard.net describes a
similar but clearer-cut problem; I suspect the two stem from an oversight
made at the same point in time. I'd appreciate a look from someone who
knows their way around the event-handling code.
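For anyone skimming, here is a minimal sketch of the pattern I mean -- my
own paraphrase, not code from the tree: the function name
check_pending_sketch and the static last_tick_count_seen are made up for
illustration, and the fd-polling, timer and in_modal_loop checks are left
out. It assumes the usual XEmacs internal declarations (Lisp_Object, NILP,
EVENT_CHAIN_LOOP, dispatch_event_queue, quit_check_signal_tick_count,
event_stream_drain_queue, command_event_p). The Xt code consults the
dispatch queue and the SIGIO tick count like this, and the patch below
teaches the TTY code to do the same:

    /* Simplified sketch only; not the actual event-xlike-inc.c code.  */
    static int last_tick_count_seen;            /* made-up name */

    static int
    check_pending_sketch (int how_many)         /* made-up name */
    {
      Lisp_Object event;

      /* (1) Anything already sitting in the dispatch queue is pending.  */
      if (!how_many && !NILP (dispatch_event_queue))
        return 1;

      /* (2) The SIGIO handler bumps quit_check_signal_tick_count; if it
         has moved since we last looked, drain the device queues into
         dispatch_event_queue so we can see what actually arrived.  */
      if (last_tick_count_seen != quit_check_signal_tick_count)
        {
          last_tick_count_seen = quit_check_signal_tick_count;
          event_stream_drain_queue ();
        }

      if (!how_many)
        return !NILP (dispatch_event_queue);

      /* (3) Otherwise, count command events against HOW_MANY.  */
      EVENT_CHAIN_LOOP (event, dispatch_event_queue)
        {
          if (command_event_p (event) && --how_many <= 0)
            return 1;
        }

      return 0;
    }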
src/ChangeLog addition:
2006-12-08  Aidan Kehoe  <kehoea@parhasard.net>

	* event-tty.c:
	* event-tty.c (emacs_tty_event_pending_p):
	* event-tty.c (reinit_vars_of_event_tty):
	Pay attention to the dispatch event queue and to pending-input
	signals in emacs_tty_event_pending_p; this makes pure TTY builds
	more responsive.
XEmacs Trunk source patch:
Diff command: cvs -q diff -u
Files affected: src/event-tty.c
Index: src/event-tty.c
===================================================================
RCS file: /pack/xemacscvs/XEmacs/xemacs/src/event-tty.c,v
retrieving revision 1.17
diff -u -r1.17 event-tty.c
--- src/event-tty.c 2006/08/04 20:55:04 1.17
+++ src/event-tty.c 2006/12/08 13:10:02
@@ -45,6 +45,8 @@
extern int mswindows_is_blocking;
#endif
+static int last_quit_check_signal_tick_count;
+
/************************************************************************/
/* timeout events */
@@ -84,22 +86,76 @@
static int
emacs_tty_event_pending_p (int how_many)
{
+ Lisp_Object event;
+ int tick_count_val;
+
+ /* Cf. the comments on emacs_Xt_event_pending_p in event-xlike-inc.c . */
+
if (!how_many)
{
EMACS_TIME sometime;
- /* see if there's a pending timeout. */
+
+ /* (1) Any pending events in the dispatch queue? */
+ if (!NILP(dispatch_event_queue))
+ {
+ return 1;
+ }
+
+ /* (2) Any TTY or process input available? */
+ if (poll_fds_for_input (non_fake_input_wait_mask))
+ return 1;
+
+ /* (3) Any timeout input available? */
EMACS_GET_TIME (sometime);
if (tty_timer_queue &&
EMACS_TIME_EQUAL_OR_GREATER (sometime, tty_timer_queue->time))
return 1;
+ }
+ else
+ {
+ /* HOW_MANY > 0 */
+ EVENT_CHAIN_LOOP (event, dispatch_event_queue)
+ {
+ if (command_event_p (event))
+ {
+ how_many--;
+ if (how_many <= 0)
+ return 1;
+ }
+ }
- return poll_fds_for_input (non_fake_input_wait_mask);
}
+
+ tick_count_val = quit_check_signal_tick_count;
+
+ /* Checking in_modal_loop here is a bit cargo-cultish, since its use is
+ specific to builds with a window system. */
+ if (!in_modal_loop &&
+ (last_quit_check_signal_tick_count != tick_count_val))
+ {
+ last_quit_check_signal_tick_count = tick_count_val;
- /* #### Not right! We need to *count* the number of pending events, which
- means we need to have a dispatch queue and drain the pending events,
- using drain_tty_devices(). */
- return poll_fds_for_input (tty_only_mask);
+ /* We need to drain the entire queue now -- if we only drain part of
+ it, we may later on end up with events actually pending but
+ detect_input_pending() returning false because there wasn't
+ another SIGIO. */
+ event_stream_drain_queue ();
+
+ if (!how_many)
+ return !NILP (dispatch_event_queue);
+
+ EVENT_CHAIN_LOOP (event, dispatch_event_queue)
+ {
+ if (command_event_p (event))
+ {
+ how_many--;
+ if (how_many <= 0)
+ return 1;
+ }
+ }
+ }
+
+ return 0;
}
static void
@@ -304,6 +360,8 @@
tty_event_stream->drain_queue_cb = emacs_tty_drain_queue;
tty_event_stream->create_io_streams_cb = emacs_tty_create_io_streams;
tty_event_stream->delete_io_streams_cb = emacs_tty_delete_io_streams;
+
+ last_quit_check_signal_tick_count = 0;
}
void
--
Santa Maradona, priez pour moi!