#undef ELEMENT
#undef EQUAL
+/* Counter used by rarely_quit in replace-buffer-contents. */
+static unsigned short rbc_quitcounter;
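For context: rarely_quit, as defined in Emacs's lisp.h, only reaches the
comparatively expensive maybe_quit when its 16-bit guard is zero, that
is, about once every 65536 calls. A minimal stand-alone sketch of the
idiom follows; the names "rarely_check" and "quitcounter" here are
illustrative, not the Emacs definitions:

#include <stdio.h>

static unsigned short quitcounter;

/* Run the expensive check (maybe_quit in Emacs) only when the 16-bit
   guard has wrapped around to zero.  */
static void
rarely_check (unsigned short guard)
{
  if (guard == 0)
    puts ("checking for quit");  /* stands in for maybe_quit */
}

int
main (void)
{
  for (long i = 0; i < 200000; i++)
    rarely_check (++quitcounter);  /* fires 3 times, at each wrap */
  return 0;
}

Call sites pass a pre-incremented counter, as the patch below does.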
+
#define XVECREF_YVECREF_EQUAL(ctx, xoff, yoff) \
buffer_chars_equal ((ctx), (xoff), (yoff))
/* Buffers to compare. */ \
struct buffer *buffer_a; \
struct buffer *buffer_b; \
+ /* Whether each buffer is unibyte/plain-ASCII or not. */ \
+ bool a_unibyte; \
+ bool b_unibyte; \
/* Bit vectors recording for each character whether it was deleted
or inserted. */ \
unsigned char *deletions; \
struct context ctx = {
.buffer_a = a,
.buffer_b = b,
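+ /* Character and byte positions in a buffer coincide up to ZV
+ exactly when every character there occupies a single byte, so
+ comparing ZV with ZV_BYTE detects unibyte or all-ASCII buffers
+ cheaply, once for the whole comparison. */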
+ .a_unibyte = BUF_ZV (a) == BUF_ZV_BYTE (a),
+ .b_unibyte = BUF_ZV (b) == BUF_ZV_BYTE (b),
.deletions = SAFE_ALLOCA (del_bytes),
.insertions = SAFE_ALLOCA (ins_bytes),
.fdiag = buffer + size_b + 1,
early. */
eassert (! early_abort);
+ rbc_quitcounter = 0;
+
Fundo_boundary ();
+ bool modification_hooks_inhibited = false;
ptrdiff_t count = SPECPDL_INDEX ();
record_unwind_protect (save_excursion_restore, save_excursion_save ());
+ /* We are going to make a lot of small modifications, and having the
+ modification hooks called for each of them would slow us down.
+ Instead, we announce a single modification for the entire
+ modified region. But don't do that if the caller already inhibited
+ modification hooks, since in that case no hooks should run at all. */
+ if (!inhibit_modification_hooks)
+ {
+ prepare_to_modify_buffer (BEGV, ZV, NULL);
+ specbind (Qinhibit_modification_hooks, Qt);
+ modification_hooks_inhibited = true;
+ }
+
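Schematically, the hook bookkeeping above pairs with the
signal_after_change call at the end of the function. Below is a
hypothetical stand-alone outline of that announce-once pattern, using
the real Emacs API names but eliding the edit loop and error handling;
the helper name is illustrative, and this will not compile outside the
Emacs core:

static void
edit_with_single_announcement (ptrdiff_t old_size)
{
  ptrdiff_t count = SPECPDL_INDEX ();
  /* Run the before-change hooks once, for the whole region.  */
  prepare_to_modify_buffer (BEGV, ZV, NULL);
  /* Suppress the per-edit hooks until the matching unbind_to.  */
  specbind (Qinhibit_modification_hooks, Qt);
  /* ... many small deletions and insertions go here ... */
  unbind_to (count, Qnil);
  /* Report one aggregate change: old_size characters deleted,
     ZV - BEGV characters inserted, starting at BEGV.  */
  signal_after_change (BEGV, old_size, ZV - BEGV);
}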
ptrdiff_t i = size_a;
ptrdiff_t j = size_b;
/* Walk backwards through the lists of changes. This was also
how the Lisp implementation of this function used to do it. */
while (i >= 0 || j >= 0)
{
/* Allow the user to quit if this gets too slow. */
- maybe_quit ();
+ rarely_quit (++rbc_quitcounter);
/* Check whether there is a change (insertion or deletion)
before the current position. */
if ((i > 0 && bit_is_set (ctx.deletions, i - 1))
    || (j > 0 && bit_is_set (ctx.insertions, j - 1)))
{
- maybe_quit ();
-
ptrdiff_t end_a = min_a + i;
ptrdiff_t end_b = min_b + j;
/* Find the beginning of the current change run. */
while (i > 0 && bit_is_set (ctx.deletions, i - 1))
--i;
while (j > 0 && bit_is_set (ctx.insertions, j - 1))
--j;
+
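+ /* The runs skipped above can be arbitrarily long, so allow
+ quitting here as well. */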
+ rarely_quit (++rbc_quitcounter);
+
ptrdiff_t beg_a = min_a + i;
ptrdiff_t beg_b = min_b + j;
- eassert (beg_a >= BEGV);
- eassert (beg_b >= BUF_BEGV (b));
eassert (beg_a <= end_a);
eassert (beg_b <= end_b);
- eassert (end_a <= ZV);
- eassert (end_b <= BUF_ZV (b));
eassert (beg_a < end_a || beg_b < end_b);
if (beg_a < end_a)
del_range (beg_a, end_a);
}
unbind_to (count, Qnil);
SAFE_FREE ();
+ rbc_quitcounter = 0;
+
+ if (modification_hooks_inhibited)
+ {
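+ /* Announce the entire accessible region as a single change:
+ size_a characters (the original contents) were deleted, and
+ ZV - BEGV characters (the current contents) were inserted. */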
+ signal_after_change (BEGV, size_a, ZV - BEGV);
+ update_compositions (BEGV, ZV, CHECK_BORDER);
+ }
return Qnil;
}
/* Return true if the characters at position POS_A of buffer
CTX->buffer_a and at position POS_B of buffer CTX->buffer_b are
equal. POS_A and POS_B are zero-based. Text properties are
- ignored. */
+ ignored.
+
+ Implementation note: this function is called inside the innermost
+ loops of compareseq, so it absolutely must be optimized for speed,
+ every last bit of it. E.g., each additional use of BEGV or the
+ like will slow down replace-buffer-contents by tens of percent,
+ because builtin_lisp_symbol will be called one more time in the
+ innermost loop. */
static bool
buffer_chars_equal (struct context *ctx,
ptrdiff_t pos_a, ptrdiff_t pos_b)
{
- eassert (pos_a >= 0);
pos_a += BUF_BEGV (ctx->buffer_a);
- eassert (pos_a >= BUF_BEGV (ctx->buffer_a));
- eassert (pos_a < BUF_ZV (ctx->buffer_a));
-
- eassert (pos_b >= 0);
pos_b += BUF_BEGV (ctx->buffer_b);
- eassert (pos_b >= BUF_BEGV (ctx->buffer_b));
- eassert (pos_b < BUF_ZV (ctx->buffer_b));
-
- bool a_unibyte = BUF_ZV (ctx->buffer_a) == BUF_ZV_BYTE (ctx->buffer_a);
- bool b_unibyte = BUF_ZV (ctx->buffer_b) == BUF_ZV_BYTE (ctx->buffer_b);
/* Allow the user to escape out of a slow compareseq call. */
- maybe_quit ();
+ rarely_quit (++rbc_quitcounter);
ptrdiff_t bpos_a =
- a_unibyte ? pos_a : buf_charpos_to_bytepos (ctx->buffer_a, pos_a);
+ ctx->a_unibyte ? pos_a : buf_charpos_to_bytepos (ctx->buffer_a, pos_a);
ptrdiff_t bpos_b =
- b_unibyte ? pos_b : buf_charpos_to_bytepos (ctx->buffer_b, pos_b);
+ ctx->b_unibyte ? pos_b : buf_charpos_to_bytepos (ctx->buffer_b, pos_b);
- if (a_unibyte && b_unibyte)
+ /* We spell out the cases below as a series of specific tests to
+ avoid using BUF_FETCH_CHAR_AS_MULTIBYTE, which references Lisp
+ symbols and is therefore significantly slower (see the
+ implementation note in the commentary to this function). */
+ if (ctx->a_unibyte && ctx->b_unibyte)
return BUF_FETCH_BYTE (ctx->buffer_a, bpos_a)
== BUF_FETCH_BYTE (ctx->buffer_b, bpos_b);
-
- return BUF_FETCH_CHAR_AS_MULTIBYTE (ctx->buffer_a, bpos_a)
- == BUF_FETCH_CHAR_AS_MULTIBYTE (ctx->buffer_b, bpos_b);
+ if (ctx->a_unibyte && !ctx->b_unibyte)
+ return UNIBYTE_TO_CHAR (BUF_FETCH_BYTE (ctx->buffer_a, bpos_a))
+ == BUF_FETCH_MULTIBYTE_CHAR (ctx->buffer_b, bpos_b);
+ if (!ctx->a_unibyte && ctx->b_unibyte)
+ return BUF_FETCH_MULTIBYTE_CHAR (ctx->buffer_a, bpos_a)
+ == UNIBYTE_TO_CHAR (BUF_FETCH_BYTE (ctx->buffer_b, bpos_b));
+ return BUF_FETCH_MULTIBYTE_CHAR (ctx->buffer_a, bpos_a)
+ == BUF_FETCH_MULTIBYTE_CHAR (ctx->buffer_b, bpos_b);
}
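The two mixed cases above rely on UNIBYTE_TO_CHAR to map raw bytes
into the character space before comparing. A self-contained sketch of
that mapping, mirroring Emacs's character.h, where eight-bit raw bytes
occupy the reserved range 0x3FFF80..0x3FFFFF; the helper name is
illustrative:

/* ASCII bytes represent themselves; raw bytes 0x80..0xFF map into a
   reserved character range, so they can never compare equal to a
   genuine multibyte character.  */
static int
unibyte_to_char_sketch (unsigned char byte)
{
  return byte < 0x80 ? byte : byte + 0x3FFF00;
}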
\f