/* These structures may have been purecopied and shared
(bug#36447). */
+ Lisp_Object hash = make_nil_vector (size);
h->next = Fcopy_sequence (h->next);
h->index = Fcopy_sequence (h->index);
- h->hash = make_nil_vector (size);
/* Recompute the actual hash codes for each entry in the table.
Order is still invalid. */
for (ptrdiff_t i = 0; i < size; ++i)
{
Lisp_Object key = HASH_KEY (h, i);
if (!EQ (key, Qunbound))
- set_hash_hash_slot (h, i, h->test.hashfn (key, h));
+ ASET (hash, i, h->test.hashfn (key, h));
}
/* Reset the index so that any slot we don't fill below is marked
invalid. */
Ffillarray (h->index, make_fixnum (-1));
/* Rebuild the collision chains. */
for (ptrdiff_t i = 0; i < size; ++i)
- if (!NILP (HASH_HASH (h, i)))
+ if (!NILP (AREF (hash, i)))
{
- EMACS_UINT hash_code = XUFIXNUM (HASH_HASH (h, i));
+ EMACS_UINT hash_code = XUFIXNUM (AREF (hash, i));
ptrdiff_t start_of_bucket = hash_code % ASIZE (h->index);
set_hash_next_slot (h, i, HASH_INDEX (h, start_of_bucket));
set_hash_index_slot (h, start_of_bucket, i);
}
/* Do this last so that if we're interrupted, we retry on next
access. */
eassert (hash_rehash_needed_p (h));
- h->count = -h->count;
+ h->hash = hash;
eassert (!hash_rehash_needed_p (h));
}
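
For context outside the Emacs tree: `index' maps a bucket to its first entry, `next' chains colliding entries within a bucket, and `hash' caches each entry's hash code; a nil `hash' is the new "rehash pending" flag. Below is a minimal standalone sketch of the same two-pass scheme, with plain C arrays standing in for the Lisp vectors — the names and types are illustrative, not the real Emacs ones.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SIZE 8
#define NBUCKETS 8

struct toy_table
{
  int key[SIZE];               /* -1 stands in for Qunbound */
  size_t *hash;                /* NULL plays the role of a nil `hash' vector */
  ptrdiff_t next[SIZE];        /* collision chain; -1 terminates */
  ptrdiff_t index[NBUCKETS];   /* bucket -> first entry; -1 means empty */
};

static size_t
toy_hashfn (int key)
{
  return (size_t) key * 2654435761u;   /* any deterministic hash */
}

/* Same shape as hash_table_rehash above: recompute codes into a fresh
   array, reset the buckets, rebuild the chains, and only then publish
   the array, so an interruption leaves "rehash needed" observable. */
static void
toy_rehash (struct toy_table *h, size_t fresh_hash[SIZE])
{
  for (ptrdiff_t i = 0; i < SIZE; i++)
    if (h->key[i] != -1)
      fresh_hash[i] = toy_hashfn (h->key[i]);

  for (ptrdiff_t b = 0; b < NBUCKETS; b++)
    h->index[b] = -1;                  /* cf. Ffillarray (h->index, -1) */

  for (ptrdiff_t i = 0; i < SIZE; i++)
    if (h->key[i] != -1)
      {
        ptrdiff_t bucket = fresh_hash[i] % NBUCKETS;
        h->next[i] = h->index[bucket]; /* push onto the bucket's chain */
        h->index[bucket] = i;
      }

  assert (h->hash == NULL);            /* rehash really was pending */
  h->hash = fresh_hash;                /* publish last */
}

int
main (void)
{
  static size_t fresh[SIZE];
  struct toy_table h = { .hash = NULL };
  for (ptrdiff_t i = 0; i < SIZE; i++)
    h.key[i] = -1;
  h.key[0] = 7;
  h.key[3] = 42;
  toy_rehash (&h, fresh);
  printf ("entry 3 lands in bucket %td\n", (ptrdiff_t) (fresh[3] % NBUCKETS));
  return 0;
}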
/* Clear key, value, and hash. */
set_hash_key_slot (h, i, Qunbound);
set_hash_value_slot (h, i, Qnil);
- set_hash_hash_slot (h, i, Qnil);
+ if (!NILP (h->hash))
+ set_hash_hash_slot (h, i, Qnil);
eassert (h->count != 0);
- h->count += h->count > 0 ? -1 : 1;
+ h->count--;
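
A sketch of the same deletion tail in the toy style above (hypothetical names, not the Emacs API): clear the slot, write the cached hash code only when a hash vector is actually present, and decrement a count that can no longer go negative.

#include <assert.h>
#include <stddef.h>

struct toy_entry { int key; int value; };  /* -1/0 stand in for Qunbound/Qnil */

struct toy_table
{
  struct toy_entry slot[8];
  size_t *hash;        /* NULL <=> rehash pending, as in the patch */
  ptrdiff_t count;
};

/* Shape of the removal tail after the change: guard the hash write,
   and decrement a count that is now always nonnegative. */
static void
toy_remove_slot (struct toy_table *h, ptrdiff_t i)
{
  h->slot[i].key = -1;
  h->slot[i].value = 0;
  if (h->hash != NULL)   /* cf. if (!NILP (h->hash)) */
    h->hash[i] = 0;
  assert (h->count > 0); /* cf. eassert (h->count != 0) with count >= 0 */
  h->count--;
}

int
main (void)
{
  static size_t codes[8];
  struct toy_table h = { .hash = codes, .count = 1 };
  h.slot[2].key = 42;
  toy_remove_slot (&h, 2);
  assert (h.count == 0 && h.slot[2].key == -1);
  return 0;
}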
(Lisp_Object table)
{
struct Lisp_Hash_Table *h = check_hash_table (table);
- hash_rehash_if_needed (h);
+ eassert (h->count >= 0);
return make_fixnum (h->count);
}
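
Why the rehash call can simply go away: under the old scheme the sign of `count' doubled as the pending-rehash flag, so even reading the size had to rehash first; with the flag moved into `hash', `count' is always the true element count. A small contrast, using plain C stand-ins rather than the Emacs types:

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins for the two schemes; not the Emacs structs. */
struct old_style { ptrdiff_t count; };              /* count < 0 => rehash pending */
struct new_style { ptrdiff_t count; void *hash; };  /* hash == NULL => rehash pending */

static ptrdiff_t
old_style_count (struct old_style *h)
{
  if (h->count < 0)       /* reading the count meant rehashing first... */
    h->count = -h->count; /* ...since rehashing restored the sign */
  return h->count;
}

static ptrdiff_t
new_style_count (const struct new_style *h)
{
  return h->count;        /* valid whether or not a rehash is pending */
}

int
main (void)
{
  struct old_style o = { -3 };
  struct new_style n = { 3, NULL };
  assert (old_style_count (&o) == 3);
  assert (new_style_count (&n) == 3);
  return 0;
}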
hash_rehash_if_needed (XHASH_TABLE (table_orig));
Lisp_Object table_rehashed = Fcopy_hash_table (table_orig);
eassert (!hash_rehash_needed_p (XHASH_TABLE (table_rehashed)));
- XHASH_TABLE (table_rehashed)->count *= -1;
+ XHASH_TABLE (table_rehashed)->hash = Qnil;
eassert (count == 0 || hash_rehash_needed_p (XHASH_TABLE (table_rehashed)));
hash_rehash_if_needed (XHASH_TABLE (table_rehashed));
eassert (!hash_rehash_needed_p (XHASH_TABLE (table_rehashed)));
the need to rehash-on-access if we can load the dump where we
want. */
if (hash->count > 0 && !is_stable)
- hash->count = -hash->count;
+ /* Hash codes will have to be recomputed anyway, so let's not dump them.
+ Also set `hash' to nil for hash_rehash_needed_p.
+ We could also refrain from dumping the `next' and `index' vectors,
+ except that `next' is currently used for HASH_TABLE_SIZE and
+ we'd have to rebuild the next_free list as well as adjust
+ sweep_weak_hash_table for the case where there's no `index'. */
+ hash->hash = Qnil;
START_DUMP_PVEC (ctx, &hash->header, struct Lisp_Hash_Table, out);
dump_pseudovector_lisp_fields (ctx, &out->header, &hash->header);
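
Dumping `hash' as nil is safe because readers rehash on access: the first lookup after loading the dump recomputes the codes at the objects' new addresses. A hedged sketch of that pattern — the real check is hash_rehash_needed_p in the table accessors; the names below are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct lazy_table
{
  size_t *hash;  /* NULL after dump load: codes must be recomputed */
  /* ...keys, values, next/index chains as in the sketches above... */
};

static bool
toy_rehash_needed (const struct lazy_table *h)
{
  return h->hash == NULL;  /* cf. hash_rehash_needed_p */
}

/* Recompute codes from the objects' current (post-load) addresses,
   then publish, as hash_table_rehash does above. */
static void
toy_rehash (struct lazy_table *h, size_t *storage)
{
  h->hash = storage;
}

/* Every read-side entry point runs this first, so a table dumped with
   a nil `hash' repairs itself on first use. */
static void
toy_rehash_if_needed (struct lazy_table *h, size_t *storage)
{
  if (toy_rehash_needed (h))
    toy_rehash (h, storage);
}

int
main (void)
{
  static size_t codes[8];
  struct lazy_table h = { NULL };  /* as if freshly loaded from a dump */
  toy_rehash_if_needed (&h, codes);
  printf ("rehash still pending? %s\n", toy_rehash_needed (&h) ? "yes" : "no");
  return 0;
}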