(struct Lisp_Bool_Vector.size): EMACS_INT, not EMACS_UINT.
We prefer signed types, and the value cannot exceed the EMACS_INT
range anyway (because otherwise the length would not be representable).
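
A minimal sketch of the field in question (the surrounding members are trimmed and EMACS_INT is a stand-in typedef here; the real struct Lisp_Bool_Vector in lisp.h carries more header data):

  /* Sketch only, not the full Emacs definition.  The point is that the
     bit count is a signed EMACS_INT, since a bool-vector's length can
     always be represented in that range.  */
  typedef long EMACS_INT;

  struct Lisp_Bool_Vector
    {
      EMACS_INT size;           /* number of bits; signed, not EMACS_UINT */
      unsigned char data[1];    /* the bits, packed into bytes */
    };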
+ (XSET) [USE_LISP_UNION_TYPE]: Use uintptr_t and intptr_t,
+ not EMACS_UINT and EMACS_INT, when converting pointer to integer.
+ This avoids a GCC warning when WIDE_EMACS_INT.
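
The situation the entry describes can be pictured with a standalone fragment (not from the Emacs tree; the typedefs below assume the WIDE_EMACS_INT case, i.e. a 64-bit EMACS_INT on a platform whose pointers are narrower):

  #include <stdint.h>

  typedef long long EMACS_INT;            /* 64-bit even where pointers are 32-bit */
  typedef unsigned long long EMACS_UINT;

  EMACS_UINT
  pointer_as_word (void *ptr)
  {
    /* Casting ptr straight to EMACS_UINT is what drew the GCC warning
       when the sizes differ; uintptr_t matches the pointer's width
       exactly, and widening the result afterwards is an ordinary
       integer conversion.  */
    return (EMACS_UINT) (uintptr_t) ptr;
  }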
* indent.c (sane_tab_width): New function.
(current_column, scan_for_column, Findent_to, position_indentation)
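
The entry does not show the body of sane_tab_width; purely as a hypothetical sketch (the bounds and the fallback value of 8 are assumptions, not taken from the patch), a tab-width sanity check might look like:

  /* Hypothetical sketch, not the actual Emacs code: return the given
     tab width if it is usable, otherwise fall back to 8.  The upper
     bound keeps later column arithmetic from overflowing.  */
  static int
  sane_tab_width (int tab_width)
  {
    return (0 < tab_width && tab_width <= 1000) ? tab_width : 8;
  }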
#ifdef USE_LSB_TAG
# define XSET(var, vartype, ptr) \
- (eassert ((((EMACS_UINT) (ptr)) & ((1 << GCTYPEBITS) - 1)) == 0), \
- (var).u.val = ((EMACS_UINT) (ptr)) >> GCTYPEBITS, \
+ (eassert ((((uintptr_t) (ptr)) & ((1 << GCTYPEBITS) - 1)) == 0), \
+ (var).u.val = ((uintptr_t) (ptr)) >> GCTYPEBITS, \
(var).u.type = ((char) (vartype)))
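
Outside the macro, the same packing can be written as ordinary functions to make the invariant explicit (a standalone sketch, not Emacs code; GCTYPEBITS is given a placeholder value): a pointer aligned to 1 << GCTYPEBITS has zero low bits, so it survives being stored shifted down and is recovered by shifting back up.

  #include <assert.h>
  #include <stdint.h>

  enum { GCTYPEBITS = 3 };      /* placeholder; Emacs configures its own value */

  /* Pack an object pointer LSB-tag style: the pointer must be aligned
     to 1 << GCTYPEBITS, so its low bits carry no information and can
     be dropped by the shift.  */
  uintptr_t
  pack_pointer (void *ptr)
  {
    uintptr_t p = (uintptr_t) ptr;
    assert ((p & ((1 << GCTYPEBITS) - 1)) == 0);   /* same check as the eassert */
    return p >> GCTYPEBITS;
  }

  /* Recover the original pointer by undoing the shift.  */
  void *
  unpack_pointer (uintptr_t val)
  {
    return (void *) (val << GCTYPEBITS);
  }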
/* Some versions of gcc seem to consider the bitfield width when issuing
   the "cast to pointer from integer of different size" warning, so the
   cast is here to widen the value back to its natural size.  */
# define XSETFASTINT(a, b) ((a).i = (b))
# define XSET(var, vartype, ptr) \
- (((var).s.val = ((EMACS_INT) (ptr))), ((var).s.type = ((char) (vartype))))
+ (((var).s.val = ((intptr_t) (ptr))), ((var).s.type = ((char) (vartype))))
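
For orientation, here is a rough picture of the union these macros store into (the field widths, names, and ordering are illustrative only; the real Lisp_Object union is configured by VALBITS, GCTYPEBITS, and endianness, and also has an unsigned variant u): the i member views the word as a plain integer, which is why XSETFASTINT can assign through it in one step, while s splits the word into a signed value bitfield plus a type tag.

  #include <stdint.h>

  enum { GCTYPEBITS = 3,                             /* placeholder value */
         VALBITS = 8 * sizeof (intptr_t) - GCTYPEBITS };

  /* Illustrative layout only.  Non-int bit-field types are a compiler
     extension that this sketch (like Emacs itself) relies on.  */
  union lisp_word_sketch
  {
    intptr_t i;                         /* the whole word as a plain integer */
    struct
    {
      intptr_t val : VALBITS;           /* pointer or integer payload (signed) */
      unsigned int type : GCTYPEBITS;   /* Lisp type tag */
    } s;
  };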
#ifdef DATA_SEG_BITS
/* DATA_SEG_BITS forces extra bits to be or'd in with any pointers
   which were stored in a Lisp_Object.  */
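
A sketch of what that reconstruction amounts to (hypothetical: the constant below is a made-up placeholder, and the helper name is not from lisp.h):

  #include <stdint.h>

  /* Placeholder value; on a real port DATA_SEG_BITS comes from the
     machine configuration.  */
  #define DATA_SEG_BITS ((uintptr_t) 0x20000000)

  /* Hypothetical sketch: reconstruct a pointer by or'ing the fixed
     data-segment bits back into the value that was stored in the
     Lisp_Object.  */
  void *
  pointer_from_stored_bits (uintptr_t stored)
  {
    return (void *) (stored | DATA_SEG_BITS);
  }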