From: Paul Eggert <eggert@cs.ucla.edu>
Date: Sun, 19 May 2024 02:12:55 +0000 (-0700)
Subject: Update from Gnulib by running admin/merge-gnulib
X-Git-Url: http://git.eshelyaron.com/gitweb/?a=commitdiff_plain;h=8fe1f8756612ac17e05ceade3b6f7aaab3523b1b;p=emacs.git

Update from Gnulib by running admin/merge-gnulib

(cherry picked from commit c07160b8df4e9f795dd73f08a3399ccef465c898)
---

diff --git a/lib/sha512.c b/lib/sha512.c
index 9eb036fb327..6750041bc7b 100644
--- a/lib/sha512.c
+++ b/lib/sha512.c
@@ -35,7 +35,7 @@
 #ifdef WORDS_BIGENDIAN
 # define SWAP(n) (n)
 #else
-# define SWAP(n) bswap_64 (n)
+# define SWAP(n) u64bswap (n)
 #endif
 
 #if ! HAVE_OPENSSL_SHA512
diff --git a/lib/stdlib.in.h b/lib/stdlib.in.h
index 1888d3ee314..ef9fde30eb2 100644
--- a/lib/stdlib.in.h
+++ b/lib/stdlib.in.h
@@ -231,6 +231,14 @@ _GL_CXXALIAS_SYS (abort, void, (void));
 _GL_CXXALIASWARN (abort);
 # endif
 #endif
+#if @GNULIB_ABORT_DEBUG@ && @REPLACE_ABORT@
+_GL_EXTERN_C void _gl_pre_abort (void);
+#else
+# if !GNULIB_defined_gl_pre_abort
+#  define _gl_pre_abort() /* nothing */
+#  define GNULIB_defined_gl_pre_abort 1
+# endif
+#endif
 
 
 #if @GNULIB_FREE_POSIX@
diff --git a/lib/u64.h b/lib/u64.h
index 4eca03e985e..cfb55887578 100644
--- a/lib/u64.h
+++ b/lib/u64.h
@@ -22,8 +22,11 @@
 #error "Please include config.h first."
 #endif
 
+#include <byteswap.h>
 #include <stdint.h>
 
+#include <stdbool.h>
+
 _GL_INLINE_HEADER_BEGIN
 #ifndef _GL_U64_INLINE
 # define _GL_U64_INLINE _GL_INLINE
@@ -34,9 +37,6 @@
 extern "C" {
 #endif
 
-/* Return X rotated left by N bits, where 0 < N < 64. */
-#define u64rol(x, n) u64or (u64shl (x, n), u64shr (x, 64 - n))
-
 #ifdef UINT64_MAX
 
 /* Native implementations are trivial. See below for comments on what
@@ -53,24 +53,30 @@ typedef uint64_t u64;
 # define u64plus(x, y) ((x) + (y))
 # define u64shl(x, n) ((x) << (n))
 # define u64shr(x, n) ((x) >> (n))
+# define u64bswap(x) bswap_64 (x)
 
 #else
 
-/* u64 is a 64-bit unsigned integer value.
+# define _GL_U64_MASK32 0xfffffffful /* 2**32 - 1. 
*/ + +/* u64 represents a 64-bit unsigned integer value equal to (HI << 32) + LO. + Implement it with unsigned int, which the GNU coding standards say + is wide enough to hold 32 bits, and which does not signal an error + when adding (theoretically possible with types like uint_fast32_t). u64init (HI, LO), is like u64hilo (HI, LO), but for use in initializer contexts. */ # ifdef WORDS_BIGENDIAN -typedef struct { uint32_t hi, lo; } u64; +typedef struct { unsigned int hi, lo; } u64; # define u64init(hi, lo) { hi, lo } # else -typedef struct { uint32_t lo, hi; } u64; +typedef struct { unsigned int lo, hi; } u64; # define u64init(hi, lo) { lo, hi } # endif /* Given the high and low-order 32-bit quantities HI and LO, return a u64 value representing (HI << 32) + LO. */ _GL_U64_INLINE u64 -u64hilo (uint32_t hi, uint32_t lo) +u64hilo (unsigned int hi, unsigned int lo) { u64 r; r.hi = hi; @@ -78,9 +84,9 @@ u64hilo (uint32_t hi, uint32_t lo) return r; } -/* Return a u64 value representing LO. */ +/* Return a u64 value representing the 32-bit quantity LO. */ _GL_U64_INLINE u64 -u64lo (uint32_t lo) +u64lo (unsigned int lo) { u64 r; r.hi = 0; @@ -88,18 +94,18 @@ u64lo (uint32_t lo) return r; } -/* Return a u64 value representing SIZE. */ +/* Return a u64 value representing SIZE, where 0 <= SIZE < 2**64. */ _GL_U64_INLINE u64 u64size (size_t size) { u64 r; r.hi = size >> 31 >> 1; - r.lo = size; + r.lo = size & _GL_U64_MASK32; return r; } /* Return X < Y. */ -_GL_U64_INLINE int +_GL_U64_INLINE bool u64lt (u64 x, u64 y) { return x.hi < y.hi || (x.hi == y.hi && x.lo < y.lo); @@ -135,29 +141,29 @@ u64xor (u64 x, u64 y) return r; } -/* Return X + Y. */ +/* Return X + Y, wrapping around on overflow. */ _GL_U64_INLINE u64 u64plus (u64 x, u64 y) { u64 r; - r.lo = x.lo + y.lo; - r.hi = x.hi + y.hi + (r.lo < x.lo); + r.lo = (x.lo + y.lo) & _GL_U64_MASK32; + r.hi = (x.hi + y.hi + (r.lo < x.lo)) & _GL_U64_MASK32; return r; } -/* Return X << N. */ +/* Return X << N, where 0 <= N < 64. 
*/ _GL_U64_INLINE u64 u64shl (u64 x, int n) { u64 r; if (n < 32) { - r.hi = (x.hi << n) | (x.lo >> (32 - n)); - r.lo = x.lo << n; + r.hi = (x.hi << n & _GL_U64_MASK32) | x.lo >> (32 - n); + r.lo = x.lo << n & _GL_U64_MASK32; } else { - r.hi = x.lo << (n - 32); + r.hi = x.lo << (n - 32) & _GL_U64_MASK32; r.lo = 0; } return r; @@ -171,7 +177,7 @@ u64shr (u64 x, int n) if (n < 32) { r.hi = x.hi >> n; - r.lo = (x.hi << (32 - n)) | (x.lo >> n); + r.lo = (x.hi << (32 - n) & _GL_U64_MASK32) | x.lo >> n; } else { @@ -181,8 +187,22 @@ u64shr (u64 x, int n) return r; } +/* Return X with bytes in reverse order. */ +_GL_U64_INLINE u64 +u64bswap (u64 x) +{ + return u64hilo (bswap_32 (x.lo), bswap_32 (x.hi)); +} + #endif +/* Return X rotated left by N bits, where 0 < N < 64. */ +_GL_U64_INLINE u64 +u64rol (u64 x, int n) +{ + return u64or (u64shl (x, n), u64shr (x, 64 - n)); +} + #ifdef __cplusplus }