#error "Please include config.h first."
#endif
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <byteswap.h>

_GL_INLINE_HEADER_BEGIN
#ifndef _GL_U64_INLINE
# define _GL_U64_INLINE _GL_INLINE
#endif
-/* Return X rotated left by N bits, where 0 < N < 64. */
-#define u64rol(x, n) u64or (u64shl (x, n), u64shr (x, 64 - n))
-
#ifdef UINT64_MAX
/* Native implementations are trivial.  See below for comments on what
   these operations do.  */
# define u64plus(x, y) ((x) + (y))
# define u64shl(x, n) ((x) << (n))
# define u64shr(x, n) ((x) >> (n))
# define u64bswap(x) bswap_64 (x)
#else
# define _GL_U64_MASK32 0xfffffffful /* 2**32 - 1.  */

/* u64 represents a 64-bit unsigned integer value equal to (HI << 32) + LO.
   Implement it with unsigned int, which the GNU coding standards say
   is wide enough to hold 32 bits, and which does not signal an error
   when adding (theoretically possible with types like uint_fast32_t).
   u64init (HI, LO), is like u64hilo (HI, LO), but for use in
   initializer contexts.  */
# ifdef WORDS_BIGENDIAN
typedef struct { unsigned int hi, lo; } u64;
# define u64init(hi, lo) { hi, lo }
# else
typedef struct { unsigned int lo, hi; } u64;
# define u64init(hi, lo) { lo, hi }
# endif
/* Given the high and low-order 32-bit quantities HI and LO, return a u64
value representing (HI << 32) + LO. */
_GL_U64_INLINE u64
-u64hilo (uint32_t hi, uint32_t lo)
+u64hilo (unsigned int hi, unsigned int lo)
{
u64 r;
r.hi = hi;
return r;
}
-/* Return a u64 value representing LO. */
+/* Return a u64 value representing the 32-bit quantity LO. */
_GL_U64_INLINE u64
-u64lo (uint32_t lo)
+u64lo (unsigned int lo)
{
u64 r;
r.hi = 0;
return r;
}
-/* Return a u64 value representing SIZE. */
+/* Return a u64 value representing SIZE, where 0 <= SIZE < 2**64. */
_GL_U64_INLINE u64
u64size (size_t size)
{
u64 r;
r.hi = size >> 31 >> 1;
- r.lo = size;
+ r.lo = size & _GL_U64_MASK32;
return r;
}
/* Return X < Y. */
-_GL_U64_INLINE int
+_GL_U64_INLINE bool
u64lt (u64 x, u64 y)
{
return x.hi < y.hi || (x.hi == y.hi && x.lo < y.lo);
return r;
}
-/* Return X + Y. */
+/* Return X + Y, wrapping around on overflow. */
_GL_U64_INLINE u64
u64plus (u64 x, u64 y)
{
u64 r;
- r.lo = x.lo + y.lo;
- r.hi = x.hi + y.hi + (r.lo < x.lo);
+ r.lo = (x.lo + y.lo) & _GL_U64_MASK32;
+ r.hi = (x.hi + y.hi + (r.lo < x.lo)) & _GL_U64_MASK32;
return r;
}
-/* Return X << N. */
+/* Return X << N, where 0 <= N < 64. */
_GL_U64_INLINE u64
u64shl (u64 x, int n)
{
u64 r;
if (n < 32)
{
- r.hi = (x.hi << n) | (x.lo >> (32 - n));
- r.lo = x.lo << n;
+ r.hi = (x.hi << n & _GL_U64_MASK32) | x.lo >> (32 - n);
+ r.lo = x.lo << n & _GL_U64_MASK32;
}
else
{
- r.hi = x.lo << (n - 32);
+ r.hi = x.lo << (n - 32) & _GL_U64_MASK32;
r.lo = 0;
}
return r;
if (n < 32)
{
r.hi = x.hi >> n;
- r.lo = (x.hi << (32 - n)) | (x.lo >> n);
+ r.lo = (x.hi << (32 - n) & _GL_U64_MASK32) | x.lo >> n;
}
else
{
return r;
}
+/* Return X with bytes in reverse order. */
+_GL_U64_INLINE u64
+u64bswap (u64 x)
+{
+ return u64hilo (bswap_32 (x.lo), bswap_32 (x.hi));
+}
+
#endif
+/* Return X rotated left by N bits, where 0 < N < 64. */
+_GL_U64_INLINE u64
+u64rol (u64 x, int n)
+{
+ return u64or (u64shl (x, n), u64shr (x, 64 - n));
+}
+
#ifdef __cplusplus
}