author    Jonathan Protzenko <protz@microsoft.com>  2023-02-07 02:11:01 (GMT)
committer GitHub <noreply@github.com>               2023-02-07 02:11:01 (GMT)
commit    1fcc0efdaa84b3602c236391633b70ff36df149b (patch)
tree      c3be6de92320d8e82e9d94849ec22f9268b6dfc3 /Modules/_hacl/include/krml
parent    914f8fd9f7fc5e48b54d938a68c932cc618ef3a6 (diff)
gh-99108: Replace SHA2-224 & 256 with verified code from HACL* (#99109)
This is the first PR in a series replacing hashlib primitives (for the non-OpenSSL case) with verified implementations from HACL*. It focuses specifically on SHA2-256 and SHA2-224.

This PR imports Hacl_Streaming_SHA2 into the Python tree. This is the HACL* implementation of SHA2, which combines a core implementation of SHA2 with a layer of buffer management that allows updating the digest with any number of bytes. This supersedes the previous implementation in the tree.

@franziskuskiefer was kind enough to benchmark the changes: in addition to being verified (thus providing significant safety and security improvements), this implementation also delivers a sizeable performance boost!

```
---------------------------------------------------------------
Benchmark                  Time           CPU       Iterations
---------------------------------------------------------------
Sha2_256_Streaming      3163 ns       3160 ns          219353   // this PR
LibTomCrypt_Sha2_256    5057 ns       5056 ns          136234   // library currently used by Python
```

The changes in this PR are as follows:
- import the subset of HACL* that covers SHA2-256/224 into `Modules/_hacl`
- rewire sha256module.c to use the HACL* implementation

Co-authored-by: Gregory P. Smith [Google LLC] <greg@krypto.org>
Co-authored-by: Erlend E. Aasland <erlend.aasland@protonmail.com>
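For orientation, here is a minimal sketch of how the imported streaming layer is driven. The function and type names are assumed from the Hacl_Streaming_SHA2_* naming convention used by this import; check Modules/_hacl/Hacl_Streaming_SHA2.h for the exact signatures.

```c
/* Minimal sketch, assuming the Hacl_Streaming_SHA2 API named below;
 * verify against Modules/_hacl/Hacl_Streaming_SHA2.h. */
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Streaming_SHA2.h"

int main(void)
{
    /* One state = core SHA2 block state + an internal partial-block
     * buffer, which is what lets update accept chunks of any size. */
    Hacl_Streaming_SHA2_state_sha2_256 *st = Hacl_Streaming_SHA2_create_in_256();
    uint8_t chunk1[] = { 'a', 'b', 'c' };
    uint8_t chunk2[] = { 'd', 'e' };
    Hacl_Streaming_SHA2_update_256(st, chunk1, 3);
    Hacl_Streaming_SHA2_update_256(st, chunk2, 2);

    uint8_t digest[32];                         /* SHA2-256 output */
    Hacl_Streaming_SHA2_finish_256(st, digest);
    Hacl_Streaming_SHA2_free_256(st);

    for (int i = 0; i < 32; i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}
```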
Diffstat (limited to 'Modules/_hacl/include/krml')
-rw-r--r--  Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h  | 109
-rw-r--r--  Modules/_hacl/include/krml/internal/target.h        | 218
-rw-r--r--  Modules/_hacl/include/krml/lowstar_endianness.h     | 230
3 files changed, 557 insertions, 0 deletions
diff --git a/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h b/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
new file mode 100644
index 0000000..3e2e4b3
--- /dev/null
+++ b/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
@@ -0,0 +1,109 @@
+/*
+ Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License.
+*/
+
+
+#ifndef __FStar_UInt_8_16_32_64_H
+#define __FStar_UInt_8_16_32_64_H
+
+
+
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include "krml/lowstar_endianness.h"
+#include "krml/FStar_UInt_8_16_32_64.h"
+#include "krml/internal/target.h"
+static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
+{
+ uint64_t x = a ^ b;
+ uint64_t minus_x = ~x + (uint64_t)1U;
+ uint64_t x_or_minus_x = x | minus_x;
+ uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
+ return xnx - (uint64_t)1U;
+}
+
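+/* Branch-free: returns 0xFFFFFFFFFFFFFFFF if a >= b, 0 otherwise. */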
+static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
+{
+ uint64_t x = a;
+ uint64_t y = b;
+ uint64_t x_xor_y = x ^ y;
+ uint64_t x_sub_y = x - y;
+ uint64_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint64_t q = x_xor_y | x_sub_y_xor_y;
+ uint64_t x_xor_q = x ^ q;
+ uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
+ return x_xor_q_ - (uint64_t)1U;
+}
+
+static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
+{
+ uint32_t x = a ^ b;
+ uint32_t minus_x = ~x + (uint32_t)1U;
+ uint32_t x_or_minus_x = x | minus_x;
+ uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
+ return xnx - (uint32_t)1U;
+}
+
+static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
+{
+ uint32_t x = a;
+ uint32_t y = b;
+ uint32_t x_xor_y = x ^ y;
+ uint32_t x_sub_y = x - y;
+ uint32_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint32_t q = x_xor_y | x_sub_y_xor_y;
+ uint32_t x_xor_q = x ^ q;
+ uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
+ return x_xor_q_ - (uint32_t)1U;
+}
+
+static inline uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
+{
+ uint16_t x = a ^ b;
+ uint16_t minus_x = ~x + (uint16_t)1U;
+ uint16_t x_or_minus_x = x | minus_x;
+ uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
+ return xnx - (uint16_t)1U;
+}
+
+static inline uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
+{
+ uint16_t x = a;
+ uint16_t y = b;
+ uint16_t x_xor_y = x ^ y;
+ uint16_t x_sub_y = x - y;
+ uint16_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint16_t q = x_xor_y | x_sub_y_xor_y;
+ uint16_t x_xor_q = x ^ q;
+ uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
+ return x_xor_q_ - (uint16_t)1U;
+}
+
+static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
+{
+ uint8_t x = a ^ b;
+ uint8_t minus_x = ~x + (uint8_t)1U;
+ uint8_t x_or_minus_x = x | minus_x;
+ uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
+ return xnx - (uint8_t)1U;
+}
+
+static inline uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
+{
+ uint8_t x = a;
+ uint8_t y = b;
+ uint8_t x_xor_y = x ^ y;
+ uint8_t x_sub_y = x - y;
+ uint8_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint8_t q = x_xor_y | x_sub_y_xor_y;
+ uint8_t x_xor_q = x ^ q;
+ uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
+ return x_xor_q_ - (uint8_t)1U;
+}
+
+
+#define __FStar_UInt_8_16_32_64_H_DEFINED
+#endif
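The header above consists entirely of branch-free comparison masks: each eq_mask returns an all-ones word when its two arguments are equal (and zero otherwise), and each gte_mask does the same for >=. Callers combine the mask with bitwise operations to select between values in constant time, which is what makes these suitable for cryptographic code. A small self-contained illustration of the 32-bit construction (the helper names here are illustrative, not part of the header):

```c
#include <assert.h>
#include <stdint.h>

/* Restatement of FStar_UInt32_eq_mask: x = a ^ b is zero iff a == b,
 * and (x | -x) has its top bit set exactly when x != 0, so shifting
 * that bit down and subtracting 1 yields all-ones or zero. */
static uint32_t eq_mask32(uint32_t a, uint32_t b)
{
    uint32_t x = a ^ b;
    return ((x | (~x + 1U)) >> 31) - 1U;
}

/* Restatement of FStar_UInt32_gte_mask: q exposes, branch-free,
 * whether x - y borrows (i.e. x < y); the final shift-and-subtract
 * turns that single bit into a full-width mask. */
static uint32_t gte_mask32(uint32_t x, uint32_t y)
{
    uint32_t q = (x ^ y) | ((x - y) ^ y);
    return ((x ^ q) >> 31) - 1U;
}

int main(void)
{
    assert(eq_mask32(42, 42) == 0xFFFFFFFFU && eq_mask32(42, 43) == 0U);
    assert(gte_mask32(5, 3) == 0xFFFFFFFFU && gte_mask32(3, 5) == 0U);
    /* Typical constant-time use: pick one of two values without a
     * data-dependent branch. */
    uint32_t m = eq_mask32(1, 1);
    assert(((0x11U & m) | (0x22U & ~m)) == 0x11U);
    return 0;
}
```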
diff --git a/Modules/_hacl/include/krml/internal/target.h b/Modules/_hacl/include/krml/internal/target.h
new file mode 100644
index 0000000..9ef5985
--- /dev/null
+++ b/Modules/_hacl/include/krml/internal/target.h
@@ -0,0 +1,218 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License. */
+
+#ifndef __KRML_TARGET_H
+#define __KRML_TARGET_H
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <assert.h>
+
+/* Since KaRaMeL emits the inline keyword unconditionally, we follow the
+ * guidelines at https://gcc.gnu.org/onlinedocs/gcc/Inline.html and make this
+ * __inline__ to ensure the code compiles with -std=c90 and earlier. */
+#ifdef __GNUC__
+# define inline __inline__
+#endif
+
+#ifndef KRML_HOST_MALLOC
+# define KRML_HOST_MALLOC malloc
+#endif
+
+#ifndef KRML_HOST_CALLOC
+# define KRML_HOST_CALLOC calloc
+#endif
+
+#ifndef KRML_HOST_FREE
+# define KRML_HOST_FREE free
+#endif
+
+/* Macros for prettier unrolling of loops */
+#define KRML_LOOP1(i, n, x) { \
+ x \
+ i += n; \
+}
+
+#define KRML_LOOP2(i, n, x) \
+ KRML_LOOP1(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP3(i, n, x) \
+ KRML_LOOP2(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP4(i, n, x) \
+ KRML_LOOP2(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP5(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP6(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP7(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP3(i, n, x)
+
+#define KRML_LOOP8(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP4(i, n, x)
+
+#define KRML_LOOP9(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP10(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP11(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP3(i, n, x)
+
+#define KRML_LOOP12(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP4(i, n, x)
+
+#define KRML_LOOP13(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP5(i, n, x)
+
+#define KRML_LOOP14(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP6(i, n, x)
+
+#define KRML_LOOP15(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP7(i, n, x)
+
+#define KRML_LOOP16(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP8(i, n, x)
+
+#define KRML_UNROLL_FOR(i, z, n, k, x) do { \
+ uint32_t i = z; \
+ KRML_LOOP##n(i, k, x) \
+} while (0)
+
+#define KRML_ACTUAL_FOR(i, z, n, k, x) \
+ do { \
+ for (uint32_t i = z; i < n; i += k) { \
+ x \
+ } \
+ } while (0)
+
+#ifndef KRML_UNROLL_MAX
+#define KRML_UNROLL_MAX 16
+#endif
+
+/* The numeric suffix N in KRML_MAYBE_FORN is the number of loop
+ * iterations, i.e. (n - z)/k as evaluated by krml */
+#if 0 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR0(i, z, n, k, x)
+#else
+#define KRML_MAYBE_FOR0(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 1 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 1, k, x)
+#else
+#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 2 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 2, k, x)
+#else
+#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 3 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 3, k, x)
+#else
+#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 4 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x)
+#else
+#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 5 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 5, k, x)
+#else
+#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 6 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 6, k, x)
+#else
+#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 7 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 7, k, x)
+#else
+#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 8 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 8, k, x)
+#else
+#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 9 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 9, k, x)
+#else
+#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 10 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 10, k, x)
+#else
+#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 11 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 11, k, x)
+#else
+#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 12 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 12, k, x)
+#else
+#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 13 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 13, k, x)
+#else
+#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 14 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 14, k, x)
+#else
+#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 15 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 15, k, x)
+#else
+#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 16 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 16, k, x)
+#else
+#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+#endif
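The KRML_MAYBE_FORn family above either fully unrolls a loop of n iterations (when n <= KRML_UNROLL_MAX) or falls back to a genuine for loop. A compressed, self-contained sketch showing how KRML_MAYBE_FOR4 expands into four copies of the loop body with no runtime counter test:

```c
#include <stdint.h>
#include <stdio.h>

/* Just enough of the macros above to demonstrate the expansion. */
#define KRML_LOOP1(i, n, x) { x i += n; }
#define KRML_LOOP2(i, n, x) KRML_LOOP1(i, n, x) KRML_LOOP1(i, n, x)
#define KRML_LOOP4(i, n, x) KRML_LOOP2(i, n, x) KRML_LOOP2(i, n, x)
#define KRML_UNROLL_FOR(i, z, n, k, x) do { \
    uint32_t i = z;                         \
    KRML_LOOP##n(i, k, x)                   \
} while (0)
#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x)

int main(void)
{
    uint32_t acc = 0;
    /* Expands to four copies of "acc += i; i += 1U;" with i taking the
     * values 0, 1, 2, 3: no loop branch survives in the generated code. */
    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, acc += i;);
    printf("%u\n", (unsigned)acc); /* prints 6 */
    return 0;
}
```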
diff --git a/Modules/_hacl/include/krml/lowstar_endianness.h b/Modules/_hacl/include/krml/lowstar_endianness.h
new file mode 100644
index 0000000..32a7391
--- /dev/null
+++ b/Modules/_hacl/include/krml/lowstar_endianness.h
@@ -0,0 +1,230 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License. */
+
+#ifndef __LOWSTAR_ENDIANNESS_H
+#define __LOWSTAR_ENDIANNESS_H
+
+#include <string.h>
+#include <inttypes.h>
+
+/******************************************************************************/
+/* Implementing C.fst (part 2: endian-ness macros) */
+/******************************************************************************/
+
+/* ... for Linux */
+#if defined(__linux__) || defined(__CYGWIN__) || defined (__USE_SYSTEM_ENDIAN_H__) || defined(__GLIBC__)
+# include <endian.h>
+
+/* ... for OSX */
+#elif defined(__APPLE__)
+# include <libkern/OSByteOrder.h>
+# define htole64(x) OSSwapHostToLittleInt64(x)
+# define le64toh(x) OSSwapLittleToHostInt64(x)
+# define htobe64(x) OSSwapHostToBigInt64(x)
+# define be64toh(x) OSSwapBigToHostInt64(x)
+
+# define htole16(x) OSSwapHostToLittleInt16(x)
+# define le16toh(x) OSSwapLittleToHostInt16(x)
+# define htobe16(x) OSSwapHostToBigInt16(x)
+# define be16toh(x) OSSwapBigToHostInt16(x)
+
+# define htole32(x) OSSwapHostToLittleInt32(x)
+# define le32toh(x) OSSwapLittleToHostInt32(x)
+# define htobe32(x) OSSwapHostToBigInt32(x)
+# define be32toh(x) OSSwapBigToHostInt32(x)
+
+/* ... for Solaris */
+#elif defined(__sun__)
+# include <sys/byteorder.h>
+# define htole64(x) LE_64(x)
+# define le64toh(x) LE_64(x)
+# define htobe64(x) BE_64(x)
+# define be64toh(x) BE_64(x)
+
+# define htole16(x) LE_16(x)
+# define le16toh(x) LE_16(x)
+# define htobe16(x) BE_16(x)
+# define be16toh(x) BE_16(x)
+
+# define htole32(x) LE_32(x)
+# define le32toh(x) LE_32(x)
+# define htobe32(x) BE_32(x)
+# define be32toh(x) BE_32(x)
+
+/* ... for the BSDs */
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+# include <sys/endian.h>
+#elif defined(__OpenBSD__)
+# include <endian.h>
+
+/* ... for Windows (MSVC)... not targeting XBOX 360! */
+#elif defined(_MSC_VER)
+
+# include <stdlib.h>
+# define htobe16(x) _byteswap_ushort(x)
+# define htole16(x) (x)
+# define be16toh(x) _byteswap_ushort(x)
+# define le16toh(x) (x)
+
+# define htobe32(x) _byteswap_ulong(x)
+# define htole32(x) (x)
+# define be32toh(x) _byteswap_ulong(x)
+# define le32toh(x) (x)
+
+# define htobe64(x) _byteswap_uint64(x)
+# define htole64(x) (x)
+# define be64toh(x) _byteswap_uint64(x)
+# define le64toh(x) (x)
+
+/* ... for Windows (GCC-like, e.g. mingw or clang) */
+#elif (defined(_WIN32) || defined(_WIN64)) && \
+ (defined(__GNUC__) || defined(__clang__))
+
+# define htobe16(x) __builtin_bswap16(x)
+# define htole16(x) (x)
+# define be16toh(x) __builtin_bswap16(x)
+# define le16toh(x) (x)
+
+# define htobe32(x) __builtin_bswap32(x)
+# define htole32(x) (x)
+# define be32toh(x) __builtin_bswap32(x)
+# define le32toh(x) (x)
+
+# define htobe64(x) __builtin_bswap64(x)
+# define htole64(x) (x)
+# define be64toh(x) __builtin_bswap64(x)
+# define le64toh(x) (x)
+
+/* ... generic big-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+/* byte swapping code inspired by:
+ * https://github.com/rweather/arduinolibs/blob/master/libraries/Crypto/utility/EndianUtil.h
+ * */
+
+# define htobe32(x) (x)
+# define be32toh(x) (x)
+# define htole32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+# define le32toh(x) (htole32((x)))
+
+# define htobe64(x) (x)
+# define be64toh(x) (x)
+# define htole64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htole32((uint32_t)__temp); \
+ uint32_t __high = htole32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+# define le64toh(x) (htole64((x)))
+
+/* ... generic little-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+# define htole32(x) (x)
+# define le32toh(x) (x)
+# define htobe32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+# define be32toh(x) (htobe32((x)))
+
+# define htole64(x) (x)
+# define le64toh(x) (x)
+# define htobe64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+# define be64toh(x) (htobe64((x)))
+
+/* ... couldn't determine endian-ness of the target platform */
+#else
+# error "Please define __BYTE_ORDER__!"
+
+#endif /* defined(__linux__) || ... */
+
+/* Loads and stores. These avoid undefined behavior due to unaligned memory
+ * accesses, via memcpy. */
+
+inline static uint16_t load16(uint8_t *b) {
+ uint16_t x;
+ memcpy(&x, b, 2);
+ return x;
+}
+
+inline static uint32_t load32(uint8_t *b) {
+ uint32_t x;
+ memcpy(&x, b, 4);
+ return x;
+}
+
+inline static uint64_t load64(uint8_t *b) {
+ uint64_t x;
+ memcpy(&x, b, 8);
+ return x;
+}
+
+inline static void store16(uint8_t *b, uint16_t i) {
+ memcpy(b, &i, 2);
+}
+
+inline static void store32(uint8_t *b, uint32_t i) {
+ memcpy(b, &i, 4);
+}
+
+inline static void store64(uint8_t *b, uint64_t i) {
+ memcpy(b, &i, 8);
+}
+
+/* Legacy accessors so that this header can serve as an implementation of
+ * C.Endianness */
+#define load16_le(b) (le16toh(load16(b)))
+#define store16_le(b, i) (store16(b, htole16(i)))
+#define load16_be(b) (be16toh(load16(b)))
+#define store16_be(b, i) (store16(b, htobe16(i)))
+
+#define load32_le(b) (le32toh(load32(b)))
+#define store32_le(b, i) (store32(b, htole32(i)))
+#define load32_be(b) (be32toh(load32(b)))
+#define store32_be(b, i) (store32(b, htobe32(i)))
+
+#define load64_le(b) (le64toh(load64(b)))
+#define store64_le(b, i) (store64(b, htole64(i)))
+#define load64_be(b) (be64toh(load64(b)))
+#define store64_be(b, i) (store64(b, htobe64(i)))
+
+/* Co-existence of LowStar.Endianness and FStar.Endianness generates name
+ * conflicts, because of course both insist on having no prefixes. Until a
+ * prefix is added, or until we truly retire FStar.Endianness, solve this issue
+ * in an elegant way. */
+#define load16_le0 load16_le
+#define store16_le0 store16_le
+#define load16_be0 load16_be
+#define store16_be0 store16_be
+
+#define load32_le0 load32_le
+#define store32_le0 store32_le
+#define load32_be0 load32_be
+#define store32_be0 store32_be
+
+#define load64_le0 load64_le
+#define store64_le0 store64_le
+#define load64_be0 load64_be
+#define store64_be0 store64_be
+
+#define load128_le0 load128_le
+#define store128_le0 store128_le
+#define load128_be0 load128_be
+#define store128_be0 store128_be
+
+#endif
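The pattern throughout this header, memcpy into a local followed by an endianness macro, is what lets HACL* code read SHA-2's big-endian 32-bit message words from arbitrary byte buffers without unaligned-access undefined behavior. A minimal sketch of the same idea (it assumes the GCC/Clang __builtin_bswap32 builtin; the header itself dispatches per platform as shown above):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Sketch of load32_be: memcpy avoids unaligned/type-punned access UB,
 * and compilers fold it plus the swap into a single load instruction. */
static uint32_t my_load32_be(const uint8_t *b)
{
    uint32_t x;
    memcpy(&x, b, 4);
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    x = __builtin_bswap32(x); /* assumes a GCC/Clang-style compiler */
#endif
    return x;
}

int main(void)
{
    /* SHA-2 consumes its input as big-endian 32-bit words. */
    uint8_t buf[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 };
    assert(my_load32_be(buf + 1) == 0x12345678U); /* unaligned is fine */
    return 0;
}
```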