summaryrefslogtreecommitdiffstats
path: root/Modules/_hacl
diff options
context:
space:
mode:
authorJonathan Protzenko <protz@microsoft.com>2023-02-07 02:11:01 (GMT)
committerGitHub <noreply@github.com>2023-02-07 02:11:01 (GMT)
commit1fcc0efdaa84b3602c236391633b70ff36df149b (patch)
treec3be6de92320d8e82e9d94849ec22f9268b6dfc3 /Modules/_hacl
parent914f8fd9f7fc5e48b54d938a68c932cc618ef3a6 (diff)
downloadcpython-1fcc0efdaa84b3602c236391633b70ff36df149b.zip
cpython-1fcc0efdaa84b3602c236391633b70ff36df149b.tar.gz
cpython-1fcc0efdaa84b3602c236391633b70ff36df149b.tar.bz2
gh-99108: Replace SHA2-224 & 256 with verified code from HACL* (#99109)
replacing hashlib primitives (for the non-OpenSSL case) with verified implementations from HACL*. This is the first PR in the series, and focuses specifically on SHA2-256 and SHA2-224. This PR imports Hacl_Streaming_SHA2 into the Python tree. This is the HACL* implementation of SHA2, which combines a core implementation of SHA2 along with a layer of buffer management that allows updating the digest with any number of bytes. This supersedes the previous implementation in the tree. @franziskuskiefer was kind enough to benchmark the changes: in addition to being verified (thus providing significant safety and security improvements), this implementation also provides a sizeable performance boost! ``` --------------------------------------------------------------- Benchmark Time CPU Iterations --------------------------------------------------------------- Sha2_256_Streaming 3163 ns 3160 ns 219353 // this PR LibTomCrypt_Sha2_256 5057 ns 5056 ns 136234 // library used by Python currently ``` The changes in this PR are as follows: - import the subset of HACL* that covers SHA2-256/224 into `Modules/_hacl` - rewire sha256module.c to use the HACL* implementation Co-authored-by: Gregory P. Smith [Google LLC] <greg@krypto.org> Co-authored-by: Erlend E. Aasland <erlend.aasland@protonmail.com>
Diffstat (limited to 'Modules/_hacl')
-rw-r--r--Modules/_hacl/Hacl_Streaming_SHA2.c682
-rw-r--r--Modules/_hacl/Hacl_Streaming_SHA2.h136
-rw-r--r--Modules/_hacl/README.md29
-rw-r--r--Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h109
-rw-r--r--Modules/_hacl/include/krml/internal/target.h218
-rw-r--r--Modules/_hacl/include/krml/lowstar_endianness.h230
-rw-r--r--Modules/_hacl/include/python_hacl_namespaces.h28
-rw-r--r--Modules/_hacl/internal/Hacl_SHA2_Generic.h135
-rwxr-xr-xModules/_hacl/refresh.sh132
9 files changed, 1699 insertions, 0 deletions
diff --git a/Modules/_hacl/Hacl_Streaming_SHA2.c b/Modules/_hacl/Hacl_Streaming_SHA2.c
new file mode 100644
index 0000000..8456657
--- /dev/null
+++ b/Modules/_hacl/Hacl_Streaming_SHA2.c
@@ -0,0 +1,682 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "Hacl_Streaming_SHA2.h"
+
+#include "internal/Hacl_SHA2_Generic.h"
+
+
+/* Load the SHA-256 initial hash value (the 8 words of
+ * Hacl_Impl_SHA2_Generic_h256, FIPS 180-4 5.3.3) into `hash`. */
+static inline void sha256_init(uint32_t *hash)
+{
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint32_t *os = hash;
+ uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+ os[i] = x;);
+}
+
+/* SHA-256 compression function: absorb one 64-byte block `b` into the
+ * 8-word state `hash` (FIPS 180-4 6.2.2). The state is saved first so the
+ * result can be fed forward at the end (Davies-Meyer construction). */
+static inline void sha256_update0(uint8_t *b, uint32_t *hash)
+{
+ uint32_t hash_old[8U] = { 0U };
+ uint32_t ws[16U] = { 0U };
+ memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t));
+ /* Load the 16 big-endian 32-bit message words into the schedule ws[]. */
+ uint8_t *b10 = b;
+ uint32_t u = load32_be(b10);
+ ws[0U] = u;
+ uint32_t u0 = load32_be(b10 + (uint32_t)4U);
+ ws[1U] = u0;
+ uint32_t u1 = load32_be(b10 + (uint32_t)8U);
+ ws[2U] = u1;
+ uint32_t u2 = load32_be(b10 + (uint32_t)12U);
+ ws[3U] = u2;
+ uint32_t u3 = load32_be(b10 + (uint32_t)16U);
+ ws[4U] = u3;
+ uint32_t u4 = load32_be(b10 + (uint32_t)20U);
+ ws[5U] = u4;
+ uint32_t u5 = load32_be(b10 + (uint32_t)24U);
+ ws[6U] = u5;
+ uint32_t u6 = load32_be(b10 + (uint32_t)28U);
+ ws[7U] = u6;
+ uint32_t u7 = load32_be(b10 + (uint32_t)32U);
+ ws[8U] = u7;
+ uint32_t u8 = load32_be(b10 + (uint32_t)36U);
+ ws[9U] = u8;
+ uint32_t u9 = load32_be(b10 + (uint32_t)40U);
+ ws[10U] = u9;
+ uint32_t u10 = load32_be(b10 + (uint32_t)44U);
+ ws[11U] = u10;
+ uint32_t u11 = load32_be(b10 + (uint32_t)48U);
+ ws[12U] = u11;
+ uint32_t u12 = load32_be(b10 + (uint32_t)52U);
+ ws[13U] = u12;
+ uint32_t u13 = load32_be(b10 + (uint32_t)56U);
+ ws[14U] = u13;
+ uint32_t u14 = load32_be(b10 + (uint32_t)60U);
+ ws[15U] = u14;
+ /* 64 rounds as 4 groups of 16; round constants from k224_256. The
+ * rotations below are written as (x << (32-n) | x >> n), i.e. rotr n. */
+ KRML_MAYBE_FOR4(i0,
+ (uint32_t)0U,
+ (uint32_t)4U,
+ (uint32_t)1U,
+ KRML_MAYBE_FOR16(i,
+ (uint32_t)0U,
+ (uint32_t)16U,
+ (uint32_t)1U,
+ uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+ uint32_t ws_t = ws[i];
+ uint32_t a0 = hash[0U];
+ uint32_t b0 = hash[1U];
+ uint32_t c0 = hash[2U];
+ uint32_t d0 = hash[3U];
+ uint32_t e0 = hash[4U];
+ uint32_t f0 = hash[5U];
+ uint32_t g0 = hash[6U];
+ uint32_t h02 = hash[7U];
+ uint32_t k_e_t = k_t;
+ /* t1 = h + Sigma1(e) + Ch(e,f,g) + K_t + W_t */
+ uint32_t
+ t1 =
+ h02
+ +
+ ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U)
+ ^
+ ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U)
+ ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U)))
+ + ((e0 & f0) ^ (~e0 & g0))
+ + k_e_t
+ + ws_t;
+ /* t2 = Sigma0(a) + Maj(a,b,c) */
+ uint32_t
+ t2 =
+ ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U)
+ ^
+ ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U)
+ ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U)))
+ + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
+ uint32_t a1 = t1 + t2;
+ uint32_t b1 = a0;
+ uint32_t c1 = b0;
+ uint32_t d1 = c0;
+ uint32_t e1 = d0 + t1;
+ uint32_t f1 = e0;
+ uint32_t g1 = f0;
+ uint32_t h12 = g0;
+ hash[0U] = a1;
+ hash[1U] = b1;
+ hash[2U] = c1;
+ hash[3U] = d1;
+ hash[4U] = e1;
+ hash[5U] = f1;
+ hash[6U] = g1;
+ hash[7U] = h12;);
+ /* Expand the message schedule in place for the next 16 rounds
+ * (skipped after the final group). */
+ if (i0 < (uint32_t)3U)
+ {
+ KRML_MAYBE_FOR16(i,
+ (uint32_t)0U,
+ (uint32_t)16U,
+ (uint32_t)1U,
+ uint32_t t16 = ws[i];
+ uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
+ uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
+ uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+ uint32_t
+ s1 =
+ (t2 << (uint32_t)15U | t2 >> (uint32_t)17U)
+ ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U);
+ uint32_t
+ s0 =
+ (t15 << (uint32_t)25U | t15 >> (uint32_t)7U)
+ ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U);
+ ws[i] = s1 + t7 + s0 + t16;);
+ });
+ /* Feed-forward: add the saved input state into the new state. */
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint32_t *os = hash;
+ uint32_t x = hash[i] + hash_old[i];
+ os[i] = x;);
+}
+
+/* Compress the floor(len / 64) complete 64-byte blocks at the start of `b`
+ * into state `st`; any trailing partial block is ignored by this function. */
+static inline void sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
+{
+ uint32_t blocks = len / (uint32_t)64U;
+ for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+ {
+ uint8_t *b0 = b;
+ uint8_t *mb = b0 + i * (uint32_t)64U;
+ sha256_update0(mb, st);
+ }
+}
+
+/* Process the final (partial) block: copy the `len` remaining bytes of `b`,
+ * append the 0x80 padding byte and the 64-bit big-endian total *bit* length
+ * (totlen bytes << 3), then compress one padded block — or two when
+ * len + 8 + 1 does not fit in a single 64-byte block. */
+static inline void
+sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash)
+{
+ uint32_t blocks;
+ if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+ {
+ blocks = (uint32_t)1U;
+ }
+ else
+ {
+ blocks = (uint32_t)2U;
+ }
+ uint32_t fin = blocks * (uint32_t)64U;
+ uint8_t last[128U] = { 0U };
+ uint8_t totlen_buf[8U] = { 0U };
+ uint64_t total_len_bits = totlen << (uint32_t)3U;
+ store64_be(totlen_buf, total_len_bits);
+ uint8_t *b0 = b;
+ memcpy(last, b0, len * sizeof (uint8_t));
+ last[len] = (uint8_t)0x80U;
+ /* Length goes in the last 8 bytes of the (1- or 2-block) padded area. */
+ memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+ uint8_t *last00 = last;
+ uint8_t *last10 = last + (uint32_t)64U;
+ uint8_t *l0 = last00;
+ uint8_t *l1 = last10;
+ uint8_t *lb0 = l0;
+ uint8_t *lb1 = l1;
+ uint8_t *last0 = lb0;
+ uint8_t *last1 = lb1;
+ sha256_update0(last0, hash);
+ if (blocks > (uint32_t)1U)
+ {
+ sha256_update0(last1, hash);
+ return;
+ }
+}
+
+/* Serialize the 8-word state `st` as 32 big-endian bytes into `h`. */
+static inline void sha256_finish(uint32_t *st, uint8_t *h)
+{
+ uint8_t hbuf[32U] = { 0U };
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ store32_be(hbuf + i * (uint32_t)4U, st[i]););
+ memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t));
+}
+
+/* Load the SHA-224 initial hash value (Hacl_Impl_SHA2_Generic_h224,
+ * FIPS 180-4 5.3.2) into `hash`; state layout is identical to SHA-256. */
+static inline void sha224_init(uint32_t *hash)
+{
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ uint32_t *os = hash;
+ uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
+ os[i] = x;);
+}
+
+/* SHA-224 block processing is identical to SHA-256 (only IV and output
+ * truncation differ), so delegate. */
+static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
+{
+ sha256_update_nblocks(len, b, st);
+}
+
+/* SHA-224 final-block padding is identical to SHA-256, so delegate. */
+static void sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
+{
+ sha256_update_last(totlen, len, b, st);
+}
+
+/* Serialize the state big-endian and keep only the first 28 bytes —
+ * SHA-224 truncates the 8th word (FIPS 180-4 6.3). */
+static inline void sha224_finish(uint32_t *st, uint8_t *h)
+{
+ uint8_t hbuf[32U] = { 0U };
+ KRML_MAYBE_FOR8(i,
+ (uint32_t)0U,
+ (uint32_t)8U,
+ (uint32_t)1U,
+ store32_be(hbuf + i * (uint32_t)4U, st[i]););
+ memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t));
+}
+
+/**
+Allocate initial state for the SHA2_256 hash. The state is to be freed by
+calling `free_256`. The state holds a 64-byte staging buffer, an 8-word
+block state (initialized to the SHA-256 IV), and a running byte count.
+
+NOTE(review): the results of KRML_HOST_CALLOC/KRML_HOST_MALLOC are not
+NULL-checked before use — callers inherit the allocator's OOM policy.
+*/
+Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_256(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ Hacl_Streaming_SHA2_state_sha2_224
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_SHA2_state_sha2_224
+ *p =
+ (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof (
+ Hacl_Streaming_SHA2_state_sha2_224
+ ));
+ p[0U] = s;
+ sha256_init(block_state);
+ return p;
+}
+
+/**
+Copies the state passed as argument into a newly allocated state (deep copy):
+both the 64-byte buffer and the 8-word block state are duplicated, so the
+two states can evolve independently afterwards.
+The state is to be freed by calling `free_256`. Cloning the state this way is
+useful, for instance, if your control-flow diverges and you need to feed
+more (different) data into the hash in each branch.
+*/
+Hacl_Streaming_SHA2_state_sha2_224
+*Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_SHA2_state_sha2_224 *s0)
+{
+ Hacl_Streaming_SHA2_state_sha2_224 scrut = *s0;
+ uint32_t *block_state0 = scrut.block_state;
+ uint8_t *buf0 = scrut.buf;
+ uint64_t total_len0 = scrut.total_len;
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t));
+ Hacl_Streaming_SHA2_state_sha2_224
+ s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
+ Hacl_Streaming_SHA2_state_sha2_224
+ *p =
+ (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof (
+ Hacl_Streaming_SHA2_state_sha2_224
+ ));
+ p[0U] = s;
+ return p;
+}
+
+/**
+Reset an existing state to the initial hash state with empty data.
+Reuses the existing buffer and block-state allocations; only the IV is
+reloaded and total_len is zeroed.
+*/
+void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_SHA2_state_sha2_224 *s)
+{
+ Hacl_Streaming_SHA2_state_sha2_224 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ sha256_init(block_state);
+ Hacl_Streaming_SHA2_state_sha2_224
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+/* Core streaming update shared by SHA2-224 and SHA2-256. Returns 1 if the
+ * combined input length would exceed 2^61 - 1 bytes, otherwise absorbs
+ * `data` and returns 0. Three cases, selected by `sz` (bytes currently
+ * buffered, where a full 64 means "one whole block pending"):
+ * (1) data fits in the 64-byte staging buffer — just append;
+ * (2) buffer is empty/aligned — flush any pending block, hash all full
+ * blocks of `data` directly, buffer the remainder;
+ * (3) otherwise — top up the buffer to a full block first, then proceed
+ * as in (2) on the rest. */
+static inline uint32_t
+update_224_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *data, uint32_t len)
+{
+ Hacl_Streaming_SHA2_state_sha2_224 s = *p;
+ uint64_t total_len = s.total_len;
+ /* 2305843009213693951 == 2^61 - 1, the maximum supported total length. */
+ if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+ {
+ return (uint32_t)1U;
+ }
+ uint32_t sz;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ sz = (uint32_t)64U;
+ }
+ else
+ {
+ sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ /* Case 1: the new data fits in the staging buffer. */
+ if (len <= (uint32_t)64U - sz)
+ {
+ Hacl_Streaming_SHA2_state_sha2_224 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf + sz1;
+ memcpy(buf2, data, len * sizeof (uint8_t));
+ uint64_t total_len2 = total_len1 + (uint64_t)len;
+ *p
+ =
+ (
+ (Hacl_Streaming_SHA2_state_sha2_224){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len2
+ }
+ );
+ }
+ /* Case 2: buffer is empty (or exactly one pending block) — hash directly. */
+ else if (sz == (uint32_t)0U)
+ {
+ Hacl_Streaming_SHA2_state_sha2_224 s1 = *p;
+ uint32_t *block_state1 = s1.block_state;
+ uint8_t *buf = s1.buf;
+ uint64_t total_len1 = s1.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ /* Flush a pending buffered block before touching `data`. */
+ if (!(sz1 == (uint32_t)0U))
+ {
+ sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+ }
+ /* `ite` is the tail kept in the buffer; a full final block is kept
+ * rather than hashed so that update_last always has data to pad. */
+ uint32_t ite;
+ if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - data1_len;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + data1_len;
+ sha256_update_nblocks(data1_len, data1, block_state1);
+ uint8_t *dst = buf;
+ memcpy(dst, data2, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_SHA2_state_sha2_224){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)len
+ }
+ );
+ }
+ /* Case 3: fill the buffer to a whole block, then handle the rest. */
+ else
+ {
+ uint32_t diff = (uint32_t)64U - sz;
+ uint8_t *data1 = data;
+ uint8_t *data2 = data + diff;
+ Hacl_Streaming_SHA2_state_sha2_224 s1 = *p;
+ uint32_t *block_state10 = s1.block_state;
+ uint8_t *buf0 = s1.buf;
+ uint64_t total_len10 = s1.total_len;
+ uint32_t sz10;
+ if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+ {
+ sz10 = (uint32_t)64U;
+ }
+ else
+ {
+ sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf2 = buf0 + sz10;
+ memcpy(buf2, data1, diff * sizeof (uint8_t));
+ uint64_t total_len2 = total_len10 + (uint64_t)diff;
+ *p
+ =
+ (
+ (Hacl_Streaming_SHA2_state_sha2_224){
+ .block_state = block_state10,
+ .buf = buf0,
+ .total_len = total_len2
+ }
+ );
+ /* From here on this mirrors case 2, applied to data2 (len - diff bytes). */
+ Hacl_Streaming_SHA2_state_sha2_224 s10 = *p;
+ uint32_t *block_state1 = s10.block_state;
+ uint8_t *buf = s10.buf;
+ uint64_t total_len1 = s10.total_len;
+ uint32_t sz1;
+ if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+ {
+ sz1 = (uint32_t)64U;
+ }
+ else
+ {
+ sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+ }
+ if (!(sz1 == (uint32_t)0U))
+ {
+ sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+ }
+ uint32_t ite;
+ if
+ (
+ (uint64_t)(len - diff)
+ % (uint64_t)(uint32_t)64U
+ == (uint64_t)0U
+ && (uint64_t)(len - diff) > (uint64_t)0U
+ )
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+ }
+ uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
+ uint32_t data1_len = n_blocks * (uint32_t)64U;
+ uint32_t data2_len = len - diff - data1_len;
+ uint8_t *data11 = data2;
+ uint8_t *data21 = data2 + data1_len;
+ sha256_update_nblocks(data1_len, data11, block_state1);
+ uint8_t *dst = buf;
+ memcpy(dst, data21, data2_len * sizeof (uint8_t));
+ *p
+ =
+ (
+ (Hacl_Streaming_SHA2_state_sha2_224){
+ .block_state = block_state1,
+ .buf = buf,
+ .total_len = total_len1 + (uint64_t)(len - diff)
+ }
+ );
+ }
+ return (uint32_t)0U;
+}
+
+/**
+Feed an arbitrary amount of data into the hash. This function returns 0 for
+success, or 1 if the combined length of all of the data passed to `update_256`
+(since the last call to `init_256`) exceeds 2^61-1 bytes.
+
+This function is identical to the update function for SHA2_224.
+*/
+uint32_t
+Hacl_Streaming_SHA2_update_256(
+ Hacl_Streaming_SHA2_state_sha2_224 *p,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ return update_224_256(p, input, input_len);
+}
+
+/**
+Write the resulting hash into `dst`, an array of 32 bytes. The state remains
+valid after a call to `finish_256`, meaning the user may feed more data into
+the hash via `update_256`. (The finish_256 function operates on an internal copy of
+the state and therefore does not invalidate the client-held state `p`.)
+*/
+void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst)
+{
+ Hacl_Streaming_SHA2_state_sha2_224 scrut = *p;
+ uint32_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ /* r = number of bytes currently buffered (64 when a full block is pending). */
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)64U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf_1 = buf_;
+ /* Work on a copy of the block state so `p` is left untouched. */
+ uint32_t tmp_block_state[8U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+ uint32_t ite;
+ if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = r % (uint32_t)64U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ /* len 0 => zero blocks: the buffered data is entirely handled below. */
+ sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ sha256_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+ sha256_finish(tmp_block_state, dst);
+}
+
+/**
+Free a state allocated with `create_in_256`: releases the block state, the
+staging buffer, and the state struct itself, in that order.
+
+This function is identical to the free function for SHA2_224.
+*/
+void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_SHA2_state_sha2_224 *s)
+{
+ Hacl_Streaming_SHA2_state_sha2_224 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ KRML_HOST_FREE(block_state);
+ KRML_HOST_FREE(buf);
+ KRML_HOST_FREE(s);
+}
+
+/**
+Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+One-shot API: init, hash all full blocks, then pad and finish; no heap use.
+*/
+void Hacl_Streaming_SHA2_sha256(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint8_t *ib = input;
+ uint8_t *rb = dst;
+ uint32_t st[8U] = { 0U };
+ sha256_init(st);
+ uint32_t rem = input_len % (uint32_t)64U;
+ uint64_t len_ = (uint64_t)input_len;
+ /* Processes only the floor(input_len/64) full blocks of ib. */
+ sha256_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % (uint32_t)64U;
+ uint8_t *b0 = ib;
+ uint8_t *lb = b0 + input_len - rem1;
+ sha256_update_last(len_, rem, lb, st);
+ sha256_finish(st, rb);
+}
+
+/* Allocate initial state for the SHA2_224 hash; same layout as the 256
+ * variant, but the block state is loaded with the SHA-224 IV. Free with
+ * `free_224`. */
+Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_224(void)
+{
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+ uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+ Hacl_Streaming_SHA2_state_sha2_224
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ Hacl_Streaming_SHA2_state_sha2_224
+ *p =
+ (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof (
+ Hacl_Streaming_SHA2_state_sha2_224
+ ));
+ p[0U] = s;
+ sha224_init(block_state);
+ return p;
+}
+
+/* Reset an existing state to the initial SHA-224 hash state with empty
+ * data; allocations are reused. */
+void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_SHA2_state_sha2_224 *s)
+{
+ Hacl_Streaming_SHA2_state_sha2_224 scrut = *s;
+ uint8_t *buf = scrut.buf;
+ uint32_t *block_state = scrut.block_state;
+ sha224_init(block_state);
+ Hacl_Streaming_SHA2_state_sha2_224
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+ s[0U] = tmp;
+}
+
+/* Feed data into a SHA-224 state; shares update_224_256 with the 256
+ * variant. Returns 0 on success, 1 once the total exceeds 2^61-1 bytes. */
+uint32_t
+Hacl_Streaming_SHA2_update_224(
+ Hacl_Streaming_SHA2_state_sha2_224 *p,
+ uint8_t *input,
+ uint32_t input_len
+)
+{
+ return update_224_256(p, input, input_len);
+}
+
+/**
+Write the resulting hash into `dst`, an array of 28 bytes. The state remains
+valid after a call to `finish_224`, meaning the user may feed more data into
+the hash via `update_224`. (Like finish_256, this operates on an internal
+copy of the block state and does not modify `p`.)
+*/
+void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst)
+{
+ Hacl_Streaming_SHA2_state_sha2_224 scrut = *p;
+ uint32_t *block_state = scrut.block_state;
+ uint8_t *buf_ = scrut.buf;
+ uint64_t total_len = scrut.total_len;
+ /* r = number of bytes currently buffered (64 when a full block is pending). */
+ uint32_t r;
+ if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+ {
+ r = (uint32_t)64U;
+ }
+ else
+ {
+ r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+ }
+ uint8_t *buf_1 = buf_;
+ uint32_t tmp_block_state[8U] = { 0U };
+ memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+ uint32_t ite;
+ if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+ {
+ ite = (uint32_t)64U;
+ }
+ else
+ {
+ ite = r % (uint32_t)64U;
+ }
+ uint8_t *buf_last = buf_1 + r - ite;
+ uint8_t *buf_multi = buf_1;
+ /* len 0 => zero blocks: the buffered data is entirely handled below. */
+ sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+ uint64_t prev_len_last = total_len - (uint64_t)r;
+ sha224_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+ sha224_finish(tmp_block_state, dst);
+}
+
+/* Free a state allocated with `create_in_224`; the layout is shared with
+ * the 256 variant, so free_256 does the work. */
+void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_SHA2_state_sha2_224 *p)
+{
+ Hacl_Streaming_SHA2_free_256(p);
+}
+
+/**
+Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+One-shot SHA-224: init with the 224 IV, hash full blocks, pad, truncate.
+*/
+void Hacl_Streaming_SHA2_sha224(uint8_t *input, uint32_t input_len, uint8_t *dst)
+{
+ uint8_t *ib = input;
+ uint8_t *rb = dst;
+ uint32_t st[8U] = { 0U };
+ sha224_init(st);
+ uint32_t rem = input_len % (uint32_t)64U;
+ uint64_t len_ = (uint64_t)input_len;
+ /* Processes only the floor(input_len/64) full blocks of ib. */
+ sha224_update_nblocks(input_len, ib, st);
+ uint32_t rem1 = input_len % (uint32_t)64U;
+ uint8_t *b0 = ib;
+ uint8_t *lb = b0 + input_len - rem1;
+ sha224_update_last(len_, rem, lb, st);
+ sha224_finish(st, rb);
+}
+
diff --git a/Modules/_hacl/Hacl_Streaming_SHA2.h b/Modules/_hacl/Hacl_Streaming_SHA2.h
new file mode 100644
index 0000000..c83a835
--- /dev/null
+++ b/Modules/_hacl/Hacl_Streaming_SHA2.h
@@ -0,0 +1,136 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Streaming_SHA2_H
+#define __Hacl_Streaming_SHA2_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "python_hacl_namespaces.h"
+#include "krml/FStar_UInt_8_16_32_64.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+
+
+
+/* Streaming state shared by SHA2-224 and SHA2-256: an 8-word internal hash
+ * state, a 64-byte staging buffer for partial blocks, and the total number
+ * of bytes fed so far (at most 2^61 - 1). */
+typedef struct Hacl_Streaming_SHA2_state_sha2_224_s
+{
+ uint32_t *block_state;
+ uint8_t *buf;
+ uint64_t total_len;
+}
+Hacl_Streaming_SHA2_state_sha2_224;
+
+/* Same layout for SHA2-256; only the IV and output length differ. */
+typedef Hacl_Streaming_SHA2_state_sha2_224 Hacl_Streaming_SHA2_state_sha2_256;
+
+/**
+Allocate initial state for the SHA2_256 hash. The state is to be freed by
+calling `free_256`.
+*/
+Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_256(void);
+
+/**
+Copies the state passed as argument into a newly allocated state (deep copy).
+The state is to be freed by calling `free_256`. Cloning the state this way is
+useful, for instance, if your control-flow diverges and you need to feed
+more (different) data into the hash in each branch.
+*/
+Hacl_Streaming_SHA2_state_sha2_224
+*Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_SHA2_state_sha2_224 *s0);
+
+/**
+Reset an existing state to the initial hash state with empty data.
+*/
+void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_SHA2_state_sha2_224 *s);
+
+/**
+Feed an arbitrary amount of data into the hash. This function returns 0 for
+success, or 1 if the combined length of all of the data passed to `update_256`
+(since the last call to `init_256`) exceeds 2^61-1 bytes.
+
+This function is identical to the update function for SHA2_224.
+*/
+uint32_t
+Hacl_Streaming_SHA2_update_256(
+ Hacl_Streaming_SHA2_state_sha2_224 *p,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+/**
+Write the resulting hash into `dst`, an array of 32 bytes. The state remains
+valid after a call to `finish_256`, meaning the user may feed more data into
+the hash via `update_256`. (The finish_256 function operates on an internal copy of
+the state and therefore does not invalidate the client-held state `p`.)
+*/
+void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst);
+
+/**
+Free a state allocated with `create_in_256`.
+
+This function is identical to the free function for SHA2_224.
+*/
+void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_SHA2_state_sha2_224 *s);
+
+/**
+Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+*/
+void Hacl_Streaming_SHA2_sha256(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_224(void);
+
+void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_SHA2_state_sha2_224 *s);
+
+uint32_t
+Hacl_Streaming_SHA2_update_224(
+ Hacl_Streaming_SHA2_state_sha2_224 *p,
+ uint8_t *input,
+ uint32_t input_len
+);
+
+/**
+Write the resulting hash into `dst`, an array of 28 bytes. The state remains
+valid after a call to `finish_224`, meaning the user may feed more data into
+the hash via `update_224`.
+*/
+void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst);
+
+void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_SHA2_state_sha2_224 *p);
+
+/**
+Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+*/
+void Hacl_Streaming_SHA2_sha224(uint8_t *input, uint32_t input_len, uint8_t *dst);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Streaming_SHA2_H_DEFINED
+#endif
diff --git a/Modules/_hacl/README.md b/Modules/_hacl/README.md
new file mode 100644
index 0000000..e6a156a
--- /dev/null
+++ b/Modules/_hacl/README.md
@@ -0,0 +1,29 @@
+# Algorithm implementations used by the `hashlib` module.
+
+This code comes from the
+[HACL\*](https://github.com/hacl-star/hacl-star/) project.
+
+HACL\* is a cryptographic library that has been formally verified for memory
+safety, functional correctness, and secret independence.
+
+## Updating HACL*
+
+Use the `refresh.sh` script in this directory to pull in a new upstream code
+version. The upstream git hash used for the most recent code pull is recorded
+in the script. Modify the script as needed to bring in more if changes are
+needed based on upstream code refactoring.
+
+Never manually edit HACL\* files. Always add transformation shell code to the
+`refresh.sh` script to perform any necessary edits. If there are serious code
+changes needed, work with the upstream repository.
+
+## Local files
+
+1. `./include/python_hacl_namespaces.h`
+1. `./README.md`
+1. `./refresh.sh`
+
+## ACKS
+
+* Jonathan Protzenko aka [@msprotz on GitHub](https://github.com/msprotz)
+contributed our HACL\* based builtin code.
diff --git a/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h b/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
new file mode 100644
index 0000000..3e2e4b3
--- /dev/null
+++ b/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h
@@ -0,0 +1,109 @@
+/*
+ Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License.
+*/
+
+
+#ifndef __FStar_UInt_8_16_32_64_H
+#define __FStar_UInt_8_16_32_64_H
+
+
+
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include "krml/lowstar_endianness.h"
+#include "krml/FStar_UInt_8_16_32_64.h"
+#include "krml/internal/target.h"
+/* Branchless equality mask: all-ones when a == b, 0 otherwise.
+ * x | -x has its top bit set iff x != 0; shifting it down and
+ * subtracting 1 turns that into the mask. */
+static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
+{
+ uint64_t x = a ^ b;
+ uint64_t minus_x = ~x + (uint64_t)1U;
+ uint64_t x_or_minus_x = x | minus_x;
+ uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
+ return xnx - (uint64_t)1U;
+}
+
+/* Branchless greater-or-equal mask: all-ones when a >= b, 0 otherwise
+ * (derives the borrow of a - b without a comparison). */
+static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
+{
+ uint64_t x = a;
+ uint64_t y = b;
+ uint64_t x_xor_y = x ^ y;
+ uint64_t x_sub_y = x - y;
+ uint64_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint64_t q = x_xor_y | x_sub_y_xor_y;
+ uint64_t x_xor_q = x ^ q;
+ uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
+ return x_xor_q_ - (uint64_t)1U;
+}
+
+/* Branchless equality mask: all-ones when a == b, 0 otherwise. */
+static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
+{
+ uint32_t x = a ^ b;
+ uint32_t minus_x = ~x + (uint32_t)1U;
+ uint32_t x_or_minus_x = x | minus_x;
+ uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
+ return xnx - (uint32_t)1U;
+}
+
+/* Branchless greater-or-equal mask: all-ones when a >= b, 0 otherwise. */
+static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
+{
+ uint32_t x = a;
+ uint32_t y = b;
+ uint32_t x_xor_y = x ^ y;
+ uint32_t x_sub_y = x - y;
+ uint32_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint32_t q = x_xor_y | x_sub_y_xor_y;
+ uint32_t x_xor_q = x ^ q;
+ uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
+ return x_xor_q_ - (uint32_t)1U;
+}
+
+/* Branchless equality mask: all-ones (0xFFFF) when a == b, 0 otherwise.
+ * The shift count is 15 after the usual integer promotions. */
+static inline uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
+{
+ uint16_t x = a ^ b;
+ uint16_t minus_x = ~x + (uint16_t)1U;
+ uint16_t x_or_minus_x = x | minus_x;
+ uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
+ return xnx - (uint16_t)1U;
+}
+
+/* Branchless greater-or-equal mask: all-ones (0xFFFF) when a >= b, 0 otherwise. */
+static inline uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
+{
+ uint16_t x = a;
+ uint16_t y = b;
+ uint16_t x_xor_y = x ^ y;
+ uint16_t x_sub_y = x - y;
+ uint16_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint16_t q = x_xor_y | x_sub_y_xor_y;
+ uint16_t x_xor_q = x ^ q;
+ uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
+ return x_xor_q_ - (uint16_t)1U;
+}
+
+/* Branchless equality mask: all-ones (0xFF) when a == b, 0 otherwise. */
+static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
+{
+ uint8_t x = a ^ b;
+ uint8_t minus_x = ~x + (uint8_t)1U;
+ uint8_t x_or_minus_x = x | minus_x;
+ uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
+ return xnx - (uint8_t)1U;
+}
+
+/* Branchless greater-or-equal mask: all-ones (0xFF) when a >= b, 0 otherwise. */
+static inline uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
+{
+ uint8_t x = a;
+ uint8_t y = b;
+ uint8_t x_xor_y = x ^ y;
+ uint8_t x_sub_y = x - y;
+ uint8_t x_sub_y_xor_y = x_sub_y ^ y;
+ uint8_t q = x_xor_y | x_sub_y_xor_y;
+ uint8_t x_xor_q = x ^ q;
+ uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
+ return x_xor_q_ - (uint8_t)1U;
+}
+
+
+#define __FStar_UInt_8_16_32_64_H_DEFINED
+#endif
diff --git a/Modules/_hacl/include/krml/internal/target.h b/Modules/_hacl/include/krml/internal/target.h
new file mode 100644
index 0000000..9ef5985
--- /dev/null
+++ b/Modules/_hacl/include/krml/internal/target.h
@@ -0,0 +1,218 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License. */
+
+#ifndef __KRML_TARGET_H
+#define __KRML_TARGET_H
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <assert.h>
+
+/* Since KaRaMeL emits the inline keyword unconditionally, we follow the
+ * guidelines at https://gcc.gnu.org/onlinedocs/gcc/Inline.html and make this
+ * __inline__ to ensure the code compiles with -std=c90 and earlier. */
+#ifdef __GNUC__
+# define inline __inline__
+#endif
+
+/* Allocation hooks: an embedding application may predefine these macros
+ * (before including this header) to route all KaRaMeL-generated allocations
+ * through a custom allocator. They default to the C standard library. */
+#ifndef KRML_HOST_MALLOC
+# define KRML_HOST_MALLOC malloc
+#endif
+
+#ifndef KRML_HOST_CALLOC
+# define KRML_HOST_CALLOC calloc
+#endif
+
+#ifndef KRML_HOST_FREE
+# define KRML_HOST_FREE free
+#endif
+
+/* Macros for prettier unrolling of loops */
+#define KRML_LOOP1(i, n, x) { \
+ x \
+ i += n; \
+}
+
+#define KRML_LOOP2(i, n, x) \
+ KRML_LOOP1(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP3(i, n, x) \
+ KRML_LOOP2(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP4(i, n, x) \
+ KRML_LOOP2(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP5(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP6(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP7(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP3(i, n, x)
+
+#define KRML_LOOP8(i, n, x) \
+ KRML_LOOP4(i, n, x) \
+ KRML_LOOP4(i, n, x)
+
+#define KRML_LOOP9(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP1(i, n, x)
+
+#define KRML_LOOP10(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP2(i, n, x)
+
+#define KRML_LOOP11(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP3(i, n, x)
+
+#define KRML_LOOP12(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP4(i, n, x)
+
+#define KRML_LOOP13(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP5(i, n, x)
+
+#define KRML_LOOP14(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP6(i, n, x)
+
+#define KRML_LOOP15(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP7(i, n, x)
+
+#define KRML_LOOP16(i, n, x) \
+ KRML_LOOP8(i, n, x) \
+ KRML_LOOP8(i, n, x)
+
+/* Fully unrolled loop: declares the index i starting at z, then pastes the
+ * body x exactly n times via the KRML_LOOP##n ladder above, bumping i by k
+ * after each copy. n must be a literal 1..16 for the token paste to work. */
+#define KRML_UNROLL_FOR(i, z, n, k, x) do { \
+ uint32_t i = z; \
+ KRML_LOOP##n(i, k, x) \
+} while (0)
+
+/* Genuine runtime loop; used when the trip count exceeds KRML_UNROLL_MAX. */
+#define KRML_ACTUAL_FOR(i, z, n, k, x) \
+ do { \
+ for (uint32_t i = z; i < n; i += k) { \
+ x \
+ } \
+ } while (0)
+
+/* Largest trip count that KRML_MAYBE_FORn is willing to unroll. */
+#ifndef KRML_UNROLL_MAX
+#define KRML_UNROLL_MAX 16
+#endif
+
+/* In KRML_MAYBE_FORn, n is the number of loop iterations, i.e. (n - z)/k as
+ * evaluated by krml at code-generation time. */
+#if 0 <= KRML_UNROLL_MAX
+/* Zero iterations: the "unrolled" form is simply empty. */
+#define KRML_MAYBE_FOR0(i, z, n, k, x)
+#else
+#define KRML_MAYBE_FOR0(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 1 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 1, k, x)
+#else
+#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 2 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 2, k, x)
+#else
+#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 3 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 3, k, x)
+#else
+#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 4 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x)
+#else
+#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 5 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 5, k, x)
+#else
+#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 6 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 6, k, x)
+#else
+#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 7 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 7, k, x)
+#else
+#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 8 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 8, k, x)
+#else
+#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 9 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 9, k, x)
+#else
+#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 10 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 10, k, x)
+#else
+#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 11 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 11, k, x)
+#else
+#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 12 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 12, k, x)
+#else
+#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 13 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 13, k, x)
+#else
+#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 14 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 14, k, x)
+#else
+#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 15 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 15, k, x)
+#else
+#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+
+#if 16 <= KRML_UNROLL_MAX
+#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 16, k, x)
+#else
+#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x)
+#endif
+#endif
diff --git a/Modules/_hacl/include/krml/lowstar_endianness.h b/Modules/_hacl/include/krml/lowstar_endianness.h
new file mode 100644
index 0000000..32a7391
--- /dev/null
+++ b/Modules/_hacl/include/krml/lowstar_endianness.h
@@ -0,0 +1,230 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+ Licensed under the Apache 2.0 License. */
+
+#ifndef __LOWSTAR_ENDIANNESS_H
+#define __LOWSTAR_ENDIANNESS_H
+
+#include <string.h>
+#include <inttypes.h>
+
+/******************************************************************************/
+/* Implementing C.fst (part 2: endian-ness macros) */
+/******************************************************************************/
+
+/* ... for Linux */
+#if defined(__linux__) || defined(__CYGWIN__) || defined (__USE_SYSTEM_ENDIAN_H__) || defined(__GLIBC__)
+# include <endian.h>
+
+/* ... for OSX */
+#elif defined(__APPLE__)
+# include <libkern/OSByteOrder.h>
+# define htole64(x) OSSwapHostToLittleInt64(x)
+# define le64toh(x) OSSwapLittleToHostInt64(x)
+# define htobe64(x) OSSwapHostToBigInt64(x)
+# define be64toh(x) OSSwapBigToHostInt64(x)
+
+# define htole16(x) OSSwapHostToLittleInt16(x)
+# define le16toh(x) OSSwapLittleToHostInt16(x)
+# define htobe16(x) OSSwapHostToBigInt16(x)
+# define be16toh(x) OSSwapBigToHostInt16(x)
+
+# define htole32(x) OSSwapHostToLittleInt32(x)
+# define le32toh(x) OSSwapLittleToHostInt32(x)
+# define htobe32(x) OSSwapHostToBigInt32(x)
+# define be32toh(x) OSSwapBigToHostInt32(x)
+
+/* ... for Solaris */
+#elif defined(__sun__)
+# include <sys/byteorder.h>
+# define htole64(x) LE_64(x)
+# define le64toh(x) LE_64(x)
+# define htobe64(x) BE_64(x)
+# define be64toh(x) BE_64(x)
+
+# define htole16(x) LE_16(x)
+# define le16toh(x) LE_16(x)
+# define htobe16(x) BE_16(x)
+# define be16toh(x) BE_16(x)
+
+# define htole32(x) LE_32(x)
+# define le32toh(x) LE_32(x)
+# define htobe32(x) BE_32(x)
+# define be32toh(x) BE_32(x)
+
+/* ... for the BSDs */
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+# include <sys/endian.h>
+#elif defined(__OpenBSD__)
+# include <endian.h>
+
+/* ... for Windows (MSVC)... not targeting XBOX 360! */
+#elif defined(_MSC_VER)
+
+# include <stdlib.h>
+# define htobe16(x) _byteswap_ushort(x)
+# define htole16(x) (x)
+# define be16toh(x) _byteswap_ushort(x)
+# define le16toh(x) (x)
+
+# define htobe32(x) _byteswap_ulong(x)
+# define htole32(x) (x)
+# define be32toh(x) _byteswap_ulong(x)
+# define le32toh(x) (x)
+
+# define htobe64(x) _byteswap_uint64(x)
+# define htole64(x) (x)
+# define be64toh(x) _byteswap_uint64(x)
+# define le64toh(x) (x)
+
+/* ... for Windows (GCC-like, e.g. mingw or clang) */
+#elif (defined(_WIN32) || defined(_WIN64)) && \
+ (defined(__GNUC__) || defined(__clang__))
+
+# define htobe16(x) __builtin_bswap16(x)
+# define htole16(x) (x)
+# define be16toh(x) __builtin_bswap16(x)
+# define le16toh(x) (x)
+
+# define htobe32(x) __builtin_bswap32(x)
+# define htole32(x) (x)
+# define be32toh(x) __builtin_bswap32(x)
+# define le32toh(x) (x)
+
+# define htobe64(x) __builtin_bswap64(x)
+# define htole64(x) (x)
+# define be64toh(x) __builtin_bswap64(x)
+# define le64toh(x) (x)
+
+/* ... generic big-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+/* byte swapping code inspired by:
+ * https://github.com/rweather/arduinolibs/blob/master/libraries/Crypto/utility/EndianUtil.h
+ * */
+
+/* Host is big-endian here, so host-to-BE is the identity and host-to-LE is a
+ * full byte reversal. */
+# define htobe32(x) (x)
+# define be32toh(x) (x)
+# define htole32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+# define le32toh(x) (htole32((x)))
+
+# define htobe64(x) (x)
+# define be64toh(x) (x)
+/* Reverse all 8 bytes: byte-swap each 32-bit half with htole32 (the swapper
+ * in this branch) and exchange the halves. The previous code called htobe32,
+ * which is the identity in this branch, so it only exchanged the two halves
+ * and produced a wrong result on big-endian targets reaching this fallback
+ * (cf. the little-endian branch below, which correctly uses its branch-local
+ * swapper htobe32). */
+# define htole64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htole32((uint32_t)__temp); \
+ uint32_t __high = htole32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+# define le64toh(x) (htole64((x)))
+
+/* ... generic little-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+# define htole32(x) (x)
+# define le32toh(x) (x)
+# define htobe32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+# define be32toh(x) (htobe32((x)))
+
+# define htole64(x) (x)
+# define le64toh(x) (x)
+# define htobe64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+# define be64toh(x) (htobe64((x)))
+
+/* ... couldn't determine endian-ness of the target platform */
+#else
+# error "Please define __BYTE_ORDER__!"
+
+#endif /* defined(__linux__) || ... */
+
+/* Loads and stores. These avoid undefined behavior due to unaligned memory
+ * accesses, via memcpy. */
+
+/* Read a native-endian uint16_t from a possibly unaligned pointer. */
+inline static uint16_t load16(uint8_t *b) {
+ uint16_t x;
+ memcpy(&x, b, 2);
+ return x;
+}
+
+/* Read a native-endian uint32_t from a possibly unaligned pointer. */
+inline static uint32_t load32(uint8_t *b) {
+ uint32_t x;
+ memcpy(&x, b, 4);
+ return x;
+}
+
+/* Read a native-endian uint64_t from a possibly unaligned pointer. */
+inline static uint64_t load64(uint8_t *b) {
+ uint64_t x;
+ memcpy(&x, b, 8);
+ return x;
+}
+
+/* Write a native-endian uint16_t to a possibly unaligned pointer. */
+inline static void store16(uint8_t *b, uint16_t i) {
+ memcpy(b, &i, 2);
+}
+
+/* Write a native-endian uint32_t to a possibly unaligned pointer. */
+inline static void store32(uint8_t *b, uint32_t i) {
+ memcpy(b, &i, 4);
+}
+
+/* Write a native-endian uint64_t to a possibly unaligned pointer. */
+inline static void store64(uint8_t *b, uint64_t i) {
+ memcpy(b, &i, 8);
+}
+
+/* Legacy accessors so that this header can serve as an implementation of
+ * C.Endianness */
+#define load16_le(b) (le16toh(load16(b)))
+#define store16_le(b, i) (store16(b, htole16(i)))
+#define load16_be(b) (be16toh(load16(b)))
+#define store16_be(b, i) (store16(b, htobe16(i)))
+
+#define load32_le(b) (le32toh(load32(b)))
+#define store32_le(b, i) (store32(b, htole32(i)))
+#define load32_be(b) (be32toh(load32(b)))
+#define store32_be(b, i) (store32(b, htobe32(i)))
+
+#define load64_le(b) (le64toh(load64(b)))
+#define store64_le(b, i) (store64(b, htole64(i)))
+#define load64_be(b) (be64toh(load64(b)))
+#define store64_be(b, i) (store64(b, htobe64(i)))
+
+/* Co-existence of LowStar.Endianness and FStar.Endianness generates name
+ * conflicts, because of course both insist on having no prefixes. Until a
+ * prefix is added, or until we truly retire FStar.Endianness, solve this issue
+ * in an elegant way. */
+#define load16_le0 load16_le
+#define store16_le0 store16_le
+#define load16_be0 load16_be
+#define store16_be0 store16_be
+
+#define load32_le0 load32_le
+#define store32_le0 store32_le
+#define load32_be0 load32_be
+#define store32_be0 store32_be
+
+#define load64_le0 load64_le
+#define store64_le0 store64_le
+#define load64_be0 load64_be
+#define store64_be0 store64_be
+
+#define load128_le0 load128_le
+#define store128_le0 store128_le
+#define load128_be0 load128_be
+#define store128_be0 store128_be
+
+#endif
diff --git a/Modules/_hacl/include/python_hacl_namespaces.h b/Modules/_hacl/include/python_hacl_namespaces.h
new file mode 100644
index 0000000..af39045
--- /dev/null
+++ b/Modules/_hacl/include/python_hacl_namespaces.h
@@ -0,0 +1,28 @@
+#ifndef _PYTHON_HACL_NAMESPACES_H
+#define _PYTHON_HACL_NAMESPACES_H
+
+/*
+ * C's excuse for namespaces: Use globally unique names to avoid linkage
+ * conflicts with builds linking or dynamically loading other code potentially
+ * using HACL* libraries.
+ */
+
+/* NOTE(review): keep this list in sync with every symbol exported by
+ * Hacl_Streaming_SHA2.h — an entry missing here silently leaves that symbol
+ * at its un-prefixed global name, reintroducing the linkage-conflict risk. */
+#define Hacl_Streaming_SHA2_state_sha2_224_s python_hashlib_Hacl_Streaming_SHA2_state_sha2_224_s
+#define Hacl_Streaming_SHA2_state_sha2_224 python_hashlib_Hacl_Streaming_SHA2_state_sha2_224
+#define Hacl_Streaming_SHA2_state_sha2_256 python_hashlib_Hacl_Streaming_SHA2_state_sha2_256
+#define Hacl_Streaming_SHA2_create_in_256 python_hashlib_Hacl_Streaming_SHA2_create_in_256
+#define Hacl_Streaming_SHA2_create_in_224 python_hashlib_Hacl_Streaming_SHA2_create_in_224
+#define Hacl_Streaming_SHA2_copy_256 python_hashlib_Hacl_Streaming_SHA2_copy_256
+#define Hacl_Streaming_SHA2_copy_224 python_hashlib_Hacl_Streaming_SHA2_copy_224
+#define Hacl_Streaming_SHA2_init_256 python_hashlib_Hacl_Streaming_SHA2_init_256
+#define Hacl_Streaming_SHA2_init_224 python_hashlib_Hacl_Streaming_SHA2_init_224
+#define Hacl_Streaming_SHA2_update_256 python_hashlib_Hacl_Streaming_SHA2_update_256
+#define Hacl_Streaming_SHA2_update_224 python_hashlib_Hacl_Streaming_SHA2_update_224
+#define Hacl_Streaming_SHA2_finish_256 python_hashlib_Hacl_Streaming_SHA2_finish_256
+#define Hacl_Streaming_SHA2_finish_224 python_hashlib_Hacl_Streaming_SHA2_finish_224
+#define Hacl_Streaming_SHA2_free_256 python_hashlib_Hacl_Streaming_SHA2_free_256
+#define Hacl_Streaming_SHA2_free_224 python_hashlib_Hacl_Streaming_SHA2_free_224
+#define Hacl_Streaming_SHA2_sha256 python_hashlib_Hacl_Streaming_SHA2_sha256
+#define Hacl_Streaming_SHA2_sha224 python_hashlib_Hacl_Streaming_SHA2_sha224
+
+#endif // _PYTHON_HACL_NAMESPACES_H
diff --git a/Modules/_hacl/internal/Hacl_SHA2_Generic.h b/Modules/_hacl/internal/Hacl_SHA2_Generic.h
new file mode 100644
index 0000000..23f7cea
--- /dev/null
+++ b/Modules/_hacl/internal/Hacl_SHA2_Generic.h
@@ -0,0 +1,135 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_SHA2_Generic_H
+#define __internal_Hacl_SHA2_Generic_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/FStar_UInt_8_16_32_64.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+
+
+
+/* SHA-224 initial hash values H(0) (FIPS 180-4). */
+static const
+uint32_t
+Hacl_Impl_SHA2_Generic_h224[8U] =
+ {
+ (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U,
+ (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U
+ };
+
+/* SHA-256 initial hash values H(0) (FIPS 180-4). */
+static const
+uint32_t
+Hacl_Impl_SHA2_Generic_h256[8U] =
+ {
+ (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
+ (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
+ };
+
+/* SHA-384 initial hash values H(0) (FIPS 180-4). NOTE(review): this vendored
+ * import only wires up the 224/256 variants (refresh.sh strips the 384/512
+ * streaming code), but the generic header keeps all four IV tables. */
+static const
+uint64_t
+Hacl_Impl_SHA2_Generic_h384[8U] =
+ {
+ (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
+ (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
+ (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
+ };
+
+/* SHA-512 initial hash values H(0) (FIPS 180-4). */
+static const
+uint64_t
+Hacl_Impl_SHA2_Generic_h512[8U] =
+ {
+ (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
+ (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
+ (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
+ };
+
+static const
+uint32_t
+Hacl_Impl_SHA2_Generic_k224_256[64U] =
+ {
+ (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
+ (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
+ (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
+ (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
+ (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
+ (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
+ (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
+ (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
+ (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
+ (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
+ (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
+ (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
+ (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
+ (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
+ (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
+ (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+ };
+
+static const
+uint64_t
+Hacl_Impl_SHA2_Generic_k384_512[80U] =
+ {
+ (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU,
+ (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U,
+ (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U,
+ (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U,
+ (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U,
+ (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U,
+ (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U,
+ (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U,
+ (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU,
+ (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U,
+ (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU,
+ (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU,
+ (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U,
+ (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U,
+ (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U,
+ (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U,
+ (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U,
+ (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU,
+ (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU,
+ (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU,
+ (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U,
+ (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U,
+ (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU,
+ (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU,
+ (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU,
+ (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU,
+ (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U
+ };
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_SHA2_Generic_H_DEFINED
+#endif
diff --git a/Modules/_hacl/refresh.sh b/Modules/_hacl/refresh.sh
new file mode 100755
index 0000000..5948738
--- /dev/null
+++ b/Modules/_hacl/refresh.sh
@@ -0,0 +1,132 @@
+#!/usr/bin/env bash
+#
+# Use this script to update the HACL generated hash algorithm implementation
+# code from a local checkout of the upstream hacl-star repository.
+#
+
+set -e
+set -o pipefail
+
+if [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
+ echo "A bash version >= 4 required. Got: $BASH_VERSION" >&2
+ exit 1
+fi
+
+if [[ $1 == "" ]]; then
+ echo "Usage: $0 path-to-hacl-directory"
+ echo ""
+ echo " path-to-hacl-directory should be a local git checkout of a"
+ echo " https://github.com/hacl-star/hacl-star/ repo."
+ exit 1
+fi
+
+# Update this when updating to a new version after verifying that the changes
+# the update brings in are good.
+expected_hacl_star_rev=94aabbb4cf71347d3779a8db486c761403c6d036
+
+hacl_dir="$(realpath "$1")"
+cd "$(dirname "$0")"
+actual_rev=$(cd "$hacl_dir" && git rev-parse HEAD)
+
+if [[ "$actual_rev" != "$expected_hacl_star_rev" ]]; then
+ echo "WARNING: HACL* in '$hacl_dir' is at revision:" >&2
+ echo " $actual_rev" >&2
+ echo "but expected revision:" >&2
+ echo " $expected_hacl_star_rev" >&2
+ echo "Edit the expected rev if the changes pulled in are what you want."
+fi
+
+# Step 1: copy files
+
+# Generated C sources/headers for the algorithms; land in the current
+# directory (Modules/_hacl/).
+declare -a dist_files
+dist_files=(
+ Hacl_Streaming_SHA2.h
+ internal/Hacl_SHA2_Generic.h
+ Hacl_Streaming_SHA2.c
+)
+
+# KaRaMeL runtime support headers; keep their include/ layout.
+declare -a include_files
+include_files=(
+ include/krml/lowstar_endianness.h
+ include/krml/internal/target.h
+)
+
+declare -a lib_files
+lib_files=(
+ krmllib/dist/minimal/FStar_UInt_8_16_32_64.h
+)
+
+# C files for the algorithms themselves: current directory
+# (the tar-pipe copy recreates each file's relative directory layout here,
+# e.g. internal/Hacl_SHA2_Generic.h lands in internal/)
+(cd "$hacl_dir/dist/gcc-compatible" && tar cf - "${dist_files[@]}") | tar xf -
+
+# Support header files (e.g. endianness macros): stay in include/
+(cd "$hacl_dir/dist/karamel" && tar cf - "${include_files[@]}") | tar xf -
+
+# Special treatment: we don't bother with an extra directory and move krmllib
+# files to the same include directory
+for f in "${lib_files[@]}"; do
+ cp "$hacl_dir/dist/karamel/$f" include/krml/
+done
+
+# Step 2: some in-place modifications to keep things simple and minimal
+
+# This is basic, but refreshes of the vendored HACL code are infrequent, so
+# let's not over-engineer this.
+if [[ $(uname) == "Darwin" ]]; then
+ # You're already running with homebrew or macports to satisfy the
+ # bash>=4 requirement, so requiring GNU sed is entirely reasonable.
+ sed=gsed
+else
+ sed=sed
+fi
+
+readarray -t all_files < <(find . -name '*.h' -or -name '*.c')
+
+# types.h is a simple wrapper that defines the uint128 type then proceeds to
+# include FStar_UInt_8_16_32_64.h; we jump the types.h step since our current
+# selection of algorithms does not necessitate the use of uint128
+$sed -i 's!#include.*types.h"!#include "krml/FStar_UInt_8_16_32_64.h"!g' "${all_files[@]}"
+$sed -i 's!#include.*compat.h"!!g' "${all_files[@]}"
+
+# FStar_UInt_8_16_32_64 contains definitions useful in the general case, but not
+# for us; trim!
+$sed -i -z 's!\(extern\|typedef\)[^;]*;\n\n!!g' include/krml/FStar_UInt_8_16_32_64.h
+
+# This contains static inline prototypes that are defined in
+# FStar_UInt_8_16_32_64; they are by default repeated for safety of separate
+# compilation, but this is not necessary.
+$sed -i 's!#include.*Hacl_Krmllib.h"!!g' "${all_files[@]}"
+
+# This header is useful for *other* algorithms that refer to SHA2, e.g. Ed25519
+# which needs to compute a digest of a message before signing it. Here, since no
+# other algorithm builds upon SHA2, this internal header is useless (and is not
+# included in $dist_files).
+$sed -i 's!#include.*internal/Hacl_Streaming_SHA2.h"!#include "Hacl_Streaming_SHA2.h"!g' "${all_files[@]}"
+
+# The SHA2 file contains all variants of SHA2. We strip 384 and 512 for the time
+# being, to be included later.
+# This regexp matches a separator (two new lines), followed by:
+#
+# <non-empty-line>*
+# ... 384 or 512 ... {
+# <indented-line>*
+# }
+#
+# The first non-empty lines are the comment block. The second ... may spill over
+# the next following lines if the arguments are printed in one-per-line mode.
+$sed -i -z 's/\n\n\([^\n]\+\n\)*[^\n]*\(384\|512\)[^{]*{\n\?\( [^\n]*\n\)*}//g' Hacl_Streaming_SHA2.c
+
+# Same thing with function prototypes
+$sed -i -z 's/\n\n\([^\n]\+\n\)*[^\n]*\(384\|512\)[^;]*;//g' Hacl_Streaming_SHA2.h
+
+# Use globally unique names for the Hacl_ C APIs to avoid linkage conflicts.
+$sed -i -z 's!#include <string.h>\n!#include <string.h>\n#include "python_hacl_namespaces.h"\n!' Hacl_Streaming_SHA2.h
+
+# Finally, we remove a bunch of ifdefs from target.h that are, again, useful in
+# the general case, but not exercised by the subset of HACL* that we vendor.
+$sed -z -i 's!#ifndef KRML_\(HOST_PRINTF\|HOST_EXIT\|PRE_ALIGN\|POST_ALIGN\|ALIGNED_MALLOC\|ALIGNED_FREE\|HOST_TIME\)\n\(\n\|# [^\n]*\n\|[^#][^\n]*\n\)*#endif\n\n!!g' include/krml/internal/target.h
+$sed -z -i 's!\n\n\([^#][^\n]*\n\)*#define KRML_\(EABORT\|EXIT\|CHECK_SIZE\)[^\n]*\(\n [^\n]*\)*!!g' include/krml/internal/target.h
+$sed -z -i 's!\n\n\([^#][^\n]*\n\)*#if [^\n]*\n\( [^\n]*\n\)*#define KRML_\(EABORT\|EXIT\|CHECK_SIZE\)[^\n]*\(\n [^\n]*\)*!!g' include/krml/internal/target.h
+$sed -z -i 's!\n\n\([^#][^\n]*\n\)*#if [^\n]*\n\( [^\n]*\n\)*# define _\?KRML_\(DEPRECATED\|CHECK_SIZE_PRAGMA\|HOST_EPRINTF\|HOST_SNPRINTF\)[^\n]*\n\([^#][^\n]*\n\|#el[^\n]*\n\|# [^\n]*\n\)*#endif!!g' include/krml/internal/target.h
+
+echo "Updated; verify all is okay using git diff and git status."