path: root/Python/pyhash.c
blob: 97cb54759b6183ead7ee41935f21db2899987e7d (plain)
/* Set of hash utility functions to help maintain the invariant that
    if a==b then hash(a)==hash(b)

   All the utility functions (_Py_Hash*()) return "-1" to signify an error.
*/
#include "Python.h"

#ifdef __APPLE__
#  include <libkern/OSByteOrder.h>
#elif defined(HAVE_LE64TOH) && defined(HAVE_ENDIAN_H)
#  include <endian.h>
#elif defined(HAVE_LE64TOH) && defined(HAVE_SYS_ENDIAN_H)
#  include <sys/endian.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

_Py_HashSecret_t _Py_HashSecret;

#if Py_HASH_ALGORITHM == Py_HASH_EXTERNAL
extern PyHash_FuncDef PyHash_Func;
#else
static PyHash_FuncDef PyHash_Func;
#endif

/* Count _Py_HashBytes() calls */
#ifdef Py_HASH_STATS
#define Py_HASH_STATS_MAX 32
static Py_ssize_t hashstats[Py_HASH_STATS_MAX + 1] = {0};
#endif

/* For numeric types, the hash of a number x is based on the reduction
   of x modulo the prime P = 2**_PyHASH_BITS - 1.  It's designed so that
   hash(x) == hash(y) whenever x and y are numerically equal, even if
   x and y have different types.

   A quick summary of the hashing strategy:

   (1) First define the 'reduction of x modulo P' for any rational
   number x; this is a standard extension of the usual notion of
   reduction modulo P for integers.  If x == p/q (written in lowest
   terms), the reduction is interpreted as the reduction of p times
   the inverse of the reduction of q, all modulo P; if q is exactly
   divisible by P then define the reduction to be infinity.  So we've
   got a well-defined map

      reduce : { rational numbers } -> { 0, 1, 2, ..., P-1, infinity }.

   (2) Now for a rational number x, define hash(x) by:

      reduce(x)   if x >= 0
      -reduce(-x) if x < 0

   If the result of the reduction is infinity (this is impossible for
   integers, floats and Decimals) then use the predefined hash value
   _PyHASH_INF for x >= 0, or -_PyHASH_INF for x < 0, instead.
   _PyHASH_INF, -_PyHASH_INF and _PyHASH_NAN are also used for the
   hashes of float and Decimal infinities and nans.

   A selling point for the above strategy is that it makes it possible
   to compute hashes of decimal and binary floating-point numbers
   efficiently, even if the exponent of the binary or decimal number
   is large.  The key point is that

      reduce(x * y) == reduce(x) * reduce(y) (modulo _PyHASH_MODULUS)

   provided that {reduce(x), reduce(y)} != {0, infinity}.  The reduction of a
   binary or decimal float is never infinity, since the denominator is a power
   of 2 (for binary) or a divisor of a power of 10 (for decimal).  So we have,
   for nonnegative x,

      reduce(x * 2**e) == reduce(x) * reduce(2**e) % _PyHASH_MODULUS

      reduce(x * 10**e) == reduce(x) * reduce(10**e) % _PyHASH_MODULUS

   and reduce(10**e) can be computed efficiently by the usual modular
   exponentiation algorithm.  For reduce(2**e) it's even better: since
   P is of the form 2**n-1, reduce(2**e) is 2**(e mod n), and multiplication
   by 2**(e mod n) modulo 2**n-1 just amounts to a rotation of bits.

   */
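
/* Worked example (illustrative only, not part of the file's logic): on a
   typical 64-bit build _PyHASH_BITS is 61, so P == 2**61 - 1 and
   2**61 == 1 (mod P).  Take x = 2.5 == 5/2.  The inverse of 2 modulo P is
   2**60, so

      reduce(2.5) == 5 * 2**60 % P == (2**62 + 2**60) % P == 2**60 + 2

   i.e. 1152921504606846978, the value CPython reports for hash(2.5).
   The rotation trick for reduce(2**e) could be written as the following
   hypothetical helper (exposition only, never compiled): */
#if 0
static Py_uhash_t
reduce_pow2(unsigned int e)
{
    /* Since P == 2**_PyHASH_BITS - 1, reduce(2**e) == 2**(e % _PyHASH_BITS). */
    return (Py_uhash_t)1 << (e % _PyHASH_BITS);
}
#endif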

Py_hash_t
_Py_HashDouble(double v)
{
    int e, sign;
    double m;
    Py_uhash_t x, y;

    if (!Py_IS_FINITE(v)) {
        if (Py_IS_INFINITY(v))
            return v > 0 ? _PyHASH_INF : -_PyHASH_INF;
        else
            return _PyHASH_NAN;
    }

    m = frexp(v, &e);

    sign = 1;
    if (m < 0) {
        sign = -1;
        m = -m;
    }

    /* process 28 bits at a time;  this should work well both for binary
       and hexadecimal floating point. */
    x = 0;
    while (m) {
        x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28);
        m *= 268435456.0;  /* 2**28 */
        e -= 28;
        y = (Py_uhash_t)m;  /* pull out integer part */
        m -= y;
        x += y;
        if (x >= _PyHASH_MODULUS)
            x -= _PyHASH_MODULUS;
    }

    /* adjust for the exponent;  first reduce it modulo _PyHASH_BITS */
    e = e >= 0 ? e % _PyHASH_BITS : _PyHASH_BITS-1-((-1-e) % _PyHASH_BITS);
    x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e);

    x = x * sign;
    if (x == (Py_uhash_t)-1)
        x = (Py_uhash_t)-2;
    return (Py_hash_t)x;
}

Py_hash_t
_Py_HashPointer(void *p)
{
    Py_hash_t x;
    size_t y = (size_t)p;
    /* bottom 3 or 4 bits are likely to be 0; rotate y by 4 to avoid
       excessive hash collisions for dicts and sets */
    y = (y >> 4) | (y << (8 * SIZEOF_VOID_P - 4));
    x = (Py_hash_t)y;
    if (x == -1)
        x = -2;
    return x;
}
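
/* Illustrative sketch (hypothetical addresses): heap objects are typically
   8- or 16-byte aligned, so nearby addresses such as 0x...b4a0, 0x...b4b0,
   0x...b4c0 all end in the same zero nibble.  Rotating by 4 moves those
   constant zero bits to the top and puts the distinguishing bits into the
   low positions that small dicts and sets index on first:

      p      = 0x00007f00deadbee0
      result = 0x000007f00deadbee   (the shifted-out nibble was zero)
*/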

Py_hash_t
_Py_HashBytes(const void *src, Py_ssize_t len)
{
    Py_hash_t x;
    /*
      We make the hash of the empty string be 0 rather than (prefix ^ suffix),
      since returning the latter would leak information about the hash secret.
    */
    if (len == 0) {
        return 0;
    }

#ifdef Py_HASH_STATS
    hashstats[(len <= Py_HASH_STATS_MAX) ? len : 0]++;
#endif

#if Py_HASH_CUTOFF > 0
    if (len < Py_HASH_CUTOFF) {
        /* Optimize hashing of very small strings with inline DJBX33A. */
        Py_uhash_t hash;
        const unsigned char *p = src;
        hash = 5381; /* DJBX33A starts with 5381 */

        switch(len) {
            /* ((hash << 5) + hash) + *p == hash * 33 + *p */
            case 7: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 6: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 5: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 4: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 3: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 2: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
            case 1: hash = ((hash << 5) + hash) + *p++; break;
            default:
                assert(0);
        }
        hash ^= len;
        hash ^= (Py_uhash_t) _Py_HashSecret.djbx33a.suffix;
        x = (Py_hash_t)hash;
    }
    else
#endif /* Py_HASH_CUTOFF */
        x = PyHash_Func.hash(src, len);

    if (x == -1)
        return -2;
    return x;
}
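
/* For reference, a sketch of the plain DJBX33A loop that the unrolled switch
   above is equivalent to (hypothetical helper, exposition only, never
   compiled; the len/suffix xors happen after the switch, as shown above): */
#if 0
static Py_uhash_t
djbx33a_ref(const unsigned char *p, Py_ssize_t len)
{
    Py_uhash_t hash = 5381;
    Py_ssize_t i;
    for (i = 0; i < len; i++)
        hash = ((hash << 5) + hash) + p[i];   /* hash = hash * 33 + p[i] */
    return hash;
}
#endif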

void
_PyHash_Fini(void)
{
#ifdef Py_HASH_STATS
    int i;
    Py_ssize_t total = 0;
    char *fmt = "%2i %8" PY_FORMAT_SIZE_T "d %8" PY_FORMAT_SIZE_T "d\n";

    fprintf(stderr, "len   calls    total\n");
    for (i = 1; i <= Py_HASH_STATS_MAX; i++) {
        total += hashstats[i];
        fprintf(stderr, fmt, i, hashstats[i], total);
    }
    total += hashstats[0];
    fprintf(stderr, ">  %8" PY_FORMAT_SIZE_T "d %8" PY_FORMAT_SIZE_T "d\n",
            hashstats[0], total);
#endif
}

PyHash_FuncDef *
PyHash_GetFuncDef(void)
{
    return &PyHash_Func;
}
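
/* Usage sketch (illustrative, never compiled): an embedder could query the
   configured hash function, much as sys.hash_info exposes it at the Python
   level.  The field names assumed here (hash, name, hash_bits, seed_bits)
   follow the PyHash_FuncDef declaration in pyhash.h; the helper name is
   hypothetical. */
#if 0
static void
print_hash_algorithm(void)
{
    PyHash_FuncDef *def = PyHash_GetFuncDef();
    printf("algorithm=%s hash_bits=%d seed_bits=%d\n",
           def->name, def->hash_bits, def->seed_bits);
}
#endif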

/* Optimized memcpy() for Windows */
#ifdef _MSC_VER
#  if SIZEOF_PY_UHASH_T == 4
#    define PY_UHASH_CPY(dst, src) do {                                    \
       dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; \
       } while(0)
#  elif SIZEOF_PY_UHASH_T == 8
#    define PY_UHASH_CPY(dst, src) do {                                    \
       dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; \
       dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; \
       } while(0)
#  else
#    error SIZEOF_PY_UHASH_T must be 4 or 8
#  endif /* SIZEOF_PY_UHASH_T */
#else /* not Windows */
#  define PY_UHASH_CPY(dst, src) memcpy(dst, src, SIZEOF_PY_UHASH_T)
#endif /* _MSC_VER */


#if Py_HASH_ALGORITHM == Py_HASH_FNV
/* **************************************************************************
 * Modified Fowler-Noll-Vo (FNV) hash function
 */
static Py_hash_t
fnv(const void *src, Py_ssize_t len)
{
    const unsigned char *p = src;
    Py_uhash_t x;
    Py_ssize_t remainder, blocks;
    union {
        Py_uhash_t value;
        unsigned char bytes[SIZEOF_PY_UHASH_T];
    } block;

#ifdef Py_DEBUG
    assert(_Py_HashSecret_Initialized);
#endif
    remainder = len % SIZEOF_PY_UHASH_T;
    if (remainder == 0) {
        /* Process at least one block byte by byte to reduce hash collisions
         * for strings with common prefixes. */
        remainder = SIZEOF_PY_UHASH_T;
    }
    blocks = (len - remainder) / SIZEOF_PY_UHASH_T;

    x = (Py_uhash_t) _Py_HashSecret.fnv.prefix;
    x ^= (Py_uhash_t) *p << 7;
    while (blocks--) {
        PY_UHASH_CPY(block.bytes, p);
        x = (_PyHASH_MULTIPLIER * x) ^ block.value;
        p += SIZEOF_PY_UHASH_T;
    }
    /* add remainder */
    for (; remainder > 0; remainder--)
        x = (_PyHASH_MULTIPLIER * x) ^ (Py_uhash_t) *p++;
    x ^= (Py_uhash_t) len;
    x ^= (Py_uhash_t) _Py_HashSecret.fnv.suffix;
    if (x == -1) {
        x = -2;
    }
    return x;
}

static PyHash_FuncDef PyHash_Func = {fnv, "fnv", 8 * SIZEOF_PY_HASH_T,
                                     16 * SIZEOF_PY_HASH_T};

#endif /* Py_HASH_ALGORITHM == Py_HASH_FNV */


#if Py_HASH_ALGORITHM == Py_HASH_SIPHASH24
/* **************************************************************************
 <MIT License>
 Copyright (c) 2013  Marek Majkowski <marek@popcount.org>

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 </MIT License>

 Original location:
    https://github.com/majek/csiphash/

 Solution inspired by code from:
    Samuel Neves (supercop/crypto_auth/siphash24/little)
    djb (supercop/crypto_auth/siphash24/little2)
    Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)

 Modified for Python by Christian Heimes:
    - C89 / MSVC compatibility
    - PY_UINT64_T, PY_UINT32_T and PY_UINT8_T
    - _rotl64() on Windows
    - letoh64() fallback
*/

typedef unsigned char PY_UINT8_T;

/* byte swap little endian to host endian
 * Endian conversion not only ensures that the hash function returns the same
 * value on all platforms; it is also required for a good dispersion of
 * the hash values' least significant bits.
 */
#if PY_LITTLE_ENDIAN
#  define _le64toh(x) ((PY_UINT64_T)(x))
#elif defined(__APPLE__)
#  define _le64toh(x) OSSwapLittleToHostInt64(x)
#elif defined(HAVE_LETOH64)
#  define _le64toh(x) le64toh(x)
#else
#  define _le64toh(x) (((PY_UINT64_T)(x) << 56) | \
                      (((PY_UINT64_T)(x) << 40) & 0xff000000000000ULL) | \
                      (((PY_UINT64_T)(x) << 24) & 0xff0000000000ULL) | \
                      (((PY_UINT64_T)(x) << 8)  & 0xff00000000ULL) | \
                      (((PY_UINT64_T)(x) >> 8)  & 0xff000000ULL) | \
                      (((PY_UINT64_T)(x) >> 24) & 0xff0000ULL) | \
                      (((PY_UINT64_T)(x) >> 40) & 0xff00ULL) | \
                      ((PY_UINT64_T)(x)  >> 56))
#endif
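
/* Worked example (illustrative): on a big-endian host the fallback above is a
 * full byte swap, e.g. _le64toh(0x0102030405060708ULL) == 0x0807060504030201ULL,
 * while on a little-endian host the macro is the identity. */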


#ifdef _MSC_VER
#  define ROTATE(x, b)  _rotl64(x, b)
#else
#  define ROTATE(x, b) (PY_UINT64_T)( ((x) << (b)) | ( (x) >> (64 - (b))) )
#endif

#define HALF_ROUND(a,b,c,d,s,t)         \
    a += b; c += d;             \
    b = ROTATE(b, s) ^ a;           \
    d = ROTATE(d, t) ^ c;           \
    a = ROTATE(a, 32);

#define DOUBLE_ROUND(v0,v1,v2,v3)       \
    HALF_ROUND(v0,v1,v2,v3,13,16);      \
    HALF_ROUND(v2,v1,v0,v3,17,21);      \
    HALF_ROUND(v0,v1,v2,v3,13,16);      \
    HALF_ROUND(v2,v1,v0,v3,17,21);
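
/* DOUBLE_ROUND is two SipRounds: siphash24() below runs it once per 8-byte
   input block (the "2" compression rounds) and twice after mixing in 0xff
   (the "4" finalization rounds), matching the SipHash-2-4 construction. */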


static Py_hash_t
siphash24(const void *src, Py_ssize_t src_sz) {
    PY_UINT64_T k0 = _le64toh(_Py_HashSecret.siphash.k0);
    PY_UINT64_T k1 = _le64toh(_Py_HashSecret.siphash.k1);
    PY_UINT64_T b = (PY_UINT64_T)src_sz << 56;
    const PY_UINT64_T *in = (PY_UINT64_T*)src;

    PY_UINT64_T v0 = k0 ^ 0x736f6d6570736575ULL;
    PY_UINT64_T v1 = k1 ^ 0x646f72616e646f6dULL;
    PY_UINT64_T v2 = k0 ^ 0x6c7967656e657261ULL;
    PY_UINT64_T v3 = k1 ^ 0x7465646279746573ULL;

    PY_UINT64_T t;
    PY_UINT8_T *pt;
    PY_UINT8_T *m;

    while (src_sz >= 8) {
        PY_UINT64_T mi = _le64toh(*in);
        in += 1;
        src_sz -= 8;
        v3 ^= mi;
        DOUBLE_ROUND(v0,v1,v2,v3);
        v0 ^= mi;
    }

    t = 0;
    pt = (PY_UINT8_T *)&t;
    m = (PY_UINT8_T *)in;
    switch (src_sz) {
        case 7: pt[6] = m[6];
        case 6: pt[5] = m[5];
        case 5: pt[4] = m[4];
        case 4: Py_MEMCPY(pt, m, sizeof(PY_UINT32_T)); break;
        case 3: pt[2] = m[2];
        case 2: pt[1] = m[1];
        case 1: pt[0] = m[0];
    }
    b |= _le64toh(t);

    v3 ^= b;
    DOUBLE_ROUND(v0,v1,v2,v3);
    v0 ^= b;
    v2 ^= 0xff;
    DOUBLE_ROUND(v0,v1,v2,v3);
    DOUBLE_ROUND(v0,v1,v2,v3);

    /* modified */
    t = (v0 ^ v1) ^ (v2 ^ v3);
    return (Py_hash_t)t;
}

static PyHash_FuncDef PyHash_Func = {siphash24, "siphash24", 64, 128};

#endif /* Py_HASH_ALGORITHM == Py_HASH_SIPHASH24 */

#ifdef __cplusplus
}
#endif
"hl opt">(p = interp->tstate_head; p != NULL; p = p->next) { if (p->thread_id == id) { /* Tricky: we need to decref the current value * (if any) in p->async_exc, but that can in turn * allow arbitrary Python code to run, including * perhaps calls to this function. To prevent * deadlock, we need to release head_mutex before * the decref. */ PyObject *old_exc = p->async_exc; Py_XINCREF(exc); p->async_exc = exc; HEAD_UNLOCK(); Py_XDECREF(old_exc); return 1; } } HEAD_UNLOCK(); return 0; } /* Routines for advanced debuggers, requested by David Beazley. Don't use unless you know what you are doing! */ PyInterpreterState * PyInterpreterState_Head(void) { return interp_head; } PyInterpreterState * PyInterpreterState_Next(PyInterpreterState *interp) { return interp->next; } PyThreadState * PyInterpreterState_ThreadHead(PyInterpreterState *interp) { return interp->tstate_head; } PyThreadState * PyThreadState_Next(PyThreadState *tstate) { return tstate->next; } /* The implementation of sys._current_frames(). This is intended to be called with the GIL held, as it will be when called via sys._current_frames(). It's possible it would work fine even without the GIL held, but haven't thought enough about that. */ PyObject * _PyThread_CurrentFrames(void) { PyObject *result; PyInterpreterState *i; result = PyDict_New(); if (result == NULL) return NULL; /* for i in all interpreters: * for t in all of i's thread states: * if t's frame isn't NULL, map t's id to its frame * Because these lists can mutute even when the GIL is held, we * need to grab head_mutex for the duration. */ HEAD_LOCK(); for (i = interp_head; i != NULL; i = i->next) { PyThreadState *t; for (t = i->tstate_head; t != NULL; t = t->next) { PyObject *id; int stat; struct _frame *frame = t->frame; if (frame == NULL) continue; id = PyLong_FromLong(t->thread_id); if (id == NULL) goto Fail; stat = PyDict_SetItem(result, id, (PyObject *)frame); Py_DECREF(id); if (stat < 0) goto Fail; } } HEAD_UNLOCK(); return result; Fail: HEAD_UNLOCK(); Py_DECREF(result); return NULL; } /* Python "auto thread state" API. */ #ifdef WITH_THREAD /* Keep this as a static, as it is not reliable! It can only ever be compared to the state for the *current* thread. * If not equal, then it doesn't matter that the actual value may change immediately after comparison, as it can't possibly change to the current thread's state. * If equal, then the current thread holds the lock, so the value can't change until we yield the lock. */ static int PyThreadState_IsCurrent(PyThreadState *tstate) { /* Must be the tstate for this thread */ assert(PyGILState_GetThisThreadState()==tstate); /* On Windows at least, simple reads and writes to 32 bit values are atomic. */ return tstate == _PyThreadState_Current; } /* Internal initialization/finalization functions called by Py_Initialize/Py_Finalize */ void _PyGILState_Init(PyInterpreterState *i, PyThreadState *t) { assert(i && t); /* must init with valid states */ autoTLSkey = PyThread_create_key(); autoInterpreterState = i; assert(PyThread_get_key_value(autoTLSkey) == NULL); assert(t->gilstate_counter == 0); _PyGILState_NoteThreadState(t); } void _PyGILState_Fini(void) { PyThread_delete_key(autoTLSkey); autoTLSkey = 0; autoInterpreterState = NULL; } /* When a thread state is created for a thread by some mechanism other than PyGILState_Ensure, it's important that the GILState machinery knows about it so it doesn't try to create another thread state for the thread (this is a better fix for SF bug #1010677 than the first one attempted). 
*/ static void _PyGILState_NoteThreadState(PyThreadState* tstate) { /* If autoTLSkey is 0, this must be the very first threadstate created in Py_Initialize(). Don't do anything for now (we'll be back here when _PyGILState_Init is called). */ if (!autoTLSkey) return; /* Stick the thread state for this thread in thread local storage. The only situation where you can legitimately have more than one thread state for an OS level thread is when there are multiple interpreters, when: a) You shouldn't really be using the PyGILState_ APIs anyway, and: b) The slightly odd way PyThread_set_key_value works (see comments by its implementation) means that the first thread state created for that given OS level thread will "win", which seems reasonable behaviour. */ if (PyThread_set_key_value(autoTLSkey, (void *)tstate) < 0) Py_FatalError("Couldn't create autoTLSkey mapping"); /* PyGILState_Release must not try to delete this thread state. */ tstate->gilstate_counter = 1; } /* The public functions */ PyThreadState * PyGILState_GetThisThreadState(void) { if (autoInterpreterState == NULL || autoTLSkey == 0) return NULL; return (PyThreadState *)PyThread_get_key_value(autoTLSkey); } PyGILState_STATE PyGILState_Ensure(void) { int current; PyThreadState *tcur; /* Note that we do not auto-init Python here - apart from potential races with 2 threads auto-initializing, pep-311 spells out other issues. Embedders are expected to have called Py_Initialize() and usually PyEval_InitThreads(). */ assert(autoInterpreterState); /* Py_Initialize() hasn't been called! */ tcur = (PyThreadState *)PyThread_get_key_value(autoTLSkey); if (tcur == NULL) { /* Create a new thread state for this thread */ tcur = PyThreadState_New(autoInterpreterState); if (tcur == NULL) Py_FatalError("Couldn't create thread-state for new thread"); /* This is our thread state! We'll need to delete it in the matching call to PyGILState_Release(). */ tcur->gilstate_counter = 0; current = 0; /* new thread state is never current */ } else current = PyThreadState_IsCurrent(tcur); if (current == 0) PyEval_RestoreThread(tcur); /* Update our counter in the thread-state - no need for locks: - tcur will remain valid as we hold the GIL. - the counter is safe as we are the only thread "allowed" to modify this value */ ++tcur->gilstate_counter; return current ? PyGILState_LOCKED : PyGILState_UNLOCKED; } void PyGILState_Release(PyGILState_STATE oldstate) { PyThreadState *tcur = (PyThreadState *)PyThread_get_key_value( autoTLSkey); if (tcur == NULL) Py_FatalError("auto-releasing thread-state, " "but no thread-state for this thread"); /* We must hold the GIL and have our thread state current */ /* XXX - remove the check - the assert should be fine, but while this is very new (April 2003), the extra check by release-only users can't hurt. */ if (! PyThreadState_IsCurrent(tcur)) Py_FatalError("This thread state must be current when releasing"); assert(PyThreadState_IsCurrent(tcur)); --tcur->gilstate_counter; assert(tcur->gilstate_counter >= 0); /* illegal counter value */ /* If we're going to destroy this thread-state, we must * clear it while the GIL is held, as destructors may run. */ if (tcur->gilstate_counter == 0) { /* can't have been locked when we created it */ assert(oldstate == PyGILState_UNLOCKED); PyThreadState_Clear(tcur); /* Delete the thread-state. Note this releases the GIL too! * It's vital that the GIL be held here, to avoid shutdown * races; see bugs 225673 and 1061968 (that nasty bug has a * habit of coming back). 
*/ PyThreadState_DeleteCurrent(); } /* Release the lock if necessary */ else if (oldstate == PyGILState_UNLOCKED) PyEval_SaveThread(); } #ifdef __cplusplus } #endif #endif /* WITH_THREAD */