-rw-r--r--  Doc/library/zlib.rst                    |  31
-rw-r--r--  Lib/test/test_fileio.py                 |  11
-rw-r--r--  Lib/test/test_zlib.py                   |  30
-rw-r--r--  Misc/NEWS                               |   8
-rw-r--r--  Modules/Setup.dist                      |   7
-rw-r--r--  Modules/_decimal/libmpdec/mpdecimal.c   |  40
-rw-r--r--  Modules/_io/fileio.c                    |  12
-rw-r--r--  Modules/md5module.c                     |  10
-rw-r--r--  Modules/sha1module.c                    |   9
-rw-r--r--  Modules/zlibmodule.c                    | 118
-rw-r--r--  Objects/stringlib/localeutil.h          |   2
-rw-r--r--  Python/formatter_unicode.c              |   2
-rw-r--r--  setup.py                                |  13
13 files changed, 221 insertions, 72 deletions
diff --git a/Doc/library/zlib.rst b/Doc/library/zlib.rst
index 1e9a2bc..705f734 100644
--- a/Doc/library/zlib.rst
+++ b/Doc/library/zlib.rst
@@ -58,12 +58,19 @@ The available exception and functions in this module are:
exception if any error occurs.
-.. function:: compressobj([level])
+.. function:: compressobj([level[, method[, wbits[, memlevel[, strategy[, zdict]]]]]])
Returns a compression object, to be used for compressing data streams that won't
- fit into memory at once. *level* is an integer from ``1`` to ``9`` controlling
- the level of compression; ``1`` is fastest and produces the least compression,
- ``9`` is slowest and produces the most. The default value is ``6``.
+ fit into memory at once.
+
+ *level* is an integer from ``1`` to ``9`` controlling the level of
+ compression; ``1`` is fastest and produces the least compression, ``9`` is
+ slowest and produces the most. The default value is ``6``.
+
+ *zdict* is a predefined compression dictionary. This is a sequence of bytes
+ (such as a :class:`bytes` object) containing subsequences that are expected
+ to occur frequently in the data that is to be compressed. Those subsequences
+ that are expected to be most common should come at the end of the dictionary.
.. function:: crc32(data[, value])
@@ -114,11 +121,21 @@ The available exception and functions in this module are:
to :c:func:`malloc`. The default size is 16384.
-.. function:: decompressobj([wbits])
+.. function:: decompressobj([wbits[, zdict]])
Returns a decompression object, to be used for decompressing data streams that
- won't fit into memory at once. The *wbits* parameter controls the size of the
- window buffer.
+ won't fit into memory at once.
+
+ The *wbits* parameter controls the size of the window buffer.
+
+ The *zdict* parameter specifies a predefined compression dictionary. If
+ provided, this must be the same dictionary as was used by the compressor that
+ produced the data that is to be decompressed.
+
+.. note::
+ If *zdict* is a mutable object (such as a :class:`bytearray`), you must not
+ modify its contents between the call to :func:`decompressobj` and the first
+ call to the decompressor's ``decompress()`` method.
Compression objects support the following methods:
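
For reference, the documented zdict round trip looks like this from Python (a minimal sketch, not part of the patch; the dictionary bytes below are invented for illustration, and the behaviour shown matches the new test_zlib tests further down in this changeset):

    import zlib

    # Invented dictionary: byte subsequences expected to recur in the payloads.
    zdict = b"username=;password=;Content-Type: application/json"

    co = zlib.compressobj(zdict=zdict)
    data = b"username=alice;password=hunter2"
    compressed = co.compress(data) + co.flush()

    # Decompression needs the same dictionary...
    dco = zlib.decompressobj(zdict=zdict)
    assert dco.decompress(compressed) + dco.flush() == data

    # ...and fails without it (inflate reports Z_NEED_DICT, surfaced as zlib.error).
    try:
        zlib.decompressobj().decompress(compressed)
    except zlib.error:
        print("dictionary required")
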
diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py
index 9854d0c..99f2069 100644
--- a/Lib/test/test_fileio.py
+++ b/Lib/test/test_fileio.py
@@ -404,6 +404,17 @@ class OtherFileTests(unittest.TestCase):
self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
self.assertEqual(w.warnings, [])
+    def testUnclosedFDOnException(self):
+        class MyException(Exception): pass
+        class MyFileIO(_FileIO):
+            def __setattr__(self, name, value):
+                if name == "name":
+                    raise MyException("blocked setting name")
+                return super(MyFileIO, self).__setattr__(name, value)
+        fd = os.open(__file__, os.O_RDONLY)
+        self.assertRaises(MyException, MyFileIO, fd)
+        os.close(fd)  # should not raise OSError(EBADF)
+
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index 3c982c6..904ac4d 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -425,6 +425,36 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), b"") # Returns nothing
+    def test_dictionary(self):
+        h = HAMLET_SCENE
+        # build a simulated dictionary out of the words in HAMLET
+        words = h.split()
+        random.shuffle(words)
+        zdict = b''.join(words)
+        # use it to compress HAMLET
+        co = zlib.compressobj(zdict=zdict)
+        cd = co.compress(h) + co.flush()
+        # verify that it will decompress with the dictionary
+        dco = zlib.decompressobj(zdict=zdict)
+        self.assertEqual(dco.decompress(cd) + dco.flush(), h)
+        # verify that it fails when not given the dictionary
+        dco = zlib.decompressobj()
+        self.assertRaises(zlib.error, dco.decompress, cd)
+
+    def test_dictionary_streaming(self):
+        # this is simulating the needs of SPDY to be able to reuse the same
+        # stream object (with its compression state) between sets of compressed
+        # headers.
+        co = zlib.compressobj(zdict=HAMLET_SCENE)
+        do = zlib.decompressobj(zdict=HAMLET_SCENE)
+        piece = HAMLET_SCENE[1000:1500]
+        d0 = co.compress(piece) + co.flush(zlib.Z_SYNC_FLUSH)
+        d1 = co.compress(piece[100:]) + co.flush(zlib.Z_SYNC_FLUSH)
+        d2 = co.compress(piece[:-100]) + co.flush(zlib.Z_SYNC_FLUSH)
+        self.assertEqual(do.decompress(d0), piece)
+        self.assertEqual(do.decompress(d1), piece[100:])
+        self.assertEqual(do.decompress(d2), piece[:-100])
+
def test_decompress_incomplete_stream(self):
# This is 'foo', deflated
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
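
The test_dictionary_streaming case above mirrors the SPDY-style usage this feature targets: one long-lived compressor/decompressor pair, with Z_SYNC_FLUSH framing each message so later frames can back-reference earlier ones. A rough sketch of that pattern (the SHARED_DICT contents and the send/receive helpers are illustrative, not from the patch):

    import zlib

    SHARED_DICT = b"content-length content-type accept-encoding user-agent"

    co = zlib.compressobj(zdict=SHARED_DICT)
    do = zlib.decompressobj(zdict=SHARED_DICT)

    def send(headers: bytes) -> bytes:
        # Z_SYNC_FLUSH emits a complete, decodable frame while keeping the
        # compressor's history for subsequent frames.
        return co.compress(headers) + co.flush(zlib.Z_SYNC_FLUSH)

    def receive(frame: bytes) -> bytes:
        return do.decompress(frame)

    first = send(b"content-type: text/html\r\nuser-agent: demo\r\n")
    second = send(b"content-type: text/html\r\ncontent-length: 42\r\n")
    assert receive(first) == b"content-type: text/html\r\nuser-agent: demo\r\n"
    assert receive(second) == b"content-type: text/html\r\ncontent-length: 42\r\n"
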
diff --git a/Misc/NEWS b/Misc/NEWS
index 2f9236f..5ce4f12 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -10,6 +10,9 @@ What's New in Python 3.3.0 Beta 1?
Core and Builtins
-----------------
+- Issue #10053: Don't close FDs when FileIO.__init__ fails. Loosely based on
+ the work by Hirokazu Yamamoto.
+
- Issue #15096: Removed support for ur'' as the raw notation isn't
compatible with Python 2.x's raw unicode strings.
@@ -34,6 +37,9 @@ Core and Builtins
Library
-------
+- Issue #14684: zlib.compressobj() and zlib.decompressobj() now support the use
+ of predefined compression dictionaries. Original patch by Sam Rushing.
+
- Fix GzipFile's handling of filenames given as bytes objects.
- Issue #14772: Return destination values from some shutil functions.
@@ -162,6 +168,8 @@ Tests
Build
-----
+- Issue #14225: Fix Unicode support for curses (#12567) on OS X
+
- Issue #14928: Fix importlib bootstrap issues by using a custom executable
(Modules/_freeze_importlib) to build Python/importlib.h.
diff --git a/Modules/Setup.dist b/Modules/Setup.dist
index 6abafff..f45de5c 100644
--- a/Modules/Setup.dist
+++ b/Modules/Setup.dist
@@ -234,15 +234,14 @@ _symtable symtablemodule.c
# system does not have the OpenSSL libs containing an optimized version.
# The _md5 module implements the RSA Data Security, Inc. MD5
-# Message-Digest Algorithm, described in RFC 1321. The necessary files
-# md5.c and md5.h are included here.
+# Message-Digest Algorithm, described in RFC 1321.
-#_md5 md5module.c md5.c
+#_md5 md5module.c
# The _sha module implements the SHA checksum algorithms.
# (NIST's Secure Hash Algorithms.)
-#_sha shamodule.c
+#_sha1 sha1module.c
#_sha256 sha256module.c
#_sha512 sha512module.c
diff --git a/Modules/_decimal/libmpdec/mpdecimal.c b/Modules/_decimal/libmpdec/mpdecimal.c
index 38756e0..8f0c97b 100644
--- a/Modules/_decimal/libmpdec/mpdecimal.c
+++ b/Modules/_decimal/libmpdec/mpdecimal.c
@@ -6679,7 +6679,7 @@ mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b,
mpd_context_t workctx;
MPD_NEW_STATIC(btmp,0,0,0,0);
MPD_NEW_STATIC(q,0,0,0,0);
- mpd_ssize_t expdiff, floordigits;
+ mpd_ssize_t expdiff, qdigits;
int cmp, isodd, allnine;
if (mpd_isspecial(a) || mpd_isspecial(b)) {
@@ -6716,53 +6716,45 @@ mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b,
b = &btmp;
}
- workctx = *ctx;
- workctx.prec = a->digits;
- workctx.prec = (workctx.prec > ctx->prec) ? workctx.prec : ctx->prec;
-
- _mpd_qdivmod(&q, r, a, b, &workctx, status);
- if (mpd_isnan(&q) || mpd_isnan(r) || q.digits > ctx->prec) {
- mpd_seterror(r, MPD_Division_impossible, status);
+ _mpd_qdivmod(&q, r, a, b, ctx, status);
+ if (mpd_isnan(&q) || mpd_isnan(r)) {
goto finish;
}
if (mpd_iszerocoeff(r)) {
goto finish;
}
- /* Deal with cases like rmnx078:
- * remaindernear 999999999.5 1 -> NaN Division_impossible */
expdiff = mpd_adjexp(b) - mpd_adjexp(r);
if (-1 <= expdiff && expdiff <= 1) {
- mpd_qtrunc(&q, &q, &workctx, &workctx.status);
allnine = mpd_coeff_isallnine(&q);
- floordigits = q.digits;
+ qdigits = q.digits;
isodd = mpd_isodd(&q);
mpd_maxcontext(&workctx);
if (mpd_sign(a) == mpd_sign(b)) {
+ /* sign(r) == sign(b) */
_mpd_qsub(&q, r, b, &workctx, &workctx.status);
- if (workctx.status&MPD_Errors) {
- mpd_seterror(r, workctx.status&MPD_Errors, status);
- goto finish;
- }
}
else {
+ /* sign(r) != sign(b) */
_mpd_qadd(&q, r, b, &workctx, &workctx.status);
- if (workctx.status&MPD_Errors) {
- mpd_seterror(r, workctx.status&MPD_Errors, status);
- goto finish;
- }
}
- cmp = mpd_cmp_total_mag(&q, r);
+ if (workctx.status&MPD_Errors) {
+ mpd_seterror(r, workctx.status&MPD_Errors, status);
+ goto finish;
+ }
+
+ cmp = _mpd_cmp_abs(&q, r);
if (cmp < 0 || (cmp == 0 && isodd)) {
- if (allnine && floordigits == ctx->prec) {
+ /* abs(r) > abs(b)/2 or abs(r) == abs(b)/2 and isodd(quotient) */
+ if (allnine && qdigits == ctx->prec) {
+ /* abs(quotient) + 1 == 10**prec */
mpd_seterror(r, MPD_Division_impossible, status);
goto finish;
}
mpd_qcopy(r, &q, status);
- *status &= ~MPD_Rounded;
}
}
@@ -7088,7 +7080,7 @@ _mpd_qreciprocal(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
* q, r = divmod(coeff(a), coeff(b))
*
* Strategy: Multiply the dividend by the reciprocal of the divisor. The
- * inexact result is fixed by a small loop, using at most 2 iterations.
+ * inexact result is fixed by a small loop, using at most one iteration.
*
* ACL2 proofs:
* ------------
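
The rmnx078 case referenced in the removed comment (remaindernear 999999999.5 1 -> NaN Division_impossible) is what the new `allnine && qdigits == ctx->prec` check still guards against: incrementing an all-nines quotient would need prec+1 digits. The same semantics can be observed through the decimal module, which libmpdec backs (illustrative sketch, not part of the patch):

    from decimal import Context, Decimal, InvalidOperation

    ctx = Context(prec=9)

    # Ordinary case: the result is the remainder closest to zero.
    assert ctx.remainder_near(Decimal("10"), Decimal("6")) == Decimal("-2")

    # Degenerate case: the rounded quotient of 999999999.5 / 1 would be
    # 10**9, which does not fit in 9 digits, so the operation is
    # "division impossible" and InvalidOperation is raised.
    try:
        ctx.remainder_near(Decimal("999999999.5"), Decimal("1"))
    except InvalidOperation:
        print("Division impossible")
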
diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c
index 726d17b..31e2994 100644
--- a/Modules/_io/fileio.c
+++ b/Modules/_io/fileio.c
@@ -227,6 +227,7 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
int flags = 0;
int fd = -1;
int closefd = 1;
+ int fd_is_own = 0;
assert(PyFileIO_Check(oself));
if (self->fd >= 0) {
@@ -376,6 +377,7 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
#endif
self->fd = open(name, flags, 0666);
Py_END_ALLOW_THREADS
+ fd_is_own = 1;
} else {
PyObject *fdobj = PyObject_CallFunction(
opener, "Oi", nameobj, flags);
@@ -393,6 +395,7 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
if (self->fd == -1) {
goto error;
}
+ fd_is_own = 1;
}
if (self->fd < 0) {
@@ -421,13 +424,8 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
end of file (otherwise, it might be done only on the
first write()). */
PyObject *pos = portable_lseek(self->fd, NULL, 2);
- if (pos == NULL) {
- if (closefd) {
- close(self->fd);
- self->fd = -1;
- }
+ if (pos == NULL)
goto error;
- }
Py_DECREF(pos);
}
@@ -435,6 +433,8 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
error:
ret = -1;
+ if (!fd_is_own)
+ self->fd = -1;
if (self->fd >= 0)
internal_close(self);
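
The fd_is_own flag means a descriptor supplied by the caller is left open if fileio_init fails after storing it; only descriptors the constructor opened itself are closed on the error path. That is the behaviour the new testUnclosedFDOnException exercises; a standalone sketch of the same scenario (the ExplodingFileIO subclass is illustrative, not part of the patch):

    import io, os

    # Simulate a failure partway through FileIO.__init__: blocking the
    # "name" attribute makes the constructor raise after storing the fd.
    class ExplodingFileIO(io.FileIO):
        def __setattr__(self, name, value):
            if name == "name":
                raise RuntimeError("simulated failure during __init__")
            super().__setattr__(name, value)

    fd = os.open(__file__, os.O_RDONLY)
    try:
        ExplodingFileIO(fd)   # fails, but must not close the caller's fd
    except RuntimeError:
        pass
    os.close(fd)              # with this fix, no longer raises EBADF
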
diff --git a/Modules/md5module.c b/Modules/md5module.c
index ee44c48..e2681a8 100644
--- a/Modules/md5module.c
+++ b/Modules/md5module.c
@@ -210,7 +210,8 @@ static void md5_compress(struct md5_state *md5, unsigned char *buf)
Initialize the hash state
@param sha1 The hash state you wish to initialize
*/
-void md5_init(struct md5_state *md5)
+static void
+md5_init(struct md5_state *md5)
{
assert(md5 != NULL);
md5->state[0] = 0x67452301UL;
@@ -227,8 +228,8 @@ void md5_init(struct md5_state *md5)
@param in The data to hash
@param inlen The length of the data (octets)
*/
-void md5_process(struct md5_state *md5,
- const unsigned char *in, Py_ssize_t inlen)
+static void
+md5_process(struct md5_state *md5, const unsigned char *in, Py_ssize_t inlen)
{
Py_ssize_t n;
@@ -262,7 +263,8 @@ void md5_process(struct md5_state *md5,
@param sha1 The hash state
@param out [out] The destination of the hash (16 bytes)
*/
-void md5_done(struct md5_state *md5, unsigned char *out)
+static void
+md5_done(struct md5_state *md5, unsigned char *out)
{
int i;
diff --git a/Modules/sha1module.c b/Modules/sha1module.c
index daea887..a733c4b 100644
--- a/Modules/sha1module.c
+++ b/Modules/sha1module.c
@@ -184,7 +184,8 @@ static void sha1_compress(struct sha1_state *sha1, unsigned char *buf)
Initialize the hash state
@param sha1 The hash state you wish to initialize
*/
-void sha1_init(struct sha1_state *sha1)
+static void
+sha1_init(struct sha1_state *sha1)
{
assert(sha1 != NULL);
sha1->state[0] = 0x67452301UL;
@@ -202,7 +203,8 @@ void sha1_init(struct sha1_state *sha1)
@param in The data to hash
@param inlen The length of the data (octets)
*/
-void sha1_process(struct sha1_state *sha1,
+static void
+sha1_process(struct sha1_state *sha1,
const unsigned char *in, Py_ssize_t inlen)
{
Py_ssize_t n;
@@ -237,7 +239,8 @@ void sha1_process(struct sha1_state *sha1,
@param sha1 The hash state
@param out [out] The destination of the hash (20 bytes)
*/
-void sha1_done(struct sha1_state *sha1, unsigned char *out)
+static void
+sha1_done(struct sha1_state *sha1, unsigned char *out)
{
int i;
diff --git a/Modules/zlibmodule.c b/Modules/zlibmodule.c
index a6da056..102740b 100644
--- a/Modules/zlibmodule.c
+++ b/Modules/zlibmodule.c
@@ -45,6 +45,7 @@ typedef struct
PyObject *unconsumed_tail;
char eof;
int is_initialised;
+ PyObject *zdict;
#ifdef WITH_THREAD
PyThread_type_lock lock;
#endif
@@ -80,14 +81,21 @@ zlib_error(z_stream zst, int err, char *msg)
}
PyDoc_STRVAR(compressobj__doc__,
-"compressobj([level]) -- Return a compressor object.\n"
+"compressobj([level[, method[, wbits[, memlevel[, strategy[, zdict]]]]]])\n"
+" -- Return a compressor object.\n"
"\n"
-"Optional arg level is the compression level, in 1-9.");
+"Optional arg level is the compression level, in 1-9.\n"
+"\n"
+"Optional arg zdict is the predefined compression dictionary - a sequence of\n"
+"bytes containing subsequences that are likely to occur in the input data.");
PyDoc_STRVAR(decompressobj__doc__,
-"decompressobj([wbits]) -- Return a decompressor object.\n"
+"decompressobj([wbits[, zdict]]) -- Return a decompressor object.\n"
+"\n"
+"Optional arg wbits is the window buffer size.\n"
"\n"
-"Optional arg wbits is the window buffer size.");
+"Optional arg zdict is the predefined compression dictionary. This must be\n"
+"the same dictionary as used by the compressor that produced the input data.");
static compobject *
newcompobject(PyTypeObject *type)
@@ -98,6 +106,7 @@ newcompobject(PyTypeObject *type)
return NULL;
self->eof = 0;
self->is_initialised = 0;
+ self->zdict = NULL;
self->unused_data = PyBytes_FromStringAndSize("", 0);
if (self->unused_data == NULL) {
Py_DECREF(self);
@@ -316,19 +325,24 @@ PyZlib_decompress(PyObject *self, PyObject *args)
}
static PyObject *
-PyZlib_compressobj(PyObject *selfptr, PyObject *args)
+PyZlib_compressobj(PyObject *selfptr, PyObject *args, PyObject *kwargs)
{
compobject *self;
int level=Z_DEFAULT_COMPRESSION, method=DEFLATED;
int wbits=MAX_WBITS, memLevel=DEF_MEM_LEVEL, strategy=0, err;
-
- if (!PyArg_ParseTuple(args, "|iiiii:compressobj", &level, &method, &wbits,
- &memLevel, &strategy))
+ Py_buffer zdict;
+ static char *kwlist[] = {"level", "method", "wbits",
+ "memLevel", "strategy", "zdict", NULL};
+
+ zdict.buf = NULL; /* Sentinel, so we can tell whether zdict was supplied. */
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iiiiiy*:compressobj",
+ kwlist, &level, &method, &wbits,
+ &memLevel, &strategy, &zdict))
return NULL;
self = newcompobject(&Comptype);
if (self==NULL)
- return(NULL);
+ goto error;
self->zst.zalloc = (alloc_func)NULL;
self->zst.zfree = (free_func)Z_NULL;
self->zst.next_in = NULL;
@@ -337,30 +351,58 @@ PyZlib_compressobj(PyObject *selfptr, PyObject *args)
switch(err) {
case (Z_OK):
self->is_initialised = 1;
- return (PyObject*)self;
+ if (zdict.buf == NULL) {
+ goto success;
+ } else {
+ err = deflateSetDictionary(&self->zst, zdict.buf, zdict.len);
+ switch (err) {
+ case (Z_OK):
+ goto success;
+ case (Z_STREAM_ERROR):
+ PyErr_SetString(PyExc_ValueError, "Invalid dictionary");
+ goto error;
+ default:
+ PyErr_SetString(PyExc_ValueError, "deflateSetDictionary()");
+ goto error;
+ }
+ }
case (Z_MEM_ERROR):
- Py_DECREF(self);
PyErr_SetString(PyExc_MemoryError,
"Can't allocate memory for compression object");
- return NULL;
+ goto error;
case(Z_STREAM_ERROR):
- Py_DECREF(self);
PyErr_SetString(PyExc_ValueError, "Invalid initialization option");
- return NULL;
+ goto error;
default:
zlib_error(self->zst, err, "while creating compression object");
- Py_DECREF(self);
- return NULL;
+ goto error;
}
+
+ error:
+ Py_XDECREF(self);
+ self = NULL;
+ success:
+ if (zdict.buf != NULL)
+ PyBuffer_Release(&zdict);
+ return (PyObject*)self;
}
static PyObject *
-PyZlib_decompressobj(PyObject *selfptr, PyObject *args)
+PyZlib_decompressobj(PyObject *selfptr, PyObject *args, PyObject *kwargs)
{
+ static char *kwlist[] = {"wbits", "zdict", NULL};
int wbits=DEF_WBITS, err;
compobject *self;
- if (!PyArg_ParseTuple(args, "|i:decompressobj", &wbits))
+ PyObject *zdict=NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO:decompressobj",
+ kwlist, &wbits, &zdict))
+ return NULL;
+ if (zdict != NULL && !PyObject_CheckBuffer(zdict)) {
+ PyErr_SetString(PyExc_TypeError,
+ "zdict argument must support the buffer protocol");
return NULL;
+ }
self = newcompobject(&Decomptype);
if (self == NULL)
@@ -369,6 +411,10 @@ PyZlib_decompressobj(PyObject *selfptr, PyObject *args)
self->zst.zfree = (free_func)Z_NULL;
self->zst.next_in = NULL;
self->zst.avail_in = 0;
+ if (zdict != NULL) {
+ Py_INCREF(zdict);
+ self->zdict = zdict;
+ }
err = inflateInit2(&self->zst, wbits);
switch(err) {
case (Z_OK):
@@ -398,6 +444,7 @@ Dealloc(compobject *self)
#endif
Py_XDECREF(self->unused_data);
Py_XDECREF(self->unconsumed_tail);
+ Py_XDECREF(self->zdict);
PyObject_Del(self);
}
@@ -557,6 +604,27 @@ PyZlib_objdecompress(compobject *self, PyObject *args)
err = inflate(&(self->zst), Z_SYNC_FLUSH);
Py_END_ALLOW_THREADS
+ if (err == Z_NEED_DICT && self->zdict != NULL) {
+ Py_buffer zdict_buf;
+ if (PyObject_GetBuffer(self->zdict, &zdict_buf, PyBUF_SIMPLE) == -1) {
+ Py_DECREF(RetVal);
+ RetVal = NULL;
+ goto error;
+ }
+ err = inflateSetDictionary(&(self->zst), zdict_buf.buf, zdict_buf.len);
+ PyBuffer_Release(&zdict_buf);
+ if (err != Z_OK) {
+ zlib_error(self->zst, err, "while decompressing data");
+ Py_DECREF(RetVal);
+ RetVal = NULL;
+ goto error;
+ }
+ /* repeat the call to inflate! */
+ Py_BEGIN_ALLOW_THREADS
+ err = inflate(&(self->zst), Z_SYNC_FLUSH);
+ Py_END_ALLOW_THREADS
+ }
+
/* While Z_OK and the output buffer is full, there might be more output.
So extend the output buffer and try again.
*/
@@ -770,10 +838,13 @@ PyZlib_copy(compobject *self)
}
Py_INCREF(self->unused_data);
Py_INCREF(self->unconsumed_tail);
+ Py_XINCREF(self->zdict);
Py_XDECREF(retval->unused_data);
Py_XDECREF(retval->unconsumed_tail);
+ Py_XDECREF(retval->zdict);
retval->unused_data = self->unused_data;
retval->unconsumed_tail = self->unconsumed_tail;
+ retval->zdict = self->zdict;
retval->eof = self->eof;
/* Mark it as being initialized */
@@ -822,10 +893,13 @@ PyZlib_uncopy(compobject *self)
Py_INCREF(self->unused_data);
Py_INCREF(self->unconsumed_tail);
+ Py_XINCREF(self->zdict);
Py_XDECREF(retval->unused_data);
Py_XDECREF(retval->unconsumed_tail);
+ Py_XDECREF(retval->zdict);
retval->unused_data = self->unused_data;
retval->unconsumed_tail = self->unconsumed_tail;
+ retval->zdict = self->zdict;
retval->eof = self->eof;
/* Mark it as being initialized */
@@ -1032,13 +1106,13 @@ static PyMethodDef zlib_methods[] =
adler32__doc__},
{"compress", (PyCFunction)PyZlib_compress, METH_VARARGS,
compress__doc__},
- {"compressobj", (PyCFunction)PyZlib_compressobj, METH_VARARGS,
+ {"compressobj", (PyCFunction)PyZlib_compressobj, METH_VARARGS|METH_KEYWORDS,
compressobj__doc__},
{"crc32", (PyCFunction)PyZlib_crc32, METH_VARARGS,
crc32__doc__},
{"decompress", (PyCFunction)PyZlib_decompress, METH_VARARGS,
decompress__doc__},
- {"decompressobj", (PyCFunction)PyZlib_decompressobj, METH_VARARGS,
+ {"decompressobj", (PyCFunction)PyZlib_decompressobj, METH_VARARGS|METH_KEYWORDS,
decompressobj__doc__},
{NULL, NULL}
};
@@ -1112,10 +1186,10 @@ PyDoc_STRVAR(zlib_module_documentation,
"\n"
"adler32(string[, start]) -- Compute an Adler-32 checksum.\n"
"compress(string[, level]) -- Compress string, with compression level in 1-9.\n"
-"compressobj([level]) -- Return a compressor object.\n"
+"compressobj([level[, ...]]) -- Return a compressor object.\n"
"crc32(string[, start]) -- Compute a CRC-32 checksum.\n"
"decompress(string,[wbits],[bufsize]) -- Decompresses a compressed string.\n"
-"decompressobj([wbits]) -- Return a decompressor object.\n"
+"decompressobj([wbits[, zdict]]]) -- Return a decompressor object.\n"
"\n"
"'wbits' is window buffer size.\n"
"Compressor objects support compress() and flush() methods; decompressor\n"
diff --git a/Objects/stringlib/localeutil.h b/Objects/stringlib/localeutil.h
index 28c87c8..6e2f073 100644
--- a/Objects/stringlib/localeutil.h
+++ b/Objects/stringlib/localeutil.h
@@ -99,7 +99,7 @@ STRINGLIB(fill)(STRINGLIB_CHAR **digits_end, STRINGLIB_CHAR **buffer_end,
* As closely as possible, this code mimics the logic in decimal.py's
_insert_thousands_sep().
**/
-Py_ssize_t
+static Py_ssize_t
STRINGLIB(InsertThousandsGrouping)(
STRINGLIB_CHAR *buffer,
Py_ssize_t n_buffer,
diff --git a/Python/formatter_unicode.c b/Python/formatter_unicode.c
index cd66670..4b0fd91 100644
--- a/Python/formatter_unicode.c
+++ b/Python/formatter_unicode.c
@@ -1347,7 +1347,7 @@ done:
/************************************************************************/
/*********** built in formatters ****************************************/
/************************************************************************/
-int
+static int
format_obj(PyObject *obj, _PyUnicodeWriter *writer)
{
PyObject *str;
diff --git a/setup.py b/setup.py
index 3103752..538cf58 100644
--- a/setup.py
+++ b/setup.py
@@ -1185,6 +1185,18 @@ class PyBuildExt(build_ext):
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
+ if platform == 'darwin':
+ # On OS X, there is no separate /usr/lib/libncursesw nor
+ # libpanelw. If we are here, we found a locally-supplied
+ # version of libncursesw. There should also be a
+ # libpanelw. _XOPEN_SOURCE defines are usually excluded
+ # for OS X but we need _XOPEN_SOURCE_EXTENDED here for
+ # ncurses wide char support
+ curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
+ elif platform == 'darwin' and curses_library == 'ncurses':
+ # Building with the system-supplied combined libncurses/libpanel
+ curses_defines.append(('HAVE_NCURSESW', '1'))
+ curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
if curses_library.startswith('ncurses'):
curses_libs = [curses_library]
@@ -1213,6 +1225,7 @@ class PyBuildExt(build_ext):
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
include_dirs=curses_includes,
+ define_macros=curses_defines,
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')