author     Antoine Pitrou <solipsis@pitrou.net>  2010-06-11 21:48:02 (GMT)
committer  Antoine Pitrou <solipsis@pitrou.net>  2010-06-11 21:48:02 (GMT)
commit     4595e518178f547966fab079351a77d11566cf33 (patch)
tree       9e127ec23cd12c2ec6b83fe738ef21ff61b3cf81
parent     59c9fa106aebd96bc36ae39bfa68059028f235ef (diff)
download   cpython-4595e518178f547966fab079351a77d11566cf33.zip
           cpython-4595e518178f547966fab079351a77d11566cf33.tar.gz
           cpython-4595e518178f547966fab079351a77d11566cf33.tar.bz2
Merged revisions 81907 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk

........
  r81907 | antoine.pitrou | 2010-06-11 23:42:26 +0200 (Fri, 11 Jun 2010) | 5 lines

  Issue #8941: decoding big endian UTF-32 data in UCS-2 builds could crash
  the interpreter with characters outside the Basic Multilingual Plane
  (higher than 0x10000).
........
-rw-r--r--  Lib/test/test_codecs.py  25
-rw-r--r--  Misc/NEWS                 4
-rw-r--r--  Objects/unicodeobject.c  40
3 files changed, 50 insertions, 19 deletions
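
The regression tests added below decode 1024 copies of U+10000 (the first code point outside the BMP) in both byte orders. A minimal stand-alone check along the same lines, written for Python 2 like the tests themselves; before this fix, the big endian cases could overrun the result buffer and crash on a narrow (UCS-2) build:

    import codecs

    # U+10000 encoded as UTF-32-BE and UTF-32-LE, 1024 times each.
    encoded_be = '\x00\x01\x00\x00' * 1024
    encoded_le = '\x00\x00\x01\x00' * 1024

    # Each decode function returns (decoded text, bytes consumed).
    decoded_be, consumed = codecs.utf_32_be_decode(encoded_be)
    decoded_le, consumed = codecs.utf_32_le_decode(encoded_le)

    assert decoded_be == u'\U00010000' * 1024
    assert decoded_le == u'\U00010000' * 1024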
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index a46edae..4d03ae7 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -315,6 +315,16 @@ class UTF32Test(ReadTest):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
"\xff", "strict", True)
+ def test_issue8941(self):
+ # Issue #8941: insufficient result allocation when decoding into
+ # surrogate pairs on UCS-2 builds.
+ encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
+ self.assertEqual(u'\U00010000' * 1024,
+ codecs.utf_32_decode(encoded_le)[0])
+ encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
+ self.assertEqual(u'\U00010000' * 1024,
+ codecs.utf_32_decode(encoded_be)[0])
+
class UTF32LETest(ReadTest):
encoding = "utf-32-le"
@@ -348,6 +358,13 @@ class UTF32LETest(ReadTest):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
"\xff", "strict", True)
+ def test_issue8941(self):
+ # Issue #8941: insufficient result allocation when decoding into
+ # surrogate pairs on UCS-2 builds.
+ encoded = '\x00\x00\x01\x00' * 1024
+ self.assertEqual(u'\U00010000' * 1024,
+ codecs.utf_32_le_decode(encoded)[0])
+
class UTF32BETest(ReadTest):
encoding = "utf-32-be"
@@ -381,6 +398,14 @@ class UTF32BETest(ReadTest):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
"\xff", "strict", True)
+ def test_issue8941(self):
+ # Issue #8941: insufficient result allocation when decoding into
+ # surrogate pairs on UCS-2 builds.
+ encoded = '\x00\x01\x00\x00' * 1024
+ self.assertEqual(u'\U00010000' * 1024,
+ codecs.utf_32_be_decode(encoded)[0])
+
+
class UTF16Test(ReadTest):
encoding = "utf-16"
diff --git a/Misc/NEWS b/Misc/NEWS
index 88a148a..7ab2d91 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,10 @@ What's New in Python 2.6.6 alpha 1?
Core and Builtins
-----------------
+- Issue #8941: decoding big endian UTF-32 data in UCS-2 builds could crash
+ the interpreter with characters outside the Basic Multilingual Plane
+ (higher than 0x10000).
+
- Issue #8627: Remove bogus "Overriding __cmp__ blocks inheritance of
__hash__ in 3.x" warning. Also fix "XXX undetected error" that
arises from the "Overriding __eq__ blocks inheritance ..." warning
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index 111f9bb..47249cb 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -2085,11 +2085,11 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
PyUnicodeObject *unicode;
Py_UNICODE *p;
#ifndef Py_UNICODE_WIDE
- int i, pairs;
+ int pairs = 0;
#else
const int pairs = 0;
#endif
- const unsigned char *q, *e;
+ const unsigned char *q, *e, *qq;
int bo = 0; /* assume native ordering by default */
const char *errmsg = "";
/* Offsets from q for retrieving bytes in the right order. */
@@ -2100,23 +2100,7 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
#endif
PyObject *errorHandler = NULL;
PyObject *exc = NULL;
- /* On narrow builds we split characters outside the BMP into two
- codepoints => count how much extra space we need. */
-#ifndef Py_UNICODE_WIDE
- for (i = pairs = 0; i < size/4; i++)
- if (((Py_UCS4 *)s)[i] >= 0x10000)
- pairs++;
-#endif
-
- /* This might be one to much, because of a BOM */
- unicode = _PyUnicode_New((size+3)/4+pairs);
- if (!unicode)
- return NULL;
- if (size == 0)
- return (PyObject *)unicode;
-
- /* Unpack UTF-32 encoded data */
- p = unicode->str;
+
q = (unsigned char *)s;
e = q + size;
@@ -2168,6 +2152,24 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
iorder[3] = 0;
}
+ /* On narrow builds we split characters outside the BMP into two
+ codepoints => count how much extra space we need. */
+#ifndef Py_UNICODE_WIDE
+ for (qq = q; qq < e; qq += 4)
+ if (qq[iorder[2]] != 0 || qq[iorder[3]] != 0)
+ pairs++;
+#endif
+
+ /* This might be one to much, because of a BOM */
+ unicode = _PyUnicode_New((size+3)/4+pairs);
+ if (!unicode)
+ return NULL;
+ if (size == 0)
+ return (PyObject *)unicode;
+
+ /* Unpack UTF-32 encoded data */
+ p = unicode->str;
+
while (q < e) {
Py_UCS4 ch;
/* remaining bytes at the end? (size should be divisible by 4) */
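
The C change above moves the extra-allocation pass to after the BOM and byte-order handling: rather than reinterpreting the input as native-endian Py_UCS4 values, it walks the raw bytes and counts a surrogate pair whenever either of the two high-order bytes of a unit (located through iorder) is non-zero. A rough Python rendering of that counting step, for illustration only (the helper name and stand-alone form are not part of the patch):

    def count_surrogate_pairs(data, iorder):
        # A UTF-32 unit needs a surrogate pair on a narrow build exactly
        # when its upper 16 bits are non-zero, i.e. when the byte at
        # iorder[2] or at iorder[3] is non-zero.
        pairs = 0
        for off in range(0, len(data) // 4 * 4, 4):
            unit = data[off:off + 4]
            if ord(unit[iorder[2]]) != 0 or ord(unit[iorder[3]]) != 0:
                pairs += 1
        return pairs

    # Big endian input uses iorder = [3, 2, 1, 0], so the high-order
    # bytes of each unit sit at offsets 1 and 0.
    assert count_surrogate_pairs('\x00\x01\x00\x00' * 1024, [3, 2, 1, 0]) == 1024
    # Little endian input uses iorder = [0, 1, 2, 3].
    assert count_surrogate_pairs('\x00\x00\x01\x00' * 1024, [0, 1, 2, 3]) == 1024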