diff options
author | Steve Dower <steve.dower@python.org> | 2019-08-21 23:22:33 (GMT) |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-08-21 23:22:33 (GMT) |
commit | 7ebdda0dbee7df6f0c945a7e1e623e47676e112d (patch) | |
tree | f0f37360dcb65a057f5fb65c8381997cacfa9dee /Objects | |
parent | df0c21ff46c5c37b6913828ef8c7651f523432f8 (diff) | |
download | cpython-7ebdda0dbee7df6f0c945a7e1e623e47676e112d.zip cpython-7ebdda0dbee7df6f0c945a7e1e623e47676e112d.tar.gz cpython-7ebdda0dbee7df6f0c945a7e1e623e47676e112d.tar.bz2 |
bpo-36311: Fixes decoding multibyte characters around chunk boundaries and improves decoding performance (GH-15083)
Diffstat (limited to 'Objects')
-rw-r--r-- | Objects/unicodeobject.c | 16 |
1 file changed, 10 insertions, 6 deletions
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c index 5545eae..aa93377 100644 --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -7186,6 +7186,12 @@ PyUnicode_AsASCIIString(PyObject *unicode) #define NEED_RETRY #endif +/* INT_MAX is the theoretical largest chunk (or INT_MAX / 2 when + transcoding from UTF-16), but INT_MAX / 4 performs better in + both cases also and avoids partial characters overrunning the + length limit in MultiByteToWideChar on Windows */ +#define DECODING_CHUNK_SIZE (INT_MAX/4) + #ifndef WC_ERR_INVALID_CHARS # define WC_ERR_INVALID_CHARS 0x0080 #endif @@ -7422,8 +7428,8 @@ decode_code_page_stateful(int code_page, do { #ifdef NEED_RETRY - if (size > INT_MAX) { - chunk_size = INT_MAX; + if (size > DECODING_CHUNK_SIZE) { + chunk_size = DECODING_CHUNK_SIZE; final = 0; done = 0; } @@ -7827,10 +7833,8 @@ encode_code_page(int code_page, do { #ifdef NEED_RETRY - /* UTF-16 encoding may double the size, so use only INT_MAX/2 - chunks. */ - if (len > INT_MAX/2) { - chunk_len = INT_MAX/2; + if (len > DECODING_CHUNK_SIZE) { + chunk_len = DECODING_CHUNK_SIZE; done = 0; } else |