path: root/Lib/test/test_unicode.py
author     Guido van Rossum <guido@python.org>   2000-04-05 20:11:21 (GMT)
committer  Guido van Rossum <guido@python.org>   2000-04-05 20:11:21 (GMT)
commit     9e896b37c7a554250d7d832566cc4fe7d30d034c (patch)
tree       58692393b51a2102b34f01a01184b6b1e77ea530   /Lib/test/test_unicode.py
parent     457855a5f03ce6637e5ab807deec6331ddab2059 (diff)
Marc-Andre's third try at this bulk patch seems to work (except that
his copy of test_contains.py seems to be broken -- the lines he deleted
were already absent). Checkin messages:

New Unicode support for int(), float(), complex() and long():
- new APIs PyInt_FromUnicode() and PyLong_FromUnicode()
- added support for Unicode to PyFloat_FromString()
- new encoding API PyUnicode_EncodeDecimal() which converts Unicode to a
  decimal char* string (used in the above new APIs)
- shortcuts for calls like int(<int object>) and float(<float obj>)
- tests for all of the above

Unicode compares and contains checks:
- comparing Unicode and non-string types now works; TypeErrors are masked,
  all other errors such as ValueError during Unicode coercion are passed
  through (note that PyUnicode_Compare does not implement the masking --
  PyObject_Compare does this)
- contains now works for non-string types too; TypeErrors are masked and
  0 returned; all other errors are passed through

Better testing support for the standard codecs.

Misc minor enhancements, such as an alias dbcs for the mbcs codec.

Changes:
- PyLong_FromString() now applies the same error checks as does
  PyInt_FromString(): trailing garbage is reported as an error and no
  longer silently ignored. The only characters which may trail the digits
  are 'L' and 'l' -- these are still silently ignored.
- string.ato?() now directly interface to int(), long() and float().
  The error strings are now a little different, but the type still
  remains the same. These functions are now ready to get declared
  obsolete ;-)
- PyNumber_Int() now also does a check for embedded NULL chars in the
  input string; PyNumber_Long() already did this (and still does)

Followed by:

Looks like I've gone a step too far there... (and test_contains.py seems
to have a bug too).

I've changed back to reporting all errors in PyUnicode_Contains() and
added a few more test cases to test_contains.py (plus corrected the
join() NameError).
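For orientation, here is a minimal sketch of the behavior the checkin message
describes, assuming a Python 2.0 interpreter with this patch applied. It uses
the same assert style as the test file; the snippet is illustrative and not
part of the patch itself.

    # Unicode objects are now accepted by the numeric constructors:
    assert int(u'42') == 42
    assert long(u'10000000000') == 10000000000L
    assert float(u'1.5') == 1.5
    assert complex(u'1+2j') == 1+2j

    # PyLong_FromString() now rejects trailing garbage (a lone trailing
    # 'L'/'l' is still ignored, per the checkin message above):
    try:
        long('123abc')
        raise AssertionError('trailing garbage was not rejected')
    except ValueError:
        pass

    # Membership tests over tuples mixing Unicode and non-string items no
    # longer blow up: the TypeError raised while comparing u'a' against 1
    # or None is masked and the search simply continues.
    assert (u'a' in (1, None, 'a')) == 1
    assert ('a' in ('x', 1, None)) == 0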
Diffstat (limited to 'Lib/test/test_unicode.py')
-rw-r--r--   Lib/test/test_unicode.py   101
1 files changed, 93 insertions, 8 deletions
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index f90887a..5c0a063 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -221,15 +221,23 @@ test('translate', u"abababc", u'iiix', {ord('a'):None, ord('b'):ord('i'), ord('c
# Contains:
print 'Testing Unicode contains method...',
-assert ('a' in 'abdb') == 1
-assert ('a' in 'bdab') == 1
-assert ('a' in 'bdaba') == 1
-assert ('a' in 'bdba') == 1
+assert ('a' in u'abdb') == 1
+assert ('a' in u'bdab') == 1
+assert ('a' in u'bdaba') == 1
+assert ('a' in u'bdba') == 1
assert ('a' in u'bdba') == 1
assert (u'a' in u'bdba') == 1
assert (u'a' in u'bdb') == 0
assert (u'a' in 'bdb') == 0
assert (u'a' in 'bdba') == 1
+assert (u'a' in ('a',1,None)) == 1
+assert (u'a' in (1,None,'a')) == 1
+assert (u'a' in (1,None,u'a')) == 1
+assert ('a' in ('a',1,None)) == 1
+assert ('a' in (1,None,'a')) == 1
+assert ('a' in (1,None,u'a')) == 1
+assert ('a' in ('x',1,u'y')) == 0
+assert ('a' in ('x',1,None)) == 0
print 'done.'
# Formatting:
@@ -270,11 +278,88 @@ for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
assert unicode(u.encode(encoding),encoding) == u
u = u''.join(map(unichr, range(256)))
-for encoding in ('latin-1',):
- assert unicode(u.encode(encoding),encoding) == u
+for encoding in (
+ 'latin-1',
+ ):
+ try:
+ assert unicode(u.encode(encoding),encoding) == u
+ except AssertionError:
+ print '*** codec "%s" failed round-trip' % encoding
+ except ValueError,why:
+ print '*** codec for "%s" failed: %s' % (encoding, why)
u = u''.join(map(unichr, range(128)))
-for encoding in ('ascii',):
- assert unicode(u.encode(encoding),encoding) == u
+for encoding in (
+ 'ascii',
+ ):
+ try:
+ assert unicode(u.encode(encoding),encoding) == u
+ except AssertionError:
+ print '*** codec "%s" failed round-trip' % encoding
+ except ValueError,why:
+ print '*** codec for "%s" failed: %s' % (encoding, why)
+
+print 'done.'
+
+print 'Testing standard mapping codecs...',
+
+print '0-127...',
+s = ''.join(map(chr, range(128)))
+for encoding in (
+ 'cp037', 'cp1026',
+ 'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
+ 'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
+ 'cp863', 'cp865', 'cp866',
+ 'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
+ 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
+ 'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
+ 'mac_cyrillic', 'mac_latin2',
+
+ 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
+ 'cp1256', 'cp1257', 'cp1258',
+ 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
+
+ 'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
+ 'cp1006', 'cp875', 'iso8859_8',
+
+ ### These have undefined mappings:
+ #'cp424',
+
+ ):
+ try:
+ assert unicode(s,encoding).encode(encoding) == s
+ except AssertionError:
+ print '*** codec "%s" failed round-trip' % encoding
+ except ValueError,why:
+ print '*** codec for "%s" failed: %s' % (encoding, why)
+
+print '128-255...',
+s = ''.join(map(chr, range(128,256)))
+for encoding in (
+ 'cp037', 'cp1026',
+ 'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
+ 'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
+ 'cp863', 'cp865', 'cp866',
+ 'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
+ 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
+ 'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
+ 'mac_cyrillic', 'mac_latin2',
+
+ ### These have undefined mappings:
+ #'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
+ #'cp1256', 'cp1257', 'cp1258',
+ #'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
+ #'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
+
+ ### These fail the round-trip:
+ #'cp1006', 'cp875', 'iso8859_8',
+
+ ):
+ try:
+ assert unicode(s,encoding).encode(encoding) == s
+ except AssertionError:
+ print '*** codec "%s" failed round-trip' % encoding
+ except ValueError,why:
+ print '*** codec for "%s" failed: %s' % (encoding, why)
print 'done.'
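The round-trip check the patch introduces can also be run in isolation to
probe a single codec. Below is a minimal standalone sketch in the same
Python 2 style as the test file; the roundtrip() helper and the choice of
'latin-1' are illustrative, not part of the patch.

    def roundtrip(encoding, byte_range=range(128)):
        # Decode every byte in the range, re-encode it, and compare with
        # the original -- the same check the added test code performs.
        s = ''.join(map(chr, byte_range))
        try:
            assert unicode(s, encoding).encode(encoding) == s
        except AssertionError:
            print '*** codec "%s" failed round-trip' % encoding
        except ValueError, why:
            print '*** codec for "%s" failed: %s' % (encoding, why)
        else:
            print 'codec "%s" round-trips %d-%d' % (
                encoding, byte_range[0], byte_range[-1])

    roundtrip('latin-1')                    # example codec name
    roundtrip('latin-1', range(128, 256))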