author     Christian Heimes <christian@cheimes.de>     2008-03-28 00:55:15 (GMT)
committer  Christian Heimes <christian@cheimes.de>     2008-03-28 00:55:15 (GMT)
commit     ba4af493a5bcece67bc6ae369bfea0592b10f9e5 (patch)
tree       bf07bc2752cd8020a1b9bbcac2e6489b3843a0ce /Lib
parent     3a932128246bccd4a9c3fc3ae056341e1b1068e0 (diff)
Merged revisions 61964-61979 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk
........
r61964 | benjamin.peterson | 2008-03-27 01:25:33 +0100 (Thu, 27 Mar 2008) | 2 lines
add commas for introductory clauses
........
r61965 | christian.heimes | 2008-03-27 02:36:21 +0100 (Thu, 27 Mar 2008) | 1 line
Hopefully added _fileio module to the Windows build system
........
r61966 | christian.heimes | 2008-03-27 02:38:47 +0100 (Thu, 27 Mar 2008) | 1 line
Revert commit accident
........
r61967 | neal.norwitz | 2008-03-27 04:49:54 +0100 (Thu, 27 Mar 2008) | 3 lines
Fix bytes so it works on 64-bit platforms.
(Also remove some #if 0 code that is already handled in _getbytevalue.)
........
r61968 | neal.norwitz | 2008-03-27 05:40:07 +0100 (Thu, 27 Mar 2008) | 1 line
Fix memory leaks
........
r61969 | neal.norwitz | 2008-03-27 05:40:50 +0100 (Thu, 27 Mar 2008) | 3 lines
Fix warnings about using char as an array subscript. This is not portable
since char is signed on some platforms and unsigned on others.
........
r61970 | neal.norwitz | 2008-03-27 06:02:57 +0100 (Thu, 27 Mar 2008) | 1 line
Fix test_compiler after adding unicode_literals
........
r61971 | neal.norwitz | 2008-03-27 06:03:11 +0100 (Thu, 27 Mar 2008) | 1 line
Fix compiler warnings
........
r61972 | neal.norwitz | 2008-03-27 07:52:01 +0100 (Thu, 27 Mar 2008) | 1 line
Plurals only need one s, not 2 (intss -> ints)
........
r61973 | christian.heimes | 2008-03-27 10:02:33 +0100 (Thu, 27 Mar 2008) | 1 line
Quick 'n dirty hack: Increase the magic by 2 to force a rebuild of pyc/pyo files on the build bots
........
r61974 | eric.smith | 2008-03-27 10:42:35 +0100 (Thu, 27 Mar 2008) | 3 lines
Added test cases for single quoted strings, both forms of triple quotes,
and some string concatenations.
Removed unneeded __future__ print_function import.
........
r61975 | christian.heimes | 2008-03-27 11:35:52 +0100 (Thu, 27 Mar 2008) | 1 line
Build bots are working again - removing the hack
........
r61976 | christian.heimes | 2008-03-27 12:46:37 +0100 (Thu, 27 Mar 2008) | 2 lines
Fixed tokenize tests
The tokenize module doesn't understand __future__.unicode_literals yet
........
r61977 | georg.brandl | 2008-03-27 14:27:31 +0100 (Thu, 27 Mar 2008) | 2 lines
#2248: return result of QUIT from quit().
........
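A minimal sketch of what r61977 enables, using a placeholder host name: quit() now returns the reply from docmd("quit"), typically a (code, message) pair, instead of discarding it.

    import smtplib

    # Hypothetical host; any reachable SMTP server would do.
    server = smtplib.SMTP("mail.example.com")
    reply = server.quit()   # with r61977 the QUIT reply is returned to the caller
    print(reply)            # e.g. a 221 status code plus the server's goodbye text
........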
r61978 | georg.brandl | 2008-03-27 14:34:59 +0100 (Thu, 27 Mar 2008) | 2 lines
The bug for which there was a test in outstanding_bugs.py was agreed not to be a bug.
........
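For context, the removed outstanding_bugs.py test matched two strings dominated by a single character; the reported behaviour appears to stem from SequenceMatcher's deliberate heuristic of dropping very frequent ("popular") elements from its index of the second sequence, which is why the report was closed as not-a-bug. A rough sketch of the scenario the deleted test exercised:

    import difflib

    # When 'b' makes up almost all of text2, SequenceMatcher treats it as a
    # "popular" element and ignores it, so the reported longest match can
    # shrink to length 0 even though a long common run of 'b' exists.
    for i in (190, 200, 210):
        text1 = "a" + "b" * i
        text2 = "b" * i + "c"
        m = difflib.SequenceMatcher(None, text1, text2)
        print(i, m.find_longest_match(0, len(text1), 0, len(text2)))
........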
r61979 | amaury.forgeotdarc | 2008-03-28 00:23:54 +0100 (Fri, 28 Mar 2008) | 5 lines
Issue2495: tokenize.untokenize did not insert space between two consecutive string literals:
"" "" => """", which is invalid code.
Will backport
........
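A minimal sketch of the Issue 2495 failure mode: stripping tokens down to (type, string) pairs drives untokenize() through its compat path, which is where the tokenize.py hunks below add the prevstring bookkeeping.

    import io
    import tokenize

    # Two adjacent string literals; with 2-tuples untokenize() must rebuild
    # the spacing itself.
    source = b"'' ''\n"
    pairs = [tok[:2] for tok in tokenize.tokenize(io.BytesIO(source).readline)]
    print(tokenize.untokenize(pairs))
    # Before the fix the literals ran together as '''' (an unterminated
    # triple quote); with the fix a separating space is re-inserted.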
Diffstat (limited to 'Lib')
-rwxr-xr-x  Lib/smtplib.py                |  3
-rw-r--r--  Lib/test/outstanding_bugs.py  | 33
-rw-r--r--  Lib/test/test_future4.py      |  1
-rw-r--r--  Lib/test/test_tokenize.py     | 24
-rw-r--r--  Lib/tokenize.py               |  9
5 files changed, 34 insertions, 36 deletions
diff --git a/Lib/smtplib.py b/Lib/smtplib.py
index 63732c8..a3cc65d 100755
--- a/Lib/smtplib.py
+++ b/Lib/smtplib.py
@@ -729,8 +729,9 @@ class SMTP:
 
     def quit(self):
         """Terminate the SMTP session."""
-        self.docmd("quit")
+        res = self.docmd("quit")
         self.close()
+        return res
 
 
 if _have_ssl:
diff --git a/Lib/test/outstanding_bugs.py b/Lib/test/outstanding_bugs.py
index 9c75bfc..3f672fb 100644
--- a/Lib/test/outstanding_bugs.py
+++ b/Lib/test/outstanding_bugs.py
@@ -13,38 +13,6 @@ from test import test_support
 # One test case for outstanding bugs at the moment:
 #
 
-class TestDifflibLongestMatch(unittest.TestCase):
-    # From Patch #1678339:
-    # The find_longest_match method in the difflib's SequenceMatcher has a bug.
-
-    # The bug is in turn caused by a problem with creating a b2j mapping which
-    # should contain a list of indices for each of the list elements in b.
-    # However, when the b2j mapping is being created (this is being done in
-    # __chain_b method in the SequenceMatcher) the mapping becomes broken. The
-    # cause of this is that for the frequently used elements the list of indices
-    # is removed and the element is being enlisted in the populardict mapping.
-
-    # The test case tries to match two strings like:
-    # abbbbbb.... and ...bbbbbbc
-
-    # The number of b is equal and the find_longest_match should have returned
-    # the proper amount. However, in case the number of "b"s is large enough, the
-    # method reports that the length of the longest common substring is 0. It
-    # simply can't find it.
-
-    # A bug was raised some time ago on this matter. It's ID is 1528074.
-
-    def test_find_longest_match(self):
-        import difflib
-        for i in (190, 200, 210):
-            text1 = "a" + "b"*i
-            text2 = "b"*i + "c"
-            m = difflib.SequenceMatcher(None, text1, text2)
-            (aptr, bptr, l) = m.find_longest_match(0, len(text1), 0, len(text2))
-            self.assertEquals(i, l)
-            self.assertEquals(aptr, 1)
-            self.assertEquals(bptr, 0)
-
 # test_io
 import io
 class TextIOWrapperTest(unittest.TestCase):
@@ -114,7 +82,6 @@ class TextIOWrapperTest(unittest.TestCase):
 
 def test_main():
     test_support.run_unittest(
-        TestDifflibLongestMatch,
         TextIOWrapperTest)
 
 if __name__ == "__main__":
diff --git a/Lib/test/test_future4.py b/Lib/test/test_future4.py
index 5456449..8ada489 100644
--- a/Lib/test/test_future4.py
+++ b/Lib/test/test_future4.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
 from __future__ import unicode_literals
 
 import unittest
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 308158f..371e2b9 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -88,7 +88,7 @@ Some error-handling code
 
     >>> roundtrip("try: import somemodule\\n"
     ...           "except ImportError: # comment\\n"
-    ...           "    print 'Can not import' # comment2\\n"
+    ...           "    print('Can not import' # comment2\\n)"
     ...           "else: print 'Loaded'\\n")
     True
 
@@ -509,6 +509,28 @@ Backslash means line continuation, except for comments
     True
     >>> roundtrip("# Comment \\\\nx = 0")
     True
+
+Two string literals on the same line
+
+    >>> roundtrip("'' ''")
+    True
+
+Test roundtrip on random python modules.
+pass the '-ucompiler' option to process the full directory.
+
+    >>> import random
+    >>> tempdir = os.path.dirname(f) or os.curdir
+    >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
+
+    >>> if not test_support.is_resource_enabled("compiler"):
+    ...     testfiles = random.sample(testfiles, 10)
+    ...
+    >>> for testfile in testfiles:
+    ...     if not roundtrip(open(testfile, 'rb')):
+    ...         print("Roundtrip failed for file %s" % testfile)
+    ...         break
+    ... else: True
+    True
 """
 
 from test import test_support
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index d7043b1..fdc5cbf 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -178,6 +178,7 @@ class Untokenizer:
             tokval += ' '
         if toknum in (NEWLINE, NL):
             startline = True
+        prevstring = False
         for tok in iterable:
             toknum, tokval = tok[:2]
             if toknum == ENCODING:
@@ -187,6 +188,14 @@ class Untokenizer:
             if toknum in (NAME, NUMBER):
                 tokval += ' '
 
+            # Insert a space between two consecutive strings
+            if toknum == STRING:
+                if prevstring:
+                    tokval = ' ' + tokval
+                prevstring = True
+            else:
+                prevstring = False
+
             if toknum == INDENT:
                 indents.append(tokval)
                 continue