summary | refs | log | tree | commit | diff | stats
path: root/Lib
diff options
context:
space:
mode:
Diffstat (limited to 'Lib')
-rwxr-xr-x  Lib/smtplib.py               |  3
-rw-r--r--  Lib/test/outstanding_bugs.py | 33
-rw-r--r--  Lib/test/test_future4.py     |  1
-rw-r--r--  Lib/test/test_tokenize.py    | 24
-rw-r--r--  Lib/tokenize.py              |  9
5 files changed, 34 insertions(+), 36 deletions(-)
diff --git a/Lib/smtplib.py b/Lib/smtplib.py
index 63732c8..a3cc65d 100755
--- a/Lib/smtplib.py
+++ b/Lib/smtplib.py
@@ -729,8 +729,9 @@ class SMTP:
def quit(self):
"""Terminate the SMTP session."""
- self.docmd("quit")
+ res = self.docmd("quit")
self.close()
+ return res
if _have_ssl:
diff --git a/Lib/test/outstanding_bugs.py b/Lib/test/outstanding_bugs.py
index 9c75bfc..3f672fb 100644
--- a/Lib/test/outstanding_bugs.py
+++ b/Lib/test/outstanding_bugs.py
@@ -13,38 +13,6 @@ from test import test_support
# One test case for outstanding bugs at the moment:
#
-class TestDifflibLongestMatch(unittest.TestCase):
- # From Patch #1678339:
- # The find_longest_match method in the difflib's SequenceMatcher has a bug.
-
- # The bug is in turn caused by a problem with creating a b2j mapping which
- # should contain a list of indices for each of the list elements in b.
- # However, when the b2j mapping is being created (this is being done in
- # __chain_b method in the SequenceMatcher) the mapping becomes broken. The
- # cause of this is that for the frequently used elements the list of indices
- # is removed and the element is being enlisted in the populardict mapping.
-
- # The test case tries to match two strings like:
- # abbbbbb.... and ...bbbbbbc
-
- # The number of b is equal and the find_longest_match should have returned
- # the proper amount. However, in case the number of "b"s is large enough, the
- # method reports that the length of the longest common substring is 0. It
- # simply can't find it.
-
- # A bug was raised some time ago on this matter. It's ID is 1528074.
-
- def test_find_longest_match(self):
- import difflib
- for i in (190, 200, 210):
- text1 = "a" + "b"*i
- text2 = "b"*i + "c"
- m = difflib.SequenceMatcher(None, text1, text2)
- (aptr, bptr, l) = m.find_longest_match(0, len(text1), 0, len(text2))
- self.assertEquals(i, l)
- self.assertEquals(aptr, 1)
- self.assertEquals(bptr, 0)
-
# test_io
import io
class TextIOWrapperTest(unittest.TestCase):
@@ -114,7 +82,6 @@ class TextIOWrapperTest(unittest.TestCase):
def test_main():
test_support.run_unittest(
- TestDifflibLongestMatch,
TextIOWrapperTest)
if __name__ == "__main__":
diff --git a/Lib/test/test_future4.py b/Lib/test/test_future4.py
index 5456449..8ada489 100644
--- a/Lib/test/test_future4.py
+++ b/Lib/test/test_future4.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
from __future__ import unicode_literals
import unittest
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 308158f..371e2b9 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -88,7 +88,7 @@ Some error-handling code
>>> roundtrip("try: import somemodule\\n"
... "except ImportError: # comment\\n"
- ... " print 'Can not import' # comment2\\n"
+ ... " print('Can not import' # comment2\\n)"
... "else: print 'Loaded'\\n")
True
@@ -509,6 +509,28 @@ Backslash means line continuation, except for comments
True
>>> roundtrip("# Comment \\\\nx = 0")
True
+
+Two string literals on the same line
+
+ >>> roundtrip("'' ''")
+ True
+
+Test roundtrip on random python modules.
+pass the '-ucompiler' option to process the full directory.
+
+ >>> import random
+ >>> tempdir = os.path.dirname(f) or os.curdir
+ >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
+
+ >>> if not test_support.is_resource_enabled("compiler"):
+ ... testfiles = random.sample(testfiles, 10)
+ ...
+ >>> for testfile in testfiles:
+ ... if not roundtrip(open(testfile, 'rb')):
+ ... print("Roundtrip failed for file %s" % testfile)
+ ... break
+ ... else: True
+ True
"""
from test import test_support
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index d7043b1..fdc5cbf 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -178,6 +178,7 @@ class Untokenizer:
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
+ prevstring = False
for tok in iterable:
toknum, tokval = tok[:2]
if toknum == ENCODING:
@@ -187,6 +188,14 @@ class Untokenizer:
if toknum in (NAME, NUMBER):
tokval += ' '
+ # Insert a space between two consecutive strings
+ if toknum == STRING:
+ if prevstring:
+ tokval = ' ' + tokval
+ prevstring = True
+ else:
+ prevstring = False
+
if toknum == INDENT:
indents.append(tokval)
continue