author    Victor Stinner <victor.stinner@gmail.com>    2015-05-25 22:46:44 (GMT)
committer Victor Stinner <victor.stinner@gmail.com>    2015-05-25 22:46:44 (GMT)
commit    24d262af0b16f76415baa6187c8892de1682c8a6 (patch)
tree      a13a4f056c5c47989c637f1131a26fe473757126 /Lib
parent    e6efbdc94760ba0ca410d037b55ac32020de5cb2 (diff)
parent    387729e183365a366c48fce7a9abfcaf4ec6ff4e (diff)
(Merge 3.5) Issue #23840: tokenize.open() now closes the temporary binary file
on error to fix a resource warning.
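
For context, tokenize.open() opens a Python source file for reading and uses detect_encoding() to honor the PEP 263 coding cookie. If the cookie names an unknown encoding, detect_encoding() raises SyntaxError, and before this patch the binary file object opened inside tokenize.open() was never closed, surfacing later as a ResourceWarning when the object was garbage-collected. A minimal reproduction sketch (bad_coding.py is a hypothetical file name):

import tokenize

# Hypothetical source file whose coding cookie names a nonexistent codec.
with open('bad_coding.py', 'w') as f:
    f.write('# -*- coding: xxx -*-\n')

try:
    tokenize.open('bad_coding.py')
except SyntaxError as exc:
    # Before the fix, the 'rb' buffer opened inside tokenize.open() was
    # still open here and was only reclaimed by the garbage collector,
    # which reports "unclosed file" under python -W error::ResourceWarning.
    print(exc)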
Diffstat (limited to 'Lib')
 Lib/test/test_tokenize.py | 10 +++++++++-
 Lib/tokenize.py           | 14 +++++++++-----
 2 files changed, 18 insertions(+), 6 deletions(-)
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 43fadaf..b4e114c 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -834,7 +834,7 @@ from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
                      open as tokenize_open, Untokenizer)
 from io import BytesIO
-from unittest import TestCase
+from unittest import TestCase, mock
 import os, sys, glob
 import token
@@ -1246,6 +1246,14 @@ class TestDetectEncoding(TestCase):
         ins = Bunk(lines, path)
         detect_encoding(ins.readline)
 
+    def test_open_error(self):
+        # Issue #23840: open() must close the binary file on error
+        m = BytesIO(b'#coding:xxx')
+        with mock.patch('tokenize._builtin_open', return_value=m):
+            self.assertRaises(SyntaxError, tokenize_open, 'foobar')
+        self.assertTrue(m.closed)
+
+
 class TestTokenize(TestCase):
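
The test avoids the filesystem by patching tokenize._builtin_open, the module-level alias that tokenize.open() calls, so that it returns an in-memory BytesIO. The same pattern works outside the test suite; a standalone sketch, assuming the alias keeps its private name:

import tokenize
from io import BytesIO
from unittest import mock

buf = BytesIO(b'#coding:xxx')  # bogus coding cookie makes detect_encoding() fail
with mock.patch('tokenize._builtin_open', return_value=buf):
    try:
        tokenize.open('ignored')  # the name is irrelevant: the mock returns buf
    except SyntaxError:
        pass
assert buf.closed  # holds once this patch is applied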
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index d65325e..f58c286 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -435,11 +435,15 @@ def open(filename):
     detect_encoding().
     """
     buffer = _builtin_open(filename, 'rb')
-    encoding, lines = detect_encoding(buffer.readline)
-    buffer.seek(0)
-    text = TextIOWrapper(buffer, encoding, line_buffering=True)
-    text.mode = 'r'
-    return text
+    try:
+        encoding, lines = detect_encoding(buffer.readline)
+        buffer.seek(0)
+        text = TextIOWrapper(buffer, encoding, line_buffering=True)
+        text.mode = 'r'
+        return text
+    except:
+        buffer.close()
+        raise
 
 
 def tokenize(readline):
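
The try/except added here is the standard close-on-error idiom: once _builtin_open() succeeds, the function owns an open file descriptor, and any failure before ownership passes to the returned TextIOWrapper must release it before re-raising. A generalized sketch of the idiom (open_text is a hypothetical helper, not part of tokenize):

from io import TextIOWrapper

def open_text(filename, encoding='utf-8'):
    # Hypothetical helper demonstrating the same close-on-error idiom.
    raw = open(filename, 'rb')
    try:
        # Anything in here may raise (LookupError for a bad encoding name,
        # OSError from the underlying stream, ...).
        return TextIOWrapper(raw, encoding=encoding, line_buffering=True)
    except:  # bare except, as in the patch: release the resource, then re-raise
        raw.close()
        raise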