summary | refs | log | tree | commit | diff | stats
path: root/Lib
diff options
context:
space:
mode:
Diffstat (limited to 'Lib')
-rw-r--r--Lib/distutils/cmd.py3
-rw-r--r--Lib/distutils/log.py13
-rw-r--r--Lib/ftplib.py4
-rw-r--r--Lib/glob.py2
-rw-r--r--Lib/optparse.py1
-rwxr-xr-xLib/pdb.py47
-rw-r--r--Lib/test/README411
-rw-r--r--Lib/test/test_pprint.py4
-rw-r--r--Lib/test/test_sys.py5
-rw-r--r--Lib/test/test_threading.py16
-rw-r--r--Lib/threading.py42
11 files changed, 86 insertions, 462 deletions
diff --git a/Lib/distutils/cmd.py b/Lib/distutils/cmd.py
index 46055b4..5829a56 100644
--- a/Lib/distutils/cmd.py
+++ b/Lib/distutils/cmd.py
@@ -333,7 +333,8 @@ class Command:
# -- External world manipulation -----------------------------------
def warn(self, msg):
- log.warn("warning: %s: %s\n" % (self.get_command_name(), msg))
+ log.warn("warning: %s: %s\n" %
+ (self.get_command_name(), msg))
def execute(self, func, args, msg=None, level=1):
util.execute(func, args, msg, dry_run=self.dry_run)
diff --git a/Lib/distutils/log.py b/Lib/distutils/log.py
index 97319a0..6f949d5 100644
--- a/Lib/distutils/log.py
+++ b/Lib/distutils/log.py
@@ -18,13 +18,14 @@ class Log:
def _log(self, level, msg, args):
if level >= self.threshold:
- if not args:
- # msg may contain a '%'. If args is empty,
- # don't even try to string-format
- print(msg)
+ if args:
+ msg = msg % args
+ if level in (WARN, ERROR, FATAL):
+ stream = sys.stderr
else:
- print(msg % args)
- sys.stdout.flush()
+ stream = sys.stdout
+ stream.write('%s\n' % msg)
+ stream.flush()
def log(self, level, msg, *args):
self._log(level, msg, args)
diff --git a/Lib/ftplib.py b/Lib/ftplib.py
index 42f2bff..7e678b4 100644
--- a/Lib/ftplib.py
+++ b/Lib/ftplib.py
@@ -223,7 +223,7 @@ class FTP:
def voidresp(self):
"""Expect a response beginning with '2'."""
resp = self.getresp()
- if resp[0] != '2':
+ if resp[:1] != '2':
raise error_reply(resp)
return resp
@@ -522,8 +522,6 @@ class FTP:
resp = self.sendcmd('DELE ' + filename)
if resp[:3] in ('250', '200'):
return resp
- elif resp[:1] == '5':
- raise error_perm(resp)
else:
raise error_reply(resp)
diff --git a/Lib/glob.py b/Lib/glob.py
index 9529f7e..c5f5f69 100644
--- a/Lib/glob.py
+++ b/Lib/glob.py
@@ -16,7 +16,7 @@ def glob(pathname):
return list(iglob(pathname))
def iglob(pathname):
- """Return a list of paths matching a pathname pattern.
+ """Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
diff --git a/Lib/optparse.py b/Lib/optparse.py
index 7b1734c..d9225e1 100644
--- a/Lib/optparse.py
+++ b/Lib/optparse.py
@@ -11,6 +11,7 @@ For support, use the optik-users@lists.sourceforge.net mailing list
__version__ = "1.5.3"
__all__ = ['Option',
+ 'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
diff --git a/Lib/pdb.py b/Lib/pdb.py
index ca03c50..22fc4c5 100755
--- a/Lib/pdb.py
+++ b/Lib/pdb.py
@@ -95,10 +95,14 @@ class Pdb(bdb.Bdb, cmd.Cmd):
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
- self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
- self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
- self.commands_defining = False # True while in the process of defining a command list
- self.commands_bnum = None # The breakpoint number for which we are defining a list
+ self.commands_doprompt = {} # for each bp num, tells if the prompt
+ # must be disp. after execing the cmd list
+ self.commands_silent = {} # for each bp num, tells if the stack trace
+ # must be disp. after execing the cmd list
+ self.commands_defining = False # True while in the process of defining
+ # a command list
+ self.commands_bnum = None # The breakpoint number for which we are
+ # defining a list
def reset(self):
bdb.Bdb.reset(self)
@@ -114,6 +118,10 @@ class Pdb(bdb.Bdb, cmd.Cmd):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
+ # The f_locals dictionary is updated from the actual frame
+ # locals whenever the .f_locals accessor is called, so we
+ # cache it here to ensure that modifications are not overwritten.
+ self.curframe_locals = self.curframe.f_locals
self.execRcLines()
# Can be executed earlier than 'setup' if desired
@@ -192,21 +200,30 @@ class Pdb(bdb.Bdb, cmd.Cmd):
self.cmdloop()
self.forget()
+ def displayhook(self, obj):
+ """Custom displayhook for the exec in default(), which prevents
+ assignment of the _ variable in the builtins.
+ """
+ print(repr(obj))
+
def default(self, line):
if line[:1] == '!': line = line[1:]
- locals = self.curframe.f_locals
+ locals = self.curframe_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
+ save_displayhook = sys.displayhook
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
+ sys.displayhook = self.displayhook
exec(code, globals, locals)
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
+ sys.displayhook = save_displayhook
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
@@ -349,7 +366,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
try:
func = eval(arg,
self.curframe.f_globals,
- self.curframe.f_locals)
+ self.curframe_locals)
except:
func = arg
try:
@@ -597,6 +614,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
+ self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
@@ -607,6 +625,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
+ self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
@@ -670,7 +689,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
- locals = self.curframe.f_locals
+ locals = self.curframe_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print("ENTERING RECURSIVE DEBUGGER", file=self.stdout)
@@ -694,9 +713,8 @@ class Pdb(bdb.Bdb, cmd.Cmd):
return 1
def do_args(self, arg):
- f = self.curframe
- co = f.f_code
- dict = f.f_locals
+ co = self.curframe.f_code
+ dict = self.curframe_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
@@ -708,16 +726,15 @@ class Pdb(bdb.Bdb, cmd.Cmd):
do_a = do_args
def do_retval(self, arg):
- if '__return__' in self.curframe.f_locals:
- print(self.curframe.f_locals['__return__'], file=self.stdout)
+ if '__return__' in self.curframe_locals:
+ print(self.curframe_locals['__return__'], file=self.stdout)
else:
print('*** Not yet returned!', file=self.stdout)
do_rv = do_retval
def _getval(self, arg):
try:
- return eval(arg, self.curframe.f_globals,
- self.curframe.f_locals)
+ return eval(arg, self.curframe.f_globals, self.curframe_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
@@ -788,7 +805,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
- self.curframe.f_locals)
+ self.curframe_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
diff --git a/Lib/test/README b/Lib/test/README
deleted file mode 100644
index a237740..0000000
--- a/Lib/test/README
+++ /dev/null
@@ -1,411 +0,0 @@
-+++++++++++++++++++++++++++++++
-Writing Python Regression Tests
-+++++++++++++++++++++++++++++++
-
-:Author: Skip Montanaro
-:Contact: skip@pobox.com
-
-Introduction
-============
-
-If you add a new module to Python or modify the functionality of an existing
-module, you should write one or more test cases to exercise that new
-functionality. There are different ways to do this within the regression
-testing facility provided with Python; any particular test should use only
-one of these options. Each option requires writing a test module using the
-conventions of the selected option:
-
- - unittest_ based tests
- - doctest_ based tests
- - "traditional" Python test modules
-
-Regardless of the mechanics of the testing approach you choose,
-you will be writing unit tests (isolated tests of functions and objects
-defined by the module) using white box techniques. Unlike black box
-testing, where you only have the external interfaces to guide your test case
-writing, in white box testing you can see the code being tested and tailor
-your test cases to exercise it more completely. In particular, you will be
-able to refer to the C and Python code in the CVS repository when writing
-your regression test cases.
-
-.. _unittest: http://www.python.org/doc/current/lib/module-unittest.html
-.. _doctest: http://www.python.org/doc/current/lib/module-doctest.html
-
-unittest-based tests
-------------------
-The unittest_ framework is based on the ideas of unit testing as espoused
-by Kent Beck and the `Extreme Programming`_ (XP) movement. The specific
-interface provided by the framework is tightly based on the JUnit_
-Java implementation of Beck's original SmallTalk test framework. Please
-see the documentation of the unittest_ module for detailed information on
-the interface and general guidelines on writing unittest-based tests.
-
-The test_support helper module provides a function for use by
-unittest-based tests in the Python regression testing framework,
-``run_unittest()``. This is the primary way of running tests in the
-standard library. You can pass it any number of the following:
-
-- classes derived from or instances of ``unittest.TestCase`` or
- ``unittest.TestSuite``. These will be handed off to unittest for
- converting into a proper TestSuite instance.
-
-- a string; this must be a key in sys.modules. The module associated with
- that string will be scanned by ``unittest.TestLoader.loadTestsFromModule``.
- This is usually seen as ``test_support.run_unittest(__name__)`` in a test
- module's ``test_main()`` function. This has the advantage of picking up
- new tests automatically, without you having to add each new test case
- manually.
-
-All test methods in the Python regression framework have names that
-start with "``test_``" and use lower-case names with words separated with
-underscores.
-
-Test methods should *not* have docstrings! The unittest module prints
-the docstring if there is one, but otherwise prints the function name
-and the full class name. When there's a problem with a test, the
-latter information makes it easier to find the source for the test
-than the docstring.
-
-All unittest-based tests in the Python test suite use boilerplate that
-looks like this (with minor variations)::
-
- import unittest
- from test import test_support
-
- class MyTestCase1(unittest.TestCase):
-
- # Define setUp and tearDown only if needed
-
- def setUp(self):
- unittest.TestCase.setUp(self)
- ... additional initialization...
-
- def tearDown(self):
- ... additional finalization...
- unittest.TestCase.tearDown(self)
-
- def test_feature_one(self):
- # Testing feature one
- ...unit test for feature one...
-
- def test_feature_two(self):
- # Testing feature two
- ...unit test for feature two...
-
- ...etc...
-
- class MyTestCase2(unittest.TestCase):
- ...same structure as MyTestCase1...
-
- ...etc...
-
- def test_main():
- test_support.run_unittest(__name__)
-
- if __name__ == "__main__":
- test_main()
-
-This has the advantage that it allows the unittest module to be used
-as a script to run individual tests as well as working well with the
-regrtest framework.
-
-.. _Extreme Programming: http://www.extremeprogramming.org/
-.. _JUnit: http://www.junit.org/
-
-doctest based tests
--------------------
-Tests written to use doctest_ are actually part of the docstrings for
-the module being tested. Each test is written as a display of an
-interactive session, including the Python prompts, statements that would
-be typed by the user, and the output of those statements (including
-tracebacks, although only the exception msg needs to be retained then).
-The module in the test package is simply a wrapper that causes doctest
-to run over the tests in the module. The test for the difflib module
-provides a convenient example::
-
- import difflib
- from test import test_support
- test_support.run_doctest(difflib)
-
-If the test is successful, nothing is written to stdout (so you should not
-create a corresponding output/test_difflib file), but running regrtest
-with -v will give a detailed report, the same as if passing -v to doctest.
-
-A second argument can be passed to run_doctest to tell doctest to search
-``sys.argv`` for -v instead of using test_support's idea of verbosity. This
-is useful for writing doctest-based tests that aren't simply running a
-doctest'ed Lib module, but contain the doctests themselves. Then at
-times you may want to run such a test directly as a doctest, independent
-of the regrtest framework. The tail end of test_descrtut.py is a good
-example::
-
- def test_main(verbose=None):
- from test import test_support, test_descrtut
- test_support.run_doctest(test_descrtut, verbose)
-
- if __name__ == "__main__":
- test_main(1)
-
-If run via regrtest, ``test_main()`` is called (by regrtest) without
-specifying verbose, and then test_support's idea of verbosity is used. But
-when run directly, ``test_main(1)`` is called, and then doctest's idea of
-verbosity is used.
-
-See the documentation for the doctest module for information on
-writing tests using the doctest framework.
-
-"traditional" Python test modules
----------------------------------
-The mechanics of how the "traditional" test system operates are fairly
-straightforward. When a test case is run, the output is compared with the
-expected output that is stored in .../Lib/test/output. If the test runs to
-completion and the actual and expected outputs match, the test succeeds, if
-not, it fails. If an ``ImportError`` or ``test_support.TestSkipped`` error
-is raised, the test is not run.
-
-Executing Test Cases
-====================
-If you are writing test cases for module spam, you need to create a file
-in .../Lib/test named test_spam.py. In addition, if the tests are expected
-to write to stdout during a successful run, you also need to create an
-expected output file in .../Lib/test/output named test_spam ("..."
-represents the top-level directory in the Python source tree, the directory
-containing the configure script). If needed, generate the initial version
-of the test output file by executing::
-
- ./python Lib/test/regrtest.py -g test_spam.py
-
-from the top-level directory.
-
-Any time you modify test_spam.py you need to generate a new expected
-output file. Don't forget to desk check the generated output to make sure
-it's really what you expected to find! All in all it's usually better
-not to have an expected-out file (note that doctest- and unittest-based
-tests do not).
-
-To run a single test after modifying a module, simply run regrtest.py
-without the -g flag::
-
- ./python Lib/test/regrtest.py test_spam.py
-
-While debugging a regression test, you can of course execute it
-independently of the regression testing framework and see what it prints::
-
- ./python Lib/test/test_spam.py
-
-To run the entire test suite:
-
-- [UNIX, + other platforms where "make" works] Make the "test" target at the
- top level::
-
- make test
-
-- [WINDOWS] Run rt.bat from your PCBuild directory. Read the comments at
- the top of rt.bat for the use of special -d, -O and -q options processed
- by rt.bat.
-
-- [OTHER] You can simply execute the two runs of regrtest (optimized and
- non-optimized) directly::
-
- ./python Lib/test/regrtest.py
- ./python -O Lib/test/regrtest.py
-
-But note that this way picks up whatever .pyc and .pyo files happen to be
-around. The makefile and rt.bat ways run the tests twice, the first time
-removing all .pyc and .pyo files from the subtree rooted at Lib/.
-
-Test cases generate output based upon values computed by the test code.
-When executed, regrtest.py compares the actual output generated by executing
-the test case with the expected output and reports success or failure. It
-stands to reason that if the actual and expected outputs are to match, they
-must not contain any machine dependencies. This means your test cases
-should not print out absolute machine addresses (e.g. the return value of
-the id() builtin function) or floating point numbers with large numbers of
-significant digits (unless you understand what you are doing!).
-
-
-Test Case Writing Tips
-======================
-Writing good test cases is a skilled task and is too complex to discuss in
-detail in this short document. Many books have been written on the subject.
-I'll show my age by suggesting that Glenford Myers' `"The Art of Software
-Testing"`_, published in 1979, is still the best introduction to the subject
-available. It is short (177 pages), easy to read, and discusses the major
-elements of software testing, though its publication predates the
-object-oriented software revolution, so doesn't cover that subject at all.
-Unfortunately, it is very expensive (about $100 new). If you can borrow it
-or find it used (around $20), I strongly urge you to pick up a copy.
-
-The most important goal when writing test cases is to break things. A test
-case that doesn't uncover a bug is much less valuable than one that does.
-In designing test cases you should pay attention to the following:
-
- * Your test cases should exercise all the functions and objects defined
- in the module, not just the ones meant to be called by users of your
- module. This may require you to write test code that uses the module
- in ways you don't expect (explicitly calling internal functions, for
- example - see test_atexit.py).
-
- * You should consider any boundary values that may tickle exceptional
- conditions (e.g. if you were writing regression tests for division,
- you might well want to generate tests with numerators and denominators
- at the limits of floating point and integer numbers on the machine
- performing the tests as well as a denominator of zero).
-
- * You should exercise as many paths through the code as possible. This
- may not always be possible, but is a goal to strive for. In
- particular, when considering if statements (or their equivalent), you
- want to create test cases that exercise both the true and false
- branches. For loops, you should create test cases that exercise the
- loop zero, one and multiple times.
-
- * You should test with obviously invalid input. If you know that a
- function requires an integer input, try calling it with other types of
- objects to see how it responds.
-
- * You should test with obviously out-of-range input. If the domain of a
- function is only defined for positive integers, try calling it with a
- negative integer.
-
- * If you are going to fix a bug that wasn't uncovered by an existing
- test, try to write a test case that exposes the bug (preferably before
- fixing it).
-
- * If you need to create a temporary file, you can use the filename in
- ``test_support.TESTFN`` to do so. It is important to remove the file
- when done; other tests should be able to use the name without cleaning
- up after your test.
-
-.. _"The Art of Software Testing":
- http://www.amazon.com/exec/obidos/ISBN=0471043281
-
-Regression Test Writing Rules
-=============================
-Each test case is different. There is no "standard" form for a Python
-regression test case, though there are some general rules (note that
-these mostly apply only to the "classic" tests; unittest_- and doctest_-
-based tests should follow the conventions natural to those frameworks)::
-
- * If your test case detects a failure, raise ``TestFailed`` (found in
- ``test.test_support``).
-
- * Import everything you'll need as early as possible.
-
- * If you'll be importing objects from a module that is at least
- partially platform-dependent, only import those objects you need for
- the current test case to avoid spurious ``ImportError`` exceptions
- that prevent the test from running to completion.
-
- * Print all your test case results using the ``print`` statement. For
- non-fatal errors, print an error message (or omit a successful
- completion print) to indicate the failure, but proceed instead of
- raising ``TestFailed``.
-
- * Use ``assert`` sparingly, if at all. It's usually better to just print
- what you got, and rely on regrtest's got-vs-expected comparison to
- catch deviations from what you expect. ``assert`` statements aren't
- executed at all when regrtest is run in -O mode; and, because they
- cause the test to stop immediately, can lead to a long & tedious
- test-fix, test-fix, test-fix, ... cycle when things are badly broken
- (and note that "badly broken" often includes running the test suite
- for the first time on new platforms or under new implementations of
- the language).
-
-Miscellaneous
-=============
-There is a test_support module in the test package you can import for
-your test case. Import this module using either::
-
- import test.test_support
-
-or::
-
- from test import test_support
-
-test_support provides the following useful objects:
-
- * ``TestFailed`` - raise this exception when your regression test detects
- a failure.
-
- * ``TestSkipped`` - raise this if the test could not be run because the
- platform doesn't offer all the required facilities (like large
- file support), even if all the required modules are available.
-
- * ``ResourceDenied`` - this is raised when a test requires a resource that
- is not available. Primarily used by 'requires'.
-
- * ``verbose`` - you can use this variable to control print output. Many
- modules use it. Search for "verbose" in the test_*.py files to see
- lots of examples.
-
- * ``forget(module_name)`` - attempts to cause Python to "forget" that it
- loaded a module and erase any PYC files.
-
- * ``is_resource_enabled(resource)`` - Returns a boolean based on whether
- the resource is enabled or not.
-
- * ``requires(resource [, msg])`` - if the required resource is not
- available the ResourceDenied exception is raised.
-
- * ``verify(condition, reason='test failed')``. Use this instead of::
-
- assert condition[, reason]
-
- ``verify()`` has two advantages over ``assert``: it works even in -O
- mode, and it raises ``TestFailed`` on failure instead of
- ``AssertionError``.
-
- * ``is_jython`` - true if the interpreter is Jython, false otherwise.
-
- * ``TESTFN`` - a string that should always be used as the filename when
- you need to create a temp file. Also use ``try``/``finally`` to
- ensure that your temp files are deleted before your test completes.
- Note that you cannot unlink an open file on all operating systems, so
- also be sure to close temp files before trying to unlink them.
-
- * ``sortdict(dict)`` - acts like ``repr(dict.items())``, but sorts the
- items first. This is important when printing a dict value, because
- the order of items produced by ``dict.items()`` is not defined by the
- language.
-
- * ``findfile(file)`` - you can call this function to locate a file
- somewhere along sys.path or in the Lib/test tree - see
- test_ossaudiodev.py for an example of its use.
-
- * ``fcmp(x,y)`` - you can call this function to compare two floating
- point numbers when you expect them to only be approximately equal
- withing a fuzz factor (``test_support.FUZZ``, which defaults to 1e-6).
-
- * ``check_syntax_error(testcase, statement)`` - make sure that the
- statement is *not* correct Python syntax.
-
-
-Some Non-Obvious regrtest Features
-==================================
- * Automagic test detection: When you create a new test file
- test_spam.py, you do not need to modify regrtest (or anything else)
- to advertise its existence. regrtest searches for and runs all
- modules in the test directory with names of the form test_xxx.py.
-
- * Miranda output: If, when running test_spam.py, regrtest does not
- find an expected-output file test/output/test_spam, regrtest
- pretends that it did find one, containing the single line
-
- test_spam
-
- This allows new tests that don't expect to print anything to stdout
- to not bother creating expected-output files.
-
- * Two-stage testing: To run test_spam.py, regrtest imports test_spam
- as a module. Most tests run to completion as a side-effect of
- getting imported. After importing test_spam, regrtest also executes
- ``test_spam.test_main()``, if test_spam has a ``test_main`` attribute.
- This is rarely required with the "traditional" Python tests, and
- you shouldn't create a module global with name test_main unless
- you're specifically exploiting this gimmick. This usage does
- prove useful with unittest-based tests as well, however; defining
- a ``test_main()`` which is run by regrtest and a script-stub in the
- test module ("``if __name__ == '__main__': test_main()``") allows
- the test to be used like any other Python test and also work
- with the unittest.py-as-a-script approach, allowing a developer
- to run specific tests from the command line.
diff --git a/Lib/test/test_pprint.py b/Lib/test/test_pprint.py
index f9b679e..1469ff0 100644
--- a/Lib/test/test_pprint.py
+++ b/Lib/test/test_pprint.py
@@ -115,10 +115,10 @@ class QueryTestCase(unittest.TestCase):
{}, dict2(), dict3(),
verify, pprint,
-6, -6, -6-6j, -1.5, "x", b"x", (3,), [3], {3: 6},
- (1,2), [3,4], {5: 6, 7: 8},
+ (1,2), [3,4], {5: 6},
tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
[3,4], list2([3,4]), list3([3,4]), list3(range(100)),
- {5: 6, 7: 8}, dict2({5: 6}), dict3({5: 6}),
+ dict2({5: 6}), dict3({5: 6}),
range(10, -11, -1)
):
native = repr(simple)
diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py
index b81c4fe..474d855 100644
--- a/Lib/test/test_sys.py
+++ b/Lib/test/test_sys.py
@@ -225,6 +225,11 @@ class SysModuleTest(unittest.TestCase):
sys.setdlopenflags(oldflags)
def test_refcount(self):
+ # n here must be a global in order for this test to pass while
+ # tracing with a python function. Tracing calls PyFrame_FastToLocals
+ # which will add a copy of any locals to the frame object, causing
+ # the reference count to increase by 2 instead of 1.
+ global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py
index 956672d..eabd7f4 100644
--- a/Lib/test/test_threading.py
+++ b/Lib/test/test_threading.py
@@ -84,11 +84,24 @@ class ThreadTests(unittest.TestCase):
t.join(NUMTASKS)
self.assert_(not t.is_alive())
self.failIfEqual(t.ident, 0)
+ self.assertFalse(t.ident is None)
self.assert_(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
+ def test_ident_of_no_threading_threads(self):
+ # The ident still must work for the main thread and dummy threads.
+ self.assertFalse(threading.currentThread().ident is None)
+ def f():
+ ident.append(threading.currentThread().ident)
+ done.set()
+ done = threading.Event()
+ ident = []
+ _thread.start_new_thread(f, ())
+ done.wait()
+ self.assertFalse(ident[0] is None)
+
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
@@ -187,7 +200,8 @@ class ThreadTests(unittest.TestCase):
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
- worker_started.wait()
+ ret = worker_started.wait()
+ self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assert_(not t.finished)
diff --git a/Lib/threading.py b/Lib/threading.py
index b541d77..d4fcbb0 100644
--- a/Lib/threading.py
+++ b/Lib/threading.py
@@ -375,6 +375,7 @@ class _Event(_Verbose):
try:
if not self._flag:
self._cond.wait(timeout)
+ return self._flag
finally:
self._cond.release()
@@ -450,9 +451,8 @@ class Thread(_Verbose):
raise RuntimeError("thread already started")
if __debug__:
self._note("%s.start(): starting thread", self)
- _active_limbo_lock.acquire()
- _limbo[self] = self
- _active_limbo_lock.release()
+ with _active_limbo_lock:
+ _limbo[self] = self
_start_new_thread(self._bootstrap, ())
self._started.wait()
@@ -485,14 +485,16 @@ class Thread(_Verbose):
return
raise
+ def _set_ident(self):
+ self._ident = _get_ident()
+
def _bootstrap_inner(self):
try:
- self._ident = _get_ident()
+ self._set_ident()
self._started.set()
- _active_limbo_lock.acquire()
- _active[self._ident] = self
- del _limbo[self]
- _active_limbo_lock.release()
+ with _active_limbo_lock:
+ _active[self._ident] = self
+ del _limbo[self]
if __debug__:
self._note("%s._bootstrap(): thread started", self)
@@ -721,9 +723,9 @@ class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._started.set()
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- _active_limbo_lock.release()
+ self._set_ident()
+ with _active_limbo_lock:
+ _active[self._ident] = self
def _set_daemon(self):
return False
@@ -768,9 +770,9 @@ class _DummyThread(Thread):
self._started.set()
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- _active_limbo_lock.release()
+ self._set_ident()
+ with _active_limbo_lock:
+ _active[self._ident] = self
def _set_daemon(self):
return True
@@ -791,18 +793,14 @@ def current_thread():
currentThread = current_thread
def active_count():
- _active_limbo_lock.acquire()
- count = len(_active) + len(_limbo)
- _active_limbo_lock.release()
- return count
+ with _active_limbo_lock:
+ return len(_active) + len(_limbo)
activeCount = active_count
def enumerate():
- _active_limbo_lock.acquire()
- active = list(_active.values()) + list(_limbo.values())
- _active_limbo_lock.release()
- return active
+ with _active_limbo_lock:
+ return list(_active.values()) + list(_limbo.values())
from _thread import stack_size