path: root/Lib/test/_test_multiprocessing.py
author    Petr Viktorin <encukou@gmail.com>    2024-01-18 01:15:29 (GMT)
committer GitHub <noreply@github.com>          2024-01-18 01:15:29 (GMT)
commit    c1db9606081bdbe0207f83a861a3c70c356d3704 (patch)
tree      4190826c9d57c68262ca8f294daad49820c75603 /Lib/test/_test_multiprocessing.py
parent    e2c097ebdee447ded1109f99a235e65aa3533bf8 (diff)
gh-113205: test_multiprocessing.test_terminate: Test the API works on threadpools (#114186)

Threads can't be forced to terminate (without potentially corrupting too much state), so the expected behaviour of `ThreadPool.terminate` is to wait for the currently executing tasks to finish.

The entire test was skipped in GH-110848 (0e9c364f4ac18a2237bdbac702b96bcf8ef9cb09). Instead of skipping it entirely, we should ensure the API eventually succeeds: use a shorter timeout.

For the record: on my machine, when the test is un-skipped, the task manages to start in about 1.5% of cases.
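To make that behaviour concrete, here is a minimal standalone sketch (an illustration only, not part of this patch or of the CPython test suite): ThreadPool.terminate() drops the tasks that have not started yet, but join() still has to wait for the tasks that are already running. The pool size, task count, and sleep lengths below are arbitrary choices for illustration.

import time
from multiprocessing.pool import ThreadPool

pool = ThreadPool(3)
# Queue many short tasks; chunksize=1 mirrors the test's call pattern.
pool.map_async(time.sleep, [1] * 10_000, chunksize=1)
time.sleep(0.1)    # give a few workers a chance to pick up a task
pool.terminate()   # queued-but-not-started tasks are discarded
pool.join()        # returns once the ~1 s tasks already running finish
print("thread pool shut down")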
Diffstat (limited to 'Lib/test/_test_multiprocessing.py')
-rw-r--r--   Lib/test/_test_multiprocessing.py   11
1 file changed, 8 insertions, 3 deletions
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index 8e4e076..6a050fa 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -2693,12 +2693,17 @@ class _TestPool(BaseTestCase):
         p.join()
 
     def test_terminate(self):
+        # Simulate slow tasks which take "forever" to complete
+        sleep_time = support.LONG_TIMEOUT
+
         if self.TYPE == 'threads':
-            self.skipTest("Threads cannot be terminated")
+            # Thread pool workers can't be forced to quit, so if the first
+            # task starts early enough, we will end up waiting for it.
+            # Sleep for a shorter time, so the test doesn't block.
+            sleep_time = 1
 
-        # Simulate slow tasks which take "forever" to complete
         p = self.Pool(3)
-        args = [support.LONG_TIMEOUT for i in range(10_000)]
+        args = [sleep_time for i in range(10_000)]
         result = p.map_async(time.sleep, args, chunksize=1)
         p.terminate()
         p.join()
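For contrast, a hedged sketch (not from the patch): a process Pool is documented to stop its worker processes without completing outstanding work, which is why the test can hand the process-based pools support.LONG_TIMEOUT sleeps and still expect terminate() followed by join() to return promptly.

import time
from multiprocessing import Pool

if __name__ == "__main__":
    p = Pool(3)
    p.map_async(time.sleep, [60] * 10_000, chunksize=1)
    p.terminate()   # worker processes are stopped even mid-sleep
    p.join()        # no need to wait out the 60-second tasks
    print("process pool terminated")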