author     Victor Stinner <vstinner@python.org>    2020-04-17 20:54:38 (GMT)
committer  GitHub <noreply@github.com>             2020-04-17 20:54:38 (GMT)
commit     87502ddd710eb1f030b8ff5a60b05becea3f474f (patch)
tree       695a6abbaa45adf48766011f144050ce9b6b3780 /Lib
parent     223221b290db00ca1042c77103efcbc072f29c90 (diff)
bpo-40286: Use random.randbytes() in tests (GH-19575)
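
random.randbytes() was added by bpo-40286 (Python 3.9); in CPython it is a thin wrapper around getrandbits(n * 8).to_bytes(n, 'little'), so for the same generator state it should produce exactly the bytes the old little-endian idiom produced. A minimal before/after sketch (illustrative only, not part of the patch):

    import random

    gen = random.Random(1)
    old = gen.getrandbits(16 * 8).to_bytes(16, 'little')  # pre-3.9 idiom used in these tests

    gen.seed(1)
    new = gen.randbytes(16)  # Python 3.9+ replacement

    assert old == new  # holds on CPython, where randbytes() wraps getrandbits()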
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/test/test_bz2.py       2
-rw-r--r--  Lib/test/test_lzma.py      2
-rw-r--r--  Lib/test/test_tarfile.py   2
-rw-r--r--  Lib/test/test_zipfile.py   11
-rw-r--r--  Lib/test/test_zlib.py      20
5 files changed, 9 insertions, 28 deletions
diff --git a/Lib/test/test_bz2.py b/Lib/test/test_bz2.py
index 030d564..78b95d8 100644
--- a/Lib/test/test_bz2.py
+++ b/Lib/test/test_bz2.py
@@ -710,7 +710,7 @@ class BZ2DecompressorTest(BaseTest):
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
- block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
+ block = random.randbytes(blocksize)
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
diff --git a/Lib/test/test_lzma.py b/Lib/test/test_lzma.py
index f24ed3c..0f3af27 100644
--- a/Lib/test/test_lzma.py
+++ b/Lib/test/test_lzma.py
@@ -350,7 +350,7 @@ class CompressorDecompressorTestCase(unittest.TestCase):
def test_decompressor_bigmem(self, size):
lzd = LZMADecompressor()
blocksize = 10 * 1024 * 1024
- block = random.getrandbits(blocksize * 8).to_bytes(blocksize, "little")
+ block = random.randbytes(blocksize)
try:
input = block * (size // blocksize + 1)
cdata = lzma.compress(input)
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index cae9680..99196f6 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -386,7 +386,7 @@ class CommonReadTest(ReadTest):
def test_ignore_zeros(self):
# Test TarFile's ignore_zeros option.
# generate 512 pseudorandom bytes
- data = Random(0).getrandbits(512*8).to_bytes(512, 'big')
+ data = Random(0).randbytes(512)
for char in (b'\0', b'a'):
# Test if EOFHeaderError ('\0') and InvalidHeaderError ('a')
# are ignored correctly.
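
Note that this test_tarfile hunk also changes the generated data: the old call converted with 'big' byte order, while Random.randbytes() as currently implemented in CPython converts little-endian, so the same seed now yields the reversed byte sequence. The test only needs 512 deterministic pseudorandom bytes, so the exact values do not matter. A tiny illustration of the byte-order difference (values chosen for the example only):

    n = 0x0102
    print(n.to_bytes(2, 'big'))     # b'\x01\x02'
    print(n.to_bytes(2, 'little'))  # b'\x02\x01' -- same integer, reversed bytes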
diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py
index 643c5b4..29d98c8 100644
--- a/Lib/test/test_zipfile.py
+++ b/Lib/test/test_zipfile.py
@@ -16,7 +16,7 @@ import zipfile
from tempfile import TemporaryFile
-from random import randint, random, getrandbits
+from random import randint, random, randbytes
from test.support import script_helper
from test.support import (TESTFN, findfile, unlink, rmtree, temp_dir, temp_cwd,
@@ -33,9 +33,6 @@ SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
-def getrandbytes(size):
- return getrandbits(8 * size).to_bytes(size, 'little')
-
def get_files(test):
yield TESTFN2
with TemporaryFile() as f:
@@ -324,7 +321,7 @@ class AbstractTestsWithSourceFile:
# than requested.
for test_size in (1, 4095, 4096, 4097, 16384):
file_size = test_size + 1
- junk = getrandbytes(file_size)
+ junk = randbytes(file_size)
with zipfile.ZipFile(io.BytesIO(), "w", self.compression) as zipf:
zipf.writestr('foo', junk)
with zipf.open('foo', 'r') as fp:
@@ -2423,8 +2420,8 @@ class UnseekableTests(unittest.TestCase):
class TestsWithMultipleOpens(unittest.TestCase):
@classmethod
def setUpClass(cls):
- cls.data1 = b'111' + getrandbytes(10000)
- cls.data2 = b'222' + getrandbytes(10000)
+ cls.data1 = b'111' + randbytes(10000)
+ cls.data2 = b'222' + randbytes(10000)
def make_test_archive(self, f):
# Create the ZIP archive
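
The zlib diff below also drops the module-level genblock() helper, which assembled its data one generator.randint(0, 255) byte at a time; gen.randbytes() draws the whole block in one call, so for the same seed the exact bytes differ, but the compress/flush/decompress round-trips only need a reproducible blob. A rough, illustrative comparison of the two approaches (not taken from the patch):

    import random

    gen = random.Random(1)
    per_byte = bytes(gen.randint(0, 255) for _ in range(1024))  # genblock-style generation

    gen.seed(1)
    bulk = gen.randbytes(1024)  # new idiom: different bytes, same role in the tests

    assert len(per_byte) == len(bulk) == 1024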
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index f828b4c..02509cd 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -134,8 +134,7 @@ class BaseCompressTestCase(object):
# Generate 10 MiB worth of random, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
- data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
- for i in range(10)])
+ data = random.randbytes(_1M * 10)
data = data * (size // len(data) + 1)
try:
compress_func(data)
@@ -488,7 +487,7 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# others might simply have a single RNG
gen = random
gen.seed(1)
- data = genblock(1, 17 * 1024, generator=gen)
+ data = gen.randbytes(17 * 1024)
# compress, sync-flush, and decompress
first = co.compress(data)
@@ -825,20 +824,6 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
self.assertEqual(dco.decompress(gzip), HAMLET_SCENE)
-def genblock(seed, length, step=1024, generator=random):
- """length-byte stream of random data from a seed (in step-byte blocks)."""
- if seed is not None:
- generator.seed(seed)
- randint = generator.randint
- if length < step or step < 2:
- step = length
- blocks = bytes()
- for i in range(0, length, step):
- blocks += bytes(randint(0, 255) for x in range(step))
- return blocks
-
-
-
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
@@ -847,7 +832,6 @@ def choose_lines(source, number, seed=None, generator=random):
return [generator.choice(sources) for n in range(number)]
-
HAMLET_SCENE = b"""
LAERTES