author     Victor Stinner <victor.stinner@gmail.com>   2017-11-08 22:44:44 (GMT)
committer  GitHub <noreply@github.com>                 2017-11-08 22:44:44 (GMT)
commit     8c663fd60ecba9c82aa4c404dbfb1aae69fe8553 (patch)
tree       8aed07de4d990bd998a61a051f3ac6b1a88f6392 /Lib
parent     0e163d2ced28ade8ff526e8c663faf03c2c0b168 (diff)
Replace KB unit with KiB (#4293)
The kB (kilobyte) unit means 1000 bytes, whereas KiB ("kibibyte") means 1024 bytes. KB was misused: replace kB or KB with KiB when appropriate. Make the same change for MB and GB, which become MiB and GiB. Change the output of Tools/iobench/iobench.py. Also round the size of the documentation from 5.5 MB to 5 MiB.
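For reference, the distinction is easy to state in code; a minimal sketch (the names kB, MiB, etc. are illustrative, not identifiers from the patch):

    # SI decimal units are powers of 1000
    kB  = 1000
    MB  = 1000 ** 2
    GB  = 1000 ** 3

    # IEC binary units are powers of 1024
    KiB = 1024
    MiB = 1024 ** 2
    GiB = 1024 ** 3

    # The comments touched by this patch all describe powers of 1024,
    # e.g. 10 * 1024 * 1024 bytes is 10 MiB, not 10 MB:
    assert 10 * MiB == 10 * 1024 * 1024
    assert 10 * MiB != 10 * MB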
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/distutils/cygwinccompiler.py   |  4
-rw-r--r--  Lib/gzip.py                        |  2
-rw-r--r--  Lib/test/_test_multiprocessing.py  |  2
-rw-r--r--  Lib/test/libregrtest/cmdline.py    |  2
-rw-r--r--  Lib/test/pickletester.py           |  4
-rw-r--r--  Lib/test/test_bigaddrspace.py      |  2
-rw-r--r--  Lib/test/test_bz2.py               |  2
-rw-r--r--  Lib/test/test_io.py                | 10
-rw-r--r--  Lib/test/test_largefile.py         |  6
-rw-r--r--  Lib/test/test_mmap.py              |  2
-rw-r--r--  Lib/test/test_os.py                |  4
-rw-r--r--  Lib/test/test_socket.py            |  2
-rw-r--r--  Lib/test/test_tarfile.py           |  4
-rw-r--r--  Lib/test/test_threading.py         |  8
-rw-r--r--  Lib/test/test_zipfile64.py         |  2
-rw-r--r--  Lib/test/test_zlib.py              |  4
-rw-r--r--  Lib/xmlrpc/client.py               |  2
17 files changed, 31 insertions, 31 deletions
diff --git a/Lib/distutils/cygwinccompiler.py b/Lib/distutils/cygwinccompiler.py
index 1c36990..6c5d777 100644
--- a/Lib/distutils/cygwinccompiler.py
+++ b/Lib/distutils/cygwinccompiler.py
@@ -234,8 +234,8 @@ class CygwinCCompiler(UnixCCompiler):
# who wants symbols and a many times larger output file
# should explicitly switch the debug mode on
# otherwise we let dllwrap/ld strip the output file
- # (On my machine: 10KB < stripped_file < ??100KB
- # unstripped_file = stripped_file + XXX KB
+ # (On my machine: 10KiB < stripped_file < ??100KiB
+ # unstripped_file = stripped_file + XXX KiB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
diff --git a/Lib/gzip.py b/Lib/gzip.py
index 76ab497..ddc7bda 100644
--- a/Lib/gzip.py
+++ b/Lib/gzip.py
@@ -308,7 +308,7 @@ class GzipFile(_compression.BaseStream):
if self.mode == WRITE:
fileobj.write(self.compress.flush())
write32u(fileobj, self.crc)
- # self.size may exceed 2GB, or even 4GB
+ # self.size may exceed 2 GiB, or even 4 GiB
write32u(fileobj, self.size & 0xffffffff)
elif self.mode == READ:
self._buffer.close()
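The masking on the line below the changed comment is why the comment matters: RFC 1952 defines the gzip ISIZE trailer field as only 32 bits wide, holding the uncompressed size modulo 2**32. A standalone sketch with a hypothetical size (write32u is the gzip module's internal little-endian 32-bit writer):

    size = 5 * 1024 ** 3         # say the stream held 5 GiB, past the 4 GiB field limit
    isize = size & 0xffffffff    # what write32u(fileobj, self.size & 0xffffffff) records
    assert isize == size % 2 ** 32   # masking the low 32 bits == reducing modulo 2**32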
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index d4e8a8a..dbca2d8 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -4221,7 +4221,7 @@ class TestIgnoreEINTR(unittest.TestCase):
conn.send('ready')
x = conn.recv()
conn.send(x)
- conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
+ conn.send_bytes(b'x' * (1024 * 1024)) # sending 1 MiB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py
index 4999fa7..dab17c3 100644
--- a/Lib/test/libregrtest/cmdline.py
+++ b/Lib/test/libregrtest/cmdline.py
@@ -96,7 +96,7 @@ resources to test. Currently only the following are defined:
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
- consume >2GB of disk space temporarily.
+ consume >2 GiB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py
index 5c83361..296faf0 100644
--- a/Lib/test/pickletester.py
+++ b/Lib/test/pickletester.py
@@ -2276,7 +2276,7 @@ class AbstractPickleTests(unittest.TestCase):
class BigmemPickleTests(unittest.TestCase):
- # Binary protocols can serialize longs of up to 2GB-1
+ # Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
@@ -2291,7 +2291,7 @@ class BigmemPickleTests(unittest.TestCase):
finally:
data = None
- # Protocol 3 can serialize up to 4GB-1 as a bytes object
+ # Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
diff --git a/Lib/test/test_bigaddrspace.py b/Lib/test/test_bigaddrspace.py
index b8c59d4..b639f68 100644
--- a/Lib/test/test_bigaddrspace.py
+++ b/Lib/test/test_bigaddrspace.py
@@ -3,7 +3,7 @@ These tests are meant to exercise that requests to create objects bigger
than what the address space allows are properly met with an OverflowError
(rather than crash weirdly).
-Primarily, this means 32-bit builds with at least 2 GB of available memory.
+Primarily, this means 32-bit builds with at least 2 GiB of available memory.
You need to pass the -M option to regrtest (e.g. "-M 2.1G") for tests to
be enabled.
"""
diff --git a/Lib/test/test_bz2.py b/Lib/test/test_bz2.py
index 58dbf96..58abdc1 100644
--- a/Lib/test/test_bz2.py
+++ b/Lib/test/test_bz2.py
@@ -62,7 +62,7 @@ class BaseTest(unittest.TestCase):
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
- # is at least 100 kB, we gather some data dynamically and compress it.
+ # is at least 100,000 bytes, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index ce4ed1b8..3158729 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -564,7 +564,7 @@ class IOTest(unittest.TestCase):
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; it takes
- # a long time to build the >2GB file and takes >2GB of disk space
+ # a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
@@ -736,7 +736,7 @@ class IOTest(unittest.TestCase):
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
- self.skipTest("test requires at least 2GB of memory")
+ self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
@@ -1421,7 +1421,7 @@ class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
- # than 2GB RAM and a 64-bit kernel.
+ # than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
@@ -1733,7 +1733,7 @@ class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
- # than 2GB RAM and a 64-bit kernel.
+ # than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
@@ -2206,7 +2206,7 @@ class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
- # than 2GB RAM and a 64-bit kernel.
+ # than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
diff --git a/Lib/test/test_largefile.py b/Lib/test/test_largefile.py
index 5b276e7..f409c5b 100644
--- a/Lib/test/test_largefile.py
+++ b/Lib/test/test_largefile.py
@@ -9,12 +9,12 @@ from test.support import TESTFN, requires, unlink
import io # C implementation of io
import _pyio as pyio # Python implementation of io
-# size of file to create (>2GB; 2GB == 2147483648 bytes)
+# size of file to create (>2 GiB; 2 GiB == 2,147,483,648 bytes)
size = 2500000000
class LargeFileTest:
"""Test that each file function works as expected for large
- (i.e. > 2GB) files.
+ (i.e. > 2 GiB) files.
"""
def setUp(self):
@@ -142,7 +142,7 @@ def setUpModule():
pass
# On Windows and Mac OSX this test consumes large resources; it
- # takes a long time to build the >2GB file and takes >2GB of disk
+ # takes a long time to build the >2 GiB file and takes >2 GiB of disk
# space therefore the resource must be enabled to run this test.
# If not, nothing after this line stanza will be executed.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
diff --git a/Lib/test/test_mmap.py b/Lib/test/test_mmap.py
index 56d85e7..80835c9 100644
--- a/Lib/test/test_mmap.py
+++ b/Lib/test/test_mmap.py
@@ -777,7 +777,7 @@ class LargeMmapTests(unittest.TestCase):
with mmap.mmap(f.fileno(), 0x10000, access=mmap.ACCESS_READ) as m:
self.assertEqual(m.size(), 0x180000000)
- # Issue 11277: mmap() with large (~4GB) sparse files crashes on OS X.
+ # Issue 11277: mmap() with large (~4 GiB) sparse files crashes on OS X.
def _test_around_boundary(self, boundary):
tail = b' DEARdear '
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index 4d57bfb..ff19fac 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -171,7 +171,7 @@ class FileTests(unittest.TestCase):
with open(support.TESTFN, "rb") as fp:
data = os.read(fp.fileno(), size)
- # The test does not try to read more than 2 GB at once because the
+ # The test does not try to read more than 2 GiB at once because the
# operating system is free to return fewer bytes than requested.
self.assertEqual(data, b'test')
@@ -2573,7 +2573,7 @@ class SendfileTestServer(asyncore.dispatcher, threading.Thread):
@unittest.skipUnless(hasattr(os, 'sendfile'), "test needs os.sendfile()")
class TestSendfile(unittest.TestCase):
- DATA = b"12345abcde" * 16 * 1024 # 160 KB
+ DATA = b"12345abcde" * 16 * 1024 # 160 KiB
SUPPORT_HEADERS_TRAILERS = not sys.platform.startswith("linux") and \
not sys.platform.startswith("solaris") and \
not sys.platform.startswith("sunos")
diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py
index 096fb54..fb16d09 100644
--- a/Lib/test/test_socket.py
+++ b/Lib/test/test_socket.py
@@ -5299,7 +5299,7 @@ class SendfileUsingSendTest(ThreadedTCPSocketTest):
Test the send() implementation of socket.sendfile().
"""
- FILESIZE = (10 * 1024 * 1024) # 10MB
+ FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index 030ace1..f0a5b21 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -779,12 +779,12 @@ class Bz2DetectReadTest(Bz2Test, DetectReadTest):
def test_detect_stream_bz2(self):
# Originally, tarfile's stream detection looked for the string
# "BZh91" at the start of the file. This is incorrect because
- # the '9' represents the blocksize (900kB). If the file was
+ # the '9' represents the blocksize (900,000 bytes). If the file was
# compressed using another blocksize, autodetection fails.
with open(tarname, "rb") as fobj:
data = fobj.read()
- # Compress with blocksize 100kB, the file starts with "BZh11".
+ # Compress with blocksize 100,000 bytes, the file starts with "BZh11".
with bz2.BZ2File(tmpname, "wb", compresslevel=1) as fobj:
fobj.write(data)
diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py
index a4887af..007581d 100644
--- a/Lib/test/test_threading.py
+++ b/Lib/test/test_threading.py
@@ -132,10 +132,10 @@ class ThreadTests(BaseTestCase):
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
- # run with a small(ish) thread stack size (256kB)
+ # run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
- print('with 256kB thread stack size...')
+ print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
@@ -144,10 +144,10 @@ class ThreadTests(BaseTestCase):
self.test_various_ops()
threading.stack_size(0)
- # run with a large thread stack size (1MB)
+ # run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
- print('with 1MB thread stack size...')
+ print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
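As context for the two tests above: threading.stack_size() applies only to threads created after the call, and it can fail on platforms that do not support custom stack sizes, which is why the tests wrap it in try/except. A minimal standalone sketch:

    import threading

    threading.stack_size(262144)                # 256 KiB = 256 * 1024 bytes
    t = threading.Thread(target=lambda: None)   # created after the call, so it gets the new size
    t.start()
    t.join()
    threading.stack_size(0)                     # 0 restores the platform default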
diff --git a/Lib/test/test_zipfile64.py b/Lib/test/test_zipfile64.py
index ae29545..cba909f 100644
--- a/Lib/test/test_zipfile64.py
+++ b/Lib/test/test_zipfile64.py
@@ -39,7 +39,7 @@ class TestsWithSourceFile(unittest.TestCase):
# Create the ZIP archive.
zipfp = zipfile.ZipFile(f, "w", compression)
- # It will contain enough copies of self.data to reach about 6GB of
+ # It will contain enough copies of self.data to reach about 6 GiB of
# raw data to store.
filecount = 6*1024**3 // len(self.data)
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index 20174d8..e67bee9 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -72,7 +72,7 @@ class ChecksumTestCase(unittest.TestCase):
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
-# Issue #10276 - check that inputs >=4GB are handled correctly.
+# Issue #10276 - check that inputs >=4 GiB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
@bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
@@ -130,7 +130,7 @@ class ExceptionTestCase(unittest.TestCase):
class BaseCompressTestCase(object):
def check_big_compress_buffer(self, size, compress_func):
_1M = 1024 * 1024
- # Generate 10MB worth of random, and expand it by repeating it.
+ # Generate 10 MiB worth of random, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
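The context line above is cut off by the hunk boundary, but the idiom it uses is self-contained: getrandbits plus to_bytes produces exactly n random bytes. A small sketch using a 1 KiB chunk instead of 1 MiB to keep it cheap:

    import random

    _1K = 1024
    # getrandbits(8 * n) returns an integer with 8*n random bits;
    # to_bytes(n, 'little') converts it into exactly n bytes.
    chunk = random.getrandbits(8 * _1K).to_bytes(_1K, 'little')
    assert len(chunk) == _1K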
diff --git a/Lib/xmlrpc/client.py b/Lib/xmlrpc/client.py
index bd3278e..2852503 100644
--- a/Lib/xmlrpc/client.py
+++ b/Lib/xmlrpc/client.py
@@ -1046,7 +1046,7 @@ def gzip_encode(data):
# in the HTTP header, as described in RFC 1952
#
# @param data The encoded data
-# @keyparam max_decode Maximum bytes to decode (20MB default), use negative
+# @keyparam max_decode Maximum bytes to decode (20 MiB default), use negative
# values for unlimited decoding
# @return the unencoded data
# @raises ValueError if data is not correctly coded.
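Both helpers documented above exist in xmlrpc.client; a short round-trip sketch (20 * 1024 * 1024 bytes is the 20 MiB default for max_decode):

    from xmlrpc.client import gzip_encode, gzip_decode

    payload = b'x' * 1000
    blob = gzip_encode(payload)
    # max_decode caps how many decoded bytes are accepted; a negative
    # value disables the limit, as the docstring above says.
    assert gzip_decode(blob, max_decode=20 * 1024 * 1024) == payload
    assert gzip_decode(blob, max_decode=-1) == payload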