author    | Tim Peters <tim.peters@gmail.com> | 2002-05-23 15:15:30 (GMT)
committer | Tim Peters <tim.peters@gmail.com> | 2002-05-23 15:15:30 (GMT)
commit    | 8ac1495a6a1d18111a626cec0c7f2eb67df3edb3 (patch)
tree      | 2d91993770d3a5b3f3668857983d9bf75276b14f
parent    | f655328483b2e237cc2f71c1c308eceb2f30f6fd (diff)
Whitespace normalization.
-rw-r--r-- | Lib/StringIO.py             | 12
-rw-r--r-- | Lib/email/Charset.py        | 8
-rw-r--r-- | Lib/email/Header.py         | 8
-rw-r--r-- | Lib/email/Message.py        | 2
-rw-r--r-- | Lib/email/Utils.py          | 2
-rw-r--r-- | Lib/email/base64MIME.py     | 26
-rw-r--r-- | Lib/email/quopriMIME.py     | 8
-rw-r--r-- | Lib/fileinput.py            | 18
-rw-r--r-- | Lib/macpath.py              | 26
-rw-r--r-- | Lib/pickle.py               | 2
-rw-r--r-- | Lib/random.py               | 10
-rwxr-xr-x | Lib/tabnanny.py             | 14
-rw-r--r-- | Lib/test/string_tests.py    | 6
-rw-r--r-- | Lib/test/test_base64.py     | 31
-rwxr-xr-x | Lib/test/test_binascii.py   | 2
-rw-r--r-- | Lib/test/test_isinstance.py | 10
-rw-r--r-- | Lib/test/test_math.py       | 2
-rw-r--r-- | Lib/test/test_string.py     | 2
-rw-r--r-- | Lib/test/test_unicode.py    | 2
-rw-r--r-- | Lib/tokenize.py             | 10
20 files changed, 100 insertions, 101 deletions
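The log message is terse, so a note on what this cleanup actually is: in the CPython tree, "whitespace normalization" means stripping trailing spaces and tabs (and reducing whitespace-only lines to empty lines) across the files listed above, which is why almost every hunk below shows a removed line and an added line that look identical. The tree's Tools/scripts/reindent.py is the usual helper for this kind of pass. The snippet below is only a minimal sketch of the idea, assuming plain text files and a hypothetical normalize() helper; it is not the script used for this commit.

```python
import sys

def normalize(path):
    # Strip trailing whitespace from each line and rewrite the file
    # in place only when something actually changed.
    with open(path) as f:
        lines = f.readlines()
    fixed = [line.rstrip() + "\n" for line in lines]
    if fixed != lines:
        with open(path, "w") as f:
            f.writelines(fixed)
        return True
    return False

if __name__ == "__main__":
    # Print the names of the files that were touched.
    for name in sys.argv[1:]:
        if normalize(name):
            print(name)
```

Run over the files in the diffstat, a pass like this yields exactly the kind of patch that follows: pairs of lines whose only visible difference is trailing whitespace.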
diff --git a/Lib/StringIO.py b/Lib/StringIO.py
index b8cba32..bdf11cc 100644
--- a/Lib/StringIO.py
+++ b/Lib/StringIO.py
@@ -37,16 +37,16 @@ except ImportError:
 __all__ = ["StringIO"]
 
 class StringIO:
-    """class StringIO([buffer])
-
+    """class StringIO([buffer])
+
     When a StringIO object is created, it can be initialized to an existing
     string by passing the string to the constructor. If no string is given,
-    the StringIO will start empty.
+    the StringIO will start empty.
 
     The StringIO object can accept either Unicode or 8-bit strings, but
     mixing the two may take some care. If both are used, 8-bit strings that
     cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
-    a UnicodeError to be raised when getvalue() is called.
+    a UnicodeError to be raised when getvalue() is called.
     """
     def __init__(self, buf = ''):
         # Force self.buf to be a string or unicode
@@ -63,7 +63,7 @@ class StringIO:
         return iter(self.readline, '')
 
     def close(self):
-        """Free the memory buffer.
+        """Free the memory buffer.
         """
         if not self.closed:
             self.closed = 1
@@ -186,7 +186,7 @@ class StringIO:
         but mixing the two may take some care. If both are used, 8-bit
         strings that cannot be interpreted as 7-bit ASCII (that use the
         8th bit) will cause a UnicodeError to be raised when getvalue()
-        is called.
+        is called.
         """
         if self.buflist:
             self.buf += ''.join(self.buflist)
diff --git a/Lib/email/Charset.py b/Lib/email/Charset.py
index 4874597..0de5f80 100644
--- a/Lib/email/Charset.py
+++ b/Lib/email/Charset.py
@@ -13,7 +13,7 @@
 QP = 1 # Quoted-Printable
 BASE64 = 2 # Base64
 
 # In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
-MISC_LEN = 7
+MISC_LEN = 7
 
 DEFAULT_CHARSET = 'us-ascii'
@@ -22,11 +22,11 @@ DEFAULT_CHARSET = 'us-ascii'
 # Defaults
 CHARSETS = {
     # input header enc body enc output conv
-    'iso-8859-1': (QP, QP, None),
+    'iso-8859-1': (QP, QP, None),
     'iso-8859-2': (QP, QP, None),
     'us-ascii': (None, None, None),
     'big5': (BASE64, BASE64, None),
-    'gb2312': (BASE64, BASE64, None),
+    'gb2312': (BASE64, BASE64, None),
     'euc-jp': (BASE64, None, 'iso-2022-jp'),
     'shift_jis': (BASE64, None, 'iso-2022-jp'),
     'iso-2022-jp': (BASE64, None, None),
@@ -125,7 +125,7 @@ class Charset:
     converting between character sets, given the availability of the
     applicable codecs.  Given an character set, it will do its best to
     provide information on how to use that character set in an email.
-
+
     Certain character sets must be encoded with quoted-printable or base64
     when used in email headers or bodies.  Certain character sets must be
     converted outright, and are not allowed in email.  Instances of this
diff --git a/Lib/email/Header.py b/Lib/email/Header.py
index fb8792c..95b5a37 100644
--- a/Lib/email/Header.py
+++ b/Lib/email/Header.py
@@ -61,7 +61,7 @@ def decode_header(header):
         if not ecre.search(line):
             decoded.append((line, None))
             continue
-
+
         parts = ecre.split(line)
         while parts:
             unenc = parts.pop(0).strip()
@@ -149,14 +149,14 @@ class Header:
         if charset is None:
             charset = self._charset
         self._chunks.append((s, charset))
-
+
     def _split(self, s, charset):
         # Split up a header safely for use with encode_chunks.  BAW: this
         # appears to be a private convenience method.
         splittable = charset.to_splittable(s)
         encoded = charset.from_splittable(splittable)
         elen = charset.encoded_header_len(encoded)
-
+
         if elen <= self._maxlinelen:
             return [(encoded, charset)]
         # BAW: should we use encoded?
@@ -185,7 +185,7 @@ class Header:
         Base64 or quoted-printable) header strings.  In addition, there is a
         75-character length limit on any given encoded header field, so
         line-wrapping must be performed, even with double-byte character sets.
-
+
         This method will do its best to convert the string to the correct
         character set used in email, and encode and line wrap it safely with
         the appropriate scheme for that character set.
diff --git a/Lib/email/Message.py b/Lib/email/Message.py
index 6cb659c..84a4e16 100644
--- a/Lib/email/Message.py
+++ b/Lib/email/Message.py
@@ -216,7 +216,7 @@ class Message:
     def get_charset(self):
         """Return the Charset object associated with the message's payload."""
         return self._charset
-
+
     #
     # MAPPING INTERFACE (partial)
     #
diff --git a/Lib/email/Utils.py b/Lib/email/Utils.py
index 99a65f9..0c57392 100644
--- a/Lib/email/Utils.py
+++ b/Lib/email/Utils.py
@@ -88,7 +88,7 @@ def formataddr(pair):
    """The inverse of parseaddr(), this takes a 2-tuple of the form
    (realname, email_address) and returns the string value suitable for
    an RFC 2822 From:, To: or Cc:.
-
+
    If the first element of pair is false, then the second element is
    returned unmodified.
    """
diff --git a/Lib/email/base64MIME.py b/Lib/email/base64MIME.py
index 08420b2..11979e3 100644
--- a/Lib/email/base64MIME.py
+++ b/Lib/email/base64MIME.py
@@ -39,20 +39,20 @@ MISC_LEN = 7
 
 # Helpers
 def base64_len(s):
     """Return the length of s when it is encoded with base64."""
-    groups_of_3, leftover = divmod(len(s), 3)
-    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+    groups_of_3, leftover = divmod(len(s), 3)
+    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
     # Thanks, Tim!
-    n = groups_of_3 * 4
-    if leftover:
-        n += 4
-    return n
+    n = groups_of_3 * 4
+    if leftover:
+        n += 4
+    return n
 
 
 def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
                   eol=NL):
     """Encode a single header line with Base64 encoding in a given charset.
-
+
     Defined in RFC 2045, this Base64 encoding is identical to normal Base64
     encoding, except that each line must be intelligently wrapped (respecting
     the Base64 encoding), and subsequent lines must start with a space.
@@ -72,7 +72,7 @@ def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
 
     "=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
      =?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
-
+
     with each line wrapped at, at most, maxlinelen characters (defaults to 76
     characters).
     """
@@ -82,7 +82,7 @@ def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
 
     if not keep_eols:
         header = fix_eols(header)
-
+
     # Base64 encode each line, in encoded chunks no greater than maxlinelen in
     # length, after the RFC chrome is added in.
     base64ed = []
@@ -91,7 +91,7 @@ def header_encode(header, charset='iso-8859-1', keep_eols=0, maxlinelen=76,
     # BAW: Ben's original code used a step of max_unencoded, but I think it
     # ought to be max_encoded.  Otherwise, where's max_encoded used?  I'm
-    # still not sure what the
+    # still not sure what the
     for i in range(0, len(header), max_unencoded):
         base64ed.append(b2a_base64(header[i:i+max_unencoded]))
@@ -126,10 +126,10 @@ def encode(s, binary=1, maxlinelen=76, eol=NL):
     """
     if not s:
         return s
-
+
     if not binary:
         s = fix_eols(s)
-
+
     encvec = []
     max_unencoded = maxlinelen * 3 / 4
     for i in range(0, len(s), max_unencoded):
@@ -162,7 +162,7 @@ def decode(s, convert_eols=None):
     """
     if not s:
         return s
-
+
     dec = a2b_base64(s)
     if convert_eols:
         return dec.replace(CRLF, convert_eols)
diff --git a/Lib/email/quopriMIME.py b/Lib/email/quopriMIME.py
index 002034e..afd2e5e 100644
--- a/Lib/email/quopriMIME.py
+++ b/Lib/email/quopriMIME.py
@@ -22,7 +22,7 @@ in To:/From:/Cc: etc. fields, as well as Subject: lines.
 This module does not do the line wrapping or end-of-line character
 conversion necessary for proper internationalized headers; it only
 does dumb encoding and decoding.  To deal with the various line
-wrapping issues, use the email.Header module.
+wrapping issues, use the email.Header module.
 """
 
 import re
@@ -50,7 +50,7 @@ def body_quopri_check(c):
     """Return true if the character should be escaped with body quopri."""
     return bqre.match(c) and 1
 
-
+
 def header_quopri_len(s):
     """Return the length of str when it is encoded with header quopri."""
     count = 0
@@ -131,7 +131,7 @@
     # lenght, after the RFC chrome is added in.
     quoted = []
     max_encoded = maxlinelen - len(charset) - MISC_LEN
-
+
     for c in header:
         # Space may be represented as _ instead of =20 for readability
         if c == ' ':
@@ -187,7 +187,7 @@ def encode(body, binary=0, maxlinelen=76, eol=NL):
             line = line[:-2]
         elif line[-1] in CRLF:
             line = line[:-1]
-
+
         lineno += 1
         encoded_line = ''
         prev = None
diff --git a/Lib/fileinput.py b/Lib/fileinput.py
index db8ddbd..5626ee8 100644
--- a/Lib/fileinput.py
+++ b/Lib/fileinput.py
@@ -94,7 +94,7 @@ def input(files=None, inplace=0, backup="", bufsize=0):
     Create an instance of the FileInput class. The instance will be used
     as global state for the functions of this module, and is also returned
     to use during iteration. The parameters to this function will be passed
-    along to the constructor of the FileInput class.
+    along to the constructor of the FileInput class.
     """
     global _state
     if _state and _state._file:
@@ -118,7 +118,7 @@ def nextfile():
     changed until after the first line of the next file has been read.
     Before the first line has been read, this function has no effect;
     it cannot be used to skip the first file.  After the last line of the
-    last file has been read, this function has no effect.
+    last file has been read, this function has no effect.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -127,7 +127,7 @@ def filename():
     """
     Return the name of the file currently being read.
-    Before the first line has been read, returns None.
+    Before the first line has been read, returns None.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -137,7 +137,7 @@ def lineno():
     """
     Return the cumulative line number of the line that has just been read.
     Before the first line has been read, returns 0.  After the last line
-    of the last file has been read, returns the line number of that line.
+    of the last file has been read, returns the line number of that line.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -147,7 +147,7 @@ def filelineno():
     """
     Return the line number in the current file.  Before the first line
     has been read, returns 0.  After the last line of the last file has
-    been read, returns the line number of that line within the file.
+    been read, returns the line number of that line within the file.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -156,7 +156,7 @@ def filelineno():
 
 def isfirstline():
     """
     Returns true the line just read is the first line of its file,
-    otherwise returns false.
+    otherwise returns false.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -165,7 +165,7 @@ def isfirstline():
 
 def isstdin():
     """
     Returns true if the last line was read from sys.stdin,
-    otherwise returns false.
+    otherwise returns false.
     """
     if not _state:
         raise RuntimeError, "no active input()"
@@ -173,14 +173,14 @@
 class FileInput:
     """class FileInput([files[, inplace[, backup]]])
-
+
     Class FileInput is the implementation of the module; its methods
     filename(), lineno(), fileline(), isfirstline(), isstdin(), nextfile()
     and close() correspond to the functions of the same name in the module.
     In addition it has a readline() method which returns the next
     input line, and a __getitem__() method which implements the
     sequence behavior. The sequence must be accessed in strictly
-    sequential order; random access and readline() cannot be mixed.
+    sequential order; random access and readline() cannot be mixed.
     """
 
     def __init__(self, files=None, inplace=0, backup="", bufsize=0):
diff --git a/Lib/macpath.py b/Lib/macpath.py
index d358bd2..ad87cb1 100644
--- a/Lib/macpath.py
+++ b/Lib/macpath.py
@@ -238,16 +238,16 @@ def abspath(path):
 
 # realpath is a no-op on systems without islink support
 def realpath(path):
-    path = abspath(path)
-    try:
-        import macfs
-    except ImportError:
-        return path
-    if not path:
-        return path
-    components = path.split(':')
-    path = components[0] + ':'
-    for c in components[1:]:
-        path = join(path, c)
-    path = macfs.ResolveAliasFile(path)[0].as_pathname()
-    return path
+    path = abspath(path)
+    try:
+        import macfs
+    except ImportError:
+        return path
+    if not path:
+        return path
+    components = path.split(':')
+    path = components[0] + ':'
+    for c in components[1:]:
+        path = join(path, c)
+    path = macfs.ResolveAliasFile(path)[0].as_pathname()
+    return path
diff --git a/Lib/pickle.py b/Lib/pickle.py
index d24786a..6a162a9 100644
--- a/Lib/pickle.py
+++ b/Lib/pickle.py
@@ -865,7 +865,7 @@ class Unpickler:
             import warnings
             warnings.warn("The None return argument form of __reduce__ is "
                           "deprecated. Return a tuple of arguments instead.",
-                          DeprecationWarning)
+                          DeprecationWarning)
             value = callable.__basicnew__()
         else:
             value = apply(callable, arg_tup)
diff --git a/Lib/random.py b/Lib/random.py
index 72822f2..af788c6 100644
--- a/Lib/random.py
+++ b/Lib/random.py
@@ -445,14 +445,14 @@ class Random:
     ## -------------------- gamma distribution --------------------
 
     def gammavariate(self, alpha, beta):
-
+
         # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
-
+
         # Warning: a few older sources define the gamma distribution in terms
         # of alpha > -1.0
         if alpha <= 0.0 or beta <= 0.0:
             raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
-
+
         random = self.random
 
         if alpha > 1.0:
@@ -463,7 +463,7 @@ class Random:
             ainv = _sqrt(2.0 * alpha - 1.0)
             bbb = alpha - LOG4
             ccc = alpha + ainv
-
+
             while 1:
                 u1 = random()
                 u2 = random()
@@ -630,7 +630,7 @@ def _test(N=20000):
     _test_generator(N, 'vonmisesvariate(0.0, 1.0)')
     _test_generator(N, 'gammavariate(0.01, 1.0)')
     _test_generator(N, 'gammavariate(0.1, 1.0)')
-    _test_generator(N, 'gammavariate(0.1, 2.0)')
+    _test_generator(N, 'gammavariate(0.1, 2.0)')
     _test_generator(N, 'gammavariate(0.5, 1.0)')
     _test_generator(N, 'gammavariate(0.9, 1.0)')
     _test_generator(N, 'gammavariate(1.0, 1.0)')
diff --git a/Lib/tabnanny.py b/Lib/tabnanny.py
index 5b10474..c9a1ccd 100755
--- a/Lib/tabnanny.py
+++ b/Lib/tabnanny.py
@@ -2,14 +2,14 @@
 
 """The Tab Nanny despises ambiguous indentation.  She knows no mercy.
 
-tabnanny -- Detection of ambiguous indentation
+tabnanny -- Detection of ambiguous indentation
 
 For the time being this module is intended to be called as a script.
 However it is possible to import it into an IDE and use the function
-check() described below.
+check() described below.
 
 Warning: The API provided by this module is likely to change in future
-releases; such changes may not be backward compatible.
+releases; such changes may not be backward compatible.
 """
 
 # Released to the public domain, by Tim Peters, 15 April 1998.
@@ -60,7 +60,7 @@ def main():
 class NannyNag(Exception):
     """
     Raised by tokeneater() if detecting an ambiguous indent.
-    Captured and handled in check().
+    Captured and handled in check().
     """
     def __init__(self, lineno, msg, line):
         self.lineno, self.msg, self.line = lineno, msg, line
@@ -73,14 +73,14 @@ class NannyNag(Exception):
 
 def check(file):
     """check(file_or_dir)
-
+
     If file_or_dir is a directory and not a symbolic link, then recursively
     descend the directory tree named by file_or_dir, checking all .py files
     along the way.  If file_or_dir is an ordinary Python source file, it is
     checked for whitespace related problems.  The diagnostic messages are
-    written to standard output using the print statement.
+    written to standard output using the print statement.
     """
-
+
     if os.path.isdir(file) and not os.path.islink(file):
         if verbose:
             print "%s: listing directory" % `file`
diff --git a/Lib/test/string_tests.py b/Lib/test/string_tests.py
index 334a6b9..075e1c9 100644
--- a/Lib/test/string_tests.py
+++ b/Lib/test/string_tests.py
@@ -177,13 +177,13 @@ def run_method_tests(test):
 
     # strip/lstrip/rstrip with unicode arg
     if have_unicode:
-        test('strip', 'xyzzyhelloxyzzy',
+        test('strip', 'xyzzyhelloxyzzy',
              unicode('hello', 'ascii'), unicode('xyz', 'ascii'))
-        test('lstrip', 'xyzzyhelloxyzzy',
+        test('lstrip', 'xyzzyhelloxyzzy',
              unicode('helloxyzzy', 'ascii'), unicode('xyz', 'ascii'))
         test('rstrip', 'xyzzyhelloxyzzy',
              unicode('xyzzyhello', 'ascii'), unicode('xyz', 'ascii'))
-        test('strip', 'hello',
+        test('strip', 'hello',
              unicode('hello', 'ascii'), unicode('xyz', 'ascii'))
 
     test('swapcase', 'HeLLo cOmpUteRs', 'hEllO CoMPuTErS')
diff --git a/Lib/test/test_base64.py b/Lib/test/test_base64.py
index 8ee4d2e..02cbd49 100644
--- a/Lib/test/test_base64.py
+++ b/Lib/test/test_base64.py
@@ -6,20 +6,20 @@ from binascii import Error as binascii_error
 class Base64TestCase(unittest.TestCase):
     def test_encode_string(self):
         """Testing encode string"""
-        test_support.verify(base64.encodestring("www.python.org") ==
-                            "d3d3LnB5dGhvbi5vcmc=\n",
+        test_support.verify(base64.encodestring("www.python.org") ==
+                            "d3d3LnB5dGhvbi5vcmc=\n",
                             reason="www.python.org encodestring failed")
-        test_support.verify(base64.encodestring("a") ==
-                            "YQ==\n",
+        test_support.verify(base64.encodestring("a") ==
+                            "YQ==\n",
                             reason="a encodestring failed")
-        test_support.verify(base64.encodestring("ab") ==
-                            "YWI=\n",
+        test_support.verify(base64.encodestring("ab") ==
+                            "YWI=\n",
                             reason="ab encodestring failed")
-        test_support.verify(base64.encodestring("abc") ==
-                            "YWJj\n",
+        test_support.verify(base64.encodestring("abc") ==
+                            "YWJj\n",
                             reason="abc encodestring failed")
-        test_support.verify(base64.encodestring("") ==
-                            "",
+        test_support.verify(base64.encodestring("") ==
+                            "",
                             reason="null encodestring failed")
         test_support.verify(base64.encodestring(
             "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#0^&*();:<>,. []{}") ==
@@ -29,16 +29,16 @@ class Base64TestCase(unittest.TestCase):
     def test_decode_string(self):
         """Testing decode string"""
         test_support.verify(base64.decodestring("d3d3LnB5dGhvbi5vcmc=\n") ==
-                            "www.python.org",
+                            "www.python.org",
                             reason="www.python.org decodestring failed")
         test_support.verify(base64.decodestring("YQ==\n") ==
-                            "a",
+                            "a",
                             reason="a decodestring failed")
         test_support.verify(base64.decodestring("YWI=\n") ==
-                            "ab",
+                            "ab",
                             reason="ab decodestring failed")
         test_support.verify(base64.decodestring("YWJj\n") ==
-                            "abc",
+                            "abc",
                             reason="abc decodestring failed")
         test_support.verify(base64.decodestring(
             "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNTY3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n") ==
@@ -50,10 +50,9 @@ class Base64TestCase(unittest.TestCase):
             pass
         else:
             self.fail("expected a binascii.Error on null decode request")
-
+
 def test_main():
     test_support.run_unittest(Base64TestCase)
 
 if __name__ == "__main__":
     test_main()
-
diff --git a/Lib/test/test_binascii.py b/Lib/test/test_binascii.py
index d4a905a..2c59160 100755
--- a/Lib/test/test_binascii.py
+++ b/Lib/test/test_binascii.py
@@ -113,7 +113,7 @@ else:
 
 # Verify the treatment of Unicode strings
 if have_unicode:
-    verify(binascii.hexlify(unicode('a', 'ascii')) == '61',
+    verify(binascii.hexlify(unicode('a', 'ascii')) == '61',
            "hexlify failed for Unicode")
 
 # A test for SF bug 534347 (segfaults without the proper fix)
diff --git a/Lib/test/test_isinstance.py b/Lib/test/test_isinstance.py
index e5eb6ed..ebdcbb4 100644
--- a/Lib/test/test_isinstance.py
+++ b/Lib/test/test_isinstance.py
@@ -104,7 +104,7 @@ class TestIsSubclassExceptions(unittest.TestCase):
             __bases__ = property(getbases)
         class S(C):
             pass
-
+
         self.assertRaises(TypeError, issubclass, C(), S())
 
     # Like above, but test the second branch, where the __bases__ of the
@@ -176,7 +176,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
     # combinations.
 
     def test_isinstance_normal(self):
-        # normal instances
+        # normal instances
         self.assertEqual(True, isinstance(Super(), Super))
         self.assertEqual(False, isinstance(Super(), Child))
         self.assertEqual(False, isinstance(Super(), AbstractSuper))
@@ -186,7 +186,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
         self.assertEqual(False, isinstance(Child(), AbstractSuper))
 
     def test_isinstance_abstract(self):
-        # abstract instances
+        # abstract instances
         self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
         self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
         self.assertEqual(False, isinstance(AbstractSuper(), Super))
@@ -196,7 +196,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
         self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
         self.assertEqual(False, isinstance(AbstractChild(), Super))
         self.assertEqual(False, isinstance(AbstractChild(), Child))
-
+
     def test_subclass_normal(self):
         # normal classes
         self.assertEqual(True, issubclass(Super, Super))
@@ -217,7 +217,7 @@ class TestIsInstanceIsSubclass(unittest.TestCase):
         self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
         self.assertEqual(False, issubclass(AbstractChild, Super))
         self.assertEqual(False, issubclass(AbstractChild, Child))
-
+
diff --git a/Lib/test/test_math.py b/Lib/test/test_math.py
index cb2b86c..d64024f 100644
--- a/Lib/test/test_math.py
+++ b/Lib/test/test_math.py
@@ -137,7 +137,7 @@ testit('pow(2,-1)', math.pow(2,-1), 0.5)
 print 'radians'
 testit('radians(180)', math.radians(180), math.pi)
 testit('radians(90)', math.radians(90), math.pi/2)
-testit('radians(-45)', math.radians(-45), -math.pi/4)
+testit('radians(-45)', math.radians(-45), -math.pi/4)
 
 print 'sin'
 testit('sin(0)', math.sin(0), 0)
diff --git a/Lib/test/test_string.py b/Lib/test/test_string.py
index 8f0ea47..cc61512 100644
--- a/Lib/test/test_string.py
+++ b/Lib/test/test_string.py
@@ -38,7 +38,7 @@ def test(name, input, output, *args):
         value = apply(f, (input,) + args)
     if value is input:
         if verbose:
-            print 'no'
+            print 'no'
         print '*',f, `input`, `output`, `value`
         return
     if value != output:
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index 5d73939..5465051 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -68,7 +68,7 @@ def test(method, input, output, *args):
         exc = sys.exc_info()[:2]
     if value is input:
         if verbose:
-            print 'no'
+            print 'no'
         print '*',f, `input`, `output`, `value`
         return
     if value != output or type(value) is not type(output):
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 42aafe4..22f28c4 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -124,14 +124,14 @@ def tokenize(readline, tokeneater=printtoken):
     """
     The tokenize() function accepts two parameters: one representing the
     input stream, and one providing an output mechanism for tokenize().
-
+
     The first parameter, readline, must be a callable object which provides
     the same interface as the readline() method of built-in file objects.
-    Each call to the function should return one line of input as a string.
+    Each call to the function should return one line of input as a string.
 
     The second parameter, tokeneater, must also be a callable object. It is
     called once for each token, with five arguments, corresponding to the
-    tuples generated by generate_tokens().
+    tuples generated by generate_tokens().
     """
     try:
         tokenize_loop(readline, tokeneater)
@@ -149,13 +149,13 @@ def generate_tokens(readline):
     must be a callable object which provides the same interface as the
     readline() method of built-in file objects.  Each call to the function
     should return one line of input as a string.
-
+
     The generator produces 5-tuples with these members: the token type; the
     token string; a 2-tuple (srow, scol) of ints specifying the row and
     column where the token begins in the source; a 2-tuple (erow, ecol) of
     ints specifying the row and column where the token ends in the source;
     and the line on which the token was found. The line passed is the
-    logical line; continuation lines are included.
+    logical line; continuation lines are included.
     """
     lnum = parenlev = continued = 0
     namechars, numchars = string.ascii_letters + '_', '0123456789'