author    | Tim Peters <tim.peters@gmail.com> | 2004-07-18 06:16:08 (GMT)
committer | Tim Peters <tim.peters@gmail.com> | 2004-07-18 06:16:08 (GMT)
commit    | 182b5aca27d376b08a2904bed42b751496f932f3 (patch)
tree      | df13115820dbc879c0fe2eae488c9f8c0215a7da /Lib/plat-irix6/panelparser.py
parent    | e6ddc8b20b493fef2e7cffb2e1351fe1d238857e (diff)
Whitespace normalization, via reindent.py.
Diffstat (limited to 'Lib/plat-irix6/panelparser.py')
-rw-r--r-- | Lib/plat-irix6/panelparser.py | 122
1 file changed, 61 insertions, 61 deletions
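Every hunk below changes indentation only. reindent.py (Tools/scripts/reindent.py in the CPython tree) rewrites a Python source file to use 4-space indents and no hard tab characters, trimming trailing whitespace along the way, which is why each removed line and its replacement differ only in leading whitespace. A minimal sketch of that kind of indent rewrite, assuming tab-indented input with 8-column tab stops; the function name `normalize_indents` is illustrative, not the script's actual interface:

```python
def normalize_indents(src, tabsize=8):
    """Rough sketch of a reindent-style pass: expand hard tabs, strip
    trailing whitespace, and map each 8-column indent level to 4 spaces.
    The real reindent.py drives this from the tokenizer, so it never
    touches tabs inside string literals; this naive version would.
    """
    out = []
    for line in src.splitlines():
        line = line.expandtabs(tabsize).rstrip()
        body = line.lstrip()
        levels = (len(line) - len(body)) // tabsize  # whole 8-column stops
        out.append(" " * (4 * levels) + body)
    return "\n".join(out) + "\n"

print(normalize_indents("def f():\n\treturn 1\n"))
# def f():
#     return 1
```

Applied to a file whose every body line was tab-indented, this rewrites each such line in place, matching the 61-lines-out, 61-lines-in shape of the diffstat above.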
```diff
diff --git a/Lib/plat-irix6/panelparser.py b/Lib/plat-irix6/panelparser.py
index ee50971..c831c49 100644
--- a/Lib/plat-irix6/panelparser.py
+++ b/Lib/plat-irix6/panelparser.py
@@ -15,47 +15,47 @@ separators = operators + whitespace + ';' + '"'
 # Return a list of tokens (strings).
 #
 def tokenize_string(s):
-	tokens = []
-	while s:
-		c = s[:1]
-		if c in whitespace:
-			s = s[1:]
-		elif c == ';':
-			s = ''
-		elif c == '"':
-			n = len(s)
-			i = 1
-			while i < n:
-				c = s[i]
-				i = i+1
-				if c == '"': break
-				if c == '\\': i = i+1
-			tokens.append(s[:i])
-			s = s[i:]
-		elif c in operators:
-			tokens.append(c)
-			s = s[1:]
-		else:
-			n = len(s)
-			i = 1
-			while i < n:
-				if s[i] in separators: break
-				i = i+1
-			tokens.append(s[:i])
-			s = s[i:]
-	return tokens
+    tokens = []
+    while s:
+        c = s[:1]
+        if c in whitespace:
+            s = s[1:]
+        elif c == ';':
+            s = ''
+        elif c == '"':
+            n = len(s)
+            i = 1
+            while i < n:
+                c = s[i]
+                i = i+1
+                if c == '"': break
+                if c == '\\': i = i+1
+            tokens.append(s[:i])
+            s = s[i:]
+        elif c in operators:
+            tokens.append(c)
+            s = s[1:]
+        else:
+            n = len(s)
+            i = 1
+            while i < n:
+                if s[i] in separators: break
+                i = i+1
+            tokens.append(s[:i])
+            s = s[i:]
+    return tokens
 
 
 # Tokenize a whole file (given as file object, not as file name).
 # Return a list of tokens (strings).
 #
 def tokenize_file(fp):
-	tokens = []
-	while 1:
-		line = fp.readline()
-		if not line: break
-		tokens = tokens + tokenize_string(line)
-	return tokens
+    tokens = []
+    while 1:
+        line = fp.readline()
+        if not line: break
+        tokens = tokens + tokenize_string(line)
+    return tokens
 
 
 # Exception raised by parse_exr.
@@ -71,50 +71,50 @@ syntax_error = 'syntax error'
 # May raise syntax_error.
 #
 def parse_expr(tokens):
-	if (not tokens) or tokens[0] != '(':
-		raise syntax_error, 'expected "("'
-	tokens = tokens[1:]
-	expr = []
-	while 1:
-		if not tokens:
-			raise syntax_error, 'missing ")"'
-		if tokens[0] == ')':
-			return expr, tokens[1:]
-		elif tokens[0] == '(':
-			subexpr, tokens = parse_expr(tokens)
-			expr.append(subexpr)
-		else:
-			expr.append(tokens[0])
-			tokens = tokens[1:]
+    if (not tokens) or tokens[0] != '(':
+        raise syntax_error, 'expected "("'
+    tokens = tokens[1:]
+    expr = []
+    while 1:
+        if not tokens:
+            raise syntax_error, 'missing ")"'
+        if tokens[0] == ')':
+            return expr, tokens[1:]
+        elif tokens[0] == '(':
+            subexpr, tokens = parse_expr(tokens)
+            expr.append(subexpr)
+        else:
+            expr.append(tokens[0])
+            tokens = tokens[1:]
 
 # Parse a file (given as file object, not as file name).
 # Return a list of parsed S-expressions found at the top level.
 #
 def parse_file(fp):
-	tokens = tokenize_file(fp)
-	exprlist = []
-	while tokens:
-		expr, tokens = parse_expr(tokens)
-		exprlist.append(expr)
-	return exprlist
+    tokens = tokenize_file(fp)
+    exprlist = []
+    while tokens:
+        expr, tokens = parse_expr(tokens)
+        exprlist.append(expr)
+    return exprlist
 
 # EXAMPLE:
 #
 # The input
-#	'(hip (hop hur-ray))'
+#       '(hip (hop hur-ray))'
 #
 # passed to tokenize_string() returns the token list
-#	['(', 'hip', '(', 'hop', 'hur-ray', ')', ')']
+#       ['(', 'hip', '(', 'hop', 'hur-ray', ')', ')']
 #
 # When this is passed to parse_expr() it returns the expression
-#	['hip', ['hop', 'hur-ray']]
+#       ['hip', ['hop', 'hur-ray']]
 # plus an empty token list (because there are no tokens left.
 #
 # When a file containing the example is passed to parse_file() it returns
 # a list whose only element is the output of parse_expr() above:
-#	[['hip', ['hop', 'hur-ray']]]
+#       [['hip', ['hop', 'hur-ray']]]
 
 
 # TOKENIZING:
@@ -123,6 +123,6 @@ def parse_file(fp):
 #
 # Tokens are separated by whitespace, except the following characters
 # always form a separate token (outside strings):
-#	( ) '
+#       ( ) '
 # Strings are enclosed in double quotes (") and backslash (\) is used
 # as escape character in strings.
```
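The code in this file is early Python 2: `syntax_error` is a string exception and `raise syntax_error, '...'` is the old raise syntax, so the module will not run on a modern interpreter. For readers who want to try the EXAMPLE above, here is a self-contained Python 3 sketch of the same tokenizer and parser. The values of `whitespace` and `operators` are assumptions inferred from the TOKENIZING note (their real definitions sit above the first hunk), and `PanelSyntaxError` stands in for the module's string exception:

```python
whitespace = ' \t\n'
operators = "()'"   # assumed: the note says ( ) ' always form separate tokens
separators = operators + whitespace + ';' + '"'

class PanelSyntaxError(Exception):   # stand-in for the string exception
    pass

def tokenize_string(s):
    # Same scanning logic as the diff: skip whitespace, discard ';' comments,
    # keep quoted strings (with backslash escapes) as single tokens.
    tokens = []
    while s:
        c = s[:1]
        if c in whitespace:
            s = s[1:]
        elif c == ';':            # comment: drop the rest of the line
            s = ''
        elif c == '"':            # scan to the closing quote, honoring \ escapes
            n, i = len(s), 1
            while i < n:
                c = s[i]
                i += 1
                if c == '"':
                    break
                if c == '\\':
                    i += 1
            tokens.append(s[:i])
            s = s[i:]
        elif c in operators:
            tokens.append(c)
            s = s[1:]
        else:                     # ordinary atom: scan to the next separator
            n, i = len(s), 1
            while i < n and s[i] not in separators:
                i += 1
            tokens.append(s[:i])
            s = s[i:]
    return tokens

def parse_expr(tokens):
    # Parse one parenthesized S-expression; return (expression, leftover tokens).
    if not tokens or tokens[0] != '(':
        raise PanelSyntaxError('expected "("')
    tokens = tokens[1:]
    expr = []
    while True:
        if not tokens:
            raise PanelSyntaxError('missing ")"')
        if tokens[0] == ')':
            return expr, tokens[1:]
        elif tokens[0] == '(':
            subexpr, tokens = parse_expr(tokens)
            expr.append(subexpr)
        else:
            expr.append(tokens[0])
            tokens = tokens[1:]

print(tokenize_string("(hip (hop hur-ray))"))
# ['(', 'hip', '(', 'hop', 'hur-ray', ')', ')']
print(parse_expr(tokenize_string("(hip (hop hur-ray))")))
# (['hip', ['hop', 'hur-ray']], [])
```

Running it prints the token list and the (expression, leftover-tokens) pair quoted in the EXAMPLE comments of the file.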