author    Gustavo Niemeyer <gustavo@niemeyer.net>  2003-04-17 21:31:33 (GMT)
committer Gustavo Niemeyer <gustavo@niemeyer.net>  2003-04-17 21:31:33 (GMT)
commit    68d8cef89a307bafc752da68dce078306bc51352 (patch)
tree      a1a740dce058de6a3810ceb011675ab57dd900c6 /Lib
parent    84c2b1b9aa3a596b597d37e6258c790987e50963 (diff)
Implemented posix-mode parsing support in shlex.py, as discussed on the
mailing list, and in patch #722686.
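
A quick usage sketch (not part of the diff itself) of the interface this patch
introduces: split() tokenizes a string with POSIX-style quoting, and a shlex
instance is now directly iterable.

    import shlex

    # split() strips quotes and preserves whitespace inside them.
    print shlex.split('foo "bar bla" fasel')   # ['foo', 'bar bla', 'fasel']

    # In posix mode EOF is signalled by None, which ends the iteration.
    lexer = shlex.shlex('ham "spam spam"', posix=1)
    lexer.whitespace_split = 1
    for token in lexer:
        print token                            # ham, then: spam spam
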
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/shlex.py             132
-rw-r--r--  Lib/test/test_shlex.py   191
2 files changed, 299 insertions, 24 deletions
diff --git a/Lib/shlex.py b/Lib/shlex.py
index 7ffa79c..863e404 100644
--- a/Lib/shlex.py
+++ b/Lib/shlex.py
@@ -1,28 +1,51 @@
+# -*- coding: iso-8859-1 -*-
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
+# Posix compliance, split(), string arguments, and
+# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
-__all__ = ["shlex"]
+from types import StringTypes
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = ["shlex", "split"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
- def __init__(self, instream=None, infile=None):
+ def __init__(self, instream=None, infile=None, posix=0):
+ if type(instream) in StringTypes:
+ instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
+ self.posix = posix
+ if posix:
+ self.eof = None
+ else:
+ self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
+ if self.posix:
+ self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
+ 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
+ self.whitespace_split = 0
self.quotes = '\'"'
+ self.escape = '\\'
+ self.escapedquotes = '"'
self.state = ' '
self.pushback = []
self.lineno = 1
@@ -42,6 +65,8 @@ class shlex:
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
+ if type(newstream) in StringTypes:
+ newstream = StringIO(newstream)
self.filestack.insert(0, (self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
@@ -73,29 +98,31 @@ class shlex:
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
- while raw == self.source:
- spec = self.sourcehook(self.read_token())
- if spec:
- (newfile, newstream) = spec
- self.push_source(newstream, newfile)
- raw = self.get_token()
+ if self.source is not None:
+ while raw == self.source:
+ spec = self.sourcehook(self.read_token())
+ if spec:
+ (newfile, newstream) = spec
+ self.push_source(newstream, newfile)
+ raw = self.get_token()
# Maybe we got EOF instead?
- while raw == "":
+ while raw == self.eof:
if len(self.filestack) == 0:
- return ""
+ return self.eof
else:
self.pop_source()
raw = self.get_token()
- # Neither inclusion nor EOF
+ # Neither inclusion nor EOF
if self.debug >= 1:
- if raw:
+ if raw != self.eof:
print "shlex: token=" + `raw`
else:
print "shlex: token=EOF"
return raw
def read_token(self):
- "Read a token from the input stream (no pushback or inclusions)"
+ quoted = 0
+ escapedstate = ' '
while 1:
nextchar = self.instream.read(1)
if nextchar == '\n':
@@ -113,35 +140,65 @@ class shlex:
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in whitespace state"
- if self.token:
+ if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
+ elif self.posix and nextchar in self.escape:
+ escapedstate = 'a'
+ self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
- self.token = nextchar
+ if not self.posix:
+ self.token = nextchar
self.state = nextchar
+ elif self.whitespace_split:
+ self.token = nextchar
+ self.state = 'a'
else:
self.token = nextchar
- if self.token:
+ if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
- self.token = self.token + nextchar
- if nextchar == self.state:
- self.state = ' '
- break
- elif not nextchar: # end of file
+ quoted = 1
+ if not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in quotes state"
# XXX what error should be raised here?
raise ValueError, "No closing quotation"
+ if nextchar == self.state:
+ if not self.posix:
+ self.token = self.token + nextchar
+ self.state = ' '
+ break
+ else:
+ self.state = 'a'
+ elif self.posix and nextchar in self.escape and \
+ self.state in self.escapedquotes:
+ escapedstate = self.state
+ self.state = nextchar
+ else:
+ self.token = self.token + nextchar
+ elif self.state in self.escape:
+ if not nextchar: # end of file
+ if self.debug >= 2:
+ print "shlex: I see EOF in escape state"
+ # XXX what error should be raised here?
+ raise ValueError, "No escaped character"
+ # In posix shells, only the quote itself or the escape
+ # character may be escaped within quotes.
+ if escapedstate in self.quotes and \
+ nextchar != self.state and nextchar != escapedstate:
+ self.token = self.token + self.state
+ self.token = self.token + nextchar
+ self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
@@ -150,14 +207,26 @@ class shlex:
if self.debug >= 2:
print "shlex: I see whitespace in word state"
self.state = ' '
- if self.token:
+ if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
- elif nextchar in self.wordchars or nextchar in self.quotes:
+ if self.posix:
+ self.state = ' '
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif self.posix and nextchar in self.quotes:
+ self.state = nextchar
+ elif self.posix and nextchar in self.escape:
+ escapedstate = 'a'
+ self.state = nextchar
+ elif nextchar in self.wordchars or nextchar in self.quotes \
+ or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback = [nextchar] + self.pushback
@@ -170,6 +239,8 @@ class shlex:
continue
result = self.token
self.token = ''
+ if self.posix and not quoted and result == '':
+ result = None
if self.debug > 1:
if result:
print "shlex: raw token=" + `result`
@@ -182,7 +253,7 @@ class shlex:
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
- if type(self.infile) == type("") and not os.path.isabs(newfile):
+ if type(self.infile) in StringTypes and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
@@ -194,6 +265,19 @@ class shlex:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
+ def __iter__(self):
+ return self
+
+ def next(self):
+ token = self.get_token()
+ if token == self.eof:
+ raise StopIteration
+ return token
+
+def split(s, posix=1, spaces=1):
+ lex = shlex(s, posix=posix)
+ lex.whitespace_split = spaces
+ return list(lex)
if __name__ == '__main__':
if len(sys.argv) == 1:
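
For reference (again not part of the patch), the escaping rule implemented
above means that inside double quotes only the backslash and the double quote
themselves can be escaped, while single quotes take everything literally. The
expected results below come straight from the posix test data in the new test
file that follows.

    import shlex

    print shlex.split(r'"foo\\ bar\""')   # ['foo\\ bar"']  (\\ and \" collapse)
    print shlex.split(r'"foo\x bar"')     # ['foo\\x bar']  (\x stays literal)
    print shlex.split(r"'foo\\ bar'")     # ['foo\\\\ bar'] (no escapes inside '')
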
diff --git a/Lib/test/test_shlex.py b/Lib/test/test_shlex.py
new file mode 100644
index 0000000..f8ef88c
--- /dev/null
+++ b/Lib/test/test_shlex.py
@@ -0,0 +1,191 @@
+# -*- coding: iso-8859-1 -*-
+import unittest
+import os, sys
+import shlex
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+
+# The original test data set was from shellwords, by Hartmut Goebel.
+
+data = r"""x|x|
+foo bar|foo|bar|
+ foo bar|foo|bar|
+ foo bar |foo|bar|
+foo bar bla fasel|foo|bar|bla|fasel|
+x y z xxxx|x|y|z|xxxx|
+\x bar|\|x|bar|
+\ x bar|\|x|bar|
+\ bar|\|bar|
+foo \x bar|foo|\|x|bar|
+foo \ x bar|foo|\|x|bar|
+foo \ bar|foo|\|bar|
+foo "bar" bla|foo|"bar"|bla|
+"foo" "bar" "bla"|"foo"|"bar"|"bla"|
+"foo" bar "bla"|"foo"|bar|"bla"|
+"foo" bar bla|"foo"|bar|bla|
+foo 'bar' bla|foo|'bar'|bla|
+'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
+'foo' bar 'bla'|'foo'|bar|'bla'|
+'foo' bar bla|'foo'|bar|bla|
+blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
+blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
+""|""|
+''|''|
+foo "" bar|foo|""|bar|
+foo '' bar|foo|''|bar|
+foo "" "" "" bar|foo|""|""|""|bar|
+foo '' '' '' bar|foo|''|''|''|bar|
+\""|\|""|
+"\"|"\"|
+"foo\ bar"|"foo\ bar"|
+"foo\\ bar"|"foo\\ bar"|
+"foo\\ bar\"|"foo\\ bar\"|
+"foo\\" bar\""|"foo\\"|bar|\|""|
+"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
+"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
+"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
+"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
+\''|\|''|
+'foo\ bar'|'foo\ bar'|
+'foo\\ bar'|'foo\\ bar'|
+"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
+\"foo"|\|"foo"|
+\"foo"\x|\|"foo"|\|x|
+"foo\x"|"foo\x"|
+"foo\ "|"foo\ "|
+foo\ xx|foo|\|xx|
+foo\ x\x|foo|\|x|\|x|
+foo\ x\x\""|foo|\|x|\|x|\|""|
+"foo\ x\x"|"foo\ x\x"|
+"foo\ x\x\\"|"foo\ x\x\\"|
+"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
+"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
+"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
+"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
+'foo\ bar'|'foo\ bar'|
+'foo\\ bar'|'foo\\ bar'|
+foo\ bar|foo|\|bar|
+foo#bar\nbaz|foobaz|
+:-) ;-)|:|-|)|;|-|)|
+áéíóú|á|é|í|ó|ú|
+"""
+
+posix_data = r"""x|x|
+foo bar|foo|bar|
+ foo bar|foo|bar|
+ foo bar |foo|bar|
+foo bar bla fasel|foo|bar|bla|fasel|
+x y z xxxx|x|y|z|xxxx|
+\x bar|x|bar|
+\ x bar| x|bar|
+\ bar| bar|
+foo \x bar|foo|x|bar|
+foo \ x bar|foo| x|bar|
+foo \ bar|foo| bar|
+foo "bar" bla|foo|bar|bla|
+"foo" "bar" "bla"|foo|bar|bla|
+"foo" bar "bla"|foo|bar|bla|
+"foo" bar bla|foo|bar|bla|
+foo 'bar' bla|foo|bar|bla|
+'foo' 'bar' 'bla'|foo|bar|bla|
+'foo' bar 'bla'|foo|bar|bla|
+'foo' bar bla|foo|bar|bla|
+blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
+blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
+""||
+''||
+foo "" bar|foo||bar|
+foo '' bar|foo||bar|
+foo "" "" "" bar|foo||||bar|
+foo '' '' '' bar|foo||||bar|
+\"|"|
+"\""|"|
+"foo\ bar"|foo\ bar|
+"foo\\ bar"|foo\ bar|
+"foo\\ bar\""|foo\ bar"|
+"foo\\" bar\"|foo\|bar"|
+"foo\\ bar\" dfadf"|foo\ bar" dfadf|
+"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
+"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
+"foo\x bar\" dfadf"|foo\x bar" dfadf|
+\'|'|
+'foo\ bar'|foo\ bar|
+'foo\\ bar'|foo\\ bar|
+"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
+\"foo|"foo|
+\"foo\x|"foox|
+"foo\x"|foo\x|
+"foo\ "|foo\ |
+foo\ xx|foo xx|
+foo\ x\x|foo xx|
+foo\ x\x\"|foo xx"|
+"foo\ x\x"|foo\ x\x|
+"foo\ x\x\\"|foo\ x\x\|
+"foo\ x\x\\""foobar"|foo\ x\x\foobar|
+"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
+"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
+"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
+"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
+'foo\ bar'|foo\ bar|
+'foo\\ bar'|foo\\ bar|
+foo\ bar|foo bar|
+foo#bar\nbaz|foo|baz|
+:-) ;-)|:-)|;-)|
+áéíóú|áéíóú|
+"""
+
+class ShlexTest(unittest.TestCase):
+ def setUp(self):
+ self.data = [x.split("|")[:-1]
+ for x in data.splitlines()]
+ self.posix_data = [x.split("|")[:-1]
+ for x in posix_data.splitlines()]
+ for item in self.data:
+ item[0] = item[0].replace(r"\n", "\n")
+ for item in self.posix_data:
+ item[0] = item[0].replace(r"\n", "\n")
+
+ def splitTest(self, data, posix, spaces):
+ for i in range(len(data)):
+ l = shlex.split(data[i][0], posix=posix, spaces=spaces)
+ self.assertEqual(l, data[i][1:],
+ "%s: %s != %s" %
+ (data[i][0], l, data[i][1:]))
+
+ def oldSplit(self, s):
+ ret = []
+ lex = shlex.shlex(StringIO(s))
+ tok = lex.get_token()
+ while tok:
+ ret.append(tok)
+ tok = lex.get_token()
+ return ret
+
+ def testSplit(self):
+ """Test data splitting with non-posix parser"""
+ self.splitTest(self.data, posix=0, spaces=0)
+
+ def testSplitPosix(self):
+ """Test data splitting with posix parser"""
+ self.splitTest(self.posix_data, posix=1, spaces=1)
+
+ def testCompat(self):
+ """Test compatibility interface"""
+ for i in range(len(self.data)):
+ l = self.oldSplit(self.data[i][0])
+ self.assertEqual(l, self.data[i][1:],
+ "%s: %s != %s" %
+ (self.data[i][0], l, self.data[i][1:]))
+
+# Allow this test to be used with old shlex.py
+if not getattr(shlex, "split", None):
+ for methname in dir(ShlexTest):
+ if methname.startswith("test") and methname != "testCompat":
+ delattr(ShlexTest, methname)
+
+if __name__ == "__main__":
+ unittest.main()