Mirror of https://github.com/Mikaela/Limnoria.git
New test and fix for the associated bug.

parent 965d8e0b5f
commit 08244ff36e
@@ -155,16 +155,15 @@ class Tokenizer:
     # These are the characters valid in a token. Everything printable except
     # double-quote, left-bracket, and right-bracket.
     validChars = string.ascii.translate(string.ascii, '\x00\r\n \t"[]')
+    quotes = '"'
     def __init__(self, tokens=''):
         # Add a '|' to tokens to have the pipe syntax.
         self.validChars = self.validChars.translate(string.ascii, tokens)

     def _handleToken(self, token):
-        while token and token[0] == '"' and token[-1] == token[0]:
-            if len(token) > 1:
-                token = token[1:-1].decode('string_escape') # 2.3+
-            else:
-                break
+        if token[0] == token[-1] and token[0] in self.quotes:
+            token = token[1:-1]
+            token = token.decode('string-escape')
         return token

     def _insideBrackets(self, lexer):
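Why the change: the old while loop re-tested the token after each strip-and-unescape pass, so escaped quotes revealed by the first pass were treated as a fresh quote pair and stripped again. The new code strips and unescapes exactly once, using the quote characters from the new quotes class attribute. A minimal standalone sketch of the two behaviors (Python 2 only, since the string-escape codec was removed in Python 3; the function names are illustrative, not Limnoria's API):

    def handle_token_old(token):
        # Old: keeps stripping as long as the token is quote-delimited,
        # so the '"foo"' produced by the first unescape loses its
        # quotes on the next pass.
        while token and token[0] == '"' and token[-1] == token[0]:
            if len(token) > 1:
                token = token[1:-1].decode('string_escape')
            else:
                break
        return token

    def handle_token_new(token, quotes='"'):
        # New: strip the outer quotes and unescape exactly once.
        # (Assumes a non-empty token; shlex's empty EOF token is
        # handled before this point in the real tokenizer.)
        if token[0] == token[-1] and token[0] in quotes:
            token = token[1:-1]
            token = token.decode('string-escape')
        return token

    print handle_token_old('"\\"foo\\""')  # foo    -- inner quotes lost
    print handle_token_new('"\\"foo\\""')  # "foo"  -- inner quotes kept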
@@ -185,7 +184,7 @@ class Tokenizer:
         """Tokenizes a string according to supybot's nested argument format."""
         lexer = shlex.shlex(StringIO(s))
         lexer.commenters = ''
-        lexer.quotes = '"'
+        lexer.quotes = self.quotes
         lexer.wordchars = self.validChars
         args = []
         ends = []
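For reference, this is the shlex configuration that feeds _handleToken; with the change, the quote characters are defined once on the class instead of being hardcoded both here and in _handleToken. A hedged sketch of how a lexer configured this way tokenizes input (Python 2; a non-POSIX shlex keeps the surrounding quotes on the token, which is exactly why _handleToken must strip them afterwards):

    import shlex
    from StringIO import StringIO

    lexer = shlex.shlex(StringIO('foo "bar baz" qux'))
    lexer.commenters = ''   # no comment characters
    lexer.quotes = '"'      # the value self.quotes now supplies

    while True:
        token = lexer.get_token()
        if not token:       # shlex returns '' at end of input
            break
        print repr(token)
    # prints 'foo', then '"bar baz"', then 'qux' -- the middle token
    # still carries its quotes until _handleToken strips them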
@@ -52,6 +52,9 @@ class TokenizerTestCase(unittest.TestCase):
     def testDQsWithBackslash(self):
         self.assertEqual(tokenize('"\\\\"'), ["\\"])

+    def testDoubleQuotes(self):
+        self.assertEqual(tokenize('"\\"foo\\""'), ['"foo"'])
+
     def testSingleWord(self):
         self.assertEqual(tokenize('foo'), ['foo'])

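The new testDoubleQuotes case pins the fix down. Note the two escape layers: the Python source literal '"\\"foo\\""' denotes the characters "\"foo\"", and the tokenizer must strip the outer quotes and unescape exactly once to yield "foo". A quick check of that inner step (Python 2 only, same string-escape caveat as above):

    token = '"\\"foo\\""'                # actual characters: "\"foo\""
    inner = token[1:-1]                  # \"foo\"
    print inner.decode('string-escape')  # "foo" -- the expected result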