Mirror of https://github.com/Mikaela/Limnoria.git (synced 2024-11-27 13:19:24 +01:00)
Added memoization optimization to tokenize function.
commit eb31db6277 (parent 68fd496516)
@@ -41,6 +41,7 @@ how to use them.
 import fix

 import re
+import copy
 import sets
 import time
 import shlex
@@ -281,22 +282,8 @@ def tokenize(s):
         _lastTokenized = None
         _lastTokenizedResult = None
         raise SyntaxError, str(e)
-    debug.msg('tokenize took %s seconds.' % (time.time() - start), 'verbose')
+    #debug.msg('tokenize took %s seconds.' % (time.time() - start), 'verbose')
-    return _lastTokenizeResult
+    return copy.deepcopy(_lastTokenizeResult)
-
-## def tokenize(s):
-##     """A utility function to create a Tokenizer and tokenize a string."""
-##     start = time.time()
-##     try:
-##         if conf.enablePipeSyntax:
-##             tokens = '|'
-##         else:
-##             tokens = ''
-##         args = Tokenizer(tokens).tokenize(s)
-##     except ValueError, e:
-##         raise SyntaxError, str(e)
-##     #debug.msg('tokenize took %s seconds.' % (time.time() - start), 'verbose')
-##     return args

 def getCommands(tokens):
     """Given tokens as output by tokenize, returns the command names."""
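For readers skimming the diff, the change amounts to a one-entry memoization cache: tokenize remembers the last string it parsed and the resulting token list in the module-level _lastTokenized / _lastTokenizeResult globals, and the return path now goes through copy.deepcopy so callers that mutate the returned list cannot corrupt the cached copy. Below is a minimal sketch of that pattern in plain Python; the cache-key comparison and the split()-based stand-in for Limnoria's Tokenizer are illustrative assumptions, not the project's actual code.

import copy

_last_input = None    # last string handed to tokenize (assumed cache key)
_last_tokens = None   # token list computed for that string

def tokenize(s):
    """Tokenize s, reusing the previous result when the same input repeats."""
    global _last_input, _last_tokens
    if s != _last_input:
        # Cache miss: recompute and remember both the key and the value.
        _last_input = s
        _last_tokens = s.split()   # stand-in for the real Tokenizer(...).tokenize(s)
    # Hand back a deep copy so a caller popping tokens off the list
    # does not also empty the cached value.
    return copy.deepcopy(_last_tokens)

The deepcopy is the subtle part of the commit: without it, the first caller that mutated the returned list would silently poison the cache for every later call made with the same input.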