def on_tokenize(self, shell, tokens, origin):
    """Expand variable references inside string tokens.

    Tokens that are not strings, or that do not contain the variable
    prefix, pass through unchanged. For quoted strings the expansion is
    kept as a single token; for unquoted strings the expanded value is
    re-split on whitespace, with whitespace tokens interleaved.
    """
    result = []
    for tok in tokens:
        # Fast path: nothing to expand in this token.
        if not isinstance(tok, StringToken) or self.prefix not in tok.text:
            result.append(tok)
            continue

        for sub in get_subtokens(tok, self.prefix):
            if isinstance(sub, StringToken):
                # Literal fragment — emit as-is.
                result.append(sub)
                continue

            value = self.expand(shell, sub)
            if tok.quote:
                # Quoted context: the expansion stays a single token.
                result.append(StringToken(sub.index, value, tok.quote))
            else:
                # Unquoted context: split the expansion into words and
                # separate consecutive words with whitespace tokens.
                first = True
                for word in value.split():
                    if not first:
                        result.append(WhitespaceToken(sub.index))
                    first = False
                    result.append(StringToken(sub.index, word))
    return result
def test_get_subtokens_dbl_escape(self):
    """A doubled backslash before the prefix is kept literally."""
    actual = list(
        get_subtokens(StringToken(0, 'hello, \\\\$name'), '$',
                      self.features))
    expected = [
        StringToken(0, 'hello, \\\\'),
        VariableToken(0, '$', 'name'),
    ]
    assert actual == expected
def test_get_subtokens_var_escape_next(self):
    """An escape following a variable stays in the trailing literal."""
    actual = list(
        get_subtokens(StringToken(0, 'hello $name\\n'), '$',
                      self.features))
    expected = [
        StringToken(0, 'hello '),
        VariableToken(0, '$', 'name'),
        StringToken(0, '\\n'),
    ]
    assert actual == expected
def test_get_subtokens_repeat(self):
    """Back-to-back variables each produce their own token."""
    actual = list(
        get_subtokens(StringToken(0, 'hello $name$name'), '$',
                      self.features))
    expected = [
        StringToken(0, 'hello '),
        VariableToken(0, '$', 'name'),
        VariableToken(0, '$', 'name'),
    ]
    assert actual == expected
def test_get_subtokens_multi(self):
    """Multiple variables separated by literal text are all found."""
    actual = list(
        get_subtokens(StringToken(0, '$name, welcome to $city'), '$',
                      self.features))
    expected = [
        VariableToken(0, '$', 'name'),
        StringToken(1, ', welcome to '),
        VariableToken(0, '$', 'city'),
    ]
    assert actual == expected
def test_get_subtokens_single(self):
    """A single mid-string variable splits the literal around it."""
    actual = list(
        get_subtokens(StringToken(0, 'hello, $name, welcome'), '$',
                      self.features))
    expected = [
        StringToken(0, 'hello, '),
        VariableToken(0, '$', 'name'),
        StringToken(1, ', welcome'),
    ]
    assert actual == expected
def on_tokenize(self, shell, tokens, origin):
    """Expand variable references inside string tokens.

    Tokens that are not strings, or that do not contain the variable
    prefix, pass through unchanged. Each variable subtoken is replaced
    by a double-quoted StringToken holding its expanded value, so later
    parsing stages treat the expansion as a single token.

    Fix: removed a dead triple-quoted block of commented-out code that
    sat in the loop body as an expression statement — it was evaluated
    (and discarded) on every variable expansion and served no purpose.
    """
    ret = []
    for token in tokens:
        # Fast path: nothing to expand in this token.
        if (not isinstance(token, StringToken)
                or self.prefix not in token.text):
            ret.append(token)
            continue
        for subt in get_subtokens(token, self.prefix):
            if isinstance(subt, StringToken):
                # Literal fragment — emit as-is.
                ret.append(subt)
                continue
            expanded = self.expand(shell, subt)
            # Emit the expansion as a double-quoted string token.
            ret.append(StringToken(subt.index, expanded, '"'))
    return ret
def get_subtokens(token, prefix, features):
    """Split a StringToken into literal and variable subtokens.

    Scans ``token.text`` one character at a time and yields an
    alternating stream of StringToken (literal text) and VariableToken
    (a run started by ``prefix``) objects. A backslash escapes the next
    character: an escaped ``prefix`` becomes literal text without the
    backslash, while any other escaped character keeps its backslash
    (e.g. ``\\n`` stays ``\\n``).

    :param token: source StringToken whose text is scanned
    :param prefix: single character that introduces a variable (e.g. '$')
    :param features: passed through to each new StringToken
    """
    escape = False
    index = token.index
    subt = None   # literal StringToken currently being accumulated
    var = None    # VariableToken currently being accumulated
    for c in token.text:
        if escape:
            # Previous char was a backslash. Keep the backslash unless
            # it was escaping the variable prefix.
            # NOTE(review): subt appears to always exist here, since
            # escape is only set on branches that create subt — confirm.
            escape = False
            if c != prefix:
                subt.text += '\\'
            subt.text += c
        elif var:
            # Feed the char to the in-progress variable token; add_char
            # returns TokenEnd once the char no longer belongs to it.
            rc = var.add_char(c)
            if rc == TokenEnd:
                yield var
                var = None
                if c == prefix:
                    # Variable immediately followed by another variable.
                    var = VariableToken(index, c)
                else:
                    # Terminating char starts a new literal run.
                    if c == '\\':
                        escape = True
                        c = ''
                    subt = StringToken(index, c, token.quote, features=features)
        elif c == prefix:
            # Start of a variable: flush any pending literal first.
            if subt:
                yield subt
                subt = None
            var = VariableToken(index, c)
        else:
            # Plain literal character (or start of an escape sequence).
            if c == '\\':
                escape = True
                c = ''
            if not subt:
                subt = StringToken(index, c, token.quote, features=features)
            else:
                subt.text += c
        index += 1
    # Flush whichever token was still being built at end of input.
    if subt:
        yield subt
    elif var:
        yield var
def preprocess_single(self, raw, origin):
    """Expand a raw string as if it were one double-quoted token.

    Runs on_tokenize over the wrapped input, cleans escape sequences,
    and returns the concatenated token text; returns '' when the
    tokenizer produced nothing.
    """
    tokens = self.on_tokenize([StringToken(0, raw, quote='"')], origin)
    if not tokens:
        return ''
    parser = StatementParser(self.features)
    parser.clean_escapes(tokens)
    return ''.join(tok.text for tok in tokens)
def preprocess_single(self, raw, origin):
    """Run a raw string through every preprocessor's tokenize hook.

    The input is wrapped as a single double-quoted token and fed through
    each preprocessor in order; the chain stops early if one returns an
    empty result. Escapes are cleaned and the surviving token text is
    concatenated; returns '' when nothing survives.
    """
    tokens = [StringToken(0, raw, quote='"')]
    for pp in self.preprocessors:
        tokens = pp.on_tokenize(self, tokens, origin)
        if not tokens:
            break

    if not tokens:
        return ''
    self.parser.clean_escapes(tokens)
    return ''.join(tok.text for tok in tokens)
def on_tokenize(self, shell, tokens, origin):
    """Expand variable references inside string tokens.

    Tokens that are not strings, or that do not contain the variable
    prefix, pass through unchanged. Each variable subtoken is replaced
    by a double-quoted StringToken holding its expanded value.
    """
    result = []
    for tok in tokens:
        # Fast path: nothing to expand in this token.
        if not isinstance(tok, StringToken) or self.prefix not in tok.text:
            result.append(tok)
            continue
        for sub in get_subtokens(tok, self.prefix, shell.features):
            if not isinstance(sub, StringToken):
                # Variable subtoken: replace it with its expansion,
                # emitted as a double-quoted string token.
                sub = StringToken(sub.index, self.expand(shell, sub), '"')
            result.append(sub)
    return result
def test_on_tokenize(self):
    """Variables in separate tokens are each expanded to quoted strings."""
    self.shell.ctx.vars['token'] = 'first_token'
    self.shell.ctx.vars['message'] = 'second_message'
    source = [
        StringToken(0, 'first $token'),
        WhitespaceToken(1),
        StringToken(2, 'second $message'),
    ]
    expected = [
        StringToken(0, 'first '),
        StringToken(1, 'first_token', quote='"'),
        WhitespaceToken(2),
        StringToken(3, 'second '),
        StringToken(4, 'second_message', quote='"'),
    ]
    assert self.plugin.on_tokenize(self.shell, source, 'input') == expected
def test_get_subtokens_none(self):
    """Text without the prefix yields a single literal token."""
    actual = list(
        get_subtokens(StringToken(0, 'hello, adam'), '$', self.features))
    assert actual == [StringToken(0, 'hello, adam')]