Example #1
    def __init__(self,
                 s,
                 strictmode=True,
                 expansionlimit=None,
                 tokenizerargs=None):
        # expansionlimit must either be left unset (None) or be an integer:
        assert expansionlimit is None or isinstance(expansionlimit, int)

        # Save the input string and parsing options on the instance
        self.s = s
        self._strictmode = strictmode
        self._expansionlimit = expansionlimit
        # Default to an empty dict when no tokenizer arguments are given
        if tokenizerargs is None:
            tokenizerargs = {}
        # dict.pop removes and returns the 'parserstate' entry from tokenizerargs,
        # falling back to a fresh state.parserstate() when the key is absent.
        self.parserstate = tokenizerargs.pop('parserstate',
                                             state.parserstate())

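        # Build the tokenizer over the same input string, sharing the parser
        # state and strict-mode setting; any remaining tokenizerargs are
        # forwarded as keyword arguments.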
        self.tok = tokenizer.tokenizer(s,
                                       parserstate=self.parserstate,
                                       strictmode=strictmode,
                                       **tokenizerargs)

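        # Keep a reference to the tokenizer's redirection stack on the parser.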
        self.redirstack = self.tok.redirstack
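The 'parserstate' lookup above relies on dict.pop with a default, which consumes the key if it is present and otherwise returns the fallback. A minimal stand-alone sketch of that pattern (the variable names and values here are illustrative, not from bashlex):

args = {'parserstate': 'existing-state'}        # illustrative values only
ps = args.pop('parserstate', 'fresh-state')     # key present: returns 'existing-state' and removes it
assert ps == 'existing-state' and 'parserstate' not in args
ps = args.pop('parserstate', 'fresh-state')     # key absent now: the default is returned
assert ps == 'fresh-state'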
Example #2
    def __init__(self, s, strictmode=True, expansionlimit=None, tokenizerargs=None):
        assert expansionlimit is None or isinstance(expansionlimit, int)

        self.s = s
        self._strictmode = strictmode
        self._expansionlimit = expansionlimit

        if tokenizerargs is None:
            tokenizerargs = {}
        self.parserstate = tokenizerargs.pop('parserstate', state.parserstate())

        self.tok = tokenizer.tokenizer(s,
                                       parserstate=self.parserstate,
                                       strictmode=strictmode,
                                       **tokenizerargs)

        self.redirstack = self.tok.redirstack
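In practice this constructor is usually reached through the top-level bashlex API rather than instantiated directly. A rough usage sketch, assuming the package's documented parse() helper (shown in the bashlex README) is the usual entry point:

import bashlex

# parse a compound command; each element of the result is an AST node
trees = bashlex.parse('a && b | c')
for tree in trees:
    print(tree.dump())   # dump() pretty-prints the node tree, per the README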
Example #4
def split(s):
    '''a utility function that mimics shlex.split but handles more
    complex shell constructs such as command substitutions inside words

    >>> list(split('a b"c"\\'d\\''))
    ['a', 'bcd']
    >>> list(split('a "b $(c)" $(d) \\'$(e)\\''))
    ['a', 'b $(c)', '$(d)', '$(e)']
    >>> list(split('a b\\n'))
    ['a', 'b', '\\n']
    '''
    from bashlex import subst

    tok = tokenizer(s, state.parserstate())
    for t in tok:
        if t.ttype == tokentype.WORD:
            quoted = bool(t.flags & flags.word.QUOTED)
            doublequoted = quoted and t.value[0] == '"'
            parts, expandedword = subst._expandwordinternal(tok, t, 0,
                                                            doublequoted, 0, 0)
            yield expandedword
        else:
            yield s[t.lexpos:t.endlexpos]
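split() is a generator, so wrapping it in list() materializes the pieces; this just re-runs the second doctest above without its final argument:

pieces = list(split('a "b $(c)" $(d)'))
# -> ['a', 'b $(c)', '$(d)']   (command substitutions survive as single words)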
Example #6
import unittest

from bashlex import tokenizer, state, flags, errors

from bashlex.tokenizer import token as t
from bashlex.tokenizer import tokentype as tt

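# helper: run the tokenizer over a string and collect every emitted token into a list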
tokenize = lambda s: list(tokenizer.tokenizer(s, state.parserstate()))

hasdollarset = set([flags.word.HASDOLLAR])


class test_tokenizer(unittest.TestCase):
    def setUp(self):
        if not hasattr(self, 'assertRaisesRegex'):
            self.assertRaisesRegex = self.assertRaisesRegexp

    def assertTokens(self, s, tokens):
        result = tokenize(s)

        # pop the last token if it's a new line since that gets appended
        # to the input string by default and we don't really care about
        # that here
        if result[-1].value == '\n':
            result.pop()

        self.assertEqual(result, tokens)

        for t in tokens:
            self.assertEqual(str(t.value), s[t.lexpos:t.endlexpos])
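A sketch of what a concrete test built on assertTokens could look like; the token constructor arguments used here (type, value, [start, end] span) are an assumption inferred from the lexpos/endlexpos check above rather than taken from the bashlex sources:

    def test_two_words(self):
        # hypothetical test: assumes token(type, value, [lexpos, endlexpos])
        s = 'a b'
        self.assertTokens(s, [t(tt.WORD, 'a', [0, 1]),
                              t(tt.WORD, 'b', [2, 3])])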
Example #7
import unittest

from bashlex import tokenizer, state, flags, errors

from bashlex.tokenizer import token as t
from bashlex.tokenizer import tokentype as tt

tokenize = lambda s: list(tokenizer.tokenizer(s, state.parserstate()))

hasdollarset = set([flags.word.HASDOLLAR])

class test_tokenizer(unittest.TestCase):
    def assertTokens(self, s, tokens):
        result = tokenize(s)

        # pop the last token if it's a new line since that gets appended
        # to the input string by default and we don't really care about
        # that here
        if result[-1].value == '\n':
            result.pop()

        self.assertEqual(result, tokens)

        for t in tokens:
            self.assertEqual(str(t.value), s[t.lexpos:t.endlexpos])

    def test_empty_string(self):
        self.assertEqual(len(tokenize('')), 0)

    def test_simple(self):
        s = 'a b'