Code Example #1
# assumed context: ast, flags and subst are the bashlex package's internal modules
from bashlex import ast, flags, subst

def _expandword(parser, tokenword):
    if parser._expansionlimit == -1:
        # we enter this branch in the following conditions:
        # - currently parsing a substitution as a result of an expansion
        # - the previous expansion had limit == 0
        #
        # this means that this node is a descendant of a substitution in an
        # unexpanded word and will be filtered in the limit == 0 condition below
        #
        # (the reason we even expand when limit == 0 is to get quote removal)
        node = ast.node(kind='word',
                        word=tokenword,
                        pos=(tokenword.lexpos, tokenword.endlexpos),
                        parts=[])
        return node
    else:
        quoted = bool(tokenword.flags & flags.word.QUOTED)
        doublequoted = quoted and tokenword.value[0] == '"'

        # TODO set qheredocument
        parts, expandedword = subst._expandwordinternal(
            parser, tokenword, 0, doublequoted, 0, 0)

        # limit reached, don't include substitutions (still expanded to get
        # quote removal though)
        if parser._expansionlimit == 0:
            parts = [node for node in parts if 'substitution' not in node.kind]

        node = ast.node(kind='word',
                        word=expandedword,
                        pos=(tokenword.lexpos, tokenword.endlexpos),
                        parts=parts)
        return node
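
For context, here is a minimal sketch of how this expansion path is reached from the public API, assuming the excerpt belongs to the bashlex package; bashlex.parse and the node attributes inspected below follow the ast.node fields used above, and the printed values are illustrative only.

import bashlex

# parse a command whose second word carries a command substitution;
# that word node is the one built by _expandword above
trees = bashlex.parse('echo "$(date)"')
word = trees[0].parts[1]
print(word.kind, word.word)     # e.g. 'word' '$(date)' (after quote removal)
for part in word.parts:
    print(part.kind, part.pos)  # nested commandsubstitution node(s)

The limit == 0 filter above is why such a word can come back with quote removal applied but with no substitution nodes recorded among its parts.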
Code Example #2
# assumed context: flags, subst, tokenizer and the internal _parser class
# come from the bashlex package
from bashlex import flags, subst, tokenizer
from bashlex.parser import _parser

def split(s):
    '''a utility function that mimics shlex.split but handles more
    complex shell constructs such as command substitutions inside words

    >>> list(split('a b"c"\\'d\\''))
    ['a', 'bcd']
    >>> list(split('a "b $(c)" $(d) \\'$(e)\\''))
    ['a', 'b $(c)', '$(d)', '$(e)']
    >>> list(split('a b\\n'))
    ['a', 'b', '\\n']
    '''
    p = _parser(s)
    for t in p.tok:
        if t.ttype == tokenizer.tokentype.WORD:
            quoted = bool(t.flags & flags.word.QUOTED)
            doublequoted = quoted and t.value[0] == '"'
            parts, expandedword = subst._expandwordinternal(
                p, t, 0, doublequoted, 0, 0)
            yield expandedword
        else:
            yield s[t.lexpos:t.endlexpos]
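
Beyond the doctests, a quick sketch of the generator in use, assuming the function is re-exported at package level as bashlex.split; the commented results are illustrative.

from bashlex import split  # assumed package-level re-export

# operators and newlines are not WORD tokens, so they come back as raw
# slices of the input rather than as expanded words
print(list(split('a && b | c')))            # e.g. ['a', '&&', 'b', '|', 'c']

# a quoted command substitution survives as a single item, unlike shlex.split
print(list(split('echo "x $(uname -r)"')))  # e.g. ['echo', 'x $(uname -r)']

The else branch is what keeps operators such as && intact: anything that is not a WORD token is sliced straight out of the source string via the token's lexpos/endlexpos.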