Example #1
0
def p_simple_list(p):
    '''simple_list : simple_list1
                   | simple_list1 AMPERSAND
                   | simple_list1 SEMICOLON'''
    # NOTE: the docstring above is the PLY grammar specification for this
    # rule and is read at runtime -- it must not be edited.
    heredoc.gatherheredocuments(p.lexer)

    parts = p[1]
    has_separator = len(p) == 3

    if has_separator or len(parts) > 1:
        if has_separator:
            # wrap the trailing '&' / ';' in an operator node
            separator = ast.node(kind='operator',
                                 op=p[2],
                                 pos=p.lexspan(2),
                                 lineno=p.lineno(2))
            parts.append(separator)
        p[0] = ast.node(kind='list',
                        parts=parts,
                        pos=_partsspan(parts),
                        lineno=parts[-1].lineno)
    else:
        # a single command with no trailing separator collapses to itself
        assert len(parts) == 1
        p[0] = parts[0]

    # while parsing a command substitution, accept the input as soon as the
    # current token is the shell eof token
    if (len(p) == 2
            and p.lexer._parserstate & flags.parser.CMDSUBST
            and p.lexer._current_token.nopos() == p.lexer._shell_eof_token):
        p.accept()
Example #2
0
def p_simple_list(p):
    '''simple_list : simple_list1
                   | simple_list1 AMPERSAND
                   | simple_list1 SEMICOLON'''
    # NOTE: the docstring above is the PLY grammar rule -- do not edit it.
    heredoc.gatherheredocuments(p.lexer)

    parts = p[1]
    terminated = len(p) == 3

    if terminated or len(parts) > 1:
        if terminated:
            # append the trailing '&' / ';' as an operator node
            parts.append(
                ast.node(kind='operator', op=p[2], pos=p.lexspan(2)))
        p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts))
    else:
        # exactly one command and no separator: unwrap it
        assert len(parts) == 1
        p[0] = parts[0]

    # inside a command substitution, accept once the shell eof token is seen
    if (len(p) == 2 and p.lexer._parserstate & flags.parser.CMDSUBST and
            p.lexer._current_token.nopos() == p.lexer._shell_eof_token):
        p.accept()
Example #3
0
File: tokenizer.py — Project: idank/bashlex
    def _readtoken(self):
        """Read and return the next token from the input stream.

        Port of bash's read_token() (the bashlex/parse.y markers in the
        comments below point at the corresponding C code): skips shell
        blanks and comments, gathers heredocs on newline, recognizes one-
        and two-character shell operators, and otherwise delegates to
        _readtokenword.
        """
        character = None
        peek_char = None

        # a token pushed back via _token_to_read takes priority over input
        if self._token_to_read is not None:
            t = self._token_to_read
            self._token_to_read = None
            return t

        # bashlex/parse.y L2989 COND_COMMAND
        # skip leading shell blanks
        character = self._getc(True)
        while character is not None and _shellblank(character):
            character = self._getc(True)

        if character is None:
            return eoftoken

        # a comment runs to end of line; the newline becomes the current
        # character
        if character == '#':
            self._discard_until('\n')
            self._getc(False)
            character = '\n'

        self._recordpos(1)

        if character == '\n':
            # bashlex/parse.y L3034 ALIAS
            # heredoc bodies are collected when the line that opened them ends
            heredoc.gatherheredocuments(self)

            self._parserstate.discard(parserflags.ASSIGNOK)
            return tokentype(character)

        # in regexp mode (presumably set while reading the rhs of =~ --
        # confirm against the caller) metacharacters are part of the word
        if self._parserstate & parserflags.REGEXP:
            return self._readtokenword(character)

        if _shellmeta(character) and not (self._parserstate
                                          & parserflags.DBLPAREN):
            self._parserstate.discard(parserflags.ASSIGNOK)
            peek_char = self._getc(True)

            both = character
            if peek_char:
                both += peek_char
            # doubled metacharacters (<<, >>, ;;, &&, ||) and their
            # three-character extensions
            if character == peek_char:
                if character == '<':
                    peek_char = self._getc()
                    if peek_char == '-':
                        return tokentype.LESS_LESS_MINUS
                    elif peek_char == '<':
                        return tokentype.LESS_LESS_LESS
                    else:
                        self._ungetc(peek_char)
                        return tokentype.LESS_LESS
                elif character == '>':
                    return tokentype.GREATER_GREATER
                elif character == ';':
                    self._parserstate |= parserflags.CASEPAT
                    # bashlex/parse.y L3085 ALIAS
                    peek_char = self._getc()
                    if peek_char == '&':
                        return tokentype.SEMI_SEMI_AND
                    else:
                        self._ungetc(peek_char)
                        return tokentype.SEMI_SEMI
                elif character == '&':
                    return tokentype.AND_AND
                elif character == '|':
                    return tokentype.OR_OR
                # bashlex/parse.y L3105
            # mixed two-character operators
            elif both == '<&':
                return tokentype.LESS_AND
            elif both == '>&':
                return tokentype.GREATER_AND
            elif both == '<>':
                return tokentype.LESS_GREATER
            elif both == '>|':
                return tokentype.GREATER_BAR
            elif both == '&>':
                peek_char = self._getc()
                if peek_char == '>':
                    return tokentype.AND_GREATER_GREATER
                else:
                    self._ungetc(peek_char)
                    return tokentype.AND_GREATER
            elif both == '|&':
                return tokentype.BAR_AND
            elif both == ';&':
                return tokentype.SEMI_AND

            # not a two-character operator: push the lookahead back
            self._ungetc(peek_char)
            # ')' closing '(' right after a WORD: presumably the end of a
            # function definition's '()', so an opening brace is allowed next
            if character == ')' and self._last_read_token.value == '(' and self._token_before_that.ttype == tokentype.WORD:
                self._parserstate.add(parserflags.ALLOWOPNBRC)
                # bashlex/parse.y L3155

            # track entering/leaving subshells and case patterns
            if character == '(' and not self._parserstate & parserflags.CASEPAT:
                self._parserstate.add(parserflags.SUBSHELL)
            elif self._parserstate & parserflags.CASEPAT and character == ')':
                self._parserstate.discard(parserflags.CASEPAT)
            elif self._parserstate & parserflags.SUBSHELL and character == ')':
                self._parserstate.discard(parserflags.SUBSHELL)

            # '<(' / '>(' are not operators (presumably process
            # substitution) -- fall through to word reading
            if character not in '<>' or peek_char != '(':
                return tokentype(character)

        # '-' directly after <& or >& (presumably fd-closing, e.g. '<&-')
        # is its own token
        if character == '-' and (
                self._last_read_token.ttype == tokentype.LESS_AND
                or self._last_read_token.ttype == tokentype.GREATER_AND):
            return tokentype(character)

        return self._readtokenword(character)
Example #4
0
    def _readtoken(self):
        """Read and return the next token from the input stream.

        Port of bash's read_token() (the numeric markers in the comments
        below reference lines of bash's parse.y): skips shell blanks and
        comments, gathers heredocs on newline, recognizes one- and
        two-character shell operators, and otherwise delegates to
        _readtokenword.
        """
        character = None
        peek_char = None

        # a token pushed back via _token_to_read takes priority over input
        if self._token_to_read is not None:
            t = self._token_to_read
            self._token_to_read = None
            return t

        # 2989 COND_COMMAND
        # skip leading shell blanks
        character = self._getc(True)
        while character is not None and _shellblank(character):
            character = self._getc(True)

        if character is None:
            return eoftoken

        # a comment runs to end of line; the newline becomes the current
        # character
        if character == '#':
            self._discard_until('\n')
            self._getc(False)
            character = '\n'

        self._recordpos(1)

        if character == '\n':
            # XXX 3034 ALIAS
            # heredoc bodies are collected when the line that opened them ends
            heredoc.gatherheredocuments(self)

            self._parserstate.discard(parserflags.ASSIGNOK)
            return tokentype(character)

        # in regexp mode (presumably set while reading the rhs of =~ --
        # confirm against the caller) metacharacters are part of the word
        if self._parserstate & parserflags.REGEXP:
            return self._readtokenword(character)

        if _shellmeta(character) and not (self._parserstate & parserflags.DBLPAREN):
            self._parserstate.discard(parserflags.ASSIGNOK)
            peek_char = self._getc(True)

            both = character
            if peek_char:
                both += peek_char
            # doubled metacharacters (<<, >>, ;;, &&, ||) and their
            # three-character extensions
            if character == peek_char:
                if character == '<':
                    peek_char = self._getc()
                    if peek_char == '-':
                        return tokentype.LESS_LESS_MINUS
                    elif peek_char == '<':
                        return tokentype.LESS_LESS_LESS
                    else:
                        self._ungetc(peek_char)
                        return tokentype.LESS_LESS
                elif character == '>':
                    return tokentype.GREATER_GREATER
                elif character == ';':
                    self._parserstate |= parserflags.CASEPAT
                    # 3085 ALIAS
                    peek_char = self._getc()
                    if peek_char == '&':
                        return tokentype.SEMI_SEMI_AND
                    else:
                        self._ungetc(peek_char)
                        return tokentype.SEMI_SEMI
                elif character == '&':
                    return tokentype.AND_AND
                elif character == '|':
                    return tokentype.OR_OR
                # XXX 3105
            # mixed two-character operators
            elif both == '<&':
                return tokentype.LESS_AND
            elif both == '>&':
                return tokentype.GREATER_AND
            elif both == '<>':
                return tokentype.LESS_GREATER
            elif both == '>|':
                return tokentype.GREATER_BAR
            elif both == '&>':
                peek_char = self._getc()
                if peek_char == '>':
                    return tokentype.AND_GREATER_GREATER
                else:
                    self._ungetc(peek_char)
                    return tokentype.AND_GREATER
            elif both == '|&':
                return tokentype.BAR_AND
            elif both == ';&':
                return tokentype.SEMI_AND

            # not a two-character operator: push the lookahead back
            self._ungetc(peek_char)
            # ')' closing '(' right after a WORD: presumably the end of a
            # function definition's '()', so an opening brace is allowed next
            if character == ')' and self._last_read_token.value == '(' and self._token_before_that.ttype == tokentype.WORD:
                self._parserstate.add(parserflags.ALLOWOPNBRC)
                # XXX 3155

            # track entering/leaving subshells and case patterns
            if character == '(' and not self._parserstate & parserflags.CASEPAT:
                self._parserstate.add(parserflags.SUBSHELL)
            elif self._parserstate & parserflags.CASEPAT and character == ')':
                self._parserstate.discard(parserflags.CASEPAT)
            elif self._parserstate & parserflags.SUBSHELL and character == ')':
                self._parserstate.discard(parserflags.SUBSHELL)

            # '<(' / '>(' are not operators (presumably process
            # substitution) -- fall through to word reading
            if character not in '<>' or peek_char != '(':
                return tokentype(character)

        # '-' directly after <& or >& (presumably fd-closing, e.g. '<&-')
        # is its own token
        if character == '-' and (self._last_read_token.ttype == tokentype.LESS_AND or self._last_read_token.ttype == tokentype.GREATER_AND):
            return tokentype(character)

        return self._readtokenword(character)
Example #5
0
File: tokenizer.py — Project: idank/bashlex
    def _readtoken(self):
        """Read and return the next token from the input stream.

        Port of bash's read_token() (the bashlex/parse.y markers in the
        comments below point at the corresponding C code): skips shell
        blanks and comments, gathers heredocs on newline, recognizes one-
        and two-character shell operators, and otherwise delegates to
        _readtokenword.
        """
        character = None
        peek_char = None

        # a token pushed back via _token_to_read takes priority over input
        if self._token_to_read is not None:
            t = self._token_to_read
            self._token_to_read = None
            return t

        # bashlex/parse.y L2989 COND_COMMAND
        # skip leading shell blanks
        character = self._getc(True)
        while character is not None and _shellblank(character):
            character = self._getc(True)

        if character is None:
            return eoftoken

        # a comment runs to end of line; the newline becomes the current
        # character
        if character == "#":
            self._discard_until("\n")
            self._getc(False)
            character = "\n"

        self._recordpos(1)

        if character == "\n":
            # bashlex/parse.y L3034 ALIAS
            # heredoc bodies are collected when the line that opened them ends
            heredoc.gatherheredocuments(self)

            self._parserstate.discard(parserflags.ASSIGNOK)
            return tokentype(character)

        # in regexp mode (presumably set while reading the rhs of =~ --
        # confirm against the caller) metacharacters are part of the word
        if self._parserstate & parserflags.REGEXP:
            return self._readtokenword(character)

        if _shellmeta(character) and not (self._parserstate & parserflags.DBLPAREN):
            self._parserstate.discard(parserflags.ASSIGNOK)
            peek_char = self._getc(True)

            both = character
            if peek_char:
                both += peek_char
            # doubled metacharacters (<<, >>, ;;, &&, ||) and their
            # three-character extensions
            if character == peek_char:
                if character == "<":
                    peek_char = self._getc()
                    if peek_char == "-":
                        return tokentype.LESS_LESS_MINUS
                    elif peek_char == "<":
                        return tokentype.LESS_LESS_LESS
                    else:
                        self._ungetc(peek_char)
                        return tokentype.LESS_LESS
                elif character == ">":
                    return tokentype.GREATER_GREATER
                elif character == ";":
                    self._parserstate |= parserflags.CASEPAT
                    # bashlex/parse.y L3085 ALIAS
                    peek_char = self._getc()
                    if peek_char == "&":
                        return tokentype.SEMI_SEMI_AND
                    else:
                        self._ungetc(peek_char)
                        return tokentype.SEMI_SEMI
                elif character == "&":
                    return tokentype.AND_AND
                elif character == "|":
                    return tokentype.OR_OR
                # bashlex/parse.y L3105
            # mixed two-character operators
            elif both == "<&":
                return tokentype.LESS_AND
            elif both == ">&":
                return tokentype.GREATER_AND
            elif both == "<>":
                return tokentype.LESS_GREATER
            elif both == ">|":
                return tokentype.GREATER_BAR
            elif both == "&>":
                peek_char = self._getc()
                if peek_char == ">":
                    return tokentype.AND_GREATER_GREATER
                else:
                    self._ungetc(peek_char)
                    return tokentype.AND_GREATER
            elif both == "|&":
                return tokentype.BAR_AND
            elif both == ";&":
                return tokentype.SEMI_AND

            # not a two-character operator: push the lookahead back
            self._ungetc(peek_char)
            # ')' closing '(' right after a WORD: presumably the end of a
            # function definition's '()', so an opening brace is allowed next
            if (
                character == ")"
                and self._last_read_token.value == "("
                and self._token_before_that.ttype == tokentype.WORD
            ):
                self._parserstate.add(parserflags.ALLOWOPNBRC)
                # bashlex/parse.y L3155

            # track entering/leaving subshells and case patterns
            if character == "(" and not self._parserstate & parserflags.CASEPAT:
                self._parserstate.add(parserflags.SUBSHELL)
            elif self._parserstate & parserflags.CASEPAT and character == ")":
                self._parserstate.discard(parserflags.CASEPAT)
            elif self._parserstate & parserflags.SUBSHELL and character == ")":
                self._parserstate.discard(parserflags.SUBSHELL)

            # '<(' / '>(' are not operators (presumably process
            # substitution) -- fall through to word reading
            if character not in "<>" or peek_char != "(":
                return tokentype(character)

        # '-' directly after <& or >& (presumably fd-closing, e.g. '<&-')
        # is its own token
        if character == "-" and (
            self._last_read_token.ttype == tokentype.LESS_AND or self._last_read_token.ttype == tokentype.GREATER_AND
        ):
            return tokentype(character)

        return self._readtokenword(character)