# Code example #1
# File: bash.py — project: lambada/toltec
def _parse_indexed(lexer: shlex.shlex) -> IndexedArray:
    """Parse an indexed Bash array."""
    assert lexer.get_token() == "("
    entries: List[Optional[str]] = []

    while True:
        tok = lexer.get_token()
        assert tok != lexer.eof

        if tok == ")":
            return entries

        # Each entry has the shape [<index>]=<value>.
        assert tok == "["
        idx = int(lexer.get_token())
        assert lexer.get_token() == "]"
        assert lexer.get_token() == "="
        parsed = _parse_string(lexer.get_token())

        # Pad with None so that position idx exists (Bash indexed
        # arrays may be sparse).
        missing = idx - len(entries) + 1
        if missing > 0:
            entries.extend([None] * missing)

        entries[idx] = parsed
# Code example #2
def tokens_until(lex: shlex, until_tok: Union[str,
                                              Iterable[str]]) -> Iterator[str]:
    """Yield tokens from *lex* until EOF or one of the stop tokens.

    A stop token, when encountered, is pushed back onto the lexer so the
    caller can still consume it; EOF simply ends the iteration.
    """
    stops = [until_tok] if isinstance(until_tok, str) else list(until_tok)

    while (tok := lex.get_token()) != lex.eof:
        if tok in stops:
            lex.push_token(tok)
            return
        yield tok
# Code example #3
# File: bash.py — project: lambada/toltec
def _parse_func(lexer: shlex.shlex) -> Tuple[int, int]:
    """Find the starting and end bounds of a function declaration."""
    assert lexer.get_token() == "{"
    brace_depth = 1

    start_byte = lexer.instream.tell()

    while brace_depth > 0:
        token = lexer.get_token()
        assert token != lexer.eof

        if token == "{":
            brace_depth += 1
        elif token == "}":
            brace_depth -= 1

    end_byte = lexer.instream.tell() - 1
    return start_byte, end_byte
# Code example #4
# File: bash.py — project: lambada/toltec
def _parse_assoc(lexer: shlex.shlex) -> AssociativeArray:
    """Parse an associative Bash array."""
    assert lexer.get_token() == "("
    mapping = {}

    while True:
        tok = lexer.get_token()
        assert tok != lexer.eof

        if tok == ")":
            return mapping

        # Each entry has the shape [<key>]=<value>.
        assert tok == "["
        key = lexer.get_token()
        assert lexer.get_token() == "]"
        assert lexer.get_token() == "="
        mapping[key] = _parse_string(lexer.get_token())
# Code example #5
# File: bash.py — project: lambada/toltec
def _parse_var(lexer: shlex.shlex) -> Tuple[str, Optional[Any]]:
    """Parse a variable declaration."""
    flags_token = lexer.get_token()

    if flags_token != "--":
        var_flags = set(flags_token[1:])
    else:
        var_flags = set()

    var_name = lexer.get_token()
    var_value: Optional[Any] = None
    lookahead = lexer.get_token()

    if lookahead == "=":
        if "a" in var_flags:
            var_value = _parse_indexed(lexer)
        elif "A" in var_flags:
            var_value = _parse_assoc(lexer)
        else:
            var_value = _parse_string(lexer.get_token())
    else:
        lexer.push_token(lookahead)

    return var_name, var_value
# Code example #6
def amend_shlex(
    lex: shlex,
    add_commenters: Optional[str] = None,
    rm_commenters: Optional[str] = None,
    commenters: Optional[str] = None,
    add_wordchars: Optional[str] = None,
    rm_wordchars: Optional[str] = None,
    wordchars: Optional[str] = None,
    add_whitespace: Optional[str] = None,
    rm_whitespace: Optional[str] = None,
    whitespace: Optional[str] = None,
    add_escape: Optional[str] = None,
    rm_escape: Optional[str] = None,
    escape: Optional[str] = None,
    add_quotes: Optional[str] = None,
    rm_quotes: Optional[str] = None,
    quotes: Optional[str] = None,
    add_escapedquotes: Optional[str] = None,
    rm_escapedquotes: Optional[str] = None,
    escapedquotes: Optional[str] = None,
) -> Iterator[shlex]:
    """Context manager temporarily changing shlex control variables.
	"""
    orig_commenters = lex.commenters
    orig_wordchars = lex.wordchars
    orig_whitespace = lex.whitespace
    orig_escape = lex.escape
    orig_quotes = lex.quotes
    orig_escapedquotes = lex.escapedquotes

    if add_commenters is not None:
        lex.commenters += ''.join(c for c in add_commenters
                                  if c not in lex.commenters)
    if rm_commenters is not None:
        lex.commenters = ''.join(c for c in lex.commenters
                                 if c not in rm_commenters)
    if commenters is not None:
        lex.commenters = commenters

    if add_wordchars is not None:
        lex.wordchars += ''.join(c for c in add_wordchars
                                 if c not in lex.wordchars)
    if rm_wordchars is not None:
        lex.wordchars = ''.join(c for c in lex.wordchars
                                if c not in rm_wordchars)
    if wordchars is not None:
        lex.wordchars = wordchars

    if add_whitespace is not None:
        lex.whitespace += ''.join(c for c in add_whitespace
                                  if c not in lex.whitespace)
    if rm_whitespace is not None:
        lex.whitespace = ''.join(c for c in lex.whitespace
                                 if c not in rm_whitespace)
    if whitespace is not None:
        lex.whitespace = whitespace

    if add_escape is not None:
        lex.escape += ''.join(c for c in add_escape if c not in lex.escape)
    if rm_escape is not None:
        lex.escape = ''.join(c for c in lex.escape if c not in rm_escape)
    if escape is not None:
        lex.escape = escape

    if add_quotes is not None:
        lex.quotes += ''.join(c for c in add_quotes if c not in lex.quotes)
    if rm_quotes is not None:
        lex.quotes = ''.join(c for c in lex.quotes if c not in rm_quotes)
    if quotes is not None:
        lex.quotes = quotes

    if add_escapedquotes is not None:
        lex.escapedquotes += ''.join(c for c in add_escapedquotes
                                     if c not in lex.escapedquotes)
    if rm_escapedquotes is not None:
        lex.escapedquotes = ''.join(c for c in lex.escapedquotes
                                    if c not in rm_escapedquotes)
    if escapedquotes is not None:
        lex.escapedquotes = escapedquotes

    try:
        yield lex
    finally:
        lex.commenters = orig_commenters
        lex.wordchars = orig_wordchars
        lex.whitespace = orig_whitespace
        lex.escape = orig_escape
        lex.quotes = orig_quotes
        lex.escapedquotes = orig_escapedquotes
# Code example #7
 def read_one_token(lex: shlex) -> None:
     lex.get_token()
     return None