Code example #1
def _MakeSimpleCommand(preparsed_list, suffix_words, redirects):
  """Create an command.SimpleCommand node."""

  # FOO=(1 2 3) ls is not allowed.
  for _, _, _, w in preparsed_list:
    if word.HasArrayPart(w):
      p_die("Environment bindings can't contain array literals", word=w)

  # echo FOO=(1 2 3) is not allowed (but we should NOT fail on echo FOO[x]=1).
  for w in suffix_words:
    if word.HasArrayPart(w):
      p_die("Commands can't contain array literals", word=w)

  # NOTE: In bash, {~bob,~jane}/src works, even though ~ isn't the leading
  # character of the initial word.
  # However, this means we must do tilde detection AFTER brace EXPANSION, not
  # just after brace DETECTION like we're doing here.
  # The BracedWordTree instances have to be expanded into CompoundWord
  # instances for the tilde detection to work.
  words2 = braces.BraceDetectAll(suffix_words)
  words3 = word.TildeDetectAll(words2)

  node = command.SimpleCommand()
  node.words = words3
  node.redirects = redirects
  _AppendMoreEnv(preparsed_list, node.more_env)
  return node
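
The NOTE above pins down an ordering: brace detection must run before tilde detection, because a ~ hidden inside {~bob,~jane}/src only becomes word-initial after brace expansion. A minimal standalone sketch of that two-pass pipeline, with hypothetical stand-ins for braces.BraceDetectAll and word.TildeDetectAll (not Oil's real API):

    # Hypothetical stand-ins: words are plain strings and "detection" just
    # tags them.  The real functions build BracedWordTree / tilde-sub nodes.
    def brace_detect_all(words):
        return [('braced', w) if '{' in w and ',' in w else ('plain', w)
                for w in words]

    def tilde_detect_all(tagged):
        # Only a plain word with a leading ~ is tilde-detected here; the ~
        # inside a braced word stays hidden until brace EXPANSION.
        return [('tilde', w) if kind == 'plain' and w.startswith('~')
                else (kind, w)
                for kind, w in tagged]

    words = ['~alice/src', '{~bob,~jane}/src']
    print(tilde_detect_all(brace_detect_all(words)))
    # [('tilde', '~alice/src'), ('braced', '{~bob,~jane}/src')]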
Code example #2
    def _ReadArrayLiteralPart(self):
        self._Next(lex_mode_e.Outer)  # advance past (
        self._Peek()
        if self.cur_token.id != Id.Op_LParen:
            p_die('Expected ( after =, got %r',
                  self.cur_token.val,
                  token=self.cur_token)

        # MUST use a new word parser (with same lexer).
        w_parser = WordParser(self.parse_ctx, self.lexer, self.line_reader)
        words = []
        while True:
            w = w_parser.ReadWord(lex_mode_e.Outer)
            assert w is not None

            if w.tag == word_e.TokenWord:
                word_id = word.CommandId(w)
                if word_id == Id.Right_ArrayLiteral:
                    break
                # Unlike command parsing, array parsing allows embedded \n.
                elif word_id == Id.Op_Newline:
                    continue
                else:
                    # TokenWord
                    p_die('Unexpected token in array literal: %r',
                          w.token.val,
                          word=w)

            words.append(w)

        words2 = braces.BraceDetectAll(words)
        words3 = word.TildeDetectAll(words2)

        return word_part.ArrayLiteralPart(words3)
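
The loop above is a small sub-parser pattern: read words until the closing token, but treat newlines as separators rather than terminators. A toy version over a plain token list (hypothetical tokens, not the Oil lexer):

    # ')' stands in for Id.Right_ArrayLiteral, '\n' for Id.Op_Newline.
    def read_array_items(tokens):
        items = []
        for tok in tokens:
            if tok == ')':
                return items
            if tok == '\n':  # embedded newlines are allowed inside a=( ... )
                continue
            items.append(tok)
        raise SyntaxError('unexpected end of input in array literal')

    print(read_array_items(['1', '\n', '2', '3', ')']))  # ['1', '2', '3']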
Code example #3
  def _ParseForEachLoop(self):
    node = command.ForEach()
    node.do_arg_iter = False

    ok, iter_name, quoted = word.StaticEval(self.cur_word)
    if not ok or quoted:
      p_die("Loop variable name should be a constant", word=self.cur_word)
    if not match.IsValidVarName(iter_name):
      p_die("Invalid loop variable name", word=self.cur_word)
    node.iter_name = iter_name
    self._Next()  # skip past name

    self._NewlineOk()

    in_spid = const.NO_INTEGER
    semi_spid = const.NO_INTEGER

    self._Peek()
    if self.c_id == Id.KW_In:
      self._Next()  # skip in

      in_spid = word.LeftMostSpanForWord(self.cur_word) + 1
      iter_words, semi_spid = self.ParseForWords()
      assert iter_words is not None

      words2 = braces.BraceDetectAll(iter_words)
      words3 = word.TildeDetectAll(words2)
      node.iter_words = words3

    elif self.c_id == Id.Op_Semi:
      node.do_arg_iter = True  # implicit for loop
      self._Next()

    elif self.c_id == Id.KW_Do:
      node.do_arg_iter = True  # implicit for loop
      # do not advance

    else:  # for foo BAD
      p_die('Unexpected word after for loop variable', word=self.cur_word)

    node.spids.extend((in_spid, semi_spid))

    body_node = self.ParseDoGroup()
    assert body_node is not None

    node.body = body_node
    return node
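
The three branches on self.c_id map to the three shell spellings of a for-loop header. A schematic sketch (invented helper, same decision logic):

    def classify_for_header(next_word):
        if next_word == 'in':   # for x in a b c; do ...; done
            return 'explicit word list'
        if next_word == ';':    # for x; do ...; done  -- iterates over "$@"
            return 'implicit arg iteration'
        if next_word == 'do':   # for x do ...; done   -- also iterates "$@"
            return 'implicit arg iteration'
        raise SyntaxError('Unexpected word after for loop variable')

    for w in ('in', ';', 'do'):
        print(w, '->', classify_for_header(w))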
Code example #4
File: expr_parse.py  Project: roryokane/oil
def _PushOilTokens(parse_ctx, gr, p, lex):
  # type: (ParseContext, Grammar, parse.Parser, Lexer) -> token
  """Push tokens onto pgen2's parser.

  Returns the last token so it can be reused/seen by the CommandParser.
  """
  #log('keywords = %s', gr.keywords)
  #log('tokens = %s', gr.tokens)

  mode = lex_mode_e.Expr
  mode_stack = [mode]
  last_token = None

  balance = 0

  from core.util import log
  while True:
    if last_token:  # e.g. left over from WordParser
      tok = last_token
      #log('last_token = %s', last_token)
      last_token = None
    else:
      tok = lex.Read(mode)
      #log('tok = %s', tok)

    # Comments and whitespace.  Newlines aren't ignored.
    if meta.LookupKind(tok.id) == Kind.Ignored:
      continue

    # For var x = {
    #   a: 1, b: 2
    # }
    if balance > 0 and tok.id == Id.Op_Newline:
      #log('*** SKIPPING NEWLINE')
      continue

    action = _MODE_TRANSITIONS.get((mode, tok.id))
    if action == POP:
      mode_stack.pop()
      mode = mode_stack[-1]
      balance -= 1
      #log('POPPED to %s', mode)
    elif action:  # it's an Id
      new_mode = action
      mode_stack.append(new_mode)
      mode = new_mode
      balance += 1  # e.g. var x = $/ NEWLINE /
      #log('PUSHED to %s', mode)
    else:
      # If we didn't already do something with the balance, look at another table.
      balance += _OTHER_BALANCE.get(tok.id, 0)
      #log('BALANCE after seeing %s = %d', tok.id, balance)

    #if tok.id == Id.Expr_Name and tok.val in KEYWORDS:
    #  tok.id = KEYWORDS[tok.val]
    #  log('Replaced with %s', tok.id)

    if tok.id.enum_id >= 256:
      raise AssertionError(str(tok))

    ilabel = _Classify(gr, tok)
    #log('tok = %s, ilabel = %d', tok, ilabel)

    if p.addtoken(tok.id.enum_id, tok, ilabel):
      return tok

    #
    # Extra handling of the body of @() and $().  Lex in the ShCommand mode.
    #

    if tok.id == Id.Left_AtParen:
      lex.PushHint(Id.Op_RParen, Id.Right_ShArrayLiteral)

      # Blame the opening token
      line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
      w_parser = parse_ctx.MakeWordParser(lex, line_reader)
      words = []
      while True:
        w = w_parser.ReadWord(lex_mode_e.ShCommand)
        if 0:
          log('w = %s', w)

        if isinstance(w, word__Token):
          word_id = word_.CommandId(w)
          if word_id == Id.Right_ShArrayLiteral:
            break
          elif word_id == Id.Op_Newline:  # internal newlines allowed
            continue
          else:
            # Token
            p_die('Unexpected token in array literal: %r', w.token.val, word=w)

        assert isinstance(w, word__Compound)  # for MyPy
        words.append(w)

      words2 = braces.BraceDetectAll(words)
      words3 = word_.TildeDetectAll(words2)

      typ = Id.Expr_CastedDummy.enum_id
      opaque = cast(token, words3)  # HACK for expr_to_ast
      done = p.addtoken(typ, opaque, gr.tokens[typ])
      assert not done  # can't end the expression

      # Now push the closing )
      tok = w.token
      ilabel = _Classify(gr, tok)
      done = p.addtoken(tok.id.enum_id, tok, ilabel)
      assert not done  # can't end the expression

      continue

    if tok.id == Id.Left_DollarParen:
      left_token = tok

      lex.PushHint(Id.Op_RParen, Id.Eof_RParen)
      line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
      c_parser = parse_ctx.MakeParserForCommandSub(line_reader, lex,
                                                   Id.Eof_RParen)
      node = c_parser.ParseCommandSub()
      # A little gross: Copied from osh/word_parse.py
      right_token = c_parser.w_parser.cur_token

      cs_part = command_sub(left_token, node)
      cs_part.spids.append(left_token.span_id)
      cs_part.spids.append(right_token.span_id)

      typ = Id.Expr_CastedDummy.enum_id
      opaque = cast(token, cs_part)  # HACK for expr_to_ast
      done = p.addtoken(typ, opaque, gr.tokens[typ])
      assert not done  # can't end the expression

      # Now push the closing )
      ilabel = _Classify(gr, right_token)
      done = p.addtoken(right_token.id.enum_id, right_token, ilabel)
      assert not done  # can't end the expression

      continue

    if tok.id == Id.Left_DoubleQuote:
      left_token = tok
      line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
      w_parser = parse_ctx.MakeWordParser(lex, line_reader)

      parts = []  # type: List[word_part_t]
      last_token = w_parser.ReadDoubleQuoted(left_token, parts)
      expr_dq_part = double_quoted(left_token, parts)

      typ = Id.Expr_CastedDummy.enum_id
      opaque = cast(token, expr_dq_part)  # HACK for expr_to_ast
      done = p.addtoken(typ, opaque, gr.tokens[typ])
      assert not done  # can't end the expression

      continue

    if tok.id == Id.Left_DollarBrace:
      left_token = tok
      line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
      w_parser = parse_ctx.MakeWordParser(lex, line_reader)

      part, last_token = w_parser.ReadBracedBracedVarSub(left_token)

      # It's casted word_part__BracedVarSub -> dummy -> expr__BracedVarSub!
      typ = Id.Expr_CastedDummy.enum_id
      opaque = cast(token, part)  # HACK for expr_to_ast
      done = p.addtoken(typ, opaque, gr.tokens[typ])
      assert not done  # can't end the expression

      continue

    # '' and c''
    if tok.id in (Id.Left_SingleQuoteRaw, Id.Left_SingleQuoteC):
      left_token = tok
      line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
      w_parser = parse_ctx.MakeWordParser(lex, line_reader)

      # mode can be SQ or DollarSQ
      tokens = []  # type: List[token]
      no_backslashes = (left_token.val == "'")
      last_token = w_parser.ReadSingleQuoted(mode, left_token, tokens,
                                             no_backslashes)
      sq_part = single_quoted(left_token, tokens)

      typ = Id.Expr_CastedDummy.enum_id
      opaque = cast(token, sq_part)  # HACK for expr_to_ast
      done = p.addtoken(typ, opaque, gr.tokens[typ])
      assert not done  # can't end the expression

      continue

  else:
    # We never broke out -- EOF is too soon (how can this happen???)
    raise parse.ParseError("incomplete input", tok.id.enum_id, tok)
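
The mode stack and balance counter above do two jobs: switch lexer modes at sub-language boundaries, and swallow newlines while any bracket is open (the var x = { ... } case in the comment). A reduced, self-contained sketch with an invented transition table:

    POP = 'pop'
    # (current mode, token) -> POP, or a new mode to push.
    _TRANSITIONS = {('expr', '{'): 'expr', ('expr', '}'): POP}

    def feed(tokens):
        mode_stack = ['expr']
        balance = 0
        out = []
        for tok in tokens:
            if balance > 0 and tok == '\n':
                continue  # newline inside brackets is skipped
            action = _TRANSITIONS.get((mode_stack[-1], tok))
            if action == POP:
                mode_stack.pop()
                balance -= 1
            elif action:
                mode_stack.append(action)
                balance += 1
            out.append(tok)
        return out

    print(feed(['var', 'x', '=', '{', 'a', '\n', 'b', '}', '\n']))
    # ['var', 'x', '=', '{', 'a', 'b', '}', '\n']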
Code example #5
def _PushOilTokens(parse_ctx, gr, p, lex):
    # type: (ParseContext, Grammar, parse.Parser, Lexer) -> token
    """Push tokens onto pgen2's parser.

  Returns the last token so it can be reused/seen by the CommandParser.
  """
    #log('keywords = %s', gr.keywords)
    #log('tokens = %s', gr.tokens)

    last_token = None  # type: Optional[token]

    balance = 0  # to ignore newlines

    while True:
        if last_token:  # e.g. left over from WordParser
            tok = last_token
            #log('last_token = %s', last_token)
            last_token = None
        else:
            tok = lex.Read(lex_mode_e.Expr)
            #log('tok = %s', tok)

        # Comments and whitespace.  Newlines aren't ignored.
        if lookup.LookupKind(tok.id) == Kind.Ignored:
            continue

        # For var x = {
        #   a: 1, b: 2
        # }
        if balance > 0 and tok.id == Id.Op_Newline:
            #log('*** SKIPPING NEWLINE')
            continue

        balance += _OTHER_BALANCE.get(tok.id, 0)
        #log('BALANCE after seeing %s = %d', tok.id, balance)

        #if tok.id == Id.Expr_Name and tok.val in KEYWORDS:
        #  tok.id = KEYWORDS[tok.val]
        #  log('Replaced with %s', tok.id)

        assert tok.id < 256, Id_str(tok.id)

        ilabel = _Classify(gr, tok)
        #log('tok = %s, ilabel = %d', tok, ilabel)

        if p.addtoken(tok.id, tok, ilabel):
            return tok

        #
        # Mutually recursive calls into the command/word parsers.
        #

        if mylib.PYTHON:
            if tok.id == Id.Left_AtParen:
                left_tok = tok
                lex.PushHint(Id.Op_RParen, Id.Right_ShArrayLiteral)

                # Blame the opening token
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)
                words = []
                while True:
                    w = w_parser.ReadWord(lex_mode_e.ShCommand)
                    if 0:
                        log('w = %s', w)

                    if isinstance(w, word__Token):
                        word_id = word_.CommandId(w)
                        if word_id == Id.Right_ShArrayLiteral:
                            break
                        elif word_id == Id.Op_Newline:  # internal newlines allowed
                            continue
                        else:
                            # Token
                            p_die('Unexpected token in array literal: %r',
                                  w.token.val,
                                  word=w)

                    assert isinstance(w, word__Compound)  # for MyPy
                    words.append(w)

                words2 = braces.BraceDetectAll(words)
                words3 = word_.TildeDetectAll(words2)

                typ = Id.Expr_CastedDummy

                lit_part = sh_array_literal(left_tok, words3)
                opaque = cast(token, lit_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                # Now push the closing )
                tok = w.token
                ilabel = _Classify(gr, tok)
                done = p.addtoken(tok.id, tok, ilabel)
                assert not done  # can't end the expression

                continue

            if tok.id == Id.Left_DollarParen:
                left_token = tok

                lex.PushHint(Id.Op_RParen, Id.Eof_RParen)
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                c_parser = parse_ctx.MakeParserForCommandSub(
                    line_reader, lex, Id.Eof_RParen)
                node = c_parser.ParseCommandSub()
                # A little gross: Copied from osh/word_parse.py
                right_token = c_parser.w_parser.cur_token

                cs_part = command_sub(left_token, node)
                cs_part.spids.append(left_token.span_id)
                cs_part.spids.append(right_token.span_id)

                typ = Id.Expr_CastedDummy
                opaque = cast(token, cs_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                # Now push the closing )
                ilabel = _Classify(gr, right_token)
                done = p.addtoken(right_token.id, right_token, ilabel)
                assert not done  # can't end the expression

                continue

            if tok.id == Id.Left_DoubleQuote:
                left_token = tok
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)

                parts = []  # type: List[word_part_t]
                last_token = w_parser.ReadDoubleQuoted(left_token, parts)
                expr_dq_part = double_quoted(left_token, parts)

                typ = Id.Expr_CastedDummy
                opaque = cast(token, expr_dq_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                continue

            if tok.id == Id.Left_DollarBrace:
                left_token = tok
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)

                part, last_token = w_parser.ReadBracedBracedVarSub(left_token)

                # It's casted word_part__BracedVarSub -> dummy -> expr__BracedVarSub!
                typ = Id.Expr_CastedDummy
                opaque = cast(token, part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                continue

            # '' and c''
            if tok.id in (Id.Left_SingleQuoteRaw, Id.Left_SingleQuoteC):
                if tok.id == Id.Left_SingleQuoteRaw:
                    sq_mode = lex_mode_e.SQ_Raw
                else:
                    sq_mode = lex_mode_e.SQ_C

                left_token = tok
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)

                tokens = []  # type: List[token]
                no_backslashes = (left_token.val == "'")
                last_token = w_parser.ReadSingleQuoted(sq_mode, left_token,
                                                       tokens, no_backslashes)
                sq_part = single_quoted(left_token, tokens)

                typ = Id.Expr_CastedDummy
                opaque = cast(token, sq_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression
                continue

    else:
        # We never broke out -- EOF is too soon (how can this happen???)
        raise parse.ParseError("incomplete input", tok.id, tok)
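
Both versions of _PushOilTokens rely on the same hand-off trick: each embedded sub-language ( @(), $(), "...", ${...}, '...' ) is parsed by its own parser, and the finished subtree is fed to pgen2 as a single opaque Id.Expr_CastedDummy terminal. A toy model of that hand-off, with an invented ToyParser standing in for pgen2's Parser:

    DUMMY = 'Expr_CastedDummy'

    class ToyParser(object):
        def __init__(self):
            self.shifted = []

        def addtoken(self, typ, value):
            # Like pgen2's Parser.addtoken: returns True only when the whole
            # expression is complete.  A dummy mid-expression never ends it.
            self.shifted.append((typ, value))
            return False

    p = ToyParser()
    subtree = ('double_quoted', ['hello ', ('var_sub', 'name')])
    done = p.addtoken(DUMMY, subtree)  # a whole parsed subtree in one "token"
    assert not done  # can't end the expression
    print(p.shifted)

The payoff is that the expression grammar needs exactly one terminal type for every embedded construct, at the cost of the cast(token, ...) hacks flagged in the comments above.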
Code example #6
File: word_parse.py  Project: stevenworthington/oil
  def _ReadArrayLiteral(self):
    # type: () -> word_part_t
    """
    a=(1 2 3)

    TODO: See osh/cmd_parse.py:164 for Id.Lit_ArrayLhsOpen, for a[x++]=1

    We want:

    A=(['x']=1 ["x"]=2 [$x$y]=3)

    Maybe allow this as a literal string?  Because I think I've seen it before?
    Or maybe force people to patch to learn the rule.

    A=([x]=4)

    Starts with Lit_Other '[', and then it has Lit_ArrayLhsClose
    Maybe enforce that ALL have keys or NONE of them have keys.
    """
    self._Next(lex_mode_e.ShCommand)  # advance past (
    self._Peek()
    if self.cur_token.id != Id.Op_LParen:
      p_die('Expected ( after =', token=self.cur_token)
    left_token = self.cur_token
    paren_spid = self.cur_token.span_id

    # MUST use a new word parser (with same lexer).
    w_parser = self.parse_ctx.MakeWordParser(self.lexer, self.line_reader)
    words = []  # type: List[compound_word]
    while True:
      w = w_parser.ReadWord(lex_mode_e.ShCommand)
      if w.tag_() == word_e.Token:
        tok = cast(Token, w)
        if tok.id == Id.Right_ShArrayLiteral:
          break
        # Unlike command parsing, array parsing allows embedded \n.
        elif tok.id == Id.Op_Newline:
          continue
        else:
          # Token
          p_die('Unexpected token in array literal', word=w)

      words.append(cast(compound_word, w))

    if len(words) == 0:  # a=() is empty indexed array
      # Needed for type safety, doh
      no_words = []  # type: List[word_t]
      node = sh_array_literal(left_token, no_words)
      node.spids.append(left_token.span_id)
      return node
 
    # If the first one is a key/value pair, then the rest are assumed to be.
    pair = word_.DetectAssocPair(words[0])
    if pair:
      k, v = pair
      pairs = [k, v]

      n = len(words)
      for i in xrange(1, n):
        w2 = words[i]
        pair = word_.DetectAssocPair(w2)
        if not pair:
          p_die("Expected associative array pair", word=w2)

        k, v = pair
        pairs.append(k)  # flat representation
        pairs.append(v)

      # invariant List?
      node2 = word_part.AssocArrayLiteral(left_token, pairs)
      node2.spids.append(paren_spid)
      return node2

    # Brace detection for arrays but NOT associative arrays
    words2 = braces.BraceDetectAll(words)
    words3 = word_.TildeDetectAll(words2)
    node = sh_array_literal(left_token, words3)
    node.spids.append(paren_spid)
    return node
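
The rule in the comment (if the first word is a key/value pair, the rest must be) can be isolated from the lexer. A simplified stand-in for word_.DetectAssocPair and the flat [k1, v1, k2, v2, ...] representation, using hypothetical regex matching rather than Oil's token-based detection:

    import re

    def detect_assoc_pair(w):
        # Recognize [key]=value; return (key, value) or None.
        m = re.match(r'\[([^]]+)\]=(.*)$', w)
        return (m.group(1), m.group(2)) if m else None

    def parse_array_words(words):
        first = detect_assoc_pair(words[0]) if words else None
        if first is None:
            return ('indexed', words)
        pairs = list(first)  # flat representation: [k1, v1, k2, v2, ...]
        for w in words[1:]:
            pair = detect_assoc_pair(w)
            if pair is None:
                raise SyntaxError('Expected associative array pair: %r' % (w,))
            pairs.extend(pair)
        return ('assoc', pairs)

    print(parse_array_words(['[x]=1', '[y]=2']))  # ('assoc', ['x', '1', 'y', '2'])
    print(parse_array_words(['1', '2', '3']))     # ('indexed', ['1', '2', '3'])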
Code example #7
    def _ReadArrayLiteralPart(self):
        # type: () -> word_part_t
        """
    a=(1 2 3)

    TODO: See osh/cmd_parse.py:164 for Id.Lit_ArrayLhsOpen, for a[x++]=1

    We want:

    A=(['x']=1 ["x"]=2 [$x$y]=3)

    Maybe allow this as a literal string?  Because I think I've seen it before?
    Or maybe force people to patch to learn the rule.

    A=([x]=4)

    Starts with Lit_Other '[', and then it has Lit_ArrayLhsClose
    Maybe enforce that ALL have keys or NONE of them have keys.
    """
        self._Next(lex_mode_e.ShCommand)  # advance past (
        self._Peek()
        if self.cur_token.id != Id.Op_LParen:
            p_die('Expected ( after =, got %r',
                  self.cur_token.val,
                  token=self.cur_token)
        paren_spid = self.cur_token.span_id

        # MUST use a new word parser (with same lexer).
        w_parser = WordParser(self.parse_ctx, self.lexer, self.line_reader)
        words = []
        while True:
            w = w_parser.ReadWord(lex_mode_e.ShCommand)

            if isinstance(w, word__TokenWord):
                word_id = word.CommandId(w)
                if word_id == Id.Right_ArrayLiteral:
                    break
                # Unlike command parsing, array parsing allows embedded \n.
                elif word_id == Id.Op_Newline:
                    continue
                else:
                    # TokenWord
                    p_die('Unexpected token in array literal: %r',
                          w.token.val,
                          word=w)

            assert isinstance(w, word__CompoundWord)  # for MyPy
            words.append(w)

        if not words:  # a=() is empty indexed array
            node = word_part.ArrayLiteralPart(
                words)  # type: ignore  # invariant List?
            node.spids.append(paren_spid)
            return node

        # If the first one is a key/value pair, then the rest are assumed to be.
        pair = word.DetectAssocPair(words[0])
        if pair:
            pairs = [pair[0], pair[1]]  # flat representation

            n = len(words)
            for i in xrange(1, n):
                w = words[i]
                pair = word.DetectAssocPair(w)
                if not pair:
                    p_die("Expected associative array pair", word=w)

                pairs.append(pair[0])  # flat representation
                pairs.append(pair[1])

            node = word_part.AssocArrayLiteral(
                pairs)  # type: ignore  # invariant List?
            node.spids.append(paren_spid)
            return node

        words2 = braces.BraceDetectAll(words)
        words3 = word.TildeDetectAll(words2)
        node = word_part.ArrayLiteralPart(words3)
        node.spids.append(paren_spid)
        return node
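
The "# type: ignore  # invariant List?" comments in the last two examples, and the no_words copy in example #6 ("Needed for type safety, doh"), stem from the same constraint: MyPy's List is invariant, so List[compound_word] is not a subtype of List[word_t] even though compound_word subclasses word_t. A minimal illustration of the problem and the copy workaround:

    from typing import List

    class word_t(object):
        pass

    class compound_word(word_t):
        pass

    def count_words(words):
        # type: (List[word_t]) -> int
        return len(words)

    words = [compound_word()]  # type: List[compound_word]
    # count_words(words)  # MyPy error: List[compound_word] is not List[word_t]
    as_base = list(words)  # type: List[word_t]  # a fresh list at the base type
    print(count_words(as_base))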