Example #1
  def Expr(self, pnode):
    # type: (PNode) -> expr_t
    """Transform expressions (as opposed to statements)."""
    typ = pnode.typ
    tok = pnode.tok
    children = pnode.children

    if ISNONTERMINAL(typ):

      #
      # Oil Entry Points / Additions
      #

      if typ == grammar_nt.oil_expr:  # for if/while
        # oil_expr: '(' testlist ')'
        return self.Expr(children[1])

      if typ == grammar_nt.return_expr:
        # return_expr: testlist end_stmt
        return self.Expr(children[0])

      if typ == grammar_nt.place_list:
        return self._AssocBinary(children)

      if typ == grammar_nt.place:
        # place: NAME place_trailer*
        if len(children) == 1:
          return self.Expr(children[0])
        # TODO: Reuse _Trailer, but don't handle call ( )?
        # Only [] . -> :: ?
        raise NotImplementedError

      #
      # Python-like Expressions / Operators
      #

      if typ == grammar_nt.atom:
        if len(children) == 1:
          return self.Expr(children[0])
        return self._Atom(children)

      if typ == grammar_nt.testlist:
        # testlist: test (',' test)* [',']
        # We need tuples for Oil's 'var a, b = x' and 'for (a, b in x) {'
        return self._Tuple(children)

      if typ == grammar_nt.test:
        # test: or_test ['if' or_test 'else' test] | lambdef
        if len(children) == 1:
          return self.Expr(children[0])

        # TODO: Handle lambdef
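        # e.g. 'x if cond else y' arrives as [x, 'if', cond, 'else', y],
        # so the condition is children[2] and the else branch is children[4].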

        test = self.Expr(children[2])
        body = self.Expr(children[0])
        orelse = self.Expr(children[4])
        return expr.IfExp(test, body, orelse)

      if typ == grammar_nt.test_nocond:
        # test_nocond: or_test | lambdef_nocond
        assert len(children) == 1
        return self.Expr(children[0])

      if typ == grammar_nt.argument:
        # argument: ( test [comp_for] |
        #             test '=' test |
        #             '**' test |
        #             '*' test )
        if len(children) == 1:
          return self.Expr(children[0])
        # TODO:
        raise NotImplementedError

      if typ == grammar_nt.subscript:
        # subscript: test | [test] ':' [test] [sliceop]
        if len(children) == 1:
          return self.Expr(children[0])
        # TODO:
        raise NotImplementedError

      if typ == grammar_nt.testlist_comp:
        # testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
        if children[1].typ == grammar_nt.comp_for:
          elt = self.Expr(children[0])
          comp = self._CompFor(children[1])
          return expr.ListComp(elt, [comp])

        # (1,)  (1, 2)  etc.
        if children[1].tok.id == Id.Arith_Comma:
          return self._Tuple(children)
        raise NotImplementedError('testlist_comp')

      elif typ == grammar_nt.exprlist:
        # exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']

        if len(children) == 1:
          return self.Expr(children[0])

        # used in for loop, genexpr.
        # TODO: Should this be place_list?  for x, *y ?
        raise NotImplementedError('exprlist')

      #
      # Operators with Precedence
      #

      if typ == grammar_nt.or_test:
        # or_test: and_test ('or' and_test)*
        return self._AssocBinary(children)

      if typ == grammar_nt.and_test:
        # and_test: not_test ('and' not_test)*
        return self._AssocBinary(children)

      if typ == grammar_nt.not_test:
        # not_test: 'not' not_test | comparison
        if len(children) == 1:
          return self.Expr(children[0])

        op_tok = children[0].tok  # not
        return expr.Unary(op_tok, self.Expr(children[1]))

      elif typ == grammar_nt.comparison:
        if len(children) == 1:
          return self.Expr(children[0])

        return self._CompareChain(children)

      elif typ == grammar_nt.expr:
        # expr: xor_expr ('|' xor_expr)*
        return self._AssocBinary(children)

      if typ == grammar_nt.xor_expr:
        # xor_expr: and_expr ('xor' and_expr)*
        return self._AssocBinary(children)

      if typ == grammar_nt.and_expr:  # a & b
        # and_expr: shift_expr ('&' shift_expr)*
        return self._AssocBinary(children)

      elif typ == grammar_nt.shift_expr:
        # shift_expr: arith_expr (('<<'|'>>') arith_expr)*
        return self._AssocBinary(children)

      elif typ == grammar_nt.arith_expr:
        # arith_expr: term (('+'|'-') term)*
        return self._AssocBinary(children)

      elif typ == grammar_nt.term:
        # term: factor (('*'|'/'|'div'|'mod') factor)*
        return self._AssocBinary(children)

      elif typ == grammar_nt.factor:
        # factor: ('+'|'-'|'~') factor | power
        # the power would have already been reduced
        if len(children) == 1:
          return self.Expr(children[0])
        op, e = children
        assert isinstance(op.tok, token)
        return expr.Unary(op.tok, self.Expr(e))

      elif typ == grammar_nt.power:
        # power: atom trailer* ['^' factor]

        node = self.Expr(children[0])
        if len(children) == 1:  # No trailers
          return node

        n = len(children)
        i = 1
        while i < n and ISNONTERMINAL(children[i].typ):
          node = self._Trailer(node, children[i])
          i += 1

        if i != n:  # ['^' factor]
          op_tok = children[i].tok
          assert op_tok.id == Id.Arith_Caret, op_tok
          factor = self.Expr(children[i+1])
          node = expr.Binary(op_tok, node, factor)

        return node

      #
      # Oil Lexer Modes
      #

      elif typ == grammar_nt.array_literal:
        left_tok = children[0].tok

        # Approximation for now.
        tokens = [
            p.tok for p in children[1:-1]
            if p.tok.id == Id.Lit_Chars
        ]
        items = [expr.Const(t) for t in tokens]  # type: List[expr_t]
        return expr.ArrayLiteral(left_tok, items)

      elif typ == grammar_nt.sh_array_literal:
        left_tok = children[0].tok

        # HACK: When typ is Id.Expr_CastedDummy, the 'tok' field ('opaque')
        # actually has a list of words!
        typ1 = children[1].typ
        assert typ1 == Id.Expr_CastedDummy.enum_id, typ1
        array_words = cast('List[word_t]', children[1].tok)

        return sh_array_literal(left_tok, array_words)

      elif typ == grammar_nt.sh_command_sub:
        return cast(command_sub, children[1].tok)

      elif typ == grammar_nt.braced_var_sub:
        return cast(braced_var_sub, children[1].tok)

      elif typ == grammar_nt.dq_string:
        return cast(double_quoted, children[1].tok)

      elif typ == grammar_nt.sq_string:
        return cast(single_quoted, children[1].tok)

      elif typ == grammar_nt.simple_var_sub:
        return simple_var_sub(children[0].tok)

      else:
        nt_name = self.number2symbol[typ]
        raise AssertionError(
            "PNode type %d (%s) wasn't handled" % (typ, nt_name))

    else:  # Terminals should have a token
      id_ = tok.id

      if id_ == Id.Expr_Name:
        return expr.Var(tok)

      if id_ in (
          Id.Expr_DecInt, Id.Expr_BinInt, Id.Expr_OctInt, Id.Expr_HexInt,
          Id.Expr_Float):
        return expr.Const(tok)

      if id_ in (Id.Expr_Null, Id.Expr_True, Id.Expr_False):
        return expr.Const(tok)

      from core.meta import IdInstance
      raise NotImplementedError(IdInstance(typ))
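
The _AssocBinary helper that Example #1 calls throughout is not shown. A
minimal sketch of what such a helper could look like, assuming the children
alternate [operand, op, operand, op, ...]; the method name is real, the body
below is illustrative only:

  def _AssocBinary(self, children):
    # type: (List[PNode]) -> expr_t
    """Fold 'a OP b OP c' into nested expr.Binary nodes (left-associative).

    Sketch only; the real helper lives beside Expr in expr_to_ast.py.
    """
    node = self.Expr(children[0])
    i = 1
    n = len(children)
    while i < n:
      op_tok = children[i].tok  # the operator token, e.g. '+' or 'or'
      node = expr.Binary(op_tok, node, self.Expr(children[i + 1]))
      i += 2
    return node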
Example #2
def _PushOilTokens(parse_ctx, gr, p, lex):
    # type: (ParseContext, Grammar, parse.Parser, Lexer) -> token
    """Push tokens onto pgen2's parser.

    Returns the last token so it can be reused/seen by the CommandParser.
    """
    #log('keywords = %s', gr.keywords)
    #log('tokens = %s', gr.tokens)

    last_token = None  # type: Optional[token]

    balance = 0  # bracket nesting depth; newlines are ignored inside brackets

    while True:
        if last_token:  # e.g. left over from WordParser
            tok = last_token
            #log('last_token = %s', last_token)
            last_token = None
        else:
            tok = lex.Read(lex_mode_e.Expr)
            #log('tok = %s', tok)

        # Comments and whitespace.  Newlines aren't ignored.
        if lookup.LookupKind(tok.id) == Kind.Ignored:
            continue

        # For var x = {
        #   a: 1, b: 2
        # }
        if balance > 0 and tok.id == Id.Op_Newline:
            #log('*** SKIPPING NEWLINE')
            continue

        balance += _OTHER_BALANCE.get(tok.id, 0)
        #log('BALANCE after seeing %s = %d', tok.id, balance)

        #if tok.id == Id.Expr_Name and tok.val in KEYWORDS:
        #  tok.id = KEYWORDS[tok.val]
        #  log('Replaced with %s', tok.id)

        assert tok.id < 256, Id_str(tok.id)
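        # pgen2 reserves ids at or above 256 (NT_OFFSET) for nonterminals,
        # so every terminal pushed here must fit below that boundary.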

        ilabel = _Classify(gr, tok)
        #log('tok = %s, ilabel = %d', tok, ilabel)

        if p.addtoken(tok.id, tok, ilabel):
            return tok

        #
        # Mutually recursive calls into the command/word parsers.
        #

        if mylib.PYTHON:
            if tok.id == Id.Left_AtParen:
                left_tok = tok
                lex.PushHint(Id.Op_RParen, Id.Right_ShArrayLiteral)

                # Blame the opening token
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)
                words = []
                while True:
                    w = w_parser.ReadWord(lex_mode_e.ShCommand)
                    if 0:
                        log('w = %s', w)

                    if isinstance(w, word__Token):
                        word_id = word_.CommandId(w)
                        if word_id == Id.Right_ShArrayLiteral:
                            break
                        elif word_id == Id.Op_Newline:  # internal newlines allowed
                            continue
                        else:
                            # Token
                            p_die('Unexpected token in array literal: %r',
                                  w.token.val,
                                  word=w)

                    assert isinstance(w, word__Compound)  # for MyPy
                    words.append(w)

                words2 = braces.BraceDetectAll(words)
                words3 = word_.TildeDetectAll(words2)

                typ = Id.Expr_CastedDummy

                lit_part = sh_array_literal(left_tok, words3)
                opaque = cast(token, lit_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                # Now push the closing )
                tok = w.token
                ilabel = _Classify(gr, tok)
                done = p.addtoken(tok.id, tok, ilabel)
                assert not done  # can't end the expression

                continue

            if tok.id == Id.Left_DollarParen:
                left_token = tok

                lex.PushHint(Id.Op_RParen, Id.Eof_RParen)
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                c_parser = parse_ctx.MakeParserForCommandSub(
                    line_reader, lex, Id.Eof_RParen)
                node = c_parser.ParseCommandSub()
                # A little gross: Copied from osh/word_parse.py
                right_token = c_parser.w_parser.cur_token

                cs_part = command_sub(left_token, node)
                cs_part.spids.append(left_token.span_id)
                cs_part.spids.append(right_token.span_id)

                typ = Id.Expr_CastedDummy
                opaque = cast(token, cs_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                # Now push the closing )
                ilabel = _Classify(gr, right_token)
                done = p.addtoken(right_token.id, right_token, ilabel)
                assert not done  # can't end the expression

                continue

            if tok.id == Id.Left_DoubleQuote:
                left_token = tok
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)

                parts = []  # type: List[word_part_t]
                last_token = w_parser.ReadDoubleQuoted(left_token, parts)
                expr_dq_part = double_quoted(left_token, parts)

                typ = Id.Expr_CastedDummy
                opaque = cast(token, expr_dq_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                continue

            if tok.id == Id.Left_DollarBrace:
                left_token = tok
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)

                part, last_token = w_parser.ReadBracedBracedVarSub(left_token)

                # It's cast: word_part__BracedVarSub -> dummy -> expr__BracedVarSub!
                typ = Id.Expr_CastedDummy
                opaque = cast(token, part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression

                continue

            # '' and c''
            if tok.id in (Id.Left_SingleQuoteRaw, Id.Left_SingleQuoteC):
                if tok.id == Id.Left_SingleQuoteRaw:
                    sq_mode = lex_mode_e.SQ_Raw
                else:
                    sq_mode = lex_mode_e.SQ_C

                left_token = tok
                line_reader = reader.DisallowedLineReader(parse_ctx.arena, tok)
                w_parser = parse_ctx.MakeWordParser(lex, line_reader)

                tokens = []  # type: List[token]
                no_backslashes = (left_token.val == "'")
                last_token = w_parser.ReadSingleQuoted(sq_mode, left_token,
                                                       tokens, no_backslashes)
                sq_part = single_quoted(left_token, tokens)

                typ = Id.Expr_CastedDummy
                opaque = cast(token, sq_part)  # HACK for expr_to_ast
                done = p.addtoken(typ, opaque, gr.tokens[typ])
                assert not done  # can't end the expression
                continue

    else:
        # We never broke out -- EOF is too soon (how can this happen???)
        raise parse.ParseError("incomplete input", tok.id, tok)
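
Example #2 leans on two helpers that are not shown. A rough sketch under
assumed contents; the names _OTHER_BALANCE and _Classify are real, but the
dict entries and the function body below are assumptions:

# Net bracket deltas, so newlines inside (), [], {} can be skipped.
_OTHER_BALANCE = {
    Id.Op_LParen: 1, Id.Op_RParen: -1,
    Id.Op_LBracket: 1, Id.Op_RBracket: -1,
    Id.Op_LBrace: 1, Id.Op_RBrace: -1,
}


def _Classify(gr, tok):
    # type: (Grammar, token) -> int
    """Map a token to a pgen2 ilabel (sketch, mirroring pgen2's classify)."""
    # Keywords like 'for' are lexed as Expr_Name, so check the keyword
    # table first, then fall back to the terminal table.
    if tok.id == Id.Expr_Name and tok.val in gr.keywords:
        return gr.keywords[tok.val]
    if tok.id in gr.tokens:
        return gr.tokens[tok.id]
    raise AssertionError('unexpected token %r' % tok.val)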
Example #3
  def _ReadArrayLiteral(self):
    # type: () -> word_part_t
    """
    a=(1 2 3)

    TODO: See osh/cmd_parse.py:164 for Id.Lit_ArrayLhsOpen, for a[x++]=1

    We want:

    A=(['x']=1 ["x"]=2 [$x$y]=3)

    Maybe allow this as a literal string?  Because I think I've seen it before?
    Or maybe force people to patch to learn the rule.

    A=([x]=4)

    Starts with Lit_Other '[', and then it has Lit_ArrayLhsClose
    Maybe enforce that ALL of them have keys or NONE of them have keys.
    """
    self._Next(lex_mode_e.ShCommand)  # advance past (
    self._Peek()
    if self.cur_token.id != Id.Op_LParen:
      p_die('Expected ( after =', token=self.cur_token)
    left_token = self.cur_token
    paren_spid = self.cur_token.span_id

    # MUST use a new word parser (with same lexer).
    w_parser = self.parse_ctx.MakeWordParser(self.lexer, self.line_reader)
    words = []  # type: List[compound_word]
    while True:
      w = w_parser.ReadWord(lex_mode_e.ShCommand)
      if w.tag_() == word_e.Token:
        tok = cast(Token, w)
        if tok.id == Id.Right_ShArrayLiteral:
          break
        # Unlike command parsing, array parsing allows embedded \n.
        elif tok.id == Id.Op_Newline:
          continue
        else:
          # Token
          p_die('Unexpected token in array literal', word=w)

      words.append(cast(compound_word, w))

    if len(words) == 0:  # a=() is empty indexed array
      # Needed for type safety, doh
      no_words = []  # type: List[word_t]
      node = sh_array_literal(left_token, no_words)
      node.spids.append(left_token.span_id)
      return node
 
    # If the first one is a key/value pair, then the rest are assumed to be.
    pair = word_.DetectAssocPair(words[0])
    if pair:
      k, v = pair
      pairs = [k, v]

      n = len(words)
      for i in xrange(1, n):
        w2 = words[i]
        pair = word_.DetectAssocPair(w2)
        if not pair:
          p_die("Expected associative array pair", word=w2)

        k, v = pair
        pairs.append(k)  # flat representation
        pairs.append(v)

      # invariant List?
      node2 = word_part.AssocArrayLiteral(left_token, pairs)
      node2.spids.append(paren_spid)
      return node2

    # Brace detection for arrays but NOT associative arrays
    words2 = braces.BraceDetectAll(words)
    words3 = word_.TildeDetectAll(words2)
    node = sh_array_literal(left_token, words3)
    node.spids.append(paren_spid)
    return node
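
For orientation: the 'flat representation' built above interleaves keys and
values in a single list. A tiny stand-in demonstration, with plain strings in
place of the real word objects:

pairs = ['k1', 'v1', 'k2', 'v2']  # from A=([k1]=v1 [k2]=v2)
keys, values = pairs[0::2], pairs[1::2]
assert keys == ['k1', 'k2'] and values == ['v1', 'v2']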
Example #4
    def Expr(self, pnode):
        # type: (PNode) -> expr_t
        """Transform expressions (as opposed to statements)."""
        typ = pnode.typ
        tok = pnode.tok
        children = pnode.children

        if ISNONTERMINAL(typ):

            #
            # Oil Entry Points / Additions
            #

            if typ == grammar_nt.oil_expr:  # for if/while
                # oil_expr: '(' testlist ')'
                return self.Expr(children[1])

            if typ == grammar_nt.command_expr:
                # command_expr: testlist end_stmt
                return self.Expr(children[0])

            #
            # Python-like Expressions / Operators
            #

            if typ == grammar_nt.atom:
                if len(children) == 1:
                    return self.Expr(children[0])
                return self._Atom(children)

            if typ == grammar_nt.testlist:
                # testlist: test (',' test)* [',']
                return self._Tuple(children)

            if typ == grammar_nt.test:
                # test: or_test ['if' or_test 'else' test] | lambdef
                if len(children) == 1:
                    return self.Expr(children[0])

                # TODO: Handle lambdef
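                # e.g. 'x if cond else y' arrives as [x, 'if', cond, 'else', y]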

                test = self.Expr(children[2])
                body = self.Expr(children[0])
                orelse = self.Expr(children[4])
                return expr.IfExp(test, body, orelse)

            if typ == grammar_nt.lambdef:
                # lambdef: '|' [name_type_list] '|' test
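                # n == 4: '|' name_type_list '|' test;  n == 3: '|' '|' test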

                n = len(children)
                if n == 4:
                    params = self._NameTypeList(children[1])
                else:
                    params = []

                body = self.Expr(children[n - 1])
                return expr.Lambda(params, body)

            #
            # Operators with Precedence
            #

            if typ == grammar_nt.or_test:
                # or_test: and_test ('or' and_test)*
                return self._AssocBinary(children)

            if typ == grammar_nt.and_test:
                # and_test: not_test ('and' not_test)*
                return self._AssocBinary(children)

            if typ == grammar_nt.not_test:
                # not_test: 'not' not_test | comparison
                if len(children) == 1:
                    return self.Expr(children[0])

                op_tok = children[0].tok  # not
                return expr.Unary(op_tok, self.Expr(children[1]))

            elif typ == grammar_nt.comparison:
                if len(children) == 1:
                    return self.Expr(children[0])

                return self._CompareChain(children)

            elif typ == grammar_nt.range_expr:
                if len(children) == 1:
                    return self.Expr(children[0])

                if len(children) == 3:
                    return expr.Range(self.Expr(children[0]),
                                      self.Expr(children[2]))

                raise AssertionError(children)

            elif typ == grammar_nt.expr:
                # expr: xor_expr ('|' xor_expr)*
                return self._AssocBinary(children)

            if typ == grammar_nt.xor_expr:
                # xor_expr: and_expr ('xor' and_expr)*
                return self._AssocBinary(children)

            if typ == grammar_nt.and_expr:  # a & b
                # and_expr: shift_expr ('&' shift_expr)*
                return self._AssocBinary(children)

            elif typ == grammar_nt.shift_expr:
                # shift_expr: arith_expr (('<<'|'>>') arith_expr)*
                return self._AssocBinary(children)

            elif typ == grammar_nt.arith_expr:
                # arith_expr: term (('+'|'-') term)*
                return self._AssocBinary(children)

            elif typ == grammar_nt.term:
                # term: factor (('*'|'/'|'div'|'mod') factor)*
                return self._AssocBinary(children)

            elif typ == grammar_nt.factor:
                # factor: ('+'|'-'|'~') factor | power
                # the power would have already been reduced
                if len(children) == 1:
                    return self.Expr(children[0])
                op, e = children
                assert isinstance(op.tok, token)
                return expr.Unary(op.tok, self.Expr(e))

            elif typ == grammar_nt.power:
                # power: atom trailer* ['^' factor]

                node = self.Expr(children[0])
                if len(children) == 1:  # No trailers
                    return node

                n = len(children)
                i = 1
                while i < n and ISNONTERMINAL(children[i].typ):
                    node = self._Trailer(node, children[i])
                    i += 1

                if i != n:  # ['^' factor]
                    op_tok = children[i].tok
                    assert op_tok.id == Id.Arith_Caret, op_tok
                    factor = self.Expr(children[i + 1])
                    node = expr.Binary(op_tok, node, factor)

                return node

            elif typ == grammar_nt.array_literal:
                left_tok = children[0].tok

                items = [self._ArrayItem(p) for p in children[1:-1]]
                return expr.ArrayLiteral(left_tok, items)

            elif typ == grammar_nt.oil_expr_sub:
                return self.Expr(children[0])

            #
            # Oil Lexer Modes
            #

            elif typ == grammar_nt.sh_array_literal:
                left_tok = children[0].tok

                # HACK: When typ is Id.Expr_CastedDummy, the 'tok' field ('opaque')
                # actually has a list of words!
                typ1 = children[1].typ
                assert typ1 == Id.Expr_CastedDummy.enum_id, typ1
                array_words = cast('List[word_t]', children[1].tok)

                return sh_array_literal(left_tok, array_words)

            elif typ == grammar_nt.sh_command_sub:
                return cast(command_sub, children[1].tok)

            elif typ == grammar_nt.braced_var_sub:
                return cast(braced_var_sub, children[1].tok)

            elif typ == grammar_nt.dq_string:
                return cast(double_quoted, children[1].tok)

            elif typ == grammar_nt.sq_string:
                return cast(single_quoted, children[1].tok)

            elif typ == grammar_nt.simple_var_sub:
                return simple_var_sub(children[0].tok)

            else:
                nt_name = self.number2symbol[typ]
                raise AssertionError("PNode type %d (%s) wasn't handled" %
                                     (typ, nt_name))

        else:  # Terminals should have a token
            id_ = tok.id

            if id_ == Id.Expr_Name:
                return expr.Var(tok)

            if id_ in (Id.Expr_DecInt, Id.Expr_BinInt, Id.Expr_OctInt,
                       Id.Expr_HexInt, Id.Expr_Float):
                return expr.Const(tok)

            if id_ in (Id.Expr_Null, Id.Expr_True, Id.Expr_False):
                return expr.Const(tok)

            from core.meta import IdInstance
            raise NotImplementedError(IdInstance(typ))
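
Both versions of Expr delegate chained comparisons to _CompareChain, which is
not shown. A hedged sketch of how 'a < b <= c' could be folded; the
expr.Compare constructor shape is an assumption:

    def _CompareChain(self, children):
        # type: (List[PNode]) -> expr_t
        """Sketch: 'a < b <= c' -> Compare(a, ['<', '<='], [b, c])."""
        left = self.Expr(children[0])
        # Operator tokens; multi-token ops like 'not in' are ignored here
        # for simplicity.
        ops = []
        comparators = []  # type: List[expr_t]
        i = 1
        while i < len(children):
            ops.append(children[i].tok)
            comparators.append(self.Expr(children[i + 1]))
            i += 2
        return expr.Compare(left, ops, comparators)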
Example #5
  def _ReadArrayLiteral(self):
    # type: () -> word_part_t
    """
    a=(1 2 3)

    TODO: See osh/cmd_parse.py:164 for Id.Lit_ArrayLhsOpen, for a[x++]=1

    We want:

    A=(['x']=1 ["x"]=2 [$x$y]=3)

    Maybe allow this as a literal string?  Because I think I've seen it before?
    Or maybe force people to patch to learn the rule.

    A=([x]=4)

    Starts with Lit_Other '[', and then it has Lit_ArrayLhsClose
    Maybe enforce that ALL of them have keys or NONE of them have keys.
    """
    self._Next(lex_mode_e.ShCommand)  # advance past (
    self._Peek()
    if self.cur_token.id != Id.Op_LParen:
      p_die('Expected ( after =, got %r', self.cur_token.val,
            token=self.cur_token)
    left_token = self.cur_token
    paren_spid = self.cur_token.span_id

    # MUST use a new word parser (with same lexer).
    w_parser = self.parse_ctx.MakeWordParser(self.lexer, self.line_reader)
    words = []  # type: List[word__Compound]
    while True:
      w = w_parser.ReadWord(lex_mode_e.ShCommand)

      if isinstance(w, word__Token):
        word_id = word_.CommandId(w)
        if word_id == Id.Right_ShArrayLiteral:
          break
        # Unlike command parsing, array parsing allows embedded \n.
        elif word_id == Id.Op_Newline:
          continue
        else:
          # Token
          p_die('Unexpected token in array literal: %r', w.token.val, word=w)

      assert isinstance(w, word__Compound)  # for MyPy
      words.append(w)

    if not words:  # a=() is empty indexed array
      # ignore for invariant List?
      node = sh_array_literal(left_token, words)  # type: ignore
      node.spids.append(left_token.span_id)
      return node
 
    # If the first one is a key/value pair, then the rest are assumed to be.
    pair = word_.DetectAssocPair(words[0])
    if pair:
      pairs = [pair[0], pair[1]]  # flat representation

      n = len(words)
      for i in xrange(1, n):
        w = words[i]
        pair = word_.DetectAssocPair(w)
        if not pair:
          p_die("Expected associative array pair", word=w)

        pairs.append(pair[0])  # flat representation
        pairs.append(pair[1])

      # invariant List?
      node = word_part.AssocArrayLiteral(left_token, pairs)  # type: ignore
      node.spids.append(paren_spid)
      return node

    words2 = braces.BraceDetectAll(words)
    words3 = word_.TildeDetectAll(words2)
    node = sh_array_literal(left_token, words3)
    node.spids.append(paren_spid)
    return node
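
A closing note on the two detection passes both array paths share; the
function names are real, and the one-line summaries paraphrase their call
sites above:

# words2 = braces.BraceDetectAll(words)   # detect {a,b}-style brace patterns
# words3 = word_.TildeDetectAll(words2)   # detect leading ~ for expansion
# Associative array literals skip both passes (see the 'NOT associative
# arrays' comment in Example #3): their keys and values are used as-is.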