def parse(cls, ctx, tokens, _breakstack):
    """
    Consume a parenthetical group of arguments from `tokens` and return the
    parse subtree rooted at this group. `_breakstack` contains a stack of all
    early break conditions that are currently "open".
    """

    assert tokens[0].type == lexer.TokenType.LEFT_PAREN
    tree = TreeNode(NodeType.PARENGROUP)
    lparen = TreeNode(NodeType.LPAREN)
    lparen.children.append(tokens.pop(0))
    tree.children.append(lparen)

    subtree = ConditionalGroupNode.parse(ctx, tokens, [ParenBreaker()])
    tree.children.append(subtree)

    if tokens[0].type != lexer.TokenType.RIGHT_PAREN:
      raise ValueError(
          "Unexpected {} token at {}, expecting r-paren, got {}"
          .format(tokens[0].type.name, tokens[0].get_location(),
                  tokens[0].content))
    rparen = TreeNode(NodeType.RPAREN)
    rparen.children.append(tokens.pop(0))
    tree.children.append(rparen)

    # NOTE(josh): parenthetical groups can have trailing comments because
    # they have closing punctuation
    CommentNode.consume_trailing(ctx, tokens, tree)

    return tree
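
This example and the ones that follow rely on a small "breaker" protocol: each entry on the break stack is a predicate telling the current parser when the next token belongs to an enclosing group. A minimal sketch of that idea follows; the names and signatures are illustrative assumptions, not the library's actual definitions.

# Illustrative sketch only: the real ParenBreaker/should_break live in the
# cmake-format parse utilities and may differ in detail.
class ParenBreaker(object):
  """Predicate that fires when the next token closes a parenthetical group."""

  def __call__(self, token):
    return token.type == lexer.TokenType.RIGHT_PAREN


def should_break(token, breakstack):
  """Return True if any open break condition on the stack matches `token`."""
  return any(breaker(token) for breaker in breakstack)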
Example #2
    def parse(cls, ctx, tokens, flags, breakstack):
        """
        Parse a continuous sequence of flags
        """

        # TODO(josh): use a bespoke FLAGGROUP?
        tree = cls()
        while tokens:
            # Break if the next token belongs to a parent parser, i.e. if it
            # matches a keyword argument of something higher in the stack, or if
            # it closes a parent group.
            if should_break(tokens[0], breakstack):
                break

            # If it is a whitespace token then put it directly in the parse tree at
            # the current depth
            if tokens[0].type in WHITESPACE_TOKENS:
                tree.children.append(tokens.pop(0))
                continue

            # Break if the next token is not a known flag
            if tokens[0].spelling.upper() not in flags:
                break

            # Otherwise it is a flag, so add it to the tree as such
            child = TreeNode(NodeType.FLAG)
            child.children.append(tokens.pop(0))
            CommentNode.consume_trailing(ctx, tokens, child)
            tree.children.append(child)

        return tree
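
These snippets reference a `WHITESPACE_TOKENS` constant without showing its definition. For reading the examples it can be assumed to be the set of token types that carry no syntax, roughly as sketched below (an assumption; the library's actual constant may include additional token types).

# Assumed definition for illustration only.
WHITESPACE_TOKENS = (
    lexer.TokenType.WHITESPACE,
    lexer.TokenType.NEWLINE,
)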
Example #3
    def consume(cls, ctx, tokens):
        """
        Consume a complete statement, removing tokens from the input list and
        returning a STATEMENT node.
        """
        node = cls()

        # Consume the function name
        fnname = tokens[0].spelling.lower()
        node.funnode = funnode = FunctionNameNode.parse(ctx, tokens)
        node.children.append(funnode)

        # Consume whitespace up to the parenthesis
        while tokens and tokens[0].type in WHITESPACE_TOKENS:
            node.children.append(tokens.pop(0))

        # TODO(josh): should the parens belong to the statement node or the
        # group node?
        if tokens[0].type != lexer.TokenType.LEFT_PAREN:
            raise ValueError(
                "Unexpected {} token at {}, expecting l-paren, got {}".format(
                    tokens[0].type.name, tokens[0].get_location(),
                    repr(tokens[0].content)))

        lparen = TreeNode(NodeType.LPAREN)
        lparen.children.append(tokens.pop(0))
        node.children.append(lparen)

        while tokens and tokens[0].type in WHITESPACE_TOKENS:
            node.children.append(tokens.pop(0))
            continue

        breakstack = [ParenBreaker()]

        parse_fun = ctx.parse_db.get(fnname, None)
        if parse_fun is None:
            # If the parse_db provides a "_default" then use that. Otherwise use the
            # standard parser with no kwargs or flags.
            parse_fun = ctx.parse_db.get("_default", StandardParser())
        node.argtree = subtree = parse_fun(ctx, tokens, breakstack)
        node.children.append(subtree)

        # NOTE(josh): technically we may have a statement specification with
        # an exact number of arguments. At this point we have broken out of that
        # statement but we might have some comments or whitespace to consume
        while tokens and tokens[0].type != lexer.TokenType.RIGHT_PAREN:
            if tokens[0].type in WHITESPACE_TOKENS:
                node.children.append(tokens.pop(0))
                continue

            if tokens[0].type in COMMENT_TOKENS:
                cnode = CommentNode.consume(ctx, tokens)
                node.children.append(cnode)
                continue

            raise UserError(
                "Unexpected {} token at {}, expecting r-paren, got {}".format(
                    tokens[0].type.name, tokens[0].get_location(),
                    repr(tokens[0].content)))

        if not tokens:
            raise UserError(
                "Unexpected end of token stream while parsing statement:\n {}".
                format(tree_string([node])))

        if tokens[0].type != lexer.TokenType.RIGHT_PAREN:
            raise UserError(
                "Unexpected {} token at {}, expecting r-paren, got {}".format(
                    tokens[0].type.name, tokens[0].get_location(),
                    repr(tokens[0].content)))

        rparen = TreeNode(NodeType.RPAREN)
        rparen.children.append(tokens.pop(0))
        node.children.append(rparen)
        CommentNode.consume_trailing(ctx, tokens, node)

        return node
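
Example #3 dispatches through `ctx.parse_db`, a mapping from lowercase command names to parse callables of the form `(ctx, tokens, breakstack) -> TreeNode`. The sketch below shows how a parser for a user-defined command might be registered, assuming the Example #4 parser below is exposed as `PositionalGroupNode.parse`; the registration helper and class name are assumptions for illustration, not the library's documented interface.

# Hypothetical registration of a parser for a user-defined command so that
# the statement parser above can dispatch to it by name.  PositionalGroupNode
# and parse_db are assumed names for illustration.
def parse_my_command(ctx, tokens, breakstack):
    # Treat every argument as positional; "*" means zero or more.
    return PositionalGroupNode.parse(ctx, tokens, "*", [], breakstack)


def register_custom_parsers(parse_db):
    parse_db["my_command"] = parse_my_command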
Example #4
  def parse(cls, ctx, tokens, npargs, flags, breakstack, sortable=False):
    """
    Parse a continuous sequence of `npargs` positional arguments. If npargs is
    an integer we will consume exactly that many arguments. If it is not an
    integer then it is a string meaning:

    * "?": zero or one
    * "*": zero or more
    * "+": one or more
    """

    tree = cls(sortable=sortable)
    tree.spec = PositionalSpec(npargs, flags)
    nconsumed = 0

    # Strip off any preceding whitespace (note that in most cases this has
    # already been done, but in some cases, such as the kwarg subparser, it
    # hasn't)
    while tokens and tokens[0].type in WHITESPACE_TOKENS:
      tree.children.append(tokens.pop(0))

    # If the first non-whitespace token is a cmake-format tag annotating
    # sortability, then parse it out here and record the annotation
    if tokens and get_tag(tokens[0]) in ("sortable", "sort"):
      tree.sortable = True
    elif tokens and get_tag(tokens[0]) in ("unsortable", "unsort"):
      tree.sortable = False

    while tokens:
      # Break if we have consumed enough positional arguments
      if pargs_are_full(npargs, nconsumed):
        break

      # Break if the next token belongs to a parent parser, i.e. if it
      # matches a keyword argument of something higher in the stack, or if
      # it closes a parent group.
      if should_break(tokens[0], breakstack):
        # NOTE(josh): if npargs is an exact number of arguments, then we
        # shouldn't break on kwarg match from a parent parser. Instead, we
        # should consume the token. This is a hack to deal with
        # ``install(RUNTIME COMPONENT runtime)``. In this case the second
        # occurrence of "runtime" should not match the ``RUNTIME`` keyword
        # and should not break the positional parser.
        # TODO(josh): this is kind of hacky because it will force the positional
        # parser to consume a right parenthesis and will lead to parse errors
        # in the event of a missing positional argument. Such errors will be
        # difficult to debug for the user.
        if not npargs_is_exact(npargs):
          break

        if tokens[0].type == lexer.TokenType.RIGHT_PAREN:
          break

      # If this is the start of a parenthetical group, then parse the group
      # NOTE(josh): syntactically this probably shouldn't be allowed here, but
      # cmake seems to accept it so we probably should too.
      if tokens[0].type == lexer.TokenType.LEFT_PAREN:
        subtree = ParenGroupNode.parse(ctx, tokens, breakstack)
        tree.children.append(subtree)
        continue

      # If it is a whitespace token then put it directly in the parse tree at
      # the current depth
      if tokens[0].type in WHITESPACE_TOKENS:
        tree.children.append(tokens.pop(0))
        continue

      # If it's a comment token not associated with an argument, then put it
      # directly into the parse tree at the current depth
      if tokens[0].type in (lexer.TokenType.COMMENT,
                            lexer.TokenType.BRACKET_COMMENT):
        before = len(tokens)
        child = CommentNode.consume(ctx, tokens)
        assert len(tokens) < before, \
            "CommentNode.consume didn't consume any tokens"
        tree.children.append(child)
        continue

      # Otherwise it is a positional argument, so add it to the tree as such
      if get_normalized_kwarg(tokens[0]) in flags:
        child = TreeNode(NodeType.FLAG)
      else:
        child = TreeNode(NodeType.ARGUMENT)

      child.children.append(tokens.pop(0))
      CommentNode.consume_trailing(ctx, tokens, child)
      tree.children.append(child)
      nconsumed += 1

    return tree
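
Example #4's loop is driven by two helpers, `pargs_are_full` and `npargs_is_exact`, that interpret the `npargs` specification described in the docstring. Minimal sketches consistent with that docstring follow; they are illustrative, and the library's actual implementations may handle more cases.

def npargs_is_exact(npargs):
  # An integer specification demands exactly that many arguments.
  return isinstance(npargs, int)


def pargs_are_full(npargs, nconsumed):
  # Return True once no further positional arguments may be consumed.
  if isinstance(npargs, int):
    return nconsumed >= npargs
  if npargs == "?":
    return nconsumed >= 1
  # "*" and "+" place no upper bound, so they are never "full".
  return False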
Example #5
    def parse(cls, ctx, tokens, npargs, ntup, flags, breakstack):
        """Parse a continuous sequence of `npargs` positional argument pairs.
        If npargs is an integer we will consume exactly that many arguments.
        If it is not an integer then it is a string meaning:

        * "?": zero or one
        * "*": zero or more
        * "+": one or more
        """

        tree = cls()
        subtree = None
        active_depth = tree

        npargs_consumed = 0
        ntup_consumed = 0

        while tokens:
            # Break if we have consumed enough positional arguments
            if pargs_are_full(npargs, npargs_consumed):
                break

            # Break if the next token belongs to a parent parser, i.e. if it
            # matches a keyword argument of something higher in the stack, or if
            # it closes a parent group.
            if should_break(tokens[0], breakstack):
                break

            # Otherwise we will consume the token
            token = tokens.pop(0)

            # If it is a whitespace token then put it directly in the parse tree at
            # the current depth
            if token.type in WHITESPACE_TOKENS:
                active_depth.children.append(token)
                continue

            # If it's a comment token not associated with an argument, then put it
            # directly into the parse tree at the current depth
            if token.type in (lexer.TokenType.COMMENT,
                              lexer.TokenType.BRACKET_COMMENT):
                child = CommentNode()
                tree.children.append(child)
                child.children.append(token)
                continue

            if subtree is None:
                subtree = PositionalGroupNode()
                tree.children.append(subtree)
                ntup_consumed = 0

            # Otherwise it is a positional argument, so add it to the tree as such
            if get_normalized_kwarg(token) in flags:
                child = TreeNode(NodeType.FLAG)
            else:
                child = TreeNode(NodeType.ARGUMENT)

            child.children.append(token)
            CommentNode.consume_trailing(ctx, tokens, child)
            subtree.children.append(child)
            ntup_consumed += 1

            if ntup_consumed >= ntup:
                npargs_consumed += 1
                subtree = None

        return tree
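
Example #5 groups arguments into fixed-size tuples, which is how property-style argument lists such as `set_target_properties(... PROPERTIES <prop> <value> ...)` can be parsed with `ntup=2`. A hedged usage sketch:

# Hypothetical usage inside a kwarg subparser, assuming the class above is
# exposed as TupleGroupNode (the name is an assumption for illustration):
#
#   subtree = TupleGroupNode.parse(
#       ctx, tokens, npargs="+", ntup=2, flags=[], breakstack=breakstack)
#
# Each consumed <prop> <value> pair becomes one PositionalGroupNode child
# holding two ARGUMENT (or FLAG) children.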
Example #6
def parse_add_library_standard(ctx, tokens, breakstack, sortable):
    """
    ::

      add_library(<name> [STATIC | SHARED | MODULE]
                  [EXCLUDE_FROM_ALL]
                  source1 [source2 ...])

    :see: https://cmake.org/cmake/help/v3.0/command/add_library.html
    """
    # pylint: disable=too-many-statements

    parsing_name = 1
    parsing_type = 2
    parsing_flag = 3
    parsing_sources = 4

    tree = TreeNode(NodeType.ARGGROUP)

    # If it is a whitespace token then put it directly in the parse tree at
    # the current depth
    while tokens and tokens[0].type in WHITESPACE_TOKENS:
        tree.children.append(tokens.pop(0))
        continue

    state_ = parsing_name
    parg_group = None
    src_group = None
    active_depth = tree

    flags = ("STATIC", "SHARED", "MODULE", "OBJECT")

    while tokens:
        # This parse function breaks on the first right paren, since
        # parenthetical groups are not allowed. A parenthesis might exist in a
        # filename, but if so that filename should be quoted so it won't show
        # up as a RIGHT_PAREN token.
        if tokens[0].type is TokenType.RIGHT_PAREN:
            break

        # If it is a whitespace token then put it directly in the parse tree at
        # the current depth
        if tokens[0].type in WHITESPACE_TOKENS:
            active_depth.children.append(tokens.pop(0))
            continue

        # If it's a comment token not associated with an argument, then put it
        # directly into the parse tree at the current depth
        if tokens[0].type in (TokenType.COMMENT, TokenType.BRACKET_COMMENT):
            if state_ > parsing_name:
                if get_tag(tokens[0]) in ("unsort", "unsortable"):
                    sortable = False
                elif get_tag(tokens[0]) in ("sort", "sortable"):
                    sortable = True
            child = TreeNode(NodeType.COMMENT)
            active_depth.children.append(child)
            child.children.append(tokens.pop(0))
            continue

        if state_ is parsing_name:
            token = tokens.pop(0)
            parg_group = PositionalGroupNode()
            parg_group.spec = PositionalSpec("+")
            active_depth = parg_group
            tree.children.append(parg_group)
            child = TreeNode(NodeType.ARGUMENT)
            child.children.append(token)
            CommentNode.consume_trailing(ctx, tokens, child)
            parg_group.children.append(child)
            state_ += 1
        elif state_ is parsing_type:
            if get_normalized_kwarg(tokens[0]) in flags:
                token = tokens.pop(0)
                child = TreeNode(NodeType.FLAG)
                child.children.append(token)
                CommentNode.consume_trailing(ctx, tokens, child)
                parg_group.children.append(child)
            state_ += 1
        elif state_ is parsing_flag:
            if get_normalized_kwarg(tokens[0]) == "EXCLUDE_FROM_ALL":
                token = tokens.pop(0)
                child = TreeNode(NodeType.FLAG)
                child.children.append(token)
                CommentNode.consume_trailing(ctx, tokens, child)
                parg_group.children.append(child)
            state_ += 1
            src_group = PositionalGroupNode(sortable=sortable,
                                            tags=["file-list"])
            src_group.spec = PositionalSpec("+")
            active_depth = src_group
            tree.children.append(src_group)
        elif state_ is parsing_sources:
            token = tokens.pop(0)
            child = TreeNode(NodeType.ARGUMENT)
            child.children.append(token)
            CommentNode.consume_trailing(ctx, tokens, child)
            src_group.children.append(child)

            if only_comments_and_whitespace_remain(tokens, breakstack):
                active_depth = tree

    return tree
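
The specialized parser above could be wired into the parse database used by the statement parser in Example #3. The single-callable signature `(ctx, tokens, breakstack)` is taken from that example; the registration function itself and the choice of `sortable=True` are assumptions for illustration.

def populate_db(parse_db):
    # Adapt the four-argument signature above to the (ctx, tokens, breakstack)
    # callable expected by ctx.parse_db lookups in Example #3.
    parse_db["add_library"] = (
        lambda ctx, tokens, breakstack:
        parse_add_library_standard(ctx, tokens, breakstack, sortable=True))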
def parse_add_executable_standard(ctx, tokens, breakstack, sortable):
  """
  ::

    add_executable(<name> [WIN32] [MACOSX_BUNDLE]
                   [EXCLUDE_FROM_ALL]
                   [source1] [source2 ...])

  :see: https://cmake.org/cmake/help/latest/command/add_executable.html#command:add_executable
  """
  # pylint: disable=too-many-statements

  parsing_name = 1
  parsing_flags = 2
  parsing_sources = 3

  tree = TreeNode(NodeType.ARGGROUP)

  # If it is a whitespace token then put it directly in the parse tree at
  # the current depth
  while tokens and tokens[0].type in WHITESPACE_TOKENS:
    tree.children.append(tokens.pop(0))
    continue

  state_ = parsing_name
  parg_group = None
  src_group = None
  active_depth = tree

  while tokens:
    # This parse function breaks on the first right paren, since parenthetical
    # groups are not allowed. A parenthesis might exist in a filename, but
    # if so that filename should be quoted so it won't show up as a RIGHT_PAREN
    # token.
    if tokens[0].type is lexer.TokenType.RIGHT_PAREN:
      break

    # If it is a whitespace token then put it directly in the parse tree at
    # the current depth
    if tokens[0].type in WHITESPACE_TOKENS:
      active_depth.children.append(tokens.pop(0))
      continue

    # If it's a comment token not associated with an argument, then put it
    # directly into the parse tree at the current depth
    if tokens[0].type in (lexer.TokenType.COMMENT,
                          lexer.TokenType.BRACKET_COMMENT):
      if state_ > parsing_name:
        if get_tag(tokens[0]) in ("unsort", "unsortable"):
          sortable = False
        elif get_tag(tokens[0]) in ("unsort", "unsortable"):
          sortable = True
      child = TreeNode(NodeType.COMMENT)
      active_depth.children.append(child)
      child.children.append(tokens.pop(0))
      continue

    if state_ is parsing_name:
      token = tokens.pop(0)
      parg_group = TreeNode(NodeType.PARGGROUP)
      active_depth = parg_group
      tree.children.append(parg_group)
      child = TreeNode(NodeType.ARGUMENT)
      child.children.append(token)
      CommentNode.consume_trailing(ctx, tokens, child)
      parg_group.children.append(child)
      state_ += 1
    elif state_ is parsing_flags:
      if get_normalized_kwarg(tokens[0]) in (
          "WIN32", "MACOSX_BUNDLE", "EXCLUDE_FROM_ALL"):
        token = tokens.pop(0)
        child = TreeNode(NodeType.FLAG)
        child.children.append(token)
        CommentNode.consume_trailing(ctx, tokens, child)
        parg_group.children.append(child)
      else:
        state_ += 1
        src_group = TreeNode(NodeType.PARGGROUP, sortable=sortable)
        active_depth = src_group
        tree.children.append(src_group)
    elif state_ is parsing_sources:
      token = tokens.pop(0)
      child = TreeNode(NodeType.ARGUMENT)
      child.children.append(token)
      CommentNode.consume_trailing(ctx, tokens, child)
      src_group.children.append(child)

      if only_comments_and_whitespace_remain(tokens, breakstack):
        active_depth = tree

  return tree
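
Both of the parsers above pop `active_depth` back up to the top-level group once nothing but trailing comments and whitespace remains, via `only_comments_and_whitespace_remain`. A minimal sketch of that helper, consistent with how it is used here (illustrative, not the library's exact code):

def only_comments_and_whitespace_remain(tokens, breakstack):
  skip = (lexer.TokenType.WHITESPACE,
          lexer.TokenType.NEWLINE,
          lexer.TokenType.COMMENT,
          lexer.TokenType.BRACKET_COMMENT)
  for token in tokens:
    if token.type in skip:
      continue
    # The first significant token either breaks out of the current group...
    if should_break(token, breakstack):
      return True
    # ...or it is a real argument, so more content remains.
    return False
  return True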