Example #1
File: gen_cmake.py  Project: honza1a/v8
def FormatCMake(contents):
    from cmake_format import configuration, lexer, parse, formatter
    cfg = configuration.Configuration()
    tokens = lexer.tokenize(contents)
    parse_tree = parse.parse(tokens)
    box_tree = formatter.layout_tree(parse_tree, cfg)
    return formatter.write_tree(box_tree, cfg, contents)
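A minimal usage sketch for this helper; the CMake snippet below is invented for illustration:

# Hedged sketch: FormatCMake takes raw file contents and returns the
# re-formatted text as a string.
print(FormatCMake("add_library( foo   foo.cc bar.cc )\n"))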
Example #2
def process_file(config, infile, outfile, dump=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    infile_content = infile.read()
    if config.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_legacy_parse(config.fn_spec).kwargs)
    parse_tree = parser.parse(tokens, parse_db)
    if dump == "parse":
        parser.dump_tree([parse_tree], outfile)
        return
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return

    box_tree = formatter.layout_tree(parse_tree, config)
    if dump == "layout":
        formatter.dump_tree([box_tree], outfile)
        return

    text = formatter.write_tree(box_tree, config, infile_content)
    if config.emit_byteorder_mark:
        outfile.write("\ufeff")
    outfile.write(text)
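A hedged sketch of driving this variant with an in-memory output stream; the Configuration import follows Example #1, and the input path is hypothetical:

import io
from cmake_format import configuration

config = configuration.Configuration()  # assumed default config object
with open("CMakeLists.txt") as infile:  # hypothetical input file
    out = io.StringIO()
    process_file(config, infile, out, dump=None)
    formatted = out.getvalue()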
Example #3
def annotate_file(config, infile, outfile, outfmt=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    infile_content = infile.read()
    if config.line_ending == 'auto':
        detected = __main__.detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_legacy_parse(config.fn_spec).kwargs)
    parse_tree = parser.parse(tokens, parse_db)

    if outfmt == "page":
        html_content = render.get_html(parse_tree, fullpage=True)
        outfile.write(html_content)
        return
    if outfmt == "stub":
        html_content = render.get_html(parse_tree, fullpage=False)
        outfile.write(html_content)
        return
    raise ValueError("Invalid output format: {}".format(outfmt))
Example #4
def assert_lex(test, input_str, expected_types):
    """
  Run the lexer on the input string and assert that the result tokens match
  the expected
  """
    test.assertEqual(expected_types,
                     [tok.type for tok in lexer.tokenize(input_str)])
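A hedged sketch of a test built on this helper, assuming assert_lex is in scope; the TokenType member names and the exact token sequence are assumptions about the cmake_format lexer:

import unittest
from cmake_format import lexer

class TestLex(unittest.TestCase):
    def test_simple_statement(self):
        # Assumed lexing of 'add_library(foo)\n': command word, parens
        # around an argument word, then the trailing newline.
        assert_lex(self, "add_library(foo)\n",
                   [lexer.TokenType.WORD,
                    lexer.TokenType.LEFT_PAREN,
                    lexer.TokenType.WORD,
                    lexer.TokenType.RIGHT_PAREN,
                    lexer.TokenType.NEWLINE])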
Example #5
def process_file(config, infile, outfile, dump=None):
  """
  Parse the input cmake file, re-format it, and print to the output file.
  """

  infile_content = infile.read()
  if config.line_ending == 'auto':
    detected = detect_line_endings(infile_content)
    config = config.clone()
    config.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
  if dump == 'lex':
    for token in tokens:
      outfile.write('{}\n'.format(token))
    return
  config.first_token = lexer.get_first_non_whitespace_token(tokens)
  parse_tree = parser.parse(tokens, config.fn_spec)
  if dump == 'parse':
    parser.dump_tree([parse_tree], outfile)
    return
  box_tree = formatter.layout_tree(parse_tree, config)
  if dump == 'layout':
    formatter.dump_tree([box_tree], outfile)
    return
  text = formatter.write_tree(box_tree, config, infile_content)
  outfile.write(text)
Example #6
  def do_type_test(self, input_str, expect_tree):
    """
    Run the parser to get the fst, then compare the result to the types in the
    ``expect_tree`` tuple tree.
    """
    tokens = lexer.tokenize(input_str)
    fst_root = parser.parse(tokens, self.config.fn_spec)
    assert_tree_type(self, [fst_root], expect_tree)
Example #7
  def do_type_test(self, input_str, expect_tree):
    """
    Run the parser to get the fst, then compare the result to the types in the
    ``expect_tree`` tuple tree.
    """
    tokens = lexer.tokenize(input_str)
    fst_root = parser.parse(tokens, self.config.fn_spec)
    assert_tree_type(self, [fst_root], expect_tree)
Example #8
def assert_parse(test, input_str, expect_tree):
    """
  Run the parser to get the fst, then compare the result to the types in the
  ``expect_tree`` tuple tree.
  """
    tokens = lexer.tokenize(input_str)
    fst_root = parse.parse(tokens, test.parse_ctx)
    assert_parse_tree(test, [fst_root], expect_tree)
Example #9
  def assert_tok_types(self, input_str, expected_types):
    """
    Run the lexer on the input string and assert that the resulting token
    types match the expected list.
    """

    self.assertEqual(expected_types,
                     [tok.type for tok in lexer.tokenize(input_str)])
Example #10
  def parse_file(self, filepath):
    """Parse one file. Read the content, tokenize, and parse.
    """
    if not os.path.exists(filepath):
      logger.warning("%s does not exist", filepath)
      return
    with io.open(filepath, "r", encoding="utf-8") as infile:
      infile_content = infile.read()
    tokens = lexer.tokenize(infile_content)
    _ = parse.parse(tokens, self.get_db())
Example #11
def process_file(config, infile, outfile):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    pretty_printer = formatter.TreePrinter(config, outfile)
    tokens = lexer.tokenize(infile.read())
    tok_seqs = parser.digest_tokens(tokens)
    fst = parser.construct_fst(tok_seqs)
    pretty_printer.print_node(fst)
Example #12
def assert_layout(test, input_str, expect_tree, strip_len=0):
    """
  Run the formatter on the input string and assert that the result matches
  the output string
  """

    input_str = strip_indent(input_str, strip_len)
    tokens = lexer.tokenize(input_str)
    parse_tree = parse.parse(tokens, test.parse_db)
    box_tree = formatter.layout_tree(parse_tree, test.config)
    assert_layout_tree(test, [box_tree], expect_tree)
Example #13
  def do_layout_test(self, input_str, expect_tree, strip_len=6):
    """
    Run the formatter on the input string and assert that the resulting
    layout tree matches the expected tree.
    """

    input_str = strip_indent(input_str, strip_len)
    tokens = lexer.tokenize(input_str)
    parse_tree = parser.parse(tokens, self.config.fn_spec)
    box_tree = formatter.layout_tree(parse_tree, self.config)
    assert_tree(self, [box_tree], expect_tree)
Example #14
  def do_layout_test(self, input_str, expect_tree, strip_len=6):
    """
    Run the formatter on the input string and assert that the resulting
    layout tree matches the expected tree.
    """

    input_str = strip_indent(input_str, strip_len)
    tokens = lexer.tokenize(input_str)
    parse_tree = parser.parse(tokens, self.config.fn_spec)
    box_tree = formatter.layout_tree(parse_tree, self.config)
    assert_tree(self, [box_tree], expect_tree)
Example #15
def main():
  """
  Dump digested tokens or full-syntax-tree to stdout for debugging purposes.
  """
  import argparse
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('infile')
  args = parser.parse_args()

  with open(args.infile, 'r') as infile:
    tokens = lexer.tokenize(infile.read())
    rootnode = parse(tokens)

  dump_tree([rootnode], sys.stdout)
Example #16
def main():
    """
  Dump digested tokens or full-syntax-tree to stdout for debugging purposes.
  """
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('infile')
    args = parser.parse_args()

    with open(args.infile, 'r') as infile:
        tokens = lexer.tokenize(infile.read())
        rootnode = parse(tokens)

    dump_tree([rootnode], sys.stdout)
Example #17
def process_file(config, infile, outfile):
  """
  Parse the input cmake file, re-format it, and print to the output file.
  """

  pretty_printer = formatter.TreePrinter(config, outfile)
  infile_content = infile.read()
  if config.line_ending == 'auto':
    detected = detect_line_endings(infile_content)
    config.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
  tok_seqs = parser.digest_tokens(tokens)
  fst = parser.construct_fst(tok_seqs)
  pretty_printer.print_node(fst)
Example #18
def process_file(config, infile_content):
  """
  Parse the input cmake file, return the parse tree
  """

  if config.format.line_ending == 'auto':
    detected = __main__.detect_line_endings(infile_content)
    config = config.clone()
    config.set_line_ending(detected)

  tokens = lexer.tokenize(infile_content)
  parse_db = parse_funs.get_parse_db()
  parse_db.update(parse_funs.get_legacy_parse(config.parse.fn_spec).kwargs)
  ctx = parse.ParseContext(parse_db, config=config)
  parse_tree = parse.parse(tokens, ctx)
  parse_tree.build_ancestry()
  return parse_tree
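Unlike the stream-based variants above, this version returns the parse tree directly; a hedged calling sketch, assuming the newer config object with .format and .parse sub-sections:

cfg = configuration.Configuration()  # assumed cmakelang-style config
tree = process_file(cfg, "add_library(foo foo.cc)\n")  # snippet invented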
Example #19
def process_file(config, local_ctx, infile_content):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    if config.format.line_ending == 'auto':
        detected = __main__.detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)

    basic_checker.check_basics(config, local_ctx, infile_content)
    tokens = lexer.tokenize(infile_content)
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_legacy_parse(config.parse.fn_spec).kwargs)
    ctx = parse.ParseContext(parse_db, local_ctx, config)
    parse_tree = parse.parse(tokens, ctx)
    parse_tree.build_ancestry()
    basic_checker.check_parse_tree(config, local_ctx, parse_tree)
Example #20
def process_file(config, infile_content, dump=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    outfile = io.StringIO(newline='')
    if config.format.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.format.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return outfile.getvalue(), True
    first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_funtree(config.parse.fn_spec))

    if dump == "parsedb":
        dump_parsedb(parse_db, outfile)
        return outfile.getvalue(), True

    ctx = parse.ParseContext(parse_db, config=config)
    parse_tree = parse.parse(tokens, ctx)
    if dump == "parse":
        dump_parse([parse_tree], outfile)
        return outfile.getvalue(), True
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return outfile.getvalue(), True

    box_tree = formatter.layout_tree(parse_tree,
                                     config,
                                     first_token=first_token)
    if dump == "layout":
        formatter.dump_tree([box_tree], outfile)
        return outfile.getvalue(), True

    outstr = formatter.write_tree(box_tree, config, infile_content)
    if config.encode.emit_byteorder_mark:
        outstr = "\ufeff" + outstr

    return (outstr, box_tree.reflow_valid)
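A hedged sketch of consuming this variant's return value; with dump=None it yields the formatted text plus the reflow-validity flag:

outstr, reflow_valid = process_file(config, infile_content)
if not reflow_valid:
    # Assumed meaning: some line could not be laid out within the
    # configured column limit.
    print("warning: output exceeds the configured line width")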
Example #21
def process_file(config, infile, outfile, dump=None):
  """
  Parse the input cmake file, re-format it, and print to the output file.
  """

  infile_content = infile.read()
  if config.line_ending == 'auto':
    detected = detect_line_endings(infile_content)
    config = config.clone()
    config.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
  if dump == "lex":
    for token in tokens:
      outfile.write("{}\n".format(token))
    return
  config.first_token = lexer.get_first_non_whitespace_token(tokens)
  parse_tree = parser.parse(tokens, config.fn_spec)
  if dump == "parse":
    parser.dump_tree([parse_tree], outfile)
    return
  if dump == "markup":
    dump_markup([parse_tree], config, outfile)
    return
  if dump == "html-page":
    html_content = render.get_html(parse_tree, fullpage=True)
    outfile.write(html_content)
    return
  if dump == "html-stub":
    html_content = render.get_html(parse_tree, fullpage=False)
    outfile.write(html_content)
    return

  box_tree = formatter.layout_tree(parse_tree, config)
  if dump == "layout":
    infile.seek(0)
    formatter.dump_tree([box_tree], outfile)
    return

  text = formatter.write_tree(box_tree, config, infile_content)
  if config.emit_byteorder_mark:
    outfile.write("\ufeff")
  outfile.write(text)
Example #22
def process_file(config, infile, outfile, dump=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    infile_content = infile.read()
    if config.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_tree = parser.parse(tokens, config.fn_spec)
    if dump == "parse":
        parser.dump_tree([parse_tree], outfile)
        return
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return
    if dump == "html-page":
        html_content = render.get_html(parse_tree, fullpage=True)
        outfile.write(html_content)
        return
    if dump == "html-stub":
        html_content = render.get_html(parse_tree, fullpage=False)
        outfile.write(html_content)
        return

    box_tree = formatter.layout_tree(parse_tree, config)
    if dump == "layout":
        infile.seek(0)
        formatter.dump_tree([box_tree], outfile)
        return

    text = formatter.write_tree(box_tree, config, infile_content)
    if config.emit_byteorder_mark:
        outfile.write("\ufeff")
    outfile.write(text)
Example #23
def process_file(config, infile_content, dump=None, extra=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    outfile = io.StringIO(newline='')
    if config.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return outfile.getvalue()
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_legacy_parse(config.fn_spec).kwargs)
    parse_tree = parser.parse(tokens, parse_db)
    if dump == "parse":
        parser.dump_tree([parse_tree], outfile)
        return outfile.getvalue()
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return outfile.getvalue()

    box_tree = formatter.layout_tree(parse_tree, config)
    if dump == "layout":
        formatter.dump_tree([box_tree], outfile)
        return outfile.getvalue()

    if extra is not None:
        extra["reflow_valid"] = box_tree.reflow_valid

    outstr = formatter.write_tree(box_tree, config, infile_content)
    if config.emit_byteorder_mark:
        return "\ufeff" + outstr
    return outstr
Example #24
def annotate_file(config, infile, outfile, outfmt=None):
  """
  Parse the input cmake file, annotate it, and print HTML to the output file.
  """

  infile_content = infile.read()
  if config.format.line_ending == 'auto':
    detected = __main__.detect_line_endings(infile_content)
    config = config.clone()
    config.format.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
  parse_db = parse_funs.get_parse_db()
  parse_db.update(parse_funs.get_funtree(config.parse.fn_spec))
  ctx = parse.ParseContext(parse_db)
  parse_tree = parse.parse(tokens, ctx)

  if outfmt == "page":
    html_content = render.get_html(parse_tree, fullpage=True)
    outfile.write(html_content)
    return
  if outfmt == "stub":
    html_content = render.get_html(parse_tree, fullpage=False)
    outfile.write(html_content)
    return
  if outfmt == "iframe":
    html_content = render.get_html(parse_tree, fullpage=True)
    wrap_lines = EMBED_TPL.split("\n")
    for line in wrap_lines[:2]:
      outfile.write(line)
      outfile.write("\n")
    outfile.write(html_content)
    for line in wrap_lines[3:]:
      outfile.write(line)
      outfile.write("\n")
    return

  raise ValueError("Invalid output format: {}".format(outfmt))
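A hedged sketch of rendering one file as a standalone HTML page with this helper; the paths and config object are hypothetical:

import io

with io.open("CMakeLists.txt", encoding="utf-8") as infile, \
     io.open("CMakeLists.html", "w", encoding="utf-8") as outfile:
  annotate_file(cfg, infile, outfile, outfmt="page")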
Example #25
def main():
    """
  Dump digested tokens or full-syntax-tree to stdout for debugging purposes.
  """
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('infile')
    subparsers = parser.add_subparsers(dest='command')
    subparsers.add_parser('dump-digest')
    subparsers.add_parser('dump-tree')

    args = parser.parse_args()
    with open(args.infile, 'r') as infile:
        tokens = lexer.tokenize(infile.read())
        tok_seqs = digest_tokens(tokens)
        fst = construct_fst(tok_seqs)

    if args.command == 'dump-digest':
        for seq in tok_seqs:
            print(seq)
    elif args.command == 'dump-tree':
        dump_fst(fst)
    else:
        assert False, "Unknown command {}".format(args.command)
Example #26
def inner_main():
    """Parse arguments, open files, start work."""
    logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")

    argparser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=USAGE_STRING)

    setup_argparse(argparser)
    args = argparser.parse_args()
    logging.getLogger().setLevel(getattr(logging, args.log_level.upper()))

    if args.outfile_path is None:
        args.outfile_path = '-'

    if '-' in args.infilepaths:
        assert len(args.infilepaths) == 1, \
            "You cannot mix stdin as an input with other input files"

    if args.outfile_path == '-':
        outfile = io.open(os.dup(sys.stdout.fileno()),
                          mode='w',
                          encoding="utf-8",
                          newline='')
    else:
        outfile = io.open(args.outfile_path, 'w', encoding="utf-8", newline='')

    returncode = 0

    cfg = configuration.Configuration()
    collector = NameCollector()
    for infile_path in args.infilepaths:
        # NOTE(josh): have to load config once for every file, because we may pick
        # up a new config file location for each path
        if infile_path == '-':
            infile_path = os.dup(sys.stdin.fileno())

        try:
            infile = io.open(infile_path,
                             mode='r',
                             encoding=cfg.encode.input_encoding,
                             newline='')
        except (IOError, OSError):
            logger.error("Failed to open %s for read", infile_path)
            returncode = 1
            continue

        try:
            with infile:
                infile_content = infile.read()
        except UnicodeDecodeError:
            logger.error("Unable to read %s as %s", infile_path,
                         cfg.encode.input_encoding)
            returncode = 1
            continue

        tokens = lexer.tokenize(infile_content)
        parse_db = parse_funs.get_parse_db()
        ctx = parse.ParseContext(parse_db, config=cfg)
        parse_tree = parse.parse(tokens, ctx)
        parse_tree.build_ancestry()
        collector.collect_names(parse_tree)

    regexes = [
        re.compile(pattern) for pattern in [
            r"[A-Z][A-Z0-9_]+",  # upper snake-case
            r"[a-z][a-z0-9_]+",  # lower snake-case
            r"_[A-Z0-9_]+",  # upper snake-case with underscore prefix
            r"_[a-z0-9_]+",  # lower snake-case with underscore prefix
        ]
    ]

    outmap = {}
    patmap = {}
    for scope, varname in sorted(collector.varnames):
        if scope not in outmap:
            outmap[scope] = {}

        if scope not in patmap:
            patmap[scope] = {}
            for regex in regexes:
                patmap[scope][str(regex)] = 0
            patmap[scope]["other"] = 0

        for regex in regexes:
            if regex.match(varname):
                patmap[scope][str(regex)] += 1
                break
        else:
            patmap[scope]["other"] += 1

        if varname not in outmap[scope]:
            outmap[scope][varname] = 0
        outmap[scope][varname] += 1

    for scope, countmap in sorted(outmap.items()):
        outfile.write("\n{}\n{}\n".format(scope.name, "=" * len(scope.name)))
        for varname, count in sorted(countmap.items()):
            outfile.write("{}: {}\n".format(varname, count))

    for scope, countmap in sorted(patmap.items()):
        outfile.write("\n{}\n{}\n".format(scope.name, "=" * len(scope.name)))
        for varname, count in sorted(countmap.items()):
            outfile.write("{}: {}\n".format(varname, count))

    outfile.close()
    return returncode