Example #1
def annotate_file(config, infile, outfile, outfmt=None):
    """
    Parse the input cmake file, render it as HTML, and write the result to the
    output file.
    """

    infile_content = infile.read()
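    # With 'auto', detect the line endings from the content itself; clone the
    # config before overriding the setting so the caller's copy is unchanged.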
    if config.line_ending == 'auto':
        detected = __main__.detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
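    # Build the parse database: the standard statement parsers extended with
    # legacy kwargs specifications derived from the configured fn_spec.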
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_legacy_parse(config.fn_spec).kwargs)
    parse_tree = parser.parse(tokens, parse_db)

    if outfmt == "page":
        html_content = render.get_html(parse_tree, fullpage=True)
        outfile.write(html_content)
        return
    if outfmt == "stub":
        html_content = render.get_html(parse_tree, fullpage=False)
        outfile.write(html_content)
        return
    raise ValueError("Invalid output format: {}".format(outfmt))
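
A minimal usage sketch for the function above. The configuration object and its construction are assumptions (the excerpt never shows how config is built); only the call shape comes from the code:

import io

config = make_config()  # hypothetical helper; not defined in this excerpt
with open("CMakeLists.txt") as infile:
    outfile = io.StringIO()
    annotate_file(config, infile, outfile, outfmt="page")
    html_page = outfile.getvalue()
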
Example #2
def process_file(config, infile, outfile, dump=None):
  """
  Parse the input cmake file, re-format it, and print to the output file.
  """

  infile_content = infile.read()
  if config.line_ending == 'auto':
    detected = detect_line_endings(infile_content)
    config = config.clone()
    config.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
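  # Each dump stage below short-circuits the pipeline and writes that stage's
  # intermediate representation instead of the formatted output.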
  if dump == "lex":
    for token in tokens:
      outfile.write("{}\n".format(token))
    return
  config.first_token = lexer.get_first_non_whitespace_token(tokens)
  parse_tree = parser.parse(tokens, config.fn_spec)
  if dump == "parse":
    parser.dump_tree([parse_tree], outfile)
    return
  if dump == "markup":
    dump_markup([parse_tree], config, outfile)
    return
  if dump == "html-page":
    html_content = render.get_html(parse_tree, fullpage=True)
    outfile.write(html_content)
    return
  if dump == "html-stub":
    html_content = render.get_html(parse_tree, fullpage=False)
    outfile.write(html_content)
    return

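  # Lay out the parse tree into a box tree, the formatter's intermediate
  # representation of the output.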
  box_tree = formatter.layout_tree(parse_tree, config)
  if dump == "layout":
    infile.seek(0)
    formatter.dump_tree([box_tree], outfile)
    return

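  # Serialize the box tree back to text, prefixed with a byte-order mark if
  # the configuration requests one.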
  text = formatter.write_tree(box_tree, config, infile_content)
  if config.emit_byteorder_mark:
    outfile.write("\ufeff")
  outfile.write(text)
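
For context, a sketch of how the dump stages might be driven from a script; apart from the call signature, everything here is an assumption (in particular how config is obtained):

import sys

config = make_config()  # hypothetical helper; not defined in this excerpt
for stage in ("lex", "parse", "markup", "layout", None):
  with open("CMakeLists.txt") as infile:
    process_file(config, infile, sys.stdout, dump=stage)  # None = full format
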
Example #3
def process_file(config, infile, outfile, dump=None):
    """
    Parse the input cmake file, re-format it, and print to the output file.
    """

    infile_content = infile.read()
    if config.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_tree = parser.parse(tokens, config.fn_spec)
    if dump == "parse":
        parser.dump_tree([parse_tree], outfile)
        return
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return
    if dump == "html-page":
        html_content = render.get_html(parse_tree, fullpage=True)
        outfile.write(html_content)
        return
    if dump == "html-stub":
        html_content = render.get_html(parse_tree, fullpage=False)
        outfile.write(html_content)
        return

    box_tree = formatter.layout_tree(parse_tree, config)
    if dump == "layout":
        infile.seek(0)
        formatter.dump_tree([box_tree], outfile)
        return

    text = formatter.write_tree(box_tree, config, infile_content)
    if config.emit_byteorder_mark:
        outfile.write("\ufeff")
    outfile.write(text)
Example #4
def annotate_file(config, infile, outfile, outfmt=None):
  """
  Parse the input cmake file, render it as HTML, and write the result to the
  output file.
  """

  infile_content = infile.read()
  if config.format.line_ending == 'auto':
    detected = __main__.detect_line_endings(infile_content)
    config = config.clone()
    config.format.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
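  # Newer parse API than Example #1: build the parse database, wrap it in an
  # explicit ParseContext, and parse against that context.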
  parse_db = parse_funs.get_parse_db()
  parse_db.update(parse_funs.get_funtree(config.parse.fn_spec))
  ctx = parse.ParseContext(parse_db)
  parse_tree = parse.parse(tokens, ctx)

  if outfmt == "page":
    html_content = render.get_html(parse_tree, fullpage=True)
    outfile.write(html_content)
    return
  if outfmt == "stub":
    html_content = render.get_html(parse_tree, fullpage=False)
    outfile.write(html_content)
    return
  if outfmt == "iframe":
    html_content = render.get_html(parse_tree, fullpage=True)
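    # EMBED_TPL's line at index 2 is treated as a placeholder: the lines
    # before it are written first, then the rendered HTML, then the rest.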
    wrap_lines = EMBED_TPL.split("\n")
    for line in wrap_lines[:2]:
      outfile.write(line)
      outfile.write("\n")
    outfile.write(html_content)
    for line in wrap_lines[3:]:
      outfile.write(line)
      outfile.write("\n")
    return

  raise ValueError("Invalid output format: {}".format(outfmt))
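
EMBED_TPL itself does not appear in the excerpt; the [:2] and [3:] slices only make sense if the line at index 2 is a placeholder that the rendered HTML replaces. A hypothetical template of that shape, purely to illustrate the indexing (not the tool's actual template):

EMBED_TPL = "\n".join([
  "<!DOCTYPE html>",      # wrap_lines[0]
  "<div class='embed'>",  # wrap_lines[1]
  "{placeholder}",        # wrap_lines[2]: skipped, html_content goes here
  "</div>",               # wrap_lines[3]
])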