Example 1
def process_file(config, infile, outfile, dump=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    infile_content = infile.read()
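    # With 'auto' line endings, detect the dominant ending in the raw input
    # and clone the config so the caller's instance is not mutated.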
    if config.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
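    # Build the parse database and merge in parse rules derived from the
    # configured (legacy) function specifications.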
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_legacy_parse(config.fn_spec).kwargs)
    parse_tree = parser.parse(tokens, parse_db)
    if dump == "parse":
        parser.dump_tree([parse_tree], outfile)
        return
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return

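    # Lay out the parse tree into a box tree for formatting.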
    box_tree = formatter.layout_tree(parse_tree, config)
    if dump == "layout":
        formatter.dump_tree([box_tree], outfile)
        return

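    # Serialize the box tree back to text, prefixed with a byte-order mark
    # if configured.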
    text = formatter.write_tree(box_tree, config, infile_content)
    if config.emit_byteorder_mark:
        outfile.write("\ufeff")
    outfile.write(text)
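
A minimal usage sketch for this variant. Hedged: the snippet above is an excerpt that relies on module-level imports (lexer, parser, formatter, parse_funs, ...) from the surrounding package, and it does not show how the config object is built, so the construction below is a hypothetical placeholder; only the process_file signature is taken from the example.

import io

config = make_config()  # hypothetical helper; config construction is not shown above

with open("CMakeLists.txt", encoding="utf-8") as infile:
    outfile = io.StringIO()
    process_file(config, infile, outfile)  # full re-format
    print(outfile.getvalue(), end="")

with open("CMakeLists.txt", encoding="utf-8") as infile:
    outfile = io.StringIO()
    process_file(config, infile, outfile, dump="lex")  # token stream only
    print(outfile.getvalue(), end="")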
Example 2
def process_file(config, infile, outfile, dump=None):
  """
  Parse the input cmake file, re-format it, and print to the output file.
  """

  infile_content = infile.read()
  if config.line_ending == 'auto':
    detected = detect_line_endings(infile_content)
    config = config.clone()
    config.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
  if dump == 'lex':
    for token in tokens:
      outfile.write('{}\n'.format(token))
    return
  config.first_token = lexer.get_first_non_whitespace_token(tokens)
  parse_tree = parser.parse(tokens, config.fn_spec)
  if dump == 'parse':
    parser.dump_tree([parse_tree], outfile)
    return
  box_tree = formatter.layout_tree(parse_tree, config)
  if dump == 'layout':
    formatter.dump_tree([box_tree], outfile)
    return
  text = formatter.write_tree(box_tree, config, infile_content)
  outfile.write(text)
Example 3
def process_file(config, infile_content, dump=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    outfile = io.StringIO(newline='')
    if config.format.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.format.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return outfile.getvalue(), True
    first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_funtree(config.parse.fn_spec))

    if dump == "parsedb":
        dump_parsedb(parse_db, outfile)
        return outfile.getvalue(), True

    ctx = parse.ParseContext(parse_db, config=config)
    parse_tree = parse.parse(tokens, ctx)
    if dump == "parse":
        dump_parse([parse_tree], outfile)
        return outfile.getvalue(), True
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return outfile.getvalue(), True

    box_tree = formatter.layout_tree(parse_tree,
                                     config,
                                     first_token=first_token)
    if dump == "layout":
        formatter.dump_tree([box_tree], outfile)
        return outfile.getvalue(), True

    outstr = formatter.write_tree(box_tree, config, infile_content)
    if config.encode.emit_byteorder_mark:
        outstr = "\ufeff" + outstr

    return (outstr, box_tree.reflow_valid)
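
This variant takes the file content as a string and returns a (text, reflow_valid) pair instead of writing to a stream. A hedged sketch of the caller side (reading reflow_valid as a layout-success flag is an interpretation of box_tree.reflow_valid, not confirmed by the snippet):

with open("CMakeLists.txt", encoding="utf-8") as infile:
    content = infile.read()

outstr, reflow_valid = process_file(config, content)  # config as in Example 1
if not reflow_valid:
    print("warning: some constructs could not be reflowed")
print(outstr, end="")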
Example 4
def process_file(config, infile, outfile, dump=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    infile_content = infile.read()
    if config.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_tree = parser.parse(tokens, config.fn_spec)
    if dump == "parse":
        parser.dump_tree([parse_tree], outfile)
        return
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return
    if dump == "html-page":
        html_content = render.get_html(parse_tree, fullpage=True)
        outfile.write(html_content)
        return
    if dump == "html-stub":
        html_content = render.get_html(parse_tree, fullpage=False)
        outfile.write(html_content)
        return

    box_tree = formatter.layout_tree(parse_tree, config)
    if dump == "layout":
        infile.seek(0)
        formatter.dump_tree([box_tree], outfile)
        return

    text = formatter.write_tree(box_tree, config, infile_content)
    if config.emit_byteorder_mark:
        outfile.write("\ufeff")
    outfile.write(text)
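
This variant adds html-page and html-stub dump modes. A hedged sketch of driving one of them (config construction as in the sketch after Example 1):

with open("CMakeLists.txt", encoding="utf-8") as infile, \
     open("CMakeLists.html", "w", encoding="utf-8") as outfile:
    process_file(config, infile, outfile, dump="html-page")  # full HTML page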
Example 5
def process_file(config, infile, outfile, dump=None):
  """
  Parse the input cmake file, re-format it, and print to the output file.
  """

  infile_content = infile.read()
  if config.line_ending == 'auto':
    detected = detect_line_endings(infile_content)
    config = config.clone()
    config.set_line_ending(detected)
  tokens = lexer.tokenize(infile_content)
  if dump == "lex":
    for token in tokens:
      outfile.write("{}\n".format(token))
    return
  config.first_token = lexer.get_first_non_whitespace_token(tokens)
  parse_tree = parser.parse(tokens, config.fn_spec)
  if dump == "parse":
    parser.dump_tree([parse_tree], outfile)
    return
  if dump == "markup":
    dump_markup([parse_tree], config, outfile)
    return
  if dump == "html-page":
    html_content = render.get_html(parse_tree, fullpage=True)
    outfile.write(html_content)
    return
  if dump == "html-stub":
    html_content = render.get_html(parse_tree, fullpage=False)
    outfile.write(html_content)
    return

  box_tree = formatter.layout_tree(parse_tree, config)
  if dump == "layout":
    infile.seek(0)
    formatter.dump_tree([box_tree], outfile)
    return

  text = formatter.write_tree(box_tree, config, infile_content)
  if config.emit_byteorder_mark:
    outfile.write("\ufeff")
  outfile.write(text)
Example 6
def process_file(config, infile_content, dump=None, extra=None):
    """
  Parse the input cmake file, re-format it, and print to the output file.
  """

    outfile = io.StringIO(newline='')
    if config.line_ending == 'auto':
        detected = detect_line_endings(infile_content)
        config = config.clone()
        config.set_line_ending(detected)
    tokens = lexer.tokenize(infile_content)
    if dump == "lex":
        for token in tokens:
            outfile.write("{}\n".format(token))
        return outfile.getvalue()
    config.first_token = lexer.get_first_non_whitespace_token(tokens)
    parse_db = parse_funs.get_parse_db()
    parse_db.update(parse_funs.get_legacy_parse(config.fn_spec).kwargs)
    parse_tree = parser.parse(tokens, parse_db)
    if dump == "parse":
        parser.dump_tree([parse_tree], outfile)
        return outfile.getvalue()
    if dump == "markup":
        dump_markup([parse_tree], config, outfile)
        return outfile.getvalue()

    box_tree = formatter.layout_tree(parse_tree, config)
    if dump == "layout":
        formatter.dump_tree([box_tree], outfile)
        return outfile.getvalue()

    if extra is not None:
        extra["reflow_valid"] = box_tree.reflow_valid

    outstr = formatter.write_tree(box_tree, config, infile_content)
    if config.emit_byteorder_mark:
        return "\ufeff" + outstr
    return outstr
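
Here the reflow flag is reported through the optional extra dict rather than the return value. A hedged sketch of a caller reading it (the caller-side names are illustrative):

extra = {}
outstr = process_file(config, content, extra=extra)
# extra is only populated when no dump mode returns early.
if not extra.get("reflow_valid", True):
    print("warning: reflow was not valid somewhere in the output")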