Example #1
def collect_results(args, start_time, targets):
    """Walks through buck-out/, collects results for the different buck targets
    and stores them in args.infer_out/results.json.
    """
    collected_reports = {}

    for path in get_output_jars(targets):
        try:
            with zipfile.ZipFile(path) as jar:
                report = load_json_report(jar)
                merge_reports(report, collected_reports)
        except NotFoundInJar:
            pass
        except zipfile.BadZipfile:
            logging.warn('Bad zip file %s', path)

    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)

    with open(json_report, 'w') as file_out:
        json.dump(list(collected_reports.values()), file_out)

    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)
    issues.print_and_save_errors(args.infer_out,
                                 args.project_root,
                                 json_report,
                                 bugs_out,
                                 args.pmd_xml,
                                 console_out=not args.quiet)
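
The helpers load_json_report and merge_reports, and the NotFoundInJar exception, are defined elsewhere in Infer's Python scripts and are not part of this excerpt. A minimal sketch of what they plausibly look like, assuming the per-target report lives at a fixed entry name inside each output jar (the entry name below is illustrative, not taken from the source):

import json

INFER_JSON_REPORT = 'infer/report.json'  # assumed entry name, for illustration

class NotFoundInJar(Exception):
    pass

def load_json_report(jar):
    """Parse the JSON issue list stored inside one Buck output jar."""
    try:
        return json.loads(jar.read(INFER_JSON_REPORT).decode('utf-8'))
    except KeyError:
        # ZipFile.read raises KeyError when the entry is missing
        raise NotFoundInJar

def merge_reports(report, collected_reports):
    """De-duplicate issues across targets, keyed on the serialized row."""
    for row in report:
        collected_reports.setdefault(json.dumps(row, sort_keys=True), row)
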
Example #2
File: buck.py Project: zmyer/infer
 def capture_with_flavors(self):
     if self.keep_going and not self.args.continue_capture:
         self._move_buck_out()
     ret = self._run_buck_with_flavors()
     if ret != os.EX_OK and not self.keep_going:
         return ret
     result_paths = self._get_analysis_result_paths()
     if result_paths is None:
         # uh-oh, the Buck command to extract results paths failed
         return os.EX_SOFTWARE
     merged_reports_path = os.path.join(
         self.args.infer_out, config.JSON_REPORT_FILENAME)
     merged_deps_path = os.path.join(
         self.args.infer_out, config.INFER_BUCK_DEPS_FILENAME)
     self._merge_infer_report_files(result_paths, merged_reports_path)
     if ret != os.EX_OK and self.keep_going:
         self._find_deps_and_merge(merged_deps_path)
     else:
         self._merge_infer_dep_files(result_paths, merged_deps_path)
     infer_out = self.args.infer_out
     json_report = os.path.join(infer_out, config.JSON_REPORT_FILENAME)
     bugs_out = os.path.join(infer_out, config.BUGS_FILENAME)
     issues.print_and_save_errors(infer_out, self.args.project_root,
                                  json_report, bugs_out, self.args.pmd_xml,
                                  console_out=not self.args.quiet)
     return os.EX_OK
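
_run_buck_with_flavors, _get_analysis_result_paths, and the merge helpers are methods defined elsewhere in buck.py. As a guide to the data flow, here is a standalone sketch of the logic _merge_infer_report_files plausibly implements, assuming each entry in result_paths is a per-target infer-out directory holding its own JSON issue list (an assumption, not confirmed by this excerpt):

import json
import logging
import os

def merge_infer_report_files(result_paths, merged_out_path):
    """Hypothetical sketch: concatenate per-target issue lists into one."""
    merged = []
    for result_path in result_paths:
        report = os.path.join(result_path, 'report.json')  # assumed filename
        try:
            with open(report) as f:
                merged.extend(json.load(f))
        except (IOError, ValueError):
            logging.warning('Could not read report in %s', result_path)
    with open(merged_out_path, 'w') as f:
        json.dump(merged, f)
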
Example #3
def collect_results(args, start_time, targets):
    """Walks through buck-gen, collects results for the different buck targets
    and stores them in in args.infer_out/results.csv.
    """
    all_json_rows = set()

    for path in get_output_jars(targets):
        try:
            with zipfile.ZipFile(path) as jar:
                json_rows = load_json_report(jar)
                for row in json_rows:
                    all_json_rows.add(json.dumps(row))
        except NotFoundInJar:
            pass
        except zipfile.BadZipfile:
            logging.warn('Bad zip file %s', path)

    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)

    with open(json_report, 'w') as report:
        json_string = '['
        json_string += ','.join(all_json_rows)
        json_string += ']'
        report.write(json_string)
        report.flush()

    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)
    issues.print_and_save_errors(args.infer_out, args.project_root,
                                 json_report, bugs_out, args.pmd_xml)
Example #4
def collect_results(args, start_time, targets):
    """Walks through buck-out/, collects results for the different buck targets
    and stores them in args.infer_out/results.json.
    """
    all_json_rows = set()

    for path in get_output_jars(targets):
        try:
            with zipfile.ZipFile(path) as jar:
                json_rows = load_json_report(jar)
                for row in json_rows:
                    all_json_rows.add(json.dumps(row))
        except NotFoundInJar:
            pass
        except zipfile.BadZipfile:
            logging.warn('Bad zip file %s', path)

    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)

    with open(json_report, 'w') as report:
        json_string = '['
        json_string += ','.join(all_json_rows)
        json_string += ']'
        report.write(json_string)
        report.flush()

    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)
    issues.print_and_save_errors(args.infer_out, args.project_root,
                                 json_report, bugs_out, args.pmd_xml,
                                 console_out=not args.quiet)
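
Examples #3 and #4 de-duplicate issues by serializing each row with json.dumps and collecting the strings in a set; the handwritten '[' ... ']' assembly then yields valid JSON because every element is already a serialized object. One caveat: json.dumps preserves key order, so two equal rows can serialize differently and defeat the de-duplication. Passing sort_keys=True avoids that; a self-contained illustration (the sample rows are made up):

import json

# Two copies of the same issue, keys in a different order.
rows = [{'bug_type': 'NULL_DEREFERENCE', 'file': 'A.java'},
        {'file': 'A.java', 'bug_type': 'NULL_DEREFERENCE'}]

unique = set(json.dumps(row, sort_keys=True) for row in rows)
assert len(unique) == 1  # duplicates collapse once key order is normalized

json_string = '[' + ','.join(unique) + ']'  # valid JSON array
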
Example #5
 def capture_with_flavors(self):
     if self.keep_going:
         self._move_buck_out()
     ret = self._run_buck_with_flavors()
     if ret != os.EX_OK and not self.keep_going:
         return ret
     result_paths = self._get_analysis_result_paths()
     if result_paths is None:
         # uh-oh, the Buck command to extract results paths failed
         return os.EX_SOFTWARE
     merged_reports_path = os.path.join(self.args.infer_out,
                                        config.JSON_REPORT_FILENAME)
     merged_deps_path = os.path.join(self.args.infer_out,
                                     config.INFER_BUCK_DEPS_FILENAME)
     self._merge_infer_report_files(result_paths, merged_reports_path)
     if ret != os.EX_OK and self.keep_going:
         self._find_deps_and_merge(merged_deps_path)
     else:
         self._merge_infer_dep_files(result_paths, merged_deps_path)
     infer_out = self.args.infer_out
     json_report = os.path.join(infer_out, config.JSON_REPORT_FILENAME)
     bugs_out = os.path.join(infer_out, config.BUGS_FILENAME)
     issues.print_and_save_errors(infer_out,
                                  self.args.project_root,
                                  json_report,
                                  bugs_out,
                                  self.args.pmd_xml,
                                  console_out=not self.args.quiet)
     return os.EX_OK
Example #6
def collect_results(buck_args, infer_args, start_time, targets):
    """Walks through buck-out/, collects results for the different buck targets
    and stores them in infer_args.infer_out/results.json.
    """
    collected_reports = {}
    collected_costs_reports = []

    for path in get_output_jars(buck_args, targets):
        try:
            with zipfile.ZipFile(path) as jar:
                report = load_json_report(jar, INFER_JSON_REPORT)
                costs_report = load_json_report(jar, INFER_JSON_COSTS_REPORT)
                merge_reports(report, collected_reports)
                # No need to de-duplicate elements in costs-report, merge all
                collected_costs_reports += costs_report
        except NotFoundInJar:
            pass
        except zipfile.BadZipfile:
            logging.warn('Bad zip file %s', path)

    json_report = os.path.join(infer_args.infer_out,
                               config.JSON_REPORT_FILENAME)
    json_costs_report = os.path.join(infer_args.infer_out,
                                     config.JSON_COSTS_REPORT_FILENAME)

    with open(json_report, 'w') as report_out, \
            open(json_costs_report, 'w') as costs_report_out:
        json.dump(list(collected_reports.values()), report_out)
        json.dump(collected_costs_reports, costs_report_out)

    bugs_out = os.path.join(infer_args.infer_out, config.BUGS_FILENAME)
    issues.print_and_save_errors(infer_args.infer_out, infer_args.project_root,
                                 json_report, bugs_out, infer_args.pmd_xml,
                                 console_out=not infer_args.quiet)
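
This variant parameterizes the loader by entry name and merges the costs reports by plain concatenation, since cost rows need no de-duplication. A sketch of the two-argument loader, in the same spirit as the sketch after Example #1 (INFER_JSON_REPORT and INFER_JSON_COSTS_REPORT are constants whose values the excerpt does not show; the sketch assumes they name entries inside the jar):

def load_json_report(jar, report_name):
    # report_name is e.g. INFER_JSON_REPORT or INFER_JSON_COSTS_REPORT.
    try:
        return json.loads(jar.read(report_name).decode('utf-8'))
    except KeyError:
        raise NotFoundInJar  # entry absent from this jar
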
Example #7
def collect_results(args, start_time, targets):
    """Walks through buck-out/, collects results for the different buck targets
    and stores them in args.infer_out/results.json.
    """
    all_json_rows = []

    for path in get_output_jars(targets):
        try:
            with zipfile.ZipFile(path) as jar:
                json_rows = load_json_report(jar)
                for row in json_rows:
                    all_json_rows.append(row)
        except NotFoundInJar:
            pass
        except zipfile.BadZipfile:
            logging.warn('Bad zip file %s', path)

    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)

    with open(json_report, 'w') as file_out:
        json.dump(all_json_rows, file_out)

    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)
    issues.print_and_save_errors(args.infer_out, args.project_root,
                                 json_report, bugs_out, args.pmd_xml,
                                 console_out=not args.quiet)
Example #8
def main():
    sys_argv = list(map(utils.decode, sys.argv))
    args = arg_parser.parse_args(sys_argv[1:])
    issues.print_and_save_errors(args.results_dir,
                                 args.project_root,
                                 args.issues_json,
                                 args.issues_txt,
                                 args.pmd_xml,
                                 console_out=not args.quiet)
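
arg_parser and utils.decode come from the surrounding Infer modules and are not shown. A hypothetical argparse setup that would provide the attributes this main() reads (flag names are inferred from the attribute names, not taken from the source):

import argparse

arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--results-dir')   # -> args.results_dir
arg_parser.add_argument('--project-root')  # -> args.project_root
arg_parser.add_argument('--issues-json')   # -> args.issues_json
arg_parser.add_argument('--issues-txt')    # -> args.issues_txt
arg_parser.add_argument('--pmd-xml', action='store_true')  # -> args.pmd_xml
arg_parser.add_argument('--quiet', action='store_true')    # -> args.quiet
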
Example #9
 def capture_with_flavors(self):
     ret = self._run_buck_with_flavors()
     if ret != os.EX_OK:
         return ret
     result_paths = self._get_analysis_result_paths()
     merged_reports_path = os.path.join(
         self.args.infer_out, config.JSON_REPORT_FILENAME)
     merged_deps_path = os.path.join(
         self.args.infer_out, config.INFER_BUCK_DEPS_FILENAME)
     self._merge_infer_report_files(result_paths, merged_reports_path)
     self._merge_infer_dep_files(result_paths, merged_deps_path)
     infer_out = self.args.infer_out
     json_report = os.path.join(infer_out, config.JSON_REPORT_FILENAME)
     bugs_out = os.path.join(infer_out, config.BUGS_FILENAME)
     issues.print_and_save_errors(infer_out, self.args.project_root,
                                  json_report, bugs_out, self.args.pmd_xml)
     return os.EX_OK
Example #10
 def capture_with_flavors(self):
     ret = self._run_buck_with_flavors()
     if ret != os.EX_OK:
         return ret
     result_paths = self._get_analysis_result_paths()
     merged_reports_path = os.path.join(self.args.infer_out,
                                        config.JSON_REPORT_FILENAME)
     merged_deps_path = os.path.join(self.args.infer_out,
                                     config.INFER_BUCK_DEPS_FILENAME)
     self._merge_infer_report_files(result_paths, merged_reports_path)
     self._merge_infer_dep_files(result_paths, merged_deps_path)
     infer_out = self.args.infer_out
     json_report = os.path.join(infer_out, config.JSON_REPORT_FILENAME)
     bugs_out = os.path.join(infer_out, config.BUGS_FILENAME)
     issues.print_and_save_errors(infer_out, self.args.project_root,
                                  json_report, bugs_out, self.args.pmd_xml)
     return os.EX_OK
Example #11
def main():
    sys_argv = list(map(utils.decode, sys.argv))
    args = arg_parser.parse_args(sys_argv[1:])
    issues.print_and_save_errors(args.results_dir, args.project_root,
                                 args.issues_json, args.issues_txt,
                                 args.pmd_xml, console_out=not args.quiet)
Example #12
def collect_results(args, start_time):
    """Walks through buck-gen, collects results for the different buck targets
    and stores them in args.infer_out/results.csv.
    """
    buck_stats = get_buck_stats()
    logging.info(buck_stats)
    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
        f.write(buck_stats)

    all_csv_rows = set()
    all_json_rows = set()
    headers = []
    stats = init_stats(args, start_time)

    accumulation_whitelist = list(
        map(re.compile, [
            '^cores$',
            '^time$',
            '^start_time$',
            '.*_pc',
        ]))

    expected_analyzer = stats['normal']['analyzer']
    expected_version = stats['normal']['infer_version']

    for root, _, files in os.walk(DEFAULT_BUCK_OUT_GEN):
        for f in [f for f in files if f.endswith('.jar')]:
            path = os.path.join(root, f)
            try:
                with zipfile.ZipFile(path) as jar:
                    # Accumulate integers and float values
                    target_stats = load_stats(jar)

                    found_analyzer = target_stats['normal']['analyzer']
                    found_version = target_stats['normal']['infer_version']

                    if (found_analyzer != expected_analyzer
                            or found_version != expected_version):
                        continue
                    else:
                        for type_k in ['int', 'float']:
                            items = target_stats.get(type_k, {}).items()
                            for key, value in items:
                                if not any(
                                        map(lambda r: r.match(key),
                                            accumulation_whitelist)):
                                    old_value = stats[type_k].get(key, 0)
                                    stats[type_k][key] = old_value + value

                    csv_rows = load_csv_report(jar)
                    if len(csv_rows) > 0:
                        headers.append(csv_rows[0])
                        for row in csv_rows[1:]:
                            all_csv_rows.add(tuple(row))

                    json_rows = load_json_report(jar)
                    for row in json_rows:
                        all_json_rows.add(json.dumps(row))

                    # Override normals
                    stats['normal'].update(target_stats.get('normal', {}))
            except NotFoundInJar:
                pass
            except zipfile.BadZipfile:
                logging.warn('Bad zip file %s', path)

    csv_report = os.path.join(args.infer_out, config.CSV_REPORT_FILENAME)
    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)
    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)

    if len(headers) == 0:
        with open(csv_report, 'w'):
            pass
        logging.info('No reports found')
        return
    elif len(headers) > 1:
        if any(map(lambda x: x != headers[0], headers)):
            raise Exception('Inconsistent reports found')

    # Convert all float values to integer values
    for key, value in stats.get('float', {}).items():
        stats['int'][key] = int(round(value))

    # Delete the float entries before exporting the results
    del stats['float']

    with open(csv_report, 'w') as report:
        writer = csv.writer(report)
        all_csv_rows = [list(row) for row in all_csv_rows]
        writer.writerows([headers[0]] + all_csv_rows)
        report.flush()

    with open(json_report, 'w') as report:
        json_string = '['
        json_string += ','.join(all_json_rows)
        json_string += ']'
        report.write(json_string)
        report.flush()

    print('\n')
    issues.print_and_save_errors(json_report, bugs_out)

    stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))

    store_performances_csv(args.infer_out, stats)

    stats_filename = os.path.join(args.infer_out, config.STATS_FILENAME)
    utils.dump_json_to_path(stats, stats_filename)

    basic_stats = get_basic_stats(stats)

    if args.print_harness:
        harness_code = get_harness_code()
        basic_stats += harness_code

    logging.info(basic_stats)

    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'a') as f:
        f.write(basic_stats)
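
Examples #12 through #14 and #16 fold per-target statistics into a running total, skipping any counter whose name matches accumulation_whitelist (quantities such as core counts, timestamps, and percentage-style fields that are not meaningful to sum). The accumulation pattern in isolation:

import re

accumulation_whitelist = [re.compile(p) for p in
                          ['^cores$', '^time$', '^start_time$', '.*_pc']]

def accumulate(stats, target_stats):
    # Sum every numeric counter except the non-additive ones matched above.
    for type_k in ['int', 'float']:
        for key, value in target_stats.get(type_k, {}).items():
            if not any(r.match(key) for r in accumulation_whitelist):
                stats[type_k][key] = stats[type_k].get(key, 0) + value
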
Example #13
def collect_results(args, start_time, targets):
    """Walks through buck-gen, collects results for the different buck targets
    and stores them in args.infer_out/results.csv.
    """
    buck_stats = get_buck_stats()
    logging.info(buck_stats)
    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
        f.write(buck_stats)

    all_csv_rows = set()
    all_json_rows = set()
    headers = []
    stats = init_stats(args, start_time)

    accumulation_whitelist = list(map(re.compile, [
        '^cores$',
        '^time$',
        '^start_time$',
        '.*_pc',
    ]))

    expected_analyzer = stats['normal']['analyzer']
    expected_version = stats['normal']['infer_version']

    for path in get_output_jars(targets):
        try:
            with zipfile.ZipFile(path) as jar:
                # Accumulate integers and float values
                target_stats = load_stats(jar)

                found_analyzer = target_stats['normal']['analyzer']
                found_version = target_stats['normal']['infer_version']

                if found_analyzer != expected_analyzer \
                        or found_version != expected_version:
                    continue
                else:
                    for type_k in ['int', 'float']:
                        items = target_stats.get(type_k, {}).items()
                        for key, value in items:
                            if not any(map(lambda r: r.match(key),
                                           accumulation_whitelist)):
                                old_value = stats[type_k].get(key, 0)
                                stats[type_k][key] = old_value + value

                csv_rows = load_csv_report(jar)
                if len(csv_rows) > 0:
                    headers.append(csv_rows[0])
                    for row in csv_rows[1:]:
                        all_csv_rows.add(tuple(row))

                json_rows = load_json_report(jar)
                for row in json_rows:
                    all_json_rows.add(json.dumps(row))

                # Override normals
                stats['normal'].update(target_stats.get('normal', {}))
        except NotFoundInJar:
            pass
        except zipfile.BadZipfile:
            logging.warn('Bad zip file %s', path)

    csv_report = os.path.join(args.infer_out, config.CSV_REPORT_FILENAME)
    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)
    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)

    if len(headers) > 1:
        if any(map(lambda x: x != headers[0], headers)):
            raise Exception('Inconsistent reports found')

    # Convert all float values to integer values
    for key, value in stats.get('float', {}).items():
        stats['int'][key] = int(round(value))

    # Delete the float entries before exporting the results
    del stats['float']

    with open(csv_report, 'w') as report:
        if len(headers) > 0:
            writer = csv.writer(report)
            all_csv_rows = [list(row) for row in all_csv_rows]
            writer.writerows([headers[0]] + all_csv_rows)
            report.flush()

    with open(json_report, 'w') as report:
        json_string = '['
        json_string += ','.join(all_json_rows)
        json_string += ']'
        report.write(json_string)
        report.flush()

    print('\n')
    xml_out = None
    if args.pmd_xml:
        xml_out = os.path.join(args.infer_out,
                               config.PMD_XML_FILENAME)
    issues.print_and_save_errors(json_report, bugs_out, xml_out)

    stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))

    store_performances_csv(args.infer_out, stats)

    stats_filename = os.path.join(args.infer_out, config.STATS_FILENAME)
    utils.dump_json_to_path(stats, stats_filename)

    basic_stats = get_basic_stats(stats)

    if args.print_harness:
        harness_code = get_harness_code()
        basic_stats += harness_code

    logging.info(basic_stats)

    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'a') as f:
        f.write(basic_stats)
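
load_stats and load_csv_report follow the same jar-entry pattern as load_json_report, reusing the NotFoundInJar sketch from Example #1. Plausible implementations, again with illustrative entry names (INFER_STATS and INFER_CSV_REPORT are assumptions, not taken from the source):

import csv
import json

INFER_STATS = 'infer/stats.json'       # assumed entry name
INFER_CSV_REPORT = 'infer/report.csv'  # assumed entry name

def load_stats(jar):
    try:
        return json.loads(jar.read(INFER_STATS).decode('utf-8'))
    except KeyError:
        raise NotFoundInJar

def load_csv_report(jar):
    try:
        data = jar.read(INFER_CSV_REPORT).decode('utf-8')
        return list(csv.reader(data.splitlines()))
    except KeyError:
        raise NotFoundInJar
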
Example #14
def collect_results(args, start_time, targets):
    """Walks through buck-gen, collects results for the different buck targets
    and stores them in args.infer_out/results.json.
    """
    buck_stats = get_buck_stats()
    logging.info(buck_stats)
    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'w') as f:
        f.write(buck_stats)

    all_json_rows = set()
    stats = init_stats(args, start_time)

    accumulation_whitelist = list(map(re.compile, [
        '^cores$',
        '^time$',
        '^start_time$',
        '.*_pc',
    ]))

    expected_analyzer = stats['normal']['analyzer']
    expected_version = stats['normal']['infer_version']

    for path in get_output_jars(targets):
        try:
            with zipfile.ZipFile(path) as jar:
                # Accumulate integers and float values
                target_stats = load_stats(jar)

                found_analyzer = target_stats['normal']['analyzer']
                found_version = target_stats['normal']['infer_version']

                if found_analyzer != expected_analyzer \
                        or found_version != expected_version:
                    continue
                else:
                    for type_k in ['int', 'float']:
                        items = target_stats.get(type_k, {}).items()
                        for key, value in items:
                            if not any(map(lambda r: r.match(key),
                                           accumulation_whitelist)):
                                old_value = stats[type_k].get(key, 0)
                                stats[type_k][key] = old_value + value

                json_rows = load_json_report(jar)
                for row in json_rows:
                    all_json_rows.add(json.dumps(row))

                # Override normals
                stats['normal'].update(target_stats.get('normal', {}))
        except NotFoundInJar:
            pass
        except zipfile.BadZipfile:
            logging.warn('Bad zip file %s', path)

    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)

    # Convert all float values to integer values
    for key, value in stats.get('float', {}).items():
        stats['int'][key] = int(round(value))

    # Delete the float entries before exporting the results
    del stats['float']

    with open(json_report, 'w') as report:
        json_string = '['
        json_string += ','.join(all_json_rows)
        json_string += ']'
        report.write(json_string)
        report.flush()

    print('\n')
    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)
    issues.print_and_save_errors(args.infer_out, args.project_root,
                                 json_report, bugs_out, args.pmd_xml)

    stats['int']['total_time'] = int(round(utils.elapsed_time(start_time)))

    store_performances_csv(args.infer_out, stats)

    stats_filename = os.path.join(args.infer_out, config.STATS_FILENAME)
    utils.dump_json_to_path(stats, stats_filename)

    basic_stats = get_basic_stats(stats)

    if args.print_harness:
        harness_code = get_harness_code()
        basic_stats += harness_code

    logging.info(basic_stats)

    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), 'a') as f:
        f.write(basic_stats)
Example #15
def main():
    sys_argv = list(map(utils.decode, sys.argv))
    args = arg_parser.parse_args(sys_argv[1:])
    bugs_out = os.path.join(args.results_dir, config.BUGS_FILENAME)
    issues.print_and_save_errors(args.results_dir, args.project_root,
                                 args.issues_json, bugs_out, args.pmd_xml)
Example #16
def collect_results(args, start_time):
    """Walks through buck-gen, collects results for the different buck targets
    and stores them in args.infer_out/results.csv.
    """
    buck_stats = get_buck_stats()
    logging.info(buck_stats)
    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), "w") as f:
        f.write(buck_stats)

    all_csv_rows = set()
    all_json_rows = set()
    headers = []
    stats = init_stats(args, start_time)

    accumulation_whitelist = list(map(re.compile, ["^cores$", "^time$", "^start_time$", ".*_pc"]))

    expected_analyzer = stats["normal"]["analyzer"]
    expected_version = stats["normal"]["infer_version"]

    for root, _, files in os.walk(DEFAULT_BUCK_OUT_GEN):
        for f in [f for f in files if f.endswith(".jar")]:
            path = os.path.join(root, f)
            try:
                with zipfile.ZipFile(path) as jar:
                    # Accumulate integers and float values
                    target_stats = load_stats(jar)

                    found_analyzer = target_stats["normal"]["analyzer"]
                    found_version = target_stats["normal"]["infer_version"]

                    if found_analyzer != expected_analyzer or found_version != expected_version:
                        continue
                    else:
                        for type_k in ["int", "float"]:
                            items = target_stats.get(type_k, {}).items()
                            for key, value in items:
                                if not any(map(lambda r: r.match(key), accumulation_whitelist)):
                                    old_value = stats[type_k].get(key, 0)
                                    stats[type_k][key] = old_value + value

                    csv_rows = load_csv_report(jar)
                    if len(csv_rows) > 0:
                        headers.append(csv_rows[0])
                        for row in csv_rows[1:]:
                            all_csv_rows.add(tuple(row))

                    json_rows = load_json_report(jar)
                    for row in json_rows:
                        all_json_rows.add(json.dumps(row))

                    # Override normals
                    stats["normal"].update(target_stats.get("normal", {}))
            except NotFoundInJar:
                pass
            except zipfile.BadZipfile:
                logging.warn("Bad zip file %s", path)

    csv_report = os.path.join(args.infer_out, config.CSV_REPORT_FILENAME)
    json_report = os.path.join(args.infer_out, config.JSON_REPORT_FILENAME)
    bugs_out = os.path.join(args.infer_out, config.BUGS_FILENAME)

    if len(headers) > 1:
        if any(map(lambda x: x != headers[0], headers)):
            raise Exception("Inconsistent reports found")

    # Convert all float values to integer values
    for key, value in stats.get("float", {}).items():
        stats["int"][key] = int(round(value))

    # Delete the float entries before exporting the results
    del (stats["float"])

    with open(csv_report, "w") as report:
        if len(headers) > 0:
            writer = csv.writer(report)
            all_csv_rows = [list(row) for row in all_csv_rows]
            writer.writerows([headers[0]] + all_csv_rows)
            report.flush()

    with open(json_report, "w") as report:
        json_string = "["
        json_string += ",".join(all_json_rows)
        json_string += "]"
        report.write(json_string)
        report.flush()

    print("\n")
    xml_out = None
    if args.pmd_xml:
        xml_out = os.path.join(args.infer_out, config.PMD_XML_FILENAME)
    issues.print_and_save_errors(json_report, bugs_out, xml_out)

    stats["int"]["total_time"] = int(round(utils.elapsed_time(start_time)))

    store_performances_csv(args.infer_out, stats)

    stats_filename = os.path.join(args.infer_out, config.STATS_FILENAME)
    utils.dump_json_to_path(stats, stats_filename)

    basic_stats = get_basic_stats(stats)

    if args.print_harness:
        harness_code = get_harness_code()
        basic_stats += harness_code

    logging.info(basic_stats)

    with open(os.path.join(args.infer_out, ANALYSIS_SUMMARY_OUTPUT), "a") as f:
        f.write(basic_stats)
Example #17
def main():
    sys_argv = list(map(utils.decode, sys.argv))
    args = arg_parser.parse_args(sys_argv[1:])
    bugs_out = os.path.join(args.results_dir, config.BUGS_FILENAME)
    issues.print_and_save_errors(args.results_dir, args.project_root,
                                 args.issues_json, bugs_out, args.pmd_xml)