Example #1
def write_report(project, test_results):
    report = {"tests-run": test_results.number_of_tests_executed,
              "time_in_millis": test_results.execution_time,
              "failures": []}
    for test_result in test_results.test_results:
        if test_result.success:
            continue
        report["failures"].append({"test": test_result.test_definition.name, "message": test_result.message,
                                   "traceback": test_result.traceback_as_string})

    project.write_report("pyfix_unittest.json", render_report(report))
Example #2
    def test_should_render_report(self):
        report = {
            "spam": "spam",
            "eggs": ["egg", "another_egg"]
        }
        expected = """{
  "eggs": [
    "egg", 
    "another_egg"
  ], 
  "spam": "spam"
}"""
        actual = render_report(report)
        self.assertEqual(expected, actual)
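Every snippet on this page funnels its report dictionary through render_report, whose implementation is not shown here. A minimal sketch consistent with the expected string in the test above (sorted keys, two-space indent; the trailing spaces after commas match the Python 2-era json.dumps item separator) could look like this:

import json

def render_report(report_dict):
    # Hedged sketch: serialize with stable key order and two-space indentation,
    # which is what the test_should_render_report expectation above describes.
    return json.dumps(report_dict, indent=2, sort_keys=True)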
Example #3
def write_report(name, project, logger, result, console_out):
    project.write_report("%s" % name, console_out)    
    
    report = {"tests-run":result.testsRun, 
        "errors":[], 
        "failures":[]}
    for error in result.errors:
        report["errors"].append({"test":error[0].id(), "traceback":error[1]})
        logger.error("Test has error: %s", error[0].id())
    
    for failure in result.failures:
        report["failures"].append({"test":failure[0].id(), "traceback":failure[1]})
        logger.error("Test failed: %s", failure[0].id())
    
    project.write_report("%s.json" % name, render_report(report))
Example #4
def execute_pychecker(project, logger):
    command_line = build_command_line(project)
    logger.info("Executing pychecker on project sources: %s", " ".join(command_line))
    
    _, report_file = execute_tool_on_modules(project, "pychecker", command_line, True)
    
    warnings = read_file(report_file)
    
    report = parse_pychecker_output(project, warnings)
    project.write_report("pychecker.json", render_report(report.to_json_dict()))
    
    if len(warnings) != 0:
        logger.warn("Found %d warning%s produced by pychecker. See %s for details.", 
                    len(warnings), 
                    "s" if len(warnings) != 1 else "", 
                    report_file)
        
        threshold = project.get_property("pychecker_break_build_threshold")
        
        if project.get_property("pychecker_break_build") and len(warnings) > threshold:
            raise BuildFailedException("Found warnings produced by pychecker")
Example #5
def run_integration_tests(project, logger):
    reports_dir = prepare_reports_directory(project)

    test_failed = 0
    tests_executed = 0
    report_items = []

    total_time = Timer.start()

    for test in discover_integration_tests(
        project.expand_path("$dir_source_integrationtest_python"), project.expand("$integrationtest_file_suffix")
    ):

        report_item = run_single_test(logger, project, reports_dir, test)
        report_items.append(report_item)

        if not report_item["success"]:
            test_failed += 1

        tests_executed += 1

    total_time.stop()

    test_report = {
        "time": total_time.get_millis(),
        "success": test_failed == 0,
        "num_of_tests": tests_executed,
        "tests_failed": test_failed,
        "tests": report_items,
    }

    project.write_report("integrationtest.json", render_report(test_report))

    logger.info("Executed %d integration tests.", tests_executed)
    if test_failed:
        raise BuildFailedException("Integration test(s) failed.")
Example #6
def do_coverage(project, logger, reactor):
    import coverage

    start_coverage(coverage)
    reactor.execute_task("run_unit_tests")
    stop_coverage(coverage, project, logger)

    coverage_too_low = False
    threshold = project.get_property("coverage_threshold_warn")
    exceptions = project.get_property("coverage_exceptions")

    report = {"module_names": []}

    sum_lines = 0
    sum_lines_not_covered = 0

    module_names = discover_modules_to_cover(project)
    modules = []
    for module_name in module_names:
        try:
            module = sys.modules[module_name]
        except KeyError:
            logger.warn("Module not imported: %s. No coverage information available.", module_name)
            continue

        modules.append(module)

        module_report_data = build_module_report(coverage, module)

        sum_lines += module_report_data[0]
        sum_lines_not_covered += module_report_data[2]

        module_report = {
            "module": module_name,
            "coverage": module_report_data[4],
            "sum_lines": module_report_data[0],
            "lines": module_report_data[1],
            "sum_lines_not_covered": module_report_data[2],
            "lines_not_covered": module_report_data[3],
        }

        report["module_names"].append(module_report)

        if module_report_data[4] < threshold:
            msg = "Test coverage below %2d%% for %s: %2d%%" % (threshold, module_name, module_report_data[4])
            if module_name not in exceptions:
                logger.warn(msg)
                coverage_too_low = True
            else:
                logger.info(msg)

    if sum_lines == 0:
        overall_coverage = 0
    else:
        overall_coverage = (sum_lines - sum_lines_not_covered) * 100 / sum_lines
    report["overall_coverage"] = overall_coverage

    if overall_coverage < threshold:
        logger.warn("Overall coverage is below %2d%%: %2d%%", threshold, overall_coverage)
        coverage_too_low = True
    else:
        logger.info("Overall coverage is %2d%%", overall_coverage)

    project.write_report("coverage.json", render_report(report))

    write_summary_report(coverage, project, modules)

    if coverage_too_low and project.get_property("coverage_break_build"):
        raise BuildFailedException("Test coverage for at least one module is below %d%%", threshold)