# Example #1
def test_clean_trial_exception(binop_file, monkeypatch):
    """A non-zero return code from the test suite raises BaselineTestException."""

    def failing_run(*args, **kwargs):
        # Simulate a failing test suite by reporting a non-zero return code.
        return CompletedProcess(args="pytest", returncode=1)

    # Intercept subprocess.run so no real test process is spawned.
    monkeypatch.setattr(subprocess, "run", failing_run)

    with pytest.raises(BaselineTestException):
        run.clean_trial(binop_file.parent, ["pytest"])
# Example #2
def test_clean_trial_timedelta(binop_file, monkeypatch):
    """A successful clean trial returns its runtime as a timedelta."""

    def passing_run(*args, **kwargs):
        # Simulate a passing test suite: return code of zero.
        return CompletedProcess(args="pytest", returncode=0)

    # Intercept subprocess.run so no real test process is spawned.
    monkeypatch.setattr(subprocess, "run", passing_run)

    elapsed = run.clean_trial(binop_file.parent, ["pytest"])
    assert isinstance(elapsed, timedelta)
# Example #3
def main(args: argparse.Namespace) -> None:
    """Main CLI function to run the mutation trials and report results.

    Args:
        args: argparse arguments

    Returns:
        None, reports output
    """
    src_loc = get_src_location(args.src)

    # set the logging level based on the debug flag in args
    # when in debug mode the test stdout is not captured by subprocess.run
    logging.basicConfig(
        format=DEBUG_FORMAT if args.debug else FORMAT,
        level=logging.DEBUG if args.debug else logging.INFO,
        stream=sys.stdout,
    )

    # First clean (mutation-free) trial: its runtime is the baseline used
    # to derive the per-trial timeout below.
    clean_runtime_1 = run.clean_trial(src_loc=src_loc, test_cmds=args.testcmds)

    # Run the mutation trials based on the input argument
    # set categories if present (truthiness replaces non-idiomatic len() > 0)
    filter_codes: List[str] = []
    if args.whitelist or args.blacklist:
        filter_codes = selected_categories(whitelist=args.whitelist,
                                           blacklist=args.blacklist)

    # Build the running configuration for the mutation trials
    run_mode = RunMode(args.mode)

    config = Config(
        n_locations=args.nlocations,
        exclude_files=args.exclude,
        filter_codes=filter_codes,
        random_seed=args.rseed,
        break_on_detected=run_mode.break_on_detection,
        break_on_survival=run_mode.break_on_survival,
        break_on_error=run_mode.break_on_error,
        break_on_unknown=run_mode.break_on_unknown,
        break_on_timeout=run_mode.break_on_timeout,
        ignore_coverage=args.nocov,
        # timeout is scaled from the measured clean-trial baseline
        max_runtime=args.timeout_factor * clean_runtime_1.total_seconds(),
        multi_processing=args.parallel,
    )

    results_summary = run.run_mutation_trials(src_loc=src_loc,
                                              test_cmds=args.testcmds,
                                              config=config)

    # Run the pipeline with no mutations last to ensure cleared cache
    clean_runtime_2 = run.clean_trial(src_loc=src_loc, test_cmds=args.testcmds)

    runtimes = TrialTimes(
        clean_trial_1=clean_runtime_1,
        clean_trial_2=clean_runtime_2,
        mutation_trials=results_summary.total_runtime,
    )

    # create the report of results
    cli_report = cli_summary_report(
        src_loc=src_loc,
        args=args,
        locs_mutated=results_summary.n_locs_mutated,
        locs_identified=results_summary.n_locs_identified,
        runtimes=runtimes,
    )

    trial_report, display_results = report.analyze_mutant_trials(
        results_summary.results)

    LOGGER.info("CLI Report:\n\n%s", cli_report)
    LOGGER.info("Trial Summary Report:\n\n%s\n", display_results.summary)
    LOGGER.info("Detected mutations:%s\n", display_results.detected)
    LOGGER.info("Timedout mutations:%s\n", display_results.timedout)
    LOGGER.info("Surviving mutations:%s\n", display_results.survived)

    # Optionally persist the combined report to the requested output path.
    if args.output:
        report.write_report("\n".join([cli_report, trial_report]),
                            Path(args.output))

    # Optionally raise if more than the tolerated number of mutants survived.
    if args.exception:
        LOGGER.info("Survivor tolerance check for %s surviving mutants.",
                    args.exception)
        exception_processing(args.exception, results_summary.results)