Example #1
def test_run_mutation_trials_bad_binop(bos, bod, exp_trials, single_binop_file_with_bad_test):
    """Slow test to run detection trials on a simple mutation on a binop.

    Based on fixture, there is one Add operation, with 6 substitutions e.g.
    sub, div, mult, pow, mod, floordiv, therefore, 6 total trials are expected for a full run
    and 1 trial is expected when break on detected is used.

    Args:
        bos: break on survival
        bod: break on detection
        exp_trials: number of expected trials
        single_binop_file_with_bad_test: fixture for single op with a bad test
    """

    test_cmds = f"pytest {single_binop_file_with_bad_test.test_file.resolve()}".split()

    config = Config(n_locations=100, break_on_survival=bos, break_on_detected=bod)

    results_summary = run.run_mutation_trials(
        single_binop_file_with_bad_test.src_file.resolve(), test_cmds=test_cmds, config=config
    )

    assert len(results_summary.results) == exp_trials

    # in all trials the status should be SURVIVED
    for mutant_trial in results_summary.results:
        assert mutant_trial.return_code == 0
        assert mutant_trial.status == "SURVIVED"
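The bos, bod, and exp_trials parameters are supplied by a parametrize decorator that is not shown above. A hypothetical sketch consistent with the counts in the docstring (all values below are assumptions, not the source's actual parametrization) could look like this:

import pytest

# Hypothetical parametrization: with a bad test every mutant survives, so a full
# run sees all 6 substitution trials, while break-on-survival stops after the
# first surviving mutant.
@pytest.mark.parametrize(
    "bos, bod, exp_trials",
    [
        (False, False, 6),  # no early exit: all 6 binop substitutions are trialed
        (False, True, 6),   # break on detected never fires because nothing is detected
        (True, False, 1),   # break on survival stops after the first survivor
    ],
)
def test_run_mutation_trials_bad_binop(bos, bod, exp_trials, single_binop_file_with_bad_test):
    ...  # body as above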
Example #2
def test_run_mutation_trials_timeout(bot, exp_timeout_trials, sleep_timeout):
    """Slow test to run detection trials on a simple mutation on a binop.

    Based on fixture, there are 2 substitutions e.g. and one if statement:
    one of these changes will cause the sleep function to be executed
    resulting in a Timeout. In total there are 6 total mutations, 3 of which will timeout.

    Args:
        bot: break on timeout
        exp_timeout_trials: number of expected timeout trials
        sleep_timeout: fixture for single op with a timeout test
    """

    test_cmds = f"pytest {sleep_timeout.test_file.resolve()}".split()
    max_runtime = 1  # manually set to keep the timeout time reasonable

    config = Config(
        n_locations=100,
        break_on_survival=False,
        break_on_detected=False,
        break_on_timeout=bot,
        max_runtime=max_runtime,
    )

    results_summary = run.run_mutation_trials(sleep_timeout.src_file.resolve(),
                                              test_cmds=test_cmds,
                                              config=config)

    # in all trials the status should be SURVIVED or TIMEOUT
    for mutant_trial in results_summary.results:
        assert mutant_trial.return_code in {0, 3}
        if mutant_trial.return_code == 0:
            assert mutant_trial.status == "SURVIVED"
        else:
            assert mutant_trial.status == "TIMEOUT"

    timeout_results = [
        mutant_trial for mutant_trial in results_summary.results
        if mutant_trial.status == "TIMEOUT"
    ]

    # The timeout count can exceed the expected value in CI; this is rare but has been
    # seen on Windows and is assumed to be an IO or shared-fixture issue across environments.
    # Generally, these counts are expected to be equal.
    assert len(timeout_results) >= exp_timeout_trials
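The sleep_timeout fixture itself is not shown in the source. A minimal sketch of the kind of source/test pair it could provide (file names, values, and the sleep duration are assumptions for illustration):

import time

# Hypothetical source file behind the sleep_timeout fixture: a mutated comparison
# or if-statement can flip the branch so that time.sleep runs and the trial
# exceeds max_runtime, producing a TIMEOUT instead of SURVIVED.
def might_sleep(x):
    if x > 10:
        time.sleep(5)   # long enough to exceed the 1-second max_runtime above
        return 0
    return x + 1        # the binop here also receives substitution mutations

# Hypothetical companion test file:
#   from example_sleep import might_sleep
#
#   def test_might_sleep():
#       assert might_sleep(1) == 2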
Example #3
def test_run_mutation_trials_good_binop(bos, bod, exp_trials, parallel,
                                        single_binop_file_with_good_test,
                                        change_to_tmp):
    """Slow test to run detection trials on a simple mutation on a binop.

    Based on the fixture, there is one Add operation with 6 substitutions (e.g. sub,
    div, mult, pow, mod, floordiv); therefore, 6 total trials are expected for a full run
    and 1 trial is expected when break-on-detected is used.

    Args:
        bos: break on survival
        bod: break on detection
        exp_trials: number of expected trials
        parallel: whether to run the trials with multiprocessing
        single_binop_file_with_good_test: fixture for single op with a good test
        change_to_tmp: fixture to change the working directory to a tmp path
    """
    if sys.version_info < (3, 8) and parallel:
        pytest.skip("Under version 3.8 will not run parallel tests.")

    test_cmds = f"pytest {single_binop_file_with_good_test.test_file.resolve()}".split(
    )

    config = Config(n_locations=100,
                    break_on_survival=bos,
                    break_on_detected=bod,
                    multi_processing=parallel)

    results_summary = run.run_mutation_trials(
        single_binop_file_with_good_test.src_file.resolve(),
        test_cmds=test_cmds,
        config=config)

    assert len(results_summary.results) == exp_trials

    # in all trials the status should be DETECTED
    for mutant_trial in results_summary.results:
        assert mutant_trial.return_code == 1
        assert mutant_trial.status == "DETECTED"
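For contrast with the bad-test case, a hypothetical source/test pair of the kind single_binop_file_with_good_test could provide (names and values are assumptions) shows why every substitution is detected:

# Hypothetical source file: a single Add operation.
def add_five(a):
    return a + 5

# Hypothetical companion test: the assertion pins the result of the Add, so every
# substituted operator (sub, div, mult, pow, mod, floordiv) changes the return
# value, the test fails (pytest exit code 1), and the mutant is DETECTED.
#   from example_add import add_five
#
#   def test_add_five():
#       assert add_five(1) == 6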
Example #4
def main(args: argparse.Namespace) -> None:
    """Main CLI function to run the mutation trials and report results.

    Args:
        args: argparse arguments

    Returns:
        None; results are logged and, if an output path is given, written to a report file
    """
    src_loc = get_src_location(args.src)

    # set the logging level based on the debug flag in args
    # when in debug mode the test stdout is not captured by subprocess.run
    logging.basicConfig(
        format=DEBUG_FORMAT if args.debug else FORMAT,
        level=logging.DEBUG if args.debug else logging.INFO,
        stream=sys.stdout,
    )

    clean_runtime_1 = run.clean_trial(src_loc=src_loc, test_cmds=args.testcmds)

    # Set the mutation category filter codes if a whitelist or blacklist is present
    filter_codes: List[str] = list()
    if len(args.whitelist) > 0 or len(args.blacklist) > 0:
        filter_codes = selected_categories(whitelist=args.whitelist,
                                           blacklist=args.blacklist)

    # Build the running configuration for the mutation trials
    run_mode = RunMode(args.mode)

    config = Config(
        n_locations=args.nlocations,
        exclude_files=args.exclude,
        filter_codes=filter_codes,
        random_seed=args.rseed,
        break_on_detected=run_mode.break_on_detection,
        break_on_survival=run_mode.break_on_survival,
        break_on_error=run_mode.break_on_error,
        break_on_unknown=run_mode.break_on_unknown,
        break_on_timeout=run_mode.break_on_timeout,
        ignore_coverage=args.nocov,
        max_runtime=args.timeout_factor * clean_runtime_1.total_seconds(),
        multi_processing=args.parallel,
    )

    results_summary = run.run_mutation_trials(src_loc=src_loc,
                                              test_cmds=args.testcmds,
                                              config=config)

    # Run the pipeline with no mutations last to ensure cleared cache
    clean_runtime_2 = run.clean_trial(src_loc=src_loc, test_cmds=args.testcmds)

    runtimes = TrialTimes(
        clean_trial_1=clean_runtime_1,
        clean_trial_2=clean_runtime_2,
        mutation_trials=results_summary.total_runtime,
    )

    # create the report of results
    cli_report = cli_summary_report(
        src_loc=src_loc,
        args=args,
        locs_mutated=results_summary.n_locs_mutated,
        locs_identified=results_summary.n_locs_identified,
        runtimes=runtimes,
    )

    trial_report, display_results = report.analyze_mutant_trials(
        results_summary.results)

    LOGGER.info("CLI Report:\n\n%s", cli_report)
    LOGGER.info("Trial Summary Report:\n\n%s\n", display_results.summary)
    LOGGER.info("Detected mutations:%s\n", display_results.detected)
    LOGGER.info("Timedout mutations:%s\n", display_results.timedout)
    LOGGER.info("Surviving mutations:%s\n", display_results.survived)

    if args.output:
        report.write_report("\n".join([cli_report, trial_report]),
                            Path(args.output))

    if args.exception:
        LOGGER.info("Survivor tolerance check for %s surviving mutants.",
                    args.exception)
        exception_processing(args.exception, results_summary.results)
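main() only consumes an argparse.Namespace, so a minimal sketch of a parser producing the attributes it reads follows. The flag names, types, and defaults here are assumptions for illustration, not the tool's documented CLI:

import argparse

# Hypothetical parser covering the attributes read by main(): src, testcmds, mode,
# nlocations, exclude, whitelist, blacklist, rseed, nocov, timeout_factor,
# parallel, output, exception, and debug.
def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="mutation-trials")
    parser.add_argument("--src", help="source file or directory to mutate")
    parser.add_argument("--testcmds", nargs="+", default=["pytest"], help="test command to run")
    parser.add_argument("--mode", default="s", help="running mode controlling break behavior")
    parser.add_argument("--nlocations", type=int, default=10, help="number of locations to mutate")
    parser.add_argument("--exclude", nargs="*", default=[], help="files to exclude")
    parser.add_argument("--whitelist", nargs="*", default=[], help="mutation categories to include")
    parser.add_argument("--blacklist", nargs="*", default=[], help="mutation categories to exclude")
    parser.add_argument("--rseed", type=int, default=None, help="random seed for mutation sampling")
    parser.add_argument("--nocov", action="store_true", help="ignore coverage files")
    parser.add_argument("--timeout_factor", type=float, default=5,
                        help="multiplier on the clean-trial runtime used as max_runtime")
    parser.add_argument("--parallel", action="store_true", help="use multiprocessing")
    parser.add_argument("--output", default=None, help="optional report output path")
    parser.add_argument("--exception", type=int, default=None,
                        help="surviving-mutant tolerance before raising")
    parser.add_argument("--debug", action="store_true", help="enable debug logging")
    return parser

# Usage sketch: main(build_parser().parse_args()) would then drive the trials shown above.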