def test_get_genome_group_folder_and_file(tmp_path):
    """Genome Group initialization from run using exclusions and folder/file configs."""
    f = tmp_path / "folder"
    f.mkdir()
    test_files = [
        tmp_path / "first.py",
        tmp_path / "second.py",
        tmp_path / "test_first.py",
        tmp_path / "test_second.py",
        tmp_path / "third_test.py",
        f / "third.py",
        f / "test_third.py",
    ]

    for tf in test_files:
        with open(tf, "w") as temp_py:
            temp_py.write("import this")

    config = Config(exclude_files=[tmp_path / "first.py"], filter_codes=["bn", "ix"])
    expected_keys = sorted([tmp_path / "second.py", f / "third.py"])

    # test using a folder, including exclusions
    ggrp = run.get_genome_group(src_loc=tmp_path, config=config)

    assert sorted(ggrp.keys()) == expected_keys
    for k, g in ggrp.items():
        assert g.filter_codes == {"bn", "ix"}

    # test using only a file and an empty config
    ggrp2 = run.get_genome_group(src_loc=tmp_path / "first.py", config=Config())

    assert sorted(ggrp2.keys()) == [tmp_path / "first.py"]
    for k, g in ggrp2.items():
        assert g.filter_codes == set()
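# NOTE: hypothetical parametrization for the test below; the actual decorator
# is not shown in this section. The values follow the docstring: all 6 mutants
# survive, so break-on-survival stops after 1 trial, while break-on-detected
# never fires because no mutant is detected.
@pytest.mark.parametrize(
    "bos, bod, exp_trials",
    [(False, False, 6), (True, False, 1), (False, True, 6), (True, True, 1)],
    ids=["no_break", "break_survival", "break_detected_only", "break_both"],
)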
def test_run_mutation_trials_bad_binop(bos, bod, exp_trials, single_binop_file_with_bad_test):
    """Slow test to run detection trials on a simple mutation on a binop.

    Based on the fixture, there is one Add operation with 6 substitutions
    (sub, div, mult, pow, mod, floordiv), so 6 total trials are expected for
    a full run and 1 trial is expected when break-on-survival is used.

    Args:
        bos: break on survival
        bod: break on detection
        exp_trials: number of expected trials
        single_binop_file_with_bad_test: fixture for single op with a bad test
    """
    test_cmds = f"pytest {single_binop_file_with_bad_test.test_file.resolve()}".split()

    config = Config(n_locations=100, break_on_survival=bos, break_on_detected=bod)

    results_summary = run.run_mutation_trials(
        single_binop_file_with_bad_test.src_file.resolve(), test_cmds=test_cmds, config=config
    )

    assert len(results_summary.results) == exp_trials

    # in all trials the status should be SURVIVED since the test is bad
    for mutant_trial in results_summary.results:
        assert mutant_trial.return_code == 0
        assert mutant_trial.status == "SURVIVED"
@pytest.mark.parametrize(
    "return_code, config",
    [
        (0, Config(break_on_survival=True)),
        (1, Config(break_on_detected=True)),
        (2, Config(break_on_error=True)),
        (3, Config(break_on_unknown=True)),
    ],
    ids=["survival", "detected", "error", "unknown"],
)
def test_break_on_check(return_code, config, mock_Mutant, mock_LocIdx):
    # positive case: the matching break-on setting is enabled in config
    mtr = MutantTrialResult(mock_Mutant, return_code)
    result = run.trial_output_check_break(mtr, config, Path("file.py"), mock_LocIdx)
    assert result

    # negative case: the default Config has no break-on settings
    result = run.trial_output_check_break(mtr, Config(), Path("file.py"), mock_LocIdx)
    assert not result
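# NOTE: hypothetical parametrization for the timeout test below; the actual
# decorator is not shown in this section. Per the docstring, 3 of the 6
# mutations time out: with break-on-timeout enabled, the run stops at the
# first timeout; otherwise all 3 timeouts are observed.
@pytest.mark.parametrize(
    "bot, exp_timeout_trials",
    [(False, 3), (True, 1)],
    ids=["no_break_on_timeout", "break_on_timeout"],
)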
def test_run_mutation_trials_timeout(bot, exp_timeout_trials, sleep_timeout):
    """Slow test to run detection trials on a simple mutation on a binop.

    Based on the fixture, there are 2 substitutions and one if-statement;
    one of these changes causes the sleep function to be executed, resulting
    in a timeout. Of the 6 total mutations, 3 will time out.

    Args:
        bot: break on timeout
        exp_timeout_trials: number of expected timeout trials
        sleep_timeout: fixture for single op with a timeout test
    """
    test_cmds = f"pytest {sleep_timeout.test_file.resolve()}".split()

    max_runtime = 1  # manually set to keep the timeout time reasonable

    config = Config(
        n_locations=100,
        break_on_survival=False,
        break_on_detected=False,
        break_on_timeout=bot,
        max_runtime=max_runtime,
    )

    results_summary = run.run_mutation_trials(
        sleep_timeout.src_file.resolve(), test_cmds=test_cmds, config=config
    )

    # in all trials the status should be SURVIVED or TIMEOUT
    for mutant_trial in results_summary.results:
        assert mutant_trial.return_code in {0, 3}
        if mutant_trial.return_code == 0:
            assert mutant_trial.status == "SURVIVED"
        else:
            assert mutant_trial.status == "TIMEOUT"

    timeout_results = [
        mutant_trial for mutant_trial in results_summary.results if mutant_trial.status == "TIMEOUT"
    ]

    # It's possible the timeout count will exceed the expectation in CI,
    # rare but seen on Windows. Assumed to be an IO issue or a shared-fixture
    # problem across environments. Generally, these are expected to be equal.
    assert len(timeout_results) >= exp_timeout_trials
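# NOTE: hypothetical parametrization for the test below; the actual decorator
# is not shown in this section. All 6 mutants are detected by the good test,
# so break-on-detected stops after 1 trial and break-on-survival never fires;
# the parallel flag toggles multiprocessing over the same expectations.
@pytest.mark.parametrize(
    "bos, bod, exp_trials, parallel",
    [
        (False, False, 6, False),
        (False, False, 6, True),
        (True, False, 6, False),
        (False, True, 1, False),
        (False, True, 1, True),
        (True, True, 1, False),
    ],
)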
def test_run_mutation_trials_good_binop(
    bos, bod, exp_trials, parallel, single_binop_file_with_good_test, change_to_tmp
):
    """Slow test to run detection trials on a simple mutation on a binop.

    Based on the fixture, there is one Add operation with 6 substitutions
    (sub, div, mult, pow, mod, floordiv), so 6 total trials are expected for
    a full run and 1 trial is expected when break-on-detected is used.

    Args:
        bos: break on survival
        bod: break on detection
        exp_trials: number of expected trials
        parallel: whether to run the trials with multiprocessing
        single_binop_file_with_good_test: fixture for single op with a good test
        change_to_tmp: fixture to change the working directory to tmp_path
    """
    if sys.version_info < (3, 8) and parallel:
        pytest.skip("Parallel trials are not run below Python 3.8.")

    test_cmds = f"pytest {single_binop_file_with_good_test.test_file.resolve()}".split()

    config = Config(
        n_locations=100, break_on_survival=bos, break_on_detected=bod, multi_processing=parallel
    )

    results_summary = run.run_mutation_trials(
        single_binop_file_with_good_test.src_file.resolve(), test_cmds=test_cmds, config=config
    )

    assert len(results_summary.results) == exp_trials

    # in all trials the status should be DETECTED
    for mutant_trial in results_summary.results:
        assert mutant_trial.return_code == 1
        assert mutant_trial.status == "DETECTED"
def main(args: argparse.Namespace) -> None:
    """Main CLI function to run the mutation trials and report results.

    Args:
        args: argparse arguments

    Returns:
        None, reports output
    """
    src_loc = get_src_location(args.src)

    # set the logging level based on the debug flag in args
    # when in debug mode the test stdout is not captured by subprocess.run
    logging.basicConfig(
        format=DEBUG_FORMAT if args.debug else FORMAT,
        level=logging.DEBUG if args.debug else logging.INFO,
        stream=sys.stdout,
    )

    clean_runtime_1 = run.clean_trial(src_loc=src_loc, test_cmds=args.testcmds)

    # Run the mutation trials based on the input argument
    # set categories if present
    filter_codes: List[str] = list()
    if len(args.whitelist) > 0 or len(args.blacklist) > 0:
        filter_codes = selected_categories(whitelist=args.whitelist, blacklist=args.blacklist)

    # Build the running configuration for the mutation trials
    run_mode = RunMode(args.mode)

    config = Config(
        n_locations=args.nlocations,
        exclude_files=args.exclude,
        filter_codes=filter_codes,
        random_seed=args.rseed,
        break_on_detected=run_mode.break_on_detection,
        break_on_survival=run_mode.break_on_survival,
        break_on_error=run_mode.break_on_error,
        break_on_unknown=run_mode.break_on_unknown,
        break_on_timeout=run_mode.break_on_timeout,
        ignore_coverage=args.nocov,
        max_runtime=args.timeout_factor * clean_runtime_1.total_seconds(),
        multi_processing=args.parallel,
    )

    results_summary = run.run_mutation_trials(
        src_loc=src_loc, test_cmds=args.testcmds, config=config
    )

    # Run the pipeline with no mutations last to ensure a cleared cache
    clean_runtime_2 = run.clean_trial(src_loc=src_loc, test_cmds=args.testcmds)

    runtimes = TrialTimes(
        clean_trial_1=clean_runtime_1,
        clean_trial_2=clean_runtime_2,
        mutation_trials=results_summary.total_runtime,
    )

    # create the report of results
    cli_report = cli_summary_report(
        src_loc=src_loc,
        args=args,
        locs_mutated=results_summary.n_locs_mutated,
        locs_identified=results_summary.n_locs_identified,
        runtimes=runtimes,
    )

    trial_report, display_results = report.analyze_mutant_trials(results_summary.results)

    LOGGER.info("CLI Report:\n\n%s", cli_report)
    LOGGER.info("Trial Summary Report:\n\n%s\n", display_results.summary)
    LOGGER.info("Detected mutations:%s\n", display_results.detected)
    LOGGER.info("Timed-out mutations:%s\n", display_results.timedout)
    LOGGER.info("Surviving mutations:%s\n", display_results.survived)

    if args.output:
        report.write_report("\n".join([cli_report, trial_report]), Path(args.output))

    if args.exception:
        LOGGER.info("Survivor tolerance check for %s surviving mutants.", args.exception)
        exception_processing(args.exception, results_summary.results)
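# Illustrative only: a Namespace carrying every attribute that main() reads
# above. The attribute names mirror main()'s accesses; the values shown are
# assumptions for demonstration, not the parser's real defaults.
if __name__ == "__main__":
    demo_args = argparse.Namespace(
        src="mypackage",                # source file or package to mutate
        testcmds=["pytest", "tests/"],  # test command tokens passed through to the runner
        mode="s",                       # RunMode key controlling the break-on settings
        nlocations=10,                  # number of mutation locations to sample
        exclude=[],                     # files to exclude from mutation
        rseed=314,                      # random seed for location sampling
        whitelist=[],                   # mutation categories to include
        blacklist=[],                   # mutation categories to skip
        nocov=True,                     # ignore coverage optimization
        timeout_factor=2,               # multiplier on the clean-trial runtime
        parallel=False,                 # enable multiprocessing trials
        output=None,                    # optional report output path
        exception=0,                    # surviving-mutant tolerance threshold (0 = skip check)
        debug=False,                    # verbose logging, test stdout not captured
    )
    main(demo_args)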