def main():
    """Generate mutants for every not-yet-analysed Java source file.

    Builds the project with Maven, runs MuJava per source file, attaches
    HUNOR results to each target, and persists targets/state after every
    file so the run can be resumed (``_recover_state``).
    """
    options = to_options_gen(arg_parser_gen())

    jdk = JDK(options.java_home)
    classes_dir = _maven_work(options, jdk)
    _create_mutants_dir(options)

    tool = MuJava(options.mutants, jdk=jdk, classpath=classes_dir)

    # state is (targets, analysed_files); unpack but keep the aliases so
    # in-place mutation of `targets` is visible through `state` when it is
    # handed to _save_state below.
    state = _recover_state(options)
    targets, analysed_files = state

    db = _initialize_db(options)

    files = get_class_files(options.java_src, ext='.java')

    for i, file in enumerate(sort_files(files)):
        print('PROCESSING {0} {1}/{2}'.format(file, i + 1, len(files)))
        # Skip files already processed in a previous (recovered) run.
        if file not in analysed_files['files']:
            # len(targets) is passed so new target ids continue the sequence.
            t = tool.generate(classes_dir, options.java_src, file,
                              len(targets))
            print('\ttargets found: {0}'.format(len(t)))
            targets += t  # in-place extend: keeps state[0] up to date

            for target in t:
                target['mutants'] = _run_hunor(options, target)

            _persist_targets(db, t)
            _save_state(options, state, t, file)
def main():
    """Collect mutation targets from every Java file under PROJECT_DIR
    and dump them to the config JSON file."""
    src_root = PROJECT_DIR
    all_targets = []
    # len(all_targets) is passed so each file's targets continue the
    # running id sequence.
    for java_file in get_class_files(src_root, ext='.java'):
        all_targets += get_targets(src_root, java_file, len(all_targets))
    write_config_json(all_targets)
def _test_classes(self):
    """Return the fully-qualified class names of the compiled test
    classes, skipping EvoSuite ``_scaffolding`` companions."""
    stems = (os.path.splitext(f)[0]
             for f in sorted(get_class_files(self.tests_classes)))
    # Path separators become dots to form Java-style class names.
    return [stem.replace(os.sep, '.')
            for stem in stems
            if not stem.endswith('_scaffolding')]
def _compile(self):
    """Compile every .java test file into ``self.tests_classes`` and
    return the resulting list of test class names."""
    os.mkdir(self.tests_classes)

    cp = generate_classpath([self.classpath, self.tests_src,
                             JUNIT, HAMCREST])
    timeout = 5 * 60  # seconds per javac invocation

    for source in sorted(get_class_files(self.tests_src, ext='.java')):
        self.jdk.run_javac(os.path.join(self.tests_src, source),
                           timeout, self.tests_src,
                           '-classpath', cp,
                           '-d', self.tests_classes)

    return self._test_classes()
def equivalence_analysis(jdk, junit, classpath, test_suites, mutants,
                         mutation_tool, sut_class, coverage_threshold,
                         output, mutants_dir, using_target=False):
    """Run the test suites against every mutant and classify each one as
    maybe-equivalent, not-equivalent, or invalid.

    Results are appended to ``<output>/equivalents.csv`` as the run
    progresses; each mutant object is also annotated in place
    (``maybe_equivalent`` / ``is_invalid``).

    :param mutation_tool: tool name ('pit', 'major' or 'mujava'); replaced
        by the corresponding tool instance for reading the mutation log.
    :param coverage_threshold: absolute count (>= 1) or fraction (< 1) of
        the original run's total tests a mutant must cover.
    :param using_target: when True, abort (return None) if the original
        program itself does not reach the coverage threshold.
    :return: the mutants dict (annotated), or None when aborted.
    """
    if mutation_tool == 'pit':
        mutation_tool = Pit(mutants_dir, sut_class)
    elif mutation_tool == 'major':
        mutation_tool = Major(mutants_dir)
    elif mutation_tool == 'mujava':
        mutation_tool = MuJava(mutants_dir)

    # mutants_dir is '<root>/<target>'; split on the last '/'.
    root_path = mutants_dir[0:mutants_dir.rfind('/')]
    target_mutant = mutants_dir[mutants_dir.rfind('/') + 1:]

    mutants = mutation_tool.read_log(log_dir=root_path,
                                     target_mutant=target_mutant)

    # Truncate/create the CSV with its header; appended to per mutant below.
    with open(os.path.join(output, 'equivalents.csv'), 'w') as f:
        f.write('id,maybe_equivalent,not_equivalent,coverage\n')

    original_dir = os.path.join(root_path, 'ORIGINAL')

    ori_coverage = 0
    ori_tests_total = 0
    line_coverage = 0

    # All mutants of one target share the mutated line; use the first one.
    if using_target and mutants:
        line_coverage = mutants[next(iter(mutants))].line_number

    if os.path.exists(original_dir):
        # Baseline run on the unmutated program: record timings, total
        # coverage, and invalidate suites that executed no tests.
        ori_test_suites = junit.run_test_suites(test_suites, original_dir,
                                                line_coverage)
        for t in ori_test_suites:
            test_suites[t].elapsed_time = ori_test_suites[t].elapsed_time
            ori_coverage += ori_test_suites[t].coverage
            ori_tests_total += ori_test_suites[t].tests_total
            if ori_test_suites[t].tests_total == 0:
                test_suites[t].is_valid = False

    # A fractional threshold is relative to the original run's test total.
    coverage_threshold = float(coverage_threshold)
    if coverage_threshold < 1:
        coverage_threshold = math.ceil(coverage_threshold * ori_tests_total)
    else:
        coverage_threshold = math.ceil(coverage_threshold)

    if using_target and ori_coverage < coverage_threshold:
        print('WARNING: Not enough coverage for this target.'
              ' coverage: {0}, test cases: {1}'.format(ori_coverage,
                                                       ori_tests_total))
        return None

    print('RUNNING TEST SUITES FOR ALL MUTANTS...')
    begin = datetime.now()

    for i, m in enumerate(mutants):
        mutant_begin = datetime.now()
        mutant = mutants[m]
        print('\tmutant: {0}... {1}/{2}'.format(mutant, i + 1, len(mutants)))

        if os.path.exists(mutant.path):
            compile_success = True
            # NOTE(review): short-circuits after the first failure, so the
            # remaining files are not compiled — acceptable since the mutant
            # is discarded anyway.
            for java_file in get_class_files(mutant.path, ext='.java'):
                compile_success = compile_success and jdk.run_javac(
                    java_file, 60, mutant.path, "-classpath", classpath)

            if compile_success:
                mutant.result.test_suites = junit.run_test_suites(
                    test_suites, mutant.path, mutant.line_number,
                    original_dir)

                coverage = 0
                fail = False
                maybe_in_loop = False
                coverage_log = []
                tests_total = 0
                fail_tests_total = 0

                for r in mutant.result.test_suites:
                    suite = mutant.result.test_suites[r]
                    coverage += suite.coverage
                    fail = fail or suite.fail
                    # NOTE(review): derived from the same .fail flag as
                    # `fail` above, so it is always equal to it — looks
                    # like it was meant to read a loop/timeout flag;
                    # confirm against the TestSuiteResult attributes.
                    maybe_in_loop = maybe_in_loop or suite.fail
                    tests_total += suite.tests_total
                    fail_tests_total += suite.fail_tests_total
                    coverage_log.append('{0}: {1}'.format(r, suite.coverage))

                print('\t\tcoverage: {0}/{4} ({1}) tests fail: {2}/{3}'.format(
                    coverage, ', '.join(coverage_log), fail_tests_total,
                    tests_total, coverage_threshold))

                if tests_total > 0 or maybe_in_loop:
                    with open(os.path.join(output, 'equivalents.csv'),
                              'a') as f:
                        if coverage >= coverage_threshold and not fail:
                            # Covered at the threshold and no test fails:
                            # candidate equivalent mutant.
                            print('\t\t +++ THIS MUTANT MAY BE EQUIVALENT!')
                            mutant.maybe_equivalent = True
                            f.write('{0},{1},{2},{3}\n'.format(
                                mutant.id, 'x', '', coverage))
                        elif fail:
                            print('\t\t --- THIS MUTANT IS NOT EQUIVALENT!')
                            f.write('{0},{1},{2},{3}\n'.format(
                                mutant.id, '', 'x', coverage))
                        else:
                            # Undecided: insufficient coverage, no failure.
                            f.write('{0},{1},{2},{3}\n'.format(
                                mutant.id, '', '', coverage))
                else:
                    mutant.is_invalid = True
            else:
                print('\t\tWARNING: mutant not compile: {0}'.format(
                    mutant.path))
                mutant.is_invalid = True
        else:
            print('\t\tWARNING: mutant directory not found: {0}'.format(
                mutant.path))
            mutant.is_invalid = True

        print('\t********************************* {0}'.format(
            datetime.now() - mutant_begin))

    print('############ END ANALYSIS ########### {0}'.format(
        datetime.now() - begin))

    return mutants