Example #1
    def __init__(self, config):
        self.reviewed_eval_result_file = 'reviewed-result.csv'
        self.visualize_result_file = 'result.csv'

        self.config = config

        white_list = []
        black_list = []
        if 'white_list' in config:
            white_list.extend(config.white_list)
        if 'black_list' in config:
            black_list.extend(config.black_list)

        if 'dataset' in config:
            white_list.extend(get_white_list(self.DATASETS_FILE_PATH, config.dataset))

        self.runner = TaskRunner(Benchmark.DATA_PATH, white_list, black_list)
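The `'white_list' in config` and `'black_list' in config` checks work because `argparse.Namespace` implements `__contains__`, so a key is "in" the config exactly when the corresponding attribute was set. A tiny standalone illustration, assuming `config` is the `Namespace` produced by MUBench's CLI:

from argparse import Namespace

config = Namespace(white_list=['project-a'])
assert 'white_list' in config       # attribute present, so it is extended above
assert 'black_list' not in config   # attribute absent, so the default [] is kept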
Example #2
    def setup(self):
        self.temp_dir = mkdtemp(prefix='mubench-datareader-test_')
        self.test_task = MagicMock()  # type: ProjectTask

        self.uut = TaskRunner(self.temp_dir, white_list=[], black_list=[])
        self.uut.add(self.test_task)
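The `# type: ProjectTask` annotation is a PEP 484 type comment: it tells type checkers and IDEs to treat the `MagicMock` as a `ProjectTask`, so calls like `process_project` resolve against that interface. A purely illustrative equivalent spelling using `typing.cast` (not how MUBench writes it):

from typing import cast
from unittest.mock import MagicMock

test_task = cast('ProjectTask', MagicMock())  # same effect as the type comment above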
Example #3
from os.path import join, realpath

# Project-internal collaborators (TaskRunner, Checkout, Compile, Detect, Info,
# Experiment, the publish tasks, stats, get_white_list, find_detector,
# check_all_requirements) and the module-level logger come from MUBench's own
# modules and are omitted in this excerpt.


class Benchmark:
    DATA_PATH = realpath("data")
    CHECKOUTS_PATH = realpath("checkouts")
    COMPILES_PATH = CHECKOUTS_PATH
    DETECTORS_PATH = realpath("detectors")
    FINDINGS_PATH = realpath("findings")

    DATASETS_FILE_PATH = join(DATA_PATH, 'datasets.yml')

    EX1_SUBFOLDER = "detect-only"
    EX2_SUBFOLDER = "mine-and-detect"
    EX3_SUBFOLDER = "mine-and-detect"

    def __init__(self, config):
        self.reviewed_eval_result_file = 'reviewed-result.csv'
        self.visualize_result_file = 'result.csv'

        self.config = config

        white_list = []
        black_list = []
        if 'white_list' in config:
            white_list.extend(config.white_list)
        if 'black_list' in config:
            black_list.extend(config.black_list)

        if 'dataset' in config:
            white_list.extend(get_white_list(self.DATASETS_FILE_PATH, config.dataset))

        self.runner = TaskRunner(Benchmark.DATA_PATH, white_list, black_list)

    def _setup_stats(self) -> None:
        stats_calculator = stats.get_calculator(self.config.script)
        self.runner.add(stats_calculator)

    def _setup_info(self):
        self.runner.add(Info(Benchmark.CHECKOUTS_PATH, Benchmark.COMPILES_PATH))

    def _setup_checkout(self):
        checkout_handler = Checkout(Benchmark.CHECKOUTS_PATH, self.config.force_checkout)
        self.runner.add(checkout_handler)

    def _setup_compile(self):
        compile_handler = Compile(Benchmark.CHECKOUTS_PATH, Benchmark.COMPILES_PATH, self.config.force_compile)
        self.runner.add(compile_handler)

    def _setup_detect(self):
        experiment = self.__get_experiment()
        self.runner.add(Detect(Benchmark.COMPILES_PATH, experiment, self.config.timeout, self.config.force_detect))

    def _setup_publish_findings(self):
        experiment = self.__get_experiment()
        self.runner.add(PublishFindingsTask(experiment, self.config.dataset, self.config.review_site_url,
                                            self.config.review_site_user, self.config.review_site_password))

    def _setup_publish_metadata(self):
        self.runner.add(PublishMetadataTask(self.config.review_site_url,
                                            self.config.review_site_user, self.config.review_site_password))

    def __get_experiment(self):
        ex_ids = {
            1: Experiment.PROVIDED_PATTERNS,
            2: Experiment.TOP_FINDINGS,
            3: Experiment.BENCHMARK
        }
        try:
            limit = self.config.limit
        except AttributeError:
            limit = 0
        return Experiment(ex_ids.get(self.config.experiment), self.__get_detector(), Benchmark.FINDINGS_PATH, limit)

    def __get_detector(self):
        try:
            java_options = ['-' + option for option in self.config.java_options]
            return find_detector(self.DETECTORS_PATH, self.config.detector, java_options)
        except ValueError as e:
            logger.critical(e)
            exit()

    def run(self) -> None:
        if self.config.task == 'check':
            check_all_requirements()
            return
        elif self.config.task == 'info':
            self._setup_info()
        elif self.config.task == 'checkout':
            self._setup_checkout()
        elif self.config.task == 'compile':
            self._setup_checkout()
            self._setup_compile()
        elif self.config.task == 'detect':
            self._setup_checkout()
            self._setup_compile()
            self._setup_detect()
        elif self.config.task == 'publish':
            if self.config.publish_task == 'findings':
                self._setup_checkout()
                self._setup_compile()
                self._setup_detect()
                self._setup_publish_findings()
            elif self.config.publish_task == 'metadata':
                self._setup_checkout()
                self._setup_publish_metadata()
        elif self.config.task == 'stats':
            self._setup_stats()

        self.runner.run()
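A minimal sketch of driving this class from the command line. The parser below is illustrative only and is stripped down to the attributes this excerpt actually reads for the 'check', 'info', and 'checkout' tasks; MUBench's real CLI defines many more task-specific options (detector, timeout, force flags, review-site credentials, and so on):

from argparse import ArgumentParser

def main():
    # hypothetical, stripped-down parser: just enough config attributes for
    # Benchmark.__init__ and the 'check'/'info'/'checkout' branches of run()
    parser = ArgumentParser(prog='mubench')
    parser.add_argument('task', choices=['check', 'info', 'checkout'])
    parser.add_argument('--white-list', dest='white_list', nargs='*', default=[])
    parser.add_argument('--black-list', dest='black_list', nargs='*', default=[])
    parser.add_argument('--force-checkout', dest='force_checkout', action='store_true')
    config = parser.parse_args()
    Benchmark(config).run()

if __name__ == '__main__':
    main()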
Example #4
from distutils.dir_util import remove_tree
from os.path import join
from tempfile import mkdtemp
from unittest.mock import MagicMock, call

from nose.tools import assert_equals, assert_raises

# The test helpers (create_project, create_file) and the classes under test
# (TaskRunner, Requirement, ProjectTask) come from MUBench's own modules and
# are omitted in this excerpt.


class TestTaskRunner:
    def setup(self):
        self.temp_dir = mkdtemp(prefix='mubench-datareader-test_')
        self.test_task = MagicMock()  # type: ProjectTask

        self.uut = TaskRunner(self.temp_dir, white_list=[], black_list=[])
        self.uut.add(self.test_task)

    def teardown(self):
        remove_tree(self.temp_dir)

    def test_processes_project(self):
        project = create_project("p1")
        self.uut._get_projects = MagicMock(return_value=[project])

        self.uut.run()

        self.test_task.process_project.assert_called_with(project)

    def test_checks_requirements(self):
        requirement = Requirement("test requirement")
        requirement.check = MagicMock()
        self.test_task.get_requirements = MagicMock(return_value=[requirement])

        self.uut.run()

        requirement.check.assert_called_with()

    def test_stops_on_unsatisfied_requirement(self):
        requirement = Requirement("test requirement")
        requirement.check = MagicMock(side_effect=ValueError("not satisfied"))
        self.test_task.get_requirements = MagicMock(return_value=[requirement])

        with assert_raises(SystemExit):
            self.uut.run()

        # assert that exit comes before task is started
        self.test_task.start.assert_not_called()

    def test_starts_task(self):
        self.uut.run()

        self.test_task.start.assert_called_with()

    def test_ends_task(self):
        self.uut.run()

        self.test_task.end.assert_called_with()

    def test_finds_all_projects(self):
        p1 = create_project("p1", base_path=self.temp_dir)
        create_file(p1._project_file)
        p2 = create_project("p2", base_path=self.temp_dir)
        create_file(p2._project_file)

        self.uut.run()

        assert_equals([call(p1), call(p2)], self.test_task.process_project.call_args_list)

    def test_ignores_non_project_directories(self):
        create_file(join(self.temp_dir, "p1", "iamnotaproject.yml"))

        self.uut.run()

        self.test_task.process_project.assert_not_called()

    def test_skips_blacklisted_project(self):
        self.uut._get_projects = MagicMock(return_value=[create_project("p1")])
        self.uut.black_list.append("p1")

        self.uut.run()

        self.test_task.process_project.assert_not_called()

    def test_runs_only_whitelisted_project(self):
        p2 = create_project("p2")
        self.uut._get_projects = MagicMock(return_value=[create_project("p1"), p2])
        self.uut.white_list.append("p2")

        self.uut.run()

        assert_equals([call(p2)], self.test_task.process_project.call_args_list)

    def test_runs_whitelisted_project_if_version_only_whitelist(self):
        project = create_project("p")
        self.uut._get_projects = MagicMock(return_value=[project])
        self.uut.white_list.append("p.42")

        self.uut.run()

        self.test_task.process_project.assert_called_with(project)

    def test_adds_project_to_blacklist_when_task_answers_skip(self):
        self.uut._get_projects = MagicMock(return_value=[create_project("p1")])
        self.test_task.process_project = MagicMock(return_value=["p1"])

        self.uut.run()

        assert_equals(["p1"], self.uut.black_list)