    def test_get_environment_by_id(self):
        em = EnvironmentsManager()
        env1 = Environment()
        env2 = Environment()
        env3 = Environment()
        # Stub get_id so each instance reports a distinct id.
        env1.get_id = lambda: "Env1"
        env2.get_id = lambda: "Env2"
        env3.get_id = lambda: "Env3"
        em.add_environment(env1)
        em.add_environment(env2)
        em.add_environment(env3)
        self.assertEqual(em.get_environment_by_id("Env1"), env1)
        self.assertEqual(em.get_environment_by_id("Env2"), env2)
        self.assertEqual(em.get_environment_by_id("Env3"), env3)
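The test relies on only two methods of EnvironmentsManager. A minimal sketch of a manager that would satisfy it, assuming environments live in a plain dict keyed by id (the final example below indexes mgr.environments the same way); the real class may differ:

    class EnvironmentsManager:
        # Sketch only: a registry of environments keyed by their id.
        def __init__(self):
            self.environments = {}

        def add_environment(self, environment):
            # Index each environment under the id it reports.
            self.environments[environment.get_id()] = environment

        def get_environment_by_id(self, env_id):
            # Returns None for ids that were never registered.
            return self.environments.get(env_id)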
Example #2
    def test_is_supported(self):
        tk = TaskHeaderKeeper(EnvironmentsManager(), 10.0)
        self.assertFalse(tk.is_supported({}))
        task = {"environment": Environment.get_id(), "max_price": 0}
        self.assertFalse(tk.is_supported(task))
        e = Environment()
        e.accept_tasks = True
        tk.environments_manager.add_environment(e)
        self.assertFalse(tk.is_supported(task))
        task["max_price"] = 10.0
        self.assertFalse(tk.is_supported(task))
        task["min_version"] = APP_VERSION
        self.assertTrue(tk.is_supported(task))
        task["max_price"] = 10.5
        self.assertTrue(tk.is_supported(task))
        config_desc = Mock()
        config_desc.min_price = 13.0
        tk.change_config(config_desc)
        self.assertFalse(tk.is_supported(task))
        config_desc.min_price = 10.0
        tk.change_config(config_desc)
        self.assertTrue(tk.is_supported(task))
        task["min_version"] = "120"
        self.assertFalse(tk.is_supported(task))
        task["min_version"] = tk.app_version
        self.assertTrue(tk.is_supported(task))
        task["min_version"] = "abc"
        with self.assertLogs(logger=logger, level=1):
            self.assertFalse(tk.is_supported(task))
Example #3
    def test_get_performance(self):
        # given
        perf_value = 6666.6
        perf = Performance(environment_id=Environment.get_id(),
                           value=perf_value)
        perf.save()

        # then
        self.assertEqual(self.env.get_performance(), perf_value)
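The assertion implies that Environment.get_performance reads back the Performance row saved under its own id. A plausible sketch, assuming a peewee-style model (the other examples query it via Performance.get) and an assumed 0.0 fallback when no row exists:

    def get_performance(self):
        # Sketch: look up this environment's stored benchmark score.
        try:
            return Performance.get(
                Performance.environment_id == self.get_id()).value
        except Performance.DoesNotExist:
            return 0.0  # assumed default when no benchmark has run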
Example #4
    def test_is_supported(self):
        em = EnvironmentsManager()
        em.environments = {}
        em.support_statuses = {}

        tk = TaskHeaderKeeper(environments_manager=em,
                              node=p2p.Node(),
                              min_price=10.0)

        header = get_task_header()
        header.fixed_header.environment = None
        header.fixed_header.max_price = None
        header.fixed_header.min_version = None
        self.assertFalse(tk.check_support(header))

        header.fixed_header.environment = Environment.get_id()
        header.fixed_header.max_price = 0
        supported = tk.check_support(header)
        self.assertFalse(supported)
        self.assertIn(UnsupportReason.ENVIRONMENT_MISSING, supported.desc)

        e = Environment()
        e.accept_tasks = True
        tk.environments_manager.add_environment(e)
        supported = tk.check_support(header)
        self.assertFalse(supported)
        self.assertIn(UnsupportReason.MAX_PRICE, supported.desc)

        header.fixed_header.max_price = 10.0
        supported = tk.check_support(header)
        self.assertFalse(supported)
        self.assertIn(UnsupportReason.APP_VERSION, supported.desc)

        header.fixed_header.min_version = golem.__version__
        self.assertTrue(tk.check_support(header))

        header.fixed_header.max_price = 10.5
        self.assertTrue(tk.check_support(header))

        config_desc = mock.Mock()
        config_desc.min_price = 13.0
        tk.change_config(config_desc)
        self.assertFalse(tk.check_support(header))

        config_desc.min_price = 10.0
        tk.change_config(config_desc)
        self.assertTrue(tk.check_support(header))

        header.fixed_header.min_version = "120"
        self.assertFalse(tk.check_support(header))

        header.fixed_header.min_version = tk.app_version
        self.assertTrue(tk.check_support(header))

        header.fixed_header.min_version = "abc"
        with self.assertLogs(logger=logger, level='WARNING'):
            self.assertFalse(tk.check_support(header))
Example #5
    def test_benchmarks_not_needed_when_results_saved(self):
        # given
        for env_id in self.b.benchmarks:
            Performance.update_or_create(env_id, 100)

        Performance.update_or_create(DefaultEnvironment.get_id(), 3)

        # then
        assert not self.b.benchmarks_needed()
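Performance.update_or_create acts as an upsert here. A hypothetical sketch of what it might do, assuming a peewee model with environment_id and value fields; the method body is an assumption, not taken from the source:

    @classmethod
    def update_or_create(cls, env_id, value):
        # Sketch: update the existing row, or insert one if none matched.
        updated = (cls.update(value=value)
                   .where(cls.environment_id == env_id)
                   .execute())
        if not updated:
            cls.create(environment_id=env_id, value=value)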
Example #6
    def run_benchmark_for_env_id(self, env_id, callback, errback):
        if env_id == DefaultEnvironment.get_id():
            self.run_default_benchmark(callback, errback)
        else:
            benchmark_data = self.benchmarks.get(env_id)
            if benchmark_data:
                self.run_benchmark(benchmark_data[0], benchmark_data[1],
                                   env_id, callback, errback)
            else:
                raise Exception("Unknown environment: {}".format(env_id))
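A minimal usage sketch; benchmark_manager, on_success and on_error are illustrative names, not part of the source:

    def on_success(performance):
        print("benchmark finished, score:", performance)

    def on_error(error):
        print("benchmark failed:", error)

    benchmark_manager.run_benchmark_for_env_id(
        DefaultEnvironment.get_id(), on_success, on_error)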
Example #7
    def test_get_min_accepted_performance(self):
        # given
        p = Performance(environment_id=Environment.get_id(),
                        min_accepted_step=100)
        p.save()
        MinPerformanceMultiplier.set(3.141)

        # then
        self.assertEqual(MinPerformanceMultiplier.get(), 3.141)
        self.assertEqual(self.env.get_min_accepted_performance(), 314.1)
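The expected 314.1 is the stored min_accepted_step (100) scaled by the multiplier (3.141), which suggests an implementation along these lines (a sketch inferred from the test, not the verified source):

    def get_min_accepted_performance(self):
        # Sketch: scale the stored step by the global multiplier.
        step = Performance.get(
            Performance.environment_id == self.get_id()).min_accepted_step
        return MinPerformanceMultiplier.get() * step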
Example #8
    def run_all_benchmarks(self, success=None, error=None):
        logger.info('Running all benchmarks with num_cores=%r',
                    self.task_server.client.config_desc.num_cores)

        def run_non_default_benchmarks(_performance=None):
            self.run_benchmarks(copy(self.benchmarks), success, error)

        if DefaultEnvironment.get_id() not in self.get_saved_benchmarks_ids():
            # run once in lifetime, since it's for single CPU core
            self.run_default_benchmark(run_non_default_benchmarks, error)
        else:
            run_non_default_benchmarks()
Example #9
    def test_run_non_default_benchmarks(self, br_mock, mpt_mock, *_):
        # given
        Performance.update_or_create(DefaultEnvironment.get_id(), -7)

        def _run():
            # call success callback with performance = call_count * 100
            br_mock.assert_called()
            success_callback = br_mock.call_args[1].get('success_callback')
            assert callable(success_callback)
            return success_callback(br_mock.call_count * 100)

        br_mock.return_value.run.side_effect = _run

        # when
        self.b.run_all_benchmarks()

        # then
        assert mpt_mock.call_count == 0
        assert br_mock.call_count == len(self.b.benchmarks)
        for idx, env_id in enumerate(reversed(list(self.b.benchmarks))):
            assert (1 + idx) * 100 == \
                   Performance.get(Performance.environment_id == env_id).value
Example #10
    def benchmarks_needed(self):
        if self.benchmarks:
            ids = self.get_saved_benchmarks_ids()
            return not set(self.benchmarks.keys()
                           | {DefaultEnvironment.get_id()}).issubset(ids)
        return False
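benchmarks_needed treats the default benchmark as always required and returns True when any required id lacks a saved result. A sketch of the companion get_saved_benchmarks_ids, assuming it simply collects the environment ids present in the Performance table:

    def get_saved_benchmarks_ids(self):
        # Sketch: ids of all environments with a stored benchmark result.
        return {p.environment_id for p in Performance.select()}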
Example #11
    def test_load_config_manager(self):
        mgr = EnvironmentsManager()
        env = Environment()
        mgr.environments[env.get_id()] = env
        mgr.load_config(self.path)
        assert mgr.env_config