# Imports reconstructed for this excerpt; the exact module paths are assumed
# from the golem source layout.
from os import path

from golem.environments.environment import Environment
from golem.environments.minperformancemultiplier import \
    MinPerformanceMultiplier
from golem.model import Performance
from golem.testutils import DatabaseFixture, PEP8MixIn


class EnvTest(DatabaseFixture, PEP8MixIn):
    PEP8_FILES = ["golem/environments/environment.py"]

    def setUp(self):
        super().setUp()
        self.env = Environment()

    def test_get_performance(self):
        # given
        perf_value = 6666.6
        perf = Performance(environment_id=Environment.get_id(),
                           value=perf_value)
        perf.save()
        # then
        self.assertEqual(self.env.get_performance(), perf_value)

    def test_get_source_code(self):
        # check defaults
        assert self.env.get_source_code() is None

        # given
        file_name = path.join(self.path, "mainprogramfile")
        self.env.main_program_file = file_name
        # then
        assert self.env.get_source_code() is None

        # re-given
        with open(file_name, 'w') as f:
            f.write("PROGRAM CODE")
        # then
        self.env.main_program_file = file_name
        assert self.env.get_source_code() == "PROGRAM CODE"

    def test_run_default_benchmark(self):
        assert Environment.get_performance() == 0.0
        assert Environment.run_default_benchmark(save=True) > 0.0
        assert Environment.get_performance() > 0.0

    def test_get_min_accepted_performance_default(self):
        self.assertEqual(MinPerformanceMultiplier.get(), 0.0)
        self.assertEqual(self.env.get_min_accepted_performance(), 0.0)

    def test_get_min_accepted_performance(self):
        # given
        p = Performance(environment_id=Environment.get_id(),
                        min_accepted_step=100)
        p.save()
        MinPerformanceMultiplier.set(3.141)
        # then
        self.assertEqual(MinPerformanceMultiplier.get(), 3.141)
        self.assertEqual(self.env.get_min_accepted_performance(), 314.1)
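# Note on the expected values above: test_get_min_accepted_performance saves a
# Performance row with min_accepted_step=100 and sets the global multiplier to
# 3.141, then expects 314.1. That only holds if get_min_accepted_performance()
# multiplies the stored step by MinPerformanceMultiplier.get(). A minimal
# sketch of that assumed computation (not the actual Environment source):
#
#     step = Performance.get(
#         Performance.environment_id == Environment.get_id(),
#     ).min_accepted_step                           # 100 in the test
#     multiplier = MinPerformanceMultiplier.get()   # 3.141 in the test
#     min_accepted = step * multiplier              # 100 * 3.141 == 314.1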
    # Method of the benchmark-manager test class; the class definition and the
    # @mock.patch decorators that inject br_mock (the BenchmarkRunner class)
    # and mpt_mock (the default-performance benchmark) are not shown here.
    def test_run_all_benchmarks(self, br_mock, mpt_mock, *_):
        # given
        mpt_mock.return_value = 314.15  # default performance

        def _run():
            # call success callback with performance = call_count * 100
            br_mock.assert_called()
            success_callback = br_mock.call_args[1].get('success_callback')
            assert callable(success_callback)
            return success_callback(br_mock.call_count * 100)

        br_mock.return_value.run.side_effect = _run

        # when
        self.b.run_all_benchmarks()

        # then
        assert mpt_mock.call_count == 1
        assert DefaultEnvironment.get_performance() == 314.15
        assert br_mock.call_count == len(self.b.benchmarks)
        for idx, env_id in enumerate(reversed(list(self.b.benchmarks))):
            assert (1 + idx) * 100 == \
                Performance.get(Performance.environment_id == env_id).value
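    # Reading the mocked flow above: run_all_benchmarks() evidently constructs
    # one BenchmarkRunner per registered benchmark and passes a
    # success_callback keyword to it. The _run side effect stands in for a
    # real benchmark run: when the n-th runner starts, br_mock.call_count is
    # n, so it reports a score of n * 100 through that callback. The final
    # loop checks the persisted Performance rows, walking the benchmarks in
    # reverse because, per these assertions, the last environment in
    # iteration order is benchmarked first.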