def test_repeated_evaluation(self):
    """Check a repeated Evaluator against a step-by-step sequential run.

    The same treatment is applied at every step; the expected final
    proliferation for a cell line is the product of the per-step relative
    proliferations computed with a fresh sequential Simulator.
    """
    SEQUENTIAL_CONFIG = {
        "n_steps": 3,
        "cell_lines": ['DV90', 'HS695T'],
        "objective": TestObjective(),
        "max_dosage": 8000,
        "domain": UnitSimplex(7),
        "scale": "linear",
    }
    repeated_evaluator = Evaluator(SEQUENTIAL_CONFIG, self.n_envs,
                                   store=True, repeated=True)
    x = np.array([0.5, 0.5, 0, 0, 0, 0, 0])
    # Fix: derive step count, dosage and scale from SEQUENTIAL_CONFIG
    # itself (the original read max_dosage/scale from the unrelated
    # TEST_CONFIG and the step count from self.n_steps, which only
    # coincidentally match).
    n_steps = SEQUENTIAL_CONFIG["n_steps"]
    treats = [
        prepare_dict(x,
                     max_dosage=SEQUENTIAL_CONFIG["max_dosage"],
                     scale=SEQUENTIAL_CONFIG["scale"])
        for _ in range(n_steps)
    ]
    # Evaluate via the (parallel, repeated) evaluator.
    _, prolifs = repeated_evaluator.evaluate([x])
    # Recompute the expected value sequentially for the second cell
    # line ('HS695T'), re-initializing the simulator each step.
    p = 1
    for i in range(n_steps):
        simulator = Simulator()
        simulator.initialize("HS695T")
        p *= simulator.apply_treatment(treats[i])
    print("p: ", p)
    print("prolifs: ", prolifs)  # fixed typo: was "prolis"
    self.assertAlmostEqual(prolifs[0][1], p)
    repeated_evaluator.terminate()
class TestCovarianceMatrixAdaption(unittest.TestCase):
    """Tests for the CMA-ES optimizer running against the parallel Evaluator."""

    def setUp(self):
        self.n_envs = 4
        self.evaluator = Evaluator(TEST_CONFIG, self.n_envs, store=True)
        self.domain = UnitSimplex(7)

    def test_cma_es(self):
        mu, obj, prolif = cma_es(self.evaluator, self.domain, MAX_ITER,
                                 verbose=True, seed=23)
        # Reported objective must agree with the average proliferation and
        # the optimum must lie inside the search domain.  assertLess /
        # assertLessEqual (instead of assertTrue on a comparison) give
        # informative failure messages with the actual values.
        self.assertLess(np.abs(obj - np.average(prolif)), EPS)
        self.assertTrue(self.domain.contains(mu))
        # Compare the optimizer's objective with an independent sequential
        # computation of the same treatment.
        treatment = prepare_dict(mu.flatten(),
                                 max_dosage=TEST_CONFIG["max_dosage"],
                                 scale=TEST_CONFIG["scale"])
        print(treatment)
        prolifs = []
        for line in TEST_CONFIG["cell_lines"]:
            simulator = Simulator()
            simulator.initialize(line)
            prolifs.append(simulator.apply_treatment(treatment))
        o = TEST_CONFIG["objective"].eval(prolifs, treatment)
        self.assertLessEqual(np.abs(obj - o), EPS)

    def tearDown(self):
        # terminate() performs an internal check that all worker
        # environments shut down cleanly.
        self.evaluator.terminate()
def setUp(self):
    # Single-step environment over four cell lines with an identity
    # reward and no dosage penalty.
    self.n_steps = 1
    self.max_dosage = 8000
    self.cell_lines = ['DV90', 'HS695T', 'NCIH1092', 'PK59']
    self.reward_function = lambda x: x
    self.penalty_function = lambda x: 0
    self.treatment = np.array([0.35, 0.05, 0.1, 0.1, 0.1, 0.15, 0.05])
    self.env = SimulatorEnv(self.n_steps, self.cell_lines, self.max_dosage,
                            TestObjective(), UnitSimplex(7), "linear")
class TestCrossEntropy(unittest.TestCase):
    """Tests for the cross-entropy method on the unit simplex."""

    def setUp(self):
        self.n_envs = 4
        self.domain = UnitSimplex(7)
        self.evaluator = Evaluator(TEST_CONFIG, self.n_envs, store=True)

    def test_cross_entropy(self):
        result = cross_entropy_method(self.evaluator, self.domain, MAX_ITER,
                                      20, 10, 7, verbose=True, seed=23)
        mu, obj, prolif = result
        # The objective reported by the optimizer equals the averaged
        # proliferation, and the returned optimum lies in the domain.
        self.assertTrue(np.abs(obj - np.average(prolif)) < EPS)
        self.assertTrue(self.domain.contains(mu))

    def tearDown(self):
        # Internal check that every worker environment terminates.
        self.evaluator.terminate()
# Absolute tolerance for floating-point comparisons.
# NOTE(review): 10e-6 == 1e-5; confirm 1e-6 was not intended.
EPS = 10e-6
# Number of random points evaluated in TestEvaluator.
EVALS = 5


class TestObjective():
    # A callable object is used instead of a plain function because
    # functions defined here are not picklable when shipped to the
    # evaluator's worker processes.
    def eval(self, rel_proliferations, action_dict):
        # Objective: mean relative proliferation; the action dict is ignored.
        return np.average(rel_proliferations)


# Shared single-step configuration used by the evaluator/optimizer tests.
TEST_CONFIG = {
    "n_steps": 1,
    "cell_lines": ['DV90', 'HS695T'],
    "objective": TestObjective(),
    "max_dosage": 8000,
    "domain": UnitSimplex(7),
    "scale": "linear"
}


class TestEvaluator(unittest.TestCase):
    def setUp(self):
        self.n_envs = 2
        self.n_steps = 3
        self.evaluator = Evaluator(TEST_CONFIG, self.n_envs, store=True)
        # Random points scaled to (approximately) the unit simplex.
        # NOTE(review): sum(x + EPS) adds EPS to every component before
        # summing (i.e. sum(x) + 7*EPS); presumably sum(x) + EPS was
        # intended to guard against a zero denominator — confirm.
        self.xs = [np.random.uniform(0, 1, 7) for i in range(EVALS)]
        self.xs = [x / sum(x + EPS) for x in self.xs]

    def test_evaluate(self):
        # Evaluates the same batch twice; with store=True the second call
        # presumably exercises the evaluator's store/cache path — confirm.
        # NOTE(review): no assertions follow in this chunk; the test body
        # may continue elsewhere or be incomplete.
        ys, prolifs = self.evaluator.evaluate(self.xs)
        ys, prolifs = self.evaluator.evaluate(self.xs)
def setUp(self):
    # Four parallel worker environments over the shared test
    # configuration, searching the 7-dimensional unit simplex.
    self.n_envs = 4
    self.domain = UnitSimplex(7)
    self.evaluator = Evaluator(TEST_CONFIG, self.n_envs, store=True)