def test_pareto_frontier_raise_error_when_missing_data(self):
    with self.assertRaises(ValueError):
        pareto_frontier_evaluator(
            model=self.model,
            objective_thresholds=self.objective_thresholds,
            objective_weights=self.objective_weights,
            Yvar=self.Yvar,
        )
def test_pareto_frontier_evaluator_raw(self):
    Yvar = torch.diag_embed(self.Yvar)
    Y, cov, indx = pareto_frontier_evaluator(
        model=self.model,
        objective_weights=self.objective_weights,
        objective_thresholds=self.objective_thresholds,
        Y=self.Y,
        Yvar=Yvar,
    )
    pred = self.Y[2:4]
    self.assertTrue(torch.allclose(Y, pred), f"{Y} does not match {pred}")
    expected_cov = Yvar[2:4]
    self.assertTrue(torch.allclose(expected_cov, cov))
    self.assertTrue(torch.equal(torch.arange(2, 4), indx))
    # Omit objective_thresholds.
    Y, cov, indx = pareto_frontier_evaluator(
        model=self.model,
        objective_weights=self.objective_weights,
        Y=self.Y,
        Yvar=Yvar,
    )
    pred = self.Y[2:]
    self.assertTrue(torch.allclose(Y, pred), f"{Y} does not match {pred}")
    expected_cov = Yvar[2:]
    self.assertTrue(torch.allclose(expected_cov, cov))
    self.assertTrue(torch.equal(torch.arange(2, 5), indx))
    # Change objective_weights so the goal is to minimize b.
    Y, cov, indx = pareto_frontier_evaluator(
        model=self.model,
        objective_weights=torch.tensor([1.0, -1.0]),
        objective_thresholds=self.objective_thresholds,
        Y=self.Y,
        Yvar=Yvar,
    )
    pred = self.Y[[0, 4]]
    self.assertTrue(
        torch.allclose(Y, pred), f"actual {Y} does not match pred {pred}"
    )
    expected_cov = Yvar[[0, 4]]
    self.assertTrue(torch.allclose(expected_cov, cov))
    # Test no points better than the reference point.
    Y, cov, indx = pareto_frontier_evaluator(
        model=self.model,
        objective_weights=self.objective_weights,
        objective_thresholds=torch.full_like(self.objective_thresholds, 100.0),
        Y=self.Y,
        Yvar=Yvar,
    )
    self.assertTrue(torch.equal(Y, self.Y[:0]))
    self.assertTrue(torch.equal(cov, torch.zeros(0, 3, 3)))
    self.assertTrue(torch.equal(torch.tensor([], dtype=torch.long), indx))
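# NOTE (illustrative, not part of the original suite): as the assertions
# above show, the third return value `indx` holds the row indices of the
# frontier points in the original data, so frontier outcomes can be mapped
# back to the inputs that produced them. A minimal sketch of that pattern,
# assuming the same fixtures used above; the method name is hypothetical.
def example_map_frontier_back_to_inputs(self):
    Y, cov, indx = pareto_frontier_evaluator(
        model=self.model,
        objective_weights=self.objective_weights,
        objective_thresholds=self.objective_thresholds,
        Y=self.Y,
        Yvar=torch.diag_embed(self.Yvar),
    )
    # Inputs corresponding to the Pareto-optimal outcome rows.
    frontier_X = self.X[indx]
    self.assertEqual(frontier_X.shape[0], Y.shape[0])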
def test_pareto_frontier_evaluator_predict(self):
    # The evaluator returns a (Y, cov, indx) triple; the index tensor is
    # unused here.
    Y, cov, _ = pareto_frontier_evaluator(
        model=self.model,
        objective_weights=self.objective_weights,
        objective_thresholds=self.objective_thresholds,
        X=self.X,
    )
    pred = self.Y[2:4]
    self.assertTrue(
        torch.allclose(Y, pred), f"actual {Y} does not match pred {pred}"
    )
def test_pareto_frontier_evaluator_with_outcome_constraints(self):
    # The evaluator returns a (Y, cov, indx) triple; the index tensor is
    # unused here.
    Y, cov, _ = pareto_frontier_evaluator(
        model=self.model,
        objective_weights=self.objective_weights,
        objective_thresholds=self.objective_thresholds,
        Y=self.Y,
        Yvar=self.Yvar,
        outcome_constraints=self.outcome_constraints,
    )
    pred = self.Y[2, :]
    self.assertTrue(
        torch.allclose(Y, pred), f"actual {Y} does not match pred {pred}"
    )
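# For orientation: these tests rely on fixtures from a setUp that falls
# outside this excerpt. The sketch below is an assumption, reconstructed so
# that every assertion above holds: rows 2-3 of Y form the frontier under
# the default weights and thresholds, rows 2-4 without thresholds, rows 0
# and 4 when minimizing b, and row 2 alone under the outcome constraint.
# The stubbed model is also an assumption (it only supplies the predict
# contract used by the predict-based test); the real suite presumably fits
# an actual multi-objective BoTorch model instead.
def setUp(self) -> None:
    from unittest.mock import Mock  # would normally live at module top

    self.X = torch.tensor(
        [[1.0, 0.0], [1.0, 1.0], [1.0, 3.0], [2.0, 2.0], [3.0, 1.0]]
    )
    # Columns 0 and 1 are the objectives a and b; column 2 is a
    # constraint metric.
    self.Y = torch.tensor(
        [
            [1.0, 0.0, 0.0],
            [1.0, 1.0, 1.0],
            [1.0, 3.0, 3.0],
            [2.0, 2.0, 4.0],
            [3.0, 1.0, 3.0],
        ]
    )
    self.Yvar = torch.zeros(5, 3)
    self.objective_weights = torch.tensor([1.0, 1.0])
    self.objective_thresholds = torch.tensor([0.5, 1.5])
    # Feasible iff the third metric is at most 3.5 (A @ y <= b), which
    # rules out row 3.
    self.outcome_constraints = (
        torch.tensor([[0.0, 0.0, 1.0]]),
        torch.tensor([[3.5]]),
    )
    # Stub model: predict(X) returns the observed outcomes and their
    # covariances, so the predict-based test sees the same frontier as
    # the raw tests.
    self.model = Mock()
    self.model.predict.return_value = (self.Y, torch.diag_embed(self.Yvar))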