def get_botorch_objective(
    model: Model,
    objective_weights: Tensor,
    use_scalarized_objective: bool = True,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
    X_observed: Optional[Tensor] = None,
) -> AcquisitionObjective:
    """Constructs a BoTorch `AcquisitionObjective` object.

    Args:
        model: A BoTorch Model.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        use_scalarized_objective: A boolean parameter that defaults to True,
            specifying whether `ScalarizedObjective` should be used.
            NOTE: when using outcome_constraints, use_scalarized_objective
            will be ignored.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models.)
        objective_thresholds: A tensor containing thresholds forming a
            reference point from which to calculate Pareto frontier
            hypervolume. Points that do not dominate the objective_thresholds
            contribute nothing to hypervolume.
        X_observed: Observed points that are feasible and appear in the
            objective or the constraints. None if there are no such points.

    Returns:
        A BoTorch `AcquisitionObjective` object. It will be one of:
        `WeightedMCMultiOutputObjective`, `ConstrainedMCObjective`,
        `ScalarizedObjective`, or `LinearMCObjective`.
    """
    if objective_thresholds is not None:
        nonzero_idcs = torch.nonzero(objective_weights).view(-1)
        objective_weights = objective_weights[nonzero_idcs]
        return WeightedMCMultiOutputObjective(
            weights=objective_weights, outcomes=nonzero_idcs.tolist()
        )
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch Objective."
        )
    if outcome_constraints:
        if use_scalarized_objective:
            logger.warning(
                "Currently cannot use ScalarizedObjective when there are outcome "
                "constraints. Ignoring (default) kwarg `use_scalarized_objective`"
                "= True. Creating ConstrainedMCObjective."
            )
        obj_tf = get_objective_weights_transform(objective_weights)

        def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
            return obj_tf(samples)

        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        return ConstrainedMCObjective(
            objective=objective, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    elif use_scalarized_objective:
        return ScalarizedObjective(weights=objective_weights)
    return LinearMCObjective(weights=objective_weights)

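# Usage sketch (illustrative, not from the source): exercising the
# `objective_thresholds` branch of the variant directly above. The weights,
# thresholds, and `SingleTaskGP` training data are made-up values; on this
# branch the model is not actually consulted.
def _example_multi_objective_branch() -> None:
    import torch
    from botorch.models import SingleTaskGP

    X = torch.rand(8, 2, dtype=torch.double)
    Y = torch.rand(8, 3, dtype=torch.double)
    model = SingleTaskGP(X, Y)
    # Outcome 1 has zero weight, so it is dropped from the objective.
    obj = get_botorch_objective(
        model=model,
        objective_weights=torch.tensor([1.0, 0.0, -1.0], dtype=torch.double),
        objective_thresholds=torch.tensor([0.1, 0.0, 0.1], dtype=torch.double),
    )
    # `obj` is a WeightedMCMultiOutputObjective restricted to outcomes [0, 2].
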
def get_botorch_objective_and_transform(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
    X_observed: Optional[Tensor] = None,
) -> Tuple[Optional[MCAcquisitionObjective], Optional[PosteriorTransform]]:
    """Constructs a BoTorch `MCAcquisitionObjective` and/or a `PosteriorTransform`.

    Args:
        model: A BoTorch Model.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models.)
        objective_thresholds: A tensor containing thresholds forming a
            reference point from which to calculate Pareto frontier
            hypervolume. Points that do not dominate the objective_thresholds
            contribute nothing to hypervolume.
        X_observed: Observed points that are feasible and appear in the
            objective or the constraints. None if there are no such points.

    Returns:
        A two-tuple containing (optionally) an `MCAcquisitionObjective` and
        (optionally) a `PosteriorTransform`.
    """
    if objective_thresholds is not None:
        # We are doing multi-objective optimization.
        nonzero_idcs = torch.nonzero(objective_weights).view(-1)
        objective_weights = objective_weights[nonzero_idcs]
        objective = WeightedMCMultiOutputObjective(
            weights=objective_weights, outcomes=nonzero_idcs.tolist()
        )
        return objective, None
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch objective."
        )
    if outcome_constraints:
        # If there are outcome constraints, we use MC acquisition functions.
        obj_tf = get_objective_weights_transform(objective_weights)

        def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
            return obj_tf(samples)

        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        objective = ConstrainedMCObjective(
            objective=objective, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
        return objective, None
    # Case of linear weights - use a ScalarizedPosteriorTransform.
    transform = ScalarizedPosteriorTransform(weights=objective_weights)
    return None, transform

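# Usage sketch (illustrative, not from the source): with no objective
# thresholds and no outcome constraints, the function above returns no MC
# objective and a `ScalarizedPosteriorTransform`, which suits analytic
# acquisition functions. The data and weights below are made-up values.
def _example_scalarized_transform_branch() -> None:
    import torch
    from botorch.models import SingleTaskGP

    X = torch.rand(8, 2, dtype=torch.double)
    Y = torch.rand(8, 2, dtype=torch.double)
    model = SingleTaskGP(X, Y)
    objective, transform = get_botorch_objective_and_transform(
        model=model,
        objective_weights=torch.tensor([1.0, -1.0], dtype=torch.double),
        X_observed=X,
    )
    assert objective is None  # linear weights need no MC objective
    # `transform` is a ScalarizedPosteriorTransform with the given weights.
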
def construct_inputs_qEHVI(
    model: Model,
    training_data: TrainingData,
    objective_thresholds: Tensor,
    objective: Optional[AcquisitionObjective] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    r"""Construct kwargs for the `qExpectedHypervolumeImprovement` constructor."""
    X_observed = training_data.X
    # Compute the posterior mean (used for the reference point computation
    # with respect to the Pareto frontier).
    with torch.no_grad():
        Y_pmean = model.posterior(X_observed).mean
    outcome_constraints = kwargs.pop("outcome_constraints", None)
    # For HV-based acquisition functions we pass the constraint transform directly.
    if outcome_constraints is None:
        cons_tfs = None
    else:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
        # Adjust `Y_pmean` to contain feasible points only.
        feas = torch.stack([c(Y_pmean) <= 0 for c in cons_tfs], dim=-1).all(dim=-1)
        Y_pmean = Y_pmean[feas]
    if objective is None:
        objective = IdentityMCMultiOutputObjective()
    ehvi_kwargs = construct_inputs_EHVI(
        model=model,
        training_data=training_data,
        objective_thresholds=objective_thresholds,
        objective=objective,
        # Pass `Y_pmean` that accounts for constraints to `construct_inputs_EHVI`
        # to ensure that the correct non-dominated partitioning is produced.
        Y_pmean=Y_pmean,
        **kwargs,
    )
    sampler = kwargs.get("sampler")
    if sampler is None:
        sampler = _get_sampler(
            mc_samples=kwargs.get("mc_samples", 128), qmc=kwargs.get("qmc", True)
        )
    add_qehvi_kwargs = {
        "sampler": sampler,
        "X_pending": kwargs.get("X_pending"),
        "constraints": cons_tfs,
        "eta": kwargs.get("eta", 1e-3),
    }
    return {**ehvi_kwargs, **add_qehvi_kwargs}

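# Usage sketch (illustrative, not from the source): feeding the constructed
# kwargs straight into `qExpectedHypervolumeImprovement`. This assumes the
# surrounding module provides `construct_inputs_EHVI` and `_get_sampler`, and
# that `TrainingData.from_block_design` matches your BoTorch version.
def _example_build_qehvi() -> None:
    import torch
    from botorch.acquisition.multi_objective import qExpectedHypervolumeImprovement
    from botorch.models import SingleTaskGP

    X = torch.rand(8, 2, dtype=torch.double)
    Y = torch.rand(8, 2, dtype=torch.double)
    model = SingleTaskGP(X, Y)
    kwargs = construct_inputs_qEHVI(
        model=model,
        training_data=TrainingData.from_block_design(X=X, Y=Y),
        objective_thresholds=torch.zeros(2, dtype=torch.double),
    )
    acqf = qExpectedHypervolumeImprovement(**kwargs)
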
def get_botorch_objective(
    model: Model,
    objective_weights: Tensor,
    use_scalarized_objective: bool = True,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
) -> AcquisitionObjective:
    """Constructs a BoTorch `AcquisitionObjective` object.

    Args:
        model: A BoTorch Model.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        use_scalarized_objective: A boolean parameter that defaults to True,
            specifying whether `ScalarizedObjective` should be used.
            NOTE: when using outcome_constraints, use_scalarized_objective
            will be ignored.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models.)
        X_observed: Observed points that are feasible and appear in the
            objective or the constraints. None if there are no such points.

    Returns:
        A BoTorch `AcquisitionObjective` object. It will be one of:
        `ScalarizedObjective`, `LinearMCObjective`, or `ConstrainedMCObjective`.
    """
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch Objective."
        )
    if outcome_constraints:
        if use_scalarized_objective:
            logger.warning(
                "Currently cannot use ScalarizedObjective when there are outcome "
                "constraints. Ignoring (default) kwarg `use_scalarized_objective`"
                "= True. Creating ConstrainedMCObjective."
            )
        obj_tf = get_objective_weights_transform(objective_weights)
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        return ConstrainedMCObjective(
            objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    if use_scalarized_objective:
        return ScalarizedObjective(weights=objective_weights)
    return LinearMCObjective(weights=objective_weights)

def _get_objective(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
) -> AcquisitionObjective:
    """Constructs a `ScalarizedObjective`, or a `ConstrainedMCObjective` if
    outcome constraints are present."""
    if outcome_constraints is None:
        objective = ScalarizedObjective(weights=objective_weights)
    else:
        X_observed = torch.as_tensor(X_observed)
        obj_tf = get_objective_weights_transform(objective_weights)
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        objective = ConstrainedMCObjective(
            objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    return objective

def construct_inputs_qNEHVI(
    model: Model,
    training_data: TrainingData,
    objective_thresholds: Tensor,
    objective: Optional[AcquisitionObjective] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    r"""Construct kwargs for the `qNoisyExpectedHypervolumeImprovement` constructor."""
    # This selects the objectives (a subset of the outcomes) and sets each
    # objective threshold to have the proper optimization direction.
    if objective is None:
        objective = IdentityMCMultiOutputObjective()
    outcome_constraints = kwargs.pop("outcome_constraints", None)
    if outcome_constraints is None:
        cons_tfs = None
    else:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
    sampler = kwargs.get("sampler")
    if sampler is None:
        sampler = _get_sampler(
            mc_samples=kwargs.get("mc_samples", 128), qmc=kwargs.get("qmc", True)
        )
    return {
        "model": model,
        "ref_point": objective(objective_thresholds),
        "X_baseline": kwargs.get("X_baseline", training_data.X),
        "sampler": sampler,
        "objective": objective,
        "constraints": cons_tfs,
        "X_pending": kwargs.get("X_pending"),
        "eta": kwargs.get("eta", 1e-3),
        "prune_baseline": kwargs.get("prune_baseline", True),
        "alpha": kwargs.get("alpha", 0.0),
        "cache_pending": kwargs.get("cache_pending", True),
        "max_iep": kwargs.get("max_iep", 0),
        "incremental_nehvi": kwargs.get("incremental_nehvi", True),
    }

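# Usage sketch (illustrative, not from the source): the kwargs dict built
# above is designed to map one-to-one onto the
# `qNoisyExpectedHypervolumeImprovement` constructor; `_get_sampler` and
# `TrainingData.from_block_design` are assumptions about the surrounding
# module and BoTorch version.
def _example_build_qnehvi() -> None:
    import torch
    from botorch.acquisition.multi_objective import (
        qNoisyExpectedHypervolumeImprovement,
    )
    from botorch.models import SingleTaskGP

    X = torch.rand(8, 2, dtype=torch.double)
    Y = torch.rand(8, 2, dtype=torch.double)
    model = SingleTaskGP(X, Y)
    kwargs = construct_inputs_qNEHVI(
        model=model,
        training_data=TrainingData.from_block_design(X=X, Y=Y),
        objective_thresholds=torch.zeros(2, dtype=torch.double),
    )
    acqf = qNoisyExpectedHypervolumeImprovement(**kwargs)
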
def get_botorch_objective(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
) -> AcquisitionObjective:
    """Constructs a BoTorch `Objective`."""
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch Objective."
        )
    if outcome_constraints is None:
        objective = ScalarizedObjective(weights=objective_weights)
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        objective = ConstrainedMCObjective(
            objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    return objective

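# Usage sketch (illustrative, not from the source): with outcome constraints,
# the variant above returns a `ConstrainedMCObjective` that scalarizes
# posterior samples and applies an infeasibility penalty. The constraint
# `f_1(x) <= 0.5` and the training data are made-up values.
def _example_constrained_objective() -> None:
    import torch
    from botorch.models import SingleTaskGP

    X = torch.rand(8, 2, dtype=torch.double)
    Y = torch.rand(8, 2, dtype=torch.double)
    model = SingleTaskGP(X, Y)
    A = torch.tensor([[0.0, 1.0]], dtype=torch.double)  # constrain outcome 1
    b = torch.tensor([[0.5]], dtype=torch.double)
    objective = get_botorch_objective(
        model=model,
        objective_weights=torch.tensor([1.0, 0.0], dtype=torch.double),
        outcome_constraints=(A, b),
        X_observed=X,
    )
    # `objective` is a ConstrainedMCObjective; applied to MC samples it
    # yields the weighted objective minus a feasibility-weighted penalty.
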
def test_construct_inputs_qNEHVI(self):
    c = get_acqf_input_constructor(qNoisyExpectedHypervolumeImprovement)
    objective_thresholds = torch.rand(2)
    mock_model = mock.Mock()

    # Test defaults.
    kwargs = c(
        model=mock_model,
        training_data=self.bd_td,
        objective_thresholds=objective_thresholds,
    )
    ref_point_expected = objective_thresholds
    self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
    self.assertTrue(torch.equal(kwargs["X_baseline"], self.bd_td.X))
    self.assertIsInstance(kwargs["sampler"], SobolQMCNormalSampler)
    self.assertEqual(kwargs["sampler"].sample_shape, torch.Size([128]))
    self.assertIsInstance(kwargs["objective"], IdentityMCMultiOutputObjective)
    self.assertIsNone(kwargs["constraints"])
    self.assertIsNone(kwargs["X_pending"])
    self.assertEqual(kwargs["eta"], 1e-3)
    self.assertTrue(kwargs["prune_baseline"])
    self.assertEqual(kwargs["alpha"], 0.0)
    self.assertTrue(kwargs["cache_pending"])
    self.assertEqual(kwargs["max_iep"], 0)
    self.assertTrue(kwargs["incremental_nehvi"])

    # Test custom inputs.
    weights = torch.rand(2)
    objective = WeightedMCMultiOutputObjective(weights=weights)
    X_baseline = torch.rand(2, 2)
    sampler = IIDNormalSampler(num_samples=4)
    outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
    X_pending = torch.rand(1, 2)
    kwargs = c(
        model=mock_model,
        training_data=self.bd_td,
        objective_thresholds=objective_thresholds,
        objective=objective,
        X_baseline=X_baseline,
        sampler=sampler,
        outcome_constraints=outcome_constraints,
        X_pending=X_pending,
        eta=1e-2,
        prune_baseline=True,
        alpha=0.1,
        cache_pending=False,
        max_iep=1,
        incremental_nehvi=False,
    )
    ref_point_expected = objective(objective_thresholds)
    self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
    self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
    sampler_ = kwargs["sampler"]
    self.assertIsInstance(sampler_, IIDNormalSampler)
    self.assertEqual(sampler_.sample_shape, torch.Size([4]))
    self.assertEqual(kwargs["objective"], objective)
    cons_tfs_expected = get_outcome_constraint_transforms(outcome_constraints)
    cons_tfs = kwargs["constraints"]
    self.assertEqual(len(cons_tfs), 1)
    test_Y = torch.rand(1, 2)
    self.assertTrue(torch.equal(cons_tfs[0](test_Y), cons_tfs_expected[0](test_Y)))
    self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
    self.assertEqual(kwargs["eta"], 1e-2)
    self.assertTrue(kwargs["prune_baseline"])
    self.assertEqual(kwargs["alpha"], 0.1)
    self.assertFalse(kwargs["cache_pending"])
    self.assertEqual(kwargs["max_iep"], 1)
    self.assertFalse(kwargs["incremental_nehvi"])