def _get_model(n, fixed_noise=False, use_octf=False, **tkwargs):
    train_x1, train_y1 = _get_random_data(batch_shape=torch.Size(),
                                          m=1,
                                          n=10,
                                          **tkwargs)
    train_x2, train_y2 = _get_random_data(batch_shape=torch.Size(),
                                          m=1,
                                          n=11,
                                          **tkwargs)
    octfs = [Standardize(m=1), Standardize(m=1)] if use_octf else [None, None]
    if fixed_noise:
        train_y1_var = 0.1 + 0.1 * torch.rand_like(train_y1, **tkwargs)
        train_y2_var = 0.1 + 0.1 * torch.rand_like(train_y2, **tkwargs)
        model1 = FixedNoiseGP(
            train_X=train_x1,
            train_Y=train_y1,
            train_Yvar=train_y1_var,
            outcome_transform=octfs[0],
        )
        model2 = FixedNoiseGP(
            train_X=train_x2,
            train_Y=train_y2,
            train_Yvar=train_y2_var,
            outcome_transform=octfs[1],
        )
    else:
        model1 = SingleTaskGP(train_X=train_x1,
                              train_Y=train_y1,
                              outcome_transform=octfs[0])
        model2 = SingleTaskGP(train_X=train_x2,
                              train_Y=train_y2,
                              outcome_transform=octfs[1])
    model = ModelListGP(model1, model2)
    return model.to(**tkwargs)
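A standalone sketch of the same pattern, assuming only torch and botorch are available (the test-only helper _get_random_data is replaced with explicit random data): a ModelListGP joins independently trained single-output GPs, which may have different training sets, and exposes a joint multi-output posterior.

import torch
from botorch.models import ModelListGP, SingleTaskGP

train_x1 = torch.rand(10, 2)
train_y1 = train_x1.sum(dim=-1, keepdim=True)          # 10 x 1
train_x2 = torch.rand(11, 2)                           # sub-models may use different data
train_y2 = (train_x2[:, :1] - train_x2[:, 1:]).abs()   # 11 x 1

model = ModelListGP(SingleTaskGP(train_x1, train_y1),
                    SingleTaskGP(train_x2, train_y2))
posterior = model.posterior(torch.rand(4, 2))  # joint posterior over both outputs
print(posterior.mean.shape)                    # torch.Size([4, 2])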
Example #2
 def test_ModelListGP_single(self):
     tkwargs = {"device": self.device, "dtype": torch.float}
     train_x1, train_x2, train_y1, train_y2 = _get_random_data(n=10, **tkwargs)
     model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
     model = ModelListGP(model1)
     model.to(**tkwargs)
     test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
     posterior = model.posterior(test_x)
     self.assertIsInstance(posterior, GPyTorchPosterior)
     self.assertIsInstance(posterior.mvn, MultivariateNormal)
Example #3
 def initialize_model(train_x, train_obj, state_dict=None):
     # define models for objective
     model_obj = FixedNoiseGP(train_x, train_obj,
                              train_yvar.expand_as(train_obj)).to(train_x)
     # combine into a multi-output GP model
     model = ModelListGP(model_obj)
     mll = SumMarginalLogLikelihood(model.likelihood, model)
     # load state dict if it is passed
     if state_dict is not None:
         model.load_state_dict(state_dict)
     return mll, model
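A hedged sketch of how this helper is typically consumed in a Bayesian optimization loop; train_x, train_obj, and the module-level train_yvar are assumed from the surrounding project, and train_x_new / train_obj_new are hypothetical names for the augmented data of the next iteration.

from botorch import fit_gpytorch_model

mll, model = initialize_model(train_x, train_obj)
fit_gpytorch_model(mll)  # fits the sub-model hyperparameters via the summed MLL

# Next iteration: warm-start fitting from the previous hyperparameters.
mll, model = initialize_model(train_x_new, train_obj_new,
                              state_dict=model.state_dict())
fit_gpytorch_model(mll)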
Example #4
 def test_ModelListGPSingle(self, cuda=False):
     tkwargs = {
         "device": torch.device("cuda") if cuda else torch.device("cpu"),
         "dtype": torch.float,
     }
     train_x1, train_x2, train_y1, train_y2 = _get_random_data(n=10, **tkwargs)
     model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
     model = ModelListGP(model1)
     model.to(**tkwargs)
     test_x = torch.tensor([[0.25], [0.75]]).type_as(model.train_targets[0])
     posterior = model.posterior(test_x)
     self.assertIsInstance(posterior, GPyTorchPosterior)
     self.assertIsInstance(posterior.mvn, MultivariateNormal)
Example #6
File: main.py Project: stys/albo
def initialize_model(x, z, state_dict=None):
    n = z.shape[-1]
    gp_models = []
    for i in range(n):
        y = z[..., i].unsqueeze(-1)
        gp_model = SingleTaskGP(train_X=x, train_Y=y)
        gp_model.likelihood.noise_covar.register_constraint(
            "raw_noise", GreaterThan(1e-5))
        gp_models.append(gp_model)
    model_list = ModelListGP(*gp_models)
    mll = SumMarginalLogLikelihood(model_list.likelihood, model_list)
    if state_dict is not None:
        model_list.load_state_dict(state_dict)
    return mll, model_list
Example #7
def _get_model(n, fixed_noise=False, **tkwargs):
    train_x1, train_x2, train_y1, train_y2 = _get_random_data(n=n, **tkwargs)
    if fixed_noise:
        train_y1_var = 0.1 + 0.1 * torch.rand_like(train_y1, **tkwargs)
        train_y2_var = 0.1 + 0.1 * torch.rand_like(train_y2, **tkwargs)
        model1 = FixedNoiseGP(train_X=train_x1,
                              train_Y=train_y1,
                              train_Yvar=train_y1_var)
        model2 = FixedNoiseGP(train_X=train_x2,
                              train_Y=train_y2,
                              train_Yvar=train_y2_var)
    else:
        model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
        model2 = SingleTaskGP(train_X=train_x2, train_Y=train_y2)
    model = ModelListGP(model1, model2)
    return model.to(**tkwargs)
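The fixed_noise switch above chooses between inferring a homoskedastic noise level (SingleTaskGP) and conditioning on known per-observation measurement variances (FixedNoiseGP). A minimal sketch of the latter, assuming a known noise variance of 0.2 for every observation:

import torch
from botorch.models import FixedNoiseGP

train_x = torch.rand(10, 2)
train_y = train_x.sum(dim=-1, keepdim=True)
train_yvar = torch.full_like(train_y, 0.2)  # observed noise variances, not learned

model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)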
Example #8
    def __init__(self, model_list: List[Model], iden: str,
                 Nrestarts_eta_c: int, budget_failures: int) -> None:

        self.iden = iden
        self.my_print("Starting AcquisitionBaseTools ...")
        self.model_list = model_list

        # # Define GP posterior mean:
        # self.gp_mean_obj = GPmean(self.model_list[idxm['obj']])

        # define models for objective and constraint
        # model_obj = FixedNoiseGP(train_x, train_obj, train_yvar.expand_as(train_obj)).to(train_x)
        # model_con = FixedNoiseGP(train_x, train_con, train_yvar.expand_as(train_con)).to(train_x)
        # combine into a multi-output GP model
        # mll = SumMarginalLogLikelihood(model.likelihood, model)
        # fit_gpytorch_model(mll)

        self.gp_mean_obj_cons = GPmeanConstrained(
            model=ModelListGP(model_list[0], model_list[1]),
            objective=constrained_obj)

        # Some options:
        self.Nrestarts_eta_c = Nrestarts_eta_c
        self.budget_failures = budget_failures

        self.dim = self.model_list[idxm['obj']].dim
        self.x_eta_c = None
        self.eta_c = None
        self.bounds = torch.tensor([[0.0] * self.dim, [1.0] * self.dim],
                                   device=device)

        # Optimization method: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
        self.method_opti = "L-BFGS-B"
Example #9
    def test_get_extra_mll_args(self):
        train_X = torch.rand(3, 5)
        train_Y = torch.rand(3, 1)
        model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
        # test ExactMarginalLogLikelihood
        exact_mll = ExactMarginalLogLikelihood(model.likelihood, model)
        exact_extra_args = _get_extra_mll_args(mll=exact_mll)
        self.assertEqual(len(exact_extra_args), 1)
        self.assertTrue(torch.equal(exact_extra_args[0], train_X))

        # test VariationalELBO
        elbo = VariationalELBO(model.likelihood, model, num_data=train_X.shape[0])
        elbo_extra_args = _get_extra_mll_args(mll=elbo)
        self.assertEqual(len(elbo_extra_args), 0)

        # test SumMarginalLogLikelihood
        model2 = ModelListGP(model)
        sum_mll = SumMarginalLogLikelihood(model2.likelihood, model2)
        sum_mll_extra_args = _get_extra_mll_args(mll=sum_mll)
        self.assertEqual(len(sum_mll_extra_args), 1)
        self.assertEqual(len(sum_mll_extra_args[0]), 1)
        self.assertTrue(torch.equal(sum_mll_extra_args[0][0], train_X))

        # test unsupported MarginalLogLikelihood type
        unsupported_mll = MarginalLogLikelihood(model.likelihood, model)
        with self.assertRaises(ValueError):
            _get_extra_mll_args(mll=unsupported_mll)
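For context on why SumMarginalLogLikelihood carries one tuple of extra arguments per sub-model: each sub-MLL is an exact marginal log likelihood evaluated on its own training inputs. A hedged sketch of the corresponding gpytorch training-loop call, reusing model2 and sum_mll from the test above:

model2.train()
output = model2(*model2.train_inputs)          # one MultivariateNormal per sub-model
loss = -sum_mll(output, model2.train_targets)  # negated sum of the per-model MLLs
loss.backward()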
Example #10
def run():

	train_x, train_obj, train_con, best_observed_value_nei = generate_initial_data(n=10)

	# define models for objective and constraint
	model_obj = FixedNoiseGP(train_x, train_obj, train_yvar.expand_as(train_obj)).to(train_x)
	model_con = FixedNoiseGP(train_x, train_con, train_yvar.expand_as(train_con)).to(train_x)
	# combine into a multi-output GP model
	model = ModelListGP(model_obj, model_con)
	mll = SumMarginalLogLikelihood(model.likelihood, model)

	fit_gpytorch_model(mll)

	acqui_gpmean_cons = GPmeanConstrained(model=model,objective=constrained_obj)

	# Forward:
	# X = torch.rand(size=(1,6))
	# acqui_gpmean_cons.forward(X)

	method_opti = "SLSQP" # constraints
	# method_opti = "COBYLA" # constraints
	# method_opti = "L-BFGS-B"

	# num_restarts must equal q here; otherwise the optimization fails.
	options = {"batch_limit": 1, "maxiter": 200, "ftol": 1e-6, "method": method_opti}
	x_eta_c, eta_c = optimize_acqf(
		acq_function=acqui_gpmean_cons, bounds=bounds, q=1, num_restarts=1,
		raw_samples=500, return_best_only=True, options=options)

	pdb.set_trace()
Example #11
 def test_is_noiseless(self):
     x = torch.zeros(1, 1)
     y = torch.zeros(1, 1)
     se = torch.zeros(1, 1)
     model = SingleTaskGP(x, y)
     self.assertTrue(is_noiseless(model))
     model = HeteroskedasticSingleTaskGP(x, y, se)
     self.assertFalse(is_noiseless(model))
     with self.assertRaises(ModelError):
         is_noiseless(ModelListGP())
Example #12
    def test_acquisition_functions(self):
        tkwargs = {"device": self.device, "dtype": torch.double}
        train_X, train_Y, train_Yvar, model = self._get_data_and_model(
            infer_noise=True, **tkwargs
        )
        fit_fully_bayesian_model_nuts(
            model, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
        )
        sampler = IIDNormalSampler(num_samples=2)
        acquisition_functions = [
            ExpectedImprovement(model=model, best_f=train_Y.max()),
            ProbabilityOfImprovement(model=model, best_f=train_Y.max()),
            PosteriorMean(model=model),
            UpperConfidenceBound(model=model, beta=4),
            qExpectedImprovement(model=model, best_f=train_Y.max(), sampler=sampler),
            qNoisyExpectedImprovement(model=model, X_baseline=train_X, sampler=sampler),
            qProbabilityOfImprovement(
                model=model, best_f=train_Y.max(), sampler=sampler
            ),
            qSimpleRegret(model=model, sampler=sampler),
            qUpperConfidenceBound(model=model, beta=4, sampler=sampler),
            qNoisyExpectedHypervolumeImprovement(
                model=ModelListGP(model, model),
                X_baseline=train_X,
                ref_point=torch.zeros(2, **tkwargs),
                sampler=sampler,
            ),
            qExpectedHypervolumeImprovement(
                model=ModelListGP(model, model),
                ref_point=torch.zeros(2, **tkwargs),
                sampler=sampler,
                partitioning=NondominatedPartitioning(
                    ref_point=torch.zeros(2, **tkwargs), Y=train_Y.repeat([1, 2])
                ),
            ),
        ]

        for acqf in acquisition_functions:
            for batch_shape in [[5], [6, 5, 2]]:
                test_X = torch.rand(*batch_shape, 1, 4, **tkwargs)
                self.assertEqual(acqf(test_X).shape, torch.Size(batch_shape))
Example #13
def initialize_model(train_x, train_obj, train_con, state_dict=None):
    if problem.num_constraints == 1:
        # define models for objective and constraint
        # model_obj = SingleTaskGP(train_x, train_obj, outcome_transform=Standardize(m=train_obj.shape[-1]))
        # model_con = SingleTaskGP(train_x, train_con, outcome_transform=Standardize(m=train_con.shape[-1]))
        model_obj = SingleTaskGP(train_x, train_obj)
        model_con = SingleTaskGP(train_x, train_con)
        # combine into a multi-output GP model
        model = ModelListGP(model_obj, model_con)
        mll = SumMarginalLogLikelihood(model.likelihood, model)
    else:
        train_y = torch.cat([train_obj, train_con], dim=-1)
        model = SingleTaskGP(
            train_x,
            train_y,
            outcome_transform=Standardize(m=train_y.shape[-1]))
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
    # load state dict if it is passed
    if state_dict is not None:
        model.load_state_dict(state_dict)
    return mll, model
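Either branch returns an (mll, model) pair that can be fitted the same way; fit_gpytorch_model dispatches a SumMarginalLogLikelihood to per-sub-model fitting. A hedged usage sketch, with train_x, train_obj, and train_con assumed from the surrounding loop:

from botorch import fit_gpytorch_model

mll, model = initialize_model(train_x, train_obj, train_con)
fit_gpytorch_model(mll)  # works for both the ModelListGP and the batched SingleTaskGP branch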
Example #14
    def __init__(self, model_list: List[Model], options: dict) -> None:

        # AnalyticAcquisitionFunction.__init__(self, model=ModelListGP(model_list[0],model_list[1]), objective=ScalarizedObjective(weights=torch.Tensor([1.0])))
        MCAcquisitionFunction.__init__(
            self,
            model=ModelListGP(model_list[0], model_list[1]),
            objective=constrained_obj)

        AcquisitionBaseToolsConstrained.__init__(
            self,
            model_list=model_list,
            iden="XsearchFailures",
            Nrestarts_eta_c=options["Nrestarts_eta_c"],
            budget_failures=options["budget_failures"])

        self.dim = model_list[0].dim
        self.u_vec = None
        self.Nsamples_fmin = options["Nsamples_fmin"]
        self.Nrestarts_safe = options["Nrestarts_safe"]
        assert self.Nrestarts_safe > 1, "Choose at least 2 restart points."
        self.Nrestarts_risky = options["Nrestarts_risky"]
        assert self.Nrestarts_risky > 1, "Choose at least 2 restart points."
        self.which_mode = "risky"
        self.NBOiters = options["NBOiters"]
        self.rho_conserv = options["rho_safe"]
        self.method_safe = options["method_safe"]
        self.method_risky = options["method_risky"]
        self.constrained_opt = ConstrainedOptimizationNonLinearConstraints(
            self.dim, self.forward, self.probabilistic_constraint)
        self.use_nlopt = False
        self.disp_info_scipy_opti = options["disp_info_scipy_opti"]
        self.decision_boundary = options["decision_boundary"]

        # Initialize rho latent process:
        if float(self.budget_failures) / self.NBOiters == 1.0:
            self.zk = norm.ppf(self.rho_conserv)
        else:
            self.zk = norm.ppf(float(self.budget_failures) / self.NBOiters)
        self.zrisk = norm.ppf(1.0 - self.rho_conserv)
        self.zsafe = norm.ppf(self.rho_conserv)
Example #15
def test_EIC(cfg: DictConfig):

	train_x, train_yl = get_initial_evaluations(dim=1)

	Neval = train_x.shape[0]
	dim = train_x.shape[1]

	gpcr1 = GPCRmodel(train_x=train_x, train_yl=train_yl, options=cfg.gpcr_model)
	gpcr2 = GPCRmodel(train_x=train_x.clone(), train_yl=train_yl.clone(), options=cfg.gpcr_model)

	model_list = ModelListGP(gpcr1, gpcr2)

	constraints = {1: (None, gpcr2.threshold)}
	# EIC = ConstrainedExpectedImprovement(model=model_list, best_f=0.2, objective_index=0, constraints=constraints)
	eic = ExpectedImprovementWithConstraints(model_list=model_list, constraints=constraints, options=cfg.acquisition_function)
	eic_val = eic(torch.tensor([[0.5]]))

	x_next, alpha_next = eic.get_next_point()

	# Plotting:
	axes_GPobj, axes_acqui, axes_fmin = plotting_tool_uncons(gpcr1, eic, axes_GPobj=None, axes_acqui=None, axes_fmin=None)
	axes_GPobj, axes_acqui, axes_fmin = plotting_tool_uncons(gpcr1, eic, axes_GPobj, axes_acqui, axes_fmin, xnext=x_next, alpha_next=alpha_next, block=True)
Example #16
    def test_get_extra_mll_args(self):
        train_X = torch.rand(3, 5)
        train_Y = torch.rand(3, 1)
        model = SingleTaskGP(train_X=train_X, train_Y=train_Y)

        # test ExactMarginalLogLikelihood
        exact_mll = ExactMarginalLogLikelihood(model.likelihood, model)
        exact_extra_args = _get_extra_mll_args(mll=exact_mll)
        self.assertEqual(len(exact_extra_args), 1)
        self.assertTrue(torch.equal(exact_extra_args[0], train_X))

        # test SumMarginalLogLikelihood
        model2 = ModelListGP(model)
        sum_mll = SumMarginalLogLikelihood(model2.likelihood, model2)
        sum_mll_extra_args = _get_extra_mll_args(mll=sum_mll)
        self.assertEqual(len(sum_mll_extra_args), 1)
        self.assertEqual(len(sum_mll_extra_args[0]), 1)
        self.assertTrue(torch.equal(sum_mll_extra_args[0][0], train_X))

        # test unsupported MarginalLogLikelihood type
        unsupported_mll = MarginalLogLikelihood(model.likelihood, model)
        unsupported_mll_extra_args = _get_extra_mll_args(mll=unsupported_mll)
        self.assertEqual(unsupported_mll_extra_args, [])
Example #17
def _get_model(n, **tkwargs):
    train_x1, train_x2, train_y1, train_y2 = _get_random_data(n=n, **tkwargs)
    model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
    model2 = SingleTaskGP(train_X=train_x2, train_Y=train_y2)
    model = ModelListGP(model1, model2)
    return model.to(**tkwargs)
Example #18
    def test_model_list_to_batched(self):
        for dtype in (torch.float, torch.double):
            # basic test
            train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
            train_Y1 = train_X.sum(dim=-1, keepdim=True)
            train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
            gp1 = SingleTaskGP(train_X, train_Y1)
            gp2 = SingleTaskGP(train_X, train_Y2)
            list_gp = ModelListGP(gp1, gp2)
            batch_gp = model_list_to_batched(list_gp)
            self.assertIsInstance(batch_gp, SingleTaskGP)
            # test degenerate (single model)
            batch_gp = model_list_to_batched(ModelListGP(gp1))
            self.assertEqual(batch_gp._num_outputs, 1)
            # test different model classes
            gp2 = FixedNoiseGP(train_X, train_Y1, torch.ones_like(train_Y1))
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # test non-batched models
            gp1_ = SimpleGPyTorchModel(train_X, train_Y1)
            gp2_ = SimpleGPyTorchModel(train_X, train_Y2)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1_, gp2_))
            # test list of multi-output models
            train_Y = torch.cat([train_Y1, train_Y2], dim=-1)
            gp2 = SingleTaskGP(train_X, train_Y)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # test different training inputs
            gp2 = SingleTaskGP(2 * train_X, train_Y2)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # check scalar agreement
            gp2 = SingleTaskGP(train_X, train_Y2)
            gp2.likelihood.noise_covar.noise_prior.rate.fill_(1.0)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # check tensor shape agreement
            gp2 = SingleTaskGP(train_X, train_Y2)
            gp2.covar_module.raw_outputscale = torch.nn.Parameter(
                torch.tensor([0.0], device=self.device, dtype=dtype))
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(ModelListGP(gp1, gp2))
            # test HeteroskedasticSingleTaskGP
            gp2 = HeteroskedasticSingleTaskGP(train_X, train_Y1,
                                              torch.ones_like(train_Y1))
            with self.assertRaises(NotImplementedError):
                model_list_to_batched(ModelListGP(gp2))
            # test custom likelihood
            gp2 = SingleTaskGP(train_X,
                               train_Y2,
                               likelihood=GaussianLikelihood())
            with self.assertRaises(NotImplementedError):
                model_list_to_batched(ModelListGP(gp2))
            # test FixedNoiseGP
            train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
            train_Y1 = train_X.sum(dim=-1, keepdim=True)
            train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
            gp1_ = FixedNoiseGP(train_X, train_Y1, torch.rand_like(train_Y1))
            gp2_ = FixedNoiseGP(train_X, train_Y2, torch.rand_like(train_Y2))
            list_gp = ModelListGP(gp1_, gp2_)
            batch_gp = model_list_to_batched(list_gp)
            # test SingleTaskMultiFidelityGP
            gp1_ = SingleTaskMultiFidelityGP(train_X,
                                             train_Y1,
                                             iteration_fidelity=1)
            gp2_ = SingleTaskMultiFidelityGP(train_X,
                                             train_Y2,
                                             iteration_fidelity=1)
            list_gp = ModelListGP(gp1_, gp2_)
            batch_gp = model_list_to_batched(list_gp)
            gp2_ = SingleTaskMultiFidelityGP(train_X,
                                             train_Y2,
                                             iteration_fidelity=2)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)
            # test input transform
            input_tf = Normalize(
                d=2,
                bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
            )
            gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf)
            gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf)
            list_gp = ModelListGP(gp1_, gp2_)
            batch_gp = model_list_to_batched(list_gp)
            self.assertIsInstance(batch_gp.input_transform, Normalize)
            self.assertTrue(
                torch.equal(batch_gp.input_transform.bounds, input_tf.bounds))
            # test different input transforms
            input_tf2 = Normalize(
                d=2,
                bounds=torch.tensor([[-1.0, -1.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
            )
            gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf)
            gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf2)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)

            # test batched input transform
            input_tf2 = Normalize(
                d=2,
                bounds=torch.tensor([[-1.0, -1.0], [1.0, 1.0]],
                                    device=self.device,
                                    dtype=dtype),
                batch_shape=torch.Size([3]),
            )
            gp1_ = SingleTaskGP(train_X, train_Y1, input_transform=input_tf2)
            gp2_ = SingleTaskGP(train_X, train_Y2, input_transform=input_tf2)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)

            # test outcome transform
            octf = Standardize(m=1)
            gp1_ = SingleTaskGP(train_X, train_Y1, outcome_transform=octf)
            gp2_ = SingleTaskGP(train_X, train_Y2, outcome_transform=octf)
            list_gp = ModelListGP(gp1_, gp2_)
            with self.assertRaises(UnsupportedError):
                model_list_to_batched(list_gp)
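The happy path of the test above: sub-models that share training inputs, model class, and parameter shapes can be batched, and batched_to_model_list (also in botorch.models.converter) inverts the conversion. A hedged round-trip sketch reusing gp1 and gp2 from the basic case at the top of the test:

from botorch.models.converter import batched_to_model_list, model_list_to_batched

batch_gp = model_list_to_batched(ModelListGP(gp1, gp2))  # one SingleTaskGP with 2 outputs
list_gp_again = batched_to_model_list(batch_gp)          # back to a ModelListGP
assert isinstance(list_gp_again, ModelListGP)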
Example #19
def _get_model(n, **tkwargs):
    train_x1, train_x2, train_y1, train_y2 = _get_random_data(n=n, **tkwargs)
    model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
    model2 = SingleTaskGP(train_X=train_x2, train_Y=train_y2)
    model = ModelListGP(model1, model2)
    return model.to(**tkwargs)
Example #20
def main(cfg: DictConfig):

    dim = 1
    train_x_obj, train_y_obj = get_init_evals_obj(eval_type=1)
    train_x_cons, train_yl_cons = get_init_evals_cons(eval_type=1)

    gp_obj = GPmodel(dim=dim,
                     train_X=train_x_obj,
                     train_Y=train_y_obj.view(-1),
                     options=cfg.gpmodel)
    gp_cons = GPCRmodel(dim=dim,
                        train_x=train_x_cons.clone(),
                        train_yl=train_yl_cons.clone(),
                        options=cfg.gpcr_model)
    gp_cons.covar_module.base_kernel.lengthscale = 0.15
    constraints = {1: (None, gp_cons.threshold)}
    model_list = ModelListGP(gp_obj, gp_cons)
    eic = ExpectedImprovementWithConstraints(model_list=model_list,
                                             constraints=constraints,
                                             options=cfg.acquisition_function)

    # Get next point:
    x_next, alpha_next = eic.get_next_point()

    hdl_fig = plt.figure(figsize=(16, 10))
    # hdl_fig.suptitle("Bayesian optimization with unknown constraint and threshold")
    grid_size = (3, 1)
    axes_GPobj = plt.subplot2grid(grid_size, (0, 0), rowspan=1, fig=hdl_fig)
    axes_GPcons = plt.subplot2grid(grid_size, (1, 0), rowspan=1, fig=hdl_fig)
    # axes_GPcons_prob = plt.subplot2grid(grid_size, (2,0), rowspan=1,fig=hdl_fig)
    axes_acqui = plt.subplot2grid(grid_size, (2, 0), rowspan=1, fig=hdl_fig)

    # Plotting:
    axes_GPobj, axes_GPcons, axes_GPcons_prob, axes_acqui = plotting_tool_cons(
        gp_obj,
        gp_cons,
        eic,
        axes_GPobj=axes_GPobj,
        axes_GPcons=axes_GPcons,
        axes_GPcons_prob=None,
        axes_acqui=axes_acqui,
        cfg_plot=cfg.plot,
        xnext=x_next,
        alpha_next=alpha_next,
        plot_eta_c=False)

    fontsize_labels = 35
    axes_GPobj.set_xticklabels([])
    axes_GPobj.set_yticks([], [])
    axes_GPobj.set_yticklabels([], [])
    axes_GPobj.set_yticks([0])
    axes_GPobj.set_ylabel(r"$f(x)$", fontsize=fontsize_labels)

    axes_GPcons.set_yticks([], [])
    axes_GPcons.set_xticklabels([], [])
    axes_GPcons.set_yticks([0])
    axes_GPcons.set_ylabel(r"$g(x)$", fontsize=fontsize_labels)

    axes_acqui.set_yticks([], [])
    axes_acqui.set_xticks([0.0, 0.5, 1.0])
    axes_acqui.set_ylabel(r"$\alpha(x)$", fontsize=fontsize_labels)
    axes_acqui.set_xlabel(r"x", fontsize=fontsize_labels)
    plt.pause(0.5)

    logger.info("Saving plot to {0:s} ...".format(cfg.plot.path))
    hdl_fig.tight_layout()
    plt.savefig(fname=cfg.plot.path, dpi=300, facecolor='w', edgecolor='w')

    # pdb.set_trace()

    # # General plotting settings:
    # fontsize = 25
    # fontsize_labels = fontsize + 3
    # from matplotlib import rc
    # import matplotlib.pyplot as plt
    # from matplotlib.ticker import FormatStrFormatter
    # rc('font', family='serif')
    # rc('font',**{'family':'serif','serif':['Computer Modern Roman'], 'size': fontsize})
    # rc('text', usetex=True)
    # rc('legend',fontsize=fontsize_labels)
    # ylim = [-8,+8]

    # hdl_fig, axes_GPcons = plt.subplots(1,1,figsize=(6, 6))
    # gp_cons.plot(title="",block=False,axes=axes_GPcons,plotting=True,legend=False,Ndiv=100,Nsamples=None,ylim=ylim,showtickslabels_x=False,ylabel=r"$g(x)$")

    # if "threshold" in dir(gp_cons):
    # 	 axes_GPcons.plot([0,1],[gp_cons.threshold.item()]*2,linestyle="--",color="mediumpurple",linewidth=2.0,label="threshold")

    # axes_GPcons.set_xticks([])
    # axes_GPcons.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))

    # axes_GPobj, axes_GPcons, axes_GPcons_prob, axes_acqui = plotting_tool_cons(gp_obj,gp_cons,eic,axes_GPobj,axes_GPcons,
    # 																												axes_GPcons_prob,axes_acqui,cfg.plot,
    # 																												xnext=x_next,alpha_next=alpha_next,Ndiv=100)

    # axes_GPobj, axes_GPcons, axes_GPcons_prob, axes_acqui = plotting_tool_cons(gp_obj,gp_cons,eic,axes_GPobj=None,axes_GPcons=None,axes_GPcons_prob=None,axes_acqui=None,cfg_plot=cfg.plot,Ndiv=201)
    # axes_GPobj, axes_GPcons, axes_GPcons_prob, axes_acqui = plotting_tool_cons(gp_obj,gp_cons,eic,axes_GPobj,axes_GPcons,axes_GPcons_prob,axes_acqui,cfg.plot,xnext=x_next,alpha_next=alpha_next)

    # Ndiv = 100
    # xpred = torch.linspace(0,1,Ndiv)[:,None]
    # prob_vec = eic.get_probability_of_safe_evaluation(xpred)
    # axes_acqui.plot(xpred.cpu().detach().numpy(),prob_vec.cpu().detach().numpy())
    # import matplotlib.pyplot as plt
    plt.show(block=True)
Example #21
    def test_get_X_baseline(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            X_train = torch.rand(20, 2, **tkwargs)
            model = MockModel(
                MockPosterior(mean=(2 * X_train +
                                    1).sum(dim=-1, keepdim=True)))
            # test NEI with X_baseline
            acqf = qNoisyExpectedImprovement(model, X_baseline=X_train[:2])
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, acqf.X_baseline))
            # test EI without X_baseline
            acqf = qExpectedImprovement(model, best_f=0.0)

            with warnings.catch_warnings(
                    record=True) as w, settings.debug(True):

                X_rnd = get_X_baseline(acq_function=acqf)
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, BotorchWarning))
                self.assertIsNone(X_rnd)

            # set train inputs
            model.train_inputs = (X_train,)
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))
            # test that we fall back to train_inputs if X_baseline is an empty tensor
            acqf.register_buffer("X_baseline", X_train[:0])
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))

            # test acquisition function without X_baseline or model
            acqf = FixedFeatureAcquisitionFunction(acqf,
                                                   d=2,
                                                   columns=[0],
                                                   values=[0])
            with warnings.catch_warnings(
                    record=True) as w, settings.debug(True):
                X_rnd = get_X_baseline(acq_function=acqf)
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, BotorchWarning))
                self.assertIsNone(X_rnd)

            Y_train = 2 * X_train[:2] + 1
            moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
            ref_point = torch.zeros(2, **tkwargs)
            # test NEHVI with X_baseline
            acqf = qNoisyExpectedHypervolumeImprovement(
                moo_model,
                ref_point=ref_point,
                X_baseline=X_train[:2],
                cache_root=False,
            )
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, acqf.X_baseline))
            # test qEHVI without train_inputs
            acqf = qExpectedHypervolumeImprovement(
                moo_model,
                ref_point=ref_point,
                partitioning=FastNondominatedPartitioning(
                    ref_point=ref_point,
                    Y=Y_train,
                ),
            )
            # test extracting train_inputs from model list GP
            model_list = ModelListGP(
                SingleTaskGP(X_train, Y_train[:, :1]),
                SingleTaskGP(X_train, Y_train[:, 1:]),
            )
            acqf = qExpectedHypervolumeImprovement(
                model_list,
                ref_point=ref_point,
                partitioning=FastNondominatedPartitioning(
                    ref_point=ref_point,
                    Y=Y_train,
                ),
            )
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))

            # test MESMO for which we need to use
            # `acqf.mo_model`
            batched_mo_model = SingleTaskGP(X_train, Y_train)
            acqf = qMultiObjectiveMaxValueEntropy(
                batched_mo_model,
                sample_pareto_frontiers=lambda model: torch.rand(
                    10, 2, **tkwargs),
            )
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))
            # test that if there is an input transform that is applied
            # to the train_inputs when the model is in eval mode, we
            # extract the untransformed train_inputs
            model = SingleTaskGP(X_train,
                                 Y_train[:, :1],
                                 input_transform=Warp(indices=[0, 1]))
            model.eval()
            self.assertFalse(torch.equal(model.train_inputs[0], X_train))
            acqf = qExpectedImprovement(model, best_f=0.0)
            X = get_X_baseline(acq_function=acqf)
            self.assertTrue(torch.equal(X, X_train))
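A hedged sketch of the ModelListGP fallback exercised above, assuming get_X_baseline is importable from botorch.acquisition.utils as in recent BoTorch versions: when the acquisition function has no usable X_baseline buffer, the helper falls back to the (first) sub-model's training inputs.

import torch
from botorch.acquisition import qExpectedImprovement
from botorch.acquisition.utils import get_X_baseline
from botorch.models import ModelListGP, SingleTaskGP

X = torch.rand(8, 2)
Y = X.sum(dim=-1, keepdim=True)
acqf = qExpectedImprovement(ModelListGP(SingleTaskGP(X, Y)), best_f=0.0)
X_baseline = get_X_baseline(acq_function=acqf)  # falls back to the sub-model train_inputs
print(torch.equal(X_baseline, X))               # True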
Example #22
 def test_model_list_to_batched(self):
     for dtype in (torch.float, torch.double):
         # basic test
         train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1, keepdim=True)
         train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
         gp1 = SingleTaskGP(train_X, train_Y1)
         gp2 = SingleTaskGP(train_X, train_Y2)
         list_gp = ModelListGP(gp1, gp2)
         batch_gp = model_list_to_batched(list_gp)
         self.assertIsInstance(batch_gp, SingleTaskGP)
         # test degenerate (single model)
         batch_gp = model_list_to_batched(ModelListGP(gp1))
         self.assertEqual(batch_gp._num_outputs, 1)
         # test different model classes
         gp2 = FixedNoiseGP(train_X, train_Y1, torch.ones_like(train_Y1))
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # test non-batched models
         gp1_ = SimpleGPyTorchModel(train_X, train_Y1)
         gp2_ = SimpleGPyTorchModel(train_X, train_Y2)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1_, gp2_))
         # test list of multi-output models
         train_Y = torch.cat([train_Y1, train_Y2], dim=-1)
         gp2 = SingleTaskGP(train_X, train_Y)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # test different training inputs
         gp2 = SingleTaskGP(2 * train_X, train_Y2)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # check scalar agreement
         gp2 = SingleTaskGP(train_X, train_Y2)
         gp2.likelihood.noise_covar.noise_prior.rate.fill_(1.0)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # check tensor shape agreement
         gp2 = SingleTaskGP(train_X, train_Y2)
         gp2.covar_module.raw_outputscale = torch.nn.Parameter(
             torch.tensor([0.0], device=self.device, dtype=dtype)
         )
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # test HeteroskedasticSingleTaskGP
         gp2 = HeteroskedasticSingleTaskGP(
             train_X, train_Y1, torch.ones_like(train_Y1)
         )
         with self.assertRaises(NotImplementedError):
             model_list_to_batched(ModelListGP(gp2))
         # test custom likelihood
         gp2 = SingleTaskGP(train_X, train_Y2, likelihood=GaussianLikelihood())
         with self.assertRaises(NotImplementedError):
             model_list_to_batched(ModelListGP(gp2))
         # test FixedNoiseGP
         train_X = torch.rand(10, 2, device=self.device, dtype=dtype)
         train_Y1 = train_X.sum(dim=-1, keepdim=True)
         train_Y2 = (train_X[:, 0] - train_X[:, 1]).unsqueeze(-1)
         gp1_ = FixedNoiseGP(train_X, train_Y1, torch.rand_like(train_Y1))
         gp2_ = FixedNoiseGP(train_X, train_Y2, torch.rand_like(train_Y2))
         list_gp = ModelListGP(gp1_, gp2_)
         batch_gp = model_list_to_batched(list_gp)
Example #23
def run(cfg: DictConfig, rep_nr: int) -> None:

    # Random seed for numpy and torch:
    np.random.seed(rep_nr)
    torch.manual_seed(rep_nr)

    # Load true function and initial evaluations:
    function_obj, function_cons, dim, x_min, f_min = get_objective_functions(
        which_objective=cfg.which_objective)

    logvars = initialize_logging_variables()
    if "safety_mechanisms" in cfg.keys():

        if cfg.safety_mechanisms.use and cfg.safety_mechanisms.load_from_file.use:

            nr_exp = cfg.safety_mechanisms.load_from_file.nr_exp
            path2data = "./{0:s}/{1:s}_results/{2:s}".format(
                cfg.which_objective, cfg.acqui, nr_exp)
            try:
                with open("{0:s}/data_0.yaml".format(path2data),
                          "r") as stream:
                    my_node = yaml.load(stream, Loader=yaml.UnsafeLoader)
            except Exception as inst:
                logger.info("Exception (!) type: {0:s} | args: {1:s}".format(
                    str(type(inst)), str(inst.args)))
                raise ValueError("Data corrupted or non-existent!!!")
            else:
                logger.info("We have lodaded existing data from {0:s}".format(
                    path2data))
                logger.info(
                    "A quick inspection reveals {0:d} existing datapoint(s) ..."
                    .format(len(my_node["regret_simple_array"])))

            if cfg.safety_mechanisms.load_from_file.modify:
                logger.info(
                    "Here, we have the opportunity of modifying some data, if needed..."
                )
                pdb.set_trace()

            # pdb.set_trace()
            # Get stored values:
            if my_node["GPs"][0]["train_inputs"] is not None:
                train_x_obj = torch.from_numpy(
                    my_node["GPs"][0]["train_inputs"]).to(device=device,
                                                          dtype=dtype)
            else:
                train_x_obj = [torch.tensor([])]
            if my_node["GPs"][0]["train_targets"] is not None:
                train_y_obj = torch.from_numpy(
                    my_node["GPs"][0]["train_targets"]).to(device=device,
                                                           dtype=dtype)
            else:
                train_y_obj = torch.tensor([])

            train_x_cons = torch.from_numpy(
                my_node["GPs"][1]["train_inputs"]).to(device=device,
                                                      dtype=dtype)
            train_yl_cons = torch.from_numpy(
                my_node["GPs"][1]["train_targets"]).to(device=device,
                                                       dtype=dtype)

            logger.info("train_x_cons:" + str(train_x_cons))
            logger.info("train_x_cons.shape:" + str(train_x_cons.shape))

            # # If we need to convert something, do it here:
            # train_x_obj[:,2:4] = train_x_obj[:,2:4]*(0.08-0.03)/(0.14-0.03)
            # train_x_cons[:,2:4] = train_x_cons[:,2:4]*(0.08-0.03)/(0.14-0.03)

            # # Mods 27 Jul 12:43:
            # train_x_obj = train_x_obj[0:-1,:] # kick out last value from f(x)
            # train_y_obj = train_y_obj[0:-1] # kick out last value from f(x)
            # # train_x_cons # stays the same g(x)
            # train_yl_cons[-1,0] = float("Inf") # Make the last point of g(x) a failure
            # train_yl_cons[-1,1] = -1.0 # Make the last point of g(x) a failure

            # # Mods 29 Jul 12:43:
            train_x_obj[:, 0:2] = train_x_obj[:, 0:2] * (4.0 - 1.5) / (5.0 - 1.5)
            train_x_cons[:, 0:2] = train_x_cons[:, 0:2] * (4.0 - 1.5) / (5.0 - 1.5)
            logger.info("after conversion....")

            logger.info("train_x_cons:" + str(train_x_cons))
            logger.info("train_x_cons.shape:" + str(train_x_cons.shape))

            # Get best:
            ind_min_cost = torch.argmin(train_y_obj)
            train_x_obj_min = train_x_obj[ind_min_cost, :]
            print("train_x_obj_min:", train_x_obj_min)
            print("train_y_obj_min:", train_y_obj[ind_min_cost])

            # pdb.set_trace()

            # Get logvars so far:
            logvars["regret_simple_list"] = np.ndarray.tolist(
                my_node["regret_simple_array"])
            logvars["regret_simple_list"] = [
                np.array([el]) for el in logvars["regret_simple_list"]
            ]
            logvars["threshold_list"] = np.ndarray.tolist(
                my_node["threshold_array"])
            logvars["threshold_list"] = [
                np.array([el]) for el in logvars["threshold_list"]
            ]
            logvars["x_next_list"] = np.ndarray.tolist(my_node["x_next_array"])
            logvars["x_next_list"] = [
                np.array(el) for el in logvars["x_next_list"]
            ]

            # Report of data so far:
            logger.info("Quick report on data collected so far")
            logger.info("=====================================")
            logger.info("regret_simple_list:" +
                        str(logvars["regret_simple_list"]))
            logger.info("threshold_list:" + str(logvars["threshold_list"]))

            train_y_obj_mod = train_yl_cons[:, 0]
            train_y_obj_mod[train_y_obj_mod != float("Inf")] = train_y_obj[:]

            # pdb.set_trace()

        elif cfg.safety_mechanisms.use:

            my_path = "./{0:s}/{1:s}_results".format(cfg.which_objective,
                                                     cfg.acqui)
            path2data = generate_folder_at_path(my_path, create_folder=True)

            train_x_obj, train_y_obj, train_x_cons, train_yl_cons = get_initial_evaluations(
                which_objective=cfg.which_objective,
                function_obj=function_obj,
                function_cons=function_cons,
                cfg_Ninit_points=cfg.Ninit_points,
                with_noise=cfg.with_noise)

    # pdb.set_trace()
    gp_obj = GPmodel(dim=dim,
                     train_X=train_x_obj,
                     train_Y=train_y_obj.view(-1),
                     options=cfg.gpmodel)
    if cfg.acqui == "EIC":
        gp_cons = GPCRmodel(dim=dim,
                            train_x=train_x_cons.clone(),
                            train_yl=train_yl_cons.clone(),
                            options=cfg.gpcr_model)
    elif cfg.acqui == "EIClassi":
        ind_safe = train_yl_cons[:, 1] == +1
        train_yl_cons[ind_safe, 1] = +1
        train_yl_cons[~ind_safe, 1] = 0
        gp_cons = GPClassifier(dim=dim,
                               train_X=train_x_cons.clone(),
                               train_Y=train_yl_cons[:, 1].clone(),
                               options=cfg.gpclassimodel)

    if cfg.acqui == "EIC":
        constraints = {1: (None, gp_cons.threshold)}
        model_list = ModelListGP(gp_obj, gp_cons)
        eic = ExpectedImprovementWithConstraints(
            model_list=model_list,
            constraints=constraints,
            options=cfg.acquisition_function)
    elif cfg.acqui == "EIClassi":
        model_list = [gp_obj, gp_cons]
        eic = ExpectedImprovementWithConstraintsClassi(
            dim=dim, model_list=model_list, options=cfg.acquisition_function)

    # pdb.set_trace()
    if cfg.acqui == "EIC" and model_list.train_targets[0] is not None:
        logvars["GPs"] = dict(train_inputs=[
            train_inp[0] for train_inp in model_list.train_inputs
        ],
                              train_targets=[
                                  train_tar
                                  for train_tar in model_list.train_targets
                              ])
    elif cfg.acqui == "EIClassi" and model_list[0].train_targets is not None:
        logvars["GPs"] = dict(train_inputs=[
            mod.train_inputs[0] if mod.train_inputs[0] is not None else None
            for mod in model_list
        ],
                              train_targets=[
                                  mod.train_targets for mod in model_list
                              ])

    # pdb.set_trace()

    # Plotting:
    if cfg.plot.plotting:
        axes_GPobj, axes_GPcons, axes_GPcons_prob, axes_acqui = plotting_tool_cons(
            gp_obj,
            gp_cons,
            eic,
            axes_GPobj=None,
            axes_GPcons=None,
            axes_GPcons_prob=None,
            axes_acqui=None,
            cfg_plot=cfg.plot)

    try:
        # average over multiple trials
        for trial in range(cfg.NBOiters):

            msg_bo_iters = " <<< BOC Iteration {0:d} / {1:d} >>>".format(
                trial + 1, cfg.NBOiters)
            print("\n\n")
            logger.info("=" * len(msg_bo_iters))
            logger.info("{0:s}".format(msg_bo_iters))
            logger.info("=" * len(msg_bo_iters))

            # Get next point:
            x_next, alpha_next = eic.get_next_point()

            # Compute simple regret:
            regret_simple = eic.get_simple_regret_cons(fmin_true=f_min)
            logger.info("Regret: {0:2.5f}".format(regret_simple.item()))

            if x_next is None and alpha_next is None:
                break

            if cfg.plot.plotting:
                axes_GPobj, axes_GPcons, axes_GPcons_prob, axes_acqui = plotting_tool_cons(
                    gp_obj,
                    gp_cons,
                    eic,
                    axes_GPobj,
                    axes_GPcons,
                    axes_GPcons_prob,
                    axes_acqui,
                    cfg.plot,
                    xnext=x_next,
                    alpha_next=alpha_next)

            # Logging:
            append_logging_variables(logvars, eic.eta_c, eic.x_eta_c, x_next,
                                     alpha_next, regret_simple,
                                     gp_cons.threshold)
            # pdb.set_trace()

            # Collect evaluation at xnext:
            y_new_obj = function_obj(x_next, with_noise=cfg.with_noise)
            # yl_new_cons  = function_cons(x_next,with_noise=cfg.with_noise)
            yl_new_cons = function_cons(x_next, with_noise=False)

            x_new_cons = x_next
            x_new_obj = x_new_cons[yl_new_cons[:, 1] == +1.0, :]
            y_new_obj = y_new_obj[yl_new_cons[:, 1] == +1.0]

            # Update GP model:
            if len(y_new_obj) == 0:  # If there's no new data
                if gp_obj.train_inputs is None and gp_obj.train_targets is None:  # and the GPobj was empty, fill with empty tensors
                    train_x_obj_new = [torch.tensor([])]
                    train_y_obj_new = torch.tensor([])
                else:  # and the GPobj wasn't empty, don't update it
                    train_x_obj_new = gp_obj.train_inputs[0]
                    train_y_obj_new = gp_obj.train_targets
            else:  # if there's new data
                if gp_obj.train_inputs is None and gp_obj.train_targets is None:  # and the GPobj was empty, fill it
                    train_x_obj_new = x_new_obj
                    train_y_obj_new = y_new_obj
                else:  # and the GPobj wasn't empty, concatenate
                    train_x_obj_new = torch.cat(
                        [gp_obj.train_inputs[0], x_new_obj])
                    train_y_obj_new = torch.cat(
                        [gp_obj.train_targets, y_new_obj])

            # pdb.set_trace()
            train_x_cons_new = torch.cat([gp_cons.train_x, x_new_cons])
            train_yl_cons_new = torch.cat(
                [gp_cons.train_yl, yl_new_cons.view(1, 2)], dim=0)

            # Load GP model for f(x) and fit hyperparameters:
            gp_obj = GPmodel(dim=dim,
                             train_X=train_x_obj_new,
                             train_Y=train_y_obj_new.view(-1),
                             options=cfg.gpmodel)

            # Load GPCR model for g(x) and fit hyperparameters:
            gp_cons_train_x_backup = gp_cons.train_x.clone()
            gp_cons_train_yl_backup = gp_cons.train_yl.clone()

            if cfg.acqui == "EIClassi":

                ind_safe = train_yl_cons_new[:, 1] == +1
                train_yl_cons_new[ind_safe, 1] = +1
                train_yl_cons_new[~ind_safe, 1] = 0

                gp_cons = GPClassifier(dim=dim,
                                       train_X=train_x_cons_new.clone(),
                                       train_Y=train_yl_cons_new[:, 1].clone(),
                                       options=cfg.gpclassimodel)
            elif cfg.acqui == "EIC":
                try:
                    gp_cons = GPCRmodel(dim=dim,
                                        train_x=train_x_cons_new.clone(),
                                        train_yl=train_yl_cons_new.clone(),
                                        options=cfg.gpcr_model)
                except Exception as inst:
                    logger.info(
                        "  Exception (!) type: {0:s} | args: {1:s}".format(
                            str(type(inst)), str(inst.args)))
                    logger.info(
                        "  GPCR model construction failed (!!)")
                    logger.info(
                        "  This typically happens when the model is stuffed with datapoints, some of them rather close together,"
                    )
                    logger.info(
                        "  which causes numerical instability that couldn't be fixed internally ..."
                    )
                    logger.info(
                        "  Trying to simply not update the GPCR model. Keeping the same number of evaluations: {0:d} ..."
                        .format(gp_cons_train_x_backup.shape[0]))
                    # gp_cons = GPCRmodel(dim=dim, train_x=gp_cons_train_x_backup, train_yl=gp_cons_train_yl_backup, options=cfg.gpcr_model) # Not needed! We keep the old one

            # Update the model in other classes:
            if cfg.acqui == "EIC":
                constraints = {1: (None, gp_cons.threshold)}
                model_list = ModelListGP(gp_obj, gp_cons)
                eic = ExpectedImprovementWithConstraints(
                    model_list=model_list,
                    constraints=constraints,
                    options=cfg.acquisition_function)
            elif cfg.acqui == "EIClassi":
                model_list = [gp_obj, gp_cons]
                eic = ExpectedImprovementWithConstraintsClassi(
                    dim=dim,
                    model_list=model_list,
                    options=cfg.acquisition_function)

            logvars["GPs"] = [gp_obj.logging(), gp_cons.logging()]

            if "safety_mechanisms" in cfg.keys():
                if cfg.safety_mechanisms.use:
                    node2write = convert_lists2arrays(logvars)
                    node2write["n_rep"] = rep_nr
                    node2write["ycm"] = f_min
                    node2write["xcm"] = x_min
                    # node2write["cfg"] = cfg # Do NOT save this, or yaml will terribly fail as it will have a cyclic graph!

                    file2save = "{0:s}/data_0.yaml".format(path2data)
                    logger.info(
                        "Saving while optimizing. Iteration: {0:d} / {1:d}".
                        format(trial + 1, cfg.NBOiters))
                    logger.info("Saving in {0:s} ...".format(file2save))
                    with open(file2save, "w") as stream_write:
                        yaml.dump(node2write, stream_write)
                    logger.info("Done!")

    except Exception as inst:
        logger.info("Exception (!) type: {0:s} | args: {1:s}".format(
            str(type(inst)), str(inst.args)))
        msg_bo_final = " <<< {0:s} failed (!) at iteration {1:d} / {2:d} >>>".format(
            cfg.acqui, trial + 1, cfg.NBOiters)
    else:
        msg_bo_final = " <<< {0:s} finished successfully!! >>>".format(
            cfg.acqui)

    logger.info("=" * len(msg_bo_final))
    logger.info("{0:s}".format(msg_bo_final))
    logger.info("=" * len(msg_bo_final))

    node2write = convert_lists2arrays(logvars)
    node2write["n_rep"] = rep_nr
    node2write["ycm"] = f_min
    node2write["xcm"] = x_min
    # node2write["cfg"] = cfg # Do NOT save this, or yaml will terribly fail as it will have a cyclic graph!

    if "safety_mechanisms" in cfg.keys(
    ) and cfg.safety_mechanisms.use == False:
        save_data(node2write=node2write,
                  which_obj=cfg.which_objective,
                  which_acqui=cfg.acqui,
                  rep_nr=rep_nr)