@classmethod
def from_config(cls, config):
    classname = cls.__name__
    model = GPClassificationModel.from_config(config)

    # Bounds and optimization settings, with fallbacks for the optional ones.
    lb = config.gettensor(classname, "lb")
    ub = config.gettensor(classname, "ub")
    restarts = config.getint(classname, "restarts", fallback=10)
    samps = config.getint(classname, "samps", fallback=1000)

    assert lb.shape[0] == ub.shape[0], "bounds are of different shapes!"
    dim = lb.shape[0]

    # Acquisition function, with extra arguments overridable via a config
    # section named after the acquisition function class.
    acqf = config.getobj("experiment", "acqf", fallback=MCLevelSetEstimation)
    acqf_name = acqf.__name__

    default_extra_acqf_args = {
        "beta": 3.98,
        "target": 0.75,
        "objective": ProbitObjective,
    }
    extra_acqf_args = {
        k: config.getobj(acqf_name, k, fallback_type=float, fallback=v, warn=False)
        for k, v in default_extra_acqf_args.items()
    }
    # Drop any defaults the chosen acquisition function does not accept.
    extra_acqf_args = _prune_extra_acqf_args(acqf, extra_acqf_args)

    # The objective comes out of the config as a class; instantiate it here.
    if "objective" in extra_acqf_args and extra_acqf_args["objective"] is not None:
        extra_acqf_args["objective"] = extra_acqf_args["objective"]()

    return cls(
        lb=lb,
        ub=ub,
        restarts=restarts,
        samps=samps,
        dim=dim,
        acqf=acqf,
        model=model,
        extra_acqf_args=extra_acqf_args,
    )
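# `_prune_extra_acqf_args` is referenced above but not shown here. A minimal
# sketch of what it presumably does, assuming it simply drops any default
# kwargs that the chosen acquisition function's constructor does not accept:
import inspect


def _prune_extra_acqf_args(acqf, extra_acqf_args):
    # Keep only kwargs named in the acqf constructor's signature, so that
    # defaults like "beta" are discarded for acqfs that don't take them.
    acqf_params = inspect.signature(acqf.__init__).parameters
    return {k: v for k, v in extra_acqf_args.items() if k in acqf_params}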
def test_hyperparam_consistency(self):
    # Verify that creating the model via `from_config` or via `__init__`
    # yields the same hyperparameters.
    m1 = GPClassificationModel(lb=[1, 2], ub=[3, 4])
    m2 = GPClassificationModel.from_config(
        config=Config(config_dict={"common": {"lb": "[1,2]", "ub": "[3,4]"}})
    )

    self.assertIsInstance(m1.covar_module, type(m2.covar_module))
    self.assertIsInstance(
        m1.covar_module.base_kernel, type(m2.covar_module.base_kernel)
    )
    self.assertIsInstance(m1.mean_module, type(m2.mean_module))

    # Compare the priors on the covariance module pairwise by name and type.
    m1priors = list(m1.covar_module.named_priors())
    m2priors = list(m2.covar_module.named_priors())
    for p1, p2 in zip(m1priors, m2priors):
        name1, parent1, prior1, paramtransforms1, priortransforms1 = p1
        name2, parent2, prior2, paramtransforms2, priortransforms2 = p2
        self.assertEqual(name1, name2)
        self.assertIsInstance(parent1, type(parent2))
        self.assertIsInstance(prior1, type(prior2))
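# For context, a hedged sketch of driving `from_config` end to end.
# `MyStrategy` is a hypothetical stand-in for whichever class defines the
# `from_config` above; the section and option names mirror its lookups, and
# (as the test above suggests) Config presumably falls back to the "common"
# section for options missing from the class-named section.
from aepsych.config import Config

config = Config(
    config_dict={
        "common": {"lb": "[0, 0]", "ub": "[1, 1]"},
        "experiment": {"acqf": "MCLevelSetEstimation"},
        "MCLevelSetEstimation": {"beta": "3.98", "target": "0.75"},
    }
)
strat = MyStrategy.from_config(config)  # hypothetical class name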