@classmethod
def from_config(cls, config: Config):
    classname = cls.__name__
    acqf = config.getobj("common", "acqf", fallback=None)
    extra_acqf_args = cls._get_acqf_options(acqf, config)

    # Generation options: acqf optimization settings (restarts, raw samples)
    # plus SGD/scheduler hyperparameters, all read from this class's section.
    options = {}
    options["num_restarts"] = config.getint(classname, "restarts", fallback=10)
    options["raw_samples"] = config.getint(classname, "samps", fallback=1000)
    options["verbosity_freq"] = config.getint(
        classname, "verbosity_freq", fallback=-1
    )
    options["lr"] = config.getfloat(classname, "lr", fallback=0.01)  # type: ignore
    options["momentum"] = config.getfloat(classname, "momentum", fallback=0.9)  # type: ignore
    options["nesterov"] = config.getboolean(classname, "nesterov", fallback=True)
    options["epochs"] = config.getint(classname, "epochs", fallback=50)
    options["milestones"] = config.getlist(
        classname, "milestones", fallback=[25, 40]  # type: ignore
    )
    options["gamma"] = config.getfloat(classname, "gamma", fallback=0.1)  # type: ignore
    options["loss_constraint_fun"] = config.getobj(
        classname, "loss_constraint_fun", fallback=default_loss_constraint_fun
    )

    explore_features = config.getlist(classname, "explore_idxs", fallback=None)  # type: ignore

    return cls(
        acqf=acqf,
        acqf_kwargs=extra_acqf_args,
        model_gen_options=options,
        explore_features=explore_features,
    )
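# A minimal usage sketch (assumptions: the class is registered with AEPsych's
# Config and named "MyGenerator" here purely for illustration; Config accepts
# a config_str argument, as AEPsych's Config does). Options are read from the
# INI section matching cls.__name__, with the fallbacks above filling gaps.
generator_ini = """
[common]
acqf = MonotonicMCLSE

[MyGenerator]
restarts = 20
samps = 500
lr = 0.05
epochs = 100
milestones = [50, 75]
"""
# generator = MyGenerator.from_config(Config(config_str=generator_ini))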
@classmethod
def from_config(cls, config: Config) -> GPClassificationModel:
    """Alternate constructor for GPClassification model.

    This is used when we recursively build a full sampling strategy
    from a configuration. TODO: document how this works in some tutorial.

    Args:
        config (Config): A configuration containing keys/values matching this class.

    Returns:
        GPClassificationModel: Configured class instance.
    """
    classname = cls.__name__
    inducing_size = config.getint(classname, "inducing_size", fallback=10)

    lb = config.gettensor(classname, "lb")
    ub = config.gettensor(classname, "ub")
    dim = config.getint(classname, "dim", fallback=None)

    mean_covar_factory = config.getobj(
        classname, "mean_covar_factory", fallback=default_mean_covar_factory
    )
    mean, covar = mean_covar_factory(config)

    max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)

    inducing_point_method = config.get(
        classname, "inducing_point_method", fallback="auto"
    )

    likelihood_cls = config.getobj(classname, "likelihood", fallback=None)

    if likelihood_cls is not None:
        # Likelihoods may themselves be config-constructible.
        if hasattr(likelihood_cls, "from_config"):
            likelihood = likelihood_cls.from_config(config)
        else:
            likelihood = likelihood_cls()
    else:
        likelihood = None  # fall back to __init__ default

    return cls(
        lb=lb,
        ub=ub,
        dim=dim,
        inducing_size=inducing_size,
        mean_module=mean,
        covar_module=covar,
        max_fit_time=max_fit_time,
        inducing_point_method=inducing_point_method,
        likelihood=likelihood,
    )
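# A hedged example of driving this constructor from an INI string (assuming
# AEPsych's Config, which falls back to the [common] section for keys missing
# from a class's own section, so the bounds below also reach the default
# mean/covar factory):
classification_ini = """
[common]
lb = [0, 0]
ub = [1, 1]

[GPClassificationModel]
inducing_size = 50
"""
model = GPClassificationModel.from_config(Config(config_str=classification_ini))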
@classmethod
def from_config(cls, config: Config) -> MonotonicRejectionGP:
    classname = cls.__name__
    num_induc = config.gettensor(classname, "num_induc", fallback=25)
    num_samples = config.gettensor(classname, "num_samples", fallback=250)
    num_rejection_samples = config.getint(
        classname, "num_rejection_samples", fallback=5000
    )

    lb = config.gettensor(classname, "lb")
    ub = config.gettensor(classname, "ub")
    dim = config.getint(classname, "dim", fallback=None)

    mean_covar_factory = config.getobj(
        classname, "mean_covar_factory", fallback=monotonic_mean_covar_factory
    )
    mean, covar = mean_covar_factory(config)

    monotonic_idxs: List[int] = config.getlist(
        classname, "monotonic_idxs", fallback=[-1]
    )

    return cls(
        monotonic_idxs=monotonic_idxs,
        lb=lb,
        ub=ub,
        dim=dim,
        num_induc=num_induc,
        num_samples=num_samples,
        num_rejection_samples=num_rejection_samples,
        mean_module=mean,
        covar_module=covar,
    )
@classmethod
def _get_acqf_options(cls, acqf: AcquisitionFunction, config: Config):
    if acqf is not None:
        acqf_name = acqf.__name__

        # Candidate options to read from the acqf's own config section.
        default_extra_acqf_args = {
            "beta": 3.98,
            "target": 0.75,
            "objective": None,
            "query_set_size": 512,
        }
        extra_acqf_args = {
            k: config.getobj(
                acqf_name, k, fallback_type=float, fallback=v, warn=False
            )
            for k, v in default_extra_acqf_args.items()
        }

        # Keep only the arguments this acqf's constructor actually accepts.
        acqf_args_expected = signature(acqf).parameters.keys()
        extra_acqf_args = {
            k: v for k, v in extra_acqf_args.items() if k in acqf_args_expected
        }
        if (
            "objective" in extra_acqf_args.keys()
            and extra_acqf_args["objective"] is not None
        ):
            # Objectives come out of the config as classes; instantiate here.
            extra_acqf_args["objective"] = extra_acqf_args["objective"]()
    else:
        extra_acqf_args = {}

    return extra_acqf_args
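# The filtering above is plain constructor introspection. A self-contained
# illustration of the same technique (hypothetical function names):
from inspect import signature

def filter_kwargs(fn, **kwargs):
    """Keep only the kwargs that fn's signature actually accepts."""
    expected = signature(fn).parameters.keys()
    return {k: v for k, v in kwargs.items() if k in expected}

def my_acqf(model, beta=1.0):  # stand-in acquisition function
    pass

assert filter_kwargs(my_acqf, beta=3.98, target=0.75) == {"beta": 3.98}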
@classmethod
def from_config(cls, config: Config):
    classname = cls.__name__
    n_samples = config.getint(classname, "num_samples", fallback=1)
    n_rejection_samples = config.getint(
        classname, "num_rejection_samples", fallback=500
    )
    num_ts_points = config.getint(classname, "num_ts_points", fallback=1000)
    target = config.getfloat(classname, "target", fallback=0.75)
    objective = config.getobj(classname, "objective", fallback=ProbitObjective)
    explore_features = config.getlist(classname, "explore_idxs", fallback=None)  # type: ignore

    return cls(
        n_samples=n_samples,
        n_rejection_samples=n_rejection_samples,
        num_ts_points=num_ts_points,
        target_value=target,
        objective=objective,
        explore_features=explore_features,
    )
@classmethod
def from_config(cls, config: Config):
    classname = cls.__name__
    subgen_cls = config.getobj(
        classname, "subgenerator", fallback=OptimizeAcqfGenerator
    )
    subgen = subgen_cls.from_config(config)
    epsilon = config.getfloat(classname, "epsilon", fallback=0.1)
    return cls(subgenerator=subgen, epsilon=epsilon)
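# The epsilon option above presumably drives an epsilon-greedy tradeoff: with
# probability epsilon, explore uniformly at random within the bounds; otherwise
# defer to the subgenerator. A minimal sketch of that pattern (hypothetical
# helper, not this class's actual gen method):
import numpy as np

def epsilon_greedy_point(subgen_point, lb, ub, epsilon=0.1, rng=None):
    rng = rng or np.random.default_rng()
    if rng.uniform() < epsilon:
        return rng.uniform(lb, ub)  # explore: uniform draw over the box
    return subgen_point  # exploit: keep the subgenerator's candidate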
@classmethod
def from_config(cls, config: Config, name: str):
    gen_cls = config.getobj(name, "generator", fallback=SobolGenerator)
    generator = gen_cls.from_config(config)

    model_cls = config.getobj(name, "model", fallback=None)
    if model_cls is not None:
        model = model_cls.from_config(config)
    else:
        model = None

    acqf_cls = config.getobj(name, "acqf", fallback=None)
    if acqf_cls is not None and hasattr(generator, "acqf"):
        # The generator takes an acquisition function but none was set yet;
        # wire in the one named in this strategy's section.
        if generator.acqf is None:
            generator.acqf = acqf_cls
            generator.acqf_kwargs = generator._get_acqf_options(acqf_cls, config)

    n_trials = config.getint(name, "n_trials")
    refit_every = config.getint(name, "refit_every", fallback=1)

    lb = config.gettensor(name, "lb")
    ub = config.gettensor(name, "ub")
    dim = config.getint(name, "dim", fallback=None)
    outcome_type = config.get(name, "outcome_type", fallback="single_probit")

    if model is not None and not generator._requires_model:
        if refit_every < n_trials:
            warnings.warn(
                f"Strategy '{name}' has refit_every < n_trials even though its "
                "generator does not require a model. Consider making "
                "refit_every = n_trials to speed up point generation.",
                RuntimeWarning,
            )

    return cls(
        lb=lb,
        ub=ub,
        dim=dim,
        model=model,
        generator=generator,
        n_trials=n_trials,
        refit_every=refit_every,
        outcome_type=outcome_type,
    )
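# A hedged end-to-end sketch: unlike the other constructors, this one reads
# from a caller-supplied section name rather than cls.__name__, which is how
# multiple named strategies can coexist in one config ("init_strat" below is
# an arbitrary illustrative name, as is the Strategy-like class):
strat_ini = """
[init_strat]
generator = SobolGenerator
n_trials = 10
lb = [0]
ub = [1]
"""
# strategy = SomeStrategy.from_config(Config(config_str=strat_ini), name="init_strat")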
@classmethod
def from_config(cls, config: Config):
    classname = cls.__name__
    acqf = config.getobj(classname, "acqf", fallback=None)
    extra_acqf_args = cls._get_acqf_options(acqf, config)

    restarts = config.getint(classname, "restarts", fallback=10)
    samps = config.getint(classname, "samps", fallback=1000)
    max_gen_time = config.getfloat(classname, "max_gen_time", fallback=None)

    return cls(
        acqf=acqf,
        acqf_kwargs=extra_acqf_args,
        restarts=restarts,
        samps=samps,
        max_gen_time=max_gen_time,
    )
@classmethod
def from_config(cls, config: Config):
    objective_cls = config.getobj(cls.__name__, "objective")
    objective = objective_cls.from_config(config)
    return cls(objective=objective)
def default_mean_covar_factory(
    config: Config,
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.ScaleKernel]:
    """Default factory for generic GP models.

    Args:
        config (Config): Object containing bounds (and potentially other
            config details).

    Returns:
        Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.ScaleKernel]:
            Instantiated ConstantMean and ScaleKernel with priors based on
            bounds.
    """
    lb = config.gettensor("default_mean_covar_factory", "lb")
    ub = config.gettensor("default_mean_covar_factory", "ub")
    fixed_mean = config.getboolean(
        "default_mean_covar_factory", "fixed_mean", fallback=False
    )
    lengthscale_prior = config.get(
        "default_mean_covar_factory", "lengthscale_prior", fallback="gamma"
    )
    outputscale_prior = config.get(
        "default_mean_covar_factory", "outputscale_prior", fallback="box"
    )
    kernel = config.getobj(
        "default_mean_covar_factory", "kernel", fallback=gpytorch.kernels.RBFKernel
    )

    assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
    dim = lb.shape[0]
    mean = gpytorch.means.ConstantMean()

    if fixed_mean:
        try:
            # Pin the mean at the probit-transformed target and freeze it.
            target = config.getfloat("default_mean_covar_factory", "target")
            mean.constant.requires_grad_(False)
            mean.constant.copy_(torch.tensor([norm.ppf(target)]))
        except NoOptionError:
            raise RuntimeError("Config got fixed_mean=True but no target included!")

    if lengthscale_prior == "invgamma":
        # An inverse-gamma prior, expressed as a gamma prior on 1/lengthscale.
        ls_prior = gpytorch.priors.GammaPrior(
            concentration=__default_invgamma_concentration,
            rate=__default_invgamma_rate,
            transform=lambda x: 1 / x,
        )
        ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
    elif lengthscale_prior == "gamma":
        ls_prior = gpytorch.priors.GammaPrior(concentration=3.0, rate=6.0)
        ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
    else:
        raise RuntimeError(
            f"Lengthscale_prior should be invgamma or gamma, got {lengthscale_prior}"
        )

    if outputscale_prior == "gamma":
        os_prior = gpytorch.priors.GammaPrior(concentration=2.0, rate=0.15)
    elif outputscale_prior == "box":
        os_prior = gpytorch.priors.SmoothedBoxPrior(a=1, b=4)
    else:
        raise RuntimeError(
            f"Outputscale_prior should be gamma or box, got {outputscale_prior}"
        )

    # Initialize the lengthscale at the prior mode.
    ls_constraint = gpytorch.constraints.Positive(
        transform=None, initial_value=ls_prior_mode
    )

    covar = gpytorch.kernels.ScaleKernel(
        kernel(
            lengthscale_prior=ls_prior,
            lengthscale_constraint=ls_constraint,
            ard_num_dims=dim,
        ),
        outputscale_prior=os_prior,
    )

    return mean, covar
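# Usage sketch, plus the closed-form modes used above to initialize the
# lengthscale (standard results; alpha = concentration, beta = rate of the
# underlying gamma, i.e. the scale of the inverse gamma):
#   Gamma(alpha, beta) mode    = (alpha - 1) / beta, for alpha >= 1
#   InvGamma(alpha, beta) mode = beta / (alpha + 1)
factory_ini = """
[default_mean_covar_factory]
lb = [0, 0]
ub = [1, 1]
lengthscale_prior = invgamma
"""
mean, covar = default_mean_covar_factory(Config(config_str=factory_ini))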