@classmethod
def from_config(cls, config: Config) -> MonotonicRejectionGP:
    """Alternate constructor for MonotonicRejectionGP.

    Args:
        config (Config): A configuration containing keys/values matching this class.

    Returns:
        MonotonicRejectionGP: Configured class instance.
    """
    classname = cls.__name__
    num_induc = config.gettensor(classname, "num_induc", fallback=25)
    num_samples = config.gettensor(classname, "num_samples", fallback=250)
    num_rejection_samples = config.getint(
        classname, "num_rejection_samples", fallback=5000
    )
    lb = config.gettensor(classname, "lb")
    ub = config.gettensor(classname, "ub")
    dim = config.getint(classname, "dim", fallback=None)
    mean_covar_factory = config.getobj(
        classname, "mean_covar_factory", fallback=monotonic_mean_covar_factory
    )
    mean, covar = mean_covar_factory(config)
    monotonic_idxs: List[int] = config.getlist(
        classname, "monotonic_idxs", fallback=[-1]
    )
    return cls(
        monotonic_idxs=monotonic_idxs,
        lb=lb,
        ub=ub,
        dim=dim,
        num_induc=num_induc,
        num_samples=num_samples,
        num_rejection_samples=num_rejection_samples,
        mean_module=mean,
        covar_module=covar,
    )
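# Illustrative usage sketch (not part of the library source): the section and
# key names below mirror the lookups in from_config above, but building a
# Config from an INI-style string via a `config_str` keyword is an assumption,
# as is the exact list syntax for the values.
def _example_monotonic_gp_from_config():
    config = Config(
        config_str="""
        [MonotonicRejectionGP]
        lb = [0, 0]
        ub = [1, 1]
        num_induc = 30
        num_rejection_samples = 10000
        monotonic_idxs = [1]
        """
    )
    return MonotonicRejectionGP.from_config(config)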
@classmethod
def from_config(cls, config: Config):
    """Alternate constructor that pulls bounds, dimensionality, and seed from a config."""
    classname = cls.__name__
    lb = config.gettensor(classname, "lb")
    ub = config.gettensor(classname, "ub")
    dim = config.getint(classname, "dim", fallback=None)
    seed = config.getint(classname, "seed", fallback=None)
    return cls(lb=lb, ub=ub, dim=dim, seed=seed)
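# Hypothetical usage: this classmethod reads only bounds, dim, and seed, which
# fits a quasi-random generator such as SobolGenerator (named as a fallback in
# the strategy constructor below). Both that placement and the
# Config(config_str=...) construction are assumptions.
def _example_sobol_generator_from_config():
    config = Config(
        config_str="""
        [SobolGenerator]
        lb = [0]
        ub = [1]
        seed = 123
        """
    )
    return SobolGenerator.from_config(config)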
@classmethod
def from_config(cls, config: Config) -> GPClassificationModel:
    """Alternate constructor for GPClassification model.

    This is used when we recursively build a full sampling strategy
    from a configuration. TODO: document how this works in some tutorial.

    Args:
        config (Config): A configuration containing keys/values matching this class.

    Returns:
        GPClassificationModel: Configured class instance.
    """
    classname = cls.__name__
    inducing_size = config.getint(classname, "inducing_size", fallback=10)
    lb = config.gettensor(classname, "lb")
    ub = config.gettensor(classname, "ub")
    dim = config.getint(classname, "dim", fallback=None)
    mean_covar_factory = config.getobj(
        classname, "mean_covar_factory", fallback=default_mean_covar_factory
    )
    mean, covar = mean_covar_factory(config)
    max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
    inducing_point_method = config.get(
        classname, "inducing_point_method", fallback="auto"
    )

    likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
    if likelihood_cls is not None:
        if hasattr(likelihood_cls, "from_config"):
            likelihood = likelihood_cls.from_config(config)
        else:
            likelihood = likelihood_cls()
    else:
        likelihood = None  # fall back to __init__ default

    return cls(
        lb=lb,
        ub=ub,
        dim=dim,
        inducing_size=inducing_size,
        mean_module=mean,
        covar_module=covar,
        max_fit_time=max_fit_time,
        inducing_point_method=inducing_point_method,
        likelihood=likelihood,
    )
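# Sketch of a config exercising the optional keys above. The likelihood value
# must resolve to a class through config.getobj; gpytorch's BernoulliLikelihood
# is used here purely as an illustrative override (it has no from_config, so it
# would be instantiated with no arguments). Config(config_str=...) is assumed.
def _example_gp_classification_from_config():
    config = Config(
        config_str="""
        [GPClassificationModel]
        lb = [-1, -1]
        ub = [1, 1]
        inducing_size = 50
        max_fit_time = 10.0
        likelihood = BernoulliLikelihood
        """
    )
    return GPClassificationModel.from_config(config)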
def monotonic_mean_covar_factory(
    config: Config,
) -> Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]:
    """Default factory for monotonic GP models based on derivative observations.

    Args:
        config (Config): Config containing (at least) bounds, and optionally LSE target.

    Returns:
        Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]: Instantiated
            mean and scaled RBF kernels with partial derivative observations.
    """
    lb = config.gettensor("monotonic_mean_covar_factory", "lb")
    ub = config.gettensor("monotonic_mean_covar_factory", "ub")
    assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
    dim = lb.shape[0]
    fixed_mean = config.getboolean(
        "monotonic_mean_covar_factory", "fixed_mean", fallback=False
    )

    mean = ConstantMeanPartialObsGrad()

    if fixed_mean:
        try:
            target = config.getfloat("monotonic_mean_covar_factory", "target")
            mean.constant.requires_grad_(False)
            mean.constant.copy_(torch.tensor([norm.ppf(target)]))
        except NoOptionError:
            raise RuntimeError("Config got fixed_mean=True but no target included!")

    ls_prior = gpytorch.priors.GammaPrior(
        concentration=__default_invgamma_concentration,
        rate=__default_invgamma_rate,
        transform=lambda x: 1 / x,
    )
    ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
    ls_constraint = gpytorch.constraints.Positive(
        transform=None, initial_value=ls_prior_mode
    )
    covar = gpytorch.kernels.ScaleKernel(
        RBFKernelPartialObsGrad(
            lengthscale_prior=ls_prior,
            lengthscale_constraint=ls_constraint,
            ard_num_dims=dim,
        ),
        outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
    )

    return mean, covar
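# Sketch of the fixed-mean path: with fixed_mean = True the factory pins the GP
# mean at norm.ppf(target) and freezes it; setting fixed_mean = True while
# omitting target raises the RuntimeError above. Config(config_str=...) is assumed.
def _example_monotonic_factory_fixed_mean():
    config = Config(
        config_str="""
        [monotonic_mean_covar_factory]
        lb = [0, 0]
        ub = [1, 1]
        fixed_mean = True
        target = 0.75
        """
    )
    mean, covar = monotonic_mean_covar_factory(config)
    # mean.constant is now fixed at norm.ppf(0.75) ~= 0.674 and excluded from fitting.
    return mean, covar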
@classmethod
def from_config(cls, config: Config, name: str):
    """Alternate constructor that builds a strategy from a named config section.

    Args:
        config (Config): A configuration containing keys/values for this strategy.
        name (str): Name of this strategy's section in the config.
    """
    gen_cls = config.getobj(name, "generator", fallback=SobolGenerator)
    generator = gen_cls.from_config(config)

    model_cls = config.getobj(name, "model", fallback=None)
    if model_cls is not None:
        model = model_cls.from_config(config)
    else:
        model = None

    acqf_cls = config.getobj(name, "acqf", fallback=None)
    if acqf_cls is not None and hasattr(generator, "acqf"):
        if generator.acqf is None:
            generator.acqf = acqf_cls
            generator.acqf_kwargs = generator._get_acqf_options(acqf_cls, config)

    n_trials = config.getint(name, "n_trials")
    refit_every = config.getint(name, "refit_every", fallback=1)
    lb = config.gettensor(name, "lb")
    ub = config.gettensor(name, "ub")
    dim = config.getint(name, "dim", fallback=None)
    outcome_type = config.get(name, "outcome_type", fallback="single_probit")

    if model is not None and not generator._requires_model:
        if refit_every < n_trials:
            warnings.warn(
                f"Strategy '{name}' has refit_every < n_trials even though its "
                f"generator does not require a model. Consider making "
                f"refit_every = n_trials to speed up point generation.",
                RuntimeWarning,
            )

    return cls(
        lb=lb,
        ub=ub,
        dim=dim,
        model=model,
        generator=generator,
        n_trials=n_trials,
        refit_every=refit_every,
        outcome_type=outcome_type,
    )
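# Hypothetical strategy section wiring together the pieces above. The class
# names OptimizeAcqfGenerator and MCLevelSetEstimation are illustrative
# stand-ins that would need to resolve through config.getobj; Strategy as the
# owning class and Config(config_str=...) are likewise assumptions.
def _example_strategy_from_config():
    config = Config(
        config_str="""
        [opt_strat]
        generator = OptimizeAcqfGenerator
        model = GPClassificationModel
        acqf = MCLevelSetEstimation
        n_trials = 20
        refit_every = 5
        lb = [0]
        ub = [1]
        """
    )
    return Strategy.from_config(config, "opt_strat")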
def default_mean_covar_factory(
    config: Config,
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.ScaleKernel]:
    """Default factory for generic GP models.

    Args:
        config (Config): Object containing bounds (and potentially other
            config details).

    Returns:
        Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.ScaleKernel]:
            Instantiated ConstantMean and ScaleKernel with priors based on bounds.
    """
    lb = config.gettensor("default_mean_covar_factory", "lb")
    ub = config.gettensor("default_mean_covar_factory", "ub")
    fixed_mean = config.getboolean(
        "default_mean_covar_factory", "fixed_mean", fallback=False
    )
    lengthscale_prior = config.get(
        "default_mean_covar_factory", "lengthscale_prior", fallback="gamma"
    )
    outputscale_prior = config.get(
        "default_mean_covar_factory", "outputscale_prior", fallback="box"
    )
    kernel = config.getobj(
        "default_mean_covar_factory", "kernel", fallback=gpytorch.kernels.RBFKernel
    )

    assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
    dim = lb.shape[0]

    mean = gpytorch.means.ConstantMean()

    if fixed_mean:
        try:
            target = config.getfloat("default_mean_covar_factory", "target")
            mean.constant.requires_grad_(False)
            mean.constant.copy_(torch.tensor([norm.ppf(target)]))
        except NoOptionError:
            raise RuntimeError("Config got fixed_mean=True but no target included!")

    if lengthscale_prior == "invgamma":
        # A GammaPrior on 1/lengthscale is an inverse-gamma prior on the lengthscale.
        ls_prior = gpytorch.priors.GammaPrior(
            concentration=__default_invgamma_concentration,
            rate=__default_invgamma_rate,
            transform=lambda x: 1 / x,
        )
        ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
    elif lengthscale_prior == "gamma":
        ls_prior = gpytorch.priors.GammaPrior(concentration=3.0, rate=6.0)
        ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
    else:
        raise RuntimeError(
            f"lengthscale_prior should be invgamma or gamma, got {lengthscale_prior}"
        )

    if outputscale_prior == "gamma":
        os_prior = gpytorch.priors.GammaPrior(concentration=2.0, rate=0.15)
    elif outputscale_prior == "box":
        os_prior = gpytorch.priors.SmoothedBoxPrior(a=1, b=4)
    else:
        raise RuntimeError(
            f"outputscale_prior should be gamma or box, got {outputscale_prior}"
        )

    ls_constraint = gpytorch.constraints.Positive(
        transform=None, initial_value=ls_prior_mode
    )
    covar = gpytorch.kernels.ScaleKernel(
        kernel(
            lengthscale_prior=ls_prior,
            lengthscale_constraint=ls_constraint,
            ard_num_dims=dim,
        ),
        outputscale_prior=os_prior,
    )

    return mean, covar
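# Sketch of the prior/kernel knobs: selecting the inverse-gamma lengthscale
# prior, gamma outputscale prior, and a Matern kernel. Resolving MaternKernel
# by name through config.getobj and Config(config_str=...) are assumptions.
def _example_default_factory_options():
    config = Config(
        config_str="""
        [default_mean_covar_factory]
        lb = [0, 0, 0]
        ub = [1, 1, 1]
        lengthscale_prior = invgamma
        outputscale_prior = gamma
        kernel = MaternKernel
        """
    )
    return default_mean_covar_factory(config)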
def song_mean_covar_factory(
    config: Config,
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]:
    """Factory that makes kernels like Song et al. 2018: linear in the intensity
    dimension (assumed to be the last dimension), RBF in the context dimensions,
    summed.

    Args:
        config (Config): Config object containing (at least) bounds and
            optionally LSE target.

    Returns:
        Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]:
            Instantiated constant mean object and additive kernel object.
    """
    lb = config.gettensor("song_mean_covar_factory", "lb")
    ub = config.gettensor("song_mean_covar_factory", "ub")
    assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
    dim = lb.shape[0]

    mean = gpytorch.means.ConstantMean()

    try:
        target = config.getfloat("song_mean_covar_factory", "target")
    except NoOptionError:
        target = 0.75
    mean.constant.requires_grad_(False)
    mean.constant.copy_(torch.tensor([norm.ppf(target)]))

    ls_prior = gpytorch.priors.GammaPrior(
        concentration=__default_invgamma_concentration,
        rate=__default_invgamma_rate,
        transform=lambda x: 1 / x,
    )
    ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
    ls_constraint = gpytorch.constraints.Positive(
        transform=None, initial_value=ls_prior_mode
    )

    stim_dim = config.getint("song_mean_covar_factory", "stim_dim", fallback=-1)
    context_dims = list(range(dim))
    stim_dim = context_dims.pop(stim_dim)  # support relative stim dims

    if dim == 1:
        # This could just be a LinearKernel, but for consistency of interface
        # we make it additive with one module.
        return (
            mean,
            gpytorch.kernels.AdditiveKernel(
                gpytorch.kernels.ScaleKernel(
                    gpytorch.kernels.LinearKernel(ard_num_dims=1),
                    outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
                )
            ),
        )
    else:
        context_covar = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                lengthscale_prior=ls_prior,
                lengthscale_constraint=ls_constraint,
                ard_num_dims=dim - 1,
                active_dims=context_dims,
            ),
            outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
        )
        intensity_covar = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.LinearKernel(active_dims=stim_dim, ard_num_dims=1),
            outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
        )
        return mean, context_covar + intensity_covar
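# Sketch of a non-default stimulus dimension: with three dimensions and
# stim_dim = 0, the first dimension gets the linear (intensity) kernel and the
# other two share the RBF context kernel. Config(config_str=...) is assumed.
def _example_song_factory_stim_dim():
    config = Config(
        config_str="""
        [song_mean_covar_factory]
        lb = [0, 0, 0]
        ub = [1, 1, 1]
        stim_dim = 0
        target = 0.9
        """
    )
    return song_mean_covar_factory(config)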