    @classmethod
    def from_config(cls, config: Config) -> MonotonicRejectionGP:
        classname = cls.__name__
        num_induc = config.getint(classname, "num_induc", fallback=25)
        num_samples = config.getint(classname, "num_samples", fallback=250)
        num_rejection_samples = config.getint(classname,
                                              "num_rejection_samples",
                                              fallback=5000)

        lb = config.gettensor(classname, "lb")
        ub = config.gettensor(classname, "ub")
        dim = config.getint(classname, "dim", fallback=None)

        mean_covar_factory = config.getobj(
            classname,
            "mean_covar_factory",
            fallback=monotonic_mean_covar_factory)

        mean, covar = mean_covar_factory(config)

        monotonic_idxs: List[int] = config.getlist(classname,
                                                   "monotonic_idxs",
                                                   fallback=[-1])

        return cls(
            monotonic_idxs=monotonic_idxs,
            lb=lb,
            ub=ub,
            dim=dim,
            num_induc=num_induc,
            num_samples=num_samples,
            num_rejection_samples=num_rejection_samples,
            mean_module=mean,
            covar_module=covar,
        )
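
For reference, a minimal sketch of driving this constructor from an INI-style config string. It assumes the AEPsych conventions that each class reads options from a section named after itself and that Config accepts a config_str argument; the values are illustrative, not required:

config_str = """
[MonotonicRejectionGP]
lb = [0, 0]
ub = [1, 1]
num_induc = 25
num_rejection_samples = 5000
monotonic_idxs = [-1]
"""
# Every option above has a fallback except lb and ub, which must be present.
model = MonotonicRejectionGP.from_config(Config(config_str=config_str))
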
Example No. 2
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__
        n_samples = config.getint(classname, "num_samples", fallback=1)
        n_rejection_samples = config.getint(classname,
                                            "num_rejection_samples",
                                            fallback=500)
        num_ts_points = config.getint(classname,
                                      "num_ts_points",
                                      fallback=1000)
        target = config.getfloat(classname, "target", fallback=0.75)
        objective = config.getobj(classname,
                                  "objective",
                                  fallback=ProbitObjective)
        explore_features = config.getlist(classname,
                                          "explore_idxs",
                                          fallback=None)  # type: ignore

        return cls(
            n_samples=n_samples,
            n_rejection_samples=n_rejection_samples,
            num_ts_points=num_ts_points,
            target_value=target,
            objective=objective,
            explore_features=explore_features,
        )
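
Note that config.getobj returns the configured class object itself, so the objective (ProbitObjective by default) is passed through unconstructed; contrast Example No. 5 below, where the configured likelihood class is instantiated, via its own from_config when it defines one, before being handed to the constructor.
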
Example No. 3
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__
        acqf = config.getobj("common", "acqf", fallback=None)
        extra_acqf_args = cls._get_acqf_options(acqf, config)

        options = {}
        options["num_restarts"] = config.getint(classname, "restarts", fallback=10)
        options["raw_samples"] = config.getint(classname, "samps", fallback=1000)
        options["verbosity_freq"] = config.getint(
            classname, "verbosity_freq", fallback=-1
        )
        options["lr"] = config.getfloat(classname, "lr", fallback=0.01)  # type: ignore
        options["momentum"] = config.getfloat(classname, "momentum", fallback=0.9)  # type: ignore
        options["nesterov"] = config.getboolean(classname, "nesterov", fallback=True)
        options["epochs"] = config.getint(classname, "epochs", fallback=50)
        options["milestones"] = config.getlist(
            classname, "milestones", fallback=[25, 40]  # type: ignore
        )
        options["gamma"] = config.getfloat(classname, "gamma", fallback=0.1)  # type: ignore
        options["loss_constraint_fun"] = config.getobj(
            classname, "loss_constraint_fun", fallback=default_loss_constraint_fun
        )

        explore_features = config.getlist(classname, "explore_idxs", fallback=None)  # type: ignore

        return cls(
            acqf=acqf,
            acqf_kwargs=extra_acqf_args,
            model_gen_options=options,
            explore_features=explore_features,
        )
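
With an empty config, every option above resolves to its fallback; for reference, the model_gen_options dictionary then comes out as (values copied from the fallbacks above):

options = {
    "num_restarts": 10,        # optimization restarts
    "raw_samples": 1000,       # raw samples used for initialization
    "verbosity_freq": -1,      # periodic loss logging disabled
    "lr": 0.01,                # SGD learning rate
    "momentum": 0.9,
    "nesterov": True,
    "epochs": 50,
    "milestones": [25, 40],    # scheduler milestones, in epochs
    "gamma": 0.1,              # LR decay factor at each milestone
    "loss_constraint_fun": default_loss_constraint_fun,
}
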
Example No. 4
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__

        lb = config.gettensor(classname, "lb")
        ub = config.gettensor(classname, "ub")
        dim = config.getint(classname, "dim", fallback=None)
        seed = config.getint(classname, "seed", fallback=None)

        return cls(lb=lb, ub=ub, dim=dim, seed=seed)
Example No. 5
    @classmethod
    def from_config(cls, config: Config) -> GPClassificationModel:
        """Alternate constructor for GPClassification model.

        This is used when we recursively build a full sampling strategy
        from a configuration. TODO: document how this works in some tutorial.

        Args:
            config (Config): A configuration containing keys/values matching this class

        Returns:
            GPClassificationModel: Configured class instance.
        """

        classname = cls.__name__
        inducing_size = config.getint(classname, "inducing_size", fallback=10)

        lb = config.gettensor(classname, "lb")
        ub = config.gettensor(classname, "ub")
        dim = config.getint(classname, "dim", fallback=None)

        mean_covar_factory = config.getobj(classname,
                                           "mean_covar_factory",
                                           fallback=default_mean_covar_factory)

        mean, covar = mean_covar_factory(config)
        max_fit_time = config.getfloat(classname,
                                       "max_fit_time",
                                       fallback=None)

        inducing_point_method = config.get(classname,
                                           "inducing_point_method",
                                           fallback="auto")

        likelihood_cls = config.getobj(classname, "likelihood", fallback=None)

        if likelihood_cls is not None:
            if hasattr(likelihood_cls, "from_config"):
                likelihood = likelihood_cls.from_config(config)
            else:
                likelihood = likelihood_cls()
        else:
            likelihood = None  # fall back to __init__ default

        return cls(
            lb=lb,
            ub=ub,
            dim=dim,
            inducing_size=inducing_size,
            mean_module=mean,
            covar_module=covar,
            max_fit_time=max_fit_time,
            inducing_point_method=inducing_point_method,
            likelihood=likelihood,
        )
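
A sketch of a config exercising this constructor, again assuming class-named sections and a Config(config_str=...) entry point. With no likelihood key, the None branch above is taken and __init__ supplies its default likelihood:

config_str = """
[GPClassificationModel]
lb = [0]
ub = [1]
inducing_size = 10
inducing_point_method = auto
"""
model = GPClassificationModel.from_config(Config(config_str=config_str))
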
Example No. 6
    @classmethod
    def from_config(cls, config: Config, name: str):
        gen_cls = config.getobj(name, "generator", fallback=SobolGenerator)
        generator = gen_cls.from_config(config)

        model_cls = config.getobj(name, "model", fallback=None)
        if model_cls is not None:
            model = model_cls.from_config(config)
        else:
            model = None

        acqf_cls = config.getobj(name, "acqf", fallback=None)
        if acqf_cls is not None and hasattr(generator, "acqf"):
            if generator.acqf is None:
                generator.acqf = acqf_cls
                generator.acqf_kwargs = generator._get_acqf_options(
                    acqf_cls, config)

        n_trials = config.getint(name, "n_trials")
        refit_every = config.getint(name, "refit_every", fallback=1)

        lb = config.gettensor(name, "lb")
        ub = config.gettensor(name, "ub")
        dim = config.getint(name, "dim", fallback=None)

        outcome_type = config.get(name,
                                  "outcome_type",
                                  fallback="single_probit")

        if model is not None and not generator._requires_model:
            if refit_every < n_trials:
                warnings.warn(
                    f"Strategy '{name}' has refit_every < n_trials even though its generator does not require a model. Consider making refit_every = n_trials to speed up point generation.",
                    RuntimeWarning,
                )

        return cls(
            lb=lb,
            ub=ub,
            dim=dim,
            model=model,
            generator=generator,
            n_trials=n_trials,
            refit_every=refit_every,
            outcome_type=outcome_type,
        )
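
Unlike the other constructors here, this one takes an explicit section name rather than keying on the class name, so a single config file can define several differently-configured strategies. A hypothetical sketch (Strategy stands for whichever class owns this constructor; the generator falls back to SobolGenerator, so no model or acqf is needed):

config_str = """
[init_strat]
lb = [0]
ub = [1]
n_trials = 10
"""
strat = Strategy.from_config(Config(config_str=config_str), name="init_strat")

Example No. 7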
    @classmethod
    def from_config(cls, config: Config):
        classname = cls.__name__
        acqf = config.getobj(classname, "acqf", fallback=None)
        extra_acqf_args = cls._get_acqf_options(acqf, config)

        restarts = config.getint(classname, "restarts", fallback=10)
        samps = config.getint(classname, "samps", fallback=1000)

        max_gen_time = config.getfloat(classname,
                                       "max_gen_time",
                                       fallback=None)

        return cls(
            acqf=acqf,
            acqf_kwargs=extra_acqf_args,
            restarts=restarts,
            samps=samps,
            max_gen_time=max_gen_time,
        )
Example No. 8
def song_mean_covar_factory(
    config: Config,
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]:
    """
    Factory that makes kernels like Song et al. 2018:
    linear in the intensity dimension (the last dimension by
    default; configurable via stim_dim), RBF in the context
    dimensions, summed.

    Args:
        config (Config): Config object containing (at least) bounds and optionally
            LSE target.

    Returns:
        Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]: Instantiated
            constant mean object and additive kernel object.
    """
    lb = config.gettensor("song_mean_covar_factory", "lb")
    ub = config.gettensor("song_mean_covar_factory", "ub")
    assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
    dim = lb.shape[0]

    mean = gpytorch.means.ConstantMean()

    try:
        target = config.getfloat("song_mean_covar_factory", "target")
    except NoOptionError:
        target = 0.75  # default LSE target
    # fix the constant mean at the probit of the target level
    mean.constant.requires_grad_(False)
    mean.constant.copy_(torch.tensor([norm.ppf(target)]))

    # gamma prior on 1 / lengthscale, i.e. an inverse-gamma prior on the lengthscale
    ls_prior = gpytorch.priors.GammaPrior(
        concentration=__default_invgamma_concentration,
        rate=__default_invgamma_rate,
        transform=lambda x: 1 / x,
    )
    # mode of the implied inverse-gamma distribution, used to initialize the lengthscale
    ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)

    ls_constraint = gpytorch.constraints.Positive(transform=None,
                                                  initial_value=ls_prior_mode)

    stim_dim = config.getint("song_mean_covar_factory",
                             "stim_dim",
                             fallback=-1)
    context_dims = list(range(dim))
    stim_dim = context_dims.pop(stim_dim)  # support relative stim dims

    if dim == 1:
        # this could just be a LinearKernel, but for interface consistency
        # we wrap it in an AdditiveKernel with a single module
        return (
            mean,
            gpytorch.kernels.AdditiveKernel(
                gpytorch.kernels.ScaleKernel(
                    gpytorch.kernels.LinearKernel(ard_num_dims=1),
                    outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1,
                                                                       b=4),
                )),
        )
    else:
        context_covar = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                lengthscale_prior=ls_prior,
                lengthscale_constraint=ls_constraint,
                ard_num_dims=dim - 1,
                active_dims=context_dims,
            ),
            outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
        )
        intensity_covar = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.LinearKernel(active_dims=stim_dim,
                                          ard_num_dims=1),
            outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
        )

    return mean, context_covar + intensity_covar
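
Finally, a sketch of calling the factory directly, under the same Config(config_str=...) assumption; keys match the getters above. With three dimensions the additive branch runs, and stim_dim = 1 selects the middle dimension as the linear intensity dimension:

config_str = """
[song_mean_covar_factory]
lb = [0, 0, 0]
ub = [1, 1, 1]
target = 0.75
stim_dim = 1
"""
mean, covar = song_mean_covar_factory(Config(config_str=config_str))
# covar = ScaleKernel(RBF over context dims [0, 2]) + ScaleKernel(Linear over dim 1)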