Example #1
import math

import torch
from gpytorch.constraints import Interval
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood

# NOTE: `GP` is the custom ExactGP model class defined elsewhere in this codebase.


def train_gp(train_x, train_y, use_ard, num_steps, hypers=None):
    """Fit a GP model where train_x is in [0, 1]^d and train_y is standardized."""
    assert train_x.ndim == 2
    assert train_y.ndim == 1
    assert train_x.shape[0] == train_y.shape[0]

    # Create hyperparameter bounds
    noise_constraint = Interval(5e-4, 0.2)
    if use_ard:
        lengthscale_constraint = Interval(0.005, 2.0)
    else:
        lengthscale_constraint = Interval(
            0.005, math.sqrt(train_x.shape[1]))  # [0.005, sqrt(dim)]
    outputscale_constraint = Interval(0.05, 20.0)

    # Create models
    likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(
        device=train_x.device, dtype=train_y.dtype)
    ard_dims = train_x.shape[1] if use_ard else None
    model = GP(
        train_x=train_x,
        train_y=train_y,
        likelihood=likelihood,
        lengthscale_constraint=lengthscale_constraint,
        outputscale_constraint=outputscale_constraint,
        ard_dims=ard_dims,
    ).to(device=train_x.device, dtype=train_x.dtype)

    # Find optimal model hyperparameters
    model.train()
    likelihood.train()

    # "Loss" for GPs - the marginal log likelihood
    mll = ExactMarginalLogLikelihood(likelihood, model)

    # Initialize model hypers
    if hypers:
        model.load_state_dict(hypers)
    else:
        # Default initial values for unit-cube inputs and standardized targets
        hypers = {
            "covar_module.outputscale": 1.0,
            "covar_module.base_kernel.lengthscale": 0.5,
            "likelihood.noise": 0.005,
        }
        model.initialize(**hypers)

    # Use the adam optimizer
    optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1)

    for _ in range(num_steps):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        optimizer.step()

    # Switch to eval mode
    model.eval()
    likelihood.eval()

    return model
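
A minimal usage sketch for the trainer above; the data shapes, step count, and test points are illustrative only, and it assumes the custom `GP` class (and the imports above) are available:

# Illustrative only: fit on random inputs in [0, 1]^2 with standardized targets.
train_x = torch.rand(20, 2)
train_y = torch.sin(6 * train_x[:, 0]) + torch.cos(6 * train_x[:, 1])
train_y = (train_y - train_y.mean()) / train_y.std()

model = train_gp(train_x, train_y, use_ard=True, num_steps=50)

# Posterior predictive at fresh test points (model and likelihood are in eval mode).
test_x = torch.rand(5, 2)
with torch.no_grad():
    pred = model.likelihood(model(test_x))
    print(pred.mean.shape)  # torch.Size([5])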
Example #2
    def __init__(self, num_tasks, prior=None, var_constraint=None, **kwargs):
        super().__init__(**kwargs)
        # Register the unconstrained ("raw") parameter; the constraint below
        # maps it into the valid range whenever it is accessed.
        self.register_parameter(name="raw_c2",
                                parameter=torch.nn.Parameter(
                                    torch.randn(*self.batch_shape, 1)))
        self.num_tasks = num_tasks

        # Default: constrain c2 to the interval (0, 1). Note that `prior` is
        # accepted but not used in this excerpt.
        if var_constraint is None:
            var_constraint = Interval(0, 1)

        self.register_constraint("raw_c2", var_constraint)
Example #3
    def __init__(self,
                 num_tasks,
                 rho_prior=None,
                 var_constraint=None,
                 **kwargs):
        super().__init__(**kwargs)

        # Register the unconstrained ("raw") correlation parameter.
        self.register_parameter(name="raw_rho",
                                parameter=torch.nn.Parameter(
                                    torch.randn(*self.batch_shape, 1)))
        self.num_tasks = num_tasks

        # Default: constrain rho to the valid correlation range (-1, 1).
        if var_constraint is None:
            var_constraint = Interval(-1, 1)

        self.register_constraint("raw_rho", var_constraint)

        # Optionally place a prior on the constrained value; the closures
        # read and write rho through the property/setter on the class.
        if rho_prior is not None:
            self.register_prior("rho_prior", rho_prior, lambda m: m.rho,
                                lambda m, v: m._set_rho(v))
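
The prior closures reference m.rho and m._set_rho, which the excerpt omits; they typically look like this sketch, assumed here to follow the standard GPyTorch raw-parameter pattern:

    @property
    def rho(self):
        # Constrained correlation value in (-1, 1).
        return self.raw_rho_constraint.transform(self.raw_rho)

    @rho.setter
    def rho(self, value):
        self._set_rho(value)

    def _set_rho(self, value):
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_rho)
        # Store the inverse-transformed value in the raw parameter.
        self.initialize(raw_rho=self.raw_rho_constraint.inverse_transform(value))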