Example #1
    def __init__(self, input, inducing_size, device='cpu'):
        if device == 'gpu' and torch.cuda.is_available():
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')

        if input.ndim == 1:
            self.input_size = 1
        else:
            self.input_size = input.shape[-1]

        self.inducing_size = inducing_size

        _likelihood = GaussianLikelihood()
        super(SparseGPRegressor, self).__init__(train_inputs=None,
                                                train_targets=None,
                                                likelihood=_likelihood)

        self.mean_module = ZeroMean()
        self.base_covar_module = ScaleKernel(RBFKernel())

        inducing_idx = np.random.choice(len(input), inducing_size, replace=False)
        self.covar_module = InducingPointKernel(self.base_covar_module,
                                                inducing_points=input[inducing_idx, ...],
                                                likelihood=_likelihood)

        self.input_trans = None
        self.target_trans = None
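
Like most snippets on this page, Example #1 omits the forward pass that turns the constructor into a usable model. Below is a minimal, self-contained sketch of the pattern shared by these examples; the class name and toy data are illustrative, not taken from any one example.

import torch
import gpytorch
from gpytorch.kernels import InducingPointKernel, RBFKernel, ScaleKernel

class MinimalSGPR(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood, inducing_points):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ZeroMean()
        self.base_covar_module = ScaleKernel(RBFKernel())
        # InducingPointKernel turns the exact GP into a Titsias-style SGPR
        self.covar_module = InducingPointKernel(self.base_covar_module,
                                                inducing_points=inducing_points,
                                                likelihood=likelihood)

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

# toy usage
train_x = torch.linspace(0, 1, 1000).unsqueeze(-1)
train_y = torch.sin(6 * train_x.squeeze()) + 0.1 * torch.randn(1000)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = MinimalSGPR(train_x, train_y, likelihood,
                    inducing_points=train_x[:32].clone())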
Example #2
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     # priors keep the constant mean near zero and softly bound the
     # lengthscale (cf. Example #5, which builds the same model)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.base_covar_module = RBFKernel(
         lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6)))
     self.covar_module = InducingPointKernel(self.base_covar_module,
                                             inducing_points=torch.linspace(0, 1, 32),
                                             likelihood=likelihood)
Example #3
 def __init__(self, train_x, train_y):
     likelihood = gpytorch.likelihoods.GaussianLikelihood()
     super().__init__(train_x, train_y, likelihood)
     self.mean_module = gpytorch.means.ZeroMean()
     self.covar_module = InducingPointKernel(
         ScaleKernel(RBFKernel(ard_num_dims=3)), inducing_points=torch.randn(512, 3), likelihood=likelihood,
     )
Example #4
 def __init__(self, train_x, train_y, train_u, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean()
     self.base_covar_module = ScaleKernel(RBFKernel())
     self.covar_module = InducingPointKernel(self.base_covar_module,
                                             inducing_points=train_u,
                                             likelihood=likelihood)
Example #5
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1)))
     self.covar_module = InducingPointKernel(
         self.base_covar_module, inducing_points=torch.linspace(0, 1, 32), likelihood=likelihood
     )
Example #6
def create_sgpr_kernel(d, ard=False, kernel_type='RBF', inducing_points=800, init_lengthscale_range=(1.0, 1.0),
                       X=None, likelihood=None):
    if ard:
        ard_num_dims = d
    else:
        ard_num_dims = None
    if kernel_type == 'RBF':
        kernel = gpytorch.kernels.RBFKernel(ard_num_dims=ard_num_dims)
    elif kernel_type == 'Matern':
        kernel = gpytorch.kernels.MaternKernel(nu=1.5, ard_num_dims=ard_num_dims)
    elif kernel_type == 'InverseMQ':
        kernel = InverseMQKernel(ard_num_dims=ard_num_dims)
    else:
        raise ValueError("Unknown kernel type")

    if ard:
        samples = ard_num_dims
    else:
        samples = 1
    kernel.initialize(lengthscale=_sample_from_range(samples, init_lengthscale_range))

    if X is None:
        raise ValueError("X is required")
    if likelihood is None:
        raise ValueError("Likelihood is required")
    kernel = InducingPointKernel(kernel, X[:inducing_points], likelihood)
    return kernel
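
A hypothetical call, assuming _sample_from_range and InverseMQKernel are defined elsewhere in the same module; the data shapes are placeholders.

import torch
import gpytorch

X = torch.randn(5000, 8)                                # placeholder data
likelihood = gpytorch.likelihoods.GaussianLikelihood()
kernel = create_sgpr_kernel(d=8, ard=True, kernel_type='Matern',
                            inducing_points=800, X=X, likelihood=likelihood)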
Example #7
 def __init__(self, x_train, y_train, likelihood):
     super().__init__(x_train, y_train, likelihood)
     self.mean = ConstantMean()
     base_kernel = ScaleKernel(RBFKernel())
     # self.covariance = base_kernel
     # here we chose inducing points very randomly just based on the first five
     # samples of training data but it can be much better or smarter
     self.covariance = InducingPointKernel(base_kernel,
                                           inducing_points=x_train[:5],
                                           likelihood=likelihood)
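
One of the "smarter" choices the comment alludes to is clustering; Example #14 below does exactly this with MiniBatchKMeans. A sketch of such a helper (the function name is ours, and x_train is assumed to be 2-D):

import torch
from sklearn.cluster import MiniBatchKMeans

def kmeans_inducing_points(x_train, m):
    # pick m inducing points as k-means cluster centers (cf. Example #14)
    kmeans = MiniBatchKMeans(n_clusters=m, n_init=10)
    kmeans.fit(x_train.cpu().numpy())
    return torch.from_numpy(kmeans.cluster_centers_).to(x_train.dtype)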
Example #8
    def __init__(self, train_x, train_y, likelihood, Z_init):

        super(BayesianSparseGPR_HMC, self).__init__(train_x, train_y,
                                                    likelihood)
        self.train_x = train_x
        self.train_y = train_y
        self.inducing_points = Z_init
        self.num_inducing = len(Z_init)
        self.mean_module = ZeroMean()
        self.base_covar_module = ScaleKernel(RBFKernel())
        self.covar_module = InducingPointKernel(self.base_covar_module,
                                                inducing_points=Z_init,
                                                likelihood=likelihood)
Example #9
 def __init__(self, train_inputs, train_targets, inducing_points,
              likelihood):
     super().__init__(train_inputs, train_targets, likelihood)
     if train_inputs.ndim == 2:
         dims = train_inputs.shape[1]
     else:
         dims = 1
     self.mean_module = gpytorch.means.ZeroMean()
     # self.base_covar_module = ScaleKernel(RBFKernel(ard_num_dims=dims))
     self.base_covar_module = ScaleKernel(
         MaternKernel(ard_num_dims=dims, nu=1.5))
     self.covar_module = InducingPointKernel(self.base_covar_module,
                                             inducing_points, likelihood)
Example #10
    def __init__(self, train_x, train_y, likelihood):
        """Using InducingPointKernel in order to handle large data sets."""

        super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ZeroMean()
        self.base_covar_module = ScaleKernel(RBFKernel())
        rank = 10
        X = train_x.numpy()
        induced_points = np.linspace(0, X.shape[0] - 1, num=rank, dtype=int)

        self.covar_module = InducingPointKernel(
            self.base_covar_module,
            inducing_points=train_x[induced_points, :],
            likelihood=likelihood)
Example #11
 def __init__(self, train_x, train_y, likelihood, Z_init):
     """The sparse GP class for regression with the collapsed bound.
        q*(u) is implicit.
     """
     super(SparseGPR, self).__init__(train_x, train_y, likelihood)
     self.train_x = train_x
     self.train_y = train_y
     self.inducing_points = Z_init
     self.num_inducing = len(Z_init)
     self.likelihood = likelihood
     self.mean_module = ZeroMean()
     self.base_covar_module = ScaleKernel(RBFKernel())
     self.covar_module = InducingPointKernel(self.base_covar_module,
                                             inducing_points=Z_init,
                                             likelihood=self.likelihood)
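
Because q*(u) is collapsed out, this model trains like any ExactGP: ExactMarginalLogLikelihood also picks up the trace correction that InducingPointKernel registers, so the loss below is the collapsed SGPR bound. A sketch; the optimizer settings are illustrative, and the class is assumed to define the usual forward (see the sketch after Example #1).

model = SparseGPR(train_x, train_y, likelihood, Z_init)
model.train()
likelihood.train()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
for _ in range(200):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)   # negative collapsed bound
    loss.backward()
    optimizer.step()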
Example #12
    def __init__(self,
                 train_x,
                 train_y,
                 number_of_inducing_points,
                 likelihood=None,
                 rbf_var=5.,
                 rbf_lengthscale=10.,
                 lin_var=5):
        # create the default likelihood here rather than in the signature,
        # so each model instance gets its own GaussianLikelihood
        if likelihood is None:
            likelihood = gpytorch.likelihoods.GaussianLikelihood()
        train_x = torch.tensor(train_x)
        train_y = torch.tensor(train_y)
        rand_select = random.sample(range(train_x.shape[0]),
                                    number_of_inducing_points)
        inducing_points = train_x[rand_select]

        super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
        self.likelihood = likelihood
        self.mean_module = ConstantMean()
        kernel = kernel_fun(rbf_var, rbf_lengthscale, lin_var)
        self.base_covar_module = kernel
        self.covar_module = InducingPointKernel(self.base_covar_module,
                                                inducing_points,
                                                likelihood=self.likelihood)
Example #13
    def __init__(self, inputs, targets, likelihood):

        # check the hardware
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        # store inputs and outputs
        self.inputs = torch.from_numpy(inputs).float().to(self.device)
        self.targets = torch.from_numpy(targets).float().to(self.device)

        # initialise GP and store likelihood
        ExactGP.__init__(self, self.inputs, self.targets, likelihood)
        self.likelihood = likelihood

        # mean and covariance
        self.mean = ConstantMean()
        # self.cov = GaussianSymmetrizedKLKernel()
        self.cov = MaternKernel(ard_num_dims=2)
        self.cov = ScaleKernel(self.cov)  # ScaleKernel has no lengthscale, so ard_num_dims is not needed here
        self.cov = InducingPointKernel(self.cov, self.inputs, self.likelihood)

        # you better have a GPU!
        self.likelihood.to(self.device).float()
        self.to(self.device).float()
Example #14
def gp_torch_train(train_x: Tensor,
                   train_y: Tensor,
                   n_inducing_points: int,
                   tkwargs: Dict[str, Any],
                   init,
                   scale: bool,
                   covar_name: str,
                   gp_file: Optional[str],
                   save_file: str,
                   input_wp: bool,
                   outcome_transform: Optional[OutcomeTransform] = None,
                   options: Optional[Dict[str, Any]] = None) -> SingleTaskGP:
    assert train_y.ndim > 1, train_y.shape
    assert gp_file or init, (gp_file, init)
    likelihood = gpytorch.likelihoods.GaussianLikelihood()

    if init:
        # build hyp
        print("Initialize GP hparams...")
        print("Doing Kmeans init...")
        assert n_inducing_points > 0, n_inducing_points
        kmeans = MiniBatchKMeans(n_clusters=n_inducing_points,
                                 batch_size=min(10000, train_x.shape[0]),
                                 n_init=25)
        start_time = time.time()
        kmeans.fit(train_x.cpu().numpy())
        end_time = time.time()
        print(f"K means took {end_time - start_time:.1f}s to finish...")
        inducing_points = torch.from_numpy(kmeans.cluster_centers_.copy())

        output_scale = None
        if scale:
            output_scale = train_y.var().item()
        lscales = torch.empty(1, train_x.shape[1])
        for i in range(train_x.shape[1]):
            lscales[0, i] = torch.pdist(train_x[:, i].view(
                -1, 1)).median().clamp(min=0.01)
        base_covar_module = query_covar(covar_name=covar_name,
                                        scale=scale,
                                        outputscale=output_scale,
                                        lscales=lscales)

        covar_module = InducingPointKernel(base_covar_module,
                                           inducing_points=inducing_points,
                                           likelihood=likelihood)

        input_warp_tf = None
        if input_wp:
            # Apply input warping
            # initialize input_warping transformation
            input_warp_tf = CustomWarp(
                indices=list(range(train_x.shape[-1])),
                # use a prior with median at 1.
                # when a=1 and b=1, the Kumaraswamy CDF is the identity function
                concentration1_prior=LogNormalPrior(0.0, 0.75**0.5),
                concentration0_prior=LogNormalPrior(0.0, 0.75**0.5),
            )

        model = SingleTaskGP(train_x,
                             train_y,
                             covar_module=covar_module,
                             likelihood=likelihood,
                             input_transform=input_warp_tf,
                             outcome_transform=outcome_transform)
    else:
        # load model
        output_scale = 1  # will be overwritten when loading model
        lscales = torch.ones(
            train_x.shape[1])  # will be overwritten when loading model
        base_covar_module = query_covar(covar_name=covar_name,
                                        scale=scale,
                                        outputscale=output_scale,
                                        lscales=lscales)
        covar_module = InducingPointKernel(base_covar_module,
                                           inducing_points=torch.empty(
                                               n_inducing_points,
                                               train_x.shape[1]),
                                           likelihood=likelihood)

        input_warp_tf = None
        if input_wp:
            # Apply input warping
            # initialize input_warping transformation
            input_warp_tf = Warp(
                indices=list(range(train_x.shape[-1])),
                # use a prior with median at 1.
                # when a=1 and b=1, the Kumaraswamy CDF is the identity function
                concentration1_prior=LogNormalPrior(0.0, 0.75**0.5),
                concentration0_prior=LogNormalPrior(0.0, 0.75**0.5),
            )
        model = SingleTaskGP(train_x,
                             train_y,
                             covar_module=covar_module,
                             likelihood=likelihood,
                             input_transform=input_warp_tf,
                             outcome_transform=outcome_transform)
        print("Loading GP from file")
        state_dict = torch.load(gp_file)
        model.load_state_dict(state_dict)

    print("GP regression")
    start_time = time.time()
    model.to(**tkwargs)
    model.train()

    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    # set approx_mll to False since we are using an exact marginal log likelihood
    # fit_gpytorch_model(mll, optimizer=fit_gpytorch_torch, approx_mll=False, options=options)
    fit_gpytorch_torch(mll,
                       options=options,
                       approx_mll=False,
                       clip_by_value=input_wp,
                       clip_value=10.0)
    end_time = time.time()
    print(f"Regression took {end_time - start_time:.1f}s to finish...")

    print("Save GP model...")
    torch.save(model.state_dict(), save_file)
    print("Done training of GP.")

    model.eval()
    return model
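
Once gp_torch_train returns, prediction follows the standard BoTorch pattern; test_x below is an illustrative placeholder.

test_x = torch.randn(100, train_x.shape[1], **tkwargs)  # placeholder inputs
model.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    posterior = model.posterior(test_x)
    mean, var = posterior.mean, posterior.variance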