def __init__(self, train_inputs, train_targets, likelihood, batch_size=1):
    """Exact two-task GP: batched constant mean + RBF data kernel.

    Args:
        train_inputs: training inputs forwarded to the exact-GP base class.
        train_targets: training targets.
        likelihood: GPyTorch likelihood for the model.
        batch_size: number of independent GP batches (default 1).
    """
    super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
    self.mean_module = MultitaskMean(
        ConstantMean(batch_size=batch_size, prior=gpytorch.priors.SmoothedBoxPrior(-1, 1)), num_tasks=2
    )
    # Both the batched and unbatched branches built the exact same kernel
    # except for the explicit batch_size kwarg, so build the kwargs once
    # instead of duplicating the whole construction.
    lengthscale_prior = gpytorch.priors.NormalPrior(
        loc=torch.zeros(batch_size, 1, 1), scale=torch.ones(batch_size, 1, 1)
    )
    kernel_kwargs = {"lengthscale_prior": lengthscale_prior}
    if batch_size > 1:
        kernel_kwargs["batch_size"] = batch_size
    self.covar_module = MultitaskKernel(
        RBFKernel(**kernel_kwargs),
        num_tasks=2,
        rank=1,
    )
 def __init__(self, train_x, train_y, likelihood):
     """Two-task exact GP with a constant mean and an RBF data kernel."""
     super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
     # Use num_tasks (not the deprecated ``n_tasks``) so this block matches
     # the keyword used by every other model in this file and by
     # gpytorch >= 0.3.
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
     self.data_covar_module = RBFKernel()
     self.covar_module = MultitaskKernel(self.data_covar_module,
                                         num_tasks=2,
                                         rank=1)
 def __init__(self, train_x, train_y, likelihood):
     """Two-task exact GP whose RBF kernel is grid-interpolated (100 points, 1-D)."""
     super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
     # Keep a handle to the interpolated data kernel and wrap it for tasks.
     base_kernel = GridInterpolationKernel(
         RBFKernel(), grid_size=100, num_dims=1
     )
     self.data_covar_module = base_kernel
     self.covar_module = MultitaskKernel(base_kernel, num_tasks=2, rank=1)
Example #4
0
 def __init__(self, train_inputs, train_targets, likelihood, batch_shape=torch.Size()):
     """Exact two-task GP: batch-aware constant mean + RBF data kernel.

     Args:
         train_inputs: training inputs forwarded to the exact-GP base class.
         train_targets: training targets.
         likelihood: GPyTorch likelihood for the model.
         batch_shape: batch shape shared by the mean and the kernel.
     """
     super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
     mean_prior = gpytorch.priors.SmoothedBoxPrior(-1, 1)
     self.mean_module = MultitaskMean(
         ConstantMean(batch_shape=batch_shape, prior=mean_prior),
         num_tasks=2,
     )
     lengthscale_prior = gpytorch.priors.NormalPrior(
         loc=torch.tensor(0.0), scale=torch.tensor(1.0)
     )
     data_kernel = RBFKernel(batch_shape=batch_shape,
                             lengthscale_prior=lengthscale_prior)
     self.covar_module = MultitaskKernel(data_kernel, num_tasks=2, rank=1)
Example #5
0
    def __init__(self, x: torch.Tensor, xe: torch.Tensor, y: torch.Tensor,
                 lik: GaussianLikelihood, **conf):
        """GP over mixed continuous (x) and embedded categorical (xe) inputs.

        The mean and kernels come from ``conf`` (keys 'mean', 'kern',
        'kern_emb') or fall back to defaults; each is wrapped in its
        multitask counterpart when y has more than one output column.
        """
        super().__init__((x, xe), y.squeeze(), lik)
        base_mean = conf.get('mean', ConstantMean())
        base_kern = conf.get(
            'kern',
            ScaleKernel(MaternKernel(nu=1.5, ard_num_dims=x.shape[1]),
                        outputscale_prior=GammaPrior(0.5, 0.5)))
        base_kern_emb = conf.get('kern_emb', MaternKernel(nu=2.5))

        n_out = y.shape[1]
        self.multi_task = n_out > 1

        if self.multi_task:
            self.mean = MultitaskMean(base_mean, num_tasks=n_out)
        else:
            self.mean = base_mean

        if x.shape[1] > 0:
            # Continuous part present: attach its (possibly multitask) kernel.
            if self.multi_task:
                self.kern = MultitaskKernel(base_kern, num_tasks=n_out)
            else:
                self.kern = base_kern
        if xe.shape[1] > 0:
            # Categorical part present: build the embedding transform and
            # its (possibly multitask) kernel.
            assert 'num_uniqs' in conf
            self.emb_trans = EmbTransform(conf['num_uniqs'],
                                          emb_sizes=conf.get('emb_sizes', None))
            if self.multi_task:
                self.kern_emb = MultitaskKernel(base_kern_emb, num_tasks=n_out)
            else:
                self.kern_emb = base_kern_emb
Example #6
0
    def __init__(self, input_size, target_size, device='cpu'):
        """Exact multitask GP regressor: zero mean + RBF data kernel.

        Training data is attached later; the model is constructed with
        train_inputs/train_targets set to None.

        Args:
            input_size: dimensionality of the inputs.
            target_size: number of output tasks.
            device: 'gpu' to request CUDA (falls back to CPU when
                unavailable), anything else for CPU.
        """
        wants_cuda = device == 'gpu' and torch.cuda.is_available()
        self.device = torch.device('cuda:0' if wants_cuda else 'cpu')

        self.input_size = input_size
        self.target_size = target_size

        _likelihood = MultitaskGaussianLikelihood(num_tasks=self.target_size)
        super(MultiTaskGPRegressor, self).__init__(train_inputs=None,
                                                   train_targets=None,
                                                   likelihood=_likelihood)

        self.mean_module = MultitaskMean(ZeroMean(), num_tasks=self.target_size)
        self.covar_module = MultitaskKernel(RBFKernel(),
                                            num_tasks=self.target_size,
                                            rank=1)

        # Optional input/target transforms; presumably assigned elsewhere.
        self.input_trans = None
        self.target_trans = None
Example #7
0
    def __init__(
        self,
        train_x,
        train_y,
        likelihood,
        rank,
        num_mixtures,
        X_scaler,
    ):
        """Multitask exact GP with a constrained spectral-mixture data kernel.

        Args:
            train_x: (n, d) training inputs; d is read from axis 1.
            train_y: (n, t) training targets; the task count t is read
                from axis 1.
            likelihood: GPyTorch likelihood forwarded to the base class.
            rank: rank of the inter-task covariance factor.
            num_mixtures: number of spectral-mixture components.
            X_scaler: fitted scaler whose ``inverse_transform`` maps the
                unit square back to data units; used to derive bounds on
                the mixture parameters.
        """
        super().__init__(train_x, train_y, likelihood)
        num_dims = train_x.shape[1]
        num_tasks = train_y.shape[1]

        self.mean_module = MultitaskMean(Square2DPolynomialMean(),
                                         num_tasks=num_tasks)

        # Recover the data-space extent of the two input dimensions and the
        # aspect ratio between them.
        xmax = X_scaler.inverse_transform(np.ones((1, 2)))[0]
        xmin = X_scaler.inverse_transform(np.zeros((1, 2)))[0]
        llcoeff = (xmax - xmin)[0] / (xmax - xmin)[1]

        # Allowed lengthscale band for the mixture means and scales; the
        # actual constraints below are reciprocals of these values, scaled
        # per dimension by llcoeff.
        lower_ll_mean = 0.1
        upper_ll_mean = 1e2
        lower_ll_scale = 0.1
        upper_ll_scale = 1e2

        def _bounds(lower_ll, upper_ll):
            # Turn a lengthscale band into (lower, upper) constraint tensors
            # on the target device. A larger lengthscale yields a smaller
            # bound (reciprocal relationship).
            lo = np.array([1. / upper_ll, 1. / (upper_ll * llcoeff)])
            hi = np.array([1. / lower_ll, 1. / (lower_ll * llcoeff)])
            return (tensor(lo, dtype=torch.float32, device=DEVICE),
                    tensor(hi, dtype=torch.float32, device=DEVICE))

        lower_mean_constraint, upper_mean_constraint = _bounds(
            lower_ll_mean, upper_ll_mean)
        lower_scale_constraint, upper_scale_constraint = _bounds(
            lower_ll_scale, upper_ll_scale)

        covar_module = PatchedSpectralMixtureKernel(
            num_mixtures,
            ard_num_dims=num_dims,
            mixture_scales_constraint=Interval(
                lower_scale_constraint,
                upper_scale_constraint,
            ),
            mixture_means_constraint=Interval(
                lower_mean_constraint,
                upper_mean_constraint,
            ),
        )
        if IS_CUDA:
            covar_module = covar_module.cuda(device=DEVICE)

        # Data-driven initialization runs on the bare kernel before it is
        # wrapped into the multitask kernel.
        covar_module.initialize_from_data(train_x, train_y)
        self.covar_module = MultitaskKernel(
            covar_module,
            num_tasks=num_tasks,
            rank=rank,
        )
        self.init_multitask_from_data(Y=train_y, rank=rank)
 def __init__(self, train_x, train_y, likelihood):
     """Two-task exact GP: constant mean, RBF data kernel, rank-1 task factor."""
     super().__init__(train_x, train_y, likelihood)
     data_kernel = RBFKernel()
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
     self.covar_module = MultitaskKernel(data_kernel, num_tasks=2, rank=1)
 def __init__(self, train_x, train_y, likelihood, num_tasks=2):
     """Multitask exact GP with a Matern data kernel over ``num_tasks`` outputs."""
     super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
     data_kernel = MaternKernel()
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=num_tasks)
     self.covar_module = MultitaskKernel(data_kernel, num_tasks=num_tasks)