Code example #1
    def __init__(self, model_specs: list, init_Z: torch.Tensor, N: float, likelihood: nn.Module, num_outputs: int, is_whiten: bool, Z_is_shared: bool, flow_specs: list, be_fully_bayesian: bool) -> None:
        """
                Args: 
                        :attr:  `model_specs`         (list)         :->: tuple (A,B) where A is a string representing the mean used and B is a kernel instance.
                                                                          This kernel instance should have batch_shape = number of outputs if K_is_shared = False,
                                                                          and batch_shape = 1 else. For the moment all the GPs at a layer shared the functional form of these.
                                `X`                   (torch.tensor) :->: Full training set (or subset) of samples used for the SVD 
                                `init_Z`              (torch.tensor) :->: initial inducing point locations
                                `N`                   (float)        :->: total training size	
                                `likelihood`          (nn.Module)    :->: Likelihood instance that will depend on the task to carry out
                                `num_outputs`         (int)          :->: number of output GP. The number of inputs is taken from the dimensionality of init_Z
                                `is_whiten`           (bool)         :->: use whitened representation of inducing points.
                                `Z_is_shared`         (bool)         :->: True if the inducing point locations are shared
                                `flow_specs`          (list)         :->: A list of list containing lists of strings (or flows instances) specifying the composition 
                                                                          and interconnection of flows per output dimension. The list contains num_output lists, specifying the flows per output GP.
                                `be_fully_bayesian`   (bool)         :->: If true, then input dependent flows are integrated with monte carlo dropout when possible


          # -> Some notes for deciding if whitenning or not: https://gpytorch.readthedocs.io/en/latest/variational.html#gpytorch.variational.UnwhitenedVariationalStrategy	
        """
        super(TGP, self).__init__()
        ## ==== Check assertions ==== ##
        assert len(model_specs) == 2, 'Parameter model_specs should be len 2. First position string with the mean and second position string with the kernels'

        ## ==== Config Variables ==== ##
        self.out_dim          = int(num_outputs)    # output dimension
        self.inp_dim          = int(init_Z.size(1)) # input dimension
        self.Z_is_shared      = Z_is_shared         # if the inducing points are shared 
        self.N                = float(N)            # training size
        self.M                = init_Z.size(0)      # number of inducing points
        self.likelihood       = likelihood
        
        self.fully_bayesian   = be_fully_bayesian

        ## ==== Tools ==== ##
        self.standard_sampler = td.MultivariateNormal(torch.zeros(1,).to(cg.device),torch.eye(1).to(cg.device))  # used in the reparameterization trick.

        if isinstance(self.likelihood, GaussianNonLinearMean):
            self.quad_points = self.likelihood.quad_points
        else:
            self.quad_points = cg.quad_points
        self.quad = GaussHermiteQuadrature1D(self.quad_points)  # quadrature integrator

        ## ==== Set the Model ==== ##
        # Variational distribution
        self.initialize_inducing(init_Z)
        self.initialize_variational_distribution(is_whiten)

        # Model distribution
        self.mean_function       = model_specs[0]
        self.covariance_function = model_specs[1]

        self.G_matrix            = self.initialize_flows(flow_specs)
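In example #1, `standard_sampler` is kept for the reparameterization trick mentioned in the comment: a sample from N(mu, sigma^2) is drawn as mu + sigma * eps with eps ~ N(0, I), so gradients can flow through mu and sigma. A minimal, self-contained illustration of that trick (the names `mu`, `sigma`, `eps` are ours, not from the project):

    import torch
    import torch.distributions as td

    standard_sampler = td.MultivariateNormal(torch.zeros(1), torch.eye(1))

    mu    = torch.tensor([0.7], requires_grad=True)
    sigma = torch.tensor([0.2], requires_grad=True)

    eps = standard_sampler.sample(torch.Size([128]))  # [128, 1]; sampling itself carries no gradient
    f   = mu + sigma * eps                            # differentiable samples from N(mu, sigma^2)
    f.pow(2).mean().backward()                        # gradients now reach mu and sigma
    print(mu.grad, sigma.grad)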
Code example #2
    def __init__(self):
        super(Bernoulli, self).__init__()

        self.C = 2  # number of classes
        self.quad_points = cg.quad_points
        self.quadrature_distribution = GaussHermiteQuadrature1D(self.quad_points)
        self.loss = nn.BCELoss(reduction='none')
        self.link_function = td.normal.Normal(0, 1).cdf  # probit link
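Example #2's probit link (`td.normal.Normal(0, 1).cdf`) and its quadrature object combine naturally: the Bernoulli success probability under a Gaussian posterior over f is E[Φ(f)], which Gauss-Hermite quadrature approximates as a weighted sum over a handful of locations. A minimal sketch of that combination (the posterior mean/stddev values are hypothetical, not from the project):

    import torch
    import torch.distributions as td
    from gpytorch.utils.quadrature import GaussHermiteQuadrature1D

    link = td.Normal(0.0, 1.0).cdf                  # probit link, as in the Bernoulli likelihood above
    quad = GaussHermiteQuadrature1D(num_locs=20)

    f_post = td.Normal(torch.tensor([0.3, -1.2]),   # q(f): hypothetical Gaussian posterior marginals
                       torch.tensor([0.8, 0.5]))
    p = quad(link, f_post)                          # ~ E[Φ(f)] per point, i.e. P(y = 1)
    print(p)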
Code example #3
 def __init__(self, inducing_points):
     variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0))
     variational_strategy = VariationalStrategy(
         self, inducing_points, variational_distribution, learn_inducing_locations=True
     )
     super(GPClassificationModel, self).__init__(variational_strategy)
     self.mean_module = gpytorch.means.ZeroMean()
     self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
     
     self.quadrature = GaussHermiteQuadrature1D()
Code example #4
    def __init__(self, out_dim: int, noise_init: float, noise_is_shared: bool,
                 flow: Flow, quad_points: int):
        super(WarpedGaussianLinearMean, self).__init__(out_dim, noise_init,
                                                       noise_is_shared)

        self.flow = nn.ModuleList([flow])
        self.quad_points = quad_points

        self.quad = GaussHermiteQuadrature1D(
            self.quad_points)  # quadrature integrator.
Code example #5
    def __init__(self, out_dim: int, noise_init: float, noise_is_shared: bool, quadrature_points: int):
        super(GaussianNonLinearMean, self).__init__()

        self.out_dim = out_dim
        self.noise_is_shared = noise_is_shared

        if noise_is_shared:  # if the noise is shared, create one parameter and broadcast it to out_dim shape
            log_var_noise = nn.Parameter(
                torch.ones(1, 1, dtype=cg.dtype)
                * inverse_positive_transform(torch.tensor(noise_init, dtype=cg.dtype))
            )
        else:  # create a vector of per-output noise-variance parameters
            log_var_noise = nn.Parameter(
                torch.ones(out_dim, 1, dtype=cg.dtype)
                * inverse_positive_transform(torch.tensor(noise_init, dtype=cg.dtype))
            )

        self.log_var_noise = log_var_noise

        self.quad_points = quadrature_points
        self.quadrature_distribution = GaussHermiteQuadrature1D(quadrature_points)
Code example #6
File: test_quadrature.py  Project: xz6014/gpytorch
    def test_gauss_hermite_quadrature_1D_normal_batch(self, cuda=False):
        func = lambda x: torch.sin(x)

        means = torch.randn(3, 10)
        variances = torch.randn(3, 10).abs()
        quadrature = GaussHermiteQuadrature1D()

        if cuda:
            means = means.cuda()
            variances = variances.cuda()
            quadrature = quadrature.cuda()

        dist = torch.distributions.Normal(means, variances.sqrt())

        # Use quadrature
        results = quadrature(func, dist)

        # Use Monte-Carlo
        samples = dist.rsample(torch.Size([20000]))
        actual = func(samples).mean(0)

        self.assertLess(torch.mean(torch.abs(actual - results)), 0.1)
Code example #7
 def __init__(self):
     super().__init__()
     self.quadrature = GaussHermiteQuadrature1D()
Code example #8
 def __init__(self, flags):
     super().__init__()
     self.flags = flags
     # the following line cuts deep into the internals of GPyTorch and could break at any time
     self.quadrature = GaussHermiteQuadrature1D(num_locs=flags.num_samples)
Code example #9
File: _poisson_likelihood.py  Project: yucho147/GP
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self.quadrature = GaussHermiteQuadrature1D(num_locs=20)
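What all of these constructors share is the same downstream use: the cached GaussHermiteQuadrature1D later approximates the variational expected log-likelihood E_q[log p(y | f)] without Monte Carlo sampling. A minimal, self-contained sketch of that pattern under a Gaussian observation model (the `log_lik` function and all values below are illustrative assumptions, not taken from any of the projects above):

    import math
    import torch
    from gpytorch.utils.quadrature import GaussHermiteQuadrature1D

    noise_var = 0.1
    y = torch.randn(10)                                # observed targets
    q_f = torch.distributions.Normal(torch.zeros(10),  # q(f): variational marginals
                                     0.5 * torch.ones(10))

    def log_lik(f):
        # Gaussian log p(y | f); broadcasts over the leading quadrature-location axis
        return -0.5 * (math.log(2 * math.pi * noise_var) + (y - f) ** 2 / noise_var)

    quad = GaussHermiteQuadrature1D(num_locs=20)
    expected_ll = quad(log_lik, q_f)                   # ~ E_q[log p(y_i | f_i)], one value per data point
    print(expected_ll.shape)                           # torch.Size([10])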