Example #1
    def forward(self, x1, x2, batch_dims=None, **params):

        x1_ = torch.stack([x1[:, i] for i in self.psi_indices], dim=1)

        x2_ = torch.stack([x2[:, i] for i in self.psi_indices], dim=1)

        if batch_dims == (0, 2):
            print('batch dims here')

        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))

        tone = prod * (self.u1)

        prod = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 1:2].transpose(-1, -2))

        ttwo = prod * (self.u2)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 1:2].transpose(-1, -2))

        diagtwo = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 0:1].transpose(-1, -2))

        tthree = (diagone + diagtwo) * ((self.rho - 1) * (self.u1)**.5 *
                                        (self.u2)**.5)

        random_effects = tone + ttwo + tthree

        final = random_effects * self.user_mat

        final = final + self.first_mat

        return final
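The three lazy terms above encode a 2x2 random-effects covariance without materializing the dense kernel: with c = (rho - 1) * sqrt(u1 * u2), the sum equals x1_ @ Sigma @ x2_.T for the coefficient matrix Sigma = [[u1, c], [c, u2]]. A minimal dense sketch of that identity, using made-up stand-ins for self.u1, self.u2, and self.rho:

import torch

u1, u2, rho = 0.7, 1.3, 0.9          # hypothetical parameter values
c = (rho - 1) * u1**0.5 * u2**0.5

x1_ = torch.randn(6, 2)
x2_ = torch.randn(6, 2)
a1, b1 = x1_[:, 0:1], x1_[:, 1:2]
a2, b2 = x2_[:, 0:1], x2_[:, 1:2]

# Term by term, mirroring the lazy construction above (dense here).
random_effects = (a1 @ a2.t() * u1 + b1 @ b2.t() * u2 +
                  (a1 @ b2.t() + b1 @ a2.t()) * c)

# Equivalent single matmul against the 2x2 coefficient matrix.
sigma = torch.tensor([[u1, c], [c, u2]])
assert torch.allclose(random_effects, x1_ @ sigma @ x2_.t(), atol=1e-5)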
Example #2
    def test_batch_get_indices(self):
        lhs = torch.randn(2, 5, 1)
        rhs = torch.randn(2, 1, 5)
        actual = lhs.matmul(rhs)
        res = MatmulLazyTensor(lhs, rhs)

        batch_indices = torch.tensor([0, 1, 0, 1], dtype=torch.long)
        left_indices = torch.tensor([1, 2, 4, 0], dtype=torch.long)
        right_indices = torch.tensor([0, 1, 3, 2], dtype=torch.long)

        self.assertTrue(
            approx_equal(
                actual[batch_indices, left_indices, right_indices],
                res._batch_get_indices(batch_indices, left_indices,
                                       right_indices),
            ))

        batch_indices = torch.tensor(
            [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
            dtype=torch.long)
        left_indices = torch.tensor(
            [1, 2, 4, 0, 1, 2, 3, 1, 2, 2, 1, 1, 0, 0, 4, 4, 4, 4],
            dtype=torch.long)
        right_indices = torch.tensor(
            [0, 1, 3, 2, 3, 4, 2, 2, 1, 1, 2, 1, 2, 4, 4, 3, 3, 0],
            dtype=torch.long)

        self.assertTrue(
            approx_equal(
                actual[batch_indices, left_indices, right_indices],
                res._batch_get_indices(batch_indices, left_indices,
                                       right_indices),
            ))
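For reference, the advanced indexing on actual gathers one scalar per (batch, left, right) triple; a minimal sketch of the same selection written as an explicit loop:

import torch

actual = torch.randn(2, 5, 5)
batch_indices = torch.tensor([0, 1, 0, 1], dtype=torch.long)
left_indices = torch.tensor([1, 2, 4, 0], dtype=torch.long)
right_indices = torch.tensor([0, 1, 3, 2], dtype=torch.long)

# One entry per index triple, identical to actual[batch, left, right].
gathered = torch.stack([actual[b, l, r] for b, l, r
                        in zip(batch_indices, left_indices, right_indices)])
assert torch.equal(gathered, actual[batch_indices, left_indices, right_indices])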
Example #3
    def test_batch_diag(self):
        lhs = torch.randn(4, 5, 3)
        rhs = torch.randn(4, 3, 5)
        actual = lhs.matmul(rhs)
        actual_diag = torch.cat([
            actual[0].diag().unsqueeze(0),
            actual[1].diag().unsqueeze(0),
            actual[2].diag().unsqueeze(0),
            actual[3].diag().unsqueeze(0),
        ])

        res = MatmulLazyTensor(lhs, rhs)
        self.assertTrue(approx_equal(actual_diag, res.diag()))
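On recent PyTorch versions, the per-batch diag-and-cat above can be collapsed into a single torch.diagonal call over the last two dimensions; a minimal equivalent sketch:

import torch

lhs = torch.randn(4, 5, 3)
rhs = torch.randn(4, 3, 5)
actual = lhs.matmul(rhs)

# Batched diagonal, shape (4, 5): same result as the cat of per-batch diags.
actual_diag = torch.diagonal(actual, dim1=-2, dim2=-1)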
Example #4
    def forward(self, x1, x2):
        if x1.size() == x2.size() and torch.equal(x1, x2):
            # Use RootLazyTensor when x1 == x2 for efficiency when composing
            # with other kernels
            prod = RootLazyTensor(x1 - self.offset)
        else:
            prod = MatmulLazyTensor(x1 - self.offset,
                                    (x2 - self.offset).transpose(2, 1))

        return prod + self.variance.expand(prod.size())
Example #5
    def _get_covariance(self, x1, x2):
        k_ux1 = self.base_kernel_module(x1, self.inducing_points).evaluate()
        if torch.equal(x1, x2):
            covar = RootLazyTensor(k_ux1.matmul(self._inducing_inv_root))
        else:
            k_ux2 = self.base_kernel_module(x2,
                                            self.inducing_points).evaluate()
            covar = MatmulLazyTensor(
                k_ux1.matmul(self._inducing_inv_root),
                k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2))
        return covar
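Both branches exploit the same low-rank (Nystrom) structure: if L is the Cholesky factor of the inducing covariance K_uu, then _inducing_inv_root plays the role of L^-T, and (K_xu L^-T)(K_zu L^-T)^T = K_xu K_uu^-1 K_uz. A minimal dense sketch of that identity; every name below is a local stand-in, not the module's actual attribute:

import torch

m, n = 4, 10
u = torch.randn(m, 2)                # inducing points
x = torch.randn(n, 2)

def rbf(a, b):
    return torch.exp(-torch.cdist(a, b) ** 2)

k_uu = rbf(u, u) + 1e-4 * torch.eye(m)   # jitter for stability
k_xu = rbf(x, u)

L = torch.linalg.cholesky(k_uu)
inv_root = torch.linalg.inv(L).t()       # stand-in for _inducing_inv_root

root = k_xu @ inv_root                   # what RootLazyTensor would wrap
nystrom = root @ root.t()
direct = k_xu @ torch.linalg.solve(k_uu, k_xu.t())
assert torch.allclose(nystrom, direct, atol=1e-4)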
Example #6
    def test_matmul(self):
        lhs = torch.randn(5, 3, requires_grad=True)
        rhs = torch.randn(3, 4, requires_grad=True)
        covar = MatmulLazyTensor(lhs, rhs)
        mat = torch.randn(4, 10)
        res = covar.matmul(mat)

        lhs_clone = lhs.clone().detach()
        rhs_clone = rhs.clone().detach()
        mat_clone = mat.clone().detach()
        lhs_clone.requires_grad = True
        rhs_clone.requires_grad = True
        mat_clone.requires_grad = True
        actual = lhs_clone.matmul(rhs_clone).matmul(mat_clone)

        self.assertTrue(approx_equal(res, actual))

        actual.sum().backward()

        res.sum().backward()
        self.assertTrue(approx_equal(lhs.grad, lhs_clone.grad))
        self.assertTrue(approx_equal(rhs.grad, rhs_clone.grad))
Example #7
    def _get_covariance(self, x1, x2):
        k_ux1 = delazify(self.base_kernel(x1, self.inducing_points))
        if torch.equal(x1, x2):
            covar = RootLazyTensor(k_ux1.matmul(self._inducing_inv_root))

            # Diagonal correction for predictive posterior
            correction = (self.base_kernel(x1, x2, diag=True) -
                          covar.diag()).clamp(0, math.inf)
            covar = PsdSumLazyTensor(covar, DiagLazyTensor(correction))
        else:
            k_ux2 = delazify(self.base_kernel(x2, self.inducing_points))
            covar = MatmulLazyTensor(
                k_ux1.matmul(self._inducing_inv_root),
                k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2))

        return covar
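The correction term compares the kernel's exact diagonal against the diagonal of the low-rank approximation and clamps at zero, so only underestimated variances get inflated (a FITC-style diagonal correction). A tiny sketch of the clamp with made-up numbers:

import torch

exact_diag = torch.tensor([1.0, 1.0, 1.0])
approx_diag = torch.tensor([0.7, 1.2, 1.0])

# Entries where the approximation overshoots are clamped to zero.
correction = (exact_diag - approx_diag).clamp(0, float('inf'))
print(correction)  # tensor([0.3000, 0.0000, 0.0000])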
Example #8
    def forward(self, x1, x2, batch_dims=None, **params):
        # Sum the two action-indicator blocks into a single action vector.
        action_vector = (
            torch.stack([torch.Tensor(x1)[:, i]
                         for i in [self.action_indices_one]], dim=1) +
            torch.stack([torch.Tensor(x1)[:, i]
                         for i in [self.action_indices_two]], dim=1))

        baseline_vector = torch.stack(
            [torch.Tensor(x1)[:, i] for i in [self.g_indices]], dim=1)

        fake_vector_one = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        action_vector = (
            torch.stack([torch.Tensor(x2)[:, i]
                         for i in [self.action_indices_one]], dim=1) +
            torch.stack([torch.Tensor(x2)[:, i]
                         for i in [self.action_indices_two]], dim=1))
        # Combine into a new feature vector.
        baseline_vector = torch.stack(
            [torch.Tensor(x2)[:, i] for i in [self.g_indices]], dim=1)
        fake_vector_two = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        x1_ = fake_vector_one
        x2_ = fake_vector_two

        if batch_dims == (0, 2):
            print('batch dims here')

        # Variance terms: one lazy rank-1 outer product per random effect.
        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))
        tone = prod * (self.u1)

        prod = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 1:2].transpose(-1, -2))
        ttwo = prod * (self.u2)

        prod = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 2:3].transpose(-1, -2))
        tthree = prod * (self.u3)

        prod = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 3:4].transpose(-1, -2))
        tfour = prod * (self.u4)

        # Covariance terms: symmetrized cross products, one per pair.
        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 1:2].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 0:1].transpose(-1, -2))
        cov_12 = (diagone + diagtwo) * ((self.rho_12 - 1) * (self.u1)**.5 *
                                        (self.u2)**.5)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 2:3].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 0:1].transpose(-1, -2))
        cov_13 = (diagone + diagtwo) * ((self.rho_13 - 1) * (self.u1)**.5 *
                                        (self.u3)**.5)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 3:4].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 0:1].transpose(-1, -2))
        cov_14 = (diagone + diagtwo) * ((self.rho_14 - 1) * (self.u1)**.5 *
                                        (self.u4)**.5)

        diagone = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 2:3].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 1:2].transpose(-1, -2))
        cov_23 = (diagone + diagtwo) * ((self.rho_23 - 1) * (self.u2)**.5 *
                                        (self.u3)**.5)

        diagone = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 3:4].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 1:2].transpose(-1, -2))
        cov_24 = (diagone + diagtwo) * ((self.rho_24 - 1) * (self.u2)**.5 *
                                        (self.u4)**.5)

        diagone = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 3:4].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 2:3].transpose(-1, -2))
        cov_34 = (diagone + diagtwo) * ((self.rho_34 - 1) * (self.u3)**.5 *
                                        (self.u4)**.5)

        random_effects = (tone + ttwo + tthree + tfour + cov_12 + cov_13 +
                          cov_14 + cov_23 + cov_24 + cov_34)

        final = random_effects * self.user_mat
        final = final + self.first_mat
        return final
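The same sanity check from Example #1 generalizes to four effects: the ten lazy terms above are a dense X1 @ Sigma @ X2.T with a 4x4 coefficient matrix whose diagonal holds the variances and whose off-diagonals are (rho_ij - 1) * sqrt(u_i * u_j). A minimal sketch with hypothetical stand-in values:

import torch

u = torch.tensor([0.5, 1.0, 1.5, 2.0])        # stand-ins for u1..u4
rho = {(0, 1): 0.9, (0, 2): 1.1, (0, 3): 0.8,  # stand-ins for rho_12..rho_34
       (1, 2): 1.0, (1, 3): 1.2, (2, 3): 0.95}

sigma = torch.diag(u)
for (i, j), r in rho.items():
    sigma[i, j] = sigma[j, i] = (r - 1) * (u[i] * u[j]) ** 0.5

x1_ = torch.randn(6, 4)
x2_ = torch.randn(6, 4)
random_effects = x1_ @ sigma @ x2_.t()  # dense equivalent of the ten lazy terms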
Example #9
    def test_diag(self):
        lhs = torch.randn(5, 3)
        rhs = torch.randn(3, 5)
        actual = lhs.matmul(rhs)
        res = MatmulLazyTensor(lhs, rhs)
        self.assertTrue(approx_equal(actual.diag(), res.diag()))
Example #10
    def test_transpose(self):
        lhs = torch.randn(5, 3)
        rhs = torch.randn(3, 5)
        actual = lhs.matmul(rhs)
        res = MatmulLazyTensor(lhs, rhs)
        self.assertTrue(approx_equal(actual.t(), res.t().evaluate()))
Example #11
    def forward(self, x1, x2, batch_dims=None, **params):
        # Sum the two action-indicator blocks into a single action vector.
        action_vector = (
            torch.stack([torch.Tensor(x1)[:, i]
                         for i in [self.action_indices_one]], dim=1) +
            torch.stack([torch.Tensor(x1)[:, i]
                         for i in [self.action_indices_two]], dim=1))
        baseline_vector = torch.stack(
            [torch.Tensor(x1)[:, i] for i in [self.g_indices]], dim=1)
        # Combine into a new feature vector.
        fake_vector_one = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        action_vector = (
            torch.stack([torch.Tensor(x2)[:, i]
                         for i in [self.action_indices_one]], dim=1) +
            torch.stack([torch.Tensor(x2)[:, i]
                         for i in [self.action_indices_two]], dim=1))
        baseline_vector = torch.stack(
            [torch.Tensor(x2)[:, i] for i in [self.g_indices]], dim=1)
        fake_vector_two = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        x1_ = torch.stack([fake_vector_one[:, i] for i in self.psi_indices],
                          dim=1)
        x2_ = torch.stack([fake_vector_two[:, i] for i in self.psi_indices],
                          dim=1)

        if batch_dims == (0, 2):
            print('batch dims here')

        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))
        tone = prod * (self.u1)

        prod = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 1:2].transpose(-1, -2))
        ttwo = prod * (self.u2)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 1:2].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 0:1].transpose(-1, -2))
        tthree = (diagone + diagtwo) * ((self.rho - 1) * (self.u1)**.5 *
                                        (self.u2)**.5)

        random_effects = tone + ttwo + tthree
        final = random_effects * self.user_mat
        final = final + self.first_mat
        return final
Example #12
    def forward(self, x1, x2, batch_dims=None, **params):
        x1_ = torch.stack([x1[:, i] for i in self.psi_indices], dim=1)
        x2_ = torch.stack([x2[:, i] for i in self.psi_indices], dim=1)

        if batch_dims == (0, 2):
            print('batch dims here')

        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))
        tone = prod * (self.u1)

        prod = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 1:2].transpose(-1, -2))
        ttwo = prod * (self.u2)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 1:2].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 0:1].transpose(-1, -2))
        tthree = (diagone + diagtwo) * ((self.rho - 1) * (self.u1)**.5 *
                                        (self.u2)**.5)

        random_effects = tone + ttwo + tthree
        final = random_effects * self.user_mat
        final = final + self.first_mat
        return final
Example #13
lambda0 = 0.5

n_cells_1d = 50
forward_cutoff = 400  # Limit the number of forward observations (Fourier and pointwise).
my_problem = ToyFourier2d.build_problem(n_cells_1d, forward_cutoff)
updatable_gp = UpdatableGP(kernel,
                           lambda0,
                           sigma0,
                           m0,
                           torch.tensor(my_problem.grid.cells).float(),
                           n_chunks=200)

lazy_cov = UpdatableCovLazyTensor(updatable_gp.covariance)

# Test getitem.
lazy_cov[0:10, 0:10].evaluate()

# Test pivoted Cholesky decomposition.
from gpytorch.utils.pivoted_cholesky import pivoted_cholesky

res = pivoted_cholesky(lazy_cov, max_iter=300, error_tol=0.01)
preconditioner = MatmulLazyTensor(res, res.t())

# Now test conjugate gradient inversion.
rhs = torch.rand((lazy_cov.n, 1))
ans = linear_cg(lazy_cov.matmul,
                rhs,
                tolerance=0.1,
                max_iter=400,
                preconditioner=preconditioner.matmul)
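When the covariance is small enough to evaluate densely, the CG answer can be sanity-checked against a direct solve; a sketch reusing lazy_cov, rhs, and ans from above:

import torch

dense_cov = lazy_cov.evaluate()
direct = torch.linalg.solve(dense_cov, rhs)
print((ans - direct).norm() / direct.norm())  # should be on the order of the CG tolerance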
Example #14
    def _getitem(self, row_index, col_index, *batch_indices):
        col_indexer = DiagLazyTensor(torch.ones(self.n))[:, col_index]
        row_indexer = DiagLazyTensor(torch.ones(self.n))[:, row_index]
        res = MatmulLazyTensor(
            MatmulLazyTensor(self, row_indexer).t(), col_indexer)
        return res
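A dense sketch of what this composition computes: slicing an identity matrix yields selector matrices, and (M @ R).t() @ C picks out M.t()[rows][:, cols], which equals M[rows][:, cols] whenever M is symmetric, as a covariance is. All names below are local stand-ins:

import torch

n = 5
M = torch.randn(n, n)
M = M + M.t()                  # symmetric, like a covariance matrix
I = torch.eye(n)

row_indexer = I[:, 1:3]        # selects rows 1..2
col_indexer = I[:, 0:4]        # selects cols 0..3

sub = (M @ row_indexer).t() @ col_indexer
assert torch.allclose(sub, M[1:3, 0:4])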
Example #15
    def forward(self, x1, x2, batch_dims=None, **params):
        action_vector = torch.stack(
            [torch.Tensor(x1)[:, i] for i in [self.action_indices_one]], dim=1)
        baseline_vector = torch.stack(
            [torch.Tensor(x1)[:, i] for i in [self.g_indices]], dim=1)
        fake_vector_one = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        action_vector = torch.stack(
            [torch.Tensor(x2)[:, i] for i in [self.action_indices_one]], dim=1)
        baseline_vector = torch.stack(
            [torch.Tensor(x2)[:, i] for i in [self.g_indices]], dim=1)
        fake_vector_two = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        x1_ = torch.stack([fake_vector_one[:, i] for i in self.psi_indices],
                          dim=1)
        x2_ = torch.stack([fake_vector_two[:, i] for i in self.psi_indices],
                          dim=1)

        if batch_dims == (0, 2):
            print('batch dims here')

        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))
        tone = prod * (self.u1)

        prod = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 1:2].transpose(-1, -2))
        ttwo = prod * (self.u2)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 1:2].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 0:1].transpose(-1, -2))
        tthree = (diagone + diagtwo) * ((self.rho - 1) * (self.u1)**.5 *
                                        (self.u2)**.5)

        random_effects = tone + ttwo + tthree
        final = random_effects * self.user_mat

        # Time effects share the rank-1 structure, scaled by s1 and s3.
        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))
        ttimeone = prod * (self.s1)

        prod = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 1:2].transpose(-1, -2))
        ttimetwo = prod * (self.s3)

        time_effects = ttimeone + ttimetwo
        time_effects = time_effects * self.time_mat
        final = final + time_effects

        final = final + self.first_mat
        return final
Example #16
    def create_lazy_tensor(self):
        lhs = torch.randn(5, 5, 6, requires_grad=True)
        rhs = lhs.clone().detach().transpose(-1, -2)
        covar = MatmulLazyTensor(lhs, rhs)
        return covar
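Because rhs here is the detached transpose of lhs, each batch entry of the lazy tensor is a Gram matrix lhs @ lhs.T, hence symmetric positive semi-definite; a quick dense check of the symmetry:

import torch

lhs = torch.randn(5, 5, 6)
gram = lhs @ lhs.transpose(-1, -2)   # dense analogue of the lazy tensor
assert torch.allclose(gram, gram.transpose(-1, -2), atol=1e-5)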
Example #17
    def create_lazy_tensor(self):
        lhs = torch.randn(3, 5, 3, requires_grad=True)
        rhs = torch.randn(3, 3, 6, requires_grad=True)
        covar = MatmulLazyTensor(lhs, rhs)
        return covar
Example #18
    def forward(self, x1, x2, batch_dims=None, **params):
        # Sum the two action-indicator blocks into a single action vector.
        action_vector = (
            torch.stack([torch.Tensor(x1)[:, i]
                         for i in [self.action_indices_one]], dim=1) +
            torch.stack([torch.Tensor(x1)[:, i]
                         for i in [self.action_indices_two]], dim=1))
        baseline_vector = torch.stack(
            [torch.Tensor(x1)[:, i] for i in [self.g_indices]], dim=1)
        # Combine into a new feature vector.
        fake_vector_one = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        action_vector = (
            torch.stack([torch.Tensor(x2)[:, i]
                         for i in [self.action_indices_one]], dim=1) +
            torch.stack([torch.Tensor(x2)[:, i]
                         for i in [self.action_indices_two]], dim=1))
        baseline_vector = torch.stack(
            [torch.Tensor(x2)[:, i] for i in [self.g_indices]], dim=1)
        fake_vector_two = torch.cat(
            (baseline_vector.squeeze(), action_vector.squeeze()), 1)

        x1_ = fake_vector_one
        x2_ = fake_vector_two

        if batch_dims == (0, 2):
            print('batch dims here')

        # Variance terms: one lazy rank-1 outer product per random effect.
        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))
        tone = prod * (self.u1)

        prod = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 1:2].transpose(-1, -2))
        ttwo = prod * (self.u2)

        prod = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 2:3].transpose(-1, -2))
        tthree = prod * (self.u3)

        prod = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 3:4].transpose(-1, -2))
        tfour = prod * (self.u4)

        # Covariance terms: symmetrized cross products, one per pair.
        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 1:2].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 0:1].transpose(-1, -2))
        cov_12 = (diagone + diagtwo) * ((self.rho_12 - 1) * (self.u1)**.5 *
                                        (self.u2)**.5)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 2:3].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 0:1].transpose(-1, -2))
        cov_13 = (diagone + diagtwo) * ((self.rho_13 - 1) * (self.u1)**.5 *
                                        (self.u3)**.5)

        diagone = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 3:4].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 0:1].transpose(-1, -2))
        cov_14 = (diagone + diagtwo) * ((self.rho_14 - 1) * (self.u1)**.5 *
                                        (self.u4)**.5)

        diagone = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 2:3].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 1:2].transpose(-1, -2))
        cov_23 = (diagone + diagtwo) * ((self.rho_23 - 1) * (self.u2)**.5 *
                                        (self.u3)**.5)

        diagone = MatmulLazyTensor(x1_[:, 1:2], x2_[:, 3:4].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 1:2].transpose(-1, -2))
        cov_24 = (diagone + diagtwo) * ((self.rho_24 - 1) * (self.u2)**.5 *
                                        (self.u4)**.5)

        diagone = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 3:4].transpose(-1, -2))
        diagtwo = MatmulLazyTensor(x1_[:, 3:4], x2_[:, 2:3].transpose(-1, -2))
        cov_34 = (diagone + diagtwo) * ((self.rho_34 - 1) * (self.u3)**.5 *
                                        (self.u4)**.5)

        random_effects = (tone + ttwo + tthree + tfour + cov_12 + cov_13 +
                          cov_14 + cov_23 + cov_24 + cov_34)

        final = random_effects * self.user_mat

        # Time effects: same rank-1 structure, scaled by s1**2 and s3**2.
        prod = MatmulLazyTensor(x1_[:, 0:1], x2_[:, 0:1].transpose(-1, -2))
        ttimeone = prod * (self.s1**2)

        prod = MatmulLazyTensor(x1_[:, 2:3], x2_[:, 2:3].transpose(-1, -2))
        ttimetwo = prod * (self.s3**2)

        time_effects = ttimeone + ttimetwo
        time_effects = time_effects * self.time_mat
        final = final + time_effects

        final = final + self.first_mat
        return final