Code example #1
    def fit(self, X:LazyTensor):
        ''' 
        Args:   X = torch lazy tensor with features of shape 
                (1, n_samples, n_features)

        Returns: Fitted instance of the class
        '''

        # Basic checks: we have a lazy tensor and n_components isn't too large
        assert isinstance(X, LazyTensor), 'Input to fit(.) must be a LazyTensor.'
        assert X.shape[1] >= self.n_components, (
            f'fit(.) needs X.shape[1] >= n_components, got {X.shape[1]} < {self.n_components}.')

        X = X.sum(dim=0)  # collapse the leading batch dim: (1, n, d) -> (n, d)
        # Number of samples
        n_samples = X.size(0)
        # Define basis
        rnd = check_random_state(self.random_state)
        inds = rnd.permutation(n_samples)
        basis_inds = inds[:self.n_components]
        basis = X[basis_inds]
        # Build smaller kernel
        basis_kernel = self._pairwise_kernels(basis, kernel=self.kernel)
        # Get SVD
        U, S, V = torch.svd(basis_kernel)
        # Guard against zero / tiny singular values before the inverse sqrt
        S = torch.clamp(S, min=1e-12)
        self.normalization_ = torch.mm(U / torch.sqrt(S), V.t())
        self.components_ = basis
        self.component_indices_ = inds
        
        return self
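
A minimal usage sketch for fit(.); the class name Nystrom and its constructor arguments below are hypothetical placeholders, only the fit(.) contract comes from the snippet above:

import torch
from pykeops.torch import LazyTensor

X = torch.randn(1000, 3)
X_lazy = LazyTensor(X[None, :, :])  # shape (1, n_samples, n_features)

# 'Nystrom', 'n_components', 'kernel' and 'random_state' are assumed
# names, not taken from the snippet above.
model = Nystrom(n_components=100, kernel='rbf', random_state=0)
model = model.fit(X_lazy)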
Code example #2
    def K_approx(self, X: LazyTensor) -> LazyTensor:
        ''' Returns the Nystrom approximation of the kernel matrix.

        Args:
            X[LazyTensor] = data used in the fit(.) function.
        Returns:
            K[LazyTensor] = Nystrom approximation of the kernel
        '''

        X = X.sum(dim=0)  # collapse the leading batch dim
        K_nq = self._pairwise_kernels(X, self.components_, self.kernel)
        # normalization_ is K_qq^(-1/2), so it is applied twice to recover
        # the Nystrom estimate K_nq @ K_qq^(-1) @ K_nq.T
        K_approx = K_nq @ self.normalization_.t() @ self.normalization_ @ K_nq.t()
        return LazyTensor(K_approx[None, :, :])
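
In matrix terms, K_approx computes the standard Nystrom estimate K_nq @ K_qq^(-1) @ K_nq.T, where K_qq is the kernel on the sampled basis points. A hedged call sketch, reusing the hypothetical model and X_lazy from the fit example:

# Returns a LazyTensor of shape (1, n_samples, n_samples).
K_tilde = model.K_approx(X_lazy)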
Code example #3
    def transform(self, X: LazyTensor) -> LazyTensor:
        ''' Applies the transform to the data.

        Args:
            X[LazyTensor] = data to transform
        Returns:
            X[LazyTensor] = data after transformation
        '''

        X = X.sum(dim=0)  # collapse the leading batch dim
        K_nq = self._pairwise_kernels(X, self.components_, self.kernel)
        return LazyTensor((K_nq @ self.normalization_.t())[None, :, :])
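
Since normalization_ is K_qq^(-1/2), the transformed features factorize the approximate kernel. A hedged consistency check, with the same hypothetical model and X_lazy as above:

# Nystrom identity: K_approx(X) == transform(X) @ transform(X).T
phi = model.transform(X_lazy).sum(dim=0)      # dense features, shape (n, q)
K_tilde = model.K_approx(X_lazy).sum(dim=0)   # dense approximation, (n, n)
print(torch.allclose(K_tilde, phi @ phi.t(), atol=1e-5))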
Code example #4
###############################################################################
# .. note::
#   This operator uses a conjugate gradient solver and assumes
#   that **formula** defines a **symmetric**, positive-definite
#   **linear** reduction with respect to the alias ``"b"``
#   specified through the third argument.
#
# Apply our solver on arbitrary point clouds:
#

print(
    "Solving a Gaussian linear system, with {} points in dimension {}.".format(
        N, D))
start = time.time()
K_xx = keops.exp(-keops.sum((Vi(x) - Vj(x))**2, dim=2) / (2 * sigma**2))
cfun = keops.solve(K_xx, Vi(b), alpha=alpha, call=False)
c = cfun()
end = time.time()
print("Timing (KeOps implementation):", round(end - start, 5), "s")

###############################################################################
# Compare with a straightforward PyTorch implementation:
#

start = time.time()
K_xx = alpha * torch.eye(N) + torch.exp(-torch.sum(
    (x[:, None, :] - x[None, :, :])**2, dim=2) / (2 * sigma**2))
c_py = torch.linalg.solve(K_xx, b)
end = time.time()
print("Timing (PyTorch implementation):", round(end - start, 5), "s")
Code example #5
File: plot_lazytensors_c.py  Project: zeta1999/keops
###############################################################################
# Scalar product, absolute value, power operator, and a SoftMax type reduction:
res = (abs(x_i | y_j)**1.5).sumsoftmaxweight(x_i, axis=1)

########################################################################
# The ``[]`` operator can be used to do element selection or slicing
# (Elem or Extract operation in KeOps).
res = (x_i[:2] * y_j[2:] - x_i[2:] * y_j[:2]).sqnorm2().sum(axis=1)

########################################################################
# Kernel inversion: let's invert a Gaussian kernel matrix. Note that
# we have to use both :func:`Vi <pykeops.torch.Vi>` and :func:`Vj <pykeops.torch.Vj>` helpers on the same tensor ``x`` here.
#
e_i = Vi(torch.rand(M, D).type(tensor))
x_j = Vj(x)
D2xx = LazyTensor.sum((x_i - x_j)**2)
sigma = 0.25
Kxx = (-D2xx / sigma**2).exp()
res = LazyTensor.solve(Kxx, e_i, alpha=0.1)

#########################################################################
# Use of loops or vector operations for sums of kernels
# -----------------------------------------------------

#############################################################################
# Let us now perform the kernel convolution again, replacing the Gaussian
# kernel with a sum of 4 Gaussian kernels of different widths sigma.
# This can be done with a for loop, sketched right after the snippet:
sigmas = tensor([0.5, 1.0, 2.0, 4.0])
b_j = Vj(torch.rand(N, D).type(tensor))
Kxy = 0
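
#############################################################################
# The snippet is truncated here; a minimal sketch of the announced loop,
# assuming ``D2xy`` is built from ``x_i`` and ``y_j`` just like ``D2xx``
# above:

D2xy = LazyTensor.sum((x_i - y_j) ** 2)
for s in sigmas:
    Kxy += (-D2xy / float(s) ** 2).exp()  # float(s): plain scalar width
gamma = (Kxy * b_j).sum_reduction(axis=1)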
Code example #6
File: keops.py  Project: isabella232/benchmark-3
def sum(a, dim=-1):
    # Despite its name, this KeOps helper computes a log-sum-exp: `a` gains
    # two dummy trailing axes so it can be wrapped as a LazyTensor, and the
    # `dim` argument is effectively fixed to the last axis of `a`.
    a_lazy = LazyTensor(a.unsqueeze(-1).unsqueeze(-1).contiguous())
    c = a_lazy.sum(-1).logsumexp(a.dim() - 1).squeeze(-1).squeeze(-1)
    return c
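
A quick hedged check of this wrapper against the dense PyTorch reduction (assumes pykeops is installed; sum is the helper defined above):

import torch
from pykeops.torch import LazyTensor

a = torch.randn(100, 50)
# The helper should match torch.logsumexp over the last axis.
print(torch.allclose(sum(a), torch.logsumexp(a, dim=-1), atol=1e-4))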