Code example #1
0
###############################################################################
# .. note::
#   This operator uses a conjugate gradient solver and assumes
#   that **formula** defines a **symmetric**, positive and definite
#   **linear** reduction with respect to the alias ``"b"``
#   specified through the third argument.
#
# Apply our solver on arbitrary point clouds:
#

# Solve the regularized Gaussian kernel linear system with KeOps.
# `call=False` returns a lazy callable; the actual conjugate-gradient
# solve only runs when `cfun()` is invoked below.
print(
    "Solving a Gaussian linear system, with {} points in dimension {}.".format(
        N, D))
t_begin = time.time()
sq_dists = keops.sum((Vi(x) - Vj(x)) ** 2, dim=2)
K_xx = keops.exp(-sq_dists / (2 * sigma ** 2))
cfun = keops.solve(K_xx, Vi(b), alpha=alpha, call=False)
c = cfun()
t_end = time.time()
print("Timing (KeOps implementation):", round(t_end - t_begin, 5), "s")

###############################################################################
# Compare with a straightforward PyTorch implementation:
#

start = time.time()
# Dense N x N Gaussian kernel matrix, with an alpha ridge on the diagonal
# so that the system is symmetric positive definite.
K_xx = alpha * torch.eye(N) + torch.exp(-torch.sum(
    (x[:, None, :] - x[None, :, :])**2, dim=2) / (2 * sigma**2))
# torch.solve(B, A) is deprecated and removed in PyTorch >= 1.13;
# torch.linalg.solve takes (A, B) and returns the solution tensor directly
# (no tuple), so the trailing [0] is gone.
c_py = torch.linalg.solve(K_xx, b)
end = time.time()
print("Timing (PyTorch implementation):", round(end - start, 5), "s")
Code example #2
0
File: plot_lazytensors_c.py  Project: zeta1999/keops
# Gaussian kernel built from the precomputed squared distances, then a
# regularized linear solve K c = e_i with ridge parameter alpha = 0.1.
# `solve` is an instance method, so the bound-call form below is
# equivalent to LazyTensor.solve(Kxx, e_i, alpha=0.1).
Kxx = (-D2xx / sigma ** 2).exp()
res = Kxx.solve(e_i, alpha=0.1)

#########################################################################
# Use of loops or vector operations for sums of kernels
# -----------------------------------------------------

#############################################################################
# Let us now perform the kernel convolution again, but replacing the Gaussian
# kernel with a sum of 4 Gaussian kernels with different sigma widths.
# This can be done as follows with a for loop:
# Four kernel bandwidths, and a random vector of signals b_j to convolve.
sigmas = tensor([0.5, 1.0, 2.0, 4.0])
b_j = Vj(torch.rand(N, D).type(tensor))
# Accumulate the sum of Gaussian kernels term by term. No computation
# happens inside this loop: each iteration only extends the symbolic
# LazyTensor formula.
Kxy = 0
for sigma in sigmas:
    Kxy = Kxy + (-D2xy / sigma ** 2).exp()
gamma = (Kxy * b_j).sum_reduction(axis=1)

###############################################################################
# Note again that after the for loop, no actual computation has been performed.
# So we can actually build formulas with much more flexibility than with the
# use of Genred.
#
# Ok, this was just to showcase the use of a for loop,
# however in this case there is no need for a for loop, we can do simply:
# Same sum of four kernels, without the loop: broadcasting divides the
# squared distance by every sigma at once, and .sum() contracts the
# resulting 4-vector back to a single scalar kernel value.
Kxy = (-D2xy / sigmas ** 2).exp().sum()
gamma = (Kxy * b_j).sum_reduction(axis=1)

###############################################################################
# This is because all operations are broadcasted, so the ``/`` operation above
# works and corresponds to a ``./`` (scalar-vector element-wise division)