Example #1
    def test_gaussian_grad1conv_specific(self):
        #--------------------------------------------------------------------------------------
        from pykeops.numpy.convolutions.radial_kernels_grad1 import radial_kernels_grad1conv
        for k in (["gaussian", "laplacian", "cauchy", "inverse_multiquadric"]):
            with self.subTest(k=k):
                # Call cuda kernel
                gamma = np.zeros(self.D * self.N).astype('float32')
                radial_kernels_grad1conv(
                    self.a,
                    self.x,
                    self.y,
                    self.b,
                    gamma,
                    self.sigma,
                    kernel=k)  # In place: gamma_i = sum_j <a_i, b_j> * d/dx_i k(x_i, y_j)

                # Numpy version
                A = differences(self.x, self.y) * grad_np_kernel(
                    self.x, self.y, self.sigma, kernel=k)
                gamma_py = 2 * (np.sum(self.a *
                                       (np.matmul(A, self.b)), axis=2)).T

                # compare output
                self.assertTrue(np.allclose(gamma, gamma_py.ravel(),
                                            atol=1e-6))
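
All three examples build their NumPy reference from the differences and grad_np_kernel helpers that ship with the pykeops test utilities. Here is a minimal sketch of what they plausibly compute; the formulas follow from the standard radial-kernel definitions, but treat the exact constants as assumptions rather than the library's exact code:

import numpy as np

def squared_distances(x, y):
    # (N, M) matrix of squared Euclidean distances |x_i - y_j|^2
    return np.sum((x[:, np.newaxis, :] - y[np.newaxis, :, :]) ** 2, axis=2)

def differences(x, y):
    # (D, N, M) tensor of coordinate differences x_i - y_j
    return x.T[:, :, np.newaxis] - y.T[:, np.newaxis, :]

def grad_np_kernel(x, y, sigma, kernel="gaussian"):
    # Derivative k'(r2) of each radial kernel with respect to the
    # squared distance r2 = |x_i - y_j|^2 (singular at r2 = 0 for the
    # Laplacian, so diagonal entries should be avoided or masked).
    sq = squared_distances(x, y)
    if kernel == "gaussian":
        return -np.exp(-sq / sigma**2) / sigma**2
    elif kernel == "laplacian":
        t = -np.sqrt(sq) / sigma
        return np.exp(t) / (2 * sigma**2 * t)
    elif kernel == "cauchy":
        return -1 / (sigma**2 * (sq / sigma**2 + 1) ** 2)
    elif kernel == "inverse_multiquadric":
        return -0.5 * (sq / sigma**2 + 1) ** (-1.5) / sigma**2

With these definitions, the gradient of k with respect to x_i is 2 * (x_i - y_j) * k'(|x_i - y_j|^2), which is where the factor 2 in gamma_py comes from.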
Example #2
    def test_gaussian_grad1conv_specific(self):
        ############################################################
        from pykeops.numpy.convolutions.radial_kernel import RadialKernelGrad1conv

        for k, t in itertools.product(
            ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"],
            self.type_to_test,
        ):
            with self.subTest(k=k, t=t):
                # Call cuda kernel
                my_radial_conv = RadialKernelGrad1conv(t)
                gamma = my_radial_conv(
                    self.a.astype(t),
                    self.x.astype(t),
                    self.y.astype(t),
                    self.b.astype(t),
                    self.sigma.astype(t),
                    kernel=k,
                )

                # Numpy version
                tmp = differences(self.x, self.y) * grad_np_kernel(
                    self.x, self.y, self.sigma, kernel=k
                )
                gamma_py = 2 * (np.sum(self.a * (np.matmul(tmp, self.b)), axis=2)).T

                # compare output
                self.assertTrue(np.allclose(gamma, gamma_py, atol=1e-6))
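
These tests assume fixtures created in the suite's setUp. A plausible sketch follows, with shapes inferred from gamma = np.zeros(self.D * self.N) and the final transpose in gamma_py; the concrete sizes, seeds, and the type_to_test list are assumptions:

import numpy as np

class FixturesSketch:
    def setUp(self):
        self.N, self.M, self.D, self.E = 100, 120, 3, 3
        self.type_to_test = ["float32", "float64"]
        self.x = np.random.rand(self.N, self.D).astype("float32")  # N "target" points in R^D
        self.y = np.random.rand(self.M, self.D).astype("float32")  # M "source" points in R^D
        self.a = np.random.rand(self.N, self.E).astype("float32")  # dual vectors a_i
        self.b = np.random.rand(self.M, self.E).astype("float32")  # signals b_j carried by the y_j
        self.sigma = np.array([0.4], dtype="float32")              # kernel bandwidth

Example #3 below additionally relies on torch copies of these arrays (self.xc, self.yc, self.ac, self.bc, self.sigmac), with self.xc presumably built with requires_grad=True so that torch.autograd.grad can differentiate through kernel_product.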
Example #3
    def test_grad1conv_kernels_feature(self):
        ############################################################
        import torch
        from pykeops.torch import Kernel, kernel_product

        params = {
            'gamma': 1. / self.sigmac**2,
            'mode': 'sum',
        }
        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
            ['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params['id'] = Kernel(k + '(x,y)')
                params['backend'] = b

                # Call cuda kernel
                aKxy_b = torch.dot(
                    self.ac.view(-1),
                    kernel_product(params, self.xc, self.yc, self.bc).view(-1))
                gamma_keops = torch.autograd.grad(aKxy_b,
                                                  self.xc,
                                                  create_graph=False)[0].cpu()

                # Numpy version
                A = differences(self.x, self.y) * grad_np_kernel(
                    self.x, self.y, self.sigma, kernel=k)
                gamma_py = 2 * (np.sum(self.a *
                                       (np.matmul(A, self.b)), axis=2)).T

                # compare output
                self.assertTrue(
                    np.allclose(gamma_keops.cpu().data.numpy(),
                                gamma_py,
                                atol=1e-6))
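
The benchmark excerpt below times the same computation across backends and checks it against a chain_rules helper from the pykeops test utilities. A sketch of what it might look like, reusing grad_np_kernel's output as the Aa argument; the coordinate-by-coordinate loop is an assumption, only the identity it implements is fixed by the tests above:

import numpy as np

def chain_rules(q, ax, by, Aa, p):
    # Gradient of sum_i <q_i, (K p)_i> with respect to the points ax,
    # given Aa = k'(|ax_i - by_j|^2) precomputed by grad_np_kernel.
    res = np.zeros(ax.shape).astype("float32")
    for d in range(ax.shape[1]):  # loop over the D coordinates
        # (x_i^d - y_j^d) * k'(r_ij^2): an (N, M) matrix
        ximyjd = ax[:, d][:, np.newaxis] - by[:, d][np.newaxis, :]
        res[:, d] = 2 * np.sum(q * ((ximyjd * Aa) @ p), axis=1)
    return res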
# Benchmark of kernel convolution gradients, comparing NumPy, vanilla PyTorch,
# PyKeOps generic routines, and a specific, handmade legacy CUDA code for
# kernel convolution gradients:
#

speed_numpy = {i: np.nan for i in kernel_to_test}
speed_pykeops = {i: np.nan for i in kernel_to_test}
speed_pytorch = {i: np.nan for i in kernel_to_test}
speed_pykeops_specific = {i: np.nan for i in kernel_to_test}

print("Timings for {}x{} convolution gradients:".format(M, N))

for k in kernel_to_test:
    print("kernel: " + k)

    # Pure numpy
    if use_numpy:
        gnumpy = chain_rules(a, x, y, grad_np_kernel(x, y, sigma, kernel=k), b)
        speed_numpy[k] = timeit.repeat(
            "gnumpy = chain_rules(a, x, y, grad_np_kernel(x, y, sigma, kernel=k), b)",
            globals=globals(),
            repeat=3,
            number=1,
        )
        print("Time for NumPy:               {:.4f}s".format(
            np.median(speed_numpy[k])))
    else:
        gnumpy = torch.zeros_like(xc).data.cpu().numpy()

    # Vanilla PyTorch (with CUDA if available, CPU otherwise)
    if use_vanilla:
        try:
            aKxy_b = torch.dot(