Example 1
    def test_gaussian_grad1conv_specific(self):
        ############################################################
        from pykeops.numpy.convolutions.radial_kernel import RadialKernelGrad1conv

        kernels = ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"]
        for kernel_name, dtype in itertools.product(kernels, self.type_to_test):
            with self.subTest(k=kernel_name, t=dtype):
                # KeOps CUDA implementation of the radial-kernel gradient conv.
                conv = RadialKernelGrad1conv(dtype)
                gamma = conv(
                    self.a.astype(dtype),
                    self.x.astype(dtype),
                    self.y.astype(dtype),
                    self.b.astype(dtype),
                    self.sigma.astype(dtype),
                    kernel=kernel_name,
                )

                # Reference computation with plain NumPy.
                diffs = differences(self.x, self.y)
                grads = grad_np_kernel(
                    self.x, self.y, self.sigma, kernel=kernel_name
                )
                gamma_py = 2 * np.sum(
                    self.a * np.matmul(diffs * grads, self.b), axis=2
                ).T

                # Both implementations must agree up to float tolerance.
                self.assertTrue(np.allclose(gamma, gamma_py, atol=1e-6))
Example 2
    def test_gaussian_grad1conv_specific(self):
        #--------------------------------------------------------------------------------------
        from pykeops.numpy.convolutions.radial_kernels_grad1 import radial_kernels_grad1conv

        for kernel_name in ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"]:
            with self.subTest(k=kernel_name):
                # KeOps CUDA kernel: fills `gamma` in place with
                # gamma_i = k(x_i, y_j) @ beta_j.
                gamma = np.zeros(self.D * self.N).astype('float32')
                radial_kernels_grad1conv(
                    self.a, self.x, self.y, self.b, gamma, self.sigma,
                    kernel=kernel_name)

                # Reference computation with plain NumPy.
                weighted = differences(self.x, self.y) * grad_np_kernel(
                    self.x, self.y, self.sigma, kernel=kernel_name)
                gamma_py = 2 * np.sum(
                    self.a * np.matmul(weighted, self.b), axis=2).T

                # The in-place buffer is flat, so compare against the
                # raveled NumPy result.
                self.assertTrue(np.allclose(gamma, gamma_py.ravel(),
                                            atol=1e-6))
Example 3
    def test_grad1conv_kernels_feature(self):
        ############################################################
        import torch
        from pykeops.torch import Kernel, kernel_product

        params = {
            'gamma': 1. / self.sigmac**2,
            'mode': 'sum',
        }
        # Exercise the GPU backends only when a GPU is actually available.
        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
            ['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params['id'] = Kernel(k + '(x,y)')
                params['backend'] = b

                # KeOps: differentiate <a, K(x,y) b> with respect to x.
                aKxy_b = torch.dot(
                    self.ac.view(-1),
                    kernel_product(params, self.xc, self.yc, self.bc).view(-1))
                # .detach() replaces the deprecated `.data` access; moving to
                # CPU once here makes a second `.cpu()` call unnecessary when
                # comparing below.
                gamma_keops = torch.autograd.grad(
                    aKxy_b, self.xc,
                    create_graph=False)[0].detach().cpu()

                # Reference computation with plain NumPy.
                A = differences(self.x, self.y) * grad_np_kernel(
                    self.x, self.y, self.sigma, kernel=k)
                gamma_py = 2 * (np.sum(self.a *
                                       (np.matmul(A, self.b)), axis=2)).T

                # compare output
                self.assertTrue(
                    np.allclose(gamma_keops.numpy(),
                                gamma_py,
                                atol=1e-6))