Example #1
    def test_logSumExp_kernels_feature(self):
        ############################################################
        import itertools
        import numpy as np
        import pykeops
        from pykeops.torch import Kernel, kernel_product

        params = {'gamma': 1. / self.sigmac**2, 'mode': 'lse'}
        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
                ['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params['id'] = Kernel(k + '(x,y)')
                params['backend'] = b
                # KeOps log-sum-exp reduction on the selected backend
                gamma = kernel_product(params, self.xc, self.yc, self.gc).cpu()

                # Numpy version
                log_K = log_np_kernel(self.x, self.y, self.sigma, kernel=k)
                log_KP = log_K + self.g.T
                gamma_py = log_sum_exp(log_KP, axis=1)

                # compare output
                self.assertTrue(
                    np.allclose(gamma.data.numpy().ravel(),
                                gamma_py,
                                atol=1e-6))
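
The NumPy reference side of this test relies on two helpers that the snippet does not show, log_np_kernel and log_sum_exp. Below is a minimal sketch of what they could look like, assuming the standard radial-kernel formulas (e.g. the Gaussian kernel exp(-|x - y|^2 / sigma^2)); the helpers that ship with pykeops' own test utilities may differ in detail.

import numpy as np


def squared_distances(x, y):
    # (M, D) x (N, D) -> (M, N) matrix of squared Euclidean distances
    return np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=2)


def log_np_kernel(x, y, s, kernel='gaussian'):
    # log of the kernel matrix K(x_i, y_j), with bandwidth s (gamma = 1 / s**2)
    sq = squared_distances(x, y)
    if kernel == 'gaussian':
        return -sq / s**2
    elif kernel == 'laplacian':
        return -np.sqrt(sq) / s
    elif kernel == 'cauchy':
        return -np.log1p(sq / s**2)
    elif kernel == 'inverse_multiquadric':
        return -0.5 * np.log1p(sq / s**2)
    raise ValueError('unknown kernel: ' + kernel)


def log_sum_exp(mat, axis=0):
    # numerically stable log-sum-exp reduction along the given axis
    max_rc = mat.max(axis=axis)
    return max_rc + np.log(
        np.sum(np.exp(mat - np.expand_dims(max_rc, axis=axis)), axis=axis))

With these helpers, gamma_py[i] = log sum_j K(x_i, y_j) * exp(g_j), which is the quantity the test expects kernel_product to return in 'lse' mode.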
Example #2
    def test_conv_kernels_feature(self):
        ############################################################
        import itertools
        import numpy as np
        import pykeops
        from pykeops.torch.kernel_product.kernels import Kernel, kernel_product
        params = {
            'gamma': 1. / self.sigmac**2,
            'mode': 'sum',
        }
        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
                ['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params['id'] = Kernel(k + '(x,y)')
                params['backend'] = b
                # KeOps kernel convolution (sum reduction) on the selected backend
                gamma = kernel_product(params, self.xc, self.yc, self.bc).cpu()

                # Numpy version
                gamma_py = np.matmul(
                    np_kernel(self.x, self.y, self.sigma, kernel=k), self.b)

                # compare output
                self.assertTrue(np.allclose(gamma.data.numpy(),
                                            gamma_py))
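
All three examples read their inputs from attributes prepared in the test class's setUp, which is not part of the snippets. The sketch below shows one plausible fixture, with hypothetical sizes and random float32 data; only the attribute names (self.x, self.xc, self.sigma, ...) are taken from the snippets. The np_kernel helper used above is simply the elementwise exponential of log_np_kernel from the previous sketch.

import numpy as np
import torch


class KernelProductFixtureSketch:
    # Hypothetical setUp: sizes and values are illustrative, not the library's own.
    def setUp(self):
        M, N, D, E = 100, 120, 3, 3
        self.sigma = np.array([0.4], dtype=np.float32)      # kernel bandwidth
        self.x = np.random.rand(M, D).astype(np.float32)    # M query points
        self.y = np.random.rand(N, D).astype(np.float32)    # N source points
        self.b = np.random.rand(N, E).astype(np.float32)    # signal carried by y
        self.a = np.random.rand(M, E).astype(np.float32)    # covector for the gradient check
        self.g = np.random.rand(N, 1).astype(np.float32)    # log-weights for the 'lse' test

        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # torch copies of the NumPy arrays; xc needs a gradient for the autograd test
        self.sigmac = torch.tensor(self.sigma, device=device)
        self.xc = torch.tensor(self.x, device=device, requires_grad=True)
        self.yc = torch.tensor(self.y, device=device)
        self.bc = torch.tensor(self.b, device=device)
        self.ac = torch.tensor(self.a, device=device)
        self.gc = torch.tensor(self.g, device=device)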
Example #3
    def test_grad1conv_kernels_feature(self):
        ############################################################
        import itertools
        import numpy as np
        import pykeops
        import torch
        from pykeops.torch import Kernel, kernel_product

        params = {
            'gamma': 1. / self.sigmac**2,
            'mode': 'sum',
        }
        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
                ['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params['id'] = Kernel(k + '(x,y)')
                params['backend'] = b

                # Evaluate <a, K(x, y) b> with KeOps kernel_product on the selected backend
                aKxy_b = torch.dot(
                    self.ac.view(-1),
                    kernel_product(params, self.xc, self.yc, self.bc).view(-1))
                gamma_keops = torch.autograd.grad(aKxy_b,
                                                  self.xc,
                                                  create_graph=False)[0].cpu()

                # Numpy version
                A = differences(self.x, self.y) * grad_np_kernel(
                    self.x, self.y, self.sigma, kernel=k)
                gamma_py = 2 * (np.sum(self.a *
                                       (np.matmul(A, self.b)), axis=2)).T

                # compare output
                self.assertTrue(
                    np.allclose(gamma_keops.data.numpy(),
                                gamma_py,
                                atol=1e-6))
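
The NumPy side of this gradient check uses two more helpers, differences and grad_np_kernel. Writing r2 = |x_i - y_j|^2 and K = f(r2), the chain rule gives dK/dx_i = 2 (x_i - y_j) f'(r2), which is where the factor 2 and the product with the pairwise differences come from. Below is a minimal sketch of those helpers, with only the Gaussian branch of f' spelled out (the other kernels are analogous but not reproduced here); the versions shipped with pykeops may differ in detail.

import numpy as np


def differences(x, y):
    # (M, D) x (N, D) -> (D, M, N) tensor of pairwise coordinate differences x_i - y_j
    return x.T[:, :, np.newaxis] - y.T[:, np.newaxis, :]


def grad_np_kernel(x, y, s, kernel='gaussian'):
    # derivative f'(r2) of the kernel with respect to the squared distance r2
    r2 = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=2)
    if kernel == 'gaussian':
        # K = exp(-r2 / s**2)  =>  dK/dr2 = -exp(-r2 / s**2) / s**2
        return -np.exp(-r2 / s**2) / s**2
    raise NotImplementedError('only the gaussian branch is sketched here')

In the test, A[d, i, j] = (x_i - y_j)_d * f'(r2_ij), so np.matmul(A, self.b) sums over the source points j, and the final expression gamma_py is exactly the gradient of <a, K(x, y) b> with respect to x, which is what torch.autograd.grad returns on the KeOps side.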