Example #1
    def test_logSumExp_kernels_feature(self):
        ############################################################
        import itertools

        import numpy as np
        import pykeops
        from pykeops.torch import Kernel, kernel_product

        # log_np_kernel and log_sum_exp are NumPy reference helpers
        # from the test utilities (a sketch follows this example).

        params = {'gamma': 1. / self.sigmac**2, 'mode': 'lse'}
        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
                ['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params['id'] = Kernel(k + '(x,y)')
                params['backend'] = b
                # KeOps version, on the backend selected above
                # (the 'pytorch' backend falls back to vanilla PyTorch)
                gamma = kernel_product(params, self.xc, self.yc, self.gc).cpu()

                # Numpy version
                log_K = log_np_kernel(self.x, self.y, self.sigma, kernel=k)
                log_KP = log_K + self.g.T
                gamma_py = log_sum_exp(log_KP, axis=1)

                # compare output
                self.assertTrue(
                    np.allclose(gamma.data.numpy().ravel(),
                                gamma_py,
                                atol=1e-6))
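
The NumPy reference above relies on helpers (log_np_kernel, log_sum_exp) that live in the test utilities and are not shown here. Below is a minimal sketch of what they might look like, consistent with the kernel formulas spelled out in Example #3; the exact names and conventions are assumptions, not the library's actual implementation.

import numpy as np


def squared_distances(x, y):
    # Dense (M, N) matrix of pairwise squared Euclidean distances
    # between the rows of x (M, D) and the rows of y (N, D).
    return np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=2)


def log_np_kernel(x, y, sigma, kernel='gaussian'):
    # Log of the dense (M, N) kernel matrix, evaluated in NumPy.
    sq = squared_distances(x, y) / sigma ** 2
    if kernel == 'gaussian':
        return -sq
    elif kernel == 'laplacian':
        return -np.sqrt(sq)
    elif kernel == 'cauchy':
        return -np.log(1 + sq)
    elif kernel == 'inverse_multiquadric':
        return -.5 * np.log(1 + sq)


def log_sum_exp(mat, axis=0):
    # Numerically stable log-sum-exp reduction along the given axis.
    max_rc = mat.max(axis=axis)
    return max_rc + np.log(
        np.sum(np.exp(mat - np.expand_dims(max_rc, axis=axis)), axis=axis))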
Example #2
    def test_generic_syntax_lse(self):
        ############################################################
        import itertools

        import numpy as np
        import pykeops
        from pykeops.numpy import Genred

        # log_sum_exp and squared_distances are the NumPy helpers
        # sketched after Example #1.
        aliases = ['p=Pm(0,1)', 'a=Vj(1,1)', 'x=Vi(2,3)', 'y=Vj(3,3)']
        formula = 'Square(p-a)*Exp(-SqNorm2(x-y))'

        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'GPU']
        else:
            backend_to_test = ['auto']

        for b, t in itertools.product(backend_to_test, self.type_to_test):
            with self.subTest(b=b, t=t):

                # KeOps reduction, compiled from the formula above
                myconv = Genred(formula,
                                aliases,
                                reduction_op='LogSumExp',
                                axis=1,
                                dtype=t)
                gamma_keops = myconv(self.sigma.astype(t),
                                     self.g.astype(t),
                                     self.x.astype(t),
                                     self.y.astype(t),
                                     backend=b)

                # Numpy version
                gamma_py = log_sum_exp(
                    (self.sigma - self.g.T)**2 *
                    np.exp(-squared_distances(self.x, self.y)),
                    axis=1)

                # compare output
                self.assertTrue(
                    np.allclose(gamma_keops.ravel(), gamma_py, atol=1e-6))
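
For readers unfamiliar with Genred, the positions in the alias strings fix the argument order of the compiled routine. A minimal standalone sketch with the same formula and hypothetical sizes M and N (assuming pykeops is installed; recent versions may infer the dtype from the inputs instead of taking it as an argument):

import numpy as np
from pykeops.numpy import Genred

M, N = 50, 30  # hypothetical sizes

# 'p=Pm(0,1)': p is argument 0, a parameter of dimension 1;
# 'a=Vj(1,1)': a is argument 1, one scalar per j-index;
# 'x=Vi(2,3)': x is argument 2, one 3-vector per i-index;
# 'y=Vj(3,3)': y is argument 3, one 3-vector per j-index.
aliases = ['p=Pm(0,1)', 'a=Vj(1,1)', 'x=Vi(2,3)', 'y=Vj(3,3)']
formula = 'Square(p-a)*Exp(-SqNorm2(x-y))'

routine = Genred(formula, aliases, reduction_op='LogSumExp', axis=1,
                 dtype='float64')

p = np.random.rand(1)
a = np.random.rand(N, 1)
x = np.random.rand(M, 3)
y = np.random.rand(N, 3)

# (M, 1) result: for each i, log sum_j exp(Square(p - a_j) * Exp(-|x_i - y_j|^2)).
gamma = routine(p, a, x, y)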
Example #3
    def test_logSumExp_kernels_feature(self):
        ############################################################
        import numpy as np
        from pykeops.torch import Vi, Vj, Pm

        # log_np_kernel and log_sum_exp are the NumPy reference helpers
        # sketched after Example #1.

        kernels = {
            "gaussian": lambda xc, yc, sigmac: (
                -Pm(1 / sigmac ** 2) * Vi(xc).sqdist(Vj(yc))
            ),
            "laplacian": lambda xc, yc, sigmac: (
                -(Pm(1 / sigmac ** 2) * Vi(xc).sqdist(Vj(yc))).sqrt()
            ),
            "cauchy": lambda xc, yc, sigmac: (
                1 + Pm(1 / sigmac ** 2) * Vi(xc).sqdist(Vj(yc))
            )
            .power(-1)
            .log(),
            "inverse_multiquadric": lambda xc, yc, sigmac: (
                1 + Pm(1 / sigmac ** 2) * Vi(xc).sqdist(Vj(yc))
            )
            .sqrt()
            .power(-1)
            .log(),
        }

        for k in ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"]:
            with self.subTest(k=k):
                # KeOps LazyTensor version
                gamma_lazy = kernels[k](self.xc, self.yc, self.sigmac)
                gamma_lazy = gamma_lazy.logsumexp(dim=1, weight=Vj(self.gc.exp())).cpu()

                # Numpy version
                log_K = log_np_kernel(self.x, self.y, self.sigma, kernel=k)
                log_KP = log_K + self.g.T
                gamma_py = log_sum_exp(log_KP, axis=1)

                # compare output
                self.assertTrue(
                    np.allclose(gamma_lazy.data.numpy().ravel(), gamma_py, atol=1e-6)
                )
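
The weighted reduction above computes log sum_j w_j exp(K_ij), so passing weight=Vj(self.gc.exp()) is equivalent to adding g_j inside the exponential, which is exactly what the NumPy reference does with log_K + g.T. A minimal standalone sketch of the same pattern for the Gaussian kernel, with hypothetical sizes (assumes torch and pykeops are installed):

import torch
from pykeops.torch import Vi, Vj, Pm

M, N, D = 50, 30, 3  # hypothetical sizes
x = torch.rand(M, D)
y = torch.rand(N, D)
g = torch.rand(N, 1)         # log-weights, one scalar per j
sigma = torch.tensor([0.5])

# Symbolic (M, N) Gaussian log-kernel: -|x_i - y_j|^2 / sigma^2.
log_K = -Pm(1 / sigma ** 2) * Vi(x).sqdist(Vj(y))

# Weighted log-sum-exp over j:
#   log sum_j exp(g_j) * exp(log_K_ij) = log sum_j exp(log_K_ij + g_j),
# i.e. the same quantity as log_sum_exp(log_K + g.T, axis=1) in NumPy.
gamma = log_K.logsumexp(dim=1, weight=Vj(g.exp()))  # (M, 1) torch tensor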