Example #1
    def test_gaussian_conv_specific(self):
        ############################################################
        from pykeops.numpy.convolutions.radial_kernel import RadialKernelConv

        for k, t in itertools.product(
            ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"],
            self.type_to_test,
        ):
            with self.subTest(k=k):
                # Call cuda kernel
                my_radial_conv = RadialKernelConv(t)
                gamma = my_radial_conv(
                    self.x.astype(t),
                    self.y.astype(t),
                    self.b.astype(t),
                    self.sigma.astype(t),
                    kernel=k,
                )

                # Numpy version
                gamma_py = np.matmul(
                    np_kernel(self.x, self.y, self.sigma, kernel=k), self.b
                )

                # compare output
                self.assertTrue(np.allclose(gamma, gamma_py, atol=1e-6))
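These tests compare against np_kernel, a dense NumPy baseline that is imported from the test utilities and never shown in the snippets. A minimal sketch of what it plausibly computes, assuming the standard formulas for the four radial kernels exercised above (the real helper lives in pykeops' test utilities and may differ in detail):

import numpy as np

def squared_distances(x, y):
    # Pairwise squared Euclidean distances: (M, D) x (N, D) -> (M, N).
    return np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=2)

def np_kernel(x, y, s, kernel):
    # Dense (M, N) kernel matrix for the four radial kernels tested above.
    sq = squared_distances(x, y)
    if kernel == "gaussian":
        return np.exp(-sq / (s * s))
    elif kernel == "laplacian":
        return np.exp(-np.sqrt(sq) / s)
    elif kernel == "cauchy":
        return 1.0 / (1.0 + sq / (s * s))
    elif kernel == "inverse_multiquadric":
        return np.sqrt(1.0 / (1.0 + sq / (s * s)))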
Example #2
    def test_conv_kernels_feature(self):
        ############################################################
        from pykeops.torch.kernel_product.kernels import Kernel, kernel_product
        params = {
            'gamma': 1. / self.sigmac**2,
            'mode': 'sum',
        }
        if pykeops.config.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
            ['gaussian', 'laplacian', 'cauchy', 'inverse_multiquadric'],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params['id'] = Kernel(k + '(x,y)')
                params['backend'] = b
                # Call cuda kernel
                gamma = kernel_product(params, self.xc, self.yc, self.bc).cpu()

                # Numpy version
                gamma_py = np.matmul(
                    np_kernel(self.x, self.y, self.sigma, kernel=k), self.b)

                # compare output
                self.assertTrue(np.allclose(gamma.data.numpy(), gamma_py))
Example #3
    def test_conv_kernels_feature(self):
        #--------------------------------------------------------------------------------------
        from pykeops.torch.kernels import Kernel, kernel_product
        params = {
            "gamma": 1. / self.sigmac**2,
            "mode": 'sum',
        }
        if gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'pytorch']
        else:
            backend_to_test = ['auto', 'pytorch']

        for k, b in itertools.product(
            ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"],
                backend_to_test):
            with self.subTest(k=k, b=b):
                params["id"] = Kernel(k + "(x,y)")
                params["backend"] = b
                # Call cuda kernel
                gamma = kernel_product(params, self.xc, self.yc, self.bc).cpu()

                # Numpy version
                gamma_py = np.matmul(
                    np_kernel(self.x, self.y, self.sigma, kernel=k), self.b)

                # compare output
                self.assertTrue(np.allclose(gamma.data.numpy(), gamma_py))
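Examples #2 and #3 rely on fixtures (self.x, self.xc, self.sigmac, ...) defined in the test class's setUp, which is not part of the snippets. A self-contained setup sketch with hypothetical sizes, assuming the c-suffixed names are torch copies of the NumPy arrays:

import numpy as np
import torch

M, N, D, E = 100, 120, 3, 3  # hypothetical problem sizes
x = np.random.rand(M, D).astype("float32")
y = np.random.rand(N, D).astype("float32")
b = np.random.rand(N, E).astype("float32")
sigma = np.array([0.25], dtype="float32")

# Move everything to the GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
xc, yc, bc, sigmac = (torch.from_numpy(v).to(device) for v in (x, y, b, sigma))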
Example #4
    def test_fshape_scp_specific(self):
        ############################################################
        from pykeops.numpy.shape_distance import FshapeScp

        for k, t in itertools.product(["gaussian", "cauchy"], self.type_to_test):
            # Kernel choices and bandwidths
            kgeom = k
            ksig = "gaussian"
            ksphere = "gaussian_oriented"

            sigma_geom = 1.0
            sigma_sig = 1.0
            sigma_sphere = np.pi / 2
            # Call cuda kernel
            my_fshape_scp = FshapeScp(
                kernel_geom=kgeom, kernel_sig=ksig, kernel_sphere=ksphere, dtype=t
            )
            gamma = my_fshape_scp(
                self.x.astype(t),
                self.y.astype(t),
                self.f.astype(t),
                self.g.astype(t),
                self.a.astype(t),
                self.b.astype(t),
                sigma_geom=sigma_geom,
                sigma_sig=sigma_sig,
                sigma_sphere=sigma_sphere,
            ).ravel()

            # Python version
            areaa = np.linalg.norm(self.a, axis=1)
            areab = np.linalg.norm(self.b, axis=1)

            nalpha = self.a / areaa[:, np.newaxis]
            nbeta = self.b / areab[:, np.newaxis]

            gamma_py = np.sum(
                (areaa[:, np.newaxis] * areab[np.newaxis, :])
                * np_kernel(self.x, self.y, sigma_geom, kgeom)
                * np_kernel(self.f, self.g, sigma_sig, ksig)
                * np_kernel_sphere(nalpha, nbeta, sigma_sphere, ksphere),
                axis=1,
            )

            # compare output
            self.assertTrue(np.allclose(gamma, gamma_py, atol=1e-6))
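Example #4 additionally compares against np_kernel_sphere, a baseline kernel acting on unit normals. A sketch, assuming the usual varifold-style formulas driven by the pairwise scalar products (again, the actual helper lives in pykeops' test utilities):

import numpy as np

def np_kernel_sphere(nalpha, nbeta, s, kernel):
    # nalpha: (M, D), nbeta: (N, D) unit vectors; prs: (M, N) scalar products.
    prs = nalpha @ nbeta.T
    if kernel == "gaussian_oriented":
        return np.exp((2.0 * prs - 2.0) / (s * s))
    elif kernel == "gaussian_unoriented":
        return np.exp((2.0 * prs * prs - 2.0) / (s * s))
    elif kernel == "binet":
        return prs ** 2
    elif kernel == "linear":
        return prs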
Example #5
    def test_gaussian_conv_specific(self):
        #--------------------------------------------------------------------------------------
        from pykeops.numpy.convolutions.radial_kernels import radial_kernels_conv
        for k in ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"]:
            with self.subTest(k=k):
                # Call cuda kernel
                gamma = np.zeros(self.E * self.N).astype('float32')
                radial_kernels_conv(self.x,
                                    self.y,
                                    self.b,
                                    gamma,
                                    self.sigma,
                                    kernel=k)

                # Numpy version
                gamma_py = np.matmul(
                    np_kernel(self.x, self.y, self.sigma, kernel=k), self.b)

                # compare output
                self.assertTrue(np.allclose(gamma, gamma_py.ravel(),
                                            atol=1e-6))
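radial_kernels_conv writes its result into the preallocated flat buffer gamma. If the 2-D result is needed afterwards, it can be recovered with a reshape; a sketch, assuming (as the buffer size E * N and the ravel() comparison suggest) that self.N counts the rows of self.x:

# Hypothetical post-processing: recover the 2-D result from the flat buffer.
gamma_2d = gamma.reshape(self.N, self.E)  # rows match gamma_py's layout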
Example #6
# We compare four backends: NumPy, vanilla PyTorch, generic KeOps reductions,
# and a specific, handmade legacy CUDA routine for kernel convolutions:
#

speed_numpy = {i: np.nan for i in kernel_to_test}
speed_pytorch = {i: np.nan for i in kernel_to_test}
speed_pykeops_specific = {i: np.nan for i in kernel_to_test}
speed_pykeops = {i: np.nan for i in kernel_to_test}

print('Timings for {}x{} convolutions:'.format(M, N))

for k in kernel_to_test:
    print('kernel: ' + k)

    # Pure numpy
    g_numpy = np.matmul(np_kernel(x, y, sigma, kernel=k), b)
    speed_numpy[k] = timeit.repeat(
        'gnumpy = np.matmul( np_kernel(x, y, sigma, kernel=k), b)',
        globals=globals(),
        repeat=5,
        number=1)
    print('Time for NumPy:               {:.4f}s'.format(
        np.median(speed_numpy[k])))

    # Vanilla pytorch (with cuda if available, and cpu otherwise)
    try:
        g_pytorch = torch_kernel(xc, yc, sigmac, kernel=k) @ bc
        torch.cuda.synchronize()
        speed_pytorch[k] = np.array(
            timeit.repeat(
                "torch_kernel(xc, yc, sigmac, kernel=k) @ bc; torch.cuda.synchronize()",
                globals=globals(),
                repeat=5,  # assumed: mirrors the NumPy timing setup above
                number=1))
        print('Time for PyTorch:             {:.4f}s'.format(
            np.median(speed_pytorch[k])))
    except:
        pass  # assumed handler: skip the PyTorch timing if CUDA/torch is unavailable
Example #7
sigma_sig = 1.0
sigma_sphere = np.pi / 2
kgeom = 'gaussian'
ksig = 'gaussian'
ksphere = 'gaussian_oriented'
myconv = FshapeScp(kernel_geom=kgeom, kernel_sig=ksig, kernel_sphere=ksphere)
gamma = myconv(x,
               y,
               f,
               g,
               a,
               b,
               sigma_geom=sigma_geom,
               sigma_sig=sigma_sig,
               sigma_sphere=sigma_sphere).ravel()

areaa = np.linalg.norm(a, axis=1)
areab = np.linalg.norm(b, axis=1)

nalpha = a / areaa[:, np.newaxis]
nbeta = b / areab[:, np.newaxis]

gamma_py = np.sum(
    (areaa[:, np.newaxis] * areab[np.newaxis, :]) *
    np_kernel(x, y, sigma_geom, kgeom) * np_kernel(f, g, sigma_sig, ksig) *
    np_kernel_sphere(nalpha, nbeta, sigma_sphere, ksphere),
    axis=1)

# compare output
print(np.allclose(gamma, gamma_py, atol=1e-6))
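The inputs to FshapeScp are not defined in this snippet. Hypothetical shapes that make both the KeOps call and the NumPy check well-formed:

import numpy as np

# Hypothetical inputs (names and sizes assumed, not from the original script):
M, N, D = 100, 120, 3
x = np.random.rand(M, D)  # source points
y = np.random.rand(N, D)  # target points
f = np.random.rand(M, 1)  # signal attached to x
g = np.random.rand(N, 1)  # signal attached to y
a = np.random.rand(M, D)  # unnormalized normals at x
b = np.random.rand(N, D)  # unnormalized normals at y
sigma_geom = 1.0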
Example #8
##############################
# Benchmark
##############################

enable_GC = False  # Garbage collection?
GC = 'gc.enable();' if enable_GC else 'pass;'
LOOPS = 200
print("Times to compute ", LOOPS, " convolutions of size {}x{}:".format(N, M))
print("\n", end="")

for k in ["gaussian", "laplacian", "cauchy", "inverse_multiquadric"]:
    print(k, " kernel: -----------------------------------")

    # pure numpy
    gnumpy = np_kernel(x, y, sigma, kernel=k) @ b
    speed_numpy = timeit.Timer('gnumpy =  np_kernel(x,y,sigma,kernel=k) @ b',
                               GC,
                               globals=globals(),
                               timer=time.time).timeit(LOOPS)
    print("Time for Python:              {:.4f}s".format(speed_numpy))

    # KeOps + PyTorch: generic tiled implementation (with CUDA if available, CPU otherwise)
    try:
        # Define a kernel: Wrap it (and its parameters) into a JSON dict structure
        mode = "sum"
        kernel = Kernel(k + "(x,y)")
        params = {
            "id": kernel,
            "gamma":