Example no. 1
    def test_non_contiguity(self):
        ############################################################
        from pykeops.numpy import Genred

        t = self.type_to_test[0]

        aliases = ["p=Pm(0,1)", "a=Vj(1,1)", "x=Vi(2,3)", "y=Vj(3,3)"]
        formula = "Square(p-a)*Exp(-SqNorm2(y-x))"

        my_routine = Genred(formula, aliases, reduction_op="Sum", axis=1)
        gamma_keops1 = my_routine(
            self.sigma.astype(t),
            self.g.astype(t),
            self.x.astype(t),
            self.y.astype(t),
            backend="auto",
        )

        yc_tmp = np.ascontiguousarray(self.y.T).T  # create a copy of y that is not C-contiguous
        gamma_keops2 = my_routine(
            self.sigma.astype(t), self.g.astype(t), self.x.astype(t), yc_tmp.astype(t)
        )

        # check output
        self.assertFalse(yc_tmp.flags.c_contiguous)
        self.assertTrue(np.allclose(gamma_keops1, gamma_keops2))
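For readers who want to try this pattern outside the test fixture, here is a minimal standalone sketch (assuming pykeops is installed; the sizes M, N and the random data below are illustrative, not the test's):

import numpy as np
from pykeops.numpy import Genred

M, N = 50, 30
sigma = np.random.rand(1)      # p = Pm(0,1)
g = np.random.rand(N, 1)       # a = Vj(1,1)
x = np.random.rand(M, 3)       # x = Vi(2,3)
y = np.random.rand(N, 3)       # y = Vj(3,3)

routine = Genred("Square(p-a)*Exp(-SqNorm2(y-x))",
                 ["p=Pm(0,1)", "a=Vj(1,1)", "x=Vi(2,3)", "y=Vj(3,3)"],
                 reduction_op="Sum", axis=1)

y_nc = np.ascontiguousarray(y.T).T   # Fortran-ordered copy of y: not C-contiguous
assert not y_nc.flags.c_contiguous
assert np.allclose(routine(sigma, g, x, y), routine(sigma, g, x, y_nc))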
Example no. 2
    def test_heterogeneous_var_aliases(self):
        ############################################################
        from pykeops.numpy import Genred

        t = self.type_to_test[0]

        aliases = ["p=Pm(0,1)", "x=Vi(1,3)", "y=Vj(2,3)"]
        formula = "Square(p-Var(3,1,1))*Exp(-SqNorm2(y-x))"

        # Call cuda kernel
        myconv = Genred(formula, aliases, reduction_op="Sum", axis=1)
        gamma_keops = myconv(
            self.sigma.astype(t),
            self.x.astype(t),
            self.y.astype(t),
            self.g.astype(t),
            backend="auto",
        )

        # Numpy version
        gamma_py = np.sum(
            (self.sigma - self.g.T) ** 2 * np.exp(-squared_distances(self.x, self.y)),
            axis=1,
        )

        # compare output
        self.assertTrue(np.allclose(gamma_keops.ravel(), gamma_py, atol=1e-6))
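A note on the Var(3,1,1) term above: Var(ind, dim, cat) points directly at the argument in position ind, of dimension dim, with cat = 0/1/2 standing for Vi/Vj/Pm variables (my reading of the convention used throughout these examples). A fully-aliased, equivalent formulation would look like:

# Hedged equivalent of the heterogeneous formula above, with every variable aliased:
aliases = ["p=Pm(0,1)", "x=Vi(1,3)", "y=Vj(2,3)", "a=Vj(3,1)"]
formula = "Square(p-a)*Exp(-SqNorm2(y-x))"
myconv = Genred(formula, aliases, reduction_op="Sum", axis=1)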
Example no. 3
def generic_argkmin(formula, output, *aliases, **kwargs):
    r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with an "ArgKMin" reduction.

    Args:
        formula (string): Scalar-valued symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.
        output (string): An identifier of the form ``"AL = TYPE(K)"`` 
            that specifies the category and dimension of the output variable. Here:

              - ``AL`` is a dummy alphanumerical name.
              - ``TYPE`` is a *category*. One of:

                - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1.
                - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0.

              - ``K`` is an integer, the number of values to extract.

        *aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Keyword Args:
        dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. 
            The supported values are:

              - **dtype** = ``"float32"``,
              - **dtype** = ``"float64"``.

    Returns:
        A generic reduction that can be called on arbitrary
        NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Example:
        Bruteforce K-nearest neighbors search in dimension 100:

        >>> knn = generic_argkmin(
        ...     'SqDist(x, y)',   # Formula
        ...     'a = Vi(3)',      # Output: 3 scalars per line
        ...     'x = Vi(100)',    # 1st input: dim-100 vector per line
        ...     'y = Vj(100)')    # 2nd input: dim-100 vector per line
        >>> x = np.random.randn(5,     100)
        >>> y = np.random.randn(20000, 100)
        >>> a = knn(x, y)
        >>> print(a)
        [[ 9054., 11653., 11614.],
         [13466., 11903., 14180.],
         [14164.,  8809.,  3799.],
         [ 2092.,  3323., 18479.],
         [14433., 11315., 11841.]]
        >>> print( np.linalg.norm(x - y[ a[:,0].astype(int) ], axis=1) )  # Distance to the nearest neighbor
        [10.7933, 10.3235, 10.1218, 11.4919, 10.5100]
        >>> print( np.linalg.norm(x - y[ a[:,1].astype(int) ], axis=1) )  # Distance to the second neighbor
        [11.3702, 10.6550, 10.7646, 11.5676, 11.1356]
        >>> print( np.linalg.norm(x - y[ a[:,2].astype(int) ], axis=1) )  # Distance to the third neighbor
        [11.3820, 10.6725, 10.8510, 11.6071, 11.1968]
    """
    _, cat, k, _ = get_type(output)
    return Genred(formula,
                  list(aliases),
                  reduction_op='ArgKMin',
                  axis=cat2axis(cat),
                  opt_arg=k,
                  **kwargs)
Example no. 4
    def test_generic_syntax_lse(self):
        ############################################################
        from pykeops.numpy import Genred
        aliases = ['p=Pm(0,1)', 'a=Vj(1,1)', 'x=Vi(2,3)', 'y=Vj(3,3)']
        formula = 'Square(p-a)*Exp(-SqNorm2(x-y))'

        if pykeops.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'GPU']
        else:
            backend_to_test = ['auto']

        for b, t in itertools.product(backend_to_test, self.type_to_test):
            with self.subTest(b=b, t=t):

                # Call cuda kernel
                myconv = Genred(formula,
                                aliases,
                                reduction_op='LogSumExp',
                                axis=1,
                                dtype=t)
                gamma_keops = myconv(self.sigma.astype(t),
                                     self.g.astype(t),
                                     self.x.astype(t),
                                     self.y.astype(t),
                                     backend=b)

                # Numpy version
                gamma_py = log_sum_exp(
                    (self.sigma - self.g.T)**2 *
                    np.exp(-squared_distances(self.x, self.y)),
                    axis=1)

                # compare output
                self.assertTrue(
                    np.allclose(gamma_keops.ravel(), gamma_py, atol=1e-6))
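The helpers squared_distances and log_sum_exp used by these NumPy reference computations come from the test utilities and are not shown in the snippets; minimal stand-in versions (my sketches, not necessarily the exact implementations of the test suite) could be:

def squared_distances(x, y):
    # Pairwise squared Euclidean distances: out[i, j] = |x_i - y_j|^2.
    return np.sum((x[:, np.newaxis, :] - y[np.newaxis, :, :]) ** 2, axis=2)


def log_sum_exp(mat, axis=0):
    # Numerically stable log-sum-exp reduction along the given axis.
    max_rc = mat.max(axis=axis)
    return max_rc + np.log(np.sum(np.exp(mat - np.expand_dims(max_rc, axis=axis)), axis=axis))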
Example no. 5
    def test_generic_syntax_softmax(self):
        ############################################################
        from pykeops.numpy import Genred
        aliases = ['p=Pm(0,1)', 'a=Vj(1,1)', 'x=Vi(2,3)', 'y=Vj(3,3)']
        formula = 'Square(p-a)*Exp(-SqNorm2(x-y))'
        formula_weights = 'y'
        
        if pykeops.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'GPU']
        else:
            backend_to_test = ['auto']

        for b, t in itertools.product(backend_to_test, self.type_to_test):
            with self.subTest(b=b, t=t):

                # Call cuda kernel
                myop = Genred(formula, aliases, reduction_op='SumSoftMaxWeight',
                              axis=1, dtype=t, formula2=formula_weights)
                gamma_keops = myop(self.sigma.astype(t), self.g.astype(t),
                                   self.x.astype(t), self.y.astype(t), backend=b)

                # Numpy version
                def np_softmax(x, w):
                    x -= np.max(x, axis=1)[:, None]  # subtract the max for robustness
                    return np.exp(x) @ w / np.sum(np.exp(x), axis=1)[:, None]

                gamma_py = np_softmax((self.sigma - self.g.T) ** 2
                                      * np.exp(-squared_distances(self.x, self.y)), self.y)

                # compare output
                self.assertTrue(np.allclose(gamma_keops.ravel(), gamma_py.ravel(), atol=1e-6))
Example no. 6
    def test_generic_syntax_sum(self):
        ############################################################
        from pykeops.numpy import Genred
        aliases = ['p=Pm(0,1)', 'a=Vj(1,1)', 'x=Vi(2,3)', 'y=Vj(3,3)']
        formula = 'Square(p-a)*Exp(x+y)'
        axis = 1  # 0 means summation over i, 1 means over j

        if pykeops.gpu_available:
            backend_to_test = ['auto', 'GPU_1D', 'GPU_2D', 'GPU']
        else:
            backend_to_test = ['auto']

        for b, t in itertools.product(backend_to_test, self.type_to_test):
            with self.subTest(b=b, t=t):

                # Call cuda kernel
                myconv = Genred(formula, aliases, reduction_op='Sum', axis=axis, dtype=t)
                gamma_keops = myconv(self.sigma.astype(t), self.g.astype(t),
                                     self.x.astype(t), self.y.astype(t), backend=b)

                # Numpy version
                gamma_py = np.sum((self.sigma - self.g)**2
                                  * np.exp((self.y.T[:,:,np.newaxis] + self.x.T[:,np.newaxis,:])), axis=1).T

                # compare output
                self.assertTrue( np.allclose(gamma_keops, gamma_py , atol=1e-6))
Example no. 7
    def test_argkmin(self):
        ############################################################

        from pykeops.numpy import Genred

        formula = "SqDist(x,y)"
        variables = [
            "x = Vi(" + str(self.D) + ")",  # First arg   : i-variable, of size D
            "y = Vj(" + str(self.D) + ")",
        ]  # Second arg  : j-variable, of size D

        my_routine = Genred(
            formula,
            variables,
            reduction_op="ArgKMin",
            axis=1,
            dtype=self.type_to_test[1],
            opt_arg=3,
        )

        c = my_routine(self.x, self.y, backend="auto").astype(int)
        cnp = np.argsort(
            np.sum((self.x[:, np.newaxis, :] - self.y[np.newaxis, :, :]) ** 2, axis=2),
            axis=1,
        )[:, :3]
        self.assertTrue(np.allclose(c.ravel(), cnp.ravel()))
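As a hedged follow-up (not part of the original test), the ArgKMin indices can be turned into the corresponding K smallest squared distances with plain NumPy:

        # Gather the 3 smallest squared distances from the integer indices in c (shape (M, 3)).
        sq_dists = np.sum((self.x[:, np.newaxis, :] - self.y[np.newaxis, :, :]) ** 2, axis=2)
        knn_dists = np.take_along_axis(sq_dists, c, axis=1)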
Example no. 8
def generic_logsumexp(formula, output, *aliases, **kwargs):
    r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with a "LogSumExp" reduction.

    Args:
        formula (string): Scalar-valued symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.
        output (string): An identifier of the form ``"AL = TYPE(1)"`` 
            that specifies the category and dimension of the output variable. Here:

              - ``AL`` is a dummy alphanumerical name.
              - ``TYPE`` is a *category*. One of:

                - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1.
                - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0.

        *aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Keyword Args:
        dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. 
            The supported values are:

              - **dtype** = ``"float16"``,
              - **dtype** = ``"float32"``,
              - **dtype** = ``"float64"``.

    Returns:
        A generic reduction that can be called on arbitrary
        NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Example:
        Log-likelihood of a Gaussian Mixture Model,

        .. math::
            a_i~=~f(x_i)~&=~ \log \sum_{j=1}^{N} \exp(-\gamma\cdot\|x_i-y_j\|^2)\cdot b_j \\
               ~&=~ \log \sum_{j=1}^{N} \exp\big(-\gamma\cdot\|x_i-y_j\|^2 \,+\, \log(b_j) \big).

        >>> log_likelihood = generic_logsumexp(
        ...     '(-(g * SqNorm2(x - y))) + b', # Formula
        ...     'a = Vi(1)',              # Output: 1 scalar per line
        ...     'x = Vi(3)',              # 1st input: dim-3 vector per line
        ...     'y = Vj(3)',              # 2nd input: dim-3 vector per line
        ...     'g = Pm(1)',              # 3rd input: vector of size 1
        ...     'b = Vj(1)')              # 4th input: 1 scalar per line
        >>> x = np.random.randn(1000000, 3)
        >>> y = np.random.randn(2000000, 3)
        >>> g = np.array([.5])            # Parameter of our GMM
        >>> b = np.random.rand(2000000, 1)  # Positive weights...
        >>> b = b / b.sum()               # Normalized to get a probability measure
        >>> a = log_likelihood(x, y, g, np.log(b))  # a_i = log sum_j exp(-g*|x_i-y_j|^2) * b_j
        >>> print(a.shape)
        (1000000, 1)
    """
    _, cat, _, _ = get_type(output)
    return Genred(formula,
                  list(aliases),
                  reduction_op='LogSumExp',
                  axis=cat2axis(cat),
                  **kwargs)
Example no. 9
def generic_argmin(formula, output, *aliases, **kwargs):
    r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with an "ArgMin" reduction.

    Args:
        formula (string): Scalar-valued symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.
        output (string): An identifier of the form ``"AL = TYPE(1)"`` 
            that specifies the category and dimension of the output variable. Here:

              - ``AL`` is a dummy alphanumerical name.
              - ``TYPE`` is a *category*. One of:

                - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1.
                - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0.

        *aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Keyword Args:
        dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. 
            The supported values are:

              - **dtype** = ``"float16"``,
              - **dtype** = ``"float32"``,
              - **dtype** = ``"float64"``.

    Returns:
        A generic reduction that can be called on arbitrary
        NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Example:
        Bruteforce nearest neighbor search in dimension 100:

        >>> nearest_neighbor = generic_argmin(
        ...     'SqDist(x, y)',   # Formula
        ...     'a = Vi(1)',      # Output: 1 scalar per line
        ...     'x = Vi(100)',    # 1st input: dim-100 vector per line
        ...     'y = Vj(100)')    # 2nd input: dim-100 vector per line
        >>> x = np.random.randn(5,     100)
        >>> y = np.random.randn(20000, 100)
        >>> a = nearest_neighbor(x, y)
        >>> print(a)
        [[ 8761.],
         [ 2836.],
         [  906.],
         [16130.],
         [ 3158.]]
        >>> dists = np.linalg.norm(x - y[ a.ravel().astype(int) ], axis=1)  # Distance to the nearest neighbor
        >>> print(dists)
        [10.5926, 10.9132,  9.9694, 10.1396, 10.1955]
    """
    _, cat, _, _ = get_type(output)
    return Genred(formula,
                  list(aliases),
                  reduction_op='ArgMin',
                  axis=cat2axis(cat),
                  **kwargs)
Example no. 10
def generic_sum(formula, output, *aliases, **kwargs):
    r"""Alias for :class:`numpy.Genred <pykeops.numpy.Genred>` with a "Sum" reduction.

    Args:
        formula (string): Symbolic KeOps expression, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.
        output (string): An identifier of the form ``"AL = TYPE(DIM)"`` 
            that specifies the category and dimension of the output variable. Here:

              - ``AL`` is a dummy alphanumerical name.
              - ``TYPE`` is a *category*. One of:

                - ``Vi``: indexation by :math:`i` along axis 0; reduction is performed along axis 1.
                - ``Vj``: indexation by :math:`j` along axis 1; reduction is performed along axis 0.

              - ``DIM`` is an integer, the dimension of the output variable; it should be compatible with **formula**.
        *aliases (strings): List of identifiers, as in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Keyword Args:
        dtype (string, default = ``"float64"``): Specifies the numerical **dtype** of the input and output arrays. 
            The supported values are:

              - **dtype** = ``"float16"``,
              - **dtype** = ``"float32"``,
              - **dtype** = ``"float64"``.

    Returns:
        A generic reduction that can be called on arbitrary
        NumPy arrays, as documented in :class:`numpy.Genred <pykeops.numpy.Genred>`.

    Example:
        >>> my_conv = generic_sum(       # Custom Kernel Density Estimator
        ...     'Exp(-SqNorm2(x - y))',  # Formula
        ...     'a = Vi(1)',             # Output: 1 scalar per line
        ...     'x = Vi(3)',             # 1st input: dim-3 vector per line
        ...     'y = Vj(3)')             # 2nd input: dim-3 vector per line
        >>> # Apply it to 2d arrays x and y with 3 columns and a (huge) number of lines
        >>> x = np.random.randn(1000000, 3)
        >>> y = np.random.randn(2000000, 3)
        >>> a = my_conv(x, y)  # a_i = sum_j exp(-|x_i-y_j|^2)
        >>> print(a.shape)
        (1000000, 1)
    """
    _, cat, _, _ = get_type(output)
    return Genred(formula,
                  list(aliases),
                  reduction_op='Sum',
                  axis=cat2axis(cat),
                  **kwargs)
Example no. 11
    def test_formula_simplification(self):
        ############################################################
        from pykeops.numpy import Genred
        
        t = self.type_to_test[0]

        aliases = ['x=Vi(0,3)']
        formula = 'Grad(Grad(x + Var(1,3,1), x, Var(2,3,0)),x, Var(3,3,0))'

        # Call cuda kernel
        myconv = Genred(formula, aliases, reduction_op='Sum', axis=1)
        gamma_keops = myconv(self.x.astype(t), self.y.astype(t),
                             self.x.astype(t), self.x.astype(t), backend='auto')

        # Numpy version: x + Var(1,3,1) is affine in x, so the double gradient vanishes identically
        gamma_py = np.zeros_like(self.x)
        
        # compare output
        self.assertTrue(np.allclose(gamma_keops, gamma_py, atol=1e-6))
Example no. 12
def WarmUpGpu():
    # dummy first calls for accurate timing in case of GPU use
    print("Warming up the Gpu (numpy bindings) !!!")
    if pykeops.config.gpu_available:
        formula = "Exp(-oos2*SqDist(x,y))*b"
        aliases = [
            "x = Vi(1)",  # First arg   : i-variable, of size 1
            "y = Vj(1)",  # Second arg  : j-variable, of size 1
            "b = Vj(1)",  # Third arg  : j-variable, of size 1
            "oos2 = Pm(1)",
        ]  # Fourth arg  : scalar parameter
        my_routine = Genred(
            formula, aliases, reduction_op="Sum", axis=1, dtype="float64"
        )
        dum = np.random.rand(10, 1)
        dum2 = np.random.rand(10, 1)
        my_routine(dum, dum, dum2, np.array([1.0]))
        my_routine(dum, dum, dum2, np.array([1.0]))
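For context, a hedged usage sketch of this helper (the benchmark code surrounding it in the original file is not shown; the routine, sizes and data below are illustrative):

import time
import numpy as np
from pykeops.numpy import Genred

WarmUpGpu()  # compile kernels and initialize the device outside the timed region

formula = "Exp(-oos2*SqDist(x,y))*b"
aliases = ["x = Vi(3)", "y = Vj(3)", "b = Vj(1)", "oos2 = Pm(1)"]
routine = Genred(formula, aliases, reduction_op="Sum", axis=1, dtype="float64")

x, y, b = np.random.rand(10000, 3), np.random.rand(10000, 3), np.random.rand(10000, 1)
start = time.time()
gamma = routine(x, y, b, np.array([0.1]))
print("Elapsed:", round(time.time() - start, 5), "s")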
Example no. 13
def WarmUpGpu():
    # dummy first calls for accurate timing in case of GPU use
    print("Warming up the Gpu (numpy bindings) !!!")
    if IsGpuAvailable():
        formula = 'Exp(-oos2*SqDist(x,y))*b'
        aliases = [
            'x = Vi(1)',  # First arg   : i-variable, of size 1
            'y = Vj(1)',  # Second arg  : j-variable, of size 1
            'b = Vj(1)',  # Third arg  : j-variable, of size 1
            'oos2 = Pm(1)'
        ]  # Fourth arg  : scalar parameter
        my_routine = Genred(formula,
                            aliases,
                            reduction_op='Sum',
                            axis=1,
                            dtype='float64')
        dum = np.random.rand(10, 1)
        dum2 = np.random.rand(10, 1)
        my_routine(dum, dum, dum2, np.array([1.0]))
        my_routine(dum, dum, dum2, np.array([1.0]))
Example no. 14
# By default we assume that there are two GPUs available with 0 and 1 labels:
gpuids = [0,1] if get_gpu_number() > 1 else [0]


####################################################################
# KeOps Kernel using Genred
# -------------------------
# Define some arbitrary KeOps routine:

formula   = 'Square(p-a) * Exp(x+y)'
variables = ['x = Vi(3)', 'y = Vj(3)', 'a = Vj(1)', 'p = Pm(1)']

dtype = 'float32'  # May be 'float32' or 'float64'

my_routine = Genred(formula, variables, reduction_op='Sum', axis=1, dtype=dtype)


####################################################################
#  Generate some data, stored on the CPU (host) memory:
#

M = 1000
N = 2000
x = np.random.randn(M,3).astype(dtype)
y = np.random.randn(N,3).astype(dtype)
a = np.random.randn(N,1).astype(dtype)
p = np.random.randn(1).astype(dtype)

####################################################################
# Launch our routine on the CPU:
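# Hedged continuation sketch (the original snippet is truncated at this point):
c = my_routine(x, y, a, p, backend="CPU")   # reference result, computed on the host

####################################################################
# ... and on each GPU listed in gpuids, selected (this is an assumption) through the
# device_id keyword of the Genred call:

for gpuid in gpuids:
    d = my_routine(x, y, a, p, backend="GPU", device_id=gpuid)
    print("Relative error on GPU {}: {:.2e}".format(gpuid, np.linalg.norm(c - d) / np.linalg.norm(c)))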
Example no. 15
x = np.random.rand(N, D).astype(dtype)

###############################################################
# KeOps Kernel
# -------------

formula = "SqDist(x,y)"  # Use a simple Euclidean (squared) norm
variables = [
    "x = Vi(" + str(D) + ")",  # First arg : i-variable, of size D
    "y = Vj(" + str(D) + ")",
]  # Second arg: j-variable, of size D

# N.B.: The number K is specified as an optional argument `opt_arg`
my_routine = Genred(formula,
                    variables,
                    reduction_op="ArgKMin",
                    axis=1,
                    dtype=dtype,
                    opt_arg=K)

###############################################################
# Using our new :class:`pykeops.numpy.Genred` routine,
# we perform a K-nearest neighbor search ( **reduction_op** = ``"ArgKMin"`` )
# over the :math:`j` variable :math:`y_j` ( **axis** = 1):
#
# .. note::
#   If CUDA is available and **backend** is ``"auto"`` or not specified,
#   KeOps will:
#
#     1. Load the data on the GPU
#     2. Perform the computation on the device
#     3. Unload the result back to the CPU
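# Hedged sketch of the actual K-NN call (the snippet stops before it): for every point
# x_i, look up the indices of its K nearest neighbors among the points x_j themselves.
ind_knn = my_routine(x, x)            # index array of shape (N, K)
# Depending on the pykeops version, the indices may come back as floats; cast them with
# ind_knn.astype(int) before fancy indexing. The backend can also be forced explicitly
# instead of relying on "auto", e.g.:
# ind_knn = my_routine(x, x, backend="CPU")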
Example no. 16
# KeOps kernel
# ---------------
#
# Create a new generic routine using the :class:`numpy.Genred <pykeops.numpy.Genred>`
# constructor:

formula = "SqDist(x,y)"
formula_weights = "b"
aliases = [
    "x = Vi(" + str(D) + ")",  # First arg:  i-variable of size D
    "y = Vj(" + str(D) + ")",  # Second arg: j-variable of size D
    "b = Vj(" + str(Dv) + ")",
]  # Third arg:  j-variable of size Dv

softmax_op = Genred(
    formula, aliases, reduction_op="SumSoftMaxWeight", axis=1, formula2=formula_weights
)

# Dummy first call to warmup the GPU and get accurate timings:
_ = softmax_op(x, y, b)

###############################################################################
# Use our new function on arbitrary Numpy arrays:
#

start = time.time()
c = softmax_op(x, y, b)
print("Timing (KeOps implementation): ", round(time.time() - start, 5), "s")

# compare with direct implementation
start = time.time()
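# Hedged sketch of the direct NumPy comparison that is cut off here (not the original
# script's code; it materializes the full M-by-N distance matrix, so moderate sizes only,
# and it assumes numpy is imported as np earlier in the script):
f = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=2)      # SqDist(x, y), shape (M, N)
f -= f.max(axis=1, keepdims=True)                             # stabilize the exponentials
c_np = np.exp(f) @ b / np.exp(f).sum(axis=1, keepdims=True)   # softmax-weighted average of b
print("Timing (NumPy implementation): ", round(time.time() - start, 5), "s")
print("Relative error: ", np.linalg.norm(c - c_np) / np.linalg.norm(c_np))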
Example no. 17
# -----------------------

formula = "Square(p-a)*Exp(x+y)"
variables = [
    "x = Vi(3)",  # First arg   : i-variable, of size 3
    "y = Vj(3)",  # Second arg  : j-variable, of size 3
    "a = Vj(1)",  # Third arg   : j-variable, of size 1 (scalar)
    "p = Pm(1)",
]  # Fourth  arg : Parameter,  of size 1 (scalar)

####################################################################
# Our sum reduction is performed over the index :math:`j`,
# i.e. on the axis ``1`` of the kernel matrix.
# The output c is an :math:`x`-variable indexed by :math:`i`.

my_routine = Genred(formula, variables, reduction_op="Sum", axis=1, dtype=dtype)
c = my_routine(x, y, a, p, backend="auto")

####################################################################
# The equivalent code in NumPy:
c_np = (
    (
        (p - a.T)[:, np.newaxis] ** 2
        * np.exp(x.T[:, :, np.newaxis] + y.T[:, np.newaxis, :])
    )
    .sum(2)
    .T
)

# Plot the results next to each other:
for i in range(3):