Example 1
    def forward(ctx, formula, aliases, varinvpos, alpha, backend, dtype,
                device_id, eps, ranges, accuracy_flags, *args):

        optional_flags = ['-DPYTORCH_INCLUDE_DIR=' + ';'.join(include_dirs)] + accuracy_flags

        myconv = LoadKeOps(formula, aliases, dtype, 'torch',
                           optional_flags).import_module()

        # Context variables: save everything to compute the gradient:
        ctx.formula = formula
        ctx.aliases = aliases
        ctx.varinvpos = varinvpos
        ctx.alpha = alpha
        ctx.backend = backend
        ctx.dtype = dtype
        ctx.device_id = device_id
        ctx.eps = eps
        ctx.myconv = myconv
        ctx.ranges = ranges
        ctx.accuracy_flags = accuracy_flags
        if ranges is None:
            ranges = ()  # To keep the same type

        varinv = args[varinvpos]
        ctx.varinvpos = varinvpos

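        # Decode the requested backend into flags: CPU vs GPU, 1D vs 2D tiling
        # scheme, and whether the input data already lives on the GPU device.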
        tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args)

        if tagCPUGPU == 1 and tagHostDevice == 1:
            device_id = args[0].device.index
            for i in range(1, len(args)):
                if args[i].device.index != device_id:
                    raise ValueError(
                        "[KeOps] Input arrays must be all located on the same device."
                    )

        (categories, dimensions) = parse_aliases(aliases)

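        # Linear operator v -> K v + alpha * v: the KeOps reduction evaluated with the
        # variable at position varinvpos replaced by v, plus the optional ridge term.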
        def linop(var):
            newargs = args[:varinvpos] + (var, ) + args[varinvpos + 1:]
            res = myconv.genred_pytorch(tagCPUGPU, tag1D2D, tagHostDevice,
                                        device_id, ranges, categories,
                                        dimensions, *newargs)
            if alpha:
                res += alpha * var
            return res

        result = ConjugateGradientSolver('torch', linop, varinv.data, eps)

        # Relying on 'ctx.save_for_backward' is necessary if you want to be able to differentiate
        # the output of the backward pass once again: it helps PyTorch keep track of 'who is who'.
        ctx.save_for_backward(*args, result)

        return result
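
The forward pass above never materializes the kernel matrix: ConjugateGradientSolver only needs the matrix-vector product supplied by linop, i.e. v -> K v + alpha * v. Below is a minimal, self-contained NumPy sketch of that pattern, with a plain conjugate-gradient loop and a dense Gaussian kernel standing in for the KeOps genred-based closure; cg_solve and its arguments are illustrative names, not KeOps API.

import numpy as np

def cg_solve(linop, b, eps=1e-6, maxiter=100):
    # Solve linop(x) = b with a plain conjugate-gradient loop, using only
    # matrix-vector products and never forming the matrix explicitly.
    x = np.zeros_like(b)
    r = b - linop(x)
    p = r.copy()
    rs_old = (r * r).sum()
    for _ in range(maxiter):
        Ap = linop(p)
        step = rs_old / (p * Ap).sum()
        x += step * p
        r -= step * Ap
        rs_new = (r * r).sum()
        if np.sqrt(rs_new) < eps:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    return x

# Illustrative stand-in for the genred-based closure above: a dense Gaussian
# kernel matrix plus the ridge term alpha * Id.
pts = np.random.randn(50, 3)
b = np.random.randn(50, 2)
alpha = 1e-10
K = np.exp(-((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1))
linop = lambda v: K @ v + alpha * v

a = cg_solve(linop, b)
print(np.allclose(linop(a), b, atol=1e-4))  # the solve is accurate up to eps

Because only linop is required, the same loop works whether the products are computed densely (as here) or through KeOps' symbolic reductions.
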
Example 2
    def forward(ctx, formula, aliases, varinvpos, alpha, backend, dtype,
                device_id, eps, ranges, optional_flags, rec_multVar_highdim,
                *args):

        optional_flags += include_dirs

        # N.B.: when the rec_multVar_highdim option is set, the formula is of the form "sum(F*b)",
        # where b is a variable of large dimension. In that case we set the compiler option
        # MULT_VAR_HIGHDIM, which enables the special "final chunk" computation mode. However, this
        # is not necessarily true for the gradients of the same formula: only the gradient with
        # respect to the variable b has the same form. Hence, we save the current value of
        # optional_flags into ctx before adding the MULT_VAR_HIGHDIM compiler option.
        ctx.optional_flags = optional_flags.copy()
        if rec_multVar_highdim is not None:
            optional_flags += ["-DMULT_VAR_HIGHDIM=1"]

        myconv = LoadKeOps(formula, aliases, dtype, "torch",
                           optional_flags).import_module()

        # Context variables: save everything to compute the gradient:
        ctx.formula = formula
        ctx.aliases = aliases
        ctx.varinvpos = varinvpos
        ctx.alpha = alpha
        ctx.backend = backend
        ctx.dtype = dtype
        ctx.device_id = device_id
        ctx.eps = eps
        ctx.myconv = myconv
        ctx.ranges = ranges
        ctx.rec_multVar_highdim = rec_multVar_highdim
        ctx.optional_flags = optional_flags
        if ranges is None:
            ranges = ()  # To keep the same type

        varinv = args[varinvpos]
        ctx.varinvpos = varinvpos

        tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args)

        if tagCPUGPU == 1 and tagHostDevice == 1:
            device_id = args[0].device.index
            for i in range(1, len(args)):
                if args[i].device.index != device_id:
                    raise ValueError(
                        "[KeOps] Input arrays must be all located on the same device."
                    )

        def linop(var):
            newargs = args[:varinvpos] + (var, ) + args[varinvpos + 1:]
            res = myconv.genred_pytorch(tagCPUGPU, tag1D2D, tagHostDevice,
                                        device_id, ranges, *newargs)
            if alpha:
                res += alpha * var
            return res

        result = ConjugateGradientSolver("torch", linop, varinv.data, eps)

        # Relying on 'ctx.save_for_backward' is necessary if you want to be able to differentiate
        # the output of the backward pass once again: it helps PyTorch keep track of 'who is who'.
        ctx.save_for_backward(*args, result)

        return result
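
As the comment above points out, passing the inputs and the result through ctx.save_for_backward (rather than stashing them as plain attributes on ctx) is what lets PyTorch differentiate the backward pass once more. Here is a minimal, self-contained torch.autograd.Function sketch of that bookkeeping, solving a dense linear system purely for illustration (DenseSolve is a hypothetical name, not part of KeOps):

import torch

class DenseSolve(torch.autograd.Function):
    """Toy analogue of the solver above: x = K^{-1} b for a dense, invertible K."""

    @staticmethod
    def forward(ctx, K, b):
        x = torch.linalg.solve(K, b)
        # Save inputs and output through save_for_backward so that autograd can
        # keep track of them if the backward pass is differentiated once again.
        ctx.save_for_backward(K, b, x)
        return x

    @staticmethod
    def backward(ctx, grad_output):
        K, _b, x = ctx.saved_tensors
        # dL/db = K^{-T} g   and   dL/dK = -(K^{-T} g) x^T,  with g = grad_output.
        gb = torch.linalg.solve(K.transpose(-1, -2), grad_output)
        gK = -gb @ x.transpose(-1, -2)
        return gK, gb

K = 2.0 * torch.eye(5, dtype=torch.float64)
K.requires_grad_(True)
b = torch.randn(5, 1, dtype=torch.float64, requires_grad=True)
print(torch.autograd.gradcheck(DenseSolve.apply, (K, b)))  # True

Since the saved tensors re-enter the graph through ctx.saved_tensors, calling backward with create_graph=True can differentiate through this backward as well.
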
Example 3
    def __call__(self,
                 *args,
                 backend='auto',
                 device_id=-1,
                 alpha=1e-10,
                 eps=1e-6,
                 ranges=None):
        r"""
        Apply the routine to arbitrary NumPy arrays.
            
        Warning:
            Even for variables of size 1 (e.g. :math:`a_i\in\mathbb{R}`
            for :math:`i\in[0,M)`), KeOps expects inputs to be formatted
            as 2d arrays of size ``(M,dim)``. In practice,
            ``a.reshape(-1,1)`` should be used to turn a vector of weights
            into a *list of scalar values*.
        
        Args:
            *args (2d arrays (variables ``Vi(..)``, ``Vj(..)``) and 1d arrays (parameters ``Pm(..)``)): The input numerical arrays, 
                which should all have the same ``dtype``, be **contiguous** and be stored on 
                the **same device**. KeOps expects one array per alias, 
                with the following compatibility rules:

                    - All ``Vi(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`M`.
                    - All ``Vj(Dim_k)`` variables are encoded as **2d-arrays** with ``Dim_k`` columns and the same number of lines :math:`N`.
                    - All ``Pm(Dim_k)`` variables are encoded as **1d-arrays** (vectors) of size ``Dim_k``.

        Keyword Args:
            alpha (float, default = 1e-10): Non-negative 
                **ridge regularization** parameter, added to the diagonal
                of the Kernel matrix :math:`K_{xx}`.

            backend (string): Specifies the map-reduce scheme,
                as detailed in the documentation 
                of the :class:`numpy.Genred <pykeops.numpy.Genred>` module.

            device_id (int, default=-1): Specifies the GPU that should be used 
                to perform the computation; a negative value lets your system 
                choose the default GPU. This parameter is only useful if your 
                system has access to several GPUs.

            ranges (6-tuple of integer arrays, None by default):
                Ranges of integers that specify a 
                :doc:`block-sparse reduction scheme <../../sparsity>`
                with *Mc clusters along axis 0* and *Nc clusters along axis 1*,
                as detailed in the documentation 
                of the :class:`numpy.Genred <pykeops.numpy.Genred>` module.

                If **None** (default), we simply use a **dense Kernel matrix**
                as we loop over all indices
                :math:`i\in[0,M)` and :math:`j\in[0,N)`.

        Returns:
            (M,D) or (N,D) array:

            The solution of the optimization problem, which is always a 
            **2d-array** with :math:`M` or :math:`N` lines (if **axis** = 1 
            or **axis** = 0, respectively) and a number of columns 
            that is inferred from the **formula**.

        """
        # Get tags
        tagCpuGpu, tag1D2D, _ = get_tag_backend(backend, args)
        varinv = args[self.varinvpos]

        if ranges is None:
            ranges = ()  # ranges should be encoded as a tuple

        def linop(var):
            newargs = args[:self.varinvpos] + (var,) + args[self.varinvpos + 1:]
            res = self.myconv.genred_numpy(tagCpuGpu, tag1D2D, 0, device_id,
                                           ranges, *newargs)
            if alpha:
                res += alpha * var
            return res

        return ConjugateGradientSolver('numpy', linop, varinv, eps=eps)
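
For reference, a call that follows the formatting rules in the docstring might look as follows. This is a sketch based on the KernelSolve example from the KeOps tutorials; the pykeops.numpy.KernelSolve constructor arguments (formula, aliases, name of the unknown variable, reduction axis) and the Gaussian-kernel formula are assumptions about the surrounding class and may differ between versions, they are not taken from the code above.

import numpy as np
from pykeops.numpy import KernelSolve

formula = "Exp(-g * SqDist(x, y)) * a"
aliases = ["x = Vi(3)",   # i-indexed points: 2d array with 3 columns and M lines
           "y = Vj(3)",   # j-indexed points: 2d array with 3 columns and N lines
           "a = Vj(2)",   # the unknown the routine solves for
           "g = Pm(1)"]   # scalar parameter, passed as a 1d array of size 1

M = 1000
x = np.random.rand(M, 3)
b = np.random.rand(M, 2)
g = np.array([0.5])

# Solve (K_xx + alpha * Id) a = b, with K_xx[i, j] = exp(-g * |x_i - x_j|^2):
Kinv = KernelSolve(formula, aliases, "a", axis=1)
res = Kinv(x, x, b, g, alpha=1e-10)
print(res.shape)  # (1000, 2)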