Example #1
    @staticmethod
    def forward(ctx, formula, aliases, backend, dtype, device_id, ranges, accuracy_flags, *args):

        # 'include_dirs', 'LoadKeOps' and 'get_tag_backend' are provided by the
        # enclosing pykeops module; their imports are omitted in this snippet.
        optional_flags = ['-DPYTORCH_INCLUDE_DIR=' + ';'.join(include_dirs)] + accuracy_flags

        myconv = LoadKeOps(formula, aliases, dtype, 'torch', optional_flags).import_module()

        # Context variables: save everything to compute the gradient:
        ctx.formula = formula
        ctx.aliases = aliases
        ctx.backend = backend
        ctx.dtype = dtype
        ctx.device_id = device_id
        ctx.ranges = ranges
        ctx.accuracy_flags = accuracy_flags
        ctx.myconv = myconv

        tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args)

        if tagCPUGPU == 1 and tagHostDevice == 1:
            device_id = args[0].device.index
            for i in range(1,len(args)):
                if args[i].device.index != device_id:
                    raise ValueError("[KeOps] Input arrays must be all located on the same device.")
        
        if ranges is None: ranges = ()  # To keep the same type

        result = myconv.genred_pytorch(tagCPUGPU, tag1D2D, tagHostDevice, device_id, ranges, *args)

        # Relying on the 'ctx.saved_tensors' mechanism is necessary if you want to be able
        # to differentiate the output of the backward pass once again: it helps PyTorch
        # keep track of 'who is who'.
        ctx.save_for_backward(*args, result)

        return result
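
The snippet above is the forward pass of a torch.autograd.Function subclass (GenredAutograd in the pykeops sources). The toy sketch below mirrors the same pattern without any KeOps dependency: non-tensor configuration arguments come first, tensor inputs arrive through *args, plain Python state is stashed on ctx, and ctx.save_for_backward(*args, result) records what the backward pass will need. The ToyRed class and its quadratic reduction are illustrative stand-ins, not part of pykeops.

import torch

class ToyRed(torch.autograd.Function):
    """Toy stand-in that follows the same forward/backward structure."""

    @staticmethod
    def forward(ctx, scale, *args):
        # Non-tensor configuration first (here just 'scale'), tensor inputs in *args.
        ctx.scale = scale
        result = scale * sum((a ** 2).sum() for a in args)
        # Save the tensor inputs and the result so that the output of the
        # backward pass can itself be differentiated again.
        ctx.save_for_backward(*args, result)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        *args, result = ctx.saved_tensors
        # One None for the non-tensor 'scale' argument, then one gradient per tensor input.
        grads = tuple(grad_output * 2 * ctx.scale * a for a in args)
        return (None,) + grads

x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
out = ToyRed.apply(0.5, x, y)   # analogous to GenredAutograd.apply(formula, aliases, ..., *args)
out.backward()                  # d/dx of 0.5 * (|x|^2 + |y|^2) is x
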
Example #2
    @staticmethod
    def forward(ctx, formula, aliases, backend, dtype, device_id, ranges,
                optional_flags, rec_multVar_highdim, nx, ny, *args):

        # N.B.: when the rec_multVar_highdim option is set, the formula is of the form
        # "sum(F*b)", where b is a variable with a large dimension. In that case we set the
        # MULT_VAR_HIGHDIM compiler option to enable the special "final chunk" computation
        # mode. However, this does not necessarily hold for the gradients of the same formula:
        # only the gradient with respect to the variable b keeps the same form. Hence we save
        # the current status of optional_flags into ctx before adding the MULT_VAR_HIGHDIM
        # compiler option.
        ctx.optional_flags = optional_flags.copy()
        if rec_multVar_highdim is not None:
            optional_flags += ["-DMULT_VAR_HIGHDIM=1"]

        myconv = LoadKeOps(formula, aliases, dtype, 'torch', optional_flags,
                           include_dirs).import_module()

        # Context variables: save everything to compute the gradient:
        ctx.formula = formula
        ctx.aliases = aliases
        ctx.backend = backend
        ctx.dtype = dtype
        ctx.device_id = device_id
        ctx.ranges = ranges
        ctx.rec_multVar_highdim = rec_multVar_highdim
        ctx.myconv = myconv
        ctx.nx = nx
        ctx.ny = ny

        tagCPUGPU, tag1D2D, tagHostDevice = get_tag_backend(backend, args)

        if tagCPUGPU == 1 and tagHostDevice == 1:
            device_id = args[0].device.index
            for i in range(1, len(args)):
                if args[i].device.index != device_id:
                    raise ValueError(
                        "[KeOps] Input arrays must be all located on the same device."
                    )

        if ranges is None:
            ranges = ()  # To keep the same type

        # N.B.: KeOps C++ expects contiguous integer arrays as ranges
        ranges = tuple(r.contiguous() for r in ranges)

        result = myconv.genred_pytorch(tagCPUGPU, tag1D2D, tagHostDevice,
                                       device_id, ranges, nx, ny, *args)

        # Relying on the 'ctx.saved_tensors' mechanism is necessary if you want to be able
        # to differentiate the output of the backward pass once again: it helps PyTorch
        # keep track of 'who is who'.
        ctx.save_for_backward(*args, result)

        return result
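
In practice neither forward is called directly: user code goes through the public pykeops.torch.Genred interface, which builds the formula and aliases and then dispatches to an autograd forward like the ones above. A short usage sketch, assuming a pykeops installation where pykeops.torch.Genred is available (the formula, aliases and sizes are arbitrary examples):

import torch
from pykeops.torch import Genred

# A Gaussian-kernel reduction: out_i = sum_j exp(-|x_i - y_j|^2) * b_j
formula = "Exp(-SqDist(x, y)) * b"
aliases = ["x = Vi(3)",   # i-indexed variable of dimension 3
           "y = Vj(3)",   # j-indexed variable of dimension 3
           "b = Vj(2)"]   # j-indexed variable of dimension 2
my_red = Genred(formula, aliases, reduction_op="Sum", axis=1)

x = torch.randn(1000, 3, requires_grad=True)
y = torch.randn(2000, 3)
b = torch.randn(2000, 2)

out = my_red(x, y, b)    # runs through a forward like the ones shown above
out.sum().backward()     # gradients flow thanks to ctx.save_for_backward(...)
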