Example #1
def canon_multi_target_and_host(target, target_host=None):
    """Returns a TVM Array<Target> capturing target and target_host. The given target can be in
    any form recognized by Target.canon_multi_target. If given, target_host can be in
    any form recognized by Target.canon_target. If target_host is given it will be set
    as the 'host' in each result Target object (and a warning given).
    """
    # Convert target to Array<Target>, but not yet accounting for any host.
    raw_targets = Target.canon_multi_target(target)
    assert raw_targets is not None and len(raw_targets) > 0
    # Convert host to Target, if given.
    if raw_targets[0].host is None and target_host is not None:
        warnings.warn(
            "target_host parameter is going to be deprecated. "
            "Please pass in tvm.target.Target(target, host=target_host) instead."
        )
        # Make sure the (canonical) host is captured in all the (canonical) targets.
        target_host = Target.canon_target(target_host)
        raw_targets = convert([tgt.with_host(target_host) for tgt in raw_targets])
    return raw_targets
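The warning above names its own replacement; a minimal before/after sketch (`mod` stands in for a hypothetical IRModule, so the deprecated call stays commented):

import tvm

# Deprecated: passing the host as a separate argument triggers the warning.
# lib = tvm.build(mod, target="cuda", target_host="llvm")

# Preferred: the host is captured inside the Target object itself.
target = tvm.target.Target("cuda", host="llvm")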
Example #2
def call_pure_extern(dtype, func_name, *args):
    """Build expression by calling a pure extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The extern function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return Call(dtype, func_name, convert(args), Call.PureExtern, None, 0)
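Usage is a single expression; a sketch calling the C library's sqrtf (assuming the symbol is resolvable when the module is built):

from tvm import te, tir

x = te.var("x", dtype="float32")
# A pure extern call carries no side effects, so the compiler may
# reorder or deduplicate it.
y = tir.call_pure_extern("float32", "sqrtf", x)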
Example #3
def call_extern(dtype, func_name, *args):
    """Build expression by calling a extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The extern function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return Call(dtype, Op.get("tir.call_extern"),
                convert((StringImm(func_name), ) + args), Call.Extern)
Example #4
def squeeze_shape_func(attrs, inputs, _):
    """
    Shape function for squeeze op.
    """
    axis = attrs.axis if attrs.axis is None else get_const_tuple(attrs.axis)
    keep_axes = []
    if axis is not None:
        for i in range(inputs[0].shape[0].value):
            if i not in axis:
                keep_axes.append(i)

    # Due to current relay type system, it is possible even
    # a static kernel function needs shape function. To handle
    # this case, we allow axis to be None in squeeze shape func
    # for now.
    # TODO(kevinthesun): Enhance relay type system to avoid this.
    if keep_axes:
        out = _squeeze_shape_func(inputs[0], convert(keep_axes))
    else:
        out = te.compute((), lambda *indices: 0)
    return [out]
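The `_squeeze_shape_func` helper it dispatches to is not shown on this page; a plausible reconstruction in TVM's hybrid script, inferred from the call site (the body is an assumption, not the verified source):

from tvm.te.hybrid import script

@script
def _squeeze_shape_func(data_shape, keep_axes):
    # Assumed behavior: the output shape keeps only the listed axes.
    out = output_tensor((len(keep_axes),), "int64")
    for i in const_range(len(keep_axes)):
        out[i] = data_shape[keep_axes[i]]
    return out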
Example #5
def call_intrin(dtype, func_name, *args):
    """Build expression by calling an intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The intrinsic function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return Call(dtype, func_name, convert(args))
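A usage sketch against the current tvm.tir API; note that it expects a registered op name such as "tir.exp", while the older variant above took the bare name:

from tvm import te, tir

x = te.var("x", dtype="float32")
# The intrinsic lowers per target, e.g. to an LLVM or CUDA builtin.
y = tir.call_intrin("float32", "tir.exp", x)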
Example #6
def call_pure_extern(dtype, func_name, *args, span=None):
    """Build expression by calling a pure extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The extern function name.

    args : list
        Positional arguments.

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    return Call(dtype, Op.get("tir.call_pure_extern"),
                convert((StringImm(func_name), ) + args), span)
Example #7
def resize_shape_func(attrs, inputs, _):
    """
    Shape function for dyn.image.resize op.
    """
    layout = attrs.layout
    if nchw_pack_layout(layout) or nchw_xc_layout(layout):
        out = [
            _resize_shape_func(inputs[0].shape, inputs[1],
                               convert(len(inputs[0].shape)), convert(2),
                               convert(3))
        ]
    else:
        height_axis = width_axis = 1
        for i, letter in enumerate(layout):
            if letter == "H":
                height_axis = i
            if letter == "W":
                width_axis = i
        out = [
            _resize_shape_func(inputs[0].shape, inputs[1],
                               convert(len(inputs[0].shape)),
                               convert(height_axis), convert(width_axis))
        ]
    return out
Example #8
def dilate_shape_func(attrs, inputs, _):
    """
    Shape function for dilate op.
    """
    return [_dilate_shape_func(inputs[0], convert(attrs.strides))]
Example #9
def mirror_pad_func(attrs, inputs, _):
    pad_width_tuple = [get_const_tuple(p) for p in attrs.pad_width]
    return [_mirror_pad_func(inputs[0], convert(pad_width_tuple))]
Example #10
def reduce_shape_func(attrs, inputs, _):
    """
    Shape function for reduce op.
    """
    axis_record = _create_axis_record(attrs, inputs)
    return [_reduce_shape_func(inputs[0], convert(axis_record))]
Example #11
def affine_grid_func(attrs, inputs, _):
    """
    Shape function for affine_grid op.
    """
    target_shape = get_const_tuple(attrs.target_shape)
    return [_affine_grid_func(inputs[0], convert(target_shape))]
Example #12
def layout_transform_shape_func(attrs, inputs, _):
    """
    Shape function for layout_transform op.
    """
    def _fetch_axis(layout):
        major_axes = []
        minor_axes = {}
        num_start = -1
        for i, item in enumerate(layout):
            if "A" <= item <= "Z":
                major_axes.append(item)
            elif "a" <= item <= "z":
                last_num = int(layout[num_start:i])
                minor_axes[item] = last_num
                num_start = -1
            elif num_start < 0:
                num_start = i
        return major_axes, minor_axes

    _, src_minor_axes = _fetch_axis(attrs.src_layout)
    dst_major_axes, dst_minor_axes = _fetch_axis(attrs.dst_layout)
    src_letter_list = []
    dst_letter_list = []
    for item in attrs.src_layout:
        if "A" <= item <= "Z" or "a" <= item <= "z":
            src_letter_list.append(item)
    for item in attrs.dst_layout:
        if "A" <= item <= "Z" or "a" <= item <= "z":
            dst_letter_list.append(item)
    out_layout_len = len(dst_major_axes) + len(dst_minor_axes)
    dst_equal_list = []
    dst_mul_list = []
    dst_div_list = []
    dst_mix_list = []

    for key in dst_major_axes:
        if key.lower() not in dst_minor_axes:
            if key.lower() not in src_minor_axes:
                dst_equal_list.append(
                    (dst_letter_list.index(key), src_letter_list.index(key)))
            else:
                dst_mul_list.append(
                    (dst_letter_list.index(key), src_letter_list.index(key),
                     src_letter_list.index(key.lower())))
        else:
            if key.lower() not in src_minor_axes:
                dst_div_list.append(
                    (dst_letter_list.index(key), src_letter_list.index(key),
                     dst_letter_list.index(key.lower()),
                     dst_minor_axes[key.lower()]))
            else:
                dst_mix_list.append(
                    (dst_letter_list.index(key), src_letter_list.index(key),
                     src_minor_axes[key.lower()],
                     dst_letter_list.index(key.lower()),
                     dst_minor_axes[key.lower()]))

    return [
        _layout_transform_shape_func(inputs[0], convert(out_layout_len),
                                     convert(dst_equal_list),
                                     convert(dst_mul_list),
                                     convert(dst_div_list),
                                     convert(dst_mix_list))
    ]
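To see what the nested `_fetch_axis` parser produces, here is a standalone copy run on the packed layout "NCHW4c":

def fetch_axis(layout):
    """Split a layout string into major axes and sized minor axes."""
    major_axes, minor_axes = [], {}
    num_start = -1
    for i, item in enumerate(layout):
        if "A" <= item <= "Z":
            major_axes.append(item)
        elif "a" <= item <= "z":
            # The digits scanned since num_start are the minor-axis factor.
            minor_axes[item] = int(layout[num_start:i])
            num_start = -1
        elif num_start < 0:
            num_start = i
    return major_axes, minor_axes

print(fetch_axis("NCHW4c"))  # (['N', 'C', 'H', 'W'], {'c': 4})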
Example #13
def concatenate_shape_func(attrs, inputs, _):
    axis = get_const_int(attrs.axis)
    if axis < 0:
        axis += inputs[0].shape[0]
    return [_concatenate_shape_func(inputs, convert(axis))]
Example #14
def concatenate_shape_func(attrs, inputs, _):
    axis = get_const_int(attrs.axis)
    return [_concatenate_shape_func(inputs, convert(axis))]
Example #15
def one_hot_shape_func(attrs, inputs, _):
    """
    Shape function for dyn.one_hot op.
    """
    axis = len(inputs[0].shape) if attrs.axis == -1 else attrs.axis
    return [_onehot_shape_func(inputs[0].shape, inputs[3], convert(axis))]
Example #16
def _make_reduce(expr, axis, where=None, init=None):
    code = fcombine.__code__
    assert fcombine.__code__.co_argcount == 2
    expr = convert(expr)
    if init is not None:
        init = convert(init)
    if isinstance(expr, Array):
        size = len(expr)
        larr = []
        rarr = []
        dtypes = []
        for i in range(size):
            dtype = expr[i].dtype
            dtypes.append(dtype)
            lname = code.co_varnames[0] + "_" + str(i)
            larr.append(Var(lname, dtype))
            rname = code.co_varnames[1] + "_" + str(i)
            rarr.append(Var(rname, dtype))
        if init is not None:
            init = convert(init)
            assert isinstance(init, Array)
            assert len(init) == size
            for init_i in range(size):
                # Check each init element, not the (converted) loop index.
                assert isinstance(init[init_i],
                                  (tvm.tir.ProducerLoad, tvm.tir.IntImm,
                                   tvm.tir.FloatImm))
        else:
            init = convert([])
        lhs = convert(larr)
        rhs = convert(rarr)
        result = fcombine(lhs, rhs)
        id_elem = fidentity(*dtypes)
    else:
        assert isinstance(expr, tvm.ir.PrimExpr)
        size = 1
        dtype = expr.dtype
        lvar = Var(code.co_varnames[0], dtype)
        rvar = Var(code.co_varnames[1], dtype)
        result = [fcombine(lvar, rvar)]
        id_elem = [fidentity(dtype)]
        lhs = convert([lvar])
        rhs = convert([rvar])
        expr = convert([expr])
        if init is not None:
            assert isinstance(
                init,
                (tvm.tir.ProducerLoad, tvm.tir.IntImm, tvm.tir.FloatImm))
            init = convert([init])
    result = convert(result)
    id_elem = convert(id_elem)
    combiner = CommReducer(lhs, rhs, result, id_elem)
    axis = convert(axis if isinstance(axis, (list, tuple)) else [axis])
    if where is None:
        where = convert(True)
    if init is None:
        outputs = tuple(
            tvm.tir.Reduce(combiner, expr, axis, where, i, convert([]))
            for i in range(size))
    else:
        outputs = tuple(
            tvm.tir.Reduce(combiner, expr, axis, where, i, init)
            for i in range(size))
    return outputs[0] if size == 1 else outputs
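`_make_reduce` is the closure that `te.comm_reducer` returns; the canonical usage from the TVM docstring defines a custom sum reducer:

import tvm
from tvm import te

n = te.var("n")
m = te.var("m")
A = te.placeholder((m, n), name="A")
k = te.reduce_axis((0, n), name="k")
# fcombine merges two partial results; fidentity supplies the identity element.
mysum = te.comm_reducer(lambda x, y: x + y,
                        lambda t: tvm.tir.const(0, dtype=t), name="mysum")
B = te.compute((m,), lambda i: mysum(A[i, k], axis=k), name="B")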
Example #17
def strided_slice_shape_func(attrs, inputs, _):
    """
    Shape func for strided_slice
    """
    slice_mode = convert(0 if attrs.slice_mode == "end" else 1)
    return [_strided_slice_shape_func_input_data(*inputs, slice_mode)]
Example #18
def reshape_shape_func(attrs, inputs, out_ndims):
    if attrs.newshape is None:
        return [_reshape_shape_func_input_data(*inputs, out_ndims[0])]
    return [_reshape_shape_func_input_shape(inputs[0],
                                            convert(attrs.newshape),
                                            out_ndims[0])]
Example #19
def stack_shape_func(attrs, inputs, _):
    axis = get_const_int(attrs.axis)
    if axis < 0:
        axis += inputs[0].shape[0] + 1
    return [_stack_shape_func(inputs[0], convert(axis), convert(len(inputs)))]
Example #20
    def __init__(self, target, host=None):
        """Construct a TVM target object from
        1) Raw target string
        2) Target config dict
        3) Target tag

        Parameters
        ----------
        target : Union[str, Dict[str, Any]]
            Can be one of a literal target string, a json string describing
            a configuration, or a dictionary of configuration options.
            When using a dictionary or json string to configure target, the
            possible values are:

            kind :  str (required)
                Which codegen path to use, for example 'llvm' or 'cuda'.
            keys : List of str (optional)
                A set of strategies that can be dispatched to. When using
                "kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"].
            device : str (optional)
                A single key that corresponds to the actual device being run on.
                This will be effectively appended to the keys.
            libs : List of str (optional)
                The set of external libraries to use. For example ['cblas', 'mkl'].
            system-lib : bool (optional)
                If True, build a module that contains self registered functions.
                Useful for environments where dynamic loading like dlopen is banned.
            mcpu : str (optional)
                The specific cpu being run on. Serves only as an annotation.
            model : str (optional)
                An annotation indicating what model a workload came from.
            runtime : str (optional)
                An annotation indicating which runtime to use with a workload.
            mtriple : str (optional)
                The llvm triplet describing the target, for example "arm64-linux-android".
            mattr : List of str (optional)
                The llvm features to compile with, for example ["+avx512f", "+mmx"].
            mfloat-abi : str (optional)
                An llvm setting that is one of 'hard' or 'soft' indicating whether to use
                hardware or software floating-point operations.
            mabi : str (optional)
                An llvm setting. Generate code for the specified ABI, for example "lp64d".
            host : Union[str, Dict[str, Any]] (optional)
                Description for target host. Can be recursive. Similar to target.
        host : Optional[Union[str, Dict[str, Any]]]
            Similar to target but for target host. Can be one of a literal target host string,
            a json string describing a configuration, or a dictionary of configuration options.
            When using a dictionary or json string to configure target, the possible values are
            same as target.
        """
        if isinstance(target, (dict, str)):
            target = convert(target)
        if isinstance(host, (dict, str)):
            host = convert(host)
        if target is None or not isinstance(target, (Map, String, Target)):
            raise ValueError("target has to be a string or dictionary.")
        if host is not None:
            if not isinstance(host, (Map, String, Target)):
                raise ValueError(
                    "target host has to be a string or dictionary.")
            self.__init_handle_by_constructor__(_ffi_api.Target,
                                                Target(target), Target(host))
        else:
            self.__init_handle_by_constructor__(_ffi_api.Target, target)
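All three construction routes from the docstring, sketched (the tag in route 3 assumes a CUDA-enabled TVM build that registers it):

import tvm

# 1) Raw target string.
tgt = tvm.target.Target("llvm -mcpu=skylake")

# 2) Target config dict; the host can be nested recursively.
tgt = tvm.target.Target({"kind": "cuda", "host": {"kind": "llvm"}})

# 3) Target tag.
tgt = tvm.target.Target("nvidia/nvidia-a100")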
Example #21
def compute(shape, fcompute, name="compute", tag="", attrs=None):
    """Construct a new tensor by computing over the shape domain.

    The compute rule is result[axis] = fcompute(axis)

    Parameters
    ----------
    shape: Tuple of Expr
        The shape of the tensor

    fcompute: lambda function of indices-> value
        Specifies the input source expression

    name: str, optional
        The name hint of the tensor

    tag: str, optional
        Additional tag information about the compute.

    attrs: dict, optional
        The additional auxiliary attributes about the compute.

    Returns
    -------
    tensor: Tensor
        The created tensor
    """
    if _tag.TagScope.get_current() is not None:
        if tag != "":
            raise ValueError("nested tag is not allowed for now")
        tag = _tag.TagScope.get_current().tag
    shape = (shape,) if isinstance(shape, _expr.PrimExpr) else shape
    # for python3
    shape = tuple([int(s) if isinstance(s, float) else s for s in shape])
    ndim = len(shape)
    code = fcompute.__code__

    out_ndim = ndim
    if code.co_argcount == 0:
        arg_names = ["i%d" % i for i in range(ndim)]
    else:
        arg_names = code.co_varnames[:code.co_argcount]
        out_ndim = code.co_argcount

    if out_ndim != len(arg_names):
        raise ValueError("fcompute does not match dimension, ndim=%d" % ndim)

    dim_var = [_IterVar((0, s), x, 0) for x, s in zip(arg_names, shape[:out_ndim])]
    body = fcompute(*[v.var for v in dim_var])

    if isinstance(body, _tensor.TensorIntrinCall):
        for i, s in enumerate(shape[out_ndim:]):
            var_name = "ax" + str(i)
            dim_var.append(_IterVar((0, s), var_name, 4))
        op_node = _api_internal._TensorComputeOp(name,
                                                 tag,
                                                 dim_var,
                                                 body.reduce_axis,
                                                 out_ndim,
                                                 body.intrin,
                                                 body.tensors,
                                                 body.regions,
                                                 body.scalar_inputs)
    else:
        if not isinstance(body, (list, tuple)):
            body = [body]
        body = convert(body)
        op_node = _api_internal._ComputeOp(
            name, tag, attrs, dim_var, body)

    num = op_node.num_outputs
    outputs = tuple(op_node.output(i) for i in range(num))
    return outputs[0] if num == 1 else outputs
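A minimal use of this `compute`, doubling a 1-D placeholder (written against today's `te` namespace; this older snippet predates it but behaves the same way):

from tvm import te

n = te.var("n")
A = te.placeholder((n,), name="A")
# result[i] = fcompute(i): one IterVar is created per output dimension.
B = te.compute((n,), lambda i: A[i] * 2.0, name="B")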
Example #22
def roi_align_shape_func(attrs, inputs, _):
    return [
        _roi_align_shape_func(inputs[0], inputs[1], convert(attrs.pooled_size))
    ]
Example #23
def reshape_shape_func(attrs, inputs, out_ndims):
    newshape = get_const_tuple(attrs.newshape)
    return [
        _reshape_shape_func_input_shape(inputs[0], convert(newshape),
                                        out_ndims[0])
    ]
Example #24
def full_shape_func(attrs, inputs, out_ndims):
    """
    Shape func for zeros, zeros_like, ones, ones_like.
    """
    shape = get_const_tuple(attrs.shape)
    return [_full_shape_func(convert(shape))]
Example #25
def one_hot_shape_func(attrs, inputs, _):
    """
    Shape func for one_hot
    """
    shape_func = [_one_hot_shape_func(inputs[0], convert(attrs.depth), convert(attrs.axis))]
    return shape_func
Example #26
def compute(shape,
            fcompute,
            name="compute",
            tag="",
            attrs=None,
            varargs_names=None):
    """Construct a new tensor by computing over the shape domain.

    The compute rule is result[axis] = fcompute(axis)

    Parameters
    ----------
    shape: Tuple of Expr
        The shape of the tensor

    fcompute: lambda function of indices-> value
        Specifies the input source expression

    name: str, optional
        The name hint of the tensor

    tag: str, optional
        Additional tag information about the compute.

    attrs: dict, optional
        The additional auxiliary attributes about the compute.

    varargs_names: list, optional
        The names to use for each of the varargs. If not supplied, the varargs
        will be called i1, i2, ...

    Returns
    -------
    tensor: Tensor
        The created tensor
    """
    if _tag.TagScope.get_current() is not None:
        if tag != "":
            raise ValueError("nested tag is not allowed for now")
        tag = _tag.TagScope.get_current().tag
    shape = (shape, ) if isinstance(shape, tvm.tir.PrimExpr) else shape
    # for python3
    shape = tuple([int(s) if isinstance(s, float) else s for s in shape])
    out_ndim = len(shape)

    argspec = inspect.getfullargspec(fcompute)
    if len(argspec.args) == 0 and argspec.varargs is None:
        arg_names = ["i%d" % i for i in range(out_ndim)]
    elif argspec.varargs is not None:
        # if there is a varargs, it takes the remaining dimensions of out_ndim
        num_remaining_args = out_ndim - len(argspec.args)
        if varargs_names is not None:
            if len(varargs_names) != num_remaining_args:
                raise RuntimeError(
                    f"Number of varargs ({num_remaining_args}) does not match number"
                    f"of varargs_names ({len(varargs_names)})")
            arg_names = argspec.args + varargs_names
        else:
            arg_names = argspec.args + [
                f"i{i}" for i in range(out_ndim - len(argspec.args))
            ]
    else:
        arg_names = argspec.args
        # if there are fewer args than out dimensions, the remaining dimensions
        # are implicitly broadcast
        out_ndim = len(arg_names)
    assert argspec.varkw is None, "Variable keyword arguments not supported in fcompute"
    assert argspec.defaults is None, "Default arguments not supported in fcompute"
    assert len(argspec.kwonlyargs
               ) == 0, "Keyword arguments are not supported in fcompute"

    if out_ndim != len(arg_names):
        raise ValueError(
            "Number of args to fcompute does not match dimension, "
            "args=%d, dimension=%d" % (len(arg_names), out_ndim))

    dim_var = [
        tvm.tir.IterVar((0, s), x, 0)
        for x, s in zip(arg_names, shape[:out_ndim])
    ]
    body = fcompute(*[v.var for v in dim_var])

    if isinstance(body, _tensor.TensorIntrinCall):
        for i, s in enumerate(shape[out_ndim:]):
            var_name = "ax" + str(i)
            dim_var.append(tvm.tir.IterVar((0, s), var_name, 4))
        op_node = _ffi_api.TensorComputeOp(
            name,
            tag,
            dim_var,
            body.reduce_axis,
            out_ndim,
            body.intrin,
            body.tensors,
            body.regions,
            body.scalar_inputs,
        )
    else:
        if not isinstance(body, (list, tuple)):
            body = [body]
        body = convert(body)
        op_node = _ffi_api.ComputeOp(name, tag, attrs, dim_var, body)

    num = op_node.num_outputs
    outputs = tuple(op_node.output(i) for i in range(num))
    return outputs[0] if num == 1 else outputs
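The varargs path lets one `*indices` lambda serve any rank, with `varargs_names` only relabeling the generated loop variables; a transpose sketch:

from tvm import te

n = te.var("n")
A = te.placeholder((n, n), name="A")
# argspec.args is empty, so both dimensions flow through *ij,
# and the two IterVars are named x and y.
T = te.compute((n, n), lambda *ij: A[ij[1], ij[0]],
               name="T", varargs_names=["x", "y"])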
Example #27
    def access_ptr(self,
                   access_mask,
                   ptr_type="handle",
                   content_lanes=1,
                   offset=0,
                   extent=None):
        """Get an access pointer to the head of buffer.

        This is the recommended method to get the buffer data
        address when interacting with external functions.

        Parameters
        ----------
        access_mask : int
            The access pattern MASK. Indicates whether the
            access will read or write to the data content.

        ptr_type : str, optional
            The data type of the result pointer. Do not specify
            unless we want to cast pointer to specific type.

        content_lanes: int, optional
            The number of lanes for the data type. This value
            is greater than one for vector types.

        offset: Expr, optional
            The offset of pointer. We can use it to offset by
            the number of elements from the address of ptr.

        extent: Expr, optional
            The extent of pointer.

        Examples
        --------
        .. code-block:: python

          # Get access ptr for read
          buffer.access_ptr("r")
          # Get access ptr for read/write with bitmask
          buffer.access_ptr(Buffer.READ | Buffer.WRITE)
          # Get access ptr for read/write with str flag
          buffer.access_ptr("rw")
          # Get access ptr for read with offset
          buffer.access_ptr("r", offset = 100)
          # Get access ptr for read with extent
          buffer.access_ptr("r", extent = 100)
        """
        if isinstance(access_mask, string_types):
            mask = 0
            for value in access_mask:
                if value == "r":
                    mask = mask | Buffer.READ
                elif value == "w":
                    mask = mask | Buffer.WRITE
                else:
                    raise ValueError("Unknown access_mask %s" % access_mask)
            access_mask = mask
        offset = convert(offset)
        extent = convert(extent)
        return _ffi_api.BufferAccessPtr(
            self,
            access_mask,
            ptr_type,
            content_lanes,
            offset,
            extent  # type: ignore
        )
Example #28
def dynamic_reshape_shape_func(attrs, inputs, out_ndims):
    allowzero = attrs.allowzero
    return [
        _reshape_shape_func_input_data(*inputs, out_ndims[0],
                                       convert(allowzero))
    ]