Example #1
def pop_stmt(self):
    """Create a statement from the statements within the current stage."""
    stmts = self.stmt_stack.pop()
    # An empty stage (or one ending in a pending callable) still needs a body.
    if not stmts or callable(stmts[-1]):
        stmts.append(_stmt.Evaluate(0))
    # Wrap a list of statements into a single Stmt, restoring source order.
    stmtwrap = lambda x: x[0] if len(x) == 1 else _stmt.SeqStmt(
        list(reversed(x)))
    ret_stmt = [stmts[-1]]
    for s in reversed(stmts[:-1]):
        if callable(s):
            # A callable entry wraps the statements gathered so far
            # (e.g. into a scope) and the walk continues from its result.
            ret_stmt = [s(stmtwrap(ret_stmt))]
        else:
            assert isinstance(s, _stmt.Stmt)
            ret_stmt.append(s)
    return stmtwrap(ret_stmt)
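
The helper relies on just two TIR nodes, Evaluate and SeqStmt. Below is a minimal standalone sketch of the wrapping step, assuming a TVM build that exposes them as tvm.tir.Evaluate and tvm.tir.SeqStmt (older releases keep them elsewhere); it is an illustration, not part of the source above.

import tvm

# Three placeholder statements, standing in for what a stage accumulates.
stmts = [tvm.tir.Evaluate(tvm.runtime.const(i, "int32")) for i in range(3)]

# Same wrapper as above: a single statement passes through unchanged, while
# several are reversed (undoing the stack order) and packed into one SeqStmt.
stmtwrap = lambda x: x[0] if len(x) == 1 else tvm.tir.SeqStmt(list(reversed(x)))

print(stmtwrap(stmts[:1]))  # the lone Evaluate node
print(stmtwrap(stmts))      # one SeqStmt holding all three Evaluate nodes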
Example #2
File: util.py Project: yongwww/tvm
def make_nop():
    """Returns a 'no operation' node in HalideIR."""
    return _stmt.Evaluate(tvm.runtime.const(0, dtype='int32'))
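
Such a nop is useful wherever the IR expects a statement but there is nothing to emit; pop_stmt above, for example, appends the equivalent _stmt.Evaluate(0) when a stage's statement list is empty.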
Example #3
def extern(shape,
           inputs,
           fcompute,
           name="extern",
           dtype=None,
           in_buffers=None,
           out_buffers=None,
           tag="",
           attrs=None):
    """Compute several tensor via extern function.

    Parameters
    ----------
    shape: tuple or list of tuples.
        The shape of the outputs.

    inputs: list of Tensor
        The inputs

    fcompute: lambda function of inputs, outputs -> stmt
        Specifies the IR statement that carries out the computation.
        See the following note for the function signature of fcompute.

        .. note::
             **Parameters**

             - **ins** (list of :any:`Buffer`) - Placeholder for each input
             - **outs** (list of :any:`Buffer`) - Placeholder for each output

             **Returns**

             - **stmt** (:any:`Stmt`) - The statement that carries out array computation.

    name: str, optional
        The name hint of the tensor

    dtype: str or list of str, optional
        The data types of the outputs;
        by default dtype will be the same as the inputs.

    in_buffers: Buffer or list of Buffer, optional
        Input buffers.

    out_buffers: Buffer or list of Buffer, optional
        Output buffers.

    tag: str, optional
        Additional tag information about the compute.

    attrs: dict, optional
        The additional auxiliary attributes about the compute.

    Returns
    -------
    tensor: Tensor or list of Tensors
        The created tensor or tuple of tensors if it contains multiple outputs.

    Example
    -------
    In the code below, C is generated by calling the external PackedFunc
    `tvm.contrib.cblas.matmul`.

    .. code-block:: python

        A = tvm.placeholder((n, l), name='A')
        B = tvm.placeholder((l, m), name='B')
        C = tvm.extern((n, m), [A, B],
                       lambda ins, outs: tvm.call_packed(
                           "tvm.contrib.cblas.matmul",
                           ins[0], ins[1], outs[0], 0, 0), name="C")
    """
    if _tag.TagScope.get_current() is not None:
        if tag != "":
            raise ValueError("nested tag is not allowed for now")
        tag = _tag.TagScope.get_current().tag
    # Normalize `shape` into a list of output shapes.
    shape = (shape,) if isinstance(shape, (_expr.PrimExpr, _Integral)) else shape
    if shape == () or isinstance(shape[0], (_expr.PrimExpr, _Integral)):
        shape = [shape]
    if in_buffers is not None:
        in_buffers = in_buffers if isinstance(in_buffers, list) else [in_buffers]
        if len(inputs) != len(in_buffers):
            raise RuntimeError(
                "Number of inputs and in_buffers mismatch: %d vs %d." %
                (len(inputs), len(in_buffers)))
    if out_buffers is not None:
        out_buffers = out_buffers if isinstance(out_buffers, list) else [out_buffers]
        if len(shape) != len(out_buffers):
            raise RuntimeError(
                "Number of outputs and out_buffers mismatch: %d vs %d." %
                (len(shape), len(out_buffers)))
    input_placeholders = in_buffers or []
    output_placeholders = out_buffers or []
    types = set()
    for t in inputs:
        if not isinstance(t, _tensor.Tensor):
            raise ValueError("expect inputs to be tensor")
        if in_buffers is None:
            input_placeholders.append(decl_buffer(t.shape, t.dtype, t.op.name))
        types.add(t.dtype)

    if dtype is None:
        if len(types) != 1:
            raise ValueError(
                "Cannot infer output type, please provide dtype argument")
        inferred_type = types.pop()
        dtype = [inferred_type for _ in shape]
    if isinstance(dtype, str):
        dtype = [dtype]

    if out_buffers is None:
        for shp, dt in zip(shape, dtype):
            output_placeholders.append(decl_buffer(shp, dt, name))
    body = fcompute(input_placeholders, output_placeholders)
    if isinstance(body, _expr.PrimExpr):
        body = _stmt.Evaluate(body)

    op = _api_internal._ExternOp(name, tag, attrs, inputs, input_placeholders,
                                 output_placeholders, body)
    res = [op.output(i) for i in range(len(output_placeholders))]
    return res[0] if len(res) == 1 else res
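
A hedged usage sketch (not from the source) of the in_buffers/out_buffers path: when the caller declares the buffers, extern reuses them instead of creating compact ones. "my_copy" is a hypothetical PackedFunc registered elsewhere, and the classic flat tvm.* namespace from the docstring example above is assumed.

import tvm

n = tvm.var("n")
A = tvm.placeholder((n,), name="A")

# Caller-declared buffers; extern skips its own decl_buffer calls for these.
Abuf = tvm.decl_buffer(A.shape, A.dtype, "A_buf")
Cbuf = tvm.decl_buffer((n,), A.dtype, "C_buf")

C = tvm.extern(
    (n,), [A],
    lambda ins, outs: tvm.call_packed("my_copy", ins[0], outs[0]),
    in_buffers=Abuf, out_buffers=Cbuf, dtype=A.dtype, name="C")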
Example #4
def decl_tensor_intrin(op,
                       fcompute,
                       name="tensor_intrin",
                       binds=None,
                       scalar_params=None):
    """Declare a tensor intrinsic function.

    Parameters
    ----------
    op: Operation
        The symbolic description of the intrinsic operation

    fcompute: lambda function of inputs, outputs -> stmt
        Specifies the IR statement that carries out the computation.
        See the following note for the function signature of fcompute.

        .. note::
             **Parameters**

             - **ins** (list of :any:`Buffer`) - Placeholder for each input
             - **outs** (list of :any:`Buffer`) - Placeholder for each output

             **Returns**

             - **stmt** (:any:`Stmt`, or tuple of three stmts)
             - If a single stmt is returned, it represents the body
             - If a tuple of three stmts is returned, they correspond to body,
               reduce_init, and reduce_update

    name: str, optional
        The name of the intrinsic.

    binds: dict of :any:`Tensor` to :any:`Buffer`, optional
        Dictionary that maps a Tensor to the Buffer which specifies the data
        layout requirement of the function. By default, a new compact buffer is
        created for each tensor in the argument.

    scalar_params: list of variables, optional
        The variables used by op, whose values will be passed as scalar_inputs
        when the tensor intrinsic is called.

    Returns
    -------
    intrin: TensorIntrin
        A TensorIntrin that can be used in a tensorize schedule.
    """
    if not isinstance(op, _tensor.Operation):
        raise TypeError("expect Operation")
    inputs = op.input_tensors
    binds = binds if binds else {}
    tensors = list(inputs)
    for i in range(op.num_outputs):
        tensors.append(op.output(i))

    binds_list = []
    for t in inputs:
        if not isinstance(t.op, _tensor.PlaceholderOp):
            raise ValueError("Do not yet support composition op")

    cfg = current_build_config()
    for t in tensors:
        buf = (binds[t] if t in binds else _api.decl_buffer(
            t.shape,
            t.dtype,
            t.op.name,
            data_alignment=cfg.data_alignment,
            offset_factor=cfg.offset_factor))
        binds_list.append(buf)

    if scalar_params:
        body = fcompute(binds_list[:len(inputs)], binds_list[len(inputs):],
                        scalar_params)
    else:
        body = fcompute(binds_list[:len(inputs)], binds_list[len(inputs):])
        scalar_params = []
    if isinstance(body, (_expr.PrimExpr, _stmt.Stmt)):
        body = [body]
    body = [
        _stmt.Evaluate(x) if isinstance(x, _expr.PrimExpr) else x for x in body
    ]
    if len(body) < 3:
        body += [None] * (3 - len(body))
    return _api_internal._TensorIntrin(name, op, inputs, binds_list,
                                       scalar_params, *body)
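
A hedged sketch of how the result is typically used, loosely following the tensorize tutorials rather than this file: declare a small vector-add intrinsic whose body calls a packed function. The names intrin_vadd and "vadd" are illustrative only, and the classic flat tvm.* namespace used above is assumed.

import tvm

def intrin_vadd(n):
    x = tvm.placeholder((n,), name="x")
    y = tvm.placeholder((n,), name="y")
    z = tvm.compute((n,), lambda i: x[i] + y[i], name="z")

    def intrin_func(ins, outs):
        # ins/outs are the Buffer placeholders built by decl_tensor_intrin;
        # replace the elementwise loop with one call to a packed "vadd" kernel.
        xb, yb = ins
        zb = outs[0]
        return tvm.call_packed("vadd", xb.access_ptr("r"),
                               yb.access_ptr("r"), zb.access_ptr("w"), n)

    return tvm.decl_tensor_intrin(z.op, intrin_func, name="vadd")

vadd = intrin_vadd(16)  # later usable as s[C].tensorize(axis, vadd)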