Example #1
def test_custom_op_rel_infer_exception():
    """Tests infer type for custom_op"""
    def custom_log1_rel(arg_types, attrs):
        assert len(arg_types) == 2, "type relation arg number mismatch!"
        return None

    op_name = "custom_log2"
    _op.register(op_name, r"code(calculate the log of a tensor.)code")
    _op.get(op_name).set_num_inputs(1)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    _op.get(op_name).set_attrs_type_key("DictAttrs")
    # call customized relation functions
    _op.get(op_name).add_type_rel("custom_log2", custom_log1_rel)
    _op.get(op_name).set_support_level(1)
    _op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(op_name, False)

    def clog(x):
        return relay.Call(_op.get(op_name), [x])

    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", clog(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    with pytest.raises(tvm.error.TVMError) as cm:
        infer_expr(f)
    # custom_log1_rel asserts two argument types, but custom_log2 takes a
    # single input, so type inference raises the mismatch error.
    assert "type relation arg number mismatch" in str(cm.value)
Example #2
def test_custom_op_rel_infer():
    """Tests infer type for custom_op"""
    def custom_log1_rel(arg_types, attrs):
        assert len(arg_types) == 1, "type relation arg number mismatch!"
        if attrs:
            assert isinstance(attrs, DictAttrs)
        input_type = arg_types[0]
        return relay.TensorType(input_type.shape, input_type.dtype)

    op_name = "custom_log1"
    _op.register(op_name, r"code(calculate the log of a tensor.)code")
    _op.get(op_name).set_num_inputs(1)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    _op.get(op_name).set_attrs_type_key("DictAttrs")
    # call customized relation functions
    _op.get(op_name).add_type_rel("custom_log1", custom_log1_rel)
    _op.get(op_name).set_support_level(1)
    _op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(op_name, False)

    def clog(x):
        return relay.Call(_op.get(op_name), [x])

    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", clog(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    fchecked = infer_expr(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
Example #3
def test_op_register():
    """Tests register_op functionality."""
    op_name = "custom_op"

    _op.register(op_name, r"code(Add two tensor with inner broadcasting.)code")
    _op.get(op_name).set_num_inputs(2)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    _op.get(op_name).add_argument("data_1", "Tensor", "The input data tensor.")
    # call default relation functions
    _op.get(op_name).add_type_rel("Identity")
    _op.get(op_name).set_support_level(1)
    _op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(op_name, False)

    assert _op.get(op_name).name == op_name
    assert _op.get(op_name).num_inputs == 2
    assert _op.get(op_name).get_attr("TOpPattern") == _op.OpPattern.ELEMWISE
    assert _op.get(op_name).get_attr("TOpIsStateful") == False
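
A quick follow-up check on the same registration (a minimal sketch; it assumes tvm.ir.Op exposes support_level and arguments as node fields, which may vary across TVM versions):

    op = _op.get("custom_op")
    # Fields populated by the registration calls above.
    assert op.support_level == 1
    assert [arg.name for arg in op.arguments] == ["data_0", "data_1"]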
Example #4
def test_custom_op_infer():
    """Tests infer type for custom_op"""
    op_name = "custom_log"
    _op.register(op_name, r"code(calculate the log of a tensor.)code")
    _op.get(op_name).set_num_inputs(1)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    # call default relation functions
    _op.get(op_name).add_type_rel("Identity")
    _op.get(op_name).set_support_level(1)
    _op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(op_name, False)

    def clog(x):
        return relay.Call(_op.get(op_name), [x])

    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", clog(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    fchecked = infer_expr(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
Example #5
import tvm
from tvm import te
from tvm import topi

from tvm.relay.op import op as reg
from tvm.relay.op import strategy as _strategy
from tvm.relay.op.op import OpPattern, OpStrategy

from .utils import is_packed_layout
from .vta_conv2d import conv2d_packed, schedule_conv2d_packed
from .vta_conv2d_transpose import conv2d_transpose_packed, schedule_conv2d_transpose_packed
from .vta_group_conv2d import group_conv2d_packed, schedule_group_conv2d_packed
from .vta_dense import dense_packed, schedule_dense_packed
from ..environment import get_env

# override to force partition at copy
reg.register_pattern("copy", OpPattern.INJECTIVE, level=15)


# add clip vta strategy
def compute_clip_vta(attrs, inputs, output_type):
    """ Clip operator. """
    x = inputs[0]
    a_min = attrs.a_min
    a_max = attrs.a_max
    const_min = tvm.tir.const(a_min, x.dtype)
    const_max = tvm.tir.const(a_max, x.dtype)
    with tvm.te.tag_scope(topi.tag.ELEMWISE):
        x = te.compute(x.shape,
                       lambda *i: tvm.te.min(x(*i), const_max),
                       name="clipA")
        x = te.compute(x.shape,
                       lambda *i: tvm.te.max(x(*i), const_min),
                       name="clipB")
    return [x]
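
In VTA's op.py this compute is then wrapped into an operator strategy that overrides the stock clip at a higher priority; a sketch of that registration, using the strategy helpers from the imports above (details may differ across TVM versions):

def clip_strategy_vta(attrs, inputs, out_type, target):
    strategy = OpStrategy()
    strategy.add_implementation(
        compute_clip_vta,
        _strategy.wrap_topi_schedule(topi.generic.schedule_injective),
        name="clip.vta",
    )
    return strategy

reg.get("clip").get_attr("FTVMStrategy").register(clip_strategy_vta, "vta")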
Example #6
def bitpack(data, bits, pack_type="int8", name="bitpack"):
    """Pack groups of `bits`-bit values along the last axis into `pack_type` words.

    The enclosing definition is reconstructed here (the original excerpt began
    at the inner closure); the shape bookkeeping below is an assumption.
    """
    data_width = 8  # the caller below only ever packs int8 data
    assert data_width % bits == 0
    lanes = data_width // bits  # elements packed per output word
    oshape = list(data.shape)
    oshape[-1] = oshape[-1] // lanes

    def _bitpack(*indices):
        ret = None
        mask = tvm.const((1 << bits) - 1, pack_type)
        for k in range(lanes):
            idx = list(indices)
            idx[-1] = idx[-1] * lanes + k
            elem = data(*idx).astype(pack_type)
            if k == 0:
                ret = elem & mask
            else:
                val = (elem & mask) << tvm.const(k * bits, pack_type)
                ret = ret | val
        return ret

    return tvm.compute(oshape, _bitpack, name=name, tag='bitpack')


@register_compute("bitpack", level=15)
def compute_bitpack(attrs, inputs):
    lanes = attrs.lanes
    dtype = inputs[0].dtype
    assert dtype == "int8"
    width = 8
    assert width % lanes == 0
    bits = 8 // lanes
    return bitpack(inputs[0], bits, dtype)


register_schedule("bitpack", schedule_injective)
register_pattern("bitpack", OpPattern.INJECTIVE)
Example #7
@_reg.register_compute("relay.op.annotation.simulated_quantize")
def simulated_quantize_compute(attrs, inputs, out_type):
    """Compiler for simulated_quantize op."""
    assert len(inputs) == 4
    assert attrs.sign
    assert attrs.rounding == "round"

    data, scale, clip_min, clip_max = inputs

    if attrs.kind == QAnnotateKind.IDENTITY:
        return [topi.identity(data)]

    # simulate rounding error
    scaled_data = topi.divide(data, scale)
    clipped_data = topi.maximum(topi.minimum(scaled_data, clip_max), clip_min)
    round_data = topi.round(clipped_data)

    # recover data
    rdata = topi.multiply(round_data, scale)
    return [rdata]


_reg.register_injective_schedule("relay.op.annotation.simulated_quantize")
_reg.register_pattern("relay.op.annotation.simulated_quantize",
                      _reg.OpPattern.ELEMWISE)
_reg.register_injective_schedule("annotation.cast_hint")


@tvm._ffi.register_object("relay.QAnnotateExpr")
class QAnnotateExpr(_expr.TempExpr):
    """A special kind of Expr for Annotating.

    Parameters
    ----------
    expr: Expr
        The original Relay IR expression.

    kind: QAnnotateKind
        The kind of annotation field.
    """
Example #8

# dense
@reg.register_compute("nn.custom_dense")
def compute_custom_dense(attrs, inputs, out_type, target):
    out_dtype = inputs[0].dtype
    return [topi.nn.dense(inputs[0], inputs[1], None, out_dtype)]


@reg.register_schedule("nn.custom_dense")
def schedule_dense(attrs, outputs, target):
    with target:
        return topi.generic.schedule_dense(outputs)


reg.register_pattern("nn.custom_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)


# weight pack
@reg.register_compute("nn.dense_weight_pack")
def compute_dense_weight_pack(attrs, inputs, out_type, target):
    return [custom_fp32_dense.dense_weight_pack(inputs[0], attrs.pack_width)]


@reg.register_schedule("nn.dense_weight_pack")
def schedule_dense_weight_pack(attrs, outputs, target):
    with target:
        return custom_fp32_dense.schedule_dense_weight_pack(outputs)
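
The excerpt ends here; by symmetry with nn.custom_dense above, a pattern registration for the pack op would presumably close the file (a sketch, not part of the original listing):

reg.register_pattern("nn.dense_weight_pack", reg.OpPattern.OUT_ELEMWISE_FUSABLE)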