Example #1
def test_recursion():
    """
    Program:
       def @f(%n: int32, %data: float32) -> float32 {
          if (%n == 0) {
              %data
          } else {
              @f(%n - 1, log(%data))
          }
       }
    """
    sb = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    ti32 = relay.scalar_type("int32")
    tf32 = relay.scalar_type("float32")
    n = relay.var("n", ti32)
    data = relay.var("data", tf32)

    with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
        sb.ret(data)
    with sb.else_scope():
        sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
    mod = relay.Module()
    mod[f] = relay.Function([n, data], sb.get())
    assert "@f(%1, %2) /* ty=float32 */" in mod.astext()
    assert mod[f].checked_type == relay.FuncType([ti32, tf32], tf32)
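For reference, @f in the docstring applies log to data n times. A plain-Python sketch of the same computation (illustrative only, not part of the test):

    import math

    def f_reference(n, data):
        # Mirrors @f: recurse n times, taking log(data) at each step.
        return data if n == 0 else f_reference(n - 1, math.log(data))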
Example #2
 def expected():    
     add = relay.add(a, b)
     mul = relay.multiply(c, d)
     copy_mul_sub = relay.device_copy(mul, cpu_ctx, dev_ctx)
     sub = relay.subtract(add, copy_mul_sub)
     func = relay.Function([a, b, c, d], sub)
     return func
Example #3
 def annotated():
     add = relay.add(x, y)
     sub = relay.subtract(add, z)
     func = relay.Function([x, y, z], sub)
     func = relay.ir_pass.infer_type(func)
     func = relay.ir_pass.rewrite_annotated_ops(func,
                                                ctx1.device_type)
     return func
Example #4
    def get_func():
        add = relay.add(x, y)
        sqrt = relay.sqrt(add)
        log = relay.log(add)
        subtract = relay.subtract(sqrt, log)
        exp = relay.exp(subtract)

        func = relay.Function([x, y], exp)
        return func
Example #5
        def expected():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            copy_sub_exp = relay.device_copy(subtract, dev_ctx, cpu_ctx)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example #6
    def annotated():
        add = relay.add(x, y)
        _add = relay.annotation.on_device(add, ctx2)
        sub = relay.subtract(add, z)
        _sub = relay.annotation.on_device(sub, ctx2)

        func = relay.Function([x, y, z],
                              relay.Tuple(tvm.convert([_add, _sub,
                                                       sub])))
        func = relay.ir_pass.infer_type(func)
        func = relay.ir_pass.rewrite_annotated_ops(func,
                                                   ctx1.device_type)
        func = relay.ir_pass.infer_type(func)
        return relay.Function(relay.ir_pass.free_vars(func.body[2]),
                              func.body[2])
Example #7
def test_simple_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype='int32'))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
    mod[sum_up] = func
    i_data = np.array(10, dtype='int32')
    check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)
Example #8
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y],
                                  relay.Tuple(tvm.convert([_exp, exp])))
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            func = relay.ir_pass.infer_type(func)
            return relay.Function(relay.ir_pass.free_vars(func.body[1]),
                                  func.body[1])
Example #9
 def annotated():    
     add = relay.add(a, b)
     _add = relay.annotation.on_device(add, dev_ctx)
     mul = relay.multiply(c, d)
     _mul = relay.annotation.on_device(mul, cpu_ctx)
     sub = relay.subtract(add, mul)
     _sub = relay.annotation.on_device(sub, dev_ctx)
     func = relay.Function([a, b, c, d],
                           relay.Tuple(tvm.convert([_add, _mul,
                                                    _sub, sub])))
     func = relay.ir_pass.infer_type(func)
     func = relay.ir_pass.rewrite_annotated_ops(func,
                                                dev_ctx.device_type)
     func = relay.ir_pass.infer_type(func)
     return relay.Function(relay.ir_pass.free_vars(func.body[3]),
                           func.body[3])
Example #10
def test_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    accum = relay.var('accum', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1))
        new_accum = relay.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    i_data = np.array(10, dtype='int32')
    accum_data = np.array(0, dtype='int32')
    check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), mod=mod)
Example #11
def test_count_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype='int32'))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
    mod[sum_up] = func
    i_data = np.array(0, dtype='int32')
    iarg = relay.var('i', shape=[], dtype='int32')
    mod[mod.entry_func] = relay.Function([iarg], sum_up(iarg))
    result = veval(mod, i_data)
    tvm.testing.assert_allclose(result.asnumpy(), i_data)
Example #12
def test_let_if_scope():
    x = relay.var("x", "float32")
    y = relay.var("y", "float32")
    cond = relay.var("cond", "bool")

    sb = relay.ScopeBuilder()
    with sb.if_scope(cond):
        v1 = sb.let("v", relay.const(1, "float32"))
        v2 = sb.let("v", x)
        sb.ret(relay.subtract(v1, v2))
    with sb.else_scope():
        v3 = relay.var("v")
        let2 = relay.Let(v3, y, v3)
        sb.ret(relay.add(let2, let2))
    result = sb.get()

    f = relay.Function([x, y, cond], result)
    text = f.astext()
    assert text.count("{") == 4
    assert "%cond: bool" in text
    show(f.astext())
Example #13
 def before(x, conv_weight, in_bias, in_scale, channels):
     args = [x, conv_weight, in_bias, in_scale]
     x = relay.multiply(in_scale, x)
     x = relay.nn.relu(x)
     x = relay.subtract(x, in_bias)
     y1 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          weight_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     y2 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          weight_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     z = relay.add(y1, y2)
     return relay.Function(args, z)
Example #14
def test_sum_loop():
    mod = relay.module.Module({})
    sum_up = relay.GlobalVar('sum_up')
    i = relay.var('i', shape=[], dtype='int32')
    accum = relay.var('accum', shape=[], dtype='int32')
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, 'int32'))
        new_accum = relay.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    loop_bound = 0
    i_data = np.array(loop_bound, dtype='int32')
    accum_data = np.array(0, dtype='int32')
    iarg = relay.var('i', shape=[], dtype='int32')
    aarg = relay.var('accum', shape=[], dtype='int32')
    mod[mod.entry_func] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
    result = veval(mod, i_data, accum_data)
    tvm.testing.assert_allclose(result.asnumpy(), sum(range(1, loop_bound + 1)))
Example #15
 def expected(x, conv_weight, in_bias, in_scale, channels):
     args = [x, conv_weight, in_bias]
     x = relay.nn.relu(x)
     in_bias = relay.divide(in_bias, in_scale)
     x = relay.subtract(x, in_bias)
     y1 = relay.nn.conv2d(x,
                          relay.multiply(conv_weight, in_scale),
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          kernel_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     y2 = relay.nn.conv2d(x,
                          relay.multiply(conv_weight, in_scale),
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          kernel_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     z = relay.add(y1, y2)
     return relay.Function(args, z)
Example #16
 def get_ref_sub():
     ref_sub = relay.Function([x, y], relay.subtract(relay.add(x, x), relay.add(y, y)))
     return ref_sub
Example #17
    def _execute(self):
        self.node_dict = {}
        # self.node_dict['1'] = relay.const(np.zeros((1, 128)), dtype='int32')
        gelu_a = relay.var('gelu_a', shape=())
        gelu_b = relay.var('gelu_b', shape=())
        gelu_c = relay.var('gelu_c', shape=())
        gelu_d = relay.var('gelu_d', shape=())
        gelu_e = relay.var('gelu_e', shape=())

        self.node_dict['1'] = relay.var('input.1', shape=(1,128), dtype='int32')
        self.node_dict['2'] = relay.var('input.2', shape=(1,128), dtype='int32')
        for gnode in self.graph:
            name = gnode['name']
            op_type = gnode['op_type']
            attrs = gnode['attrs']
            del attrs['A_shape']
            del attrs['O_shape']

            inputs = gnode['inputs']

            if op_type == 'Const':
                arr = np.zeros(attrs['shape'], dtype=np.int32)
                y =  relay.const(arr, dtype='int32')

            elif op_type == 'expand_dims':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.expand_dims(x, attrs['axis'], attrs['num_newaxis'])

            elif op_type == 'reshape':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.reshape(x, attrs['newshape'])
            elif op_type == 'take':
                data = get_input(self.node_dict, self.params, inputs[0])
                indices = get_input(self.node_dict, self.params, inputs[1])
                y = relay.take(data, indices, axis=attrs['axis'][0], mode=attrs['mode'])
            elif op_type == 'one_hot':
                x = get_input(self.node_dict, self.params, inputs[0])
                cc1 = get_input(self.node_dict, self.params, inputs[1])
                cc2 = get_input(self.node_dict, self.params, inputs[2])
                y = relay.one_hot(x, cc1, cc2, **attrs)
            elif op_type == 'strided_slice':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.strided_slice(x, **attrs)
            elif op_type == 'mean':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.mean(x, axis=attrs['axis'],
                        exclude=attrs['exclude'],
                        keepdims=attrs['keepdims'])
            elif op_type == 'nn.dense':
                x = get_input(self.node_dict, self.params, inputs[0])
                weight = get_input(self.node_dict, self.params, inputs[1])
                y = relay.nn.dense(x, weight, units=attrs['units'][0])
            elif op_type == 'add':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.add(x1, x2)
            elif op_type == 'subtract':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.subtract(x1, x2)
            elif op_type == 'multiply':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.multiply(x1, x2)
            elif op_type == 'power':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.power(x1, x2)
            elif op_type == 'transpose':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.transpose(x, **attrs)
            elif op_type == 'tanh':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.tanh(x)
            elif op_type == 'squeeze':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.squeeze(x, **attrs)
            elif op_type == 'nn.batch_matmul':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.nn.batch_matmul(x1, x2)
            elif op_type == 'nn.softmax':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.nn.softmax(x, **attrs)
            elif op_type == 'gelu':
                x = get_input(self.node_dict, self.params, inputs[0])
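                # The expression below is the tanh-based GELU approximation,
                # assuming the usual bindings gelu_a=0.5, gelu_b=1.0,
                # gelu_c=sqrt(2/pi), gelu_d=0.044715, gelu_e=3.0 (these values
                # are not set by this builder).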
                y = x * gelu_a * (gelu_b + relay.tanh(
                        gelu_c * (x + gelu_d * relay.power(x, gelu_e))))
            else:
                import pdb; pdb.set_trace()
                print( 'not supported op %s ' % op_type)
            self.node_dict[name] = y

        output_name = self.output_node_ids[0]
        output = self.node_dict[output_name]

        inputs = relay.analysis.free_vars(output)
        # inputs = [self.node_dict['1'], self.node_dict['2']]
        func = relay.Function(inputs, output)
        mod = tvm.IRModule()
        mod['main'] = func

        with relay.build_config(opt_level=0):
            graph, lib, params = relay.build(mod, 'llvm', params={})
        self.m = graph_runtime.create(graph, lib, tvm.cpu())
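The builder above stops after creating the runtime module. A hypothetical companion method (name and bindings are ours, not from the original class) could feed the inputs and read back the result:

    def _run(self, input_1, input_2):
        # Bind the graph's free variables by name; the two int32 token inputs
        # declared in _execute are 'input.1' and 'input.2'. If the graph holds
        # a gelu node, the gelu_* scalars must be bound as well.
        self.m.set_input('input.1', input_1)
        self.m.set_input('input.2', input_2)
        self.m.run()
        return self.m.get_output(0).asnumpy()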
Example #18
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    arg_types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """

    # Dilation not supported yet. Return None if dilation is not (1, 1)
    dilation = attrs.get_int_tuple("dilation")
    if not (dilation[0] == 1 and dilation[1] == 1):
        return None

    # No legalization for depthwise convolutions yet.
    groups = attrs.get_int("groups")
    if groups != 1:
        return None

    # Collect the input tensors.
    data_tensor, kernel_tensor = arg_types[0], arg_types[1]
    data_dtype = data_tensor.dtype
    kernel_dtype = kernel_tensor.dtype

    # Collect the output tensor.
    output_tensor = arg_types[2]

    # Collect the input exprs.
    data, kernel = inputs

    # Get the conv attrs
    new_attrs = {k: attrs[k] for k in attrs.keys()}

    is_int8_inputs = False
    # If both the inputs are int8, we can add 128 to make the input dtype uint8, and then adjust the
    # output. This helps in picking up Intel VNNI instructions.
    # Original --> C = A (conv) B
    # A and B are int8
    #   C = (A + 128 - 128) (conv) B
    #   C = (A' conv B) - 128 (conv) B
    # where A' = A + 128
    # and 128 (conv) B is basically a reduce on CRS axis for weights.
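    # Concretely, for one output element with inputs a_i and weights w_i:
    #   sum_i (a_i + 128) * w_i = sum_i a_i * w_i + 128 * sum_i w_i
    # so subtracting adjust_shift = 128 * sum_i w_i (computed below) recovers
    # the original int8 result.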
    if data_tensor.dtype == "int8" and kernel_tensor.dtype == "int8":
        is_int8_inputs = True
        padding = attrs.get_int_tuple("padding")
        kh, kw = attrs.get_int_tuple("kernel_size")
        pt, pl, pb, pr = get_pad_tuple(padding, (kh, kw))

        if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
            adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"),
                                     axis=(0, 1, 2))
            pad_width = ((0, 0), (pt, pb), (pl, pr), (0, 0))
        elif attrs["data_layout"] == "NCHW" and attrs[
                "kernel_layout"] == "OIHW":
            pad_width = ((0, 0), (0, 0), (pt, pb), (pl, pr))
            adjust_shift = relay.sum(relay.cast(kernel, dtype="int32"),
                                     axis=(1, 2, 3))
            adjust_shift = relay.expand_dims(adjust_shift,
                                             axis=1,
                                             num_newaxis=2)
        else:
            return None

        data = relay.cast(data, "int32")
        data = relay.add(data, relay.const(128, "int32"))
        data = relay.cast(data, "uint8")

        # Do external padding as pad value has to be 128.
        if any(padding):
            data = relay.nn.pad(data, pad_width=pad_width, pad_value=128)
        new_attrs["padding"] = (0, 0)

        # The data type is now shifted to uint8
        data_dtype = "uint8"

        # Multiply 128 to adjust shift.
        adjust_shift = relay.multiply(adjust_shift, relay.const(128, "int32"))

    # Legalize if the datatypes are suitable for fast Int8 instructions.  Int8 instructions require
    # input channel to be a multiple of 4 and output channels to be a multiple of 16. For input
    # channels, we pad both the inputs and weights input channels. For output channels, we pad the
    # weight and strided_slice the output.
    if is_int8_hw_support(data_dtype, kernel_dtype):
        # Flags to remember if the expr is modified
        ic_modified = False
        oc_modified = False

        # Find the value of input and output channel.
        in_channel = -1
        out_channel = -1
        if attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
            in_channel = data_tensor.shape[3].value
            out_channel = kernel_tensor.shape[3].value
        elif attrs["data_layout"] == "NCHW" and attrs[
                "kernel_layout"] == "OIHW":
            in_channel = data_tensor.shape[1].value
            out_channel = kernel_tensor.shape[0].value
        else:
            return None

        if in_channel % 4 != 0:
            new_in_channel = ((in_channel + 4) // 4) * 4
            diff = new_in_channel - in_channel
            if attrs["data_layout"] == "NHWC" and attrs[
                    "kernel_layout"] == "HWIO":
                data = relay.nn.pad(data,
                                    pad_width=((0, 0), (0, 0), (0, 0), (0,
                                                                        diff)))
                kernel = relay.nn.pad(kernel,
                                      pad_width=((0, 0), (0, 0), (0, diff),
                                                 (0, 0)))
                ic_modified = True
            elif attrs["data_layout"] == "NCHW" and attrs[
                    "kernel_layout"] == "OIHW":
                pad_width = ((0, 0), (0, diff), (0, 0), (0, 0))
                data = relay.nn.pad(data, pad_width=pad_width)
                kernel = relay.nn.pad(kernel, pad_width=pad_width)
                ic_modified = True
            else:
                return None

        new_out_channel = out_channel
        if out_channel % 16 != 0:
            new_out_channel = ((out_channel + 16) // 16) * 16
            diff = new_out_channel - out_channel
            if attrs["data_layout"] == "NHWC" and attrs[
                    "kernel_layout"] == "HWIO":
                kernel = relay.nn.pad(kernel,
                                      pad_width=((0, 0), (0, 0), (0, 0),
                                                 (0, diff)))
                oc_modified = True
            elif attrs["data_layout"] == "NCHW" and attrs[
                    "kernel_layout"] == "OIHW":
                kernel = relay.nn.pad(kernel,
                                      pad_width=((0, diff), (0, 0), (0, 0),
                                                 (0, 0)))
                oc_modified = True
            else:
                return None

        if oc_modified:
            new_attrs["channels"] = new_out_channel
            out = tvm.relay.nn.conv2d(data, kernel, **new_attrs)
            original_out_shape = [x.value for x in output_tensor.shape]
            out = relay.strided_slice(out,
                                      begin=[0, 0, 0, 0],
                                      end=original_out_shape)
        else:
            out = relay.nn.conv2d(data, kernel, **new_attrs)

        if is_int8_inputs:
            out = relay.subtract(out, adjust_shift)

        return out
    return None
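The padding logic above rounds the channel counts up to the next multiple of 4 (input channels) or 16 (output channels). A stand-alone sketch of that arithmetic (the helper name is ours, not TVM's):

    def round_up(value, multiple):
        # ((value + multiple) // multiple) * multiple, as written in the pass,
        # gives the same result whenever value is not already a multiple,
        # which is the only case the pass handles.
        return ((value + multiple - 1) // multiple) * multiple

    assert round_up(3, 4) == 4      # input channels padded to a multiple of 4
    assert round_up(17, 16) == 32   # output channels padded to a multiple of 16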
Example #19
def test_subtract():
    i = relay.var('i', shape=[], dtype='int32')
    sub = relay.subtract(i, relay.const(1, dtype='int32'))
    func = relay.Function([i], sub, ret_type=relay.TensorType([], 'int32'))
    i_data = np.array(1, dtype='int32')
    check_eval(func, [i_data], 0)
Example #20
def tensor_add():
    x = relay.var("x", relay.TensorType((2, 4), "int32"))
    y = relay.var("y", relay.TensorType((2, 4), "int32"))
    return relay.Function([x, y], relay.subtract(x, y))
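Despite its name, the function above returns x - y. A minimal evaluation sketch (assuming the standard Relay interpreter is available in this TVM version):

    import numpy as np

    f = tensor_add()
    x_data = np.ones((2, 4), dtype="int32")
    y_data = np.ones((2, 4), dtype="int32")
    res = relay.create_executor().evaluate(f)(x_data, y_data)  # all zeros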
Example #21
def test_byoc_microtvm(workspace_dir, board, west_cmd, microtvm_debug, use_fvp):
    """This is a simple test case to check BYOC capabilities of microTVM"""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))

    # C compiler
    z0 = relay.add(x, w0)
    p0 = relay.subtract(z0, w1)
    q0 = relay.multiply(p0, w2)

    z1 = relay.add(x, w3)
    p1 = relay.subtract(z1, w4)
    q1 = relay.multiply(p1, w5)

    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)

    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = tvm.relay.transform.PartitionGraph()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))

    map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
    map_inputs["x"] = x_data
    check_result(
        temp_dir=workspace_dir,
        relay_mod=mod,
        map_inputs=map_inputs,
        out_shape=(30, 10),
        result=np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
        model=model,
        zephyr_board=board,
        west_cmd=west_cmd,
        build_config=build_config,
        use_fvp=use_fvp,
    )
Example #22
def test_byoc_microtvm_multiple_subgraphs(merge_compiler_regions):
    """This is a test case to check BYOC capabilities of AOT with multiple sub graphs"""
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOT_DEFAULT_RUNNER

    input_x = relay.var("x", shape=(10, 10))
    input_w0 = relay.var("w0", shape=(10, 10))
    input_w1 = relay.var("w1", shape=(10, 10))
    input_w2 = relay.var("w2", shape=(10, 10))
    input_w3 = relay.var("w3", shape=(10, 10))
    input_w4 = relay.var("w4", shape=(10, 10))
    input_w5 = relay.var("w5", shape=(10, 10))
    input_w6 = relay.var("w6", shape=(10, 10))
    input_w7 = relay.var("w7", shape=(10, 10))

    # C compiler
    ccompiler_add_1 = relay.add(input_x, input_w0)
    ccompiler_sub_1 = relay.subtract(ccompiler_add_1, input_w1)
    ccompiler_mul_1 = relay.multiply(ccompiler_sub_1, input_w2)

    ccompiler_add_2 = relay.add(input_x, input_w3)
    ccompiler_sub_2 = relay.subtract(ccompiler_add_2, input_w4)
    ccompiler_mul_2 = relay.multiply(ccompiler_sub_2, input_w5)

    # Other parts on TVM
    tvm_add = relay.add(input_x, input_w6)
    tvm_sub = relay.subtract(tvm_add, input_w7)

    concat_outputs = relay.concatenate(
        (ccompiler_mul_1, ccompiler_mul_2, tvm_sub), axis=0)
    relay_func = relay.Function(
        [
            input_x, input_w0, input_w1, input_w2, input_w3, input_w4,
            input_w5, input_w6, input_w7
        ],
        concat_outputs,
    )
    mod = tvm.IRModule()
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(relay_func)

    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)

    mod = tvm.relay.transform.PartitionGraph("mod_name")(mod)
    mod = tvm.relay.transform.InferType()(mod)

    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))

    map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i])
                                                for i in range(8)])
    output_list = generate_ref_data(mod, map_inputs)
    input_list = [map_inputs["x"]]
    input_list.extend([map_inputs["w{}".format(i)] for i in range(8)])
    compile_and_run(
        AOTTestModel(name="my_mod",
                     module=mod,
                     inputs=map_inputs,
                     outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #23
 def expected():
     add = relay.add(x, y)
     copy_add_sub = relay.device_copy(add, ctx2, ctx1)
     sub = relay.subtract(copy_add_sub, z)
     func = relay.Function([x, y, z], sub)
     return func
Example #24
def test_sequential_pass():
    shape = (10, )
    dtype = 'float32'
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    y = relay.var("y", tp)
    v_sub = relay.GlobalVar("mySub")
    sub = relay.Function([x, y], relay.subtract(x, y))

    z = relay.var("z", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([z], relay.log(z))

    mod = relay.Module({v_sub: sub, v_log: log})

    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log

    def get_ref_sub():
        ref_sub = relay.Function([x, y],
                                 relay.subtract(
                                     relay.add(x, x), relay.add(y, y)))
        return ref_sub

    def get_ref_abs():
        shape = (5, 10)
        tp = relay.TensorType(shape, "float32")
        a = relay.var("a", tp)
        ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
        return ref_abs

    # Register a module pass.
    opt_tester = OptTester(mod)
    pass_ctx = None

    @ir_pass.module_pass(opt_level=1)
    def mod_transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    module_pass = mod_transform

    # Register a function pass.
    @ir_pass.function_pass(opt_level=1)
    def func_transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    function_pass = func_transform

    def test_pass_registration():
        passes = [module_pass, function_pass]
        opt_level = 2
        pass_name = "sequential_pass"
        sequential_pass = ir_pass.sequential_pass(passes=passes,
                                                  opt_level=opt_level)
        assert isinstance(sequential_pass, ir_pass.SequentialPass)
        pass_info = sequential_pass.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_no_pass():
        passes = []
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)
        mod_func = ret_mod[v_sub]
        check_func(sub, mod_func)

    def test_only_module_pass():
        passes = [module_pass]
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)
        # Check the subtract function.
        sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, sub)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, abs_func)

    def test_only_function_pass():
        # Check the subtract function.
        passes = [function_pass]
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function.
        log_var, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

    def test_multiple_passes():
        # Reset the current module since mod has been polluted by the previous
        # function pass.
        mod = relay.Module({v_sub: sub, v_log: log})
        passes = [module_pass, function_pass]
        sequential_pass = ir_pass.sequential_pass(opt_level=1, passes=passes)
        ret_mod = sequential_pass(mod)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, get_ref_abs())

        # Check the subtract function is modified correctly.
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function is modified correctly.
        _, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

        # Execute the updated subtract function.
        x_nd = get_rand(shape, dtype)
        y_nd = get_rand(shape, dtype)
        ref_res = np.subtract(x_nd.asnumpy() * 2, y_nd.asnumpy() * 2)
        for target, ctx in ctx_list():
            exe1 = relay.create_executor("graph", ctx=ctx, target=target)
            exe2 = relay.create_executor("debug", ctx=ctx, target=target)
            res1 = exe1.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

        # Execute the updated abs function.
        x_nd = get_rand((5, 10), dtype)
        ref_res = np.abs(x_nd.asnumpy() * 2)
        for target, ctx in ctx_list():
            exe1 = relay.create_executor("graph", ctx=ctx, target=target)
            exe2 = relay.create_executor("debug", ctx=ctx, target=target)
            res1 = exe1.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    test_no_pass()
    test_only_module_pass()
    test_only_function_pass()
    test_multiple_passes()
Example #25
def test_multi_node_subgraph(check_result):
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))

    # subgraph0
    x0 = relay.var("x0", shape=(10, 10))
    w00 = relay.var("w00", shape=(10, 10))
    w01 = relay.var("w01", shape=(10, 10))
    w02 = relay.var("w02", shape=(10, 10))
    z00 = relay.add(x0, w00)
    p00 = relay.subtract(z00, w01)
    q00 = relay.multiply(p00, w02)
    subgraph0 = relay.Function([x0, w00, w01, w02], q00)
    subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0")
    call0 = relay.Call(subgraph0, [x, w0, w1, w2])

    # subgraph1
    x1 = relay.var("x1", shape=(10, 10))
    w10 = relay.var("w10", shape=(10, 10))
    w11 = relay.var("w11", shape=(10, 10))
    w12 = relay.var("w12", shape=(10, 10))
    z10 = relay.add(x1, w10)
    p10 = relay.subtract(z10, w11)
    q10 = relay.multiply(p10, w12)
    subgraph1 = relay.Function([x1, w10, w11, w12], q10)
    subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1")
    call1 = relay.Call(subgraph1, [x, w3, w4, w5])

    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)

    r = relay.concatenate((call0, call1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = relay.transform.InferType()(mod)

    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))

    map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i])
                                                for i in range(8)])
    check_result(
        mod,
        map_inputs,
        (30, 10),
        np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
    )
Example #26
 def expected():
     add = relay.add(x, y)
     copy_add_sub = relay.device_copy(add, dev1, dev2)
     sub = relay.subtract(copy_add_sub, z)
     return sub
Example #27
 def get_ref_sub():
     ref_sub = relay.Function([x, y],
                              relay.subtract(
                                  relay.add(x, x), relay.add(y, y)))
     return ref_sub
Example #28
def test_subtract():
    i = relay.var('i', shape=[], dtype='int32')
    sub = relay.subtract(i, relay.const(1, dtype='int32'))
    func = relay.Function([i], sub, ret_type=relay.TensorType([], 'int32'))
    i_data = np.array(1, dtype='int32')
    check_eval(func, [i_data], 0)
Example #29
 def expected():
     add = relay.add(x, y)
     sub = relay.subtract(add, z)
     func = relay.Function([x, y, z], sub)
     return func
Example #30
def test_subtract():
    i = relay.var("i", shape=[], dtype="int32")
    sub = relay.subtract(i, relay.const(1, dtype="int32"))
    func = relay.Function([i], sub, ret_type=relay.TensorType([], "int32"))
    i_data = np.array(1, dtype="int32")
    check_eval(func, [i_data], 0)
Example #31
    def annotated():
        in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
        in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
        in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
        in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
        in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
        in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
        in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
        in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
        in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
        in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")

        begin0 = compiler_begin(in_1, "test")
        begin1 = compiler_begin(in_2, "test")
        begin2 = compiler_begin(in_3, "test")
        begin3 = compiler_begin(in_4, "test")
        node0 = relay.add(begin0, begin1)
        node1 = relay.add(begin2, begin3)
        end0 = compiler_end(node0, "test")
        end1 = compiler_end(node1, "test")
        begin4 = compiler_begin(end0, "test")
        begin5 = compiler_begin(end1, "test")
        node2 = relay.add(begin4, begin5)
        end2 = compiler_end(node2, "test")

        dbegin0 = compiler_begin(in_5, "default")
        dbegin1 = compiler_begin(in_6, "default")
        node3 = relay.subtract(dbegin0, dbegin1)
        dbegin2 = compiler_begin(in_7, "default")
        dend1 = compiler_end(node3, "default")
        dbegin3 = compiler_begin(dend1, "default")
        node4 = relay.subtract(dbegin2, dbegin3)
        dend2 = compiler_end(node4, "default")

        begin6 = compiler_begin(end2, "test")
        begin7 = compiler_begin(dend2, "test")
        node5 = relay.add(begin6, begin7)
        end3 = compiler_end(node5, "test")
        end4 = compiler_end(node5, "test")
        dbegin4 = compiler_begin(in_8, "default")
        dbegin5 = compiler_begin(end3, "default")
        node6 = relay.subtract(dbegin4, dbegin5)
        begin8 = compiler_begin(in_9, "test")
        begin9 = compiler_begin(end4, "test")
        node7 = relay.add(begin8, begin9)
        end5 = compiler_end(node7, "test")

        dend3 = compiler_end(node6, "default")
        begin10 = compiler_begin(dend3, "test")
        begin11 = compiler_begin(end5, "test")
        node8 = relay.add(begin10, begin11)
        end6 = compiler_end(node8, "test")
        begin12 = compiler_begin(in_10, "test")
        begin13 = compiler_begin(end6, "test")
        node9 = relay.add(begin12, begin13)
        end7 = compiler_end(node9, "test")

        f = relay.Function(
            [in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10],
            end7)
        mod = tvm.IRModule.from_expr(f)
        return mod
Example #32
def test_partition():
    in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
    in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
    in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
    in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
    in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
    in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
    in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
    in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
    in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
    in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")

    begin0 = compiler_begin(in_1, "onnx")
    begin1 = compiler_begin(in_2, "onnx")
    begin2 = compiler_begin(in_3, "onnx")
    begin3 = compiler_begin(in_4, "onnx")
    node0 = relay.add(begin0, begin1)
    node1 = relay.add(begin2, begin3)
    end0 = compiler_end(node0, "onnx")
    end1 = compiler_end(node1, "onnx")
    begin4 = compiler_begin(end0, "onnx")
    begin5 = compiler_begin(end1, "onnx")
    node2 = relay.add(begin4, begin5)
    end2 = compiler_end(node2, "onnx")

    dbegin0 = compiler_begin(in_5, "default")
    dbegin1 = compiler_begin(in_6, "default")
    node3 = relay.subtract(dbegin0, dbegin1)
    dbegin2 = compiler_begin(in_7, "default")
    dend1 = compiler_end(node3, "default")
    dbegin3 = compiler_begin(dend1, "default")
    node4 = relay.subtract(dbegin2, dbegin3)
    dend2 = compiler_end(node4, "default")

    begin6 = compiler_begin(end2, "onnx")
    begin7 = compiler_begin(dend2, "onnx")
    node5 = relay.add(begin6, begin7)
    end3 = compiler_end(node5, "onnx")
    end4 = compiler_end(node5, "onnx")
    dbegin4 = compiler_begin(in_8, "default")
    dbegin5 = compiler_begin(end3, "default")
    node6 = relay.subtract(dbegin4, dbegin5)
    begin8 = compiler_begin(in_9, "onnx")
    begin9 = compiler_begin(end4, "onnx")
    node7 = relay.multiply(begin8, begin9)
    end5 = compiler_end(node7, "onnx")

    dend3 = compiler_end(node6, "default")
    begin10 = compiler_begin(dend3, "onnx")
    begin11 = compiler_begin(end5, "onnx")
    node8 = relay.add(begin10, begin11)
    end6 = compiler_end(node8, "onnx")
    begin12 = compiler_begin(in_10, "onnx")
    begin13 = compiler_begin(end6, "onnx")
    node9 = relay.add(begin12, begin13)
    end7 = compiler_end(node9, "onnx")

    func = relay.Function(
        [in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end7)

    target = "llvm"
    mod = IRModule.from_expr(func)
    mod = transform.PartitionGraph()(mod)

    with tvm.transform.PassContext(opt_level=3, disabled_pass=["FuseOps"]):
        graph_json, mod1, params = relay.build(mod, target)

    assert mod1.type_key == "metadata"
    assert mod1.imported_modules[0].type_key == "llvm"
    assert mod1.imported_modules[0].get_source()
    assert mod1.imported_modules[1].type_key == "onnx"
    assert mod1.imported_modules[1].get_source()
Example #33
 def expected():
     add = relay.add(x, y)
     sub = relay.subtract(add, z)
     func = relay.Function([x, y, z], sub)
     return func
Example #34
 def expected():
     add = relay.add(x, y)
     copy_add_sub = relay.device_copy(add, ctx2, ctx1)
     sub = relay.subtract(copy_add_sub, z)
     func = relay.Function([x, y, z], sub)
     return func
Example #35
def test_sequential_pass():
    shape = (10, )
    dtype = 'float32'
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    y = relay.var("y", tp)
    v_sub = relay.GlobalVar("mySub")
    sub = relay.Function([x, y], relay.subtract(x, y))

    z = relay.var("z", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([z], relay.log(z))

    mod = relay.Module({v_sub: sub, v_log: log})

    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log

    def get_ref_sub():
        ref_sub = relay.Function([x, y],
                                 relay.subtract(relay.add(x, x),
                                                relay.add(y, y)))
        return ref_sub

    def get_ref_abs():
        shape = (5, 10)
        tp = relay.TensorType(shape, "float32")
        a = relay.var("a", tp)
        ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
        return ref_abs

    # Register a module pass.
    opt_tester = OptTester(mod)
    pass_ctx = None

    @_transform.module_pass(opt_level=1)
    def mod_transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    module_pass = mod_transform

    # Register a function pass.
    @_transform.function_pass(opt_level=1)
    def func_transform(expr, mod, ctx):
        return opt_tester.transform(expr, ctx)

    function_pass = func_transform

    def test_pass_registration():
        passes = [module_pass, function_pass]
        opt_level = 2
        pass_name = "sequential"
        sequential = _transform.Sequential(passes=passes, opt_level=opt_level)
        pass_info = sequential.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_no_pass():
        passes = []
        sequential = _transform.Sequential(opt_level=1, passes=passes)
        ret_mod = sequential(mod)
        mod_func = ret_mod[v_sub]
        check_func(sub, mod_func)

    def test_only_module_pass():
        passes = [module_pass]
        sequential = _transform.Sequential(opt_level=1, passes=passes)
        ret_mod = sequential(mod)
        # Check the subtract function.
        sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, sub)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, abs_func)

    def test_only_function_pass():
        # Check the subtract function.
        passes = [function_pass]
        sequential = _transform.Sequential(opt_level=1, passes=passes)
        ret_mod = sequential(mod)
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function.
        log_var, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

    def test_multiple_passes():
        # Reset the current module since mod has been polluted by the previous
        # function pass.
        mod = relay.Module({v_sub: sub, v_log: log})
        passes = [module_pass, function_pass]
        sequential = _transform.Sequential(opt_level=1, passes=passes)
        ret_mod = sequential(mod)

        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, get_ref_abs())

        # Check the subtract function is modified correctly.
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())

        # Check the log function is modified correctly.
        _, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())

        # Execute the updated subtract function.
        x_nd = get_rand(shape, dtype)
        y_nd = get_rand(shape, dtype)
        ref_res = np.subtract(x_nd.asnumpy() * 2, y_nd.asnumpy() * 2)
        for target, ctx in ctx_list():
            exe1 = relay.create_executor("graph", ctx=ctx, target=target)
            exe2 = relay.create_executor("debug", ctx=ctx, target=target)
            res1 = exe1.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_sub)(x_nd, y_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

        # Execute the updated abs function.
        x_nd = get_rand((5, 10), dtype)
        ref_res = np.abs(x_nd.asnumpy() * 2)
        for target, ctx in ctx_list():
            exe1 = relay.create_executor("graph", ctx=ctx, target=target)
            exe2 = relay.create_executor("debug", ctx=ctx, target=target)
            res1 = exe1.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res1.asnumpy(), ref_res, rtol=1e-5)
            res2 = exe2.evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res2.asnumpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    test_no_pass()
    test_only_module_pass()
    test_only_function_pass()
    test_multiple_passes()
Example #36
 def annotated():
     add = relay.add(x, y)
     sub = relay.subtract(add, z)
     func = relay.Function([x, y, z], sub)
     func = run_opt_pass(func, transform.RewriteAnnotatedOps(dev1.device_type))
     return func