Example #1
    def before(x, conv_weight, out_bias, out_scale, channels):
        args = [x, conv_weight, out_bias]
        y0 = relay.nn.conv2d(x, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y0 = relay.multiply(y0, out_scale)
        y0 = relay.nn.relu(y0)

        y1 = relay.nn.conv2d(y0, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y1 = relay.multiply(y1, out_scale)
        y1 = relay.nn.relu(y1)

        y2 = relay.nn.conv2d(y0, conv_weight,
                             channels=channels,
                             kernel_size=(3, 3),
                             padding=(1, 1))
        y2 = relay.multiply(y2, out_scale)
        y2 = relay.nn.relu(y2)

        y = relay.add(y1, y2)
        return relay.Function(args, y)
Example #2
 def before(x, w1, w2, scale1, scale2):
     args = [x, w1, w2, scale1, scale2]
     y1 = relay.nn.conv2d(x, w1)
     y1 = relay.multiply(y1, scale1)
     y2 = relay.nn.conv2d(x, w2)
     y2 = relay.multiply(y2, scale2)
     y = relay.Tuple((y1, y2))
     return relay.Function(args, y)
Example #3
 def expected(x, w1, w2, scale1, scale2, channels1, channels2):
     args = [x, w1, w2, scale1, scale2]
     w = relay.concatenate((w1, w2), axis=0)
     y = relay.nn.conv2d(x, w, channels=channels1 + channels2)
     y1 = relay.strided_slice(y, [0, 0], [None, channels1])
     y2 = relay.strided_slice(y, [0, channels1], [None, channels1 + channels2])
     y1 = relay.multiply(y1, scale1)
     y2 = relay.multiply(y2, scale2)
     y = relay.Tuple((y1, y2))
     return relay.Function(args, y)
Example #4
 def simple_bn(x, gamma, beta, moving_mean, moving_var,
               axis=1, epsilon=1e-5, shape=None):
     # expect = (x - moving_mean) / sqrt(moving_var + eps) * gamma + beta
     scale = rly.multiply(rly.const(1, 'float32') /
             rly.sqrt(moving_var + rly.const(epsilon, 'float32')), gamma)
     shift = rly.add(
         rly.multiply(rly.negative(moving_mean), scale), beta)
     num_newaxis = len(shape) - (axis + 1)
     if num_newaxis:
         scale = rly.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
         shift = rly.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
     return x * scale + shift
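A minimal usage sketch for the helper above (the rly alias, the NCHW shapes, and the variable names here are assumptions, not part of the original snippet):

    x = rly.var("x", shape=(1, 16, 8, 8))
    gamma = rly.var("gamma", shape=(16,))
    beta = rly.var("beta", shape=(16,))
    mean = rly.var("moving_mean", shape=(16,))
    var = rly.var("moving_var", shape=(16,))
    # Folds batch norm into a single scale and shift over the channel axis.
    out = simple_bn(x, gamma, beta, mean, var, axis=1, shape=(1, 16, 8, 8))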
Example #5
 def expected():
     add = relay.add(a, b)
     mul = relay.multiply(c, d)
     copy_mul_sub = relay.device_copy(mul, cpu_ctx, dev_ctx)
     sub = relay.subtract(add, copy_mul_sub)
     func = relay.Function([a, b, c, d], sub)
     return func
Example #6
def test_mul_param():
    x = relay.var('x', shape=(10, 10))
    y = relay.var('y', shape=(1, 10))
    func = relay.Function([x, y], relay.multiply(x, y))
    x_data = np.random.rand(10, 10).astype('float32')
    y_data = np.random.rand(1, 10).astype('float32')
    check_eval(func, [x_data, y_data], x_data * y_data)
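check_eval is a helper defined elsewhere in the test suite; a plausible sketch of what it does (the exact signature and executor configuration are assumptions):

    def check_eval(expr, args, expected_result, rtol=1e-07):
        # Run the function through the Relay interpreter on CPU and
        # compare against the NumPy reference result.
        result = relay.create_executor(device=tvm.cpu(0)).evaluate(expr)(*args)
        np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)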
Example #7
    def expected(x, conv_weight, out_bias, out_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
        conv_weight = relay.multiply(
            conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))

        y = relay.nn.conv2d(x, conv_weight,
                            channels=channels,
                            kernel_size=(3, 3),
                            padding=(1, 1))
        out_bias = relay.multiply(out_bias,
                                  relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
        y = relay.add(y, out_bias)
        y = relay.nn.relu(y)
        return relay.Function(args, y)
Example #8
 def before(x, conv_weight, out_scale, channels):
     args = [x, conv_weight]
     y = relay.nn.conv2d(x, conv_weight,
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     y = relay.multiply(y, out_scale)
     return relay.Function(args, y)
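A "before" function like this is normally fed to a fold-scale-axis pass; a hedged driver sketch (the shapes, the numpy import, and the choice of BackwardFoldScaleAxis are assumptions):

    x = relay.var("x", shape=(1, 4, 8, 8))
    weight = relay.var("weight")
    out_scale = relay.const(np.random.uniform(size=(4, 1, 1)).astype("float32"))
    func = before(x, weight, out_scale, channels=4)
    mod = tvm.IRModule.from_expr(func)
    # The multiply follows the conv2d, so the scale folds backward into it.
    mod = relay.transform.BackwardFoldScaleAxis()(mod)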
Example #9
 def before():
     c = relay.const(c_data)
     x = relay.var("x")
     y = relay.add(c, c)
     y = relay.multiply(y, relay.const(2, "float32"))
     y = relay.add(x, y)
     z = relay.add(y, c)
     return relay.Function([x], z)
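c_data comes from the enclosing test; a sketch of how such a function is pushed through constant folding (the sample value of c_data is an assumption):

    c_data = np.ones((2, 2), dtype="float32")  # assumed sample data
    mod = tvm.IRModule.from_expr(before())
    # add(c, c) and the multiply by 2 collapse into a single constant.
    mod = relay.transform.FoldConstant()(mod)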
Example #10
 def before(x, conv_weight, out_scale, channels):
     y = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NCHW",
                          padding=(1, 1))
     y = relay.nn.relu(y)
     y = relay.multiply(x, out_scale)
     return relay.Function(relay.ir_pass.free_vars(y), y)
Example #11
 def expected():
     x = relay.var("x", shape=(1, 64, 56, 56))
     weight = relay.var('weight', shape=(64, 64, 3, 3))
     y = relay.nn.conv2d(x, relay.multiply(weight, relay.const(2.0, "float32")),
                         channels=64,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     y = relay.nn.relu(y)
     y = relay.Function([x, weight], y)
     return y
Example #12
 def before():
     x = relay.var("x", shape=(1, 64, 56, 56))
     bias = relay.var("bias", shape=(64,))
     scale = relay.var("scale", shape=(64, 1, 1))
     weight = relay.var("weight")
     y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
     y = relay.nn.bias_add(y, bias) # test broadcasting to lhs
     y = relay.multiply(scale, y)         # test broadcasting to rhs
     y = relay.Function(free_vars(y), y)
     return y
Example #13
 def before(x, conv_weight, in_bias, in_scale, channels):
     x = relay.multiply(x, in_scale)
     xx = relay.nn.leaky_relu(x, alpha=0.1)
     y1 = relay.nn.conv2d(xx, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          padding=(1, 1))
     z = relay.add(y1, x)
     return relay.Function(relay.ir_pass.free_vars(z), z)
Example #14
 def before(x, conv_weight, out_bias, out_scale, channels):
     args = [x, conv_weight, out_bias]
     out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
     y = relay.nn.conv2d(x, conv_weight,
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     y = relay.add(y, out_bias)
     y = relay.nn.relu(y)
     y = relay.multiply(y, out_scale)
     return relay.Function(args, y)
Example #15
 def fail2(x, conv_weight, out_bias, out_scale, channels):
     args = [x, conv_weight, out_bias]
     out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
     y1 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          padding=(1, 1))
     y2 = relay.nn.relu(y1)
     # fold will fail because y1 is referred also by y2
     y1 = relay.multiply(y1, out_scale)
     y = relay.add(y1, y2)
     return relay.Function(args, y)
Example #16
 def before(x, conv_weight, in_bias, in_scale, channels):
     args = [x, conv_weight, in_bias, in_scale]
     in_scale = relay.expand_dims(in_scale, axis=1, num_newaxis=2)
     in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
     x = relay.multiply(x, in_scale)
     x = relay.nn.relu(x)
     x = relay.add(x, in_bias)
     y = relay.nn.conv2d(x, conv_weight,
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     return relay.Function(args, y)
Example #17
 def expected(x, conv_weight, in_bias, in_scale, channels):
     args = [x, conv_weight, in_bias, in_scale]
     x = relay.nn.relu(x)
     in_bias = relay.divide(in_bias, in_scale)
     x = relay.subtract(x, in_bias)
     y1 = relay.nn.conv2d(x,
                          relay.multiply(conv_weight, in_scale),
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          weight_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     y2 = relay.nn.conv2d(x,
                          relay.multiply(conv_weight, in_scale),
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          weight_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     z = relay.add(y1, y2)
     return relay.Function(args, z)
Example #18
 def expected():
     x = relay.var("x", shape=(1, 64, 56, 56))
     weight = relay.var('weight', shape=(64, 64, 3, 3))
     y = relay.nn.conv2d(x,
                         relay.multiply(weight, relay.const(2.0,
                                                            "float32")),
                         channels=64,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     y = relay.nn.relu(y)
     mod = tvm.IRModule()
     foo = relay.GlobalVar('foo')
     mod[foo] = relay.Function([x, weight], y)
     mod["main"] = relay.Function([x, weight], foo(x, weight))
     return mod
Example #19
    def expected():
        x = relay.var("x", shape=(1, 500, 500, 64))
        kernel = relay.var('kernel', shape=(3, 3, 64, 64), dtype='float32')
        bias = relay.var("bias", shape=(64,))
        multiplier1 = relay.var('multiplier1', shape=(1, ), dtype='float32')
        multiplier2 = relay.var('multiplier2', shape=(1, 1), dtype='float32')

        b = relay.expand_dims(bias, axis=0, num_newaxis=3)
        b = relay.layout_transform(b, "NHWC", "NCHW16c")

        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(y, kernel,
                            data_layout='NCHW16c',
                            kernel_layout="HWIO",
                            kernel_size=(3, 3))

        y = relay.add(b, y)
        y = relay.nn.relu(y)

        y = relay.multiply(multiplier1, y)
        y = relay.multiply(y, multiplier2)
        y = relay.layout_transform(y, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y
Example #20
def test_qemu_make_fail(temp_dir, board, west_cmd, tvm_debug):
    """Testing QEMU make fail."""
    if board not in ["qemu_x86", "mps2_an521", "mps3_an547"]:
        pytest.skip(msg="Only for QEMU targets.")

    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}
    shape = (10, )
    dtype = "float32"

    # Construct Relay program.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)

    target = tvm.target.target.micro(model)
    executor = Executor("aot")
    runtime = Runtime("crt")
    with tvm.transform.PassContext(opt_level=3,
                                   config={"tir.disable_vectorize": True}):
        lowered = relay.build(ir_mod,
                              target,
                              executor=executor,
                              runtime=runtime)

    sample = np.zeros(shape=shape, dtype=dtype)
    project, project_dir = test_utils.generate_project(temp_dir,
                                                       board,
                                                       west_cmd,
                                                       lowered,
                                                       build_config,
                                                       sample,
                                                       shape,
                                                       dtype,
                                                       load_cmsis=False)

    file_path = (pathlib.Path(project_dir) / "build" / "zephyr" /
                 "CMakeFiles" / "run.dir" / "build.make")
    assert file_path.is_file(), f"[{file_path}] does not exist."

    # Remove a file to create make failure.
    os.remove(file_path)
    project.flash()
    with pytest.raises(server.JSONRPCError) as excinfo:
        project.transport().open()
    assert "QEMU setup failed" in str(excinfo.value)
Example #21
    def before(x, conv_weight, in_bias, in_scale, channels, blocking):
        args = [x, conv_weight, in_bias]
        x = relay.multiply(x, in_scale)
        x = relay.nn.relu(x)
        x = relay.add(x, in_bias)
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW2i{}o".format(blocking[1])
            if blocking else "OIHW")

        return relay.Function(args, y)
Example #22
 def expected(x, conv_weight, in_bias, in_scale, channels):
     # use a fixed order of args so alpha equal check can pass
     args = [x, conv_weight, in_bias, in_scale]
     in_scale = relay.expand_dims(in_scale, axis=1, num_newaxis=2)
     in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
     squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
     x = relay.nn.relu(x)
     in_bias = relay.divide(in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
     x = relay.add(x, in_bias)
     conv_weight = relay.multiply(
         conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
     y = relay.nn.conv2d(x, conv_weight,
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     return relay.Function(args, y)
Example #23
 def expected(x, w1, w2, scale1, scale2, bias, channels1, channels2):
     args = [x, w1, w2, scale1, scale2, bias]
     w = relay.concatenate((w1, w2), axis=0)
     scale = relay.concatenate((scale1, scale2), axis=0)
     y = relay.nn.conv2d(x, w, channels=channels1 + channels2)
     y = relay.multiply(y, scale)
     y = relay.nn.relu(y)
     y1 = relay.strided_slice(
         y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
     )
     y2 = relay.strided_slice(
         y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
     )
     y2 = relay.add(y2, bias)
     y = relay.Tuple((y1, y2))
     return relay.Function(args, y)
Example #24
 def annotated():
     add = relay.add(a, b)
     _add = relay.annotation.on_device(add, dev_ctx)
     mul = relay.multiply(c, d)
     _mul = relay.annotation.on_device(mul, cpu_ctx)
     sub = relay.subtract(add, mul)
     _sub = relay.annotation.on_device(sub, dev_ctx)
     func = relay.Function([a, b, c, d],
                           relay.Tuple(tvm.convert([_add, _mul,
                                                    _sub, sub])))
     func = relay.ir_pass.infer_type(func)
     func = relay.ir_pass.rewrite_annotated_ops(func,
                                                dev_ctx.device_type)
     func = relay.ir_pass.infer_type(func)
     return relay.Function(relay.ir_pass.free_vars(func.body[3]),
                           func.body[3])
Example #25
def test_mul_param(interface_api, use_unpacked_api, test_runner):
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(1, 10))
    func = relay.Function([x, y], relay.multiply(x, y))
    x_data = np.random.rand(10, 10).astype("float32")
    y_data = np.random.rand(1, 10).astype("float32")

    inputs = OrderedDict([("x", x_data), ("y", y_data)])
    output_list = generate_ref_data(func, inputs)

    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
Example #26
 def fail2(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
     args = [x, conv_weight, out_bias]
     y1 = relay.nn.conv2d(
         x,
         conv_weight,
         channels=channels,
         kernel_size=(3, 3),
         padding=(1, 1),
         data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
         kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
     )
     y2 = relay.nn.relu(y1)
     # fold will fail because y1 is referred also by y2
     y1 = relay.multiply(y1, out_scale)
     y = relay.add(y1, y2)
     return relay.Function(args, y)
Example #27
 def expected():
     x = relay.var("x", shape=(1, 64, 56, 56))
     bias = relay.var("bias", shape=(64,))
     scale = relay.var("scale", shape=(64, 1, 1))
     weight = relay.var("weight")
     x = relay.layout_transform(x, "NCHW", "NCHW16c")
     bias = relay.expand_dims(bias, 1, 2)
     bias = relay.layout_transform(bias, "CHW", "CHW16c")
     scale = relay.layout_transform(scale, "CHW", "CHW16c")
     y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1),
                         data_layout="NCHW16c")
     y = relay.add(y, bias)          # test broadcasting to lhs
     y = relay.multiply(scale, y)      # test broadcasting to rhs
     y = relay.layout_transform(y, "NCHW16c", "NCHW")
     y = relay.Function(free_vars(y), y)
     return y
Example #28
    def make_qgraph(data, weight):
        out = data * relay.const(32.0)
        out = relay.round(out)
        out = relay.clip(out, a_min=-127, a_max=127)
        out = out.astype('int8')

        out = relay.nn.conv2d(out,
                              weight,
                              kernel_size=(3, 3),
                              padding=(1, 1),
                              channels=c,
                              out_dtype='int32')
        out = out.astype('float32')
        out = relay.multiply(out, relay.const(0.00024414062))  # ~= 1/4096, rescaling the int32 result back to float32
        out = relay.Function(relay.ir_pass.free_vars(out), out)
        return out
Example #30
    def after_B():
        inputs = [relay.var('input_' + str(i), shape=(10, 10)) for i in range(8)]
        add_relu_calls = []
        for i in range(4):
            x = relay.var('x' + str(i))
            y = relay.var('y' + str(i))
            add_relu = relay.add(x, y)
            add_relu = relay.nn.relu(add_relu)
            add_relu = relay.Function([x, y], add_relu)
            add_relu = add_relu.set_attribute('Composite', tir.StringImm('add_relu'))
            add_relu_call = relay.Call(add_relu, [inputs[i*2], inputs[i*2+1]])
            add_relu_calls.append(add_relu_call)

        add = relay.add(add_relu_calls[0], add_relu_calls[1])
        sub = relay.subtract(add_relu_calls[2], add_relu_calls[3])
        out = relay.multiply(add, sub)
        return relay.Function(inputs, out)
Example #31
def test_compile_nhwc_pack():
    data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
    weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
    p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
    conv = relay.nn.conv2d(data,
                           weight,
                           kernel_size=(1, 1),
                           data_layout="NHWC",
                           kernel_layout="HWIO",
                           out_dtype="int32")
    multiply = relay.multiply(relay.const(-22, dtype='int32'), p2)
    tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
    subtract = relay.subtract(conv, tile)

    func = subtract
    mod = relay.Function(relay.analysis.free_vars(func), func)
    relay.build(mod, target="llvm")
Example #32
        def create_external_func1(mod_, compiler_name, symbol_name):
            x_int = relay.var("x_int", shape=(10, 10))
            w0_int = relay.var("w0_int", shape=(10, 10))
            w1_int = relay.var("w1_int", shape=(10, 10))
            w2_int = relay.var("w2_int", shape=(10, 10))

            z0 = relay.add(x_int, w0_int)
            p0 = relay.subtract(z0, w1_int)
            q0 = relay.multiply(z0, w2_int)
            f1_o_tuple = relay.Tuple([p0, q0])

            f1 = relay.Function([x_int, w0_int, w1_int, w2_int], f1_o_tuple)
            f1 = set_func_attr(f1, compiler_name, symbol_name)
            glb_f1 = relay.GlobalVar(symbol_name)
            mod_[glb_f1] = f1
            mod_ = relay.transform.InferType()(mod_)
            return glb_f1, mod_
Example #33
 def before(x, conv_weight, out_bias, out_scale, channels):
     args = [x, conv_weight, out_bias]
     y1 = relay.nn.conv2d(x,
                          conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          padding=(1, 1))
     y1 = relay.nn.relu(y1)
     y2 = relay.nn.conv2d(x,
                          conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          padding=(1, 1))
     y2 = relay.nn.relu(y2)
     y = relay.add(y1, y2)
     y = relay.multiply(y, out_scale)
     return relay.Function(args, y)
Example #34
    def __impl_philox_2x_round(self, ctr, key):
        """Compute a round in Philox2x32.

        :param ctr: uint64 vector
        :param key: uint32 scalar
        :return: uint64 vector holding the round result
        """
        ctr_0 = relay.right_shift(ctr, RELAY_UINT64_32)
        ctr_1 = relay.bitwise_and(ctr, RELAY_UINT64_CLEAR_HIGH)

        # mul_hi_lo
        product = relay.multiply(RELAY_PHILOX_M2x32_0, ctr_0)

        key_64 = relay.cast(key, "uint64")
        ctr_1_xor_key = relay.bitwise_xor(ctr_1, key_64)
        ctr_1_xor_key_up = relay.left_shift(ctr_1_xor_key, RELAY_UINT64_32)
        return relay.bitwise_xor(product, ctr_1_xor_key_up)
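The uppercase names are module-level constants defined elsewhere; plausible definitions inferred from usage (the multiplier is the standard Philox M2x32 constant from Random123, the others follow from the shifts and masks above):

    RELAY_UINT64_32 = relay.const(32, "uint64")
    RELAY_UINT64_CLEAR_HIGH = relay.const(0xFFFFFFFF, "uint64")  # mask keeping the low 32 bits
    RELAY_PHILOX_M2x32_0 = relay.const(0xD256D193, "uint64")     # Philox 2x32 round multiplier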
Example #35
 def before(x, conv_weight, out_bias, out_scale, channels):
     args = [x, conv_weight, out_bias, out_scale]
     out_scale = relay.expand_dims(out_scale, axis=1, num_newaxis=2)
     out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
     y1 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          padding=(1, 1))
     y1 = relay.nn.relu(y1)
     y2 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          padding=(1, 1))
     y2 = relay.nn.relu(y2)
     y = relay.add(y1, y2)
     y = relay.multiply(y, out_scale)
     return relay.Function(args, y)
Example #36
def test_checkpoint():
    dtype = "float32"
    xs = [relay.var("x{}".format(i), dtype) for i in range(4)]
    f = relay.multiply(relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3]))
    f_checkpoint = relay.annotation.checkpoint(f)

    func, func_checkpoint = relay.Function(xs, f), relay.Function(xs, f_checkpoint)
    f, f_checkpoint = run_infer_type(func), run_infer_type(func_checkpoint)
    assert f.checked_type == f_checkpoint.checked_type

    inputs = [np.random.uniform() for _ in range(len(xs))]
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["graph", "debug"]:
            intrp = relay.create_executor(kind, device=dev, target=target)
            f_res = intrp.evaluate(f)(*inputs)
            f_checkpoint_res = intrp.evaluate(f_checkpoint)(*inputs)
            tvm.testing.assert_allclose(f_res.numpy(), f_checkpoint_res.numpy(), 0, 0)
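Checkpointing only changes behavior once gradients are taken; a hedged sketch of that follow-up step (relay.transform.gradient is the standard API, its use here is an assumption, not part of this test):

    grad_mod = tvm.IRModule.from_expr(func_checkpoint)
    grad_mod = relay.transform.InferType()(grad_mod)
    # The checkpointed subgraph is recomputed in the backward pass
    # instead of being stored.
    grad_fn = relay.transform.gradient(grad_mod["main"], mode="higher_order")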
Example #37
def test_qemu_make_fail(platform, west_cmd, skip_build, tvm_debug):
    """Testing QEMU make fail."""
    if platform not in ["host", "mps2_an521"]:
        pytest.skip(msg="Only for QEMU targets.")
    model, zephyr_board = PLATFORMS[platform]
    build_config = {"skip_build": skip_build, "debug": tvm_debug}
    shape = (10, )
    dtype = "float32"

    this_dir = pathlib.Path(__file__).parent
    tvm_source_dir = this_dir / ".." / ".." / ".."
    runtime_path = tvm_source_dir / "apps" / "microtvm" / "zephyr" / "aot_demo"

    # Construct Relay program.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)

    target = tvm.target.target.micro(
        model, options=["-link-params=1", "--executor=aot"])
    with tvm.transform.PassContext(opt_level=3,
                                   config={"tir.disable_vectorize": True}):
        lowered = relay.build(func, target)

    # Generate input/output header files
    model_files_path = os.path.join(runtime_path, "include")
    _create_header_file((f"input_data"), np.zeros(shape=shape, dtype=dtype),
                        model_files_path)
    _create_header_file("output_data", np.zeros(shape=shape, dtype=dtype),
                        model_files_path)

    session_kw = _build_session_kw(model, target, zephyr_board, west_cmd,
                                   lowered.lib, runtime_path, build_config)

    file_path = os.path.join(session_kw["binary"].base_dir,
                             "zephyr/CMakeFiles/run.dir/build.make")
    assert os.path.isfile(file_path), f"[{file_path}] does not exist."

    # Remove a file to create make failure.
    os.remove(file_path)
    transport = session_kw["flasher"].flash(session_kw["binary"])
    with pytest.raises(RuntimeError) as excinfo:
        transport.open()
    assert "QEMU setup failed" in str(excinfo.value)
Example #38
    def expected():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))
        c = relay.var('c', shape=(10, 10))

        # add_sub_mul function
        in_1 = relay.var('in_1', shape=(10, 10))
        in_2 = relay.var('in_2', shape=(10, 10))
        add_node = relay.add(in_1, in_2)
        sub_node = relay.subtract(in_1, in_2)
        mul_node = relay.multiply(add_node, sub_node)
        add_sub_mul = relay.Function([in_1, in_2], mul_node)

        # merged function
        add_sub_mul_1 = relay.Call(add_sub_mul, [a, b])
        add_sub_mul_2 = relay.Call(add_sub_mul, [c, add_sub_mul_1])
        r = relay.nn.relu(add_sub_mul_2)
        return relay.Function([a, b, c], r)
Example #39
 def before(x, conv_weight, in_bias, in_scale, channels):
     args = [x, conv_weight, in_bias]
     x = relay.multiply(x, in_scale)
     x = relay.nn.relu(x)
     x = relay.add(x, in_bias)
     x_var = relay.Var("x_var")
     y1 = relay.nn.conv2d(
         x_var,
         conv_weight,
         channels=channels,
         kernel_size=(3, 3),
         data_layout="NHWC",
         kernel_layout="HWIO",
         padding=(1, 1),
     )
     z = relay.add(y1, x)
     let = relay.Let(x_var, x, z)
     return relay.Function(args, let)
Example #40
    def test_multiply_left_constant(self):
        right = relay.var("right", relay.TensorType((-1, 4, 2, 2), "float32"))
        left = relay.expr.const(np.zeros((2, 2), dtype=np.float32))

        net = relay.multiply(left, right)
        net = relay.Function([right], net)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert 'relay_id' in layers[0].attrs

        assert layers[1].type[0] == 'Scale'
        assert layers[1].shapes == [-1, 4, 2, 2]
        assert 'relay_id' in layers[1].attrs
Example #41
def test_mul_param(interface_api, use_unpacked_api, use_calculated_workspaces):
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(1, 10))
    func = relay.Function([x, y], relay.multiply(x, y))
    x_data = np.random.rand(10, 10).astype("float32")
    y_data = np.random.rand(1, 10).astype("float32")

    inputs = OrderedDict([("x", x_data), ("y", y_data)])
    output_list = generate_ref_data(func, inputs)

    compile_and_run(
        func,
        inputs,
        output_list,
        interface_api,
        use_unpacked_api,
        use_calculated_workspaces,
    )
Example #42
def relay_take_grad_inp(c, _nb_indices, _indices, _values):
    assert _nb_indices.is_constant(int)
    values = c.ref(_values)
    r_indices = relay.reshape(c.ref(_indices),
                              tuple(_indices.abstract.xshape()) + (1, ))
    n_rows = _nb_indices.value
    n_cols = _values.abstract.xshape()[-1]
    outputs = []
    indices_dtype = type_to_np_dtype(_indices.abstract.element.xtype())
    out_dtype = type_to_np_dtype(_values.abstract.element.xtype())
    for i in range(n_rows):
        select_entries = relay.equal(r_indices, relay.const(i, indices_dtype))
        casted_select = relay.cast(select_entries, out_dtype)
        select_dout = relay.multiply(casted_select, values)
        reshape_out = relay.reshape(select_dout, (-1, n_cols))
        vector = relay.sum(reshape_out, 0)
        outputs.append(relay.reshape(vector, (1, n_cols)))
    return relay.concatenate(outputs, 0)
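The loop above builds the gradient of take row by row: mask the incoming values by index equality, then sum the masked rows. A NumPy analogue with hypothetical shapes:

    import numpy as np
    indices = np.array([0, 2, 0])                            # (k,)
    values = np.arange(12, dtype="float32").reshape(3, 4)    # (k, n_cols)
    grad = np.stack([((indices == i)[:, None] * values).sum(axis=0)
                     for i in range(3)])                     # (n_rows, n_cols)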
Example #43
    def before():
        data = relay.var('data', shape=(1, 512, 28, 28))
        kernel = relay.var('kernel', shape=(256, 512, 1, 1))
        bias = relay.var('bias', shape=(256,))
        a = relay.var('a', shape=(1, 256, 28, 28))
        b = relay.var('b', shape=(1, 256, 28, 28))

        conv_node = relay.nn.conv2d(data,
                                    kernel,
                                    kernel_size=(1, 1),
                                    padding=(0, 0),
                                    strides=(1, 1))

        bias_node = relay.nn.bias_add(conv_node, bias)
        relu_node = relay.nn.relu(bias_node)
        add_node = relay.add(relu_node, a)
        relu_node_2 = relay.nn.relu(add_node)
        r = relay.multiply(relu_node_2, b)
        return relay.Function([data, kernel, bias, a, b], r)
Example #44
 def fail1(x, conv_weight, out_bias, out_scale, channels):
     args = [x, conv_weight, out_bias]
     out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
     y1 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          padding=(1, 1))
     y1 = relay.nn.relu(y1)
     y2 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          padding=(1, 1),
                          out_layout="CNHW")
     # fold will fail because the axis from two path
     # differs from each other.
     y2 = relay.nn.relu(y2)
     y = relay.add(y1, y2)
     y = relay.multiply(y, out_scale)
     return relay.Function(args, y)
Example #46
    def expected():
        data = relay.var('data', shape=(1, 512, 28, 28))
        kernel = relay.var('kernel', shape=(256, 512, 1, 1))
        bias = relay.var('bias', shape=(256, ))
        a = relay.var('a', shape=(1, 256, 28, 28))
        b = relay.var('b', shape=(1, 256, 28, 28))

        # conv_bias_relu function
        in_1 = relay.var('in_1', shape=(1, 512, 28, 28))
        in_2 = relay.var('in_2', shape=(256, 512, 1, 1))
        in_3 = relay.var('in_3', shape=(256, ))

        conv_node = relay.nn.conv2d(in_1,
                                    in_2,
                                    kernel_size=(1, 1),
                                    padding=(0, 0),
                                    strides=(1, 1))

        bias_node = relay.nn.bias_add(conv_node, in_3)
        r = relay.nn.relu(bias_node)
        conv_bias_add_relu = relay.Function([in_1, in_2, in_3], r)
        conv_bias_add_relu = conv_bias_add_relu.set_attribute(
            "Primitive", tir.IntImm("int32", 1))
        conv_bias_add_relu = conv_bias_add_relu.set_attribute(
            "Composite", tir.StringImm("conv2d_bias_relu"))

        # add_relu function
        in_4 = relay.var('in_4', shape=(1, 256, 28, 28))
        in_5 = relay.var('in_5', shape=(1, 256, 28, 28))
        add_node = relay.add(in_4, in_5)
        r = relay.nn.relu(add_node)
        add_relu = relay.Function([in_4, in_5], r)
        add_relu = add_relu.set_attribute("Primitive", tir.IntImm("int32", 1))
        add_relu = add_relu.set_attribute("Composite",
                                          tir.StringImm("add_relu"))

        # merged function
        conv_bias_add_relu_1 = relay.Call(conv_bias_add_relu,
                                          [data, kernel, bias])
        add_relu_1 = relay.Call(add_relu, [conv_bias_add_relu_1, a])
        r = relay.multiply(add_relu_1, b)
        return relay.Function([data, kernel, bias, a, b], r)
Example #47
def graph2relay(graph: Graph, batch_size):
    node2var = {}
    params = {}

    for node in graph.nodes():
        if node is graph.enter_node:
            node2var[node] = relay.var(name_hint=node.name, shape=(batch_size, *node.output_shape))
        else:
            term_vars = []
            for term in node.inputs:
                value_vars = []
                for value in term:
                    if value.begin == 0 and value.end == value.node.output_shape[0]:
                        var = node2var[value.node]
                    else:
                        var = relay.strided_slice(node2var[value.node], begin=[0, value.begin, 0, 0], end=[batch_size, value.end, *value.node.output_shape[1:]])
                    value_vars.append(var)
                term_var = value_vars[0]
                for value_var in value_vars[1:]:
                    if isinstance(node, Element):
                        if node.op_type == 'mul':
                            term_var = relay.multiply(term_var, value_var)
                        elif node.op_type == 'add':
                            term_var = term_var + value_var
                        else:
                            raise ValueError
                    else:
                        term_var = term_var + value_var
                term_vars.append(term_var)
            if len(term_vars) > 1:
                x = relay.concatenate(term_vars, axis=1)
            else:
                x = term_vars[0]
            node2var[node] = do_layer(x, node, params)

    x = node2var[graph.exit_node]
    fn = relay.Function(relay.analysis.free_vars(x), x)

    if tvm_minor_version() <= 6:
        return relay.Module.from_expr(fn), params
    else:
        return tvm.ir.IRModule.from_expr(fn), params
Example #48
 def expected(x, w1, w2, b1, b2, scale1, scale2, newshape):
     args = [x, w1, w2, b1, b2, scale1, scale2]
     x_stacked = relay.stack((x, x), axis=0)
     w = relay.stack((w1, w2), axis=0)
     y = relay.nn.batch_matmul(x_stacked, w)
     b1 = relay.expand_dims(b1, 0)
     b2 = relay.expand_dims(b2, 0)
     b = relay.stack((b1, b2), axis=0)
     y = relay.add(y, b)
     scale1 = relay.expand_dims(scale1, 0)
     scale2 = relay.expand_dims(scale2, 0)
     scale = relay.stack((scale1, scale2), axis=0)
     y = relay.multiply(y, scale)
     (y1, y2) = relay.split(y, 2)
     y1 = relay.squeeze(y1, [0])
     y2 = relay.squeeze(y2, [0])
     y1 = relay.reshape(y1, newshape=newshape)
     y2 = relay.reshape(y2, newshape=newshape)
     y = relay.Tuple((y1, y2))
     return relay.Function(args, y)
Example #49
def test_lower_to_tir():
    data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
    weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
    p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
    conv = relay.nn.conv2d(
        data,
        weight,
        kernel_size=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="int32",
    )
    multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
    tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
    subtract = relay.subtract(conv, tile)
    func = subtract
    expr = relay.Function(relay.analysis.free_vars(func), func)
    mod = tvm.IRModule.from_expr(expr)
    mod = relay.transform.InferType()(mod)
    lower_to_tir(mod["main"])
    def expected():
        data = relay.var("data", shape=(1, 512, 28, 28))
        kernel = relay.var("kernel", shape=(256, 512, 1, 1))
        bias = relay.var("bias", shape=(256, ))
        a = relay.var("a", shape=(1, 256, 28, 28))
        b = relay.var("b", shape=(1, 256, 28, 28))

        # conv_bias_relu function
        in_1 = relay.var("in_1", shape=(1, 512, 28, 28))
        in_2 = relay.var("in_2", shape=(256, 512, 1, 1))
        in_3 = relay.var("in_3", shape=(256, ))

        conv_node = relay.nn.conv2d(in_1,
                                    in_2,
                                    kernel_size=(1, 1),
                                    padding=(0, 0),
                                    strides=(1, 1))

        bias_node = relay.nn.bias_add(conv_node, in_3)
        r = relay.nn.relu(bias_node)
        conv_bias_add_relu = relay.Function([in_1, in_2, in_3], r)
        conv_bias_add_relu = conv_bias_add_relu.with_attr(
            "Composite", "conv2d_bias_relu")
        conv_bias_add_relu = conv_bias_add_relu.with_attr(
            "PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")

        # add_relu function
        in_4 = relay.var("in_4", shape=(1, 256, 28, 28))
        in_5 = relay.var("in_5", shape=(1, 256, 28, 28))
        add_node = relay.add(in_4, in_5)
        r = relay.nn.relu(add_node)
        add_relu = relay.Function([in_4, in_5], r)
        add_relu = add_relu.with_attr("Composite", "add_relu")
        add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")

        # merged function
        conv_bias_add_relu_1 = relay.Call(conv_bias_add_relu,
                                          [data, kernel, bias])
        add_relu_1 = relay.Call(add_relu, [conv_bias_add_relu_1, a])
        r = relay.multiply(add_relu_1, b)
        return relay.Function([data, kernel, bias, a, b], r)
Example #51
 def before(x, conv_weight, in_bias, in_scale, channels):
     args = [x, conv_weight, in_bias]
     x = relay.multiply(in_scale, x)
     x = relay.nn.relu(x)
     x = relay.subtract(x, in_bias)
     y1 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          kernel_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     y2 = relay.nn.conv2d(x, conv_weight,
                          channels=channels,
                          kernel_size=(3, 3),
                          data_layout="NHWC",
                          kernel_layout="HWIO",
                          groups=channels,
                          padding=(1, 1))
     z = relay.add(y1, y2)
     return relay.Function(args, z)
Example #52
 def alter_conv2d(attrs, inputs, tinfos):
     data, weight = inputs
     weight = relay.multiply(weight, relay.const(2.0, "float32"))
     return relay.nn.conv2d(data, weight, **attrs)
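In the TVM versions this example targets, such a callback was hooked up with a decorator; a sketch (the import path matches older releases and is an assumption here):

    from tvm.relay.op import register_alter_op_layout

    @register_alter_op_layout("nn.conv2d", level=100)
    def alter_conv2d(attrs, inputs, tinfos):
        ...  # body as above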
Example #53
 def fold_conv_weight():
     # Squeeze the (C, 1, 1) scale to shape (C,), then broadcast it over
     # the weight's output-channel axis.
     squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
     return relay.multiply(
         conv_weight,
         relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
Example #54
def lstm_cell(num_hidden, batch_size=1, dtype="float32", name=""):
    """Long-Short Term Memory (LSTM) network cell.

    Parameters
    ----------
    num_hidden : int
        Number of units in output symbol.

    batch_size : int
        Batch size (length of states).

    dtype : str
        Data type of the cell's tensors.

    name : str
        Prefix used to name the dense sublayers.

    Returns
    -------
    result : tvm.relay.Function
        A Relay function that evaluates an LSTM cell.
        The function takes in a tensor of input data, a tuple of two
        states, and weights and biases for dense operations on the
        inputs and on the state. It returns a tuple with two members,
        an output tensor and a tuple of two new states.
    """
    builder = relay.ScopeBuilder()

    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    weight_type = relay.TensorType((4*num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4*num_hidden,), dtype)

    dense_type = relay.TensorType((batch_size, 4*num_hidden), dtype)
    slice_type = relay.TupleType([input_type, input_type,
                                  input_type, input_type])
    ret_type = relay.TupleType([input_type,
                                relay.TupleType([input_type, input_type])])

    inputs = relay.Var("inputs", input_type)
    states = relay.Var("states",
                       relay.TupleType([input_type, input_type]))

    i2h_weight = relay.Var("i2h_weight", weight_type)
    i2h_bias = relay.Var("i2h_bias", bias_type)

    h2h_weight = relay.Var("h2h_weight", weight_type)
    h2h_bias = relay.Var("h2h_bias", bias_type)

    i2h = builder.let(("i2h", dense_type),
                      layers.dense_add_bias(
                          data=inputs,
                          units=num_hidden * 4,
                          weight=i2h_weight, bias=i2h_bias,
                          name="%si2h" % name))
    h2h = builder.let(("h2h", dense_type),
                      layers.dense_add_bias(
                          data=relay.TupleGetItem(states, 0),
                          units=num_hidden * 4,
                          weight=h2h_weight, bias=h2h_bias,
                          name="%sh2h" % name))

    gates = builder.let(("gates", dense_type), relay.add(i2h, h2h))
    slice_gates = builder.let(("slice_gates", slice_type),
                              relay.split(gates,
                                          indices_or_sections=4,
                                          axis=1).astuple())

    in_gate = builder.let(("in_gate", input_type),
                          relay.sigmoid(relay.TupleGetItem(slice_gates, 0)))
    forget_gate = builder.let(("forget_gate", input_type),
                              relay.sigmoid(relay.TupleGetItem(slice_gates, 1)))
    in_transform = builder.let(("in_transform", input_type),
                               relay.tanh(relay.TupleGetItem(slice_gates, 2)))
    out_gate = builder.let(("out_gate", input_type),
                           relay.sigmoid(relay.TupleGetItem(slice_gates, 3)))

    next_c = builder.let(("next_c", input_type),
                         relay.add(relay.multiply(forget_gate,
                                                  relay.TupleGetItem(states, 1)),
                                   relay.multiply(in_gate, in_transform)))
    next_h = builder.let(("next_h", input_type),
                         relay.multiply(out_gate, relay.tanh(next_c)))
    ret = builder.let(("ret", ret_type),
                      relay.Tuple([next_h, relay.Tuple([next_h, next_c])]))
    builder.ret(ret)

    body = builder.get()

    return relay.Function([inputs, states, i2h_weight,
                           i2h_bias, h2h_weight, h2h_bias],
                          body, ret_type)
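A hedged usage sketch: build one cell and type-check it (tvm.IRModule is the modern entry point; older releases used relay.Module instead):

    cell = lstm_cell(num_hidden=64, batch_size=2, name="cell0_")
    mod = tvm.IRModule.from_expr(cell)
    mod = relay.transform.InferType()(mod)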
Example #55
 def fold_conv_weight():
     return relay.multiply(
         conv_weight,
         relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
Example #56
def test_call():
    # select right function to call: simple ident case
    id_func = relay.Var("id")
    assert parses_as(
        """
        let %id = fn (%x) { %x };
        10 * %id(10)
        """,
        relay.Let(
            id_func,
            relay.Function([X], X, None, []),
            relay.multiply(relay.const(10), relay.Call(id_func, [relay.const(10)]))
        )
    )

    # 0 args
    constant = relay.Var("constant")
    assert parses_as(
        """
        let %constant = fn () { 0 };
        %constant()
        """,
        relay.Let(
            constant,
            relay.Function([], relay.const(0), None, []),
            relay.Call(constant, [], None, None)
        )
    )

    # 1 arg
    id_var = relay.Var("id")
    assert parses_as(
        """
        let %id = fn (%x) { %x };
        %id(1)
        """,
        relay.Let(
            id_var,
            relay.Function([X], X, None, []),
            relay.Call(id_var, [relay.const(1)], None, None)
        )
    )

    # 2 args
    multiply = relay.Var("multiply")
    assert parses_as(
        """
        let %multiply = fn (%x, %y) { %x * %y };
        %multiply(0, 0)
        """,
        relay.Let(
            multiply,
            relay.Function(
                [X, Y],
                relay.multiply(X, Y),
                None,
                []
            ),
            relay.Call(multiply, [relay.const(0), relay.const(0)], None, None)
        )
    )

    # anonymous function
    assert parses_as(
        """
        (fn (%x) { %x })(0)
        """,
        relay.Call(
            relay.Function(
                [X],
                X,
                None,
                []
            ),
            [relay.const(0)],
            None,
            None
        )
    )

    # TODO(@jmp): re-enable after sequence parsing improvements
    # curried function
    # curried_mult = relay.Var("curried_mult")
    # assert parses_as(
    #     """
    #     let %curried_mult =
    #         fn (%x) {
    #         fn (%y) {
    #             %x * %y
    #         }
    #         };
    #     %curried_mult(0);
    #     %curried_mult(0)(0)
    #     """,
    #     relay.Let(
    #         curried_mult,
    #         relay.Function(
    #             [X],
    #             relay.Function(
    #                 [Y],
    #                 relay.multiply(X, Y),
    #                 None,
    #                 []
    #             ),
    #             None,
    #             []
    #         ),
    #         relay.Let(
    #             _,
    #             relay.Call(curried_mult, [relay.const(0)], None, None),
    #             relay.Call(relay.Call(curried_mult, [relay.const(0)], None, None), [relay.const(0)], None, None)
    #         )
    #     )
    # )

    # op
    assert parses_as(
        "abs(1)",
        relay.Call(relay.op.get("abs"), [relay.const(1)], None, None)
    )