Example 1
def test_transpose_infer_type():
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = relay.var("x", relay.TensorType((n, t, d), "float32"))
    y = relay.transpose(x, axes=(1, 0, 2))
    assert "axes=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (t, n, 100), "float32")

    y = relay.transpose(x)
    assert "axes=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (100, t, n), "float32")
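For reference, the two cases checked above match numpy's behaviour: explicit axes (1, 0, 2) swap the first two dimensions, while omitting axes reverses all of them. A minimal numpy-only sketch (shapes chosen arbitrarily for illustration):

import numpy as np

# Explicit axes (1, 0, 2) swap the first two dims; omitting axes reverses them all.
x = np.zeros((2, 3, 100), dtype="float32")
assert np.transpose(x, (1, 0, 2)).shape == (3, 2, 100)
assert np.transpose(x).shape == (100, 3, 2)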
Example 2
 def verify_reshape(shape, newshape):
     x = relay.var("x", relay.TensorType(shape, "float32"))
     z = relay.transpose(x, newshape)
     func = relay.Function([x], z)
     x_data = np.random.uniform(low=-1, high=1,
                                size=shape).astype("float32")
     verify_results(func, [x_data], "test_transpose", rtol=1e-5, atol=1e-5)
Example 3
def test_transpose_infer_type():
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = relay.var("x", relay.TensorType((n, t, d), "float32"))
    y = relay.transpose(x, axes=(1, 0, 2))
    "axes=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((t, n, 100), "float32")
Example 4
def test_name_sanitiser_name_clash():
    """Test that 2 input tensors with names that clash once sanitized, generates an error"""

    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_DEFAULT_RUNNER

    dtype = "float32"
    x = relay.var("input::-1", shape=(10, 5), dtype=dtype)
    # Next 2 input tensor names will clash once sanitized.
    y = relay.var("input::-2", shape=(10, 5), dtype=dtype)
    t = relay.var("input:--2", shape=(), dtype=dtype)
    a = relay.add(x, y)
    b = relay.transpose(a)
    z = relay.add(b, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype(dtype)
    y_data = np.random.rand(10, 5).astype(dtype)
    t_data = np.random.uniform(size=()).astype(dtype)

    inputs = {"input::-1": x_data, "input::-2": y_data, "input:--2": t_data}
    output_list = generate_ref_data(func, inputs)

    with pytest.raises(TVMError, match="Sanitized input tensor name clash"):
        compile_and_run(
            AOTTestModel(module=IRModule.from_expr(func),
                         inputs=inputs,
                         outputs=output_list),
            test_runner,
            interface_api,
            use_unpacked_api,
            enable_op_fusion=False,
        )
Example 5
def test_transpose(interface_api, use_unpacked_api, test_runner):
    """Test that non-inpleaceable operations (e.g., transpose) do not happen in-place."""

    dtype = "float32"
    x = relay.var("x", shape=(10, 5), dtype=dtype)
    y = relay.var("y", shape=(10, 5), dtype=dtype)
    t = relay.var("z", shape=(), dtype=dtype)
    a = relay.add(x, y)
    b = relay.transpose(a)
    z = relay.add(b, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype(dtype)
    y_data = np.random.rand(10, 5).astype(dtype)
    t_data = np.random.uniform(size=()).astype(dtype)

    inputs = {"x": x_data, "y": y_data, "z": t_data}
    output_list = generate_ref_data(func, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(func),
                     inputs=inputs,
                     outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
        enable_op_fusion=False,
    )
Example 6
def test_fake_transpose_quantize_conv_bias_add_per_channel():
    x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    bias = relay.var("bias", shape=[16], dtype="int32")
    one = relay.const(1.0)
    zero = relay.const(0)
    w_scale = (np.random.random([16]).astype("float32") - 0.5) / 10 + 0.5
    noise = (np.random.random([16]).astype("float32") - 0.5) * 1e-15
    w_zp = relay.const([0] * 16)

    x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
    x = relay.transpose(x, [0, 3, 1, 2])
    op = relay.op.nn.conv2d(x,
                            relay.qnn.op.dequantize(w,
                                                    relay.const(w_scale),
                                                    w_zp,
                                                    axis=0),
                            kernel_size=[5, 5])
    op = relay.op.nn.bias_add(
        op,
        relay.qnn.op.dequantize(bias,
                                relay.const(2.0 * w_scale + noise),
                                w_zp,
                                axis=0))
    op = relay.qnn.op.quantize(op, one, zero)

    x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
    w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
    bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")

    compare_fq_to_int(op, [x_np, w_np, bias_np], allow_rounding_error=True)
Example 7
 def convnet():
     """Alternating layout of simple convnet (from image super-resolution).
     """
     bias1 = relay.var('bias1', shape=(64,))
     bias2 = relay.var('bias2', shape=(64,))
     bias3 = relay.var('bias3', shape=(64,))
     bias4 = relay.var('bias4', shape=(64,))
     weight1 = relay.var('weight1', shape=(64, 1, 5, 5))
     weight2 = relay.var('weight2', shape=(64, 64, 3, 3))
     weight3 = relay.var('weight3', shape=(64, 64, 3, 3))
     weight4 = relay.var('weight4', shape=(64, 64, 3, 3))
     data = relay.var("x", shape=(1, 1, 224, 224))
     n00 = relay.nn.conv2d(data, weight1, padding=[2, 2], kernel_size=[5, 5])
     n01 = relay.expand_dims(bias1, axis=1, num_newaxis=2)
     n02 = relay.add(n00, n01)
     n03 = relay.nn.relu(n02)
     n04 = relay.nn.conv2d(n03, weight2, padding=[1, 1], kernel_size=[3, 3])
     n05 = relay.expand_dims(bias2, axis=1, num_newaxis=2)
     n06 = relay.add(n04, n05)
     n07 = relay.nn.relu(n06)
     n08 = relay.nn.conv2d(n07, weight3, padding=[1, 1], kernel_size=[3, 3])
     n09 = relay.expand_dims(bias3, axis=1, num_newaxis=2)
     n10 = relay.add(n08, n09)
     n11 = relay.nn.relu(n10)
     n12 = relay.nn.conv2d(n11, weight4, padding=[1, 1], kernel_size=[3, 3])
     n13 = relay.expand_dims(bias4, axis=1, num_newaxis=2)
     n14 = relay.add(n12, n13)
     n15 = relay.reshape(n14, newshape=[1, 1, 3, 3, 224, 224])
     n16 = relay.transpose(n15, axes=[0, 1, 4, 2, 5, 3])
     net = relay.reshape(n16, newshape=[1, 1, 672, 672])
     args = relay.ir_pass.free_vars(net)
     return relay.Function(args, net)
Example 8
def test_compile_injective_with_tuple():
    x = relay.var("x", shape=(2, 3))
    y = relay.var("y", shape=(2, 3))
    x_transpose = relay.transpose(x)
    output = relay.Tuple([x_transpose, y])
    func = relay.Function([x, y], output)
    relay.build(func, 'llvm')
Example 9
def layout_transform(tensor: "relay.Expr", current_layout: str,
                     desired_layout: str):
    """Transform a tensor with the current layout to the desired layout.

    E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3])

    Parameters
    ----------
    tensor: relay.Expr
        The Tensor to transpose

    current_layout: str
        The current layout e.g. NCHW or OIHW

    desired_layout: str
        The desired layout, must be compatible with current_layout

    Returns
    -------
    result : relay.Expr
        The tensor transformed to the desired layout.
    """
    if sorted(current_layout) != sorted(desired_layout):
        raise ValueError(
            f"Incompatible layouts: {current_layout} vs {desired_layout}")

    if current_layout == desired_layout:
        return tensor

    current_layout_map = {c: i for i, c in enumerate(current_layout)}
    desired_layout_map = {c: i for i, c in enumerate(desired_layout)}

    axes = [None] * len(current_layout)
    for c, i in desired_layout_map.items():
        axes[i] = current_layout_map[c]
    return relay.transpose(tensor, axes=axes)
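The permutation computed above can be sanity-checked without TVM. The sketch below mirrors the same mapping using numpy only; the helper name layout_axes is invented for illustration.

import numpy as np

def layout_axes(current_layout, desired_layout):
    # Same permutation logic as layout_transform above, restricted to layout strings.
    current_pos = {c: i for i, c in enumerate(current_layout)}
    return [current_pos[c] for c in desired_layout]

# NCHW -> NHWC should use axes [0, 2, 3, 1].
axes = layout_axes("NCHW", "NHWC")
assert axes == [0, 2, 3, 1]

# The resulting shape matches numpy: (1, 3, 224, 224) -> (1, 224, 224, 3).
x = np.zeros((1, 3, 224, 224), dtype="float32")
assert np.transpose(x, axes).shape == (1, 224, 224, 3)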
Example 10
def test_fake_transpose_quantize_conv_bias_add():
    x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    bias = relay.var("bias", shape=[16], dtype="int32")
    one = relay.const(1.0)
    zero = relay.const(0)

    x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
    x = relay.transpose(x, [0, 3, 1, 2])
    op = relay.op.nn.conv2d(x,
                            relay.qnn.op.dequantize(w, relay.const(0.5), zero))
    op = relay.op.nn.bias_add(op, relay.qnn.op.dequantize(bias, one, zero))
    op = relay.qnn.op.quantize(op, one, zero)

    mod = tvm.IRModule.from_expr(op)
    mod = tvm.relay.transform.InferType()(mod)

    x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
    w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
    bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")

    mod2 = tvm.relay.transform.FakeQuantizationToInteger()(mod)
    assert not tvm.ir.structural_equal(mod, mod2)
    mod2 = tvm.relay.transform.FoldConstant()(mod2)

    ex = relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm")
    result = ex.evaluate()(x_np, w_np, bias_np).asnumpy()

    ex = relay.create_executor("vm", mod=mod2, device=tvm.cpu(), target="llvm")
    result2 = ex.evaluate()(x_np, w_np, bias_np).asnumpy()

    assert np.array_equal(result, result2)
Example 11
 def before():
     x = relay.var("x", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.squeeze(y)
     u = relay.transpose(y, axes=[0, 1])
     w = relay.left_shift(z, u)
     return relay.Function([x], w)
Example 12
 def convnet():
     """Alternating layout of simple convnet (from image super-resolution).
     """
     bias1 = relay.var('bias1', shape=(64,))
     bias2 = relay.var('bias2', shape=(64,))
     bias3 = relay.var('bias3', shape=(64,))
     bias4 = relay.var('bias4', shape=(64,))
     weight1 = relay.var('weight1', shape=(64, 1, 5, 5))
     weight2 = relay.var('weight2', shape=(64, 64, 3, 3))
     weight3 = relay.var('weight3', shape=(64, 64, 3, 3))
     weight4 = relay.var('weight4', shape=(64, 64, 3, 3))
     data = relay.var("x", shape=(1, 1, 224, 224))
     n00 = relay.nn.conv2d(data, weight1, padding=[2, 2], kernel_size=[5, 5])
     n01 = relay.expand_dims(bias1, axis=1, num_newaxis=2)
     n02 = relay.add(n00, n01)
     n03 = relay.nn.relu(n02)
     n04 = relay.nn.conv2d(n03, weight2, padding=[1, 1], kernel_size=[3, 3])
     n05 = relay.expand_dims(bias2, axis=1, num_newaxis=2)
     n06 = relay.add(n04, n05)
     n07 = relay.nn.relu(n06)
     n08 = relay.nn.conv2d(n07, weight3, padding=[1, 1], kernel_size=[3, 3])
     n09 = relay.expand_dims(bias3, axis=1, num_newaxis=2)
     n10 = relay.add(n08, n09)
     n11 = relay.nn.relu(n10)
     n12 = relay.nn.conv2d(n11, weight4, padding=[1, 1], kernel_size=[3, 3])
     n13 = relay.expand_dims(bias4, axis=1, num_newaxis=2)
     n14 = relay.add(n12, n13)
     n15 = relay.reshape(n14, newshape=[1, 1, 3, 3, 224, 224])
     n16 = relay.transpose(n15, axes=[0, 1, 4, 2, 5, 3])
     net = relay.reshape(n16, newshape=[1, 1, 672, 672])
     args = relay.analysis.free_vars(net)
     return relay.Function(args, net)
Example 14
def test_compile_injective_with_tuple():
    x = relay.var("x", shape=(2, 3))
    y = relay.var("y", shape=(2, 3))
    x_transpose = relay.transpose(x)
    output = relay.Tuple([x_transpose, y])
    func = relay.Function([x, y], output)
    relay.build(tvm.IRModule.from_expr(func), 'llvm')
Example 15
 def test_transpose_weights_dense(x_shape=(1, 16), k_shape=(16, 32)):
     x = relay.var('x', shape=(x_shape), dtype='float32')
     kernel = relay.var('kernel', shape=(k_shape), dtype='float32')
     kernel_t = relay.transpose(kernel, (1, 0))
     # Dense requires constant weights in TensorRT, so the weights are transposed by us.
     out = relay.nn.dense(x, kernel_t)
     f = relay.Function([x, kernel], out)
     return f, {'x': x_shape, 'kernel': k_shape}
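As a quick shape check of why the transpose is needed (numpy only): relay.nn.dense expects its weight as (units, in_units) and computes data @ weight.T, so the (16, 32) kernel is transposed to (32, 16) before the dense call. A minimal sketch of the resulting shapes:

import numpy as np

# relay.nn.dense(data, weight) computes data @ weight.T with weight laid out as
# (units, in_units), so the (16, 32) kernel must become (32, 16) before the call.
x = np.random.rand(1, 16).astype("float32")
kernel = np.random.rand(16, 32).astype("float32")
kernel_t = kernel.T                      # (32, 16) == (units, in_units)
out = x @ kernel_t.T                     # what dense computes; same as x @ kernel
assert out.shape == (1, 32)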
Example 16
 def test_transpose_weights_conv2d(x_shape=(1, 32, 9, 9), k_shape=(3, 3, 32, 16), order=(3, 2, 0, 1)):
     x = relay.var('x', shape=(x_shape), dtype='float32')
     kernel = relay.var('kernel', shape=(k_shape), dtype='float32')
     kernel_t = relay.transpose(kernel, order)
     # Conv2d requires constant weights in TensorRT, so the weights are transposed by us.
     out = relay.nn.conv2d(x, kernel_t, channels=k_shape[order[0]], kernel_size=(3, 3))
     f = relay.Function([x, kernel], out)
     return f, {'x': x_shape, 'kernel': k_shape}
Example 17
def before4():
    """
    Simplify transpose->layout_transform and its inverse.

    Input:
    NHWC -> NCHW -> NCHW4c -> op -> NCHW4c -> NCHW -> NHWC

    Simplified:
    NHWC -> NCHW4c -> op -> NCHW4c -> NHWC
    """
    x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")
    y = relay.transpose(x, axes=[0, 3, 1, 2])
    y = relay.layout_transform(y, "NCHW", "NCHW4c")
    y = relay.nn.relu(y)
    y = relay.layout_transform(y, "NCHW4c", "NCHW")
    y = relay.transpose(y, axes=[0, 2, 3, 1])
    return relay.Function([x], y)
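The simplification is valid because the two transposes are inverse permutations: composing axes [0, 3, 1, 2] (NHWC -> NCHW) with [0, 2, 3, 1] (NCHW -> NHWC) yields the identity. A numpy-only check:

import numpy as np

# Composing axes [0, 3, 1, 2] (NHWC -> NCHW) with [0, 2, 3, 1] (NCHW -> NHWC)
# yields the identity permutation, so the transpose pair can be removed.
to_nchw = [0, 3, 1, 2]
to_nhwc = [0, 2, 3, 1]
assert [to_nchw[i] for i in to_nhwc] == [0, 1, 2, 3]

x = np.random.rand(1, 56, 56, 128).astype("float32")
np.testing.assert_array_equal(np.transpose(np.transpose(x, to_nchw), to_nhwc), x)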
Example 18
def conv2d_transpose_legalize(attrs, inputs, types):
    """Legalizes Transposed 2D convolution op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current Transposed 2D convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """

    data, kernel = inputs
    kernel_layout = attrs["kernel_layout"]
    if attrs["data_layout"] == "NHWC":
        kernel = layout_transform(kernel, kernel_layout, "IOHW")

        # Set new attrs for conv2d_transpose.
        new_attrs = {k: attrs[k] for k in attrs.keys()}
        new_attrs["data_layout"] = "NCHW"
        # The kernel has been converted to IOHW above, so record that layout.
        new_attrs["kernel_layout"] = "IOHW"

        # Convert data to NCHW.
        data = relay.transpose(data, axes=(0, 3, 1, 2))
        deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
        # Convert back to original NHWC layout.
        out = relay.transpose(deconv, axes=(0, 2, 3, 1))
        return out

    if attrs["data_layout"] == "NCHW":
        kernel = layout_transform(kernel, kernel_layout, "IOHW")
        new_attrs = {k: attrs[k] for k in attrs.keys()}

        # The kernel has been converted to IOHW above, so record that layout.
        new_attrs["kernel_layout"] = "IOHW"
        return relay.nn.conv2d_transpose(data, kernel, **new_attrs)

    return None
Example 19
 def get_graph(x_shape=(1, 32, 9, 9), k_shape=(3, 3, 32, 16), order=(3, 2, 0, 1)):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     kernel = relay.var("kernel", shape=(k_shape), dtype="float32")
     kernel_t = relay.transpose(kernel, order)
     # Conv2d requires constant weights in TensorRT, so the weights should be transposed by
     # FoldConstant.
     out = relay.nn.conv2d(x, kernel_t, channels=k_shape[order[0]], kernel_size=(3, 3))
     f = relay.Function([x, kernel], out)
     return f, {"x": x_shape, "kernel": k_shape}, ["kernel"]
Example 20
def verify_any_transpose(data_shape, axes, static_data_shape):
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.transpose(data, axes=axes)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out = np.transpose(data_np, axes)
    check_result([data_np], mod, ref_out)
Example 21
def _conv2d_legalize(attrs, inputs, arg_types):
    """Legalizes Conv2D op.

    Parameters
    ----------
    attrs : tvm.attrs.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """

    if attrs['data_layout'] == 'NHWC':
        data, kernel = inputs
        if attrs['kernel_layout'] == 'HWIO':
            # Handle HWIO layout. This is common in TF graph.
            kernel = relay.transpose(kernel, axes=(3, 2, 0, 1))
        elif attrs['kernel_layout'] == 'HWOI':
            # Handle HWOI layout. This is common in TF depthwise conv2d graph.
            kernel = relay.transpose(kernel, axes=(2, 3, 0, 1))
        elif attrs['kernel_layout'] != 'OIHW':
            return None

        logger.warning(
            "Legalize arm_cpu - NHWC schedule absent. Inserting layout transforms to "
            + "fallback to NCHW. This can result in performance degradation.")
        # Set new attrs for the conv in the transposed (NCHW/OIHW) layout.
        new_attrs = {k: attrs[k] for k in attrs.keys()}
        new_attrs['data_layout'] = 'NCHW'
        new_attrs['kernel_layout'] = 'OIHW'

        # Convert from NHWC to NCHW.
        data = relay.transpose(data, axes=(0, 3, 1, 2))
        conv = relay.nn.conv2d(data, kernel, **new_attrs)
        # Convert back to original NHWC layout.
        out = relay.transpose(conv, axes=(0, 2, 3, 1))
        return out
    return None
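The axes used for the kernel conversions above can be verified with a numpy shape check (dimensions chosen arbitrarily): HWIO -> OIHW uses (3, 2, 0, 1) and HWOI -> OIHW uses (2, 3, 0, 1).

import numpy as np

# HWIO -> OIHW uses axes (3, 2, 0, 1); HWOI -> OIHW uses axes (2, 3, 0, 1).
hwio = np.zeros((5, 5, 3, 16))                                  # H, W, I, O
assert np.transpose(hwio, (3, 2, 0, 1)).shape == (16, 3, 5, 5)  # O, I, H, W

hwoi = np.zeros((5, 5, 16, 3))                                  # H, W, O, I
assert np.transpose(hwoi, (2, 3, 0, 1)).shape == (16, 3, 5, 5)  # O, I, H, W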
Example 22
def test_transpose_infer_type():
    ib = relay.ir_builder.IRBuilder()
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = ib.param("x", relay.ty.TensorType((n, t, d), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.transpose(x, axes=(1, 0, 2)))
    ib.ret(func)
    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((t, n, 100), "float32")
Example 23
 def expected():
     x = relay.var("p", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.squeeze(y)
     u = relay.transpose(y, axes=[0, 1])
     w = relay.left_shift(z, u)
     f1 = relay.Function([x], w)
     x = relay.var("x", shape=(10, 20))
     y = relay.Call(f1, [x])
     return relay.Function([x], y)
Example 24
def test_simplify_fc_transpose():
    data = relay.var("data", shape=(1, 32), dtype="float32")
    x = relay.nn.relu(data)
    w1 = relay.var("w1", shape=(32, 64), dtype="float32")
    y = relay.nn.dense(x, relay.transpose(w1, axes=[1, 0]))
    z = relay.nn.relu(y)
    w2 = relay.var("w2", shape=(64, 16), dtype="float32")
    zz = relay.nn.dense(z, relay.transpose(w2, axes=[1, 0]))
    func = relay.Function(relay.analysis.free_vars(zz), zz)
    params = {
        "w1": tvm.nd.array(np.random.uniform(-1, 1, (32, 64)).astype("float32")),
        "w2": tvm.nd.array(np.random.uniform(-1, 1, (64, 16)).astype("float32")),
    }
    x_np = np.random.randn(1, 32).astype("float32")
    old_result = run_func(func, params, x_np)

    new_func, new_params = simplify_fc_transpose.convert(func, params)
    new_result = run_func(new_func, new_params, x_np)
    np.testing.assert_allclose(old_result, new_result, atol=1e-5, rtol=1e-5)
Example 26
def tvm_lenet(num_classes=10,
              data_shape=(1, 1, 32, 32),
              dtype='float32',
              alpha=1.0,
              is_shallow=False):
    """Function to construct a LeNet."""
    from tvm import relay
    from tvm.relay.testing import layers
    data = relay.var("data", shape=data_shape, dtype=dtype)
    conv1 = layers.conv2d(data=data,
                          channels=6,
                          kernel_size=(5, 5),
                          name='conv1')
    conv1 = relay.tanh(conv1)
    pool2 = relay.nn.avg_pool2d(conv1, pool_size=(2, 2), strides=(2, 2))
    conv3 = layers.conv2d(data=pool2,
                          channels=16,
                          kernel_size=(5, 5),
                          name='conv3')
    conv3 = relay.tanh(conv3)
    pool4 = relay.nn.avg_pool2d(conv3, pool_size=(2, 2), strides=(2, 2))

    conv5 = layers.conv2d(data=pool4,
                          channels=120,
                          kernel_size=(5, 5),
                          name='conv5')
    conv5 = relay.tanh(conv5)
    # Temp
    flattened6 = relay.reshape(conv5, (1, 120))
    # flattened6 = relay.nn.batch_flatten(conv5)
    fcw7 = relay.var('fc7_weight', shape=(120, 84))
    fcw7 = relay.transpose(fcw7)
    fc7 = relay.nn.dense(data=flattened6, weight=fcw7, units=84)
    fc7 = relay.tanh(fc7)

    fcw8 = relay.var('fc8_weight', shape=(84, 10))
    fcw8 = relay.transpose(fcw8)

    fc8 = relay.nn.dense(data=fc7, weight=fcw8, units=10)

    softmax = relay.nn.softmax(data=fc8)
    fn = relay.Function(relay.analysis.free_vars(softmax), softmax)
    return fn
Example 27
 def expected():
     x = relay.var("p", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.squeeze(y)
     u = relay.transpose(y, axes=[0, 1])
     w = relay.left_shift(z, u)
     f1 = relay.Function([x], w)
     f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
     x = relay.var("x", shape=(10, 20))
     y = relay.Call(f1, [x])
     return relay.Function([x], y)
Example 28
 def verify_transpose(dshape, axes):
     x = relay.var('x', relay.TensorType(dshape, 'float32'))
     z = relay.transpose(x, axes=axes)
     func = relay.Function([x], z)
     x_data = np.random.uniform(low=-1, high=1, size=dshape).astype('float32')
     ref_res = np.transpose(x_data, axes=axes)
     for target, ctx in tvm.testing.enabled_targets():
         for kind in ['graph', 'debug']:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(func)(x_data)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-05)
Example 29
def verify_any_transpose(data_shape, axes, static_data_shape):
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var('data', shape=data_shape, dtype=dtype)
    y = relay.transpose(data, axes=axes)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out = np.transpose(data_np, axes)
    for kind in ["debug", "vm"]:
        ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
        result = ex.evaluate()(data_np)
        tvm.testing.assert_allclose(result.asnumpy(), ref_out)
Example 30
def tvm_gemm(node, ctx):
    inputs = [
        ctx[node.args[0].name], ctx[node.args[1].name], ctx[node.args[2].name]
    ]
    alpha = node.kwargs['alpha']
    beta = node.kwargs['beta']
    transA = node.kwargs['transA']
    transB = node.kwargs['transB']
    channels = infer_channels(inputs[1], not transB)
    if transA:
        inputs[0] = relay.transpose(inputs[0], axes=(1, 0))
    if transB:
        inputs[1] = relay.transpose(inputs[1], axes=(1, 0))
    # inputs[0] = relay.nn.batch_flatten(inputs[0])

    if alpha != 1.0:
        inputs[0] *= relay.expr.const(alpha)
    out = relay.nn.dense(inputs[0], inputs[1], units=channels)

    # The (beta * C) term is not applied here; beta is read but ignored.
    return node.args[3].name, out
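For reference, the Gemm node handled here follows the ONNX-style contract Y = alpha * A' @ B' + beta * C, where A' and B' are optionally transposed. The numpy sketch below only states that contract; how transA/transB and beta map onto relay.nn.dense depends on this converter's weight layout.

import numpy as np

def gemm_ref(a, b, c, alpha=1.0, beta=1.0, transA=0, transB=0):
    # Y = alpha * A' @ B' + beta * C, with A'/B' optionally transposed.
    a = a.T if transA else a
    b = b.T if transB else b
    return alpha * (a @ b) + beta * c

a = np.random.rand(4, 8).astype("float32")
b = np.random.rand(16, 8).astype("float32")   # weight stored as (out, in), so transB=1
c = np.random.rand(16).astype("float32")
assert gemm_ref(a, b, c, alpha=0.5, transB=1).shape == (4, 16)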
Example 31
    def verify_transpose(dshape, axes):
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.transpose(x, axes=axes)

        func = relay.Function([x], z)
        x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        ref_res = np.transpose(x_data, axes=axes)

        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
Example 33
def test_fake_transpose_quantize_conv():
    x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    one = relay.const(1.0)
    zero = relay.const(0)

    x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
    x = relay.transpose(x, [0, 3, 1, 2])
    op = relay.op.nn.conv2d(x, relay.qnn.op.dequantize(w, relay.const(0.5), zero))
    op = relay.qnn.op.quantize(op, one, zero)

    x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
    w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")

    compare_fq_to_int(op, [x_np, w_np])
Example 34
    def test_transpose(self):
        data = relay.var("data", relay.TensorType((-1, 3, 2, 2), "float32"))

        net = relay.transpose(data, axes=(0, 2, 3, 1))

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[0].shapes == [-1, 3, 2, 2]
        assert layers[1].type[0] == "Transpose"
        assert layers[1].shapes == [-1, 2, 2, 3]
Example 35
def test_fake_transpose_quantize_conv_bias_add_mismatch():
    x = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    bias = relay.var("bias", shape=[16], dtype="int32")
    one = relay.const(1.0)
    two = relay.const(2.0)
    zero = relay.const(0)

    x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
    x = relay.transpose(x, [0, 3, 1, 2])
    op = relay.op.nn.conv2d(x, relay.qnn.op.dequantize(w, relay.const(0.5), zero))
    op = relay.op.nn.bias_add(op, relay.qnn.op.dequantize(bias, two, zero))
    op = relay.qnn.op.quantize(op, one, zero)

    x_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
    w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
    bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")

    compare_fq_to_int(op, [x_np, w_np, bias_np])
Example 36
    def test_transpose_constant(self):
        d = np.zeros((1, 3, 2, 2))
        data = relay.var("data", relay.TensorType((1, 3, 2, 2), "float32"))

        net = relay.transpose(data, axes=(0, 2, 3, 1))
        net = relay.Tuple([net])
        net = relay.Function(relay.analysis.free_vars(net), net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {"data": d})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[0].shapes == [1, 2, 2, 3]
        np.testing.assert_array_equal(layers[0].data[0],
                                      np.transpose(d, (0, 2, 3, 1)))