Example #1
    def validate(shape, value, dtype):
        def before_left(x, elem_op, full):
            return elem_op(full, x)

        def after_left(x, elem_op, value):
            return elem_op(relay.const(value, dtype), x)

        def before_right(x, elem_op, full):
            return elem_op(x, full)

        def after_right(x, elem_op, value):
            return elem_op(x, relay.const(value, dtype))

        x = relay.var("x", shape=shape, dtype=dtype)
        elem_ops = [relay.add, relay.multiply, relay.subtract, relay.divide]
        full_ops = []
        if value == 0:
            full_ops.append(relay.zeros(shape, dtype))
            full_ops.append(relay.zeros_like(x))
        elif value == 1:
            full_ops.append(relay.ones(shape, dtype))
            full_ops.append(relay.ones_like(x))
        else:
            full_ops.append(relay.full(relay.const(value, dtype), shape))
            full_ops.append(relay.full_like(x, relay.const(value, dtype)))
        for op in elem_ops:
            for full in full_ops:
                z = before_left(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(after_left(x, op, value),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

                z = before_right(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(after_right(x, op, value),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

        # When x must be broadcast up to full's shape, the expression cannot
        # be simplified away, so the pass is expected to leave it untouched.
        full_ops = []
        if value == 0:
            full_ops.append(relay.zeros(shape * 2, dtype))
        elif value == 1:
            full_ops.append(relay.ones(shape * 2, dtype))
        else:
            full_ops.append(relay.full(relay.const(value, dtype), shape * 2))
        for op in elem_ops:
            for full in full_ops:
                z = before_left(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(before_left(x, op, full),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

                z = before_right(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(before_right(x, op, full),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)
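
    # A minimal driver (sketch): the shapes, values, and dtypes below are
    # illustrative, not from the original test. value == 2 exercises the
    # generic relay.full path; 0 and 1 exercise zeros/ones.
    for dtype in ["float32", "int32"]:
        for value in [0, 1, 2]:
            validate([10, 10], value, dtype)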
Example #2
def check(x, y=None, do_nothing=False):
    # `x` is the expected expression; `y` is the input to be simplified.
    # With do_nothing=True, SimplifyExpr is expected to leave `x` unchanged.
    expected = run_infer_type(x)
    if do_nothing:
        actual = run_opt_pass(x, transform.SimplifyExpr())
        assert tvm.ir.structural_equal(actual, expected)
    else:
        assert y is not None
        actual = run_opt_pass(y, transform.SimplifyExpr())
        assert tvm.ir.structural_equal(actual, expected)
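
For context, a sketch of how this helper might be invoked; the expressions are illustrative, and `run_infer_type`/`run_opt_pass` are assumed to come from `tvm.relay.testing`:

x = relay.var("x", shape=(4, 4), dtype="float32")
# `before` should simplify to `expected`: consecutive reshapes fold into one.
expected = relay.reshape(x, newshape=(16,))
before = relay.reshape(relay.reshape(x, newshape=(2, 8)), newshape=(16,))
check(expected, before)
check(relay.nn.relu(x), do_nothing=True)  # a lone relu should be left untouched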
Example #3
def partition_for_dnnl(mod, params=None):
    """Partition the graph greedily offloading supported operators to DNNL.

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.

    Returns
    -------
    mod : Module
        Annotated and partitioned module.
    """

    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    seq = tvm.transform.Sequential([
        transform.CanonicalizeOps(),
        transform.InferType(),
        transform.SimplifyInference(),
        transform.FoldConstant(),
        transform.FoldScaleAxis(),
        # fold consecutive add ops to simplify pattern `conv2d-bias_add-bn-relu`
        transform.SimplifyExpr(),
        transform.FoldConstant(),
        transform.MergeComposite(pattern_table()),
        transform.AnnotateTarget("dnnl"),
        transform.MergeCompilerRegions(),
        transform.PartitionGraph(),
    ])
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    return mod
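
A hypothetical call site (the module below is illustrative; which ops actually get offloaded depends on the `pattern_table()` registered in the surrounding file):

x = relay.var("x", shape=(1, 32, 14, 14), dtype="float32")
w = relay.var("w", shape=(32, 32, 3, 3), dtype="float32")
y = relay.nn.relu(relay.nn.conv2d(x, w, padding=(1, 1)))
mod = tvm.IRModule.from_expr(relay.Function([x, w], y))
mod = partition_for_dnnl(mod)  # supported ops end up in "dnnl"-tagged functions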
Example #4
def test_simplify_transpose():
    # Test a series of transpose and layout_transform ops
    def before1():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.transpose(x, axes=[0, 2, 3, 1])  # To NHWC
        y = relay.layout_transform(y, "NHWC", "HWCN")  # To HWCN
        y = relay.transpose(y, axes=[3, 0, 1, 2])  # To NHWC
        return relay.Function([x], y)

    def expected1():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.transpose(x, axes=[0, 2, 3, 1])  # To NHWC
        return relay.Function([x], y)

    # Test that all transpose ops can be cancelled
    def before2():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y, axes=[0, 2, 3, 1])  # To NHWC
        y = relay.transpose(y, axes=[1, 2, 3, 0])  # To HWCN
        y = relay.transpose(y, axes=[3, 2, 0, 1])  # To NCHW
        return relay.Function([x], y)

    def expected2():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    # Test default axis (reverse) and negative axis
    def before3():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y, axes=[0, 2, -1, 1])
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y)  # Reverse
        return relay.Function([x], y)

    def expected3():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y, axes=[0, 2, 3, 1])
        return relay.Function([x], y)

    for before, expected in [
        [before1(), expected1()],
        [before2(), expected2()],
        [before3(), expected3()],
    ]:
        after = run_opt_pass(before, transform.SimplifyExpr())
        expected = run_opt_pass(expected, transform.InferType())
        assert tvm.ir.structural_equal(
            after,
            expected), "\nafter: {} \nexpected: {}".format(after, expected)
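
The cancellation in before2 is just permutation composition; a plain-Python sketch (not TVM API) verifying that the three axes lists compose to the identity:

def compose(second, first):
    # Composed transpose axes: applying `first` then `second` picks
    # input axis first[second[i]] for output axis i.
    return [first[i] for i in second]

p = compose([3, 2, 0, 1], compose([1, 2, 3, 0], [0, 2, 3, 1]))
assert p == [0, 1, 2, 3]  # identity permutation, so the transposes cancel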
Example #5
def test_simplify_consecutive_add():
    shape = (32, 1, 1)
    # Both constants share the same (uninitialized) buffer; the test only
    # checks structural equality, so the actual values never matter.
    c_data = np.empty(shape).astype("float32")
    c1 = relay.const(c_data)
    c2 = relay.const(c_data)

    def before_const_right():
        x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.add(y, c1)
        y = relay.add(y, c2)
        y = relay.nn.relu(y)
        return relay.Function([x, w], y)

    def before_const_left():
        x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.add(c1, y)
        y = relay.add(c2, y)
        y = relay.nn.relu(y)
        return relay.Function([x, w], y)

    def expected():
        x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        c3 = relay.add(c1, c2)
        y = relay.add(y, c3)
        y = relay.nn.relu(y)
        return relay.Function([x, w], y)

    zr = before_const_right()
    zl = before_const_left()
    zzr = run_opt_pass(zr, transform.SimplifyExpr())
    zzl = run_opt_pass(zl, transform.SimplifyExpr())
    after = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(zzr, after)
    assert tvm.ir.structural_equal(zzl, after)
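
The rewrite reassociates the two constant adds into one; a quick numpy sanity check of the identity being exploited (shapes mirror the test):

import numpy as np

y = np.random.rand(1, 32, 16, 16).astype("float32")   # conv2d output shape
c1 = np.random.rand(32, 1, 1).astype("float32")
c2 = np.random.rand(32, 1, 1).astype("float32")
np.testing.assert_allclose((y + c1) + c2, y + (c1 + c2), rtol=1e-6)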
Example #6
def test_simplify_reshape():
    def before():
        x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.reshape(y, newshape=(1, 16, -1))
        y = relay.reshape(y, newshape=(4, 8, -1, 16))
        y = relay.reverse_reshape(y, newshape=(32, 0, -1))
        return relay.Function([x, w], y)

    def expected():
        x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.reshape(y, newshape=(32, 16, 16))
        return relay.Function([x, w], y)

    def symbolic():
        b = tvm.te.size_var("b")
        x = relay.var("x", shape=(b, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.reshape(y, newshape=(1, 16, -1))
        y = relay.reshape(y, newshape=(4, 8, -1, 16))
        y = relay.reverse_reshape(y, newshape=(32, 0, -1))
        return relay.Function([x, w], y)

    z = before()
    zz = run_opt_pass(z, transform.SimplifyExpr())
    after = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)

    # With a symbolic batch dimension, the pass leaves the reshapes as-is,
    # so the expected output equals the input.
    z = symbolic()
    zz = run_opt_pass(z, transform.SimplifyExpr())
    after = run_opt_pass(symbolic(), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
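
Tracing shapes shows why the three reshapes in before() collapse to the single reshape in expected() (a numpy sketch; reverse_reshape's 0/-1 handling is worked out in the comments, since numpy has no direct equivalent):

import numpy as np

out = np.zeros((1, 32, 16, 16))        # conv2d output: 8192 elements
s1 = out.reshape(1, 16, -1).shape       # (1, 16, 512)
s2 = out.reshape(4, 8, -1, 16).shape    # (4, 8, 16, 16)
# reverse_reshape(newshape=(32, 0, -1)) aligns newshape from the right:
# 0 copies the matching input dim (16) and -1 infers 16, giving (32, 16, 16),
# which is exactly the one reshape kept in expected().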
Example #7
    def validate(ndim, pad_width, pad_value, pad_mode, orig_padding, layout):
        # `convs` is assumed to be defined by the enclosing test, e.g.
        # convs = [relay.nn.conv1d, relay.nn.conv2d, relay.nn.conv3d].
        if layout[1] == "C":
            shape = [1, 3] + [10] * ndim
            wshape = [8, 3] + [3] * ndim
        elif layout[-1] == "C":
            shape = [1] + [10] * ndim + [3]
            wshape = [8] + [3] * ndim + [3]
        else:
            raise ValueError("This test only supports NC* and N*C")

        x = relay.var("x", shape=shape, dtype="float32")
        w = relay.var("w", shape=wshape, dtype="float32")
        pad = relay.nn.pad(x, pad_width, pad_value, pad_mode)
        if layout[1] == "C":
            conv = convs[ndim - 1](pad, w, padding=orig_padding)
        else:
            conv = convs[ndim - 1](pad,
                                   w,
                                   padding=orig_padding,
                                   data_layout=layout,
                                   kernel_layout="DHWIO"[3 - ndim:])

        if pad_mode == "constant" and pad_value == 0:
            new_padding = []
            for j in range(2):
                for i in range(len(pad_width)):
                    if layout[i] in ["D", "H", "W"]:
                        new_padding.append(pad_width[i][j])
            for i in range(len(new_padding)):
                new_padding[i] += orig_padding[i]
            if layout[1] == "C":
                after = convs[ndim - 1](x, w, padding=new_padding)
            else:
                after = convs[ndim - 1](x,
                                        w,
                                        padding=new_padding,
                                        data_layout=layout,
                                        kernel_layout="DHWIO"[3 - ndim:])
        else:
            after = conv

        zz = run_opt_pass(conv, transform.SimplifyExpr())
        expected = run_opt_pass(after, transform.InferType())
        assert tvm.ir.structural_equal(zz, expected)

        mod1 = tvm.IRModule.from_expr(conv)
        mod2 = tvm.IRModule.from_expr(zz)

        # Disable SimplifyExpr so the reference module is compiled without
        # the simplification under test (pass name assumed from context;
        # it was masked in the original listing).
        with tvm.transform.PassContext(disabled_pass=["SimplifyExpr"]):
            ex1 = relay.create_executor("vm",
                                        mod=mod1,
                                        ctx=tvm.cpu(),
                                        target="llvm")
        ex2 = relay.create_executor("vm",
                                    mod=mod2,
                                    ctx=tvm.cpu(),
                                    target="llvm")
        x_np = np.random.rand(*shape).astype("float32")
        w_np = np.random.rand(*wshape).astype("float32")
        result1 = ex1.evaluate()(x_np, w_np)
        result2 = ex2.evaluate()(x_np, w_np)

        tvm.testing.assert_allclose(result1.asnumpy(), result2.asnumpy())
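
    # A hypothetical invocation (illustrative, not from the original test):
    # a 2-D NCHW conv whose constant zero pad should fold into the conv's
    # own padding.
    validate(
        ndim=2,
        pad_width=[[0, 0], [0, 0], [1, 1], [1, 1]],  # pad H and W by one
        pad_value=0,
        pad_mode="constant",
        orig_padding=[0, 0, 0, 0],
        layout="NCHW",
    )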
Example #8
def test_simplify_transpose():
    # Test a series of transpose and layout_transform ops
    def before1():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.transpose(x, axes=[0, 2, 3, 1])  # To NHWC
        y = relay.layout_transform(y, "NHWC", "HWCN")  # To HWCN
        y = relay.transpose(y, axes=[3, 0, 1, 2])  # To NHWC
        return relay.Function([x], y)

    def expected1():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.transpose(x, axes=[0, 2, 3, 1])  # To NHWC
        return relay.Function([x], y)

    # Test that all transpose ops can be cancelled
    def before2():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y, axes=[0, 2, 3, 1])  # To NHWC
        y = relay.transpose(y, axes=[1, 2, 3, 0])  # To HWCN
        y = relay.transpose(y, axes=[3, 2, 0, 1])  # To NCHW
        return relay.Function([x], y)

    def expected2():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    # Test default axis (reverse) and negative axis
    def before3():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y, axes=[0, 2, -1, 1])
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y)  # Reverse
        return relay.Function([x], y)

    def expected3():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y, axes=[0, 2, 3, 1])
        return relay.Function([x], y)

    # Test a series of transpose and rank changing layout_transform
    def before4():
        """
        Simplify transpose->layout_transform and its inverse.

        Input:
        NHWC -> NCHW -> NCHW4c -> op -> NCHW4c -> NCHW -> NHWC

        Simplified:
        NHWC -> NCHW4c -> op -> NCHW4c -> NHWC
        """
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")
        y = relay.transpose(x, axes=[0, 3, 1, 2])
        y = relay.layout_transform(y, "NCHW", "NCHW4c")
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NCHW")
        y = relay.transpose(y, axes=[0, 2, 3, 1])
        return relay.Function([x], y)

    def expected4():
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")  # NHWC
        y = relay.layout_transform(x, "NHWC", "NCHW4c")  # To NCHW4c
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NHWC")  # To NHWC
        return relay.Function([x], y)

    def before5():
        """
        Simplify layout_transform->layout_transform and its inverse.

        Input:
        NHWC -> NCHW -> NCHW4c -> op -> NCHW4c -> NCHW -> NHWC

        Simplified:
        NHWC -> NCHW4c -> op -> NCHW4c -> NHWC
        """
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")  # NHWC
        y = relay.layout_transform(x, "NHWC", "NCHW")  # To NCHW
        y = relay.layout_transform(y, "NCHW", "NCHW4c")  # To NCHW4c
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NCHW")  # To NCHW
        y = relay.layout_transform(y, "NCHW", "NHWC")  # To NHWC
        return relay.Function([x], y)

    def expected5():
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")  # NHWC
        y = relay.layout_transform(x, "NHWC", "NCHW4c")  # To NCHW4c
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NHWC")  # To NHWC
        return relay.Function([x], y)

    def before6():
        """
        Remove trivial layout_transform->layout_transform.

        Input:
        NCHW -> NHWC -> NCHW -> op

        Simplified:
        NCHW -> op
        """

        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.layout_transform(x, "NCHW", "NHWC")
        y = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected6():
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    def before7():
        """
        Remove trivial layout_transform->layout_transform.

        Input:
        NCHW4c -> NCHW8c -> NCHW4c -> op

        Simplified:
        NCHW4c -> op
        """
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.layout_transform(x, "NCHW4c", "NCHW8c")
        y = relay.layout_transform(y, "NCHW8c", "NCHW4c")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected7():
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    def before8():
        """
        Simplify layout_transform->layout_transform with rank contraction and expansion.

        Input:
        NCHW4c -> NCHW -> NCHW8c -> op

        Simplified:
        NCHW4c -> NCHW8c -> op
        """
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.layout_transform(x, "NCHW4c", "NCHW")
        y = relay.layout_transform(y, "NCHW", "NCHW8c")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected8():
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.layout_transform(x, "NCHW4c", "NCHW8c")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def before9():
        """
        Remove trivial layout_transform->layout_transform.

        Input:
        NCHW -> NCHW4c -> NCHW -> op

        Simplified:
        NCHW -> op
        """
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.layout_transform(x, "NCHW", "NCHW4c")
        y = relay.layout_transform(y, "NCHW4c", "NCHW")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected9():
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    def before10():
        """
        Simplify a layout_transform->layout_transform pair with no rank change into a single transpose.

        Input:
        NCHW -> NHWC -> CHWN -> op

        Simplified:
        NCHW -> CHWN -> op
        """
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.layout_transform(x, "NCHW", "NHWC")
        y = relay.layout_transform(y, "NHWC", "CHWN")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected10():
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.transpose(x, axes=[1, 2, 3, 0])
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    for before, expected in [
        [before1(), expected1()],
        [before2(), expected2()],
        [before3(), expected3()],
        [before4(), expected4()],
        [before5(), expected5()],
        [before6(), expected6()],
        [before7(), expected7()],
        [before8(), expected8()],
        [before9(), expected9()],
        [before10(), expected10()],
    ]:
        after = run_opt_pass(before, transform.SimplifyExpr())
        expected = run_opt_pass(expected, transform.InferType())
        assert tvm.ir.structural_equal(
            after,
            expected), "\nafter: {} \nexpected: {}".format(after, expected)
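
before10/expected10 assert that the NCHW -> NHWC -> CHWN chain is a single permutation; a numpy check of the equivalence (illustrative only):

import numpy as np

x = np.random.rand(1, 128, 56, 56).astype("float32")  # NCHW
chwn = x.transpose(0, 2, 3, 1).transpose(3, 1, 2, 0)   # NCHW -> NHWC -> CHWN
np.testing.assert_array_equal(chwn, x.transpose(1, 2, 3, 0))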
Example #9
File: test_dnnl.py  Project: wenxcs/tvm
def partition_for_dnnl(mod, params=None, alter_layout=True):
    """Partition the graph greedily offloading supported operators to DNNL.

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    alter_layout : bool
        Whether to run the DNNL-specific alter-op-layout passes (default True).

    Returns
    -------
    mod : Module
        Annotated and partitioned module.
    """
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)

    with TempOpAttr("nn.conv2d", "FTVMLegalize", dnnl.legalize_group_conv):
        with TempOpAttr("nn.conv2d_transpose", "FTVMLegalize",
                        dnnl.legalize_group_conv):
            seq = tvm.transform.Sequential([
                transform.CanonicalizeOps(),
                transform.InferType(),
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.FoldScaleAxis(),
                # fold consecutive add ops to simplify pattern `conv2d-bias_add-bn-relu`
                transform.SimplifyExpr(),
                transform.FoldConstant(),
                # alter group conv / conv_transpose layout to `GOIHW` / `GIOHW`
                transform.Legalize(),
                transform.FoldConstant(),
            ])
            with tvm.transform.PassContext(opt_level=3):
                mod = seq(mod)
    if alter_layout:
        with TempOpAttr("nn.conv1d", "FTVMAlterOpLayout", dnnl.alter_conv):
            with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", dnnl.alter_conv):
                with TempOpAttr("nn.conv3d", "FTVMAlterOpLayout",
                                dnnl.alter_conv):
                    with TempOpAttr("nn.conv2d_transpose", "FTVMAlterOpLayout",
                                    dnnl.alter_conv_transpose):
                        with TempOpAttr("nn.conv3d_transpose",
                                        "FTVMAlterOpLayout",
                                        dnnl.alter_conv_transpose):
                            alter_layout_seq = tvm.transform.Sequential([
                                transform.AlterOpLayout(),
                                transform.FoldConstant(),
                            ])
                            with tvm.transform.PassContext(opt_level=3):
                                mod = alter_layout_seq(mod)

    byoc_seq = tvm.transform.Sequential([
        transform.MergeComposite(dnnl.pattern_table()),
        transform.AnnotateTarget("dnnl"),
        transform.MergeCompilerRegions(),
        transform.PartitionGraph(),
    ])
    with tvm.transform.PassContext(opt_level=3):
        mod = byoc_seq(mod)
    return mod
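
The deeply nested TempOpAttr blocks can be flattened with contextlib.ExitStack; an equivalent sketch of the alter_layout section (same helpers assumed, behavior unchanged):

from contextlib import ExitStack

with ExitStack() as stack:
    for op in ["nn.conv1d", "nn.conv2d", "nn.conv3d"]:
        stack.enter_context(TempOpAttr(op, "FTVMAlterOpLayout", dnnl.alter_conv))
    for op in ["nn.conv2d_transpose", "nn.conv3d_transpose"]:
        stack.enter_context(
            TempOpAttr(op, "FTVMAlterOpLayout", dnnl.alter_conv_transpose))
    alter_layout_seq = tvm.transform.Sequential([
        transform.AlterOpLayout(),
        transform.FoldConstant(),
    ])
    with tvm.transform.PassContext(opt_level=3):
        mod = alter_layout_seq(mod)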