Example #1
    def check(shape, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            in_channels = shape[1] * shape[4]
            in_bias = relay.var("in_bias",
                                shape=(1, in_channels // blocking[0], 1, 1,
                                       blocking[0]))
            in_scale = relay.const(
                _get_positive_scale(
                    (1, in_channels // blocking[0], 1, 1, blocking[0])))
        else:
            in_channels = shape[1]
            in_bias = relay.var("in_bias", shape=(in_channels, 1, 1))
            in_scale = relay.const(_get_positive_scale((in_channels, 1, 1)))
        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_bias, in_scale, in_channels,
                               channels, blocking)

        y1_folded = run_opt_pass(y1_folded, transform.InferType())
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
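The `check` helpers in this listing rely on shared test utilities (`run_opt_pass`, `_get_positive_scale`) and on test-local `before`/`expected` graph builders that are not shown here. A minimal sketch of the shared utilities, assuming the usual TVM Relay imports, is given below; the exact definitions in the surrounding test file may differ.

    import numpy as np
    import tvm
    from tvm import relay
    from tvm.relay import transform

    def run_opt_pass(expr, opt_pass):
        # Wrap the expression in an IRModule, apply the pass, and return
        # the optimized entry function (or its body for a bare expression).
        mod = tvm.IRModule.from_expr(expr)
        mod = opt_pass(mod)
        entry = mod["main"]
        return entry if isinstance(expr, relay.Function) else entry.body

    def _get_positive_scale(size):
        # Random, strictly positive scale so that forward folding is always legal.
        return np.random.uniform(0.5, 1, size=size).astype("float32")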
Example #2
    def check(dshape, channels, blocking):
        x = relay.var("x", shape=dshape)
        if blocking:
            in_channels = dshape[3] * dshape[4]
            wshape = (3, 3, 1, channels // blocking[1], 1, blocking[1])  # HWIOio
            weight = relay.var("weight", shape=wshape)
            in_bias = relay.var("in_bias",
                                shape=(in_channels // blocking[0],
                                       blocking[0]))
            in_scale = relay.const(
                _get_positive_scale((in_channels // blocking[0], blocking[0])))
        else:
            in_channels = dshape[-1]
            wshape = (3, 3, 1, channels)  # HWIO
            weight = relay.var("weight", shape=wshape)
            in_bias = relay.var("in_bias", shape=(in_channels, ))
            in_scale = relay.const(_get_positive_scale(in_channels, ))

        # test depthwise
        assert in_channels == channels

        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_expected = expected(x, weight, in_bias, in_scale, channels,
                               blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
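For reference, this depthwise variant only accepts data shapes whose channel dimension equals `channels`. Hypothetical invocations (not part of the original listing), one for the plain NHWC layout and one for the blocked layout:

    check((2, 4, 10, 3), 3, None)
    check((2, 4, 10, 2, 2), 4, (2, 2))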
Example #3
 def check(shape, channels, in_scale):
     x = relay.var("x", shape=shape)
     in_channels = shape[-1]
     # test depthwise
     assert in_channels == channels
     weight = relay.var("weight")
     in_bias = relay.var("in_bias", shape=(in_channels, ))
     y1 = before(x, weight, in_bias, in_scale, channels)
     y1 = run_opt_pass(y1, transform.InferType())
     y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
     # folding is expected to be a no-op here, so the folded graph equals the original
     assert tvm.ir.structural_equal(y1, y1_folded)
Example #4
 def check(shape, channels):
     x = relay.var("x", shape=shape)
     in_channels = shape[-1]
     in_bias = relay.var("in_bias", shape=(in_channels, ))
     in_scale = relay.const(_get_positive_scale(size=(in_channels, )))
     # test depthwise
     assert in_channels == channels
     weight = relay.var("weight")
     y1 = before(x, weight, in_bias, in_scale, channels)
     y1 = run_opt_pass(y1, transform.InferType())
     y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
     assert tvm.ir.structural_equal(y1, y1_folded)
Example #5
 def check(shape, channels):
     x = relay.var("x", shape=shape)
     in_channels = shape[1]
     in_scale = relay.const(-_get_positive_scale((in_channels, 1, 1)))
     weight = relay.var("weight")
     y1 = before(x, weight, in_scale, channels)
     y1 = run_opt_pass(y1, transform.InferType())
     type_dict = {x.name_hint: x.checked_type for x in y1.params}
     weight = relay.var("weight", type_dict["weight"])
     y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
     y1_expected = expected(x, weight, in_scale, channels)
     y1_expected = run_opt_pass(y1_expected, transform.InferType())
     assert tvm.ir.structural_equal(y1_folded, y1_expected)
Example #6
    def check(data_shape, weight_shape):
        x = relay.var("x", shape=data_shape)
        weight = relay.var("weight", shape=weight_shape)
        in_channels = data_shape[1]
        in_bias = relay.var("in_bias", shape=(in_channels, ))
        in_scale = relay.const(_get_positive_scale((in_channels, )))
        y1 = before(x, weight, in_bias, in_scale)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_bias, in_scale)

        y1_folded = run_opt_pass(y1_folded, transform.InferType())
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
Example #7
    def check(shape, channels, blocking, in_scale):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            in_channels = shape[3] * shape[4]
            in_bias = relay.var("in_bias", shape=(1, in_channels // blocking[0], 1, 1, blocking[0]))
        else:
            in_channels = shape[-1]
            in_bias = relay.var("in_bias", shape=(in_channels,))

        assert in_channels == channels
        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        assert tvm.ir.structural_equal(y1, y1_folded)
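This variant is a negative test: the scale passed in cannot be folded forward, so the pass should leave the graph unchanged. A hypothetical invocation (assuming the same layout conventions as above) would pass a non-constant scale, for example:

    check((2, 11, 10, 4), 4, None, relay.var("in_scale", shape=(4,)))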
Example #8
 def check(dshape, channels):
     x = relay.var("x", shape=dshape)
     in_channels = dshape[-1]
     # test depthwise
     assert in_channels == channels
     wshape = (3, 3, 1, channels)  # HWIO
     weight = relay.var("weight", shape=wshape)
     in_bias = relay.var("in_bias", shape=(in_channels, ))
     in_scale = relay.const(_get_positive_scale(in_channels, ))
     y1 = before(x, weight, in_bias, in_scale, channels)
     y1 = run_opt_pass(y1, transform.InferType())
     y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
     type_dict = {x.name_hint: x.checked_type for x in y1.params}
     weight = relay.var("weight", type_dict["weight"])
     y1_expected = expected(x, weight, in_bias, in_scale, channels)
     y1_expected = run_opt_pass(y1_expected, transform.InferType())
     assert tvm.ir.structural_equal(y1_folded, y1_expected)
Example #9
 def match_pass_name(name):
     if name == 'FoldScaleAxis':
         return transform.FoldScaleAxis()
     if name == 'BackwardFoldScaleAxis':
         return transform.BackwardFoldScaleAxis()
     if name == 'ForwardFoldScaleAxis':
         return transform.ForwardFoldScaleAxis()
     if name == 'FuseOps':
         return transform.FuseOps(3)
     if name == 'FoldConstant':
         return transform.FoldConstant()
     if name == 'CombineParallelConv2d':
         return transform.CombineParallelConv2D()
     if name == 'AlterOpLayout':
         return transform.AlterOpLayout()
     if name == 'EliminateCommonSubexpr':
         return transform.EliminateCommonSubexpr()
     if name == 'PartialEvaluate':
         return transform.PartialEvaluate()
     if name == 'CanonicalizeCast':
         return transform.CanonicalizeCast()
     if name == 'CanonicalizeOps':
         return transform.CanonicalizeOps()
     raise ValueError('Name {} does not match any pass'.format(name))
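A helper like this is typically used to assemble a pass pipeline from a list of names. A hypothetical usage sketch, assuming `mod` is an existing `tvm.IRModule`:

    # Build a Sequential pass from human-readable names and run it on a module.
    names = ["CanonicalizeOps", "ForwardFoldScaleAxis", "FoldConstant"]
    seq = tvm.transform.Sequential([match_pass_name(n) for n in names])
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)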