def test_pos_conv_bn_sum_act(use_bias, data_shape, alg, quantize):
    # conv + bn + add + act fusion case
    class ConvBNSumAct(nn.HybridBlock):
        def __init__(self, alg, use_bias, **kwargs):
            super(ConvBNSumAct, self).__init__(**kwargs)
            self.conv0 = nn.Conv2D(channels=64,
                                   kernel_size=(3, 3),
                                   strides=1,
                                   use_bias=use_bias)
            self.conv1 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1)
            self.conv1.share_parameters(self.conv0.collect_params())
            self.bn = nn.BatchNorm()
            if alg == "relu6":
                self.act = RELU6()
            elif alg == "leakyrelu":
                self.act = nn.LeakyReLU(0.25)
            elif alg == "gelu":
                self.act = nn.GELU()
            else:
                self.act = nn.Activation(activation=alg)

        def hybrid_forward(self, F, x):
            out = self.bn(self.conv0(x)) + self.conv1(x)
            out = self.act(out)
            return out

    attr = {
        'conv': {
            'with_sum': 'true',
            'with_postsum_act': 'true',
            'with_bn': 'true'
        }
    }
    net = ConvBNSumAct(alg, use_bias)
    check_fusion(net, data_shape, attr, check_quantization=quantize)
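
# `check_fusion` and the other helpers used in this listing (RELU6, CustomNormalInit,
# MobileNetV2Struct, ConvBNSum) are defined elsewhere in the test suite. Below is a
# simplified, hypothetical sketch of what a check_fusion-style helper does; the names,
# tolerances and the 'ONEDNN' backend string are assumptions (older MXNet versions use
# 'MKLDNN'), not the suite's actual implementation.
import mxnet as mx

def check_fusion_sketch(net, data_shape, attrs_dict, rtol=1e-3, atol=1e-3):
    data = mx.np.random.uniform(size=data_shape, dtype='float32')
    net.initialize()
    out_ref = net(data)                        # imperative (unfused) reference run
    net.optimize_for(data, backend='ONEDNN')   # partition and fuse via the oneDNN subgraph backend
    out_fused = net(data)
    mx.test_utils.assert_almost_equal(out_ref.asnumpy(), out_fused.asnumpy(),
                                      rtol=rtol, atol=atol)
    # the real helper additionally inspects the fused symbol and asserts that the fused
    # nodes carry the expected attributes from attrs_dict (e.g. {'conv': {'with_bn': 'true'}}),
    # and optionally runs the INT8 quantization checks as well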

def test_pos_conv_act(use_bias, data_shape, alg, quantize):
    # conv + act fusion case
    class ConvAct(nn.HybridBlock):
        def __init__(self, use_bias, alg, **kwargs):
            super(ConvAct, self).__init__(**kwargs)
            self.conv0 = nn.Conv2D(channels=64,
                                   kernel_size=(3, 3),
                                   strides=1,
                                   use_bias=use_bias)
            if alg == "relu6":
                self.act = RELU6()
            elif alg == "leakyrelu":
                self.act = nn.LeakyReLU(0.25)
            elif alg == "gelu":
                self.act = nn.GELU()
            else:
                self.act = nn.Activation(activation=alg)

        def hybrid_forward(self, F, x):
            out = self.act(self.conv0(x))
            return out

    attrs = {'conv': {'with_act': 'true'}}
    net = ConvAct(False, alg)
    check_fusion(net, data_shape, attrs, check_quantization=quantize)
    net = ConvAct(True, alg)
    check_fusion(net, data_shape, attrs, check_quantization=quantize)
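
# In the original suite the arguments of these test functions (`use_bias`, `data_shape`,
# `alg`, `quantize`, ...) are supplied by pytest parametrization. A minimal, hypothetical
# driver for the case above; the shapes and the activation/quantize pairs are illustrative
# assumptions only.
import pytest

DATA_SHAPES = [(4, 4, 10, 10), (32, 3, 24, 24)]  # assumed NCHW input shapes

@pytest.mark.parametrize('data_shape', DATA_SHAPES)
@pytest.mark.parametrize('use_bias', [True, False])
@pytest.mark.parametrize('alg,quantize', [('relu', True), ('sigmoid', True),
                                          ('leakyrelu', False), ('gelu', False)])
def test_pos_conv_act_driver(use_bias, data_shape, alg, quantize):
    # delegate to the fusion check defined above
    test_pos_conv_act(use_bias, data_shape, alg, quantize)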

def test_pos_conv_bn_act(use_bias, data_shape, alg, quantize):
  # conv + bn + act fusion case
  class ConvBNAct(nn.HybridBlock):
    def __init__(self, alg, use_bias, **kwargs):
        super(ConvBNAct, self).__init__(**kwargs)
        self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
        self.bn = nn.BatchNorm()
        if alg == "relu6":
          self.act = RELU6()
        elif alg == "leakyrelu":
          self.act = nn.LeakyReLU(0.25)
        elif alg == "gelu":
          self.act = nn.GELU()
        elif alg == "gelu_tanh":
          self.act = nn.GELU(approximation='tanh')
        else:
          self.act = nn.Activation(activation=alg)

    def forward(self, x):
      out = self.act(self.bn(self.conv0(x)))
      return out

  attr = {'conv': {'with_bn': 'true', 'with_act': 'true'}}
  net = ConvBNAct(alg, use_bias)
  check_fusion(net, data_shape, attr, check_quantization=quantize)

def test_fc_eltwise(data_shape, use_bias, flatten, alg):
    # fc + eltwise fusion case
    class FCEltwise(nn.HybridBlock):
        def __init__(self, use_bias, flatten, alg, **kwargs):
            super(FCEltwise, self).__init__(**kwargs)
            self.fc = nn.Dense(
                units=64,
                use_bias=use_bias,
                flatten=flatten,
                weight_initializer=CustomNormalInit(mean=0.5, sigma=0.1)
                if alg == 'square_root' else None)
            # avoid calculating square root of negative values
            self.alg = alg

        def hybrid_forward(self, F, x):
            fc_out = self.fc(x)
            if self.alg in ['relu', 'sigmoid', 'tanh', 'softrelu']:
                out = F.Activation(fc_out, act_type=self.alg)
            elif self.alg == 'square':
                out = F.square(fc_out)
            elif self.alg == 'square_root':
                out = F.sqrt(fc_out)
            elif self.alg == 'abs':
                out = F.abs(fc_out)
            elif self.alg == 'exp':
                out = F.exp(fc_out)
            else:
                out = F.clip(fc_out, 0, 1.0)
            return out

    attrs = {'fc': {'with_eltwise': 'true'}}
    net = FCEltwise(use_bias, flatten, alg)
    check_fusion(net, data_shape, attrs, check_quantization=flatten)

def test_pos_conv_act_add(data_shape, alg, quantize, use_bias):
  # conv + act + add fusion case
  class ConvActAdd(nn.HybridBlock):
    def __init__(self, use_bias, alg, **kwargs):
        super(ConvActAdd, self).__init__(**kwargs)
        self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias,
                               weight_initializer=mx.init.Xavier(magnitude=2.24))
        if alg == "relu6":
          self.act = RELU6()
        elif alg == "leakyrelu":
          self.act = nn.LeakyReLU(0.25)
        elif alg == "gelu":
          self.act = nn.GELU()
        elif alg == "gelu_tanh":
          self.act = nn.GELU(approximation='tanh')
        else:
          self.act = nn.Activation(activation=alg)
        self.conv1 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)

    def forward(self, x):
        out = self.act(self.conv0(x)) + self.conv1(x)
        return out

  attrs = {'sg_onednn_conv_act_0': {'with_act': 'true'},
           'sg_onednn_conv_add_1': {'with_sum': 'true'}}

  net = ConvActAdd(use_bias, alg)
  check_fusion(net, data_shape, attrs, check_quantization=quantize)

def test_mobilenetv2_struct(data_shape, reverse_sum_order, dedup_subgraph):
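    # conv + bn fusion within a MobileNetV2-style structure
    # (reverse_sum_order flips the operand order of the residual add)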
    attr = {'sg_mkldnn_conv_bn_0': {'with_bn': 'true'}}
    net = MobileNetV2Struct(reverse_sum_order=reverse_sum_order)
    check_fusion(net,
                 data_shape,
                 attr,
                 out_types=['int8', 'auto'],
                 dedup_subgraph=dedup_subgraph)

def test_conv_bn_sum(data_shape, reverse_sum_order, dedup_subgraph):
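    # conv + bn + residual add fusion case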
    attr = {'sg_mkldnn_conv_bn_add_0': {'with_bn': 'true'}}
    # channels after conv+bn should be same as input channels
    net = ConvBNSum(channels=data_shape[1],
                    reverse_sum_order=reverse_sum_order)
    check_fusion(net,
                 data_shape,
                 attr,
                 out_types=['int8', 'auto'],
                 dedup_subgraph=dedup_subgraph)

def test_single_fc(data_shape, use_bias, flatten):
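    # single fully-connected (Dense) fusion case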
    class SingleFC(nn.HybridBlock):
        def __init__(self, use_bias, flatten, **kwargs):
            super(SingleFC, self).__init__(**kwargs)
            self.fc = nn.Dense(units=64, use_bias=use_bias, flatten=flatten)

        def forward(self, x):
            return self.fc(x)

    attrs = {'fc': {}}
    net = SingleFC(use_bias, flatten)
    check_fusion(net, data_shape, attrs, check_quantization=flatten)

def test_pos_single_conv(use_bias, data_shape):
  # single conv fusion case
  class Conv(nn.HybridBlock):
    def __init__(self, **kwargs):
        super(Conv, self).__init__(**kwargs)
        self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)

    def forward(self, x):
        out = self.conv0(x)
        return out

  attr = {'conv': []}
  net = Conv()
  check_fusion(net, data_shape, attr)

def function_fc_add(data_shape, add_op, quantize_mode, fc_out_add, flatten,
                    relu, out_type):
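    # fc + add fusion helper: optionally applies leaky_relu(act_type='gelu') to the FC
    # output and exercises elemwise_add vs. broadcast add with the FC output on either side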
    class FCWithSumExample(nn.HybridBlock):
        def __init__(self, num_hidden, add_op, fc_out_add, **kwargs):
            super(FCWithSumExample, self).__init__(**kwargs)
            self.fca = nn.Dense(units=num_hidden, flatten=flatten)
            self.elemwise_add = (add_op == 'elemwise_add')
            self.fc_out_as_rhs = (fc_out_add == 'rhs')
            self.relu = (relu == 'leaky_relu')

        def forward(self, data1a, data2):
            fc_out = self.fca(data1a)
            if self.relu:
                fc_out = mx.npx.leaky_relu(fc_out, act_type='gelu')
            if self.fc_out_as_rhs:
                if self.elemwise_add:
                    sum1 = mx.nd.elemwise_add(
                        data2.as_nd_ndarray(),
                        fc_out.as_nd_ndarray()).as_np_ndarray()
                else:
                    sum1 = data2 + fc_out
            else:
                if self.elemwise_add:
                    sum1 = mx.nd.elemwise_add(
                        fc_out.as_nd_ndarray(),
                        data2.as_nd_ndarray()).as_np_ndarray()
                else:
                    sum1 = fc_out + data2
            return sum1

    attrs = {'fc': {'with_sum': 'true'}}
    if quantize_mode is not None:
        attrs['fc']['quantized'] = 'true'
        if quantize_mode == 'smart':
            attrs['fc']['enabled_float_output'] = mx.nd.get_dtype_name(
                mx.np.float32)
    num_hidden = 10
    net = FCWithSumExample(num_hidden, add_op, fc_out_add)
    if flatten:
        data_shapes = [data_shape, (data_shape[0], num_hidden)]
    else:
        data_shapes = [data_shape, (*data_shape[0:-1], num_hidden)]
    check_fusion(net,
                 data_shapes,
                 attrs,
                 out_types=[out_type],
                 check_fusion=(quantize_mode is None),
                 check_quantization=(quantize_mode is not None) and flatten,
                 quantize_mode=quantize_mode)

def test_pos_conv_bn(use_bias, data_shape):
  # conv + bn fusion case
  class ConvBN(nn.HybridBlock):
    def __init__(self, use_bias, **kwargs):
        super(ConvBN, self).__init__(**kwargs)
        self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
        self.bn = nn.BatchNorm()

    def forward(self, x):
        out = self.bn(self.conv0(x))
        return out

  attr = {'conv': {'with_bn': 'true'}}
  net = ConvBN(use_bias)
  check_fusion(net, data_shape, attr)

def test_pos_conv_add2(no_bias, data_shape):
  # conv + add fusion case 2
  class ConvAdd(nn.HybridBlock):
    def __init__(self, use_bias, **kwargs):
        super(ConvAdd, self).__init__(**kwargs)
        self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
        self.conv1 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1)
        self.pool = nn.AvgPool2D(pool_size=(1,1))

    def forward(self, x):
      out = self.pool(self.conv1(x)) + self.conv0(x)
      return out

  attr = {'conv': {'with_sum': 'true'}}
  net = ConvAdd(use_bias=not no_bias)
  check_fusion(net, data_shape, attr, check_quantization=False)

def test_fc_eltwise(data_shape, use_bias, flatten, alg):
    # fc + eltwise fusion case
    class FCEltwise(nn.HybridBlock):
        def __init__(self, use_bias, flatten, alg, **kwargs):
            super(FCEltwise, self).__init__(**kwargs)
            self.fc = nn.Dense(units=64,
                               use_bias=use_bias,
                               flatten=flatten,
                               weight_initializer=CustomNormalInit(
                                   mean=0.5, sigma=0.1, bounded=True)
                               if alg == 'square_root' else None)
            # avoid calculating square root of negative values
            self.alg = alg

        def forward(self, x):
            if self.alg == 'square_root':
                x = abs(x)
            fc_out = self.fc(x)
            if self.alg in [
                    'relu', 'sigmoid', 'log_sigmoid', 'mish', 'tanh',
                    'softrelu'
            ]:
                out = mx.npx.activation(fc_out, act_type=self.alg)
            elif self.alg in ['gelu', 'elu', 'leaky']:
                out = mx.npx.leaky_relu(fc_out, act_type=self.alg)
            elif self.alg == 'square':
                out = mx.np.square(fc_out)
            elif self.alg == 'square_root':
                out = mx.np.sqrt(fc_out)
            elif self.alg == 'abs':
                out = mx.np.abs(fc_out)
            elif self.alg == 'exp':
                out = mx.np.exp(fc_out)
            else:
                out = mx.np.clip(fc_out, 0, 1.0)
            return out

    not_quant_fuse = [
        'sigmoid', 'log_sigmoid', 'softrelu', 'tanh', 'mish', 'square',
        'square_root', 'exp'
    ]
    attrs = {'fc': {'with_eltwise': 'true'}}
    net = FCEltwise(use_bias, flatten, alg)
    check_fusion(net,
                 data_shape,
                 attrs,
                 check_quantization=flatten and alg not in not_quant_fuse)

def test_fc_int8_and_fp32_outputs(data_shape, flatten):

    #                 /---> Quantizable op
    # Input ---> FC -|
    #                 \---> Non quantizable op

    class MultiOutputFC(nn.HybridBlock):
        def __init__(self, **kwargs):
            super(MultiOutputFC, self).__init__(**kwargs)
            self.dense0 = nn.Dense(64, flatten=flatten)
            self.dense1 = nn.Dense(64, flatten=flatten)

        def forward(self, x):
            x = self.dense0(x)
            y = self.dense1(x)  # quantizable
            z = mx.npx.softmax(x)  # non quantizable
            return y + z

    attrs = {'fc': {}}
    net = MultiOutputFC()
    check_fusion(net, data_shape, attrs, check_quantization=flatten)

def test_fc_identity_eltwise(identity_node):
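    # fc + identity-like node (copy or dropout) + relu eltwise fusion case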
    class FCIdentityEltwise(nn.HybridBlock):
        def __init__(self, identity_node, **kwargs):
            super(FCIdentityEltwise, self).__init__(**kwargs)
            self.fc1 = nn.Dense(units=64,
                                use_bias=False,
                                weight_initializer=None,
                                flatten=True)
            self.fc2 = nn.Dense(units=64,
                                use_bias=False,
                                weight_initializer=None,
                                flatten=True)
            self.identity_node = identity_node

        def forward(self, x):
            out = self.fc1(x)
            if self.identity_node == 'copy':
                out = mx.np.copy(out)
            else:
                out = mx.npx.dropout(out)
            out = mx.npx.activation(out, act_type='relu')
            out = self.fc2(out)
            if self.identity_node == 'copy':
                out = mx.np.copy(out)
            else:
                out = mx.npx.dropout(out)
            out = mx.npx.activation(out, act_type='relu')
            return out

    data_shape = (64, 4, 10, 10)
    attrs = {
        'sg_onednn_fully_connected_eltwise_0': {
            'with_eltwise': 'true'
        },
        'sg_onednn_fully_connected_eltwise_1': {
            'with_eltwise': 'true'
        }
    }
    net = FCIdentityEltwise(identity_node)
    check_fusion(net, data_shape, attrs, check_quantization=False)