def test_pos_concat_scale_align(data_shape, out_type):
    # concat scale alignment case
    class ConcatScaleAlign(nn.HybridBlock):
        def __init__(self, **kwargs):
            super(ConcatScaleAlign, self).__init__(**kwargs)
            self.shared_weight = mx.gluon.Parameter('shared_weight', shape=(64, data_shape[1], 3, 3),
                                                    init=mx.init.Xavier(magnitude=2.24),
                                                    dtype='float32', allow_deferred_init=True)

        def forward(self, x):
            conv1 = mx.npx.convolution(x, kernel=(3, 3), num_filter=64,
                                       weight=self.shared_weight.data(x.device), no_bias=True)
            conv2 = mx.npx.convolution(x, kernel=(3, 3), num_filter=64,
                                       weight=self.shared_weight.data(x.device) * 2, no_bias=True)
            conv3 = mx.npx.convolution(x, kernel=(3, 3), num_filter=64,
                                       weight=self.shared_weight.data(x.device) * 3, no_bias=True)
            conv4 = mx.npx.convolution(x, kernel=(3, 3), num_filter=64,
                                       weight=self.shared_weight.data(x.device) * 4, no_bias=True)
            return mx.np.concatenate([conv1, conv2, conv3, conv4], axis=1)

        def infer_shape(self, x, *args):
            # pin the shared weight's shape once the input shape is known
            self.shared_weight.shape = (64, data_shape[1], 3, 3)

    concat = ConcatScaleAlign()
    check_quantize(concat, data_shape, out_type,
                   check_calibration=True, check_scale_align=True)

def test_conv_reshape_conv(use_bias, data_shape, out_type, module):
    class Conv_Reshape_Conv(nn.HybridBlock):
        def __init__(self, **kwargs):
            super(Conv_Reshape_Conv, self).__init__(**kwargs)
            self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
            self.conv1 = nn.Conv2D(channels=32, kernel_size=(5, 5), strides=1, use_bias=use_bias)

        def forward(self, x):
            out = self.conv0(x)
            if module == mx.npx:
                attrs = {"newshape": (-1, int(out.shape[1] / 4), out.shape[2] * 2, out.shape[3] * 2)}
            else:
                attrs = {"shape": (-1, int(out.shape[1] / 4), out.shape[2] * 2, out.shape[3] * 2)}
                # the legacy mx.nd reshape operates on classic NDArrays
                out = out.as_nd_ndarray()
            out = getattr(module, "reshape")(out, **attrs)
            out = self.conv1(out.as_np_ndarray())
            return out

    net = Conv_Reshape_Conv()
    check_quantize(net, data_shape, out_type)

def test_pos_conv_add3(no_bias, data_shape, out_type):
    # conv + add fusion case 3
    class ConvAdd(nn.HybridBlock):
        def __init__(self, use_bias, **kwargs):
            super(ConvAdd, self).__init__(**kwargs)
            self.conv0 = nn.Conv2D(channels=data_shape[1], kernel_size=(1, 1), strides=1, use_bias=use_bias)

        def forward(self, x):
            out = x + self.conv0(x)
            return out

    net = ConvAdd(use_bias=True)
    check_quantize(net, data_shape, out_type)

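# Illustrative sketch (not part of the original suite): the tests above exercise fusion
# through check_quantize, but the same conv + add pattern can be partitioned directly with
# HybridBlock.optimize_for. The backend name 'ONEDNN', the channel count, and the input
# shape below are assumptions about a oneDNN-enabled build, not values taken from this file.
@mx.util.use_np
def _conv_add_fusion_sketch():
    class SketchConvAdd(nn.HybridBlock):
        def __init__(self, channels, **kwargs):
            super(SketchConvAdd, self).__init__(**kwargs)
            self.conv0 = nn.Conv2D(channels=channels, kernel_size=(1, 1), strides=1, use_bias=True)

        def forward(self, x):
            # same conv + add pattern as ConvAdd above
            return x + self.conv0(x)

    net = SketchConvAdd(channels=4)
    net.initialize()
    x = mx.np.random.uniform(size=(1, 4, 10, 10))
    # partition the graph with the oneDNN subgraph backend, then run the fused network
    net.optimize_for(x, backend='ONEDNN')
    return net(x)
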
def test_fc_transpose(data_shape, use_bias, out_type, module):
    class FC_Transpose(nn.HybridBlock):
        def __init__(self, use_bias, **kwargs):
            super(FC_Transpose, self).__init__(**kwargs)
            self.fc = nn.Dense(units=64, use_bias=use_bias)

        def forward(self, x):
            out = self.fc(x)
            if module == mx.nd:
                out = out.as_nd_ndarray()
            out = module.transpose(out)
            return out.as_np_ndarray()

    net = FC_Transpose(use_bias)
    check_quantize(net, data_shape, out_type, name='fc')

def test_pos_single_concat_pos_neg(data_shape, out_type):
    class ConvDataConcat(nn.HybridBlock):
        def __init__(self, dim, **kwargs):
            super(ConvDataConcat, self).__init__(**kwargs)
            self.conv0 = nn.Conv2D(channels=4, kernel_size=(1, 1), strides=1, use_bias=False)
            self.act = nn.Activation(activation='relu')
            self.concat_dim = dim

        def forward(self, x):
            relu_out = self.act(self.conv0(x))
            out = mx.np.concatenate([x, relu_out], axis=self.concat_dim)
            return out

    concat = ConvDataConcat(dim=1)
    check_quantize(concat, data_shape, out_type, name='', check_calibration=False)

def test_pos_single_concat(data_shape, input_num, dim, out_type):
    # single concat case
    class SingleConcat(nn.HybridBlock):
        def __init__(self, input_num, dim, **kwargs):
            super(SingleConcat, self).__init__(**kwargs)
            self.concat = nn.HybridConcatenate(axis=dim)
            for _ in range(input_num):
                self.concat.add(nn.Identity())

        def forward(self, x):
            out = self.concat(x)
            return out

    concat = SingleConcat(input_num, dim)
    check_quantize(concat, data_shape, out_type, name='conv', check_calibration=False)

def test_fc_reshape(data_shape, use_bias, out_type, flatten, module):
    class FC_Reshape(nn.HybridBlock):
        def __init__(self, use_bias, flatten, **kwargs):
            super(FC_Reshape, self).__init__(**kwargs)
            self.fc = nn.Dense(units=64, use_bias=use_bias, flatten=flatten)

        def forward(self, x):
            out = self.fc(x)
            if module == mx.npx:
                attrs = {"newshape": (1, -1)}
            else:
                attrs = {"shape": (1, -1)}
                # the legacy mx.nd reshape operates on classic NDArrays
                out = out.as_nd_ndarray()
            out = getattr(module, "reshape")(out, **attrs)
            return out.as_np_ndarray()

    net = FC_Reshape(use_bias, flatten)
    check_quantize(net, data_shape, out_type, name='fc')

def test_conv_transpose_conv(use_bias, data_shape, out_type, module):
    class Conv_Transpose_Conv(nn.HybridBlock):
        def __init__(self, **kwargs):
            super(Conv_Transpose_Conv, self).__init__(**kwargs)
            self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
            self.conv1 = nn.Conv2D(channels=32, kernel_size=(5, 5), strides=1, use_bias=use_bias)

        def forward(self, x):
            out = self.conv0(x)
            if module == mx.nd:
                out = out.as_nd_ndarray()
            out = module.transpose(out, axes=[0, 1, 3, 2])
            out = self.conv1(out.as_np_ndarray())
            return out

    net = Conv_Transpose_Conv()
    check_quantize(net, data_shape, out_type)

def function_add_quantized(data_shape, add_op, quantize_mode, relu, out_type, broadcast, calib_mode):
    class SumExample(nn.HybridBlock):
        def __init__(self, add_op, **kwargs):
            super(SumExample, self).__init__(**kwargs)
            self.elemwise_add = (add_op == 'ele_add')
            self.relu = (relu == 'relu')

        def forward(self, data1a, data2):
            fc_out = data1a
            if self.relu:
                fc_out = mx.npx.relu(fc_out)
            if self.elemwise_add:
                sum1 = mx.nd.elemwise_add(data2.as_nd_ndarray(),
                                          fc_out.as_nd_ndarray()).as_np_ndarray()
            else:
                sum1 = data2 + fc_out
            return sum1

    attrs = {add_op: {}}
    net = SumExample(add_op)
    if broadcast:
        broadcasted_shape = (1,) + data_shape[1:-1] + (1,)
        data_shapes = [broadcasted_shape, data_shape]
    else:
        data_shapes = [data_shape, data_shape]
    # check_calibration could be enabled once check_qsym_calibrated is reimplemented
    # to find operator names instead of node names
    check_quantize(net, data_shapes, out_type, name="contrib_quantized_" + add_op,
                   quantize_mode=quantize_mode, attrs_dict=attrs, calib_mode=calib_mode,
                   check_calibration=(calib_mode != 'none') and False, check_fusion=False)

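# The helper above is not collected by pytest directly, since its name does not start with
# 'test_'. Below is a minimal, illustrative sketch of how it could be driven from a
# parametrized test, assuming the module-level imports of this file (mxnet as mx, pytest).
# The concrete values ('full', 'naive', 'int8', the data shape) are assumptions for
# illustration, not the suite's actual parametrization.
@mx.util.use_np
@pytest.mark.parametrize('data_shape', [(4, 4, 10, 10)])
@pytest.mark.parametrize('add_op', ['ele_add'])
@pytest.mark.parametrize('broadcast', [False])
def test_add_quantized_sketch(data_shape, add_op, broadcast):
    function_add_quantized(data_shape, add_op, quantize_mode='full', relu='relu',
                           out_type='int8', broadcast=broadcast, calib_mode='naive')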