def __init__(self, num_class=10, channel=1):
    super(LeNet5, self).__init__()
    self.num_class = num_class
    # Quantized conv layers with folded batch norm; fake quantization is
    # enabled only after 900 training steps (quant_delay=900).
    self.conv1 = nn.Conv2dBnFoldQuant(channel, 6, 5, pad_mode='valid',
                                      per_channel=True, quant_delay=900)
    self.conv2 = nn.Conv2dBnFoldQuant(6, 16, 5, pad_mode='valid',
                                      per_channel=True, quant_delay=900)
    # Quantized fully connected layers
    self.fc1 = nn.DenseQuant(16 * 5 * 5, 120, per_channel=True, quant_delay=900)
    self.fc2 = nn.DenseQuant(120, 84, per_channel=True, quant_delay=900)
    self.fc3 = nn.DenseQuant(84, self.num_class, per_channel=True, quant_delay=900)
    # Quantized activation; pooling and flatten stay in float
    self.relu = nn.ActQuant(nn.ReLU(), per_channel=False, quant_delay=900)
    self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
    self.flatten = nn.Flatten()
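# Hedged sketch: a construct() that would typically pair with the __init__ above.
# The forward pass is not shown in the original; the layer order below is an
# assumption following the standard LeNet-5 topology.
def construct(self, x):
    x = self.max_pool2d(self.relu(self.conv1(x)))
    x = self.max_pool2d(self.relu(self.conv2(x)))
    x = self.flatten(x)
    x = self.relu(self.fc1(x))
    x = self.relu(self.fc2(x))
    x = self.fc3(x)
    return x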
def __init__(self, num_class=10, channel=1):
    super(LeNet5, self).__init__()
    self.num_class = num_class
    self.qconfig = create_quant_config(per_channel=(True, False), symmetric=(True, False))
    self.conv1 = nn.Conv2dQuant(channel, 6, 5, pad_mode='valid',
                                quant_config=self.qconfig, quant_dtype=QuantDtype.INT8)
    self.conv2 = nn.Conv2dQuant(6, 16, 5, pad_mode='valid',
                                quant_config=self.qconfig, quant_dtype=QuantDtype.INT8)
    self.fc1 = nn.DenseQuant(16 * 5 * 5, 120,
                             quant_config=self.qconfig, quant_dtype=QuantDtype.INT8)
    self.fc2 = nn.DenseQuant(120, 84,
                             quant_config=self.qconfig, quant_dtype=QuantDtype.INT8)
    self.fc3 = nn.DenseQuant(84, self.num_class,
                             quant_config=self.qconfig, quant_dtype=QuantDtype.INT8)
    self.relu = nn.ActQuant(nn.ReLU(),
                            quant_config=self.qconfig, quant_dtype=QuantDtype.INT8)
    self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
    self.flatten = nn.Flatten()
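# Hedged sketch of the imports and config this quant_config-based variant assumes
# (MindSpore 1.x compression API); the exact import paths are an assumption, since
# they are not shown above.
from mindspore import nn
from mindspore.compression.quant import create_quant_config
from mindspore.compression.common import QuantDtype

# Each tuple is (weight_setting, activation_setting): per-channel symmetric fake
# quantization for weights, per-tensor asymmetric for activations.
qconfig = create_quant_config(per_channel=(True, False), symmetric=(True, False))
fc = nn.DenseQuant(84, 10, quant_config=qconfig, quant_dtype=QuantDtype.INT8)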
def __init__(self, in_channels, out_channels, has_bias, has_bn):
    super(LastQuantLayer, self).__init__()
    # Quantized dense layer driven by the module-level quant_config
    self.dense_inner = nn.DenseQuant(in_channels, out_channels, has_bias=has_bias,
                                     quant_config=quant_config,
                                     quant_dtype=QuantDtype.INT8)
    # Learned-scale (LSQ-style) fake quantization applied to the layer output
    self.fake_quant_act = nn.FakeQuantWithMinMaxObserver(min_init=-16,
                                                         max_init=16,
                                                         ema=True,
                                                         quant_dtype=QuantDtype.INT8,
                                                         per_channel=False,
                                                         symmetric=True,
                                                         narrow_range=True,
                                                         mode="LEARNED_SCALE")
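# Hedged sketch: the forward pass implied by the two layers above (not shown in the
# original) — the dense output is passed through the learned-scale fake quant node.
def construct(self, x):
    x = self.dense_inner(x)
    x = self.fake_quant_act(x)
    return x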
def __init__(self, block, layer_nums, in_channels, out_channels, strides, num_classes):
    super(ResNet, self).__init__()
    if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
        raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
    # Stem: quant-aware conv + BN + ReLU block followed by max pooling
    self.conv1 = ConvBNReLU(3, 64, kernel_size=7, stride=2)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
    # Four residual stages
    self.layer1 = self._make_layer(block, layer_nums[0], in_channel=in_channels[0],
                                   out_channel=out_channels[0], stride=strides[0])
    self.layer2 = self._make_layer(block, layer_nums[1], in_channel=in_channels[1],
                                   out_channel=out_channels[1], stride=strides[1])
    self.layer3 = self._make_layer(block, layer_nums[2], in_channel=in_channels[2],
                                   out_channel=out_channels[2], stride=strides[2])
    self.layer4 = self._make_layer(block, layer_nums[3], in_channel=in_channels[3],
                                   out_channel=out_channels[3], stride=strides[3])
    # Head: global average pooling, quantized dense classifier, output fake quant
    self.mean = P.ReduceMean(keep_dims=True)
    self.flatten = nn.Flatten()
    self.end_point = nn.DenseQuant(out_channels[3], num_classes, has_bias=True,
                                   per_channel=_per_channel, symmetric=_symmetric)
    self.output_fake = nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay)
    # init weights
    self._initialize_weights()
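# Hedged sketch: a forward pass consistent with the layers declared above (the
# original construct() is not shown; the reduction axes (2, 3) are an assumption
# for NCHW inputs).
def construct(self, x):
    x = self.conv1(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = self.mean(x, (2, 3))   # global average pooling over H and W
    x = self.flatten(x)
    x = self.end_point(x)
    x = self.output_fake(x)    # fake-quantize the logits
    return x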
def __init__(self, num_classes=1000, width_mult=1., has_dropout=False,
             inverted_residual_setting=None, round_nearest=8):
    super(MobileNetV2Quant, self).__init__()
    block = InvertedResidual
    input_channel = 32
    last_channel = 1280
    # setting of inverted residual blocks
    self.cfgs = inverted_residual_setting
    if inverted_residual_setting is None:
        self.cfgs = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

    # building first layer
    input_channel = _make_divisible(input_channel * width_mult, round_nearest)
    self.out_channels = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
    # fake quantization on the network input
    self.input_fake = nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay,
                                             quant_delay=_quant_delay)
    features = [ConvBNReLU(3, input_channel, stride=2)]
    # building inverted residual blocks
    for t, c, n, s in self.cfgs:
        output_channel = _make_divisible(c * width_mult, round_nearest)
        for i in range(n):
            stride = s if i == 0 else 1
            features.append(block(input_channel, output_channel, stride, expand_ratio=t))
            input_channel = output_channel
    # building last several layers
    features.append(ConvBNReLU(input_channel, self.out_channels, kernel_size=1))
    # assemble the feature extractor as a SequentialCell
    self.features = nn.SequentialCell(features)
    # mobilenet head: global pooling, (optional dropout,) quantized classifier, output fake quant
    head = ([GlobalAvgPooling(),
             nn.DenseQuant(self.out_channels, num_classes, has_bias=True,
                           per_channel=_per_channel, symmetric=_symmetric,
                           quant_delay=_quant_delay),
             nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay)]
            if not has_dropout else
            [GlobalAvgPooling(),
             nn.Dropout(0.2),
             nn.DenseQuant(self.out_channels, num_classes, has_bias=True,
                           per_channel=_per_channel, symmetric=_symmetric,
                           quant_delay=_quant_delay),
             nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay,
                                    quant_delay=_quant_delay)])
    self.head = nn.SequentialCell(head)
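# Hedged alternative: instead of hand-writing the *Quant layers as in the snippets
# above, a plain float network can be rewritten automatically with the
# QuantizationAwareTraining converter from the MindSpore 1.x compression API. The
# exact settings and the float_mobilenet instance below are assumptions mirroring
# the manual networks, not something shown in the original.
from mindspore.compression.quant import QuantizationAwareTraining

quantizer = QuantizationAwareTraining(bn_fold=True,
                                      per_channel=[True, False],
                                      symmetric=[True, False])
net_quant = quantizer.quantize(float_mobilenet)  # float_mobilenet: a plain float MobileNetV2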