Example #1
import torch.nn as nn

# Conv2d_Q (the activation/weight-quantized convolution) is defined
# elsewhere in this project's quantization module.
class DorefaConv2d(nn.Module):
    def __init__(self,
                 input_channels,
                 output_channels,
                 kernel_size=-1,       # -1 is a sentinel: callers must pass explicit values
                 stride=-1,
                 padding=-1,
                 groups=1,
                 channel_shuffle=0,    # 1 -> apply channel shuffle in the forward pass
                 shuffle_groups=1,     # group count used by the channel shuffle
                 last_relu=0,          # 1 -> apply ReLU at the end of the block
                 abits=8,              # activation quantization bit-width
                 wbits=8,              # weight quantization bit-width
                 first_layer=0):       # 1 -> first layer of the network (often kept unquantized on the input side)
        super(DorefaConv2d, self).__init__()
        self.last_relu = last_relu
        self.channel_shuffle_flag = channel_shuffle
        self.shuffle_groups = shuffle_groups
        self.first_layer = first_layer

        self.q_conv = Conv2d_Q(input_channels,
                               output_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               groups=groups,
                               a_bits=abits,
                               w_bits=wbits,
                               first_layer=first_layer)
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)
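
The example shows only __init__; the forward pass is not included on this page. Below is a minimal sketch of how the stored flags are typically consumed, together with a standard ShuffleNet-style channel_shuffle helper. Both the helper and this forward are illustrative, not the repo's verbatim code:

def channel_shuffle(x, groups):
    # Standard ShuffleNet shuffle: regroup channels so information
    # mixes across grouped convolutions.
    n, c, h, w = x.size()
    x = x.view(n, groups, c // groups, h, w)
    x = x.transpose(1, 2).contiguous()
    return x.view(n, c, h, w)

    def forward(self, x):  # hypothetical continuation of DorefaConv2d above
        if self.channel_shuffle_flag:
            x = channel_shuffle(x, groups=self.shuffle_groups)
        x = self.q_conv(x)   # quantized convolution
        x = self.bn(x)       # batch normalization
        if self.last_relu:   # ReLU only when the flag requests it
            x = self.relu(x)
        return x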
Example #2
import torch.nn as nn

# Conv2d_Q and BNFold_Conv2d_Q come from this project's quantization module.
class QuanConv2d(nn.Module):
    def __init__(self, input_channels, output_channels,
                 kernel_size=-1, stride=-1, padding=-1, groups=1,
                 channel_shuffle=0, shuffle_groups=1, last_relu=0,
                 abits=8, wbits=8, bn_fold=0, q_type=1, first_layer=0):
        super(QuanConv2d, self).__init__()
        self.last_relu = last_relu
        self.channel_shuffle_flag = channel_shuffle
        self.shuffle_groups = shuffle_groups
        self.bn_fold = bn_fold
        self.first_layer = first_layer

        if self.bn_fold == 1:
            # BatchNorm folded into the quantized convolution as one fused op.
            self.bn_q_conv = BNFold_Conv2d_Q(
                input_channels, output_channels, kernel_size=kernel_size,
                stride=stride, padding=padding, groups=groups, a_bits=abits,
                w_bits=wbits, q_type=q_type, first_layer=first_layer)
        else:
            self.q_conv = Conv2d_Q(
                input_channels, output_channels, kernel_size=kernel_size,
                stride=stride, padding=padding, groups=groups, a_bits=abits,
                w_bits=wbits, q_type=q_type, first_layer=first_layer)
            # Momentum lowered from 0.1 to 0.01: quantization makes batch
            # statistics jitter, so per-batch statistics get less weight,
            # damping the jitter. Experimentally this improved quantization-
            # aware training, raising accuracy by about 1%.
            self.bn = nn.BatchNorm2d(output_channels, momentum=0.01)
        self.relu = nn.ReLU(inplace=True)
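
For illustration, a hypothetical instantiation of both branches; it assumes Conv2d_Q and BNFold_Conv2d_Q are importable, and the channel counts are made up:

# bn_fold=1: BN is folded into the conv, so only self.bn_q_conv exists.
fused = QuanConv2d(32, 64, kernel_size=3, stride=1, padding=1,
                   abits=8, wbits=8, bn_fold=1)

# bn_fold=0: separate self.q_conv plus BatchNorm2d (with momentum=0.01).
unfused = QuanConv2d(32, 64, kernel_size=3, stride=1, padding=1,
                     abits=8, wbits=8, bn_fold=0)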
Example #3
import torch.nn as nn

# Conv2d_Q and BNFold_Conv2d_Q come from this project's quantization module.
class QuanConv2d(nn.Module):
    def __init__(self,
                 input_channels,
                 output_channels,
                 kernel_size=-1,
                 stride=-1,
                 padding=-1,
                 last_relu=0,
                 groups=1,
                 channel_shuffle=0,
                 shuffle_groups=1,
                 abits=8,
                 wbits=8,
                 bn_fold=0,
                 q_type=1,
                 first_layer=0):
        super(QuanConv2d, self).__init__()
        self.last_relu = last_relu
        self.channel_shuffle_flag = channel_shuffle
        self.shuffle_groups = shuffle_groups
        self.bn_fold = bn_fold
        self.first_layer = first_layer

        if self.bn_fold == 1:
            # Fused path: BatchNorm folded into the quantized convolution.
            self.bn_q_conv = BNFold_Conv2d_Q(input_channels,
                                             output_channels,
                                             kernel_size=kernel_size,
                                             stride=stride,
                                             padding=padding,
                                             groups=groups,
                                             a_bits=abits,
                                             w_bits=wbits,
                                             q_type=q_type,
                                             first_layer=first_layer)
        else:
            # Unfused path: quantized convolution followed by a separate BatchNorm.
            self.q_conv = Conv2d_Q(input_channels,
                                   output_channels,
                                   kernel_size=kernel_size,
                                   stride=stride,
                                   padding=padding,
                                   groups=groups,
                                   a_bits=abits,
                                   w_bits=wbits,
                                   q_type=q_type,
                                   first_layer=first_layer)
            self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)
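
As in Example #1, only __init__ appears here. A sketch of a matching forward that dispatches on bn_fold, reusing the channel_shuffle helper from the sketch under Example #1 (illustrative, not the repo's verbatim code):

    def forward(self, x):  # hypothetical continuation of QuanConv2d above
        if self.channel_shuffle_flag:
            x = channel_shuffle(x, groups=self.shuffle_groups)
        if self.bn_fold == 1:
            x = self.bn_q_conv(x)  # fused quantized conv + BN
        else:
            x = self.q_conv(x)     # quantized conv, then separate BN
            x = self.bn(x)
        if self.last_relu:
            x = self.relu(x)
        return x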