Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 momentum=0.1,
                 channel_shuffle=0,
                 shuffle_groups=1,
                 a_bits=8,
                 w_bits=8,
                 first_layer=0):
        super(QuantConvBNReLU, self).__init__()
        self.channel_shuffle_flag = channel_shuffle
        self.shuffle_groups = shuffle_groups

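        # Quantized convolution: activations quantized to a_bits, weights to w_bits.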
        self.quant_conv = QuantConv2d(in_channels,
                                      out_channels,
                                      kernel_size,
                                      stride=stride,
                                      padding=padding,
                                      dilation=dilation,
                                      groups=groups,
                                      bias=bias,
                                      padding_mode=padding_mode,
                                      a_bits=a_bits,
                                      w_bits=w_bits,
                                      first_layer=first_layer)
        self.bn = nn.BatchNorm2d(out_channels, momentum=momentum)
        self.relu = nn.ReLU(inplace=True)
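A minimal sketch of how this block's forward pass might look, assuming a channel_shuffle(x, groups) helper is available in the surrounding module (the helper name and the exact ordering are assumptions, not shown in the snippet):

    def forward(self, x):
        # Optionally reorder channels across groups before the quantized conv
        # (channel_shuffle is a hypothetical helper here).
        if self.channel_shuffle_flag:
            x = channel_shuffle(x, groups=self.shuffle_groups)
        x = self.quant_conv(x)  # quantized convolution (a_bits activations, w_bits weights)
        x = self.bn(x)          # batch normalization
        x = self.relu(x)        # full-precision ReLU
        return x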
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 momentum=0.1,
                 A=2,
                 W=2,
                 last_relu=0,
                 last_bin=0):
        super(TnnBinConvBNReLU, self).__init__()
        self.last_relu = last_relu
        self.last_bin = last_bin

        # ********************* Quantized (ternary/binary) convolution *********************
        self.tnn_bin_conv = QuantConv2d(in_channels,
                                        out_channels,
                                        kernel_size,
                                        stride=stride,
                                        padding=padding,
                                        dilation=dilation,
                                        groups=groups,
                                        bias=bias,
                                        padding_mode=padding_mode,
                                        A=A,
                                        W=W)
        self.bn = nn.BatchNorm2d(out_channels, momentum=momentum)
        self.relu = nn.ReLU(inplace=True)
        self.activation_bin = ActivationBin(A=A)
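A hedged sketch of a plausible forward pass for this block; exactly how the last_relu and last_bin flags gate the final activation is an assumption based on their names, not confirmed by the snippet:

    def forward(self, x):
        x = self.tnn_bin_conv(x)  # ternary/binary quantized convolution
        x = self.bn(x)            # batch normalization
        if self.last_relu:
            x = self.relu(x)            # keep a full-precision ReLU on the final block
        if self.last_bin:
            x = self.activation_bin(x)  # binarize the output activations when requested
        return x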
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 eps=1e-5,
                 momentum=0.1,
                 channel_shuffle=0,
                 shuffle_groups=1,
                 a_bits=8,
                 w_bits=8,
                 bn_fuse=0,
                 q_type=1,
                 q_level=0,
                 first_layer=0,
                 device='cuda',
                 weight_observer=0):
        super(QuantConvBNReLU, self).__init__()
        self.channel_shuffle_flag = channel_shuffle
        self.shuffle_groups = shuffle_groups
        self.bn_fuse = bn_fuse

        if self.bn_fuse == 1:
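            # Fold BN into the quantized conv; bias is disabled because BN supplies the affine shift.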
            self.quant_bn_fuse_conv = QuantBNFuseConv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
                bias=False,
                padding_mode=padding_mode,
                eps=eps,
                momentum=momentum,
                a_bits=a_bits,
                w_bits=w_bits,
                q_type=q_type,
                q_level=q_level,
                first_layer=first_layer,
                device=device,
                weight_observer=weight_observer)
        else:
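            # Keep the quantized conv and BatchNorm as separate layers.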
            self.quant_conv = QuantConv2d(in_channels,
                                          out_channels,
                                          kernel_size,
                                          stride=stride,
                                          padding=padding,
                                          dilation=dilation,
                                          groups=groups,
                                          bias=bias,
                                          padding_mode=padding_mode,
                                          a_bits=a_bits,
                                          w_bits=w_bits,
                                          q_type=q_type,
                                          q_level=q_level,
                                          first_layer=first_layer,
                                          device=device,
                                          weight_observer=weight_observer)
            self.bn = nn.BatchNorm2d(out_channels, eps=eps, momentum=momentum)
        self.relu = QuantReLU(inplace=True,
                              a_bits=a_bits,
                              q_type=q_type,
                              device=device)
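A sketch of how the forward pass could branch on bn_fuse, again assuming a channel_shuffle helper; this mirrors the constructor above but is not taken from the original source:

    def forward(self, x):
        if self.channel_shuffle_flag:
            x = channel_shuffle(x, groups=self.shuffle_groups)  # hypothetical helper
        if self.bn_fuse == 1:
            x = self.quant_bn_fuse_conv(x)  # conv with BN folded in
        else:
            x = self.quant_conv(x)
            x = self.bn(x)
        x = self.relu(x)  # quantized ReLU (a_bits)
        return x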