Code example #1
0
    def __init__(self, cfg=None, A=2):
        """Build the NIN-style network as a single Sequential pipeline.

        Args:
            cfg: per-layer channel widths (8 entries); a stock configuration
                is used when None.
            A: activation-quantization setting forwarded to every
                Tnn_Bin_Conv2d block.
        """
        super(Net, self).__init__()
        if cfg is None:
            # Default model structure (channel widths per layer).
            cfg = [192, 160, 96, 192, 192, 192, 192, 192]

        # Stage 1: full-precision stem conv, two 1x1 binarized convs, max-pool.
        stage1 = [
            nn.Conv2d(3, cfg[0], kernel_size=5, stride=1, padding=2),
            bn.BatchNorm2d_bin(cfg[0], affine_flag=2),
            Tnn_Bin_Conv2d(cfg[0], cfg[1], kernel_size=1, stride=1, padding=0, A=A),
            Tnn_Bin_Conv2d(cfg[1], cfg[2], kernel_size=1, stride=1, padding=0, activation_mp=1, A=A),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        ]
        # Stage 2: 5x5 binarized conv, two 1x1 convs, average-pool.
        stage2 = [
            Tnn_Bin_Conv2d(cfg[2], cfg[3], kernel_size=5, stride=1, padding=2, activation_nor=0, A=A),
            Tnn_Bin_Conv2d(cfg[3], cfg[4], kernel_size=1, stride=1, padding=0, A=A),
            Tnn_Bin_Conv2d(cfg[4], cfg[5], kernel_size=1, stride=1, padding=0, activation_mp=1, A=A),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
        ]
        # Stage 3: final binarized convs, full-precision 1x1 classifier head,
        # global average pool down to the 10 class scores.
        stage3 = [
            Tnn_Bin_Conv2d(cfg[5], cfg[6], kernel_size=3, stride=1, padding=1, activation_nor=0, A=A),
            Tnn_Bin_Conv2d(cfg[6], cfg[7], kernel_size=1, stride=1, padding=0, last=0, last_relu=1, A=A),
            nn.Conv2d(cfg[7], 10, kernel_size=1, stride=1, padding=0),
            bn.BatchNorm2d_bin(10, affine_flag=2),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=8, stride=1, padding=0),
        ]
        self.tnn_bin = nn.Sequential(*stage1, *stage2, *stage3)
Code example #2
0
    def __init__(self, cfg=None, A=2):
        """Build the grouped / channel-shuffled variant of the network.

        Args:
            cfg: per-layer channel widths (8 entries); a stock wide
                configuration is used when None.
            A: activation-quantization setting forwarded to every
                Tnn_Bin_Conv2d block.
        """
        super(Net, self).__init__()
        if cfg is None:
            # Default model structure (channel widths per layer).
            cfg = [256, 256, 256, 512, 512, 512, 1024, 1024]

        # Stage 1: stem conv + two grouped 1x1 convs, then 2x2 max-pool.
        stage1 = [
            nn.Conv2d(3, cfg[0], kernel_size=5, stride=1, padding=2),
            bn.BatchNorm2d_bin(cfg[0], affine_flag=2),
            Tnn_Bin_Conv2d(cfg[0], cfg[1], kernel_size=1, stride=1, padding=0, groups=2, channel_shuffle=0, A=A),
            Tnn_Bin_Conv2d(cfg[1], cfg[2], kernel_size=1, stride=1, padding=0, groups=2, channel_shuffle=1, shuffle_groups=2, activation_mp=1, A=A),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
        ]
        # Stage 2: 3x3 grouped conv + two grouped 1x1 convs, 2x2 max-pool.
        # shuffle_groups mirrors the previous layer's groups so the shuffle
        # undoes that layer's grouping.
        stage2 = [
            Tnn_Bin_Conv2d(cfg[2], cfg[3], kernel_size=3, stride=1, padding=1, groups=16, channel_shuffle=1, shuffle_groups=2, activation_nor=0, A=A),
            Tnn_Bin_Conv2d(cfg[3], cfg[4], kernel_size=1, stride=1, padding=0, groups=4, channel_shuffle=1, shuffle_groups=16, A=A),
            Tnn_Bin_Conv2d(cfg[4], cfg[5], kernel_size=1, stride=1, padding=0, groups=4, channel_shuffle=1, shuffle_groups=4, activation_mp=1, A=A),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
        ]
        # Stage 3: final grouped convs, full-precision 1x1 classifier head,
        # global average pool down to the 10 class scores.
        stage3 = [
            Tnn_Bin_Conv2d(cfg[5], cfg[6], kernel_size=3, stride=1, padding=1, groups=32, channel_shuffle=1, shuffle_groups=4, activation_nor=0, A=A),
            Tnn_Bin_Conv2d(cfg[6], cfg[7], kernel_size=1, stride=1, padding=0, groups=8, channel_shuffle=1, shuffle_groups=32, last=0, last_relu=1, A=A),
            nn.Conv2d(cfg[7], 10, kernel_size=1, stride=1, padding=0),
            bn.BatchNorm2d_bin(10, affine_flag=2),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=8, stride=1, padding=0),
        ]
        self.tnn_bin = nn.Sequential(*stage1, *stage2, *stage3)
Code example #3
0
File: nin.py  Project: lufeng22/model-compression-1
 def __init__(self, input_channels, output_channels, kernel_size=-1,
              stride=-1, padding=-1, dropout=0, last=0, activation_mp=0,
              activation_nor=1, last_relu=0, A=2):
     """Conv -> custom binarized BN -> ReLU building block.

     Stores the activation/ordering flags that the forward pass consults,
     then creates the optional dropout, the convolution, the custom batch
     norm, and the ReLU.
     """
     super(Tnn_Bin_Conv2d, self).__init__()
     # Flags consumed elsewhere (presumably in forward()) to pick the
     # activation/quantization path.
     self.A = A
     self.dropout_ratio = dropout
     self.last = last
     self.activation_mp = activation_mp
     self.activation_nor = activation_nor
     self.last_relu = last_relu
     # Dropout layer is only materialized when a nonzero ratio is requested.
     if dropout != 0:
         self.dropout = nn.Dropout(dropout)
     self.conv = nn.Conv2d(input_channels, output_channels,
                           kernel_size=kernel_size, stride=stride,
                           padding=padding)
     # Custom BN (gamma fixed to 1, beta trainable per affine_flag=2);
     # replaces the stock nn.BatchNorm2d.
     self.bn = bn.BatchNorm2d_bin(output_channels, momentum=0.1,
                                  affine_flag=2)
     self.relu = nn.ReLU(inplace=True)
Code example #4
0
 def __init__(self, input_channels, output_channels, kernel_size=-1,
              stride=-1, padding=-1, dropout=0, groups=1, last_relu=0,
              A=2, W=2):
     """Quantized conv (ternary/binary weights) -> custom BN -> ReLU block.

     A and W select the activation/weight quantization levels passed on to
     Conv2d_Q.
     """
     super(Tnn_Bin_Conv2d, self).__init__()
     # Quantization settings and flags consumed by the rest of the module.
     self.A = A
     self.W = W
     self.dropout_ratio = dropout
     self.last_relu = last_relu
     # Dropout layer is only materialized when a nonzero ratio is requested.
     if self.dropout_ratio != 0:
         self.dropout = nn.Dropout(dropout)
     # Quantized (ternary/binary) convolution in place of nn.Conv2d.
     self.tnn_bin_conv = Conv2d_Q(input_channels, output_channels,
                                  kernel_size=kernel_size, stride=stride,
                                  padding=padding, groups=groups,
                                  A=A, W=W)
     # Custom BN (gamma fixed to 1, beta trainable per affine_flag=2);
     # replaces the stock nn.BatchNorm2d.
     self.bn = bn.BatchNorm2d_bin(output_channels, affine_flag=2)
     self.relu = nn.ReLU(inplace=True)
Code example #5
0
 def __init__(self, input_channels, output_channels, kernel_size=-1,
              stride=-1, padding=-1, dropout=0, groups=1, channel_shuffle=0,
              shuffle_groups=1, last=0, activation_mp=0, activation_nor=1,
              last_relu=0, A=2):
     """Grouped conv block with optional channel shuffle, custom BN, ReLU."""
     super(Tnn_Bin_Conv2d, self).__init__()
     # Flags consumed elsewhere (presumably in forward()) to pick the
     # activation path and apply the channel shuffle.
     self.A = A
     self.dropout_ratio = dropout
     self.channel_shuffle_flag = channel_shuffle
     self.shuffle_groups = shuffle_groups
     self.last = last
     self.activation_mp = activation_mp
     self.activation_nor = activation_nor
     self.last_relu = last_relu
     # Dropout layer is only materialized when a nonzero ratio is requested.
     if dropout != 0:
         self.dropout = nn.Dropout(dropout)
     self.conv = nn.Conv2d(input_channels, output_channels,
                           kernel_size=kernel_size, stride=stride,
                           padding=padding, groups=groups)
     # NOTE(review): original comment claimed gamma=1 / beta-trainable but
     # passes affine_flag=1 here while the sibling variants pass 2 — confirm
     # which flag value is intended.
     self.bn = bn.BatchNorm2d_bin(output_channels, affine_flag=1)
     self.relu = nn.ReLU(inplace=True)