Code Example #1
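All six excerpts below depend on names that never appear in this section: the torch imports, a Layer module exposing Conv2d_1w1a (a 1-bit-weight / 1-bit-activation Conv2d drop-in, as in IR-Net-style binary-network code), and a ShuffleBlock channel shuffle. A minimal sketch of that assumed context follows; the exact signatures are guesses, not the project's actual code. (LambdaLayer, also used below, is defined in Example #5.)

import torch
import torch.nn as nn
import torch.nn.functional as F

import Layer  # assumed: provides Conv2d_1w1a, a binary (1w/1a) Conv2d replacement


class ShuffleBlock(nn.Module):
    """ShuffleNet-style channel shuffle; a sketch of the helper used below."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        n, c, h, w = x.size()
        g = self.groups
        # [N, C, H, W] -> [N, g, C/g, H, W] -> swap group/channel axes -> flatten
        return x.view(n, g, c // g, h, w).transpose(1, 2).reshape(n, c, h, w)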
class BinaryGroupConv(nn.Module):
    def __init__(self, C_in, C_out, kernel_size, stride, padding, group,
                 affine=True, options="D"):
        super(BinaryGroupConv, self).__init__()
        # The passed-in `group` is ignored (and C_out is unused): the grouped
        # conv keeps C_in channels with a fixed group count of C_in // 4.
        self.group = C_in // 4
        self.bn_1 = nn.BatchNorm2d(C_in, affine=affine)
        self.conv_1 = Layer.Conv2d_1w1a(C_in, C_in, kernel_size=kernel_size,
                                        stride=stride, padding=padding,
                                        groups=self.group, bias=False)
        self.shuffle = ShuffleBlock(self.group)

        self.shortcut = nn.Sequential()
        if stride != 1:
            if options == "A":
                # ResNet option-A shortcut: subsample spatially, zero-pad channels.
                self.shortcut = LambdaLayer(lambda x: F.pad(
                    x[:, :, ::2, ::2],
                    (0, 0, 0, 0, C_in // 4, C_in // 4), "constant", 0))
            elif options == "B":
                self.shortcut = nn.Sequential(
                    Layer.Conv2d_1w1a(C_in, C_in, kernel_size=1,
                                      stride=stride, bias=False),
                    nn.BatchNorm2d(C_in, affine=affine))
            elif options == "C":
                self.shortcut = nn.Sequential(
                    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
            else:
                self.shortcut = nn.Sequential(
                    nn.AvgPool2d(kernel_size=3, stride=2, padding=1))
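The excerpt stops at __init__, so the block's wiring is not shown. Since bn_1 is declared before conv_1, BN-before-binary-conv is a plausible ordering (it is common in 1-bit networks), but the following forward, which would sit inside the class above, is only a guess at the intended data flow:

    # Hypothetical forward(), not in the source; the BN -> binary grouped conv
    # -> channel shuffle -> residual ordering is an assumption.
    def forward(self, x):
        out = self.conv_1(self.bn_1(x))
        out = self.shuffle(out)
        return out + self.shortcut(x)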
Code Example #2
class FactorizedReduce(nn.Module):
    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        # Two stride-2 1x1 binary convs, each producing half the output channels.
        self.conv_1 = Layer.Conv2d_1w1a(C_in, C_out // 2, 1,
                                        stride=2, padding=0, bias=False)
        self.conv_2 = Layer.Conv2d_1w1a(C_in, C_out // 2, 1,
                                        stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)
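No forward appears in the excerpt. In the original DARTS code, FactorizedReduce concatenates two stride-2 paths, the second fed a one-pixel-shifted input so the pair samples complementary spatial positions; presumably this binary variant does the same:

    # Hypothetical forward(), following the standard DARTS FactorizedReduce wiring.
    def forward(self, x):
        out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
        return self.bn(out)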
Code Example #3
class BinaryDilGroupConv(nn.Module):
    def __init__(self,
                 C_in,
                 C_out,
                 kernel_size,
                 stride,
                 padding,
                 dilation,
                 group,
                 affine=True,
                 options="B"):
        super(BinaryDilGroupConv, self).__init__()

        self.conv_1 = Layer.Conv2d_1w1a(C_in,
                                        C_in,
                                        kernel_size=kernel_size,
                                        stride=stride,
                                        padding=padding,
                                        dilation=dilation,
                                        groups=group,
                                        bias=False)
        self.bn_1 = nn.BatchNorm2d(C_in, affine=affine)
        self.conv_2 = Layer.Conv2d_1w1a(C_in,
                                        C_out,
                                        kernel_size=1,
                                        padding=0,
                                        bias=False)
        self.bn_2 = nn.BatchNorm2d(C_out, affine=affine)
        self.shortcut = nn.Sequential()
        if stride != 1:
            if options == "A":
                self.shortcut = LambdaLayer(lambda x: F.pad(
                    x[:, :, ::2, ::2],
                    (0, 0, 0, 0, C_in // 4, C_in // 4), "constant", 0))
            elif options == "B":
                self.shortcut = nn.Sequential(
                    Layer.Conv2d_1w1a(C_in,
                                      C_in,
                                      kernel_size=1,
                                      stride=stride,
                                      bias=False),
                    nn.BatchNorm2d(C_in, affine=affine))
            else:
                self.shortcut = nn.Sequential(
                    nn.MaxPool2d(kernel_size=kernel_size,
                                 stride=stride,
                                 padding=padding))
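Again only __init__ is shown. Below is a sketch of a plausible forward for the class above, with the residual taken after the grouped dilated conv and the 1x1 conv then mapping C_in to C_out; this ordering is an assumption, not the source's code:

    # Hypothetical forward(), not in the source.
    def forward(self, x):
        out = self.bn_1(self.conv_1(x))      # grouped dilated binary conv
        out = out + self.shortcut(x)         # residual at C_in channels
        return self.bn_2(self.conv_2(out))   # 1x1 binary conv: C_in -> C_out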
Code Example #4
class BinaryConvBN(nn.Module):
    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(BinaryConvBN, self).__init__()
        self.op = nn.Sequential(
            Layer.Conv2d_1w1a(C_in, C_out, kernel_size,
                              stride=stride, padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine))
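The forward is omitted here too, but since everything lives in one nn.Sequential it is presumably just:

    def forward(self, x):
        return self.op(x)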
Code Example #5
OPS = {  # registry name assumed; the source excerpt begins mid-dict
    'group_conv_3x3':
    lambda C, stride, group, affine: BinaryGroupConv(
        C, C, 3, stride, 1, group=group, affine=affine),
    'group_conv_5x5':
    lambda C, stride, group, affine: BinaryGroupConv(
        C, C, 5, stride, 2, group=group, affine=affine),
    'group_conv_7x7':
    lambda C, stride, group, affine: BinaryGroupConv(
        C, C, 7, stride, 3, group=group, affine=affine),
    'dil_group_conv_3x3':
    lambda C, stride, group, affine: BinaryDilGroupConv(
        C, C, 3, stride, 2, 2, group=group, affine=affine),
    'dil_group_conv_5x5':
    lambda C, stride, group, affine: BinaryDilGroupConv(
        C, C, 5, stride, 4, 2, group=group, affine=affine),
    'group_conv_7x1_1x7':
    lambda C, stride, group, affine: nn.Sequential(
        Layer.Conv2d_1w1a(
            C, C, (1, 7), stride=(1, stride), padding=(0, 3), bias=False),
        nn.BatchNorm2d(C, affine=affine),
        Layer.Conv2d_1w1a(
            C, C, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False),
        nn.BatchNorm2d(C, affine=affine)),
}
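For context, each registry entry is a factory taking (C, stride, group, affine). A hypothetical usage, assuming the dict really is named OPS as reconstructed above (note this particular entry accepts group but does not use it):

op = OPS['group_conv_7x1_1x7'](C=64, stride=1, group=16, affine=True)
y = op(torch.randn(2, 64, 32, 32))  # spatial size preserved at stride=1: [2, 64, 32, 32]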


class LambdaLayer(nn.Module):
    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
Code Example #6
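Example #6 also uses LearnableBias, which is not defined in this section. It matches the helper from the ReActNet reference implementation: a per-channel learnable bias added to the input.

class LearnableBias(nn.Module):
    def __init__(self, out_chn):
        super(LearnableBias, self).__init__()
        self.bias = nn.Parameter(torch.zeros(1, out_chn, 1, 1), requires_grad=True)

    def forward(self, x):
        return x + self.bias.expand_as(x)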
class Reactblock(nn.Module):
    def __init__(self,
                 C_in,
                 C_out,
                 kernel_size,
                 stride,
                 padding,
                 dilation,
                 group,
                 affine=True,
                 options="D"):
        super(Reactblock, self).__init__()
        norm_layer = nn.BatchNorm2d

        self.stride = stride
        self.inplanes = C_in
        self.planes = C_out
        self.group = C_in // 6  # group count differs from BinaryGroupConv's C_in // 4

        # react sign
        self.move11 = LearnableBias(C_in)
        # Despite the name, this 3x3 conv is full precision (nn.Conv2d),
        # not the binary Layer.Conv2d_1w1a used elsewhere in these blocks.
        self.binary_3x3 = nn.Conv2d(C_in,
                                    C_in,
                                    kernel_size=kernel_size,
                                    dilation=dilation,
                                    stride=stride,
                                    padding=padding,
                                    groups=self.group,
                                    bias=False)
        self.bn1 = norm_layer(C_in, affine=affine)
        self.shuffle = ShuffleBlock(self.group)
        self.shortcut = nn.Sequential()
        if stride != 1:
            if options == "A":
                self.shortcut = LambdaLayer(lambda x: F.pad(
                    x[:, :, ::2, ::2],
                    (0, 0, 0, 0, C_in // 4, C_in // 4), "constant", 0))
            elif options == "B":
                self.shortcut = nn.Sequential(
                    Layer.Conv2d_1w1a(C_in,
                                      C_in,
                                      kernel_size=1,
                                      stride=stride,
                                      bias=False),
                    nn.BatchNorm2d(C_in, affine=affine))
            elif options == "C":
                self.shortcut = nn.Sequential(
                    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
            else:
                self.shortcut = nn.Sequential(
                    nn.AvgPool2d(kernel_size=3, stride=2, padding=1))
        # react prelu
        self.move12 = LearnableBias(C_in)
        self.prelu1 = nn.PReLU(C_in)
        self.move13 = LearnableBias(C_in)

        # react sign
        self.move21 = LearnableBias(C_in)
        self.binary_pw = Layer.Conv2d_1w1a(C_in,
                                           C_out,
                                           kernel_size=1,
                                           stride=1,
                                           bias=False)
        self.bn2 = norm_layer(C_out, affine=affine)

        self.move22 = LearnableBias(C_out)
        self.prelu2 = nn.PReLU(C_out)
        self.move23 = LearnableBias(C_out)
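The excerpt once more ends at __init__. ReActNet's basic block runs bias-shift, binary conv, BN, residual, then the RPReLU triple (bias-shift, PReLU, bias-shift), twice over; the sketch below wires the class above in that pattern, but the exact ordering and the second residual are assumptions:

    # Hypothetical forward(), following the ReActNet basic-block pattern.
    def forward(self, x):
        out1 = self.move11(x)                  # learnable bias before the 3x3 conv
        out1 = self.bn1(self.binary_3x3(out1))
        out1 = self.shuffle(out1)
        out1 = out1 + self.shortcut(x)
        out1 = self.move13(self.prelu1(self.move12(out1)))  # RPReLU

        out2 = self.move21(out1)               # second stage: pointwise binary conv
        out2 = self.bn2(self.binary_pw(out2))
        if self.inplanes == self.planes:       # residual only when channels match
            out2 = out2 + out1
        out2 = self.move23(self.prelu2(self.move22(out2)))  # RPReLU
        return out2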