Example #1
    def __init__(self, channel_num):
        super(CARBBlock, self).__init__()
        # Residual feature branch: 3x3 conv -> ReLU -> 3x3 conv, channel count preserved.
        self.conv1 = M.Sequential(
            M.Conv2d(channel_num,
                     channel_num,
                     kernel_size=3,
                     padding=1,
                     stride=1),
            M.ReLU(),
            M.Conv2d(channel_num,
                     channel_num,
                     kernel_size=3,
                     padding=1,
                     stride=1),
        )
        # self.global_average_pooling = nn.AdaptiveAvgPool2d((1,1))  # B,C,H,W -> B,C,1,1
        # Channel attention: squeeze to C//2, expand back to C, gate with sigmoid.
        self.linear = M.Sequential(M.Linear(channel_num, channel_num // 2),
                                   M.ReLU(),
                                   M.Linear(channel_num // 2, channel_num),
                                   M.Sigmoid())
        # 1x1 conv fusing the concatenated 2*C channels back to C.
        self.conv2 = M.Conv2d(channel_num * 2,
                              channel_num,
                              kernel_size=1,
                              padding=0,
                              stride=1)
        self.lrelu = M.LeakyReLU()
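The snippet shows only __init__; a forward pass consistent with these layers could look like the sketch below. This is an assumption rather than code from the source (the real CARBBlock may differ) and it presumes megengine.functional is imported as F.

    def forward(self, x):
        # Hypothetical sketch: residual branch, SE-style channel gate from the
        # globally averaged features, then a 1x1 fusion of the concatenation.
        y = self.conv1(x)                                  # B,C,H,W
        w = F.mean(y, axis=[2, 3])                         # B,C global average pooling
        w = self.linear(w)                                 # B,C gate values in (0, 1)
        y = y * w.reshape(w.shape[0], w.shape[1], 1, 1)    # broadcast over H,W
        out = self.conv2(F.concat([x, y], axis=1))         # B,2C,H,W -> B,C,H,W
        return self.lrelu(out)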
Example #2
    def __init__(self, inp, ksize, stride):
        super().__init__()

        self.M = 2
        self.G = 2

        self.pad = ksize // 2
        inp_gap = max(16, inp // 16)
        self.inp = inp
        self.ksize = ksize
        self.stride = stride

        self.wn_fc1 = M.Conv2d(inp_gap,
                               self.M // self.G * inp,
                               1,
                               1,
                               0,
                               groups=1,
                               bias=True)
        self.sigmoid = M.Sigmoid()
        self.wn_fc2 = M.Conv2d(self.M // self.G * inp,
                               inp * ksize * ksize,
                               1,
                               1,
                               0,
                               groups=inp,
                               bias=False)
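Only __init__ is shown, but the layout (a grouped 1x1 wn_fc2 producing inp * ksize * ksize values) suggests a weight-generating branch in the style of dynamic/conditional convolutions: a small pooled descriptor is expanded by wn_fc1, gated by the sigmoid, and turned into one ksize x ksize depthwise kernel per input channel. Below is a hedged sketch of how such generated weights could be applied, assuming megengine.functional is imported as F; the original forward is not shown and may differ.

    def forward(self, x, x_gap):
        # Hypothetical sketch. x: B,inp,H,W feature map; x_gap: B,inp_gap,1,1
        # pooled descriptor used only to generate the convolution weights.
        w = self.wn_fc1(x_gap)                     # B, M//G*inp, 1, 1
        w = self.sigmoid(w)
        w = self.wn_fc2(w)                         # B, inp*ksize*ksize, 1, 1
        # Fold the batch into the group dimension and run one depthwise conv
        # per sample with the generated kernels (grouped-conv weights in
        # MegEngine are 5-D: groups, out/groups, in/groups, kh, kw).
        batch = x.shape[0]
        x = x.reshape(1, -1, x.shape[2], x.shape[3])
        w = w.reshape(batch * self.inp, 1, 1, self.ksize, self.ksize)
        y = F.conv2d(x, w, stride=self.stride, padding=self.pad,
                     groups=batch * self.inp)
        return y.reshape(batch, self.inp, y.shape[2], y.shape[3])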
Example #3
    def __init__(self,
                 channels,
                 reduction=16,
                 norm_layer=M.BatchNorm2d,
                 activation=M.ReLU(),
                 attention_act=M.Sigmoid()):
        """

        Args:
            channels (int):
            reduction (int):
            norm_layer (M.Module):
            activation (M.Module):
            attention_act (M.Module):
        """
        super(SEModule, self).__init__()
        inter_ch = int(channels // reduction)
        self.fc = M.Sequential(
            M.AdaptiveAvgPool2d(1),
            Conv2d(channels,
                   inter_ch,
                   norm_layer=norm_layer,
                   activation=activation),
            Conv2d(inter_ch,
                   channels,
                   norm_layer=norm_layer,
                   activation=attention_act))
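The forward pass is not part of the snippet; for a squeeze-and-excitation block built this way it would plausibly be a single channel-wise rescaling (an assumption, not the source's code):

    def forward(self, x):
        # Hypothetical sketch: self.fc maps B,C,H,W to B,C,1,1 attention
        # weights (kept in (0, 1) when attention_act is M.Sigmoid), which are
        # broadcast-multiplied onto the input.
        return x * self.fc(x)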
Example #4
    def __init__(self, mode, fused=False):
        super().__init__()
        self.mode = mode
        self.fused = fused
        # Random test input of shape (1, 2, 3, 4) with values in [-4.0, 4.0).
        self.data = (np.random.random(
            (1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
        self.sigmoid = M.Sigmoid()
        self.act = ActiveOpr.str2fun[self.mode]
Example #5
    def __init__(self,
                 in_ch,
                 reduction=16,
                 norm_layer=None,
                 nolinear=M.ReLU(),
                 sigmoid=M.Sigmoid()):
        '''
            Initialize the module.
            @in_ch: int, the number of input channels,
            @reduction: int, the coefficient of dimensionality reduction,
            @norm_layer: M.Module, the batch normalization module,
            @nolinear: M.Module, the non-linear activation module,
            @sigmoid: M.Module, the gating function; H-Sigmoid in MobileNetV3,
                plain sigmoid in SENet.
        '''
        super(SEModule, self).__init__()
        if norm_layer is None:
            norm_layer = M.BatchNorm2d

        if nolinear is None:
            nolinear = M.ReLU()

        if sigmoid is None:
            sigmoid = M.Sigmoid()

        self.avgpool = M.AdaptiveAvgPool2d(1)
        self.fc = M.Sequential(
            M.Conv2d(in_ch,
                     in_ch // reduction,
                     kernel_size=1,
                     stride=1,
                     padding=0),
            norm_layer(in_ch // reduction),
            nolinear,
            M.Conv2d(in_ch // reduction,
                     in_ch,
                     kernel_size=1,
                     stride=1,
                     padding=0),
            norm_layer(in_ch),
            sigmoid,
        )
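As in the other SE variants, the forward pass (not included in the snippet) would plausibly squeeze, excite, and rescale; a sketch under that assumption:

    def forward(self, x):
        # Hypothetical sketch (not from the source): global average pooling,
        # 1x1 conv bottleneck producing a per-channel gate, then rescaling.
        w = self.avgpool(x)   # B,C,1,1
        w = self.fc(w)        # B,C,1,1 gate in (0, 1)
        return x * w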
Example #6
    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation=M.ReLU(),
                 out_activation=M.Sigmoid()):
        super(SEBlock, self).__init__()
        mid_channels = channels // reduction if not round_mid else round_channels(
            float(channels) / reduction)
        # Squeeze: 1x1 conv down to mid_channels.
        self.conv1 = conv1x1(in_channels=channels,
                             out_channels=mid_channels,
                             bias=True)
        self.activ = mid_activation
        # Excite: 1x1 conv back to channels, gated by out_activation.
        self.conv2 = conv1x1(in_channels=mid_channels,
                             out_channels=channels,
                             bias=True)
        self.sigmoid = out_activation
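This snippet also omits forward; a sketch of the usual SE computation for these layers, assuming megengine.functional is imported as F (an inference, not the source):

    def forward(self, x):
        # Hypothetical sketch: squeeze with global average pooling, reduce and
        # expand with the two 1x1 convs, then rescale the input channel-wise.
        w = F.mean(x, axis=[2, 3], keepdims=True)   # B,C,1,1
        w = self.activ(self.conv1(w))               # B,mid_channels,1,1
        w = self.sigmoid(self.conv2(w))             # B,C,1,1 gate in (0, 1)
        return x * w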
Example #7
    def __init__(self):
        super(Swish, self).__init__()
        self.activate = M.Sigmoid()
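Swish is defined as x * sigmoid(x), so the forward method omitted from the snippet is very likely the one-liner below (still an inference, since only __init__ is shown):

    def forward(self, x):
        # Swish activation: x * sigmoid(x).
        return x * self.activate(x)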