Example #1
    def __init__(self, pretrained_net, n_class, Time=False, Space=False):
        super().__init__()
        self.n_class = n_class
        self.Time = Time
        self.Space = Space
        self.pretrained_net = pretrained_net
        self.S1 = SEGet(64, 2)
        self.S2 = SEGet(128, 16)
        self.S3 = SEGet(256, 16)
        self.S4 = SEGet(512, 16)
        self.S5 = SEGet(512, 16)

        self.relu = nn.ReLU(inplace=True)
        self.deconv1 = nn.ConvTranspose2d(512,
                                          512,
                                          kernel_size=3,
                                          stride=2,
                                          padding=1,
                                          dilation=1,
                                          output_padding=1)
        self.se1 = SELayer(512, 16)
        self.bn1 = nn.BatchNorm2d(512)
        self.deconv2 = nn.ConvTranspose2d(512,
                                          256,
                                          kernel_size=3,
                                          stride=2,
                                          padding=1,
                                          dilation=1,
                                          output_padding=1)
        self.se2 = SELayer(256, 16)
        self.bn2 = nn.BatchNorm2d(256)
        self.deconv3 = nn.ConvTranspose2d(256,
                                          128,
                                          kernel_size=3,
                                          stride=2,
                                          padding=1,
                                          dilation=1,
                                          output_padding=1)
        self.se3 = SELayer(128, 16)
        self.bn3 = nn.BatchNorm2d(128)
        self.deconv4 = nn.ConvTranspose2d(128,
                                          64,
                                          kernel_size=3,
                                          stride=2,
                                          padding=1,
                                          dilation=1,
                                          output_padding=1)
        self.se4 = SELayer(64, 16)
        self.bn4 = nn.BatchNorm2d(64)
        self.deconv5 = nn.ConvTranspose2d(64,
                                          32,
                                          kernel_size=3,
                                          stride=2,
                                          padding=1,
                                          dilation=1,
                                          output_padding=1)
        self.se5 = SELayer(32, 16)
        self.bn5 = nn.BatchNorm2d(32)
        self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
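Every example on this page calls an SELayer (squeeze-and-excitation) module with a channel count and, usually, a reduction ratio. For reference, here is a minimal sketch of such a layer, consistent with the SELayer(channel, reduction) signature used throughout; the exact class each project imports may differ.

import torch.nn as nn

class SELayer(nn.Module):
    """Squeeze-and-excitation: rescale each channel by a learned gate."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)  # squeeze: one value per channel
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid())  # excitation: per-channel gate in (0, 1)

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)  # reweight the feature maps channel-wise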
Example #2
    def __init__(self):
        super(PieceSelection, self).__init__()

        self.flatten = nn.Flatten()
        self.relu = nn.ReLU()
        self.linear = nn.Linear(8192, 3)
        self.conv = nn.Conv2d(15, FILTERS, kernel_size=3, padding=1)
        self.SE1 = SELayer(FILTERS, 32)
        self.SE2 = SELayer(FILTERS, 32)
        self.SE3 = SELayer(FILTERS, 32)
Example #3
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):

        super(SEDenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Add SELayer at first convolution
        # self.features.add_module("SELayer_0a", SELayer(channel=num_init_features))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            # Add an SELayer before each dense block
            self.features.add_module("SELayer_%da" % (i + 1), SELayer(channel=num_features))

            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)

            num_features = num_features + num_layers * growth_rate

            if i != len(block_config) - 1:
                # Add an SELayer after each dense block, before the transition
                self.features.add_module("SELayer_%db" % (i + 1), SELayer(channel=num_features))

                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Add SELayer
        # self.features.add_module("SELayer_0b", SELayer(channel=num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
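Only the constructor is shown; a matching forward pass in torchvision's DenseNet style would look as follows (a sketch, assuming torch and torch.nn.functional as F are imported; the original method is not part of this snippet):

    def forward(self, x):
        features = self.features(x)  # stem, SE layers, dense blocks, transitions, norm5
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        return self.classifier(out)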
Example #4
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              norm_layer=None,
              reduction=16):
     super(SE_Bottleneck, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     width = int(planes * (base_width / 64.)) * groups
     # Both self.conv2 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv1x1(inplanes, width)
     self.bn1 = norm_layer(width)
     self.conv2 = conv3x3(width, width, stride, groups, dilation)
     self.bn2 = norm_layer(width)
     self.conv3 = conv1x1(width, planes * self.expansion)
     self.bn3 = norm_layer(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.se = SELayer(planes * self.expansion, reduction)
     self.downsample = downsample
     self.stride = stride
Example #5
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              norm_layer=None,
              *,
              reduction=16):
     super(SEBottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = nn.BatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.se = SELayer(planes * 4, reduction)
     self.downsample = downsample
     self.stride = stride
Example #6
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              ibn=False,
              reduction=16):
     super(SEBottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     if ibn:
         self.bn1 = IBN(planes)
     else:
         self.bn1 = nn.BatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.se = SELayer(planes * 4, reduction)
     self.downsample = downsample
     self.stride = stride
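None of these bottleneck variants include their forward method. Given how conv1..conv3, se, and downsample are wired above, the usual SE-ResNet pattern gates the residual branch just before the skip connection is added; a sketch:

 def forward(self, x):
     identity = x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.relu(self.bn2(self.conv2(out)))
     out = self.bn3(self.conv3(out))
     out = self.se(out)  # channel-wise recalibration of the residual branch
     if self.downsample is not None:
         identity = self.downsample(x)  # match shape when stride or channels change
     return self.relu(out + identity)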
Example #7
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        # Add an SELayer here, like the SE-PRE block illustrated in the original paper
        self.add_module("selayer", SELayer(channel=num_input_features)),

        self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
        self.add_module('relu1', nn.ReLU(inplace=True)),
        self.add_module(
            'conv1',
            nn.Conv2d(num_input_features,
                      bn_size * growth_rate,
                      kernel_size=1,
                      stride=1,
                      bias=False)),
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
        self.add_module('relu2', nn.ReLU(inplace=True)),
        self.add_module(
            'conv2',
            nn.Conv2d(bn_size * growth_rate,
                      growth_rate,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False)),
        self.drop_rate = drop_rate
Example #8
    def __init__(self, in_channel, channel_chunck, channel_reduction):
        super(Local_Global_Attention_Hybrid_Light, self).__init__()
        self.channel_reduction = channel_reduction
        self.in_channel = in_channel
        self.channel_chunck = channel_chunck
        self.nlayers = in_channel // channel_chunck
        # Build with a comprehension: [SELayer(...)] * n would register the
        # same SELayer instance n times, so every chunk would share one set
        # of weights (self.ses[0] is self.ses[1] would be True).
        self.ses = nn.ModuleList([
            SELayer(channel_chunck, channel_reduction)
            for _ in range(self.nlayers)
        ])
        self.global_attention_fc = nn.Sequential(nn.Linear(self.nlayers, 1),
                                                 nn.ReLU(True),
                                                 nn.Linear(1, self.nlayers))
        self.global_pool = nn.AdaptiveAvgPool2d(1)

        self.conv0s = nn.ModuleList([
            ConvBlock(channel_chunck, 1, kernel_size=3, stride=1, padding=1)
            for _ in range(self.nlayers)
        ])
        self.conv1s = nn.ModuleList([
            ConvBlock(1, 1, 3, stride=2, padding=1) for _ in range(self.nlayers)
        ])
        self.conv2s = nn.ModuleList([ConvBlock(1, 1, 1) for _ in range(self.nlayers)])

        self.global_spatial_decoder_top = nn.Sequential(
            ConvBlock(self.nlayers,
                      self.nlayers,
                      kernel_size=3,
                      stride=2,
                      padding=1),  # top 
            ConvBlock(self.nlayers, 1, 1, 1, 0),
            ConvBlock(1, self.nlayers, 1, 1, 0))

        # upsample

        self.global_spatial_decoder_down = nn.Sequential(
            ConvBlock(self.nlayers, self.nlayers, 3, 1, 1))
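Why the comprehensions above matter: multiplying a Python list of modules registers one shared instance, so every entry trains the same weights. A standalone check using a plain nn.Linear:

import torch.nn as nn

shared = nn.ModuleList([nn.Linear(4, 4)] * 3)  # one Linear registered three times
independent = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])

print(shared[0] is shared[1])  # True: same object, shared weights
print(independent[0] is independent[1])  # False: separate parameters
print(sum(p.numel() for p in shared.parameters()))  # 20 (duplicates are counted once)
print(sum(p.numel() for p in independent.parameters()))  # 60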
Example #9
 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, bn_momentum=0.1, sparable=False, se=False):
     super(Bottleneck, self).__init__()
     
     if sparable:
         pass
         # self.conv1 = SeparableConv2d(inplanes, planes, kernel_size=1, bias=False)
         # self.conv2 = SeparableConv2d(planes, planes, kernel_size=3, stride=stride,
         # dilation=dilation, bias=False)
         # self.conv3 = SeparableConv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
     else:
         self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                                     padding=dilation, dilation=dilation, bias=False)
         self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
     self.bn1 = nn.BatchNorm2d(planes, momentum=bn_momentum)
     self.bn2 = nn.BatchNorm2d(planes, momentum=bn_momentum)
     self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=bn_momentum)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     if se:
         self.selayer = SELayer(planes * self.expansion, reduction=16)
     self.stride = stride
Example #10
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              norm_layer=None,
              reduction=16):
     super(SE_BasicBlock, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     # Both self.conv1 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.se = SELayer(planes, reduction)
     self.downsample = downsample
     self.stride = stride
Example #11
    def __init__(self, channels, stride=1, reduction=16):
        super(aifrNet, self).__init__()
        self.se = SELayer(channels, reduction)
        self.se_net = SELayer(192, reduction)
        self.agg = Aggregate(4)  # ag_fact = 4 if we also use aggregation

        self.fc_id = mfm(5*5*192, 256, type=0)
        self.fc_output = nn.Linear(256, 10000)

        self.fc_age = mfm(5*5*192, 256, type=0)
        self.fc_age_output = nn.Linear(256, 58)
        self.genmodel = GenModel(192)

        self.fc_layer = nn.Linear(686, 4800)
        self.fc_layer1 = nn.Linear(58, 4800)
        self.fc_layer2 = nn.Linear(10000, 4800)

Example #12
 def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):
     super(SEBasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = nn.BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes, 1)
     self.bn2 = nn.BatchNorm2d(planes)
     self.se = SELayer(planes, reduction)
     self.downsample = downsample
     self.stride = stride
Example #13
 def __init__(self, inplanes, planes, stride=1, reduction=16):
     super(CifarSEBasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = nn.BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = nn.BatchNorm2d(planes)
     self.se = SELayer(planes, reduction)
     self.downsample = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False),
                                     nn.BatchNorm2d(planes))
     self.stride = stride
Example #14
 def __init__(self, num_input_features, num_output_features):
     super(_Transition, self).__init__()
     self.add_module("selayer", SELayer(channel=num_input_features))
     self.add_module('norm', nn.BatchNorm2d(num_input_features))
     self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module(
         'conv',
         nn.Conv2d(num_input_features,
                   num_output_features,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
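Because _Transition subclasses nn.Sequential (as in torchvision), registration order is execution order: the SE gate recalibrates the incoming features before normalization, the 1x1 convolution, and pooling. A quick shape check, assuming the class above is in scope:

import torch

trans = _Transition(num_input_features=256, num_output_features=128)
x = torch.randn(2, 256, 28, 28)
y = trans(x)  # SELayer -> BatchNorm -> ReLU -> 1x1 conv -> 2x2 avg pool
print(y.shape)  # torch.Size([2, 128, 14, 14])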
Example #15
 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, bn_momentum=0.1, sparable=False, se=False):
     super(BasicBlock, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, bias=False)
     self.bn1 = nn.BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                                     padding=dilation, dilation=dilation, bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
     self.se = se
     if se:
         self.selayer = SELayer(planes, reduction=16)
Example #16
    def __init__(self):
        super(Mixed_6a, self).__init__()

        self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)

        self.branch1 = nn.Sequential(
            BasicConv2d(320, 256, kernel_size=1, stride=1),
            BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2)
        )

        self.branch2 = nn.MaxPool2d(3, stride=2)
        self.se = SELayer(1088)
Example #17
    def __init__(self, scale=1.0):
        super(Block17, self).__init__()

        self.scale = scale

        self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(1088, 128, kernel_size=1, stride=1),
            BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)),
            BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0))
        )

        self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
        self.se = SELayer(1088)  # conv2d outputs 1088 channels; SELayer(1080) would not match
        self.relu = nn.ReLU(inplace=False)
Example #18
    def __init__(self,
                 inplanes,
                 planes,
                 baseWidth,
                 cardinality,
                 stride=1,
                 downsample=None):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            baseWidth: base width.
            cardinality: num of convolution groups.
            stride: conv stride. Replaces pooling layer.
        """
        super(Bottleneck, self).__init__()

        D = int(math.floor(planes * (baseWidth / 64)))
        C = cardinality
        reduction = 16

        self.conv1 = nn.Conv2d(inplanes,
                               D * C,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(D * C)
        self.conv2 = nn.Conv2d(D * C,
                               D * C,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               groups=C,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(D * C)
        self.conv3 = nn.Conv2d(D * C,
                               planes * 4,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)

        self.se = SELayer(planes * 4, reduction)
        self.downsample = downsample
Example #19
    def __init__(self, inp, oup, stride, expand_ratio, reduction=16):
        super(SEInvertedResidual, self).__init__()
        self.stride = stride
        self.se = SELayer(inp, reduction)
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          3,
                          stride=stride,
                          padding=1,
                          groups=hidden_dim,
                          bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          3,
                          stride,
                          1,
                          groups=hidden_dim,
                          bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
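The forward method is not shown. Since the SE gate is built with inp channels, it can only be applied to the block input; a hedged sketch of a matching forward:

    def forward(self, x):
        out = self.conv(self.se(x))  # recalibrate the input, then run the inverted residual
        if self.use_res_connect:
            return x + out  # skip connection only when stride == 1 and inp == oup
        return out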
Example #20
    def __init__(self, num_classes):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=11, stride=4, padding=2),  # 64 55 55

            nn.BatchNorm2d(64, momentum=momentum_num),
            nn.ReLU(inplace=False),
            nn.MaxPool2d(kernel_size=3, stride=2),  # 64 27 27
            nn.Conv2d(64, 192, kernel_size=5, padding=2),  # 192 27 27

            nn.BatchNorm2d(192, momentum=momentum_num),
            nn.ReLU(inplace=False),
            nn.MaxPool2d(kernel_size=3, stride=2),  # 192 13 13
            nn.Conv2d(192, 384, kernel_size=3, padding=1),  # 384 13 13

            nn.BatchNorm2d(384, momentum=momentum_num),
            nn.ReLU(inplace=False),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),  # 256 13 13

            nn.BatchNorm2d(256, momentum=momentum_num),
            nn.ReLU(inplace=False))
        # nn.Conv2d(256, 256, kernel_size=3, padding=1),  #256 13 13
        #
        # nn.BatchNorm2d(256, momentum=momentum_num))
        self.conv1 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(256, momentum=momentum_num)

        self.se = SELayer(256, 16)
        # nn.ReLU(inplace=False),
        # nn.MaxPool2d(kernel_size=3, stride=2), )    #256 13 13

        self.activate = nn.ReLU(inplace=False)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2)

        '''Moved the dropout to after the activation function'''
        self.classifier = nn.Sequential(
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=False),
            nn.Dropout(),

            nn.Linear(4096, 4096),
            nn.ReLU(inplace=False),
            nn.Dropout(),

            nn.Linear(4096, num_classes))
Example #21
    def __init__(self, scale=1.0, noReLU=False):
        super(Block8, self).__init__()

        self.scale = scale
        self.noReLU = noReLU

        self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(2080, 192, kernel_size=1, stride=1),
            BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)),
            BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
        )

        self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
        self.se = SELayer(2080)
        if not self.noReLU:
            self.relu = nn.ReLU(inplace=False)
Example #22
    def __init__(self, in_channel, channel_chunck, channel_reduction):
        super(Local_Global_Attention_Hybrid, self).__init__()
        self.channel_reduction = channel_reduction
        self.in_channel = in_channel
        self.channel_chunck = channel_chunck
        self.nlayers = in_channel // channel_chunck
        # A comprehension avoids registering one shared SELayer n times,
        # which is what [SELayer(...)] * n would do.
        self.ses = nn.ModuleList([
            SELayer(channel_chunck, channel_reduction)
            for _ in range(self.nlayers)
        ])
        self.global_attention_fc = nn.Sequential(nn.Linear(self.nlayers, 1),
                                                 nn.ReLU(True),
                                                 nn.Linear(1, self.nlayers))
        self.global_pool = nn.AdaptiveAvgPool2d(1)

        self.conv1s = nn.ModuleList([
            ConvBlock(1, 1, 3, stride=2, padding=1) for _ in range(self.nlayers)
        ])
        self.conv2s = nn.ModuleList([ConvBlock(1, 1, 1) for _ in range(self.nlayers)])

        self.global_spatial_decoder = nn.Sequential(
            ConvBlock(self.nlayers, 1, 1, 1, 0),
            ConvBlock(1, self.nlayers, 1, 1, 0))
Example #23
 def __init__(self, inplanes, planes, stride=1, reduction=16):
     super(CifarSEBasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = nn.BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = nn.BatchNorm2d(planes)
     self.se = SELayer(planes, reduction)
     # since we are reducing the number of channels by k=4 in SE_module, we need to reshape the residual to output_channels/k
     #reduce_planes= planes//4
     #self.downsample1 = nn.Sequential(nn.Conv2d(planes, reduce_planes, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(reduce_planes))
     if inplanes != planes:
         self.downsample = nn.Sequential(
             nn.Conv2d(inplanes,
                       planes,
                       kernel_size=1,
                       stride=stride,
                       bias=False), nn.BatchNorm2d(planes))
     else:
         self.downsample = nn.Identity()  # no-op; registered as a module, unlike a bare lambda
     self.stride = stride
Example #24
    def __init__(self, scale=1.0):
        super(Block35, self).__init__()

        self.scale = scale

        self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(320, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
        )

        self.branch2 = nn.Sequential(
            BasicConv2d(320, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),
            BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)
        )

        self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
        self.se = SELayer(320)
        self.relu = nn.ReLU(inplace=False)
Example #25
    def __init__(self):
        super(Mixed_5b, self).__init__()

        self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)

        self.branch1 = nn.Sequential(
            BasicConv2d(192, 48, kernel_size=1, stride=1),
            BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)
        )

        self.branch2 = nn.Sequential(
            BasicConv2d(192, 64, kernel_size=1, stride=1),
            BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
            BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
        )

        self.branch3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
            BasicConv2d(192, 64, kernel_size=1, stride=1)
        )
        self.se = SELayer(320)
Example #26
 def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
     super(_DenseLayer, self).__init__()
     self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
     self.add_module('relu1', nn.ReLU(inplace=True)),
     self.add_module(
         'conv1',
         nn.Conv2d(num_input_features,
                   bn_size * growth_rate,
                   kernel_size=1,
                   stride=1,
                   bias=False)),
     self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
     self.add_module('relu2', nn.ReLU(inplace=True)),
     self.add_module(
         'conv2',
         nn.Conv2d(bn_size * growth_rate,
                   growth_rate,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias=False)),
     self.add_module("se_layer", SELayer(bn_size * growth_rate * 4))
     self.drop_rate = drop_rate
Example #27
 def __init__(self, in_c, out_c, t, stride):
     super(Bottleneck, self).__init__()
     self.stride = stride
     self.in_c = in_c
     self.out_c = out_c
     self.block = nn.Sequential(
         nn.Conv2d(in_c, in_c * t, kernel_size=1, bias=False),
         nn.BatchNorm2d(in_c * t),
         #nn.Dropout2d(0.2),
         nn.ReLU6(True),
         nn.Conv2d(in_c * t,
                   in_c * t,
                   kernel_size=3,
                   stride=stride,
                   padding=1,
                   groups=in_c * t,
                   bias=False),
         nn.BatchNorm2d(in_c * t),
         #nn.Dropout2d(0.2),
         nn.ReLU6(True),
         nn.Conv2d(in_c * t, out_c, kernel_size=1, bias=False),
         nn.BatchNorm2d(out_c),
     )
     self.se = SELayer(out_c, reduction=8)
Example #28
    def __init__(self, num_classes, aux_logits=True, transform_input=False):
        super(SEInception3, self).__init__()
        model = Inception3(num_classes=num_classes,
                           aux_logits=aux_logits,
                           transform_input=transform_input)
        model.Mixed_5b.add_module("SELayer", SELayer(192))
        model.Mixed_5c.add_module("SELayer", SELayer(256))
        model.Mixed_5d.add_module("SELayer", SELayer(288))
        model.Mixed_6a.add_module("SELayer", SELayer(288))
        model.Mixed_6b.add_module("SELayer", SELayer(768))
        model.Mixed_6c.add_module("SELayer", SELayer(768))
        model.Mixed_6d.add_module("SELayer", SELayer(768))
        model.Mixed_6e.add_module("SELayer", SELayer(768))
        if aux_logits:
            model.AuxLogits.add_module("SELayer", SELayer(768))
        model.Mixed_7a.add_module("SELayer", SELayer(768))
        model.Mixed_7b.add_module("SELayer", SELayer(1280))
        model.Mixed_7c.add_module("SELayer", SELayer(2048))

        self.model = model
        self._flag = True
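A minimal usage sketch, assuming the wrapped Inception3 behaves like torchvision's (299x299 inputs; the auxiliary head is only active in training mode, so eval mode returns plain logits):

import torch

net = SEInception3(num_classes=1000, aux_logits=True, transform_input=False)
net.eval()  # disable the auxiliary classifier path
with torch.no_grad():
    logits = net.model(torch.randn(1, 3, 299, 299))
print(logits.shape)  # torch.Size([1, 1000])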