Example #1
File: customize.py  Project: ccssyy/MSCNet
    def __init__(self,
                 in_channels,
                 norm_layer,
                 up_kwargs,
                 spp_size=[1, 2, 3, 6]):
        super(PyramidPooling, self).__init__()
        self.pool1 = AdaptiveAvgPool2d(spp_size[0])
        self.pool2 = AdaptiveAvgPool2d(spp_size[1])
        self.pool3 = AdaptiveAvgPool2d(spp_size[2])
        self.pool4 = AdaptiveAvgPool2d(spp_size[3])

        out_channels = int(in_channels / 4)
        self.conv1 = Sequential(
            Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels), ReLU(True))
        self.conv2 = Sequential(
            Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels), ReLU(True))
        self.conv3 = Sequential(
            Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels), ReLU(True))
        self.conv4 = Sequential(
            Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels), ReLU(True))
        # bilinear upsample options
        self._up_kwargs = up_kwargs
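The snippet stops at __init__; the usual PSP-style forward for a module built this way pools to each grid size, projects with the matching 1x1 conv, upsamples every branch back to the input resolution using the stored self._up_kwargs, and concatenates. The sketch below is an assumed completion in that spirit, not code taken from ccssyy/MSCNet.

    # assumed module-level imports for this sketch:
    #   import torch
    #   import torch.nn.functional as F
    def forward(self, x):
        _, _, h, w = x.size()
        # pool -> 1x1 conv -> bilinear upsample back to (h, w), per branch
        feat1 = F.interpolate(self.conv1(self.pool1(x)), (h, w), **self._up_kwargs)
        feat2 = F.interpolate(self.conv2(self.pool2(x)), (h, w), **self._up_kwargs)
        feat3 = F.interpolate(self.conv3(self.pool3(x)), (h, w), **self._up_kwargs)
        feat4 = F.interpolate(self.conv4(self.pool4(x)), (h, w), **self._up_kwargs)
        # concatenate the input with the four pyramid branches along channels
        return torch.cat((x, feat1, feat2, feat3, feat4), dim=1)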
Example #2
    def __init__(self,
                 n_classes=1,
                 num_init_features=64,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16)):
        super(MultiInputsDenseNet, self).__init__()

        self.features1, _ = create_densenet_features(3, num_init_features,
                                                     growth_rate, block_config)
        self.features2, _ = create_densenet_features(1, num_init_features,
                                                     growth_rate, block_config)
        self.features3, num_features = create_densenet_features(
            2, num_init_features, growth_rate, block_config)

        # Linear layer
        self.classifier1 = Sequential(AdaptiveAvgPool2d(1), Flatten(),
                                      Linear(num_features, num_features // 2),
                                      ReLU(True), Dropout(0.5))
        self.classifier2 = Sequential(AdaptiveAvgPool2d(1), Flatten(),
                                      Linear(num_features, num_features // 2),
                                      ReLU(True), Dropout(0.5))
        self.classifier3 = Sequential(AdaptiveAvgPool2d(1), Flatten(),
                                      Linear(num_features, num_features // 2),
                                      ReLU(True), Dropout(0.5))
        self.final_classifier = Sequential(
            Linear(num_features // 2 * 3, n_classes))
        self._initialize_weights()
Example #3
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.max_pool = AdaptiveMaxPool2d(1)

        self.shareMLP = nn.Sequential(
            nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False), nn.ReLU(),
            nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
        self.sigmoid = nn.Sigmoid()
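The forward pass is not included; in the standard CBAM formulation this module pushes both global descriptors through the shared MLP, sums them, and returns the sigmoid gate for the caller to apply. A minimal sketch assuming that formulation:

    def forward(self, x):
        # shared MLP over both global descriptors, summed before the gate
        avg_out = self.shareMLP(self.avg_pool(x))
        max_out = self.shareMLP(self.max_pool(x))
        # per-channel gate in [0, 1]; the caller rescales x with it
        return self.sigmoid(avg_out + max_out)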
Example #4
    def __init__(self, in_channels, norm_layer):
        super(PyramidPooling, self).__init__()
        self.pool1 = AdaptiveAvgPool2d(1)
        self.pool2 = AdaptiveAvgPool2d(2)
        self.pool3 = AdaptiveAvgPool2d(3)
        self.pool4 = AdaptiveAvgPool2d(6)

        out_channels = int(in_channels / 4)
        self.conv1 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels), ReLU(True))
        self.conv2 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels), ReLU(True))
        self.conv3 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels), ReLU(True))
        self.conv4 = Sequential(Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels), ReLU(True))
Example #5
    def __init__(self, in_channels):
        super(PyramidPooling, self).__init__()
        self.pool1 = AdaptiveAvgPool2d(1)
        self.pool2 = AdaptiveAvgPool2d(2)
        #self.pool3 = AdaptiveAvgPool2d(4)
        #self.pool4 = AdaptiveAvgPool2d(8)

        out_channels = int(in_channels / 2)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.ReLU(True))
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.ReLU(True))
Example #6
 def __init__(self,
              output_size: Union[int, Tuple[int, int]],
              trunc_quant: Optional[AccQuantType] = TruncTo8bit,
              return_quant_tensor: bool = True,
              cache_kernel_size_stride: bool = True,
              **kwargs):
     AdaptiveAvgPool2d.__init__(self, output_size=output_size)
     QuantLayerMixin.__init__(self, return_quant_tensor)
     QuantTruncMixin.__init__(self, trunc_quant=trunc_quant, **kwargs)
     self.cache_kernel_size_stride = cache_kernel_size_stride
     self._cached_kernel_size = None
     self._cached_kernel_stride = None
Example #7
 def __init__(self, width_in, width_out):
     super().__init__()
     self.proj = Conv2d(width_in, width_out, (1, 1), (2, 2), bias=False)
     self.bn = BatchNorm2d(width_out)
     self.f = Sequential(
         Sequential(  # block a
             Conv2d(width_in, width_out, (1, 1), (1, 1), bias=False),
             BatchNorm2d(width_out),
             ReLU(_relu_inplace),
         ),
         Sequential(  # block b
             Conv2d(width_out,
                    width_out, (3, 3), (2, 2), (1, 1),
                    groups=2,
                    bias=False),
             BatchNorm2d(width_out),
             ReLU(_relu_inplace),
         ),
         Sequential(  # block se
             AdaptiveAvgPool2d((1, 1)),
             Sequential(
                 Conv2d(width_out, 2, (1, 1), (1, 1), bias=False),
                 ReLU(_relu_inplace),
                 Conv2d(2, width_out, (1, 1), (1, 1), bias=False),
                 Sigmoid(),
             ),
         ),
         Conv2d(width_out, width_out, (1, 1), (1, 1),
                bias=False),  # block c
         BatchNorm2d(width_out),  # final_bn
     )
     self.relu = ReLU()
     self.need_fsdp_wrap = True
Example #8
    def __init__(self, num_classes=1000):
        super(SqueezeNetV11BN, self).__init__()
        self.num_classes = num_classes

        self.features = Sequential(
            Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(64),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=1),
            FireBN(64, 16, 64, 64),
            FireBN(128, 16, 64, 64),
            MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            FireBN(128, 32, 128, 128),
            FireBN(256, 32, 128, 128),
            MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            FireBN(256, 48, 192, 192),
            FireBN(384, 48, 192, 192),
            FireBN(384, 64, 256, 256),
            FireBN(512, 64, 256, 256),
        )
        # Final convolution is initialized differently from the rest
        final_conv = Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = Sequential(Dropout(p=0.5), final_conv,
                                     ReLU(inplace=True), AdaptiveAvgPool2d(1))

        for m in self.modules():
            if isinstance(m, Conv2d):
                if m is final_conv:
                    normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
Example #9
    def __init__(self, pretrained=True):
        super(FurnitureResNet101_350_finetune, self).__init__()

        self.model = resnet101(num_classes=1000, pretrained=pretrained)
        self.model.avgpool = AdaptiveAvgPool2d(1)

        # create aliases:
        self.stem = ModuleList([
            self.model.conv1,
            self.model.bn1,
            self.model.layer1,
            self.model.layer2,
        ])
        self.features = ModuleList([
            self.model.layer3,
            self.model.layer4,
        ])
        self.classifier = self.model.fc

        self.final_classifiers = Sequential(
            Linear(1000, 1000),
            Linear(1000, 128),
        )

        for m in self.final_classifiers.modules():
            if isinstance(m, Linear):
                normal_(m.weight, 0, 0.01)
                constant_(m.bias, 0)

        # freeze internal layers:
        for param in self.stem.parameters():
            param.requires_grad = False
Example #10
    def __init__(self, pretrained=True):
        super(FurnitureResNet152_350, self).__init__()

        self.model = resnet152(num_classes=1000, pretrained=pretrained)
        num_features = self.model.fc.in_features
        self.model.fc = Linear(num_features, 128)
        self.model.avgpool = AdaptiveAvgPool2d(1)

        for m in self.model.fc.modules():
            if isinstance(m, Linear):
                normal_(m.weight, 0, 0.01)
                constant_(m.bias, 0)

        # create aliases:
        self.stem = ModuleList([
            self.model.conv1,
            self.model.bn1,
        ])
        self.features = ModuleList([
            self.model.layer1,
            self.model.layer2,
            self.model.layer3,
            self.model.layer4,
        ])
        self.classifier = self.model.fc
Example #11
    def __init__(self, features, featuremap_output_size, n_cls_layers=512):
        super(FurnitureModelOnCrops, self).__init__()

        self.base_features = features
        self.avgpool = AdaptiveAvgPool2d(1)

        n_crops = 6
        self.crop_classifiers = []
        for i in range(n_crops):
            self.crop_classifiers.append(
                Sequential(
                    ReLU(),
                    Linear(featuremap_output_size, n_cls_layers),
                    ReLU(),
                    Dropout(p=0.4)
                )
            )
            for m in self.crop_classifiers[-1].modules():
                if isinstance(m, Linear):
                    normal_(m.weight, 0, 0.01)
                    constant_(m.bias, 0.0)

        self.crop_classifiers = ModuleList(self.crop_classifiers)

        self.final_classifier = Linear(n_cls_layers, 128)
        for m in self.final_classifier.modules():
            normal_(m.weight, mean=0.0, std=0.01)
            if m.bias is not None:
                constant_(m.bias, 0.0)
Example #12
    def __init__(self, input_n_channels, n_classes=2):
        super(IcebergResNet, self).__init__()

        self.stem = Sequential(
            Conv2d(input_n_channels, 64, kernel_size=3, stride=1, padding=1, bias=False),
            BatchNorm2d(64),
            ReLU(inplace=True),
            Conv2d(64, 64, kernel_size=1, stride=2, bias=False),
            BatchNorm2d(64),
            ReLU(inplace=True),
        )

        self.inplanes = 64
        layers = [3, 4]
        block = Bottleneck

        self.features = Sequential(
            self._make_layer(block, 64, layers[0]),
            self._make_layer(block, 128, layers[1])
        )

        self.classifier = Sequential(
            AdaptiveAvgPool2d(1),
            Flatten(),
            Linear(128 * block.expansion, n_classes)
        )
        initialize_weights(self.modules())
Example #13
 def __init__(self):
     super().__init__()
     self.seq = Sequential(
         OrderedDict([
             (
                 "conv1",
                 Conv2d(3,
                        16,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        bias=True),
             ),
             ("act1", ReLU()),
             (
                 "conv2",
                 Conv2d(16,
                        32,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        bias=True),
             ),
             ("act2", ReLU()),
         ]))
     self.pool = AdaptiveAvgPool2d(1)
     self.mlp = Sequential(
         OrderedDict([("fc", Linear(32, 10, bias=True)),
                      ("sig", Sigmoid())]))
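Only the constructor is shown; a forward that composes the three pieces would plausibly look like the sketch below (an assumption, not the repository's code): run the strided convolutional stack, collapse the spatial dimensions with the adaptive pool, flatten, and apply the small MLP head.

 def forward(self, x):
     x = self.seq(x)               # strided conv features: [B, 32, ~H/4, ~W/4]
     x = self.pool(x).flatten(1)   # global average pool -> [B, 32]
     return self.mlp(x)            # fc + sigmoid -> [B, 10]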
Example #14
 def __init__(self):
     super(Discriminator, self).__init__()
     self.net = Sequential(
         Conv2d(3, 64, kernel_size=(3, 3), padding=(1, 1)),
         LeakyReLU(0.2),
         Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
         BatchNorm2d(64),
         LeakyReLU(0.2),
         Conv2d(64, 128, kernel_size=(3, 3), padding=(1, 1)),
         BatchNorm2d(128),
         LeakyReLU(0.2),
         Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1),
                padding=(1, 1)),
         BatchNorm2d(128),
         LeakyReLU(0.2),
         Conv2d(128, 256, kernel_size=(3, 3), padding=(1, 1)),
         BatchNorm2d(256),
         LeakyReLU(0.2),
         Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1),
                padding=(1, 1)),
         BatchNorm2d(256),
         LeakyReLU(0.2),
         Conv2d(256, 512, kernel_size=(3, 3), padding=(1, 1)),
         BatchNorm2d(512),
         LeakyReLU(0.2),
         Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1),
                padding=(1, 1)),
         BatchNorm2d(512),
         LeakyReLU(0.2),
         AdaptiveAvgPool2d(1),
         Conv2d(512, 1024, kernel_size=(1, 1)),
         LeakyReLU(0.2),
         Conv2d(1024, 1, kernel_size=(1, 1)),
     )
Example #15
 def __init__(self, features, WH, M, G, r, stride=1, L=32):
     """ Constructor
     Args:
         features: input channel dimensionality.
         WH: input spatial dimensionality, used for GAP kernel size.
         M: the number of branches.
         G: num of convolution groups.
         r: the ratio used to compute d, the length of vector z.
         stride: stride, default 1.
         L: the minimum dim of the vector z in paper, default 32.
     """
     super(SKConv, self).__init__()
     d = max(int(features / r), L)
     # d = 8
     self.M = M
     self.features = features
     self.convs = nn.ModuleList([])
     for i in range(M):
         self.convs.append(nn.Sequential(
             nn.Conv2d(features, features, kernel_size=3 + i * 2, stride=stride, padding=1 + i, groups=features),
             nn.BatchNorm2d(features),
             nn.ReLU(inplace=False)
         ))
     # self.gap = nn.AvgPool2d(int(WH / stride))
     #
     self.gap = AdaptiveAvgPool2d(1)
     self.fc = nn.Linear(features, d)
     self.fcs = nn.ModuleList([])
     for i in range(M):
         self.fcs.append(
             nn.Linear(d, features)
         )
     self.softmax = nn.Softmax(dim=1)
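The constructor arguments describe the split/fuse/select steps of SKNet; writing the forward out makes the flow easier to follow. The sketch below follows the paper and the attribute names above, but it is an assumption rather than the repository's own forward.

 # assumed module-level import for this sketch: import torch
 def forward(self, x):
     # split: one branch per kernel size, stacked on a new branch axis
     feas = torch.stack([conv(x) for conv in self.convs], dim=1)  # [B, M, C, H, W]
     # fuse: sum the branches, squeeze to a channel descriptor, project to d
     fea_U = feas.sum(dim=1)                                      # [B, C, H, W]
     fea_z = self.fc(self.gap(fea_U).flatten(1))                  # [B, d]
     # select: per-branch channel logits, softmax across the M branches
     attn = torch.stack([fc(fea_z) for fc in self.fcs], dim=1)    # [B, M, C]
     attn = self.softmax(attn).unsqueeze(-1).unsqueeze(-1)        # [B, M, C, 1, 1]
     return (feas * attn).sum(dim=1)                              # [B, C, H, W]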
Example #16
 def __init__(self, channel, reduction=4):
     super(SELayer, self).__init__()
     self.avg_pool = AdaptiveAvgPool2d(1)
     self.fc = Sequential(Linear(channel, channel // reduction),
                          ReLU(inplace=True),
                          Linear(channel // reduction, channel),
                          h_sigmoid())
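The standard SENet forward for a layer with this shape squeezes to a [B, C] descriptor, runs the bottleneck MLP, and rescales the input; the completion below is assumed rather than copied from the source.

 def forward(self, x):
     b, c, _, _ = x.size()
     y = self.avg_pool(x).view(b, c)  # squeeze: one value per channel
     y = self.fc(y).view(b, c, 1, 1)  # excite: bottleneck MLP + h_sigmoid gate
     return x * y                     # rescale the input feature map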
Example #17
def _detach_head(model):
	from types import MethodType
	from torch.nn import AdaptiveAvgPool2d
	from torch.utils.data.dataloader import DataLoader

	def extractor_forward(self, x):
		if isinstance(x, DataLoader):
			from torch import cat

			from captioner.utils import get_tqdm
			tqdm = get_tqdm()

			return cat([self(x_i.to(mag.device)) for x_i, _ in tqdm(iter(x))])

		x = self.conv1(x)
		x = self.bn1(x)
		x = self.relu(x)
		x = self.maxpool(x)

		x = self.layer1(x)
		x = self.layer2(x)
		x = self.layer3(x)
		x = self.layer4(x)

		x = self.avgpool(x)
		x = x.view(x.size(0), -1)

		return x

	model.feature_size = model.fc.in_features
	del model.fc
	model.avgpool = AdaptiveAvgPool2d(1)
	model.forward = MethodType(extractor_forward, model)
Example #18
    def __init__(self, config, hidden_size=512, n_layers=8, bidirectional=False, attention=False):
        super(_LSTMModel, self).__init__()
        self.attention = attention

        # lstm layers
        self.lstm = LSTM(64, hidden_size, n_layers, dropout=config.lstm_dropout, bidirectional=bidirectional)

        n_layers *= 2 if bidirectional else 1
        hidden_size *= 2 if bidirectional else 1

        if attention:
            self.att_layer = Attention(hidden_size, (256, hidden_size), batch_first=True)

        self.avg_pooling = AdaptiveAvgPool2d((1, hidden_size))

        # fully connected output layers
        self.gender_out = Sequential(
            Dropout(config.fc_dropout),
            Linear(hidden_size, 3)
        )

        self.accent_out = Sequential(
            Dropout(config.fc_dropout),
            Linear(hidden_size, 16)
        )

        # initialise the network's weights
        self.init_weights()
Example #19
 def __init__(self, channels, reduction):
     super(SEModule, self).__init__()
     self.global_avg_pool = AdaptiveAvgPool2d(1)
     self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
     self.relu = ReLU(inplace=True)
     self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
     self.sigmoid = Sigmoid()
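Because this variant builds the bottleneck from 1x1 convolutions instead of Linear layers, the excitation can stay in [B, C, 1, 1] the whole way and no reshape is needed. A minimal sketch of the assumed forward:

 def forward(self, x):
     w = self.global_avg_pool(x)           # [B, C, 1, 1]
     w = self.fc2(self.relu(self.fc1(w)))  # channel bottleneck, still 4-D
     return x * self.sigmoid(w)            # gate broadcasts over H and W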
Example #20
    def __init__(self):
        """Initialization."""
        super().__init__()
        self.inplanes = ResNet2.inplanes

        self.conv1 = Conv2d(3,
                            self.inplanes,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
        self.bn1 = BatchNorm2d(self.inplanes)
        self.relu = ReLU(inplace=True)
        self.maxpool = MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(BasicBlock, ResNet2.inplanes, 2)
        self.layer2 = self._make_layer(BasicBlock,
                                       2 * ResNet2.inplanes,
                                       2,
                                       stride=2)
        self.layer3 = self._make_layer(BasicBlock,
                                       4 * ResNet2.inplanes,
                                       2,
                                       stride=2)
        self.avgpool = AdaptiveAvgPool2d((1, 1))
        self.fc = Linear(4 * ResNet2.inplanes, self.num_classes)
Example #21
 def __init__(self, channels_in):
     super(GoogLeNetV3, self).__init__()
     self.in_block = Sequential(
         Conv2d_BN(channels_in, 32, 3, stride=2, padding=1),  # size /= 2
         Conv2d_BN(32, 32, 3, stride=1, padding=1),
         Conv2d_BN(32, 64, 3, stride=1, padding=1),
         MaxPool2d(3, stride=2, padding=1),  # size /= 2
         Conv2d_BN(64, 80, 1, stride=1, padding=0),
         Conv2d_BN(80, 192, 3, stride=1, padding=1),
         MaxPool2d(3, stride=2, padding=1)  # size /= 2
     )  # 192 channels
     self.mix_block = Sequential(
         InceptionA(192, 32),
         InceptionA(256, 64),
         InceptionA(288, 64),
         InceptionB(288),  # size /= 2
         InceptionC(768, 128),
         InceptionC(768, 160),
         InceptionC(768, 160),
         InceptionC(768, 192),
         InceptionD(768),  # size /= 2
         InceptionE(1280),
         InceptionE(2048)
     )  # 2048 channels
     self.out_block = Sequential(
         Conv2d_BN(2048, 1024, 1, stride=1, padding=0),
         AdaptiveAvgPool2d(1)
     )  # 1024 channels
     self.full_connect = Linear(1024, 1)
Example #22
 def forward(self, x):
     if self.Train:
         return AdaptiveAvgPool2d(self.sz)(x)
     else:
         return nn.AvgPool2d(kernel_size=(self.sz_list.index(x.size(2)),
                                          self.sz_list.index(x.size(3))),
                             ceil_mode=False)(x)
Example #23
 def __init__(self,
              expanded_channels: int,
              squeezed_channels: int,
              act_type: str = "relu"):
     super().__init__()
     self.squeeze = AdaptiveAvgPool2d(1)
     self.reduce = Sequential(
         OrderedDict([
             (
                 "conv",
                 Conv2d(
                     in_channels=expanded_channels,
                     out_channels=squeezed_channels,
                     kernel_size=1,
                 ),
             ),
             (
                 "act",
                 create_activation(act_type,
                                   inplace=False,
                                   num_channels=squeezed_channels),
             ),
         ]))
     self.expand = Sequential(
         OrderedDict([
             (
                 "conv",
                 Conv2d(
                     in_channels=squeezed_channels,
                     out_channels=expanded_channels,
                     kernel_size=1,
                 ),
             ),
             ("act", Sigmoid()),
         ]))
Example #24
    def __init__(self, channels, kernel, reduction=2, use_hard_sigmoid=False):
        """
        Channel-wise attention module, Squeeze-and-Excitation Networks, Jie Hu, Li Shen, Gang Sun - https://arxiv.org/pdf/1709.01507v2.pdf
        :param channels: Number of input channels
        :param reduction: Reduction factor for the number of hidden units
        """
        super(_ChannelAttentionModule, self).__init__()

        if use_hard_sigmoid:
            act_type = "hard_sigmoid"
        else:
            act_type = "sigmoid"
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.body = Sequential(
            Conv1d(in_channels=channels,
                   out_channels=channels,
                   kernel_size=kernel,
                   padding=kernel // 2,
                   stride=1,
                   bias=True), get_act("sigmoid"))

        self.fc = Sequential(
            Linear(channels, channels // reduction, bias=False),
            ReLU(inplace=True),
            Linear(channels // reduction, channels, bias=False),
            get_act(act_type))
Example #25
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = F.relu(self.conv2(x))
     x = F.relu(self.conv3(x))
     x = AdaptiveAvgPool2d(1)(x).squeeze()
     x = self.fc(x)
     return x
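One caveat with the pooling line above: Tensor.squeeze() with no arguments drops every size-1 dimension, so a batch of one also loses its batch axis before the fully connected layer. A flatten keeps the batch dimension; a possible alternative (an assumption, not the original code):

     x = AdaptiveAvgPool2d(1)(x).flatten(1)  # keeps shape [B, C] even when B == 1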
Example #26
    def rmac(self, x):
        y = []
        m_max = AdaptiveMaxPool2d((1, 1))
        m_mean = AdaptiveAvgPool2d((1, 1))
        for r in self.regions:
            x_sliced = x[:, :, r[1]:r[3], r[0]:r[2]]
            if self.power is None:
                x_maxed = m_max(x_sliced)  # x_maxed [B,K]
            else:
                x_maxed = m_mean(x_sliced ** self.power) ** (1.0 / self.power)
            y.append(x_maxed.squeeze(-1).squeeze(-1))
        # y list(N) N [B,K]
        y = torch.stack(y, dim=0)  # y [N,B,K]
        y = y.transpose(0, 1)  # y [B,N,K]

        if self.norm:
            y = F.normalize(y, p=2, dim=-1)  # y [B,N,K]

        if self.sum_fm:
            y = AdaptiveMaxPool2d((1, None))(y)  # y [B,K]
            y = y.squeeze(1)
        return y
Example #27
    def __init__(self, pretrained=True):
        super(FurnitureInceptionV4_350_FC2, self).__init__()

        self.model = inceptionv4(num_classes=1000, pretrained=pretrained)
        self.model.avg_pool = AdaptiveAvgPool2d(1)
        self.final_classifier = Sequential(
            Linear(1000, 512),
            ReLU(inplace=True),
            Dropout(p=0.5),
            Linear(512, 128),
        )

        for m in self.final_classifier.modules():
            if isinstance(m, Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

        # create aliases:
        self.stem = ModuleList([
            self.model.features[0],
            self.model.features[1],
            self.model.features[2],
        ])
        self.features = ModuleList([
            self.model.features[i] for i in range(3, len(self.model.features))
        ])
        self.classifier = self.model.last_linear
Example #28
 def __init__(self, num_classes=10):
     super(CNNNet, self).__init__()
     self.features = Sequential(
         Conv2d(1, 64, kernel_size=3, stride=1, padding=2),
         ReLU(),
         MaxPool2d(kernel_size=3, stride=2),
         BatchNorm2d(64),
         Conv2d(64, 32, kernel_size=3, padding=2),
         ReLU(),
         BatchNorm2d(32),
         MaxPool2d(kernel_size=3, stride=2),
         Conv2d(32, 16, kernel_size=3, padding=2),
         ReLU(),
         BatchNorm2d(16),
         # BatchNorm2d(32),
         # MaxPool2d(kernel_size=3, stride=2),
         # Conv2d(32, 32, kernel_size=3, padding=2),
         # ReLU(),
         # BatchNorm2d(32),
         # MaxPool2d(kernel_size=3, stride=2),
         # Conv2d(32, 32, kernel_size=3, padding=2),
         # ReLU(),
         # Conv2d(192, 384, kernel_size=3, padding=1),
         # ReLU(),
         # Conv2d(384, 256, kernel_size=3, padding=1),
         # ReLU(),
         # Conv2d(256, 256, kernel_size=3, padding=1),
         # ReLU(),
         MaxPool2d(kernel_size=3, stride=2),
     )
     self.avgpool = AdaptiveAvgPool2d((6, 6))
     self.classifier = Sequential(Dropout(), Linear(16 * 6 * 6, 64), ReLU(),
                                  Linear(64, num_classes))
Example #29
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        classes: int,
        dropout: float,
        class_type: str,
    ):
        super().__init__()
        self.conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            bias=False,
        )
        self.bn = BatchNorm2d(num_features=out_channels)
        self.act = Swish(out_channels)
        self.pool = AdaptiveAvgPool2d(1)
        self.dropout = Dropout(p=dropout)
        self.fc = Linear(out_channels, classes)

        if class_type == "single":
            self.softmax = Softmax(dim=1)
        elif class_type == "multi":
            self.softmax = Sigmoid()
        else:
            raise ValueError(
                "unknown class_type given of {}".format(class_type))
Example #30
 def __init__(self, channel, reduction=16):
     super(SELayer, self).__init__()
     self.avg_pool = AdaptiveAvgPool2d(1)
     self.fc = nn.Sequential(
         nn.Linear(channel, channel // reduction, bias=False),
         nn.ReLU(inplace=True),
         nn.Linear(channel // reduction, channel, bias=False), nn.Sigmoid())