    # These excerpts assume `import torch` and `import torch.nn as nn`.
    def __init__(self, in_channel_block, in_channel_clf, num_classes, cls,
                 hidden_channel, attention_1, attention_2):
        super(ClassifierModuleMiddle, self).__init__()
        self.relu = nn.ReLU(inplace=False)
        self.BN = nn.BatchNorm2d(in_channel_block)

        self.linear_h = nn.Linear(in_channel_block + in_channel_clf,
                                  hidden_channel)
        self.linear = nn.Linear(hidden_channel, num_classes)

        if attention_1 == 'se':
            self.attention_1 = SELayer(in_channel_block, 16)
        elif attention_1 == 'scan':
            self.attention_1 = ScanLayer(in_channel_block)
        elif attention_1 == 'no':
            self.attention_1 = nn.Identity()  # no-op attention
        else:
            raise ValueError(f"unknown attention_1 mode: {attention_1}")

        self.cls = cls
        if self.cls != 0:
            # Learnable start state plus a backward projection from the hidden
            # space to the concatenated input space; presumably these support
            # an iterative feedback/refinement loop when cls steps are enabled.
            self.b0 = nn.ParameterList(
                [nn.Parameter(torch.zeros(1, hidden_channel))])
            self.linear_bw = nn.Linear(hidden_channel,
                                       in_channel_block + in_channel_clf)
        self.BN1d = nn.BatchNorm1d(hidden_channel)

        if attention_2 == 'yes':
            self.attention_2 = LinearLayer(in_channel_clf)
        elif attention_2 == 'no':
            self.attention_2 = nn.Identity()  # no-op attention
        else:
            raise ValueError(f"unknown attention_2 mode: {attention_2}")
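
SELayer is called throughout these examples but never defined in them. A minimal squeeze-and-excitation block (Hu et al., 2018) consistent with the SELayer(channels, reduction) call sites would look like the sketch below; the exact layer in this repository may differ.

import torch
import torch.nn as nn

class SELayer(nn.Module):
    def __init__(self, channels, reduction=16):
        super(SELayer, self).__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)  # squeeze: global average pooling
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),  # per-channel gates in (0, 1)
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w  # excite: rescale each channel by its gate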
Example #2
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 use_att=False,
                 att_mode='ours'):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes,
                               planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # self.expansion is a class attribute (4 for a standard ResNet Bottleneck)
        self.conv3 = nn.Conv2d(planes,
                               planes * self.expansion,
                               kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.use_att = use_att

        if use_att:
            assert att_mode in ['ours', 'se']
            if att_mode == 'ours':
                self.att = AttentionModule(planes * self.expansion)
            elif att_mode == 'se':
                self.att = SELayer(planes * self.expansion)
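
The forward pass is not part of this excerpt. A typical ResNet-style forward consistent with the layers above would be the following sketch; applying the attention module after bn3 and before the skip connection is an assumption here, since the excerpt does not show where self.att is used.

    def forward(self, x):
        identity = x

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        if self.use_att:
            out = self.att(out)  # assumed placement on the residual branch

        if self.downsample is not None:
            identity = self.downsample(x)  # match shapes for the skip connection

        return self.relu(out + identity)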
Example #3
    def __init__(self, in_channel_block, num_classes, hidden_channel):
        super(ClassifierModuleFirst, self).__init__()
        self.relu = nn.ReLU(inplace=False)
        self.BN = nn.BatchNorm2d(in_channel_block)
        self.se = SELayer(in_channel_block, 16)

        self.linear_h = nn.Linear(in_channel_block, hidden_channel)
        self.linear = nn.Linear(hidden_channel, num_classes)

        self.BN1d = nn.BatchNorm1d(hidden_channel)
Example #4
    def __init__(self, in_channel_block, num_classes, hidden_channel,
                 attention):
        super(ClassifierModuleFirst, self).__init__()
        self.relu = nn.ReLU(inplace=False)
        self.BN = nn.BatchNorm2d(in_channel_block)
        if attention == 'se':
            self.attention = SELayer(in_channel_block, 16)
        elif attention == 'scan':
            self.attention = ScanLayer(in_channel_block)
        elif attention == 'no':
            self.attention = nn.Identity()  # no-op attention
        else:
            raise ValueError(f"unknown attention mode: {attention}")

        self.linear_h = nn.Linear(in_channel_block, hidden_channel)
        self.linear = nn.Linear(hidden_channel, num_classes)

        self.BN1d = nn.BatchNorm1d(hidden_channel)
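
Neither Example #3 nor Example #4 shows ClassifierModuleFirst's forward pass. Given the layer inventory (2-D BatchNorm and attention on a feature map, then a 1-D hidden bottleneck into the logits), one plausible reading inserts a global average pool between the attention and the linear head. The sketch below is a guess from the layers of the Example #4 variant, not the repository's actual code; the BN/ReLU/attention ordering in particular is assumed (Example #3 would use self.se in place of self.attention).

    def forward(self, x):
        x = self.attention(self.relu(self.BN(x)))  # normalize and attend on the feature map
        x = nn.functional.adaptive_avg_pool2d(x, 1).flatten(1)  # assumed global pooling
        h = self.relu(self.BN1d(self.linear_h(x)))  # hidden representation
        return self.linear(h)  # class logits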
Example #5
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 use_att=False,
                 att_mode='ours'):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.use_att = use_att

        if use_att:
            assert att_mode in ['ours', 'se']
            if att_mode == 'ours':
                self.att = AttentionModule(planes)
            elif att_mode == 'se':
                self.att = SELayer(planes)
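
The conv3x3 helper used by BasicBlock is the standard ResNet convenience function; the usual definition, matching torchvision's, is:

def conv3x3(in_planes, out_planes, stride=1):
    # 3x3 convolution with padding and no bias (a BatchNorm follows it)
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)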
Example #6
    def __init__(self,
                 in_channel_block,
                 in_channel_clf,
                 num_classes,
                 cls,
                 hidden_channel,
                 score_layer=False):
        super(ClassifierModuleMiddle, self).__init__()
        self.relu = nn.ReLU(inplace=False)
        self.BN = nn.BatchNorm2d(in_channel_block)
        self.se = SELayer(in_channel_block, 16)

        self.linear_h = nn.Linear(in_channel_block + in_channel_clf,
                                  hidden_channel)
        self.linear = nn.Linear(hidden_channel, num_classes)

        self.cls = cls
        if self.cls != 0:
            self.b0 = nn.ParameterList(
                [nn.Parameter(torch.zeros(1, hidden_channel))])
            self.linear_bw = nn.Linear(hidden_channel,
                                       in_channel_block + in_channel_clf)
        self.BN1d = nn.BatchNorm1d(hidden_channel)
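
As a usage sketch with hypothetical dimensions: this head would sit after a backbone stage, combining that stage's feature map (in_channel_block channels) with features carried over from the previous classifier (in_channel_clf). The learned start state b0 and the backward projection linear_bw allocated when cls != 0 suggest an iterative refinement loop over the joint feature vector. All sizes below are for illustration only.

# Hypothetical instantiation of Example #6's module (requires an SELayer
# definition such as the sketch under Example #1):
head = ClassifierModuleMiddle(in_channel_block=256,  # channels of the backbone stage
                              in_channel_clf=128,    # channels from the previous classifier
                              num_classes=10,
                              cls=1,                 # enables b0 and linear_bw
                              hidden_channel=128)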