Example #1
    def _make_head(self, head_channels, pre_stage_channels):
        head_block = ShuffleBlock

        # Raise the channel count of each resolution branch from
        # pre_stage_channels to the corresponding head_channels entry
        incre_modules = nn.ModuleList()
        for i, channels in enumerate(pre_stage_channels):
            self.inplanes = channels
            incre_module = self._make_layer(head_block,
                                            head_channels[i],
                                            1,
                                            stride=1)
            incre_modules.append(incre_module)

        # downsampling modules
        downsamp_modules = nn.ModuleList()
        for i in range(len(pre_stage_channels) - 1):
            downsamp_module = ConvModule(
                in_channels=head_channels[i],
                out_channels=head_channels[i + 1],
                kernel_size=3,
                stride=2,
                padding=1,
            )
            downsamp_modules.append(downsamp_module)
        feat_size = 2048
        if self.cifar10:
            feat_size = 1024
        final_layer = ConvModule(in_channels=head_channels[3],
                                 out_channels=feat_size,
                                 kernel_size=1)

        return incre_modules, downsamp_modules, final_layer
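        # Hedged sketch of how the three returned pieces chain together in
        # an HRNet-style classification forward() (not in this snippet):
        #   y = self.incre_modules[0](x_list[0])
        #   for i, down in enumerate(self.downsamp_modules):
        #       y = self.incre_modules[i + 1](x_list[i + 1]) + down(y)
        #   y = self.final_layer(y)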
Example #2
    def __init__(self, inc, n_location, n_dim):
        super(YOLOv3PredictionHead, self).__init__()
        self.extract_feature = nn.ModuleList()
        for _ in range(3):
            # nn.ModuleList.append() rejects a plain Python list, and the
            # convs inside one would not be registered as submodules;
            # wrap each pair in nn.Sequential instead.
            self.extract_feature.append(
                nn.Sequential(ConvModule(inc, inc // 2, 1),
                              ConvModule(inc // 2, inc, 1)))
        self.location = ConvModule(inc, n_location, 1)
        self.embedding = ConvModule(inc, n_dim, 3, 1, 1)
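        # Hedged note (JDE-style head, inferred from the layer names): the
        # conv pairs refine the input feature map, self.location predicts
        # the detection terms, and self.embedding yields ReID embeddings;
        # the actual forward() is not part of this snippet.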
Example #3
    def __init__(self, in_channels, n_dim):
        super(HourGlassHead, self).__init__()

        self.head = nn.Sequential(
            ConvModule(in_channels, 256, 3, padding=1, use_bn=False),
            ConvModule(256,
                       n_dim,
                       1,
                       activation='linear',
                       use_bn=False,
                       bias=True))
Example #4
    def _make_transition_layer(self,
                               num_channels_pre_layer,
                               num_channels_cur_layer,
                               activation,
                               csp=False):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = nn.ModuleList()
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[
                        i] or csp:
                    if csp:
                        transition_layers.append(
                            ConvModule(num_channels_pre_layer[i],
                                       num_channels_cur_layer[i],
                                       kernel_size=1,
                                       activation=activation))
                        transition_layers.append(
                            ConvModule(num_channels_cur_layer[i] * 2,
                                       num_channels_cur_layer[i] * 2,
                                       kernel_size=1,
                                       activation=activation))
                    else:
                        transition_layers.append(
                            ConvModule(num_channels_pre_layer[i],
                                       num_channels_cur_layer[i],
                                       kernel_size=3,
                                       padding=1,
                                       activation=activation))
                else:
                    transition_layers.append(None)
            else:
                if not csp:
                    conv3x3s = []
                    for j in range(i + 1 - num_branches_pre):
                        inchannels = num_channels_pre_layer[-1]
                        outchannels = num_channels_cur_layer[i] \
                            if j == i-num_branches_pre else inchannels
                        conv3x3s.append(
                            ConvModule(inchannels,
                                       outchannels,
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       activation=activation))
                    transition_layers.append(nn.Sequential(*conv3x3s))

        return transition_layers
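        # Hedged usage sketch (HRNet-style; the forward() is not shown):
        # None entries mean identity, and branches beyond the previous
        # stage are grown from its last (lowest-resolution) output:
        #   x_list = []
        #   for i, trans in enumerate(transition_layers):
        #       inp = y_list[i] if i < len(y_list) else y_list[-1]
        #       x_list.append(inp if trans is None else trans(inp))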
Example #5
    def __init__(self, inc, ouc, sr=2, stride=1):
        super(fire_module, self).__init__()
        self.conv1 = ConvModule(inc, ouc // sr, 1, activation='linear')
        # self.conv1    = nn.Conv2d(inp_dim, out_dim // sr, kernel_size=1, stride=1, bias=False)
        # self.bn1      = nn.BatchNorm2d(out_dim // sr)

        self.conv_1x1 = ConvModule(ouc // sr, ouc // 2, 1, stride=stride, activation='linear', use_bn=False)
        # self.conv_1x1 = nn.Conv2d(out_dim // sr, out_dim // 2, kernel_size=1, stride=stride, bias=False)
        self.conv_3x3 = ConvModule(ouc // sr, ouc // 2, 3, stride=stride, padding=1, activation='linear', use_bn=False)
        # self.conv_3x3 = nn.Conv2d(out_dim // sr, out_dim // 2, kernel_size=3, padding=1,
        #                           stride=stride, groups=out_dim // sr, bias=False)
        self.skip = (stride == 1 and inc == ouc)
        self.bn  = nn.BatchNorm2d(ouc)        
        if self.skip:
            self.relu = nn.ReLU(inplace=True)
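        # Hedged forward sketch (SqueezeNet-style fire module; the actual
        # forward() is not shown in this snippet):
        #   s = self.conv1(x)  # squeeze
        #   y = self.bn(torch.cat([self.conv_1x1(s), self.conv_3x3(s)], 1))
        #   return self.relu(y + x) if self.skip else y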
Example #6
    def __init__(self, stage_repeats, stage_out_channels, **kwargs):
        super(HACNN, self).__init__()
        self.stage_repeats = stage_repeats
        self.stage_out_channels = stage_out_channels
        in_channels = self.stage_out_channels[0]
        self.conv = ConvModule(3, in_channels, 3, stride=2, padding=1)
        self.stages = nn.ModuleList()
        self.has = nn.ModuleList()

        for stage_i in range(len(self.stage_repeats)):
            stage = []
            out_channels = self.stage_out_channels[stage_i + 1]
            stage.append(ShuffleB(in_channels, out_channels, 1))
            for _ in range(self.stage_repeats[stage_i]):
                stage.append(ShuffleA(out_channels, out_channels))
            stage.append(ShuffleB(out_channels, out_channels, 2))
            self.has.append(HABlock(out_channels))
            self.stages.append(nn.Sequential(*stage))
            in_channels = out_channels

        feat_dim = self.stage_out_channels[-1]
        self.global_fc = nn.Sequential(
            nn.Linear(out_channels, feat_dim),
            nn.BatchNorm1d(feat_dim),
            nn.ReLU(),
        )

        self.pooling = GeM()
        self._initialize_weights()
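        # Hedged note: in HA-CNN each HABlock in self.has presumably attends
        # over the output of the matching stage in forward() (not shown);
        # GeM pooling plus global_fc then produce the final embedding.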
Example #7
    def _make_stage(self,
                    num_modules,
                    num_branches,
                    num_blocks,
                    num_channels,
                    block,
                    fused_method,
                    num_inchannels,
                    activation,
                    useSE,
                    multi_scale_output=True):

        modules = []
        before_branches = nn.ModuleList()
        for i in range(num_branches):
            before_branches.append(
                ConvModule(num_inchannels[i] * 2,
                           num_inchannels[i],
                           kernel_size=1))

        for i in range(num_modules):
            # multi_scale_output is only used for the last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True

            modules.append(
                HighResolutionModule(num_branches, block, num_blocks,
                                     num_inchannels, num_channels,
                                     fused_method, activation, useSE,
                                     reset_multi_scale_output))
            num_inchannels = modules[-1].get_num_inchannels()

        return nn.Sequential(*modules), before_branches, num_inchannels
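        # Hedged note: each before_branches[i] is a 1x1 conv that maps the
        # concatenated CSP halves (2 * num_inchannels[i] channels) back to
        # num_inchannels[i] before branch i enters the stage; the call site
        # is in forward(), which is not shown here.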
Example #8
    def _make_fuse_layers(self, activation):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            ConvModule(num_inchannels[j],
                                       num_inchannels[i],
                                       kernel_size=1,
                                       activation='linear'),
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                ConvModule(num_inchannels[j],
                                           num_outchannels_conv3x3,
                                           kernel_size=3,
                                           stride=2,
                                           padding=1,
                                           activation='linear'))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                ConvModule(num_inchannels[j],
                                           num_outchannels_conv3x3,
                                           kernel_size=3,
                                           stride=2,
                                           padding=1,
                                           activation=activation))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
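        # Hedged sketch of how the fuse layers are consumed in the module's
        # forward() (not shown); None entries act as identity:
        #   x_fused = []
        #   for i, layers in enumerate(fuse_layers):
        #       x_fused.append(sum(x[j] if layers[j] is None else layers[j](x[j])
        #                          for j in range(num_branches)))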
Example #9
    def __init__(self, inc, ouc, k=3, stride=1):
        super(residual, self).__init__()
        p = (k - 1) // 2

        self.conv1 = ConvModule(inc, ouc, k, stride=stride, padding=p)
        # self.conv1 = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(p, p), stride=(stride, stride), bias=False)
        # self.bn1   = nn.BatchNorm2d(out_dim)
        # self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = ConvModule(ouc, ouc, k, stride=1, padding=p, activation='linear')
        # self.conv2 = nn.Conv2d(out_dim, out_dim, (k, k), padding=(p, p), bias=False)
        # self.bn2   = nn.BatchNorm2d(out_dim)
        
        self.skip = ConvModule(inc, ouc, 1, stride=stride, activation='linear') if stride != 1 or inc != ouc else nn.Sequential()
        # self.skip  = nn.Sequential(
        #     nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
        #     nn.BatchNorm2d(out_dim)
        # ) 
        self.relu = nn.ReLU(inplace=True)
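        # Hedged forward sketch (inferred from the layers above):
        #   return self.relu(self.conv2(self.conv1(x)) + self.skip(x))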
Example #10
    def __init__(self, in_channels, num_classes, featc=1024):
        super(ShuffleNetv2PlusClassifierHead, self).__init__()
        featc = int(featc * 0.75)
        self.v3_conv = ConvModule(in_channels, featc, 1, activation='hs')
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.v3_se = SEModule(featc)
        self.v3_fc = nn.Linear(featc, featc, bias=False)
        self.v3_hs = HSwish()
        self.dropout = nn.Dropout(0.2)
        self.v3_fc2 = nn.Linear(featc, num_classes, bias=False)

        self._initialize_weights()
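        # Hedged forward sketch (ShuffleNetV2+-style head; not shown here):
        #   x = self.v3_se(self.gap(self.v3_conv(x))).flatten(1)
        #   x = self.dropout(self.v3_hs(self.v3_fc(x)))
        #   return self.v3_fc2(x)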
Example #11
    def __init__(self, in_channels, num_dim, num_layers=4, feat_size=64):
        super(RegressionHead, self).__init__()
        features = []
        for _ in range(num_layers):
            features.append(
                ConvModule(in_channels,
                           feat_size,
                           3,
                           stride=1,
                           padding=1,
                           activation='relu',
                           use_bn=False))
            in_channels = feat_size
        self.features = nn.Sequential(*features)
        self.head = ConvModule(in_channels,
                               num_dim,
                               3,
                               stride=1,
                               padding=1,
                               activation='linear',
                               use_bn=False)
Example #12
    def __init__(self, stacks=2):
        super(HourglassNet, self).__init__()
        self.stacks = stacks
        # self.heads= heads

        # self.pre = nn.Sequential(
        #     ConvModule(3, 128, 7, stride=2, padding=3),
        #     # convolution(7, 3, 128, stride=2, Norm=Norm),
        #     residual(128, 256, stride=2),
        #     residual(256, 256, stride=2)
        # )
        self.pre = Res2NetStem(3, 256)

        self.hg_mods = nn.ModuleList([
            hg_module(
                4, [256, 256, 384, 384, 512], [2, 2, 2, 2, 4],
                make_pool_layer=make_pool_layer,
                make_unpool_layer=make_unpool_layer,
                make_up_layer=make_layer,
                make_low_layer=make_layer,
                make_hg_layer_revr=make_layer_revr,
                make_hg_layer=make_hg_layer,
                make_merge_layer=make_merge_layer
            ) for _ in range(stacks)
        ])

        self.cnvs    = nn.ModuleList([ConvModule(256, 256, 3, padding=1) for _ in range(stacks)])
        # self.cnvs    = nn.ModuleList([convolution(3, 256, 256, Norm=Norm) for _ in range(stacks)])
        self.inters  = nn.ModuleList([residual(256, 256) for _ in range(stacks - 1)])
        self.cnvs_   = nn.ModuleList([self._merge_mod() for _ in range(stacks - 1)])
        self.inters_ = nn.ModuleList([self._merge_mod() for _ in range(stacks - 1)])    

        # for head in heads.keys():
        #     if 'hm' in head:
        #         module =  nn.ModuleList([
        #             self._pred_mod(heads[head]) for _ in range(stacks)
        #         ])
        #         self.__setattr__(head, module)
        #         for heat in self.__getattr__(head):
        #             heat[-1].bias.data.fill_(-2.19)
        #     else:
        #         module = nn.ModuleList([
        #             self._pred_mod(heads[head]) for _ in range(stacks)
        #         ])
        #         self.__setattr__(head, module)

        # self.relu = nn.LeakyReLU(inplace=True) if Norm is ABN else nn.ReLU(inplace=True)
        self.relu = nn.ReLU(inplace=True)

        self._init_params()    
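        # Hedged sketch of the stacked-hourglass forward pass with
        # inter-stack merging (CornerNet-style; forward() not shown):
        #   inter = self.pre(x)
        #   for i in range(self.stacks):
        #       cnv = self.cnvs[i](self.hg_mods[i](inter))
        #       if i < self.stacks - 1:
        #           inter = self.relu(self.inters_[i](inter) + self.cnvs_[i](cnv))
        #           inter = self.inters[i](inter)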
Example #13
    def __init__(self, in_channels, kernel_size, feat_dim):
        super(IAPHead, self).__init__()
        # global depthwise conv: the kernel spans the whole feature map
        self.iap_GDConv = ConvModule(in_channels,
                                     in_channels,
                                     kernel_size,
                                     groups=in_channels,
                                     activation='linear',
                                     use_bn=False)

        self.iap_fc = nn.Sequential(
            nn.Linear(in_channels, feat_dim),
            nn.BatchNorm1d(feat_dim),
            nn.PReLU(),
        )

        self._init_params()
Example #14
    def __init__(self, in_channels, n_dim, kernel_size=None, triplet=False):
        super(ReIDTrickHead, self).__init__()
        self.triplet = triplet
        self.n_dim = n_dim
        if kernel_size is not None:
            self.gap = ConvModule(in_channels,
                                  in_channels,
                                  kernel_size,
                                  groups=in_channels,
                                  activation='linear',
                                  use_bn=False)
        else:
            self.gap = nn.AdaptiveAvgPool2d(1)
        self.BNNeck = nn.BatchNorm2d(in_channels)
        self.BNNeck.bias.requires_grad_(False)  # no shift
        self.BNNeck.apply(weights_init_kaiming)
        if self.n_dim > 0:
            self.id_fc = nn.Linear(in_channels, n_dim, bias=False)
            self.id_fc.apply(weights_init_classifier)
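        # Hedged note: this is the BNNeck trick (Luo et al., "Bag of Tricks
        # and a Strong Baseline for Deep Person Re-Identification"): the
        # pre-BN feature usually feeds the triplet loss, while the post-BN,
        # normalized feature feeds the ID classifier (forward() not shown).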
Example #15
    def __init__(self):
        super(SpatialAttn, self).__init__()
        self.conv1 = ConvModule(1, 1, 3, stride=2, padding=1)
        self.conv2 = ConvModule(1, 1, 1)
Example #16
    def __init__(self, in_channels):
        super(SoftAttn, self).__init__()
        self.spatial_attn = SpatialAttn()
        self.channel_attn = ChannelAttn(in_channels)
        self.conv = ConvModule(in_channels, in_channels, 1)
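        # Hedged note: HA-CNN combines the two cues multiplicatively,
        # roughly sigmoid(self.conv(spatial_attn(x) * channel_attn(x)));
        # the combination lives in forward(), which is not shown here.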
Example #17
    def _merge_mod(self):
        return ConvModule(256, 256, 1, activation='linear')
Example #18
    def __init__(
        self,
        stage_num_modules=[1, 4, 3],
        stage_num_branches=[2, 3, 4],
        stage_num_blocks=[[4, 4], [4, 4, 4], [4, 4, 4, 4]],
        stage_num_channels=[[24, 112], [24, 112, 232], [24, 112, 232, 464]],
        stage_blocks=[ShuffleBlock, ShuffleBlock, ShuffleBlock],
        stage_fused_method=['sum', 'sum', 'sum'],
        stage_activation=['relu', 'hs', 'hs'],
        stage_useSE=[False, False, True],
        classification=False,
        cifar10=False,
    ):
        super(PoseHighResolutionNet, self).__init__()

        self.inplanes = 64
        self.stage_num_branches = stage_num_branches
        self.classification = classification
        self.cifar10 = cifar10

        # stem net
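        # (CIFAR-10 inputs are only 32x32, so this stem keeps stride 1;
        # the default stem below downsamples by 4 via two stride-2 convs.)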
        if self.cifar10:
            self.conv1 = ConvModule(3,
                                    64,
                                    kernel_size=3,
                                    stride=1,
                                    padding=1,
                                    activation='linear')
            self.conv2 = ConvModule(64, 64, kernel_size=3, stride=1, padding=1)
        else:
            self.conv1 = ConvModule(3,
                                    64,
                                    kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    activation='linear')
            self.conv2 = ConvModule(64, 64, kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(ShuffleBlock, self.inplanes, 4)

        self.stages = nn.ModuleList()
        self.transitions = nn.ModuleList()
        self.csp_transitions = nn.ModuleList()
        self.before_branches = nn.ModuleList()
        pre_stage_channels = [self.inplanes]
        for i in range(3):
            transition = self._make_transition_layer(
                pre_stage_channels, stage_num_channels[i],
                stage_activation[max(i - 1, 0)])
            csp_channels = [c // 2 for c in stage_num_channels[i]]
            csp_transition = self._make_transition_layer(stage_num_channels[i],
                                                         csp_channels,
                                                         'linear',
                                                         csp=True)
            stage, before_branches, pre_stage_channels = self._make_stage(
                stage_num_modules[i],
                stage_num_branches[i],
                stage_num_blocks[i],
                csp_channels,
                stage_blocks[i],
                stage_fused_method[i],
                csp_channels,
                stage_activation[i],
                stage_useSE[i],
            )
            pre_stage_channels = [c * 2 for c in pre_stage_channels]
            self.transitions.append(transition)
            self.stages.append(stage)
            self.csp_transitions.append(csp_transition)
            self.before_branches.append(before_branches)

        if self.classification:
            self.incre_modules, self.downsamp_modules, \
            self.final_layer = self._make_head(stage_num_channels[-1], pre_stage_channels)
        else:
            # np.int was removed in NumPy 1.24; use the builtin int
            last_inp_channels = int(np.sum(pre_stage_channels))
            self.last_layer = ConvModule(last_inp_channels,
                                         64,
                                         1,
                                         activation='hs')

        self.init_weights()
Example #19
    def __init__(self,
                 inc,
                 midc,
                 ouc,
                 ksize,
                 stride,
                 activation,
                 useSE,
                 mode,
                 affine=True):
        super(ShuffleBlock, self).__init__()
        self.stride = stride
        pad = ksize // 2
        inc = inc // 2

        if mode == 'v2':
            branch_main = [
                ConvModule(inc, midc, 1, activation=activation, affine=affine),
                ConvModule(midc,
                           midc,
                           ksize,
                           stride=stride,
                           padding=pad,
                           groups=midc,
                           activation='linear',
                           affine=affine),
                ConvModule(midc,
                           ouc - inc,
                           1,
                           activation=activation,
                           affine=affine),
            ]
        elif mode == 'xception':
            assert ksize == 3
            branch_main = [
                ConvModule(inc,
                           inc,
                           3,
                           stride=stride,
                           padding=1,
                           groups=inc,
                           activation='linear',
                           affine=affine),
                ConvModule(inc, midc, 1, activation=activation, affine=affine),
                ConvModule(midc,
                           midc,
                           3,
                           stride=1,
                           padding=1,
                           groups=midc,
                           activation='linear',
                           affine=affine),
                ConvModule(midc, midc, 1, activation=activation,
                           affine=affine),
                ConvModule(midc,
                           midc,
                           3,
                           stride=1,
                           padding=1,
                           groups=midc,
                           activation='linear',
                           affine=affine),
                ConvModule(midc,
                           ouc - inc,
                           1,
                           activation=activation,
                           affine=affine),
            ]
        else:
            raise TypeError(f'unknown ShuffleBlock mode: {mode!r}')

        if activation == 'relu':
            assert not useSE, 'SE is not used together with ReLU blocks'
        else:
            if useSE:
                branch_main.append(SEModule(ouc - inc))
        self.branch_main = nn.Sequential(*branch_main)

        if stride == 2:
            self.branch_proj = nn.Sequential(
                ConvModule(inc,
                           inc,
                           ksize,
                           stride=stride,
                           padding=pad,
                           groups=inc,
                           activation='linear',
                           affine=affine),
                ConvModule(inc, inc, 1, activation=activation, affine=affine),
            )
        else:
            self.branch_proj = None
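        # Hedged sketch of the usual ShuffleNetV2 forward (not shown here):
        #   stride 1: split x into two halves, keep one, run the other
        #             through branch_main, concatenate, channel-shuffle;
        #   stride 2: concatenate branch_proj(x) with branch_main(x),
        #             then channel-shuffle (resolution halves, width grows).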
Example #20
    def __init__(self, in_channels, reduction_rate=16):
        super(ChannelAttn, self).__init__()
        assert in_channels % reduction_rate == 0
        self.conv1 = ConvModule(in_channels, in_channels // reduction_rate, 1)
        self.conv2 = ConvModule(in_channels // reduction_rate, in_channels, 1)
        self.gap = nn.AdaptiveAvgPool2d(1)
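        # Hedged forward sketch (squeeze-and-excitation style; not shown):
        #   return self.conv2(self.conv1(self.gap(x)))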