Code Example #1
File: resnet_maxout2.py Project: miraclebiu/reid5
def reset_params(self):
    # Re-initialize every layer; called when no pretrained weights are loaded
    # (see Example #2, which invokes this when pretrained=False).
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init_kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init_constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init_constant(m.weight, 1)
            init_constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init_normal(m.weight, std=0.001)
            if m.bias is not None:
                init_constant(m.bias, 0)
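All of these examples call init_kaiming_normal, init_constant and init_normal, whose definitions are not part of this listing. A minimal sketch, assuming they are thin aliases over torch.nn.init (the actual reid5 project may define them differently):

# Assumed aliases over the in-place initializers in torch.nn.init.
from torch.nn.init import kaiming_normal_ as init_kaiming_normal
from torch.nn.init import constant_ as init_constant
from torch.nn.init import normal_ as init_normal

With these aliases, every call in the examples maps directly onto the standard in-place initializers.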
Code Example #2
File: resnet_maxout2.py Project: miraclebiu/reid5
    def __init__(self, depth, pretrained=True, num_features=0,
                 norm=False, dropout=0, num_classes=0, add_l3_softmax=False,
                 test_stage=False):
        super(ResNet_MaxOut2, self).__init__()

        self.depth = depth
        self.pretrained = pretrained
        self.before_cls = True
        self.test_stage = test_stage

        # Construct the base (pretrained) ResNet backbone.
        if depth not in ResNet_MaxOut2.__factory:
            raise KeyError("Unsupported depth: {}".format(depth))
        self.base = ResNet_MaxOut2.__factory[depth](pretrained=pretrained)
        self.num_features = num_features
        self.norm = norm
        self.dropout = dropout
        self.num_classes = num_classes

        out_planes = self.base.fc.in_features

        # Append new layers

        self.layer2_maxpool = nn.MaxPool2d(kernel_size=4, stride=4)
        self.layer3_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # last embedding layer
        self.feat = nn.Linear(out_planes, self.num_features)
        self.feat_bn = nn.BatchNorm1d(self.num_features)

        init_kaiming_normal(self.feat.weight, mode='fan_out')
        init_constant(self.feat.bias, 0)
        init_constant(self.feat_bn.weight, 1)
        init_constant(self.feat_bn.bias, 0)

        if self.dropout > 0:
            self.drop = nn.Dropout(self.dropout)
        if self.num_classes > 0:
            self.classifier = nn.Linear(self.num_features, self.num_classes)
            # self.classifier = nn.Linear(out_planes, self.num_classes)
            init_normal(self.classifier.weight, std=0.001)
            init_constant(self.classifier.bias, 0)

        if not self.pretrained:
            self.reset_params()
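For context, a hypothetical instantiation of this model (the accepted depth values depend on the unshown ResNet_MaxOut2.__factory table, and the hyperparameters here are illustrative only):

# Hypothetical usage; 50 is assumed to be a key of ResNet_MaxOut2.__factory.
model = ResNet_MaxOut2(depth=50, pretrained=True, num_features=256,
                       dropout=0.5, num_classes=751)

With pretrained=True the backbone keeps its ImageNet weights and only the newly appended layers are initialized explicitly; reset_params() from Example #1 runs only when pretrained=False.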
Code Example #3
File: layer.py Project: miraclebiu/reid5
def _initialize_weights(models):
    for m in models:
        if isinstance(m, list):
            # Recurse into each element of a plain Python list of modules
            # (the original passed the whole list m again, re-processing it
            # once per element and ignoring mini_m).
            for mini_m in m:
                _initialize_weights([mini_m])
        elif isinstance(m, nn.Conv2d):
            init_kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init_constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init_constant(m.weight, 1)
            init_constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init_normal(m.weight, std=0.001)
        elif isinstance(m, nn.Module):
            # Containers (e.g. nn.Sequential) must be checked last: Conv2d,
            # BatchNorm2d and Linear are all nn.Module subclasses, so the
            # original ordering made the leaf branches dead code. Recurse into
            # children() rather than modules(), because modules() yields m
            # itself first and would recurse forever.
            _initialize_weights(m.children())
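A short usage sketch for the fixed helper (the module shapes are hypothetical), showing that it handles both plain Python lists and container modules:

# Hypothetical usage: initialize a freshly built reduction head and classifier.
head = nn.Sequential(
    nn.Conv2d(2048, 256, kernel_size=1, bias=False),  # bias=False: bias branch is skipped
    nn.BatchNorm2d(256),
    nn.ReLU(inplace=True),
)
fc = nn.Linear(256, 751)
_initialize_weights([head, fc])  # Conv2d -> kaiming, BatchNorm2d -> (1, 0), Linear -> normal(std=0.001)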
Code Example #4
    def __init__(self, depth, num_classes=283, num_features=1024, norm=True):

        super(ResNet_mgn_lr, self).__init__()
        block = Bottleneck
        if depth not in ResNet_mgn_lr.__factory:
            raise KeyError("Unsupported depth: {}".format(depth))
        layers = ResNet_mgn_lr.__factory[depth]
        self.inplanes = 64

        self.num_features = num_features
        self.norm = norm
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)

        self.layer3_0 = self._make_layer(block, 256, layers[2], stride=2, is_last=False)
        self.layer3_1 = self._make_layer(block, 256, layers[2], stride=2, is_last=False)
        self.layer3_lr = self._make_layer(block, 256, layers[2], stride=2, is_last=False)
        self.layer3_2 = self._make_layer(block, 256, layers[2], stride=2, is_last=True)

        self.layer4_0 = self._make_layer(block, 512, layers[3], stride=2, is_last=False)
        self.layer4_1 = self._make_layer(block, 512, layers[3], stride=1, is_last=False)
        self.layer4_lr = self._make_layer(block, 512, layers[3], stride=1, is_last=False)
        self.layer4_2 = self._make_layer(block, 512, layers[3], stride=1, is_last=True)

        # self.classifier_g1 = nn.Linear(256, num_classes)
        # self.g1_reduce = self._cbr2()

        # self.classifier_g2 = nn.Linear(256, num_classes)
        # self.g2_reduce = self._cbr2()
        # self.classifier_p2_1 = nn.Linear(256, num_classes)
        # self.p2_1_reduce = self._cbr2()
        # self.classifier_p2_2 = nn.Linear(256, num_classes)
        # self.p2_2_reduce = self._cbr2()


        # self.classifier_g2_lr = nn.Linear(256, num_classes)
        # self.g2_reduce_lr = self._cbr2()
        # self.classifier_p2_1_lr = nn.Linear(256, num_classes)
        # self.p2_1_reduce_lr = self._cbr2()
        # self.classifier_p2_2_lr = nn.Linear(256, num_classes)
        # self.p2_2_reduce_lr = self._cbr2()


        # self.classifier_g3 = nn.Linear(256, num_classes)
        # self.g3_reduce = self._cbr2()
        # self.classifier_p3_1 = nn.Linear(256, num_classes)
        # self.p3_1_reduce = self._cbr2()
        # self.classifier_p3_2 = nn.Linear(256, num_classes)
        # self.p3_2_reduce = self._cbr2()
        # self.classifier_p3_3 = nn.Linear(256, num_classes)
        # self.p3_3_reduce = self._cbr2()
###----------------------------###
        self.classifier_g1 = nn.Linear(512, num_classes)
        self.g1_reduce = self._cbr2()

        self.classifier_g2 = nn.Linear(512, num_classes)
        self.g2_reduce = self._cbr2()
        self.classifier_p2_1 = nn.Linear(512, num_classes)
        self.p2_1_reduce = self._cbr2()
        self.classifier_p2_2 = nn.Linear(512, num_classes)
        self.p2_2_reduce = self._cbr2()


        self.classifier_g2_lr = nn.Linear(512, num_classes)
        self.g2_reduce_lr = self._cbr2()
        self.classifier_p2_1_lr = nn.Linear(512, num_classes)
        self.p2_1_reduce_lr = self._cbr2()
        self.classifier_p2_2_lr = nn.Linear(512, num_classes)
        self.p2_2_reduce_lr = self._cbr2()


        self.classifier_g3 = nn.Linear(512, num_classes)
        self.g3_reduce = self._cbr2()
        self.classifier_p3_1 = nn.Linear(512, num_classes)
        self.p3_1_reduce = self._cbr2()
        self.classifier_p3_2 = nn.Linear(512, num_classes)
        self.p3_2_reduce = self._cbr2()
        self.classifier_p3_3 = nn.Linear(512, num_classes)
        self.p3_3_reduce = self._cbr2()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init_kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init_constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init_constant(m.weight, 1)
                init_constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init_normal(m.weight, std=0.001)
                if m.bias is not None:
                    init_constant(m.bias, 0)
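The ResNet_mgn_lr.__factory table referenced above is not part of this listing. Since the constructor indexes it as layers[0]..layers[3] and pairs it with Bottleneck blocks, it presumably maps depths to the standard ResNet stage sizes; a sketch under that assumption:

# Assumed layer counts per depth (standard Bottleneck ResNet configs);
# the real ResNet_mgn_lr.__factory may differ.
__factory = {
    50: [3, 4, 6, 3],
    101: [3, 4, 23, 3],
    152: [3, 8, 36, 3],
}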
Code Example #5
    def __init__(self,
                 depth,
                 num_features=0,
                 norm=False,
                 dropout=0,
                 num_classes=751,
                 add_l3_softmax=False,
                 test_stage=False):

        super(ResNet_mgn_lr, self).__init__()
        block = Bottleneck
        if depth not in ResNet_mgn_lr.__factory:
            raise KeyError("Unsupported depth: {}".format(depth))
        layers = ResNet_mgn_lr.__factory[depth]
        self.inplanes = 64
        self.test_stage = test_stage

        self.num_features = num_features
        self.norm = norm
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)

        self.layer3_0 = self._make_layer(block,
                                         256,
                                         layers[2],
                                         stride=2,
                                         is_last=False)
        self.layer3_1 = self._make_layer(block,
                                         256,
                                         layers[2],
                                         stride=2,
                                         is_last=False)
        self.layer3_lr = self._make_layer(block,
                                          256,
                                          layers[2],
                                          stride=2,
                                          is_last=False)
        self.layer3_2 = self._make_layer(block,
                                         256,
                                         layers[2],
                                         stride=2,
                                         is_last=True)

        self.layer4_0 = self._make_layer(block,
                                         512,
                                         layers[3],
                                         stride=2,
                                         is_last=False)
        self.layer4_1 = self._make_layer(block,
                                         512,
                                         layers[3],
                                         stride=1,
                                         is_last=False)
        self.layer4_lr = self._make_layer(block,
                                          512,
                                          layers[3],
                                          stride=1,
                                          is_last=False)
        self.layer4_2 = self._make_layer(block,
                                         512,
                                         layers[3],
                                         stride=1,
                                         is_last=True)

        ############################ original (commented out) ############################

        # self.classifier_g1 = nn.Linear(512 * block.expansion, num_classes)
        # self.g1_reduce = self._cbr2()
        #
        # self.classifier_g2   = nn.Linear(512 * block.expansion, num_classes)
        # self.g2_reduce = self._cbr2()
        # self.classifier_p2_1 = nn.Linear(256, num_classes)
        # self.p2_1_reduce = self._cbr2()
        # self.classifier_p2_2 = nn.Linear(256, num_classes)
        # self.p2_2_reduce = self._cbr2()
        # self.classifier_p2 = nn.Linear(256*2, num_classes)
        #
        # self.classifier_g3   = nn.Linear(512 * block.expansion, num_classes)
        # self.g3_reduce = self._cbr2()
        # self.classifier_p3_1 = nn.Linear(256, num_classes)
        # self.p3_1_reduce = self._cbr2()
        # self.classifier_p3_2 = nn.Linear(256, num_classes)
        # self.p3_2_reduce = self._cbr2()
        # self.classifier_p3_3 = nn.Linear(256, num_classes)
        # self.p3_3_reduce = self._cbr2()
        # self.classifier_p3 = nn.Linear(256*3, num_classes)
        ################################### test ###################################

        self.classifier_g1 = nn.Linear(256, num_classes)
        self.g1_reduce = self._cbr2()

        self.classifier_g2 = nn.Linear(256, num_classes)
        self.g2_reduce = self._cbr2()
        self.classifier_p2_1 = nn.Linear(256, num_classes)
        self.p2_1_reduce = self._cbr2()
        self.classifier_p2_2 = nn.Linear(256, num_classes)
        self.p2_2_reduce = self._cbr2()

        self.classifier_g2_lr = nn.Linear(256, num_classes)
        self.g2_reduce_lr = self._cbr2()
        self.classifier_p2_1_lr = nn.Linear(256, num_classes)
        self.p2_1_reduce_lr = self._cbr2()
        self.classifier_p2_2_lr = nn.Linear(256, num_classes)
        self.p2_2_reduce_lr = self._cbr2()

        self.classifier_g3 = nn.Linear(256, num_classes)
        self.g3_reduce = self._cbr2()
        self.classifier_p3_1 = nn.Linear(256, num_classes)
        self.p3_1_reduce = self._cbr2()
        self.classifier_p3_2 = nn.Linear(256, num_classes)
        self.p3_2_reduce = self._cbr2()
        self.classifier_p3_3 = nn.Linear(256, num_classes)
        self.p3_3_reduce = self._cbr2()

        for m in self.modules():
            # if isinstance(m, nn.Conv2d):
            #     n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            #     m.weight.data.normal_(0, math.sqrt(2. / n))
            # elif isinstance(m, nn.BatchNorm2d):
            #     m.weight.data.fill_(1)
            #     m.bias.data.zero_()
            # elif isinstance(m,nn.Linear):
            #     init.kaiming_normal(m.weight, mode='fan_out')
            #     if m.bias is not None:
            #         init.constant(m.bias, 0)
            #     # init.normal(m.weight, std=0.001) # can change to kaiming_normal
            #     # if m.bias is not None:
            #     #     init.constant(m.bias, 0)
            # elif isinstance(m, nn.BatchNorm1d):
            #     init.constant(m.weight,1)
            #     init.constant(m.bias,0)
            if isinstance(m, nn.Conv2d):
                init_kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init_constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init_constant(m.weight, 1)
                init_constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init_normal(m.weight, std=0.001)
                if m.bias is not None:
                    init_constant(m.bias, 0)
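As with the earlier constructors, a hypothetical instantiation of this variant (valid depths again depend on the unshown __factory table, and the argument values are illustrative only):

# Hypothetical usage of this ResNet_mgn_lr variant.
model = ResNet_mgn_lr(depth=50, num_features=0, norm=False,
                      dropout=0, num_classes=751, test_stage=False)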