def init(self):
    """Re-initialize the image head, meta FC and final-conv stacks via init_cnn.

    The last layer of ``final_conv`` is re-initialized a second time with
    ``negative_slope=1.0`` (a linear-activation Kaiming gain).
    """
    for module in (self.head_im, self.meta_fc, self.final_conv):
        init_cnn(module)
    init_cnn(self.final_conv[-1], negative_slope=1.0)
 def init(self):
     """Re-initialize the CB/RB blocks and the classifier head via init_cnn."""
     # NOTE: 'classifer' (sic) matches the attribute name used elsewhere
     # in this class — do not "fix" the spelling here alone.
     for module in (self.blk_cb, self.blk_rb, self.classifer):
         init_cnn(module)
    def __init__(self, n_class, arch, use_CBAM=False):
        """Single-view re-id model: CNN backbone + pooled, BN-necked classifier.

        Args:
            n_class: number of identity classes for the final classifier.
            arch: backbone key — one of 'resnet50', 'sk_resnet50',
                'resnet50d', 'sge_resnet50', 'resnext50_32x4d',
                'se_resnext50', 'effnetb4'.
            use_CBAM: unused in this variant; kept for signature
                compatibility with the other model classes.

        Raises:
            ValueError: if ``arch`` is not one of the supported keys
                (the original code silently skipped the dispatch and
                failed later with an AttributeError in forward()).
        """
        super(ISICModel_singleview_reid, self).__init__()
        self.mode = 'singleview_reid'

        cfg = gl.get_value('cfg')
        self.cfg = cfg

        def strip_head(model):
            # Drop the trailing (avgpool, fc) children, keeping the conv trunk.
            return nn.Sequential(*list(model.children())[:-2])

        if arch == 'resnet50':
            if cfg.MODEL.USE_ADL is True:
                model_backbone = resnet50_adl(
                    pretrained=True,
                    num_classes=n_class,
                    ADL_position=cfg.MODEL.ADL_POSITION,
                    drop_rate=cfg.MODEL.ADLRATE,
                    drop_thr=cfg.MODEL.ADLTHR)
            else:
                model_backbone = models.resnet50(pretrained=True)
            self.backbone = strip_head(model_backbone)
        elif arch == 'sk_resnet50':
            self.backbone = strip_head(sk_resnet50(pretrained=True))
        elif arch == 'resnet50d':
            self.backbone = strip_head(resnet50d(pretrained=True))
        elif arch == 'sge_resnet50':
            self.backbone = strip_head(sge_resnet50(pretrained=True))
        elif arch == 'resnext50_32x4d':
            self.backbone = strip_head(models.resnext50_32x4d(pretrained=True))
        elif arch == 'se_resnext50':
            model_backbone = SENet(block=SEResNeXtBottleneck,
                                   layers=[3, 4, 6, 3],
                                   groups=32,
                                   reduction=16,
                                   dropout_p=None,
                                   inplanes=64,
                                   input_3x3=False,
                                   downsample_kernel_size=1,
                                   downsample_padding=0,
                                   last_stride=2)
            param_dict = torch.load(
                '../models/se_resnext50_32x4d-a260b3a4.pth')
            # Copy the pretrained weights tensor-by-tensor, skipping the
            # original task's classification head.
            for name in param_dict:
                if 'classifier' in name or 'last_linear' in name:
                    continue
                model_backbone.state_dict()[name].copy_(param_dict[name])
            # Full SENet is kept here (no head stripping), as in the original.
            self.backbone = model_backbone
        elif arch == 'effnetb4':
            self.backbone = EfficientNet.from_pretrained('efficientnet-b4')
        else:
            # Fail fast instead of surfacing an AttributeError in forward().
            raise ValueError('unsupported backbone arch: {}'.format(arch))

        # Identical in every original branch; assigned once here.
        self.backbone_lc = nn.ReLU(inplace=True)  # skip

        self.imfeat_dim = cfg.MODEL.IMG_FCS  # e.g. (4096, 512)
        self.use_fc = cfg.MODEL.REID_USE_FC
        self.num_classes = n_class
        self.pdrop_lin = cfg.MODEL.REID_PDROP_LIN
        self.neck_feat = cfg.MODEL.REID_NECK_FEAT

        if self.use_fc is True:
            # Pool -> dropout -> FC down to imfeat_dim[1], then BN bottleneck.
            self.in_planes = self.imfeat_dim[1]
            self.after_backbone = nn.Sequential(
                layers.AvgPool(), nn.Dropout(p=self.pdrop_lin),
                nn.Linear(self.imfeat_dim[0] // 2, self.in_planes, bias=False))
            self.bottleneck = nn.BatchNorm1d(self.imfeat_dim[1])
            self.classifier = nn.Linear(self.in_planes,
                                        self.num_classes,
                                        bias=False)
        else:
            # Pool only; dropout lives inside the classifier instead.
            self.in_planes = self.imfeat_dim[0] // 2
            self.after_backbone = layers.AvgPool()
            self.bottleneck = nn.BatchNorm1d(self.in_planes)
            self.classifier = nn.Sequential(
                nn.Dropout(p=self.pdrop_lin),
                nn.Linear(self.in_planes, self.num_classes, bias=False))

        # Freeze the BN shift (BNNeck-style: no bias learned).
        self.bottleneck.bias.requires_grad_(False)  # no shift

        # Per-class feature buffer (presumably feature centers for a
        # center-style loss — confirm against the training loop).
        self.center_feat = torch.zeros(n_class, self.in_planes)
        self.center_feat = self.center_feat.to(get_device(self.cfg))

        init_cnn(self.after_backbone)
        init_cnn(self.bottleneck)
        init_cnn(self.classifier)

        # Placeholders marked '#skip' in the original; kept so attribute
        # names line up with the other model variants.
        self.meta_fc = nn.ReLU(inplace=True)
        self.final_conv = nn.ReLU(inplace=True)  # skip

        # Optionally warm-start the whole model from a checkpoint.
        if cfg.MODEL.PRETRAIN_PATH is not None and os.path.exists(
                cfg.MODEL.PRETRAIN_PATH):
            self.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
 def init(self):
     """Re-initialize the image head and attention modules via init_cnn."""
     # head_im gets a linear-activation Kaiming gain (negative_slope=1.0).
     init_cnn(self.head_im, negative_slope=1.0)
     init_cnn(self.attentions)