Example #1
    def __init__(self, cfg):
        super().__init__()
        # normalization statistics registered as non-trainable buffers so they
        # follow the model (device transfers, state_dict) without being trained
        self.register_buffer(
            "pixel_mean",
            torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
        self.register_buffer(
            "pixel_std",
            torch.Tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))
        self._cfg = cfg
        # backbone
        self.backbone = build_backbone(cfg)

        # head
        pool_type = cfg.MODEL.HEADS.POOL_LAYER
        if pool_type == 'avgpool': pool_layer = FastGlobalAvgPool2d()
        elif pool_type == 'maxpool': pool_layer = nn.AdaptiveMaxPool2d(1)
        elif pool_type == 'gempool': pool_layer = GeneralizedMeanPoolingP()
        elif pool_type == "avgmaxpool": pool_layer = AdaptiveAvgMaxPool2d()
        elif pool_type == "identity": pool_layer = nn.Identity()
        else:
            raise KeyError(
                f"{pool_type} is invalid, please choose from "
                f"'avgpool', 'maxpool', 'gempool', 'avgmaxpool' and 'identity'."
            )

        in_feat = cfg.MODEL.HEADS.IN_FEAT
        num_classes = cfg.MODEL.HEADS.NUM_CLASSES
        self.heads = build_reid_heads(cfg, in_feat, num_classes, pool_layer)
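The constructor only reads a few config keys directly. As a rough illustration (placeholder values, not the project's actual defaults), a minimal yacs-style config supplying those keys could look like the sketch below; note that build_backbone and build_reid_heads read additional MODEL.BACKBONE / MODEL.HEADS keys that are not shown here.

# Hypothetical config sketch: key names mirror what the constructor reads,
# values are placeholders rather than fast-reid's real defaults.
from yacs.config import CfgNode as CN

cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.PIXEL_MEAN = [123.675, 116.28, 103.53]  # example per-channel mean
cfg.MODEL.PIXEL_STD = [58.395, 57.12, 57.375]     # example per-channel std
cfg.MODEL.HEADS = CN()
cfg.MODEL.HEADS.POOL_LAYER = "avgpool"  # one of the five accepted pool types
cfg.MODEL.HEADS.IN_FEAT = 2048          # feature dim fed to the ReID head
cfg.MODEL.HEADS.NUM_CLASSES = 751       # e.g. number of Market-1501 train identities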
Example #2
File: mgn.py  Project: yukichou/fast-reid
    def __init__(self, cfg):
        super().__init__()
        self.register_buffer(
            "pixel_mean",
            torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
        self.register_buffer(
            "pixel_std",
            torch.Tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))
        self._cfg = cfg

        # backbone
        bn_norm = cfg.MODEL.BACKBONE.NORM
        num_splits = cfg.MODEL.BACKBONE.NORM_SPLIT
        with_se = cfg.MODEL.BACKBONE.WITH_SE

        backbone = build_backbone(cfg)
        self.backbone = nn.Sequential(backbone.conv1, backbone.bn1,
                                      backbone.relu, backbone.maxpool,
                                      backbone.layer1, backbone.layer2,
                                      backbone.layer3[0])
        res_conv4 = nn.Sequential(*backbone.layer3[1:])
        res_g_conv5 = backbone.layer4

        res_p_conv5 = nn.Sequential(
            Bottleneck(1024,
                       512,
                       bn_norm,
                       num_splits,
                       False,
                       with_se,
                       downsample=nn.Sequential(
                           nn.Conv2d(1024, 2048, 1, bias=False),
                           get_norm(bn_norm, 2048, num_splits))),
            Bottleneck(2048, 512, bn_norm, num_splits, False, with_se),
            Bottleneck(2048, 512, bn_norm, num_splits, False, with_se))
        res_p_conv5.load_state_dict(backbone.layer4.state_dict())

        pool_type = cfg.MODEL.HEADS.POOL_LAYER
        if pool_type == 'avgpool': pool_layer = FastGlobalAvgPool2d()
        elif pool_type == 'maxpool': pool_layer = nn.AdaptiveMaxPool2d(1)
        elif pool_type == 'gempool': pool_layer = GeneralizedMeanPoolingP()
        elif pool_type == "avgmaxpool": pool_layer = AdaptiveAvgMaxPool2d()
        elif pool_type == "identity": pool_layer = nn.Identity()
        else:
            raise KeyError(
                f"{pool_type} is invalid, please choose from "
                f"'avgpool', 'maxpool', 'gempool', 'avgmaxpool' and 'identity'."
            )

        # head
        in_feat = cfg.MODEL.HEADS.IN_FEAT
        num_classes = cfg.MODEL.HEADS.NUM_CLASSES
        # branch1
        self.b1 = nn.Sequential(copy.deepcopy(res_conv4),
                                copy.deepcopy(res_g_conv5))
        self.b1_pool = self._build_pool_reduce(pool_layer,
                                               bn_norm,
                                               num_splits,
                                               reduce_dim=in_feat)

        self.b1_head = build_reid_heads(cfg, in_feat, num_classes,
                                        nn.Identity())

        # branch2
        self.b2 = nn.Sequential(copy.deepcopy(res_conv4),
                                copy.deepcopy(res_p_conv5))
        self.b2_pool = self._build_pool_reduce(pool_layer,
                                               bn_norm,
                                               num_splits,
                                               reduce_dim=in_feat)
        self.b2_head = build_reid_heads(cfg, in_feat, num_classes,
                                        nn.Identity())

        self.b21_pool = self._build_pool_reduce(pool_layer,
                                                bn_norm,
                                                num_splits,
                                                reduce_dim=in_feat)
        self.b21_head = build_reid_heads(cfg, in_feat, num_classes,
                                         nn.Identity())

        self.b22_pool = self._build_pool_reduce(pool_layer,
                                                bn_norm,
                                                num_splits,
                                                reduce_dim=in_feat)
        self.b22_head = build_reid_heads(cfg, in_feat, num_classes,
                                         nn.Identity())

        # branch3
        self.b3 = nn.Sequential(copy.deepcopy(res_conv4),
                                copy.deepcopy(res_p_conv5))
        self.b3_pool = self._build_pool_reduce(pool_layer,
                                               bn_norm,
                                               num_splits,
                                               reduce_dim=in_feat)
        self.b3_head = build_reid_heads(cfg, in_feat, num_classes,
                                        nn.Identity())

        self.b31_pool = self._build_pool_reduce(pool_layer,
                                                bn_norm,
                                                num_splits,
                                                reduce_dim=in_feat)
        self.b31_head = build_reid_heads(cfg, in_feat, num_classes,
                                         nn.Identity())

        self.b32_pool = self._build_pool_reduce(pool_layer,
                                                bn_norm,
                                                num_splits,
                                                reduce_dim=in_feat)
        self.b32_head = build_reid_heads(cfg, in_feat, num_classes,
                                         nn.Identity())

        self.b33_pool = self._build_pool_reduce(pool_layer,
                                                bn_norm,
                                                num_splits,
                                                reduce_dim=in_feat)
        self.b33_head = build_reid_heads(cfg, in_feat, num_classes,
                                         nn.Identity())
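Example #2 calls a _build_pool_reduce helper that the excerpt does not show. Based only on how it is invoked (a pooling layer, norm settings, and a reduce_dim target), the following is a plausible sketch of such a helper, not the actual implementation in mgn.py:

    def _build_pool_reduce(self, pool_layer, bn_norm, num_splits,
                           input_dim=2048, reduce_dim=256):
        # Assumed "pool then reduce" pattern: pool the branch feature map,
        # then shrink its channel dimension with a 1x1 conv + norm + ReLU.
        # get_norm and weights_init_kaiming are the helpers already used in
        # the examples above; the real helper in mgn.py may differ.
        pool_reduce = nn.Sequential(
            pool_layer,
            nn.Conv2d(input_dim, reduce_dim, 1, bias=False),
            get_norm(bn_norm, reduce_dim, num_splits),
            nn.ReLU(True),
        )
        pool_reduce.apply(weights_init_kaiming)
        return pool_reduce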
Example #3
    def __init__(self, cfg):
        super().__init__()
        self._cfg = cfg
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        self.register_buffer(
            "pixel_mean",
            torch.tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
        self.register_buffer(
            "pixel_std",
            torch.tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))

        # backbone
        self.teacher_net = build_backbone(cfg)
        self.student_net = build_backbone(cfg)
        # default two-way discriminator; replaced below depending on which
        # discriminative loss is configured
        self.D_Net = cam_Classifier(2048, 2).apply(weights_init_kaiming)
        if 'Dis_loss_cam' in self._cfg.MODEL.LOSSES.NAME:
            # camid = number of cameras in the target dataset
            # (DukeMTMC-reID has 8 cameras, Market-1501 has 6)
            if "Hazy_DukeMTMC" in self._cfg.TDATASETS.NAMES:
                camid = int(8)
            elif "Hazy_Market1501" in self._cfg.TDATASETS.NAMES:
                camid = int(6)
            self.D_Net = CamClassifier(2048, camid)
        elif 'Dis_loss' in self._cfg.MODEL.LOSSES.NAME:
            if self._cfg.MODEL.PARAM.Dis_net == "cam_Classifier":
                self.D_Net = cam_Classifier(2048,
                                            2).apply(weights_init_kaiming)
            elif self._cfg.MODEL.PARAM.Dis_net == "cam_Classifier_1024":
                self.D_Net = cam_Classifier_1024(2048,
                                                 2).apply(weights_init_kaiming)
            elif self._cfg.MODEL.PARAM.Dis_net == "cam_Classifier_1024_nobias":
                self.D_Net = cam_Classifier_1024_nobias(
                    2048, 2).apply(weights_init_kaiming)
            elif self._cfg.MODEL.PARAM.Dis_net == "cam_Classifier_fc":
                self.D_Net = cam_Classifier_fc(2048,
                                               2).apply(weights_init_kaiming)
            elif self._cfg.MODEL.PARAM.Dis_net == "cam_Classifier_fc_nobias_in_last_layer":
                self.D_Net = cam_Classifier_fc_nobias_in_last_layer(
                    2048, 2).apply(weights_init_kaiming)

        self.D_Net = self.D_Net.to(torch.device(cfg.MODEL.DEVICE))
        self.CrossEntropy_loss = nn.CrossEntropyLoss().to(
            torch.device(cfg.MODEL.DEVICE))
        self.bn = nn.BatchNorm2d(2048)
        self.bn.bias.requires_grad_(False)
        self.bn.apply(weights_init_kaiming)

        # head
        pool_type = cfg.MODEL.HEADS.POOL_LAYER
        if pool_type == 'avgpool': pool_layer = FastGlobalAvgPool2d()
        elif pool_type == 'maxpool': pool_layer = nn.AdaptiveMaxPool2d(1)
        elif pool_type == 'gempool': pool_layer = GeneralizedMeanPoolingP()
        elif pool_type == "avgmaxpool": pool_layer = AdaptiveAvgMaxPool2d()
        elif pool_type == "identity": pool_layer = nn.Identity()
        else:
            raise KeyError(
                f"{pool_type} is invalid, please choose from "
                f"'avgpool', 'maxpool', 'gempool', 'avgmaxpool' and 'identity'."
            )

        in_feat = cfg.MODEL.HEADS.IN_FEAT
        num_classes = cfg.MODEL.HEADS.NUM_CLASSES
        self.teacher_heads = build_reid_heads(cfg, in_feat, num_classes,
                                              pool_layer)
        self.student_heads = build_reid_heads(cfg, in_feat, num_classes,
                                              pool_layer)
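All three examples repeat the same five-way pool-type dispatch. If you adapt this code, one way to tighten it is a table-driven lookup; the sketch below is a refactoring suggestion with the same behavior (assuming the same layer imports the examples use), not code taken from fast-reid:

# Deferred factories so only the selected pooling layer is constructed.
POOL_LAYERS = {
    'avgpool': lambda: FastGlobalAvgPool2d(),
    'maxpool': lambda: nn.AdaptiveMaxPool2d(1),
    'gempool': lambda: GeneralizedMeanPoolingP(),
    'avgmaxpool': lambda: AdaptiveAvgMaxPool2d(),
    'identity': lambda: nn.Identity(),
}

def make_pool_layer(pool_type):
    try:
        return POOL_LAYERS[pool_type]()
    except KeyError:
        raise KeyError(
            f"{pool_type} is invalid, please choose from "
            f"{', '.join(repr(k) for k in POOL_LAYERS)}."
        ) from None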