Example #1
    def _init_classifier(self):

        # Linear head over the concatenated outputs of all attention modules.
        classifier = nn.Linear(
            len(self.dan_module_names) * self.output_dim,
            self.owner().num_classes)
        init_params(classifier)
        self.classifier = classifier
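
The input width of this head is the number of attention branches times the per-branch feature dimension. A quick standalone shape check; the concrete numbers below are assumed for illustration only:

    import torch
    import torch.nn as nn

    num_modules, output_dim, num_classes = 3, 1024, 751  # assumed values
    classifier = nn.Linear(num_modules * output_dim, num_classes)
    logits = classifier(torch.randn(8, num_modules * output_dim))
    print(logits.shape)  # torch.Size([8, 751])
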
Example #2
    def _init_classifiers(self):

        # One independent classifier per body part.
        self.classifiers = nn.ModuleList()

        for _ in range(self.part_num):
            classifier = nn.Linear(self.output_dim, self.num_classes)
            init_params(classifier)
            self.classifiers.append(classifier)
Example #3
    def __init__(self, backbone, args, num_classes, **kwargs):

        super().__init__(backbone, args, num_classes, **kwargs)

        # Local import keeps the dependency out of module scope.
        from torchreid.utils.torchtools import init_params

        self.final_classifier = nn.Linear(
            args['cls_dim'],
            num_classes,
        )
        init_params(self.final_classifier)
Example #4
    def _init_reduction_layer(self):

        # 1x1 convolution that reduces the channel dimension, then BN + ReLU.
        reduction = nn.Sequential(
            nn.Conv2d(self.input_dim,
                      self.output_dim,
                      kernel_size=1,
                      bias=False),
            nn.BatchNorm2d(self.output_dim),
            nn.ReLU(inplace=True))
        init_params(reduction)

        self.reduction = reduction
Example #5
    def _init_fc_layer(self):

        dropout_p = self.args['dropout']

        # Optional dropout, appended after the ReLU when configured.
        if dropout_p is not None:
            dropout_layer = [nn.Dropout(p=dropout_p)]
        else:
            dropout_layer = []

        fc = nn.Sequential(nn.Linear(self.input_dim, self.output_dim),
                           nn.BatchNorm1d(self.output_dim),
                           nn.ReLU(inplace=True), *dropout_layer)
        init_params(fc)

        return fc
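
Because _init_fc_layer returns the block rather than assigning it to self, the caller decides where it lives. A minimal standalone sketch of the resulting module; the dimensions and dropout rate are assumed:

    import torch
    import torch.nn as nn
    from torchreid.utils.torchtools import init_params

    fc = nn.Sequential(nn.Linear(512, 256),   # input_dim=512, output_dim=256 (assumed)
                       nn.BatchNorm1d(256),
                       nn.ReLU(inplace=True),
                       nn.Dropout(p=0.5))     # dropout_p=0.5 (assumed)
    init_params(fc)
    out = fc(torch.randn(8, 512))             # -> shape (8, 256)
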
Example #6
    def _init_attention_modules(self):

        args = self.args
        self.dan_module_names = set()
        DAN_module_names = {'cam', 'pam'} & set(args['dan_dan'])
        use_head = not args['dan_dan_no_head']
        self.use_dan = bool(DAN_module_names)

        # Identity pass-through branch, kept alongside the attention branches.
        before_module = get_attention_module_instance('identity',
                                                      self.output_dim,
                                                      use_head=False)
        self.dan_module_names.add('before_module')
        self.before_module = before_module
        if use_head:
            init_params(before_module)

        # Channel attention module (CAM), if requested in the config.
        if 'cam' in DAN_module_names:
            cam_module = get_attention_module_instance('cam',
                                                       self.input_dim,
                                                       out_dim=self.output_dim,
                                                       use_head=use_head)
            init_params(cam_module)
            self.dan_module_names.add('cam_module')
            self.cam_module = cam_module

        # Position attention module (PAM), if requested in the config.
        if 'pam' in DAN_module_names:
            pam_module = get_attention_module_instance('pam',
                                                       self.input_dim,
                                                       out_dim=self.output_dim,
                                                       use_head=use_head)
            init_params(pam_module)
            self.dan_module_names.add('pam_module')
            self.pam_module = pam_module
Example #7
    def _init_attention_modules(self):

        args = self.args
        self.dan_module_names = set()
        DAN_module_names = {'cam', 'pam'} & set(args['abd_dan'])
        use_head = not args['abd_dan_no_head']
        self.use_dan = bool(DAN_module_names)

        # Identity pass-through branch, kept alongside the attention branches.
        before_module = get_attention_module_instance('identity',
                                                      self.output_dim,
                                                      use_head=use_head)
        self.dan_module_names.add('before_module')
        self.before_module = before_module
        if use_head:
            init_params(before_module)

        # Channel attention module (CAM), if requested in the config.
        if 'cam' in DAN_module_names:
            cam_module = get_attention_module_instance('cam',
                                                       self.output_dim,
                                                       use_head=use_head)
            init_params(cam_module)
            self.dan_module_names.add('cam_module')
            self.cam_module = cam_module

        # Position attention module (PAM), if requested in the config.
        if 'pam' in DAN_module_names:
            pam_module = get_attention_module_instance('pam',
                                                       self.output_dim,
                                                       use_head=use_head)
            init_params(pam_module)
            self.dan_module_names.add('pam_module')
            self.pam_module = pam_module

        # 1x1 convolution (with 2D dropout) that fuses the summed branch outputs.
        sum_conv = nn.Sequential(
            nn.Dropout2d(p=0.1, inplace=False),
            nn.Conv2d(self.output_dim, self.output_dim, kernel_size=1))
        init_params(sum_conv)
        self.sum_conv = sum_conv
Example #8
    def _init_classifier(self):

        # Single linear head; returned to the caller rather than assigned.
        classifier = nn.Linear(self.output_dim, self.num_classes)
        init_params(classifier)

        return classifier
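
Every example above delegates weight initialization to init_params from torchreid.utils.torchtools, which initializes the submodules of the given module in place. For orientation, here is a minimal sketch of the convention such helpers typically follow in torchreid-derived code (ABD-Net and similar forks); the exact rules below are an assumption, so consult torchtools.py in your checkout:

    # Hypothetical sketch of init_params; the real implementation lives in
    # torchreid/utils/torchtools.py and may differ in detail.
    import torch.nn as nn
    from torch.nn import init

    def init_params(x):
        """Initialize every submodule of `x` in place."""
        if x is None:
            return
        for m in x.modules():
            if isinstance(m, nn.Conv2d):
                # Kaiming init suits ReLU-activated convolutions.
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
                # Start batch norm as the identity transform.
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # Small-std Gaussian keeps classifier logits near zero at start.
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)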