def __init__(self, rpool, whiten=None, L=3, eps=1e-6):
    """Regional pooling wrapper: pool per region, optionally whiten, then L2-normalize.

    Args:
        rpool: regional pooling operator applied to each region.
        whiten: optional whitening module applied after pooling (``None`` disables it).
        L: number of region scales/levels used by the regional pooling.
        eps: small constant guarding against division by zero in normalization.
    """
    super(Rpool, self).__init__()
    self.rpool = rpool
    self.whiten = whiten
    self.L = L
    self.norm = L2N()
    self.eps = eps
def __init__(self, features, pool, whiten, meta):
    """Assemble the retrieval network: backbone -> pooling -> optional whitening -> L2 norm.

    Args:
        features: iterable of backbone layers, wrapped into one ``nn.Sequential``.
        pool: global pooling module producing the image descriptor.
        whiten: optional descriptor whitening module.
        meta: dict of network metadata kept alongside the model.
    """
    super(ImageRetrievalNet, self).__init__()
    # Collapse the backbone layer list into a single sequential module.
    self.features = nn.Sequential(*features)
    self.pool = pool
    self.whiten = whiten
    self.norm = L2N()
    self.meta = meta
def __init__(self, features, lwhiten, pool, whiten, meta):
    """Retrieval network with an extra local-whitening stage before pooling.

    Args:
        features: iterable of backbone layers, wrapped into one ``nn.Sequential``.
        lwhiten: optional local whitening module applied to backbone features.
        pool: global pooling module producing the image descriptor.
        whiten: optional descriptor whitening module.
        meta: dict of network metadata kept alongside the model.
    """
    super(ImageRetrievalNet, self).__init__()
    # Collapse the backbone layer list into a single sequential module.
    self.features = nn.Sequential(*features)
    self.lwhiten = lwhiten
    self.pool = pool
    self.whiten = whiten
    self.norm = L2N()
    self.meta = meta
def __init__(
        self,
        n_classes: int,
        model_name: str = 'resnet50',
        pretrained: bool = True,
        pooling_name: str = 'adaptive',  # 'GeM',
        args_pooling: 'dict | None' = None,
        normalize: bool = True,
        use_fc: bool = False,
        fc_dim: int = 512,
        dropout: float = 0.0,
        loss_module: str = 'softmax'):
    """Metric-learning model: backbone -> pooling -> optional FC head -> classifier.

    Args:
        n_classes: number of output classes of the final classifier.
        model_name: backbone architecture name passed to ``self.get_backbone``.
        pretrained: whether to load pretrained backbone weights.
        pooling_name: 'adaptive'/'AdaptiveAvgPool2d' for average pooling, one of
            'MAC'/'SPoC'/'GeM'/'GeMmp'/'RMAC'/'Rpool' for a cirtorch pooling,
            or None/'identity' for no pooling.
        args_pooling: extra kwargs forwarded to the cirtorch pooling constructor.
            ``None`` means no extra kwargs (fixes the previous mutable ``{}``
            default, which is shared across calls).
        normalize: if True, L2-normalize the pooled descriptor via ``L2N``.
        use_fc: if True, insert a BN -> Dropout -> Linear -> BN projection head
            reducing features to ``fc_dim``.
        fc_dim: output dimension of the projection head (and backbone head size).
        dropout: dropout probability inside the projection head.
        loss_module: 'arcface' selects ``ArcMarginProduct``; anything else uses a
            plain ``nn.Linear`` classifier.

    Raises:
        ValueError: if ``pooling_name`` is not one of the supported options.
    """
    super().__init__()
    # None is a sentinel for {} so the default is not a shared mutable object.
    if args_pooling is None:
        args_pooling = {}
    self.backbone, final_in_features = self.get_backbone(
        model_name, pretrained, num_classes=fc_dim)

    if pooling_name in ('AdaptiveAvgPool2d', 'adaptive'):
        self.pooling = nn.AdaptiveAvgPool2d(1)
    elif pooling_name in ('MAC', 'SPoC', 'GeM', 'GeMmp', 'RMAC', 'Rpool'):
        self.pooling = getattr(cirtorch.pooling, pooling_name)(**args_pooling)
    elif pooling_name is None or pooling_name.lower() == 'identity':
        self.pooling = nn.Identity()
    else:
        # Include the offending value so misconfigurations are easy to spot.
        raise ValueError(f"Incorrect pooling name: {pooling_name!r}")

    self.norm = L2N() if normalize else None

    self.use_fc = use_fc
    if use_fc:
        self.final_block = nn.Sequential(
            OrderedDict([('bn1', nn.BatchNorm1d(final_in_features)),
                         ('dropout', nn.Dropout(p=dropout)),
                         ('fc2', nn.Linear(final_in_features, fc_dim)),
                         ('bn2', nn.BatchNorm1d(fc_dim))]))
        self._init_params()
        # After the projection head, the classifier input is fc_dim wide.
        final_in_features = fc_dim

    self.loss_module = loss_module
    if loss_module == 'arcface':
        self.final = ArcMarginProduct(final_in_features, n_classes)
    else:
        self.final = nn.Linear(final_in_features, n_classes)
def __init__(self, d_in, d_out):
    """Learnable whitening: linear map ``d_in -> d_out`` followed by L2 normalization.

    Args:
        d_in: input feature dimension.
        d_out: output (whitened) feature dimension.
    """
    super(Whiten_layer, self).__init__()
    self.w = nn.Linear(d_in, d_out)
    self.norm = L2N()