def forward(self, x):
    x = self.backbone(x)
    dis_gen2cen, dis_gen2ori, thresholds, amplified_thresholds, embed_gen = None, None, None, None, None
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)
    if hasattr(self, 'decorrelation'):
        gap = self.decorrelation(gap)
    embed_fea = self.embeddingLayer(gap) if hasattr(self, 'embeddingLayer') else gap
    centroids = F.normalize(self.centroids, dim=1, p=2) if self.norm_centroid else self.centroids

    SIMI = Similarity(scaled=self.scaled)
    sim_fea2cen = getattr(SIMI, self.similarity)(embed_fea, centroids)

    DIST = Distance(scaled=self.scaled)
    dis_fea2cen = getattr(DIST, self.distance)(embed_fea, centroids)
    dis_cen2cen = getattr(DIST, self.distance)(centroids, centroids)
    dis_thr2thr = self.cal_thr2thr()

    # if hasattr(self, 'estimator'):
    #     dis_gen2cen = getattr(DIST, self.distance)(embed_gen, centroids)
    #     dis_gen2ori = getattr(DIST, self.distance)(embed_gen, self.origin)

    return {
        "gap": gap,
        "embed_fea": embed_fea,
        "embed_gen": embed_gen,
        "sim_fea2cen": sim_fea2cen,
        "dis_fea2cen": dis_fea2cen,
        "dis_cen2cen": dis_cen2cen,
        "dis_thr2thr": dis_thr2thr,
        "thresholds": self.thresholds
    }
def forward(self, x):
    x = self.backbone(x)
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)  # [n, backbone_c]
    embed_fea = gap
    # embed_fea = self.embeddingLayer(gap)  # [n, embed_dim]
    norm_fea = torch.norm(embed_fea, dim=1, p=2, keepdim=True)  # norm length of each image feature [n, 1]
    embed_fea_normed = F.normalize(embed_fea, dim=1, p=2)  # [n, embed_dim]
    centroids = self.centroids  # [class, embed_dim]
    centroids_normed = F.normalize(centroids, dim=1, p=2)  # [class, embed_dim]

    SIMI = Similarity()
    # dotproduct: X*W = ||X|| * ||W|| * cos(X,W)  -> [n, class]
    dotproduct_fea2cen = getattr(SIMI, "dotproduct")(embed_fea, centroids)
    # cosine: cos(X,W)  -> [n, class]
    # cosine_fea2cen = getattr(SIMI, "dotproduct")(embed_fea_normed, centroids_normed)
    # normweight: ||X|| * cos(X,W)  -> [n, class]
    normweight_fea2cen = getattr(SIMI, "dotproduct")(embed_fea, centroids_normed)
    energy = torch.logsumexp(normweight_fea2cen, dim=1, keepdim=False)  # [n]

    return {
        # "gap": gap,                                # [n, self.feat_dim] GAP features from the backbone
        # "embed_fea": embed_fea,                    # [n, embed_dim] embedded features
        "norm_fea": norm_fea,                        # [n, 1] norm of each image's features, keepdim for post-processing
        # "dotproduct_fea2cen": dotproduct_fea2cen,  # [n, num_classes]
        # "cosine_fea2cen": cosine_fea2cen,          # [n, num_classes]
        "normweight_fea2cen": normweight_fea2cen,    # [n, num_classes]
        "energy": energy                             # [n]
    }
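# NOTE: the forward passes in this file call getattr(SIMI, "dotproduct")(embed_fea, centroids)
# without showing the Similarity helper itself; its behaviour is only described by the comment
# "X*W = ||X|| * ||W|| * cos(X,W)". The class below is a minimal sketch of that assumed interface
# (a plain matrix product against the transposed centroids), not the repository's actual
# implementation; the name _SimilaritySketch and the `scaled` handling are illustrative assumptions.
import torch


class _SimilaritySketch:
    def __init__(self, scaled=False):
        self.scaled = scaled

    def dotproduct(self, features, centroids):
        # features: [n, embed_dim], centroids: [num_classes, embed_dim] -> [n, num_classes]
        out = features @ centroids.t()
        if self.scaled:
            # hypothetical temperature-style scaling; the real class may do something different
            out = out / features.size(1) ** 0.5
        return out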
def forward(self, x):
    x = self.backbone(x)
    dis_gen2cen, dis_gen2ori, thresholds, amplified_thresholds, embed_gen = None, None, None, None, None
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)
    if hasattr(self, 'thresholds'):
        thresholds = self.thresholds
        gen = whitennoise_generator(self.estimator, gap)
        embed_gen = self.embeddingLayer(gen) if hasattr(self, 'embeddingLayer') else gen
        amplified_thresholds = self.thresholds * self.amplifier
    embed_fea = self.embeddingLayer(gap) if hasattr(self, 'embeddingLayer') else gap
    centroids = F.normalize(self.centroids, dim=1, p=2) if self.norm_centroid else self.centroids

    SIMI = Similarity(scaled=self.scaled)
    sim_fea2cen = getattr(SIMI, self.similarity)(embed_fea, centroids)

    DIST = Distance(scaled=self.scaled)
    dis_fea2cen = getattr(DIST, self.distance)(embed_fea, centroids)
    if hasattr(self, 'thresholds'):
        dis_gen2cen = getattr(DIST, self.distance)(embed_gen, centroids)
        dis_gen2ori = getattr(DIST, self.distance)(embed_gen, self.origin)

    return {
        "gap": gap,
        "embed_fea": embed_fea,
        "embed_gen": embed_gen,
        "sim_fea2cen": sim_fea2cen,
        "dis_fea2cen": dis_fea2cen,
        "dis_gen2cen": dis_gen2cen,
        "dis_gen2ori": dis_gen2ori,
        "amplified_thresholds": amplified_thresholds,
        "thresholds": thresholds
    }
def forward(self, input):
    x = self.backbone(input)
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)
    embed_fea = self.embeddingLayer(gap) if hasattr(self, 'embeddingLayer') else gap

    gen = self.generator(input)
    embed_gen = self.embeddingLayer(gen) if hasattr(self, 'embeddingLayer') else gen

    centroids = F.normalize(self.centroids, dim=1, p=2) if self.norm_centroid else self.centroids

    SIMI = Similarity(scaled=self.scaled)
    sim_fea2cen = getattr(SIMI, self.similarity)(embed_fea, centroids)

    DIST = Distance(scaled=self.scaled)
    dis_fea2cen = getattr(DIST, self.distance)(embed_fea, centroids)
    dis_gen2cen = getattr(DIST, self.distance)(embed_gen, centroids)
    dis_gen2ori = getattr(DIST, self.distance)(embed_gen, self.origin)

    thresholds = None
    if hasattr(self, 'thresholds'):
        thresholds = self.thresholds

    return {
        "gap": gap,
        "embed_fea": embed_fea,
        "embed_gen": embed_gen,
        "sim_fea2cen": sim_fea2cen,
        "dis_fea2cen": dis_fea2cen,
        "dis_gen2cen": dis_gen2cen,
        "dis_gen2ori": dis_gen2ori,
        "thresholds": thresholds
    }
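# A hedged sketch of how the distances and thresholds returned by these variants might be consumed
# at test time: assign each sample to its nearest centroid and reject it as "unknown" when the
# distance exceeds that class's threshold. The rejection rule, the assumed [num_classes] threshold
# shape, and the unknown label (-1) are illustrative assumptions, not taken from the source.
import torch


def _openset_predict_sketch(output, unknown_label=-1):
    dis_fea2cen = output["dis_fea2cen"]      # [n, num_classes] distance to each centroid
    thresholds = output["thresholds"]        # assumed [num_classes] per-class thresholds
    min_dist, pred = dis_fea2cen.min(dim=1)  # nearest centroid per sample
    pred[min_dist > thresholds[pred]] = unknown_label
    return pred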
def forward(self, x):
    x = self.backbone(x)
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)
    embed_fea = self.embeddingLayer(gap)
    norm_fea = torch.norm(embed_fea, dim=1, p=2, keepdim=True)
    embed_fea_normed = F.normalize(embed_fea, dim=1, p=2)
    centroids = self.centroids
    centroids_normed = F.normalize(centroids, dim=1, p=2)

    SIMI = Similarity()
    # dotproduct: X*W = ||X|| * ||W|| * cos(X,W), ignoring the transposition
    dotproduct_fea2cen = getattr(SIMI, "dotproduct")(embed_fea, centroids)
    # cosine: cos(X,W), ignoring the transposition
    cosine_fea2cen = getattr(SIMI, "dotproduct")(embed_fea_normed, centroids_normed)
    # normweight: ||X|| * cos(X,W)
    normweight_fea2cen = getattr(SIMI, "dotproduct")(embed_fea, centroids_normed)

    return {
        "gap": gap,                                 # [n, self.feat_dim] GAP features from the backbone
        "embed_fea": embed_fea,                     # [n, embed_dim] embedded features
        "norm_fea": norm_fea,                       # [n, 1] norm of each image's features, keepdim for post-processing
        "dotproduct_fea2cen": dotproduct_fea2cen,   # [n, num_classes]
        "cosine_fea2cen": cosine_fea2cen,           # [n, num_classes]
        "normweight_fea2cen": normweight_fea2cen    # [n, num_classes]
    }
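# Sanity check for the three similarities returned above: with dotproduct(X, W) = X @ W.T,
# dotproduct = ||X|| * ||W|| * cos(X, W) and normweight = ||X|| * cos(X, W). A small standalone
# verification under that assumption (random tensors only, no project code involved).
import torch
import torch.nn.functional as F


def _check_similarity_identities(n=4, num_classes=8, embed_dim=16):
    x = torch.randn(n, embed_dim)
    w = torch.randn(num_classes, embed_dim)

    dotproduct = x @ w.t()                                          # ||X|| * ||W|| * cos(X,W)
    cosine = F.normalize(x, dim=1) @ F.normalize(w, dim=1).t()      # cos(X,W)
    normweight = x @ F.normalize(w, dim=1).t()                      # ||X|| * cos(X,W)

    x_norm = x.norm(dim=1, keepdim=True)                            # [n, 1]
    w_norm = w.norm(dim=1, keepdim=True).t()                        # [1, num_classes]

    assert torch.allclose(dotproduct, cosine * x_norm * w_norm, atol=1e-4)
    assert torch.allclose(normweight, cosine * x_norm, atol=1e-4)


_check_similarity_identities()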
def forward(self, x):
    x = self.backbone(x)
    dis_gen2cen, dis_gen2ori, thresholds, amplified_thresholds, embed_gen = None, None, None, None, None
    gap = x
    if hasattr(self, 'estimator'):
        thresholds = self.thresholds
        gen = self.estimator.sampler(gap)
        embed_gen = self.embeddingLayer(gen) if hasattr(self, 'embeddingLayer') else gen
    embed_fea = self.embeddingLayer(gap) if hasattr(self, 'embeddingLayer') else gap
    # collapse the extra dimensions of the embedding with the learned `fuse` weights into a 2-D tensor
    embed_fea_2d = (self.fuse * embed_fea).sum(dim=2, keepdim=False).squeeze(dim=-1)
    centroids = F.normalize(self.centroids, dim=1, p=2) if self.norm_centroid else self.centroids

    SIMI = Similarity(scaled=self.scaled)
    sim_fea2cen = getattr(SIMI, self.similarity)(embed_fea_2d, centroids)

    DIST = Distance(scaled=self.scaled)
    dis_fea2cen = getattr(DIST, self.distance)(embed_fea_2d, centroids)
    if hasattr(self, 'estimator'):
        dis_gen2cen = getattr(DIST, self.distance)(embed_gen, centroids)
        dis_gen2ori = getattr(DIST, self.distance)(embed_gen, self.origin)

    return {
        "gap": gap,
        "embed_fea": embed_fea,
        "embed_gen": embed_gen,
        "sim_fea2cen": sim_fea2cen,
        "dis_fea2cen": dis_fea2cen,
        "dis_gen2cen": dis_gen2cen,
        "dis_gen2ori": dis_gen2ori,
        "thresholds": thresholds
    }
def forward(self, x):
    x = self.backbone(x)
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)
    embed_fea = self.embeddingLayer(gap) if hasattr(self, 'embeddingLayer') else gap

    SIMI = Similarity(scaled=self.scaled)
    centroids = F.normalize(self.centroids, dim=1, p=2) if self.norm_centroid else self.centroids
    # look up the configured similarity on the Similarity helper
    sim_fea2cen = getattr(SIMI, self.similarity)(embed_fea, centroids)

    return {
        "gap": gap,  # pooled feature vector, consistent with the other variants
        "embed_fea": embed_fea,
        "sim_fea2cen": sim_fea2cen
    }
def forward(self, x):
    x = self.backbone(x)
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)
    embed_fea = self.embeddingLayer(gap)
    embed_fea_norm = F.normalize(embed_fea, dim=1, p=2)
    centroids = self.centroids
    centroids_norm = F.normalize(centroids, dim=1, p=2)

    SIMI = Similarity()
    dotproduct_fea2cen = getattr(SIMI, "dotproduct")(embed_fea, centroids)
    # cosine similarity equals the dot product of l2-normalized vectors; computed this way for efficiency
    cosine_fea2cen = getattr(SIMI, "dotproduct")(embed_fea_norm, centroids_norm)
    # # re-range to a distance definition in [0, 1], where smaller means more similar
    # cosine_fea2cen = (1.0 - cosine_fea2cen) / 2.0

    return {
        "gap": gap,                                 # [n, self.feat_dim]
        "embed_fea": embed_fea,                     # [n, embed_dim]
        "dotproduct_fea2cen": dotproduct_fea2cen,   # [n, num_classes]
        "cosine_fea2cen": cosine_fea2cen            # [n, num_classes]
    }
def forward(self, x):
    x = self.backbone(x)
    gap = (F.adaptive_avg_pool2d(x, 1)).view(x.size(0), -1)
    embed_fea = self.embeddingLayer(gap)
    embed_fea_norm = F.normalize(embed_fea, dim=1, p=2)
    centroids = self.centroids
    centroids_norm = F.normalize(centroids, dim=1, p=2)

    SIMI = Similarity()
    dotproduct_fea2cen = getattr(SIMI, "dotproduct")(embed_fea, centroids)        # ||X|| * ||W|| * cos(X,W)
    cosine_fea2cen = getattr(SIMI, "dotproduct")(embed_fea_norm, centroids_norm)  # cos(X,W)
    normweight_fea2cen = getattr(SIMI, "dotproduct")(embed_fea, centroids_norm)   # ||X|| * cos(X,W)

    return {
        "gap": gap,                                 # [n, self.feat_dim]
        "embed_fea": embed_fea,                     # [n, embed_dim]
        "dotproduct_fea2cen": dotproduct_fea2cen,   # [n, num_classes]
        "cosine_fea2cen": cosine_fea2cen,           # [n, num_classes]
        "normweight_fea2cen": normweight_fea2cen    # [n, num_classes]
    }
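# A hedged usage sketch for the outputs above: during closed-set training, "normweight_fea2cen"
# (||X|| * cos(X,W)) can serve directly as class logits for cross-entropy, and the same logits give
# the logsumexp "energy" score computed by the earlier variant for rejecting unknowns at test time.
# The loss choice and the rejection threshold are illustrative assumptions, not taken from the source.
import torch
import torch.nn.functional as F


def _training_step_sketch(output, target, unknown_energy_threshold=-5.0):
    logits = output["normweight_fea2cen"]      # [n, num_classes]
    loss = F.cross_entropy(logits, target)     # standard closed-set objective (assumed)

    # open-set scoring: higher energy suggests a known class, per the logsumexp definition above
    energy = torch.logsumexp(logits, dim=1)    # [n]
    predicted_unknown = energy < unknown_energy_threshold
    return loss, predicted_unknown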
# sim_dis = SimDistance(dim_data)
# sim_cor = SimCorrelation(dim_data)
# sim_mu = SimMutual(dim_data)


def calc_and_output(sim):
    """Cluster a similarity matrix and report the ARI against the ground-truth labels."""
    # labels_predict = k_means(sim, 6)
    model = AgglomerativeClustering(n_clusters=8, affinity='euclidean')
    labels_predict = model.fit(sim).labels_
    # labels_predict = knn_model.predict(np.max(sim) - sim)
    print('ARI:', ARI(labels, labels_predict))
    return labels_predict, ARI(labels, labels_predict)


sim_data = Similarity(dim_data, alpha=0.6, beta=0.3)
pred, ari = calc_and_output(sim_data)
rel = RelevanceMatrix(pred)
print(len(rel[rel > 1e-10]))

# iteratively mix the similarity matrix with the relevance matrix built from the current prediction,
# stopping once no entry of sim_next exceeds the threshold
sim_next = sim_data
l0 = len(sim_next[sim_next > 1e-10])
while l0 != 0:
    print('l0:', l0)
    # print(sim_next, rel)
    sim_next = 0.01 * sim_next + 0.99 * rel
    pred, ari = calc_and_output(sim_next)
    rel = get_normalize(RelevanceMatrix(pred))
    l0 = len(sim_next[sim_next > 1e-5])

# calc_and_output(sim_dis)
# calc_and_output(sim_cor)
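# Alternative clustering call, sketched under the assumption that `sim_data` is a pairwise
# similarity matrix: AgglomerativeClustering can also operate on a precomputed *distance* matrix
# (affinity='precomputed' with a non-ward linkage), so the similarity is first converted to a
# distance as in the commented-out knn line above. Illustrative only; the pipeline above instead
# clusters the similarity rows as feature vectors.
import numpy as np
from sklearn.cluster import AgglomerativeClustering


def cluster_precomputed_sketch(sim, n_clusters=8):
    dist = np.max(sim) - sim              # convert similarity to a distance-like matrix
    np.fill_diagonal(dist, 0.0)           # self-distance should be zero
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    affinity='precomputed',
                                    linkage='average')
    return model.fit(dist).labels_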