def pdist(self, fX):
        """Compute pdist à-la scipy.spatial.distance.pdist

        Parameters
        ----------
        fX : (n, d) torch.Tensor
            Embeddings.

        Returns
        -------
        distances : (n * (n-1) / 2,) torch.Tensor
            Condensed pairwise distance matrix
        """

        n_sequences, _ = fX.size()
        distances = []

        for i in range(n_sequences - 1):

            if self.metric in ('cosine', 'angular'):
                d = 1. - F.cosine_similarity(
                    fX[i, :].expand(n_sequences - 1 - i, -1),
                    fX[i+1:, :], dim=1, eps=1e-8)

                if self.metric == 'angular':
                    d = torch.acos(torch.clamp(1. - d, -1 + 1e-6, 1 - 1e-6))

            elif self.metric == 'euclidean':
                d = F.pairwise_distance(
                    fX[i, :].expand(n_sequences - 1 - i, -1),
                    fX[i+1:, :], p=2, eps=1e-06).view(-1)

            distances.append(d)

        return torch.cat(distances)
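A minimal check of the method above (my own sketch, not part of the original snippet): assuming `loss_fn` is a hypothetical instance whose class defines `pdist` as shown and whose `metric` attribute is set to 'euclidean', the condensed output should match `scipy.spatial.distance.pdist` up to numerical tolerance.

import numpy as np
import torch
from scipy.spatial.distance import pdist as scipy_pdist

fX = torch.randn(10, 32)
condensed = loss_fn.pdist(fX)                    # hypothetical instance with metric='euclidean'
reference = scipy_pdist(fX.numpy(), metric='euclidean')
print(condensed.shape)                           # torch.Size([45]) == n * (n - 1) / 2
print(np.allclose(condensed.numpy(), reference, atol=1e-4))  # True (up to the eps term)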
Example #2
    def validate(self, val_loader, criterion):
        batch_time = AverageMeter()
        top1 = AverageMeter()
        val_loss = AverageMeter()

        # switch to evaluate mode
        self.model.eval()
        end = time.time()
        for i, valdata in enumerate(val_loader, 0):
            img0, img1, label = valdata
            opts.cuda = torch.cuda.is_available()
            img0, img1, label = (Variable(img0, volatile=True),
                                 Variable(img1, volatile=True),
                                 Variable(label, volatile=True))
            if opts.cuda:
                img0, img1, label = img0.cuda(), img1.cuda(), label.cuda()
            output1, output2 = self.model(img0, img1)
            # use the euclidean distance between the two image embeddings
            euclidean_distance = F.pairwise_distance(output1, output2)

            output = euclidean_distance.cpu().data.numpy()
            target = label.cpu().data.numpy()
            loss_ = criterion(output1, output2, label)
            val_loss.update(loss_)
            res = Accuracy(output, target)
            top1.update(res)
            batch_time.update(time.time() - end)
            end = time.time()
        print('validate: abs(output_euclidean_distance - label) => {}'.format(top1.avg[0]))

        return top1.avg[0]
Example #3
    def __init__(self, dset, args):
        super(LabelEmbedPlus, self).__init__(dset, args)
        if 'conv' in args.image_extractor:
            self.image_embedder = torch.nn.Sequential(
                torch.nn.Conv2d(dset.feat_dim, args.emb_dim, 7),
                torch.nn.ReLU(True), Reshape(-1, args.emb_dim))
        else:
            self.image_embedder = MLP(dset.feat_dim, args.emb_dim)

        self.compare_metric = lambda img_feats, pair_embed: -F.pairwise_distance(
            img_feats, pair_embed)
        self.train_forward = self.train_forward_triplet
        self.val_forward = self.val_forward_distance_fast

        input_dim = dset.feat_dim if args.clf_init else args.emb_dim
        self.attr_embedder = nn.Embedding(len(dset.attrs), input_dim)
        self.obj_embedder = nn.Embedding(len(dset.objs), input_dim)
        self.T = MLP(2 * input_dim, args.emb_dim, num_layers=args.nlayers)

        # init with word embeddings
        if args.emb_init:
            pretrained_weight = load_word_embeddings(args.emb_init, dset.attrs)
            self.attr_embedder.weight.data.copy_(pretrained_weight)
            pretrained_weight = load_word_embeddings(args.emb_init, dset.objs)
            self.obj_embedder.weight.data.copy_(pretrained_weight)

        # init with classifier weights
        elif args.clf_init:
            for idx, attr in enumerate(dset.attrs):
                at_id = dset.attrs.index(attr)
                weight = torch.load('%s/svm/attr_%d' %
                                    (args.data_dir, at_id)).coef_.squeeze()
                self.attr_embedder.weight[idx].data.copy_(
                    torch.from_numpy(weight))
            for idx, obj in enumerate(dset.objs):
                obj_id = dset.objs.index(obj)
                weight = torch.load('%s/svm/obj_%d' %
                                    (args.data_dir, obj_id)).coef_.squeeze()
                self.obj_embedder.weight[idx].data.copy_(torch.from_numpy(weight))

        # static inputs
        if args.static_inp:
            for param in self.attr_embedder.parameters():
                param.requires_grad = False
            for param in self.obj_embedder.parameters():
                param.requires_grad = False
Example #4
def triplet_evaluate(device, model, train_batch, test_batch):
    train_batch = train_batch.to(device)
    test_batch = test_batch.to(device)
    assert not model.training
    with torch.no_grad():
        train_embeds, *_ = model(train_batch)
        test_embeds, *_ = model(test_batch)

    dist_matrix = torch.stack([
        F.pairwise_distance(train_embeds, test_embeds[i], p=2)
        for i in range(test_embeds.size(0))
    ])
    preds = torch.argmin(dist_matrix, dim=1)
    labels = torch.tensor(range(train_batch.size(0)), device=device)
    corrects = torch.sum(preds == labels).item()
    acc = corrects / train_batch.size(0)
    return 1 - acc
Example #5
    def forward(self, feature_l, feature_r, label):
        if self.opt_style == 'resample':
            # feature constraint term
            E_w_f = F.pairwise_distance(feature_l,
                                        feature_r,
                                        p=1,
                                        keepdim=True)

            Q = self.margin
            e = 2.71828
            iden_contrastive = label * (2 / Q) * E_w_f**2 + (
                1 - label) * 2 * Q * e**(-2.77 * E_w_f / Q)

            total_loss = iden_contrastive.mean()
            return total_loss
        else:
            raise Exception(f'Unknown opt style: {self.opt_style}')
Example #6
def perceptual_features_reconstruction(list_attentions_a, list_attentions_b, factor=1.):
    loss = 0.

    for i, (a, b) in enumerate(zip(list_attentions_a, list_attentions_b)):
        bs, c, w, h = a.shape

        # a of shape (b, c, w, h) to (b, c * w * h)
        a = a.view(bs, -1)
        b = b.view(bs, -1)

        a = F.normalize(a, p=2, dim=-1)
        b = F.normalize(b, p=2, dim=-1)

        layer_loss = (F.pairwise_distance(a, b, p=2)**2) / (c * w * h)
        loss += torch.mean(layer_loss)

    return factor * (loss / len(list_attentions_a))
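A small self-contained usage sketch for the function above (my addition; the activation shapes and the two dummy lists are illustrative, and the usual `import torch` / `import torch.nn.functional as F` from the surrounding file are assumed):

list_attentions_a = [torch.randn(4, 64, 8, 8), torch.randn(4, 128, 4, 4)]   # e.g. activations of model A
list_attentions_b = [torch.randn(4, 64, 8, 8), torch.randn(4, 128, 4, 4)]   # e.g. activations of model B
loss = perceptual_features_reconstruction(list_attentions_a, list_attentions_b, factor=1.)
print(loss)   # scalar tensor; approximately 0 when the two activation lists are identical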
Example #7
    def _algo_2_vert_comp(self, sent1_block_a, sent2_block_a):
        comparison_feats = []
        ws_no_inf = [w for w in self.filter_widths if not np.isinf(w)]
        for ws1 in self.filter_widths:
            x1 = sent1_block_a[ws1]['max']
            for ws2 in self.filter_widths:
                x2 = sent2_block_a[ws2]['max']
                if (not np.isinf(ws1)
                        and not np.isinf(ws2)) or (np.isinf(ws1)
                                                   and np.isinf(ws2)):
                    comparison_feats.append(
                        F.cosine_similarity(x1, x2).unsqueeze(1))
                    comparison_feats.append(
                        F.pairwise_distance(x1, x2).unsqueeze(1))
                    comparison_feats.append(torch.abs(x1 - x2))

        return torch.cat(comparison_feats, dim=1)
Example #8
def get_accuracy(test, threshold, model):
    correct = 0
    for i in range(len(test)):
        img0 = test_data[test[i][0]].unsqueeze(0)
        img1 = test_data[test[i][1]].unsqueeze(0)
        true_label = test[i][2]
        img0, img1 = img0.cuda(), img1.cuda()

        output_f0, output_f1 = model(img0, img1)

        euclidean_distance = F.pairwise_distance(output_f0, output_f1)

        if euclidean_distance > threshold and true_label == -1:
            correct += 1
        if euclidean_distance <= threshold and true_label == 1:
            correct += 1
    return correct / len(test)
Example #9
 def forward(self, x, target_z):
     z, logp = x[:2]
     d = F.pairwise_distance(
         z,
         target_z,
         p=self.norm,
         eps=1e-6,
         keepdim=False,  # Default value
     )
     if len(d.shape) == 2:
         d = torch.mean(d, dim=1)  # Drop 1 dimension
     per_ex = self.weight * d - logp
     retval = torch.mean(per_ex)
     if retval != retval:
         raise ValueError("NaN")
     return retval
Example #10
def evaluate(dataiter, net, split, device):
    for i in range(2):
        x0, _, _ = next(dataiter)
        for j in range(10):
            _, x1, _ = next(dataiter)
            concatenated = torch.cat((x0, x1), 0)
            output1, output2 = net(
                Variable(x0).to(device),
                Variable(x1).to(device))
            euclidean_distance = F.pairwise_distance(output1, output2)
            #imshow(torchvision.utils.make_grid(concatenated),'%s, dissimilarity:%.2f'%(split, euclidean_distance.item()))
            imshow(
                torchvision.utils.make_grid(concatenated),
                '%s, dissimilarity:%.2f' %
                (split, torch.mean(euclidean_distance)))
            plt.savefig('%s_%d_%d.png' % (split, i, j))
            plt.close()
Example #11
    def test_inference_hmm_posterior_importance_sampling(self):
        observation = self._observation
        posterior_mean_correct = self._posterior_mean_correct

        posterior = self._model.posterior_distribution(samples,
                                                       observation=observation)
        posterior_mean_unweighted = posterior.mean_unweighted
        posterior_mean = posterior.mean

        l2_distance = float(
            F.pairwise_distance(posterior_mean, posterior_mean_correct).sum())

        util.debug('samples', 'posterior_mean_unweighted', 'posterior_mean',
                   'posterior_mean_correct', 'l2_distance')
        add_perf_score_importance_sampling(l2_distance)

        self.assertLess(l2_distance, 6)
Example #12
def compute(a_feats, b_feats):
    cmc = np.zeros(100, )
    for i in range(a_feats.size(0)):
        query_feat = a_feats[i].view(1, -1)
        query_feat = query_feat.expand(a_feats.size())
        dis = F.pairwise_distance(query_feat, b_feats, 2)

        dis, order = torch.sort(dis, 0)
        order = order.numpy().reshape(-1, )

        a = np.where(order == i)[0][0]

        for j in range(a, 100):
            cmc[j] += 1

    #print 'cmc[0]',cmc[0]*1.0/100
    return cmc[0] * 1.0 / 100
Example #13
def contrastive_loss(anchor_features, siamese_features, flags):
    total_loss = 0
    for i in range(4):
        _feature1 = anchor_features[i]
        _feature1 = torch.div(_feature1, torch.norm(_feature1, 2))
        _feature1 = torch.unsqueeze(_feature1, 0)

        _feature2 = siamese_features[i]
        _feature2 = torch.div(_feature2, torch.norm(_feature2, 2))
        _feature2 = torch.unsqueeze(_feature2, 0)
        dis = F.pairwise_distance(_feature1, _feature2, p=2)
        if flags[i] == 1:
            total_loss += dis
        else:
            total_loss += torch.clamp(1.5 - dis, 0)

    return total_loss
Example #14
    def construct_graph_object(self):
        """
        Constructs the entire Graph object to serve as input to the MPN, and stores it in self.graph_obj.
        """
        # Load Appearance Data
        reid_embeddings, node_feats = self._load_appearance_data()

        # Determine graph connectivity (i.e. edges) and compute edge features
        edge_ixs = self._get_edge_ixs(reid_embeddings)
        edge_feats_dict = compute_edge_feats_dict(
            edge_ixs=edge_ixs,
            det_df=self.graph_df,
            fps=self.seq_info_dict['fps'],
            use_cuda=self.inference_mode)
        edge_feats = [
            edge_feats_dict[feat_names]
            for feat_names in self.dataset_params['edge_feats_to_use']
            if feat_names in edge_feats_dict
        ]
        edge_feats = torch.stack(edge_feats).T

        # Compute embedding distances. The pairwise distance computation might cause
        # out-of-memory errors, hence we batch it (a generic helper with the same
        # pattern is sketched right after this example).
        emb_dists = []
        for i in range(0, edge_ixs[0].shape[0], 50000):
            emb_dists.append(
                F.pairwise_distance(
                    reid_embeddings[edge_ixs[0][i:i + 50000]],
                    reid_embeddings[edge_ixs[1][i:i + 50000]]).view(-1, 1))
        emb_dists = torch.cat(emb_dists, dim=0)

        # Add embedding distances to edge features if needed
        if 'emb_dist' in self.dataset_params['edge_feats_to_use']:
            edge_feats = torch.cat((edge_feats, emb_dists), dim=1)

        self.graph_obj = Graph(
            x=node_feats,
            edge_attr=torch.cat((edge_feats, edge_feats), dim=0),
            edge_index=torch.cat(
                (edge_ixs, torch.stack((edge_ixs[1], edge_ixs[0]))), dim=1))

        if self.inference_mode:
            self.graph_obj.reid_emb_dists = torch.cat((emb_dists, emb_dists))

        self.graph_obj.to(
            torch.device("cuda" if torch.cuda.is_available()
                         and self.inference_mode else "cpu"))
Example #15
    def forward(self, output1, id1, output2, id2, flag):
        _, inx1 = torch.sort(id1)
        out1 = output1[inx1]

        _, inx2 = torch.sort(id2)
        out2 = output2[inx2]

        euclidean_distance = F.pairwise_distance(out1, out2)

        loss_contrastive = torch.mean(
            (1 - flag) *
            torch.pow(torch.clamp(euclidean_distance - 0, min=0.0), 2) +
            (flag) *
            torch.pow(torch.clamp(self.margin -
                                  euclidean_distance, min=0.0), 2))

        return loss_contrastive
Example #16
 def kNN(self, n, input, target):
     # Return the indices of the self.k nearest same-class neighbours of sample n.
     distances = {}
     tmp = input.shape[1]
     length = len(target)
     for i in range(length):
         if n != i and target[n] == target[i]:
             dist = func.pairwise_distance(input[n].view(tmp, -1),
                                           input[i].view(tmp, -1)).sum()
             distances[i] = dist
     distances = sorted(distances.items(), key=lambda item: item[1])
     nums = []
     for i in range(len(distances)):
         if i < self.k:
             nums.append(distances[i][0])
         else:
             return nums
     return nums
Example #17
    def forward(self, output1, output2, label):
        '''
        Args:
            output1: embedding of image 1 (after the network)
            output2: embedding of image 2 (after the network)
            label: whether the two images belong to the same class;
                1 means different classes, 0 means the same class
        Returns: the contrastive loss

        '''
        euclidean_distance = F.pairwise_distance(output1, output2)  # compute the distance
        # Loss: for a same-class pair (label 0) we want a small distance, so a small
        # distance gives a small loss; for a different-class pair (label 1) a small
        # distance gives a large loss.
        loss_contrastive = torch.mean(
            (1 - label) * torch.pow(euclidean_distance, 2) + (label) *
            torch.pow(torch.clamp(self.margin -
                                  euclidean_distance, min=0.0), 2))

        return loss_contrastive
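A quick usage sketch of the loss above (my addition): it assumes a hypothetical `ContrastiveLoss(margin=...)` nn.Module whose `forward` is the method shown and whose constructor stores `self.margin`.

import torch

criterion = ContrastiveLoss(margin=2.0)     # hypothetical wrapper around the forward above
a = torch.randn(8, 128)
b = a.clone()                               # identical embeddings -> distance ~ 0
same = torch.zeros(8)                       # label 0: same class
diff = torch.ones(8)                        # label 1: different classes
print(criterion(a, b, same))                # ~0: same-class pairs at zero distance cost nothing
print(criterion(a, b, diff))                # ~4.0: different-class pairs at zero distance cost margin**2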
Example #18
    def forward(self, x, label=None):

        out_anchor = torch.mean(x[:, 1:, :], 1)
        out_positive = x[:, 0, :]
        stepsize = out_anchor.size()[0]

        output = -1 * (F.pairwise_distance(
            out_positive.unsqueeze(-1).expand(-1, -1, stepsize),
            out_anchor.unsqueeze(-1).expand(-1, -1, stepsize).transpose(0, 2))
                       **2)
        label = torch.from_numpy(numpy.asarray(range(0, stepsize))).cuda()
        nloss = self.criterion(output, label)
        prec1, _ = accuracy(output.detach().cpu(),
                            label.detach().cpu(),
                            topk=(1, 5))

        return nloss, prec1
Example #19
    def get_embedding_score(self, support_xf_ori, query_xf_ori, n_way,
                            query_sz):

        # sum up samples with support
        k_shot = int(support_xf_ori.size(0) / n_way)

        if self.use_relation_net:
            ch_sz, spatial_sz = support_xf_ori.size(1), support_xf_ori.size(2)
            # support_xf_ori: 25/5, 256, 5, 5
            # query_xf_ori: 75, 256, 5, 5
            # first expand
            support_xf_ori = support_xf_ori.unsqueeze(0).expand(
                query_sz, -1, -1, -1,
                -1).contiguous().view(query_sz * n_way * k_shot, ch_sz,
                                      spatial_sz, spatial_sz)
            query_xf_ori = query_xf_ori.unsqueeze(1).expand(
                -1, n_way * k_shot, -1, -1,
                -1).contiguous().view(query_sz * n_way * k_shot, ch_sz,
                                      spatial_sz, spatial_sz)
            embed_combine = torch.stack([support_xf_ori, query_xf_ori],
                                        dim=1).view(query_sz * n_way * k_shot,
                                                    -1, spatial_sz, spatial_sz)
            _out = self.relation2(self.relation1(embed_combine))

            _out = _out.view(_out.size(0), -1)
            score = self.fc(_out).view(query_sz, n_way, k_shot)

        else:
            support_xf = support_xf_ori.view(
                support_xf_ori.size(0),
                -1)  # size: 25/5 (support_sz/n_way) x feat_dim
            query_xf = query_xf_ori.view(
                query_xf_ori.size(0), -1)  # size: 75 (query_size) x feat_dim
            feat_dim = support_xf.size(-1)
            support_xf = support_xf.unsqueeze(0).expand(query_sz, -1,
                                                        -1).contiguous().view(
                                                            -1, feat_dim)
            query_xf = query_xf.unsqueeze(1).expand(-1, n_way * k_shot,
                                                    -1).contiguous().view(
                                                        -1, feat_dim)
            score = -F.pairwise_distance(support_xf, query_xf, p=2)
            score = score.view(query_sz, n_way, k_shot)

        # sum up here
        score = torch.sum(score, dim=2, keepdim=False)
        return score
Example #20
def triple_loss(args, targets, output, identity, eucledian):
    """
    output: [N, 3, 100, 100] tensor
    identity: tuple of 2x [N, 512-args.num_dims, 1, 1] tensors
    eucledian: tuple of 2x [N, args.num_dims, 1, 1] tensors
    """
    # Extract arguments
    x_eucledian = eucledian[0]                               # [N, args.num_dims, 1, 1]
    x_eucledian = x_eucledian.view(-1, x_eucledian.size(1))  # collapse to 2d [N, args.num_dims]
    y_eucledian = eucledian[1]                               # [N, args.num_dims, 1, 1]
    y_eucledian = y_eucledian.view(-1, y_eucledian.size(1))  # collapse to 2d [N, args.num_dims]

    x_identity = identity[0]                                 # [N, 512-args.num_dims, 1, 1]
    x_identity = x_identity.view(-1, x_identity.size(1))     # [N, 512-args.num_dims]
    y_identity = identity[1]                                 # [N, 512-args.num_dims, 1, 1]
    y_identity = y_identity.view(-1, y_identity.size(1))     # [N, 512-args.num_dims]

    # 1. Reconstruction loss
    recon_loss = reconstruction_loss(args, output, targets)

    # 2. Euclidean-space (rotation) loss
    eucledian_loss = EucledianVectorLoss(type=args.loss_type)
    rotation_loss = eucledian_loss(x_eucledian, y_eucledian)

    # 3. Identity loss (L2 distance)
    identity_loss = F.pairwise_distance(x_identity, y_identity, p=2)
    identity_loss = identity_loss.mean()

    total_loss = (args.alpha * rotation_loss + args.gamma * identity_loss
                  + (1 - args.alpha - args.gamma) * recon_loss)

    return (total_loss, rotation_loss, identity_loss, recon_loss)
Example #21
def vector():
    dataloader_pre_post = DataLoader(dataset_pre_post,
                                     shuffle=True,
                                     num_workers=1,
                                     batch_size=1)

    data_iter = iter(dataloader_pre_post)

    while True:

        anchor_tuple, positive_tuple, _, anchor, positive, negative = next(
            data_iter)

        anchor_in, positive_in, negative_in = Variable(anchor).cuda(
        ), Variable(positive).cuda(), Variable(negative).cuda()
        (anchor_output, positive_output,
         _) = net_pre_post(anchor_in, positive_in, negative_in)

        same_distance = F.pairwise_distance(anchor_output, positive_output)

        anchor_output = anchor_output.data.cpu().numpy()[0]
        anchor_output = (np.array2string(anchor_output,
                                         precision=3,
                                         separator='\t,\t',
                                         suppress_small=True))

        positive_output = positive_output.data.cpu().numpy()[0]
        positive_output = (np.array2string(positive_output,
                                           precision=3,
                                           separator='\t,\t',
                                           suppress_small=True))

        same_distance = str(same_distance.data.cpu().numpy()[0][0])
        same_distance = same_distance[:4]
        # output = str(anchor_output[:3]) + " ...." + str(anchor_output[-3:-1])
        time.sleep(3)

        socketio.emit(
            'vector', {
                'img_a': anchor_tuple,
                'img_p': positive_tuple,
                'vector_a': str(anchor_output),
                'vector_p': str(positive_output),
                'distance': same_distance
            })
Example #22
 def forward_multi(self, x1, x2):
     ys = []
     for i, x_s in enumerate([x1, x2]):
         if i == 0:
             x_mt = self.activation(self.fc1_left(x_s))
         else:
             x_mt = self.activation(self.fc1_right(x_s))
         x_mt = self.activation(self.fc3(x_mt))
         x_mt = self.activation(self.fc4(x_mt))
         ys.append(x_mt)
     if self.dist == 'linear':
         y = torch.cat((ys), 1)
         y = self.mt_activation(self.fc5(y))
     elif self.dist == 'euclidean':
         y = torch.exp(-F.pairwise_distance(ys[0], ys[1]))
     elif self.dist == 'cosine':
         y = torch.exp(-F.cosine_similarity(ys[0], ys[1]))
     return (y)
Example #23
def get_index(gf, query, qf):
    if opts.use_siamese:
        qf = qf.unsqueeze_(0).repeat(len(gf), 1)
        distance = F.pairwise_distance(qf, gf, keepdim=True)
        distance = distance.squeeze(1).cpu()
        distance = distance.numpy()
        # predict index
        index = np.argsort(distance)  # from small to large
    else:
        score = torch.mm(gf, query)
        score = score.squeeze(1).cpu()
        score = score.numpy()
        # predict index
        index = np.argsort(score)  # from small to large
        index = index[::-1]
        # index = index[0:2000]
        # good index
    return index
Example #24
def test(model, loader, criterion, test_losses, write_pred, out_file):
    with torch.no_grad():
        model.eval() 
        N = 0
        test_loss = 0.0
        for i, (x1, x2, targets) in enumerate(loader):
            output1, output2 = model(x1,x2) 
            N += x1.shape[0] 
            test_loss += x1.shape[0] * criterion(output1, output2 , targets).item() #problem here
            test_losses.append(test_loss/N)
            eudist = F.pairwise_distance(output1, output2, keepdim = True)
            auc_results = auc_scores(targets, eudist[:,0])
            target_log = targets.numpy()
            pred_log = eudist[:,0].numpy()
            assert len(target_log) == len(pred_log)
            if write_pred is True: 
                np.savetxt(out_file, np.c_[target_log, pred_log], fmt='\t'.join(['%i'] + ['%1.15f']))
        return test_loss/N, test_losses, auc_results
Example #25
    def get_loss(self, logits, gt_labels):
        cls_logits, pixel_loss, batch_center_vecs = logits
        gt = gt_labels.long()
        loss_cls = F.cross_entropy(cls_logits, gt)

        batch_size = gt_labels.size(0)
        unique_gt_labels = gt_labels.view(int(batch_size / 2), 2)[:, 0]
        # aux_loss_cls = F.cross_entropy(aux_logits, unique_gt_labels.long())

        aux_loss_cls = F.pairwise_distance(
            self.center_feat_bank[unique_gt_labels].cuda().detach(),
            batch_center_vecs, 2)
        self.update_center_vec(gt_labels, batch_center_vecs.detach())

        loss = (loss_cls + self.loss_local_factor * pixel_loss.mean() +
                self.loss_global_factor * aux_loss_cls.mean())
        return (loss, loss_cls, self.loss_local_factor * pixel_loss.mean(),
                self.loss_global_factor * aux_loss_cls.mean())
Example #26
def Test(net):
    folder_dataset_test = dset.ImageFolder(root=Config.testing_dir)
    siamese_dataset = SiameseNetworkDataset(
        imageFolderDataset=folder_dataset_test, should_invert=False)
    test_dataloader = DataLoader(siamese_dataset,
                                 num_workers=0,
                                 batch_size=1,
                                 shuffle=True)
    dataiter = iter(test_dataloader)
    x0, _, _ = next(dataiter)

    for i in range(10):
        _, x1, _ = next(dataiter)
        concatenated = torch.cat((x0, x1), 0)
        output1, output2 = net(Variable(x0).cuda(), Variable(x1).cuda())
        euclidean_distance = F.pairwise_distance(output1, output2)
        imshow(torchvision.utils.make_grid(concatenated),
               'Dissimilarity: {:.2f}'.format(euclidean_distance.item()))
Example #27
    def forward(self, output1, output2, output_class, siamese_label,
                class_label):
        euclidean_distance = F.pairwise_distance(output1, output2, 2)
        label = siamese_label.view(siamese_label.size()[0])

        loss_same = label * torch.pow(euclidean_distance, 2)
        loss_diff = (1 - label) * torch.pow(
            torch.clamp(self.margin - euclidean_distance, min=0.0), 2)
        # print("same{}\t\tdiff{}".format(torch.mean(loss_same), torch.mean(loss_diff)))

        loss_contrastive = torch.mean(loss_same + loss_diff)
        loss_classify = self.loss_function(output_class, class_label)

        print("loss_classify{}\t\tloss_contrastive{}".format(
            loss_classify, loss_contrastive))

        loss = loss_contrastive + loss_classify
        return loss
Example #28
 def cal_dis(x, y, num_category=20, all=False, n_frames=8):
     ave_dis = []
     for s in range(num_category):
         idx = []
         for i in range(x.shape[0]):
             category = info[str((i // n_frames if all else i) + start)]
             #category = int(info['video' + str(i + start)])
             value = category * 0.05
             if s != -1 and category != s:
                 continue
             idx.append(i)
         cluster = torch.stack(
             [torch.from_numpy(x[idx]),
              torch.from_numpy(y[idx])], dim=1)
         center = cluster.mean(0).unsqueeze(0)
         ave_dis.append(F.pairwise_distance(center, cluster, p=2).view(-1))
     ave_dis = torch.cat(ave_dis, 0).mean(0)
     print(ave_dis)
Example #29
def is_goal_unreachable(a_net,
                        state,
                        goal,
                        goal_dim,
                        margin,
                        device,
                        absolute_goal=False):
    state = torch.from_numpy(state[:goal_dim]).float().to(device)
    goal = torch.from_numpy(goal).float().to(device)
    if not absolute_goal:
        goal = state + goal
    inputs = torch.stack((state, goal), dim=0)
    outputs = a_net(inputs)
    s_embedding = outputs[0]
    g_embedding = outputs[1]
    dist = F.pairwise_distance(s_embedding.unsqueeze(0),
                               g_embedding.unsqueeze(0)).squeeze()
    return dist > margin
Example #30
def test():
    model.load_state_dict(torch.load('./checkpoints/2.pth')['model'])
    dataset = dset.ImageFolder(cfg.training_dir)
    siamese_dataset = FaceData(dataset,
                               transform=transforms.Compose([
                                   transforms.Resize((100, 100)),
                                   transforms.ToTensor()
                               ]))
    train_loader = DataLoader(siamese_dataset, batch_size=1, shuffle=True)
    for sample in train_loader:
        img0, img1, label = sample
        output1, output2 = model(img0, img1)
        euclidean_distance = F.pairwise_distance(output1,
                                                 output2,
                                                 keepdim=True)
        print(euclidean_distance, label)
        concatenated = torch.cat((sample[0], sample[1]), 0)
        imshow(torchvision.utils.make_grid(concatenated))
Example #31
def api(face_im):
    face_input = torch.unsqueeze(img_transform(face_im), dim=0)
    model.load_state_dict(
        torch.load(r'E:\code\FaceRecognition-tensorflow\checkpoints\2.pth')
        ['model'])
    dataset = dset.ImageFolder(cfg.training_dir)
    clss = dataset.class_to_idx.keys()
    for cls in clss:
        print(cls)
        cls_dir = os.path.join(cfg.training_dir, cls)
        for im in os.listdir(cls_dir):
            cls_im = Image.open(os.path.join(cls_dir, im))
            cls_input = torch.unsqueeze(img_transform(cls_im), dim=0)
            output1, output2 = model(face_input, cls_input)
            euclidean_distance = F.pairwise_distance(output1,
                                                     output2,
                                                     keepdim=True)
            print(int(euclidean_distance.detach()))
Example #32
    def forward(self, output1, output2, label):
        # Euclidean distance of two output feature vectors
        euclidean_distance = F.pairwise_distance(output1, output2)
        euclidean_distance = torch.pow(euclidean_distance, 2)

        # Normalization of the distance
        normalized_distance = 2 * (1 / (1 + torch.exp(-euclidean_distance)) - 0.5)

        # Contrastive Loss Function
        # # perform contrastive loss calculation with the distance
        # loss_contrastive = torch.mean((1 - label) * torch.pow(euclidean_distance, 2) +
        #                               (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))

        # Normalized Double-Margin Contrastive Loss Function
        loss = 0.5 * torch.mean(label * torch.clamp(normalized_distance - self.margin_positive, min=0.0) + (1 - label) *
                                torch.clamp(self.margin_negative - normalized_distance, min=0.0))

        return loss, normalized_distance
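The sigmoid-style normalization above squashes the squared distance into the interval [0, 1), so the two margins live on a fixed scale. A standalone check of that range (my addition, only `torch` assumed):

import torch

d2 = torch.tensor([0.0, 1.0, 4.0, 100.0])       # squared distances
norm_d = 2 * (1 / (1 + torch.exp(-d2)) - 0.5)   # same normalization as above
print(norm_d)                                   # ~[0.0000, 0.4621, 0.9640, 1.0000], monotonically approaching 1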
Example #33
def deepcompare_siam2stream_l2(input, params):
    def single(patch):
        return F.normalize(streams(patch, params))
    return - F.pairwise_distance(*map(single, input.split(1, dim=1)))
Example #34
    def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2)
        loss_contrastive = torch.mean(
            (1 - label) * torch.pow(euclidean_distance, 2) +
            label * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))

        return loss_contrastive