Example No. 1
0
    def forward(self, current, target, labels):
        """Cross-branch triplet loss over two feature sets sharing `labels`.

        Each branch's features are L2-normalized, a pairwise Euclidean
        distance matrix is built, and the hardest positive/negative pairs
        are mined.  The loss then takes the WORST anchor-positive distance
        and the BEST anchor-negative distance across the two branches, so
        the margin is enforced against the harder of the two.

        Args:
            current: feature matrix, presumably (batch, dim) — TODO confirm.
            target: feature matrix of the same shape as `current`.
            labels: identity labels aligned with the batch dimension.

        Returns:
            Scalar margin ranking loss from `self.ranking_loss`.
        """
        current_global_feat = torch.nn.functional.normalize(current, dim=1, p=2)
        current_dist_mat = euclidean_dist(current_global_feat, current_global_feat)
        current_dist_ap, current_dist_an = hard_example_mining(current_dist_mat, labels)

        target_global_feat = torch.nn.functional.normalize(target, dim=1, p=2)
        target_dist_mat = euclidean_dist(target_global_feat, target_global_feat)
        target_dist_ap, target_dist_an = hard_example_mining(target_dist_mat, labels)

        # Hardest across both branches: largest positive distance,
        # smallest negative distance.
        max_ap, _ = torch.max(torch.stack((current_dist_ap, target_dist_ap)), 0)
        min_an, _ = torch.min(torch.stack((current_dist_an, target_dist_an)), 0)

        # y = 1 tells MarginRankingLoss the first input (min_an) should
        # rank higher than the second (max_ap).
        y = torch.ones_like(max_ap)  # idiomatic form of zeros_like(...).fill_(1)
        loss = self.ranking_loss(min_an, max_ap, y)
        return loss
Example No. 2
0
    def forward(self, global_feat, labels):
        """Batch-hard triplet loss in margin-ranking form.

        Features are L2-normalized, pairwise Euclidean distances computed,
        and the hardest positive/negative distance per anchor mined.

        Args:
            global_feat: feature matrix, presumably (batch, dim) — TODO confirm.
            labels: identity labels aligned with the batch dimension.

        Returns:
            Scalar margin ranking loss.
        """
        global_feat = torch.nn.functional.normalize(global_feat, dim=1, p=2)
        dist_mat = euclidean_dist(global_feat, global_feat)
        dist_ap, dist_an = hard_example_mining(dist_mat, labels)

        # With y = 1, MarginRankingLoss assumes the first input (dist_an)
        # should be ranked higher (larger) than the second (dist_ap).
        y = torch.ones_like(dist_an)  # idiomatic form of zeros_like(...).fill_(1)
        loss = self.ranking_loss(dist_an, dist_ap, y)

        if self.learning_weight:
            # TODO: apply learned uncertainty weighting, e.g.
            # loss = 0.5 * torch.exp(-self.uncertainty) * loss + self.uncertainty
            pass

        return loss
Example No. 3
0
    def forward(self, x, labels):
        """Center loss: mean distance from each feature to its class center.

        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size,).
                (Original docstring said (num_classes); the assert below
                shows it must match x's batch dimension.)

        Returns:
            Scalar loss, optionally reweighted by a learned uncertainty.
        """
        assert x.size(0) == labels.size(
            0), "features.size(0) is not equal to labels.size(0)"

        batch_size = x.size(0)
        # Distance from every sample to every class center: (batch, num_classes).
        dist_mat = euclidean_dist(x, self.centers)

        # Device-agnostic placement: generalizes the original
        # `if self.centers.is_cuda: classes = classes.cuda()` to any device
        # the centers live on (CPU, CUDA, MPS, ...).
        classes = torch.arange(self.num_classes, device=self.centers.device).long()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        # mask[i, c] == 1 exactly when sample i belongs to class c, so the
        # masked sum keeps only each sample's distance to its own center.
        mask = labels.eq(classes.expand(batch_size, self.num_classes)).float()
        dist = dist_mat * mask
        loss = dist.sum() / batch_size

        if self.learning_weight:
            # Homoscedastic-uncertainty weighting: loss * exp(-s)/2 + s.
            loss = 0.5 * torch.exp(-self.uncertainty) * loss + self.uncertainty
            loss = loss.squeeze(-1)

        return loss
Example No. 4
0
def compute_dist(feat, if_re_ranking):
    """Pairwise distance matrix over `feat`, optionally k-reciprocal re-ranked.

    Args:
        feat: feature tensor, one row per sample.
        if_re_ranking: when True, use `re_ranking` (numpy result converted
            back to a tensor); otherwise plain Euclidean distances.

    Returns:
        Distance tensor clamped into [1e-12, 1e+12].
    """
    if not if_re_ranking:
        dist_matrix = euclidean_dist(feat, feat)
    else:
        ranked = re_ranking(feat, feat, k1=20, k2=6, lambda_value=0.3)
        dist_matrix = torch.from_numpy(ranked)
    # Clamp away zeros/extremes before downstream use.
    dist_matrix.clamp_(min=1e-12, max=1e+12)
    return dist_matrix
Example No. 5
0
def get_euclidean_indices(qf, gf):
    """Gallery indices sorted by ascending Euclidean distance per query.

    Side effect: dumps the query-to-query distance matrix to 'qf_dist.npy'
    in the working directory.
    NOTE(review): hard-coded debug dump — kept to preserve behavior, but
    consider removing or parameterizing the path.
    """
    np.save('qf_dist.npy', euclidean_dist(qf, qf).cpu().numpy())
    query_gallery_dist = euclidean_dist(qf, gf).cpu().numpy()
    return np.argsort(query_gallery_dist, axis=1)
Example No. 6
0
def re_ranking(probFea, galFea, k1, k2, lambda_value, local_distmat=None, only_local=False):
    """k-reciprocal encoding re-ranking (Zhong et al., CVPR 2017), numpy version.

    Builds a Jaccard distance from k-reciprocal nearest-neighbor sets and
    blends it with the original distance:
        final = (1 - lambda_value) * jaccard + lambda_value * original.

    Args:
        probFea: query feature tensor, (num_query, feat_dim).
        galFea: gallery feature tensor, (num_gallery, feat_dim).
        k1: neighborhood size for the reciprocal sets.
        k2: neighborhood size for local query expansion (skipped if 1).
        lambda_value: blending weight for the original distance.
        local_distmat: optional precomputed (numpy) distance matrix, added to
            the Euclidean one, or used alone when only_local=True.
        only_local: if True, skip feature distances and use local_distmat.

    Returns:
        numpy array (num_query, num_gallery) of re-ranked distances.
    """
    # if feature vector is numpy, you should use 'torch.tensor' transform it to tensor
    query_num = probFea.size(0)
    all_num = query_num + galFea.size(0)
    if only_local:
        original_dist = local_distmat
    else:
        feat = torch.cat([probFea, galFea])
        distmat = euclidean_dist(feat, feat)
        original_dist = distmat.cpu().numpy()
        del feat
        if local_distmat is not None:  # idiom fix: was "if not local_distmat is None"
            original_dist = original_dist + local_distmat
    gallery_num = original_dist.shape[0]
    # Normalize each column by its max, then transpose so rows index samples.
    original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
    # float16 halves memory; NOTE(review): it can lose precision on very
    # large galleries — confirm acceptable for your data sizes.
    V = np.zeros_like(original_dist).astype(np.float16)
    # Partial sort suffices: only the top k1+1 neighbors are ever consulted.
    initial_rank = np.argpartition(original_dist, range(1, k1 + 1))

    logger.info('starting re_ranking')
    for i in range(all_num):
        # k-reciprocal neighbors: keep j only if i is also in j's top-k1.
        forward_k_neigh_index = initial_rank[i, :k1 + 1]
        backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
        fi = np.where(backward_k_neigh_index == i)[0]
        k_reciprocal_index = forward_k_neigh_index[fi]
        k_reciprocal_expansion_index = k_reciprocal_index
        for j in range(len(k_reciprocal_index)):
            # Expand with each candidate's own half-size reciprocal set when
            # it overlaps the anchor's set by more than 2/3.
            candidate = k_reciprocal_index[j]
            candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2)) + 1]
            candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
                                               :int(np.around(k1 / 2)) + 1]
            fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
            if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
                    candidate_k_reciprocal_index):
                k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)

        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
        # Gaussian-kernel weights over the expanded reciprocal set.
        weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
        V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
    original_dist = original_dist[:query_num, ]
    if k2 != 1:
        # Local query expansion: average each row of V over its k2 neighbors.
        V_qe = np.zeros_like(V, dtype=np.float16)
        for i in range(all_num):
            V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
        V = V_qe
        del V_qe
    del initial_rank
    # Inverted index: for each column, the rows carrying nonzero weight.
    invIndex = []
    for i in range(gallery_num):
        invIndex.append(np.where(V[:, i] != 0)[0])

    jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)

    for i in range(query_num):
        # Accumulate the elementwise-min overlap between row i of V and
        # every row sharing a nonzero column with it.
        temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)
        indNonZero = np.where(V[i, :] != 0)[0]
        indImages = [invIndex[ind] for ind in indNonZero]
        for j in range(len(indNonZero)):
            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
                                                                               V[indImages[j], indNonZero[j]])
        jaccard_dist[i] = 1 - temp_min / (2 - temp_min)

    final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
    del original_dist
    del V
    del jaccard_dist
    final_dist = final_dist[:query_num, query_num:]
    return final_dist
Example No. 7
0
def torch_re_ranking(probFeat, galFeat, k1, k2, lambda_value, local_distmat=None, only_local=False):
    """k-reciprocal re-ranking implemented with torch tensors.

    Same algorithm as the numpy `re_ranking`: build k-reciprocal neighbor
    sets, turn them into Gaussian-weighted membership vectors V, derive a
    Jaccard distance, and blend it with the original distance.

    Args:
        probFeat: query feature tensor, (num_query, feat_dim).
        galFeat: gallery feature tensor, (num_gallery, feat_dim).
        k1: neighborhood size for the reciprocal sets.
        k2: neighborhood size for local query expansion (skipped if 1).
        lambda_value: blending weight for the original distance.
        local_distmat: optional precomputed distance tensor, added to the
            Euclidean one, or used alone when only_local=True.
        only_local: if True, skip feature distances and use local_distmat.

    Returns:
        Tensor (num_query, num_gallery) of re-ranked distances.
    """
    # if feature vector is numpy, you should use 'torch.tensor' transform it to tensor
    query_num = probFeat.size(0)
    all_num = query_num + galFeat.size(0)
    if only_local:
        original_dist = local_distmat
    else:
        feat = torch.cat([probFeat, galFeat])
        logger.info('using GPU to compute original distance')
        original_dist = euclidean_dist(feat, feat)
        del feat
        if local_distmat is not None:
            original_dist = original_dist + local_distmat

    original_num = original_dist.size(0)

    # NOTE(review): broadcasting divides entry [i, j] by the max of row j;
    # this matches the numpy version only for symmetric inputs — confirm.
    original_dist = original_dist / original_dist.max(axis=1)[0]
    V = torch.zeros_like(original_dist)
    # Only the first max(k1+1, round(k1/2)+1) neighbors are ever consulted.
    # (Removed leftover debug prints that were here in the original.)
    initial_rank = original_dist.argsort()
    initial_rank = initial_rank[:, :max(k1 + 1, round(k1 / 2) + 1)]

    for i in range(all_num):
        # k-reciprocal neighbors: keep j only if i is also in j's top-k1.
        forward_k_neigh_index = initial_rank[i, :k1 + 1]
        backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
        fi = torch.where(backward_k_neigh_index == i)[0]
        k_reciprocal_index = forward_k_neigh_index[fi]
        k_reciprocal_expansion_index = k_reciprocal_index
        for j in range(len(k_reciprocal_index)):
            candidate = k_reciprocal_index[j]
            candidate_forward_k_neigh_index = initial_rank[candidate, :int(round(k1 / 2) + 1)]
            candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index, :round(k1 / 2) + 1]
            fi_candidate = torch.where(candidate_backward_k_neigh_index == candidate)[0]
            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]

            # FIX: the original compared the two index tensors elementwise
            # against a sliced prefix, which is not a set intersection and
            # raises on mismatched lengths.  Count the true intersection
            # size instead, matching np.intersect1d in the numpy re_ranking.
            candidate_len = (candidate_k_reciprocal_index.unsqueeze(1)
                             == k_reciprocal_index.unsqueeze(0)).any(dim=1).sum()

            if candidate_len > 2 / 3 * candidate_k_reciprocal_index.size(0):
                k_reciprocal_expansion_index = torch.cat((k_reciprocal_expansion_index, candidate_k_reciprocal_index),
                                                         dim=0)

        k_reciprocal_expansion_index = torch.unique(k_reciprocal_expansion_index)
        # Gaussian-kernel weights over the expanded reciprocal set.
        weight = torch.exp(-original_dist[i, k_reciprocal_expansion_index])
        V[i, k_reciprocal_expansion_index] = weight / torch.sum(weight)
    original_dist = original_dist[:query_num, ]
    if k2 != 1:
        # Local query expansion: average each row of V over its k2 neighbors.
        V_qe = torch.zeros_like(V)
        for i in range(all_num):
            V_qe[i, :] = torch.mean(V[initial_rank[i, :k2], :], dim=0)
        V = V_qe
        del V_qe
    del initial_rank
    # Inverted index: for each column, the rows carrying nonzero weight.
    invIndex = []
    for i in range(original_num):
        invIndex.append(torch.where(V[:, i] != 0)[0])

    jaccard_dist = torch.zeros_like(original_dist)

    for i in range(query_num):
        # NOTE(review): allocated on the default device; may mismatch V's
        # device if features live on GPU — confirm inputs' device.
        temp_min = torch.zeros([1, original_num])
        indNonZero = torch.where(V[i, :] != 0)[0]
        indImages = [invIndex[ind] for ind in indNonZero]
        for j in range(indNonZero.size(0)):
            # Elementwise min between row i's weight and the overlapping rows.
            temp = V[i, indNonZero[j]].expand_as(V[indImages[j], indNonZero[j]]).unsqueeze(dim=0)
            temp = torch.cat([temp, V[indImages[j], indNonZero[j]].unsqueeze(dim=0)])
            temp, _ = temp.min(dim=0, keepdim=True)
            temp_min[0, indImages[j]] += temp.squeeze()
        jaccard_dist[i] = 1 - temp_min / (2 - temp_min)

    final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
    del original_dist
    del V
    del jaccard_dist
    final_dist = final_dist[:query_num, query_num:]
    return final_dist