Code Example #1
def generate_first_predict_xml(normalize_flag=False,
                               re_ranking_flag=False,
                               contain_top_n=None):
    print('generate_first_predict_xml(normalize_flag=%s, re_ranking_flag=%s, contain_top_n=%s)' %
          (normalize_flag, re_ranking_flag, contain_top_n))
    g = np.load('result/test_features/predict_gallery_features.npy')
    p = np.load('result/test_features/predict_probe_features.npy')
    if normalize_flag:
        g = normalize(g)
        p = normalize(p)
    g_names_list = []
    g_names_order_list = []
    with open('../data/predict_gallery_name.csv', "r") as f:
        for name_order in f.readlines():
            name_order = name_order.strip('\n')
            g_names_list.append(int(name_order.split(',')[0]))
            g_names_order_list.append(float(name_order.split(',')[1]))
    g_names = np.array(g_names_list)
    g_names_order = np.array(g_names_order_list)
    p_names_list = []
    p_names_order_list = []
    with open('../data/predict_probe_name.csv', "r") as f:
        for name_order in f.readlines():
            name_order = name_order.strip('\n')
            p_names_list.append(int(name_order.split(',')[0]))
            p_names_order_list.append(float(name_order.split(',')[1]))
    p_names = np.array(p_names_list)
    p_names_order = np.array(p_names_order_list)
    if not np.array_equal(g_names_order, np.arange(58061)):
        print('g_names_order error')
        return
    if not np.array_equal(p_names_order, np.arange(4480)):
        print('p_names_order error')
        return
    print('start compute distance')
    distmat = compute_distmat(g, p)
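    # NOTE: the return value of re_ranking below is not assigned, so the
    # re-ranked distances do not affect the final ranking written to the XML.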
    if re_ranking_flag:
        re_ranking(distmat.transpose(),
                   p,
                   g_names,
                   g,
                   k1=20,
                   lambda_=0.3,
                   top_n=100)
    if contain_top_n is not None:
        distmat = pick_top(distmat, contain_top_n=contain_top_n)
    print('start sort')
    sort_g_names_top_n = sorted_image_names(distmat, g_names, top_n=200)
    print('start create xml')
    create_xml(p_names, sort_g_names_top_n[:, :200],
               '../data/predict_result.xml')
Code Example #2
File: evaluate_rerank.py  Project: wuyangfeng/VRID
def rerank_main(query_f, query_cam, query_label, gallery_f, gallery_cam,
                gallery_label):
    CMC = torch.IntTensor(len(gallery_label)).zero_()
    ap = 0.0
    query_f = query_f.numpy()
    gallery_f = gallery_f.numpy()
    #re-ranking
    print('calculate initial distance')
    # IPython.embed()
    q_g_dist = np.dot(query_f, np.transpose(gallery_f))
    q_q_dist = np.dot(query_f, np.transpose(query_f))
    g_g_dist = np.dot(gallery_f, np.transpose(gallery_f))
    since = time.time()
    # print(q_g_dist,'\n',q_q_dist, '\n', g_g_dist)
    re_rank = re_ranking(q_g_dist, q_q_dist, g_g_dist)
    time_elapsed = time.time() - since
    print('Reranking complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    for i in range(len(query_label)):
        ap_tmp, CMC_tmp = evaluate(re_rank[i, :], query_label[i], query_cam[i],
                                   gallery_label, gallery_cam)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
        #print(i, CMC_tmp[0])

    CMC = CMC.float()
    CMC = CMC / len(query_label)  #average CMC
    mAP = ap / len(query_label)
    print('top1:%f top5:%f top10:%f mAP:%f' %
          (CMC[0], CMC[4], CMC[9], mAP))
    return CMC, mAP
Code Example #3
def calacc(featDict,
           probeDict,
           galleryDict,
           norm_flag=False,
           rerank=False,
           top_num=[1, 5, 10, 20, 30]):
    probeLabel, probeFeat = dict2feature(featDict, probeDict)
    galleryLabel, galleryFeat = dict2feature(featDict, galleryDict)
    if norm_flag:
        probeFeat = pre.normalize(probeFeat, axis=1)
        galleryFeat = pre.normalize(galleryFeat, axis=1)
    if rerank:
        dist = re_ranking(probeFeat,
                          galleryFeat,
                          k1=10,
                          k2=6,
                          lambda_value=0.3)
    else:
        dist = cdist(probeFeat, galleryFeat)
    index = []
    for i in range(len(dist)):
        a = dist[i]
        # distance(s) from probe i to the gallery sample(s) sharing its label
        ind = np.where(galleryLabel == probeLabel[i])
        dp = a[ind]
        a.sort()
        # rank position of the correct match (assumes one gallery sample per identity)
        index.append(list(a).index(dp))

    index = np.array(index)
    cmc = lambda top, index: len(np.where(index < top)[0]) / len(probeLabel)
    cmc_curve = [cmc(top, index) for top in top_num]
    return cmc_curve
Code Example #4
def valid_mAP(normalize_flag=False, re_ranking_flag=False, contain_top_n=None):
    print('valid_mAP(normalize_flag=%s, contain_top_n=%s)' %
          (normalize_flag, contain_top_n))
    min_step = 100000000
    max_step = 0
    for root, dirs, files in os.walk(
            os.path.abspath('./result/test_features')):
        for name in files:
            if 'valid_probe_features_step-' in name:
                step = int(name.split('.')[0].split('-')[-1])
                if step > max_step:
                    max_step = step
                if step < min_step:
                    min_step = step
    map2_all = []
    # valid mAP
    for step in range(min_step, max_step + 1, 5000):
        g = np.load('result/test_features/valid_gallery_features_step-%d.npy' %
                    step)
        print('step: %s, g_feature abs mean : %s' % (step, np.mean(np.abs(g))))
        g_labels = np.load('result/test_features/valid_gallery_labels.npy')
        p = np.load('result/test_features/valid_probe_features_step-%d.npy' %
                    step)
        p_labels = np.load('result/test_features/valid_probe_labels.npy')
        if normalize_flag:
            g = normalize(g)
            p = normalize(p)
        distmat = compute_distmat(g, p)
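        # NOTE: as in example #1, re_ranking's return value is not assigned
        # below, so it does not change the mAP computed from distmat.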
        if re_ranking_flag:
            re_ranking(distmat.transpose(),
                       p,
                       np.array(range(g.shape[0])),
                       g,
                       k1=20,
                       lambda_=0.3,
                       top_n=100)
        if contain_top_n is not None:
            distmat = pick_top(distmat, contain_top_n=contain_top_n)
        map1, map2 = mAP(distmat,
                         glabels=g_labels,
                         plabels=p_labels,
                         top_n=200)
        map2_all.append(map2)
        print('step: %d, map: %f, %f ' % (step, map1, map2))
    plt.plot(range(min_step, max_step + 1, 5000), map2_all)
    plt.show()
Code Example #5
def evaluate_reranking(qf,
                       q_pids,
                       q_camids,
                       gf,
                       g_pids,
                       g_camids,
                       ranks,
                       dis_type="cosine"):
    m, n = qf.size(0), gf.size(0)

    if dis_type == "cosine":
        qf = qf / (qf**2).sum(dim=1, keepdim=True).sqrt()
        gf = gf / (gf**2).sum(dim=1, keepdim=True).sqrt()
        q_g_dist = 1 - torch.matmul(qf, gf.t())
        np.save("with_talw", q_g_dist.cpu().numpy())
        q_q_dist = 1 - torch.matmul(qf, qf.t())
        g_g_dist = 1 - torch.matmul(gf, gf.t())

    else:
        q_g_dist = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                   torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        q_g_dist.addmm_(1, -2, qf, gf.t())
        q_q_dist = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
                   torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
        q_q_dist.addmm_(1, -2, qf, qf.t())
        g_g_dist = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
                   torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
        g_g_dist.addmm_(1, -2, gf, gf.t())

    q_g_dist = q_g_dist.cpu().numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(q_g_dist, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    q_q_dist = q_q_dist.cpu().numpy()
    g_g_dist = g_g_dist.cpu().numpy()
    rerank_dis = re_ranking(q_g_dist, q_q_dist, g_g_dist)

    print("Computing rerank CMC and mAP")
    rerank_cmc, rerank_mAP = evaluate(rerank_dis, q_pids, g_pids, q_camids,
                                      g_camids)

    print("rerank Results ----------")
    print("mAP: {:.1%}".format(rerank_mAP))
    print("CMC curve")
    for r in ranks:
        print("rerank Rank-{:<3}: {:.1%}".format(r, rerank_cmc[r - 1]))
    print("------------------")
    return cmc + [mAP]
Code Example #6
def test(query_feature,
         query_label,
         gallery_feature,
         gallery_label,
         method='cosine'):
    D = pairwise_distances(gallery_feature,
                           query_feature,
                           metric=method,
                           n_jobs=-2)
    query_label, gallery_label = _re_assign_labels(query_label, gallery_label)
    gallery_labels_set = np.unique(gallery_label)

    if opt.re_rank:
        q_g_dist = np.dot(query_feature, np.transpose(gallery_feature))
        q_q_dist = np.dot(query_feature, np.transpose(query_feature))
        g_g_dist = np.dot(gallery_feature, np.transpose(gallery_feature))

    for label in query_label:
        if label not in gallery_labels_set:
            print('Probe-id is out of Gallery-id sets.')

    Times = 100
    k = 110

    res = np.zeros(k)

    gallery_labels_map = [[] for i in range(gallery_labels_set.size)]
    for i, g in enumerate(gallery_label):
        gallery_labels_map[g].append(i)

    for __ in range(Times):
        # Randomly select one gallery sample per label selected
        newD = np.zeros((gallery_labels_set.size, query_label.size))
        print(newD.shape)
        for i, g in enumerate(gallery_labels_set):
            j = np.random.choice(gallery_labels_map[g])
            newD[i, :] = D[j, :]
        # Compute CMC
        print(newD.shape)

        res += _cmc_core(newD, gallery_labels_set, query_label, k)

    if opt.re_rank:
        newD = re_ranking(q_g_dist, q_q_dist, g_g_dist)
        newD = np.transpose(newD)
        res += _cmc_core(newD, gallery_labels_set, query_label, k)
    res /= Times
    return res
Code Example #7
def calculate_result_rerank(gallery_feature,
                            gallery_label,
                            gallery_cam,
                            query_feature,
                            query_label,
                            query_cam,
                            result_file,
                            k1=100,
                            k2=15,
                            lambda_value=0):
    query_feature = torch.FloatTensor(query_feature).cuda()
    gallery_feature = torch.FloatTensor(gallery_feature).cuda()
    CMC = torch.IntTensor(len(gallery_label)).zero_()

    ap = 0.0
    print('calculate initial distance')
    since = time.time()
    q_g_dist = torch.mm(query_feature, torch.transpose(gallery_feature, 0, 1))
    q_q_dist = torch.mm(query_feature, torch.transpose(query_feature, 0, 1))
    g_g_dist = torch.mm(gallery_feature,
                        torch.transpose(gallery_feature, 0, 1))

    # to cpu
    q_g_dist, q_q_dist, g_g_dist = q_g_dist.cpu().numpy(), q_q_dist.cpu(
    ).numpy(), g_g_dist.cpu().numpy()
    re_rank = re_ranking(q_g_dist, q_q_dist, g_g_dist, k1, k2, lambda_value)
    time_elapsed = time.time() - since
    print('Reranking complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    for i in range(len(query_label)):
        ap_tmp, CMC_tmp = evaluate(re_rank[i, :], query_label[i], query_cam[i],
                                   gallery_label, gallery_cam)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
    CMC = CMC.float()
    CMC = CMC / len(query_label)  #average CMC
    str_result = 're-ranking Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f\n' % (
        CMC[0], CMC[4], CMC[9], ap / len(query_label))
    print(k1, k2, lambda_value, str_result)
    with open(result_file, "a") as text_file:
        text_file.write(str_result)
Code Example #8
def eva_rerank(opt):
    result = scipy.io.loadmat('pytorch_result_%s.mat' % opt.name)
    query_feature = result['query_f']
    query_cam = result['query_cam'][0]
    query_label = result['query_label'][0]
    gallery_feature = result['gallery_f']
    gallery_cam = result['gallery_cam'][0]
    gallery_label = result['gallery_label'][0]

    CMC = torch.IntTensor(len(gallery_label)).zero_()
    ap = 0.0
    #re-ranking
    print('calculate initial distance')
    q_g_dist = np.dot(query_feature, np.transpose(gallery_feature))
    q_q_dist = np.dot(query_feature, np.transpose(query_feature))
    g_g_dist = np.dot(gallery_feature, np.transpose(gallery_feature))
    since = time.time()
    re_rank = re_ranking(q_g_dist, q_q_dist, g_g_dist)
    time_elapsed = time.time() - since
    print('Reranking complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    for i in range(len(query_label)):
        ap_tmp, CMC_tmp = evaluate(re_rank[i, :], query_label[i], query_cam[i],
                                   gallery_label, gallery_cam)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
        #print(i, CMC_tmp[0])

    CMC = CMC.float()
    CMC = CMC / len(query_label)  #average CMC
    print('top1:%f top5:%f top10:%f mAP:%f' %
          (CMC[0], CMC[4], CMC[9], ap / len(query_label)))
    file_name = './evaluateResult.txt'
    with open(file_name, 'a+') as f:
        f.write('\nname: %s rerank results\n' % opt.name)
        f.write('train dataset: %s\n' % opt.data_dir)
        f.write('test dataset: %s\n' % opt.test_dir)
        f.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        f.write('\nmulti Rank@1:%f\nRank@5:%f\nRank@10:%f \nmAP:%f\n\n' %
                (CMC[0], CMC[4], CMC[9], ap / len(query_label)))
Code Example #9
    def evaluation_metrics(self):

        self.q_features = l2_norm_standardize(self.q_features)
        self.g_features = l2_norm_standardize(self.g_features)

        # Score each query image against every feature vector in the gallery.
        scores = joint_scores(self.q_features, self.q_cam_ids, self.q_frames,
                              self.g_features, self.g_cam_ids, self.g_frames,
                              self.trainer.datamodule.st_distribution)

        if self.rerank:
            scores = re_ranking(scores)

        # Metrics & Evaluation
        mean_ap, cmc = mAP(scores, self.q_targets, self.q_cam_ids,
                           self.g_targets, self.g_cam_ids)

        return mean_ap, cmc
Code Example #10
File: trainer.py  Project: lkf59553/reid_mgn-dgnet
    def test(self):
        epoch = self.scheduler.last_epoch + 1
        self.ckpt.write_log('\n[INFO] Test:')
        self.model.eval()

        self.ckpt.add_log(torch.zeros(1, 5))
        qf = self.extract_feature(self.query_loader).numpy()
        gf = self.extract_feature(self.test_loader).numpy()

        if self.args.re_rank:
            q_g_dist = np.dot(qf, np.transpose(gf))
            q_q_dist = np.dot(qf, np.transpose(qf))
            g_g_dist = np.dot(gf, np.transpose(gf))
            dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)
        else:
            dist = cdist(qf, gf)
        r = cmc(dist,
                self.queryset.ids,
                self.testset.ids,
                self.queryset.cameras,
                self.testset.cameras,
                separate_camera_set=False,
                single_gallery_shot=False,
                first_match_break=True)
        m_ap = mean_ap(dist, self.queryset.ids, self.testset.ids,
                       self.queryset.cameras, self.testset.cameras)

        self.ckpt.log[-1, 0] = m_ap
        self.ckpt.log[-1, 1] = r[0]
        self.ckpt.log[-1, 2] = r[2]
        self.ckpt.log[-1, 3] = r[4]
        self.ckpt.log[-1, 4] = r[9]
        best = self.ckpt.log.max(0)
        self.ckpt.write_log(
            '[INFO] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} (Best: {:.4f} @epoch {})'
            .format(m_ap, r[0], r[2], r[4], r[9], best[0][0],
                    (best[1][0] + 1) * self.args.test_every))
        if not self.args.test_only:
            self.ckpt.save(self,
                           epoch,
                           is_best=((best[1][0] + 1) *
                                    self.args.test_every == epoch))
Code Example #11
def evaluate_reranking(qf, q_pids, q_camids, gf, g_pids, g_camids, ranks):
    m, n = qf.size(0), gf.size(0)

    q_g_dist = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
               torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
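    # NOTE: addmm_(beta, alpha, mat1, mat2) is the legacy positional form;
    # recent PyTorch versions expect addmm_(mat1, mat2, beta=1, alpha=-2).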
    q_g_dist.addmm_(1, -2, qf, gf.t())
    q_q_dist = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
               torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
    q_q_dist.addmm_(1, -2, qf, qf.t())
    g_g_dist = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
               torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
    g_g_dist.addmm_(1, -2, gf, gf.t())

    q_g_dist = q_g_dist.cpu().numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(q_g_dist, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    q_q_dist = q_q_dist.cpu().numpy()
    g_g_dist = g_g_dist.cpu().numpy()
    rerank_dis = re_ranking(q_g_dist, q_q_dist, g_g_dist)

    print("Computing rerank CMC and mAP")
    rerank_cmc, rerank_mAP = evaluate(rerank_dis, q_pids, g_pids, q_camids,
                                      g_camids)

    print("rerank Results ----------")
    print("mAP: {:.1%}".format(rerank_mAP))
    print("CMC curve")
    for r in ranks:
        print("rerank Rank-{:<3}: {:.1%}".format(r, rerank_cmc[r - 1]))
    print("------------------")
    return cmc + [mAP]
Code Example #12
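# NOTE: this snippet assumes `result` was loaded beforehand, e.g. with
# scipy.io.loadmat as in example #8.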
query_feature = result['query_f']
query_cam = result['query_cam'][0]
query_label = result['query_label'][0]
gallery_feature = result['gallery_f']
gallery_cam = result['gallery_cam'][0]
gallery_label = result['gallery_label'][0]

CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
#re-ranking
print('calculate initial distance')
q_g_dist = np.dot(query_feature, np.transpose(gallery_feature))
q_q_dist = np.dot(query_feature, np.transpose(query_feature))
g_g_dist = np.dot(gallery_feature, np.transpose(gallery_feature))
since = time.time()
re_rank = re_ranking(q_g_dist, q_q_dist, g_g_dist)
time_elapsed = time.time() - since
print('Reranking complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                     time_elapsed % 60))
for i in range(len(query_label)):
    ap_tmp, CMC_tmp = evaluate(re_rank[i, :], query_label[i], query_cam[i],
                               gallery_label, gallery_cam)
    if CMC_tmp[0] == -1:
        continue
    CMC = CMC + CMC_tmp
    ap += ap_tmp
    #print(i, CMC_tmp[0])

CMC = CMC.float()
CMC = CMC / len(query_label)  #average CMC
print('top1:%f top5:%f top10:%f mAP:%f' %
      (CMC[0], CMC[4], CMC[9], ap / len(query_label)))
Code Example #13
mat_path = 'E:\\graduation thesis\\code modification\\triplet-reid-pytorch-master-BagofTricks(BNNeck + CenterLoss + Smoothing Research)\\model\\all_scores.mat'
all_scores = scipy.io.loadmat(mat_path)  #important
all_dist = all_scores['all_scores']
print('all_dist shape:', all_dist.shape)
print('query_cam shape:', query_cam.shape)

CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
#re-ranking
print('calculate initial distance')
# q_g_dist = np.dot(query_feature, np.transpose(gallery_feature))
# q_q_dist = np.dot(query_feature, np.transpose(query_feature))
# g_g_dist = np.dot(gallery_feature, np.transpose(gallery_feature))

since = time.time()
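# NOTE: this project's re_ranking variant appears to take the number of query
# images and a single all-pairs score matrix instead of separate
# q_g / q_q / g_g matrices.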
re_rank = re_ranking(len(query_cam), all_dist)
time_elapsed = time.time() - since
print('Reranking complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                     time_elapsed % 60))
for i in range(len(query_label)):
    ap_tmp, CMC_tmp = evaluate(re_rank[i, :], query_label[i], query_cam[i],
                               gallery_label, gallery_cam)
    if CMC_tmp[0] == -1:
        continue
    CMC = CMC + CMC_tmp
    ap += ap_tmp
    #print(i, CMC_tmp[0])

CMC = CMC.float()
CMC = CMC / len(query_label)  #average CMC
print('top1:%f top5:%f top10:%f mAP:%f' %
      (CMC[0], CMC[4], CMC[9], ap / len(query_label)))
Code Example #14
def main():

    cfg = Config()

    # The images which will be visualized
    query_disp_choice = [33]

    dist_save_path = "distance_matrix.p"
    query_save_path = "query_features.p"
    gallery_save_path = "gallery_features.p"

    print("Starting Evaluation")

    #ReIDModel change version for different model eval
    model = ReIDModel(version=cfg.model)

    test_dataset, query_dataset = None, None

    # Load test and query datasets
    if cfg.dataset == 'Market1501':

        dataset_dir = 'data/Market-1501-v15.09.15'

        query_dataset = Market1501Dataset(dataset_dir + "/query",
                                          model.transform, model.preprocessor)

        test_dataset = Market1501Dataset(dataset_dir + "/bounding_box_test",
                                         model.transform, model.preprocessor)
    elif cfg.dataset == 'Cuhk03':
        dataset_dir = 'data/cuhk03-np/labeled'

        query_dataset = Cuhk03Dataset(dataset_dir + "/query", model.transform,
                                      model.preprocessor)
        test_dataset = Cuhk03Dataset(dataset_dir + "/bounding_box_test",
                                     model.transform, model.preprocessor)
    else:
        raise ValueError("Unknown dataset name")

    query_loader = DataLoader(query_dataset,
                              batch_size=16,
                              num_workers=4,
                              pin_memory=True,
                              shuffle=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=16,
                             num_workers=4,
                             pin_memory=True,
                             shuffle=False)

    print("Retreiving features for query...")
    query_features, query_pid, query_cam, query_path = generate_features(
        model, query_loader, query_save_path, cfg.use_save)
    print("Done.")

    print("Retreiving features for gallery")
    test_features, test_pid, test_cam, test_path = generate_features(
        model, test_loader, gallery_save_path, cfg.use_save)
    print("Done.")

    print("Calculating distances")

    #Euclidean distance between each query feature vector and each gallery feature vector
    if dist_save_path is not None and os.path.exists(
            dist_save_path) and cfg.use_save == True:
        print("Loading distance matrix from save: " + dist_save_path)
        q_g_distances, q_q_distances, g_g_distances = pickle.load(
            open(dist_save_path, "rb"))
    else:
        print(
            "Save not detected or --use_save is False. Calculauting new distance matrix."
        )
        q_g_distances = distance.cdist(query_features, test_features,
                                       'euclidean')
        q_q_distances = distance.cdist(query_features, query_features,
                                       'euclidean')
        g_g_distances = distance.cdist(test_features, test_features,
                                       'euclidean')
        if dist_save_path is not None:
            print("Saving distances...")
            pickle.dump((q_g_distances, q_q_distances, g_g_distances),
                        open(dist_save_path, "wb"))
        else:
            print("Could not save features as path was not specified.")

    avg_precision = np.zeros(query_features.shape[0])
    r1 = []

    sorted_ind = np.argsort(q_g_distances, axis=1)

    if cfg.rerank == True:
        print("Re-Ranking...")
        re_ranked_q_g_distances = re_ranking(q_g_distances, q_q_distances,
                                             g_g_distances)
        print("Done.")

    else:
        re_ranked_q_g_distances = q_g_distances

    sorted_ind = np.argsort(re_ranked_q_g_distances, axis=1)
    print(re_ranked_q_g_distances[0][sorted_ind[0]])
    print(re_ranked_q_g_distances[1][sorted_ind[1]])
    print(re_ranked_q_g_distances[2][sorted_ind[2]])

    for k in range(0, re_ranked_q_g_distances.shape[0]):

        # junk images with pid == -1 after sorting
        junk_images_pid = np.where(np.array(test_pid)[sorted_ind[k]] == -1)[0]
        junk_images_cam = np.where(
            np.array(test_cam)[sorted_ind[k]] == query_cam[k])[0]
        junk_images = np.concatenate((junk_images_pid, junk_images_cam))
        good_images = np.delete(np.arange(len(test_pid)), junk_images)

        if np.array(test_pid)[sorted_ind[k]][good_images][0] == query_pid[k]:
            r1.append(1)
        else:
            r1.append(0)

        binary_labels = np.array(test_pid)[
            sorted_ind[k]][good_images] == query_pid[k]
        avg_precision[k] = calculate_ap(binary_labels)

        if k in query_disp_choice:
            result_paths = np.array(
                test_path)[sorted_ind][k][good_images][0:20]
            result_pid = np.array(test_pid)[sorted_ind][k][good_images][0:20]
            result_cam = np.array(test_cam)[sorted_ind][k][good_images][0:20]
            result_dist = re_ranked_q_g_distances[k][
                sorted_ind[k]][good_images][0:20]
            query_disp = query_pid[k], query_path[k], query_cam[k]
            display(query_disp,
                    (result_paths, result_pid, result_cam, result_dist),
                    dataset_dir)

    # Calculate average precision for each query image

    print("mAP:" + str(round(np.mean(avg_precision), 4) * 100) + "%" +
          " rank 1: " + str(round(np.mean(np.array(r1)), 4) * 100) + "%")
    print(avg_precision)
Code Example #15
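# NOTE: fragment; the first block below runs inside a loop over query indices i,
# adjusting q_q_dist with camera and direction heuristics before re-ranking.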
    if q_cam[i] != -1:
        junk_index = np.argwhere(q_cam == q_cam[i])
        index = np.setdiff1d(junk_index, good_index)
        q_q_dist[i, index] = q_q_dist[i, index] - 0.3
    else:
        # same direction in 6,7,8,9
        junk_index = np.argwhere(q_q_direction_sim[i, :] >= 0.25)
        index = np.setdiff1d(junk_index, good_index)
        q_q_dist[i, index] = q_q_dist[i, index] - 0.1
    if q_cam_cluster[i] != -1:
        junk_index = np.argwhere(q_cam_cluster == q_cam_cluster[i])
        index = np.setdiff1d(junk_index, good_index)
        q_q_dist[i, index] = q_q_dist[i, index] - 0.2

if not os.path.isfile('rerank_score.mat'):
    score_total = re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=opt.k1, k2=opt.k2, lambda_value=opt.lam)
    score = {'score_total': score_total}
    scipy.io.savemat('rerank_score.mat', score)
else:
    score = scipy.io.loadmat('rerank_score.mat')
    score_total = score['score_total']
for i in range(nq):
    if q_cam[i] != -1:
        ignore_index = np.argwhere(g_cam == q_cam[i])
        score_total[i, ignore_index] = score_total[i, ignore_index] + 0.3
    else:
        # same direction in 6,7,8,9
        ignore_index = np.argwhere(q_g_direction_sim[i, :] >= 0.25)
        #ignore_index2 = np.argwhere(g_cam == -1)
        #ignore_index = np.intersect1d(ignore_index1, ignore_index2)
        score_total[i, ignore_index] = score_total[i, ignore_index] + 0.1