Example #1
import numpy as np
import torch


def evaluate(qf, ql, qc, gf, gl, gc):
    """
    Evaluate one query (feature qf, label ql, camera qc) against the
    gallery (features gf, labels gl, cameras gc) and return its average
    precision and CMC curve.
    """
    # similarity of the query feature to every gallery feature
    query = qf.view(-1, 1)
    score = torch.mm(gf, query)
    score = score.squeeze(1).cpu()
    score = score.numpy()

    # rank gallery entries by similarity, highest first
    index = np.argsort(score)
    index = index[::-1]

    # gallery images with the same identity as the query
    query_index = np.argwhere(gl == ql)
    # gallery images captured by the same camera as the query
    camera_index = np.argwhere(gc == qc)

    # good matches: same identity, different camera
    good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
    # junk: unlabeled distractors and same-identity/same-camera images
    junk_index1 = np.argwhere(gl == -1)
    junk_index2 = np.intersect1d(query_index, camera_index)
    junk_index = np.append(junk_index2, junk_index1)

    ap_tmp, CMC_tmp = compute_map(index, good_index, junk_index)
    return ap_tmp, CMC_tmp
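
compute_map itself is not shown in Example #1. The sketch below is an assumption about what a compute_map(index, good_index, junk_index) routine consistent with this call typically computes: per-query average precision and a CMC vector, after junk gallery entries are removed from the ranking.

import numpy as np

def compute_map(index, good_index, junk_index):
    """Per-query AP and CMC for a ranked gallery (illustrative sketch, not the original)."""
    ap = 0.0
    cmc = np.zeros(len(index), dtype=np.int32)
    if good_index.size == 0:
        # no valid match for this query
        cmc[0] = -1
        return ap, cmc

    # drop junk entries (distractors, same-camera duplicates) from the ranking
    index = index[np.in1d(index, junk_index, invert=True)]

    # positions of the good matches in the cleaned ranking
    rows_good = np.argwhere(np.in1d(index, good_index)).flatten()
    ngood = len(good_index)

    # CMC is 1 from the first correct hit onward
    cmc[rows_good[0]:] = 1

    # interpolated average precision accumulated over the good matches
    for i in range(ngood):
        d_recall = 1.0 / ngood
        precision = (i + 1) / (rows_good[i] + 1)
        old_precision = i / rows_good[i] if rows_good[i] != 0 else 1.0
        ap += d_recall * (precision + old_precision) / 2
    return ap, cmc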
Example #2
            fine_val_loader = load_dataloader(bs, fine_val_dataset)
            fine_nb = len(fine_val_loader)
            for i, fine_val in tqdm.tqdm(enumerate(fine_val_loader),
                                         total=fine_nb):
                fine_results += make_results(fine_model, fine_val, device)

        if len(coarse_dataset.tolist()) > 0:
            coarse_val_dataset = load_dataset(coarse_dataset, fine_tr, bs)
            coarse_val_loader = load_dataloader(bs, coarse_val_dataset)
            coarse_nb = len(coarse_val_loader)
            for i, coarse_val in tqdm.tqdm(enumerate(coarse_val_loader),
                                           total=coarse_nb):
                coarse_results += make_results(coarse_model, coarse_val,
                                               device)

        map50 = compute_map(fine_results, coarse_results)
        print('Validation mAP: \n', map50)

    # save
    if e % opt.save_freq == 0:
        torch.save(fine_model, os.path.join(opt.save, 'fine_model'))
        torch.save(coarse_model, os.path.join(opt.save, 'coarse_model'))

# Testing
fine_dataset, coarse_dataset, policies = rl_agent.eval(split_test_path,
                                                       original_img_path_test)
fine_results, coarse_results = [], []

if len(fine_dataset.tolist()) > 0:
    fine_test_dataset = load_dataset(fine_dataset, fine_tr, bs)
    fine_test_loader = load_dataloader(bs, fine_test_dataset)
Example #3
                    fine_val_loader = load_dataloader(bs, fine_val_dataset)
                    fine_nb = len(fine_val_loader)
                    for i, fine_val in tqdm.tqdm(enumerate(fine_val_loader), total=fine_nb):
                        for j in fine_detector.test(fine_val):
                            fine_results.append(j)

                print('len(coarse_dataset.tolist()): \n', len(coarse_dataset.tolist()))
                if len(coarse_dataset.tolist()) > 0:
                    coarse_val_dataset = load_dataset(coarse_dataset, coarse_tr, bs)
                    coarse_val_loader = load_dataloader(bs, coarse_val_dataset)
                    coarse_nb = len(coarse_val_loader)
                    for i, coarse_val in tqdm.tqdm(enumerate(coarse_val_loader), total=coarse_nb):
                        for j in coarse_detector.test(coarse_val):
                            coarse_results.append(j)

                map50 = compute_map(fine_results, coarse_results)
                print('Validation mAP: \n', map50)
                print('Validation fine mAP: \n', compute_map(fine_results, []))
                print('Validation coarse mAP: \n', compute_map([], coarse_results))
                print('Time for validation: \n', time.time() - s_time)

                with open(opt.save_path + '/val_result.txt', 'a') as f:
                    f.write(str(map50) + '\n')

                # log the average policy decision over this validation pass
                eff = sum(int(i) for i in policies)
                with open(opt.save_path + '/val_policies.txt', 'a') as f:
                    f.write(str(eff / len(policies)) + '\n')

        # Testing
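
In Examples #2 and #3, compute_map(fine_results, coarse_results) merges the detection results of the fine and coarse branches into a single score named map50, conventionally mAP at an IoU threshold of 0.5. Its internals are not shown here; any such routine needs a box-overlap test, and the helper below is only an illustrative sketch of that building block, assuming [x1, y1, x2, y2] boxes (it is not the repository's code).

def box_iou(box_a, box_b):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes (hypothetical helper)."""
    # intersection rectangle
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    # union = area_a + area_b - intersection
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0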
Example #4
#
# Build recommender system
#

# item-based similarity blended with a SLIM model (weights 0.7 / 0.3)
item = Item(tracks_info, 0.075, 0.075)
slim = Slim(lambda_i=0.001, lambda_j=0.0001, epochs=3, lr=0.1)
h1 = HybridSimilarity(item, 0.7, slim, 0.3)

# add a user-based KNN component (weights 0.85 / 0.15)
user = User(knn=64)
h2 = Hybrid(h1, 0.85, user, 0.15)

# latent-factor models: WARP-loss ranking and ALS, blended 0.7 / 0.3
w = Warp(NUM_TRACKS=NUM_TRACKS, no_components=300, epochs=50)
a = ALS(factors=1024, iterations=5)
h3 = Hybrid(w, 0.7, a, 0.3)

# final blend of the similarity-based and latent-factor hybrids (0.85 / 0.15)
recsys = Hybrid(h2, 0.85, h3, 0.15)

#
# Run recommender system
#

recs = recsys.run(train_data, targets)

#
# Compute MAP or export recommendations
#

if TEST:
    utils.compute_map(recs, test_data, targets)
else:
    utils.save_recommendations(recs)
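
Hybrid and HybridSimilarity are project-specific classes whose internals are not shown. Assuming a Hybrid(model_a, w_a, model_b, w_b) linearly blends the two models' user-item score matrices before ranking, the core operation reduces to something like the sketch below (the function name and the top-n step are illustrative assumptions):

import numpy as np

def blend_and_recommend(scores_a, scores_b, w_a=0.85, w_b=0.15, n=10):
    # weighted sum of two (num_users, num_items) score matrices,
    # then the n highest-scoring items per user
    blended = w_a * scores_a + w_b * scores_b
    return np.argsort(-blended, axis=1)[:, :n]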
Example #5
        # for each query, rank the database (train) features by descending similarity
        sim = torch.mm(train_features, query_features.T)
        ranks = torch.argsort(-sim, dim=0).cpu().numpy()

        ############################################################################
        # Step 3: evaluate
        gnd = dataset_train.cfg['gnd']
        # evaluate ranks
        ks = [1, 5, 10]
        # Medium protocol: easy and hard ground truth both count as positives
        gnd_t = []
        for i in range(len(gnd)):
            g = {}
            g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
            g['junk'] = np.concatenate([gnd[i]['junk']])
            gnd_t.append(g)
        mapM, apsM, mprM, prsM = utils.compute_map(ranks, gnd_t, ks)
        # Hard protocol: only hard ground truth counts; easy images are treated as junk
        gnd_t = []
        for i in range(len(gnd)):
            g = {}
            g['ok'] = np.concatenate([gnd[i]['hard']])
            g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
            gnd_t.append(g)
        mapH, apsH, mprH, prsH = utils.compute_map(ranks, gnd_t, ks)
        print('>> {}: mAP M: {}, H: {}'.format(
            args.dataset, np.around(mapM * 100, decimals=2),
            np.around(mapH * 100, decimals=2)))
        print('>> {}: mP@k{} M: {}, H: {}'.format(
            args.dataset, np.array(ks), np.around(mprM * 100, decimals=2),
            np.around(mprH * 100, decimals=2)))
    dist.barrier()
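
As the print statements show, utils.compute_map here returns the mean AP, per-query APs, mean precision at the cutoffs in ks, and per-query precisions for a given ground-truth split. As a small illustration of the 'ok'/'junk' convention used above (this is not the actual utils code), precision at k for one query can be computed as:

import numpy as np

def precision_at_k(ranked, ok, junk, k):
    # drop junk entries from the ranking, then measure the hit rate in the top-k
    ranked = ranked[~np.isin(ranked, junk)]
    return float(np.isin(ranked[:k], ok).mean())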