Ejemplo n.º 1
0
            thresh_file=opt.thresh_file, \
            add_gt=False, \
            train_mode=False, \
            jittering=False, \
            nms_thresh=opt.nms_thresh)
""" Load the test triplets """
target_triplets = dset.get_zeroshottriplets(
)  # uncomment to eval zeroshot triplets
#target_triplets = dset.visualphrases.words() # uncomment to eval all triplets
""" Keys to analyze """
keys = ['s-sro-o', 's-r-o-sro']
""" Aggregate csv result files (from official HICO eval code) """
# Logger path
logger_path = osp.join(opt.logger_dir, opt.exp_name)

detection_path = parser.get_res_dir(opt, 'detections_' + opt.embedding_type)
res_path = parser.get_res_dir(opt, 'res_' + opt.embedding_type)

for key in keys:
    # One output csv per key: groups the AP results of all zero-shot triplets.
    # (Backslash continuations removed: redundant inside parentheses.)
    filename_out = osp.join(res_path, 'results_{}_{}_{}_{}.csv'.format(
        opt.cand_test,
        opt.test_split,
        opt.epoch_model,
        key))

    # BUGFIX: csv.writer requires a text-mode file object in Python 3;
    # opening with 'wb' makes writerow raise TypeError. Per the csv module
    # docs, open in 'w' with newline='' (avoids blank lines on Windows).
    with open(filename_out, 'w', newline='') as f:
        writer = csv.writer(f)

        ap_triplets = []
Ejemplo n.º 2
0
                    use_precompobjectscore = opt.use_precompobjectscore)

loader = torch.utils.data.DataLoader(dset_loader,
                                     batch_size=8,
                                     shuffle=False,
                                     num_workers=0,
                                     collate_fn=dset_loader.collate_fn)

##################
#   Load model   #
##################

# Logger path: experiment directory containing the saved checkpoints.
logger_path = osp.join(opt.logger_dir, opt.exp_name)

# Directory where detections will be written, suffixed by embedding type.
save_dir = parser.get_res_dir(opt, 'detections_' + opt.embedding_type)
opt = parser.get_opts_from_dset(opt, dset)  # additional opts from dset

# Build the model and wrap for (multi-)GPU execution.
model = models.get_model(opt)
model = nn.DataParallel(model).cuda()

# Restore the checkpoint selected by opt.epoch_model.
# (Removed commented-out debug prints of checkpoint keys.)
checkpoint = torch.load(
    osp.join(logger_path, 'model_' + opt.epoch_model + '.pth.tar'))

# strict=False (was a bare positional False): tolerate missing/unexpected
# state-dict keys — preserves the original behavior, now made explicit.
model.load_state_dict(checkpoint['model'], strict=False)
Ejemplo n.º 3
0
    lang_feats_precomp_sro = model.get_language_features(queries_sro, 'sro')
elif opt.use_analogy:
    print('Precomputing query features in joint space with analogy...')
    lang_feats_precomp_sro = model.get_language_features_analogy(queries_sro)


################
#   Evaluate   #
################

# Language-feature keys to evaluate retrieval on.
keys = ['r', 'sro', 's', 'o']

name_dir = 'scores_retrieval_' + subset_test + '_' + opt.embedding_type

# Output directories for retrieval scores and pairwise similarities.
save_dir = parser.get_res_dir(opt, name_dir)
sim_dir = parser.get_res_dir(opt, 'similarities_' + subset_test + '_' + opt.embedding_type)

# Print source triplets and similarities (skipped when evaluating the target
# embedding itself). Idiom fix: `x != y` instead of `not x == y`.
if opt.embedding_type != 'target':
    print_similarities(target_triplets, queries_sro)

print('Begin evaluation')
datasets = ['unrel', 'vrd']
# Expected candidate counts per (dataset, candidate-source) setting —
# presumably a downstream sanity check; TODO confirm against the consumer.
num_cand_theoric = {'vrd_candidates': 290974, 'vrd_gt-candidates': 51036, 'unrel_candidates': 166368, 'unrel_gt-candidates': 10308}
for cand_test in ['gt-candidates','candidates']:
#for cand_test in ['gt-candidates']:
    for data_name in datasets:
Ejemplo n.º 4
0
# Target queries indices
queries_sro, triplet_queries_idx = model.precomp_target_queries(
    triplet_queries)

# Pre-compute language features in the joint sro space.
print('Precomputing query features in joint space with analogy...')
lang_feats_precomp_sro = model.get_language_features_analogy(queries_sro)

################
#   Evaluate   #
################

# Score-mixture keys: taken from opt.mixture_keys ('_'-separated) when set,
# otherwise the default three aggregation schemes.
keys = opt.mixture_keys.split('_') if opt.mixture_keys else [
    's-r-o', 's-r-o-sro', 's-sro-o'
]
save_dir = parser.get_res_dir(
    opt, 'apRetrieval_{}_{}'.format(subset_test, opt.embedding_type))

##########################
#   Get the detections   #
##########################

# Save the detections by key (all triplets grouped together). The trailing
# '%s' placeholders survive .format() and are filled later with
# (subset, key) via the % operator.
# (Backslash continuations removed: redundant inside parentheses.)
det_path = osp.join(save_dir, 'detections_{}_{}_{}_{}_{}.pkl'.format(
    opt.cand_test,
    opt.epoch_model,
    opt.data_name,
    '%s',
    '%s'))

det_file = det_path % (subset_test, 's-r-o')

print('Begin evaluation')