    ap_triplets.append(ap)  # last statement of the per-triplet AP loop (see context sketch below)

# Write mAP
writer.writerow(['mAP', np.nanmean(ap_triplets)])

"""
Get the similarities used for predictions: in detection_dir, 1 file for each target triplet (more readable)
"""

if opt.embedding_type != 'target':
    occurrences_train = dset.get_occurrences_precomp(opt.train_split.split('_')[0])
    opt.occurrences = occurrences_train  # reuse the same precomputed occurrences

opt = parser.get_opts_from_dset(opt, dset)  # additional opts from dset

model = models.get_model(opt)
checkpoint = torch.load(osp.join(logger_path, 'model_' + opt.epoch_model + '.pth.tar'),
                        map_location=lambda storage, loc: storage)  # load tensors on CPU
model.load_state_dict(checkpoint['model'])
model.eval()
if torch.cuda.is_available():
    model.cuda()

""" Pre-compute language feats """
if opt.use_analogy:
    model.precomp_language_features()
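# ---------------------------------------------------------------------------
# Context sketch for the block above (a minimal, hypothetical reconstruction,
# not the original script): `writer`, `ap`, and `ap_triplets` come from a
# per-triplet AP loop that writes one CSV row per target triplet. The CSV
# path and the `score_triplet` helper are assumptions introduced here for
# illustration; csv, numpy, and sklearn.metrics.average_precision_score are
# the only real APIs used.
# ---------------------------------------------------------------------------
import csv

import numpy as np
from sklearn.metrics import average_precision_score


def score_triplet(model, dset, triplet):
    """Hypothetical helper: return (scores, labels) over all candidate
    subject-object pairs for one target triplet."""
    raise NotImplementedError


def write_retrieval_ap(model, dset, target_triplets, csv_path):
    with open(csv_path, 'w') as f:
        writer = csv.writer(f)
        ap_triplets = []
        for triplet in target_triplets:
            scores, labels = score_triplet(model, dset, triplet)
            ap = average_precision_score(labels, scores)
            writer.writerow([triplet, ap])
            ap_triplets.append(ap)
        # Write mAP (np.nanmean skips triplets whose AP is undefined)
        writer.writerow(['mAP', np.nanmean(ap_triplets)])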
""" Get the triplets to retrieve """ #################################### subset_test = 'unrel' target_triplets = pickle.load(open('/sequoia/data2/jpeyre/datasets/unrel_iccv17/unrel-dataset/triplet_queries.pkl', 'rb')) # Unrel queries in VG vocab print('Computing retrieval on {} triplet queries'.format(len(target_triplets))) ################## """ Load model """ ################## logger_path = osp.join(opt.logger_dir, opt.exp_name) opt = parser.get_opts_from_dset(opt, dset) # Load model print('Loading model') model = models.get_model(opt) checkpoint = torch.load(osp.join(logger_path, 'model_' + opt.epoch_model + '.pth.tar'), map_location=lambda storage, loc: storage) model.load_state_dict(checkpoint['model']) model.eval() if torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") model = nn.DataParallel(model) if torch.cuda.is_available():