Example #1
import argparse
import os.path as osp
import numpy as np
from datasets.hico_api import Hico
from utils import Parser
""" Parsing options """
args = argparse.ArgumentParser()
parser = Parser(args)
opt = parser.make_options()
""" Load dataset """
data_path = '{}/{}'.format(opt.data_path, 'hico')
image_path = '{}/{}/{}'.format(opt.data_path, 'hico', 'images')
cand_dir = '{}/{}/{}'.format(opt.data_path, 'hico', 'detections')

dset = Hico(data_path,
            image_path,
            opt.test_split,
            cand_dir=cand_dir,
            thresh_file=opt.thresh_file,
            add_gt=False,
            train_mode=False,
            jittering=False,
            nms_thresh=opt.nms_thresh)
""" Load the test triplets """
target_triplets = dset.get_zeroshottriplets()  # evaluate the zero-shot triplets
#target_triplets = dset.visualphrases.words()  # uncomment to evaluate all triplets instead
""" Keys to analyze """
keys = ['s-sro-o', 's-r-o-sro']
""" Aggregate csv result files (from official HICO eval code) """
# Logger path
logger_path = osp.join(opt.logger_dir, opt.exp_name)

detection_path = parser.get_res_dir(opt, 'detections_' + opt.embedding_type)
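# --- Added sketch (not in the original snippet): one plausible way to
# consume the aggregated per-triplet AP csv files. The filename and the
# 'triplet'/'ap' column names are assumptions, not the format actually
# written by the official HICO eval code. ---
import csv

def mean_ap_over(filename, triplets):
    aps = []
    with open(filename) as f:
        for row in csv.DictReader(f):
            if row['triplet'] in triplets:
                aps.append(float(row['ap']))
    return sum(aps) / len(aps) if aps else 0.0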
Example #2
import argparse
import csv
import os.path as osp
from datasets.hico_api import Hico
from utils import Parser
""" Parsing options """
args = argparse.ArgumentParser()
parser = Parser(args)
opt = parser.make_options()
""" Load dataset """
data_path = '{}/{}'.format(opt.data_path, 'hico')
image_path = '{}/{}/{}'.format(opt.data_path, 'hico', 'images')
cand_dir = '{}/{}/{}'.format(opt.data_path, 'hico', 'detections')

dset = Hico(data_path,
            image_path,
            opt.test_split,
            cand_dir=cand_dir,
            thresh_file=opt.thresh_file,
            add_gt=False,
            train_mode=False,
            jittering=False,
            nms_thresh=opt.nms_thresh)
""" Key types """
keys = ['s-r-o', 's-sro-o', 's-r-o-sro']
""" Load the test triplets """
target_triplets = dset.get_zeroshottriplets()
subset = 'zeroshottriplet'

for key in keys:
    """ Load ap results for all triplets """
    filename_in = osp.join(opt.logger_dir, opt.exp_name, 'results_{}_{}_{}_{}_def.csv'.format(\
                                        opt.cand_test,\
                                        opt.test_split,\
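    # --- Added sketch (not in the original snippet): the remaining format
    # arguments are elided above. Once filename_in is complete, the rows can
    # be filtered to the zero-shot subset; the (triplet, ap) column order is
    # an assumption. ---
    filename_out = filename_in.replace('.csv', '_{}.csv'.format(subset))
    with open(filename_in) as f_in, open(filename_out, 'w', newline='') as f_out:
        writer = csv.writer(f_out)
        for row in csv.reader(f_in):
            if row[0] in target_triplets:
                writer.writerow(row)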
Example #3
store_ram = []
if opt.use_ram and opt.use_precompobjectscore:
    store_ram.append('objectscores')
if opt.use_ram and opt.use_precompappearance:
    store_ram.append('appearance')

data_path = '{}/{}'.format(opt.data_path, opt.data_name)
image_path = '{}/{}/{}'.format(opt.data_path, opt.data_name, 'images')
cand_dir = '{}/{}/{}'.format(opt.data_path, opt.data_name, 'detections')

dset = Hico(data_path,
            image_path,
            opt.test_split,
            cand_dir=cand_dir,
            thresh_file=opt.thresh_file,
            use_gt=False,
            train_mode=False,
            jittering=False,
            nms_thresh=opt.nms_thresh,
            store_ram=store_ram,
            l2norm_input=opt.l2norm_input)


dset_loader = TestSampler(dset,
                          use_image=opt.use_image,
                          use_precompappearance=opt.use_precompappearance,
                          use_precompobjectscore=opt.use_precompobjectscore)

loader = torch.utils.data.DataLoader(dset_loader,
                                     batch_size=8,
                                     shuffle=False,
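                                     # --- Added sketch: a hypothetical
                                     # completion of the truncated call;
                                     # num_workers=0 is an assumption ---
                                     num_workers=0)

# A minimal, hypothetical inference pass over the loader; the batch format
# produced by TestSampler is not shown in this snippet.
with torch.no_grad():
    for batch in loader:
        pass  # forward the batch through the model here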
Example #4
elif data_name == 'hicoforcocoa':
    from datasets.hico_api import Hico as Dataset
    splits = ['trainval', 'test']

elif data_name == 'cocoa':
    from datasets.cocoa_api import Cocoa as Dataset
    splits = ['all']

data_path = osp.join(DATA_PATH, data_name)
image_path = osp.join(data_path, 'images')
cand_dir = osp.join(data_path, 'detections')

proposals = {}
for split in splits:

    dataset = Dataset(data_path, image_path, split, cand_dir=cand_dir,
                      thresh_file='', use_gt=False, add_gt=True,
                      train_mode=False, jittering=False, store_ram=[])

    for im_id in dataset.image_ids:

        cand_boxes = dataset.get_boxes(im_id)
        obj_id = dataset.get_obj_id(im_id)
        proposals[im_id] = np.hstack((obj_id[:, None], cand_boxes))

        assert len(np.unique(obj_id)) == len(obj_id), 'duplicate obj_id detected'

    pickle.dump(proposals,
                open(osp.join(cand_dir, split + '_proposals.pkl'), 'wb'))
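# --- Added sketch (not in the original snippet): load the dumped proposals
# back; each row is [obj_id, box coordinates...] per the np.hstack above
# (the x1,y1,x2,y2 box layout is an assumption). ---
proposals = pickle.load(open(osp.join(cand_dir, split + '_proposals.pkl'), 'rb'))
for im_id, arr in proposals.items():
    obj_ids, boxes = arr[:, 0], arr[:, 1:]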
Example #5
store_ram = []
if opt.use_ram:
    if opt.use_precompobjectscore:
        store_ram.append('objectscores')
    if opt.use_precompappearance:
        store_ram.append('appearance')

data_path = '{}/{}'.format(opt.data_path, '%s')
image_path = '{}/{}/{}'.format(opt.data_path, '%s', 'images')
cand_dir = '{}/{}/{}'.format(opt.data_path, '%s', 'detections')

dset_train = Hico(data_path % 'hicoforcocoa',
                  image_path % 'hicoforcocoa',
                  opt.train_split,
                  cand_dir=cand_dir % 'hicoforcocoa',
                  thresh_file=opt.thresh_file,
                  use_gt=opt.use_gt,
                  add_gt=opt.add_gt,
                  train_mode=False,
                  jittering=False,
                  nms_thresh=opt.nms_thresh,
                  store_ram=[],
                  l2norm_input=opt.l2norm_input,
                  neg_GT=opt.neg_GT)


dset_test = Cocoa(data_path % 'cocoa',
                  image_path % 'cocoa',
                  'all',
                  cand_dir=cand_dir % 'cocoa',
                  thresh_file=opt.thresh_file,
                  use_gt=False,
                  add_gt=False,
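                  # --- Added sketch: hypothetical completion of the
                  # truncated call, mirroring the dset_train arguments
                  # above (an assumption, not the original code) ---
                  train_mode=False,
                  jittering=False,
                  nms_thresh=opt.nms_thresh,
                  store_ram=store_ram,
                  l2norm_input=opt.l2norm_input)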
Example #6
import os
import __init__
import numpy as np
import os.path as osp
from datasets.hico_api import Hico
import csv
import pickle

# Load vocabulary of triplets
root_path = './data'
data_path  = '{}/{}'.format(root_path, 'hico')
image_path = '{}/{}/{}'.format(root_path, 'hico', 'images')
cand_dir   = '{}/{}/{}'.format(root_path, 'hico', 'detections')

split = 'trainval'  # options: 'train', 'trainval'
dset = Hico(data_path, image_path, split, cand_dir)

# Get set of triplets
triplets_remove = dset.get_zeroshottriplets()

triplet_cat_remove = []
for triplet in triplets_remove:
    triplet_cat_remove.append(dset.visualphrases.word2idx[triplet])

# Build a new set of candidates excluding the triplet categories
cand_positives = pickle.load(open(osp.join(data_path, 'cand_positives_' + split + '.pkl'),'rb'))

idx_keep = []
for j in range(cand_positives.shape[0]):
    if j % 100000 == 0:
        print('Done {}/{}'.format(j, cand_positives.shape[0]))
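    # --- Added sketch (not in the original snippet): a hypothetical
    # remainder of the filtering loop; which column of cand_positives holds
    # the triplet category id is an assumption (last column here). ---
    if int(cand_positives[j, -1]) not in triplet_cat_remove:
        idx_keep.append(j)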