Example #1
def vg_check_pre_stats():
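    # Count how often each predicate occurs in the Visual Genome 1.2 train and
    # test relationship collections (MongoDB), pickle the two histograms and
    # print them sorted by frequency. The `zl` module (tick/tock timing and
    # pickle save/load helpers) is a project utility assumed to be importable.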
    client = MongoClient("mongodb://localhost:27017")
    db = client.visual_genome_1_2
    db_rel_train_all = db.relationships_all_train.find(no_cursor_timeout=True)
    db_rel_test_all = db.relationships_all_test.find(no_cursor_timeout=True)
    train_stats = {}
    test_stats = {}
    cnt = 0
    zl.tick()
    for db_rel in db_rel_train_all:
        if cnt % 1000 == 0:
            print cnt, zl.tock()
            zl.tick()
        cnt += 1
        for r in db_rel['relationships']:
            name = r['predicate']
            if name not in train_stats:
                train_stats[name] = 0
            train_stats[name] += 1

    for db_rel in db_rel_test_all:
        if cnt % 1000 == 0:
            print cnt, zl.tock()
            zl.tick()
        cnt += 1
        for r in db_rel['relationships']:
            name = r['predicate']
            if name not in test_stats:
                test_stats[name] = 0
            test_stats[name] += 1
    zl.save('output/train_pre_stats.pkl', train_stats)
    zl.save('output/test_pre_stats.pkl', test_stats)
    print zl.sort_dict_by_val(train_stats)
    print zl.sort_dict_by_val(test_stats)
Example #2
    def setup(self, bottom, top):
        """Setup the RoIDataLayer."""

        self._cur_idx = 0
        self.gt_labels = {}
        self.meta = h5py.File('/home/zawlin/Dropbox/proj/sg_vrd_meta.h5', 'r')
        self.cache = h5py.File('output/sg_vrd_cache.h5','r')
        self.cache_mem = {}
        if os.path.exists('output/cache/sg_vrd_gt.pkl'):
            self.gt_labels = zl.load('output/cache/sg_vrd_gt.pkl')
            glog.info('loaded gt data from cache')
        else:
            glog.info( 'Preloading gt')
            zl.tic()
            for k in self.meta['gt/train'].keys():
                rlp_labels = self.meta['gt/train/%s/rlp_labels'%k][...]
                sub_boxes = self.meta['gt/train/%s/sub_boxes'%k][...].astype(np.float)
                obj_boxes = self.meta['gt/train/%s/obj_boxes'%k][...].astype(np.float)
                if sub_boxes.shape[0]>0:
                    zeros = np.zeros((sub_boxes.shape[0],1), dtype=np.float)
                    # first index is always zero since we do one image by one image
                    sub_boxes = np.concatenate((zeros, sub_boxes),axis=1)
                    obj_boxes = np.concatenate((zeros, obj_boxes),axis=1)
                self.gt_labels[k] = {}
                self.gt_labels[k]['rlp_labels']=rlp_labels
                self.gt_labels[k]['sub_boxes']=sub_boxes
                self.gt_labels[k]['obj_boxes']=obj_boxes
            glog.info('done preloading gt %f'%zl.toc())
            zl.save('output/cache/sg_vrd_gt.pkl',self.gt_labels)

        self.imids = []
        for k in self.gt_labels.keys():
            self.imids.append(k)
        self.imidx =0
        random.shuffle(self.imids)
        # parse the layer parameter string, which must be valid YAML
        layer_params = yaml.load(self.param_str)

        self._num_classes = layer_params['num_classes']
        self._name_to_top_map = {}

        # data blob: holds a batch of N images, each with 3 channels
        idx = 0
        top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 1024,
                         50,50)
        self._name_to_top_map['conv_new_1'] = idx
        idx += 1

        top[idx].reshape(1, 5, 1, 1)
        self._name_to_top_map['sub_boxes'] = idx
        idx += 1

        top[idx].reshape(1, 5, 1, 1)
        self._name_to_top_map['obj_boxes'] = idx
        idx += 1
        # labels blob: R categorical labels in [0, ..., K] for K foreground
        # classes plus background
        top[idx].reshape(1, 1, 1, 1)
        self._name_to_top_map['labels'] = idx
Example #3
def save_test_words_to_pickle():
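    # Look up word2vec embeddings for a small hand-picked word list in the
    # pretrained GoogleNews vectors and pickle the name->vector dict for testing.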
    ilsvrc_words =  ['antelope','cup','ball','cooking']
    model = gensim.models.Word2Vec.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True)
    word2vec = {}
    for w in ilsvrc_words:
        w = w.replace('_',' ')
        word2vec[w] = model[w]
    zl.save('test_word2vec',word2vec)
Example #4
def save_ilsvrc_vectors_to_pickle():
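    # Fetch word2vec embeddings for every ILSVRC class name present in the
    # pretrained GoogleNews model and pickle the resulting name->vector dict,
    # reporting any names missing from the model.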
    ilsvrc_words = get_ilsvrc_word_list()
    model = gensim.models.Word2Vec.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True)
    word2vec = {}
    for w in ilsvrc_words:
        w = w.replace('_',' ')
        if w in model:
            word2vec[w] = model[w]
        else:
            print '%s not in model'%w
    zl.save('ilsvrc_word2vec',word2vec)
Example #5
def vg_count_only_one_triplet():
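    # Count every subject_predicate_object triplet over the VG train and test
    # relationship collections, pickle the counts, and pass the triplets seen
    # at least 5 times on to vg_total_annotation_count().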
    client = MongoClient("mongodb://localhost:27017")
    db = client.visual_genome_1_2
    db_results = db.relationships_all_train.find(no_cursor_timeout=True)
    cnt = 0
    spo_info = {}
    spo_list = []
    zl.tick()
    for doc in db_results:
        id = doc['image_id']
        cnt += 1
        if cnt % 1000 == 0:
            print cnt, zl.tock()
            zl.tick()
        rcnt = 0
        for r in doc['relationships']:
            pre = r['predicate']
            sub_name = r['subject']['name']
            obj_name = r['object']['name']
            spo = sub_name + '_' + pre + '_' + obj_name
            if spo not in spo_info:
                spo_info[spo] = 0
            spo_info[spo] += 1

    db_results_2 = db.relationships_all_test.find(no_cursor_timeout=True)

    for doc in db_results_2:
        id = doc['image_id']
        cnt += 1
        if cnt % 1000 == 0:
            print cnt, zl.tock()
            zl.tick()
        rcnt = 0
        for r in doc['relationships']:
            pre = r['predicate']
            sub_name = r['subject']['name']
            obj_name = r['object']['name']
            spo = sub_name + '_' + pre + '_' + obj_name
            if spo not in spo_info:
                spo_info[spo] = 0
            spo_info[spo] += 1
    zl.save('output/spo_info_vg.pkl', spo_info)
    #total_pairs = len(sub_obj_info.keys())+0.0
    total_spo = len(spo_info.keys()) + 0.0
    one_count = 0
    for k in spo_info.keys():
        if spo_info[k] >= 5:
            spo_list.append(k)
            one_count += 1
    #print total_spo,one_count
    vg_total_annotation_count(spo_list)
Example #6
def vg_make_meta_visual_phrase():
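    # Build the visual-phrase meta file: count every subject_predicate_object
    # triplet in the VG training ground truth, then write a sorted triplet
    # vocabulary (with __background__ at index 0) into vg1_2_vp_meta.h5 as
    # idx2name/name2idx mappings.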
    m = h5py.File('data/vg1_2_meta.h5', 'r', driver='core')

    h5f = h5py.File('data/vg1_2_vp_meta.h5')

    triplets = {}
    cnt = 0
    zl.tick()
    for k in m['gt/train'].keys():
        if cnt % 1000 == 0:
            print cnt, zl.tock()
            zl.tick()
        cnt += 1
        # sub_boxes = m['gt/train/%s/sub_boxes'%k][...]
        # obj_boxes = m['gt/train/%s/obj_boxes'%k][...]
        rlp_labels = m['gt/train/%s/rlp_labels' % k][...]
        for i in xrange(rlp_labels.shape[0]):
            # sub_box = sub_boxes[i]
            # obj_box = obj_boxes[i]
            rlp_label = rlp_labels[i]
            # joint_bbox = [min(sub_bbox[0],obj_bbox[0]), min(sub_bbox[1],obj_bbox[1]),max(sub_bbox[2],obj_bbox[2]),max(sub_bbox[3],obj_bbox[3])]

            s_lbl = zl.idx2name_cls(m, rlp_label[0])
            o_lbl = zl.idx2name_cls(m, rlp_label[2])
            p_lbl = zl.idx2name_pre(m, rlp_label[1])

            spo = '%s_%s_%s' % (s_lbl, p_lbl, o_lbl)
            # spo = '%d_%d_%d'%(rlp_label[0],rlp_label[1],rlp_label[2])
            if spo not in triplets:
                triplets[spo] = 0
            triplets[spo] += 1
    zl.save('output/pkl/triplets_train_vp.pkl', triplets)
    triplets_sorted = zl.sort_dict_by_val(triplets)

    triplets_ok = []

    for k, v in triplets_sorted:
        triplets_ok.append(k)
        print k, v
    triplets_ok = sorted(triplets_ok)
    triplets_ok = ['__background__'] + triplets_ok
    for i in xrange(len(triplets_ok)):
        h5f['meta/tri/idx2name/%d' % i] = triplets_ok[i]
        h5f['meta/tri/name2idx/%s' % triplets_ok[i]] = i
    print len(triplets_ok)
Example #7
def vg_stats_predicate():
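    # For every subject_object pair in the canonicalised VG relationships,
    # collect how many times it is annotated and with how many distinct
    # predicates, then report the average number of distinct predicates per
    # annotation over pairs annotated at least twice.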
    client = MongoClient("mongodb://localhost:27017")
    db = client.visual_genome_1_2
    db_results = db.relationships_cannon.find(no_cursor_timeout=True)
    cnt = 0
    mappings = make_mappings()
    mappings_p = make_p_mappings()
    wnl = WordNetLemmatizer()
    spl = SpellingReplacer()
    sub_obj_info = {}
    zl.tick()
    for doc in db_results:
        id = doc['image_id']
        cnt += 1
        if cnt % 1000 == 0:
            print cnt, zl.tock()
            zl.tick()
        rcnt = 0
        for r in doc['relationships']:
            pre = r['predicate']
            sub_name = r['subject']['name']
            obj_name = r['object']['name']
            so_pair = sub_name + '_' + obj_name
            if so_pair not in sub_obj_info:
                so_info = {'total': 0, 'predicates': []}
                sub_obj_info[so_pair] = so_info
            else:
                so_info = sub_obj_info[so_pair]
            so_info['total'] += 1
            if pre not in so_info['predicates']:
                so_info['predicates'].append(pre)
    zl.save('output/sub_obj_info.pkl', sub_obj_info)
    #total_pairs = len(sub_obj_info.keys())+0.0
    total_pairs = 0.0
    total_of_averages = 0.0
    for k in sub_obj_info.keys():
        so_info = sub_obj_info[k]
        total_predicates = len(so_info['predicates']) + 0.0
        if so_info['total'] < 2: continue
        total_pairs += 1
        total_annotated_pairs = so_info['total'] + 0.0
        avg_predicates_for_this_pair = total_predicates / total_annotated_pairs
        total_of_averages += avg_predicates_for_this_pair
    total_of_averages /= total_pairs
    print 'total_pairs = %d' % total_pairs
    print 'total_of_averages = %f' % total_of_averages
Example #8
def gen_meta_for_retrieval():
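    # Build retrieval ground truth for VG: for each unique rlp_label triplet in
    # the test set, record the image ids that contain it and how often it
    # occurs, sort everything by count (descending) and pickle the result.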
    out_pkl = 'output/pkl/vg_retr_meta.pkl'
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5', 'r')

    rlp_labels = []
    files = []
    counts = []
    cnt = 0
    zl.tick()
    for k in m['gt/test']:
        if cnt % 100 == 0:
            print cnt, zl.tock()
            zl.tick()
        cnt += 1
        gt_rlp_labels = m['gt/test'][k]['rlp_labels'][...]
        for i in xrange(gt_rlp_labels.shape[0]):
            gt_rlp_label = gt_rlp_labels[i]
            if len(rlp_labels) == 0:
                rlp_labels.append(gt_rlp_label.tolist())
                files.append([k])
                counts.append(1)
                continue
            bInd = np.all(gt_rlp_label == rlp_labels, axis=1)
            ind = np.arange(len(rlp_labels))[bInd]
            if len(ind) == 0:
                rlp_labels.append(gt_rlp_label.tolist())
                files.append([k])
                counts.append(1)
            else:
                ind = int(ind[0])
                files[ind].append(k)
                counts[ind] += 1
                # rlp_labels.append(gt_rlp_label.tolist())
                # files.append([k])
                # counts.append(1)
    rlp_labels = np.array(rlp_labels)
    files = np.array(files)
    counts = np.array(counts)

    ind = np.argsort(counts)[::-1]

    counts = counts[ind]
    files = files[ind]
    rlp_labels = rlp_labels[ind]

    retr_meta = {'counts': counts, 'files': files, 'rlp_labels': rlp_labels}
    zl.save(out_pkl, retr_meta)
Example #9
def vg_count_top():
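    # Tally how often each subject, object and predicate name appears in the
    # canonicalised VG relationships (skipping C_FAILED entries, and pooling
    # subjects and objects into objects_all), pickle the four histograms and
    # keep sorted copies for inspection.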
    client = MongoClient("mongodb://localhost:27017")
    db = client.visual_genome_1_2
    db_results = db.relationships_cannon.find(no_cursor_timeout=True)

    cnt = 0
    start = time.time()
    last = time.time()

    subjects = {}
    objects = {}
    objects_all = {}
    predicate = {}
    cnt = 0
    for doc in db_results:
        imid = doc['image_id']
        cnt += 1
        if cnt % 10000 == 0:
            print cnt
        rcnt = 0
        for r in doc['relationships']:
            if r['predicate'] == 'C_FAILED': continue
            if r['subject']['name'] == 'C_FAILED': continue
            if r['object']['name'] == 'C_FAILED': continue

            rcnt += 1
            if r['predicate'] not in predicate:
                predicate[r['predicate']] = 1
            else:
                predicate[r['predicate']] += 1
            if r['subject']['name'] not in subjects:
                subjects[r['subject']['name']] = 1
            else:
                subjects[r['subject']['name']] += 1
            if r['object']['name'] not in objects:
                objects[r['object']['name']] = 1
            else:
                objects[r['object']['name']] += 1
            if r['subject']['name'] not in objects_all:
                objects_all[r['subject']['name']] = 1
            else:
                objects_all[r['subject']['name']] += 1
            if r['object']['name'] not in objects_all:
                objects_all[r['object']['name']] = 1
            else:
                objects_all[r['object']['name']] += 1
    zl.save('output/objects_all.pkl', objects_all)
    zl.save('output/objects.pkl', objects)
    zl.save('output/subjects.pkl', subjects)
    zl.save('output/predicate.pkl', predicate)
    objects_all_sorted = sorted(objects_all.items(),
                                key=operator.itemgetter(1))
    objects_sorted = sorted(objects.items(), key=operator.itemgetter(1))
    subjects_sorted = sorted(subjects.items(), key=operator.itemgetter(1))
    predicate_sorted = sorted(predicate.items(), key=operator.itemgetter(1))
Example #10
    def setup(self, bottom, top):
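        # Data layer setup for predicate classification: preload per-image
        # subject/object visual features and predicate labels from a precalc
        # HDF5 file (cached as a pickle on first run), then declare two tops:
        # the concatenated subject+object feature vector (2 x 4096) and its
        # predicate label.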
        self._cur_idx = 0
        self.vgg_data = {}
        vgg_h5 = h5py.File(
            "output/precalc/vg1_2_2016_predicate_exp_train.hdf5", 'r')
        layer_params = yaml.load(self.param_str_)

        self.imids = []
        for k in vgg_h5.keys():
            self.imids.append(k)
        self.imidx = 0

        if os.path.exists('output/cache/vg1_2_2016_pre_train_concat.pkl'):
            self.vgg_data = zl.load(
                'output/cache/vg1_2_2016_pre_train_concat.pkl')
            print 'loaded train data from cache'
        else:
            print 'Preloading training data'
            zl.tick()
            for k in vgg_h5.keys():
                sub_visual = vgg_h5[k]['sub_visual'][...]
                obj_visual = vgg_h5[k]['obj_visual'][...]
                pre_label = vgg_h5[k]['pre_label'][...]
                self.vgg_data[k] = {}
                self.vgg_data[k]['sub_visual'] = sub_visual
                self.vgg_data[k]['obj_visual'] = obj_visual
                self.vgg_data[k]['pre_label'] = pre_label
            print 'done preloading training data %f' % zl.tock()
            zl.save('output/cache/vg1_2_2016_pre_train_concat.pkl',
                    self.vgg_data)
            vgg_h5.close()

        self._batch_size = layer_params['batch_size']
        self.train_data = []
        self._name_to_top_map = {}

        # data blob: holds a batch of N images, each with 3 channels
        # top[0].reshape(self._batch_size, 4096 * 2 )

        top[0].reshape(self._batch_size, 2 * 4096)
        top[1].reshape(self._batch_size)
        self._name_to_top_map['visual'] = 0
        self._name_to_top_map['label'] = 1
Example #11
    def setup(self, bottom, top):
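        # Data layer setup: preload precomputed classemes/visual features/
        # locations per training image and the VG ground-truth boxes and labels
        # (both cached as pickles), shuffle the image ids, and declare two
        # tops: the concatenated subject+object visual feature (4096 x 2) and
        # the predicate label.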
        self._cur_idx = 0
        self.vgg_data = {}
        self.gt_labels = {}
        vgg_h5 = h5py.File("output/precalc/vg1_2_2016_train.hdf5", 'r')
        if os.path.exists('output/cache/vg1_2_2016_train.pkl'):
            self.vgg_data = zl.load('output/cache/vg1_2_2016_train.pkl')
            print 'loaded train data from cache'
        else:
            print 'Preloading training data'
            zl.tick()
            for k in vgg_h5.keys():
                classemes = vgg_h5[k]['classemes'][...]
                visuals = vgg_h5[k]['visuals'][...]
                locations = vgg_h5[k]['locations'][...]
                cls_confs = vgg_h5[k]['cls_confs'][...]
                self.vgg_data[k] = {}
                self.vgg_data[k]['classemes'] = classemes
                self.vgg_data[k]['visuals'] = visuals
                self.vgg_data[k]['cls_confs'] = cls_confs
                self.vgg_data[k]['locations'] = locations
            print 'done preloading training data %f' % zl.tock()
            zl.save('output/cache/vg1_2_2016_train.pkl', self.vgg_data)
            vgg_h5.close()

        self.meta = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5', 'r')
        if os.path.exists('output/cache/vg1_2_2016_gt.pkl'):
            self.gt_labels = zl.load('output/cache/vg1_2_2016_gt.pkl')
            print 'loaded gt data from cache'
        else:
            print 'Preloading gt'
            zl.tick()
            for k in self.meta['gt/train'].keys():
                rlp_labels = self.meta['gt/train/%s/rlp_labels' % k][...]
                sub_boxes = self.meta['gt/train/%s/sub_boxes' % k][...].astype(
                    np.float)
                obj_boxes = self.meta['gt/train/%s/obj_boxes' % k][...].astype(
                    np.float)
                self.gt_labels[k] = {}
                self.gt_labels[k]['rlp_labels'] = rlp_labels
                self.gt_labels[k]['sub_boxes'] = sub_boxes
                self.gt_labels[k]['obj_boxes'] = obj_boxes
            print 'done preloading gt %f' % zl.tock()
            zl.save('output/cache/vg1_2_2016_gt.pkl', self.gt_labels)

        self.imids = []
        for k in self.vgg_data.keys():
            self.imids.append(k)
        self.imidx = 0
        random.shuffle(self.imids)
        layer_params = yaml.load(self.param_str_)

        self._batch_size = layer_params['batch_size']
        self.train_data = []
        self._name_to_top_map = {}

        # top[0].reshape(self._batch_size, 201*2)
        top[0].reshape(self._batch_size, 4096 * 2)
        # top[0].reshape(self._batch_size, 4*2)
        top[1].reshape(self._batch_size)

        # self._name_to_top_map['classeme'] = 0
        self._name_to_top_map['visual'] = 0
        # self._name_to_top_map['location'] = 0
        self._name_to_top_map['label'] = 1
Example #12
def run_retrieval_zeroshot():
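    # Zero-shot retrieval evaluation on sg_vrd: for every zero-shot triplet in
    # the retrieval meta, rank all test images by the score of matching
    # detections (as written, a random score is substituted for the averaged
    # confidence) and report recall within the top r_at_k=5 ranks plus the
    # median rank of the first hit.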
    # h5_path = 'output/sg_vrd_2016_result_all_19500.hdf5'
    # h5_path = 'output/sg_vrd_2016_result_diff_all_5000.hdf5'
    # h5_path= 'output/results/lu_method_results.hdf5'
    # h5_path = 'output/sg_vrd_2016_result.hdf5.dd'
    # h5_path = 'output/results/lu_method_results_max.hdf5'
    h5_path = 'output/results/lu_visual_method_results.hdf5'

    data_root = 'data/sg_vrd_2016/Data/sg_test_images/'
    m = h5py.File('/home/zawlin/Dropbox/proj/sg_vrd_meta.h5', 'r', 'core')
    zeroshots = m['meta/zeroshots'][...]
    gt_cache_path = 'output/cache/sg_vrd_gt_cache.pkl'
    gt_h5f = {}
    np.random.seed(76)
    if os.path.exists(gt_cache_path):
        print 'load gt from cache'
        gt_h5f = zl.load(gt_cache_path)
    else:
        print 'cacheing gt'
        for k in m['gt/test']:
            gt_h5f[k] = {}
            sub_boxes = m['gt/test'][k]['sub_boxes'][...]
            rlp_labels = m['gt/test'][k]['rlp_labels'][...]
            obj_boxes = m['gt/test'][k]['obj_boxes'][...]
            gt_h5f[k]['sub_boxes'] = sub_boxes
            gt_h5f[k]['obj_boxes'] = obj_boxes
            gt_h5f[k]['rlp_labels'] = rlp_labels
        print 'caching gt done'
        zl.save(gt_cache_path, gt_h5f)
    cache_path = 'output/cache/%s.pkl' % h5_path.split('/')[-1]
    # h5f = h5py.File('output/sg_vrd_2016_result.classeme.hdf5')
    if os.path.exists(cache_path):
        print 'load from cache'
        h5f = zl.load(cache_path)
    else:
        h5_in = h5py.File(h5_path, 'r')
        h5f = {}
        print 'preloading data'
        for i in h5_in:
            h5f[i] = {}
            h5f[i]['rlp_labels'] = h5_in[i]['rlp_labels'][...]
            h5f[i]['rlp_confs'] = h5_in[i]['rlp_confs'][...]
            h5f[i]['sub_boxes'] = h5_in[i]['sub_boxes'][...]
            h5f[i]['obj_boxes'] = h5_in[i]['obj_boxes'][...]
        zl.save(cache_path, h5f)
        print 'preloading data done'
    #h5f = h5py.file('output/results/lu_method_results.hdf5')
    retr_meta = zl.load('output/pkl/vr_retr_meta.pkl')
    thresh = 0.0
    images = {}
    imids = h5f.keys()
    results = {}
    cnt = 0
    r_acc_100 = 0
    r_acc_50 = 0

    tp_total = 0
    gt_total = 0
    median = []
    for k in xrange(len(retr_meta['rlp_labels'])):
        # if k>1000:
        # break
        rlp_label = retr_meta['rlp_labels'][k]
        if not np.any(np.all(zeroshots == rlp_label, axis=1)): continue
        gt_files = retr_meta['files'][k]

        cnt += 1
        results = {}
        zl.tic()
        for imid in imids:
            rlp_labels = h5f[imid]['rlp_labels']
            rlp_confs = h5f[imid]['rlp_confs']
            if rlp_confs.shape[0] == 0:
                results[imid] = 0.0
                continue
            indexor = np.arange(rlp_labels.shape[0])
            ind = indexor[np.all(rlp_labels == rlp_label, axis=1)]
            if ind.shape[0] == 0:
                results[imid] = 0.0
                continue
            confs = rlp_confs[ind]
            results[imid] = np.random.uniform(-1, 1)  #np.average(confs)

        results_sorted = zl.sort_dict_by_val(results)
        total_gt = len(gt_files) + 0.0
        gt_total += total_gt + 0.0
        tp_50 = 0.
        tp_100 = 0.
        found = False
        delay = 0
        s_lbl = zl.idx2name_cls(m, rlp_label[0])
        p_lbl = zl.idx2name_pre(m, rlp_label[1])
        o_lbl = zl.idx2name_cls(m, rlp_label[2])
        lbl_str = '%s_%s_%s' % (s_lbl, p_lbl, o_lbl)
        r_at_k = 5
        for i in xrange(len(results_sorted)):
            imid, v = results_sorted[i]
            if found and i >= r_at_k:
                break
            cor = imid in gt_files
            if cor:
                if not found:
                    found = True
                    median.append(i)
                if i < r_at_k:
                    tp_100 += 1
                    tp_total += 1
                    if i < 50: tp_50 += 1
            # if True:
            # cor_or_not = str(cor)
            # if cor :delay=0
            # if delay ==0:
            # im = cv2.imread(data_root+imid+'.jpg')
            # cv2.putText(im, cor_or_not, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            # cv2.putText(im, lbl_str, (50, 80), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            # cv2.putText(im, str(i), (50, 100), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            # cv2.imshow('im',im)
            # c = cv2.waitKey(delay)&0xFF
            # if c ==27:
            # exit(0)
            # if c == ord('s'):
            # delay = 1-delay

            # if c == ord('c'):
            # delay = 1
        r_50 = tp_50 / r_at_k  #total_gt
        r_100 = tp_100 / r_at_k  #total_gt
        r_acc_50 += r_50
        r_acc_100 += r_100
        med = np.median(median)
        print '%d %f %f %f %f %d %f' % (cnt, r_50, r_100, r_acc_50 / cnt,
                                        r_acc_100 / cnt, med, zl.toc())
Example #13
def run_retrieval_vp_v2():
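    # Retrieval evaluation for the visual-phrase model on VG: map detected
    # visual-phrase labels back to (subject, predicate, object) triplets via
    # rlp_map and rank test images per query triplet. As written, the loop only
    # averages the per-triplet ground-truth counts for the first 1000 queries
    # and exits before the ranking code below is reached.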
    # h5_path = 'output/sg_vrd_2016_result.classeme.hdf5'
    h5_path = 'output/precalc/vg1_2_vp2016_test_nms2_.4.hdf5'
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5', 'r', 'core')
    m_vp = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_vp_meta.h5', 'r', 'core')
    cache_path = 'output/cache/%s.pkl' % h5_path.split('/')[-1]
    data_root = '/home/zawlin/data/data_vrd/vg_1.2/'
    rlp_map = []
    for i in xrange(1, len(m_vp['meta/tri/idx2name'].keys())):
        tri = str(m_vp['meta/tri/idx2name'][str(i)][...])
        s_lbl = tri.split('_')[0]
        p_lbl = tri.split('_')[1]
        o_lbl = tri.split('_')[2]
        rlp_label = [
            zl.name2idx_cls(m, s_lbl),
            zl.name2idx_pre(m, p_lbl),
            zl.name2idx_cls(m, o_lbl)
        ]
        rlp_map.append(rlp_label)
    rlp_map = np.array(rlp_map)
    if os.path.exists(cache_path):
        print 'load from cache'
        h5f = zl.load(cache_path)
    else:
        h5_in = h5py.File(h5_path, 'r')
        h5f = {}
        print 'preloading data'
        for i in h5_in:
            h5f[i] = {}
            h5f[i]['labels'] = h5_in[i]['labels'][...]
            h5f[i]['confs'] = h5_in[i]['confs'][...]
            h5f[i]['boxes'] = h5_in[i]['boxes'][...]
        zl.save(cache_path, h5f)
        print 'preloading data done'
    retr_meta = zl.load('output/pkl/vg_retr_meta.pkl')
    thresh = 0.0
    images = {}
    imids = h5f.keys()
    results = {}
    cnt = 0
    r_acc_100 = 0
    r_acc_50 = 0

    tp_total = 0
    gt_total = 0
    median = []
    total = 0
    retr_meta = zl.load('output/pkl/vg_retr_meta.pkl')
    for k in xrange(len(retr_meta['rlp_labels'])):
        total += retr_meta['counts'][k]
        if k > 1000:
            print total / 1000.
            exit(0)
            break
        continue
        cnt += 1
        rlp_label = retr_meta['rlp_labels'][k]
        gt_files = retr_meta['files'][k]
        # print gt_files
        # exit(0)
        # for f in gt_files:
        # impath= zl.imid2path(m,f)
        # print impath
        # im= cv2.imread(data_root+impath)
        # cv2.imshow('im',im)
        # cv2.waitKey(0)
        results = {}
        zl.tick()
        ranks = []
        for imid in imids:
            labels = h5f[imid]['labels'] - 1
            rlp_confs = h5f[imid]['confs']
            rlp_labels = rlp_map[labels]
            if rlp_labels.shape[0] == 0:
                results[imid] = 0.0
                continue
            indexor = np.arange(rlp_labels.shape[0])
            ind = indexor[np.all(rlp_labels == rlp_label, axis=1)]
            if ind.shape[0] == 0:
                results[imid] = 0.0
                continue
            confs = rlp_confs[ind]
            results[imid] = np.average(confs)

        results_sorted = zl.sort_dict_by_val(results)
        total_gt = len(gt_files) + 0.0
        gt_total += total_gt + 0.0
        tp_50 = 0.
        tp_100 = 0.
        found = False
        s_lbl = zl.idx2name_cls(m, rlp_label[0])
        p_lbl = zl.idx2name_pre(m, rlp_label[1])
        o_lbl = zl.idx2name_cls(m, rlp_label[2])
        lbl_str = '%s_%s_%s' % (s_lbl, p_lbl, o_lbl)

        delay = 0
        for i in xrange(len(results_sorted)):
            imid, v = results_sorted[i]
            impath = zl.imid2path(m, imid)
            if found and i >= 5:
                break
            # print gt_files
            cor = imid in gt_files
            if cor:
                if not found:
                    found = True
                    median.append(i)
                if i < 5:
                    tp_100 += 1
                    tp_total += 1
                    if i < 50: tp_50 += 1
            # if True:
            # cor_or_not = str(cor)
            # if cor :delay=0
            # if delay ==0:
            # im = cv2.imread(data_root+impath)
            # cv2.putText(im, cor_or_not, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            # cv2.putText(im, lbl_str, (50, 80), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            # cv2.putText(im, str(i), (50, 100), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
            # cv2.imshow('im',im)
            # c = cv2.waitKey(delay)&0xFF
            # if c ==27:
            # exit(0)
            # if c == ord('s'):
            # delay = 1-delay

            # if c == ord('c'):
            # delay = 1

        r_50 = tp_50 / 5  #total_gt
        r_100 = tp_100 / 5  #total_gt
        r_acc_50 += r_50
        r_acc_100 += r_100
        med = np.median(median)
        print '%d %f %f %f %f %d %f' % (cnt, r_50, r_100, r_acc_50 / cnt,
                                        r_acc_100 / cnt, med, zl.tock())
Example #14
def run_relation(model_type, iteration):
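    # Run the relation-prediction network on sg_vrd test images: pair up the
    # top-20 detected boxes per image (score > 0.01), predict a predicate for
    # every subject-object pair with the Caffe net, visualise the subject boxes
    # with OpenCV, and write rlp_labels, confidences and boxes to a result
    # HDF5 file.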
    cache_h5 = h5py.File('output/sg_vrd_cache.h5')['test']
    result = h5py.File('output/sg_vrd_2016_result_' + model_type + '_' +
                       iteration + '.hdf5')
    m = h5py.File('data/sg_vrd_meta.h5')
    #--------------cache boxes-----------------
    h5_boxes = h5py.File('output/precalc/sg_vrd_objs.hdf5')
    cache_boxes = {}
    if os.path.exists('output/cache/sg_vrd_objs_test.pkl'):
        cache_boxes = zl.load('output/cache/sg_vrd_objs_test.pkl')
        glog.info('loaded obj data from cache')
    else:
        glog.info('Preloading obj')
        zl.tic()
        for k in h5_boxes['test'].keys():
            boxes = h5_boxes['test/%s/boxes' % k][...]
            cache_boxes[k] = boxes
        glog.info('done preloading obj %f' % zl.toc())
        # save under the same path that is checked above so the cache is reused
        zl.save('output/cache/sg_vrd_objs_test.pkl', cache_boxes)
    #--------------cache boxes-----------------

    #--------------cache old boxes-------------
    h5_boxes = h5py.File('output/sg_vrd_2016_test.hdf5')
    cache_old_boxes = {}
    if os.path.exists('output/cache/sg_vrd_objs_test_vgg.pkl'):
        cache_old_boxes = zl.load('output/cache/sg_vrd_objs_test_vgg.pkl')
        glog.info('loaded obj data from cache')
    else:
        glog.info('Preloading obj')
        zl.tic()
        for k in h5_boxes.keys():
            locations = h5_boxes['%s/locations' % k][...]
            cls_confs = h5_boxes['%s/cls_confs' % k][...]
            boxes = np.concatenate((locations, cls_confs[:, 1, np.newaxis],
                                    cls_confs[:, 0, np.newaxis]),
                                   axis=1)
            cache_old_boxes[k] = boxes
        glog.info('done preloading obj %f' % zl.toc())
        # save under the same path that is checked above so the cache is reused
        zl.save('output/cache/sg_vrd_objs_test_vgg.pkl', cache_old_boxes)

    #--------------cache old boxes-------------
    data_root = 'data/sg_vrd_2016/Data/sg_test_images/'
    keep = 100
    thresh = 0.0001
    prototxt = 'models/sg_vrd/rel_pre_iccv/test_' + model_type + '.prototxt'
    model_path = 'output/rel_iccv/' + model_type + '_iter_' + iteration + '.caffemodel'
    net = caffe.Net(prototxt, model_path, caffe.TEST)
    # sio.savemat('output/'+model_type+'.mat',{'params_'+model_type:net.params['relation'][0].data})
    # exit(0)
    #net = caffe.Net('models/sg_vrd/relation/test.prototxt','output/models/sg_vrd_relation_vgg16_iter_264000.caffemodel',caffe.TEST)
    cnt = 0
    for imid in cache_h5.keys():
        cnt += 1
        if cnt % 10 == 0:
            glog.info(cnt)
        obj_boxes_gt = m['gt/test'][imid]['obj_boxes'][...]
        sub_boxes_gt = m['gt/test'][imid]['sub_boxes'][...]
        rlp_labels_gt = m['gt/test'][imid]['rlp_labels'][...]
        rlp_labels = []
        rlp_confs = []
        sub_boxes = []
        obj_boxes = []
        #boxes = cache_boxes[imid]
        boxes = cache_old_boxes[imid]

        if boxes.shape[0] >= 2:
            for s in xrange(boxes[:20].shape[0]):
                for o in xrange(boxes[:20].shape[0]):
                    if s == o: continue
                    if boxes[s][4] < 0.01: continue
                    if boxes[o][4] < 0.01: continue
                    sbox = boxes[s][:4]
                    obox = boxes[o][:4]
                    rlp_labels.append([boxes[s, 5], -1, boxes[o, 5]])
                    rlp_confs.append(boxes[s, 4] + boxes[o, 4])
                    sub_boxes.append(sbox)
                    obj_boxes.append(obox)
            if len(sub_boxes) <= 0: continue

            #sub_box = np.array(sub_box)
            #obj_box = np.array(obj_box)
            im_path = C.get_sg_vrd_path_test(imid)
            im_detect(net, im_path, sub_boxes, obj_boxes)
            relation_prob = net.blobs['relation_prob'].data[...]
            for r in xrange(relation_prob.shape[0]):
                argmax = np.argmax(relation_prob[r, ...])
                rs = relation_prob[r, argmax].squeeze()

                rlp_labels[r][1] = argmax
                rlp_confs[r] = rlp_confs[r] + rs

        im = cv2.imread(data_root + imid + '.jpg')
        for i in xrange(len(sub_boxes)):
            sb = sub_boxes[i]
            cv2.rectangle(im, (int(sb[0]), int(sb[1])),
                          (int(sb[2]), int(sb[3])), (255, 0, 0), 2)
            pass
        cv2.imshow('im', im)
        #if cv2.waitKey(0)==27:
        #    exit(0)

        #rlp_confs.append(rlp_conf)
        #rlp_labels.append(rlp_label)
        #sub_boxes.append(sub_box)
        #obj_boxes.append(obj_box)
        # for i in xrange(70):
        # rs = relation_score[i]
        # if rs>0.0:
        # predicate =i
        # #print relation_score[predicate]
        # rlp_label = np.array([sub_cls[s],predicate,obj_cls[s]]).astype(np.int32)
        # #print '%s %s %s %f'%(m['meta/cls/idx2name/'+str(rlp_label[0])][...],m['meta/pre/idx2name/'+str(rlp_label[1])][...],m['meta/cls/idx2name/'+str(rlp_label[2])][...],rs)
        # rlp_conf = rs#+sub_score+obj_score#relation_score[predicate]

        # rlp_confs.append(rlp_conf)
        # rlp_labels.append(rlp_label)
        # sub_boxes.append(sub_boxes_gt[s])
        # obj_boxes.append(obj_boxes_gt[s])

        result.create_dataset(imid + '/rlp_confs',
                              dtype='float16',
                              data=np.array(rlp_confs).astype(np.float16))
        result.create_dataset(imid + '/sub_boxes',
                              dtype='float16',
                              data=np.array(sub_boxes).astype(np.float16))
        result.create_dataset(imid + '/obj_boxes',
                              dtype='float16',
                              data=np.array(obj_boxes).astype(np.float16))
        result.create_dataset(imid + '/rlp_labels',
                              dtype='float16',
                              data=np.array(rlp_labels).astype(np.float16))
Example #15
    def setup(self, bottom, top):
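        # Data layer setup for predicate classification from classeme features:
        # preload ground-truth boxes/labels and per-image subject/object
        # classeme vectors with predicate labels (cached as pickles on first
        # run), parse the YAML layer parameters, then declare two tops: the
        # concatenated 2 x 101-d classeme input and the predicate label.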
        self._cur_idx = 0
        self.rdata = sio.loadmat('data/meta/vrd/annotation_train.mat',
                                 struct_as_record=False,
                                 squeeze_me=True)
        self.meta = h5py.File('data/sg_vrd_meta.h5', 'r')
        self.gt_labels = {}
        if os.path.exists('output/cache/sg_vrd_gt.pkl'):
            self.gt_labels = zl.load('output/cache/sg_vrd_gt.pkl')
            glog.info('loaded gt data from cache')
        else:
            glog.info('Preloading gt')
            zl.tic()
            for k in self.meta['gt/train'].keys():
                rlp_labels = self.meta['gt/train/%s/rlp_labels' % k][...]
                sub_boxes = self.meta['gt/train/%s/sub_boxes' % k][...].astype(
                    np.float)
                obj_boxes = self.meta['gt/train/%s/obj_boxes' % k][...].astype(
                    np.float)
                if sub_boxes.shape[0] > 0:
                    zeros = np.zeros((sub_boxes.shape[0], 1), dtype=np.float)
                    # first index is always zero since we do one image by one image
                    sub_boxes = np.concatenate((zeros, sub_boxes), axis=1)
                    obj_boxes = np.concatenate((zeros, obj_boxes), axis=1)
                self.gt_labels[k] = {}
                self.gt_labels[k]['rlp_labels'] = rlp_labels
                self.gt_labels[k]['sub_boxes'] = sub_boxes
                self.gt_labels[k]['obj_boxes'] = obj_boxes
            glog.info('done preloading gt %f' % zl.toc())
        vgg_h5 = h5py.File("output/sg_vrd_2016_train_predicate_exp_train.hdf5",
                           'r', 'core')

        if os.path.exists('output/cache/sg_vrd_2016_train.pkl'):
            self.vgg_data = zl.load('output/cache/sg_vrd_2016_train.pkl')
            glog.info('loaded train data from cache')
        else:
            glog.info('Preloading training data')
            zl.tic()
            self.vgg_data = {}
            for k in vgg_h5.keys():
                sub_visual = vgg_h5[k]['sub_classeme'][...]
                obj_visual = vgg_h5[k]['obj_classeme'][...]
                pre_label = vgg_h5[k]['pre_label'][...]
                self.vgg_data[k] = {}
                self.vgg_data[k]['sub_visual'] = sub_visual
                self.vgg_data[k]['obj_visual'] = obj_visual
                self.vgg_data[k]['pre_label'] = pre_label
            glog.info('done preloading training data %f' % zl.toc())
            zl.save('output/cache/sg_vrd_2016_train.pkl', self.vgg_data)
            vgg_h5.close()
        self.meta = h5py.File('data/sg_vrd_meta.h5', 'r', 'core')
        layer_params = yaml.load(self.param_str)

        self._batch_size = layer_params['batch_size']
        self.train_data = []
        self._name_to_top_map = {}

        # data blob: holds a batch of N images, each with 3 channels
        # top[0].reshape(self._batch_size, 4096 * 2 )

        top[0].reshape(self._batch_size, 2 * 101)
        top[1].reshape(self._batch_size)
        # self._name_to_top_map['visual'] = 0
        # self._name_to_top_map['classeme'] = 0
        self._name_to_top_map['visual'] = 0
        # self._name_to_top_map['location'] = 1
        self._name_to_top_map['label'] = 1
Example #16
def run_relation_batch(model_type, iteration):
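    # Batched relation prediction on VG test images: preload precomputed
    # classeme/visual/location features, build all ordered subject-object pairs
    # per image (encoding relative box locations with bbox_transform), feed the
    # input blobs required by model_type through the Caffe net in one forward
    # pass, and store predicted triplets, confidences and boxes in a result
    # HDF5 file.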
    vgg_h5 = h5py.File('output/precalc/vg1_2_2016_test.hdf5')
    vgg_data = {}
    if os.path.exists('output/cache/vg1_2_2016_test.pkl'):
        vgg_data = zl.load('output/cache/vg1_2_2016_test.pkl')
        print 'loaded test data from cache'
    else:
        print 'Preloading testing data'
        zl.tick()
        for k in vgg_h5.keys():
            classemes = vgg_h5[k]['classemes'][...]
            visuals = vgg_h5[k]['visuals'][...]
            locations = vgg_h5[k]['locations'][...]
            cls_confs = vgg_h5[k]['cls_confs'][...]
            vgg_data[k] = {}
            vgg_data[k]['classemes'] = classemes
            vgg_data[k]['visuals'] = visuals
            vgg_data[k]['cls_confs'] = cls_confs
            vgg_data[k]['locations'] = locations
        print 'done preloading testing data %f' % zl.tock()
        zl.save('output/cache/vg1_2_2016_test.pkl', vgg_data)
        vgg_h5.close()
    result = h5py.File('output/vg_results/vg1_2_2016_result_' + model_type +
                       '_' + iteration + '.hdf5')
    m = h5py.File('data/vg1_2_meta.h5')
    data_root = 'data/vg1_2_2016/Data/test/'
    keep = 100
    thresh = 0.0001
    net = caffe.Net(
        'models/vg1_2/relation/test_' + model_type + '.prototxt',
        'output/relation/vg/relation_vgg16_' + model_type + '_iter_' +
        iteration + '.caffemodel', caffe.TEST)
    #net = caffe.Net('models/sg_vrd/relation/test.prototxt','output/models/sg_vrd_relation_vgg16_iter_264000.caffemodel',caffe.TEST)
    cnt = 1
    zl.tick()
    imids = sorted(vgg_data.keys())
    for imid in imids:
        if cnt % 100 == 0:
            print cnt, zl.tock()
            zl.tick()
        cnt += 1
        if imid in result: continue
        classemes = vgg_data[imid]['classemes']
        visuals = vgg_data[imid]['visuals']
        locations = vgg_data[imid]['locations']
        cls_confs = vgg_data[imid]['cls_confs']

        # im = cv2.imread(data_root+imid+'.jpg')
        # #print cls_confs
        # # for box in locations:
        # # b=box[:4].astype(np.int32)
        # # cv2.rectangle(im,(b[0],b[1]),(b[2],b[3]),(255,0,0))
        # w,h = im.shape[2],im.shape[1]

        rlp_labels = []
        rlp_confs = []
        sub_boxes = []
        obj_boxes = []
        relation_vectors = []

        classemes_in = []
        visuals_in = []
        locations_in = []
        cls_confs_in = []
        sub_cls_in = []
        obj_cls_in = []
        sub_score_in = []
        obj_score_in = []
        for s in xrange(len(locations)):
            for o in xrange(len(locations)):
                if s == o: continue
                sub = locations[s]
                obj = locations[o]
                sub_visual = visuals[s]
                obj_visual = visuals[o]
                sub_cls = cls_confs[s, 0]
                obj_cls = cls_confs[o, 0]
                sub_score = cls_confs[s, 1]
                obj_score = cls_confs[o, 1]
                sub_classme = classemes[s]
                obj_classme = classemes[o]
                sub_loc_encoded = bbox_transform(np.array([obj[:4]]),
                                                 np.array([sub[:4]]))[0]
                obj_loc_encoded = bbox_transform(np.array([sub[:4]]),
                                                 np.array([obj[:4]]))[0]

                visual = np.hstack((sub_visual, obj_visual)).reshape(8192)
                classeme = np.hstack((sub_classme, obj_classme)).reshape(402)
                loc = np.hstack((sub_loc_encoded, obj_loc_encoded)).reshape(8)

                classemes_in.append(classeme)
                visuals_in.append(visual)
                locations_in.append(loc)
                sub_cls_in.append(sub_cls)
                obj_cls_in.append(obj_cls)
                sub_score_in.append(sub_score)
                obj_score_in.append(obj_score)
                sub_boxes.append(sub[:4])
                obj_boxes.append(obj[:4])

        if 'all' in model_type:
            blob = {
                'classeme': np.array(classemes_in),
                'visual': np.array(visuals_in),
                'location': np.array(locations_in)
            }
            net.blobs['classeme'].reshape(*blob['classeme'].shape)
            net.blobs['visual'].reshape(*blob['visual'].shape)
            net.blobs['location'].reshape(*blob['location'].shape)
        elif 'visual' in model_type:
            blob = {
                'visual': np.array(visuals_in),
            }
            net.blobs['visual'].reshape(*blob['visual'].shape)
        elif 'classeme' in model_type:
            blob = {
                'classeme': np.array(classemes_in),
            }

            net.blobs['classeme'].reshape(*blob['classeme'].shape)
        elif 'location' in model_type:
            blob = {'location': np.array(locations_in)}
            #batch this
            net.blobs['location'].reshape(*blob['location'].shape)
        if len(locations_in) == 0:
            rlp_confs = []
            sub_boxes = []
            obj_boxes = []
            rlp_labels = []
        else:
            net.forward_all(**blob)
            relation_score = net.blobs['relation_prob'].data.copy()
            argmax = np.argmax(relation_score, axis=1)
            rs = relation_score[np.arange(relation_score.shape[0]), argmax]
            rlp_labels = np.vstack((sub_cls_in, argmax, obj_cls_in)).T
            rlp_confs = np.array(sub_score_in) + np.array(rs) + np.array(
                obj_score_in)
        result.create_dataset(imid + '/rlp_confs',
                              dtype='float16',
                              data=np.array(rlp_confs).astype(np.float16))
        result.create_dataset(imid + '/sub_boxes',
                              dtype='float16',
                              data=np.array(sub_boxes).astype(np.float16))
        result.create_dataset(imid + '/obj_boxes',
                              dtype='float16',
                              data=np.array(obj_boxes).astype(np.float16))
        result.create_dataset(imid + '/rlp_labels',
                              dtype='float16',
                              data=np.array(rlp_labels).astype(np.float16))
Example #17
def gen_meta_for_retrieval():
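    # Build retrieval ground truth for sg_vrd: group the test-set ground truth
    # by unique rlp_label triplet, recording image ids, subject/object boxes
    # and occurrence counts, sort by count (descending), visualise the top 20
    # queries with OpenCV, and pickle the result.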
    out_pkl = 'output/pkl/vr_retr_meta.pkl'
    m = h5py.File('/home/zawlin/Dropbox/proj/sg_vrd_meta.h5', 'r')
    data_root = 'data/sg_vrd_2016/Data/sg_test_images/'

    rlp_labels = []
    files = []
    counts = []
    sub_boxes = []
    obj_boxes = []

    for k in m['gt/test']:
        gt_rlp_labels = m['gt/test'][k]['rlp_labels'][...]
        gt_sub_boxes = m['gt/test'][k]['sub_boxes'][...]
        gt_obj_boxes = m['gt/test'][k]['obj_boxes'][...]
        for i in xrange(gt_rlp_labels.shape[0]):
            gt_rlp_label = gt_rlp_labels[i]
            gt_sub_box = gt_sub_boxes[i]
            gt_obj_box = gt_obj_boxes[i]
            if len(rlp_labels) == 0:
                rlp_labels.append(gt_rlp_label.tolist())
                files.append([k])
                sub_boxes.append([gt_sub_box.tolist()])
                obj_boxes.append([gt_obj_box.tolist()])
                counts.append(1)
                continue
            bInd = np.all(gt_rlp_label == rlp_labels, axis=1)
            ind = np.arange(len(rlp_labels))[bInd]
            if len(ind) == 0:
                rlp_labels.append(gt_rlp_label.tolist())
                files.append([k])
                counts.append(1)
                sub_boxes.append([gt_sub_box.tolist()])
                obj_boxes.append([gt_obj_box.tolist()])
            else:
                ind = int(ind[0])
                files[ind].append(k)
                counts[ind] += 1
                sub_boxes[ind].append(gt_sub_box.tolist())
                obj_boxes[ind].append(gt_obj_box.tolist())
                # rlp_labels.append(gt_rlp_label.tolist())
                # files.append([k])
                # counts.append(1)
    rlp_labels = np.array(rlp_labels)
    files = np.array(files)
    counts = np.array(counts)
    sub_boxes = np.array(sub_boxes)
    obj_boxes = np.array(obj_boxes)
    ind = np.argsort(counts)[::-1]

    counts = counts[ind]
    files = files[ind]
    rlp_labels = rlp_labels[ind]
    sub_boxes = sub_boxes[ind]
    obj_boxes = obj_boxes[ind]
    print sub_boxes[:4]
    for i in xrange(20):
        rlp_label = rlp_labels[i]
        print files[i]
        s_lbl = zl.idx2name_cls(m, rlp_label[0])
        p_lbl = zl.idx2name_pre(m, rlp_label[1])
        o_lbl = zl.idx2name_cls(m, rlp_label[2])
        print s_lbl, p_lbl, o_lbl
        for j in xrange(len(files[i])):
            s_box = sub_boxes[i][j]
            o_box = obj_boxes[i][j]
            fpath = files[i][j]
            impath = data_root + fpath + '.jpg'
            im = cv2.imread(impath)
            cv2.rectangle(im, (s_box[0], s_box[1]), (s_box[2], s_box[3]),
                          (0, 255, 0), 1)
            cv2.rectangle(im, (o_box[0], o_box[1]), (o_box[2], o_box[3]),
                          (255, 0, 0), 1)
            cv2.imshow('im', im)
            cv2.waitKey(0)
    retr_meta = {
        'counts': counts,
        'files': files,
        'rlp_labels': rlp_labels,
        'sub_boxes': sub_boxes,
        'obj_boxes': obj_boxes
    }
    zl.save(out_pkl, retr_meta)