Code example #1
File: converter.py Project: RuaHU/TLfPS
def main(argv):
    #default:
    M = 'yolov3'
    gpu = '0'
    path = None
    try:
        opts, args = getopt.getopt(argv[1:], 'hm:p:g:', ['m=', 'path=', 'gpu='])
    except getopt.GetoptError:
        print(argv[0] + ' -m <M> -p <path> -g <gpu>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(argv[0] + ' -m <M> -p <path> -g <gpu>')
        elif opt in ['-m', '--m']:
            M = arg
        elif opt in ['-p', '--path']:
            path = arg
        elif opt in ['-g', '--gpu']:
            gpu = arg
    
    if path is None:
        raise ValueError('you should specify the model path via [python converter -p /path/to/model/weights]')
    print('model: [%s], gpu: [%s], weights: [%s]'%(M, gpu, path))
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    config = Config(M)
    config.mgn = True
    model = MODELS(config = config).model
    load_weights_by_name(model, path)
    load_weights_by_name(model, os.path.join(parpath, 'pretrained_weights/%s.h5'%M))
    saved_path = os.path.join(parpath, 'saved_weights/%s_reid.h5'%M)
    print('weights saving to %s'%saved_path)
    model.save_weights(saved_path)
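A minimal invocation sketch (not part of the repository), assuming main() above is in scope and the weights path is a placeholder:

# Hedged usage sketch (assumptions: run from the repository root with the main()
# above defined in this module; the weights path is a placeholder).
if __name__ == '__main__':
    # equivalent to: python converter.py -m yolov3 -g 0 -p /path/to/weights.h5
    main(['converter.py', '-m', 'yolov3', '-g', '0', '-p', '/path/to/weights.h5'])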
Code example #2
File: elapsedtime.py Project: RuaHU/TLfPS
class ELAPSEDTIME():
    def __init__(self, dataset, reid, detector='yolov3'):
        self.detector = detector
        self.reid = reid
        self.dataset = dataset
        self.config = Config(detector)
        self.DA = DA('validation', self.config)

    def load_model(self, ):
        if self.reid:
            self.model = MODELS(self.config).load_model()
        else:
            self.model = MODELS(
                self.config, model_type='detection').load_model(model_name='')

    def timer(self, ):
        if self.config.M == 'mrcnn' and not hasattr(self, 'anchors'):
            self.anchors = ANCHORS(self.config)
        pool = loadmat(
            os.path.join(self.dataset,
                         'dataset/annotation/pool.mat'))['pool'].squeeze()
        gallery = [imname[0] for imname in pool]
        if not hasattr(self, 'model'):
            self.load_model()
        t_step1, t_step2 = 0, 0
        overall_timer_start = time.time()
        for i, imname in enumerate(gallery):
            step0_timer = time.time()
            img = cv2.imread(
                os.path.join(self.dataset, 'dataset/Image/SSM/', imname))
            input_img, input_box, input_ids, meta = self.DA(img, [])
            step1_timer = time.time()
            t_step1 += (step1_timer - step0_timer)
            if self.config.M == 'mrcnn':
                self.model.predict([
                    np.stack([input_img]),
                    np.stack([input_box]),
                    np.stack(meta),
                    np.stack([self.anchors.get_anchors(input_img.shape)])
                ])[0]
            else:
                self.model.predict(
                    [np.stack([input_img]),
                     np.stack([input_box])])[0]
            step2_timer = time.time()
            t_step2 += (step2_timer - step1_timer)
            print("\r%d|%d|%.3f|%.3f|%.3f" %
                  (i + 1, len(gallery), step2_timer - overall_timer_start,
                   t_step1, t_step2),
                  end='')
        t_all = time.time() - overall_timer_start
        return [t_all, t_step1, t_step2]
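A hedged usage sketch of the class above; timer() returns the total, pre-processing, and inference times in seconds:

# Hedged usage sketch (assumptions: ELAPSEDTIME above is in scope and the
# CUHK-SYSU dataset root below is a placeholder).
et = ELAPSEDTIME('/path/to/CUHK-SYSU', reid=True, detector='yolov3')
t_all, t_step1, t_step2 = et.timer()   # timer() loads the model on first use
print('total: %.3fs  preprocessing: %.3fs  inference: %.3fs'
      % (t_all, t_step1, t_step2))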
Code example #3
File: getmodel.py Project: RuaHU/TLfPS
def main(argv):
    M = 'yolov3'
    gpu = '0'
    try:
        opts, args = getopt.getopt(argv[1:], 'hm:g:', ['m=', 'gpu='])
    except getopt.GetoptError:
        print(argv[0] + ' -m <M> -g <gpu>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(argv[0] + ' -m <M> -g <gpu>')
        elif opt in ['-m', '--m']:
            M = arg
        elif opt in ['-g', '--gpu']:
            gpu = arg

    print('model: [%s] gpu: [%s]' % (M, gpu))
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    config = Config(M)
    model = MODELS(config, model_type='detection').load_model()
    print('saving model to pretrained_weights/%s.h5' % config.M)
    model.save_weights(
        os.path.join(parpath, 'pretrained_weights/%s.h5' % config.M))
Code example #4
File: evaluation.py Project: RuaHU/TLfPS
def load_model(self, ):
    self.reid_model = MODELS(config=self.config).load_model()
Code example #5
File: evaluation.py Project: RuaHU/TLfPS
class EVALUATION():
    def __init__(self,
                 dataset,
                 detector='yolov3',
                 experiment_name='default',
                 overwrite=False):
        self.detector = detector
        self.dataset = dataset
        self.overwrite = overwrite
        self.experiment_name = experiment_name
        self.checkdir()
        self.config = Config(detector)
        self.DA = DA('validation', self.config)

    def get_sims(self, gfeats, qfeat, _eval=True):
        '''
        gfeats: gallery features
        qfeat: query feature
        '''
        if _eval: return gfeats.dot(qfeat.ravel()).ravel()
        gfeats_norm = np.linalg.norm(gfeats, keepdims=True, axis=-1)
        qfeat_norm = np.linalg.norm(qfeat, keepdims=True)
        gfeats_nl = gfeats / gfeats_norm
        qfeat_nl = qfeat / qfeat_norm
        sim = gfeats_nl.dot(qfeat_nl.ravel()).ravel()
        return sim

    def checkdir(self, ):
        dirpath = os.path.join(parpath, 'experiment_results',
                               self.experiment_name)
        if os.path.exists(dirpath):
            print('experiment [%s] already exists' % self.experiment_name)
            if self.overwrite:
                print('cleaning experiment [%s] [overwrite == True]' %
                      self.experiment_name)
                shutil.rmtree(dirpath, ignore_errors=True)
                if os.path.exists(dirpath):
                    print(
                        'it seems the experiment directory can not be deleted. please check the status of the directory %s'
                        % dirpath)
                os.mkdir(dirpath)
                assert os.path.exists(dirpath)
            else:
                print(
                    'the results of experiment [%s] will be reused [overwrite == False]'
                    % self.experiment_name)
        else:
            os.mkdir(dirpath)
            assert os.path.exists(dirpath)

    def load_model(self, ):
        self.reid_model = MODELS(config=self.config).load_model()

    def simple_evaluation(self, model, gallery_size=50):
        #extract query feature vectors
        TestG50 = loadmat(
            os.path.join(self.dataset,
                         'dataset/annotation/test/train_test/TestG50.mat')
        )['TestG50'].squeeze()
        qfeatures = []
        for item in TestG50['Query']:
            img_name = item['imname'][0, 0][0]
            roi = item['idlocate'][0, 0][0].astype(np.int32)
            img = cv2.imread(
                os.path.join(self.dataset, 'dataset/Image/SSM/', img_name))
            input_img, input_box, input_ids, meta = self.DA(img, [roi])
            feature = model.predict([
                np.stack([input_img]),
                np.stack([input_box]),
                np.stack([input_ids])
            ])
            qfeatures.append(
                [img_name, feature[0, :, 0, 0, :],
                 np.array([roi])])
            print("\r%d|%d" % (len(qfeatures), len(TestG50['Query'])), end='')
        print('')

        #extract gallery feature vectors
        filepath = os.path.join(
            parpath, 'experiment_results/cuhk_%s_gallery.pkl' % self.config.M)
        assert os.path.exists(filepath)
        f = open(filepath, 'rb')
        pre_gallery = pickle.load(f, encoding='latin1')
        gallery = []
        for item in pre_gallery:
            imname, features, boxes = item
            if features is None:
                features = np.zeros([0, 256], dtype=np.float32)
                boxes = np.zeros([0, 5], dtype=np.float32)
            img = cv2.imread(
                os.path.join(self.dataset, 'dataset/Image/SSM/', imname))
            #xyxy 2 xywh
            t_boxes = boxes.copy()
            t_boxes[:, 2:4] -= t_boxes[:, :2]
            input_img, input_box, input_ids, meta = self.DA(
                img, t_boxes[:, :4])
            feats = model.predict([
                np.stack([input_img]),
                np.stack([input_box]),
                np.stack([input_ids])
            ])
            gallery.append([imname, feats[0, :, 0, 0, :], boxes])
            print("\r%d|%d" % (len(gallery), len(pre_gallery)), end='')
        print('')
        name_to_det_feat = {}
        for img_name, features, boxes in gallery:
            name_to_det_feat[img_name] = (boxes, features)
        return self.evaluation(qfeatures,
                               name_to_det_feat,
                               _eval=False,
                               gallery_size=gallery_size)

    def evaluation(self, qfeatures, name_to_det_feat, _eval, gallery_size=100):
        fname = 'TestG{}'.format(gallery_size if (gallery_size != -1) else 50)
        protoc = loadmat(
            os.path.join(self.dataset, 'dataset/annotation/test/train_test',
                         fname + '.mat'))[fname].squeeze()
        aps, accs, topk = [], [], [1, 5, 10]
        all_recall_rate = []
        tape = {}
        for i in range(len(qfeatures)):
            y_true, y_score = [], []
            y_boxes, y_gname = [], []
            count_gt, count_tp = 0, 0
            qimg_name, qfeat, qbox = qfeatures[i]
            probe_imname = str(protoc['Query'][i]['imname'][0, 0][0])
            assert probe_imname == qimg_name
            tested = set([probe_imname])
            for item in protoc['Gallery'][i].squeeze():
                gallery_imname = str(item[0][0])
                gt = item[1][0].astype(np.int32)
                gt[2:] += gt[:2]
                count_gt += (gt.size > 0)
                if gallery_imname not in name_to_det_feat: continue
                gboxes, gfeatures = name_to_det_feat[gallery_imname]
                sim = self.get_sims(gfeatures, qfeat, _eval)
                label = np.zeros(len(sim), dtype=np.int32)
                if gt.size > 0:
                    w, h = gt[2] - gt[0], gt[3] - gt[1]
                    iou_thresh = min(0.5,
                                     (w * h * 1.0) / ((w + 10) * (h + 10)))
                    inds = np.argsort(sim)[::-1]
                    sim = sim[inds]
                    gboxes = gboxes[inds]
                    for j, roi in enumerate(gboxes[:, :4]):
                        if self._compute_iou(roi, gt) >= iou_thresh:
                            label[j] = 1
                            count_tp += 1
                            break

                y_true.extend(list(label))
                y_score.extend(list(sim))
                y_boxes.extend(list(gboxes))
                y_gname.extend([gallery_imname for _ in gboxes])
                tested.add(gallery_imname)
            if gallery_size == -1:
                for gallery_imname in name_to_det_feat.keys():
                    if gallery_imname in tested: continue
                    gboxes, gfeatures = name_to_det_feat[gallery_imname]
                    sim = self.get_sims(gfeatures, qfeat, _eval)
                    label = np.zeros(len(sim), dtype=np.int32)
                    y_true.extend(list(label))
                    y_score.extend(list(sim))
                    y_boxes.extend(list(gboxes))
                    y_gname.extend([gallery_imname for _ in gboxes])

            y_score = np.array(y_score)
            y_true = np.array(y_true)
            y_boxes = np.array(y_boxes)
            y_gname = np.array(y_gname)
            assert count_tp <= count_gt
            recall_rate = count_tp * 1.0 / count_gt
            all_recall_rate.append(recall_rate)
            ap = 0 if count_tp == 0 else \
                average_precision_score(y_true, y_score) * recall_rate
            aps.append(ap)

            inds = np.argsort(y_score)[::-1]
            y_score = y_score[inds]
            y_true = y_true[inds]
            y_boxes = y_boxes[inds]
            y_gname = y_gname[inds]
            acc = [min(1, sum(y_true[:k])) for k in topk]
            accs.append(acc)
            tape[qimg_name] = [
                i, qbox, ap, acc, recall_rate, y_score, y_true, y_boxes,
                y_gname
            ]
            print("\r%d:\t%d|%d|%.2f|%.2f" %
                  (gallery_size, len(aps), len(qfeatures), np.mean(aps),
                   np.mean(accs, axis=0)[0]),
                  end='')
        print('')
        print('search ranking:')
        print('aRR:%.4f' % np.mean(all_recall_rate))
        print('  mAP = {:.2%}'.format(np.mean(aps)))
        accs = np.mean(accs, axis=0)
        for i, k in enumerate(topk):
            print('  top-{:2d} = {:.2%}'.format(k, accs[i]))

        if gallery_size == -1:
            record_aps = []
            new_tape = {}
            for key in tape.keys():
                record_aps.append(tape[key][2])
            record_aps.sort()
            th = record_aps[50]
            for key in tape.keys():
                if tape[key][2] > th: continue
                new_tape[key] = tape[key]

            filepath = os.path.join(parpath, 'experiment_results',
                                    self.experiment_name,
                                    'cuhk_%s_tape.pkl' % self.config.M)
            with open(filepath, 'wb') as f:
                pickle.dump(new_tape, f)

        return aps, accs

    def query_feature_extractor(self, ):
        filepath = os.path.join(parpath, 'experiment_results',
                                self.experiment_name,
                                'cuhk_%s_query_features.pkl' % self.config.M)
        if os.path.exists(filepath):
            return

        if not hasattr(self, 'reid_model'):
            self.load_model()

        if self.config.M == 'mrcnn' and not hasattr(self, 'anchors'):
            self.anchors = ANCHORS(self.config)

        TestG50 = loadmat(
            os.path.join(self.dataset,
                         'dataset/annotation/test/train_test/TestG50.mat')
        )['TestG50'].squeeze()
        query_features = []
        for item in TestG50['Query']:
            img_name = item['imname'][0, 0][0]
            roi = item['idlocate'][0, 0][0].astype(np.int32)
            img = cv2.imread(
                os.path.join(self.dataset, 'dataset/Image/SSM/', img_name))
            input_img, input_box, _, meta = self.DA(img, [roi])
            if self.config.M == 'mrcnn':
                feature = self.reid_model.predict([
                    np.stack([input_img]),
                    np.stack([input_box]),
                    np.stack(meta),
                    np.stack([self.anchors.get_anchors(input_img.shape)])
                ])[0]
            else:
                feature = self.reid_model.predict(
                    [np.stack([input_img]),
                     np.stack([input_box])])[0]
            query_features.append([img_name, feature[0], np.array([roi])])
            print("\r%d|%d" % (len(query_features), len(TestG50['Query'])),
                  end='')
        print('')
        self.query_features = query_features
        f = open(filepath, 'wb')
        pickle.dump(query_features, f)
        f.close()
        return

    def oim_evaluation(self, gallery_size=100):
        gfilepath = os.path.join(parpath, 'experiment_results/oim_gallery.pkl')
        f = open(gfilepath, 'rb')
        gfeatures = pickle.load(f, encoding='latin1')
        f.close()
        qfilepath = os.path.join(parpath, 'experiment_results/oim_query.pkl')
        f = open(qfilepath, 'rb')
        qfeatures = pickle.load(f, encoding='latin1')
        f.close()
        name_to_det_feat = {}
        for imname, features, boxes in gfeatures:
            if features is None:
                features = np.zeros([0, 256], dtype=np.float32)
                boxes = np.zeros([0, 4], dtype=np.float32)
            name_to_det_feat[imname] = (boxes, features)

        res = {}
        gallery_length = [50, 100, 500, 1000, 2000, 4000, -1]
        for gallery_size in gallery_length:
            res[gallery_size] = self.evaluation(qfeatures,
                                                name_to_det_feat,
                                                _eval=True,
                                                gallery_size=gallery_size)
        f = open(os.path.join(parpath, 'experiment_results', 'oim_res.pkl'),
                 'wb')
        pickle.dump(res, f)
        f.close()

    def gallery_feature_extractor(self, ):
        filepath = os.path.join(parpath, 'experiment_results',
                                self.experiment_name,
                                'cuhk_%s_gallery_features.pkl' % self.config.M)
        if os.path.exists(filepath):
            return

        if not hasattr(self, 'reid_model'):
            self.load_model()

        if self.config.M == 'mrcnn' and not hasattr(self, 'anchors'):
            self.anchors = ANCHORS(self.config)

        pool_path = os.path.join(self.dataset, 'dataset/annotation/pool.mat')
        if not os.path.exists(pool_path):
            raise ValueError('cannot find %s' % pool_path)
        pool = loadmat(pool_path)['pool'].squeeze()
        imnames = [imname[0] for imname in pool]
        gallery = []
        for imname in imnames:
            img = cv2.imread(
                os.path.join(self.dataset, 'dataset/Image/SSM/', imname))
            input_img, input_box, _, meta = self.DA(img, [])
            if self.config.M == 'mrcnn':
                feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                    [
                        np.stack([input_img]),
                        np.stack([input_box]),
                        np.stack(meta),
                        np.stack([self.anchors.get_anchors(input_img.shape)])
                    ])
            elif self.config.M == 'dla_34':
                feats, det_features, det, _ = self.reid_model.predict(
                    [np.stack([input_img]),
                     np.stack([input_box])])
            else:
                feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                    [np.stack([input_img]),
                     np.stack([input_box])])
            det = self.DA.unmold(det[0], meta)
            det[:, 2:] += det[:, :2]
            gallery.append([imname, det_features[0], det])
            print("\r%d|%d" % (len(gallery), len(imnames)), end='')
        print('')
        f = open(filepath, 'wb')
        pickle.dump(gallery, f)
        f.close()
        return

    def private_detector_evaluation(self, gallery_size=100):
        print(
            'this experiment uses the end-to-end detector [%s] + feature extractor [%s%s]'
            % (self.config.M, self.config.M, '_mgn' if self.config.mgn else ''))
        topk = [1, 5, 10]
        gallery_length = [50, -1, 100, 500, 1000, 2000, 4000]
        respath = os.path.join(parpath, 'experiment_results',
                               self.experiment_name,
                               'cuhk_%s_res.pkl' % self.config.M)
        if os.path.exists(respath):
            f = open(respath, 'rb')
            res = pickle.load(f)
            f.close()
            for gallery_size in gallery_length:
                aps, accs = res[gallery_size]
                print(gallery_size)
                print('  mAP = {:.2%}'.format(np.mean(aps)))
                for i, k in enumerate(topk):
                    print('  top-{:2d} = {:.2%}'.format(k, accs[i]))
            return

        qfilepath = os.path.join(parpath, 'experiment_results',
                                 self.experiment_name,
                                 'cuhk_%s_query_features.pkl' % self.config.M)
        assert os.path.exists(qfilepath)
        f = open(qfilepath, 'rb')
        qfeatures = pickle.load(f)
        f.close()
        gfilepath = os.path.join(
            parpath, 'experiment_results', self.experiment_name,
            'cuhk_%s_gallery_features.pkl' % self.config.M)
        assert os.path.exists(gfilepath)
        f = open(gfilepath, 'rb')
        gfeatures = pickle.load(f, encoding='latin1')
        f.close()
        name_to_det_feat = {}
        for img_name, features, boxes in gfeatures:
            name_to_det_feat[img_name] = (boxes, features)

        res = {}
        for gallery_size in gallery_length:
            res[gallery_size] = self.evaluation(qfeatures,
                                                name_to_det_feat,
                                                _eval=True,
                                                gallery_size=gallery_size)

        f = open(respath, 'wb')
        pickle.dump(res, f)
        f.close()

    def _compute_iou(self, box1, box2):
        a, b = box1.copy(), box2.copy()
        x1 = max(a[0], b[0])
        y1 = max(a[1], b[1])
        x2 = min(a[2], b[2])
        y2 = min(a[3], b[3])
        inter = max(0, x2 - x1) * max(0, y2 - y1)
        union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] -
                                                                 b[1]) - inter
        return inter * 1.0 / union
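A hedged sketch of the full CUHK-SYSU evaluation flow with the class above; the dataset path is a placeholder:

# Hedged pipeline sketch (assumptions: EVALUATION above is in scope and the
# dataset path is a placeholder).
ev = EVALUATION('/path/to/CUHK-SYSU', detector='yolov3',
                experiment_name='default', overwrite=False)
ev.query_feature_extractor()        # writes cuhk_<M>_query_features.pkl
ev.gallery_feature_extractor()      # writes cuhk_<M>_gallery_features.pkl
ev.private_detector_evaluation()    # prints mAP and top-k for each gallery size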
Code example #6
File: tsne_prw.py Project: RuaHU/TLfPS
class EVALUATION():
    def __init__(self,
                 dataset,
                 detector='yolov3',
                 experiment_name='default',
                 overwrite=False):
        if dataset is None: return
        self.detector = detector
        self.dataset = dataset
        self.overwrite = overwrite
        self.experiment_name = experiment_name
        self.checkdir()
        self.config = Config(detector)
        self.DA = DA('validation', self.config)
        self.load_gallery()

    def checkdir(self, ):
        dirpath = os.path.join(parpath, 'experiment_results',
                               self.experiment_name)
        if os.path.exists(dirpath):
            print('experiment [%s] already exists' % self.experiment_name)
            if self.overwrite:
                print('cleaning experiment [%s] [overwrite == True]' %
                      self.experiment_name)
                shutil.rmtree(dirpath, ignore_errors=True)
                if os.path.exists(dirpath):
                    print(
                        'it seems the experiment directory can not be deleted. please check the status of the directory %s'
                        % dirpath)
                os.mkdir(dirpath)
                assert os.path.exists(dirpath)
            else:
                print(
                    'the results of experiment [%s] will be reused [overwrite == False]'
                    % self.experiment_name)
        else:
            os.mkdir(dirpath)
            assert os.path.exists(dirpath)

    def load_model(self, ):
        self.reid_model = MODELS(config=self.config).load_model()

    def load_gallery(self):
        self.gallery_dict = {}
        frame_test = scipy.io.loadmat(
            os.path.join(self.dataset, 'frame_test.mat'))
        frame_indices = frame_test['img_index_test']
        for index, im_name in enumerate(frame_indices[:, 0]):
            mat = scipy.io.loadmat(
                os.path.join(self.dataset, 'annotations',
                             im_name[0] + '.jpg.mat'))
            boxes = mat[list(mat.keys())[-1]]
            ids = boxes[:, 0]
            boxes = boxes[:, 1:5]
            self.gallery_dict[im_name[0] +
                              '.jpg'] = [im_name[0] + '.jpg', boxes, ids]

    def TSNE(self, ):
        fTSNE_features = os.path.join(
            parpath, 'experiment_results', self.experiment_name,
            'prw_%s_TSNE_features.pkl' % self.config.M)
        fTSNE_id = os.path.join(parpath, 'experiment_results',
                                self.experiment_name,
                                'prw_%s_TSNE_id.pkl' % self.config.M)
        fTSNE_imgs = os.path.join(parpath, 'experiment_results',
                                  self.experiment_name,
                                  'prw_%s_TSNE_imgs.pkl' % self.config.M)
        if os.path.exists(fTSNE_features):
            f = open(fTSNE_features, 'rb')
            TSNE_features = pickle.load(f)
            f = open(fTSNE_id, 'rb')
            TSNE_id = pickle.load(f)
            f.close()
        else:
            if not hasattr(self, 'reid_model'):
                self.load_model()

            if self.config.M == 'mrcnn' and not hasattr(self, 'anchors'):
                self.anchors = ANCHORS(self.config)

            TSNE_features = []
            TSNE_id = []
            TSNE_imgs = []
            gallery = []

            for imname in self.gallery_dict.keys():
                _, boxes, ids = self.gallery_dict[imname]
                img = cv2.imread(os.path.join(self.dataset, 'frames', imname))

                input_img, input_box, _, meta = self.DA(img, boxes)
                if self.config.M == 'mrcnn':
                    feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                        [
                            np.stack([input_img]),
                            np.stack([input_box]),
                            np.stack(meta),
                            np.stack(
                                [self.anchors.get_anchors(input_img.shape)])
                        ])
                else:
                    feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                        [np.stack([input_img]),
                         np.stack([input_box])])
                for i, feat in enumerate(feats[0]):
                    TSNE_features.append(feat)
                    TSNE_id.append(ids[i])
                    x, y, w, h = boxes[i]
                    TSNE_imgs.append(img[int(y):int(y + h),
                                         int(x):int(x + w), :])
                gallery.append(imname)
                print("\r%d|%d" % (len(gallery), len(self.gallery_dict)),
                      end='')
            print('')
            TSNE_features = np.array(TSNE_features)
            TSNE_id = np.array(TSNE_id)

            f = open(fTSNE_features, 'wb')
            pickle.dump(TSNE_features, f)
            f.close()
            f = open(fTSNE_id, 'wb')
            pickle.dump(TSNE_id, f)
            f.close()
            f = open(fTSNE_imgs, 'wb')
            pickle.dump(TSNE_imgs, f)
            f.close()
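TSNE() above only extracts and pickles the feature vectors and identities; a hedged sketch of actually projecting them with scikit-learn (an external dependency, assumed installed, not part of the repository) could be:

# Hedged sketch: 2-D projection of the pickled features with scikit-learn.
# Assumptions: scikit-learn and matplotlib are installed; the .pkl paths are
# placeholders matching the files written by TSNE() above.
import pickle
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

with open('prw_yolov3_TSNE_features.pkl', 'rb') as f:
    feats = pickle.load(f)
with open('prw_yolov3_TSNE_id.pkl', 'rb') as f:
    ids = pickle.load(f)

emb = TSNE(n_components=2, init='pca', random_state=0).fit_transform(feats)
plt.scatter(emb[:, 0], emb[:, 1], c=ids, s=4, cmap='tab20')
plt.savefig('tsne_prw.png', dpi=200)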
Code example #7
File: testmodel.py Project: RuaHU/TLfPS
def main(argv):
    M = 'yolov3'
    gpu = '0'
    image_path = None
    try:
        opts, args = getopt.getopt(argv[1:], 'hm:g:p:',
                                   ['m=', 'gpu=', 'path='])
    except getopt.GetoptError:
        print(argv[0] + ' -m <M> -g <gpu>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(argv[0] + ' -m <M> -g <gpu>')
        elif opt in ['-m', '--m']:
            M = arg
        elif opt in ['-p', '--path']:
            image_path = arg
        elif opt in ['-g', '--gpu']:
            gpu = arg

    print('model: [%s] gpu: [%s]' % (M, gpu))
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

    if image_path is None or not os.path.exists(image_path):
        raise ValueError(
            'you must specify an image for testing %s' %
            ('' if image_path is None else 'bad image path: [%s]' %
             image_path))

    config = Config(M)
    model_path = os.path.join(parpath, 'pretrained_weights/%s.h5' % config.M)
    model = MODELS(config, model_type='detection').load_model(model_name='')

    #test
    da = DA('validation', config)
    img = cv2.imread(image_path)
    input_img, input_box, input_ids, meta = da(img, [])
    cv2.imwrite('input_img.jpg', (input_img * 255).astype(np.uint8))
    if config.M == 'mrcnn':
        anchors = ANCHORS(config)
        detection, detection_score = model.predict([
            np.stack([input_img]),
            np.stack([input_box]),
            np.stack(meta),
            np.stack([anchors.get_anchors(input_img.shape)])
        ])
    else:
        detection, detection_score = model.predict(
            [np.stack([input_img]),
             np.stack([input_box])])
    detection = da.unmold(detection[0], meta)
    image = draw(img, detection, detection_score)
    print(detection_score)
    print(detection)
    cv2.imwrite('%s_test.jpg' % config.M, image)
    print('detection results saved as: %s_test.jpg' % config.M)
Code example #8
File: elapsedtime.py Project: RuaHU/TLfPS
def load_model(self, ):
    if self.reid:
        self.model = MODELS(self.config).load_model()
    else:
        self.model = MODELS(
            self.config, model_type='detection').load_model(model_name='')
Code example #9
File: tsne_cuhk.py Project: RuaHU/TLfPS
class EVALUATION():
    def __init__(self,
                 dataset,
                 detector='yolov3',
                 experiment_name='default',
                 overwrite=False):
        if dataset is None: return
        self.detector = detector
        self.dataset = dataset
        self.overwrite = overwrite
        self.experiment_name = experiment_name
        self.checkdir()
        self.config = Config(detector)
        self.DA = DA('validation', self.config)
        self.load_gallery()

    def checkdir(self, ):
        dirpath = os.path.join(parpath, 'experiment_results',
                               self.experiment_name)
        if os.path.exists(dirpath):
            print('experiment [%s] already exists' % self.experiment_name)
            if self.overwrite:
                print('cleaning experiment [%s] [overwrite == True]' %
                      self.experiment_name)
                shutil.rmtree(dirpath, ignore_errors=True)
                if os.path.exists(dirpath):
                    print(
                        'it seems the experiment directory can not be deleted. please check the status of the directory %s'
                        % dirpath)
                os.mkdir(dirpath)
                assert os.path.exists(dirpath)
            else:
                print(
                    'the results of experiment [%s] will be reused [overwrite == False]'
                    % self.experiment_name)
        else:
            os.mkdir(dirpath)
            assert os.path.exists(dirpath)

    def load_gallery(self, ):
        pool_path = os.path.join(self.dataset, 'dataset/annotation/pool.mat')
        Person_path = os.path.join(self.dataset,
                                   'dataset/annotation/Person.mat')
        if not os.path.exists(pool_path):
            raise ValueError('cannot find %s' % pool_path)
        pool = scipy.io.loadmat(pool_path)['pool'].squeeze()
        Person = scipy.io.loadmat(Person_path)['Person'].squeeze()
        imnames = [imname[0] for imname in pool]

        images = {}
        for person in Person:
            pid = int(person[0][0][1:])
            for item in person[2][0]:
                if item[0][0] in images:
                    images[item[0][0]][0].append(item[1][0].tolist())
                    images[item[0][0]][1].append(pid)
                else:
                    images[item[0][0]] = [[item[1][0].tolist()], [pid]]

        self.gallery_dict = {}
        for key in images.keys():
            if key in imnames:
                self.gallery_dict[key] = images[key]

    def load_model(self, ):
        self.reid_model = MODELS(self.config).load_model()

    def TSNE(self, ):
        fTSNE_features = os.path.join(
            parpath, 'experiment_results', self.experiment_name,
            'cuhk_%s_TSNE_features.pkl' % self.config.M)
        fTSNE_id = os.path.join(parpath, 'experiment_results',
                                self.experiment_name,
                                'cuhk_%s_TSNE_id.pkl' % self.config.M)
        fTSNE_imgs = os.path.join(parpath, 'experiment_results',
                                  self.experiment_name,
                                  'cuhk_%s_TSNE_imgs.pkl' % self.config.M)
        if os.path.exists(fTSNE_features):
            f = open(fTSNE_features, 'rb')
            TSNE_features = pickle.load(f)
            f = open(fTSNE_id, 'rb')
            TSNE_id = pickle.load(f)
            f.close()
        else:
            if not hasattr(self, 'reid_model'):
                self.load_model()

            if self.config.M == 'mrcnn' and not hasattr(self, 'anchors'):
                self.anchors = ANCHORS(self.config)

            TSNE_features = []
            TSNE_id = []
            TSNE_imgs = []
            gallery = []

            for imname in self.gallery_dict.keys():
                boxes, ids = self.gallery_dict[imname]
                img = cv2.imread(
                    os.path.join(self.dataset, 'dataset/Image/SSM', imname))
                input_img, input_box, _, meta = self.DA(img, boxes)
                if self.config.M == 'mrcnn':
                    feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                        [
                            np.stack([input_img]),
                            np.stack([input_box]),
                            np.stack(meta),
                            np.stack(
                                [self.anchors.get_anchors(input_img.shape)])
                        ])
                else:
                    feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                        [np.stack([input_img]),
                         np.stack([input_box])])
                for i, feat in enumerate(feats[0]):
                    TSNE_features.append(feat)
                    TSNE_id.append(ids[i])
                    x, y, w, h = boxes[i]
                    TSNE_imgs.append(img[int(y):int(y + h),
                                         int(x):int(x + w), :])
                gallery.append(imname)
                print("\r%d|%d" % (len(gallery), len(self.gallery_dict)),
                      end='')
            print('')
            TSNE_features = np.array(TSNE_features)
            TSNE_id = np.array(TSNE_id)

            f = open(fTSNE_features, 'wb')
            pickle.dump(TSNE_features, f)
            f.close()
            f = open(fTSNE_id, 'wb')
            pickle.dump(TSNE_id, f)
            f.close()
            f = open(fTSNE_imgs, 'wb')
            pickle.dump(TSNE_imgs, f)
            f.close()
Code example #10
def main(argv):
    M = 'yolov3'
    gpu = '0'
    CUHK_SYSU = "/home/ronghua/Projects/data/dataset-v2/"
    PRW = '/home/ronghua/Projects/data/PRW-v16.04.20/'
    try:
        opts, args = getopt.getopt(argv[1:], 'hm:g:p:c:',
                                   ['m=', 'gpu=', 'prw=', 'cuhk='])
    except getopt.GetoptError:
        print(argv[0] + ' -m <M> -g <gpu>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(argv[0] + ' -m <M> -g <gpu>')
        elif opt in ['-m', '--m']:
            M = arg
        elif opt in ['-p', '--prw']:
            PRW = arg
        elif opt in ['-c', '--cuhk']:
            CUHK_SYSU = arg
        elif opt in ['-g', '--gpu']:
            gpu = arg

    print('model: [%s] gpu: [%s], CUHK_SYSU: [%s], PRW: [%s]' %
          (M, gpu, CUHK_SYSU, PRW))
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    dataset = False
    if os.path.exists(CUHK_SYSU):
        dataset = True
    if os.path.exists(PRW):
        dataset = True

    if not dataset:
        raise ValueError(
            'you should specify at least one dataset [CUHK_SYSU or PRW]')

    config = Config(M)
    model_path = os.path.join(parpath, 'pretrained_weights/%s.h5' % config.M)
    model = MODELS(config, model_type='detection').load_model(model_name='')
    if config.M == 'mrcnn':
        anchors = ANCHORS(config)

    load_weights_by_name(model, model_path)
    da = DA('validation', config)

    if os.path.exists(CUHK_SYSU):
        filepath = os.path.join(parpath, 'experiment_results',
                                'cuhk_%s_gallery.pkl' % config.M)
        if os.path.exists(filepath):
            print('cuhk_%s_gallery.pkl exists.' % config.M)
        else:
            print('creating cuhk-sysu gallery for %s' % config.M)
            gallery = []
            pool_path = os.path.join(CUHK_SYSU, 'dataset/annotation/pool.mat')
            if not os.path.exists(pool_path):
                raise ValueError('cannot find %s' % pool_path)
            pool = loadmat(pool_path)['pool'].squeeze()
            imnames = [imname[0] for imname in pool]
            for imname in imnames:
                img = cv2.imread(
                    os.path.join(CUHK_SYSU, 'dataset/Image/SSM/', imname))
                input_img, input_box, input_ids, meta = da(img, [])
                if config.M == 'mrcnn':
                    detection, scores = model.predict([
                        np.stack([input_img]),
                        np.stack([input_box]),
                        np.stack(meta),
                        np.stack([anchors.get_anchors(input_img.shape)])
                    ])
                else:
                    detection, scores = model.predict(
                        [np.stack([input_img]),
                         np.stack([input_box])])
                detection = da.unmold(detection[0], meta)
                detection[:, 2:] += detection[:, :2]
                features = np.zeros([len(detection), 0])
                gallery.append([imname, features, detection])
                print("\r%d|%d" % (len(gallery), len(imnames)), end='')
            print('')
            f = open(filepath, 'wb')
            pickle.dump(gallery, f)
            f.close()

    if os.path.exists(PRW):
        filepath = os.path.join(parpath, 'experiment_results',
                                'prw_%s_gallery.pkl' % config.M)
        if os.path.exists(filepath):
            print('prw_%s_gallery.pkl exists.' % config.M)
        else:
            print('creating prw gallery for %s' % config.M)
            gallery = []
            frame_test_path = os.path.join(PRW, 'frame_test.mat')
            if not os.path.exists(frame_test_path):
                raise ValueError('cannot find %s' % frame_test_path)
            frame_indices = loadmat(
                frame_test_path)['img_index_test'].squeeze()
            imnames = [imname[0] + '.jpg' for imname in frame_indices]
            for imname in imnames:
                img = cv2.imread(os.path.join(PRW, 'frames', imname))
                input_img, input_box, input_ids, meta = da(img, [])
                if config.M == 'mrcnn':
                    detection, scores = model.predict([
                        np.stack([input_img]),
                        np.stack([input_box]),
                        np.stack(meta),
                        np.stack([anchors.get_anchors(input_img.shape)])
                    ])
                else:
                    detection, scores = model.predict(
                        [np.stack([input_img]),
                         np.stack([input_box])])
                detection = da.unmold(detection[0], meta)
                detection[:, 2:] += detection[:, :2]
                features = np.zeros([len(detection), 0])
                gallery.append([imname, features, detection])
                print("\r%d|%d" % (len(gallery), len(imnames)), end='')
            print('')
            f = open(filepath, 'wb')
            pickle.dump(gallery, f)
            f.close()
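A hedged sketch of reading back one of the gallery pickles written above; parpath is assumed to be defined as in the snippet and the file name is a placeholder:

# Hedged sketch: inspect a gallery pickle produced by the loops above.
# Assumptions: parpath is defined as in the snippet; the file name is a
# placeholder following the cuhk_<M>_gallery.pkl pattern used in main().
import os
import pickle

with open(os.path.join(parpath, 'experiment_results',
                       'cuhk_yolov3_gallery.pkl'), 'rb') as f:
    gallery = pickle.load(f)
imname, features, detection = gallery[0]
print(imname, detection.shape)   # boxes were converted to x1, y1, x2, y2 above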
Code example #11
File: prw_evaluation.py Project: RuaHU/TLfPS
class EVALUATION():
    def __init__(self,
                 dataset,
                 detector='yolov3',
                 experiment_name='default',
                 overwrite=False):
        if dataset is None: return
        self.detector = detector
        self.dataset = dataset
        self.overwrite = overwrite
        self.experiment_name = experiment_name
        self.checkdir()
        self.config = Config(detector)
        self.DA = DA('validation', self.config)
        self.load_gallery()
        self.load_query()

    def get_sims(self, gfeats, qfeat, _eval=True):
        '''
        gfeats: gallery features
        qfeat: query feature
        '''
        if _eval: return gfeats.dot(qfeat.ravel()).ravel()
        gfeats_norm = np.linalg.norm(gfeats, keepdims=True, axis=-1)
        qfeat_norm = np.linalg.norm(qfeat, keepdims=True)
        gfeats_nl = gfeats / gfeats_norm
        qfeat_nl = qfeat / qfeat_norm
        sim = gfeats_nl.dot(qfeat_nl.ravel()).ravel()
        return sim

    def checkdir(self, ):
        dirpath = os.path.join(parpath, 'experiment_results',
                               self.experiment_name)
        if os.path.exists(dirpath):
            print('experiment [%s] already exists' % self.experiment_name)
            if self.overwrite:
                print('cleaning experiment [%s] [overwrite == True]' %
                      self.experiment_name)
                shutil.rmtree(dirpath, ignore_errors=True)
                if os.path.exists(dirpath):
                    print(
                        'it seems the experiment directory can not be deleted. please check the status of the directory %s'
                        % dirpath)
                os.mkdir(dirpath)
                assert os.path.exists(dirpath)
            else:
                print(
                    'the results of experiment [%s] will be reused [overwrite == False]'
                    % self.experiment_name)
        else:
            os.mkdir(dirpath)
            assert os.path.exists(dirpath)

    def load_model(self, ):
        self.reid_model = MODELS(config=self.config).load_model()

    def load_gallery(self):
        self.gallery_dict = {}
        frame_test = scipy.io.loadmat(
            os.path.join(self.dataset, 'frame_test.mat'))
        frame_indices = frame_test['img_index_test']
        for index, im_name in enumerate(frame_indices[:, 0]):
            mat = scipy.io.loadmat(
                os.path.join(self.dataset, 'annotations',
                             im_name[0] + '.jpg.mat'))
            boxes = mat[list(mat.keys())[-1]]
            ids = boxes[:, 0]
            boxes = boxes[:, 1:5]
            self.gallery_dict[im_name[0] +
                              '.jpg'] = [im_name[0] + '.jpg', boxes, ids]

    def load_query(self):
        file = open(os.path.join(self.dataset, 'query_info.txt'))
        self.query_list = []
        for line in file:
            items = line.split()
            ids, roi = int(items[0]), [
                float(items[1]),
                float(items[2]),
                float(items[3]),
                float(items[4])
            ]
            self.query_list.append([items[-1] + '.jpg', roi, ids])

    def query_feature_extractor(self, ):
        filepath = os.path.join(parpath, 'experiment_results',
                                self.experiment_name,
                                'prw_%s_query_features.pkl' % self.config.M)
        if os.path.exists(filepath):
            return

        if not hasattr(self, 'reid_model'):
            self.load_model()

        if self.config.M == 'mrcnn' and not hasattr(self, 'anchors'):
            self.anchors = ANCHORS(self.config)

        query_features = []
        for item in self.query_list:
            img_name, roi, _ = item
            img = cv2.imread(os.path.join(self.dataset, 'frames', img_name))
            input_img, input_box, _, meta = self.DA(img, [roi])
            if self.config.M == 'mrcnn':
                feature = self.reid_model.predict([
                    np.stack([input_img]),
                    np.stack([input_box]),
                    np.stack(meta),
                    np.stack([self.anchors.get_anchors(input_img.shape)])
                ])[0]
            else:
                feature = self.reid_model.predict(
                    [np.stack([input_img]),
                     np.stack([input_box])])[0]
            query_features.append([img_name, feature[0], np.array([roi])])
            print("\r%d|%d" % (len(query_features), len(self.query_list)),
                  end='')
        print('')
        self.query_features = query_features
        f = open(filepath, 'wb')
        pickle.dump(query_features, f)
        f.close()
        return

    def gallery_feature_extractor(self, ):
        filepath = os.path.join(parpath, 'experiment_results',
                                self.experiment_name,
                                'prw_%s_gallery_features.pkl' % self.config.M)
        if os.path.exists(filepath):
            return

        if not hasattr(self, 'reid_model'):
            self.load_model()

        if self.config.M == 'mrcnn' and not hasattr(self, 'anchors'):
            self.anchors = ANCHORS(self.config)

        gallery = []
        for imname in self.gallery_dict.keys():
            img = cv2.imread(os.path.join(self.dataset, 'frames', imname))
            input_img, input_box, _, meta = self.DA(img, [])
            if self.config.M == 'mrcnn':
                feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                    [
                        np.stack([input_img]),
                        np.stack([input_box]),
                        np.stack(meta),
                        np.stack([self.anchors.get_anchors(input_img.shape)])
                    ])
            elif self.config.M == 'dla_34':
                feats, det_features, det, _ = self.reid_model.predict(
                    [np.stack([input_img]),
                     np.stack([input_box])])
            else:
                feats, _, _, _, det_features, det, _ = self.reid_model.predict(
                    [np.stack([input_img]),
                     np.stack([input_box])])
            det = self.DA.unmold(det[0], meta)
            det[:, 2:] += det[:, :2]
            gallery.append([imname, det_features[0], det])
            print("\r%d|%d" % (len(gallery), len(self.gallery_dict)), end='')
        print('')
        f = open(filepath, 'wb')
        pickle.dump(gallery, f)
        f.close()
        return

    def simple_evaluation(self, model, gallery_size=50):
        #extract query feature vectors
        qfeatures = []
        for item in self.query_list:
            img_name, roi, _ = item
            img = cv2.imread(os.path.join(self.dataset, 'frames', img_name))
            input_img, input_box, input_ids, meta = self.DA(img, [roi])
            feature = model.predict([
                np.stack([input_img]),
                np.stack([input_box]),
                np.stack([input_ids])
            ])
            qfeatures.append([img_name, feature[0], np.array([roi])])
            print("\r%d|%d" % (len(qfeatures), len(self.query_list)), end='')
        print('')
        #extract gallery feature vectors
        filepath = os.path.join(
            parpath, 'experiment_results/prw_%s_gallery.pkl' % self.config.M)
        assert os.path.exists(filepath)
        f = open(filepath, 'rb')
        oim_gallery = pickle.load(f, encoding='latin1')
        gallery = []
        for item in oim_gallery:
            imname, oim_features, oim_boxes = item
            if oim_features is None:
                oim_features = np.zeros([0, 256], dtype=np.float32)
                oim_boxes = np.zeros([0, 5], dtype=np.float32)
            img = cv2.imread(os.path.join(self.dataset, 'frames', imname))
            #xyxy 2 xywh
            toim_boxes = oim_boxes.copy()
            toim_boxes[:, 2:4] -= toim_boxes[:, :2]
            input_img, input_box, input_ids, meta = self.DA(
                img, toim_boxes[:, :4])
            feats = model.predict([
                np.stack([input_img]),
                np.stack([input_box]),
                np.stack([input_ids])
            ])
            gallery.append([imname, feats[0, :, 0, 0, :], oim_boxes])
            print("\r%d|%d" % (len(gallery), len(oim_gallery)), end='')
        print('')

        name_to_det_feat = {}
        for img_name, features, boxes in gallery:
            name_to_det_feat[img_name] = (boxes, features)
        return self.evaluation(qfeatures, name_to_det_feat, _eval=True)

    def evaluation(self, qfeatures, name_to_det_feat, _eval):
        aps, accs, topk = [], [], [1, 5, 10]
        log = open('log.txt', 'w')
        sysout = sys.stdout
        all_recall_rate = []
        #tape = {}
        for i, query in enumerate(self.query_list):
            sys.stdout = log
            qimg_name, qroi, qid = query
            y_true, y_score = [], []
            count_gt, count_tp = 0, 0
            qfeat = qfeatures[i][1].ravel()
            gallery_items = [
                self.gallery_dict[key] for key in self.gallery_dict
                if qid in self.gallery_dict[key][-1] and key != qimg_name
            ]
            gallery_gts = {}
            for item in gallery_items:
                gallery_gts[item[0]] = item[1][item[2] == qid]
            gallery_imgs = [
                key for key in self.gallery_dict if key != qimg_name
            ]
            imgs, y_boxes, y_gname = [], [], []
            for gallery_imname in gallery_imgs:
                count_gt += (gallery_imname in gallery_gts)
                if gallery_imname not in name_to_det_feat: continue
                gboxes, gfeatures = name_to_det_feat[gallery_imname]
                sim = self.get_sims(gfeatures, qfeat, _eval)
                label = np.zeros(len(sim), dtype=np.int32)
                if gallery_imname in gallery_gts:

                    gt = gallery_gts[gallery_imname].ravel()
                    w, h = gt[2], gt[3]
                    gt[2], gt[3] = gt[0] + gt[2], gt[1] + gt[3]
                    iou_thresh = min(0.5,
                                     (w * h * 1.0) / ((w + 10) * (h + 10)))
                    inds = np.argsort(sim)[::-1]
                    sim = sim[inds]
                    gboxes = gboxes[inds]
                    for j, roi in enumerate(gboxes[:, :4]):
                        if self._compute_iou(roi, gt) >= iou_thresh:
                            label[j] = 1
                            count_tp += 1
                            break

                y_true.extend(list(label))
                y_score.extend(list(sim))
                y_boxes.extend(list(gboxes))
                y_gname.extend([gallery_imname for _ in gboxes])

            y_score = np.array(y_score)
            y_true = np.array(y_true)
            y_boxes = np.array(y_boxes)
            y_gname = np.array(y_gname)
            assert count_tp <= count_gt
            recall_rate = count_tp * 1.0 / count_gt
            all_recall_rate.append(recall_rate)
            ap = 0 if count_tp == 0 else \
                average_precision_score(y_true, y_score) * recall_rate
            aps.append(ap)

            inds = np.argsort(y_score)[::-1]
            y_score = y_score[inds]
            y_true = y_true[inds]
            y_boxes = y_boxes[inds]
            y_gname = y_gname[inds]
            acc = [min(1, sum(y_true[:k])) for k in topk]
            accs.append(acc)
            #tape[qimg_name] = [qid, qroi, ap, acc, recall_rate, y_score, y_true, y_boxes, y_gname]
            sys.stdout = sysout
            print("\r%d:\t%d|%d|%.2f|%.2f" %
                  (-1, len(aps), len(qfeatures), np.mean(aps),
                   np.mean(accs, axis=0)[0]),
                  end='')
        print('')
        print('search ranking:')
        print('aRR:%.4f' % np.mean(all_recall_rate))
        print('  mAP = {:.2%}'.format(np.mean(aps)))
        accs = np.mean(accs, axis=0)
        for i, k in enumerate(topk):
            print('  top-{:2d} = {:.2%}'.format(k, accs[i]))

        #record_aps = []
        #new_tape = {}
        #for key in tape.keys():
        #    record_aps.append(tape[key][2])
        #record_aps.sort()
        #th = record_aps[50]
        #for key in tape.keys():
        #    if tape[key][2] > th:continue
        #    new_tape[key] = tape[key]

        #filepath = os.path.join(parpath, 'experiment_results', self.experiment_name, 'prw_%s_tape.pkl'%self.config.M)
        #pickle.dump(new_tape, filepath)

        return aps, accs

    def private_detector_evaluation(self):
        print(
            'this experiment uses the end-to-end detector [%s] + feature extractor [%s%s]'
            % (self.config.M, self.config.M, '_mgn' if self.config.mgn else ''))
        topk = [1, 5, 10]
        respath = os.path.join(parpath, 'experiment_results',
                               self.experiment_name,
                               'prw_%s_res.pkl' % self.config.M)
        if os.path.exists(respath):
            f = open(respath, 'rb')
            res = pickle.load(f)
            f.close()
            aps, accs = res
            print('  mAP = {:.2%}'.format(np.mean(aps)))
            for i, k in enumerate(topk):
                print('  top-{:2d} = {:.2%}'.format(k, accs[i]))
            return

        qfilepath = os.path.join(parpath, 'experiment_results',
                                 self.experiment_name,
                                 'prw_%s_query_features.pkl' % self.config.M)
        assert os.path.exists(qfilepath)
        f = open(qfilepath, 'rb')
        qfeatures = pickle.load(f)
        f.close()
        gfilepath = os.path.join(parpath, 'experiment_results',
                                 self.experiment_name,
                                 'prw_%s_gallery_features.pkl' % self.config.M)
        assert os.path.exists(gfilepath)
        f = open(gfilepath, 'rb')
        gfeatures = pickle.load(f, encoding='latin1')
        f.close()
        name_to_det_feat = {}
        for img_name, features, boxes in gfeatures:
            name_to_det_feat[img_name] = (boxes, features)

        res = self.evaluation(qfeatures, name_to_det_feat, _eval=True)

        f = open(respath, 'wb')
        pickle.dump(res, f)
        f.close()

    def _compute_iou(self, box1, box2):
        a, b = box1.copy(), box2.copy()
        x1 = max(a[0], b[0])
        y1 = max(a[1], b[1])
        x2 = min(a[2], b[2])
        y2 = min(a[3], b[3])
        inter = max(0, x2 - x1) * max(0, y2 - y1)
        union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] -
                                                                 b[1]) - inter
        return inter * 1.0 / union
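A hedged usage sketch of the PRW evaluation class above; the dataset path is a placeholder:

# Hedged pipeline sketch (assumptions: the PRW EVALUATION class above is in
# scope and the dataset path is a placeholder).
ev = EVALUATION('/path/to/PRW-v16.04.20', detector='yolov3',
                experiment_name='default', overwrite=False)
ev.query_feature_extractor()        # writes prw_<M>_query_features.pkl
ev.gallery_feature_extractor()      # writes prw_<M>_gallery_features.pkl
ev.private_detector_evaluation()    # prints aRR, mAP, and top-k accuracy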