Example #1
        ])

        # sp_score is the fraction of destination points that fall inside
        # the detection box, so it must lie in [0, 1].
        sp_score = is_dst_pt_inside_dt.mean()
        assert (0. <= sp_score <= 1.)
        return sp_score

    def get_scores(self):
        # _sm and _sp are the SIFT-match and SIFT-inlier scores
        # (previously named _sift_matches and _sift_inliers).
        return self._sm(), self._sp()


if __name__ == '__main__':
    import pickle
    from baselines.sift.test import test
    from flowmatch.networks.flownet import FlowNet
    from flowmatch.utils import load_config

    # Loading an (un-preprocessed) example that was saved as a pickle file.
    with open('/home/sancha/repos/osid/sandbox/example.pkl', 'rb') as f:
        example = pickle.load(f)

    cfg = load_config('../../pipeline/configs/tdid_avd2_manual_easy.yaml')
    net = FlowNet(cfg.flownet)

    sift_matches = test(net, example)
    example['pred_sift'] = sift_matches

    # Reclassification.
    reclass_scorer = SiftMatchScorer(example)
    scores = reclass_scorer.get_scores()
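
The head of _sp is cut off above, but is_dst_pt_inside_dt is evidently a boolean array with one entry per matched destination keypoint. A hypothetical version of that test (the helper name and the (x1, y1, x2, y2) box format are assumptions, not the original code):

import numpy as np

def points_in_box(pts, box):
    # pts: (N, 2) array of (x, y) keypoint locations.
    # box: hypothetical detection box in (x1, y1, x2, y2) format.
    x1, y1, x2, y2 = box
    return ((pts[:, 0] >= x1) & (pts[:, 0] <= x2) &
            (pts[:, 1] >= y1) & (pts[:, 1] <= y2))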
Example #2
    def __len__(self):
        return len(self.ids)

    def id_to_str(self, id):
        return id_to_str(id, block=8)

    def str_to_id(self, string):
        return str_to_id(string, block=8)

    def _init_transform(self):
        # Initialize torch-defined transforms that will be used by self.transform().
        self.transform_ops = {'ToTensor': transforms.ToTensor()}

    def add_gt_flow(self, example):
        # No-op: this dataset adds no ground-truth flow to the example.
        return example


if __name__ == '__main__':
    """For creating cache"""
    parser = argparse.ArgumentParser(
        description='Creating an object dataset cache.')
    parser.add_argument('--config', default='config', help='config file path')
    args = parser.parse_args()

    cfg = load_config(args.config)

    tg_store = TargetStore(obj_json=cfg.obj_det.obj_json,
                           obj_root=cfg.obj_det.obj_root,
                           cfg=cfg)
    tg_store.create_cache()
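
The module-level id_to_str / str_to_id helpers called above are not shown. A minimal sketch of a fixed-width codec consistent with the block=8 argument (purely an assumption about the real helpers):

def id_to_str(id, block=8):
    # Hypothetical: zero-pad the integer id to a fixed-width string.
    return str(id).zfill(block)

def str_to_id(string, block=8):
    # Hypothetical inverse: parse the fixed-width string back to an int.
    assert len(string) == block
    return int(string)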
Example #3
import argparse

from tqdm import tqdm

from flowmatch.datasets.coco import CocoDataset
from flowmatch.networks.flownet import FlowNet
from flowmatch.flowutils.utils import get_identity_flow
from flowmatch.utils import load_config
from flowmatch.exps.coco.main import coco_filter

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Computing baseline performance on COCO')
    parser.add_argument('--config', default='config', help='name of config')
    parser.add_argument('--num_examples', '-r', type=int, default=None, help='number of examples to test on')
    args = parser.parse_args()

    cfg_path = '../../exps/coco/{}.py'.format(args.config)
    cfg = load_config(cfg_path)

    coco_valid = CocoDataset(root=cfg.coco.valid.image_dir, annFile=cfg.coco.valid.ann_file, cfg=cfg)
    print('Filtering validation set ... ', end='', flush=True)
    filtered_ids = coco_filter(coco_valid)
    coco_valid.item_ids = filtered_ids
    print('done.')

    net = FlowNet(cfg)  # just for pre-processing data

    # Variables for identity flow
    sum_identity_epe = 0.0
    identity_flow = get_identity_flow(cfg.tg_size, cfg.tg_size)

    # Variables for the best constant flow (closed form sketched below)
    sum_gt_flow, sum_sq_gt_flow, sum_tg_mask = 0., 0., 0.
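
The accumulation loop is cut off here. Assuming sum_gt_flow, sum_sq_gt_flow, and sum_tg_mask accumulate the masked ground-truth flow, its elementwise square, and the mask counts, the least-squares best constant flow has a closed form (a sketch under that assumption, not the original code):

# The constant c minimizing sum(m * (f - c)^2) is the masked mean;
# the residual follows from sum(f^2) - sum(f)^2 / n, per channel.
best_const_flow = sum_gt_flow / sum_tg_mask
sum_sq_error = sum_sq_gt_flow - (sum_gt_flow ** 2) / sum_tg_mask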
Example #4
                res[detections[i]['id']] = detections[i]['score']
            else:
                # Keep only the margin above the next-highest overlapping
                # score, floored at zero.
                next_highest_score = other_scores[over_inds[0]]
                res[detections[i]['id']] = max(
                    0, detections[i]['score'] - next_highest_score)

        # Replicate each score nvp times.
        res = [{'id': x, 'scores': [res[x]] * nvp} for x in res]
        return res

    def _others(self, arrs, i):
        # Leave-one-out: each array with its i-th element removed.
        return [np.concatenate([arr[:i], arr[i + 1:]]) for arr in arrs]


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        default='config.yaml',
                        help='config file path')
    parser.add_argument('--eval_ds',
                        default='gmu_test',
                        help='dataset to generate SimObj scores')

    args = parser.parse_args()

    eval_ds = args.eval_ds
    cfg = load_config(args.config).combine

    scorer = SimObjScorer(os.path.join(cfg.root, cfg[eval_ds].dt_json))
    scorer.create_score_files(os.path.join(cfg.root, cfg[eval_ds].run_dir))
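
For reference, _others is a leave-one-out helper: it returns each input array with its i-th element removed. A small standalone illustration:

import numpy as np

arrs = [np.array([1, 2, 3]), np.array([10, 20, 30])]
i = 1
others = [np.concatenate([arr[:i], arr[i + 1:]]) for arr in arrs]
# others == [array([1, 3]), array([10, 30])]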