Example #1
0
def eval(dataset, tracker_name):
    """Evaluate a single tracker's stored results on *dataset*.

    NOTE(review): this function shadows the builtin ``eval``; the name is
    kept so existing callers keep working.  It also takes the dataset
    *object* as a parameter while reading the dataset *name* from the
    module-level ``args`` — presumably the two always agree; confirm at the
    call site.

    Args:
        dataset: a pysot dataset object (OTBDataset, VOTDataset, ...).
        tracker_name: name of the tracker whose results live under "./".

    Returns:
        Mean success AUC for OPE-style datasets (OTB/LaSOT/UAV/NFS), the
        overall EAO for short-term VOT datasets, and 0 otherwise (the
        long-term VOT branch only prints its F1 table).
    """
    tracker_dir = "./"
    trackers = [tracker_name]
    # OTB, LaSOT, UAV and NFS all use the identical OPE success (AUC)
    # protocol, so the four previously duplicated branches are merged.
    if ('OTB' in args.dataset or 'LaSOT' == args.dataset
            or 'UAV' in args.dataset or 'NFS' in args.dataset):
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        eval_auc = benchmark.eval_success(tracker_name)
        # AUC score averaged over all videos of the dataset.
        auc = np.mean(list(eval_auc[tracker_name].values()))
        return auc
    if args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = EAOBenchmark(dataset)
        eval_eao = benchmark.eval(tracker_name)
        eao = eval_eao[tracker_name]['all']
        return eao
    elif 'VOT2018-LT' == args.dataset or 'VOT2019-LT' == args.dataset:
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = {}
        num_process = 4
        with Pool(processes=num_process) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval, trackers),
                            desc='eval f1',
                            total=len(trackers),
                            ncols=100):
                f1_result.update(ret)
        # Long-term branch only prints the table; no score is returned.
        benchmark.show_result(f1_result, show_video_level=False)

    return 0
Example #2
0
def main():
    """Evaluate every tracker matching ``args.tracker_prefix`` on ``args.dataset``.

    Globs the tracker result directories, builds the matching dataset and
    benchmark objects, runs the per-tracker evaluations in a process pool,
    and prints a summary table via ``show_result``.
    """

    def run_pool(eval_fn, desc):
        # Fan one benchmark evaluation out over all trackers in parallel
        # and merge the per-tracker result dicts into one mapping.
        merged = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc,
                            total=len(trackers),
                            ncols=100):
                merged.update(ret)
        return merged

    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(
        os.path.join(args.tracker_path, args.dataset,
                     args.tracker_prefix + '*'))
    trackers = [x.split('/')[-1] for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    root = os.path.realpath(
        os.path.join(os.path.dirname(__file__), '../../pysot/testing_dataset'))
    root = os.path.join(root, args.dataset)
    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = run_pool(benchmark.eval_norm_precision,
                                      'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # Short-term VOT: accuracy/robustness plus EAO.
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = run_pool(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(dataset)
        eao_result = run_pool(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        # Long-term VOT uses the F1 benchmark.
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = run_pool(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
    elif 'GOT-10k' == args.dataset:
        dataset = GOT10kDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
Example #3
0
                    config['lr'], config['search_region'], eao))
        reporter(EAO=eao)
    else:
        raise NotImplementedError


if __name__ == '__main__':
    # Enable cuDNN autotuning (fastest conv kernels for fixed input sizes).
    torch.backends.cudnn.benchmark = True
    cfg.merge_from_file(args.config)

    # Build the evaluation dataset and the EAO benchmark that serves as the
    # fitness signal for hyper-parameter tuning.
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    dataset_root = os.path.join(cur_dir, '../testing_dataset', args.dataset)
    dataset = DatasetFactory.create_dataset(name=args.dataset,
                                            dataset_root=dataset_root,
                                            load_img=False)
    benchmark = EAOBenchmark(dataset)

    # Load the tracker model once; shared by all tuning trials.
    model = ModelBuilder()
    model = load_pretrain(model, args.snapshot).cuda().eval()

    # Resources available to Ray; object_store_memory lives in shared memory
    # (/dev/shm) and is given in bytes (~30 GB here).
    ray.init(num_gpus=1, num_cpus=8, object_store_memory=30000000000)
    tune.register_trainable('fitness', fitness)

    # Define the hyperopt search space.
    # NOTE(review): the 'lr' entry is registered under the label 'scale_lr'
    # — presumably intentional, but confirm against how results are read.
    params = {
        'penalty_k': hp.quniform('penalty_k', 0.001, 0.6, 0.001),
        'lr': hp.quniform('scale_lr', 0.1, 0.8, 0.001),
        'window_influence': hp.quniform('window_influence', 0.05, 0.65, 0.001),
        'search_region': hp.choice('search_region', [255]),
    }
Example #4
0
def main():
    """Evaluate trackers matching ``args.tracker_prefix`` on ``args.dataset``.

    Runs the per-tracker benchmark evaluations in a process pool, prints the
    result tables, and optionally draws success/precision plots when
    ``args.vis`` is set (OTB and LaSOT branches only).
    """

    def run_pool(eval_fn, desc):
        # Run one benchmark evaluation for every tracker in parallel and
        # merge the per-tracker result dicts into one mapping.
        merged = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc,
                            total=len(trackers),
                            ncols=100):
                merged.update(ret)
        return merged

    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(
        os.path.join(args.tracker_path, args.dataset,
                     args.tracker_prefix + '*'))
    trackers = [x.split('/')[-1] for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    # NOTE(review): ``dataset_root_`` is a module-level global defined
    # elsewhere in this file.
    root = os.path.join(dataset_root_, args.dataset)
    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            # Only the aggregate 'ALL' attribute is plotted.
            for attr, videos in dataset.attr.items():
                if attr == 'ALL':
                    draw_success_precision(success_ret,
                                           name=dataset.name,
                                           videos=videos,
                                           attr=attr,
                                           precision_ret=precision_ret)
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = run_pool(benchmark.eval_norm_precision,
                                      'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            draw_success_precision(success_ret,
                                   name=dataset.name,
                                   videos=dataset.attr['ALL'],
                                   attr='ALL',
                                   precision_ret=precision_ret,
                                   norm_precision_ret=norm_precision_ret)
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # Short-term VOT: accuracy/robustness plus EAO.
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = run_pool(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(dataset)
        eao_result = run_pool(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        # Long-term VOT uses the F1 benchmark.
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = run_pool(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
Example #5
0
def main():
    """Evaluate every tracker found under ``args.tracker_path`` on ``args.dataset``.

    Tracker result directories are discovered with ``pathlib`` globbing,
    the per-tracker evaluations run in a process pool, and a summary table
    is printed via ``show_result``.
    """

    def run_pool(eval_fn, desc):
        # Fan one benchmark evaluation out over all trackers in parallel
        # and merge the per-tracker result dicts into one mapping.
        merged = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc,
                            total=len(trackers),
                            ncols=100):
                merged.update(ret)
        return merged

    tracker_dir = Path(args.tracker_path)
    tracker_path = tracker_dir / args.dataset
    trackers = tracker_path.glob("*")
    trackers = [Path(x).stem for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    root = str(Path(args.dataset_root) / args.dataset)
    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        # BUGFIX: pass the per-dataset directory the trackers were globbed
        # from (previously the tracker *root* ``tracker_dir`` was passed,
        # inconsistent with the VOT branch below).
        dataset.set_tracker(tracker_path, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        # BUGFIX: same directory fix as the OTB branch above.
        dataset.set_tracker(tracker_path, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = run_pool(benchmark.eval_norm_precision,
                                      'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019', 'debug']:
        # Short-term VOT: accuracy/robustness plus EAO.
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_path, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = run_pool(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(dataset)
        eao_result = run_pool(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
Example #6
0
def evaluation(dataset='VOT2018',
               tracker_prefix='DaSiamRPN',
               tracker_path='./results',
               num=4,
               show_video_level=True):
    """Evaluate stored tracker results for *dataset*.

    Args:
        dataset: dataset name (OTB*/LaSOT/UAV*/NFS*/VOT201x/VOT2018-LT).
        tracker_prefix: exact glob pattern for tracker result directories
            (note: no trailing '*' is appended — exact-name match).
        tracker_path: directory containing the tracker result folders.
        num: maximum number of worker processes.
        show_video_level: forwarded to ``show_result``.
    """

    def run_pool(eval_fn, desc):
        # Fan one benchmark evaluation out over all trackers in parallel
        # and merge the per-tracker result dicts into one mapping.
        merged = {}
        with Pool(processes=num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc,
                            total=len(trackers),
                            ncols=100):
                merged.update(ret)
        return merged

    # os.path.join with a single argument is a no-op; keep the alias only.
    tracker_dir = tracker_path
    trackers = glob(os.path.join(tracker_path, tracker_prefix))
    #  tracker_prefix+'*'))
    trackers = [x.split('/')[-1] for x in trackers]

    assert len(trackers) > 0
    num = min(num, len(trackers))

    # NOTE(review): dataset root is hard-coded to a local machine path.
    root = '/home/lyuyu/dataset/'
    root = os.path.join(root, dataset)
    if 'OTB' in dataset:
        dataset = OTBDataset(dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=show_video_level)
    elif 'LaSOT' == dataset:
        dataset = LaSOTDataset(dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = run_pool(benchmark.eval_norm_precision,
                                      'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=show_video_level)
    elif 'UAV' in dataset:
        dataset = UAVDataset(dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=show_video_level)
    elif 'NFS' in dataset:
        dataset = NFSDataset(dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_pool(benchmark.eval_success, 'eval success')
        precision_ret = run_pool(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=show_video_level)
    elif dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # Short-term VOT: accuracy/robustness plus EAO.
        dataset = VOTDataset(dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = run_pool(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(dataset)
        eao_result = run_pool(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=show_video_level)
    elif 'VOT2018-LT' == dataset:
        # Long-term VOT uses the F1 benchmark.
        dataset = VOTLTDataset(dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = run_pool(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result, show_video_level=show_video_level)
Example #7
0
def evaluate(args):
    """Evaluate saved tracking results for the benchmark named by ``args.dataset``.

    Result files are expected under ``<tracker_path>/<dataset>/<tracker_name>*``
    and ground-truth annotations under ``./datasets/<dataset>``.

    Args:
        args: namespace with ``tracker_path``, ``dataset``, ``tracker_name``,
            ``num`` (max worker processes) and ``show_video_level`` attributes.
    """
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(
        os.path.join(args.tracker_path, args.dataset, args.tracker_name + '*'))
    # basename() is portable; splitting on '/' breaks on Windows path separators.
    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0
    # Never spawn more worker processes than there are trackers to score.
    args.num = min(args.num, len(trackers))

    root = os.path.join('./datasets', args.dataset)

    def pooled(eval_fn, desc):
        # Fan eval_fn out over all trackers in a process pool and merge the
        # per-tracker result dicts into one.
        merged = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc,
                            total=len(trackers),
                            ncols=100):
                merged.update(ret)
        return merged

    def ope_report(dataset, with_norm_precision=False):
        # Shared success/precision OPE evaluation used by most datasets.
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = pooled(benchmark.eval_success, 'eval success')
        precision_ret = pooled(benchmark.eval_precision, 'eval precision')
        if with_norm_precision:
            norm_precision_ret = pooled(benchmark.eval_norm_precision,
                                        'eval norm precision')
            benchmark.show_result(success_ret,
                                  precision_ret,
                                  norm_precision_ret,
                                  show_video_level=args.show_video_level)
        else:
            benchmark.show_result(success_ret,
                                  precision_ret,
                                  show_video_level=args.show_video_level)

    # Branch order matters: 'UAVDT' must be tested before the generic 'UAV'.
    if 'OTB' in args.dataset:
        ope_report(OTBDataset(args.dataset, root))
    elif 'DTB70' in args.dataset:
        ope_report(DTB70Dataset(args.dataset, root))
    elif 'UAVDT' in args.dataset:
        ope_report(UAVDTDataset(args.dataset, root))
    elif 'VisDrone' in args.dataset:
        ope_report(VisDroneDataset(args.dataset, root))
    elif 'GOT-10k' in args.dataset:
        # GOT-10k has its own experiment/report pipeline.
        root_dir = os.path.abspath('datasets/GOT-10k')
        e = ExperimentGOT10k(root_dir)
        # NOTE(review): tracker name is hard-coded here; it presumably should
        # follow args.tracker_name -- confirm before evaluating other trackers.
        ao, sr, speed = e.report(['siamcar'])
        ss = 'ao:%.3f --sr:%.3f -speed:%.3f' % (float(ao), float(sr),
                                                float(speed))
        print(ss)
    elif 'LaSOT' == args.dataset:
        # LaSOT additionally reports normalized precision.
        ope_report(LaSOTDataset(args.dataset, root), with_norm_precision=True)
    elif 'UAV' in args.dataset:
        # Note: UAVDT (handled above) differs from UAV123 / UAV20L.
        ope_report(UAVDataset(args.dataset, root))
    elif 'NFS' in args.dataset:
        ope_report(NFSDataset(args.dataset, root))
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # Short-term VOT: accuracy/robustness plus expected average overlap.
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = pooled(ar_benchmark.eval, 'eval ar')

        benchmark = EAOBenchmark(dataset)
        eao_result = pooled(benchmark.eval, 'eval eao')

        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        # Long-term VOT is scored with the F1 benchmark.
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = pooled(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
Пример #8
0
def main():
    """Evaluate trackers matching ``args.tracker_prefix`` on ``args.dataset``
    and print benchmark tables; with ``args.vis`` also draw result plots.

    Relies on a module-level ``args`` namespace (``tracker_path``, ``dataset``,
    ``tracker_prefix``, ``num``, ``show_video_level``, ``vis``).
    """
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(
        os.path.join(args.tracker_path, args.dataset,
                     args.tracker_prefix + '*'))
    # basename() is portable; splitting on '/' breaks on Windows path separators.
    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0
    # Never spawn more worker processes than there are trackers to score.
    args.num = min(args.num, len(trackers))

    # Ground-truth annotations live next to this script in ../testing_dataset.
    root = os.path.realpath(
        os.path.join(os.path.dirname(__file__), '../testing_dataset'))
    root = os.path.join(root, args.dataset)

    def pooled(eval_fn, desc):
        # Fan eval_fn out over all trackers in a process pool and merge the
        # per-tracker result dicts into one.
        merged = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc,
                            total=len(trackers),
                            ncols=100):
                merged.update(ret)
        return merged

    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = pooled(benchmark.eval_success, 'eval success')
        precision_ret = pooled(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            # One success/precision plot per OTB attribute.
            for attr, videos in dataset.attr.items():
                draw_success_precision(success_ret,
                                       name=dataset.name,
                                       videos=videos,
                                       attr=attr,
                                       precision_ret=precision_ret,
                                       bold_name='Ours')
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = pooled(benchmark.eval_success, 'eval success')
        precision_ret = pooled(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = pooled(benchmark.eval_norm_precision,
                                    'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            draw_success_precision(success_ret,
                                   name=dataset.name,
                                   videos=dataset.attr['ALL'],
                                   attr='ALL',
                                   precision_ret=precision_ret,
                                   norm_precision_ret=norm_precision_ret,
                                   bold_name='Ours')
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = pooled(benchmark.eval_success, 'eval success')
        precision_ret = pooled(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            for attr, videos in dataset.attr.items():
                draw_success_precision(success_ret,
                                       name=dataset.name,
                                       videos=videos,
                                       attr=attr,
                                       precision_ret=precision_ret,
                                       bold_name='Ours')
    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = pooled(benchmark.eval_success, 'eval success')
        precision_ret = pooled(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            for attr, videos in dataset.attr.items():
                # Bug fix: keyword was misspelled ``video=``; the other
                # branches (and draw_success_precision) use ``videos=``.
                draw_success_precision(success_ret,
                                       name=dataset.name,
                                       videos=videos,
                                       attr=attr,
                                       precision_ret=precision_ret,
                                       bold_name='Ours')
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # Short-term VOT: accuracy/robustness plus expected average overlap.
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = pooled(ar_benchmark.eval, 'eval ar')

        if args.vis:
            # Extra attribute tags so draw_eao can break results down.
            benchmark = EAOBenchmark(dataset,
                                     tags=[
                                         "all", "camera_motion",
                                         "illum_change", "motion_change",
                                         "size_change", "occlusion", "empty"
                                     ])
        else:
            benchmark = EAOBenchmark(dataset)
        eao_result = pooled(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
        if args.vis:
            draw_eao(eao_result)
    elif 'VOT2018-LT' == args.dataset:
        # Long-term VOT is scored with the F1 benchmark.
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = pooled(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
        if args.vis:
            draw_f1(f1_result, bold_name='Ours')
    elif 'TrackingNet' in args.dataset:
        # TrackingNet ground truth is withheld; submit results online instead.
        print('Please evaluate on the server!')
    elif 'VisDrone' in args.dataset:
        dataset = VisDroneDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = pooled(benchmark.eval_success, 'eval success')
        precision_ret = pooled(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
Пример #9
0
def main():
    """Evaluate every tracker whose result directory matches one of the
    prefixes in ``args.tracker_prefix`` on ``args.dataset``, printing
    benchmark tables (and drawing plots for some datasets).

    Relies on a module-level ``args`` namespace.
    """
    trackers = []
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    # args.tracker_prefix is a list here: gather result folders matching any
    # of the prefixes under <tracker_path>/<dataset>/.
    for prfx in args.tracker_prefix:
        trackers1 = glob(
            os.path.join(args.tracker_path, args.dataset, prfx + '*'))
        trackers.extend([x.split('/')[-1] for x in trackers1])

    assert len(trackers) > 0
    # Never spawn more worker processes than there are trackers to score.
    args.num = min(args.num, len(trackers))

    # Ground-truth annotations live next to this script in ../testing_dataset.
    root = os.path.realpath(
        os.path.join(os.path.dirname(__file__), '../testing_dataset'))
    root = os.path.join(root, args.dataset)
    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        # Success (overlap) scores per tracker, evaluated in parallel.
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        # Center-location precision scores per tracker.
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        # videos=[]
        # attr = 'occlusion'
        # for v_idx, video in enumerate(dataset):
        #     if hasattr(video.attr, attr):
        #         videos.append(video.name)
        ####################################################################
        # Draw one success/precision plot per OTB attribute.
        for k, v in dataset.attr.items():
            #            if k=='Occlusion':
            draw_success_precision(success_ret, 'OTB100', v, str(k),
                                   precision_ret)
#####################################################################

    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_norm_precision,
                                                trackers),
                            desc='eval norm precision',
                            total=len(trackers),
                            ncols=100):
                norm_precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        ####################################################################
        # Only the 'Full Occlusion' attribute is plotted for UAV123.
        for k, v in dataset.attr.items():
            if k == 'Full Occlusion':
                draw_success_precision(success_ret, 'UAV123', v, str(k),
                                       precision_ret)


#####################################################################

    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # Short-term VOT: accuracy/robustness plus expected average overlap.
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(ar_benchmark.eval, trackers),
                            desc='eval ar',
                            total=len(trackers),
                            ncols=100):
                ar_result.update(ret)

        benchmark = EAOBenchmark(dataset)
        eao_result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval, trackers),
                            desc='eval eao',
                            total=len(trackers),
                            ncols=100):
                eao_result.update(ret)
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        # Long-term VOT is scored with the F1 benchmark.
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval, trackers),
                            desc='eval f1',
                            total=len(trackers),
                            ncols=100):
                f1_result.update(ret)
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)