Code example #1 (score: 0)
def main(args):
    """Evaluate all matching trackers on the requested benchmark dataset.

    Only OTB-style datasets are handled in this variant; results are
    printed via ``OPEBenchmark.show_result``.

    Args:
        args: parsed CLI namespace providing ``tracker_path``,
            ``dataset``, ``tracker_prefix``, ``num`` (worker count)
            and ``show_video_level``.
    """
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    # Reuse tracker_dir instead of re-joining the same components, and
    # use os.path.basename so the name extraction also works with
    # Windows path separators (the original split on '/' only).
    trackers = glob(os.path.join(tracker_dir, args.tracker_prefix + '*'))
    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    # Dataset root lives next to this script's parent directory.
    root = os.path.realpath(
        os.path.join(os.path.dirname(__file__), '../testing_dataset'))
    root = os.path.join(root, args.dataset)

    def _pool_eval(eval_fn, desc):
        # Fan the per-tracker evaluation out over a process pool and
        # merge the per-tracker result dicts into one.
        result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc, total=len(trackers), ncols=100):
                result.update(ret)
        return result

    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
Code example #2 (score: 0)
File: eval.py — Project: tankebuaa/pyvis
def main():
    """Evaluate trackers on the dataset selected by the global ``args``.

    Dispatches on ``args.dataset`` to the matching benchmark:
    OPE (success/precision) for OTB, LaSOT, UAV, NFS and GOT-10k;
    Accuracy-Robustness + EAO for the VOT short-term datasets; and
    F1 for VOT2018-LT. Results are printed via the benchmark's
    ``show_result``.
    """
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    # os.path.basename is portable across path separators; the
    # original split on '/' which breaks on Windows paths.
    trackers = glob(os.path.join(tracker_dir, args.tracker_prefix + '*'))
    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    root = os.path.realpath(
        os.path.join(os.path.dirname(__file__), '../../pysot/testing_dataset'))
    root = os.path.join(root, args.dataset)

    def _pool_eval(eval_fn, desc):
        # Run eval_fn for every tracker in a process pool and merge
        # the per-tracker result dicts into one.
        result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc, total=len(trackers), ncols=100):
                result.update(ret)
        return result

    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = _pool_eval(benchmark.eval_norm_precision,
                                        'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = _pool_eval(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(dataset)
        eao_result = _pool_eval(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = _pool_eval(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
    elif 'GOT-10k' == args.dataset:
        dataset = GOT10kDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
Code example #3 (score: 0)
File: eval_new.py — Project: yulingfeng120/CSA
def main():
    """Evaluate trackers on the dataset selected by the global ``args``.

    Dispatches on ``args.dataset`` to the matching benchmark (OPE for
    OTB/LaSOT/UAV/NFS, AR+EAO for VOT, F1 for VOT2018-LT) and prints
    the results; when ``args.vis`` is set, success/precision curves
    are also drawn for OTB and LaSOT.
    """
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    # os.path.basename is portable across path separators; the
    # original split on '/' which breaks on Windows paths.
    trackers = glob(os.path.join(tracker_dir, args.tracker_prefix + '*'))
    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    # dataset_root_ is a module-level constant in this project.
    root = os.path.join(dataset_root_, args.dataset)

    def _pool_eval(eval_fn, desc):
        # Run eval_fn for every tracker in a process pool and merge
        # the per-tracker result dicts into one.
        result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc, total=len(trackers), ncols=100):
                result.update(ret)
        return result

    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            # Only the aggregate ('ALL') attribute is plotted here.
            for attr, videos in dataset.attr.items():
                if attr == 'ALL':
                    draw_success_precision(success_ret,
                                           name=dataset.name,
                                           videos=videos,
                                           attr=attr,
                                           precision_ret=precision_ret)
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = _pool_eval(benchmark.eval_norm_precision,
                                        'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            draw_success_precision(success_ret,
                                   name=dataset.name,
                                   videos=dataset.attr['ALL'],
                                   attr='ALL',
                                   precision_ret=precision_ret,
                                   norm_precision_ret=norm_precision_ret)
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = _pool_eval(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(dataset)
        eao_result = _pool_eval(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = _pool_eval(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
Code example #4 (score: 0)
def main():
    """Evaluate trackers found under ``args.tracker_path/<dataset>``.

    Uses pathlib for path handling. Dispatches on ``args.dataset`` to
    OPE (OTB/LaSOT) or AR+EAO (VOT) benchmarks and prints the results.
    """
    tracker_dir = Path(args.tracker_path)
    tracker_path = tracker_dir / args.dataset
    # Tracker names are the entry names inside the per-dataset
    # directory (Path.stem drops any extension).
    trackers = [Path(x).stem for x in tracker_path.glob("*")]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    root = str(Path(args.dataset_root) / args.dataset)

    def _pool_eval(eval_fn, desc):
        # Run eval_fn for every tracker in a process pool and merge
        # the per-tracker result dicts into one.
        result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc, total=len(trackers), ncols=100):
                result.update(ret)
        return result

    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        # FIX: trackers were enumerated from tracker_path (the
        # per-dataset directory), so set_tracker must receive that same
        # directory. The original passed the parent tracker_dir here
        # while the VOT branch already (correctly) used tracker_path.
        dataset.set_tracker(tracker_path, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        # Same fix as the OTB branch: use the per-dataset directory.
        dataset.set_tracker(tracker_path, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = _pool_eval(benchmark.eval_norm_precision,
                                        'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019', 'debug']:
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_path, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = _pool_eval(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(dataset)
        eao_result = _pool_eval(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
Code example #5 (score: 0)
def evaluation(dataset='VOT2018',
               tracker_prefix='DaSiamRPN',
               tracker_path='./results',
               num=4,
               show_video_level=True,
               dataset_root='/home/lyuyu/dataset/'):
    """Evaluate trackers named exactly ``tracker_prefix`` on a dataset.

    Args:
        dataset: benchmark name (OTB*/LaSOT/UAV*/NFS*/VOT*/VOT2018-LT).
        tracker_prefix: exact tracker directory name to evaluate
            (note: matched exactly, no wildcard — this is deliberate).
        tracker_path: directory containing per-tracker result folders.
        num: max number of worker processes.
        show_video_level: whether to print per-video results.
        dataset_root: directory holding the benchmark datasets.
            Previously hard-coded; the default preserves the old
            behavior while letting other machines override it.
    """
    tracker_dir = tracker_path  # results live directly under tracker_path
    # Exact-name match on purpose (the wildcard version is disabled).
    trackers = glob(os.path.join(tracker_path, tracker_prefix))
    # os.path.basename is portable across path separators; the
    # original split on '/' which breaks on Windows paths.
    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0
    num = min(num, len(trackers))

    root = os.path.join(dataset_root, dataset)

    def _pool_eval(eval_fn, desc):
        # Run eval_fn for every tracker in a process pool and merge
        # the per-tracker result dicts into one.
        result = {}
        with Pool(processes=num) as pool:
            for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                            desc=desc, total=len(trackers), ncols=100):
                result.update(ret)
        return result

    # Use a distinct name (ds) for the dataset object so the string
    # parameter `dataset` is no longer shadowed.
    if 'OTB' in dataset:
        ds = OTBDataset(dataset, root)
        ds.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(ds)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=show_video_level)
    elif 'LaSOT' == dataset:
        ds = LaSOTDataset(dataset, root)
        ds.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(ds)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        # LaSOT additionally reports normalized precision.
        norm_precision_ret = _pool_eval(benchmark.eval_norm_precision,
                                        'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=show_video_level)
    elif 'UAV' in dataset:
        ds = UAVDataset(dataset, root)
        ds.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(ds)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=show_video_level)
    elif 'NFS' in dataset:
        ds = NFSDataset(dataset, root)
        ds.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(ds)
        success_ret = _pool_eval(benchmark.eval_success, 'eval success')
        precision_ret = _pool_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=show_video_level)
    elif dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        ds = VOTDataset(dataset, root)
        ds.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(ds)
        ar_result = _pool_eval(ar_benchmark.eval, 'eval ar')
        benchmark = EAOBenchmark(ds)
        eao_result = _pool_eval(benchmark.eval, 'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=show_video_level)
    elif 'VOT2018-LT' == dataset:
        ds = VOTLTDataset(dataset, root)
        ds.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(ds)
        f1_result = _pool_eval(benchmark.eval, 'eval f1')
        benchmark.show_result(f1_result, show_video_level=show_video_level)
Code example #6 (score: 0)
File: my_eval.py — Project: HonglinChu/SiamTrackers
def _pool_eval(eval_fn, trackers, num, desc):
    """Run *eval_fn* over every tracker in a process pool and merge results.

    Each benchmark eval callable returns a single-tracker dict; the merged
    dict maps tracker name -> metric payload.  A tqdm bar labelled *desc*
    reports progress.
    """
    result = {}
    with Pool(processes=num) as pool:
        for ret in tqdm(pool.imap_unordered(eval_fn, trackers),
                        desc=desc,
                        total=len(trackers),
                        ncols=100):
            result.update(ret)
    return result


def _run_ope(dataset, tracker_dir, trackers, args, norm_precision=False):
    """Run the standard OPE success/precision evaluation on *dataset*.

    When *norm_precision* is True (LaSOT) the normalized-precision metric
    is evaluated and reported as well.
    """
    dataset.set_tracker(tracker_dir, trackers)
    benchmark = OPEBenchmark(dataset)
    success_ret = _pool_eval(benchmark.eval_success, trackers, args.num,
                             'eval success')
    precision_ret = _pool_eval(benchmark.eval_precision, trackers, args.num,
                               'eval precision')
    if norm_precision:
        norm_precision_ret = _pool_eval(benchmark.eval_norm_precision,
                                        trackers, args.num,
                                        'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
    else:
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)


def evaluate(args):
    """Evaluate stored tracking results for ``args.dataset``.

    Tracker result folders are discovered under
    ``<args.tracker_path>/<args.dataset>/<args.tracker_name>*`` and the
    benchmark appropriate to the dataset family is run (OPE for OTB-style
    datasets, AR/EAO for VOT short-term, F1 for VOT-LT, the official
    toolkit for GOT-10k).

    Args:
        args: parsed CLI namespace providing ``tracker_path``, ``dataset``,
            ``tracker_name``, ``num`` (worker processes) and
            ``show_video_level``.
    """
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(
        os.path.join(args.tracker_path, args.dataset, args.tracker_name + '*'))
    # basename is portable; the original '/'-split breaks on Windows paths
    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0, 'no tracker result folders matched'
    args.num = min(args.num, len(trackers))

    # ground-truth datasets live under ./datasets/<dataset>
    root = os.path.join('./datasets', args.dataset)

    # NOTE: substring checks mean order matters -- 'UAVDT' must be tested
    # before the generic 'UAV' branch below.
    if 'OTB' in args.dataset:
        _run_ope(OTBDataset(args.dataset, root), tracker_dir, trackers, args)
    elif 'DTB70' in args.dataset:
        _run_ope(DTB70Dataset(args.dataset, root), tracker_dir, trackers, args)
    elif 'UAVDT' in args.dataset:
        _run_ope(UAVDTDataset(args.dataset, root), tracker_dir, trackers, args)
    elif 'VisDrone' in args.dataset:
        _run_ope(VisDroneDataset(args.dataset, root), tracker_dir, trackers,
                 args)
    elif 'GOT-10k' in args.dataset:
        # GOT-10k is scored by its own toolkit; the tracker name is
        # hard-coded here -- TODO: derive from args.tracker_name if needed.
        root_dir = os.path.abspath('datasets/GOT-10k')
        e = ExperimentGOT10k(root_dir)
        ao, sr, speed = e.report(['siamcar'])
        ss = 'ao:%.3f --sr:%.3f -speed:%.3f' % (float(ao), float(sr),
                                                float(speed))
        print(ss)
    elif 'LaSOT' == args.dataset:
        # LaSOT additionally reports normalized precision
        _run_ope(LaSOTDataset(args.dataset, root), tracker_dir, trackers,
                 args, norm_precision=True)
    elif 'UAV' in args.dataset:
        # UAV123 / UAV20L (UAVDT was already handled above)
        _run_ope(UAVDataset(args.dataset, root), tracker_dir, trackers, args)
    elif 'NFS' in args.dataset:
        _run_ope(NFSDataset(args.dataset, root), tracker_dir, trackers, args)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # VOT short-term: accuracy/robustness + expected average overlap
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = _pool_eval(ar_benchmark.eval, trackers, args.num,
                               'eval ar')

        benchmark = EAOBenchmark(dataset)
        eao_result = _pool_eval(benchmark.eval, trackers, args.num,
                                'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        # VOT long-term: F1 benchmark
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = _pool_eval(benchmark.eval, trackers, args.num, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
コード例 #7
0
def _pool_map(fn, trackers, num, desc):
    """Map *fn* over *trackers* in a process pool, merging the result dicts.

    Every benchmark eval callable returns a single-tracker dict; the merged
    dict maps tracker name -> metric payload.  Progress is shown on a tqdm
    bar labelled *desc*.
    """
    out = {}
    with Pool(processes=num) as pool:
        for ret in tqdm(pool.imap_unordered(fn, trackers),
                        desc=desc,
                        total=len(trackers),
                        ncols=100):
            out.update(ret)
    return out


def main():
    """Evaluate stored results for ``args.dataset`` and optionally plot them.

    Uses the module-level ``args`` namespace (tracker_path, dataset,
    tracker_prefix, num, show_video_level, vis).

    Bug fix over the original: the NFS visualisation branch passed
    ``video=`` instead of ``videos=`` to ``draw_success_precision``, which
    raised TypeError at runtime.
    """
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(
        os.path.join(args.tracker_path, args.dataset,
                     args.tracker_prefix + '*'))
    trackers = [x.split('/')[-1] for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    # ground-truth datasets live next to the script in ../testing_dataset
    root = os.path.realpath(
        os.path.join(os.path.dirname(__file__), '../testing_dataset'))
    root = os.path.join(root, args.dataset)
    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_map(benchmark.eval_success, trackers, args.num,
                                'eval success')
        precision_ret = _pool_map(benchmark.eval_precision, trackers,
                                  args.num, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            # one success/precision plot per dataset attribute
            for attr, videos in dataset.attr.items():
                draw_success_precision(success_ret,
                                       name=dataset.name,
                                       videos=videos,
                                       attr=attr,
                                       precision_ret=precision_ret,
                                       bold_name='Ours')
    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_map(benchmark.eval_success, trackers, args.num,
                                'eval success')
        precision_ret = _pool_map(benchmark.eval_precision, trackers,
                                  args.num, 'eval precision')
        # LaSOT additionally reports normalized precision
        norm_precision_ret = _pool_map(benchmark.eval_norm_precision,
                                       trackers, args.num,
                                       'eval norm precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            draw_success_precision(success_ret,
                                   name=dataset.name,
                                   videos=dataset.attr['ALL'],
                                   attr='ALL',
                                   precision_ret=precision_ret,
                                   norm_precision_ret=norm_precision_ret,
                                   bold_name='Ours')
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_map(benchmark.eval_success, trackers, args.num,
                                'eval success')
        precision_ret = _pool_map(benchmark.eval_precision, trackers,
                                  args.num, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            for attr, videos in dataset.attr.items():
                draw_success_precision(success_ret,
                                       name=dataset.name,
                                       videos=videos,
                                       attr=attr,
                                       precision_ret=precision_ret,
                                       bold_name='Ours')
    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_map(benchmark.eval_success, trackers, args.num,
                                'eval success')
        precision_ret = _pool_map(benchmark.eval_precision, trackers,
                                  args.num, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        if args.vis:
            for attr, videos in dataset.attr.items():
                # BUG FIX: original passed video=videos, which is not a
                # parameter of draw_success_precision (every other call
                # site uses videos=) and raised TypeError.
                draw_success_precision(success_ret,
                                       name=dataset.name,
                                       videos=videos,
                                       attr=attr,
                                       precision_ret=precision_ret,
                                       bold_name='Ours')
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = _pool_map(ar_benchmark.eval, trackers, args.num,
                              'eval ar')

        if args.vis:
            # per-tag EAO breakdown is only needed for the plot
            benchmark = EAOBenchmark(dataset,
                                     tags=[
                                         "all", "camera_motion",
                                         "illum_change", "motion_change",
                                         "size_change", "occlusion", "empty"
                                     ])
        else:
            benchmark = EAOBenchmark(dataset)
        eao_result = _pool_map(benchmark.eval, trackers, args.num,
                               'eval eao')
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
        if args.vis:
            draw_eao(eao_result)
    elif 'VOT2018-LT' == args.dataset:
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = _pool_map(benchmark.eval, trackers, args.num, 'eval f1')
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)
        if args.vis:
            draw_f1(f1_result, bold_name='Ours')
    elif 'TrackingNet' in args.dataset:
        # TrackingNet ground truth is withheld; results go to the server
        print('Please evaluate on the server!')
    elif 'VisDrone' in args.dataset:
        dataset = VisDroneDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = _pool_map(benchmark.eval_success, trackers, args.num,
                                'eval success')
        precision_ret = _pool_map(benchmark.eval_precision, trackers,
                                  args.num, 'eval precision')
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
コード例 #8
0
def main():
    """Evaluate every tracker matching any prefix in ``args.tracker_prefix``.

    Relies on a module-level ``args`` namespace.  Unlike the single-prefix
    variants of this script, ``args.tracker_prefix`` is iterated here, so it
    is expected to be a list of prefixes; all matches are evaluated together.
    """
    trackers = []
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    # collect result folders for every requested prefix
    for prfx in args.tracker_prefix:
        trackers1 = glob(
            os.path.join(args.tracker_path, args.dataset, prfx + '*'))
        trackers.extend([x.split('/')[-1] for x in trackers1])

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))

    # ground-truth datasets live next to the script in ../testing_dataset
    root = os.path.realpath(
        os.path.join(os.path.dirname(__file__), '../testing_dataset'))
    root = os.path.join(root, args.dataset)
    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        # one process-pool pass per metric; each worker returns a
        # per-tracker dict that is merged into the aggregate
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        # videos=[]
        # attr = 'occlusion'
        # for v_idx, video in enumerate(dataset):
        #     if hasattr(video.attr, attr):
        #         videos.append(video.name)
        ####################################################################
        # draw per-attribute curves; NOTE(review): the plot name is
        # hard-coded to 'OTB100' regardless of args.dataset -- confirm
        # before reusing with other OTB variants
        for k, v in dataset.attr.items():
            #            if k=='Occlusion':
            draw_success_precision(success_ret, 'OTB100', v, str(k),
                                   precision_ret)
#####################################################################

    elif 'LaSOT' == args.dataset:
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        # LaSOT additionally reports normalized precision
        norm_precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_norm_precision,
                                                trackers),
                            desc='eval norm precision',
                            total=len(trackers),
                            ncols=100):
                norm_precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              norm_precision_ret,
                              show_video_level=args.show_video_level)
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
        ####################################################################
        # only the 'Full Occlusion' attribute is plotted here;
        # NOTE(review): plot name hard-coded to 'UAV123' -- confirm before
        # reusing with UAV20L
        for k, v in dataset.attr.items():
            if k == 'Full Occlusion':
                draw_success_precision(success_ret, 'UAV123', v, str(k),
                                       precision_ret)


#####################################################################

    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                                                trackers),
                            desc='eval success',
                            total=len(trackers),
                            ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                                                trackers),
                            desc='eval precision',
                            total=len(trackers),
                            ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret,
                              precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        # VOT short-term: accuracy/robustness + expected average overlap
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(ar_benchmark.eval, trackers),
                            desc='eval ar',
                            total=len(trackers),
                            ncols=100):
                ar_result.update(ret)

        benchmark = EAOBenchmark(dataset)
        eao_result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval, trackers),
                            desc='eval eao',
                            total=len(trackers),
                            ncols=100):
                eao_result.update(ret)
        ar_benchmark.show_result(ar_result,
                                 eao_result,
                                 show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        # VOT long-term: F1 benchmark
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval, trackers),
                            desc='eval f1',
                            total=len(trackers),
                            ncols=100):
                f1_result.update(ret)
        benchmark.show_result(f1_result,
                              show_video_level=args.show_video_level)