Example #1
    def test_got10k(self):
        root_dir = os.path.join(self.data_dir, 'GOT-10k')
        # run experiment
        experiment = ExperimentGOT10k(root_dir)
        experiment.run(self.tracker, visualize=False)
        # report performance
        experiment.report([self.tracker.name])
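For context, the method above assumes a unittest.TestCase that provides self.data_dir and self.tracker. A minimal, self-contained sketch of such a harness; the class names and the 'data' directory are assumptions, not part of the original snippet:

import os
import unittest

from got10k.trackers import Tracker
from got10k.experiments import ExperimentGOT10k


class IdentityTracker(Tracker):
    # trivial baseline that keeps returning the initial box (cf. Example #11)
    def __init__(self):
        super(IdentityTracker, self).__init__(name='IdentityTracker')

    def init(self, image, box):
        self.box = box

    def update(self, image):
        return self.box


class TrackerTest(unittest.TestCase):
    def setUp(self):
        self.data_dir = 'data'  # assumed location of the GOT-10k root
        self.tracker = IdentityTracker()

    def test_got10k(self):
        root_dir = os.path.join(self.data_dir, 'GOT-10k')
        experiment = ExperimentGOT10k(root_dir)
        experiment.run(self.tracker, visualize=False)
        experiment.report([self.tracker.name])


if __name__ == '__main__':
    unittest.main()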
Example #2
def example_show():
    # setup experiment
    experiment = ExperimentGOT10k(root_dir=ROOT_DIR,
                                  subset='test',
                                  result_dir='results',
                                  report_dir='reports')

    # visualize tracking results
    tracker_names = ['SiamFCv2', 'GOTURN', 'CCOT', 'MDNet']
    experiment.show(tracker_names)
Example #3
def example_plot_curves():
    # reports of 25 baseline entries can be downloaded from
    # http://got-10k.aitestunion.com/downloads
    report_files = [
        'reports/GOT-10k/performance_25_entries.json']
    tracker_names = [
        'SiamFCv2', 'GOTURN', 'CCOT', 'MDNet']
    
    # setup experiment and plot curves
    experiment = ExperimentGOT10k('data/GOT-10k', subset='test')
    experiment.plot_curves(report_files, tracker_names)
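plot_curves() reads performance JSONs like the ones produced by experiment.report(). A minimal sketch for plotting your own tracker's curve; the report path follows the toolkit's report_dir/GOT-10k/&lt;tracker_name&gt;/performance.json layout, which you should verify against your installed version:

# a minimal sketch, assuming report() has already been run for 'MyTracker'
report_files = ['reports/GOT-10k/MyTracker/performance.json']
experiment = ExperimentGOT10k('data/GOT-10k', subset='test')
experiment.plot_curves(report_files, ['MyTracker'])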
Example #4
def example_track_val_set():
    # setup tracker
    tracker = IdentityTracker()

    # run experiment on validation set
    experiment = ExperimentGOT10k(root_dir=ROOT_DIR,
                                  subset='val',
                                  result_dir='results',
                                  report_dir='reports')
    experiment.run(tracker, visualize=False)

    # report performance
    experiment.report([tracker.name])
Example #5
def main_got(subset='val'):
    dataset_name = "GOT10k"
    if subset != 'val':
        dataset_name += "_" + subset
    tracker = build_tracker()
    experiment = ExperimentGOT10k(
        root_dir=GOT10K_ROOT_DIR,  # GOT-10k's root directory
        subset=subset,  # 'train' | 'val' | 'test'
        result_dir=RESULT_DIR,  # where to store tracking results
        report_dir=REPORT_DIR,  # where to store evaluation reports
        start_idx=args.start_idx,
        end_idx=args.end_idx)
    experiment.run(tracker, visualize=args.visualize_experiment)
    experiment.report([tracker.name])
Example #6
def example_track_test_set():
    # setup tracker
    tracker = IdentityTracker()

    # run experiment on test set
    experiment = ExperimentGOT10k(root_dir=ROOT_DIR,
                                  subset='test',
                                  result_dir='results',
                                  report_dir='reports')
    experiment.run(tracker, visualize=False)

    # a ".zip" file will be generated ready for submission
    # follow the guide to submit your results to
    # http://got-10k.aitestunion.com/
    experiment.report([tracker.name])
Example #7
def init_experiment(
        dataset_type: DatasetType, dataset_dir_path: str, results_dir_path: str,
        reports_dir_path: str):
    params = dict(
        root_dir=dataset_dir_path, result_dir=results_dir_path,
        report_dir=reports_dir_path)
    
    if dataset_type == DatasetType.OTB13:
        return ExperimentOTB(version=2013, **params)
    elif dataset_type == DatasetType.OTB15:
        return ExperimentOTB(version=2015, **params)
    elif dataset_type == DatasetType.GOT10k:
        return ExperimentGOT10k(subset='val', **params)
    elif dataset_type == DatasetType.VOT15:
        return ExperimentVOT(version=2015, **params)
    elif dataset_type == DatasetType.UAV123:
        return ExperimentUAV123(**params)
    else:
        raise ValueError(f"unsupported dataset type {dataset_type}")
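DatasetType is not shown in the snippet; a plausible minimal definition it assumes, for illustration only:

from enum import Enum, auto


class DatasetType(Enum):
    OTB13 = auto()
    OTB15 = auto()
    GOT10k = auto()
    VOT15 = auto()
    UAV123 = auto()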
Example #8
    # an inline identity tracker; name argument assumed, mirroring Example #11
    class IdentityTracker(Tracker):
        def __init__(self):
            super(IdentityTracker, self).__init__(
                name='IdentityTracker',
                is_deterministic=True,  # stochastic (False) or deterministic (True)
            )

        def init(self, image, box):
            self.box = box

        def update(self, image):
            return self.box

    # instantiate a tracker
    tracker = IdentityTracker()

    # setup experiment (validation subset)
    experiment = ExperimentGOT10k(
        root_dir=os.path.join(benchmark_dir, "data", "GOT-10k"),
        subset="val",  # 'train' | 'val' | 'test'
        result_dir=os.path.join(benchmark_dir, "results"),  # where to store tracking results
        report_dir=os.path.join(benchmark_dir, "reports"),  # where to store evaluation reports
    )
    experiment.run(tracker, visualize=True)

    experiment.report([tracker.name])

    # ----------------------#
    # PyTorchGoturn Tracker #
    # ----------------------#

    # setup experiment (validation subset)
    experiment = ExperimentGOT10k(
        root_dir=os.path.join(benchmark_dir, "data", "GOT-10k"),
        subset="val",  # 'train' | 'val' | 'test'
        result_dir=os.path.join(benchmark_dir, "results"),  # where to store tracking results
        report_dir=os.path.join(benchmark_dir, "reports"),  # where to store evaluation reports
    )
Example #9
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--tracker', default='DiMPsuper_RF',
                        help='name of the tracker to evaluate')
    parser.add_argument('--rf_code', default='a',
                        help='variant code of the refinement (RF) module')
    parser.add_argument('--overwrite', action='store_true',
                        help='whether to overwrite existing results')
    parser.add_argument('--vis', action='store_true', default=False,
                        help='whether to visualize results')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()

    # setup tracker
    tracker = Trackers[args.tracker](args.rf_code)

    # run experiments on GOT-10k (test subset)
    data_root = os.path.join(_dataset_root, 'GOT10k')
    experiment = ExperimentGOT10k(
        data_root, subset='test',
        result_dir=check_dir(os.path.join(save_dir, 'results')),
        report_dir=check_dir(os.path.join(save_dir, 'reports'))
    )
    experiment.run(tracker, visualize=args.vis, overwrite_result=args.overwrite)

    # report performance
    experiment.report([tracker.name])
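check_dir is not defined in the excerpt; a plausible helper it assumes (create the directory if missing, then return its path):

def check_dir(path):
    # make sure the directory exists before handing the path to the experiment
    os.makedirs(path, exist_ok=True)
    return path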
Example #10
    def init(self, image, box):
        self.box = box
        origin_coor = box.tolist()
        frame = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
        self.tracker.select_first_region(frame, origin_coor)

    def update(self, image):
        try:
            frame = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
            track_result = self.tracker.predict_future_coor(frame)
            return np.array(track_result)
        except Exception:
            return np.array(self.tracker.current_coor)


if __name__ == '__main__':
    # setup tracker
    tracker = IdentityTracker()

    # setup experiment (validation subset)
    experiment = ExperimentGOT10k(
        root_dir='../GOT10K',  # GOT-10k's root directory
        subset='val',  # 'train' | 'val' | 'test'
        result_dir='results',  # where to store tracking results
        report_dir='reports'  # where to store evaluation reports
    )

    experiment.run(tracker, visualize=False)

    # report performance
    experiment.report([tracker.name])
Example #11
from got10k.trackers import Tracker
from got10k.experiments import ExperimentGOT10k


class IdentityTracker(Tracker):
    def __init__(self):
        super(IdentityTracker, self).__init__(name='IdentityTracker')

    def init(self, image, box):
        self.box = box

    def update(self, image):
        return self.box


if __name__ == '__main__':
    # setup tracker
    tracker = IdentityTracker()

    # run experiments on GOT-10k (validation subset)
    experiment = ExperimentGOT10k('H:/datasets/GOT-10k', subset='val')
    experiment.run(tracker, visualize=True)

    # report performance
    experiment.report([tracker.name])
Example #12
TrTracker = Tracker(args.tracker_name, args.tracker_param, args.run_id)


# the base class here is got10k's Tracker, presumably imported earlier in the
# file under the name GOT_Tracker; this definition shadows that alias
class GOT_Tracker(GOT_Tracker):
    def __init__(self):
        super(GOT_Tracker, self).__init__(name='GOT_Tracker')
        self.tracker = TrTracker.tracker_class(TrTracker.get_parameters())

    def init(self, image, box):
        image = np.array(image)
        self.tracker.initialize(image, box)

    def update(self, image):
        image = np.array(image)
        self.box = self.tracker.track(image)
        return self.box


if __name__ == '__main__':
    # setup tracker
    tracker = GOT_Tracker()

    # run experiments on GOT-10k (test subset)
    experiment = ExperimentGOT10k('/data1/wangning/GOT-10k',
                                  subset='test')  # or 'val'
    experiment.run(tracker, visualize=False)

    # report performance
    experiment.report([tracker.name])
Example #13
def eval(args):
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(os.path.join(args.tracker_path,
                                 args.dataset,
                                 args.tracker_name+'*'))

    trackers = [os.path.basename(x) for x in trackers]

    assert len(trackers) > 0
    args.num = min(args.num, len(trackers))
    
    root = './datasets'

    root = os.path.join(root, args.dataset)
    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success, trackers), desc='eval success', total=len(trackers), ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                trackers), desc='eval precision', total=len(trackers), ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret, precision_ret,
                show_video_level=args.show_video_level)
    
    elif 'DTB70' in args.dataset:
        dataset = DTB70Dataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success, trackers), desc='eval success', total=len(trackers), ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                trackers), desc='eval precision', total=len(trackers), ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret, precision_ret,
                show_video_level=args.show_video_level)
    
    elif 'UAVDT' in args.dataset:
        dataset = UAVDTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success, trackers), desc='eval success', total=len(trackers), ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                trackers), desc='eval precision', total=len(trackers), ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret, precision_ret,
                show_video_level=args.show_video_level)

    elif 'VisDrone' in args.dataset:
        dataset = VisDroneDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success, trackers), desc='eval success', total=len(trackers), ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                trackers), desc='eval precision', total=len(trackers), ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret, precision_ret,
                show_video_level=args.show_video_level)
    
    elif 'GOT-10k' in args.dataset:
        root_dir = os.path.abspath('datasets/GOT-10k')
        e = ExperimentGOT10k(root_dir)
        ao, sr, speed = e.report(['siamban'])
        print('ao:%.3f --sr:%.3f --speed:%.3f' % (float(ao), float(sr), float(speed)))

    elif 'LaSOT' == args.dataset: 
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                trackers), desc='eval success', total=len(trackers), ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                trackers), desc='eval precision', total=len(trackers), ncols=100):
                precision_ret.update(ret)
        norm_precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_norm_precision,
                trackers), desc='eval norm precision', total=len(trackers), ncols=100):
                norm_precision_ret.update(ret)
        benchmark.show_result(success_ret, precision_ret, norm_precision_ret,
                show_video_level=args.show_video_level)
    elif 'UAV' in args.dataset:  # note: UAVDT, UAV123 and UAV20L are distinct datasets
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                trackers), desc='eval success', total=len(trackers), ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                trackers), desc='eval precision', total=len(trackers), ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret, precision_ret,
                show_video_level=args.show_video_level)
    elif 'NFS' in args.dataset:
        dataset = NFSDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_success,
                trackers), desc='eval success', total=len(trackers), ncols=100):
                success_ret.update(ret)
        precision_ret = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval_precision,
                trackers), desc='eval precision', total=len(trackers), ncols=100):
                precision_ret.update(ret)
        benchmark.show_result(success_ret, precision_ret,
                show_video_level=args.show_video_level)
    elif args.dataset in ['VOT2016', 'VOT2017', 'VOT2018', 'VOT2019']:
        dataset = VOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        ar_benchmark = AccuracyRobustnessBenchmark(dataset)
        ar_result = {}
        with Pool(processes=args.num) as pool:
            for ret in pool.imap_unordered(ar_benchmark.eval, trackers):
                ar_result.update(ret)

        benchmark = EAOBenchmark(dataset)
        eao_result = {}
        with Pool(processes=args.num) as pool:
            for ret in pool.imap_unordered(benchmark.eval, trackers):
                eao_result.update(ret)

        ar_benchmark.show_result(ar_result, eao_result,
                show_video_level=args.show_video_level)
    elif 'VOT2018-LT' == args.dataset:
        dataset = VOTLTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = F1Benchmark(dataset)
        f1_result = {}
        with Pool(processes=args.num) as pool:
            for ret in tqdm(pool.imap_unordered(benchmark.eval,
                trackers), desc='eval f1', total=len(trackers), ncols=100):
                f1_result.update(ret)
        benchmark.show_result(f1_result,
                show_video_level=args.show_video_level)
Example #14
from __future__ import absolute_import

from got10k.experiments import ExperimentGOT10k

from goturn import TrackerGOTURN

if __name__ == '__main__':
    # setup tracker
    net_path = "../checkpoints/pytorch_goturn.pth.tar"
    tracker = TrackerGOTURN(net_path=net_path)

    # setup experiments
    # the got10k toolkit expects either extracted directories or zip files
    # for all sequences in the dataset directory
    experiments = [ExperimentGOT10k('../data/GOT-10k', subset='val')]

    # run tracking experiments and report performance
    for e in experiments:
        e.run(tracker, visualize=False)
        e.report([tracker.name])
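The experiments list makes it easy to chain several benchmarks in one run. A sketch, assuming an OTB directory next to the GOT-10k one (hypothetical paths):

from got10k.experiments import ExperimentGOT10k, ExperimentOTB

experiments = [
    ExperimentGOT10k('../data/GOT-10k', subset='val'),
    ExperimentOTB('../data/OTB', version=2015),
]
for e in experiments:
    e.run(tracker, visualize=False)
    e.report([tracker.name])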
Example #15
            logging.info('%21s :%s' % ('Scale learning rate', self.scale_lr))
            logging.info('%21s :%s' %
                         ('Hanning window weight', self.hann_weight))
            logging.info('%21s :%d' % ('Scale level', self.scale_num))
            logging.info('%21s :%d' %
                         ('Interpolation factor', self.interpolation_beta))
            logging.info('%21s :%s' %
                         ('s_x limit factor', self.s_x_limit_beta))


if __name__ == '__main__':
    tracker = SFC(arg_test.name_tracker)
    exp = ExperimentOTB(os.path.join(arg_test.data_root, 'OTB'),
                        version=2013,
                        result_dir=arg_test.result_dir,
                        report_dir=arg_test.report_dir)
    exp.run(tracker)
    exp.report([tracker.name])
    exp2 = ExperimentGOT10k(os.path.join(arg_test.data_root, 'got10k'),
                            subset='val',
                            result_dir=arg_test.result_dir,
                            report_dir=arg_test.report_dir)
    exp2.run(tracker)
    exp2.report([tracker.name])
    exp3 = ExperimentGOT10k(os.path.join(arg_test.data_root, 'got10k'),
                            subset='test',
                            result_dir=arg_test.result_dir,
                            report_dir=arg_test.report_dir)
    exp3.run(tracker)
    exp3.report([tracker.name])
Example #16
from got10k.trackers import Tracker
from got10k.experiments import ExperimentGOT10k

class IdentityTracker(Tracker):
    def __init__(self):
        super(IdentityTracker, self).__init__(name='IdentityTracker')
    
    def init(self, image, box):
        self.box = box

    def update(self, image):
        return self.box

if __name__ == '__main__':
    # setup tracker
    tracker = IdentityTracker()

    # run experiments on GOT-10k (validation subset)
    experiment = ExperimentGOT10k('data/GOT-10k', subset='val')
    experiment.run(tracker, visualize=True)

    # report performance
    experiment.report([tracker.name])
Example #17
                toc /= cv2.getTickFrequency()
                model_name = "SiamRPN++"
                dataset_path = '/home/sourabhswain/Documents/SiamRPN/GOT/results'

                video_path = os.path.join(dataset_path, model_name, video_name)
                if not os.path.isdir(video_path):
                    os.makedirs(video_path)
                result_path = os.path.join(video_path, '{}_001.txt'.format(video_name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write(','.join([str(i) for i in x]) + '\n')
                result_path = os.path.join(video_path,
                                           '{}_time.txt'.format(video_name))
                with open(result_path, 'w') as f:
                    for x in track_times:
                        f.write("{:.6f}\n".format(x))


    """
    # experiment

    experiments = ExperimentGOT10k(
        '/home/sourabhswain/Documents/SiamRPN/dataset',
        subset='val',
        result_dir='/home/sourabhswain/Documents/SiamRPN/compare/GOT_res',
        report_dir='/home/sourabhswain/Documents/SiamRPN/GOT/reports_org')

    #experiments.run(tracker)
    experiments.report([tracker.name])
Example #18
from got10k.trackers import Tracker
from got10k.experiments import ExperimentGOT10k


class IdentityTracker(Tracker):
    def __init__(self):
        super(IdentityTracker, self).__init__(name='IdentityTracker')

    def init(self, image, box):
        self.box = box

    def update(self, image):
        return self.box


if __name__ == '__main__':
    # setup tracker
    tracker = IdentityTracker()

    # run experiments on GOT-10k (validation subset)
    experiment = ExperimentGOT10k(
        r'E:\PSUThirdSemester\CSE586ComputerVision\Term-Project1\Pythonversion\data\GOT-10k',
        subset='val')
    experiment.run(tracker, visualize=True)

    # report performance
    experiment.report([tracker.name])
Example #19
from got10k.experiments import ExperimentGOT10k

# report_files = ['/home/etvuz/project/siam_rcnn/experiments/got10k/ao.json']
report_files = [
    '/home/etvuz/project3/siamrcnn2/results/got10k_v21/100000/ao.json'
]
tracker_names = ['SiamFCv2', 'GOTURN', 'CCOT', 'MDNet']

# setup experiment and plot curves
experiment = ExperimentGOT10k('/data/zhbli/Dataset/got10k', subset='test')
experiment.plot_curves(report_files, tracker_names)