def main():
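    """Benchmark a single KCF-based tracker on the UAV123 sequences with OPE and
    print per-sequence coverage / center-error and per-attribute scores."""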
    if OVERWRITE_RESULT:
        # tracker = KCFTracker(feature_type='multi_cnn', sub_feature_type='dsst',
        #                        sub_sub_feature_type='adapted_lr', load_model=True, vgglayer='',
        #                        model_path='./trained_models/CNN_Model_OBT100_multi_cnn_best_cifar_big_valid.h5',
        #                        name_suffix='_best_valid_CNN')
        tracker = KCFTracker(feature_type='multi_cnn', sub_feature_type='dnn_scale',
                             load_model=True,
                             model_path='./trained_models/CNN_Model_OBT100_multi_cnn_best_cifar_big_valid.h5',
                             name_suffix='_best_valid_CNN')
    else:
        tracker = Tracker(name='HDT')
    # evalTypes = ['OPE', 'SRE', 'TRE']
    evalTypes = ['OPE']
    loadSeqs = 'UAV123'

    if SETUP_SEQ:
        print('Setup sequences ...')
        UAV_script.setup_seqs(loadSeqs, SRC_DIR, ANNO_DIR, IMG_DIR)

    print('Starting benchmark for trackers: {0}, evalTypes : {1}'.format(tracker.name, evalTypes))
    for evalType in evalTypes:
        seqNames = UAV_script.get_seq_names(loadSeqs, ANNO_DIR)
        seqs = UAV_script.load_seq_configs(seqNames, ANNO_DIR)
        #seqs = seqs[:40]
        ######################################################################
        results = run_trackers(tracker, seqs, evalType)
        ######################################################################
        if len(results) > 0:
            ######################################################################
            evalResults, attrList = butil.calc_result(tracker, seqs, results, evalType, SRC_DIR)
            ######################################################################
            print ("Result of Sequences\t -- '{0}'".format(tracker.name))
            for i, seq in enumerate(seqs):
                try:
                    print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                        i,
                        seq.name,
                        " " * (12 - len(seq.name)),
                        sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                        sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                except Exception:
                    print('\t\'{0}\'  ERROR!!'.format(seq.name))

            print("Result of attributes\t -- '{0}'".format(tracker.name))
            for attr in attrList:
                print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(attr.name, attr.overlap, attr.error))

            if SAVE_RESULT:
                UAV_script.save_scores(attrList, RESULT_SRC)
Example #2
def main(benchmark_config, tracker_config):
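    """Run the benchmark described by `benchmark_config` and return the AUC of
    the 'ALL' attribute (mean of its success-rate list); None if no results
    were produced."""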
    if benchmark_config['loadSeqs'] not in [
            'All', 'all', 'tb50', 'tb100', 'cvpr13', 'vid', 'tc78'
    ]:
        loadSeqs = [x.strip() for x in benchmark_config['loadSeqs'].split(',')]
    else:
        loadSeqs = benchmark_config['loadSeqs']

    if SETUP_SEQ:
        print('Setup sequences ...')
        butil.setup_seqs(loadSeqs)

    tracker = benchmark_config['tracker']
    report_result = None
    for evalType in benchmark_config['evalTypes']:
        seqNames = butil.get_seq_names(loadSeqs)
        seqs = butil.load_seq_configs(seqNames)
        results = run_trackers(seqs, evalType, shiftTypeSet, tracker_config)
        if len(results) > 0:
            evalResults, attrList = butil.calc_result(tracker, seqs, results,
                                                      evalType)
            print("Result of Sequences\t -- '{0}'".format(tracker))
            for seq in seqs:
                try:
                    print('\t\'{0}\'{1}'.format(seq.name,
                                                " " * (12 - len(seq.name))),
                          end='')
                    print("\taveCoverage : {0:.3f}%".format(
                        sum(seq.aveCoverage) / len(seq.aveCoverage) * 100),
                          end='')
                    print("\taveErrCenter : {0:.3f}".format(
                        sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                except Exception:
                    print('\t\'{0}\'  ERROR!!'.format(seq.name))

            print("Result of attributes\t -- '{0}'".format(tracker))
            for attr in attrList:
                print("\t\'{0}\'".format(attr.name), end='')
                print("\toverlap : {0:02.2f}%".format(attr.overlap), end='')
                print("\tfailures : {0:.2f}".format(attr.error), end='')
                print("\tAUC : {0:.3f}".format(
                    sum(attr.successRateList) / len(attr.successRateList)))
                if attr.name == 'ALL':
                    report_result = sum(attr.successRateList) / len(
                        attr.successRateList)
            if SAVE_RESULT:
                butil.save_scores(attrList, benchmark_config['testname'])
    # Return after all evaluation types have run; stays None if no 'ALL' score was found.
    return report_result
Example #3
def main():
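    """Benchmark every tracker listed in Tracker_names on UAV123 with OPE and
    print per-sequence and per-attribute results."""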

    for name in Tracker_names:
        tracker = Tracker(name=name)
        # evalTypes = ['OPE', 'SRE', 'TRE']
        evalTypes = ['OPE']
        loadSeqs = 'UAV123'

        if SETUP_SEQ:
            print('Setup sequences ...')
            UAV_script.setup_seqs(loadSeqs, SRC_DIR, ANNO_DIR, IMG_DIR)

        print('Starting benchmark for trackers: {0}, evalTypes : {1}'.format(tracker.name, evalTypes))
        for evalType in evalTypes:
            seqNames = UAV_script.get_seq_names(loadSeqs, ANNO_DIR)
            seqs = UAV_script.load_seq_configs(seqNames, ANNO_DIR)
            ######################################################################
            results = run_trackers(tracker, seqs, evalType)
            ######################################################################
            if len(results) > 0:
                ######################################################################
                evalResults, attrList = butil.calc_result(tracker, seqs, results, evalType, SRC_DIR)
                ######################################################################
                print ("Result of Sequences\t -- '{0}'".format(tracker.name))
                for i, seq in enumerate(seqs):
                    try:
                        print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                            i,
                            seq.name,
                            " " * (12 - len(seq.name)),
                            sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                            sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                    except Exception:
                        print('\t\'{0}\'  ERROR!!'.format(seq.name))

                print("Result of attributes\t -- '{0}'".format(tracker.name))
                for attr in attrList:
                    print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(attr.name, attr.overlap, attr.error))

                if SAVE_RESULT:
                    UAV_script.save_scores(attrList, RESULT_SRC)
def main(argv):
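    """Benchmark one or more trackers on UAV123; the -t/-s/-e command-line
    options select the trackers, sequences and evaluation types."""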
    trackers = [Tracker(name='SRDCF')]
    #trackers = [Tracker(name='KCFmulti_cnn_dsst_adapted_lr_best_valid_CNN')]

    evalTypes = ['OPE']
    loadSeqs = 'UAV123'
    try:
        opts, args = getopt.getopt(argv, "ht:e:s:", ["tracker=", "evaltype=", "sequence="])
    except getopt.GetoptError:
        print('usage : run_trackers.py -t <trackers> -s <sequences> -e <evaltypes>')
        sys.exit(1)

    for opt, arg in opts:
        if opt == '-h':
            print('usage : run_trackers.py -t <trackers> -s <sequences> -e <evaltypes>')
            sys.exit(0)
        elif opt in ("-t", "--tracker"):
            trackers = [x.strip() for x in arg.split(',')]
            # trackers = [arg]
        elif opt in ("-s", "--sequence"):
            loadSeqs = arg
            if loadSeqs not in ('All', 'all', 'tb50', 'tb100', 'cvpr13'):
                loadSeqs = [x.strip() for x in arg.split(',')]
        elif opt in ("-e", "--evaltype"):
            evalTypes = [x.strip() for x in arg.split(',')]


    print('Starting benchmark for {0} trackers, evalTypes : {1}'.format(
        len(trackers), evalTypes))
    for evalType in evalTypes:
        seqNames = UAV_script.get_seq_names(loadSeqs, ANNO_DIR)
        seqs = UAV_script.load_seq_configs(seqNames, ANNO_DIR)
        ######################################################################
        trackerResults = run_trackers(trackers, seqs, evalType)
        ######################################################################
        for tracker in trackers:
            results = trackerResults[tracker]
            if len(results) > 0:
                ######################################################################
                evalResults, attrList = butil.calc_result(tracker, seqs, results, evalType, SRC_DIR)
                ######################################################################
                print ("Result of Sequences\t -- '{0}'".format(tracker.name))
                for i, seq in enumerate(seqs):
                    try:
                        print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                            i,
                            seq.name,
                            " " * (12 - len(seq.name)),
                            sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                            sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                    except Exception:
                        print('\t\'{0}\'  ERROR!!'.format(seq.name))

                print("Result of attributes\t -- '{0}'".format(tracker.name))
                for attr in attrList:
                    print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(attr.name, attr.overlap, attr.error))

                if SAVE_RESULT:
                    butil.save_scores(attrList)
Example #5
def main(argv):
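    """Benchmark a KMC 'multi_cnn' tracker (or reuse the stored 'KMC_multi_cnn'
    result) on TB100; the -t/-s/-e options override trackers, sequences and
    evaluation types."""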
    if OVERWRITE_RESULT:
        trackers = [KMCTracker(feature_type='multi_cnn',
                               sub_feature_type='dsst',
                               model_path='./trained_models/CNN_Model_OBT100_multi_cnn_best_cifar_big_valid.h5',
                               adaptation_rate_range_max=0.0025,
                               adaptation_rate_scale_range_max=0.005,
                               padding=2.2,
                               sub_sub_feature_type='adapted_lr_hdt'
                               )]
    else:
        trackers = [Tracker(name='KMC_multi_cnn')]

    evalTypes = ['OPE']
    loadSeqs = 'TB100'
    try:
        opts, args = getopt.getopt(argv, "ht:e:s:", ["tracker=", "evaltype=", "sequence="])
    except getopt.GetoptError:
        print('usage : run_trackers.py -t <trackers> -s <sequences> -e <evaltypes>')
        sys.exit(1)

    for opt, arg in opts:
        if opt == '-h':
            print('usage : run_trackers.py -t <trackers> -s <sequences> -e <evaltypes>')
            sys.exit(0)
        elif opt in ("-t", "--tracker"):
            trackers = [x.strip() for x in arg.split(',')]
            # trackers = [arg]
        elif opt in ("-s", "--sequence"):
            loadSeqs = arg
            if loadSeqs not in ('All', 'all', 'tb50', 'tb100', 'cvpr13'):
                loadSeqs = [x.strip() for x in arg.split(',')]
        elif opt in ("-e", "--evaltype"):
            evalTypes = [x.strip() for x in arg.split(',')]

    if SETUP_SEQ:
        print('Setup sequences ...')
        butil.setup_seqs(loadSeqs)

    print('Starting benchmark for {0} trackers, evalTypes : {1}'.format(
        len(trackers), evalTypes))
    for evalType in evalTypes:
        seqNames = butil.get_seq_names(loadSeqs)
        seqs = butil.load_seq_configs(seqNames)
        ######################################################################
        trackerResults = run_trackers(trackers, seqs, evalType)
        ######################################################################
        for tracker in trackers:
            results = trackerResults[tracker]
            if len(results) > 0:
                ######################################################################
                evalResults, attrList = butil.calc_result(tracker, seqs, results, evalType, SEQ_SRC)
                ######################################################################
                print ("Result of Sequences\t -- '{0}'".format(tracker.name))
                for i, seq in enumerate(seqs):
                    try:
                        print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                            i,
                            seq.name,
                            " " * (12 - len(seq.name)),
                            sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                            sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                    except Exception:
                        print('\t\'{0}\'  ERROR!!'.format(seq.name))

                print("Result of attributes\t -- '{0}'".format(tracker.name))
                for attr in attrList:
                    print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(attr.name, attr.overlap, attr.error))

                if SAVE_RESULT:
                    butil.save_scores(attrList)
Example #6
def main(argv):
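    """Benchmark the SRDCF tracker (or reuse the stored 'cvpr_2014_color_name'
    result) on TB100; the -t/-s/-e options override trackers, sequences and
    evaluation types."""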
    if OVERWRITE_RESULT:
        # trackers = [cvpr_2014_color_name()]
        # trackers = [bmvc_2014_pami_2014_fDSST(number_of_scales=17,
        #                                       padding=2.0,
        #                                       interpolate_response=True,
        #                                       kernel='linear',
        #                                       compressed_features='gray_hog')]
        trackers = [iccv_2015_SRDCF()]
    else:
        trackers = [Tracker(name='cvpr_2014_color_name')]

    evalTypes = ['OPE']
    loadSeqs = 'TB100'
    try:
        opts, args = getopt.getopt(argv, "ht:e:s:",
                                   ["tracker=", "evaltype=", "sequence="])
    except getopt.GetoptError:
        print('usage : run_trackers.py -t <trackers> -s <sequences>'
              ' -e <evaltypes>')
        sys.exit(1)

    for opt, arg in opts:
        if opt == '-h':
            print('usage : run_trackers.py -t <trackers> -s <sequences>'
                  ' -e <evaltypes>')
            sys.exit(0)
        elif opt in ("-t", "--tracker"):
            trackers = [x.strip() for x in arg.split(',')]
            # trackers = [arg]
        elif opt in ("-s", "--sequence"):
            loadSeqs = arg
            if loadSeqs not in ('All', 'all', 'tb50', 'tb100', 'cvpr13'):
                loadSeqs = [x.strip() for x in arg.split(',')]
        elif opt in ("-e", "--evaltype"):
            evalTypes = [x.strip() for x in arg.split(',')]

    if SETUP_SEQ:
        print('Setup sequences ...')
        butil.setup_seqs(loadSeqs)

    print('Starting benchmark for {0} trackers, evalTypes : {1}'.format(
        len(trackers), evalTypes))
    for evalType in evalTypes:
        seqNames = butil.get_seq_names(loadSeqs)
        seqs = butil.load_seq_configs(seqNames)
        ######################################################################
        trackerResults = run_trackers(trackers, seqs, evalType)
        ######################################################################
        for tracker in trackers:
            results = trackerResults[tracker]
            if len(results) > 0:
                ######################################################################
                evalResults, attrList = butil.calc_result(
                    tracker, seqs, results, evalType, SEQ_SRC)
                ######################################################################
                print("Result of Sequences\t -- '{0}'".format(tracker.name))
                for i, seq in enumerate(seqs):
                    try:
                        print(
                            '\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'
                            .format(
                                i, seq.name, " " * (12 - len(seq.name)),
                                sum(seq.aveCoverage) / len(seq.aveCoverage) *
                                100,
                                sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                    except Exception:
                        print('\t\'{0}\'  ERROR!!'.format(seq.name))

                print("Result of attributes\t -- '{0}'".format(tracker.name))
                for attr in attrList:
                    print(
                        "\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}"
                        .format(attr.name, attr.overlap, attr.error))

                if SAVE_RESULT:
                    butil.save_scores(attrList)