def run_trackers(tracker, seqs, evalType):
    """Run *tracker* on every sequence in *seqs* for one evaluation type.

    For each sequence, a previously saved result is reloaded from disk when
    OVERWRITE_RESULT is False and the cached JSON exists; otherwise the
    tracker is actually executed via run_KCF_variant and a fresh Result is
    built.

    Args:
        tracker: tracker object (must expose ``.name``); run_KCF_variant may
            return a replacement instance, which is carried forward.
        seqs: list of sequence config objects (``.name``, ``.startFrame``,
            ``.endFrame``).
        evalType: evaluation type string (e.g. 'OPE'), used to format the
            result directory.

    Returns:
        list of Result objects, one per sequence.
    """
    seqResults = []
    for idxSeq, subS in enumerate(seqs):
        print('{0}:{1}, total frame: {2}'.format(
            idxSeq + 1, subS.name, subS.endFrame - subS.startFrame))
        if not OVERWRITE_RESULT:
            # Reuse a cached result if one was saved by a previous run.
            trk_src = os.path.join(RESULT_SRC.format(evalType), tracker.name)
            result_src = os.path.join(trk_src, subS.name + '.json')
            if os.path.exists(result_src):
                r = UAV_script.load_seq_result(result_src)
                seqResults.append(r)
                continue
        # NOTE(review): an old comment claimed only targets below a size
        # threshold are re-run, but no such check exists here — confirm intent.
        tracker, res = run_KCF_variant(tracker, subS, debug=False)
        r = Result(tracker.name, subS.name, subS.startFrame, subS.endFrame,
                   res['type'], evalType, res['res'], res['fps'], None)
        r.refresh_dict()
        seqResults.append(r)
        # Bug fix: saving used to happen once after the loop, persisting only
        # the last sequence (and crashing on empty input). Save each freshly
        # computed result as soon as it exists.
        if SAVE_RESULT:
            UAV_script.save_seq_result(RESULT_SRC, r)
    return seqResults
def main():
    """Benchmark a KCF tracker variant on the Temple Color sequences.

    When OVERWRITE_RESULT is set a live KCFTracker is built (so sequences are
    actually re-run); otherwise a lightweight named Tracker is used so cached
    results can be reloaded. Prints per-sequence coverage/error and
    per-attribute scores, optionally saving them.
    """
    if OVERWRITE_RESULT:
        tracker = KCFTracker(
            feature_type='multi_cnn', sub_feature_type='dnn_scale',
            load_model=True,
            model_path='./trained_models/CNN_Model_OBT100_multi_cnn_best_cifar_big_valid.h5',
            name_suffix='_best_valid_CNN')
    else:
        # Name-only placeholder: run_trackers will just reload saved results.
        tracker = Tracker(name='KCFmulti_cnn_dsst_adapted_lr_best_valid_CNN')
    evalTypes = ['OPE']
    if SETUP_SEQ:
        print('Setup sequences ...')
        Temple_color_script.setup_seqs(SRC_DIR)
    print('Starting benchmark for trackers: {0}'.format(tracker.name))
    for evalType in evalTypes:
        seqNames = Temple_color_script.get_seq_names(SRC_DIR)
        seqs = Temple_color_script.load_seq_configs(seqNames, SRC_DIR)
        results = run_trackers(tracker, seqs, evalType)
        if len(results) > 0:
            evalResults, attrList = butil.calc_result(
                tracker, seqs, results, evalType, ANNO_DIR)
            print("Result of Sequences\t -- '{0}'".format(tracker.name))
            for i, seq in enumerate(seqs):
                try:
                    print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                        i, seq.name, " " * (12 - len(seq.name)),
                        sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                        sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                # Narrowed from a bare except: a sequence with missing/empty
                # metrics is reported, everything else still propagates
                # meaningfully within Exception.
                except Exception:
                    print('\t\'{0}\' ERROR!!'.format(seq.name))
            print("Result of attributes\t -- '{0}'".format(tracker.name))
            for attr in attrList:
                print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(
                    attr.name, attr.overlap, attr.error))
            if SAVE_RESULT:
                UAV_script.save_scores(attrList, RESULT_SRC)
def main():
    """Reload and score cached tracker results on the Temple Color benchmark.

    Uses a name-only Tracker (no actual tracking is performed here), gathers
    per-sequence results via run_trackers, then prints coverage/error per
    sequence and overlap/failure per attribute, optionally saving scores.
    """
    tracker = Tracker(name='KCFmulti_cnn_dsst_adapted_lr_best_valid_CNN')
    evalTypes = ['OPE']
    if SETUP_SEQ:
        print('Setup sequences ...')
        Temple_color_script.setup_seqs(SRC_DIR)
    print('Starting benchmark for trackers: {0}'.format(tracker.name))
    for evalType in evalTypes:
        seqNames = Temple_color_script.get_seq_names(SRC_DIR)
        seqs = Temple_color_script.load_seq_configs(seqNames, SRC_DIR)
        results = run_trackers(tracker, seqs, evalType)
        if len(results) > 0:
            evalResults, attrList = butil.calc_result(
                tracker, seqs, results, evalType, ANNO_DIR)
            print("Result of Sequences\t -- '{0}'".format(tracker.name))
            for i, seq in enumerate(seqs):
                try:
                    print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                        i, seq.name, " " * (12 - len(seq.name)),
                        sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                        sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                # Narrowed from a bare except: keep reporting the failing
                # sequence without swallowing SystemExit/KeyboardInterrupt.
                except Exception:
                    print('\t\'{0}\' ERROR!!'.format(seq.name))
            print("Result of attributes\t -- '{0}'".format(tracker.name))
            for attr in attrList:
                print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(
                    attr.name, attr.overlap, attr.error))
            if SAVE_RESULT:
                UAV_script.save_scores(attrList, RESULT_SRC)
def run_trackers(tracker, seqs, evalType):
    """Collect previously saved per-sequence results for *tracker*.

    No tracking is performed here: for every sequence a cached JSON result
    (under the evalType-specific result directory) is loaded when present;
    sequences without a cached file are silently skipped.

    Args:
        tracker: tracker object exposing ``.name``.
        seqs: sequence config objects (``.name``, ``.startFrame``, ``.endFrame``).
        evalType: evaluation type string used to format the result directory.

    Returns:
        list of loaded result objects (possibly shorter than ``seqs``).
    """
    loaded = []
    tracker_dir = os.path.join(RESULT_SRC.format(evalType), tracker.name)
    for position, sequence in enumerate(seqs, start=1):
        print('{0}:{1}, total frame: {2}'.format(
            position, sequence.name, sequence.endFrame - sequence.startFrame))
        cached = os.path.join(tracker_dir, sequence.name + '.json')
        if os.path.exists(cached):
            loaded.append(UAV_script.load_seq_result(cached))
    return loaded
def main():
    """Benchmark an HDT/grabcut KCF tracker variant on UAV123 sequences.

    When OVERWRITE_RESULT is set a live KCFTracker is constructed (sequences
    are re-run); otherwise a name-only Tracker is used so cached results can
    be reloaded. Prints per-sequence coverage/error and per-attribute scores,
    optionally saving them.
    """
    if OVERWRITE_RESULT:
        tracker = KCFTracker(
            feature_type='HDT', sub_feature_type='dsst',
            sub_sub_feature_type='adapted_lr', load_model=True, vgglayer='',
            model_path='./trained_models/CNN_Model_OBT100_multi_cnn_best_cifar_big_valid.h5',
            adaptation_rate_range_max=0.002,
            adaptation_rate_scale_range_max=0.005,
            saliency='grabcut', saliency_percent=0.5,
            grabcut_mask_path='../../UAV/UAV123_10fps/grab_cut_figures/',
            optical_flow=True)
    else:
        # Name-only placeholder: run_trackers will just reload saved results.
        tracker = Tracker(
            name='KCFmulti_cnn_dsst_adapted_lr_grabcut_0.5_optical_flow')
    evalTypes = ['OPE']
    loadSeqs = 'UAV123'
    if SETUP_SEQ:
        print('Setup sequences ...')
        UAV_script.setup_seqs(loadSeqs, SRC_DIR, ANNO_DIR, IMG_DIR)
    print('Starting benchmark for trackers: {0}, evalTypes : {1}'.format(
        tracker.name, evalTypes))
    for evalType in evalTypes:
        seqNames = UAV_script.get_seq_names(loadSeqs, ANNO_DIR)
        seqs = UAV_script.load_seq_configs(seqNames, ANNO_DIR)
        results = run_trackers(tracker, seqs, evalType)
        if len(results) > 0:
            evalResults, attrList = butil.calc_result(
                tracker, seqs, results, evalType, SRC_DIR)
            print("Result of Sequences\t -- '{0}'".format(tracker.name))
            for i, seq in enumerate(seqs):
                try:
                    print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                        i, seq.name, " " * (12 - len(seq.name)),
                        sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                        sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                # Narrowed from a bare except: a sequence with missing/empty
                # metrics is reported, control-flow exceptions still propagate.
                except Exception:
                    print('\t\'{0}\' ERROR!!'.format(seq.name))
            print("Result of attributes\t -- '{0}'".format(tracker.name))
            for attr in attrList:
                print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(
                    attr.name, attr.overlap, attr.error))
            if SAVE_RESULT:
                UAV_script.save_scores(attrList, RESULT_SRC)
def main(argv):
    """Run the benchmark for one or more trackers, driven by CLI options.

    Options:
        -h                  print usage and exit
        -t / --tracker      comma-separated tracker names
        -s / --sequence     sequence set name or comma-separated sequences
        -e / --evaltype     comma-separated evaluation types (default OPE)

    Args:
        argv: argument list (typically ``sys.argv[1:]``).
    """
    trackers = [Tracker(name='SRDCF')]
    evalTypes = ['OPE']
    loadSeqs = 'UAV123'
    try:
        opts, args = getopt.getopt(argv, "ht:e:s:",
                                   ["tracker=", "evaltype=", "sequence="])
    except getopt.GetoptError:
        print('usage : run_trackers.py -t <trackers> -s <sequences>' +
              '-e <evaltypes>')
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            print('usage : run_trackers.py -t <trackers> -s <sequences>' +
                  '-e <evaltypes>')
            sys.exit(0)
        elif opt in ("-t", "--tracker"):
            # NOTE(review): this replaces Tracker objects with plain name
            # strings; downstream `tracker.name` access would fail for CLI
            # supplied trackers — confirm whether run_trackers wraps them.
            trackers = [x.strip() for x in arg.split(',')]
        elif opt in ("-s", "--sequence"):
            loadSeqs = arg
            # Known aggregate set names stay as-is; anything else is treated
            # as an explicit comma-separated list of sequence names.
            if loadSeqs not in ('All', 'all', 'tb50', 'tb100', 'cvpr13'):
                loadSeqs = [x.strip() for x in arg.split(',')]
        elif opt in ("-e", "--evaltype"):
            evalTypes = [x.strip() for x in arg.split(',')]
    print('Starting benchmark for {0} trackers, evalTypes : {1}'.format(
        len(trackers), evalTypes))
    for evalType in evalTypes:
        seqNames = UAV_script.get_seq_names(loadSeqs, ANNO_DIR)
        seqs = UAV_script.load_seq_configs(seqNames, ANNO_DIR)
        trackerResults = run_trackers(trackers, seqs, evalType)
        for tracker in trackers:
            results = trackerResults[tracker]
            if len(results) > 0:
                evalResults, attrList = butil.calc_result(
                    tracker, seqs, results, evalType, SRC_DIR)
                print("Result of Sequences\t -- '{0}'".format(tracker.name))
                for i, seq in enumerate(seqs):
                    try:
                        print('\t{0}:\'{1}\'{2}\taveCoverage : {3:.3f}%\taveErrCenter : {4:.3f}'.format(
                            i, seq.name, " " * (12 - len(seq.name)),
                            sum(seq.aveCoverage) / len(seq.aveCoverage) * 100,
                            sum(seq.aveErrCenter) / len(seq.aveErrCenter)))
                    # Narrowed from a bare except: report the failing sequence
                    # without swallowing SystemExit/KeyboardInterrupt.
                    except Exception:
                        print('\t\'{0}\' ERROR!!'.format(seq.name))
                print("Result of attributes\t -- '{0}'".format(tracker.name))
                for attr in attrList:
                    print("\t\'{}\'\t overlap : {:04.2f}% \t\t failures : {:04.2f}".format(
                        attr.name, attr.overlap, attr.error))
                if SAVE_RESULT:
                    butil.save_scores(attrList)