def compareDefaultPlots(wildcard="lambda_SE"):
    """Load pickled tracker runs matching *wildcard* and evaluate the first one.

    Globs ``./Runs/<wildcard>*.p`` (e.g. runs named like ``s0_e0.2``,
    ``s0.1_e0.3``), unpickles each run, strips its TRE/SRE data so only the
    default experiment remains, then evaluates the first run against the
    wu2013 dataset.

    :param wildcard: filename prefix used to glob run pickles in ``./Runs/``.
    :raises ValueError: if no run pickle matches the wildcard.
    """
    wu2013GroundTruth = "/Users/Ivan/Files/Data/wu2013"
    datasetType = 'wu2013'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    runsNames = glob.glob("./Runs/" + wildcard + "*.p")
    # Extract "<name>" from paths shaped like ".../<name>.p".
    # Raw string: no need to escape '/' in a regex.
    regexp = re.compile(r"(.*/)(.+)(.p)")
    runs = list()
    for runName in runsNames:
        # Use the compiled pattern directly instead of re.match(regexp, ...).
        m = regexp.match(runName)
        if m is None:
            # Skip unexpected paths instead of crashing on m.group(2).
            continue
        name = m.group(2)
        print(name)
        run = loadPickle(runName)
        run.trackerLabel = runName
        # Only the default experiment is compared; drop TRE/SRE data.
        run.data['TRE'].data = []
        run.data['SRE'].data = []
        runs.append(run)
    if not runs:
        raise ValueError("No runs matched ./Runs/" + wildcard + "*.p")
    evaluator = Evaluator(dataset, runs)
    evaluator.evaluateSingleTracker(runs[0])
def evaluate(self, n=1000, successAndPrecisionPlotName='', histogramPlot=''):
    '''
    Evaluate the dataset: run every tracker experiment (default, SRE, TRE)
    through evaluateSingleTracker and collect the precision/success curves.

    :param n: sample-count argument forwarded to evaluateSingleTracker.
    :param successAndPrecisionPlotName: not used in the visible portion of
        this method — presumably consumed further down; confirm.
    :param histogramPlot: not used in the visible portion of this method —
        presumably consumed further down; confirm.
    :return: accuracy and precision
    '''
    # NOTE(review): listGT is assigned but never read in the visible span —
    # likely used later in this method; confirm before removing.
    listGT = self.dataset.data
    # NOTE(review): these four lists are re-initialized inside the
    # per-experiment-type loop below, so these outer bindings appear
    # redundant in the visible span.
    pr_x_list = list()
    pr_y_list = list()
    sc_x_list = list()
    sc_y_list = list()
    experimentNames = list()
    # Split each tracker's runs by experiment type (OTB-style protocols:
    # default run, Spatial Robustness, Temporal Robustness).
    defaultExpList = list()
    sreExpList = list()
    treExpList = list()
    for listRun, name in zip(self.listOfExperiments, self.experimentNames):
        runs = listRun.data
        experimentNames.append(name)
        defaultExpList.append(runs['default'])
        sreExpList.append(runs['SRE'])
        treExpList.append(runs['TRE'])
    # Order matters: index 0 = default, 1 = SRE, 2 = TRE.
    allExpList = list()
    allExpList.append(defaultExpList)
    allExpList.append(sreExpList)
    allExpList.append(treExpList)
    # Accumulators over experiment types; each element is itself a list
    # with one curve per tracker.
    pr_x_all_list = list()
    pr_y_all_list = list()
    sc_x_all_list = list()
    sc_y_all_list = list()
    for exp in allExpList:  # experiment type
        # exp
        # Fresh Evaluator scoped to this experiment type's runs.
        e = Evaluator(self.dataset, exp)
        pr_x_list = list()
        pr_y_list = list()
        sc_x_list = list()
        sc_y_list = list()
        # NOTE(review): measures_specific_list is never appended to in the
        # visible span — possibly used (or dead) further down; confirm.
        measures_specific_list = list()
        for listRun in exp:  # different tracker runs
            (precision_x, precision_y, success_x, success_y) = e.evaluateSingleTracker(listRun, n)
            pr_x_list.append(precision_x)
            pr_y_list.append(precision_y)
            sc_x_list.append(success_x)
            sc_y_list.append(success_y)
        pr_x_all_list.append(pr_x_list)
        pr_y_all_list.append(pr_y_list)
        sc_x_all_list.append(sc_x_list)
        sc_y_all_list.append(sc_y_list)