def main(argv=None):
    """Visualize precision/success for one video of a single saved run.

    :param argv: optional argument vector; defaults to ``sys.argv``.
    """
    if argv is None:
        argv = sys.argv
    wu2013GroundTruth = "/Users/Ivan/Files/Data/wu2013"
    datasetType = 'wu2013'
    runName = './Runs/lambda=0.1_hogANDhist_int_f1.p'
    run = loadPickle(runName)
    experimenType = 'default'
    run = run.data[experimenType]
    # BUG FIX: vidName was commented out but still passed to
    # precisionAndSuccessPlot() below, raising NameError.
    vidName = 'jogging-2'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    viz = VisualizeExperiment(dataset, run)
    # viz.show(vidName, experimenType)
    # viz.barplot()
    viz.precisionAndSuccessPlot(vidName)
def compareDefaultPlots(wildcard="lambda_SE"):
    """Load every run pickle matching *wildcard*, drop its TRE/SRE payloads,
    and evaluate the first loaded run."""
    groundTruthPath = "/Users/Ivan/Files/Data/wu2013"
    dataset = Dataset(groundTruthPath, 'wu2013')
    pickles = glob.glob("./Runs/" + wildcard + "*.p")
    formatSave = 'pdf'  # kept for parity with the original; unused below
    nameExtractor = re.compile("(.*\/)(.+)(.p)")
    byName = dict()
    loadedRuns = list()
    for picklePath in pickles:
        shortName = nameExtractor.match(picklePath).group(2)
        print(shortName)
        loaded = loadPickle(picklePath)
        loaded.trackerLabel = picklePath
        # only the default experiment is of interest here
        loaded.data['TRE'].data = []
        loaded.data['SRE'].data = []
        byName[picklePath] = loaded
        loadedRuns.append(loaded)
    evaluator = Evaluator(dataset, loadedRuns)
    evaluator.evaluateSingleTracker(loadedRuns[0])
def generateAllVizualiations():
    """Write a barplot visualization (PDF) for each run in a fixed list."""
    wu2013GroundTruth = "/Users/Ivan/Files/Data/wu2013"
    folderForGraphs = './Visualizations/'
    datasetType = 'wu2013'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    # BUG FIX: a glob.glob('./Runs/upd=3*.p') call here was dead code -- its
    # result was unconditionally overwritten by the hard-coded list below.
    runsNames = ['lambda_nonorm_s0_e0.5', 'lambda_nonorm_s0.2_e0.2',
                 'lambda_nonorm_s0.3_e0.4', 'lambda_nonorm_s0.4_e0.4',
                 'lambda_nonorm_s0.5_e0.3']
    runsNames = ["./Runs/" + n + ".p" for n in runsNames]
    formatSave = 'pdf'
    regexp = re.compile("(.*\/)(.+)(.p)")
    for runName in runsNames:
        # extract the short run name from './Runs/<name>.p'
        name = regexp.match(runName).group(2)
        print(name)
        run = loadPickle(runName)
        viz = VisualizeAllExperiments(dataset, run)
        viz.barplot(savefile=folderForGraphs + name + "." + formatSave)
def main(argv=None):
    """Show tracking output and a barplot for one video of an SRE run.

    :param argv: optional argument vector; defaults to ``sys.argv``.
    """
    if argv is None:
        argv = sys.argv
    wu2013GroundTruth = "/Users/Ivan/Files/Data/Tracking_benchmark"
    datasetType = 'wu2013'
    runName = './Runs/fk_hist_int_f1.p'
    run = loadPickle(runName)
    experimenType = 'SRE'
    # BUG FIX: vidName was commented out yet passed to viz.show() -> NameError.
    vidName = 'jogging-2'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    viz = VisualizeExperiment(dataset, run)
    viz.show(vidName, experimenType)
    viz.barplot()
def createSavedEvaluations(wildcard):
    """Evaluate every pickled run matching *wildcard* and save the metrics.

    :param wildcard: filename prefix of the run pickles under ``./Runs/``.
    """
    wu2013GroundTruth = "/Users/Ivan/Files/Data/Tracking_benchmark"
    datasetType = 'wu2013'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    runsNames = glob.glob('./Runs/' + wildcard + '*.p')
    runs = list()
    names = list()  # BUG FIX: this was commented out -> NameError on append below
    for runName in runsNames:
        run = loadPickle(runName)
        names.append(runName)
        print(runName)
        runs.append(run)
    evaluator = EvaluatorAllExperiments(dataset, runs, names)
    strSave = './Results/'
    evaluator.calculateMetricsAndSave(strSave)
def createSavedEvaluations(wildcard):
    """Evaluate every pickled run matching *wildcard* and save the metrics.

    NOTE(review): duplicate of an earlier ``createSavedEvaluations`` in this
    file; the later definition wins at import time -- TODO deduplicate.
    """
    wu2013GroundTruth = "/Users/Ivan/Files/Data/wu2013"
    datasetType = 'wu2013'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    # BUG FIX: removed a dead loop that iterated an always-empty hard-coded
    # list; runsNames was then reassigned from this glob anyway.
    runsNames = glob.glob('./Runs/' + wildcard + '*.p')
    runs = list()
    names = list()  # BUG FIX: this was commented out -> NameError on append below
    for runName in runsNames:
        run = loadPickle(runName)
        names.append(runName)
        print(runName)
        runs.append(run)
    evaluator = EvaluatorAllExperiments(dataset, runs, names)
    strSave = './Results/'
    evaluator.calculateMetricsAndSave(strSave)
def plot_OPE_comparison():
    """Bar-plot per-tracker OPE success for the CVPR2016 run collection."""
    dataset = Dataset("/Users/Ivan/Files/Data/wu2013", 'wu2013')
    (runsNames, corrected_names) = getRunsForCVPR2016()
    runs = list()
    names = list()
    for runName in runsNames:
        names.append(runName)
        runs.append(loadPickle(runName))
    plotMetricsDict = dict()
    completeMetricDict = dict()
    if len(corrected_names) == 0:
        # no display names supplied: key the metrics by each run's own name
        for r in runs:
            plotMetricsDict[r.name] = r.plotMetricsDictEntry
            completeMetricDict[r.name] = r.completeMetricDictEntry
    else:
        for r, label in zip(runs, corrected_names):
            plotMetricsDict[label] = r.plotMetricsDictEntry
            completeMetricDict[label] = r.completeMetricDictEntry
    import re
    success = list()
    precision = list()
    names = list()
    for label, metrics in plotMetricsDict.items():
        # indices 5 / 4 of the default-experiment entry hold success / precision
        success.append(metrics['default'][5])
        precision.append(metrics['default'][4])
        if "ObjStruck with " in label:
            names.append(re.sub("ObjStruck with ", '', label))
        else:
            names.append(label)
    df = pd.DataFrame({'name': names, 'success': success, 'precision': precision})
    print(df)
    sns.set_style("whitegrid")
    plt.subplots_adjust(bottom=0.4)
    ax = sns.barplot(x="name", y="success", data=df, palette="Blues_d")
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=-45)
    plt.show()
def plot_OPE_comparison():
    """Bar-plot per-tracker OPE success for the CVPR2016 run collection.

    NOTE(review): duplicate of an earlier ``plot_OPE_comparison`` in this file.
    """
    dataset = Dataset("/Users/Ivan/Files/Data/wu2013", 'wu2013')
    (runsNames, corrected_names) = getRunsForCVPR2016()
    runs = [loadPickle(rn) for rn in runsNames]
    names = list(runsNames)
    # choose the dictionary keys: supplied display names, or each run's own name
    if len(corrected_names) == 0:
        labels = [r.name for r in runs]
    else:
        labels = corrected_names
    plotMetricsDict = dict(zip(labels, [r.plotMetricsDictEntry for r in runs]))
    completeMetricDict = dict(zip(labels, [r.completeMetricDictEntry for r in runs]))
    import re
    success = list()
    precision = list()
    names = list()
    for label, entry in plotMetricsDict.items():
        success.append(entry['default'][5])
        precision.append(entry['default'][4])
        short = re.sub("ObjStruck with ", '', label) if "ObjStruck with " in label else label
        names.append(short)
    df = pd.DataFrame({'name': names, 'success': success, 'precision': precision})
    print(df)
    sns.set_style("whitegrid")
    plt.subplots_adjust(bottom=0.4)
    ax = sns.barplot(x="name", y="success", data=df, palette="Blues_d")
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=-45)
    plt.show()
def plotStraddelingEdgeOPE(wildcard, savefilename='', format='pdf'):
    """Facet-grid plots of OPE precision and success over (lambda_s, lambda_e).

    :param wildcard: prefix of the result pickles under ``./Results/``.
    :param savefilename: when non-empty, figures are saved with this prefix
        instead of being shown interactively.
    :param format: image format extension used when saving.
    """
    dataset = Dataset("/Users/Ivan/Files/Data/wu2013", 'wu2013')
    resultFiles = glob.glob("./Results/" + wildcard + "*.p")
    nameExtractor = re.compile("(.*\/)(.+)(.p)")
    metricsByName = dict()
    for resultFile in resultFiles:
        shortName = nameExtractor.match(resultFile).group(2)
        loaded = loadPickle(resultFile)
        # slots 4:6 of the default-experiment entry hold the plotted pair
        metricsByName[shortName] = loaded.plotMetricsDictEntry['default'][4:6]
    (df1, df2) = reshuffleStraddlingEdgeOPEData(metricsByName)
    # precision: one facet per lambda_e value, dotted reference line at 0.749
    grid = sns.FacetGrid(df1, col="$\lambda_e$", hue="$\lambda_e$", col_wrap=3, size=3)
    grid.map(plt.axhline, y=0.749, ls=":", c=".5")
    grid.map(plt.plot, "$\lambda_s$", "Precision", marker="o", ms=4)
    grid.set(xticks=np.arange(6) / float(10),
             yticks=[0.6, 0.65, 0.7, 0.75, 0.8, 0.85],
             xlim=(0, 0.5), ylim=(0.6, 0.85))
    grid.fig.tight_layout(w_pad=1)
    if savefilename == '':
        plt.show()
    else:
        plt.savefig(savefilename + "precision." + format)
    # success: same layout, dotted reference line at 0.59
    grid = sns.FacetGrid(df2, col="$\lambda_e$", hue="$\lambda_e$", col_wrap=3, size=3)
    grid.map(plt.axhline, y=0.59, ls=":", c=".5")
    grid.map(plt.plot, "$\lambda_s$", "Success", marker="o", ms=4)
    grid.set(xticks=np.arange(6) / float(10),
             yticks=[0.5, 0.55, 0.6, 0.65, 0.7],
             xlim=(0, 0.5), ylim=(0.5, 0.7))
    grid.fig.tight_layout(w_pad=1)
    if savefilename == '':
        plt.show()
    else:
        plt.savefig(savefilename + "success." + format)
def plotOPE_SRE_TRE_Robust(
        saveFigureToFolder='/Users/Ivan/Code/Tracking/Antrack/doc/technical_reports/images/',
        format='png'):
    """Plot the robust CVPR2016 evaluation set from previously saved metrics.

    :param saveFigureToFolder: destination folder for the generated figures.
    :param format: image format extension.
    """
    dataset = Dataset("/Users/Ivan/Files/Data/wu2013", 'wu2013')
    (runsNames, corrected_names) = getRunsForCVPR2016_robust()
    runs = list()
    names = list()
    for pickledName in runsNames:
        names.append(pickledName)
        runs.append(loadPickle(pickledName))
    save = True  # always saves; the show-only branch below is kept for parity
    evaluator = EvaluatorAllExperiments(dataset, list(), names)
    successAndPrecision = 'cvpr2016_SuccessAndPrecision_wu2013'
    histograms = 'cvpr2016_histogram_wu2013'
    if save:
        evaluator.evaluateFromSave(
            runs,
            successAndPrecisionPlotName=saveFigureToFolder + successAndPrecision + '.' + format,
            histogramPlot=saveFigureToFolder + histograms + '.' + format,
            alternativeNames=corrected_names)
    else:
        evaluator.evaluateFromSave(runs, alternativeNames=corrected_names)
def getRuns(self, wildcard):
    """Load all pickled runs under ``self.folder`` matching *wildcard*.

    :param wildcard: filename prefix of the pickles inside ``self.folder``.
    :returns: ``(plotMetricsDict, completeMetricDict)`` -- both keyed by each
        run's ``name`` attribute.
    """
    runsNames = glob.glob(self.folder + wildcard + '*.p')
    runs = list()
    names = list()  # BUG FIX: this was commented out -> NameError on append below
    for runName in runsNames:
        run = loadPickle(runName)
        names.append(runName)
        runs.append(run)
    plotMetricsDict = dict()
    completeMetricDict = dict()
    for r in runs:
        plotMetricsDict[r.name] = r.plotMetricsDictEntry
        completeMetricDict[r.name] = r.completeMetricDictEntry
    return (plotMetricsDict, completeMetricDict)
def plotOPE_SRE_TRE_Robust(saveFigureToFolder='/Users/Ivan/Code/Tracking/Antrack/doc/technical_reports/images/', format='png'):
    """Replot the robust CVPR2016 evaluation set from saved metrics.

    NOTE(review): duplicate of an earlier ``plotOPE_SRE_TRE_Robust`` here.
    """
    groundTruth = "/Users/Ivan/Files/Data/wu2013"
    dataset = Dataset(groundTruth, 'wu2013')
    (pickleNames, prettyNames) = getRunsForCVPR2016_robust()
    loaded = [loadPickle(p) for p in pickleNames]
    evaluator = EvaluatorAllExperiments(dataset, list(), list(pickleNames))
    save = True  # always saves; show-only branch kept for parity
    if save:
        successPlot = saveFigureToFolder + 'cvpr2016_SuccessAndPrecision_wu2013' + '.' + format
        histogramPlot = saveFigureToFolder + 'cvpr2016_histogram_wu2013' + '.' + format
        evaluator.evaluateFromSave(loaded,
                                   successAndPrecisionPlotName=successPlot,
                                   histogramPlot=histogramPlot,
                                   alternativeNames=prettyNames)
    else:
        evaluator.evaluateFromSave(loaded, alternativeNames=prettyNames)
def generateAllVizualiations():
    """Write a barplot PDF under ./Visualizations/ for every run matching upd*.

    NOTE(review): duplicate name of another ``generateAllVizualiations`` here.
    """
    dataset = Dataset("/Users/Ivan/Files/Data/Tracking_benchmark", 'wu2013')
    outputFolder = './Visualizations/'
    extension = 'pdf'
    nameExtractor = re.compile("(.*\/)(.+)(.p)")
    for pickledRun in glob.glob('./Runs/upd*.p'):
        shortName = nameExtractor.match(pickledRun).group(2)
        print(shortName)
        viz = VisualizeAllExperiments(dataset, loadPickle(pickledRun))
        viz.barplot(savefile=outputFolder + shortName + "." + extension)
def plotComparisonToOtherTrackers(dataset, saveFigureToFolders, save):
    """Compare saved evaluations of our runs against other trackers.

    :param dataset: Dataset instance passed to the evaluator.
    :param saveFigureToFolders: folders receiving the eps figures when *save*.
    :param save: when truthy, write the plots to disk; otherwise display them.
    """
    # TODO: Add TLD tracker and, perhaps, SCM
    runsNames = ['SAMF', 'DSST', 'upd=3_hogANDhist_int_f1', 'a30_hogANDhist_int_f1']
    runs = list()
    names = list()  # BUG FIX: this was commented out -> NameError on append below
    for runName in runsNames:
        runName = './Results/' + runName + '.p'
        run = loadPickle(runName)
        names.append(runName)
        print(runName)
        runs.append(run)
    evaluator = EvaluatorAllExperiments(dataset, list(), names)
    saveFormat = ['eps']
    successAndPrecision = 'SuccessAndPrecision_wu2013'
    histograms = 'histogram_wu2013'
    if save:
        for folder in saveFigureToFolders:
            evaluator.evaluateFromSave(
                runs,
                successAndPrecisionPlotName=folder + "/" + successAndPrecision + '.' + saveFormat[0],
                histogramPlot=folder + "/" + histograms + '.' + saveFormat[0])
    else:
        evaluator.evaluateFromSave(runs)
def generateDefaultVizualiations(wildcard):
    """Write default-experiment barplots (PDF) for every run matching *wildcard*."""
    dataset = Dataset("/Users/Ivan/Files/Data/wu2013", 'wu2013')
    outputFolder = './Visualizations/'
    extension = 'pdf'
    nameExtractor = re.compile("(.*\/)(.+)(.p)")
    for pickledRun in glob.glob("./Runs/" + wildcard + "*.p"):
        shortName = nameExtractor.match(pickledRun).group(2)
        print(shortName)
        loaded = loadPickle(pickledRun)
        # keep only the default experiment; TRE/SRE payloads are discarded
        loaded.data['TRE'].data = []
        loaded.data['SRE'].data = []
        viz = VisualizeAllExperiments(dataset, loaded)
        viz.barplotDefault(savefile=outputFolder + shortName + "." + extension)
def main(argv=None):
    """Load one run for inspection, then list every available run pickle.

    :param argv: optional argument vector; defaults to ``sys.argv``.
    """
    if argv is None:
        argv = sys.argv
    groundTruth = "/Users/Ivan/Files/Data/Tracking_benchmark"
    datasetType = 'wu2013'
    experimentType = 'default'  # retained; per-video display is currently disabled
    runName = './Runs/a28_hist_int_f1.p'
    run = loadPickle(runName)
    vidName = 'basketball'
    dataset = Dataset(groundTruth, datasetType)
    viz = VisualizeAllExperiments(dataset, run)
    for pickledRun in glob.glob('./Runs/*.p'):
        print(pickledRun)
def plotSensitivitySpecific(paperPlot, baselineRun,wildcards,savefile=''):
    """Plot precision (top row) and success (bottom row) sensitivity curves,
    one column per wildcard, each against the baseline run's scores.

    :param paperPlot: object carrying font/line/axis-limit settings.
    :param baselineRun: path of the pickled baseline run.
    :param wildcards: parameter wildcards to sweep (the grid has 4 columns).
    :param savefile: when non-empty, save the figure there instead of showing.
    """
    plist=list()
    slist=list()
    xvalsList=list()
    # gather one (precision, success, x-values) series per wildcard
    for wildcard in wildcards:
        (ps,ss, xValuess) = getPrecisionSuccessGivenWildCard(paperPlot, wildcard)
        plist.append(ps)
        slist.append(ss)
        xvalsList.append(xValuess)
    run = loadPickle(baselineRun)
    baseline_p=run.completeMetricDictEntry[0]
    baseline_s = run.completeMetricDictEntry[1]
    cm = plt.get_cmap('gist_rainbow')
    NUM_COLORS = len(wildcards)+ 1
    # styling knobs copied from the paperPlot configuration object
    titleFontSize = paperPlot.titleFontSize; headerFontSize = paperPlot.headerFontSize; axisFontSize = paperPlot.axisFontSize; lineWidth = paperPlot.lineWidth; legendSize = paperPlot.legendSize; labelsFontSize = paperPlot.labelsFontSize
    minY=paperPlot.minY
    maxY=paperPlot.maxY
    deltaPrecision=paperPlot.deltaPrecision
    max_yticks = 5
    fig, ax = plt.subplots(nrows=2, ncols=4, sharex=True, figsize=(13,5))
    # top row: precision per wildcard, dashed horizontal line at the baseline
    # NOTE(review): original formatting was collapsed; statement nesting below
    # is reconstructed -- the set_major_locator placement was ambiguous.
    for xValues, p, i in zip(xvalsList, plist, range(0,len(wildcards))):
        ax[0,i].plot(xValues,p, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1.*i/NUM_COLORS))
        ax[0,i].plot([min(min(xvalsList[0]),0), max(xvalsList[0])], [baseline_p, baseline_p], color='k', linestyle='--', linewidth=lineWidth)
        ax[0,i].set_ylim(minY+0.05+ deltaPrecision,maxY+ deltaPrecision-0.05)
        title = wildcards[i].replace("=",'')
        yloc = plt.MaxNLocator(max_yticks)
        if i != 0:
            # only the first column keeps its axes and axis labels
            ax[0, i].get_xaxis().set_visible(False)
            ax[0,i].get_yaxis().set_visible(False)
            ax[0,i].set_title(title.upper())
        else:
            ax[0,i].set_ylabel('Precision', color='black')
            ax[0,i].set_title(title)
        ax[0,i].yaxis.set_major_locator(yloc)
    # bottom row: success per wildcard, dashed horizontal line at the baseline
    for xValues, s, i in zip(xvalsList, slist, range(0,len(wildcards))):
        ax[1,i].plot(xValues,s, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1.*i/NUM_COLORS))
        ax[1,i].plot([min(min(xvalsList[0]),0), max(xvalsList[0])], [baseline_s, baseline_s], color='k', linestyle='--', linewidth=lineWidth)
        ax[1,i].set_ylim(minY+0.05,maxY-0.05)
        title = wildcards[i].replace("=",'')
        yloc = plt.MaxNLocator(max_yticks)
        if i != 0:
            ax[1,i].get_yaxis().set_visible(False)
        else:
            ax[1,i].set_ylabel('Success', color='black')
        ax[1,i].yaxis.set_major_locator(yloc)
    # show interactively or write to disk, depending on savefile
    if savefile == '':
        plt.show()
    else:
        plt.savefig(savefile)
# trackerLabel="STR+f_hog" # wildcard = sys.argv[1] dataset = Dataset(wu2013GroundTruth, datasetType) runsNames = glob.glob('./Results/' + wildcard + '*.p') #runsNames = ['SAMF', 'Kernelized_filter', 'fk_hist_int_f0', 'fk_hist_int_f1','TLD'] runs = list() # names=list() for runName in runsNames: #runName= './Results/' + runName + '.p' run = loadPickle(runName) names.append(runName) print runName #run = run.data[experimentType] runs.append(run) # run=loadPickle('./Runs/TLD.p') # runs.append(run) # names.append('./Runs/TLD.p') evaluator = EvaluatorAllExperiments(dataset, list(), names) saveFigureToFolder = '/Users/Ivan/Code/personal-website/Projects/Object_aware_tracking/images/multiScale/' #saveFormat = ['png', 'pdf'] saveFormat=['pdf']
# wildcard = sys.argv[1] dataset = Dataset(wu2013GroundTruth, datasetType) runsNames = glob.glob('./Results/' + wildcard + '*.p') runsNames = getRunsForCVPR2016() #runsNames = ['SAMF', 'Kernelized_filter', 'fk_hist_int_f0', 'fk_hist_int_f1','TLD'] runs = list() # names = list() for runName in runsNames: #runName= './Results/' + runName + '.p' run = loadPickle(runName) names.append(runName) print runName #run = run.data[experimentType] runs.append(run) # run=loadPickle('./Runs/TLD.p') # runs.append(run) # names.append('./Runs/TLD.p') evaluator = EvaluatorAllExperiments(dataset, list(), names) #saveFigureToFolder = '/Users/Ivan/Code/personal-website/Projects/Object_aware_tracking/images/multiScale/' saveFigureToFolder = '/Users/Ivan/Code/Tracking/Antrack/doc/technical_reports/images/' #saveFormat = ['png', 'pdf'] saveFormat = ['png']
def plotSensitivityUpd(paperPlot, baselineRun, wildcard, savefile=''):
    """Two-panel sensitivity plot (precision left, success right) for one
    parameter wildcard, against the baseline run's scores.

    :param paperPlot: object carrying font/line/axis-limit settings.
    :param baselineRun: path of the pickled baseline run.
    :param wildcard: parameter wildcard being swept.
    :param savefile: when non-empty, save the figure there instead of showing.
    """
    (p, s, xValues) = getPrecisionSuccessGivenWildCard(paperPlot, wildcard)
    run = loadPickle(baselineRun)
    baseline_p = run.completeMetricDictEntry[0]
    baseline_s = run.completeMetricDictEntry[1]
    # styling knobs copied from the paperPlot configuration object
    titleFontSize = paperPlot.titleFontSize
    headerFontSize = paperPlot.headerFontSize
    axisFontSize = paperPlot.axisFontSize
    lineWidth = paperPlot.lineWidth
    legendSize = paperPlot.legendSize
    labelsFontSize = paperPlot.labelsFontSize
    minY = paperPlot.minY
    maxY = paperPlot.maxY
    deltaPrecision = paperPlot.deltaPrecision
    cm = plt.get_cmap('gist_rainbow')
    NUM_COLORS = 3
    max_yticks = 5
    fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(5, 4))
    fig.subplots_adjust(hspace=5)
    i = 0
    # left panel: precision vs parameter value, dashed baseline line
    ax[0].plot(xValues, p, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1. * i / NUM_COLORS))
    ax[0].plot([min(min(xValues), 1), max(xValues)], [baseline_p, baseline_p], color='k', linestyle='--', linewidth=lineWidth)
    ax[0].set_ylim(minY + 0.05 + deltaPrecision, maxY + deltaPrecision - 0.05)
    title = wildcard.replace("=", '')
    yloc = plt.MaxNLocator(max_yticks)
    ax[i].set_ylabel('Precision', color='black')
    ax[i].yaxis.set_major_locator(yloc)
    # right panel: success vs parameter value, dashed baseline line
    ax[1].plot(xValues, s, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1. * i / NUM_COLORS))
    ax[1].plot([min(min(xValues), 1), max(xValues)], [baseline_s, baseline_s], color='k', linestyle='--', linewidth=lineWidth)
    ax[1].set_ylim(minY + 0.05, maxY - 0.05)
    title = wildcard.replace("=", '')
    yloc = plt.MaxNLocator(max_yticks)
    i = 0
    ax[1].set_ylabel('Success', color='black')
    ax[1].yaxis.set_major_locator(yloc)
    # show interactively or write to disk, depending on savefile
    if savefile == '':
        plt.show()
    else:
        plt.savefig(savefile)
def plotSensitivitySpecific(paperPlot, baselineRun, wildcards, savefile=''):
    """Plot precision (top row) and success (bottom row) sensitivity curves,
    one column per wildcard, each against the baseline run's scores.

    NOTE(review): duplicate of another ``plotSensitivitySpecific`` in this file.

    :param paperPlot: object carrying font/line/axis-limit settings.
    :param baselineRun: path of the pickled baseline run.
    :param wildcards: parameter wildcards to sweep (the grid has 4 columns).
    :param savefile: when non-empty, save the figure there instead of showing.
    """
    plist = list()
    slist = list()
    xvalsList = list()
    # gather one (precision, success, x-values) series per wildcard
    for wildcard in wildcards:
        (ps, ss, xValuess) = getPrecisionSuccessGivenWildCard(paperPlot, wildcard)
        plist.append(ps)
        slist.append(ss)
        xvalsList.append(xValuess)
    run = loadPickle(baselineRun)
    baseline_p = run.completeMetricDictEntry[0]
    baseline_s = run.completeMetricDictEntry[1]
    cm = plt.get_cmap('gist_rainbow')
    NUM_COLORS = len(wildcards) + 1
    # styling knobs copied from the paperPlot configuration object
    titleFontSize = paperPlot.titleFontSize
    headerFontSize = paperPlot.headerFontSize
    axisFontSize = paperPlot.axisFontSize
    lineWidth = paperPlot.lineWidth
    legendSize = paperPlot.legendSize
    labelsFontSize = paperPlot.labelsFontSize
    minY = paperPlot.minY
    maxY = paperPlot.maxY
    deltaPrecision = paperPlot.deltaPrecision
    max_yticks = 5
    fig, ax = plt.subplots(nrows=2, ncols=4, sharex=True, figsize=(13, 5))
    # top row: precision per wildcard, dashed horizontal line at the baseline
    for xValues, p, i in zip(xvalsList, plist, range(0, len(wildcards))):
        ax[0, i].plot(xValues, p, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1. * i / NUM_COLORS))
        ax[0, i].plot([min(min(xvalsList[0]), 0), max(xvalsList[0])], [baseline_p, baseline_p], color='k', linestyle='--', linewidth=lineWidth)
        ax[0, i].set_ylim(minY + 0.05 + deltaPrecision, maxY + deltaPrecision - 0.05)
        title = wildcards[i].replace("=", '')
        yloc = plt.MaxNLocator(max_yticks)
        if i != 0:
            # only the first column keeps its axes and axis labels
            ax[0, i].get_xaxis().set_visible(False)
            ax[0, i].get_yaxis().set_visible(False)
            ax[0, i].set_title(title.upper())
        else:
            ax[0, i].set_ylabel('Precision', color='black')
            ax[0, i].set_title(title)
        ax[0, i].yaxis.set_major_locator(yloc)
    # bottom row: success per wildcard, dashed horizontal line at the baseline
    for xValues, s, i in zip(xvalsList, slist, range(0, len(wildcards))):
        ax[1, i].plot(xValues, s, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1. * i / NUM_COLORS))
        ax[1, i].plot([min(min(xvalsList[0]), 0), max(xvalsList[0])], [baseline_s, baseline_s], color='k', linestyle='--', linewidth=lineWidth)
        ax[1, i].set_ylim(minY + 0.05, maxY - 0.05)
        title = wildcards[i].replace("=", '')
        yloc = plt.MaxNLocator(max_yticks)
        if i != 0:
            ax[1, i].get_yaxis().set_visible(False)
        else:
            ax[1, i].set_ylabel('Success', color='black')
        ax[1, i].yaxis.set_major_locator(yloc)
    # show interactively or write to disk, depending on savefile
    if savefile == '':
        plt.show()
    else:
        plt.savefig(savefile)
def plotStraddelingEdgeOPE(wildcard, savefilename='', format='pdf'):
    """Facet-grid plots of OPE precision and success over (lambda_s, lambda_e).

    NOTE(review): duplicate of another ``plotStraddelingEdgeOPE`` in this file.

    :param wildcard: prefix of the result pickles under ``./Results/``.
    :param savefilename: when non-empty, save figures with this prefix instead
        of showing them interactively.
    :param format: image format extension used when saving.
    """
    wu2013GroundTruth = "/Users/Ivan/Files/Data/wu2013"
    folderForGraphs = './Visualizations/'
    datasetType = 'wu2013'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    runsNames = glob.glob("./Results/" + wildcard + "*.p")
    formatSave = 'pdf'
    regexp = re.compile("(.*\/)(.+)(.p)")
    experimentType = 'default'
    d = dict()
    # collect the (precision, success) pair of each run's default experiment
    for runName in runsNames:
        m = re.match(regexp, runName)
        name = m.group(2)
        run = loadPickle(runName)
        results = run.plotMetricsDictEntry['default']
        d[name] = results[4:6]
    (df1, df2) = reshuffleStraddlingEdgeOPEData(d)
    save = False
    # precision grid: one facet per lambda_e, dotted reference line at 0.749
    grid = sns.FacetGrid(df1, col="$\lambda_e$", hue="$\lambda_e$", col_wrap=3, size=3)
    grid.map(plt.axhline, y=0.749, ls=":", c=".5")
    grid.map(plt.plot, "$\lambda_s$", "Precision", marker="o", ms=4)
    grid.set(xticks=np.arange(6) / float(10), yticks=[0.6, 0.65, 0.7, 0.75, 0.8, 0.85],
             xlim=(0, 0.5), ylim=(0.6, 0.85))
    grid.fig.tight_layout(w_pad=1)
    if savefilename == '':
        plt.show()
    else:
        plt.savefig(savefilename + "precision." + format)
    # success grid: same layout, dotted reference line at 0.59
    grid = sns.FacetGrid(df2, col="$\lambda_e$", hue="$\lambda_e$", col_wrap=3, size=3)
    grid.map(plt.axhline, y=0.59, ls=":", c=".5")
    grid.map(plt.plot, "$\lambda_s$", "Success", marker="o", ms=4)
    grid.set(xticks=np.arange(6) / float(10), yticks=[0.5, 0.55, 0.6, 0.65, 0.7],
             xlim=(0, 0.5), ylim=(0.5, 0.7))
    grid.fig.tight_layout(w_pad=1)
    if savefilename == '':
        plt.show()
    else:
        plt.savefig(savefilename + "success." + format)
def plotSensitivityUpd(paperPlot, baselineRun, wildcard, savefile=''):
    """Two-panel sensitivity plot (precision left, success right) for one
    parameter wildcard, against the baseline run's scores.

    NOTE(review): duplicate of another ``plotSensitivityUpd`` in this file.

    :param paperPlot: object carrying font/line/axis-limit settings.
    :param baselineRun: path of the pickled baseline run.
    :param wildcard: parameter wildcard being swept.
    :param savefile: when non-empty, save the figure there instead of showing.
    """
    (p,s, xValues) = getPrecisionSuccessGivenWildCard(paperPlot, wildcard)
    run = loadPickle(baselineRun)
    baseline_p=run.completeMetricDictEntry[0]
    baseline_s = run.completeMetricDictEntry[1]
    # styling knobs copied from the paperPlot configuration object
    titleFontSize = paperPlot.titleFontSize; headerFontSize = paperPlot.headerFontSize; axisFontSize = paperPlot.axisFontSize; lineWidth = paperPlot.lineWidth; legendSize = paperPlot.legendSize; labelsFontSize = paperPlot.labelsFontSize
    minY=paperPlot.minY
    maxY=paperPlot.maxY
    deltaPrecision=paperPlot.deltaPrecision
    cm = plt.get_cmap('gist_rainbow')
    NUM_COLORS =3
    max_yticks = 5
    fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(5,4))
    fig.subplots_adjust(hspace=5)
    i = 0
    # left panel: precision vs parameter value, dashed baseline line
    ax[0].plot(xValues,p, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1.*i/NUM_COLORS))
    ax[0].plot([min(min(xValues),1), max(xValues)], [baseline_p, baseline_p], color='k', linestyle='--', linewidth=lineWidth)
    ax[0].set_ylim(minY+0.05+ deltaPrecision,maxY+ deltaPrecision-0.05)
    title = wildcard.replace("=",'')
    yloc = plt.MaxNLocator(max_yticks)
    ax[i].set_ylabel('Precision', color='black')
    ax[i].yaxis.set_major_locator(yloc)
    # right panel: success vs parameter value, dashed baseline line
    ax[1].plot(xValues,s, paperPlot.sensitivityColorAndPoints, linewidth=lineWidth, color=cm(1.*i/NUM_COLORS))
    ax[1].plot([min(min(xValues),1), max(xValues)], [baseline_s, baseline_s], color='k', linestyle='--', linewidth=lineWidth)
    ax[1].set_ylim(minY+0.05,maxY-0.05)
    title = wildcard.replace("=",'')
    yloc = plt.MaxNLocator(max_yticks)
    i = 0
    ax[1].set_ylabel('Success', color='black')
    ax[1].yaxis.set_major_locator(yloc)
    # show interactively or write to disk, depending on savefile
    if savefile == '':
        plt.show()
    else:
        plt.savefig(savefile)
def plotsensitivity(self, baselineRun,wildcard,savefile=''):
    """Plot precision and success of the runs matching *wildcard* against a
    baseline run, with the parameter value parsed from each run's name.

    NOTE(review): assumes Python 2 -- ``runNames`` comes from ``dict.keys()``
    and is indexed below, which requires it to be a list.

    :param baselineRun: path of the pickled baseline run.
    :param wildcard: parameter wildcard; also used as the x-axis label.
    :param savefile: when non-empty, save the figure there instead of showing.
    """
    (plotMetricsDict, completeMetricDict) = self.getRuns(wildcard)
    run = loadPickle(baselineRun)
    baseline_p=run.completeMetricDictEntry[0]
    baseline_s = run.completeMetricDictEntry[1]
    cm = plt.get_cmap('gist_rainbow')
    # styling knobs taken from this object's configuration
    titleFontSize = self.titleFontSize; headerFontSize = self.headerFontSize; axisFontSize = self.axisFontSize; lineWidth = self.lineWidth; legendSize = self.legendSize; labelsFontSize = self.labelsFontSize
    minY=self.minY
    maxY=self.maxY
    deltaPrecision=self.deltaPrecision
    # parse the numeric parameter value out of each run name, e.g. 'upd=3' -> 3
    runNames = plotMetricsDict.keys()
    regExp=re.compile("([\D|=]+)(\d+)")
    xValues=list()
    for r in runNames:
        m=regExp.match(r)
        xValues.append((float)(m.group(2)))
    # sort names by their parsed x value so the curve is monotone in x
    idx_sorted = [i[0] for i in sorted(enumerate(xValues), key=lambda x: x[1])]
    xValues=sorted(xValues)
    correctNames = [runNames[x] for x in idx_sorted]
    plt.figure(figsize=(13,9))
    p=list()
    s=list()
    for name in correctNames:
        p.append(completeMetricDict[name][0])
        s.append(completeMetricDict[name][1])
    plt.suptitle('Sensitivity analysis for parameter: '+wildcard,fontsize=self.axisFontSize+4)
    # left subplot: precision, with dashed baseline line
    plt.subplot(1,2,1)
    plt.plot(xValues,p, self.sensitivityColorAndPoints,linewidth=lineWidth)
    plt.plot([min(min(xValues),0), max(xValues)], [baseline_p, baseline_p], color='k', linestyle='--', linewidth=lineWidth)
    plt.ylim((minY+ deltaPrecision,maxY+ deltaPrecision))
    plt.xlabel(wildcard, fontsize=axisFontSize)
    plt.ylabel("Precision", fontsize=axisFontSize)
    plt.legend(['filter on','filter off'])
    # right subplot: success, with dashed baseline line
    plt.subplot(1, 2, 2)
    plt.plot(xValues,s, self.sensitivityColorAndPoints, linewidth=lineWidth)
    plt.plot([min(min(xValues), 0), max(xValues)], [baseline_s, baseline_s], color='k', linestyle='--', linewidth=lineWidth)
    plt.ylim((minY, maxY))
    plt.xlabel(wildcard, fontsize=axisFontSize)
    plt.ylabel("Success", fontsize=axisFontSize)
    # show interactively or write to disk, depending on savefile
    if savefile == '':
        plt.show()
    else:
        plt.savefig(savefile)
# NOTE(review): fragment -- the loop below is the tail of a method whose `def`
# line (and the enclosing class) is outside this view; `d` and `videos` come
# from that missing context. Indentation reconstructed -- TODO confirm.
        # wrap each entry of `d` in a VideoResult and store the collection
        for v in d:
            z=VideoResult(v)
            videos.append(z)
        self.videos=videos

    def process(self,cpus=3):
        """Process every stored video sequentially.

        NOTE(review): `cpus` is accepted but unused -- no parallelism here.
        """
        for v in self.videos:
            v.process()

if __name__ == "__main__":
    wu2013GroundTruth = "/Users/Ivan/Files/Data/Tracking_benchmark"
    datasetType = 'wu2013'
    # dataset = Dataset(wu2013GroundTruth, datasetType)
    # d = dataset.dictData
    # wholeDataset=DatasetResult(d)
    # wholeDataset.process()
    # saveName='objectness_gt_vs_else_pickle.p'
    # savePickle(wholeDataset,saveName)
    # NOTE(review): `saveName` is only assigned in the commented-out line
    # above, so this call raises NameError as written -- restore it first.
    wholeDataset=loadPickle(saveName)
    # This experiment shows how does objecteness differs as a function of distance from the object center