Code example #1
File: VisualizeExperiment.py  Project: brixen/Antrack
    def precisionAndSuccessPlotData(self, vidName, experimentType, experimentNumber=0, n=1000):
        """Get the data necessary for plotting precision and success

        Args:
            vidName: name of the video to evaluate
            experimentType: experiment key, e.g. 'default', 'SRE' or 'TRE'
            experimentNumber: index of a single run (unused; all runs are averaged)
            n: number of sample points on each curve

        Returns:
            (x_pr, y_pr, x_s, y_s)
        """

        gt_data = [x for x in self.dataset.data if x[0] == vidName][0]

        tracker_data = [x for x in self.run.data[experimentType].data if x[0] == vidName][0]

        # Evaluate the first run, then accumulate and average the curves of
        # the remaining runs.
        (x_pr, y_pr, x_s, y_s) = Evaluator.evaluateSingleVideo(tracker_data, gt_data,
                                                               experimentNumber=0)

        for index in range(1, len(tracker_data[1])):
            (x_pr1, y_pr1, x_s1, y_s1) = Evaluator.evaluateSingleVideo(tracker_data, gt_data,
                                                                       experimentNumber=index)
            x_pr = x_pr + x_pr1
            y_pr = y_pr + y_pr1
            x_s = x_s + x_s1
            y_s = y_s + y_s1

        nRuns = float(len(tracker_data[1]))
        x_pr = x_pr / nRuns
        y_pr = y_pr / nRuns
        x_s = x_s / nRuns
        y_s = y_s / nRuns

        return (x_pr, y_pr, x_s, y_s)
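A minimal usage sketch for the method above; the VisualizeExperiment constructor arguments and the video name are assumptions, inferred from the attributes (self.dataset, self.run) the method uses:

import matplotlib.pyplot as plt

# Hypothetical driver code; 'dataset' and 'run' are assumed to be a loaded
# Dataset and a pickled run as in example #3 below.
vis = VisualizeExperiment(dataset, run)
(x_pr, y_pr, x_s, y_s) = vis.precisionAndSuccessPlotData("basketball", 'default')

plt.plot(x_pr, y_pr)  # precision vs. centre-error threshold
plt.plot(x_s, y_s)    # success vs. overlap threshold
plt.show()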
Code example #2
    def precisionAndSuccessPlotData(self, vidName, experimentType, experimentNumber=0, n=1000):
        """Get the data necessary for plotting precision and success

        Args:
            vidName: name of the video to evaluate
            experimentType: experiment key (unused in this variant)
            experimentNumber: index of a single run (unused; all runs are averaged)
            n: number of sample points on each curve

        Returns:
            (x_pr, y_pr, x_s, y_s)
        """

        gt_data = [x for x in self.dataset.data if x[0] == vidName][0]

        # In this variant self.run.data is a flat list of per-video results
        # rather than a dict keyed by experiment type.
        #tracker_data = [x for x in self.run.data[experimentType].data if x[0] == vidName][0]
        print vidName
        tracker_data = [x for x in self.run.data if x[0] == vidName][0]
        (x_pr, y_pr, x_s, y_s) = Evaluator.evaluateSingleVideo(tracker_data, gt_data,
                                                               experimentNumber=0)

        for index in range(1, len(tracker_data[1])):
            (x_pr1, y_pr1, x_s1, y_s1) = Evaluator.evaluateSingleVideo(tracker_data, gt_data,
                                                                       experimentNumber=index)
            x_pr = x_pr + x_pr1
            y_pr = y_pr + y_pr1
            x_s = x_s + x_s1
            y_s = y_s + y_s1

        nRuns = float(len(tracker_data[1]))
        x_pr = x_pr / nRuns
        y_pr = y_pr / nRuns
        x_s = x_s / nRuns
        y_s = y_s / nRuns

        return (x_pr, y_pr, x_s, y_s)
Code example #3
def compareDefaultPlots(wildcard="lambda_SE"):
    wu2013GroundTruth = "/Users/Ivan/Files/Data/wu2013"
    datasetType = 'wu2013'
    dataset = Dataset(wu2013GroundTruth, datasetType)
    runsNames = glob.glob("./Runs/" + wildcard + "*.p")

    formatSave = 'pdf'

    # Capture the directory, the run name and the ".p" extension
    # (raw string and escaped dot, so ".p" is matched literally).
    regexp = re.compile(r"(.*/)(.+)(\.p)")
    # Example run names:
    #s0_e0.2
    #s0_e0.5
    #s0.1_e0.3
    #s0.2_e0.2
    #s0.3_e0.4
    d = dict()
    runs = list()
    for runName in runsNames:
        m = regexp.match(runName)
        name = m.group(2)
        print name
        run = loadPickle(runName)
        run.trackerLabel = runName
        # Drop the TRE/SRE results so only the default experiment is compared.
        run.data['TRE'].data = []
        run.data['SRE'].data = []
        d[runName] = run
        runs.append(run)
    evaluator = Evaluator(dataset, runs)
    evaluator.evaluateSingleTracker(runs[0])
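The pattern above only needs to split a pickle path into directory, run name and extension; a quick self-contained check of that behaviour, using a hypothetical file name matching the glob:

import re

regexp = re.compile(r"(.*/)(.+)(\.p)")

m = regexp.match("./Runs/lambda_SE_s0_e0.2.p")
print m.group(1)  # ./Runs/
print m.group(2)  # lambda_SE_s0_e0.2
print m.group(3)  # .p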
Code example #4
    def evaluate(self,
                 n=1000,
                 successAndPrecisionPlotName='',
                 histogramPlot=''):
        '''
        Evaluate every tracker run over all experiment types, print the
        aggregated precision and success scores and create the
        corresponding plots.
        '''

        completeMetricDict = dict()

        plotMetricsDict = dict()

        for run, experimentName in zip(self.listOfExperiments,
                                       self.experimentNames):

            # Overall precision/success, aggregated over the non-default
            # (SRE/TRE) experiment runs only; see the check further down.
            fullP = 0
            fullS = 0

            ll = 0

            experimentDict = dict()

            for expName, experiment in run.data.iteritems():

                x_p_var = np.zeros((n, 1))
                y_p_var = np.zeros((n, 1))

                x_s_var = np.zeros((n, 1))
                y_s_var = np.zeros((n, 1))

                averageP = 0
                averageS = 0

                l = 0

                for videoData in experiment.data:

                    gt = [
                        x for x in self.dataset.data if x[0] == videoData[0]
                    ][0]

                    for expRunIndex in range(0, len(videoData[1])):
                        (x_pr, y_pr, x_s, y_s) = Evaluator.evaluateSingleVideo(
                            videoData, gt, experimentNumber=expRunIndex, n=n)

                        x_p_var = x_p_var + x_pr
                        y_p_var = y_p_var + y_pr

                        x_s_var = x_s_var + x_s
                        y_s_var = y_s_var + y_s

                        l = l + 1

                        (p1, s1) = Evaluator.getIntegralValues(
                            x_pr, y_pr, x_s, y_s)

                        averageP = averageP + p1
                        averageS = averageS + s1

                        if expName != 'default':
                            fullP = fullP + p1
                            fullS = fullS + s1
                            ll = ll + 1

                x_p_var = x_p_var / (float(l))
                y_p_var = y_p_var / (float(l))
                x_s_var = x_s_var / (float(l))
                y_s_var = y_s_var / (float(l))

                averageP = averageP / (float(l))
                averageS = averageS / (float(l))

                experimentDict[expName] = (x_p_var, y_p_var, x_s_var, y_s_var,
                                           averageP, averageS)

            fullP = fullP / (float(ll))
            fullS = fullS / (float(ll))

            completeMetricDict[experimentName] = (fullP, fullS)

            plotMetricsDict[experimentName] = experimentDict

            print experimentName
            print "===================="
            print "Precision: ", fullP
            print "Success: ", fullS

            for key, value in experimentDict.iteritems():
                print key
                print value[4], value[5]

            print "===================="


        self.createPlot(plotMetricsDict,
                        completeMetricDict,
                        savefilename=successAndPrecisionPlotName)
        self.createHistogramPlot(plotMetricsDict,
                                 completeMetricDict,
                                 savefilename=histogramPlot)
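Evaluator.getIntegralValues is not shown in these examples. Judging from the trapezoidal integration used in the barplot code below (np.trapz(y_pr, x=x_pr) / 50 and np.trapz(y_s, x=x_s)), it plausibly reduces each curve pair to scalar scores like this sketch; this is an assumption, not the project's actual implementation:

import numpy as np

def getIntegralValues(x_pr, y_pr, x_s, y_s, maxThreshold=50.0):
    # Hypothetical re-implementation: area under the precision curve,
    # normalised by the ~50-pixel threshold range, and raw area under the
    # success curve, whose overlap axis already spans [0, 1].
    p = np.trapz(y_pr, x=x_pr) / maxThreshold
    s = np.trapz(y_s, x=x_s)
    return (p, s)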
Code example #5
    def calculateMetricsAndSave(self, savePath, n=1000):
        """Compute the metrics for every run and pickle one Evaluated
        object per run into savePath."""
        completeMetricDict = dict()

        plotMetricsDict = dict()

        for run, experimentName in zip(self.listOfExperiments,
                                       self.experimentNames):

            fullP = 0
            fullS = 0

            ll = 0

            experimentDict = dict()

            for expName, experiment in run.data.iteritems():

                # Skip experiments that have no recorded data.
                if len(experiment.data) == 0:
                    continue

                x_p_var = np.zeros(n)
                y_p_var = np.zeros(n)

                x_s_var = np.zeros(n)
                y_s_var = np.zeros(n)

                averageP = 0
                averageS = 0

                l = 0

                for videoData in experiment.data:

                    gt = [
                        x for x in self.dataset.data if x[0] == videoData[0]
                    ][0]

                    for expRunIndex in range(0, len(videoData[1])):
                        (x_pr, y_pr, x_s, y_s) = Evaluator.evaluateSingleVideo(
                            videoData, gt, experimentNumber=expRunIndex, n=n)

                        x_p_var = np.add(x_p_var, x_pr)
                        y_p_var = np.add(y_p_var, y_pr)

                        x_s_var = np.add(x_s_var, x_s)
                        y_s_var = np.add(y_s_var, y_s)

                        l = l + 1

                        (p1, s1) = Evaluator.getIntegralValues(
                            x_pr, y_pr, x_s, y_s)

                        averageP = averageP + p1
                        averageS = averageS + s1

                        if expName != 'default':
                            fullP = fullP + p1
                            fullS = fullS + s1
                            ll = ll + 1

                x_p_var = x_p_var / (float(l))
                y_p_var = y_p_var / (float(l))
                x_s_var = x_s_var / (float(l))
                y_s_var = y_s_var / (float(l))

                averageP = averageP / (float(l))
                averageS = averageS / (float(l))

                experimentDict[expName] = (x_p_var, y_p_var, x_s_var, y_s_var,
                                           averageP, averageS)

            if ll == 0:
                fullP = 0
                fullS = 0
            else:
                fullP = fullP / (float(ll))
                fullS = fullS / (float(ll))

            completeMetricDict[experimentName] = (fullP, fullS)

            plotMetricsDict[experimentName] = experimentDict

            print experimentName
            print "===================="
            print "Precision: ", fullP
            print "Success: ", fullS

            for key, value in experimentDict.iteritems():
                print key
                print value[4], value[5]

            print "===================="

            e = Evaluated(plotMetricsDict[experimentName],
                          completeMetricDict[experimentName], experimentName)

            e.save(savePath + "/" + experimentName + ".p")
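Each Evaluated object is pickled to savePath + "/" + experimentName + ".p". The save format is not shown in these examples, but assuming a plain pickle dump (as the .p extension and the loadPickle call in example #3 suggest), the files could be read back roughly like this:

import pickle

# Hypothetical loader; assumes Evaluated.save() is a plain pickle dump.
def loadEvaluated(path):
    with open(path, 'rb') as f:
        return pickle.load(f)

e = loadEvaluated("./Metrics/lambda_SE.p")  # hypothetical path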
Code example #6
    def evaluate(self,
                 n=1000,
                 successAndPrecisionPlotName='',
                 histogramPlot=''):
        '''
        Evaluate every tracker run for the default, SRE and TRE experiment
        types and collect the precision/success curves for plotting.
        '''

        listGT = self.dataset.data

        pr_x_list = list()
        pr_y_list = list()

        sc_x_list = list()
        sc_y_list = list()

        experimentNames = list()

        defaultExpList = list()
        sreExpList = list()
        treExpList = list()

        for listRun, name in zip(self.listOfExperiments, self.experimentNames):
            runs = listRun.data
            experimentNames.append(name)

            defaultExpList.append(runs['default'])
            sreExpList.append(runs['SRE'])
            treExpList.append(runs['TRE'])

        allExpList = list()
        allExpList.append(defaultExpList)
        allExpList.append(sreExpList)
        allExpList.append(treExpList)

        pr_x_all_list = list()
        pr_y_all_list = list()

        sc_x_all_list = list()
        sc_y_all_list = list()

        for exp in allExpList:  # one iteration per experiment type
            e = Evaluator(self.dataset, exp)

            pr_x_list = list()
            pr_y_list = list()

            sc_x_list = list()
            sc_y_list = list()

            measures_specific_list = list()
            for listRun in exp:  # different tracker runs
                (precision_x, precision_y, success_x,
                 success_y) = e.evaluateSingleTracker(listRun, n)

                pr_x_list.append(precision_x)
                pr_y_list.append(precision_y)

                sc_x_list.append(success_x)
                sc_y_list.append(success_y)

            pr_x_all_list.append(pr_x_list)
            pr_y_all_list.append(pr_y_list)

            sc_x_all_list.append(sc_x_list)
            sc_y_all_list.append(sc_y_list)
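The lookups above (runs['default'], runs['SRE'], runs['TRE']) and the run.data['TRE'].data assignments in example #3 imply a common shape for a pickled run. A rough mock of what these loops assume; the real classes in brixen/Antrack may differ:

# Hypothetical mock of the structures these loops assume.
class MockExperiment(object):
    def __init__(self, data):
        # data: list of (vidName, listOfRuns) tuples, so videoData[0] is the
        # video name matched against the ground truth and videoData[1] holds
        # the per-run results.
        self.data = data

class MockRun(object):
    def __init__(self):
        self.data = {'default': MockExperiment([]),
                     'SRE': MockExperiment([]),
                     'TRE': MockExperiment([])}
        self.trackerLabel = ''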
Code example #7
File: VisualizeExperiment.py  Project: brixen/Antrack
    def barplot(self, n=1000):
        """Plot bar charts of per-video precision and success for a specific run.

        Args:
            n: number of sample points on each curve

        Returns:
            nothing
        """

        precision = list()
        success = list()

        names = list()
        n_groups = len(self.dataset.data)

        for tracker_data in self.run.data:
            gt_data = [x for x in self.dataset.data if x[0] == tracker_data[0]][0]

        #for gt_data,tracker_data in zip(self.dataset.data,self.run.data):

            if gt_data[0] != tracker_data[0]:
                # Sanity check; the lookup above should make this impossible.
                print "Should not be happening"
                return

            names.append(gt_data[0])
            (x_pr, y_pr, x_s, y_s) = Evaluator.evaluateSingleVideo(tracker_data, gt_data)

            # Normalise the precision integral by the 50-pixel threshold
            # range; the success overlap axis already spans [0, 1].
            p = np.trapz(y_pr, x=x_pr) / 50
            s = np.trapz(y_s, x=x_s)

            p = np.ma.round(p, 2)
            s = np.ma.round(s, 2)

            precision.append(p)
            success.append(s)


        rotation = 90

        xTicksFontSize = 12

        index = np.arange(n_groups)
        plt.figure(figsize=(15,10))
        plt.suptitle(self.run.trackerLabel,fontsize=xTicksFontSize+6)
        plt.subplots_adjust(bottom=0.2)
        plt.subplot(1, 2, 1)


        idx_success = [i[0] for i in sorted(enumerate(success), key=lambda x: x[1])]
        idx_precision = [i[0] for i in sorted(enumerate(precision), key=lambda x: x[1])]

        successTrackerNames = [names[x] for x in idx_success]
        sorted_success = [success[x] for x in idx_success]

        precisionTrackerNames = [names[x] for x in idx_precision]
        sorted_precision = [precision[x] for x in idx_precision]

        plt.xticks(index, successTrackerNames, rotation=rotation, fontsize=xTicksFontSize)
        plt.bar(index, sorted_success, align="center")
        plt.ylim((0, 1))

        plt.yticks(fontsize=xTicksFontSize)
        mean_success = np.round(sum(success) / (1.0 * len(success)), 2)

        plt.title("Success [" + str(mean_success) + "]", fontsize=xTicksFontSize + 4)
        plt.subplot(1, 2, 2)

        plt.bar(index, sorted_precision, align="center")
        plt.xticks(index, precisionTrackerNames, rotation=rotation,fontsize=xTicksFontSize)
        plt.ylim((0, 1))
        plt.yticks(fontsize=xTicksFontSize)
        mean_precision = np.round(sum(sorted_precision) / (1.0 * len(sorted_precision)), 2)
        plt.title("Precision " + "[" + str(mean_precision) + "]", fontsize=xTicksFontSize + 4)


        plt.show()
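The enumerate/sorted idiom used above to order the bars is equivalent to numpy's argsort; a quick self-contained check with made-up scores:

import numpy as np

success = [0.62, 0.31, 0.55]
idx_success = [i[0] for i in sorted(enumerate(success), key=lambda x: x[1])]
print idx_success                # [1, 2, 0]
print list(np.argsort(success))  # [1, 2, 0] -- the same ordering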
Code example #8
    def barplot(self, n=1000, savefile=''):
        """Plot per-video success and precision bars for every experiment
        type in a 3x2 grid; show the figure or save it to savefile."""

        plt.figure(figsize=(13, 9))

        rotation = 90

        xTicksFontSize = 10

        index = 1
        # Imported for its side effect of restyling matplotlib plots.
        import seaborn as sn

        mean_success = 0
        mean_precision = 0

        sList = list()
        pList = list()

        for expName, experiment in self.experiments.data.iteritems():

            print expName

            precision = list()
            success = list()
            names = list()

            for videoData in experiment.data:

                gt = [x for x in self.dataset.data if x[0] == videoData[0]][0]

                p = 0
                s = 0
                names.append(videoData[0])

                # Average precision/success over every run of this video.
                for expRunIndex in range(0, len(videoData[1])):
                    (x_pr, y_pr, x_s, y_s) = Evaluator.evaluateSingleVideo(videoData, gt,
                                                                           experimentNumber=expRunIndex, n=n)

                    p1 = np.ma.round(np.trapz(y_pr, x=x_pr) / 51, 2)
                    s1 = np.ma.round(np.trapz(y_s, x=x_s), 2)

                    p = p + p1
                    s = s + s1

                    # Only the SRE/TRE runs count towards the overall score.
                    if expName != 'default':
                        sList.append(s1)
                        pList.append(p1)

                p = p / float(len(videoData[1]))
                s = s / float(len(videoData[1]))

                precision.append(p)
                success.append(s)

            # Sort videos by the combined score so both bar charts share an order.
            bothMetrics = [x + y for x, y in zip(success, precision)]
            # barplot precision
            n_groups = len(self.dataset.data)

            indexPlot = np.arange(n_groups)

            if index == 1:
                ax1 = plt.subplot(3, 2, index)
            else:
                plt.subplot(3, 2, index)

            idx_sorted = [i[0] for i in sorted(enumerate(bothMetrics), key=lambda x: x[1])]

            successTrackerNames = [names[x] for x in idx_sorted]
            sorted_success = [success[x] for x in idx_sorted]

            precisionTrackerNames = [names[x] for x in idx_sorted]
            sorted_precision = [precision[x] for x in idx_sorted]

            plt.xticks(indexPlot, successTrackerNames, rotation=rotation, fontsize=xTicksFontSize)

            plt.bar(indexPlot, sorted_success, align="center")

            plt.ylim((0, 1))

            plt.yticks(fontsize=xTicksFontSize)

            if expName != 'default':
                mean_success = mean_success + np.round(sum(success) / (1.0 * len(success)), 2)
                mean_precision = mean_precision + np.round(sum(precision) / (1.0 * len(precision)), 2)
            #plt.title("Success " + "[" + str(mean_success) + "]", fontsize=xTicksFontSize + 4)

            index = index + 1
            plt.subplot(3, 2, index)

            plt.xticks(indexPlot, precisionTrackerNames, rotation=rotation, fontsize=xTicksFontSize)
            plt.bar(indexPlot, sorted_precision, align="center")
            plt.ylim((0, 1))

            plt.yticks(fontsize=xTicksFontSize)
            if index == 2:
                ax2 = plt.subplot(3, 2, index)
            else:
                plt.subplot(3, 2, index)

            # Label the row with the experiment name on a secondary axis.
            ax3 = plt.twinx()
            ax3.set_ylabel(expName, color='black', fontsize=xTicksFontSize + 4)
            ax3.grid(b=False)

            # barplot success
            index = index + 1


        sFinal = np.round(sum(sList) / float(len(sList)), 2)
        pFinal = np.round(sum(pList) / float(len(pList)), 2)

        ax1.set_title(
            "Success " + "[" + str(sFinal) + "] / " + self.experiments.data['default'].trackerLabel,
            fontsize=xTicksFontSize + 4)
        ax2.set_title("Precision " + "[" + str(pFinal) + "] / " + self.experiments.data[
            'default'].trackerLabel, fontsize=xTicksFontSize + 4)


        plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)

        if savefile == '':
            plt.show()
        else:
            plt.savefig(savefile)