class FScoreCurve(DependentAnalysis):
    """Derives an F-score curve from a precision/recall curve dependency.

    For every threshold point of the underlying PR curve the F-measure
    F_beta = (1 + beta^2) * Pr * Re / (beta^2 * Pr + Re) is computed.
    """

    # Weighting of recall vs. precision in the F-measure (beta = 1 gives F1).
    beta = Float(default=1)
    prcurve = Include(PrecisionRecallCurve)

    @property
    def name(self):
        return "Tracking precision/recall"

    def describe(self):
        return Plot("Tracking F-score curve", "F", wrt="normalized threshold", minimal=0, maximal=1), None

    def compatible(self, experiment: Experiment):
        # Only meaningful for unsupervised (no-reset) experiments.
        return isinstance(experiment, UnsupervisedExperiment)

    def dependencies(self):
        return self.prcurve,

    def join(self, experiment: Experiment, trackers: List[Tracker], sequences: List[Sequence], results):
        """Convert each tracker's PR curve into an F-score curve.

        results[0] holds one entry per tracker; entry[0] is a sequence of
        (pr, re) pairs, one per threshold.
        """
        processed_results = []

        beta2 = self.beta * self.beta  # loop-invariant, hoisted out of the loop

        for result in results[0]:
            # Define F = 0 when both precision and recall are zero at a
            # threshold instead of raising ZeroDivisionError (0/0 case).
            f_curve = [((1 + beta2) * pr_ * re_) / (beta2 * pr_ + re_)
                       if (beta2 * pr_ + re_) > 0 else 0.0
                       for pr_, re_ in result[0]]

            # NOTE(review): PrecisionRecall.join reads these pairs as
            # (re, pr), the opposite of the unpacking order used here. With
            # the default beta=1 the formula is symmetric so results agree,
            # but for beta != 1 one of the two orderings should be confirmed
            # and fixed.
            processed_results.append((f_curve, result[0][1]))

        return processed_results

    def axes(self):
        return Axis.TRACKERS,
# Beispiel #2
# 0
class AverageAccuracy(SequenceAggregator):
    """Aggregates per-sequence accuracy into a single average per tracker."""

    analysis = Include(AverageAccuracyPerSequence)
    # When True, each sequence contributes proportionally to its length
    # (frame-weighted mean); otherwise every sequence counts equally.
    weighted = Boolean(default=True)

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiRunExperiment)

    @property
    def title(self):
        # Fixed typo in user-facing label: was "Average accurarcy".
        return "Average accuracy"

    def dependencies(self):
        return self.analysis,

    def describe(self):
        return Measure("Accuracy", "AUC", 0, 1, Sorting.DESCENDING),

    def aggregate(self, _: Tracker, sequences: List[Sequence], results: Grid):
        """Average the per-sequence accuracies, skipping missing results.

        Returns a one-element tuple with the (optionally frame-weighted)
        mean accuracy; 0 when no sequence produced a result.
        """
        accuracy = 0
        frames = 0

        for i, sequence in enumerate(sequences):
            if results[i] is None:
                # Sequence was not processed for this tracker; skip it.
                continue

            weight = len(sequence) if self.weighted else 1
            accuracy += results[i][0] * weight
            frames += weight

        # All results missing: report zero accuracy instead of raising
        # ZeroDivisionError.
        if frames == 0:
            return 0,

        return accuracy / frames,
# Beispiel #3
# 0
class EAOScoreMultiStart(DependentAnalysis):
    """Summarizes a multi-start EAO curve into a single expected average
    overlap score over the configured [low, high] sequence-length interval."""

    low = Integer()
    high = Integer()
    eaocurve = Include(EAOCurveMultiStart)

    @property
    def title(self):
        return "EAO analysis"

    def describe(self):
        return Measure("Expected average overlap", "EAO", minimal=0, maximal=1, direction=Sorting.DESCENDING),

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiStartExperiment)

    def dependencies(self):
        return self.eaocurve,

    def join(self, experiment: Experiment, trackers: List[Tracker], sequences: List[Sequence], results: List[Grid]):
        """Average each tracker's EAO curve over the interval; trackers
        without a curve are left unset in the output grid."""
        joined = Grid((len(trackers), ))

        for index, curve in enumerate(results[0]):
            if curve is None:
                continue
            interval = curve[0][self.low:self.high + 1]
            joined[index] = (float(np.mean(interval)), )

        return joined

    def axes(self):
        return Axis.TRACKERS,
# Beispiel #4
# 0
class PrecisionRecall(DependentAnalysis):
    """Reports precision, recall and F-score at the F-optimal threshold,
    combining the PR-curve and F-score-curve dependencies."""

    prcurve = Include(PrecisionRecallCurve)
    fcurve = Include(FScoreCurve)

    @property
    def title(self):
        return "Tracking precision/recall"

    def describe(self):
        return Measure("Precision", "Pr", minimal=0, maximal=1, direction=Sorting.DESCENDING), \
             Measure("Recall", "Re", minimal=0, maximal=1, direction=Sorting.DESCENDING), \
             Measure("F Score", "F", minimal=0, maximal=1, direction=Sorting.DESCENDING)

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, UnsupervisedExperiment)

    def dependencies(self):
        return self.prcurve, self.fcurve

    def join(self, experiment: Experiment, trackers: List[Tracker],
             sequences: List[Sequence], results):
        """For every tracker, locate the threshold with the highest F-score
        and report (precision, recall, F) at that threshold."""

        joined = []

        # results[0]: PR curves, results[1]: matching F-score curves.
        for f_curve, pr_curve in zip(results[1], results[0]):
            scores = f_curve[0]
            # Argmax over thresholds; first occurrence wins on ties, matching
            # list.index semantics.
            best = max(range(len(scores)), key=scores.__getitem__)
            point = pr_curve[0][best]
            joined.append((point[1], point[0], scores[best]))

        return joined

    def axes(self):
        return Axis.TRACKERS,
# Beispiel #5
# 0
class FailureCount(SequenceAggregator):
    """Totals the failure counts over all sequences for a tracker."""

    analysis = Include(FailuresPerSequence)

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, SupervisedExperiment)

    def dependencies(self):
        return self.analysis,

    @property
    def title(self):
        return "Number of failures"

    def describe(self):
        return Measure("Failures", "F", 0, None, Sorting.ASCENDING),

    def aggregate(self, _: Tracker, sequences: List[Sequence], results: Grid):
        """Return a one-element tuple with the summed per-sequence failures."""
        total = sum(entry[0] for entry in results)
        return total,
# Beispiel #6
# 0
class EAOScore(DependentAnalysis):
    """Summarizes the EAO curve into a single expected average overlap
    score over the configured [low, high] sequence-length interval."""

    eaocurve = Include(EAOCurve)
    low = Integer()
    high = Integer()

    @property
    def title(self):
        return "EAO analysis"

    def describe(self):
        return Measure("Expected average overlap", "EAO", 0, 1, Sorting.DESCENDING),

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, SupervisedExperiment)

    def dependencies(self):
        return self.eaocurve,

    def join(self, experiment: Experiment, trackers: List[Tracker], sequences: List[Sequence], results: List[Grid]):
        """Average each tracker's EAO curve over the interval."""
        scores = []
        for curve in results[0]:
            interval = curve[0][self.low:self.high + 1]
            scores.append((float(np.mean(interval)), ))
        return scores

    def axes(self):
        return Axis.TRACKERS,