Example #1
    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:

        thresholds = dependencies[0, 0][0][0]  # threshold list produced by the dependency analysis

        trajectories = experiment.gather(tracker, sequence)

        if len(trajectories) == 0:
            raise MissingResultsException()

        # accumulate per-threshold precision and recall over all runs
        precision = [0.0] * len(thresholds)
        recall = [0.0] * len(thresholds)
        for trajectory in trajectories:
            confidence = [
                trajectory.properties(i).get('confidence', 0)
                for i in range(len(trajectory))
            ]
            pr, re = compute_tpr_curves(trajectory.regions(), confidence,
                                        sequence, thresholds,
                                        self.ignore_unknown, self.bounded)
            for i in range(len(thresholds)):
                precision[i] += pr[i]
                recall[i] += re[i]


        return [(pr / len(trajectories), re / len(trajectories))
                for pr, re in zip(precision, recall)], thresholds
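A quick standalone sketch of the averaging step above, using made-up numbers rather than toolkit data: each trajectory contributes one precision and one recall value per threshold, and the returned curve is the element-wise mean over trajectories, paired as (precision, recall).

    # Hypothetical per-run curves: two trajectories, two thresholds.
    per_run_pr = [[0.9, 0.8], [0.7, 0.6]]
    per_run_re = [[0.5, 0.6], [0.7, 0.8]]

    runs = len(per_run_pr)
    precision = [sum(run[i] for run in per_run_pr) / runs for i in range(2)]
    recall = [sum(run[i] for run in per_run_re) / runs for i in range(2)]
    curve = list(zip(precision, recall))  # roughly [(0.8, 0.6), (0.7, 0.7)]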
Example #2
    def compute_partial(self, tracker: Tracker, experiment: Experiment,
                        sequence: Sequence):
        trajectories = experiment.gather(tracker, sequence)

        if len(trajectories) == 0:
            raise MissingResultsException()

        failures = 0
        for trajectory in trajectories:
            failures += count_failures(trajectory.regions())

        # average failures per run, plus the length of the first trajectory
        return failures / len(trajectories), len(trajectories[0])
Example #3
    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:

        assert isinstance(experiment, SupervisedExperiment)

        trajectories = experiment.gather(tracker, sequence)

        if len(trajectories) == 0:
            raise MissingResultsException()

        failures = 0
        for trajectory in trajectories:
            # count_failures returns a tuple; the first element is the failure count
            failures += count_failures(trajectory.regions())[0]

        return failures / len(trajectories), len(trajectories[0])
Example #4
    def compute_partial(self, tracker: Tracker, experiment: Experiment,
                        sequence: Sequence):
        trajectories = experiment.gather(tracker, sequence)

        if len(trajectories) == 0:
            raise MissingResultsException()

        accuracy = 0
        failures = 0
        for trajectory in trajectories:
            failures += count_failures(trajectory.regions())[0]
            accuracy += compute_accuracy(trajectory.regions(), sequence,
                                         self._burnin, self._ignore_unknown,
                                         self._bounded)[0]

        return (accuracy / len(trajectories), failures / len(trajectories),
                len(trajectories[0]))
Example #5
    def compute_partial(self, tracker: Tracker, experiment: Experiment,
                        sequence: Sequence):

        if isinstance(experiment, MultiRunExperiment):
            trajectories = experiment.gather(tracker, sequence)

            if len(trajectories) == 0:
                raise MissingResultsException()

            cumulative = 0
            for trajectory in trajectories:
                accuracy, _ = compute_accuracy(trajectory.regions(), sequence,
                                               self._burnin,
                                               self._ignore_unknown,
                                               self._bounded)
                cumulative += accuracy

            return cumulative / len(trajectories), len(sequence)
Example #6
    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:

        assert isinstance(experiment, MultiRunExperiment)

        trajectories = experiment.gather(tracker, sequence)

        if len(trajectories) == 0:
            raise MissingResultsException()

        cumulative = 0
        for trajectory in trajectories:
            accuracy, _ = compute_accuracy(trajectory.regions(), sequence,
                                           self.burnin, self.ignore_unknown,
                                           self.bounded)
            cumulative += accuracy

        return cumulative / len(trajectories),
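Note the trailing comma in the return statement above: `return x,` builds a one-element tuple, matching the `Tuple[Any]` annotation; the same convention recurs in Examples #7 and #9 below. A minimal illustration:

    average = 0.87
    result = average,              # equivalent to (average,)
    assert result == (0.87,) and result[0] == average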
Example #7
    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:

        scores_all = []
        trajectories = experiment.gather(tracker, sequence)

        if len(trajectories) == 0:
            raise MissingResultsException(
                "Missing results for sequence {}".format(sequence.name))

        for trajectory in trajectories:
            confidence = [
                trajectory.properties(i).get('confidence', 0)
                for i in range(len(trajectory))
            ]
            scores_all.extend(confidence)

        return scores_all,
Example #8
    def compute_measure(self, tracker: Tracker, experiment: Experiment):
        from vot.region.utils import calculate_overlaps

        overlaps_all = []
        weights_all = []
        success_all = []

        for sequence in experiment.workspace.dataset:

            trajectories = experiment.gather(tracker, sequence)

            if len(trajectories) == 0:
                raise MissingResultsException()

            for trajectory in trajectories:

                overlaps = calculate_overlaps(
                    trajectory.regions(), sequence.groundtruth(),
                    sequence.size if self._bounded else None)
                fail_idxs, init_idxs = locate_failures_inits(
                    trajectory.regions())

                if len(fail_idxs) > 0:

                    for i in range(len(fail_idxs)):
                        overlaps_all.append(
                            overlaps[init_idxs[i]:fail_idxs[i]])
                        success_all.append(False)
                        weights_all.append(1)

                    # handle last initialization
                    if len(init_idxs) > len(fail_idxs):
                        # tracker was initialized but did not fail until the end of the sequence
                        overlaps_all.append(overlaps[init_idxs[-1]:])
                        success_all.append(True)
                        weights_all.append(1)

                else:
                    overlaps_all.append(overlaps)
                    success_all.append(True)
                    weights_all.append(1)

        return compute_eao(overlaps_all, weights_all, success_all,
                           self._interval_low, self._interval_high)[0]
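The segmentation logic in the loop above can be shown in isolation. The sketch below uses hypothetical overlap values and indices (in the real code they come from calculate_overlaps and locate_failures_inits): each (initialization, failure) index pair yields one unsuccessful segment, and a trailing initialization with no matching failure yields one successful segment running to the end of the sequence.

    overlaps = [0.8, 0.7, 0.0, 0.0, 0.9, 0.6, 0.5]  # hypothetical per-frame overlaps
    init_idxs, fail_idxs = [0, 4], [2]              # re-initialized at frame 4 after failing at frame 2

    segments, successes = [], []
    for init, fail in zip(init_idxs, fail_idxs):
        segments.append(overlaps[init:fail])        # run ends at the failure
        successes.append(False)
    if len(init_idxs) > len(fail_idxs):             # last run reached the end
        segments.append(overlaps[init_idxs[-1]:])
        successes.append(True)
    # segments == [[0.8, 0.7], [0.9, 0.6, 0.5]], successes == [False, True]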
Example #9
    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequences: List[Sequence],
                   dependencies: List[Grid]) -> Tuple[Any]:

        overlaps_all = []
        weights_all = []
        success_all = []

        for sequence in sequences:

            trajectories = experiment.gather(tracker, sequence)

            if len(trajectories) == 0:
                raise MissingResultsException()

            for trajectory in trajectories:

                overlaps = calculate_overlaps(
                    trajectory.regions(), sequence.groundtruth(),
                    sequence.size if self.bounded else None)
                fail_idxs, init_idxs = locate_failures_inits(
                    trajectory.regions())

                if len(fail_idxs) > 0:

                    for i in range(len(fail_idxs)):
                        overlaps_all.append(
                            overlaps[init_idxs[i]:fail_idxs[i]])
                        success_all.append(False)
                        weights_all.append(1)

                    # handle last initialization
                    if len(init_idxs) > len(fail_idxs):
                        # tracker was initialized but did not fail until the end of the sequence
                        overlaps_all.append(overlaps[init_idxs[-1]:])
                        success_all.append(True)
                        weights_all.append(1)

                else:
                    overlaps_all.append(overlaps)
                    success_all.append(True)
                    weights_all.append(1)

        return compute_eao_curve(overlaps_all, weights_all, success_all),
Example #10
    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        trajectories = experiment.gather(tracker, sequence)

        if len(trajectories) == 0:
            raise MissingResultsException()

        accuracy = 0
        failures = 0
        for trajectory in trajectories:
            failures += count_failures(trajectory.regions())[0]
            accuracy += compute_accuracy(trajectory.regions(), sequence,
                                         self.burnin, self.ignore_unknown,
                                         self.bounded)[0]

        # A-R pair: robustness maps the per-run failure rate through a decaying
        # exponential; the second element is the average accuracy over runs
        ar = (math.exp(-(float(failures) / len(trajectories)) *
                       self.sensitivity), accuracy / len(trajectories))

        return (accuracy / len(trajectories), failures / len(trajectories),
                ar, len(trajectories[0]))
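The `ar` pair above squashes the raw failure count into a bounded robustness score (the snippet assumes `math` is imported at module level). A sketch with hypothetical numbers, taking a sensitivity of 30:

    import math

    failures, runs, sensitivity = 3.0, 15, 30
    # exp(-failure_rate * sensitivity): 1.0 when there are no failures,
    # decaying toward 0 as failures accumulate
    robustness = math.exp(-(failures / runs) * sensitivity)  # exp(-6) ~ 0.0025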
Example #11
    def compute_measure(self, tracker: Tracker, experiment: Experiment):

        # calculate thresholds
        total_scores = 0
        for sequence in experiment.workspace.dataset:
            trajectories = experiment.gather(tracker, sequence)
            for trajectory in trajectories:
                total_scores += len(trajectory)

        # allocate memory for all scores
        scores_all = [0.0] * total_scores

        idx = 0
        for sequence in experiment.workspace.dataset:
            trajectories = experiment.gather(tracker, sequence)
            for trajectory in trajectories:
                conf_ = [
                    trajectory.properties(i).get('confidence', 0)
                    for i in range(len(trajectory))
                ]
                scores_all[idx:idx + len(conf_)] = conf_
                idx += len(conf_)

        thresholds = determine_thresholds(scores_all, self._resolution)

        # calculate per-sequence Precision and Recall curves
        pr_curves = []
        re_curves = []

        for sequence in experiment.workspace.dataset:

            trajectories = experiment.gather(tracker, sequence)

            if len(trajectories) == 0:
                raise MissingResultsException()

            pr = [0.0] * len(thresholds)
            re = [0.0] * len(thresholds)
            for trajectory in trajectories:
                conf_ = [
                    trajectory.properties(i).get('confidence', 0)
                    for i in range(len(trajectory))
                ]
                pr_, re_ = compute_tpr_curves(trajectory.regions(), conf_,
                                              sequence, thresholds,
                                              self._ignore_unknown,
                                              self._bounded)
                pr = [p1 + p2 for p1, p2 in zip(pr, pr_)]
                re = [r1 + r2 for r1, r2 in zip(re, re_)]

            pr = [p1 / len(trajectories) for p1 in pr]
            re = [r1 / len(trajectories) for r1 in re]

            pr_curves.append(pr)
            re_curves.append(re)

        # average the per-sequence Pr-Re curves into single Precision, Recall
        # and F-score curves for the given tracker
        pr_curve = [0.0] * len(thresholds)
        re_curve = [0.0] * len(thresholds)

        for i in range(len(thresholds)):
            for j in range(len(pr_curves)):
                pr_curve[i] += pr_curves[j][i]
                re_curve[i] += re_curves[j][i]

        pr_curve = [pr_ / len(pr_curves) for pr_ in pr_curve]
        re_curve = [re_ / len(re_curves) for re_ in re_curve]
        # harmonic mean of precision and recall; guard the degenerate case
        # where both are zero at a threshold
        f_curve = [(2 * pr_ * re_) / (pr_ + re_) if pr_ + re_ > 0 else 0.0
                   for pr_, re_ in zip(pr_curve, re_curve)]

        # get optimal F-score and Pr and Re at this threshold
        f_score = max(f_curve)
        best_i = f_curve.index(f_score)
        pr_score = pr_curve[best_i]
        re_score = re_curve[best_i]

        return pr_score, re_score, f_score
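The final selection step, isolated with hypothetical curves: the reported operating point is the threshold index that maximizes the F-score, with precision and recall read off at that same index.

    pr_curve = [0.9, 0.8, 0.6]  # hypothetical averaged precision curve
    re_curve = [0.3, 0.5, 0.7]  # hypothetical averaged recall curve

    f_curve = [2 * p * r / (p + r) for p, r in zip(pr_curve, re_curve)]
    best = max(range(len(f_curve)), key=f_curve.__getitem__)
    pr_score, re_score, f_score = pr_curve[best], re_curve[best], f_curve[best]
    # best == 2, f_score == 2 * 0.6 * 0.7 / 1.3 ~ 0.646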
Example #12
    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequences: List[Sequence]):

        # calculate thresholds
        total_scores = 0
        for sequence in sequences:
            trajectories = experiment.gather(tracker, sequence)

            if len(trajectories) == 0:
                raise MissingResultsException(
                    "Missing results for sequence {}".format(sequence.name))

            for trajectory in trajectories:
                total_scores += len(trajectory)

        # allocate memory for all scores
        scores_all = [0.0] * total_scores

        idx = 0
        for sequence in sequences:
            trajectories = experiment.gather(tracker, sequence)
            for trajectory in trajectories:
                conf_ = [
                    trajectory.properties(i).get('confidence', 0)
                    for i in range(len(trajectory))
                ]
                scores_all[idx:idx + len(conf_)] = conf_
                idx += len(conf_)

        thresholds = determine_thresholds(scores_all, self.resolution)

        # calculate per-sequence Precision and Recall curves
        pr_curves = []
        re_curves = []

        for sequence in sequences:

            trajectories = experiment.gather(tracker, sequence)

            if len(trajectories) == 0:
                raise MissingResultsException()

            pr = [0.0] * len(thresholds)
            re = [0.0] * len(thresholds)
            for trajectory in trajectories:
                conf_ = [
                    trajectory.properties(i).get('confidence', 0)
                    for i in range(len(trajectory))
                ]
                pr_, re_ = compute_tpr_curves(trajectory.regions(), conf_,
                                              sequence, thresholds,
                                              self.ignore_unknown,
                                              self.bounded)
                pr = [p1 + p2 for p1, p2 in zip(pr, pr_)]
                re = [r1 + r2 for r1, r2 in zip(re, re_)]

            pr = [p1 / len(trajectories) for p1 in pr]
            re = [r1 / len(trajectories) for r1 in re]

            pr_curves.append(pr)
            re_curves.append(re)

        # average the per-sequence Pr-Re curves into a single curve for the
        # given tracker
        pr_curve = [0.0] * len(thresholds)
        re_curve = [0.0] * len(thresholds)

        for i, _ in enumerate(thresholds):
            for j, _ in enumerate(pr_curves):
                pr_curve[i] += pr_curves[j][i]
                re_curve[i] += re_curves[j][i]

        # one (recall, precision) point per threshold, averaged over sequences
        curve = [(re / len(pr_curves), pr / len(pr_curves))
                 for pr, re in zip(pr_curve, re_curve)]

        return curve, thresholds