class SequenceAccuracy(SeparableAnalysis):
    """Per-sequence average overlap (accuracy) for multi-run experiments.

    The accuracy of every repetition is computed with a burn-in period after
    initialization and the result is averaged over all available repetitions.
    """

    burnin = Integer(default=10, val_min=0)  # frames ignored after initialization
    ignore_unknown = Boolean(default=True)  # skip frames with unknown state
    bounded = Boolean(default=True)  # clip overlap computation to image bounds

    def compatible(self, experiment: Experiment):
        """Only multi-run experiments provide repeated trajectories."""
        return isinstance(experiment, MultiRunExperiment)

    @property
    def title(self):
        # Fixed typo in display title ("accurarcy" -> "accuracy").
        return "Sequence accuracy"

    def describe(self):
        return Measure("Accuracy", "AUC", 0, 1, Sorting.DESCENDING),

    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        """Average the per-repetition accuracy for a single sequence.

        Raises:
            MissingResultsException: if no trajectories are stored for the
                tracker/sequence combination.
        """
        assert isinstance(experiment, MultiRunExperiment)

        trajectories = experiment.gather(tracker, sequence)
        if len(trajectories) == 0:
            raise MissingResultsException()

        cumulative = 0
        for trajectory in trajectories:
            accuracy, _ = compute_accuracy(trajectory.regions(), sequence,
                                           self.burnin, self.ignore_unknown,
                                           self.bounded)
            cumulative = cumulative + accuracy

        return cumulative / len(trajectories),
class PrecisionRecallCurves(SeparableAnalysis):
    """Tracking precision/recall curve over confidence thresholds.

    For every threshold, per-trajectory precision and recall are computed and
    averaged over all repetitions of a sequence.
    """

    thresholds = Include(_Thresholds)  # dependency analysis producing threshold list
    ignore_unknown = Boolean(default=True)  # skip frames with unknown state
    bounded = Boolean(default=True)  # clip overlap computation to image bounds

    @property
    def title(self):
        return "Tracking precision/recall"

    def describe(self):
        return Curve("Precision Recall curve", dimensions=2, abbreviation="PR",
                     minimal=(0, 0), maximal=(1, 1),
                     labels=("Recall", "Precision")), None

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, UnsupervisedExperiment)

    def dependencies(self):
        return self.thresholds,

    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        """Compute the averaged PR points for one sequence.

        Returns:
            A tuple of (list of per-threshold points, thresholds).

        Raises:
            MissingResultsException: if no trajectories are available.
        """
        # Unwrap the threshold list from the single dependency grid.
        thresholds = dependencies[0, 0][0][0]

        trajectories = experiment.gather(tracker, sequence)
        if len(trajectories) == 0:
            raise MissingResultsException()

        precision = len(thresholds) * [float(0)]
        recall = len(thresholds) * [float(0)]
        for trajectory in trajectories:
            # Missing per-frame confidence defaults to 0.
            confidence = [trajectory.properties(i).get('confidence', 0)
                          for i in range(len(trajectory))]
            pr, re = compute_tpr_curves(trajectory.regions(), confidence,
                                        sequence, thresholds,
                                        self.ignore_unknown, self.bounded)
            for i in range(len(thresholds)):
                precision[i] += pr[i]
                recall[i] += re[i]

        # NOTE(review): points are emitted as (precision, recall) while
        # describe() labels the axes ("Recall", "Precision") -- confirm the
        # consumer's expected ordering.
        return [(pr / len(trajectories), re / len(trajectories))
                for pr, re in zip(precision, recall)], thresholds
class Stack(Attributee):
    """A named, workspace-bound collection of experiments."""

    title = String()
    dataset = String(default="")
    url = String(default="")
    deprecated = Boolean(default=False)
    experiments = Map(Object(experiment_resolver))

    def __init__(self, name: str, workspace: "Workspace", **kwargs):
        # Bind identity before attribute parsing in the base constructor.
        self._workspace = workspace
        self._name = name
        super().__init__(**kwargs)

    @property
    def workspace(self):
        """Workspace this stack is attached to."""
        return self._workspace

    @property
    def name(self):
        """Name under which the stack is registered."""
        return self._name

    def __iter__(self):
        """Iterate over the contained experiment objects."""
        yield from self.experiments.values()

    def __len__(self):
        """Number of experiments in the stack."""
        return len(self.experiments)

    def __getitem__(self, identifier):
        """Retrieve an experiment by its identifier."""
        return self.experiments[identifier]
class AverageAccuracy(SequenceAggregator):
    """Aggregates per-sequence accuracy into a single average score.

    When weighted, each sequence contributes proportionally to its length;
    otherwise every sequence contributes equally.
    """

    analysis = Include(SequenceAccuracy)  # per-sequence accuracy dependency
    weighted = Boolean(default=True)  # weight sequences by their length

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiRunExperiment)

    @property
    def title(self):
        # Fixed typo in display title ("accurarcy" -> "accuracy").
        return "Average accuracy"

    def dependencies(self):
        return self.analysis,

    def describe(self):
        return Measure("Accuracy", "AUC", 0, 1, Sorting.DESCENDING),

    def aggregate(self, _: Tracker, sequences: List[Sequence], results: Grid):
        """Combine per-sequence accuracies into one (weighted) mean.

        Sequences without results are skipped.
        """
        accuracy = 0
        frames = 0
        for i, sequence in enumerate(sequences):
            if results[i, 0] is None:
                continue  # no result stored for this sequence
            if self.weighted:
                accuracy += results[i, 0][0] * len(sequence)
                frames += len(sequence)
            else:
                accuracy += results[i, 0][0]
                frames += 1
        # NOTE(review): raises ZeroDivisionError if every sequence is missing
        # results -- confirm whether callers guarantee at least one result.
        return accuracy / frames,
class AccuracyRobustness(SeparableAnalysis):
    """Accuracy-robustness (AR) analysis for supervised experiments."""

    sensitivity = Float(default=30, val_min=1)
    burnin = Integer(default=10, val_min=0)
    ignore_unknown = Boolean(default=True)
    bounded = Boolean(default=True)

    @property
    def title(self):
        return "AR analysis"

    def describe(self):
        return Measure("Accuracy", "A", minimal=0, maximal=1, direction=Sorting.DESCENDING), \
            Measure("Robustness", "R", minimal=0, direction=Sorting.ASCENDING), \
            Point("AR plot", dimensions=2, abbreviation="AR", minimal=(0, 0), \
            maximal=(1, 1), labels=("Robustness", "Accuracy"), trait="ar"), \
            None

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, SupervisedExperiment)

    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        """Accumulate accuracy and failure counts over all repetitions.

        Raises:
            MissingResultsException: if no trajectories are available.
        """
        trajectories = experiment.gather(tracker, sequence)
        if len(trajectories) == 0:
            raise MissingResultsException()

        total_accuracy = 0
        total_failures = 0
        for run in trajectories:
            regions = run.regions()
            total_failures += count_failures(regions)[0]
            total_accuracy += compute_accuracy(regions, sequence, self.burnin,
                                               self.ignore_unknown,
                                               self.bounded)[0]

        count = len(trajectories)
        mean_accuracy = total_accuracy / count
        mean_failures = float(total_failures) / count
        # AR point: robustness axis decays exponentially with failure rate.
        ar = (math.exp(-mean_failures * self.sensitivity), mean_accuracy)

        return mean_accuracy, mean_failures, ar, len(trajectories[0])
class MultiRunExperiment(Experiment):
    """Experiment that evaluates a tracker multiple times on every sequence.

    Repetitions are stored as trajectories named "<sequence>_<run index>".
    """

    repetitions = Integer(val_min=1, default=1)
    early_stop = Boolean(default=True)

    def _can_stop(self, tracker: Tracker, sequence: Sequence):
        """Return True when enough identical runs exist to skip further ones."""
        if not self.early_stop:
            return False
        trajectories = self.gather(tracker, sequence)
        if len(trajectories) < 3:
            return False
        reference = trajectories[0]
        # Deterministic tracker: all runs equal the first one.
        return all(candidate.equals(reference) for candidate in trajectories[1:])

    def scan(self, tracker: Tracker, sequence: Sequence):
        """Check which repetitions are already present.

        Returns:
            A (complete, files, results) triple, where complete indicates
            that no further runs are required.
        """
        results = self.results(tracker, sequence)

        files = []
        complete = True
        for repeat in range(1, self.repetitions + 1):
            name = "%s_%03d" % (sequence.name, repeat)
            if Trajectory.exists(results, name):
                files.extend(Trajectory.gather(results, name))
                continue
            if self._can_stop(tracker, sequence):
                break
            complete = False
            break

        return complete, files, results

    def gather(self, tracker: Tracker, sequence: Sequence):
        """Read every stored trajectory for the tracker/sequence pair."""
        results = self.results(tracker, sequence)
        names = ["%s_%03d" % (sequence.name, repeat)
                 for repeat in range(1, self.repetitions + 1)]
        return [Trajectory.read(results, name)
                for name in names if Trajectory.exists(results, name)]
class EAOCurve(TrackerSeparableAnalysis):
    """Expected Average Overlap curve for supervised (reset-based) experiments."""

    burnin = Integer(default=10, val_min=0)
    bounded = Boolean(default=True)

    @property
    def title(self):
        return "EAO Curve"

    def describe(self):
        return Plot("Expected Average Overlap", "EAO", minimal=0, maximal=1, trait="eao"),

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, SupervisedExperiment)

    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequences: List[Sequence], dependencies: List[Grid]) -> Tuple[Any]:
        """Split every run into per-initialization overlap segments.

        Raises:
            MissingResultsException: if any sequence has no trajectories.
        """
        segments = []   # overlap subsequences, one per (re)initialization
        weights = []
        successes = []  # whether the segment ended without failure

        for sequence in sequences:
            trajectories = experiment.gather(tracker, sequence)
            if len(trajectories) == 0:
                raise MissingResultsException()

            bounds = (sequence.size) if self.bounded else None
            groundtruth = sequence.groundtruth()

            for trajectory in trajectories:
                regions = trajectory.regions()
                overlaps = calculate_overlaps(regions, groundtruth, bounds)
                fail_idxs, init_idxs = locate_failures_inits(regions)

                if len(fail_idxs) > 0:
                    # One failed segment per init/failure pair (inits >= fails).
                    for init, fail in zip(init_idxs, fail_idxs):
                        segments.append(overlaps[init:fail])
                        successes.append(False)
                        weights.append(1)
                    if len(init_idxs) > len(fail_idxs):
                        # Final initialization ran to the end of the sequence.
                        segments.append(overlaps[init_idxs[-1]:])
                        successes.append(True)
                        weights.append(1)
                else:
                    # No failures at all: whole run is a single segment.
                    segments.append(overlaps)
                    successes.append(True)
                    weights.append(1)

        return compute_eao_curve(segments, weights, successes),
class AccuracyRobustness(SeparableAnalysis):
    """Accuracy-robustness analysis for multi-start (anchor-based) experiments.

    Every anchor spawns one run (backward anchors run on a reversed frame
    range); a run fails once the overlap stays at or below the threshold for
    `grace` consecutive non-empty groundtruth frames.
    """

    burnin = Integer(default=10, val_min=0)
    grace = Integer(default=10, val_min=0)  # tolerated low-overlap frames before failure
    bounded = Boolean(default=True)  # clip overlap computation to image bounds
    threshold = Float(default=0.1, val_min=0, val_max=1)  # failure overlap threshold

    @property
    def title(self):
        return "AR Analysis"

    def describe(self):
        return Measure("Accuracy", "A", minimal=0, maximal=1, direction=Sorting.DESCENDING), \
            Measure("Robustness", "R", minimal=0, direction=Sorting.DESCENDING), \
            Point("AR plot", dimensions=2, abbreviation="AR", minimal=(0, 0),
                  maximal=(1, 1), labels=("Robustness", "Accuracy"), trait="ar"), \
            None, None

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiStartExperiment)

    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        """Compute accuracy, robustness and the AR point for one sequence.

        Raises:
            RuntimeError: if the sequence defines no anchors.
            MissingResultsException: if any anchor run is missing.
        """
        results = experiment.results(tracker, sequence)

        forward, backward = find_anchors(sequence, experiment.anchor)
        if not forward and not backward:
            raise RuntimeError("Sequence does not contain any anchors")

        robustness = 0  # total successfully tracked frames
        accuracy = 0    # total overlap over tracked frames
        total = 0       # total frames over all runs
        for i, reverse in [(f, False) for f in forward] + [(f, True) for f in backward]:
            name = "%s_%08d" % (sequence.name, i)
            if not Trajectory.exists(results, name):
                raise MissingResultsException()

            # Backward anchors are evaluated on the reversed frame range.
            if reverse:
                proxy = FrameMapSequence(sequence, list(reversed(range(0, i + 1))))
            else:
                proxy = FrameMapSequence(sequence, list(range(i, sequence.length)))

            trajectory = Trajectory.read(results, name)

            # BUGFIX: bounds were gated on self.burnin (an integer frame
            # count); the declared `bounded` flag is the intended switch and
            # was previously unused (cf. the supervised EAO analysis).
            overlaps = calculate_overlaps(trajectory.regions(), proxy.groundtruth(),
                                          (proxy.size) if self.bounded else None)

            grace = self.grace
            progress = len(proxy)

            for j, overlap in enumerate(overlaps):
                if overlap <= self.threshold and not proxy.groundtruth(j).is_empty():
                    grace = grace - 1
                    if grace == 0:
                        # Subtract the grace window to locate the actual failure point.
                        progress = j + 1 - self.grace
                        break
                else:
                    grace = self.grace

            robustness += progress  # simplified: len(proxy) * (progress / len(proxy))
            accuracy += sum(overlaps[0:progress])
            total += len(proxy)

        ar = (robustness / total, accuracy / robustness if robustness > 0 else 0)

        return accuracy / robustness if robustness > 0 else 0, \
            robustness / total, ar, robustness, len(sequence)
class EAOCurves(SeparableAnalysis):
    """Expected average overlap curve for multi-start experiments.

    Each anchor produces one (possibly reversed) run; runs that fail before
    the end of their segment are zero-padded and marked unsuccessful.
    """

    burnin = Integer(default=10, val_min=0)
    grace = Integer(default=10, val_min=0)  # tolerated low-overlap frames before failure
    bounded = Boolean(default=True)  # clip overlap computation to image bounds
    threshold = Float(default=0.1, val_min=0, val_max=1)  # failure overlap threshold
    high = Integer()  # curve length bound passed to compute_eao_partial

    @property
    def title(self):
        return "EAO Curve"

    def describe(self):
        return Plot("Expected average overlap", "EAO", minimal=0, maximal=1,
                    wrt="frames", trait="eao"),

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiStartExperiment)

    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        """Collect per-run overlap curves for one sequence.

        Raises:
            RuntimeError: if the sequence defines no anchors.
            MissingResultsException: if any anchor run is missing.
        """
        results = experiment.results(tracker, sequence)

        forward, backward = find_anchors(sequence, experiment.anchor)
        if len(forward) == 0 and len(backward) == 0:
            raise RuntimeError("Sequence does not contain any anchors")

        overlaps_all = []
        success_all = []

        for i, reverse in [(f, False) for f in forward] + [(f, True) for f in backward]:
            name = "%s_%08d" % (sequence.name, i)

            if not Trajectory.exists(results, name):
                raise MissingResultsException()

            # Backward anchors are evaluated on the reversed frame range.
            if reverse:
                proxy = FrameMapSequence(sequence, list(reversed(range(0, i + 1))))
            else:
                proxy = FrameMapSequence(sequence, list(range(i, sequence.length)))

            trajectory = Trajectory.read(results, name)

            # BUGFIX: bounds were gated on self.burnin (an integer frame
            # count); the declared `bounded` flag is the intended switch and
            # was previously unused (cf. the supervised EAO analysis).
            overlaps = calculate_overlaps(trajectory.regions(), proxy.groundtruth(),
                                          proxy.size if self.bounded else None)

            grace = self.grace
            progress = len(proxy)

            for j, overlap in enumerate(overlaps):
                if overlap <= self.threshold and not proxy.groundtruth(j).is_empty():
                    grace = grace - 1
                    if grace == 0:
                        # Subtract the grace window to locate the actual failure point.
                        progress = j + 1 - self.grace
                        break
                else:
                    grace = self.grace

            success = True
            if progress < len(overlaps):
                # Tracker failed during this run: zero out the remainder.
                overlaps[progress:] = (len(overlaps) - progress) * [float(0)]
                success = False

            overlaps_all.append(overlaps)
            success_all.append(success)

        return compute_eao_partial(overlaps_all, success_all, self.high), 1
class MultiStartFragments(SeparableAnalysis):
    """Per-anchor fragment analysis for multi-start experiments.

    For every anchor the analysis records the relative anchor position along
    the sequence together with the fraction of the run that was tracked
    (success) and the mean overlap of the run (accuracy).
    """

    burnin = Integer(default=10, val_min=0)
    grace = Integer(default=10, val_min=0)  # tolerated low-overlap frames before failure
    bounded = Boolean(default=True)  # clip overlap computation to image bounds
    threshold = Float(default=0.1, val_min=0, val_max=1)  # failure overlap threshold

    @property
    def title(self):
        return "Fragment Analysis"

    def describe(self):
        return Curve("Success", 2, "Sc", minimal=(0, 0), maximal=(1, 1), trait="points"), \
            Curve("Accuracy", 2, "Ac", minimal=(0, 0), maximal=(1, 1), trait="points")

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiStartExperiment)

    def subcompute(self, experiment: Experiment, tracker: Tracker,
                   sequence: Sequence, dependencies: List[Grid]) -> Tuple[Any]:
        """Compute success and accuracy point lists for one sequence.

        Raises:
            RuntimeError: if the sequence defines no anchors.
            MissingResultsException: if any anchor run is missing.
        """
        results = experiment.results(tracker, sequence)

        forward, backward = find_anchors(sequence, experiment.anchor)
        if not forward and not backward:
            raise RuntimeError("Sequence does not contain any anchors")

        accuracy = []
        success = []

        for i, reverse in [(f, False) for f in forward] + [(f, True) for f in backward]:
            name = "%s_%08d" % (sequence.name, i)
            if not Trajectory.exists(results, name):
                raise MissingResultsException()

            # Backward anchors are evaluated on the reversed frame range.
            if reverse:
                proxy = FrameMapSequence(sequence, list(reversed(range(0, i + 1))))
            else:
                proxy = FrameMapSequence(sequence, list(range(i, sequence.length)))

            trajectory = Trajectory.read(results, name)

            # BUGFIX: bounds were gated on self.burnin (an integer frame
            # count); the declared `bounded` flag is the intended switch and
            # was previously unused (cf. the supervised EAO analysis).
            overlaps = calculate_overlaps(trajectory.regions(), proxy.groundtruth(),
                                          (proxy.size) if self.bounded else None)

            grace = self.grace
            progress = len(proxy)

            for j, overlap in enumerate(overlaps):
                if overlap <= self.threshold and not proxy.groundtruth(j).is_empty():
                    grace = grace - 1
                    if grace == 0:
                        # Subtract the grace window to locate the actual failure point.
                        progress = j + 1 - self.grace
                        break
                else:
                    grace = self.grace

            success.append((i / len(sequence), progress / len(proxy)))
            # BUGFIX: the closing parenthesis was misplaced, which divided the
            # overlap LIST by an int (TypeError at runtime); the summed
            # overlaps must be divided by the run length instead.
            accuracy.append((i / len(sequence), sum(overlaps[0:progress]) / len(proxy)))

        return success, accuracy