class Experiment(Attributee):
    """Abstract base class for a tracker evaluation experiment.

    An experiment binds together optional realtime/noise/injection
    configuration, a chain of sequence transformers and a list of analyses.
    Concrete subclasses implement :meth:`execute` and :meth:`scan`.
    """

    realtime = Nested(RealtimeConfig, default=None)
    noise = Nested(NoiseConfig, default=None)
    inject = Nested(InjectConfig, default=None)
    transformers = List(Object(transformer_resolver), default=[])
    analyses = List(Object(analysis_resolver), default=[])

    def __init__(self, _identifier: str, _storage: "LocalStorage", **kwargs):
        """Create an experiment.

        Args:
            _identifier: Unique name of the experiment.
            _storage: Storage backend used to read/write results and logs.
            **kwargs: Attributee configuration forwarded to the base class.
        """
        self._identifier = _identifier
        self._storage = _storage
        super().__init__(**kwargs)
        # TODO: validate analysis names

    @property
    def identifier(self) -> str:
        return self._identifier

    @property
    def storage(self) -> "Storage":
        return self._storage

    def _get_initialization(self, sequence: "Sequence", index: int):
        # Default initialization region is the ground-truth at the given frame.
        return sequence.groundtruth(index)

    def _get_runtime(self, tracker: "Tracker", sequence: "Sequence"):
        """Return a tracker runtime, wrapped for realtime simulation if configured."""
        if self.realtime is not None:  # was `not ... is None`; `is not None` is the idiomatic form
            grace = to_number(self.realtime.grace, min_n=0)
            fps = to_number(self.realtime.fps, min_n=0, conversion=float)
            # Frame interval from sequence metadata, falling back to configured fps.
            interval = 1 / float(sequence.metadata("fps", fps))
            runtime = RealtimeTrackerRuntime(tracker.runtime(), grace, interval)
        else:
            runtime = tracker.runtime()
        return runtime

    @abstractmethod
    def execute(self, tracker: "Tracker", sequence: "Sequence", force: bool = False, callback: typing.Callable = None):
        """Run the experiment for a tracker on a sequence (implemented by subclasses)."""
        raise NotImplementedError

    @abstractmethod
    def scan(self, tracker: "Tracker", sequence: "Sequence"):
        """Check which results already exist for a tracker/sequence pair."""
        raise NotImplementedError

    def results(self, tracker: "Tracker", sequence: "Sequence") -> "Results":
        """Results container for the given tracker and sequence within this experiment."""
        return self._storage.results(tracker, self, sequence)

    def log(self, identifier: str):
        """Open a timestamped log file handle in the 'logs' substorage."""
        return self._storage.substorage("logs").write(
            "{}_{:%Y-%m-%dT%H-%M-%S.%f%z}.log".format(identifier, datetime.now()))

    def transform(self, sequence: "Sequence"):
        """Apply all configured transformers to the sequence, in order."""
        for transformer in self.transformers:
            sequence = transformer(sequence)
        return sequence
class SupervisedExperiment(MultiRunExperiment):
    """Supervised (reset-based) experiment: the tracker is re-initialized after
    every detected failure, following the classic VOT evaluation protocol."""

    # Number of frames to skip after a failure before re-initializing.
    skip_initialize = Integer(val_min=1, default=1)
    # Frame tags on which re-initialization is postponed (e.g. occlusion).
    skip_tags = List(String(), default=[])
    # Overlap at or below this value is treated as tracking failure.
    failure_overlap = Float(val_min=0, val_max=1, default=0)

    def execute(self, tracker: Tracker, sequence: Sequence, force: bool = False, callback: Callable = None):
        """Run the supervised experiment, writing one trajectory per repetition.

        Existing trajectories are skipped unless `force` is set; `callback`
        (if given) receives fractional progress after each repetition.
        """
        results = self.results(tracker, sequence)
        with self._get_runtime(tracker, sequence) as runtime:
            for i in range(1, self.repetitions+1):
                name = "%s_%03d" % (sequence.name, i)
                if Trajectory.exists(results, name) and not force:
                    continue
                # _can_stop presumably implements early termination for
                # deterministic trackers — confirm in MultiRunExperiment.
                if self._can_stop(tracker, sequence):
                    return
                trajectory = Trajectory(sequence.length)
                frame = 0
                # Outer loop: each iteration (re-)initializes the tracker at `frame`.
                while frame < sequence.length:
                    _, properties, elapsed = runtime.initialize(sequence.frame(frame), self._get_initialization(sequence, frame))
                    properties["time"] = elapsed
                    trajectory.set(frame, Special(Special.INITIALIZATION), properties)
                    frame = frame + 1
                    # Inner loop: track until failure or end of sequence.
                    while frame < sequence.length:
                        region, properties, elapsed = runtime.update(sequence.frame(frame))
                        properties["time"] = elapsed
                        if calculate_overlap(region, sequence.groundtruth(frame), sequence.size) <= self.failure_overlap:
                            # Failure: mark frame, skip ahead, then break to re-initialize.
                            trajectory.set(frame, Special(Special.FAILURE), properties)
                            frame = frame + self.skip_initialize
                            if self.skip_tags:
                                # Advance past frames carrying any skip tag before restarting.
                                while frame < sequence.length:
                                    if not [t for t in sequence.tags(frame) if t in self.skip_tags]:
                                        break
                                    frame = frame + 1
                            break
                        else:
                            trajectory.set(frame, region, properties)
                            frame = frame + 1
                if callback:
                    callback(i / self.repetitions)
                trajectory.write(results, name)
class ReportConfiguration(Attributee):
    """Configuration of report generation for a workspace."""

    # Visual style (colors, markers, ...) applied to generated report elements.
    style = Nested(StyleManager)
    # Strategy used to order trackers in report tables and plots.
    sort = Nested(TrackerSorter)
    # Additional report generator plugins; empty by default.
    generators = List(Object(subclass=Generator), default=[])
class AttributeMultiStart(SequenceAveragingAnalysis):
    """Per-attribute accuracy/robustness analysis for multi-start experiments.

    For every configured tag the analysis reports weighted accuracy,
    robustness and the number of frames carrying that tag.
    """

    burnin = Integer(default=10, val_min=0)
    grace = Integer(default=10, val_min=0)
    bounded = Boolean(default=True)
    threshold = Float(default=0.1, val_min=0, val_max=1)
    tags = List(String())

    @property
    def name(self):
        return "AR per-attribute analysis"

    def describe(self):
        """Return the (accuracy, robustness, count) measure triple per tag."""
        accuracy = [
            Measure("Accuracy: " + t, "A " + t, minimal=0, maximal=1, direction=Sorting.DESCENDING)
            for t in self.tags
        ]
        # FIX: label was misspelled "Robutsness" and lacked the separator used
        # by the accuracy label; made consistent ("Robustness: <tag>").
        robustness = [
            Measure("Robustness: " + t, "R " + t, minimal=0, maximal=1, direction=Sorting.DESCENDING)
            for t in self.tags
        ]
        length = [None] * len(self.tags)
        return tuple(
            functools.reduce(
                operator.add,
                [[a, r, n] for a, r, n in zip(accuracy, robustness, length)]))

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiStartExperiment)

    def collapse(self, tracker: Tracker, sequences: typing.List[Sequence], results: typing.List[tuple]):
        """Aggregate per-sequence results into per-tag weighted averages."""
        accuracy = Counter()
        robustness = Counter()
        attribute_total = Counter()
        for seq_acc, seq_rob, seq_attr_count in results:
            for t in seq_attr_count:
                # Weight each sequence contribution by its tag frequency.
                accuracy[t] += (seq_acc[t] if t in seq_acc else 0) * seq_attr_count[t]
                robustness[t] += seq_rob * seq_attr_count[t]
                attribute_total[t] += seq_attr_count[t]
        accuracy = [accuracy[t] / attribute_total[t] for t in self.tags]
        robustness = [robustness[t] / attribute_total[t] for t in self.tags]
        length = [attribute_total[t] for t in self.tags]
        return tuple(
            functools.reduce(
                operator.add,
                [[a, r, n] for a, r, n in zip(accuracy, robustness, length)]))

    def subcompute(self, experiment: Experiment, tracker: Tracker, sequence: Sequence):
        """Compute per-tag accuracy, overall robustness and tag counts for one sequence."""
        results = experiment.results(tracker, sequence)
        forward, backward = find_anchors(sequence, experiment.anchor)
        if len(forward) == 0 and len(backward) == 0:
            raise RuntimeError("Sequence does not contain any anchors")
        accuracy_ = Counter()
        tags_count_ = Counter()
        robustness_ = 0
        total_ = 0
        for i, reverse in [(f, False) for f in forward] + [(f, True) for f in backward]:
            name = "%s_%08d" % (sequence.name, i)
            if not Trajectory.exists(results, name):
                raise MissingResultsException()
            if reverse:
                proxy = FrameMapSequence(sequence, list(reversed(range(0, i + 1))))
            else:
                proxy = FrameMapSequence(sequence, list(range(i, sequence.length)))
            trajectory = Trajectory.read(results, name)
            # FIX: bounds were gated on `self.burnin` (an integer frame count);
            # the declared `bounded` flag was never used. Use `bounded` to decide
            # whether overlaps are clipped to the image region.
            overlaps = calculate_overlaps(trajectory.regions(), proxy.groundtruth(),
                                          proxy.size if self.bounded else None)
            grace = self.grace
            progress = len(proxy)
            # Failure detection: `grace` consecutive below-threshold frames
            # (on non-empty groundtruth) count as a failure.
            for j, overlap in enumerate(overlaps):
                if overlap <= self.threshold and not proxy.groundtruth(j).is_empty():
                    grace = grace - 1
                    if grace == 0:
                        progress = j + 1 - self.grace  # subtract since we need actual point of the failure
                        break
                else:
                    grace = self.grace
            for j in range(progress):
                overlap = overlaps[j]
                tags = proxy.tags(j)
                if len(tags) == 0:
                    tags = ['empty']
                for t in tags:
                    accuracy_[t] += overlap
                    tags_count_[t] += 1
            robustness_ += progress
            total_ += len(proxy)
        seq_robustness = robustness_ / total_
        seq_accuracy = {}
        for t in accuracy_:
            seq_accuracy[t] = accuracy_[t] / tags_count_[t]
        # calculate weights for each attribute
        attribute_counter = Counter()
        for frame_idx in range(len(sequence)):
            tags = sequence.tags(frame_idx)
            if len(tags) == 0:
                tags = ['empty']
            for t in tags:
                attribute_counter[t] += 1
        return seq_accuracy, seq_robustness, attribute_counter
class AttributeDifficultyLevelMultiStart(SequenceAveragingAnalysis):
    """Per-attribute difficulty analysis for multi-start experiments.

    For each tag, reports the weighted fraction of tagged frames that were
    tracked without failing within `fail_interval` frames, plus the tag count.
    """

    burnin = Integer(default=10, val_min=0)
    grace = Integer(default=10, val_min=0)
    bounded = Boolean(default=True)
    threshold = Float(default=0.1, val_min=0, val_max=1)
    fail_interval = Integer(default=30, val_min=1)
    tags = List(String())

    @property
    def name(self):
        return "Attribute difficulty"

    def describe(self):
        """Return one difficulty measure per tag, followed by per-tag counts."""
        return tuple([
            Measure(t, t, minimal=0, maximal=1, direction=Sorting.DESCENDING)
            for t in self.tags
        ] + [None] * len(self.tags))

    def compatible(self, experiment: Experiment):
        return isinstance(experiment, MultiStartExperiment)

    def collapse(self, tracker: Tracker, sequences: typing.List[Sequence], results: typing.List[tuple]):
        """Aggregate per-sequence difficulty into per-tag weighted averages."""
        attribute_difficulty = Counter()
        attribute_counter = Counter()
        for seq_tags_not_failed, seq_tags_count, seq_attr_count in results:
            for tag in seq_tags_count:
                if tag in seq_tags_not_failed:
                    seq_attr_difficulty = seq_tags_not_failed[tag] / seq_tags_count[tag]
                else:
                    seq_attr_difficulty = 0
                # Weight sequence difficulty by the tag frequency in that sequence.
                attribute_difficulty[tag] += seq_attr_difficulty * seq_attr_count[tag]
                attribute_counter[tag] += seq_attr_count[tag]
        return tuple([
            attribute_difficulty[tag] / attribute_counter[tag]
            for tag in self.tags
        ] + [attribute_counter[tag] for tag in self.tags])

    def subcompute(self, experiment: Experiment, tracker: Tracker, sequence: Sequence):
        """Compute per-tag not-failed/total counts and tag frequencies for one sequence."""
        results = experiment.results(tracker, sequence)
        forward, backward = find_anchors(sequence, experiment.anchor)
        if len(forward) == 0 and len(backward) == 0:
            raise RuntimeError("Sequence does not contain any anchors")
        tags_count = Counter()
        tags_not_failed = Counter()
        for i, reverse in [(f, False) for f in forward] + [(f, True) for f in backward]:
            name = "%s_%08d" % (sequence.name, i)
            if not Trajectory.exists(results, name):
                raise MissingResultsException()
            if reverse:
                proxy = FrameMapSequence(sequence, list(reversed(range(0, i + 1))))
            else:
                proxy = FrameMapSequence(sequence, list(range(i, sequence.length)))
            trajectory = Trajectory.read(results, name)
            # FIX: bounds were gated on `self.burnin` (an integer frame count);
            # the declared `bounded` flag was never used. Use `bounded` to decide
            # whether overlaps are clipped to the image region.
            overlaps = calculate_overlaps(trajectory.regions(), proxy.groundtruth(),
                                          proxy.size if self.bounded else None)
            grace = self.grace
            progress = len(proxy)
            # Failure detection: `grace` consecutive below-threshold frames
            # (on non-empty groundtruth) count as a failure.
            for j, overlap in enumerate(overlaps):
                if overlap <= self.threshold and not proxy.groundtruth(j).is_empty():
                    grace = grace - 1
                    if grace == 0:
                        progress = j + 1 - self.grace  # subtract since we need actual point of the failure
                        break
                else:
                    grace = self.grace
            for j in range(progress):
                tags = proxy.tags(j)
                if len(tags) == 0:
                    tags = ['empty']
                for t in tags:
                    tags_count[t] += 1
                    # A frame counts as "not failed" if no failure occurred, or it
                    # lies more than fail_interval frames before the failure point.
                    if progress == len(proxy) or j < progress - self.fail_interval:
                        tags_not_failed[t] += 1
        attribute_counter = Counter()
        for frame_idx in range(len(sequence)):
            tags = sequence.tags(frame_idx)
            if len(tags) == 0:
                tags = ['empty']
            for t in tags:
                attribute_counter[t] += 1
        return tags_not_failed, tags_count, attribute_counter
class Workspace(Attributee):
    """A VOT evaluation workspace: a directory with configuration, a dataset,
    a tracker registry and a results storage."""

    registry = List(
        String(transformer=lambda x, ctx: normalize_path(
            x, ctx["parent"].directory)))
    stack = StackLoader()
    sequences = String(default="sequences")
    report = Nested(ReportConfiguration)

    @staticmethod
    def initialize(directory, config=None, download=False):
        """Create the workspace skeleton (config, sequence/result dirs).

        Args:
            directory: Target workspace directory.
            config: Optional configuration mapping written to config.yaml.
            download: When True, attempt to download the stack's dataset.

        Raises:
            WorkspaceException: If the workspace is already initialized.
        """
        config_file = os.path.join(directory, "config.yaml")
        if os.path.isfile(config_file):
            raise WorkspaceException("Workspace already initialized")
        os.makedirs(directory, exist_ok=True)
        with open(config_file, 'w') as fp:
            yaml.dump(config if config is not None else dict(), fp)
        os.makedirs(os.path.join(directory, "sequences"), exist_ok=True)
        os.makedirs(os.path.join(directory, "results"), exist_ok=True)
        if not os.path.isfile(os.path.join(directory, "trackers.ini")):
            # Touch an empty tracker registry file.
            open(os.path.join(directory, "trackers.ini"), 'w').close()
        # FIX: removed a leftover `download = False` assignment that
        # unconditionally disabled the download path, turning the `download`
        # parameter into a no-op.
        if download:
            # Try to retrieve dataset from stack and download it.
            # NOTE(review): this path reads config["stack"] and therefore
            # assumes a non-None config when download=True — confirm callers.
            stack_file = resolve_stack(config["stack"], directory)
            dataset_directory = normalize_path(
                config.get("sequences", "sequences"), directory)
            if stack_file is None:
                return
            dataset = None
            with open(stack_file, 'r') as fp:
                stack_metadata = yaml.load(fp, Loader=yaml.BaseLoader)
                dataset = stack_metadata["dataset"]
            if dataset:
                Workspace.download_dataset(dataset, dataset_directory)

    @staticmethod
    def download_dataset(dataset, directory):
        """Download a dataset unless a list.txt marker already exists."""
        if os.path.exists(os.path.join(directory, "list.txt")):
            return False
        from vot.dataset import download_dataset
        download_dataset(dataset, directory)
        logger.info("Download completed")

    @staticmethod
    def load(directory):
        """Load an existing workspace from its config.yaml.

        Raises:
            WorkspaceException: If the directory is not an initialized workspace.
        """
        directory = normalize_path(directory)
        config_file = os.path.join(directory, "config.yaml")
        if not os.path.isfile(config_file):
            raise WorkspaceException("Workspace not initialized")
        with open(config_file, 'r') as fp:
            config = yaml.load(fp, Loader=yaml.BaseLoader)
        return Workspace(directory, **config)

    def __init__(self, directory, **kwargs):
        self._directory = directory
        # Fall back to a no-op storage when no directory is given.
        self._storage = LocalStorage(directory) if directory is not None else VoidStorage()
        super().__init__(**kwargs)
        dataset_directory = normalize_path(self.sequences, directory)
        if self.stack.dataset is not None:  # was `not ... is None`
            Workspace.download_dataset(self.stack.dataset, dataset_directory)
        self._dataset = VOTDataset(dataset_directory)

    @property
    def directory(self) -> str:
        return self._directory

    @property
    def dataset(self) -> Dataset:
        return self._dataset

    @property
    def storage(self) -> LocalStorage:
        return self._storage

    def cache(self, identifier) -> LocalStorage:
        """Return a cache substorage keyed by a string or an object's class name."""
        if not isinstance(identifier, str):
            identifier = class_fullname(identifier)
        return self._storage.substorage("cache").substorage(identifier)

    def list_results(self, registry: "Registry"):
        """Resolve result folder names against the tracker registry."""
        references = self._storage.substorage("results").folders()
        return registry.resolve(*references)