Example #1
def update_frame_error_result_metric_properties():
    # Start by finding the metric ids
    start_time = time.time()
    logging.getLogger(__name__).info(
        "Updating 'metric_properties' for all FrameErrorResult objects ...")
    logging.getLogger(__name__).info("Loading set of referenced metric IDs...")
    metric_ids = FrameErrorResult._mongometa.collection.distinct('metric')

    # Autoload the metric types
    logging.getLogger(__name__).info(
        f"Autoloading metric types ({time.time() - start_time}s) ...")
    autoload_modules(Metric, ids=metric_ids)

    logging.getLogger(__name__).info(
        f"Found {len(metric_ids)} metrics, updating information "
        f"({time.time() - start_time}s) ...")
    n_updated = 0
    for metric_id in metric_ids:
        metric = Metric.objects.get({'_id': metric_id})
        metric_properties = metric.get_properties()
        n_updated += FrameErrorResult.objects.raw({
            'metric': metric_id
        }).update(
            {
                '$set': {
                    'metric_properties': {
                        str(k): json_value(v)
                        for k, v in metric_properties.items()
                    }
                }
            },
            upsert=False)
    logging.getLogger(__name__).info(
        f"Updated {n_updated} FrameErrorResult objects in {time.time() - start_time}s")
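This example and Examples #9 and #15 below share the same migration shape: read the distinct foreign keys straight from the raw collection, autoload the referenced model types so pymodm can instantiate them, then issue one bulk $set per referenced document. A minimal sketch of that shape, using a hypothetical SomeResult/Owner pair in place of the real models (imports elided, as in the surrounding examples):

def update_owner_properties():
    # Hypothetical models: SomeResult holds an 'owner' reference field and a
    # denormalized 'owner_properties' dict; neither name is from this codebase.
    owner_ids = SomeResult._mongometa.collection.distinct('owner')
    autoload_modules(Owner, ids=owner_ids)  # make the subclasses importable
    for owner_id in owner_ids:
        owner = Owner.objects.get({'_id': owner_id})
        SomeResult.objects.raw({'owner': owner_id}).update(
            {'$set': {'owner_properties': {
                str(k): json_value(v)
                for k, v in owner.get_properties().items()
            }}},
            upsert=False)  # update existing documents only, never insert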
Example #2
    def load_referenced_models(self) -> None:
        """
        Load the result type so we can save the task
        :return:
        """
        with no_auto_dereference(ImportDatasetTask):
            if isinstance(self.result, bson.ObjectId):
                # result is an id and not a model, autoload the model
                autoload_modules(ImageSource, [self.result])
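The no_auto_dereference guard is the point of this method: inside it, self.result is read as stored (an ObjectId, or a model if one was assigned) rather than triggering a dereference, which would raise if the concrete subclass's module had not been imported yet. Example #8 shows the matching read path, which dereferences only after the autoload.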
Example #3
def invalidate_systems_by_name(system_names: typing.Iterable[str]) -> int:
    system_names = list(system_names)
    autoload_modules(VisionSystem, classes=system_names)
    removed = VisionSystem.objects.raw({
        '_cls': {
            '$in': system_names
        }
    }).delete()
    logging.getLogger(__name__).info("removed {0} systems".format(removed))
    return removed
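Unlike the id-based calls elsewhere on this page, this variant selects modules to load by class name: classes=system_names matches the stored _cls discriminator, mirroring the {'_cls': {'$in': system_names}} delete query that follows.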
Example #4
def print_available_experiments() -> None:
    """
    Print out the list of valid experiment ids. These will be human-readable names
    :return: Nothing
    """
    autoload_modules(Experiment)  # To actually get all the experiments, pymodm needs to know their types.
    print("Valid experiments are:")
    for obj in Experiment.objects.all().only('_id').values():
        print("  {0}".format(obj['_id']))
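Here autoload_modules(Experiment) is called with no ids at all; per the inline comment, the point is simply to import every Experiment subclass so pymodm can resolve the stored documents. Since the query projects only '_id' and returns raw dicts via .values(), no full model instances are built.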
Example #5
def invalidate_trial_results(
        trial_result_ids: typing.Iterable[bson.ObjectId]) -> int:
    trial_result_ids = list(trial_result_ids)
    autoload_modules(TrialResult, trial_result_ids)
    removed = TrialResult.objects.raw({
        '_id': {
            '$in': trial_result_ids
        }
    }).delete()
    logging.getLogger(__name__).info(
        "removed {0} trial results".format(removed))
    return removed
Example #6
def invalidate_metric_results(
        metric_result_ids: typing.Iterable[bson.ObjectId]) -> int:
    metric_result_ids = list(metric_result_ids)
    autoload_modules(MetricResult, metric_result_ids)
    removed = MetricResult.objects.raw({
        '_id': {
            '$in': metric_result_ids
        }
    }).delete()
    logging.getLogger(__name__).info(
        "removed {0} metric results".format(removed))
    return removed
Example #7
def get_columns_for_plots(
        plot_ids: typing.Iterable[bson.ObjectId]) -> typing.Set[str]:
    """
    For a set of plot ids, get the required columns for those plots.
    Handles auto-loading the plot types, and the plot objects themselves will go out of scope once this helper completes
    :param plot_ids: The list of plot ids to search for
    :return: A combined set of the required columns for all the given plots
    """
    plot_ids = list(plot_ids)
    autoload_modules(Plot, ids=plot_ids)
    plots = Plot.objects.raw({'_id': {'$in': plot_ids}})
    columns = set(column for plot in plots
                  for column in plot.get_required_columns())
    return columns
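As the docstring says, the Plot objects are dereferenced once, mined for their required columns, and then dropped when the helper returns, so only the combined set of column names survives. Example #17 below relies on this helper to decide whether an experiment has any plot data worth caching.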
Example #8
    def get_result(self) -> typing.Union[ImageSource, None]:
        """
        Actually get the result object.
        This will auto-load the result model, and then attempt to construct it.
        :return:
        """
        with no_auto_dereference(ImportDatasetTask):
            if self.result is None:
                return None
            if isinstance(self.result, bson.ObjectId):
                # result is an id and not a model, autoload the model
                autoload_modules(ImageSource, [self.result])
        # This will now dereference correctly
        return self.result
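The two-phase structure is what makes this safe: the isinstance check and the autoload_modules call happen inside no_auto_dereference, where self.result is still the raw ObjectId, and the final return self.result happens outside it, where attribute access dereferences normally, as the closing comment notes.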
Example #9
def update_frame_errors_system_properties():
    # Start with the frame error result, which will tell us which trial results have been measured
    start_time = time.time()
    logging.getLogger(__name__).info(
        "Updating 'system_properties' for all FrameError objects ...")
    logging.getLogger(__name__).info("Getting set of trial result ids...")
    trial_result_ids = FrameErrorResult._mongometa.collection.distinct(
        'trial_results')

    # Get the unique system ids
    logging.getLogger(__name__).info(
        f"Getting set of system ids ({time.time() - start_time})...")
    system_ids = TrialResult._mongometa.collection.distinct('system')

    # Autoload the types for the trial and systems
    logging.getLogger(__name__).info(
        f"Autoloading types ({time.time() - start_time})...")
    autoload_modules(VisionSystem, ids=system_ids)
    autoload_modules(TrialResult, ids=trial_result_ids)

    # Get the set of system ids used in those trials
    logging.getLogger(__name__).info(
        f"Updating information from {len(trial_result_ids)} trials "
        f"over {len(system_ids)} systems...")
    n_updated = 0
    for system_id in system_ids:
        system = VisionSystem.objects.get({'_id': system_id})
        for trial_result in TrialResult.objects.raw({
                '_id': {
                    '$in': trial_result_ids
                },
                'system': system_id
        }).only('_id', 'settings'):
            system_properties = system.get_properties(None,
                                                      trial_result.settings)
            n_updated += FrameError.objects.raw({
                'trial_result': trial_result.pk
            }).update(
                {
                    '$set': {
                        'system_properties': {
                            str(k): json_value(v)
                            for k, v in system_properties.items()
                        }
                    }
                },
                upsert=False)  # We definitely don't want to upsert
    logging.getLogger(__name__).info(
        f"Updated {n_updated} FrameError objects in {time.time() - start_time}s")
Example #10
def main(task_id: str,
         config_file: str = 'config.yml',
         mongodb_host: str = None,
         mongodb_port: int = None):
    """
    Run a particular task.
    :param task_id: The id of the task to run
    :param config_file: Path to the configuration file
    :param mongodb_host: Optional override for the database host
    :param mongodb_port: Optional override for the database port
    :return:
    """
    task_id = ObjectId(task_id)

    # Load the configuration
    config = load_global_config(config_file)
    if __name__ == '__main__':
        # Only configure the logging if this is the main function, don't reconfigure
        logging.config.dictConfig(config['logging'])

    # Configure the database and the image manager
    dbconn.configure(config['database'],
                     override_host=mongodb_host,
                     override_port=mongodb_port)
    im_manager.configure(config['image_manager'])

    # Set up the path manager
    path_manager = PathManager(paths=config['paths'],
                               temp_folder=config['temp_folder'],
                               output_dir=config.get('output_dir', None))

    # Try and get the task object
    autoload_modules(Task, [task_id])  # Try and autoload the task subclass
    try:
        task = Task.objects.get({'_id': task_id})
    except Exception as ex:
        logging.getLogger(__name__).critical(
            "Exception occurred while loading Task({0}):\n{1}".format(
                str(task_id), traceback.format_exc()))
        raise ex

    # Since we got the task, try and run it
    try:
        task.run_task(path_manager)
    except Exception as ex:
        logging.getLogger(__name__).critical(
            "Exception occurred while running {0}({1}):\n{2}".format(
                type(task).__name__, str(task_id), traceback.format_exc()))
        task.mark_job_failed()
        raise ex
    finally:
        task.save()
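The error handling around run_task is deliberate: a failure is logged with the full traceback and recorded via mark_job_failed before being re-raised, but the finally block saves the task document either way, so the stored task state always reflects what actually happened.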
Example #11
    def load_referenced_modules(self):
        logging.getLogger(__name__).info("Loading referenced models...")
        # Load the metric model
        metric_id = None
        with no_auto_dereference(MeasureTrialTask):
            if isinstance(self.metric, bson.ObjectId):
                metric_id = self.metric
        if metric_id is not None:
            autoload_modules(Metric, [metric_id])

        # Load the trial results models
        with no_auto_dereference(MeasureTrialTask):
            model_ids = list(
                set(tr_id for tr_id in self.trial_results
                    if isinstance(tr_id, bson.ObjectId)))
        if len(model_ids) > 0:
            autoload_modules(TrialResult, model_ids)
Example #12
    def load_referenced_models(self):
        """
        Go through the models referenced by this experiment and ensure their model types have been loaded.
        This is necessary; otherwise accessing any of the reference fields will raise an exception.
        Clean references will access the metric results and plots, so we want to ensure that those models are loaded.
        :return:
        """
        # Load metric result models
        with no_auto_dereference(type(self)):
            model_ids = set(result_id for result_id in self.metric_results
                            if isinstance(result_id, bson.ObjectId))
        if len(model_ids) > 0:
            autoload_modules(MetricResult, ids=list(model_ids))

        # Load plot models
        with no_auto_dereference(type(self)):
            model_ids = set(plot_id for plot_id in self.plots
                            if isinstance(plot_id, bson.ObjectId))
        if len(model_ids) > 0:
            autoload_modules(Plot, ids=list(model_ids))
Example #13
def invalidate_image_collections(
        image_source_ids: typing.Iterable[bson.ObjectId]) -> int:
    """
    Invalidate the data associated with a particular image source.
    This cascades to derived trial results, and from there to benchmark results.
    Also cleans out tasks.
    :param image_source_ids: The ids of the image sources to remove
    :return:
    """
    # Just delete the image collections, reference fields should handle the rest
    image_source_ids = list(image_source_ids)
    autoload_modules(ImageSource, image_source_ids)
    removed = ImageSource.objects.raw({
        '_id': {
            '$in': image_source_ids
        }
    }).delete()
    logging.getLogger(__name__).info(
        "removed {0} image sources".format(removed))
    return removed
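Note that only the ImageSource documents are deleted here; per the docstring and the inline comment, cleanup of the derived trial and benchmark results is left to the reference fields, presumably through delete rules configured on the referencing models.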
Example #14
    def load_referenced_models(self):
        """
        Go through the models referenced by this experiment and ensure their model types have been loaded.
        This is necessary; otherwise accessing any of the reference fields will raise an exception
        :return:
        """
        # Load system models
        with no_auto_dereference(type(self)):
            model_ids = set(sys_id for sys_id in self.systems
                            if isinstance(sys_id, bson.ObjectId))
        if len(model_ids) > 0:
            autoload_modules(VisionSystem, ids=list(model_ids))

        # Load image source models
        with no_auto_dereference(type(self)):
            model_ids = set(source_id for source_id in self.image_sources
                            if isinstance(source_id, bson.ObjectId))
        if len(model_ids) > 0:
            autoload_modules(ImageSource, ids=list(model_ids))

        # Load metric models
        with no_auto_dereference(type(self)):
            model_ids = set(metric_id for metric_id in self.metrics
                            if isinstance(metric_id, bson.ObjectId))
        if len(model_ids) > 0:
            autoload_modules(Metric, ids=list(model_ids))

        # Load the superclass's referenced models last, since they may reference the types we just loaded
        super(SimpleExperiment, self).load_referenced_models()
Example #15
def update_frame_error_result_image_source_properties():
    # Start by finding the image source ids
    start_time = time.time()
    logging.getLogger(__name__).info(
        "Updating 'image_source_properties' for all FrameErrorResult objects ..."
    )
    logging.getLogger(__name__).info(
        "Loading set of referenced image source IDs...")
    image_source_ids = FrameErrorResult._mongometa.collection.distinct(
        'image_source')

    # Autoload the image source type
    logging.getLogger(__name__).info(
        f"Autoloading image source types ({time.time() - start_time}s) ...")
    autoload_modules(ImageSource, ids=image_source_ids)

    logging.getLogger(__name__).info(
        f"Found {len(image_source_ids)} image sources, updating information "
        f"({time.time() - start_time}s) ...")
    n_updated = 0
    for image_source_id in image_source_ids:
        image_source = ImageSource.objects.get({'_id': image_source_id})
        image_source_properties = image_source.get_properties()
        n_updated += FrameErrorResult.objects.raw({
            'image_source': image_source_id
        }).update(
            {
                '$set': {
                    'image_source_properties': {
                        str(k): json_value(v)
                        for k, v in image_source_properties.items()
                    }
                }
            },
            upsert=False)
    logging.getLogger(__name__).info(
        f"Updated {n_updated} FrameErrorResult objects in {time.time() - start_time}s")
Example #16
    def load_referenced_models(self) -> None:
        """
        Load the metric, trial, and result types so we can save the task
        :return:
        """
        with no_auto_dereference(MeasureTrialTask):
            if isinstance(self.metric, bson.ObjectId):
                # The metric is just an ID, we will need the model type loaded to save
                autoload_modules(Metric, [self.metric])
            trials_to_load = [
                trial_id for trial_id in self.trial_results
                if isinstance(trial_id, bson.ObjectId)
            ]
            if len(trials_to_load) > 0:
                autoload_modules(TrialResult, trials_to_load)
            if isinstance(self.result, bson.ObjectId):
                # result is an id and not a model, autoload the model
                autoload_modules(MetricResult, [self.result])
Example #17
    def export_plot_data(experiment_name: str, cache_folder: Path) -> None:
        """
        Export the plot data for a particular experiment to the given folder
        :param experiment_name: The unique name of the experiment
        :param cache_folder: The folder to store the data in
        :return:
        """
        # Autoload the experiment type
        autoload_modules(Experiment, ids=[experiment_name])

        # Get the result ids from the experiment, bypassing the experiment object
        object_partials = list(
            Experiment.objects.raw({
                '_id': experiment_name
            }).only('metric_results', 'plots').values())
        metric_result_ids = [
            result_id for experiment_doc in object_partials
            for result_id in experiment_doc.get('metric_results', [])
        ]
        plot_ids = [
            plot_id for experiment_doc in object_partials
            for plot_id in experiment_doc.get('plots', [])
        ]
        if len(metric_result_ids) <= 0:
            logging.getLogger(__name__).info(
                f"No results for {experiment_name}, maybe re-run update_experiments?"
            )
            return
        logging.getLogger(__name__).info(
            f"Caching plot data for {experiment_name} in {cache_folder} ...")

        # Get the columns
        columns = get_columns_for_plots(plot_ids)
        if len(columns) <= 0:
            # No columns means no data and no plots
            logging.getLogger(__name__).info(
                f"No data to cache for {experiment_name}, are there any plots?"
            )
            return

        # Load the metric result types
        autoload_modules(MetricResult, ids=metric_result_ids)

        # Read all the results
        logging.getLogger(__name__).info(
            f"Accumulating columns [{columns}] over {len(metric_result_ids)} results..."
        )
        data = []
        for result_id in metric_result_ids:
            try:
                metric_result = MetricResult.objects.get({'_id': result_id})
            except MetricResult.DoesNotExist:
                continue
            if metric_result is not None:
                data.extend(metric_result.get_results(columns))
            del metric_result
        data = DataFrame(data)
        logging.getLogger(__name__).info(f"Collected {len(data)} rows.")

        # Save the data to file
        cache_file = Experiment.get_cache_file(cache_folder, experiment_name)
        if not cache_file.parent.exists():
            cache_file.parent.mkdir(parents=True)
        logging.getLogger(__name__).info(f"Saving data to {cache_file}")
        data.to_pickle(str(cache_file))
Example #18
def invalidate_systems(system_ids: typing.Iterable[bson.ObjectId]) -> int:
    system_ids = list(system_ids)
    autoload_modules(VisionSystem, system_ids)
    removed = VisionSystem.objects.raw({'_id': {'$in': system_ids}}).delete()
    logging.getLogger(__name__).info("removed {0} systems".format(removed))
    return removed
Example #19
    def measure_results(
            self,
            trial_results: typing.Iterable[TrialResult]) -> FrameErrorResult:
        """
        Collect the errors
        TODO: Track the error introduced by a loop closure, somehow.
        Might need to track loop closures in the FrameResult
        :param trial_results: The results of several trials to aggregate
        :return:
        :rtype: FrameErrorResult
        """
        trial_results = list(trial_results)

        # preload model types for the models linked to the trial results.
        with no_auto_dereference(SLAMTrialResult):
            model_ids = set(tr.system for tr in trial_results
                            if isinstance(tr.system, bson.ObjectId))
            autoload_modules(VisionSystem, list(model_ids))
            model_ids = set(tr.image_source for tr in trial_results
                            if isinstance(tr.image_source, bson.ObjectId))
            autoload_modules(ImageSource, list(model_ids))

        # Check if the set of trial results is valid. Loads the models.
        invalid_reason = check_trial_collection(trial_results)
        if invalid_reason is not None:
            return MetricResult(metric=self,
                                trial_results=trial_results,
                                success=False,
                                message=invalid_reason)

        # Make sure we have a non-zero number of trials to measure
        if len(trial_results) <= 0:
            return MetricResult(metric=self,
                                trial_results=trial_results,
                                success=False,
                                message="Cannot measure zero trials.")

        # Ensure the trials all have the same number of results
        for repeat, trial_result in enumerate(trial_results[1:]):
            if len(trial_result.results) != len(trial_results[0].results):
                return MetricResult(
                    metric=self,
                    trial_results=trial_results,
                    success=False,
                    message=
                    f"Repeat {repeat + 1} has a different number of frames "
                    f"({len(trial_result.results)} != {len(trial_results[0].results)})"
                )

        # Load the system, it must be the same for all trials (see check_trial_collection)
        system = trial_results[0].system

        # Pre-load the image objects in a batch, to avoid loading them piecemeal later
        images = [image for _, image in trial_results[0].image_source]

        # Build mappings between frame result timestamps and poses for each trial
        timestamps_to_pose = [{
            frame_result.timestamp: frame_result.pose
            for frame_result in trial_result.results
        } for trial_result in trial_results]

        # Choose transforms between each trajectory and the ground truth
        estimate_origins_and_scales = [
            robust_align_trajectory_to_ground_truth(
                [
                    frame_result.estimated_pose
                    for frame_result in trial_result.results
                    if frame_result.estimated_pose is not None
                ], [
                    frame_result.pose for frame_result in trial_result.results
                    if frame_result.estimated_pose is not None
                ],
                compute_scale=not bool(trial_result.has_scale),
                use_symmetric_scale=True) for trial_result in trial_results
        ]
        motion_scales = [1.0] * len(trial_results)
        for idx in range(len(trial_results)):
            if not trial_results[idx].has_scale:
                motion_scales[idx] = robust_compute_motions_scale(
                    [
                        frame_result.estimated_motion
                        for frame_result in trial_results[idx].results
                        if frame_result.estimated_motion is not None
                    ],
                    [
                        frame_result.motion
                        for frame_result in trial_results[idx].results
                        if frame_result.estimated_motion is not None
                    ],
                )

        # Then, tally all the errors for all the computed trajectories
        estimate_errors = [[] for _ in range(len(trial_results))]
        image_columns = set()
        distances_lost = [[] for _ in range(len(trial_results))]
        times_lost = [[] for _ in range(len(trial_results))]
        frames_lost = [[] for _ in range(len(trial_results))]
        distances_found = [[] for _ in range(len(trial_results))]
        times_found = [[] for _ in range(len(trial_results))]
        frames_found = [[] for _ in range(len(trial_results))]

        is_tracking = [False for _ in range(len(trial_results))]
        tracking_frames = [0 for _ in range(len(trial_results))]
        tracking_distances = [0 for _ in range(len(trial_results))]
        prev_tracking_time = [0 for _ in range(len(trial_results))]
        current_tracking_time = [0 for _ in range(len(trial_results))]

        for frame_idx, frame_results in enumerate(
                zip(*(trial_result.results
                      for trial_result in trial_results))):
            # Get the estimated motions and absolute poses for each trial,
            # And convert them to the ground truth coordinate frame using
            # the scale, translation and rotation we chose
            scaled_motions = [
                tf.Transform(
                    location=frame_results[idx].estimated_motion.location *
                    motion_scales[idx],
                    rotation=frame_results[idx].estimated_motion.rotation_quat(
                        True),
                    w_first=True)
                if frame_results[idx].estimated_motion is not None else None
                for idx in range(len(frame_results))
            ]
            scaled_poses = [
                align_point(pose=frame_results[idx].estimated_pose,
                            shift=estimate_origins_and_scales[idx][0],
                            rotation=estimate_origins_and_scales[idx][1],
                            scale=estimate_origins_and_scales[idx][2])
                if frame_results[idx].estimated_pose is not None else None
                for idx in range(len(frame_results))
            ]

            # Find the average estimated motion for this frame across all the different trials
            # The average is not available for frames with only a single estimate
            non_null_motions = [
                motion for motion in scaled_motions if motion is not None
            ]
            if len(non_null_motions) > 1:
                average_motion = tf.compute_average_pose(non_null_motions)
            else:
                average_motion = None

            # Union the image columns for all the images for all the frame results
            image_columns |= set(
                column for frame_result in frame_results
                for column in frame_result.image.get_columns())

            for repeat_idx, frame_result in enumerate(frame_results):

                # Record how long the current tracking state has persisted
                if frame_idx <= 0:
                    # Cannot change to or from tracking on the first frame
                    is_tracking[repeat_idx] = (frame_result.tracking_state is
                                               TrackingState.OK)
                    prev_tracking_time[repeat_idx] = frame_result.timestamp
                elif is_tracking[
                        repeat_idx] and frame_result.tracking_state is not TrackingState.OK:
                    # This trial has become lost, add to the list and reset the counters
                    frames_found[repeat_idx].append(
                        tracking_frames[repeat_idx])
                    distances_found[repeat_idx].append(
                        tracking_distances[repeat_idx])
                    times_found[repeat_idx].append(
                        current_tracking_time[repeat_idx] -
                        prev_tracking_time[repeat_idx])
                    tracking_frames[repeat_idx] = 0
                    tracking_distances[repeat_idx] = 0
                    prev_tracking_time[repeat_idx] = current_tracking_time[
                        repeat_idx]
                    is_tracking[repeat_idx] = False
                elif not is_tracking[
                        repeat_idx] and frame_result.tracking_state is TrackingState.OK:
                    # This trial has started to track, record how long it was lost for
                    frames_lost[repeat_idx].append(tracking_frames[repeat_idx])
                    distances_lost[repeat_idx].append(
                        tracking_distances[repeat_idx])
                    times_lost[repeat_idx].append(
                        current_tracking_time[repeat_idx] -
                        prev_tracking_time[repeat_idx])
                    tracking_frames[repeat_idx] = 0
                    tracking_distances[repeat_idx] = 0
                    prev_tracking_time[repeat_idx] = current_tracking_time[
                        repeat_idx]
                    is_tracking[repeat_idx] = True

                # Update the current tracking information
                tracking_frames[repeat_idx] += 1
                tracking_distances[repeat_idx] += np.linalg.norm(
                    frame_result.motion.location)
                current_tracking_time[repeat_idx] = frame_result.timestamp

                # Turn loop closures into distances. We don't need to worry about origins because everything is GT frame
                if len(frame_result.loop_edges) > 0:
                    loop_distances, loop_angles = compute_loop_distances_and_angles(
                        frame_result.pose,
                        (
                            timestamps_to_pose[repeat_idx][timestamp]
                            for timestamp in frame_result.loop_edges
                            if timestamp in timestamps_to_pose[
                                repeat_idx]  # they should all be in there, but for safety, check
                        ))
                else:
                    loop_distances, loop_angles = [], []

                # Build the frame error
                frame_error = make_frame_error(
                    trial_result=trial_results[repeat_idx],
                    frame_result=frame_result,
                    image=images[frame_idx],
                    system=system,
                    repeat_index=repeat_idx,
                    loop_distances=loop_distances,
                    loop_angles=loop_angles,
                    # Compute the error in the absolute estimated pose (if available)
                    absolute_error=make_pose_error(
                        scaled_poses[repeat_idx],  # The aligned estimated pose
                        frame_result.pose)
                    if scaled_poses[repeat_idx] is not None else None,
                    # Compute the error of the motion relative to the true motion
                    relative_error=make_pose_error(scaled_motions[repeat_idx],
                                                   frame_result.motion)
                    if scaled_motions[repeat_idx] is not None else None,
                    # Compute the error between the motion and the average estimated motion
                    noise=make_pose_error(scaled_motions[repeat_idx],
                                          average_motion)
                    if scaled_motions[repeat_idx] is not None
                    and average_motion is not None else None,
                    systemic_error=make_pose_error(average_motion,
                                                   frame_result.motion)
                    if average_motion is not None else None)
                estimate_errors[repeat_idx].append(frame_error)

        # Add any accumulated tracking information left over at the end
        if len(trial_results[0].results) > 0:
            for repeat_idx, tracking in enumerate(is_tracking):
                if tracking:
                    frames_found[repeat_idx].append(
                        tracking_frames[repeat_idx])
                    distances_found[repeat_idx].append(
                        tracking_distances[repeat_idx])
                    times_found[repeat_idx].append(
                        current_tracking_time[repeat_idx] -
                        prev_tracking_time[repeat_idx])
                else:
                    frames_lost[repeat_idx].append(tracking_frames[repeat_idx])
                    distances_lost[repeat_idx].append(
                        tracking_distances[repeat_idx])
                    times_lost[repeat_idx].append(
                        current_tracking_time[repeat_idx] -
                        prev_tracking_time[repeat_idx])

        # Once we've tallied all the results, either succeed or fail based on the number of results.
        if len(estimate_errors) <= 0 or any(
                len(trial_errors) <= 0 for trial_errors in estimate_errors):
            return FrameErrorResult(
                metric=self,
                trial_results=trial_results,
                success=False,
                message="No measurable errors for these trajectories")
        return make_frame_error_result(
            metric=self,
            trial_results=trial_results,
            errors=[
                TrialErrors(frame_errors=estimate_errors[repeat],
                            frames_lost=frames_lost[repeat],
                            frames_found=frames_found[repeat],
                            times_lost=times_lost[repeat],
                            times_found=times_found[repeat],
                            distances_lost=distances_lost[repeat],
                            distances_found=distances_found[repeat])
                for repeat, trial_result in enumerate(trial_results)
            ])
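For orientation, this long method runs in roughly five phases: preload the system and image-source models referenced by the trials; validate the trial collection (non-empty, consistent frame counts, checked by check_trial_collection); fit a per-trajectory origin/rotation/scale alignment plus a separate motion scale for trials without scale; walk the zipped frame results, accumulating pose, motion, noise, and systemic errors along with tracking-state runs (frames, distance, and time spent lost or found); and finally assemble everything into a FrameErrorResult via make_frame_error_result.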
Example #20
def invalidate_failed_metric_results() -> int:
    autoload_modules(MetricResult)
    removed = MetricResult.objects.raw({'success': False}).delete()
    logging.getLogger(__name__).info(
        "removed {0} metric results".format(removed))
    return removed
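As in Example #4, calling autoload_modules(MetricResult) with no ids or class names presumably loads every known MetricResult subclass, which is what a query keyed only on success needs, since the failed results could be of any concrete type.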