Code Example #1
# Assumed imports for this excerpt; `tf` is presumed to be the project's transform
# utilities (arvet.util.transform in the arvet framework used by the later examples).
import typing

import numpy as np

import arvet.util.transform as tf


def get_error_from_motion(motion: tf.Transform, gt_motion: tf.Transform, avg_motion: tf.Transform = None) \
        -> typing.Tuple[float, float, float, float, float, float, float, float, float, float, float, float]:
    """
    Given a motion, ground truth motion, and average estimated motion, extract 12 different error statistics
    :param motion:
    :param gt_motion:
    :param avg_motion:
    :return:
    """
    # Error
    trans_error = motion.location - gt_motion.location
    trans_error_length = np.linalg.norm(trans_error)
    trans_error_direction = np.arccos(
        min(
            1.0,
            max(
                -1.0,
                np.dot(trans_error / trans_error_length, gt_motion.location /
                       np.linalg.norm(gt_motion.location))))
    ) if trans_error_length > 0 else 0.0  # No error direction when there is no error
    rot_error = tf.quat_diff(motion.rotation_quat(w_first=True),
                             gt_motion.rotation_quat(w_first=True))

    # Noise
    if avg_motion is None:
        return (
            trans_error[0],
            trans_error[1],
            trans_error[2],
            trans_error_length,
            trans_error_direction,
            rot_error,

            # No average estimate, so the noise statistics are unavailable
            np.nan,
            np.nan,
            np.nan,
            np.nan,
            np.nan,
            np.nan)
    else:
        trans_noise = motion.location - avg_motion.location
        trans_noise_length = np.linalg.norm(trans_noise)
        trans_noise_direction = np.arccos(
            min(
                1.0,
                max(
                    -1.0,
                    np.dot(
                        trans_noise / trans_noise_length, gt_motion.location /
                        np.linalg.norm(gt_motion.location))))
        ) if trans_noise_length > 0 else 0.0  # No noise direction when there is no noise
        rot_noise = tf.quat_diff(motion.rotation_quat(w_first=True),
                                 avg_motion.rotation_quat(w_first=True))

        return (trans_error[0], trans_error[1], trans_error[2],
                trans_error_length, trans_error_direction, rot_error,
                trans_noise[0], trans_noise[1], trans_noise[2],
                trans_noise_length, trans_noise_direction, rot_noise)
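
The rotation error above comes from tf.quat_diff, which is defined elsewhere in the project. As a rough, self-contained sketch of the usual computation (the angle of the relative rotation between two w-first unit quaternions), something like the following would do; it is an illustrative stand-in under that assumption, not the project's implementation:

import numpy as np


def quat_angle_between(q1, q2):
    """Angle (rad) of the relative rotation between two w-first unit quaternions.

    Illustrative sketch only; the code above uses the project's own tf.quat_diff.
    """
    q1 = np.asarray(q1, dtype=float)
    q2 = np.asarray(q2, dtype=float)
    # abs() handles the quaternion double cover (q and -q describe the same rotation)
    dot = float(np.clip(abs(np.dot(q1, q2)), -1.0, 1.0))
    return 2.0 * np.arccos(dot)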
Code Example #2
def make_pose_error(estimated_pose: tf.Transform, reference_pose: tf.Transform) -> PoseError:
    """
    Make a pose error object from an estimated
    :param estimated_pose:
    :param reference_pose:
    :return:
    """
    trans_error = estimated_pose.location - reference_pose.location
    trans_error_length = np.linalg.norm(trans_error)

    trans_error_direction = np.nan  # Direction is undefined when there is no error or no reference motion
    if trans_error_length > 0:
        # Get the unit vector in the direction of the true location
        reference_norm = np.linalg.norm(reference_pose.location)
        if reference_norm > 0:
            unit_reference = reference_pose.location / reference_norm
            # Find the angle between the translation error and the true location
            dot_product = np.dot(trans_error / trans_error_length, unit_reference)
            trans_error_direction = np.arccos(
                # Clip to the arccos domain [-1, 1] to avoid numerical errors
                min(1.0, max(-1.0, dot_product))
            )
    # Unlike trans_error_direction, this is the angle between the estimated and true orientations
    rot_error = tf.quat_diff(estimated_pose.rotation_quat(w_first=True), reference_pose.rotation_quat(w_first=True))
    return PoseError(
        x=trans_error[0],
        y=trans_error[1],
        z=trans_error[2],
        length=trans_error_length,
        direction=trans_error_direction,
        rot=rot_error
    )
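
The PoseError container returned by make_pose_error is defined elsewhere. Judging only from the keyword arguments used in the constructor call above, a minimal stand-in could look like the following (hypothetical, for illustration):

from dataclasses import dataclass


@dataclass
class PoseError:
    """Minimal stand-in matching the fields used by make_pose_error above."""
    x: float          # translation error along each axis (m)
    y: float
    z: float
    length: float     # magnitude of the translation error (m)
    direction: float  # angle between the error vector and the reference location (rad)
    rot: float        # angular difference between the orientations (rad)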
Code Example #3
    def benchmark_results(self, trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \
            -> arvet.core.benchmark.BenchmarkResult:
        """
        Collect the errors
        :param trial_results: The results of several trials to aggregate
        :return:
        :rtype BenchmarkResult:
        """
        trial_results = list(trial_results)
        invalid_reason = arvet.core.benchmark.check_trial_collection(
            trial_results)
        if invalid_reason is not None:
            return arvet.core.benchmark.FailedBenchmark(
                benchmark_id=self.identifier,
                trial_result_ids=[
                    trial_result.identifier for trial_result in trial_results
                ],
                reason=invalid_reason)

        ground_truth_motions = None

        # Collect together all the estimates for each frame from all the trial results
        estimates = {}
        for trial_result in trial_results:
            if ground_truth_motions is None:
                ground_truth_motions = trial_result.get_ground_truth_motions()
            computed_motions = trial_result.get_computed_camera_motions()
            tracking_statistics = trial_result.get_tracking_states()
            num_features = trial_result.num_features
            num_matches = trial_result.num_matches

            computed_keys = set(computed_motions.keys()) | set(
                tracking_statistics.keys())
            computed_keys |= set(num_features.keys()) | set(num_matches.keys())

            # Join together all the timestamps for the different things in the trial result
            # We find a unified set of timestamps, and then match back from those to each of the sets of data.
            unified_times = merge_timestamps(
                (computed_motions.keys(), tracking_statistics.keys(),
                 num_features.keys(), num_matches.keys()))
            to_computed_motions = {
                k: v
                for k, v in arvet.util.associate.associate(unified_times,
                                                           computed_motions,
                                                           offset=0,
                                                           max_difference=0.1)
            }
            to_tracking_statistics = {
                k: v
                for k, v in arvet.util.associate.associate(unified_times,
                                                           tracking_statistics,
                                                           offset=0,
                                                           max_difference=0.1)
            }
            to_num_features = {
                k: v
                for k, v in arvet.util.associate.associate(
                    unified_times, num_features, offset=0, max_difference=0.1)
            }
            to_num_matches = {
                k: v
                for k, v in arvet.util.associate.associate(
                    unified_times, num_matches, offset=0, max_difference=0.1)
            }

            matches = arvet.util.associate.associate(ground_truth_motions,
                                                     unified_times,
                                                     offset=0,
                                                     max_difference=0.1)
            for match in matches:
                if match[0] not in estimates:
                    estimates[match[0]] = {
                        'motion': [],
                        'tracking': [],
                        'num_features': [],
                        'num_matches': []
                    }

                if match[1] in to_computed_motions:
                    estimates[match[0]]['motion'].append(
                        computed_motions[to_computed_motions[match[1]]])

                # Express the tracking state as a number
                if match[1] in to_tracking_statistics:
                    if tracking_statistics[to_tracking_statistics[match[1]]] == \
                            arvet_slam.trials.slam.tracking_state.TrackingState.OK:
                        estimates[match[0]]['tracking'].append(1.0)
                    else:
                        estimates[match[0]]['tracking'].append(0.0)

                if match[1] in to_num_features:
                    estimates[match[0]]['num_features'].append(
                        num_features[to_num_features[match[1]]])
                if match[1] in to_num_matches:
                    estimates[match[0]]['num_matches'].append(
                        num_matches[to_num_matches[match[1]]])

        # Now that we have all the estimates, aggregate the errors
        frame_errors = {}
        for gt_time, estimates_obj in estimates.items():
            if len(estimates_obj['motion']) > 0:
                mean_estimated_motion = tf.compute_average_pose(
                    estimates_obj['motion'])
                location_errors = [
                    np.linalg.norm(motion.location -
                                   ground_truth_motions[gt_time].location)
                    for motion in estimates_obj['motion']
                ]
                angle_errors = [
                    tf.quat_diff(
                        motion.rotation_quat(w_first=True),
                        ground_truth_motions[gt_time].rotation_quat(
                            w_first=True))
                    for motion in estimates_obj['motion']
                ]
                location_noise = [
                    np.linalg.norm(motion.location -
                                   mean_estimated_motion.location)
                    for motion in estimates_obj['motion']
                ]
                rotation_noise = [
                    tf.quat_diff(
                        motion.rotation_quat(w_first=True),
                        mean_estimated_motion.rotation_quat(w_first=True))
                    for motion in estimates_obj['motion']
                ]
                motion_errors = (float(np.mean(location_errors)),
                                 float(np.std(location_errors)),
                                 float(np.mean(angle_errors)),
                                 float(np.std(angle_errors)),
                                 float(np.mean(location_noise)),
                                 float(np.std(location_noise)),
                                 float(np.mean(rotation_noise)),
                                 float(np.std(rotation_noise)))
            else:
                motion_errors = tuple(np.nan for _ in range(8))

            if len(estimates_obj['tracking']) > 0:
                p_lost = 1.0 - (np.sum(estimates_obj['tracking']) /
                                len(estimates_obj['tracking']))
            else:
                p_lost = np.nan

            if len(estimates_obj['num_features']) > 0:
                mean_features = np.mean(estimates_obj['num_features'])
                std_features = np.std(estimates_obj['num_features'])
            else:
                mean_features = np.nan
                std_features = np.nan

            if len(estimates_obj['num_matches']) > 0:
                mean_matches = np.mean(estimates_obj['num_matches'])
                std_matches = np.std(estimates_obj['num_matches'])
            else:
                mean_matches = np.nan
                std_matches = np.nan

            frame_errors[gt_time] = motion_errors + (
                p_lost, mean_features, std_features, mean_matches, std_matches,
                ground_truth_motions[gt_time].location[0],
                ground_truth_motions[gt_time].location[1],
                ground_truth_motions[gt_time].location[2],
                float(np.linalg.norm(ground_truth_motions[gt_time].location)),
                tf.quat_angle(
                    ground_truth_motions[gt_time].rotation_quat(True)))

        if len(frame_errors) <= 0:
            return arvet.core.benchmark.FailedBenchmark(
                benchmark_id=self.identifier,
                trial_result_ids=[
                    trial_result.identifier for trial_result in trial_results
                ],
                reason="No measurable errors for these trajectories")
        return FrameErrorsResult(benchmark_id=self.identifier,
                                 trial_result_ids=[
                                     trial_result.identifier
                                     for trial_result in trial_results
                                 ],
                                 frame_errors=frame_errors)
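
The merge_timestamps helper called above is not shown in this excerpt. Based on the comment describing it (building one unified set of timestamps that is then associated back to each source collection), a plausible minimal sketch, with max_difference assumed to match the 0.1 s tolerance used above, might be:

def merge_timestamps(timestamp_collections, max_difference=0.1):
    """Merge several collections of timestamps into one unified, sorted mapping.

    Hypothetical sketch: timestamps closer together than max_difference are collapsed
    onto a single representative time, so the unified set can later be associated back
    to each of the original collections.
    """
    unified = []
    for stamp in sorted({t for collection in timestamp_collections for t in collection}):
        if not unified or stamp - unified[-1] > max_difference:
            unified.append(stamp)
    # Return a dict so the result can be passed to the associate helper like the other maps
    return {stamp: True for stamp in unified}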
Code Example #4
    def _create_error_distribution_plots(
            self, db_client: arvet.database.client.DatabaseClient):
        import matplotlib.pyplot as pyplot

        noise_save_path = os.path.join(
            type(self).get_output_folder(), 'error distribution')
        os.makedirs(noise_save_path, exist_ok=True)

        logging.getLogger(__name__).info(
            "Plotting error distributions to {0} ...".format(noise_save_path))

        for system_name, system_id in self.systems.items():
            logging.getLogger(__name__).info(
                "    .... plotting for {0}".format(system_name))
            for dataset_name, dataset_id in self.datasets.items():
                all_computed_motions = []
                ground_truth_motions = None
                trial_result_list = self.get_trial_results(
                    system_id, dataset_id)
                if len(trial_result_list) <= 0:
                    continue

                # Collect all the computed motions
                for trial_result_id in trial_result_list:
                    trial_result = dh.load_object(db_client,
                                                  db_client.trials_collection,
                                                  trial_result_id)
                    if trial_result is not None and trial_result.success:
                        if ground_truth_motions is None:
                            ground_truth_motions = trial_result.get_ground_truth_motions()
                        all_computed_motions.append(
                            trial_result.get_computed_camera_motions())

                # Collect statistics on the error for each frame motion
                times = []
                trans_error = []
                rot_error = []
                for computed_motions in all_computed_motions:
                    matches = ass.associate(ground_truth_motions,
                                            computed_motions,
                                            offset=0,
                                            max_difference=0.1)
                    for match in matches:
                        times.append(match[0])
                        trans_error.append(
                            computed_motions[match[1]].location -
                            ground_truth_motions[match[0]].location)
                        rot_error.append(
                            tf.quat_diff(
                                computed_motions[match[1]].rotation_quat(True),
                                ground_truth_motions[match[0]].rotation_quat(
                                    True)))

                if len(trans_error) <= 0:
                    # No successful trials produced matched motions for this dataset
                    continue

                trans_error = np.array(trans_error)
                error_magnitudes = np.linalg.norm(trans_error, axis=1)

                # Plot translational error histograms
                title = "{0} on {1} translational error distribution".format(
                    system_name, dataset_name)
                figure, axes = pyplot.subplots(1, 2, figsize=(20, 8), dpi=80)
                figure.suptitle(title)

                ax = axes[0]
                ax.set_title('per-axis error distribution')
                ax.set_xlabel('noise (m)')
                ax.set_ylabel('density')
                for data, colour in [(trans_error[:, 0], 'red'),
                                     (trans_error[:, 1], 'green'),
                                     (trans_error[:, 2], 'blue')]:
                    ax.hist(data,
                            density=True,
                            bins=300,
                            alpha=0.3,
                            color=colour)

                ax = axes[1]
                ax.set_title('total error distribution')
                ax.set_xlabel('noise (m)')
                ax.set_ylabel('density')
                ax.hist(error_magnitudes, density=True, bins=300, color='blue')

                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.90, right=0.99)
                figure.savefig(os.path.join(noise_save_path, title + '.png'))
                pyplot.close(figure)

                # Plot rotational error histogram
                title = "{0} on {1} error distribution".format(
                    system_name, dataset_name)
                figure, axes = pyplot.subplots(1, 1, figsize=(16, 8), dpi=80)
                figure.suptitle(title)
                axes.set_xlabel('noise (rad)')
                axes.set_ylabel('density')
                axes.hist(rot_error, density=True, bins=300, color='blue')
                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.95, right=0.99)
                figure.savefig(os.path.join(noise_save_path, title + '.png'))
                pyplot.close(figure)

                # Plot error magnitude and angle over time
                title = "{0} on {1} noise vs time".format(
                    system_name, dataset_name)
                figure, axes = pyplot.subplots(2, 1, figsize=(18, 10), dpi=80)
                figure.suptitle(title)
                ax = axes[0]
                ax.set_xlabel('time (s)')
                ax.set_ylabel('noise distance (m)')
                ax.plot(times,
                        error_magnitudes,
                        c='blue',
                        alpha=0.5,
                        marker='.',
                        markersize=2,
                        linestyle='None')

                ax = axes[1]
                ax.set_xlabel('time (s)')
                ax.set_ylabel('noise angle (rad)')
                ax.plot(times,
                        rot_error,
                        c='blue',
                        alpha=0.5,
                        marker='.',
                        markersize=2,
                        linestyle='None')

                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.95, right=0.99)

                figure.savefig(os.path.join(noise_save_path, title + '.png'))
                pyplot.close(figure)
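
Both this method and the benchmark above rely on timestamp association (ass.associate / arvet.util.associate.associate) to pair ground-truth and estimated data. The library call is assumed here; as a conceptual sketch in the style of the TUM RGB-D association tools, it behaves roughly like this:

def associate_sketch(first, second, offset=0.0, max_difference=0.1):
    """Greedily match nearest timestamps between two {timestamp: value} dicts.

    Conceptual sketch only, not arvet's implementation. Returns a sorted list of
    (first_stamp, second_stamp) pairs whose (offset-adjusted) difference is within
    max_difference, each timestamp used at most once.
    """
    potential_matches = sorted(
        (abs(a - (b + offset)), a, b)
        for a in first.keys()
        for b in second.keys()
        if abs(a - (b + offset)) < max_difference
    )
    first_keys = set(first.keys())
    second_keys = set(second.keys())
    matches = []
    for _, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    return sorted(matches)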
Code Example #5
    def _create_absolute_error_plot(
            self, db_client: arvet.database.client.DatabaseClient):
        import matplotlib.pyplot as pyplot

        noise_save_path = os.path.join(
            type(self).get_output_folder(), 'absolute error distribution')
        os.makedirs(noise_save_path, exist_ok=True)

        logging.getLogger(__name__).info(
            "Plotting absolute error distributions to {0} ...".format(
                noise_save_path))

        for system_name, system_id in self.systems.items():
            logging.getLogger(__name__).info(
                "    .... plotting for {0}".format(system_name))
            for dataset_name, dataset_id in self.datasets.items():
                trial_result_list = self.get_trial_results(
                    system_id, dataset_id)
                if len(trial_result_list) <= 0:
                    continue

                # Collect error measurements for all trials
                times = []
                x_error = []
                y_error = []
                z_error = []
                error_magnitude = []
                error_angle = []
                for trial_result_id in trial_result_list:
                    trial_result = dh.load_object(db_client,
                                                  db_client.trials_collection,
                                                  trial_result_id)
                    if trial_result is not None and trial_result.success:
                        ground_truth_trajectory = trial_result.get_ground_truth_camera_poses()
                        computed_trajectory = trial_result.get_computed_camera_poses()
                        matches = ass.associate(ground_truth_trajectory,
                                                computed_trajectory,
                                                offset=0,
                                                max_difference=0.1)
                        for match in matches:
                            error = (computed_trajectory[match[1]].location -
                                     ground_truth_trajectory[match[0]].location)
                            times.append(match[0])
                            x_error.append(error[0])
                            y_error.append(error[1])
                            z_error.append(error[2])
                            error_magnitude.append(np.linalg.norm(error))
                            error_angle.append(
                                tf.quat_diff(
                                    computed_trajectory[
                                        match[1]].rotation_quat(True),
                                    ground_truth_trajectory[
                                        match[0]].rotation_quat(True)))

                # Plot the distributions of absolute error per axis, in magnitude, and in angle
                title = "{0} on {1} noise distribution".format(
                    system_name, dataset_name)
                figure, axes = pyplot.subplots(1, 5, figsize=(40, 8), dpi=80)
                figure.suptitle(title)
                ax = axes[0]
                ax.set_title('x absolute error distribution')
                ax.set_xlabel('error (m)')
                ax.set_ylabel('Probability')
                ax.hist(x_error, density=True, bins=300, color='blue')

                ax = axes[1]
                ax.set_title('y absolute error distribution')
                ax.set_xlabel('error (m)')
                ax.set_ylabel('Probability')
                ax.hist(y_error, density=True, bins=300, color='blue')

                ax = axes[2]
                ax.set_title('z absolute error distribution')
                ax.set_xlabel('error (m)')
                ax.set_ylabel('Probability')
                ax.hist(z_error, density=True, bins=300, color='blue')

                ax = axes[3]
                ax.set_title('total absolute error distribution')
                ax.set_xlabel('error (m)')
                ax.set_ylabel('Probability')
                ax.hist(error_magnitude, density=True, bins=300, color='blue')

                ax = axes[4]
                ax.set_title('angle error distribution')
                ax.set_xlabel('angle error (rad)')
                ax.set_ylabel('Probability')
                ax.hist(error_angle, density=True, bins=300, color='blue')

                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.90, right=0.99)

                figure.savefig(os.path.join(noise_save_path, title + '.png'))
                pyplot.close(figure)

                # Plot error vs time
                title = "{0} on {1} error vs time".format(
                    system_name, dataset_name)
                figure, axes = pyplot.subplots(2, 1, figsize=(18, 10), dpi=80)
                figure.suptitle(title)
                ax = axes[0]
                ax.set_xlabel('time (s)')
                ax.set_ylabel('error magnitude (m)')
                ax.plot(times,
                        error_magnitude,
                        c='blue',
                        alpha=0.5,
                        marker='.',
                        markersize=2,
                        linestyle='None')

                ax = axes[1]
                ax.set_xlabel('time (s)')
                ax.set_ylabel('error angle (rad)')
                ax.plot(times,
                        error_angle,
                        c='blue',
                        alpha=0.5,
                        marker='.',
                        markersize=2,
                        linestyle='None')

                pyplot.tight_layout()
                pyplot.subplots_adjust(top=0.95, right=0.99)

                figure.savefig(os.path.join(noise_save_path, title + '.png'))
                pyplot.close(figure)
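
The histogram panels in the last two methods repeat the same few matplotlib calls. If one were refactoring, a small helper along these lines could remove the duplication; the function name and defaults are hypothetical, shown only as a sketch:

def plot_error_histogram(ax, data, title, xlabel, colour='blue', bins=300):
    """Draw one normalised error histogram panel (refactoring sketch for the panels above)."""
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel('density')
    ax.hist(data, density=True, bins=bins, color=colour)


# Example usage mirroring the per-axis panels above:
# figure, axes = pyplot.subplots(1, 5, figsize=(40, 8), dpi=80)
# plot_error_histogram(axes[0], x_error, 'x absolute error distribution', 'error (m)')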