Example #1
def analyse_ekf(
        ulog: ULog, check_levels: Dict[str, float], red_thresh: float = 1.0,
        amb_thresh: float = 0.5, min_flight_duration_seconds: float = 5.0,
        in_air_margin_seconds: float = 5.0, pos_checks_when_sensors_not_fused: bool = False) -> \
        Tuple[str, Dict[str, str], Dict[str, float], Dict[str, float]]:
    """
    :param ulog:
    :param check_levels:
    :param red_thresh:
    :param amb_thresh:
    :param min_flight_duration_seconds:
    :param in_air_margin_seconds:
    :param pos_checks_when_sensors_not_fused:
    :return:
    """

    try:
        estimator_status = ulog.get_dataset('estimator_status').data
        print('found estimator_status data')
    except:
        raise PreconditionError('could not find estimator_status data')

    try:
        _ = ulog.get_dataset('ekf2_innovations').data
        print('found ekf2_innovation data')
    except:
        raise PreconditionError('could not find ekf2_innovation data')

    try:
        in_air = InAirDetector(
            ulog, min_flight_time_seconds=min_flight_duration_seconds, in_air_margin_seconds=0.0)
        in_air_no_ground_effects = InAirDetector(
            ulog, min_flight_time_seconds=min_flight_duration_seconds,
            in_air_margin_seconds=in_air_margin_seconds)
    except Exception as e:
        raise PreconditionError(str(e))

    if in_air_no_ground_effects.take_off is None:
        raise PreconditionError('no airtime detected.')

    airtime_info = {
        'in_air_transition_time': round(in_air.take_off + in_air.log_start, 2),
        'on_ground_transition_time': round(in_air.landing + in_air.log_start, 2)}

    control_mode, innov_flags, gps_fail_flags = get_estimator_check_flags(estimator_status)

    sensor_checks, innov_fail_checks = find_checks_that_apply(
        control_mode, estimator_status,
        pos_checks_when_sensors_not_fused=pos_checks_when_sensors_not_fused)

    metrics = calculate_ecl_ekf_metrics(
        ulog, innov_flags, innov_fail_checks, sensor_checks, in_air, in_air_no_ground_effects,
        red_thresh=red_thresh, amb_thresh=amb_thresh)

    check_status, master_status = perform_ecl_ekf_checks(
        metrics, sensor_checks, innov_fail_checks, check_levels)

    return master_status, check_status, metrics, airtime_info
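A minimal usage sketch, mirroring the call made in process_logdata_ekf (Example #14); the log file name and the check ids in check_levels are placeholders rather than values taken from the original code:

from pyulog import ULog

ulog = ULog('flight.ulg')  # placeholder log file
check_levels = {'mag_percentage_red_warn': 5.0}  # placeholder check ids and thresholds
master_status, check_status, metrics, airtime_info = analyse_ekf(
    ulog, check_levels, red_thresh=1.0, amb_thresh=0.5,
    min_flight_duration_seconds=5.0, in_air_margin_seconds=5.0)
print('master status: {:s}'.format(master_status))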
Example #2
def calculate_imu_metrics(ulog: ULog,
                          in_air_no_ground_effects: InAirDetector) -> dict:

    ekf2_innovation_data = ulog.get_dataset('ekf2_innovations').data

    estimator_status_data = ulog.get_dataset('estimator_status').data

    imu_metrics = dict()

    # calculates the median of the output tracking error ekf innovations
    for signal, result in [
        ('output_tracking_error[0]', 'output_obs_ang_err_median'),
        ('output_tracking_error[1]', 'output_obs_vel_err_median'),
        ('output_tracking_error[2]', 'output_obs_pos_err_median')
    ]:
        imu_metrics[result] = calculate_stat_from_signal(
            ekf2_innovation_data, 'ekf2_innovations', signal,
            in_air_no_ground_effects, np.median)

    # calculates peak and mean for IMU vibration checks
    for signal, result in [('vibe[0]', 'imu_coning'),
                           ('vibe[1]', 'imu_hfdang'),
                           ('vibe[2]', 'imu_hfdvel')]:
        peak = calculate_stat_from_signal(estimator_status_data,
                                          'estimator_status', signal,
                                          in_air_no_ground_effects, np.amax)
        if peak > 0.0:
            imu_metrics['{:s}_peak'.format(result)] = peak
            imu_metrics['{:s}_mean'.format(
                result)] = calculate_stat_from_signal(
                    estimator_status_data, 'estimator_status', signal,
                    in_air_no_ground_effects, np.mean)

    # IMU bias checks
    imu_metrics['imu_dang_bias_median'] = np.sqrt(
        np.sum([
            np.square(
                calculate_stat_from_signal(estimator_status_data,
                                           'estimator_status', signal,
                                           in_air_no_ground_effects,
                                           np.median))
            for signal in ['states[10]', 'states[11]', 'states[12]']
        ]))
    imu_metrics['imu_dvel_bias_median'] = np.sqrt(
        np.sum([
            np.square(
                calculate_stat_from_signal(estimator_status_data,
                                           'estimator_status', signal,
                                           in_air_no_ground_effects,
                                           np.median))
            for signal in ['states[13]', 'states[14]', 'states[15]']
        ]))

    return imu_metrics
Example #3
def testing_args():
    """
    arguments for testing.
    :return: test arguments
    """
    flight_logs_path = os.path.join(os.path.dirname(__file__), 'flight_logs')
    log_est_format_v1 = ULog(
        os.path.join(flight_logs_path, 'short_f450_log.ulg'))
    log_est_format_v2 = ULog(
        os.path.join(flight_logs_path, 'SITL_VTOL_standard_d7b958.ulg'))

    return {
        'log_est_format_v1': log_est_format_v1,
        'log_est_format_v2': log_est_format_v2
    }
Example #4
def testing_args():
    """
    arguments for testing.
    :return: test arguments
    """
    flight_logs_path = os.path.join(os.path.dirname(__file__), 'flight_logs')
    log_est_format_v1 = ULog(
        os.path.join(flight_logs_path, 'short_f450_log.ulg'))
    log_est_format_v2 = ULog(
        os.path.join(flight_logs_path, 'estimator_innovations.ulg'))

    return {
        'est_format_version_1': log_est_format_v1,
        'est_format_version_2': log_est_format_v2
    }
Example #5
    def __init__(self, ulog: ULog):
        """
        :param ulog:
        """
        super(EclCheckRunner, self).__init__()

        try:
            estimator_status_data = ulog.get_dataset('estimator_status').data
            print('found estimator_status data')

            control_mode_flags, innov_flags, _ = get_estimator_check_flags(
                estimator_status_data)

            self.append(
                MagnetometerCheck(ulog, innov_flags, control_mode_flags))
            self.append(
                MagneticHeadingCheck(ulog, innov_flags, control_mode_flags))
            self.append(VelocityCheck(ulog, innov_flags, control_mode_flags))
            self.append(PositionCheck(ulog, innov_flags, control_mode_flags))
            self.append(HeightCheck(ulog, innov_flags, control_mode_flags))
            self.append(
                HeightAboveGroundCheck(ulog, innov_flags, control_mode_flags))
            self.append(AirspeedCheck(ulog, innov_flags, control_mode_flags))
            self.append(SideSlipCheck(ulog, innov_flags, control_mode_flags))
            self.append(OpticalFlowCheck(ulog, innov_flags,
                                         control_mode_flags))
            self.append(IMU_Vibration_Check(ulog))
            self.append(IMU_Bias_Check(ulog))
            self.append(IMU_Output_Predictor_Check(ulog))
            self.append(NumericalCheck(ulog))
        except Exception as e:
            capture_message(str(e))
            self.error_message = str(e)
            self.analysis_status = -3
Example #6
    def __init__(self, ulog: ULog, min_flight_time_seconds: float = 0.0,
                 in_air_margin_seconds: float = 0.0) -> None:
        """
        initializes an InAirDetector instance.
        :param ulog:
        :param min_flight_time_seconds: set this value to return only airtimes that are at least
        min_flight_time_seconds long
        :param in_air_margin_seconds: removes a margin of in_air_margin_seconds from the airtime
        to avoid ground effects.
        """

        self._ulog = ulog
        self._min_flight_time_seconds = min_flight_time_seconds
        self._in_air_margin_seconds = in_air_margin_seconds

        try:
            self._vehicle_land_detected = ulog.get_dataset('vehicle_land_detected').data
            self._landed = self._vehicle_land_detected['landed']
        except:
            self._in_air = []
            raise PreconditionError(
                'InAirDetector: Could not find vehicle land detected message and/or landed field'
                ' and thus not find any airtime.')

        self._log_start = self._ulog.start_timestamp / 1.0e6

        self._in_air = self._detect_airtime()
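A short usage sketch for the detector, reading back the attributes that analyse_ekf (Example #1) relies on; take_off and landing are relative to the log start, so log_start is added for absolute times:

in_air = InAirDetector(ulog, min_flight_time_seconds=5.0, in_air_margin_seconds=0.0)
if in_air.take_off is None:
    raise PreconditionError('no airtime detected.')
print('in-air at {:.2f} s, on ground at {:.2f} s'.format(
    in_air.take_off + in_air.log_start, in_air.landing + in_air.log_start))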
Example #7
def calculate_ecl_ekf_metrics(
        ulog: ULog, innov_flags: Dict[str, float], innov_fail_checks: List[str],
        sensor_checks: List[str], in_air: InAirDetector, in_air_no_ground_effects: InAirDetector,
        multi_instance: int = 0, red_thresh: float = 1.0,
        amb_thresh: float = 0.5) -> dict:

    sensor_metrics = calculate_sensor_metrics(
        ulog, sensor_checks, in_air, in_air_no_ground_effects,
        red_thresh=red_thresh, amb_thresh=amb_thresh)

    innov_fail_metrics = calculate_innov_fail_metrics(
        innov_flags, innov_fail_checks, in_air, in_air_no_ground_effects)

    imu_metrics = calculate_imu_metrics(ulog, multi_instance, in_air_no_ground_effects)

    estimator_status_data = ulog.get_dataset('estimator_status', multi_instance).data

    # Check for internal filter numerical faults
    ekf_metrics = {'filter_faults_max': np.amax(estimator_status_data['filter_fault_flags'])}
    # TODO - process these bitmasks when they have been properly documented in the uORB topic
    # estimator_status['health_flags']
    # estimator_status['timeout_flags']

    # combine the metrics
    combined_metrics = dict()
    combined_metrics.update(imu_metrics)
    combined_metrics.update(sensor_metrics)
    combined_metrics.update(innov_fail_metrics)
    combined_metrics.update(ekf_metrics)

    return combined_metrics
Example #8
def calculate_ecl_ekf_metrics(
        ulog: ULog, innov_flags: Dict[str, float], innov_fail_checks: List[str],
        sensor_checks: List[str], in_air: InAirDetector, in_air_no_ground_effects: InAirDetector,
        red_thresh: float = 1.0, amb_thresh: float = 0.5) -> dict:

    sensor_metrics = calculate_sensor_metrics(
        ulog, sensor_checks, in_air, in_air_no_ground_effects,
        red_thresh=red_thresh, amb_thresh=amb_thresh)

    innov_fail_metrics = calculate_innov_fail_metrics(
        innov_flags, innov_fail_checks, in_air, in_air_no_ground_effects)

    imu_metrics = calculate_imu_metrics(ulog, in_air_no_ground_effects)

    estimator_status_data = ulog.get_dataset('estimator_status').data

    # Check for internal filter numerical faults
    ekf_metrics = {'filter_faults_max': np.amax(estimator_status_data['filter_fault_flags'])}
    # TODO - process these bitmasks when they have been properly documented in the uORB topic
    # estimator_status['health_flags']
    # estimator_status['timeout_flags']

    # combine the metrics
    combined_metrics = dict()
    combined_metrics.update(imu_metrics)
    combined_metrics.update(sensor_metrics)
    combined_metrics.update(innov_fail_metrics)
    combined_metrics.update(ekf_metrics)

    return combined_metrics
Example #9
    def __init__(
            self, ulog: ULog, min_flight_time_seconds: float = 0.0,
            in_air_margin_seconds: float = 0.0) -> None:
        """
        initializes an InAirDetector instance.
        :param ulog:
        :param min_flight_time_seconds: set this value to return only airtimes that are at least
        min_flight_time_seconds long
        :param in_air_margin_seconds: removes a margin of in_air_margin_seconds from the airtime
        to avoid ground effects.
        """

        self._ulog = ulog
        self._min_flight_time_seconds = min_flight_time_seconds
        self._in_air_margin_seconds = in_air_margin_seconds

        try:
            self._vehicle_land_detected = ulog.get_dataset('vehicle_land_detected').data
            self._landed = self._vehicle_land_detected['landed']
        except:
            self._in_air = []
            raise PreconditionError(
                'InAirDetector: Could not find vehicle land detected message and/or landed field'
                ' and thus not find any airtime.')

        self._log_start = self._ulog.start_timestamp / 1.0e6

        self._in_air = self._detect_airtime()
Example #10
def getAllData(logfile):
    log = ULog(logfile)

    v_local = np.matrix([getData(log, 'vehicle_local_position', 'vx'),
              getData(log, 'vehicle_local_position', 'vy'),
              getData(log, 'vehicle_local_position', 'vz')])

    t_v_local = ms2s(getData(log, 'vehicle_local_position', 'timestamp'))

    accel = np.matrix([getData(log, 'sensor_combined', 'accelerometer_m_s2[0]'),
              getData(log, 'sensor_combined', 'accelerometer_m_s2[1]'),
              getData(log, 'sensor_combined', 'accelerometer_m_s2[2]')])
    t_accel = ms2s(getData(log, 'sensor_combined', 'timestamp'))

    q = np.matrix([getData(log, 'vehicle_attitude', 'q[0]'),
              getData(log, 'vehicle_attitude', 'q[1]'),
              getData(log, 'vehicle_attitude', 'q[2]'),
              getData(log, 'vehicle_attitude', 'q[3]')])
    t_q = ms2s(getData(log, 'vehicle_attitude', 'timestamp'))

    dist_bottom = getData(log, 'vehicle_local_position', 'dist_bottom')
    t_dist_bottom = ms2s(getData(log, 'vehicle_local_position', 'timestamp'))

    (t_aligned, v_body_aligned, accel_aligned) = alignData(t_v_local, v_local, t_accel, accel, t_q, q, t_dist_bottom, dist_bottom)

    t_aligned -= t_aligned[0]

    return (t_aligned, v_body_aligned, accel_aligned)
Example #11
def getBarometerData(ulog: ULog) -> pd.DataFrame:

	vehicle_air_data = ulog.get_dataset("vehicle_air_data").data
	baro = pd.DataFrame({'timestamp': vehicle_air_data['timestamp'],
		'sensor' : 'baro',
		'baro_alt_meter': vehicle_air_data["baro_alt_meter"]})
	return baro
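The per-sensor getters in these examples all return a DataFrame with shared 'timestamp' and 'sensor' columns, so they can be stacked into a single table; a small sketch, assuming the getters from Examples #15, #16 and #18 are also in scope:

import pandas as pd

frames = [getBarometerData(ulog), getAirspeedData(ulog),
          getMagnetometerData(ulog), getImuData(ulog)]
sensors = pd.concat(frames, ignore_index=True, sort=False).sort_values('timestamp')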
Example #12
def ulog2docx(ulg_file_name, doc_file_name=""):
    ulog = ULog(ulg_file_name)

    ### position plot
    config = {}
    config["layout"] = [1, 1]
    config["axeses"] = []
    axes = {}
    axes["title"] = None
    axes["xlabel"] = "East/m"
    axes["ylabel"] = "North/m"
    axes["lines"] = [{"xdata": []}]

    ### altitude estimate
    config = {}
    config["layout"] = [2, 1]
    config["suptitle"] = "Alt Estimate"
    config["axeses"] = []
    # axes 1
    axes = {}
    axes["title"] = ""
    axes["xlabel"] = "Time"
    axes["ylabel"] = "Alt/m"
    axes["lines"] = [{
        "xdata":
        "vehicle_global_position.timestamp",
        "ydata": [rescale, "vehicle_global_position.alt", 0.001, 0],
        "specs":
        "-r",
        "label":
        "Fused Alt"
    }, {
        "xdata": "vehicle_air_data.timestamp",
        "ydata": ["vehicle_air_data", "baro_alt_meter"],
        "specs": "-k",
        "label": "Baro Alt"
    }, {
        "xdata": ["vehicle_gps_position", "timestamp"],
        "process": lambda x: x * 0.001,
        "ydata": [lambda: "vehicle_gps_position", "alt"],
        "spec": "-b",
        "label": "GPS Alt"
    }]
    config["axeses"].append(axes)
    # axes 2
    axes = {}
    axes["title"] = ""
    axes["xlabel"] = "Time/sec"
    axes["ylabel"] = "Thrust"
    axes["lines"] = [{
        "xdata": ["actuator_controls_0", "timestamp"],
        "ydata": ["actuator_controls_0", "control[3]"],
        "spec": "-r",
        "process": None,
        "label": "Thrust[0,100]"
    }]
    config["axeses"].append(axes)
    # plot
    plot2d(ulog, config)
Example #13
def calculate_imu_metrics(
        ulog: ULog, multi_instance: int, in_air_no_ground_effects: InAirDetector) -> dict:

    estimator_status_data = ulog.get_dataset('estimator_status', multi_instance).data

    imu_metrics = dict()

    # calculates the median of the output tracking error ekf innovations
    for signal, result in [('output_tracking_error[0]', 'output_obs_ang_err_median'),
                           ('output_tracking_error[1]', 'output_obs_vel_err_median'),
                           ('output_tracking_error[2]', 'output_obs_pos_err_median')]:
        imu_metrics[result] = calculate_stat_from_signal(
            estimator_status_data, 'estimator_status', signal, in_air_no_ground_effects, np.median)


    # calculates peak and mean for IMU vibration checks
    for imu_status_instance in range(4):
        try:
            vehicle_imu_status_data = ulog.get_dataset('vehicle_imu_status', imu_status_instance).data

            if vehicle_imu_status_data['accel_device_id'][0] == estimator_status_data['accel_device_id'][0]:

                for signal, result in [('gyro_coning_vibration', 'imu_coning'),
                                       ('gyro_vibration_metric', 'imu_hfgyro'),
                                       ('accel_vibration_metric', 'imu_hfaccel')]:

                    peak = calculate_stat_from_signal(vehicle_imu_status_data, 'vehicle_imu_status', signal, in_air_no_ground_effects, np.amax)

                    if peak > 0.0:
                        imu_metrics['{:s}_peak'.format(result)] = peak
                        imu_metrics['{:s}_mean'.format(result)] = calculate_stat_from_signal(vehicle_imu_status_data, 'vehicle_imu_status', signal, in_air_no_ground_effects, np.mean)

        except:
            pass


    # IMU bias checks
    estimator_states_data = ulog.get_dataset('estimator_states', multi_instance).data

    imu_metrics['imu_dang_bias_median'] = np.sqrt(np.sum([np.square(calculate_stat_from_signal(
        estimator_states_data, 'estimator_states', signal, in_air_no_ground_effects, np.median))
        for signal in ['states[10]', 'states[11]', 'states[12]']]))
    imu_metrics['imu_dvel_bias_median'] = np.sqrt(np.sum([np.square(calculate_stat_from_signal(
        estimator_states_data, 'estimator_states', signal, in_air_no_ground_effects, np.median))
        for signal in ['states[13]', 'states[14]', 'states[15]']]))

    return imu_metrics
Example #14
def process_logdata_ekf(filename: str,
                        check_level_dict_filename: str,
                        check_table_filename: str,
                        plot: bool = True,
                        sensor_safety_margins: bool = True):

    ## load the log and extract the necessary data for the analyses
    try:
        ulog = ULog(filename)
    except:
        raise PreconditionError('could not open {:s}'.format(filename))

    try:
        # get the dictionary of fail and warning test thresholds from a csv file
        with open(check_level_dict_filename, 'r') as file:
            reader = csv.DictReader(file)
            check_levels = {
                row['check_id']: float(row['threshold'])
                for row in reader
            }
        print('Using test criteria loaded from {:s}'.format(
            check_level_dict_filename))
    except:
        raise PreconditionError(
            'could not find {:s}'.format(check_level_dict_filename))

    in_air_margin = 5.0 if sensor_safety_margins else 0.0
    # perform the ekf analysis
    master_status, check_status, metrics, airtime_info = analyse_ekf(
        ulog,
        check_levels,
        red_thresh=1.0,
        amb_thresh=0.5,
        min_flight_duration_seconds=5.0,
        in_air_margin_seconds=in_air_margin)

    test_results = create_results_table(check_table_filename, master_status,
                                        check_status, metrics, airtime_info)

    # write metadata to a .csv file
    with open('{:s}.mdat.csv'.format(filename), "w") as file:

        file.write("name,value,description\n")

        # loop through the test results dictionary and write each entry on a separate row, with data comma separated
        # save data in alphabetical order
        key_list = list(test_results.keys())
        key_list.sort()
        for key in key_list:
            file.write(key + "," + str(test_results[key][0]) + "," +
                       test_results[key][1] + "\n")
    print('Test results written to {:s}.mdat.csv'.format(filename))

    if plot:
        create_pdf_report(ulog, '{:s}.pdf'.format(filename))
        print('Plots saved to {:s}.pdf'.format(filename))

    return test_results
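The check-level file is parsed with csv.DictReader, so it only needs 'check_id' and 'threshold' columns; a minimal sketch of producing such a file (the check ids shown are placeholders, the real ids come from the check table used by create_results_table):

import csv

with open('check_levels.csv', 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=['check_id', 'threshold'])
    writer.writeheader()
    writer.writerow({'check_id': 'mag_test_max_fail', 'threshold': 1.0})
    writer.writerow({'check_id': 'vel_test_max_fail', 'threshold': 1.0})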
Example #15
def getAirspeedData(ulog: ULog) -> pd.DataFrame:

	airspeed = ulog.get_dataset("airspeed").data
	airspeed = pd.DataFrame({'timestamp': airspeed['timestamp'],
		'sensor' : 'airspeed',
		'true_as': airspeed["true_airspeed_m_s"],
		'indicated_as': airspeed["indicated_airspeed_m_s"]
		})
	return airspeed
Example #16
def getMagnetometerData(ulog: ULog) -> pd.DataFrame:

	vehicle_magnetometer = ulog.get_dataset("vehicle_magnetometer").data
	mag = pd.DataFrame({'timestamp': vehicle_magnetometer['timestamp'],
		'sensor' : 'mag',
		'magnetometer_ga[0]': vehicle_magnetometer["magnetometer_ga[0]"],
		'magnetometer_ga[1]': vehicle_magnetometer["magnetometer_ga[1]"],
		'magnetometer_ga[2]': vehicle_magnetometer["magnetometer_ga[2]"]})
	return mag
Example #17
def test_in_air_detector_on_flight_log(testing_args):
    """
    tests the basics of the in air detector using the basic test flight log file.
    :param testing_args:
    :return:
    """
    basic_test_log_filename = os.path.join(
        testing_args['test_flight_logs_path'],
        testing_args['simple_test_flight_log'])
    ulog = ULog(basic_test_log_filename)
    original_take_offs(ulog)
Example #18
def getImuData(ulog: ULog) -> pd.DataFrame:

	sensor_combined = ulog.get_dataset("sensor_combined").data
	imu = pd.DataFrame({'timestamp': sensor_combined['timestamp'],
		'sensor' : 'imu',
		'accel_m_s2[0]': sensor_combined["accelerometer_m_s2[0]"],
		'accel_m_s2[1]': sensor_combined["accelerometer_m_s2[1]"],
		'accel_m_s2[2]': sensor_combined["accelerometer_m_s2[2]"],
		'gyro_rad[0]': sensor_combined["gyro_rad[0]"],
		'gyro_rad[1]': sensor_combined["gyro_rad[1]"],
		'gyro_rad[2]': sensor_combined["gyro_rad[2]"]})
	return imu
Example #19
def calculate_sensor_metrics(ulog: ULog,
                             sensor_checks: List[str],
                             in_air: InAirDetector,
                             in_air_no_ground_effects: InAirDetector,
                             multi_instance: int = 0,
                             red_thresh: float = 1.0,
                             amb_thresh: float = 0.5) -> Dict[str, float]:

    estimator_status_data = ulog.get_dataset('estimator_status',
                                             multi_instance).data

    sensor_metrics = dict()

    # calculates peak, mean, percentage above 0.5 std, and percentage above std metrics for
    # estimator status variables
    for signal, result_id in [('hgt_test_ratio', 'hgt'),
                              ('mag_test_ratio', 'mag'),
                              ('vel_test_ratio', 'vel'),
                              ('pos_test_ratio', 'pos'),
                              ('tas_test_ratio', 'tas'),
                              ('hagl_test_ratio', 'hagl')]:

        # only run sensor checks, if they apply.
        if result_id in sensor_checks:

            if result_id == 'mag' or result_id == 'hgt':
                in_air_detector = in_air_no_ground_effects
            else:
                in_air_detector = in_air

            # the percentage of samples above / below std dev
            sensor_metrics['{:s}_percentage_red'.format(
                result_id)] = calculate_stat_from_signal(
                    estimator_status_data, 'estimator_status', signal,
                    in_air_detector, lambda x: 100.0 * np.mean(x > red_thresh))
            sensor_metrics['{:s}_percentage_amber'.format(result_id)] = calculate_stat_from_signal(
                estimator_status_data, 'estimator_status', signal, in_air_detector,
                lambda x: 100.0 * np.mean(x > amb_thresh)) - \
                    sensor_metrics['{:s}_percentage_red'.format(result_id)]

            # the peak and mean ratio of samples above / below std dev
            peak = calculate_stat_from_signal(estimator_status_data,
                                              'estimator_status', signal,
                                              in_air_detector, np.amax)
            if peak > 0.0:
                sensor_metrics['{:s}_test_max'.format(result_id)] = peak
                sensor_metrics['{:s}_test_mean'.format(
                    result_id)] = calculate_stat_from_signal(
                        estimator_status_data, 'estimator_status', signal,
                        in_air_detector, np.mean)

    return sensor_metrics
Example #20
def getOpticalFlowData(ulog: ULog) -> pd.DataFrame:

	optical_flow = ulog.get_dataset("optical_flow").data
	flow = pd.DataFrame({'timestamp': optical_flow['timestamp'],
	'sensor' : 'flow',
	'pixel_flow_x_integral': optical_flow["pixel_flow_x_integral"],
	'pixel_flow_y_integral': optical_flow["pixel_flow_y_integral"],
	'gyro_x_rate_integral': optical_flow["gyro_x_rate_integral"],
	'gyro_y_rate_integral': optical_flow["gyro_y_rate_integral"],
	'gyro_z_rate_integral': optical_flow["gyro_z_rate_integral"],
	'quality': optical_flow["quality"]
		})
	return flow
Example #21
def getGpsData(ulog: ULog) -> pd.DataFrame:

	vehicle_gps_position = ulog.get_dataset("vehicle_gps_position").data
	gps = pd.DataFrame({'timestamp': vehicle_gps_position['timestamp'],
		'sensor' : 'gps',
		'alt': vehicle_gps_position["alt"],
		'lon': vehicle_gps_position["lon"],
		'lat': vehicle_gps_position["lat"],
		'vel_N': vehicle_gps_position["vel_n_m_s"],
		'vel_E': vehicle_gps_position["vel_e_m_s"],
		'vel_D': vehicle_gps_position["vel_d_m_s"],
		})
	return gps
Example #22
def check_if_field_name_exists_in_message(ulog: ULog, message: str,
                                          field_name: str) -> bool:
    """
        Check if a field is part of a message in a certain log
    """
    exists = True

    if message not in [elem.name for elem in ulog.data_list]:
        exists = False
    elif field_name not in ulog.get_dataset(message).data.keys():
        exists = False

    return exists
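Usage is straightforward, for example as a guard for fields that are only present in some log versions:

if check_if_field_name_exists_in_message(ulog, 'estimator_status', 'hagl_test_ratio'):
    hagl_test_ratio = ulog.get_dataset('estimator_status').data['hagl_test_ratio']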
Example #23
def test_position_analyzer(testing_args):
    """

    :param testing_args:
    :return:
    """

    filename = os.path.join(testing_args['golden_flight_logs_path'],
                            testing_args['golden_flight_logs'][0])

    position_analyzer = PositionAnalyzer(ULog(filename))
    position_analyzer.set_min_ground_distance(0.2)
    assert position_analyzer.get_valid_position('sensor_combined') == [], \
        'returned valid positions were not empty'
Example #24
def getRangeFinderData(ulog: ULog) -> pd.DataFrame:

	range = pd.DataFrame()
	try:
		range_0 = ulog.get_dataset("distance_sensor", 0).data
		rng_0 = pd.DataFrame({'timestamp': range_0['timestamp'],
			'sensor' : 'range',
			'data': range_0["current_distance"],
			'quality': range_0["signal_quality"]
			})
		range = pd.concat([range, rng_0], ignore_index=True, sort=False)
	except:
		pass

	try:
		range_1 = ulog.get_dataset("distance_sensor", 1).data
		rng_1 = pd.DataFrame({'timestamp': range_1['timestamp'],
			'sensor' : 'range',
			'data': range_1["current_distance"],
			'quality': range_1["signal_quality"]
			})
		range = pd.concat([range, rng_1], ignore_index=True, sort=False)
	except:
		pass

	try:
		range_2 = ulog.get_dataset("distance_sensor", 2).data
		rng_2 = pd.DataFrame({'timestamp': range_2['timestamp'],
			'sensor' : 'range',
			'data': range_2["current_distance"],
			'quality': range_2["signal_quality"]
			})
		range = pd.concat([range, rng_2], ignore_index=True, sort=False)
	except:
		pass

	return range
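The three near-identical try blocks could be collapsed into a loop over the multi-instance ids; a sketch under the same assumption as the original, namely that only distance_sensor instances 0 to 2 are of interest (the function name here is invented for the sketch):

def getRangeFinderDataCompact(ulog: ULog) -> pd.DataFrame:
    frames = []
    for instance in range(3):
        try:
            data = ulog.get_dataset("distance_sensor", instance).data
        except Exception:
            continue  # this distance_sensor instance is not present in the log
        frames.append(pd.DataFrame({'timestamp': data['timestamp'],
                                    'sensor': 'range',
                                    'data': data["current_distance"],
                                    'quality': data["signal_quality"]}))
    return pd.concat(frames, ignore_index=True, sort=False) if frames else pd.DataFrame()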
Example #25
def calculate_imu_metrics(
        ulog: ULog, in_air_no_ground_effects: InAirDetector) -> dict:

    ekf2_innovation_data = ulog.get_dataset('ekf2_innovations').data

    estimator_status_data = ulog.get_dataset('estimator_status').data

    imu_metrics = dict()

    # calculates the median of the output tracking error ekf innovations
    for signal, result in [('output_tracking_error[0]', 'output_obs_ang_err_median'),
                           ('output_tracking_error[1]', 'output_obs_vel_err_median'),
                           ('output_tracking_error[2]', 'output_obs_pos_err_median')]:
        imu_metrics[result] = calculate_stat_from_signal(
            ekf2_innovation_data, 'ekf2_innovations', signal, in_air_no_ground_effects, np.median)

    # calculates peak and mean for IMU vibration checks
    for signal, result in [('vibe[0]', 'imu_coning'),
                           ('vibe[1]', 'imu_hfdang'),
                           ('vibe[2]', 'imu_hfdvel')]:
        peak = calculate_stat_from_signal(
            estimator_status_data, 'estimator_status', signal, in_air_no_ground_effects, np.amax)
        if peak > 0.0:
            imu_metrics['{:s}_peak'.format(result)] = peak
            imu_metrics['{:s}_mean'.format(result)] = calculate_stat_from_signal(
                estimator_status_data, 'estimator_status', signal,
                in_air_no_ground_effects, np.mean)

    # IMU bias checks
    imu_metrics['imu_dang_bias_median'] = np.sqrt(np.sum([np.square(calculate_stat_from_signal(
        estimator_status_data, 'estimator_status', signal, in_air_no_ground_effects, np.median))
        for signal in ['states[10]', 'states[11]', 'states[12]']]))
    imu_metrics['imu_dvel_bias_median'] = np.sqrt(np.sum([np.square(calculate_stat_from_signal(
        estimator_status_data, 'estimator_status', signal, in_air_no_ground_effects, np.median))
        for signal in ['states[13]', 'states[14]', 'states[15]']]))

    return imu_metrics
Example #26
def load_ulog_file(file_name):
    """ load an ULog file
    :return: ULog object
    """
    # The reason to put this method into helper is that the main module gets
    # (re)loaded on each page request. Thus the caching would not work there.

    # load only the messages we really need
    msg_filter = ['battery_status', 'estimator_status',
                  'sensor_combined', 'cpuload',
                  'vehicle_gps_position', 'vehicle_local_position',
                  'vehicle_global_position', 'vehicle_attitude', 
                  'vehicle_rates_setpoint', 
                  'vehicle_attitude_groundtruth',
                  'vehicle_local_position_groundtruth', 
                  'vehicle_status', 'airspeed', 
                  'rate_ctrl_status', 'vehicle_air_data',
                  'vehicle_magnetometer', 'system_power']

                  # removed because they are not needed:
                  #     position_setpoint_triplet
                  #     'actuator_controls_1' ,'actuator_controls_0','actuator_outputs'
                  #     distance_sensor
                  #     'vehicle_local_position_setpoint', 'vehicle_angular_velocity','vehicle_attitude_setpoint' 
                  #     'tecs_status'
                  #     'rc_channels', 'input_rc',
                  #     'manual_control_setpoint','vehicle_visual_odometry'
    try:
        ulog = ULog(file_name, msg_filter, disable_str_exceptions=False)
    except FileNotFoundError:
        print("Error: file %s not found" % file_name)
        raise

    # catch all other exceptions and turn them into an ULogException
    except Exception as error:
        traceback.print_exception(*sys.exc_info())
        raise ULogException()

    # filter messages with timestamp = 0 (these are invalid).
    # The better way is not to publish such messages in the first place, and fix
    # the code instead (it goes against the monotonicity requirement of ulog).
    # So we display the values such that the problem becomes visible.
#    for d in ulog.data_list:
#        t = d.data['timestamp']
#        non_zero_indices = t != 0
#        if not np.all(non_zero_indices):
#            d.data = np.compress(non_zero_indices, d.data, axis=0)

    return ulog
Example #27
def check_if_field_name_exists_in_message(ulog: ULog, message_name: str,
                                          field_name: str) -> bool:
    """
        Check if a field is part of a message in a certain log
    """
    if field_name is None:
        return False

    msg_data = ulog.get_dataset(message_name).data
    field_name_list = msg_data.keys()

    for elem in field_name_list:
        if field_name == field_name_wo_brackets(elem):
            return True

    return False
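field_name_wo_brackets is not shown in these examples; presumably it strips a trailing multi-instance index such as '[0]' from a field name. A minimal sketch of such a helper, stated as an assumption about its behaviour:

def field_name_wo_brackets(field_name: str) -> str:
    # 'accelerometer_m_s2[0]' -> 'accelerometer_m_s2'; names without brackets pass through unchanged
    return field_name.split('[')[0]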
Example #28
def getVioData(ulog: ULog) -> pd.DataFrame:

	vehicle_visual_odometry = ulog.get_dataset("vehicle_visual_odometry").data
	vio = pd.DataFrame({'timestamp': vehicle_visual_odometry['timestamp'],
		'sensor' : 'vio',
		'x': vehicle_visual_odometry["x"],
		'y': vehicle_visual_odometry["y"],
		'z': vehicle_visual_odometry["z"],
		'qw': vehicle_visual_odometry["q[0]"],
		'qx': vehicle_visual_odometry["q[1]"],
		'qy': vehicle_visual_odometry["q[2]"],
		'qz': vehicle_visual_odometry["q[3]"],
		'vx': vehicle_visual_odometry["vx"],
		'vy': vehicle_visual_odometry["vy"],
		'vz': vehicle_visual_odometry["vz"]
		})
	return vio
Example #29
def test_basics_in_air_detector(testing_args):
    """
    tests the basics of the in air detector on a dummy log file.
    :param testing_args:
    :return:
    """

    dummy_log_filename = os.path.join(testing_args['test_flight_logs_path'],
                                      testing_args['dummy_log_file'])

    ulog = ULog(dummy_log_filename)

    always_on_ground(ulog)
    always_in_air(ulog)
    start_in_air(ulog)
    take_off_at_second_time_stamp(ulog)
    multiple_take_offs(ulog)
Example #30
def getInputOutputData(logfile, axis, instance=0):
    log = ULog(logfile)

    y_data = get_data(log, 'vehicle_angular_velocity', 'xyz[{}]'.format(axis))
    t_y_data = ms2s(get_data(log, 'vehicle_angular_velocity', 'timestamp'))

    actuator_controls_n = 'actuator_controls_{}'.format(instance)
    u_data = get_data(log, actuator_controls_n, 'control[{}]'.format(axis))
    t_u_data = ms2s(get_data(log, actuator_controls_n, 'timestamp'))

    (t_aligned, u_aligned,
     y_aligned) = extract_identification_data(log, t_u_data, u_data, t_y_data,
                                              y_data, axis)

    t_aligned -= t_aligned[0]

    return (t_aligned, u_aligned, y_aligned)
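A short usage note, assuming the usual PX4 convention that channels 0, 1 and 2 of vehicle_angular_velocity and actuator_controls correspond to roll, pitch and yaw; the log file name is a placeholder:

# rate-loop identification data for the roll axis of the first actuator_controls instance
t, u, y = getInputOutputData('flight.ulg', axis=0, instance=0)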
Example #31
def convert_ulog2csv(ulog_file_name):
    """
    Coverts and ULog file to a CSV file.

    :param ulog_file_name: The ULog filename to open and read
    :param messages: A list of message names
    :param output: Output file path
    :param delimiter: CSV delimiter

    :return: None
    """

    msg_filter = None
    disable_str_exceptions = False
    delimiter = ','

    ulog = ULog(ulog_file_name, msg_filter, disable_str_exceptions)
    data = ulog.data_list

    keylist = []  # collect the field names of every dataset in the log
    for d in data:
        # use same field order as in the log, except for the timestamp
        data_keys = [f.field_name for f in d.field_data]
        data_keys.remove('timestamp')
      #  data_keys.insert(0, 'timestamp')  # we want timestamp at first position

        # we don't use np.savetxt, because we have multiple arrays with
        # potentially different data types. However the following is quite
        # slow...

        # write the header
        # print(delimiter.join(data_keys) + '\n')

        # write the data
        # last_elem = len(data_keys)-1
        # for i in range(len(d.data['timestamp'])):
        #     for k in range(len(data_keys)):
        #         csvfile.write(str(d.data[data_keys[k]][i]))
        #         if k != last_elem:
        #             csvfile.write(delimiter)
        #     csvfile.write('\n')
        print("Key =", data_keys)
        for g in data_keys:
            keylist.append(g)
#        print("Data =", d.data)
    print("Keylist =", keylist)
    print("One item", keylist[10])
Example #32
def calculate_sensor_metrics(
        ulog: ULog, sensor_checks: List[str], in_air: InAirDetector,
        in_air_no_ground_effects: InAirDetector, red_thresh: float = 1.0,
        amb_thresh: float = 0.5) -> Dict[str, float]:

    estimator_status_data = ulog.get_dataset('estimator_status').data

    sensor_metrics = dict()

    # calculates peak, mean, percentage above 0.5 std, and percentage above std metrics for
    # estimator status variables
    for signal, result_id in [('hgt_test_ratio', 'hgt'),
                              ('mag_test_ratio', 'mag'),
                              ('vel_test_ratio', 'vel'),
                              ('pos_test_ratio', 'pos'),
                              ('tas_test_ratio', 'tas'),
                              ('hagl_test_ratio', 'hagl')]:

        # only run sensor checks, if they apply.
        if result_id in sensor_checks:

            if result_id == 'mag' or result_id == 'hgt':
                in_air_detector = in_air_no_ground_effects
            else:
                in_air_detector = in_air

            # the percentage of samples above / below std dev
            sensor_metrics['{:s}_percentage_red'.format(result_id)] = calculate_stat_from_signal(
                estimator_status_data, 'estimator_status', signal, in_air_detector,
                lambda x: 100.0 * np.mean(x > red_thresh))
            sensor_metrics['{:s}_percentage_amber'.format(result_id)] = calculate_stat_from_signal(
                estimator_status_data, 'estimator_status', signal, in_air_detector,
                lambda x: 100.0 * np.mean(x > amb_thresh)) - \
                    sensor_metrics['{:s}_percentage_red'.format(result_id)]

            # the peak and mean ratio of samples above / below std dev
            peak = calculate_stat_from_signal(
                estimator_status_data, 'estimator_status', signal, in_air_detector, np.amax)
            if peak > 0.0:
                sensor_metrics['{:s}_test_max'.format(result_id)] = peak
                sensor_metrics['{:s}_test_mean'.format(result_id)] = calculate_stat_from_signal(
                    estimator_status_data, 'estimator_status', signal,
                    in_air_detector, np.mean)

    return sensor_metrics
Example #33
def info_to_df(ulog, verbose, file_name):
    """Show general information from an ULog"""

    time_s = int((ulog.last_timestamp - ulog.start_timestamp) / 1e6)
    print("time_s: ", time_s)

    file_size = os.path.getsize(
        file_name) / 1e6  # getsize gives the size in bytes, converted to MB
    log_debit = file_size / time_s  # so MB/s
    print("file_size: " + str(file_size) + " MB \t log_debit: " +
          str(log_debit) + " MB/s")

    sd_log = ulog.initial_parameters["SDLOG_PROFILE"]
    print("sdlog_mode: ", sd_log)

    # print("")
    # print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
    #                                   "number of data points", "frequency"))

    data = []
    data_list_sorted = sorted(ulog.data_list,
                              key=lambda d: d.name + str(d.multi_id))
    for d in data_list_sorted:
        message_size = sum(
            [ULog.get_field_size(f.type_str) for f in d.field_data])
        num_data_points = len(d.data['timestamp'])
        name_id = "{:}".format(d.name)
        data.append(
            dict({
                'name': name_id,
                'mode': sd_log,
                'frequency': num_data_points / time_s,
                'mess_size': message_size,
                'file': file_name,
                'debit': log_debit
            }))
        #
        # print(" {:<40}  {:2f}".format(name_id, num_data_points / time_s))

    df = pd.DataFrame(data)
    return df
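A usage sketch, aggregating the per-topic logging rates of several logs into one table; the file names are placeholders:

import pandas as pd
from pyulog import ULog

log_files = ['log_1.ulg', 'log_2.ulg']
df = pd.concat([info_to_df(ULog(name), verbose=False, file_name=name) for name in log_files],
               ignore_index=True)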
Example #34
def getAllData(logfile):
    log = ULog(logfile)

    v_local = np.matrix([
        getData(log, 'vehicle_local_position', 'vx'),
        getData(log, 'vehicle_local_position', 'vy'),
        getData(log, 'vehicle_local_position', 'vz')
    ])

    t_local = ms2s(getData(log, 'vehicle_local_position', 'timestamp'))

    dist_bottom = getData(log, 'vehicle_local_position', 'dist_bottom')
    baro = getData(log, 'vehicle_air_data', 'baro_alt_meter')
    t_baro = ms2s(getData(log, 'vehicle_air_data', 'timestamp'))

    baro_bias = getData(log, 'estimator_baro_bias', 'bias')
    t_baro_bias = ms2s(getData(log, 'estimator_baro_bias', 'timestamp'))

    q = np.matrix([
        getData(log, 'vehicle_attitude', 'q[0]'),
        getData(log, 'vehicle_attitude', 'q[1]'),
        getData(log, 'vehicle_attitude', 'q[2]'),
        getData(log, 'vehicle_attitude', 'q[3]')
    ])
    t_q = ms2s(getData(log, 'vehicle_attitude', 'timestamp'))

    gnss_h = getData(log, 'vehicle_gps_position', 'alt') * 1e-3
    t_gnss = ms2s(getData(log, 'vehicle_gps_position', 'timestamp'))

    (t_aligned, v_body_aligned, baro_aligned, v_local_z_aligned,
     gnss_h_aligned, baro_bias_aligned) = alignData(t_local, v_local,
                                                    dist_bottom, t_q, q, baro,
                                                    t_baro, t_gnss, gnss_h,
                                                    t_baro_bias, baro_bias)

    t_aligned -= t_aligned[0]

    return (t_aligned, v_body_aligned, baro_aligned, v_local_z_aligned,
            gnss_h_aligned, baro_bias_aligned)
Example #35
def process_logdata_ekf(filename: str, plot: bool = False) -> List[dict]:
    """
    main function for processing the logdata for ekf analysis.
    :param filename:
    :param plot:
    :return:
    """
    try:
        ulog = ULog(filename)
    except Exception as e:
        raise PreconditionError('could not open {:s}'.format(filename)) from e

    test_results = analyse_logdata_ekf(ulog)

    with open('{:s}.json'.format(os.path.splitext(filename)[0]), 'w') as file:
        json.dump(test_results, file, indent=2)

    if plot:
        create_pdf_report(ulog, '{:s}.pdf'.format(filename))
        print('Plots saved to {:s}.pdf'.format(filename))

    return test_results
Example #36
def create_pdf_report(ulog: ULog, output_plot_filename: str) -> None:
    """
    creates a pdf report of the ekf analysis.
    :param ulog:
    :param output_plot_filename:
    :return:
    """

    # create summary plots
    # save the plots to PDF

    try:
        estimator_status = ulog.get_dataset('estimator_status').data
        print('found estimator_status data')
    except:
        raise PreconditionError('could not find estimator_status data')

    try:
        ekf2_innovations = ulog.get_dataset('ekf2_innovations').data
        print('found ekf2_innovation data')
    except:
        raise PreconditionError('could not find ekf2_innovation data')

    try:
        sensor_preflight = ulog.get_dataset('sensor_preflight').data
        print('found sensor_preflight data')
    except:
        raise PreconditionError('could not find sensor_preflight data')

    control_mode, innov_flags, gps_fail_flags = get_estimator_check_flags(estimator_status)

    status_time = 1e-6 * estimator_status['timestamp']

    b_finishes_in_air, b_starts_in_air, in_air_duration, in_air_transition_time, \
    on_ground_transition_time = detect_airtime(control_mode, status_time)

    with PdfPages(output_plot_filename) as pdf_pages:

        # plot IMU consistency data
        if ('accel_inconsistency_m_s_s' in sensor_preflight.keys()) and (
                'gyro_inconsistency_rad_s' in sensor_preflight.keys()):
            data_plot = TimeSeriesPlot(
                sensor_preflight, [['accel_inconsistency_m_s_s'], ['gyro_inconsistency_rad_s']],
                x_labels=['data index', 'data index'],
                y_labels=['acceleration (m/s/s)', 'angular rate (rad/s)'],
                plot_title='IMU Consistency Check Levels', pdf_handle=pdf_pages)
            data_plot.save()
            data_plot.close()

        # vertical velocity and position innovations
        data_plot = InnovationPlot(
            ekf2_innovations, [('vel_pos_innov[2]', 'vel_pos_innov_var[2]'),
                               ('vel_pos_innov[5]', 'vel_pos_innov_var[5]')],
            x_labels=['time (sec)', 'time (sec)'],
            y_labels=['Down Vel (m/s)', 'Down Pos (m)'], plot_title='Vertical Innovations',
            pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # horizontal velocity innovations
        data_plot = InnovationPlot(
            ekf2_innovations, [('vel_pos_innov[0]', 'vel_pos_innov_var[0]'),
                               ('vel_pos_innov[1]','vel_pos_innov_var[1]')],
            x_labels=['time (sec)', 'time (sec)'],
            y_labels=['North Vel (m/s)', 'East Vel (m/s)'],
            plot_title='Horizontal Velocity Innovations', pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # horizontal position innovations
        data_plot = InnovationPlot(
            ekf2_innovations, [('vel_pos_innov[3]', 'vel_pos_innov_var[3]'), ('vel_pos_innov[4]',
                                                                              'vel_pos_innov_var[4]')],
            x_labels=['time (sec)', 'time (sec)'],
            y_labels=['North Pos (m)', 'East Pos (m)'], plot_title='Horizontal Position Innovations',
            pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # magnetometer innovations
        data_plot = InnovationPlot(
            ekf2_innovations, [('mag_innov[0]', 'mag_innov_var[0]'),
           ('mag_innov[1]', 'mag_innov_var[1]'), ('mag_innov[2]', 'mag_innov_var[2]')],
            x_labels=['time (sec)', 'time (sec)', 'time (sec)'],
            y_labels=['X (Gauss)', 'Y (Gauss)', 'Z (Gauss)'], plot_title='Magnetometer Innovations',
            pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # magnetic heading innovations
        data_plot = InnovationPlot(
            ekf2_innovations, [('heading_innov', 'heading_innov_var')],
            x_labels=['time (sec)'], y_labels=['Heading (rad)'],
            plot_title='Magnetic Heading Innovations', pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # air data innovations
        data_plot = InnovationPlot(
            ekf2_innovations,
            [('airspeed_innov', 'airspeed_innov_var'), ('beta_innov', 'beta_innov_var')],
            x_labels=['time (sec)', 'time (sec)'],
            y_labels=['innovation (m/sec)', 'innovation (rad)'],
            sub_titles=['True Airspeed Innovations', 'Synthetic Sideslip Innovations'],
            pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # optical flow innovations
        data_plot = InnovationPlot(
            ekf2_innovations, [('flow_innov[0]', 'flow_innov_var[0]'), ('flow_innov[1]',
                                                                        'flow_innov_var[1]')],
            x_labels=['time (sec)', 'time (sec)'],
            y_labels=['X (rad/sec)', 'Y (rad/sec)'],
            plot_title='Optical Flow Innovations', pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # plot normalised innovation test levels
        # define variables to plot
        variables = [['mag_test_ratio'], ['vel_test_ratio', 'pos_test_ratio'], ['hgt_test_ratio']]
        y_labels = ['mag', 'vel, pos', 'hgt']
        legend = [['mag'], ['vel', 'pos'], ['hgt']]
        if np.amax(estimator_status['hagl_test_ratio']) > 0.0:  # plot hagl test ratio, if applicable
            variables[-1].append('hagl_test_ratio')
            y_labels[-1] += ', hagl'
            legend[-1].append('hagl')

        # plot airspeed sensor test ratio, if applicable
        if np.amax(estimator_status['tas_test_ratio']) > 0.0:
            variables.append(['tas_test_ratio'])
            y_labels.append('TAS')
            legend.append(['airspeed'])

        data_plot = CheckFlagsPlot(
            status_time, estimator_status, variables, x_label='time (sec)', y_labels=y_labels,
            plot_title='Normalised Innovation Test Levels', pdf_handle=pdf_pages, annotate=True,
            legend=legend
        )
        data_plot.save()
        data_plot.close()

        # plot control mode summary A
        data_plot = ControlModeSummaryPlot(
            status_time, control_mode, [['tilt_aligned', 'yaw_aligned'],
            ['using_gps', 'using_optflow', 'using_evpos'], ['using_barohgt', 'using_gpshgt',
             'using_rnghgt', 'using_evhgt'], ['using_magyaw', 'using_mag3d', 'using_magdecl']],
            x_label='time (sec)', y_labels=['aligned', 'pos aiding', 'hgt aiding', 'mag aiding'],
            annotation_text=[['tilt alignment', 'yaw alignment'], ['GPS aiding', 'optical flow aiding',
             'external vision aiding'], ['Baro aiding', 'GPS aiding', 'rangefinder aiding',
             'external vision aiding'], ['magnetic yaw aiding', '3D magnetometer aiding',
             'magnetic declination aiding']], plot_title='EKF Control Status - Figure A',
            pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # plot control mode summary B
        # construct additional annotations for the airborne plot
        airborne_annotations = list()
        if np.amin(np.diff(control_mode['airborne'])) > -0.5:
            airborne_annotations.append(
                (on_ground_transition_time, 'air to ground transition not detected'))
        else:
            airborne_annotations.append((on_ground_transition_time, 'on-ground at {:.1f} sec'.format(
                on_ground_transition_time)))
        if in_air_duration > 0.0:
            airborne_annotations.append(((in_air_transition_time + on_ground_transition_time) / 2,
                                         'duration = {:.1f} sec'.format(in_air_duration)))
        if np.amax(np.diff(control_mode['airborne'])) < 0.5:
            airborne_annotations.append(
                (in_air_transition_time, 'ground to air transition not detected'))
        else:
            airborne_annotations.append(
                (in_air_transition_time, 'in-air at {:.1f} sec'.format(in_air_transition_time)))

        data_plot = ControlModeSummaryPlot(
            status_time, control_mode, [['airborne'], ['estimating_wind']],
            x_label='time (sec)', y_labels=['airborne', 'estimating wind'], annotation_text=[[], []],
            additional_annotation=[airborne_annotations, []],
            plot_title='EKF Control Status - Figure B', pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # plot innovation_check_flags summary
        data_plot = CheckFlagsPlot(
            status_time, innov_flags, [['vel_innov_fail', 'posh_innov_fail'], ['posv_innov_fail',
                                                                               'hagl_innov_fail'],
                                       ['magx_innov_fail', 'magy_innov_fail', 'magz_innov_fail',
                                        'yaw_innov_fail'], ['tas_innov_fail'], ['sli_innov_fail'],
                                       ['ofx_innov_fail',
                                        'ofy_innov_fail']], x_label='time (sec)',
            y_labels=['failed', 'failed', 'failed', 'failed', 'failed', 'failed'],
            y_lim=(-0.1, 1.1),
            legend=[['vel NED', 'pos NE'], ['hgt absolute', 'hgt above ground'],
                    ['mag_x', 'mag_y', 'mag_z', 'yaw'], ['airspeed'], ['sideslip'],
                    ['flow X', 'flow Y']],
            plot_title='EKF Innovation Test Fails', annotate=False, pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # gps_check_fail_flags summary
        data_plot = CheckFlagsPlot(
            status_time, gps_fail_flags,
            [['nsat_fail', 'gdop_fail', 'herr_fail', 'verr_fail', 'gfix_fail', 'serr_fail'],
             ['hdrift_fail', 'vdrift_fail', 'hspd_fail', 'veld_diff_fail']],
            x_label='time (sec)', y_lim=(-0.1, 1.1), y_labels=['failed', 'failed'],
            sub_titles=['GPS Direct Output Check Failures', 'GPS Derived Output Check Failures'],
            legend=[['N sats', 'GDOP', 'horiz pos error', 'vert pos error', 'fix type',
                     'speed error'], ['horiz drift', 'vert drift', 'horiz speed',
                                      'vert vel inconsistent']], annotate=False, pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # filter reported accuracy
        data_plot = CheckFlagsPlot(
            status_time, estimator_status, [['pos_horiz_accuracy', 'pos_vert_accuracy']],
            x_label='time (sec)', y_labels=['accuracy (m)'], plot_title='Reported Accuracy',
            legend=[['horizontal', 'vertical']], annotate=False, pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # Plot the EKF IMU vibration metrics
        scaled_estimator_status = {'vibe[0]': 1000. * estimator_status['vibe[0]'],
                                   'vibe[1]': 1000. * estimator_status['vibe[1]'],
                                   'vibe[2]': estimator_status['vibe[2]']
                                   }
        data_plot = CheckFlagsPlot(
            status_time, scaled_estimator_status, [['vibe[0]'], ['vibe[1]'], ['vibe[2]']],
            x_label='time (sec)', y_labels=['Del Ang Coning (mrad)', 'HF Del Ang (mrad)',
                                            'HF Del Vel (m/s)'], plot_title='IMU Vibration Metrics',
            pdf_handle=pdf_pages, annotate=True)
        data_plot.save()
        data_plot.close()

        # Plot the EKF output observer tracking errors
        scaled_innovations = {
            'output_tracking_error[0]': 1000. * ekf2_innovations['output_tracking_error[0]'],
            'output_tracking_error[1]': ekf2_innovations['output_tracking_error[1]'],
            'output_tracking_error[2]': ekf2_innovations['output_tracking_error[2]']
            }
        data_plot = CheckFlagsPlot(
            1e-6 * ekf2_innovations['timestamp'], scaled_innovations,
            [['output_tracking_error[0]'], ['output_tracking_error[1]'],
             ['output_tracking_error[2]']], x_label='time (sec)',
            y_labels=['angles (mrad)', 'velocity (m/s)', 'position (m)'],
            plot_title='Output Observer Tracking Error Magnitudes',
            pdf_handle=pdf_pages, annotate=True)
        data_plot.save()
        data_plot.close()

        # Plot the delta angle bias estimates
        data_plot = CheckFlagsPlot(
            1e-6 * estimator_status['timestamp'], estimator_status,
            [['states[10]'], ['states[11]'], ['states[12]']],
            x_label='time (sec)', y_labels=['X (rad)', 'Y (rad)', 'Z (rad)'],
            plot_title='Delta Angle Bias Estimates', annotate=False, pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # Plot the delta velocity bias estimates
        data_plot = CheckFlagsPlot(
            1e-6 * estimator_status['timestamp'], estimator_status,
            [['states[13]'], ['states[14]'], ['states[15]']],
            x_label='time (sec)', y_labels=['X (m/s)', 'Y (m/s)', 'Z (m/s)'],
            plot_title='Delta Velocity Bias Estimates', annotate=False, pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # Plot the earth frame magnetic field estimates
        declination, field_strength, inclination = magnetic_field_estimates_from_status(
            estimator_status)
        data_plot = CheckFlagsPlot(
            1e-6 * estimator_status['timestamp'],
            {'strength': field_strength, 'declination': declination, 'inclination': inclination},
            [['declination'], ['inclination'], ['strength']],
            x_label='time (sec)', y_labels=['declination (deg)', 'inclination (deg)',
                                            'strength (Gauss)'],
            plot_title='Earth Magnetic Field Estimates', annotate=False,
            pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # Plot the body frame magnetic field estimates
        data_plot = CheckFlagsPlot(
            1e-6 * estimator_status['timestamp'], estimator_status,
            [['states[19]'], ['states[20]'], ['states[21]']],
            x_label='time (sec)', y_labels=['X (Gauss)', 'Y (Gauss)', 'Z (Gauss)'],
            plot_title='Magnetometer Bias Estimates', annotate=False, pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()

        # Plot the EKF wind estimates
        data_plot = CheckFlagsPlot(
            1e-6 * estimator_status['timestamp'], estimator_status,
            [['states[22]'], ['states[23]']], x_label='time (sec)',
            y_labels=['North (m/s)', 'East (m/s)'], plot_title='Wind Velocity Estimates',
            annotate=False, pdf_handle=pdf_pages)
        data_plot.save()
        data_plot.close()