Example #1
# imports assumed for this snippet; `tools` and `NokiaSensor` come from the
# original project and are not standard packages
import pickle
import argparse

import numpy as np
import quaternion  # numpy-quaternion package
import matplotlib.pyplot as plt
from scipy.optimize import least_squares


def main():
    parser = argparse.ArgumentParser(description='Estimate orientation offset between body and cam on gimbal')
    parser.add_argument('--res', required=True, help='path to result pickle')
    parser.add_argument('--skip', default=0, type=int, help='skip first x frames')
    parser.add_argument('--huber-coef', default=0, type=float, help='pseudo-Huber loss scale; 0 disables the robust loss')
    parser.add_argument('--nadir-looking', action='store_true', help='better plot for nadir looking cam')
    parser.add_argument('--init-ypr', default=[0.0, 0.0, 0.0], type=float, nargs=3)
    args = parser.parse_args()

    with open(args.res, 'rb') as fh:
        results, map3d, frame_names, meta_names, ground_truth, *_ = pickle.load(fh)

    # (kf.pose, kf.measure, kf.time, kf.id)
    meas_q = np.array([tools.ypr_to_q(*np.flip(meas.data[3:6])) for pose, meas, _, _ in results if meas and pose.post])
    est_q = np.array([(-pose.post).to_global(NokiaSensor.b2c).quat for pose, meas, _, _ in results if meas and pose.post])

    meas_q = meas_q[args.skip:]
    est_q = est_q[args.skip:]

    print('optimizing orientation offset based on %d measurements...' % (len(meas_q),))

    def costfun(x, meas_q, est_q):
        off_q = np.quaternion(*x).normalized()
        delta_angles = tools.angle_between_q_arr(est_q, meas_q * off_q)
        return delta_angles

    x0 = tools.ypr_to_q(*(np.array(args.init_ypr)/180*np.pi)).components
    res = least_squares(costfun, x0, verbose=2, ftol=1e-5, xtol=1e-5, gtol=1e-8, max_nfev=1000,
                        loss='huber' if args.huber_coef > 0 else 'linear', f_scale=args.huber_coef or 1.0,
                        args=(meas_q, est_q))

    off_q = np.quaternion(*res.x).normalized()

    print('offset q: %s, ypr: %s' % (off_q, np.array(tools.q_to_ypr(off_q)) / np.pi * 180,))

    args.nadir_looking = False  # TODO: remove override
    nl_dq = tools.eul_to_q((-np.pi / 2,), 'y') if args.nadir_looking else quaternion.one
    est_ypr = np.array([tools.q_to_ypr(nl_dq.conj() * q) for q in est_q]) / np.pi * 180
    corr_ypr = np.array([tools.q_to_ypr(nl_dq.conj() * q * off_q) for q in meas_q]) / np.pi * 180

    plt.plot(est_ypr[:, 0], 'C0:')
    plt.plot(est_ypr[:, 1], 'C1:')
    plt.plot(est_ypr[:, 2], 'C2:')
    plt.plot(corr_ypr[:, 0], 'C0-')
    plt.plot(corr_ypr[:, 1], 'C1-')
    plt.plot(corr_ypr[:, 2], 'C2-')
    plt.show()
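
For reference, a minimal self-contained sketch of the same offset-estimation idea on synthetic data; angle_between stands in for tools.angle_between_q_arr, and the est ≈ meas * off convention is carried over from the code above (numpy-quaternion assumed):

import numpy as np
import quaternion  # numpy-quaternion package
from scipy.optimize import least_squares

def angle_between(q1, q2):
    # smallest rotation angle between two unit quaternions
    return 2 * np.arccos(min(1.0, abs((q1.conj() * q2).w)))

rng = np.random.default_rng(0)
true_off = quaternion.from_rotation_vector([0.02, -0.05, 0.1])
est_q = np.array([np.quaternion(*rng.normal(size=4)).normalized() for _ in range(50)])
meas_q = np.array([q * true_off.conj() for q in est_q])  # measurements lack the offset

def costfun(x):
    off_q = np.quaternion(*x).normalized()
    return [angle_between(e, m * off_q) for e, m in zip(est_q, meas_q)]

res = least_squares(costfun, quaternion.one.components, xtol=1e-10)
print(np.quaternion(*res.x).normalized())  # should recover true_off (up to sign)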
Example #2
    def initialize_frame(self, time, image, measure):
        nf = super(VisualGPSNav, self).initialize_frame(time, image, measure)
        if nf.measure:
            old_kf = [kf for kf in self.state.keyframes if kf.measure]
            nf.measure.time_adj = old_kf[-1].measure.time_adj if len(old_kf) > 0 else 0

            geodetic_origin = self.geodetic_origin
            lat, lon, alt = nf.measure.data[0:3]
            roll, pitch, yaw = nf.measure.data[3:6]
            b2c_roll, b2c_pitch, b2c_yaw = nf.measure.data[6:9]

            # world frame: +z up, +x is east, +y is north
            # body frame: +z down, +x is fw towards north, +y is right wing (east)
            # camera frame: +z into the image plane, -y is up, +x is right

            # wf2bf_q = tools.eul_to_q((np.pi, -np.pi / 2), 'xz')
            # bf2cf = Pose(None, tools.eul_to_q((np.pi / 2, np.pi / 2), 'yz'))

            if 1:
                w2b_bf_q = tools.ypr_to_q(yaw, pitch, roll)
                b2c_bf_q = tools.ypr_to_q(b2c_yaw, b2c_pitch, b2c_roll)
                yaw, pitch, roll = tools.q_to_ypr(w2b_bf_q * b2c_bf_q * (self.ori_off_q or quaternion.one))
                cf_w2c = tools.lla_ypr_to_loc_quat(geodetic_origin, [lat, lon, alt], [yaw, pitch, roll], b2c=self.bf2cf)
                if len(old_kf) > 0:
                    cf_w2c.vel = (cf_w2c.loc - (-old_kf[-1].pose.prior).loc) / (nf.time - old_kf[-1].time).total_seconds()
                else:
                    cf_w2c.vel = np.array([0, 0, 0])
                nf.pose.prior = -cf_w2c
            else:  # legacy derivation kept for reference; bf_cam_q and bf_cam_r are not defined in this snippet
                wf_body_r = tools.to_cartesian(lat, lon, alt, *geodetic_origin)
                bf_world_body_q = tools.ypr_to_q(yaw, pitch, roll)
                if 0:
                    cf_world_q = bf_cam_q.conj() * bf_world_body_q.conj()

                    cf_body_r = tools.q_times_v(bf_cam_q.conj(), -bf_cam_r)
                    cf_body_world_r = tools.q_times_v(cf_world_q * self.wf2bf_q.conj(), -wf_body_r)

                    nf.pose.prior = Pose(cf_body_r + cf_body_world_r, cf_world_q * bf_cam_q)
                else:
                    bf_world_q = bf_world_body_q.conj() * self.wf2bf_q.conj()
                    cf_world_q = bf_cam_q.conj() * bf_world_q

                    cf_body_r = tools.q_times_v(bf_cam_q.conj(), -bf_cam_r)
                    cf_body_world_r = tools.q_times_v(cf_world_q, -wf_body_r)
                    # bf_cam_q.conj() * bf_world_body_q.conj() * self.wf2bf_q.conj()

                    nf.pose.prior = Pose(cf_body_r + cf_body_world_r, bf_cam_q.conj() * bf_world_body_q.conj() * bf_cam_q)
                    # bf_cam_q.conj() * bf_world_body_q.conj() * bf_cam_q

                    # (NokiaSensor.w2b_q * NokiaSensor.b2c_q) * prior.quat.conj()
                    # self.wf2bf_q * bf_cam_q * (bf_cam_q.conj() * bf_world_body_q.conj() * bf_cam_q).conj()
                    # => self.wf2bf_q * bf_cam_q //*// bf_cam_q.conj() * bf_world_body_q * bf_cam_q //*//
                    # self.wf2bf_q * bf_cam_q //*// bf_cam_q.conj() * bf_world_body_q * bf_cam_q //*// bf_cam_q.conj() * bf_world_body_q.conj() * self.wf2bf_q.conj()

        return nf
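
A minimal sketch of the rotation chaining used above, with scipy's Rotation standing in for tools.ypr_to_q / q_to_ypr; the intrinsic z-y-x yaw-pitch-roll convention is an assumption and may differ from the original tools module:

import numpy as np
from scipy.spatial.transform import Rotation as R

def ypr_to_rot(yaw, pitch, roll):
    # intrinsic z-y-x Euler angles (an assumed convention)
    return R.from_euler('ZYX', [yaw, pitch, roll])

w2b = ypr_to_rot(np.radians(90), 0, 0)   # body yawed 90 deg east of north
b2c = ypr_to_rot(0, np.radians(-45), 0)  # gimbal pitched down 45 deg
w2c = w2b * b2c                          # compose as in w2b_bf_q * b2c_bf_q above
print(np.degrees(w2c.as_euler('ZYX')))   # combined yaw/pitch/roll: [90, -45, 0]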
Example #3
def render_itokawa(show=False):
    cam = Camera(4096,
                 4096,
                 aperture=1.5,
                 focal_length=120.8,
                 sensor_size=[12.288] * 2,
                 px_saturation_e=30e3)
    shape_model_file = [
        r'C:\projects\sispo\data\models\itokawa_16k.obj',
        r'C:\projects\sispo\data\models\itokawa_f3145728.obj',
    ][0]

    RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_HAPKE] = [
        553.38,  # J         0
        26,  # th_p      26
        0.31,  # w         0.31
        -0.35,  # b         -0.35
        0,  # c         0
        0.86,  # B_SH0     0.86
        0.00021,  # hs        0.00021
        0,  # B_CB0     0
        0.005,  # hc        0.005
        1,  # K         1
    ]

    re = RenderEngine(cam.width, cam.height,
                      antialias_samples=0)  #, enable_extra_data=True)
    re.set_frustum(cam.x_fov, cam.y_fov, 0.05, 12)
    obj_idx = re.load_object(shape_model_file)

    if 1:
        g_sc_q = tools.angleaxis_to_q(
            [1.847799, -0.929873, 0.266931, -0.253146])
        #x, y, z = -13.182141, -64.694813, 116.263134
        x, y, z = 93.818, -8.695, 1.263
        g_ast_q = get_ast_q(x, y, z)
        g_sol_ast_v = np.array([146226194732.0, -68812326932.0, -477863381.0
                                ]) * 1e-3
        g_sol_sc_v = np.array([146226188132.0, -68812322442.0, -477863711.0
                               ]) * 1e-3
        g_sc_ast_v = g_sol_ast_v - g_sol_sc_v
        l_ast_sc_v = tools.q_times_v(
            SystemModel.sc2gl_q.conj() * g_sc_q.conj(), g_sc_ast_v)
        l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj(
        ) * g_ast_q * SystemModel.sc2gl_q
        l_light_v = tools.q_times_v(SystemModel.sc2gl_q.conj() * g_sc_q.conj(),
                                    g_sol_ast_v / np.linalg.norm(g_sol_ast_v))
    else:
        l_ast_sc_v = np.array([0, 0, -7.990 * 1])
        l_ast_q = tools.angleaxis_to_q((math.radians(20), 0, 1, 0))
        l_light_v = np.array([1, 0, -1]) / math.sqrt(2)

    sc_mode = 0
    a, b, c = [0] * 3
    ds, dq = 0.135, tools.ypr_to_q(math.radians(1.154), 0,
                                   math.radians(-5.643))
    #    ds, dq = 0.535, quaternion.one     # itokawa centered

    while True:
        img = re.render(obj_idx,
                        tools.q_times_v(dq, l_ast_sc_v * ds),
                        l_ast_q,
                        l_light_v,
                        flux_density=1.0,
                        gamma=1.0,
                        get_depth=False,
                        shadows=True,
                        textures=True,
                        reflection=RenderEngine.REFLMOD_HAPKE)

        #        data = re.render_extra_data(obj_idx, tools.q_times_v(dq, l_ast_sc_v*ds), l_ast_q, l_light_v)
        #        import matplotlib.pyplot as plt
        #        plt.imshow(data[:, :, 0])

        k = output(img, show, maxval=0.90)

        if k is None or k == 27:
            break

        tmp = 1 if k in (ord('a'), ord('s'), ord('q')) else -1
        if k in (ord('a'), ord('d')):
            if sc_mode:
                dq = tools.ypr_to_q(math.radians(tmp * 0.033), 0, 0) * dq
            else:
                b += tmp
        if k in (ord('w'), ord('s')):
            if sc_mode:
                dq = tools.ypr_to_q(0, 0, math.radians(tmp * 0.033)) * dq
            else:
                a += tmp
        if k in (ord('q'), ord('e')):
            if sc_mode:
                ds *= 0.9**tmp
            else:
                c += tmp
        if k == ord('i'):
            yq, _, rq = tools.q_to_ypr(dq)  # separate names so the x, y, z view state is not clobbered
            print('+c: %.3f, %.3f, %.3f' % (x + a, y + b, z + c))
            print('ds, h, v: %.3f, %.3f, %.3f' %
                  (ds, math.degrees(yq), math.degrees(rq)))

        g_ast_q = get_ast_q(x + a, y + b, z + c)
        l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj(
        ) * g_ast_q * SystemModel.sc2gl_q
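
The quaternion argument [1.847799, -0.929873, 0.266931, -0.253146] above is angle-axis with the angle first. A minimal sketch of that construction, with numpy-quaternion's from_rotation_vector standing in for tools.angleaxis_to_q (the same angle-first convention is assumed):

import numpy as np
import quaternion  # numpy-quaternion package

def angleaxis_to_q(angle, x, y, z):
    # rotation of `angle` radians about the axis (x, y, z)
    axis = np.array([x, y, z], dtype=float)
    return quaternion.from_rotation_vector(angle * axis / np.linalg.norm(axis))

q = angleaxis_to_q(1.847799, -0.929873, 0.266931, -0.253146)
print(q, 2 * np.arccos(q.w))  # recovers the angle, ~1.848 rad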
Example #4
def traj_costfun(x,
                 width,
                 height,
                 pp_x,
                 pp_y,
                 poses,
                 pts3d,
                 pts2d,
                 cam_idxs,
                 pt3d_idxs,
                 meas_r,
                 meas_aa,
                 meas_idxs,
                 obs_kp,
                 ground_alt,
                 error_types,
                 def_fl=None,
                 def_dist=None,
                 plot=False):
    if def_fl is None and def_dist is None:
        fl, dist_coefs = abs(x[0]), x[1:]
    elif def_dist is None:
        fl, dist_coefs = def_fl, x
    else:
        fl, dist_coefs = abs(x[0]), def_dist

    # run ba
    new_poses, new_pts3d, _, _, ba_errs = run_ba(width, height, pp_x, pp_y, fl,
                                                 dist_coefs, poses, pts3d,
                                                 pts2d, cam_idxs, pt3d_idxs,
                                                 meas_r, meas_aa, meas_idxs)

    errs = []
    labels = []

    if ERROR_TYPE_REPROJECTION in error_types:
        err_repr = ERROR_COEFS[ERROR_TYPE_REPROJECTION] * np.nanmean(
            ba_errs[:, 0])
        errs.append(err_repr)
        labels.append('repr')

    if ERROR_TYPE_LOCATION in error_types:
        err_loc = ERROR_COEFS[ERROR_TYPE_LOCATION] * np.nanmean(
            np.linalg.norm(ba_errs[:, 1:4], axis=1))
        errs.append(err_loc)
        labels.append('loc')

    if {ERROR_TYPE_ALTITUDE, ERROR_TYPE_CURVATURE,
            ERROR_TYPE_DISPERSION}.intersection(error_types):
        new_pts3d = new_pts3d[obs_kp, :]

        # fit a plane, based on https://math.stackexchange.com/questions/99299/best-fitting-plane-given-a-set-of-points
        centroid = np.median(new_pts3d.T, axis=1, keepdims=True)
        svd = np.linalg.svd(new_pts3d.T - centroid)
        normal = svd[0][:, -1:].T

        # fit a parabolic surface to keypoints
        x0 = 0, *centroid.flatten(), *normal.flatten()
        res = least_squares(paraboloid_costfun,
                            x0,
                            verbose=0,
                            ftol=1e-5,
                            xtol=1e-5,
                            gtol=1e-8,
                            max_nfev=1000,
                            loss='huber',
                            f_scale=1.0,
                            args=(new_pts3d, ))

        # extract a plane that corresponds to the fitted parabolic surface (should be more accurate than the median based)
        c, x0, y0, z0, xn, yn, zn = res.x

        if ERROR_TYPE_CURVATURE in error_types:
            errs.append(ERROR_COEFS[ERROR_TYPE_CURVATURE] * c)
            labels.append('curv')

        if ERROR_TYPE_DISPERSION in error_types:
            displ = np.abs(res.fun)
            lim = np.quantile(
                displ,
                0.8)  # how well the keypoints lie on the estimated surface
            err_disp = np.mean(displ[displ < lim])
            errs.append(ERROR_COEFS[ERROR_TYPE_DISPERSION] * err_disp)
            labels.append('disp')

        if ERROR_TYPE_ALTITUDE in error_types:
            centroid = np.array([x0, y0, z0]).reshape((1, 3))
            normal = np.array([xn, yn, zn]).reshape((1, 3))
            normal /= np.linalg.norm(normal)

            pose_cf = new_poses[meas_idxs[-1], :]
            loc_wf = (
                -Pose(pose_cf[3:], tools.angleaxis_to_q(pose_cf[:3]))).loc

            is_takeoff = ERROR_TYPE_CURVATURE in error_types
            end_meas_alt = -meas_r[-1 if is_takeoff else 0,
                                   2] - ground_alt  # neg z-axis is altitude
            if 1:
                end_distance = np.abs(
                    normal.dot(loc_wf[:, None] - centroid.flatten()[:, None]))
                err_alt = (end_meas_alt - end_distance)[0, 0]
            else:
                err_alt = np.quantile(-new_pts3d[:, 2], 0.2) - ground_alt
            errs.append(ERROR_COEFS[ERROR_TYPE_ALTITUDE] * err_alt /
                        end_meas_alt)
            labels.append('alt')

    if ERROR_TYPE_PITCH in error_types:
        ypr = np.array(
            [tools.q_to_ypr(tools.angleaxis_to_q(p[:3]))
             for p in new_poses]) / np.pi * 180
        err_pitch = ypr[-1, 1] - ypr[0, 1]
        errs.append(ERROR_COEFS[ERROR_TYPE_PITCH] * err_pitch)
        labels.append('pitch')

    print('=== fl: %s, dist: %s, cost: %s ===' %
          (fl / 0.5, dist_coefs, dict(zip(labels, errs))))

    if plot:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot(new_pts3d[:, 0], new_pts3d[:, 1], new_pts3d[:, 2], 'C0.')
        plt.show()

    return np.array(errs)
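
A minimal, self-contained sketch of the SVD plane fit used above, on synthetic data: the left singular vector with the smallest singular value of the centered point cloud is the plane normal.

import numpy as np

rng = np.random.default_rng(1)
xy = rng.uniform(-10, 10, (100, 2))
z = 0.1 * xy[:, 0] - 0.2 * xy[:, 1] + rng.normal(0, 0.05, 100)  # near-planar cloud
pts3d = np.column_stack((xy, z))

centroid = np.median(pts3d.T, axis=1, keepdims=True)  # median: robust to outliers
u, s, vt = np.linalg.svd(pts3d.T - centroid)
normal = u[:, -1]          # direction of least variance
print(normal / normal[2])  # ~ [-0.1, 0.2, 1]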
Example #5
                                              datetime.fromtimestamp(t0 + t),
                                              prior, quaternion.one)

        if res is not None:
            tq = res.quat * SystemModel.cv2gl_q
            est_q = tq.conj() * res.quat.conj() * tq
            err_q = ast_q.conj() * est_q
            err_angle = tools.angle_between_q(ast_q, est_q)
            est_v = -tools.q_times_v(tq.conj(), res.loc)
            err_v = est_v - ast_v

            #print('\n')
            # print('rea ypr: %s' % ' '.join('%.1fdeg' % math.degrees(a) for a in tools.q_to_ypr(cam_q)))
            # print('est ypr: %s' % ' '.join('%.1fdeg' % math.degrees(a) for a in tools.q_to_ypr(res_q)))
            print('rea ypr: %s' % ' '.join('%.1fdeg' % math.degrees(a)
                                           for a in tools.q_to_ypr(ast_q)))
            print('est ypr: %s' % ' '.join('%.1fdeg' % math.degrees(a)
                                           for a in tools.q_to_ypr(est_q)))
            # print('err ypr: %s' % ' '.join('%.2fdeg' % math.degrees(a) for a in tools.q_to_ypr(err_q)))
            print('err angle: %.2fdeg' % math.degrees(err_angle))
            # print('rea v: %s' % ' '.join('%.1fm' % a for a in cam_v*1000))
            # print('est v: %s' % ' '.join('%.1fm' % a for a in res_v*1000))
            print('rea v: %s' % ' '.join('%.1fm' % a for a in ast_v * 1000))
            print('est v: %s' % ' '.join('%.1fm' % a for a in est_v * 1000))
            # print('err v: %s' % ' '.join('%.2fm' % a for a in err_v*1000))
            print('err norm: %.2fm\n' % np.linalg.norm(err_v * 1000))

            if False and len(odo.state.map3d) > 0:
                pts3d = tools.q_times_mx(SystemModel.cv2gl_q.conj(),
                                         odo.get_3d_map_pts())
                tools.plot_vectors(pts3d)
Example #6
def plot_results(keyframes=None,
                 map3d=None,
                 frame_names=None,
                 meta_names=None,
                 ground_truth=None,
                 ba_errs=None,
                 file='result.pickle',
                 nadir_looking=False):

    import matplotlib.pyplot as plt
    from matplotlib import cm
    from mpl_toolkits.mplot3d import Axes3D

    # TODO: remove override
    nadir_looking = False

    if keyframes is None:
        with open(file, 'rb') as fh:
            keyframes, map3d, frame_names, meta_names, ground_truth, *ba_errs = pickle.load(
                fh)
            ba_errs = ba_errs[0] if len(ba_errs) else None

    w2b, b2c = NokiaSensor.w2b, NokiaSensor.b2c
    nl_dq = tools.eul_to_q(
        (-np.pi / 2, ), 'y') if nadir_looking else quaternion.one

    est_loc = np.ones((len(keyframes), 3)) * np.nan
    est_ori = np.ones((len(keyframes), 3)) * np.nan
    meas_loc = np.ones((len(keyframes), 3)) * np.nan
    meas_ori = np.ones((len(keyframes), 3)) * np.nan
    for i, kf in enumerate(keyframes):
        if kf and kf['pose'] and kf['pose'].post:
            if kf['pose'].method == VisualOdometry.POSE_2D3D:
                est_loc[i, :] = ((
                    -kf['pose'].post).to_global(b2c).to_global(w2b)).loc
                meas_loc[i, :] = ((
                    -kf['pose'].prior).to_global(b2c).to_global(w2b)).loc
                est_ori[i, :] = tools.q_to_ypr(
                    nl_dq.conj() * ((-kf['pose'].post).to_global(b2c)).quat)
                meas_ori[i, :] = tools.q_to_ypr(
                    nl_dq.conj() * ((-kf['pose'].prior).to_global(b2c)).quat)

    est_ori = est_ori / np.pi * 180
    meas_ori = meas_ori / np.pi * 180

    if nadir_looking:
        # TODO: better way, now somehow works heuristically
        est_ori = est_ori[:, (2, 0, 1)]
        meas_ori = meas_ori[:, (2, 0, 1)]

    fst = np.where(np.logical_not(np.isnan(est_loc[:, 0])))[0][0]
    idx = np.where([kf['pose'] is not None for kf in keyframes])[0]
    idx2 = np.where([
        kf['pose'] is not None and kf['meas'] is not None for kf in keyframes
    ])[0]
    t0 = keyframes[idx[0]]['time'].timestamp()

    t = np.array([keyframes[i]['time'].timestamp() - t0 for i in idx])
    t2 = np.array([
        keyframes[i]['time'].timestamp() - t0 + keyframes[i]['meas'].time_off +
        keyframes[i]['meas'].time_adj for i in idx2
    ])
    dt = np.array([keyframes[i]['meas'].time_adj for i in idx2])

    rng = np.nanmax(est_loc[idx, :2], axis=0) - np.nanmin(est_loc[idx, :2],
                                                          axis=0)
    rng2 = 0 if len(idx2) == 0 else np.nanmax(
        meas_loc[idx2, :2], axis=0) - np.nanmin(meas_loc[idx2, :2], axis=0)
    mrg = 0.05 * max(np.max(rng), np.max(rng2))
    min_x = min(np.nanmin(est_loc[idx, 0]),
                9e9 if len(idx2) == 0 else np.nanmin(meas_loc[idx2, 0])) - mrg
    max_x = max(np.nanmax(est_loc[idx, 0]),
                -9e9 if len(idx2) == 0 else np.nanmax(meas_loc[idx2, 0])) + mrg
    min_y = min(np.nanmin(est_loc[idx, 1]),
                9e9 if len(idx2) == 0 else np.nanmin(meas_loc[idx2, 1])) - mrg
    max_y = max(np.nanmax(est_loc[idx, 1]),
                -9e9 if len(idx2) == 0 else np.nanmax(meas_loc[idx2, 1])) + mrg
    min_z = min(np.nanmin(est_loc[idx, 2]),
                9e9 if len(idx2) == 0 else np.nanmin(meas_loc[idx2, 2])) - mrg
    max_z = max(np.nanmax(est_loc[idx, 2]),
                -9e9 if len(idx2) == 0 else np.nanmax(meas_loc[idx2, 2])) + mrg

    xx, yy = np.meshgrid(np.linspace(min_x, max_x, 10),
                         np.linspace(min_y, max_y, 10))
    zz = np.ones(xx.shape) * min(min_z + mrg / 2, 0)
    min_z = min(0, min_z)

    fig = plt.figure(1)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(xx, yy, zz, zorder=2)
    line = ax.plot(est_loc[idx, 0],
                   est_loc[idx, 1],
                   est_loc[idx, 2],
                   'C0',
                   zorder=3)
    ax.scatter(est_loc[fst, 0],
               est_loc[fst, 1],
               est_loc[fst, 2],
               'C2',
               zorder=4)
    line2 = ax.plot(meas_loc[idx2, 0],
                    meas_loc[idx2, 1],
                    meas_loc[idx2, 2],
                    'C1',
                    zorder=5)

    ax.set_xlim(min_x, max_x)
    ax.set_ylim(min_y, max_y)
    ax.set_zlim(min_z, max_z)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    tools.hover_annotate(fig, ax, line[0], frame_names)
    tools.hover_annotate(fig, ax, line2[0], [meta_names[i] for i in idx2])

    if 1:
        fig, axs = plt.subplots(1, 1)
        axs = [axs]
        axs[0].set_aspect('equal')
        rng_x = max_x - min_x
        rng_y = max_y - min_y

        if True or rng_x > rng_y:  # x-wide layout currently forced on
            axs[0].plot(est_loc[fst, 0], est_loc[fst, 1], 'oC0', mfc='none')
            line = axs[0].plot(est_loc[idx, 0], est_loc[idx, 1],
                               'C0')  # , '+-')
            line2 = axs[0].plot(meas_loc[idx2, 0], meas_loc[idx2, 1],
                                'C1')  # , '+-')
            axs[0].set_xlim(min_x, max_x)
            axs[0].set_ylim(min_y, max_y)
            axs[0].set_xlabel('x')
            axs[0].set_ylabel('y')
        else:
            axs[0].plot(est_loc[fst, 1], est_loc[fst, 0], 'oC0', mfc='none')
            line = axs[0].plot(est_loc[idx, 1], est_loc[idx, 0],
                               'C0')  # , '+-')
            line2 = axs[0].plot(meas_loc[idx2, 1], meas_loc[idx2, 0],
                                'C1')  # , '+-')
            axs[0].set_xlim(min_y, max_y)
            axs[0].set_ylim(min_x, max_x)
            axs[0].set_xlabel('y')
            axs[0].set_ylabel('x')

        tools.hover_annotate(fig, axs[0], line[0], frame_names)
        tools.hover_annotate(fig, axs[0], line2[0],
                             [meta_names[i] for i in idx2])

        fig, axs = plt.subplots(2, 1, sharex=True)
        axs[0].plot(t[fst], est_loc[fst, 2], 'oC0', mfc='none')
        line = axs[0].plot(t, est_loc[idx, 2], 'C0')  # , '+-')
        line2 = axs[0].plot(t2, meas_loc[idx2, 2], 'C1')  # , '+-')
        tools.hover_annotate(fig, axs[0], line[0], frame_names)
        tools.hover_annotate(fig, axs[0], line2[0],
                             [meta_names[i] for i in idx2])
        axs[0].set_ylabel('z')
        axs[0].set_xlabel('t')

        if ba_errs is None:
            line = axs[1].plot(t2, dt, 'C0')  #, '+-')
            tools.hover_annotate(fig, axs[1], line[0],
                                 [meta_names[i] for i in idx2])
            axs[1].set_ylabel('dt')
        else:
            id2idx = {kf['id']: i for i, kf in enumerate(keyframes)}
            t3 = [
                keyframes[id2idx[int(id)]]['time'].timestamp() - t0
                for id in ba_errs[:, 0]
            ]

            axs[1].plot(t3, ba_errs[:, 1], 'C0', label='repr [px]')
            axs[1].plot(t3, ba_errs[:, 2], 'C1', label='loc [m]')
            axs[1].plot(t3, ba_errs[:, 3], 'C2', label='ori [deg]')
            axs[1].set_title('BA errors')
            axs[1].legend()
        axs[1].set_xlabel('t')

        fig, axs = plt.subplots(3, 1, sharex=True)
        for i, title in enumerate(('yaw', 'pitch', 'roll')):
            axs[i].plot(t[fst], est_ori[fst, i], 'oC0', mfc='none')
            line = axs[i].plot(t, est_ori[idx, i], 'C0')  # , '+-')
            line2 = axs[i].plot(t2, meas_ori[idx2, i], 'C1')  # , '+-')
            tools.hover_annotate(fig, axs[i], line[0], frame_names)
            tools.hover_annotate(fig, axs[i], line2[0],
                                 [meta_names[j] for j in idx2])
            axs[i].set_ylabel(title)
            axs[i].set_xlabel('t')

    plt.tight_layout()

    if map3d is not None and len(map3d) > 0:
        x, y, z = np.array(
            [tools.q_times_v(w2b.quat * b2c.quat, pt.pt3d) for pt in map3d]).T

        if 1:
            fig, ax = plt.subplots(1, 1)
            ax.set_aspect('equal')
            ax.set_xlabel("east", fontsize=12)
            ax.set_ylabel("north", fontsize=12)
            line = ax.scatter(x,
                              y,
                              s=20,
                              c=z,
                              marker='o',
                              vmin=-5.,
                              vmax=100.,
                              cmap=cm.get_cmap('jet'))  #, vmax=20.)
            fig.colorbar(line)
            tools.hover_annotate(fig, ax, line, ['%.1f' % v for v in z])
        else:
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            ax.plot(x, y, z, '.')
            ax.set_xlabel("east", fontsize=12)
            ax.set_ylabel("north", fontsize=12)
            ax.set_zlabel("alt", fontsize=12)

    plt.show()
    print('ok: %.1f%%, delta loc std: %.3e' % (
        100 * (1 - np.mean(np.isnan(est_loc[:, 0]))),
        np.nanstd(np.linalg.norm(np.diff(est_loc[:, :3], axis=0), axis=1)),
    ))
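
A minimal sketch of the summary line printed at the end: the success rate is the share of keyframes with a pose estimate, and the jitter figure is the standard deviation of frame-to-frame location deltas (synthetic data):

import numpy as np

est_loc = np.array([[0., 0., 10.], [1., 0., 10.], [np.nan] * 3, [3., 0., 11.]])
ok = 100 * (1 - np.mean(np.isnan(est_loc[:, 0])))  # 75.0: one frame lacks a pose
jitter = np.nanstd(np.linalg.norm(np.diff(est_loc[:, :3], axis=0), axis=1))
print('ok: %.1f%%, delta loc std: %.3e' % (ok, jitter))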
Example #7
def main():
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Run visual odometry on a set of images')
    parser.add_argument('--data', '-d', metavar='DATA', help='path to data')
    parser.add_argument('--meta',
                        '-t',
                        metavar='META',
                        help='path to meta data')
    parser.add_argument('--video-toff',
                        '--dt',
                        type=float,
                        metavar='dT',
                        help='video time offset compared to metadata')
    parser.add_argument('--alt-scale',
                        '--as',
                        type=float,
                        default=1.0,
                        metavar='sc',
                        help='scale telemetry altitude with this value')
    parser.add_argument('--res',
                        '-r',
                        metavar='OUT',
                        help='path to the result pickle')
    parser.add_argument('--debug-out',
                        '-o',
                        metavar='OUT',
                        help='path to the debug output folder')
    parser.add_argument('--kapture',
                        metavar='OUT',
                        help='path to kapture-format export folder')
    parser.add_argument(
        '--verbosity',
        '-v',
        type=int,
        default=2,
        help=
        'verbosity level (0-4, 0-1: text only, 2:+debug imgs, 3: +keypoints, 4: +poses)'
    )
    parser.add_argument(
        '--high-quality',
        action='store_true',
        help='high quality settings with more keypoints detected')

    parser.add_argument('--mission',
                        '-mr',
                        choices=('hwproto', 'nokia', 'toynokia'),
                        help='select mission')
    parser.add_argument('--undist-img',
                        action='store_true',
                        help='undistort image instead of keypoints')
    parser.add_argument('--use-gimbal',
                        action='store_true',
                        help='gimbal data is ok, use it')
    parser.add_argument(
        '--drifting-gimbal',
        action='store_true',
        help=
        'gimbal orientation measure drifts, update orientation offset with each odometry result'
    )
    parser.add_argument('--nadir-looking',
                        action='store_true',
                        help='downwards looking cam')

    parser.add_argument(
        '--ori-off-ypr',
        type=float,
        nargs=3,
        help=
        'orientation offset in ypr (deg) to be applied in body frame to orientation measurements'
    )

    parser.add_argument('--cam-dist',
                        type=float,
                        nargs='*',
                        help='cam distortion coeffs')
    parser.add_argument('--cam-fl-x', type=float, help='cam focal length x')
    parser.add_argument('--cam-fl-y', type=float, help='cam focal length y')
    parser.add_argument('--cam-pp-x', type=float, help='cam principal point x')
    parser.add_argument('--cam-pp-y', type=float, help='cam principal point y')

    parser.add_argument('--first-frame',
                        '-f',
                        type=int,
                        default=0,
                        help='first frame (default: 0; -1: hardcoded value)')
    parser.add_argument('--last-frame',
                        '-l',
                        type=int,
                        help='last frame (default: None; -1: hardcoded end)')
    parser.add_argument('--skip',
                        '-s',
                        type=int,
                        default=1,
                        help='use only every xth frame (default: 1)')
    parser.add_argument('--plot-only',
                        action='store_true',
                        help='only plot the result')
    args = parser.parse_args()

    if args.verbosity > 1:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D

    # init odometry and data
    if args.mission == 'hwproto':
        mission = HardwarePrototype(args.data, last_frame=(155, 321, None)[0])
    elif args.mission == 'nokia':
        ff = (100, 250, 520, 750, 1250, 1500,
              1750)[0] if args.first_frame == -1 else args.first_frame
        lf = (240, 400, 640, 1000, 1500, 1750,
              2000)[1] if args.last_frame == -1 else args.last_frame

        cam_mx = None
        if args.cam_fl_x:
            fl_y = args.cam_fl_y or args.cam_fl_x
            pp_x = args.cam_pp_x or NokiaSensor.CAM_WIDTH / 2
            pp_y = args.cam_pp_y or NokiaSensor.CAM_HEIGHT / 2
            cam_mx = [[args.cam_fl_x, 0., pp_x], [0., fl_y, pp_y],
                      [0., 0., 1.]]
        mission = NokiaSensor(
            args.data,
            data_path=args.meta,
            video_toff=args.video_toff,
            use_gimbal=args.use_gimbal,
            undist_img=args.undist_img,
            cam_mx=cam_mx,
            cam_dist=args.cam_dist,
            verbosity=args.verbosity,
            high_quality=args.high_quality,
            alt_scale=args.alt_scale,
            ori_off_q=tools.ypr_to_q(*map(math.radians, args.ori_off_ypr))
            if args.ori_off_ypr else None,
            first_frame=ff,
            last_frame=lf)

    elif args.mission == 'toynokia':
        mission = ToyNokiaSensor(args.data,
                                 data_path=args.meta,
                                 video_toff=args.video_toff,
                                 first_frame=(100, 350, 850, 1650)[1],
                                 last_frame=(415, 500, 1250, 1850, 2000)[2])
    else:
        assert False, 'bad mission given: %s' % args.mission

    if args.debug_out:
        mission.odo._track_save_path = args.debug_out

    if args.nadir_looking:
        mission.odo._nadir_looking = True

    res_file = args.res or ('%s-result.pickle' % args.mission)
    if args.plot_only:
        plot_results(file=res_file, nadir_looking=args.nadir_looking)
        replay_keyframes(mission.odo.cam, file=res_file)
        return

    # run odometry
    prior = Pose(np.array([0, 0, 0]), quaternion.one)
    frame_names0 = []
    meta_names0 = []
    results0 = []
    ground_truth0 = []
    ori_offs = []
    ba_errs = []
    kfid2img = {}
    started = datetime.now()
    ax = None

    def ba_err_logger(frame_id, per_frame_ba_errs):
        per_frame_ba_errs = np.stack(
            (per_frame_ba_errs[:, 0],
             np.linalg.norm(per_frame_ba_errs[:, 1:4], axis=1),
             np.linalg.norm(per_frame_ba_errs[:, 4:7], axis=1) / np.pi * 180),
            axis=1)
        ba_errs.append([frame_id, *np.nanmean(per_frame_ba_errs, axis=0)])

    mission.odo.ba_err_logger = ba_err_logger
    vid_id = re.search(r'(^|\\|/)([\w-]+)\.mp4$', mission.video_path)

    for i, (img, t, name, meta, meta_name, gt) in enumerate(mission.data):
        if i % args.skip != 0:
            continue

        logger.info('')
        logger.info(name)
        frame_names0.append(name)
        meta_names0.append(meta_name)
        ground_truth0.append(gt)

        if vid_id[2] == 'HD_CAM-1__514659341_03_12_2020_16_12_44' and t > 1050:
            # hardcoding just to get dataset 10 to work
            mission.odo.ori_off_q = tools.ypr_to_q(math.radians(0), 0, 0)

        try:
            nf, *_ = mission.odo.process(img,
                                         datetime.fromtimestamp(mission.time0 +
                                                                t),
                                         measure=meta)

            if nf is not None and nf.id is not None:
                kfid2img[nf.id] = i

                if args.drifting_gimbal and nf.pose.post and meta:
                    est_w2c_bf_q = (-nf.pose.post).to_global(mission.b2c).quat
                    meas_w2c_bf_q = (-nf.pose.prior).to_global(
                        mission.b2c).quat * mission.odo.ori_off_q.conj()
                    new_ori_off_q = meas_w2c_bf_q.conj() * est_w2c_bf_q
                    filtered_ori_off_q = tools.mean_q(
                        [mission.odo.ori_off_q, new_ori_off_q], [0.8, 0.2])
                    if 0:
                        # TODO: debug, something wrong here as roll and pitch explode
                        mission.odo.ori_off_q = filtered_ori_off_q
                    else:
                        y, p, r = tools.q_to_ypr(filtered_ori_off_q)
                        mission.odo.ori_off_q = tools.ypr_to_q(y, 0, 0)
                    ori_offs.append(mission.odo.ori_off_q)
                    logger.info('new gimbal offset: %s' % (list(
                        map(lambda x: round(math.degrees(x), 2),
                            tools.q_to_ypr(mission.odo.ori_off_q))), ))

                pts3d = np.array([
                    pt.pt3d for pt in mission.odo.state.map3d.values()
                    if pt.active
                ])
                if len(pts3d) > 0 and nf.pose.post:
                    ground_alt = np.quantile(
                        -pts3d[:, 1], 0.2)  # neg-y is altitude in cam frame
                    drone_alt = -(-nf.pose.post).loc[1]
                    expected_dist = drone_alt - mission.coord0[2]
                    modeled_dist = drone_alt - ground_alt
                    logger.info(
                        'ground at %.1f m (%.1f m), drone alt %.1f m (%.1f m)'
                        % (ground_alt, mission.coord0[2], modeled_dist,
                           expected_dist))

                if args.verbosity > 3:
                    keyframes = mission.odo.all_keyframes()
                    post = np.zeros((len(keyframes), 7))
                    k, prior = 0, np.zeros((len(keyframes), 7))
                    for j, kf in enumerate(
                        [kf for kf in keyframes if kf.pose.post]):
                        post[j, :] = (-kf.pose.post).to_global(
                            NokiaSensor.b2c).to_global(
                                NokiaSensor.w2b).to_array()
                        if kf.measure is not None:
                            prior[k, :] = (-kf.pose.prior).to_global(
                                NokiaSensor.b2c).to_global(
                                    NokiaSensor.w2b).to_array()
                            k += 1
                    if ax is not None:
                        ax.clear()
                    ax = tools.plot_poses(post,
                                          axis=(0, 1, 0),
                                          up=(0, 0, 1),
                                          ax=ax,
                                          wait=False)
                    tools.plot_poses(prior[:k, :],
                                     axis=(0, 1, 0),
                                     up=(0, 0, 1),
                                     ax=ax,
                                     wait=False,
                                     colors=map(lambda c: (c, 0, 0, 0.5),
                                                np.linspace(.3, 1.0, k)))
                    plt.pause(0.05)

        except TypeError as e:
            if 0:
                nf, *_ = mission.odo.process(
                    img, datetime.fromtimestamp(mission.time0 + t), prior,
                    quaternion.one)
                if nf and nf.pose.post:
                    prior = nf.pose.post
            else:
                raise e

        results0.append(None if nf is None or nf.pose is None
                        or nf.pose.post is None else (
                            nf.pose, getattr(nf, 'measure', None), nf.time,
                            nf.id))

    try:
        mission.odo.flush_state()  # flush all keyframes and keypoints
        map3d = mission.odo.removed_keypoints
        #        results = [(kf.pose, kf.measure, kf.time, kf.id) for kf in mission.odo.removed_keyframes if kf.pose.post]
        keyframes = [
            dict(pose=kf.pose,
                 meas=kf.measure,
                 time=kf.time,
                 id=kf.id,
                 kps_uv=kf.kps_uv) for kf in mission.odo.removed_keyframes
            if kf.pose.post
        ]
        frame_names = [frame_names0[kfid2img[kf['id']]] for kf in keyframes]
        meta_names = [meta_names0[kfid2img[kf['id']]] for kf in keyframes]
        ground_truth = [ground_truth0[kfid2img[kf['id']]] for kf in keyframes]
        ba_errs = np.array(ba_errs)

        if 1:
            pts3d = np.array([pt.pt3d for pt in map3d])
            ground_alt = np.quantile(-pts3d[:, 1],
                                     0.5)  # neg-y is altitude in cam frame
            drone_alt = -(-keyframes[-1]['pose'].post).loc[1]  # world frame
            expected_dist = drone_alt - mission.coord0[2]
            modeled_dist = drone_alt - ground_alt
            logger.info(
                'ground at %.1f m (%.1f m), drone alt %.1f m (%.1f m)' %
                (ground_alt, mission.coord0[2], modeled_dist, expected_dist))

        if args.kapture:
            kapture = KaptureIO(args.kapture,
                                reset=True,
                                jpg_qlt=95,
                                scale=0.5)
            kapture.set_camera(1, 'cam', mission.cam)
            kapture.add_frames(mission.odo.removed_keyframes, map3d)
            kapture.write_to_dir()
    except AttributeError as e:
        if 0:
            map3d = None
            results = [(kf.pose, None, kf.time, kf.id)
                       for kf in mission.odo.all_keyframes()]
        else:
            raise e

    mission.odo.quit()
    if 0:
        # show results as given by the online version of the algorithm
        results, frame_names, meta_names, ground_truth = results0, frame_names0, meta_names0, ground_truth0

    logger.info('time spent: %.0fs' %
                (datetime.now() - started).total_seconds())

    repr_errs = np.concatenate([
        list(kf.repr_err.values()) for kf in mission.odo.removed_keyframes
        if len(kf.repr_err)
    ])
    err_q95 = np.quantile(np.linalg.norm(repr_errs, axis=1),
                          0.95) if len(repr_errs) else np.nan
    logger.info('95%% percentile repr err: %.3fpx' % (err_q95, ))

    interp = interp_loc(mission.odo.removed_keyframes, mission.time0)
    loc_est = np.array([
        np.ones((3, )) * np.nan if kf.pose.post is None else
        (-kf.pose.post).loc for kf in mission.odo.removed_keyframes
    ])
    loc_gps = np.array([
        interp(kf.time.timestamp() - mission.time0).flatten()
        for kf in mission.odo.removed_keyframes
    ]).squeeze()
    mean_loc_err = np.nanmean(np.linalg.norm(loc_est - loc_gps, axis=1))
    logger.info('mean loc err: %.3fm' % (mean_loc_err, ))
    logger.info('mean ba errs (repr, loc, ori): %.3fpx %.3fm %.3fdeg' %
                (*np.nanmean(ba_errs[:, 1:], axis=0), ))
    logger.info('latest ba errs (repr, loc, ori): %.3fpx %.3fm %.3fdeg' %
                (*ba_errs[-1, 1:], ))

    if 0:
        plt.figure(10)
        plt.plot(loc_est[:, 0], loc_est[:, 1])
        plt.plot(loc_gps[:, 0], loc_gps[:, 1])
        plt.show()

    if args.verbosity > 3:
        plt.show()  # stop to show last trajectory plot

    res_folder = os.path.dirname(res_file)
    if res_folder:
        os.makedirs(res_folder, exist_ok=True)
    with open(res_file, 'wb') as fh:
        pickle.dump(
            (keyframes, map3d, frame_names, meta_names, ground_truth, ba_errs),
            fh)

    if args.verbosity > 1:
        plot_results(keyframes,
                     map3d,
                     frame_names,
                     meta_names,
                     ground_truth,
                     ba_errs,
                     res_file,
                     nadir_looking=args.nadir_looking)
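
A minimal sketch of the drifting-gimbal offset update above: each frame's pose estimate implies an orientation offset, which is blended into the running offset. A normalized weighted sum stands in for tools.mean_q here, which is a fair approximation for nearby quaternions:

import numpy as np
import quaternion  # numpy-quaternion package

def blend(q_old, q_new, w_new=0.2):
    # chordal weighted mean of two unit quaternions (stand-in for tools.mean_q)
    if (q_old.conj() * q_new).w < 0:  # keep both on the same hemisphere
        q_new = -q_new
    c = (1 - w_new) * q_old.components + w_new * q_new.components
    return np.quaternion(*c).normalized()

ori_off_q = quaternion.one
for _ in range(20):
    est_w2c_q = quaternion.from_rotation_vector([0, 0, 0.05])  # drifted estimate
    meas_w2c_q = quaternion.one                                # raw measurement
    new_off_q = meas_w2c_q.conj() * est_w2c_q
    ori_off_q = blend(ori_off_q, new_off_q)
print(ori_off_q)  # converges towards the 0.05 rad yaw offset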
Example #8
    def flux_density(cam_q,
                     cam,
                     mask=None,
                     mag_cutoff=MAG_CUTOFF,
                     array=False,
                     undistorted=False,
                     order_by=None):
        """
        plots stars based on Tycho-2 database, gives out photon count per unit area given exposure time in seconds,
        cam_q is a quaternion in ICRS coord frame, x_fov and y_fov in degrees
        """

        # calculate query conditions for star ra and dec
        cam_dec, cam_ra, _ = tools.q_to_ypr(
            cam_q)  # camera boresight in ICRS coords
        d = np.linalg.norm((cam.x_fov, cam.y_fov)) / 2

        min_dec, max_dec = math.degrees(cam_dec) - d, math.degrees(cam_dec) + d
        dec_cond = '(dec BETWEEN %s AND %s)' % (min_dec, max_dec)

        # if the FOV reaches over a pole, the RA window wraps the whole circle;
        # easy solution => ignore the limit on ra
        skip_ra_cond = min_dec < -90 or max_dec > 90

        if skip_ra_cond:
            ra_cond = '1'
        else:
            min_ra, max_ra = math.degrees(cam_ra) - d, math.degrees(cam_ra) + d
            if min_ra < 0:
                ra_cond = '(ra < %s OR ra > %s)' % (max_ra,
                                                    (min_ra + 360) % 360)
            elif max_ra > 360:
                ra_cond = '(ra > %s OR ra < %s)' % (min_ra, max_ra % 360)
            else:
                ra_cond = '(ra BETWEEN %s AND %s)' % (min_ra, max_ra)

        conn = sqlite3.connect(Stars.STARDB)
        cursor = conn.cursor()
        # the magnitudes for tycho id xxxx-xxxxx-2 entries are bad, as they are most likely
        # taken from the hip catalog, which bundles all xxxx-xxxxx-* components into one entry
        results = cursor.execute(
            """
            SELECT x, y, z, mag_v""" +
            (", mag_b, t_eff, fe_h, log_g, dec, ra, id" if array else "") + """
            FROM deep_sky_objects
            WHERE """ + ("tycho like '%-1' AND " if Stars.STARDB ==
                         Stars.STARDB_TYC else "") + "mag_v < " +
            str(mag_cutoff) + " AND " + dec_cond + " AND " + ra_cond +
            ((" ORDER BY %s ASC" % order_by) if order_by is not None else ''))
        stars = np.array(results.fetchall())
        conn.close()

        flux_density = ([], None) if array else np.zeros(
            (cam.height, cam.width), dtype=np.float32)
        if len(stars) == 0:
            return flux_density

        stars[:, 0:3] = tools.q_times_mx(
            SystemModel.sc2gl_q.conj() * cam_q.conj(), stars[:, 0:3])
        stars_ixy_ = cam.calc_img_R(stars[:, 0:3], undistorted=undistorted)
        stars_ixy = np.round(stars_ixy_).astype(int)  # np.float/np.int aliases removed in modern numpy
        I = np.logical_and.reduce(
            (np.all(stars_ixy >= 0, axis=1), stars_ixy[:, 0] <= cam.width - 1,
             stars_ixy[:, 1] <= cam.height - 1))
        if array:
            cols = ('ix', 'iy', 'x', 'y', 'z', 'mag_v', 'mag_b', 't_eff',
                    'fe_h', 'log_g', 'dec', 'ra', 'id')
            return (np.hstack((stars_ixy_[I, :], stars[I, :])),
                    dict(zip(cols, range(len(cols)))))

        stars_ixy = stars_ixy[I, :]
        flux_density_per_star = Stars.magnitude_to_spectral_flux_density(
            stars[I, 3])
        for i, f in enumerate(flux_density_per_star):
            flux_density[stars_ixy[i, 1], stars_ixy[i, 0]] += f

        if mask is not None:
            flux_density[np.logical_not(mask)] = 0

        if True:
            # assume every star is like our sun, convert to total flux density [W/m2]
            solar_constant = 1360.8
            # sun magnitude from http://mips.as.arizona.edu/~cnaw/sun.html
            sun_flux_density = Stars.magnitude_to_spectral_flux_density(
                Stars.SUN_MAG_V)
            flux_density = flux_density * (solar_constant / sun_flux_density)

        return flux_density
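
A minimal sketch of the magnitude-to-flux arithmetic this function (and the star merging in the next example) relies on: magnitude is -2.5 * log10(flux), so fluxes add while magnitudes do not:

import numpy as np

def mag_to_flux(mag, f0=1.0):
    # relative flux for a given magnitude; f0 sets the zero point
    return f0 * 10 ** (-mag / 2.5)

# combined magnitude of a close pair rendered as a single star
mags = np.array([3.0, 3.5])
combined = -2.5 * np.log10(np.sum(mag_to_flux(mags)))
print(combined)  # ~2.47, brighter than either component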
Example #9
    def _match_stars(self,
                     stars,
                     max_dist=0.05,
                     max_mag_diff=2.0,
                     mag_cutoff=3.0,
                     plot=False):
        """ match stars based on proximity """
        merge_lim = 4
        all_stars, cols = Stars.flux_density(self.q,
                                             self.cam[0],
                                             array=True,
                                             undistorted=True,
                                             mag_cutoff=mag_cutoff + merge_lim,
                                             order_by='mag_v')
        if self.debug:
            db_img = np.sqrt(
                Stars.flux_density(self.q, self.cam[0], mag_cutoff=10.0))

        # override some star data, change None => nan
        for i, st in enumerate(all_stars):
            for j in range(len(st)):
                st[j] = np.nan if st[j] is None else st[j]
            if st[cols['id']] in self.override_star_data:
                for f in ('mag_v', 'mag_b', 't_eff', 'log_g', 'fe_h'):
                    od = self.override_star_data[st[cols['id']]]
                    if f in od:
                        all_stars[i][cols[f]] = od[f]

        # merge close stars
        all_stars = np.array(all_stars)
        points = np.array([(s[cols['ix']], s[cols['iy']]) for s in all_stars])
        D = tools.distance_mx(points, points)
        radius = 10 if self.cam[0].width > 300 else 2
        db_stars = []
        added = set()
        for i, s in enumerate(all_stars):
            if i in added:
                continue
            I = tuple(
                set(
                    np.where(
                        np.logical_and(
                            D[i, :] < radius, all_stars[:, cols['mag_v']] -
                            merge_lim < s[cols['mag_v']]))[0]) - added)
            cluster = [None] * (max(cols.values()) + 1)
            cluster[cols['id']] = tuple(all_stars[I, cols['id']].astype(int))
            amag_v = 10**(-all_stars[I, cols['mag_v']] / 2.5)
            amag_b = 10**(-all_stars[I, cols['mag_b']] / 2.5)
            cluster[cols['mag_v']] = -2.5 * np.log10(np.sum(amag_v))
            cluster[cols['mag_b']] = -2.5 * np.log10(np.sum(amag_b))
            for c in ('ix', 'iy', 'dec', 'ra', 't_eff', 'fe_h', 'log_g'):
                E = np.where(all_stars[I, cols[c]] != None)[0]
                cluster[cols[c]] = np.sum(amag_v[E] *
                                          all_stars[I, cols[c]][E]) / np.sum(
                                              amag_v[E]) if len(E) else None
            if cluster[cols['mag_v']] < mag_cutoff:
                added.update(I)
                db_stars.append(cluster)

        img_st = np.array([(s['x'], s['y'], s['mag'], s['size'])
                           for s in stars])
        db_st = np.array([(s[cols['ix']], s[cols['iy']], s[cols['mag_v']])
                          for s in db_stars])

        # adjust magnitudes to match; hard to match them directly as the image sensor removes an unknown black level
        #b0, b1 = np.min(img_st[:, 2]), np.min(db_st[:, 2])
        #d0, d1 = np.max(img_st[:, 2]), np.max(db_st[:, 2])
        #img_st[:, 2] = (img_st[:, 2] - b0) * (d1-b1)/(d0-b0) + b1
        #img_st[:, 2] = np.log10((10**img_st[:, 2] - 10**b0) * (10**d1-10**b1)/(10**d0-10**b0) + 10**b1)
        img_st[:, 2] = img_st[:, 2] - np.median(img_st[:, 2]) + np.median(
            db_st[:, 2])

        if self.cam[0].dist_coefs is not None:
            db_st[:, :2] = Camera.distort(
                db_st[:, :2], self.cam[0].dist_coefs,
                self.cam[0].intrinsic_camera_mx(legacy=False),
                self.cam[0].inv_intrinsic_camera_mx(legacy=False))

        M = (np.abs(
            np.repeat(
                np.expand_dims(img_st[:, 2:3], axis=0), len(db_st), axis=0) -
            np.repeat(
                np.expand_dims(db_st[:, 2:3], axis=1), len(img_st), axis=1))
             ).squeeze()
        D = np.repeat(np.expand_dims(img_st[:, :2], axis=0), len(db_st), axis=0) \
            - np.repeat(np.expand_dims(db_st[:, :2], axis=1), len(img_st), axis=1)
        D = np.sum(D**2, axis=2)
        D = D.flatten()
        D[M.flatten() > max_mag_diff] = np.inf
        D = D.reshape(M.shape)
        idxs = np.argmin(D, axis=0)
        max_dist = (self.image.shape[1] * max_dist)**2

        m_idxs = {}
        for i1, j in enumerate(idxs):
            dist = D[j, i1]
            if dist > max_dist or j in m_idxs and dist > D[j, m_idxs[j]]:
                # discard match if distance too high or better match for same db star available
                continue
            m_idxs[j] = i1

        matches = [None] * len(idxs)
        for j, i in m_idxs.items():
            matches[i] = db_stars[j]

        if self.debug and DEBUG_MATCHING or plot:
            if plot:
                norm = ImageNormalize(stretch=SqrtStretch())
                size = np.median(img_st[:, 3].astype('int'))
                data = np.mean(self.image.astype(np.float64) /
                               (2**self.bits - 1),
                               axis=2)

                ud_I = set(range(len(db_st))) - set(m_idxs.keys())
                d_I = set(range(len(img_st))) - set(m_idxs.values())
                # detected_pos = img_st[tuple(d_I), :2].astype('int')
                # matched_pos = img_st[tuple(m_idxs.values()), :2].astype('int')
                # undetected_pos = db_st[tuple(ud_I), :2].astype('int')

                plt.imshow(data, cmap='Greys', norm=norm)
                for i in d_I:  # detected
                    CircularAperture(img_st[i, :2].astype('int'),
                                     r=img_st[i, 3].astype('int')).plot(
                                         color='blue', lw=1.5, alpha=0.5)
                for i in m_idxs.values():  # matched
                    CircularAperture(img_st[i, :2].astype('int'),
                                     r=img_st[i, 3].astype('int')).plot(
                                         color='green', lw=1.5, alpha=0.5)
                for i in ud_I:  # undetected
                    CircularAperture(db_st[i, :2].astype('int'),
                                     r=size).plot(color='red',
                                                  lw=1.5,
                                                  alpha=0.5)
                plt.show()

            else:
                dec, ra, pa = map(math.degrees, tools.q_to_ypr(self.q))
                print('ra: %.1f, dec: %.1f, pa: %.1f' % (ra, dec, pa))

                sc, isc = 1, (1024 if 0 else 2800) / (self.image.shape[1] * 2)
                img = np.sqrt(self.image)
                img = ((img / np.max(img)) * 255).astype('uint8')
                img = cv2.resize(img,
                                 None,
                                 fx=isc,
                                 fy=isc,
                                 interpolation=cv2.INTER_AREA)
                cv2.drawKeypoints(img, [
                    cv2.KeyPoint(x * isc, y * isc, 60 * sc)
                    for x, y in db_st[:, :2]
                ], img, [0, 255, 0],
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
                cv2.drawKeypoints(img, [
                    cv2.KeyPoint(x * isc, y * isc, 60 * sc)
                    for x, y in img_st[:, :2]
                ], img, [255, 0, 0],
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
                for j, i in m_idxs.items():
                    cv2.line(
                        img,
                        tuple(np.round(img_st[i, :2] * isc).astype('int')),
                        tuple(np.round(db_st[j, :2] * isc).astype('int')),
                        [0, 255, 0])
                if DEBUG_MATCHING != 3:
                    for j, pt in enumerate(db_st[:, :2]):
                        cv2.putText(img, str(j),
                                    tuple(np.round(pt * isc).astype('int')),
                                    cv2.FONT_HERSHEY_SIMPLEX, 3 * sc,
                                    [0, 255, 0])
                else:
                    for i, pt in enumerate(img_st[:, :2]):
                        text = '%.2f' % img_st[i, 2]
                        #text = str(i)
                        cv2.putText(img, text,
                                    tuple(np.round(pt * isc).astype('int')),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.6 * sc,
                                    [0, 0, 255])

                db_img = np.repeat(np.expand_dims(db_img, axis=2),
                                   self.image.shape[2],
                                   axis=2)
                db_img = ((db_img / np.max(db_img)) * 255).astype('uint8')
                db_img = cv2.resize(db_img,
                                    None,
                                    fx=isc,
                                    fy=isc,
                                    interpolation=cv2.INTER_AREA)

                for j, pt in enumerate(db_st[:, :2]):
                    if DEBUG_MATCHING == 1:
                        text = '&'.join([
                            Stars.get_catalog_id(id)
                            for id in db_stars[j][cols['id']]
                        ])
                    elif DEBUG_MATCHING == 2:
                        t_eff = db_stars[j][cols['t_eff']]
                        t_eff2 = Stars.effective_temp(
                            db_stars[j][cols['mag_b']] -
                            db_stars[j][cols['mag_v']])
                        #if 1:
                        #    print('%s Teff: %s (%.1f)' % (Stars.get_catalog_id(db_stars[j][cols['id']]), t_eff, t_eff2))
                        text = ('%dK' % t_eff) if t_eff else ('(%dK)' % t_eff2)
                    elif DEBUG_MATCHING == 3:
                        text = '%.2f' % db_stars[j][cols['mag_v']]
                    cv2.putText(
                        db_img, text,
                        tuple(
                            np.round(pt * isc +
                                     np.array([5, -5])).astype('int')),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.6 * sc, [255, 0, 0])

                cv2.drawKeypoints(db_img, [
                    cv2.KeyPoint(x * isc, y * isc, 60 * sc)
                    for x, y in db_st[:, :2]
                ], db_img, [0, 255, 0],
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

                img = np.hstack(
                    (db_img,
                     np.ones(
                         (img.shape[0], 1, img.shape[2]), dtype='uint8') * 255,
                     img))
                cv2.imshow('test', img)
                cv2.waitKey()

    #            plt.figure(1, (16, 12))
    #            plt.imshow(np.flip(img, axis=2))
    #            plt.tight_layout()
    #            plt.show()

        return matches, cols
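
A minimal sketch of the gated nearest-neighbour matching in the middle of this method: pair image stars with catalogue stars by pixel distance, reject pairs whose magnitudes differ by more than max_mag_diff, and keep only the closest image star per catalogue star (the max_dist pixel gate is omitted for brevity):

import numpy as np

img_st = np.array([[10.0, 12.0, 3.0], [50.0, 55.0, 4.2]])                  # x, y, mag
db_st = np.array([[11.0, 12.5, 3.1], [49.0, 54.0, 4.0], [200.0, 5.0, 6.0]])

D = np.sum((db_st[:, None, :2] - img_st[None, :, :2]) ** 2, axis=2)  # sq px distances
M = np.abs(db_st[:, None, 2] - img_st[None, :, 2])                   # mag differences
D[M > 2.0] = np.inf                                                  # magnitude gate

m_idxs = {}
for i, j in enumerate(np.argmin(D, axis=0)):  # best catalogue star per image star
    if np.isfinite(D[j, i]) and (j not in m_idxs or D[j, i] < D[j, m_idxs[j]]):
        m_idxs[j] = i                         # keep the closer of duplicate matches
print(m_idxs)                                 # {0: 0, 1: 1}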