Example #1
    def calc_err(self, rvec, tvec, i1, j1, warn=False):
        q_res = tools.angleaxis_to_q(rvec)
        lat1, roll1 = self._fdb_sc_ast_perms[i1]
        lat2, roll2 = self._fdb_sc_ast_perms[j1]
        q_src = tools.ypr_to_q(lat1, 0, roll1)
        q_trg = tools.ypr_to_q(lat2, 0, roll2)
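        # expected relative rotation between the two sampled orientations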
        q_rel = q_trg * q_src.conj()

        # q_res.x = -q_res.x
        # np.quaternion(0.707106781186547, 0, -0.707106781186547, 0)
        m = self.system_model
        q_frame = m.frm_conv_q(m.OPENGL_FRAME, m.OPENCV_FRAME)
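        # invert the estimated rotation and change its basis between the OpenGL and OpenCV frames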
        q_res = q_frame * q_res.conj() * q_frame.conj()

        err1 = math.degrees(
            tools.wrap_rads(tools.angle_between_q(q_res, q_rel)))
        err2 = np.linalg.norm(tvec -
                              np.array((0, 0, -self.render_z)).reshape((3, 1)))
        ok = not (abs(err1) > 10 or abs(err2) > 0.10 * abs(self.render_z))

        if not ok and warn:
            print(
                'at (%s, %s), err1: %.1fdeg, err2: %.1fkm\n\tq_real: %s\n\tq_est:  %s'
                % (i1, j1, err1, err2, q_rel, q_res))

        return ok, err1, err2
Example #2
    def initialize_frame(self, time, image, measure):
        nf = super(VisualGPSNav, self).initialize_frame(time, image, measure)
        if nf.measure:
            old_kf = [kf for kf in self.state.keyframes if kf.measure]
            nf.measure.time_adj = old_kf[-1].measure.time_adj if len(old_kf) > 0 else 0

            geodetic_origin = self.geodetic_origin
            lat, lon, alt = nf.measure.data[0:3]
            roll, pitch, yaw = nf.measure.data[3:6]
            b2c_roll, b2c_pitch, b2c_yaw = nf.measure.data[6:9]

            # world frame: +z up, +x is east, +y is north
            # body frame: +z down, +x is fw towards north, +y is right wing (east)
            # camera frame: +z into the image plane, -y is up, +x is right

            # wf2bf_q = tools.eul_to_q((np.pi, -np.pi / 2), 'xz')
            # bf2cf = Pose(None, tools.eul_to_q((np.pi / 2, np.pi / 2), 'yz'))

            if 1:
                w2b_bf_q = tools.ypr_to_q(yaw, pitch, roll)
                b2c_bf_q = tools.ypr_to_q(b2c_yaw, b2c_pitch, b2c_roll)
                yaw, pitch, roll = tools.q_to_ypr(w2b_bf_q * b2c_bf_q * (self.ori_off_q or quaternion.one))
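                # convert measured lat/lon/alt and yaw/pitch/roll into a world-to-camera pose in the camera frame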
                cf_w2c = tools.lla_ypr_to_loc_quat(geodetic_origin, [lat, lon, alt], [yaw, pitch, roll], b2c=self.bf2cf)
                if len(old_kf) > 0:
                    cf_w2c.vel = (cf_w2c.loc - (-old_kf[-1].pose.prior).loc) / (nf.time - old_kf[-1].time).total_seconds()
                else:
                    cf_w2c.vel = np.array([0, 0, 0])
                nf.pose.prior = -cf_w2c
            else:
                wf_body_r = tools.to_cartesian(lat, lon, alt, *geodetic_origin)
                bf_world_body_q = tools.ypr_to_q(yaw, pitch, roll)
                if 0:
                    cf_world_q = bf_cam_q.conj() * bf_world_body_q.conj()

                    cf_body_r = tools.q_times_v(bf_cam_q.conj(), -bf_cam_r)
                    cf_body_world_r = tools.q_times_v(cf_world_q * self.wf2bf_q.conj(), -wf_body_r)

                    nf.pose.prior = Pose(cf_body_r + cf_body_world_r, cf_world_q * bf_cam_q)
                else:
                    bf_world_q = bf_world_body_q.conj() * self.wf2bf_q.conj()
                    cf_world_q = bf_cam_q.conj() * bf_world_q

                    cf_body_r = tools.q_times_v(bf_cam_q.conj(), -bf_cam_r)
                    cf_body_world_r = tools.q_times_v(cf_world_q, -wf_body_r)
                    # bf_cam_q.conj() * bf_world_body_q.conj() * self.wf2bf_q.conj()

                    nf.pose.prior = Pose(cf_body_r + cf_body_world_r, bf_cam_q.conj() * bf_world_body_q.conj() * bf_cam_q)
                    # bf_cam_q.conj() * bf_world_body_q.conj() * bf_cam_q

                    # (NokiaSensor.w2b_q * NokiaSensor.b2c_q) * prior.quat.conj()
                    # self.wf2bf_q * bf_cam_q * (bf_cam_q.conj() * bf_world_body_q.conj() * bf_cam_q).conj()
                    # => self.wf2bf_q * bf_cam_q //*// bf_cam_q.conj() * bf_world_body_q * bf_cam_q //*//
                    # self.wf2bf_q * bf_cam_q //*// bf_cam_q.conj() * bf_world_body_q * bf_cam_q //*// bf_cam_q.conj() * bf_world_body_q.conj() * self.wf2bf_q.conj()

        return nf
Example #3
def main():
    cam_mx = np.array([[1580.356552, 0.000000, 994.026697],
                       [0.000000, 1580.553177, 518.938726],
                       [0.000000, 0.000000, 1.000000]])

    sc_v0, sc_v1 = np.array([0, 0, -100]), np.array([0, -5, -100])
    sc_q0, sc_q1 = tools.ypr_to_q(0, 0, 0), tools.ypr_to_q(0, 0, math.radians(-2))

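    # invert the camera-frame poses to get world-frame orientations and positions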
    w_q0, w_q1 = sc_q0.conj(), sc_q1.conj()
    w_v0, w_v1 = tools.q_times_v(w_q0, -sc_v0), tools.q_times_v(w_q1, -sc_v1)

    f0 = Frame(0, None, 1.0, PoseEstimate(None, Pose(w_v0, w_q0), 0))
    f1 = Frame(1, None, 1.0, PoseEstimate(None, Pose(w_v1, w_q1), 0))

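    # 100 random homogeneous 3d points on the z=0 plane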
    ref_pts4d = np.concatenate(
        (np.random.uniform(-30, 30, (100, 2)), np.zeros(
            (100, 1)), np.ones((100, 1))),
        axis=1)

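    # project the points with both camera matrices and normalize the homogeneous coordinates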
    P0, P1 = cam_mx.dot(f0.to_mx()), cam_mx.dot(f1.to_mx())
    uvs0, uvs1 = P0.dot(ref_pts4d.T).T, P1.dot(ref_pts4d.T).T
    uvs0, uvs1 = uvs0[:, :2] / uvs0[:, 2:], uvs1[:, :2] / uvs1[:, 2:]

    plt.figure(1)
    for uv0, uv1 in zip(uvs0, uvs1):
        plt.plot([uv0[0], uv1[0]], [uv0[1], uv1[1]])
    plt.plot(uvs1[:, 0], uvs1[:, 1], '.')
    plt.gca().invert_yaxis()

    pts3d = []
    for uv0, uv1 in zip(uvs0, uvs1):
        kp4d = cv2.triangulatePoints(P0, P1, uv0.reshape((-1, 1, 2)),
                                     uv1.reshape((-1, 1, 2)))
        pt3d = (kp4d.T[:, :3] / kp4d.T[:, 3:])[0]
        pts3d.append(pt3d)

    pts3d = np.array(pts3d)

    fig = plt.figure(2)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot(pts3d[:, 0], pts3d[:, 1], pts3d[:, 2], '.')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.show()
    print('done')
Example #4
def main():
    parser = argparse.ArgumentParser(description='Estimate orientation offset between body and cam on gimbal')
    parser.add_argument('--res', required=True, help='path to result pickle')
    parser.add_argument('--skip', default=0, type=int, help='skip first x frames')
    parser.add_argument('--huber-coef', default=0, type=float, help='use pseudo-Huber loss')
    parser.add_argument('--nadir-looking', action='store_true', help='better plot for nadir looking cam')
    parser.add_argument('--init-ypr', default=[0.0, 0.0, 0.0], type=float, nargs=3)
    args = parser.parse_args()

    with open(args.res, 'rb') as fh:
        results, map3d, frame_names, meta_names, ground_truth, *_ = pickle.load(fh)

    # (kf.pose, kf.measure, kf.time, kf.id)
    meas_q = np.array([tools.ypr_to_q(*np.flip(meas.data[3:6])) for pose, meas, _, _ in results if meas and pose.post])
    est_q = np.array([(-pose.post).to_global(NokiaSensor.b2c).quat for pose, meas, _, _ in results if meas and pose.post])

    meas_q = meas_q[args.skip:]
    est_q = est_q[args.skip:]

    print('optimizing orientation offset based on %d measurements...' % (len(meas_q),))

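    # residuals: angular distance between estimated orientations and offset-corrected measurements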
    def costfun(x, meas_q, est_q):
        off_q = np.quaternion(*x).normalized()
        delta_angles = tools.angle_between_q_arr(est_q, meas_q * off_q)
        return delta_angles

    x0 = tools.ypr_to_q(*(np.array(args.init_ypr)/180*np.pi)).components
    res = least_squares(costfun, x0, verbose=2, ftol=1e-5, xtol=1e-5, gtol=1e-8, max_nfev=1000,
                        loss='huber' if args.huber_coef > 0 else 'linear', f_scale=args.huber_coef or 1.0,
                        args=(meas_q, est_q))

    off_q = np.quaternion(*res.x).normalized()

    print('offset q: %s, ypr: %s' % (off_q, np.array(tools.q_to_ypr(off_q)) / np.pi * 180,))

    args.nadir_looking = False  # TODO: remove override
    nl_dq = tools.eul_to_q((-np.pi / 2,), 'y') if args.nadir_looking else quaternion.one
    est_ypr = np.array([tools.q_to_ypr(nl_dq.conj() * q) for q in est_q]) / np.pi * 180
    corr_ypr = np.array([tools.q_to_ypr(nl_dq.conj() * q * off_q) for q in meas_q]) / np.pi * 180

    plt.plot(est_ypr[:, 0], 'C0:')
    plt.plot(est_ypr[:, 1], 'C1:')
    plt.plot(est_ypr[:, 2], 'C2:')
    plt.plot(corr_ypr[:, 0], 'C0-')
    plt.plot(corr_ypr[:, 1], 'C1-')
    plt.plot(corr_ypr[:, 2], 'C2-')
    plt.show()
Example #5
 def set_orientation(self, q=None, angleaxis=None, dec_ra_pa=None):
     if q is not None:
         self.q = q
     elif angleaxis is not None:
         self.q = tools.angleaxis_to_q(angleaxis)
     else:
         assert dec_ra_pa is not None, 'no orientation given'
         dec, ra, pa = map(math.radians, dec_ra_pa)
         self.q = tools.ypr_to_q(dec, ra, pa)
Example #6
def load_image_data(image_filename, table_file):
    cols = [
        "OBSERVATION_END_MET", "IMAGE_FILENAME", "OBSERVATION_END_TIME",
        "SPC_X", "SPC_Y", "SPC_Z", "AST_J2_X", "AST_J2_Y", "AST_J2_Z",
        "SPC_J2_X", "SPC_J2_Y", "SPC_J2_Z", "BODY_SURFACE_DISTANCE",
        "CENTER_LON", "CENTER_LAT", "CENTER_PIXEL_RES",
        "CELESTIAL_N_CLOCK_ANGLE", "BODY_POLE_CLOCK_ANGLE",
        "BODY_POLE_ASPECT_ANGLE", "SUN_DIR_CLOCK_ANGLE", "RIGHT_ASCENSION",
        "DECLINATION", "SUBSOLAR_LON", "SUBSOLAR_LAT", "INCIDENCE_ANGLE",
        "EMISSION_ANGLE", "PHASE_ANGLE", "SOLAR_ELONGATION", "SUB_SC_LON",
        "SUB_SC_LAT", "BODY_CENTER_DISTANCE", "PIXEL_OFFSET_X",
        "PIXEL_OFFSET_Y", "AST_SUN_ROT_ANGLE"
    ]
    idx = dict(zip(cols, range(len(cols))))

    with open(table_file, 'r') as fh:
        alldata = [re.split(r'\s+', row)[1:] for row in fh]

    d = None
    for row in alldata:
        if row[idx['IMAGE_FILENAME']] == image_filename:
            d = row
            break

    assert d is not None, 'data for image %s not found' % image_filename

    # spacecraft orientation, equatorial J2000
    sc_rot_ra = float(d[idx['RIGHT_ASCENSION']])
    sc_rot_dec = float(d[idx['DECLINATION']])
    sc_rot_cnca = float(d[idx['CELESTIAL_N_CLOCK_ANGLE']])
    sc_igrf_q = tools.ypr_to_q(
        rads(sc_rot_dec), rads(sc_rot_ra),
        -rads(sc_rot_cnca))  # same with rosetta lbls also

    sun_ast_igrf_r = np.array(
        [d[idx['AST_J2_X']], d[idx['AST_J2_Y']],
         d[idx['AST_J2_Z']]]).astype(float)
    sun_sc_igrf_r = np.array(
        [d[idx['SPC_J2_X']], d[idx['SPC_J2_Y']],
         d[idx['SPC_J2_Z']]]).astype(float)

    arr2str = lambda arr: '[%s]' % ', '.join(['%f' % v for v in arr])
    print('sun_ast_igrf_r: %s' % arr2str(sun_ast_igrf_r * 1e3))
    print('sun_sc_igrf_r: %s' % arr2str(sun_sc_igrf_r * 1e3))
    print('ast_sc_igrf_r: %s' % arr2str(
        (sun_sc_igrf_r - sun_ast_igrf_r) * 1e3))
    #     print('ast_axis_ra: %f' % degs(ast_axis_ra))
    #     print('ast_axis_dec: %f' % degs(ast_axis_dec))
    #     print('ast_zlon_ra: %f' % degs(ast_zlon_ra))

    aa = quaternion.as_rotation_vector(sc_igrf_q)
    angle = np.linalg.norm(aa)
    sc_angleaxis = [angle] + list(aa / angle)
    print('sc_angleaxis [rad]: %s' % arr2str(sc_angleaxis))
Example #7
    def _render_params(self, discretize_tol=False, center_model=False):
        # called at self.render, override based on hidden field values

        #qfin = tools.fdb_relrot_to_render_q(self._sc_ast_lat, self._sc_ast_lon)
        qfin = tools.ypr_to_q(self._sc_ast_lat, 0, self._sc_ast_lon)
        #light_v = tools.fdb_light_to_render_light(self._light_lat, self._light_lon)
        light_v = tools.spherical2cartesian(self._light_lat, self._light_lon,
                                            1)

        # if qfin & light_v are not reasonable (e.g. solar elongation < 45 deg),
        # InvalidSceneException could be raised here; so far that seems unnecessary,
        # left in place in case it later proves useful
        # raise InvalidSceneException()

        return (0, 0, self.render_z), qfin, light_v
Example #8
    def scene_features(self, feat, maxmem, i1, i2):
        try:
            ref_img, depth = self.render_scene(i1, i2)
        except InvalidSceneException:
            return None

        # get keypoints and descriptors
        ref_kp, ref_desc, self._latest_detector = KeypointAlgo.detect_features(
            ref_img,
            feat,
            maxmem=maxmem,
            max_feats=self.MAX_FEATURES,
            for_ref=True)

        # keep only the 2d image coordinates; drop scale, orientation etc.
        ref_kp_2d = np.array([p.pt for p in ref_kp], dtype='float32')

        # get 3d coordinates
        ref_kp_3d = KeypointAlgo.inverse_project(self.system_model, ref_kp_2d,
                                                 depth, self.render_z,
                                                 self._ref_img_sc)

        if False:
            mm_dist = self.system_model.min_med_distance
            if False:
                pos = (0, 0, -mm_dist)
                qfin = tools.ypr_to_q(sc_ast_lat, 0, sc_ast_lon)
                light_v = tools.spherical2cartesian(light_lat, light_lon, 1)
                reimg = self.render_engine.render(self.obj_idx, pos, qfin,
                                                  light_v)
                img = np.concatenate(
                    (cv2.resize(ref_img,
                                (self.system_model.view_width,
                                 self.system_model.view_height)), reimg),
                    axis=1)
            else:
                ref_kp = [
                    cv2.KeyPoint(*self._cam.calc_img_xy(x, -y, -z - mm_dist),
                                 1) for x, y, z in ref_kp_3d
                ]
                img = cv2.drawKeypoints(ref_img,
                                        ref_kp,
                                        ref_img.copy(), (0, 0, 255),
                                        flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
            cv2.imshow('res', img)
            cv2.waitKey()

        return np.array(ref_desc), ref_kp_2d, ref_kp_3d
Example #9
def relative_pose_err(sm, imgs):
    try:
        img0, img1 = [cv2.imread(img, cv2.IMREAD_UNCHANGED) for img in imgs]
        kp0, desc0, det = KeypointAlgo.detect_features(img0,
                                                       KeypointAlgo.AKAZE,
                                                       max_feats=1000,
                                                       for_ref=True,
                                                       maxmem=0)
        kp1, desc1, det = KeypointAlgo.detect_features(img1,
                                                       KeypointAlgo.AKAZE,
                                                       max_feats=1000,
                                                       for_ref=False,
                                                       maxmem=0)
        matches = KeypointAlgo.match_features(desc1,
                                              desc0,
                                              det.defaultNorm(),
                                              method='brute')

        depth0 = cv2.imread(imgs[0][:-4] + '.d.exr', cv2.IMREAD_UNCHANGED)
        kp0_3d = KeypointAlgo.inverse_project(
            sm, [kp0[m.trainIdx].pt for m in matches], depth0,
            np.nanmax(depth0), 1)
        kp1_2d = np.array([kp1[m.queryIdx].pt for m in matches],
                          dtype='float32')
        rvec, tvec, inliers = KeypointAlgo.solve_pnp_ransac(
            sm, kp1_2d, kp0_3d, 8)
    except PositioningException as e:
        inliers = []

    err = np.nan
    if len(inliers) >= KeypointAlgo.MIN_FEATURES:
        rel_q = true_relative_pose(sm, imgs)
        sc2cv_q = sm.frm_conv_q(sm.SPACECRAFT_FRAME, sm.OPENCV_FRAME)

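        # change of basis: express the PnP rotation (opencv camera frame) in the spacecraft frame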
        est_rel_cv_q = tools.angleaxis_to_q(rvec)
        est_rel_q1 = sc2cv_q * est_rel_cv_q * sc2cv_q.conj()

        # solvePnPRansac has some bug that apparently randomly gives 180deg wrong answer
        est_rel_q2 = sc2cv_q * est_rel_cv_q * tools.ypr_to_q(
            0, math.pi, 0) * sc2cv_q.conj()

        err = math.degrees(
            min(tools.angle_between_q(est_rel_q1, rel_q),
                tools.angle_between_q(est_rel_q2, rel_q)))

    return err
Example #10
    def _set_sc_from_ast_rot_and_trans(self, rvec, tvec, discretization_err_q, rotate_sc=False):
        sm = self.system_model
        
        # rotate to gl frame from opencv camera frame
        gl2cv_q = sm.frm_conv_q(sm.OPENGL_FRAME, sm.OPENCV_FRAME)
        new_sc_pos = tools.q_times_v(gl2cv_q, tvec)

        # camera rotation in opencv frame
        cv_cam_delta_q = tools.angleaxis_to_q(rvec)

        # solvePnPRansac has some bug that apparently randomly gives 180deg wrong answer
        if new_sc_pos[2]>0:
            tpos = -new_sc_pos
            tdelta_q = cv_cam_delta_q * tools.ypr_to_q(0,math.pi,0)
            # print('Bug with solvePnPRansac, correcting:\n\t%s => %s\n\t%s => %s'%(
            #     new_sc_pos, tpos, tools.q_to_ypr(cv_cam_delta_q), tools.q_to_ypr(tdelta_q)))
            new_sc_pos = tpos
            cv_cam_delta_q = tdelta_q
        
        sm.spacecraft_pos = new_sc_pos
        
        err_q = discretization_err_q or np.quaternion(1, 0, 0, 0)
        if rotate_sc:
            sc2cv_q = sm.frm_conv_q(sm.SPACECRAFT_FRAME, sm.OPENCV_FRAME)
            sc_delta_q = err_q * sc2cv_q * cv_cam_delta_q.conj() * sc2cv_q.conj()
            sm.rotate_spacecraft(sc_delta_q)  # TODO: check that rotation ok, changed from sc_q = sc_q * dq to sc_q = dq * sc_q
        else:
            sc2cv_q = sm.frm_conv_q(sm.SPACECRAFT_FRAME, sm.OPENCV_FRAME)
            sc_q = sm.spacecraft_q

            frame_q = sc_q * err_q * sc2cv_q
            ast_delta_q = frame_q * cv_cam_delta_q * frame_q.conj()
            
            err_corr_q = sc_q * err_q.conj() * sc_q.conj()
            ast_q0 = sm.asteroid_q
            sm.rotate_asteroid(err_corr_q * ast_delta_q)

            if self.est_real_ast_orient:
                # so that can track rotation of 67P
                ast_q = sm.asteroid_q
                err_deg = math.degrees(tools.angle_between_q(ast_q0, ast_q))
                self.extra_values = list(quaternion.as_float_array(ast_q)) + [sm.time.value, err_deg]
Example #11
    def costfun(x, debug=0, verbose=True):
        set_params(x)
        err = 0

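        # render the same view with both reflection models (m_hapke vs m_ll) across phase angles 0..150 deg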
        for phase_angle in np.radians(np.linspace(0, 150, img_n)):
            light = tools.q_times_v(tools.ypr_to_q(phase_angle, 0, 0),
                                    np.array([0, 0, -1]))
            synth1 = re.render(obj_idx,
                               pos,
                               np.quaternion(1, 0, 0, 0),
                               tools.normalize_v(light),
                               get_depth=False,
                               reflection=m_hapke)
            synth2 = re.render(obj_idx,
                               pos,
                               np.quaternion(1, 0, 0, 0),
                               tools.normalize_v(light),
                               get_depth=False,
                               reflection=m_ll)

            err_img = (synth1.astype('float') - synth2.astype('float'))**2
            err += np.mean(err_img)
            if debug:
                if debug % 2:
                    cv2.imshow(
                        'hapke vs ll',
                        np.concatenate((synth1.astype('uint8'), 255 * np.ones(
                            (synth2.shape[0], 1), dtype='uint8'), synth2),
                                       axis=1))
                if debug > 1:
                    err_img = err_img**0.2
                    cv2.imshow('err', err_img / np.max(err_img))
                cv2.waitKey()
        err /= img_n
        if verbose:
            print('%s => %f' %
                  (', '.join(['%.4e' % i for i in np.array(x) * scales]), err))
        return err
Example #12
        if compare:
            title = 'real vs synthetic'
            img = np.concatenate(
                [real, 255 * np.ones(
                    (real.shape[0], 1), dtype='uint8')] + synth,
                axis=1)
        else:
            title = 'synthetic'
            img = synth[0]

        sc = min(1536 / img.shape[1], 1024 / img.shape[0])
        cv2.imshow(title, cv2.resize(img, None, fx=sc, fy=sc))
        cv2.waitKey()
    else:
        # try different light directions a fixed angle (d) away from default
        d = 10
        for a in np.linspace(0, 360 * 2, 36 * 2, endpoint=False):
            lat, lon, r = tools.cartesian2spherical(*sm.asteroid.real_position)
            qa = tools.ypr_to_q(lat, lon, 0)
            qd = tools.ypr_to_q(0, 0, np.radians(a)) * tools.ypr_to_q(
                np.radians(d), 0, 0)
            q = qa * qd * qa.conj()
            sm.asteroid.real_position = tools.q_times_v(
                q, sm.asteroid.real_position)
            img = cv2.resize(
                ab.render(shadows=True, reflection=RenderEngine.REFLMOD_HAPKE),
                (1024, 1024))
            cv2.imshow('s', img)
            cv2.waitKey()
        quit()
Example #13
def render_67p(show=False):
    sm = RosettaSystemModel(hi_res_shape_model=False, res_mult=1.0)
    re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
    re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, 0.05, sm.max_distance)
    obj_idx = re.load_object(sm.asteroid.real_shape_model)

    RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_HAPKE] = [
        553.38,  # J         0
        27,  # th_p      27
        0.034,  # w         0.034
        -0.08,  # b         -0.078577
        0,  # c         0
        2.25,  # B_SH0     2.25
        math.radians(0.061),  # hs        math.radians(0.061)
        0,  # B_CB0     0
        0.005,  # hc        0.005
        1,  # K         1
    ]

    g_sc_q = tools.angleaxis_to_q([1.892926, 0.781228, -0.540109, -0.312995])
    # x, y, z = 69.54, 64.11, 162.976134
    x, y, z = 146.540, 167.110, 154.976  # asteroid
    g_ast_q = get_ast_q(x, y, z)
    g_sol_ast_v = np.array([163613595198.0, 101637176823.0, 36457220690.0
                            ]) * 1e-3
    g_sol_sc_v = np.array([163613518304.0, 101637309778.0, 36457190373.0
                           ]) * 1e-3
    g_sc_ast_v = g_sol_ast_v - g_sol_sc_v
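    # rotate the s/c-to-asteroid vector, asteroid orientation and light direction into the local opengl frame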
    l_ast_sc_v = tools.q_times_v(SystemModel.sc2gl_q.conj() * g_sc_q.conj(),
                                 g_sc_ast_v)
    l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj() * g_ast_q * SystemModel.sc2gl_q
    l_light_v = tools.q_times_v(SystemModel.sc2gl_q.conj() * g_sc_q.conj(),
                                g_sol_ast_v / np.linalg.norm(g_sol_ast_v))

    l_vx_ast_q = SystemModel.sc2gl_q.conj() * quaternion.one.conj() * g_ast_q * SystemModel.sc2gl_q
    print(str(l_vx_ast_q))
    particles = load_particles(
        r'C:\projects\sispo\data\models\Jets--ROS_CAM1_20150710T074301.exr',
        lf_ast_q=l_vx_ast_q,
        cell_size=0.066667)

    a, b, c = [0] * 3
    w = 10
    while True:
        print('%.3f, %.3f, %.3f' % (x + a, y + b, z + c))

        if particles is None and 0:
            img = re.render(obj_idx,
                            l_ast_sc_v,
                            l_ast_q,
                            l_light_v,
                            flux_density=1.0,
                            gamma=1.0,
                            get_depth=False,
                            shadows=True,
                            textures=False,
                            reflection=RenderEngine.REFLMOD_HAPKE)
        else:
            img = TestLoop.render_navcam_image_static(
                None,
                re, [obj_idx],
                rel_pos_v=l_ast_sc_v,
                rel_rot_q=l_ast_q,
                light_v=l_light_v,
                sc_q=g_sc_q,
                sun_distance=np.linalg.norm(g_sol_ast_v) * 1e3,
                exposure=None,
                gain=None,
                gamma=1.8,
                auto_gain=True,
                reflmod_params=RenderEngine.REFLMOD_PARAMS[
                    RenderEngine.REFLMOD_HAPKE],
                use_shadows=True,
                use_textures=False,
                cam=sm.cam,
                fluxes_only=True,
                stars=True,
                lens_effects=False,
                particles=particles,
                return_depth=False)

        k = output(img, show, maxval=0.50, gamma=1.8)
        #k = output(img, show, maxval=0.70, gamma=1.0)

        if k is None or k == 27:
            break
        if k in (ord('a'), ord('d')):
            b += (1 if k == ord('a') else -1) * w
        if k in (ord('w'), ord('s')):
            a += (1 if k == ord('s') else -1) * w
        if k in (ord('q'), ord('e')):
            c += (1 if k == ord('q') else -1) * w

        if 0:
            l_light_v = tools.q_times_v(
                SystemModel.sc2gl_q.conj() * tools.ypr_to_q(
                    math.radians(a), math.radians(b), math.radians(c)) *
                g_sc_q.conj(), g_sol_ast_v / np.linalg.norm(g_sol_ast_v))
        elif 1:
            g_ast_q = get_ast_q(x + a, y + b, z + c)
            l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj() * g_ast_q * SystemModel.sc2gl_q
Example #14
def load_image_meta(src, sm):
    # params given in equatorial J2000 coordinates, details:
    # https://pds.nasa.gov/ds-view/pds/viewProfile.jsp
    #                                   ?dsid=RO-C-NAVCAM-2-ESC3-MTP021-V1.0

    with open(src, 'r') as f:
        config_data = f.read()

    config_data = '[meta]\n' + config_data
    config_data = re.sub(r'^/\*', '#', config_data, flags=re.M)
    config_data = re.sub(r'^\^', '', config_data, flags=re.M)
    config_data = re.sub(r'^(\w+):(\w+)', r'\1__\2', config_data, flags=re.M)
    config_data = re.sub(r'^END\s*$', '', config_data, flags=re.M)
    config_data = re.sub(r'^NOTE\s*=\s*"[^"]*"', '', config_data, flags=re.M)
    config_data = re.sub(r'^OBJECT\s*=\s*.*?END_OBJECT\s*=\s*\w+',
                         '',
                         config_data,
                         flags=re.M | re.S)
    config_data = re.sub(r' <(DEGREE|SECOND|KILOMETER)>', '', config_data)

    config = ConfigParser(converters={'tuple': literal_eval})
    config.read_string(config_data)

    image_time = config.get('meta', 'START_TIME')

    # spacecraft orientation, equatorial J2000
    sc_rot_ra = config.getfloat('meta', 'RIGHT_ASCENSION')
    sc_rot_dec = config.getfloat('meta', 'DECLINATION')
    sc_rot_cnca = config.getfloat('meta', 'CELESTIAL_NORTH_CLOCK_ANGLE')
    sc_igrf_q = tools.ypr_to_q(
        rads(sc_rot_dec), rads(sc_rot_ra),
        -rads(sc_rot_cnca))  # same with rosetta lbls also

    # from asteroid to spacecraft, asteroid body fixed coordinates
    # TODO: figure out why FOR SOME REASON distance is given ~30x too close
    ast_sc_dist = config.getfloat('meta', 'TARGET_CENTER_DISTANCE') * 30
    ast_sc_lat = config.getfloat('meta', 'SUB_SPACECRAFT_LATITUDE')
    ast_sc_lon = config.getfloat('meta', 'SUB_SPACECRAFT_LONGITUDE')
    ast_sc_bff_r = tools.spherical2cartesian(rads(ast_sc_lat),
                                             rads(ast_sc_lon), ast_sc_dist)

    ast_axis_img_clk_ang = config.getfloat('meta', 'BODY_POLE_CLOCK_ANGLE')
    ast_axis_img_plane_ang = config.getfloat(
        'meta', 'HAY__BODY_POLE_ASPECT_ANGLE')  # what is the use?

    # from sun to spacecraft, equatorial J2000
    ast_sun_dist = config.getfloat('meta', 'TARGET_HELIOCENTRIC_DISTANCE')
    ast_sun_lat = config.getfloat('meta', 'SUB_SOLAR_LATITUDE')
    ast_sun_lon = config.getfloat('meta', 'SUB_SOLAR_LONGITUDE')
    sun_ast_bff_r = -tools.spherical2cartesian(rads(ast_sun_lat),
                                               rads(ast_sun_lon), ast_sun_dist)
    sun_sc_bff_r = sun_ast_bff_r + ast_sc_bff_r

    ast_axis_sun_ang = config.getfloat('meta', 'HAY__BODY_POLE_SUN_ANGLE')
    a = config.getfloat('meta', 'SUB_SOLAR_AZIMUTH')  # what is this!?

    # TODO: continue here
    ast_axis_scf_q = tools.ypr_to_q(-rads(ast_sc_lat), -rads(ast_sc_lon), 0)
    # TODO: figure out: how to get roll as some ast_axis_img_clk_ang come from ast_sc_lat?
    ast_rot_scf_q = tools.ypr_to_q(0, 0, -rads(ast_axis_img_clk_ang))
    ast_scf_q = ast_axis_scf_q  #* ast_rot_scf_q

    dec = 90 - ast_sc_lat
    ra = -ast_sc_lon
    if dec > 90:
        dec = 90 + ast_sc_lat
        ra = tools.wrap_degs(ra + 180)

    print('ra: %f, dec: %f, zlra: %f' % (ra, dec, ast_axis_img_clk_ang))

    ast_igrf_q = ast_scf_q * sc_igrf_q
    sun_ast_igrf_r = tools.q_times_v(ast_igrf_q, sun_ast_bff_r)
    ast_sc_igrf_r = tools.q_times_v(ast_igrf_q, ast_sc_bff_r)
    sun_sc_igrf_r = tools.q_times_v(ast_igrf_q, sun_sc_bff_r)

    z_axis = np.array([0, 0, 1])
    x_axis = np.array([1, 0, 0])
    ast_axis_u = tools.q_times_v(ast_igrf_q, z_axis)
    ast_zlon_u = tools.q_times_v(ast_igrf_q, x_axis)
    ast_axis_dec, ast_axis_ra, _ = tools.cartesian2spherical(*ast_axis_u)
    ast_zlon_proj = tools.vector_rejection(ast_zlon_u, z_axis)
    ast_zlon_ra = tools.angle_between_v(ast_zlon_proj, x_axis)
    ast_zlon_ra *= 1 if np.cross(x_axis, ast_zlon_proj).dot(z_axis) > 0 else -1

    # frame where ast zero lat and lon point towards the sun?
    # ast_axis_ra = -ast_sun_lon
    # ast_axis_dec = 90 - ast_sun_lat
    # ast_axis_zero_lon_ra = 0

    arr2str = lambda arr: '[%s]' % ', '.join(['%f' % v for v in arr])

    print('sun_ast_bff_r: %s' % arr2str(sun_ast_bff_r * 1e3))
    print('sun_sc_bff_r: %s' % arr2str(sun_sc_bff_r * 1e3))
    print('ast_sc_bff_r: %s' % arr2str(ast_sc_bff_r * 1e3))
    # TODO: even the light is wrong, should be right based on the sun_ast and sun_sc vectors!!

    print('sun_ast_igrf_r: %s' % arr2str(sun_ast_igrf_r * 1e3))
    print('sun_sc_igrf_r: %s' % arr2str(sun_sc_igrf_r * 1e3))
    print('ast_sc_igrf_r: %s' % arr2str(ast_sc_igrf_r * 1e3))
    print('ast_axis_ra: %f' % degs(ast_axis_ra))
    print('ast_axis_dec: %f' % degs(ast_axis_dec))
    print('ast_zlon_ra: %f' % degs(ast_zlon_ra))

    aa = quaternion.as_rotation_vector(sc_igrf_q)
    angle = np.linalg.norm(aa)
    sc_angleaxis = [angle] + list(aa / angle)
    print('sc_angleaxis [rad]: %s' % arr2str(sc_angleaxis))
Example #15
 def calc_q(r):
     lat, lon, r = tools.cartesian2spherical(*r)
     return tools.ypr_to_q(lat, lon, 0).conj()
Example #16
 control = RenderController(outpath)
 control.create_scene('test_sc')
 control.set_scene_config({
     'debug': True,
     'flux_only': False,
     'normalize': False,
     'stars': True,
     'lens_effects': True,          # includes the sun
     'hapke_params': RenderObject.HAPKE_PARAMS,
 })
 control.create_camera('test_cam', scenes='test_sc')
 control.configure_camera('test_cam', lens=35.0, sensor=5e-3*1024)
 control.set_exposure(0.001 if target == 'sun' else 0.3 if target == 'sssb' else 50.0)
 control.set_resolution((1024, 1024))
 control.set_output_format('png', '8')
 control.set_sun_location(tools.q_times_v(tools.ypr_to_q(math.radians(-90+120), 0, 0), [1.496e11, 0, 0]))
 control.set_camera_location('test_cam')
 objfile1, url1 = os.path.join(datapath, 'ryugu+tex-d1-16k.obj'), 'https://drive.google.com/uc?authuser=0&id=1Lu48I4nnDKYtvArUN7TnfQ4b_MhSImKM&export=download'
 objfile2, url2 = os.path.join(datapath, 'ryugu+tex-d1-16k.mtl'), 'https://drive.google.com/uc?authuser=0&id=1qf0YMbx5nIceGETqhNqePyVNZAmqNyia&export=download'
 objfile3, url3 = os.path.join(datapath, 'ryugu.png'), 'https://drive.google.com/uc?authuser=0&id=19bT_Qd1MBfxM1wmnCgx6PT58ujnDFmsK&export=download'
 RenderController.download_file(url1, objfile1, maybe=True)
 RenderController.download_file(url2, objfile2, maybe=True)
 RenderController.download_file(url3, objfile3, maybe=True)
 obj = control.load_object(os.path.join(datapath, 'ryugu+tex-d1-16k.obj'), 'ryugu-16k')
 obj.location = (0, 0, 0)
 if target == 'sun':
     control.target_camera(control.SOL, "test_cam")
 elif target == 'sssb':
     control.target_camera(obj, "test_cam")
 else:
     control.set_camera_location("test_cam", None, (0, 1, 0, 0))
Example #17
            plt.show()

        for hd in (
                48737, 35468, 39801
        ):  # Lambda Orionis (HD36861) Teff too high for model (37689K)
            fname = r'C:\projects\s100imgs\spectra\%s.fits' % hd
            fdat = fits.getdata(fname)
            teff, logg, feh = [stars[hd][f] for f in (f_teff, f_logg, f_feh)]
            if teff > 30000:
                logg = max(logg, 4.0)
            testf(fdat, teff, logg, feh or 0)

        quit()

#    cam = RosettaSystemModel(focused_attenuated=False).cam
    cam = DidymosSystemModel(use_narrow_cam=True).cam

    #    cam_q = tools.rand_q(math.radians(180))
    cam_q = quaternion.one
    for i in range(100):
        cam_q = tools.ypr_to_q(0, np.radians(1), 0) * cam_q
        flux_density = Stars.flux_density(cam_q, cam)
        img = cam.sense(flux_density, exposure=2, gain=2)

        img = np.clip(img * 255, 0, 255).astype('uint8')
        img = ImageProc.adjust_gamma(img, 1.8)

        sc = min(768 / cam.width, 768 / cam.height)
        cv2.imshow('stars', cv2.resize(img, None, fx=sc, fy=sc))
        cv2.waitKey()
    print('done')
Example #18
def load_image_meta(src, sm):
    # params given in equatorial J2000 coordinates, details:
    # https://pds.nasa.gov/ds-view/pds/viewProfile.jsp
    #                                   ?dsid=RO-C-NAVCAM-2-ESC3-MTP021-V1.0

    with open(src, 'r') as f:
        config_data = f.read()

    config_data = '[meta]\n' + config_data
    config_data = re.sub(r'^/\*', '#', config_data, flags=re.M)
    config_data = re.sub(r'^\^', '', config_data, flags=re.M)
    config_data = re.sub(r'^(\w+):(\w+)', r'\1__\2', config_data, flags=re.M)
    config_data = re.sub(r'^END\s*$', '', config_data, flags=re.M)
    config_data = re.sub(r'^NOTE\s*=\s*"[^"]*"', '', config_data, flags=re.M)
    config_data = re.sub(r' <(deg|km)>', '', config_data)

    config = ConfigParser(converters={'tuple': literal_eval})
    config.read_string(config_data)

    image_time = config.get('meta', 'IMAGE_TIME')

    # from sun to spacecraft, equatorial J2000
    sun_sc_eq_x, sun_sc_eq_y, sun_sc_eq_z = \
            -np.array(config.gettuple('meta', 'SC_SUN_POSITION_VECTOR'))
            
    if USE_ICRS:
        sun_sc_ec_p = np.array([sun_sc_eq_x, sun_sc_eq_y, sun_sc_eq_z])
    else:
        sc = SkyCoord(x=sun_sc_eq_x, y=sun_sc_eq_y, z=sun_sc_eq_z, unit='km',
                frame='icrs', representation_type='cartesian', obstime='J2000')\
                .transform_to('heliocentrictrueecliptic')\
                .represent_as('cartesian')
        sun_sc_ec_p = np.array([sc.x.value, sc.y.value, sc.z.value])
    sun_sc_dist = np.sqrt(np.sum(sun_sc_ec_p**2))
    
    # from spacecraft to asteroid, equatorial J2000
    sc_ast_x, sc_ast_y, sc_ast_z = \
            config.gettuple('meta', 'SC_TARGET_POSITION_VECTOR')

    # from asteroid to spacecraft, asteroid fixed body coordinates
    ast_sc_r = config.getfloat('meta', 'TARGET_CENTER_DISTANCE')
    ast_sc_lat = config.getfloat('meta', 'SUB_SPACECRAFT_LATITUDE')
    ast_sc_lon = config.getfloat('meta', 'SUB_SPACECRAFT_LONGITUDE')

    # spacecraft orientation, equatorial J2000
    sc_rot_ra = config.getfloat('meta', 'RIGHT_ASCENSION')
    sc_rot_dec = config.getfloat('meta', 'DECLINATION')
    sc_rot_cnca = config.getfloat('meta', 'CELESTIAL_NORTH_CLOCK_ANGLE')
    
    solar_elongation = config.getfloat('meta', 'SOLAR_ELONGATION')

    ## set time
    ##
    sm.asteroid.reset_to_defaults()
    half_range = sm.asteroid.rotation_period/2
    timestamp = Time(image_time, scale='utc', format='isot').unix
    sm.time.range = (timestamp - half_range, timestamp + half_range)
    sm.time.value = timestamp
    sm.time.real_value = timestamp

    ## set spacecraft orientation
    ##
    xc, yc, zc = 0, 0, 0
    #xc, yc, zc = -0.283, -0.127, 0     # ROS_CAM1_20150720T113057
    #xc, yc, zc = 0.2699, -0.09, 0  # ROS_CAM1_20150720T165249
    #xc, yc, zc = 0.09, -0.02, 0    # ROS_CAM1_20150720T064939

    if USE_ICRS:
        assert sc_rot_dec+xc<90 and sc_rot_dec+xc>-90, 'bad correction'
        sm.spacecraft_rot = (
            sc_rot_dec+xc,                   # axis lat
            (sc_rot_ra+yc+180)%360 - 180,    # axis lon
            (360-sc_rot_cnca+zc)%360 - 180,  # rotation
        )
    else:
        sc = SkyCoord(ra=sc_rot_ra*units.deg, dec=sc_rot_dec*units.deg,
                      frame='icrs', obstime='J2000')
        sc = sc.transform_to('barycentrictrueecliptic')
        assert sc.lat.value+xc<90 and sc.lat.value+xc>-90, 'bad correction'
        sm.spacecraft_rot = (
            sc.lat.value+xc,                 # axis lat
            (sc.lon.value+yc+180)%360 - 180, # axis lon
            (sc_rot_cnca+zc+180)%360 - 180,  # rotation
        )
        
    sm.real_spacecraft_rot = sm.spacecraft_rot

    ## set spacecraft position
    ##
    if USE_ICRS:
        sc_ast_ec_p = np.array([sc_ast_x, sc_ast_y, sc_ast_z])
    else:
        sc = SkyCoord(x=sc_ast_x, y=sc_ast_y, z=sc_ast_z, unit='km', frame='icrs',
                representation_type='cartesian', obstime='J2000')\
                .transform_to('barycentrictrueecliptic')\
                .represent_as('cartesian')
        sc_ast_ec_p = np.array([sc.x.value, sc.y.value, sc.z.value])

    sm.asteroid.real_position = sun_sc_ec_p + sc_ast_ec_p

    # s/c orientation
    sco = list(map(math.radians, sm.spacecraft_rot))
    scoq = tools.ypr_to_q(*sco)
    
    # project old position to new base vectors
    sc2gl_q = sm.frm_conv_q(sm.SPACECRAFT_FRAME, sm.OPENGL_FRAME)
    scub = tools.q_to_unitbase(scoq * sc2gl_q)
    scub_o = tools.q_to_unitbase(scoq)
    sc_ast_p = scub.dot(sc_ast_ec_p.transpose())

    sm.real_spacecraft_pos = sc_ast_p
    # if USE_IMG_LABEL_FOR_SC_POS:
    #    sm.spacecraft_pos = sc_ast_p
    ##
    ## done setting spacecraft position

    # use calculated asteroid axis as real axis
    sm.asteroid_rotation_from_model()
    sm.real_asteroid_axis = sm.asteroid_axis

    sm.asteroid.real_sc_ast_vertices = sm.sc_asteroid_vertices(real=True)

    if not np.isclose(float(Decimal(sm.time.value) - Decimal(sm.time.real_value)), 0):
        sm.time.real_value = sm.time.value
        if DEBUG:
            print('Strange Python problem where float memory values get corrupted a little in random places of code')

    if False:
        print((''
            + '\nsco:\n%s\n'
            + '\nscoq:\n%s\n'
            + '\nscub_o:\n%s\n'
            + '\nscub:\n%s\n'
            + '\nast_sc_ec_p:\n%s\n'
            + '\nast_sc_p:\n%s\n'
        ) % (
            sm.spacecraft_rot,
            scoq,
            scub,
            scub_o,
            sc_ast_ec_p,
            sc_ast_p,
        ))
    
    if DEBUG:
        lbl_sun_ast_v = (sun_sc_ec_p+sc_ast_ec_p)*1e3
        lbl_se, lbl_dir = tools.solar_elongation(lbl_sun_ast_v, scoq)
        
        m_elong, m_dir = sm.solar_elongation()
        mastp = sm.asteroid.position(sm.time.value)
        print((
            'solar elongation (deg), file: %.1f (%.1f), model: %.1f\n'
            + 'light direction (deg), file: %s, model: %s\n'
            + 'sun-asteroid loc (Gm), file: %s, model: %s\n'
            ) % (
            solar_elongation, math.degrees(lbl_se), math.degrees(m_elong),
            math.degrees(lbl_dir), math.degrees(m_dir),
            lbl_sun_ast_v*1e-9, (mastp)*1e-9,
        ))
        
        sm.save_state('none',printout=True)
    #quit()

    return locals()
Example #19
 control.set_scene_config({
     'debug': True,
     'flux_only': False,
     'normalize': False,
     'stars': True,
     'lens_effects': True,  # includes the sun
     'brdf_params': RenderObject.HAPKE_PARAMS,
 })
 control.create_camera('test_cam', scenes='test_sc')
 control.configure_camera('test_cam', lens=35.0, sensor=5e-3 * 1024)
 control.set_exposure(0.001 if target == 'sun' else 0.3 if target ==
                      'sssb' else 50.0)
 control.set_resolution((1024, 1024))
 control.set_output_format('png', '8')
 control.set_sun_location(
     tools.q_times_v(tools.ypr_to_q(math.radians(-90 + 120), 0, 0),
                     [1.496e11, 0, 0]))
 control.set_camera_location('test_cam')
 objfile1, url1 = os.path.join(
     datapath, 'ryugu+tex-d1-16k.obj'
 ), 'https://drive.google.com/uc?authuser=0&id=1Lu48I4nnDKYtvArUN7TnfQ4b_MhSImKM&export=download'
 objfile2, url2 = os.path.join(
     datapath, 'ryugu+tex-d1-16k.mtl'
 ), 'https://drive.google.com/uc?authuser=0&id=1qf0YMbx5nIceGETqhNqePyVNZAmqNyia&export=download'
 objfile3, url3 = os.path.join(
     datapath, 'ryugu.png'
 ), 'https://drive.google.com/uc?authuser=0&id=19bT_Qd1MBfxM1wmnCgx6PT58ujnDFmsK&export=download'
 RenderController.download_file(url1, objfile1, maybe=True)
 RenderController.download_file(url2, objfile2, maybe=True)
 RenderController.download_file(url3, objfile3, maybe=True)
 obj = control.load_object(os.path.join(datapath, 'ryugu+tex-d1-16k.obj'),
                           'ryugu-16k')
Example #20
def render_itokawa(show=False):
    cam = Camera(4096,
                 4096,
                 aperture=1.5,
                 focal_length=120.8,
                 sensor_size=[12.288] * 2,
                 px_saturation_e=30e3)
    shape_model_file = [
        r'C:\projects\sispo\data\models\itokawa_16k.obj',
        r'C:\projects\sispo\data\models\itokawa_f3145728.obj',
    ][0]

    RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_HAPKE] = [
        553.38,  # J         0
        26,  # th_p      26
        0.31,  # w         0.31
        -0.35,  # b         -0.35
        0,  # c         0
        0.86,  # B_SH0     0.86
        0.00021,  # hs        0.00021
        0,  # B_CB0     0
        0.005,  # hc        0.005
        1,  # K         1
    ]

    re = RenderEngine(cam.width, cam.height,
                      antialias_samples=0)  #, enable_extra_data=True)
    re.set_frustum(cam.x_fov, cam.y_fov, 0.05, 12)
    obj_idx = re.load_object(shape_model_file)

    if 1:
        g_sc_q = tools.angleaxis_to_q(
            [1.847799, -0.929873, 0.266931, -0.253146])
        #x, y, z = -13.182141, -64.694813, 116.263134
        x, y, z = 93.818, -8.695, 1.263
        g_ast_q = get_ast_q(x, y, z)
        g_sol_ast_v = np.array([146226194732.0, -68812326932.0, -477863381.0
                                ]) * 1e-3
        g_sol_sc_v = np.array([146226188132.0, -68812322442.0, -477863711.0
                               ]) * 1e-3
        g_sc_ast_v = g_sol_ast_v - g_sol_sc_v
        l_ast_sc_v = tools.q_times_v(
            SystemModel.sc2gl_q.conj() * g_sc_q.conj(), g_sc_ast_v)
        l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj() * g_ast_q * SystemModel.sc2gl_q
        l_light_v = tools.q_times_v(SystemModel.sc2gl_q.conj() * g_sc_q.conj(),
                                    g_sol_ast_v / np.linalg.norm(g_sol_ast_v))
    else:
        l_ast_sc_v = np.array([0, 0, -7.990 * 1])
        l_ast_q = tools.angleaxis_to_q((math.radians(20), 0, 1, 0))
        l_light_v = np.array([1, 0, -1]) / math.sqrt(2)

    sc_mode = 0
    a, b, c = [0] * 3
    ds, dq = 0.135, tools.ypr_to_q(math.radians(1.154), 0,
                                   math.radians(-5.643))
    #    ds, dq = 0.535, quaternion.one     # itokawa centered

    while True:
        img = re.render(obj_idx,
                        tools.q_times_v(dq, l_ast_sc_v * ds),
                        l_ast_q,
                        l_light_v,
                        flux_density=1.0,
                        gamma=1.0,
                        get_depth=False,
                        shadows=True,
                        textures=True,
                        reflection=RenderEngine.REFLMOD_HAPKE)

        #        data = re.render_extra_data(obj_idx, tools.q_times_v(dq, l_ast_sc_v*ds), l_ast_q, l_light_v)
        #        import matplotlib.pyplot as plt
        #        plt.imshow(data[:, :, 0])

        k = output(img, show, maxval=0.90)

        if k is None or k == 27:
            break

        tmp = 1 if k in (ord('a'), ord('s'), ord('q')) else -1
        if k in (ord('a'), ord('d')):
            if sc_mode:
                dq = tools.ypr_to_q(math.radians(tmp * 0.033), 0, 0) * dq
            else:
                b += tmp
        if k in (ord('w'), ord('s')):
            if sc_mode:
                dq = tools.ypr_to_q(0, 0, math.radians(tmp * 0.033)) * dq
            else:
                a += tmp
        if k in (ord('q'), ord('e')):
            if sc_mode:
                ds *= 0.9**tmp
            else:
                c += tmp
        if k == ord('i'):
            # use fresh names: 'y' would otherwise shadow the asteroid position used below
            yw, pt, rl = tools.q_to_ypr(dq)
            print('+c: %.3f, %.3f, %.3f' % (x + a, y + b, z + c))
            print('ds, h, v: %.3f, %.3f, %.3f' %
                  (ds, math.degrees(yw), math.degrees(rl)))

        g_ast_q = get_ast_q(x + a, y + b, z + c)
        l_ast_q = SystemModel.sc2gl_q.conj() * g_sc_q.conj() * g_ast_q * SystemModel.sc2gl_q
Example #21
def main():
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Run visual odometry on a set of images')
    parser.add_argument('--data', '-d', metavar='DATA', help='path to data')
    parser.add_argument('--meta',
                        '-t',
                        metavar='META',
                        help='path to meta data')
    parser.add_argument('--video-toff',
                        '--dt',
                        type=float,
                        metavar='dT',
                        help='video time offset compared to metadata')
    parser.add_argument('--alt-scale',
                        '--as',
                        type=float,
                        default=1.0,
                        metavar='sc',
                        help='scale telemetry altitude with this value')
    parser.add_argument('--res',
                        '-r',
                        metavar='OUT',
                        help='path to the result pickle')
    parser.add_argument('--debug-out',
                        '-o',
                        metavar='OUT',
                        help='path to the debug output folder')
    parser.add_argument('--kapture',
                        metavar='OUT',
                        help='path to kapture-format export folder')
    parser.add_argument(
        '--verbosity',
        '-v',
        type=int,
        default=2,
        help=
        'verbosity level (0-4, 0-1: text only, 2:+debug imgs, 3: +keypoints, 4: +poses)'
    )
    parser.add_argument(
        '--high-quality',
        action='store_true',
        help='high quality settings with more keypoints detected')

    parser.add_argument('--mission',
                        '-mr',
                        choices=('hwproto', 'nokia', 'toynokia'),
                        help='select mission')
    parser.add_argument('--undist-img',
                        action='store_true',
                        help='undistort image instead of keypoints')
    parser.add_argument('--use-gimbal',
                        action='store_true',
                        help='gimbal data is ok, use it')
    parser.add_argument(
        '--drifting-gimbal',
        action='store_true',
        help=
        'gimbal orientation measure drifts, update orientation offset with each odometry result'
    )
    parser.add_argument('--nadir-looking',
                        action='store_true',
                        help='downwards looking cam')

    parser.add_argument(
        '--ori-off-ypr',
        type=float,
        nargs=3,
        help=
        'orientation offset in ypr (deg) to be applied in body frame to orientation measurements'
    )

    parser.add_argument('--cam-dist',
                        type=float,
                        nargs='*',
                        help='cam distortion coeffs')
    parser.add_argument('--cam-fl-x', type=float, help='cam focal length x')
    parser.add_argument('--cam-fl-y', type=float, help='cam focal length y')
    parser.add_argument('--cam-pp-x', type=float, help='cam principal point x')
    parser.add_argument('--cam-pp-y', type=float, help='cam principal point y')

    parser.add_argument('--first-frame',
                        '-f',
                        type=int,
                        default=0,
                        help='first frame (default: 0; -1: hardcoded value)')
    parser.add_argument('--last-frame',
                        '-l',
                        type=int,
                        help='last frame (default: None; -1: hardcoded end)')
    parser.add_argument('--skip',
                        '-s',
                        type=int,
                        default=1,
                        help='use only every xth frame (default: 1)')
    parser.add_argument('--plot-only',
                        action='store_true',
                        help='only plot the result')
    args = parser.parse_args()

    if args.verbosity > 1:
        import matplotlib.pyplot as plt
        from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D

    # init odometry and data
    if args.mission == 'hwproto':
        mission = HardwarePrototype(args.data, last_frame=(155, 321, None)[0])
    elif args.mission == 'nokia':
        ff = (100, 250, 520, 750, 1250, 1500,
              1750)[0] if args.first_frame == -1 else args.first_frame
        lf = (240, 400, 640, 1000, 1500, 1750,
              2000)[1] if args.last_frame == -1 else args.last_frame

        cam_mx = None
        if args.cam_fl_x:
            fl_y = args.cam_fl_y or args.cam_fl_x
            pp_x = args.cam_pp_x or NokiaSensor.CAM_WIDTH / 2
            pp_y = args.cam_pp_y or NokiaSensor.CAM_HEIGHT / 2
            cam_mx = [[args.cam_fl_x, 0., pp_x], [0., fl_y, pp_y],
                      [0., 0., 1.]]
        mission = NokiaSensor(
            args.data,
            data_path=args.meta,
            video_toff=args.video_toff,
            use_gimbal=args.use_gimbal,
            undist_img=args.undist_img,
            cam_mx=cam_mx,
            cam_dist=args.cam_dist,
            verbosity=args.verbosity,
            high_quality=args.high_quality,
            alt_scale=args.alt_scale,
            ori_off_q=tools.ypr_to_q(*map(math.radians, args.ori_off_ypr))
            if args.ori_off_ypr else None,
            first_frame=ff,
            last_frame=lf)

    elif args.mission == 'toynokia':
        mission = ToyNokiaSensor(args.data,
                                 data_path=args.meta,
                                 video_toff=args.video_toff,
                                 first_frame=(100, 350, 850, 1650)[1],
                                 last_frame=(415, 500, 1250, 1850, 2000)[2])
    else:
        assert False, 'bad mission given: %s' % args.mission

    if args.debug_out:
        mission.odo._track_save_path = args.debug_out

    if args.nadir_looking:
        mission.odo._nadir_looking = True

    res_file = args.res or ('%s-result.pickle' % args.mission)
    if args.plot_only:
        plot_results(file=res_file, nadir_looking=args.nadir_looking)
        replay_keyframes(mission.odo.cam, file=res_file)
        return

    # run odometry
    prior = Pose(np.array([0, 0, 0]), quaternion.one)
    frame_names0 = []
    meta_names0 = []
    results0 = []
    ground_truth0 = []
    ori_offs = []
    ba_errs = []
    kfid2img = {}
    started = datetime.now()
    ax = None

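    # reduce per-frame bundle adjustment errors to scalars: reprojection [px], location [m], orientation [deg]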
    def ba_err_logger(frame_id, per_frame_ba_errs):
        per_frame_ba_errs = np.stack(
            (per_frame_ba_errs[:, 0],
             np.linalg.norm(per_frame_ba_errs[:, 1:4], axis=1),
             np.linalg.norm(per_frame_ba_errs[:, 4:7], axis=1) / np.pi * 180),
            axis=1)
        ba_errs.append([frame_id, *np.nanmean(per_frame_ba_errs, axis=0)])

    mission.odo.ba_err_logger = ba_err_logger
    vid_id = re.search(r'(^|\\|/)([\w-]+)\.mp4$', mission.video_path)

    for i, (img, t, name, meta, meta_name, gt) in enumerate(mission.data):
        if i % args.skip != 0:
            continue

        logger.info('')
        logger.info(name)
        frame_names0.append(name)
        meta_names0.append(meta_name)
        ground_truth0.append(gt)

        if vid_id[2] == 'HD_CAM-1__514659341_03_12_2020_16_12_44' and t > 1050:
            # hardcoding just to get dataset 10 to work
            mission.odo.ori_off_q = tools.ypr_to_q(math.radians(0), 0, 0)

        try:
            nf, *_ = mission.odo.process(img,
                                         datetime.fromtimestamp(mission.time0 +
                                                                t),
                                         measure=meta)

            if nf is not None and nf.id is not None:
                kfid2img[nf.id] = i

                if args.drifting_gimbal and nf.pose.post and meta:
                    est_w2c_bf_q = (-nf.pose.post).to_global(mission.b2c).quat
                    meas_w2c_bf_q = (-nf.pose.prior).to_global(
                        mission.b2c).quat * mission.odo.ori_off_q.conj()
                    new_ori_off_q = meas_w2c_bf_q.conj() * est_w2c_bf_q
                    filtered_ori_off_q = tools.mean_q(
                        [mission.odo.ori_off_q, new_ori_off_q], [0.8, 0.2])
                    if 0:
                        # TODO: debug, something wrong here as roll and pitch explode
                        mission.odo.ori_off_q = filtered_ori_off_q
                    else:
                        y, p, r = tools.q_to_ypr(filtered_ori_off_q)
                        mission.odo.ori_off_q = tools.ypr_to_q(y, 0, 0)
                    ori_offs.append(mission.odo.ori_off_q)
                    logger.info('new gimbal offset: %s' % (list(
                        map(lambda x: round(math.degrees(x), 2),
                            tools.q_to_ypr(mission.odo.ori_off_q))), ))

                pts3d = np.array([
                    pt.pt3d for pt in mission.odo.state.map3d.values()
                    if pt.active
                ])
                if len(pts3d) > 0 and nf.pose.post:
                    ground_alt = np.quantile(
                        -pts3d[:, 1], 0.2)  # neg-y is altitude in cam frame
                    drone_alt = -(-nf.pose.post).loc[1]
                    expected_dist = drone_alt - mission.coord0[2]
                    modeled_dist = drone_alt - ground_alt
                    logger.info(
                        'ground at %.1f mr (%.1f mr), drone alt %.1f mr (%.1f mr)'
                        % (ground_alt, mission.coord0[2], modeled_dist,
                           expected_dist))

                if args.verbosity > 3:
                    keyframes = mission.odo.all_keyframes()
                    post = np.zeros((len(keyframes), 7))
                    k, prior = 0, np.zeros((len(keyframes), 7))
                    for j, kf in enumerate(
                        [kf for kf in keyframes if kf.pose.post]):
                        post[j, :] = (-kf.pose.post).to_global(
                            NokiaSensor.b2c).to_global(
                                NokiaSensor.w2b).to_array()
                        if kf.measure is not None:
                            prior[k, :] = (-kf.pose.prior).to_global(
                                NokiaSensor.b2c).to_global(
                                    NokiaSensor.w2b).to_array()
                            k += 1
                    if ax is not None:
                        ax.clear()
                    ax = tools.plot_poses(post,
                                          axis=(0, 1, 0),
                                          up=(0, 0, 1),
                                          ax=ax,
                                          wait=False)
                    tools.plot_poses(prior[:k, :],
                                     axis=(0, 1, 0),
                                     up=(0, 0, 1),
                                     ax=ax,
                                     wait=False,
                                     colors=map(lambda c: (c, 0, 0, 0.5),
                                                np.linspace(.3, 1.0, k)))
                    plt.pause(0.05)

        except TypeError as e:
            if 0:
                nf, *_ = mission.odo.process(
                    img, datetime.fromtimestamp(mission.time0 + t), prior,
                    quaternion.one)
                if nf and nf.pose.post:
                    prior = nf.pose.post
            else:
                raise e

        results0.append(None if nf is None or nf.pose is None
                        or nf.pose.post is None else (
                            nf.pose, getattr(nf, 'measure', None), nf.time,
                            nf.id))

    try:
        mission.odo.flush_state()  # flush all keyframes and keypoints
        map3d = mission.odo.removed_keypoints
        #        results = [(kf.pose, kf.measure, kf.time, kf.id) for kf in mission.odo.removed_keyframes if kf.pose.post]
        keyframes = [
            dict(pose=kf.pose,
                 meas=kf.measure,
                 time=kf.time,
                 id=kf.id,
                 kps_uv=kf.kps_uv) for kf in mission.odo.removed_keyframes
            if kf.pose.post
        ]
        frame_names = [frame_names0[kfid2img[kf['id']]] for kf in keyframes]
        meta_names = [meta_names0[kfid2img[kf['id']]] for kf in keyframes]
        ground_truth = [ground_truth0[kfid2img[kf['id']]] for kf in keyframes]
        ba_errs = np.array(ba_errs)

        if 1:
            pts3d = np.array([pt.pt3d for pt in map3d])
            ground_alt = np.quantile(-pts3d[:, 1],
                                     0.5)  # neg-y is altitude in cam frame
            drone_alt = -(-keyframes[-1]['pose'].post).loc[1]  # world
            expected_dist = drone_alt - mission.coord0[2]
            modeled_dist = drone_alt - ground_alt
            logger.info(
                'ground at %.1f m (%.1f m), drone alt %.1f m (%.1f m)' %
                (ground_alt, mission.coord0[2], modeled_dist, expected_dist))

        if args.kapture:
            kapture = KaptureIO(args.kapture,
                                reset=True,
                                jpg_qlt=95,
                                scale=0.5)
            kapture.set_camera(1, 'cam', mission.cam)
            kapture.add_frames(mission.odo.removed_keyframes, map3d)
            kapture.write_to_dir()
    except AttributeError as e:
        if 0:
            map3d = None
            results = [(kf.pose, None, kf.time, kf.id)
                       for kf in mission.odo.all_keyframes()]
        else:
            raise e

    mission.odo.quit()
    if 0:
        # show results as given by the online version of the algorithm
        results, frame_names, meta_names, ground_truth = results0, frame_names0, meta_names0, ground_truth0

    logger.info('time spent: %.0fs' %
                (datetime.now() - started).total_seconds())

    repr_errs = np.concatenate([
        list(kf.repr_err.values()) for kf in mission.odo.removed_keyframes
        if len(kf.repr_err)
    ])
    err_q95 = np.quantile(np.linalg.norm(repr_errs, axis=1),
                          0.95) if len(repr_errs) else np.nan
    logger.info('95th percentile repr err: %.3fpx' % (err_q95, ))

    interp = interp_loc(mission.odo.removed_keyframes, mission.time0)
    loc_est = np.array([
        np.ones((3, )) * np.nan if kf.pose.post is None else
        (-kf.pose.post).loc for kf in mission.odo.removed_keyframes
    ])
    loc_gps = np.array([
        interp(kf.time.timestamp() - mission.time0).flatten()
        for kf in mission.odo.removed_keyframes
    ]).squeeze()
    mean_loc_err = np.nanmean(np.linalg.norm(loc_est - loc_gps, axis=1))
    logger.info('mean loc err: %.3fm' % (mean_loc_err, ))
    logger.info('mean ba errs (repr, loc, ori): %.3fpx %.3fm %.3fdeg' %
                (*np.nanmean(ba_errs[:, 1:], axis=0), ))
    logger.info('latest ba errs (repr, loc, ori): %.3fpx %.3fm %.3fdeg' %
                (*ba_errs[-1, 1:], ))

    if 0:
        plt.figure(10)
        plt.plot(loc_est[:, 0], loc_est[:, 1])
        plt.plot(loc_gps[:, 0], loc_gps[:, 1])
        plt.show()

    if args.verbosity > 3:
        plt.show()  # stop to show last trajectory plot

    res_folder = os.path.dirname(res_file)
    if res_folder:
        os.makedirs(res_folder, exist_ok=True)
    with open(res_file, 'wb') as fh:
        pickle.dump(
            (keyframes, map3d, frame_names, meta_names, ground_truth, ba_errs),
            fh)

    if args.verbosity > 1:
        plot_results(keyframes,
                     map3d,
                     frame_names,
                     meta_names,
                     ground_truth,
                     ba_errs,
                     res_file,
                     nadir_looking=args.nadir_looking)
Example #22
    def _update_ori(self, db_stars, cols, stars, tol=2e-4):
        """ fine tune orientation based on matches """
        if not MANUAL_ATTITUDE or not self.debug:
            K = self.cam[0].intrinsic_camera_mx(legacy=False)
            iK = self.cam[0].inv_intrinsic_camera_mx(legacy=False)
            I = np.where(np.array([s is not None for s in db_stars]))[0]
            db_pts = np.array([
                (db_stars[i][cols['ix']], db_stars[i][cols['iy']]) for i in I
            ])
            img_pts = np.array([(stars[i]['x'], stars[i]['y']) for i in I])

            # def solve(Y, X):
            #     L = np.linalg.cholesky(X.T.dot(X) + 1e-8*np.diag(np.ones(3)))
            #     iL = np.linalg.inv(L)
            #     A = np.dot(iL.T, iL.dot(X.T.dot(Y))).T
            #     dx, dy = A[:, 2]
            #     th0 = math.acos(np.clip((A[0, 0]+A[1, 1])/2, -1, 1))
            #     th1 = math.asin(np.clip((A[1, 0]-A[0, 1])/2, -1, 1))
            #     return dx, dy, (th0+th1)/2
            #            dx, dy, th = solve(img_pts, np.hstack((db_pts, np.ones((len(db_pts), 1)))))

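            # cost: pseudo-huber loss on the reprojection error of a 2d rigid transform (dx, dy, th) plus lens distortion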
            def cost_fn(x, Y, X, K, iK):
                dx, dy, th, *dist_coefs = x
                A = np.array([
                    [math.cos(th), -math.sin(th), dx],
                    [math.sin(th), math.cos(th), dy],
                ])
                Xdot = Camera.distort(X.dot(A.T), dist_coefs, K, iK)
                return tools.pseudo_huber_loss(np.linalg.norm(Y - Xdot,
                                                              axis=1),
                                               delta=3)

            dn = [0, 4, 5, 8,
                  12][0]  # select the level of detail of the distortion model
            dist_coefs = np.zeros(dn) if self.cam[
                0].dist_coefs is None else self.cam[0].dist_coefs[:dn]
            dist_coefs = np.pad(dist_coefs, (0, dn - len(dist_coefs)),
                                'constant')

            x0 = (0, 0, 0, *dist_coefs)
            x, _ = leastsq(cost_fn,
                           x0,
                           args=(img_pts,
                                 np.hstack((db_pts, np.ones(
                                     (len(db_pts), 1)))), K, iK))
            dx, dy, th, *dist_coefs = x

            for c in self.cam:
                c.dist_coefs = dist_coefs
            delta = (dy * math.radians(c.y_fov) / c.height,
                     dx * math.radians(c.x_fov) / c.width, -th)
            #cv2.waitKey()

        else:
            ctrl = {
                27: [0, 0, 0],  # esc
                115: [1, 0, 0],  # s
                119: [-1, 0, 0],  # w
                100: [0, 1, 0],  # d
                97: [0, -1, 0],  # a
                113: [0, 0, 1],  # q
                101: [0, 0, -1],  # e
            }
            for i in range(10):
                k = cv2.waitKey()
                if k in ctrl:
                    break
            delta = math.radians(0.25) * np.array(ctrl[k])

        self.q = self.q * tools.ypr_to_q(*delta)
        return np.max(np.abs(delta)) < tol