Code example #1
0
File: explorer.py — Project: oknuutti/visnav-py
    def renderParams(self):
        """Compute rendering parameters from the current system model.

        Returns:
            tuple: ``(light, (x, y, z), (angle_deg, ax, ay, az))`` where
            ``light`` is the relative light direction, ``(x, y, z)`` the
            (crop-compensated, possibly centered) object offset, and the last
            element the final object rotation as degrees plus rotation axis.
        """
        m = self.systemModel

        # NOTE: with wide angle camera, would need to take into account
        #       im_xoff, im_yoff, im_width and im_height
        # horizontal offset of the crop center from the camera center (pixels),
        # converted to an angle via the camera's horizontal field of view
        xc_off = (self.im_xoff + self.im_width / 2 - m.cam.width / 2)
        xc_angle = xc_off / m.cam.width * math.radians(m.cam.x_fov)

        # same conversion for the vertical direction
        yc_off = (self.im_yoff + self.im_height / 2 - m.cam.height / 2)
        yc_angle = yc_off / m.cam.height * math.radians(m.cam.y_fov)

        # first rotate around x-axis, then y-axis,
        # note that diff angle in image y direction corresponds to rotation
        # around x-axis and vice versa
        q_crop = (np.quaternion(math.cos(-yc_angle / 2), math.sin(
            -yc_angle / 2), 0, 0) * np.quaternion(math.cos(-xc_angle / 2), 0,
                                                  math.sin(-xc_angle / 2), 0))

        x = m.x_off.value
        y = m.y_off.value
        z = m.z_off.value

        # rotate offsets using q_crop
        x, y, z = tools.q_times_v(q_crop.conj(), np.array([x, y, z]))

        # maybe put object in center of view
        if self._center_model:
            x, y = 0, 0

        # get object rotation and turn it a bit based on cropping effect
        q, err_q = m.gl_sc_asteroid_rel_q(self._discretize_tol)
        if self._discretize_tol:
            # remember the discretization error so callers can compensate later
            self.latest_discretization_err_q = err_q

        qfin = (q * q_crop.conj())
        rv = tools.q_to_angleaxis(qfin)

        # light direction
        light, _ = m.gl_light_rel_dir(err_q)

        # angle converted to degrees; axis components passed through as-is
        res = (light, (x, y, z), (math.degrees(rv[0]), ) + tuple(rv[1:]))
        return res
Code example #2
0
    def get_pose(self, coord0, mes0, mes1, weight, full_rot):
        """Interpolate a camera-to-world pose between two measurements.

        Builds world-to-camera poses from both measurements, blends the
        locations linearly and the rotations by scaling the relative
        rotation angle with ``weight``, then returns the inverted
        (camera-to-world) pose.
        """
        # mes[3:6] holds roll/pitch/yaw; use yaw only unless full_rot is set
        if full_rot:
            ypr0, ypr1 = np.flip(mes0[3:6]), np.flip(mes1[3:6])
        else:
            ypr0, ypr1 = [mes0[5], 0, 0], [mes1[5], 0, 0]

        pose0 = tools.lla_ypr_to_loc_quat(coord0,
                                          mes0[:3],
                                          ypr0,
                                          b2c=self.b2c)
        pose1 = tools.lla_ypr_to_loc_quat(coord0,
                                          mes1[:3],
                                          ypr1,
                                          b2c=self.b2c)

        # scale the relative rotation by `weight` in angle-axis form
        rel_aa = tools.q_to_angleaxis((pose1 - pose0).quat)
        rel_aa[0] = tools.wrap_rads(rel_aa[0]) * weight

        blended_loc = pose0.loc * (1 - weight) + pose1.loc * weight
        w2c = Pose(blended_loc, tools.angleaxis_to_q(rel_aa) * pose0.quat)

        # unary minus inverts the pose: world-to-cam -> cam-to-world
        return -w2c
Code example #3
0
File: flight_calib.py — Project: oknuutti/hw_visnav
def get_ba_params(path, ff, lf, results, kapt, sensor_id):
    """Collect bundle-adjustment inputs from a kapture dataset.

    Args:
        path: dataset root; keypoint files are read from
            ``<path>/kapture/reconstruction/keypoints/<FEATURE_NAME>/``.
        ff: first frame id to include (inclusive).
        lf: last frame id (exclusive), or None for no upper limit.
        results: per-frame result records; entries whose ``'meas'`` is not
            None contribute measurement priors via ``'pose'.prior``.
        kapt: kapture object providing records_camera, trajectories,
            points3d, keypoints and observations.
        sensor_id: id of the camera sensor inside ``kapt``.

    Returns:
        tuple: (poses, pts3d, pts2d, cam_idxs, pt3d_idxs, meas_r, meas_aa,
        meas_idxs, obs_kp).
    """
    # frames inside the requested id range [ff, lf)
    frames = [(id, fname[sensor_id])
              for id, fname in kapt.records_camera.items()
              if id >= ff and (lf is None or id < lf)]
    fname2id = {fname: id for id, fname in frames}

    # camera poses as compact [angle-axis (3), translation (3)] rows
    poses = np.array([[
        *tools.q_to_angleaxis(kapt.trajectories[id][sensor_id].r, True),
        *kapt.trajectories[id][sensor_id].t
    ] for id, fname in frames]).astype(float)

    pts3d = kapt.points3d[:, :3]
    if SET_3D_POINT_ALT:
        # flatten all 3d points to the takeoff-lawn altitude
        # (y seems to be the "down" axis here — TODO confirm convention)
        pts3d[:, 1] = -TAKEOFF_LAWN_ALT

    # load the per-frame 2d keypoint arrays from disk
    feat = kapt.keypoints[FEATURE_NAME]
    uv_map = {}
    for id_f, fname in frames:
        uvs = image_keypoints_from_file(
            os.path.join(path, 'kapture', 'reconstruction', 'keypoints',
                         FEATURE_NAME, fname + '.kpt'), feat.dtype, feat.dsize)
        uv_map[id_f] = uvs

    # map frame-id -> {3d-point-id -> observed 2d coords}
    f_uv = {}
    for id3, r in kapt.observations.items():
        for fname, id2 in r[FEATURE_NAME]:
            if fname in fname2id:
                id_f = fname2id[fname]
                f_uv.setdefault(id_f, {})[id3] = uv_map[id_f][id2, :]

    # f_uv = {id_f: {id3: uv_map[id_f][id2, :]
    #                     for id3 in range(len(pts3d))
    #                         for id2 in range(len(uv_map[id_f]))
    #                             if (fname, id2) in kapt.observations.get(id3, {}).get(VO_FEATURE_NAME, {})}
    #         for id_f, fname in frames}

    # ids of all 3d points observed in at least one selected frame
    obs_kp = list(set.union(*[set(m.keys()) for m in f_uv.values()]))

    # flatten observations into parallel arrays for the BA solver
    cam_idxs, pt3d_idxs, pts2d = list(
        map(
            np.array,
            zip(*[(i, id3, uv.flatten())
                  for i, (id_f, kps_uv) in enumerate(f_uv.items())
                  for id3, uv in kps_uv.items()])))

    # measurement priors: indices of result entries that carry a measurement
    meas_idxs = np.array([
        i for i, r in enumerate(results)
        if r['meas'] is not None and i < len(poses)
    ],
                         dtype=int)
    # conjugated prior quaternions, then prior locations rotated into that frame
    meas_q = {i: results[i]['pose'].prior.quat.conj() for i in meas_idxs}
    meas_r = np.array([
        tools.q_times_v(meas_q[i], -results[i]['pose'].prior.loc)
        for i in meas_idxs
    ],
                      dtype=np.float32)
    meas_aa = np.array(
        [tools.q_to_angleaxis(meas_q[i], compact=True) for i in meas_idxs],
        dtype=np.float32)

    return poses, pts3d, pts2d, cam_idxs, pt3d_idxs, meas_r, meas_aa, meas_idxs, obs_kp
Code example #4
0
File: renderer.py — Project: oknuutti/synthspace
 def rotation_angleaxis(self):
     """Return the rotation as an (angle, x, y, z) tuple, or None when unset.

     The name reflects the ordering: tools.q_to_angleaxis yields the angle
     first, then the axis components.
     """
     # explicit None check: a falsy (e.g. zero) quaternion would otherwise
     # wrongly be reported as "no rotation set"
     if self.q is None:
         return None
     return tuple(tools.q_to_angleaxis(self.q))
Code example #5
0
File: renderer.py — Project: oknuutti/synthspace
 def rotation_axis_angle(self):
     """Return the rotation as an (angle, x, y, z) tuple, or None when unset.

     NOTE(review): despite the name, tools.q_to_angleaxis returns the angle
     first, then the axis — same ordering as rotation_angleaxis elsewhere.
     """
     # explicit None check: a falsy (e.g. zero) quaternion would otherwise
     # wrongly be reported as "no rotation set"
     if self.q is None:
         return None
     return tuple(tools.q_to_angleaxis(self.q))
Code example #6
0
    def _bundle_adjustment(self, keyframes=None, current_only=False, same_thread=False):
        """Run visual-GPS bundle adjustment over the given keyframes.

        Args:
            keyframes: keyframes to optimize; defaults to all current
                keyframes, or just the latest one if ``current_only``.
            current_only: if True, optimize only the latest keyframe's pose
                (poses_only mode; no pruning, marginalization or intrinsics).
            same_thread: if True, run the BA solver in this thread instead
                of a parallel one.
        """
        logger.info('starting bundle adjustment')
        # currently always False; kept as a switch for skipping measurement priors
        skip_meas = False

        with self._new_frame_lock:
            if keyframes is None:
                if current_only:
                    keyframes = self.state.keyframes[-1:]
                else:
                    keyframes = self.state.keyframes

        # possibly do camera calibration before bundle adjustment
        if not self.cam_calibrated and len(keyframes) >= self.ba_interval:  #  self.ba_interval, self.max_keyframes
            # experimental, doesnt work
            self.calibrate_cam(keyframes)
            if 0:
                # disable for continuous calibration
                self.cam_calibrated = True

        # TODO:
        #     - keyframe pruning from the middle like in orb-slam
        #     - using a changing subset of keypoints like in HybVIO ()
        #     - supply initial points to LK-feature tracker based on predicted state and existing 3d points
        #     - inverse distance formulation, project covariance also (see vins)
        #     ?- speed and angular speed priors that limit them to reasonably low values
        #     - when get basic stuff working:
        #           - try ransac again with loose repr err param values
        #           - try time-diff optimization (feedback for frame-skipping at nokia.py)

        with self._new_frame_lock:
            # optionally optimize the first two radial distortion coefficients
            dist_coefs = None
            if self.ba_dist_coef and not current_only and len(keyframes) >= self.max_keyframes:
                assert np.where(np.array(self.cam.dist_coefs) != 0)[0][-1] + 1 <= 2, 'too many distortion coefficients'
                dist_coefs = self.cam.dist_coefs[:2]

            keyframes, ids, poses_mx, pts3d, pts2d, v_pts2d, px_err_sd, cam_idxs, pt3d_idxs = \
                    self._get_visual_ba_args(keyframes, current_only, distorted=dist_coefs is not None)
            # with skip_meas hard-coded False this is always 0
            skip_pose_n = (1 if skip_meas else 0) if not current_only else 0

            if not skip_meas:
                # measurement priors: rotated prior locations, angle-axis
                # orientations and timing offsets for keyframes with a measure
                meas_idxs = np.array([i for i, kf in enumerate(keyframes) if kf.measure is not None], dtype=int)
                meas_q = {i: keyframes[i].pose.prior.quat.conj() for i in meas_idxs}
                meas_r = np.array([tools.q_times_v(meas_q[i], -keyframes[i].pose.prior.loc) for i in meas_idxs])
                meas_aa = np.array([tools.q_to_angleaxis(meas_q[i], compact=True) for i in meas_idxs])
                t_off = np.array([keyframes[i].measure.time_off + keyframes[i].measure.time_adj for i in meas_idxs]).reshape((-1, 1))
                if 1:  # use velocity measure instead of pixel vel
                    v_pts2d = np.array([tools.q_times_v(meas_q[i], -keyframes[i].pose.prior.vel) for i in meas_idxs])
            else:
                meas_idxs, meas_r, meas_aa, t_off = [np.empty(0)] * 4

            if not current_only:
                # NOTE: rem_kf_ids/rem_kp_ids are only defined on this branch;
                # all later uses are guarded by the same `not current_only`
                rem_kf_ids = self.prune_keyframes()
                rem_kp_ids = self.prune_map3d(rem_kf_ids)
                logger.info('pruning %d keyframes and %d keypoints' % (len(rem_kf_ids), len(rem_kp_ids)))

        # normalize shapes/dtypes for both skip_meas branches
        meas_idxs = meas_idxs.astype(int)
        meas_r = meas_r.reshape((-1, 3))
        meas_aa = meas_aa.reshape((-1, 3))

        args = (poses_mx, pts3d, pts2d, v_pts2d, cam_idxs, pt3d_idxs, self.cam.cam_mx, dist_coefs, px_err_sd, meas_r,
                meas_aa, t_off, meas_idxs, self.loc_err_sd, self.ori_err_sd)
        kwargs = dict(max_nfev=self.max_ba_fun_eval, skip_pose_n=skip_pose_n, huber_coef=(1, 5, 0.5),
                      poses_only=current_only, weighted_residuals=True)

        # optionally also optimize some camera intrinsics
        if self.ba_n_cam_intr and not current_only and len(keyframes) >= self.max_keyframes:
            kwargs['n_cam_intr'] = self.ba_n_cam_intr

        if self.enable_marginalization and not current_only:
            # pass the previous prior and mark which poses/points to marginalize
            kf_ids = [kf.id for kf in keyframes]
            kwargs.update(dict(
                prior_k=self.state.ba_prior[0],
                prior_x=self.state.ba_prior[1],
                prior_r=self.state.ba_prior[2],
                prior_J=self.state.ba_prior[3],
                prior_cam_idxs=np.where(np.in1d(self.state.ba_prior_fids, kf_ids))[0],
                marginalize_pose_idxs=np.where(np.in1d(kf_ids, rem_kf_ids))[0],
                marginalize_pt3d_idxs=np.where(np.in1d(ids, rem_kp_ids))[0],
            ))

        poses_ba, pts3d_ba, dist_ba, cam_intr, t_off, new_prior, errs = \
            self._call_ba(vis_gps_bundle_adj, args, kwargs, parallize=not same_thread)

        if self.enable_marginalization and not current_only:
            # remember the updated prior for the next BA round
            self.state.ba_prior = new_prior
            self.state.ba_prior_fids = kf_ids

        if dist_ba is not None:
            print('\nDIST: %s\n' % dist_ba)
        if cam_intr is not None:
            print('\nCAM INTR: %s\n' % cam_intr)

        if 0 and not current_only:
            # show result
            from visnav.postproc import replay
            replay([kf.image for kf in keyframes], pts2d, self.cam, poses_ba, cam_idxs, pts3d_ba, pt3d_idxs,
                   frame_ids=[kf.id for kf in keyframes])

        with self._new_frame_lock:
            # write back optimized time offsets as adjustments
            for i, dt in zip(meas_idxs, t_off):
                keyframes[i].measure.time_adj = dt - keyframes[i].measure.time_off

            self._update_poses(keyframes, ids, poses_ba, pts3d_ba, dist_ba, cam_intr, skip_pose_n=skip_pose_n, pop_ba_queue=not same_thread)

            if not current_only:
                # actually remove the keyframes/keypoints selected for pruning
                self.del_keyframes(rem_kf_ids)
                self.del_keypoints(rem_kp_ids, kf_lim=None if self.enable_marginalization else self.min_retain_obs)

        if 0 and not current_only:
            # show result
            from visnav.run import replay_keyframes
            replay_keyframes(self.cam, self.all_keyframes(), self.all_pts3d())

        if self.ba_err_logger is not None and not current_only:
            self.ba_err_logger(keyframes[-1].id, errs)