Example #1
def rotation_between(v1, v2):
    """Returns a rotation matrix that rotates v2 around the z-axis to match v1."""
    angle1 = np.arctan2(v1[1], v1[0])
    angle2 = np.arctan2(v2[1], v2[0])
    angle = angle1 - angle2
    rot = quaternion.as_rotation_matrix(
        quaternion.from_rotation_vector(np.array([0.0, 0.0, angle])))
    return rot
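A minimal usage sketch of the helper above (assuming numpy and the numpy-quaternion package are available and rotation_between from the snippet is in scope):

import numpy as np
import quaternion

v1 = np.array([1.0, 0.0, 0.0])
v2 = np.array([0.0, 1.0, 0.0])
rot = rotation_between(v1, v2)
aligned = rot @ v2
# the azimuth of the rotated v2 now matches the azimuth of v1
print(np.arctan2(aligned[1], aligned[0]), np.arctan2(v1[1], v1[0]))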
Example #2
    def rotate_sensors_wrt_to_current_sensor_pose_around_Y(
            self, direction='ccw', my_angle=np.pi / 20):
        """
        :param direction: 'cw' or 'ccw'
        :param my_angle: The angle (radians) by which to rotate the sensors about the y-axis from their current pose
        :return:
        """

        if direction == 'ccw':
            rot_quat = quaternion.from_rotation_vector([0.0, my_angle, 0.0])
        elif direction == 'cw':
            rot_quat = quaternion.from_rotation_vector([0.0, -my_angle, 0.0])
        else:
            return
        self.rotate_sensors_wrt_to_current_sensor_pose(
            [rot_quat, rot_quat, rot_quat])
        return
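The cw/ccw branches differ only in the sign of the rotation vector; a standalone check of that symmetry (plain numpy-quaternion, the sensor class itself is project-specific):

import numpy as np
import quaternion

my_angle = np.pi / 20
ccw = quaternion.from_rotation_vector([0.0, my_angle, 0.0])
cw = quaternion.from_rotation_vector([0.0, -my_angle, 0.0])
print(ccw * cw)  # approximately quaternion(1, 0, 0, 0): the two rotations cancel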
Example #3
    def update(self, delta_time):
        """Animates the cube, accelerating it with gravity and rotating it."""
        self.velocity += GRAVITY * delta_time
        self.position.y += self.velocity * delta_time

        q = from_rotation_vector(
            (self.rotation_axis * self.rotation_speed * delta_time).to_np3())
        self.rotation = q * self.rotation
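The q * self.rotation update composes a small per-frame rotation with the accumulated orientation; a minimal sketch of the same integration loop using only numpy-quaternion (no engine types, names are illustrative):

import numpy as np
import quaternion

rotation = np.quaternion(1, 0, 0, 0)        # identity orientation
rotation_axis = np.array([0.0, 1.0, 0.0])   # spin about +y
rotation_speed = np.pi                      # rad/s
delta_time = 1.0 / 60.0

for _ in range(60):                         # one simulated second
    q = quaternion.from_rotation_vector(rotation_axis * rotation_speed * delta_time)
    rotation = q * rotation

print(quaternion.as_rotation_vector(rotation))  # magnitude ~pi about the y-axis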
Example #4
def create_gh_traj_ludewig(df: pd.DataFrame) -> PoseTrajectory:
    """Create GH PoseTrajectory from Elevation, PoE, Axial Rotation contained in df."""
    q_elev = q.from_rotation_vector(
        np.deg2rad(df['Elevation'].to_numpy())[..., np.newaxis] *
        np.array([1, 0, 0]))
    q_poe = q.from_rotation_vector(
        np.deg2rad(df['PoE'].to_numpy())[..., np.newaxis] *
        np.array([0, 0, 1]))
    q_axial = q.from_rotation_vector(
        np.deg2rad(df['Axial'].to_numpy())[..., np.newaxis] *
        np.array([0, 1, 0]))

    quat_traj = q.as_float_array(q_elev * q_poe * q_axial)
    pos = np.zeros((q_elev.size, 3))
    traj = PoseTrajectory.from_quat(pos, quat_traj)
    traj.long_axis = np.array([0, 1, 0])
    return traj
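The broadcasting pattern above (an (N, 1) angle column times a unit axis) produces one rotation vector per row, and the elementwise quaternion product composes the three rotations per sample; a minimal sketch with a hypothetical two-row DataFrame (the same idea applies to the ST variant below):

import numpy as np
import pandas as pd
import quaternion as q

df = pd.DataFrame({'Elevation': [0.0, 30.0], 'PoE': [0.0, 10.0], 'Axial': [0.0, 5.0]})
q_elev = q.from_rotation_vector(np.deg2rad(df['Elevation'].to_numpy())[..., np.newaxis] * np.array([1, 0, 0]))
q_poe = q.from_rotation_vector(np.deg2rad(df['PoE'].to_numpy())[..., np.newaxis] * np.array([0, 0, 1]))
q_axial = q.from_rotation_vector(np.deg2rad(df['Axial'].to_numpy())[..., np.newaxis] * np.array([0, 1, 0]))
print(q.as_float_array(q_elev * q_poe * q_axial))  # (2, 4) array of w, x, y, z components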
Example #5
def create_st_traj_ludewig(df: pd.DataFrame) -> PoseTrajectory:
    """Create ST PoseTrajectory from Elevation, PoE, Axial Rotation contained in df."""
    q_repro = q.from_rotation_vector(
        np.deg2rad(df['ReProtraction'].to_numpy())[..., np.newaxis] *
        np.array([0, 1, 0]))
    q_latmed = q.from_rotation_vector(
        np.deg2rad(df['LatMedRot'].to_numpy())[..., np.newaxis] *
        np.array([1, 0, 0]))
    q_tilt = q.from_rotation_vector(
        np.deg2rad(df['Tilt'].to_numpy())[..., np.newaxis] *
        np.array([0, 0, 1]))

    quat_traj = q.as_float_array(q_repro * q_latmed * q_tilt)
    pos = np.zeros((q_repro.size, 3))
    traj = PoseTrajectory.from_quat(pos, quat_traj)
    traj.long_axis = np.array([0, 0, 1])
    return traj
Example #6
 def reset_dx_error_state(self):
     self.dxapp = np.zeros((15, ))
     # update also dx to keep a match between the two
     self.dx.p = self.dxapp[0:3]
     self.dx.v = self.dxapp[3:6]
     self.dx.q = quat.from_rotation_vector(self.dxapp[6:9])
     self.dx.ab = self.dxapp[9:12]
     self.dx.wb = self.dxapp[12:15]
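The reset relies on a zero rotation vector mapping to the identity quaternion; a one-line check:

import numpy as np
import quaternion as quat

print(quat.from_rotation_vector(np.zeros(3)))  # quaternion(1, 0, 0, 0)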
Example #7
def WignerD_mm_fromvector(l, vect):
    """
    TODO doc
    """
    if use_moble_quaternion:
        return WignerD_mm(l, quaternion.from_rotation_vector(vect))
    else:
        return WignerD_mm(l, CQuat.from_rotvector(vect))
Example #8
 def test_root_pose_actor(self):
     act = RigidDynamic()
     r = TreeRobot()
     r.add_link(Link('l0', RigidDynamic()))
     r.attach_root_node_to_actor(act)
     pose = ((1, 2, 3), npq.from_rotation_vector([0.1, 0.2, 0.3]))
     act.set_global_pose(pose)
     self.assert_pose(pose, r.root_pose)
Example #9
def calibrateRawIMU(acc, ori, pose):

    # The order of IMUs is: [left_lower_wrist, right_lower_wrist, left_lower_leg, right_lower_leg, head, back]
    #SMPL_SENSOR = ['L_Shoulder', 'R_Shoulder', 'L_Knee', 'R_Knee', 'Head', 'Pelvis']
    SMPL_SENSOR = ['L_Elbow', 'R_Elbow', 'L_Knee', 'R_Knee', 'Head', 'Pelvis']

    # safety check if any frame has NAN Values
    frames2del = np.unique(np.where(np.isnan(ori) == True)[0])
    ori = np.delete(ori, frames2del, 0)
    acc = np.delete(acc, frames2del, 0)
    pose = np.delete(pose, frames2del, 0)
    pose = pose.reshape(-1, 24, 3)
    seq_len = len(ori)

    # calibration of pose parameters
    # ---------------------------------------------
    SMPL_MAJOR_JOINTS = [1, 2, 3, 4, 5, 6, 9, 12, 13, 14, 15, 16, 17, 18, 19]
    pose = pose[:, SMPL_MAJOR_JOINTS, :]
    qs = quaternion.from_rotation_vector(pose)
    pose_rot = np.reshape(quaternion.as_rotation_matrix(qs), [seq_len, 15, 9])
    pose = pose_rot.reshape(-1, 15, 3, 3)

    ############### calculation in Rotation Matrix #########################
    # 1. calib: R(T_I) = inv( R(I_S) * R(S_B) * R(B_T) ) [sensor 4: head for 1st frame]. rti is constant for all frames
    ris_head = ori[0, 4, :, :]
    rsb_head = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    rbt_head = np.identity(3)
    rti = np.linalg.inv(np.linalg.multi_dot([ris_head, rsb_head, rbt_head]))
    # 2. R(T_S) = R(T_I) * R(I_S) for all sensors for all frames
    seq_len = len(ori)
    rts = np.asarray([
        np.dot(rti, ori[k, j, :, :])
        for k, j in itertools.product(range(seq_len), range(6))
    ]).reshape(-1, 6, 3, 3)
    # 3. calculate R(B_T) for all 6 joints
    rbt = myUtil.getGlobalBoneOriFromPose(pose[0], SMPL_SENSOR)

    # 4. calculate R(B_S) = R(B_T) * R(T_S) for the first frame which will be constant across all frames
    rbs = np.array([np.dot(rbt[k], rts[0, k]) for k in range(6)])

    # 5. calculate bone2smpl (all frames) : R(T_B) = R(T_S) * inv(R(B_S))
    rtb = np.asarray([
        np.dot(rts[k, j, :, :], rbs[j])
        for k, j in itertools.product(range(seq_len), range(6))
    ])
    calibrated_ori = rtb.reshape(-1, 6, 3, 3)

    # 6. normalize with respect to the root
    root_inv = np.linalg.inv(calibrated_ori[:, 5, :, :])
    # root_inv = np.transpose(calibrated_ori[:, 5], [0, 2, 1])
    norm_ori = np.asarray([
        np.matmul(root_inv[k], calibrated_ori[k, j, :, :])
        for k, j in itertools.product(range(seq_len), range(6))
    ])
    norm_ori = norm_ori.reshape(-1, 6, 3, 3)

    #return norm_acc[:,-1,:],ori_quat[:,-1,:],pose_quat
    return norm_ori[:, 0:5, :], pose
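The axis-angle to rotation-matrix conversion used on the selected SMPL joints above can be reproduced in isolation; a minimal sketch with random pose data in the same shapes:

import numpy as np
import quaternion

seq_len, n_joints = 4, 15
pose = 0.1 * np.random.randn(seq_len, n_joints, 3)    # axis-angle per joint
qs = quaternion.from_rotation_vector(pose)            # (seq_len, n_joints) quaternion array
pose_rot = quaternion.as_rotation_matrix(qs)          # (seq_len, n_joints, 3, 3)
print(pose_rot.reshape(seq_len, n_joints, 9).shape)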
Example #10
def DeepRoute(path: str) -> Dataset:
    """`DeepRoute <https://gas.graviti.cn/dataset/graviti-open-dataset\
    /DeepRoute>`_ dataset.

    The file structure should be like::

        <path>
            pointcloud/
                00001.bin
                00002.bin
                ...
                10000.bin
            groundtruth/
                00001.txt
                00002.txt
                ...
                10000.txt

    Arguments:
        path: The root directory of the dataset.

    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.

    """
    root_path = os.path.abspath(os.path.expanduser(path))

    dataset = Dataset(DATASET_NAME)
    dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
    segment = dataset.create_segment()

    point_cloud_paths = glob(os.path.join(root_path, "pointcloud", "*.bin"))

    for point_cloud_path in point_cloud_paths:
        point_cloud_id = os.path.splitext(os.path.basename(point_cloud_path))[0]
        label_path = os.path.join(root_path, "groundtruth", f"{point_cloud_id}.txt")

        data = Data(point_cloud_path)
        data.label.box3d = []

        with open(label_path, encoding="utf-8") as fp:
            annotations = json.load(fp)["objects"]

        for annotation in annotations:
            bounding_box = annotation["bounding_box"]
            position = annotation["position"]

            label = LabeledBox3D(
                size=(bounding_box["length"], bounding_box["width"], bounding_box["height"]),
                translation=(position["x"], position["y"], position["z"]),
                rotation=from_rotation_vector((0, 0, annotation["heading"])),
                category=annotation["type"],
            )
            data.label.box3d.append(label)

        segment.append(data)

    return dataset
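Each box's heading is turned into a yaw-only quaternion by placing the angle in the z component of the rotation vector; a standalone check of that conversion (numpy-quaternion only, no tensorbay types):

import numpy as np
import quaternion

heading = np.deg2rad(30.0)
yaw = quaternion.from_rotation_vector((0, 0, heading))
# rotating the +x axis by this quaternion turns it by heading radians in the x-y plane
print(quaternion.rotate_vectors(yaw, np.array([1.0, 0.0, 0.0])))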
Example #11
def as_quaternion(rvecs, tvecs):
    pose_amount = len(rvecs)
    # Extrinsic parameter
    extr_q = np.zeros((pose_amount, 7))
    for i, (rvec, tvec) in enumerate(zip(rvecs, tvecs)):
        # extr_mat[i, 0:3, 0:3] = cv2.Rodrigues(rvec)[0]
        q = quaternion.from_rotation_vector(rvec.flatten())
        extr_q[i] = (tvec[0], tvec[1], tvec[2], q.x, q.y, q.z, q.w)
    return extr_q
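A quick usage sketch of the converter above (one pose; the rotation vector is OpenCV-style (3, 1), while the translation is assumed flat so that tvec[0] is a scalar):

import numpy as np
import quaternion

rvecs = [np.array([[0.0], [0.0], [np.pi / 2]])]
tvecs = [np.array([0.1, 0.2, 0.3])]
print(as_quaternion(rvecs, tvecs))  # one row: tx, ty, tz, qx, qy, qz, qw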
Example #12
def initial_state_vector():
    y0 = np.zeros(21)
    y0[6:9] = np.array([0, -0.21 - 2.325, 0])
    q = qn.from_rotation_vector([
        [0, 0, np.deg2rad(90)],
        # [np.deg2rad(180), 0, 0],
    ])
    y0[9:13] = qn.as_float_array(np.prod(q))
    return y0
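from_rotation_vector accepts several rotation vectors at once and returns a quaternion array, which np.prod then collapses into a single composed orientation (the reduction keeps array order, which matters since quaternion products do not commute); a standalone check of that idiom:

import numpy as np
import quaternion as qn

q = qn.from_rotation_vector([
    [0, 0, np.deg2rad(90)],
    [np.deg2rad(180), 0, 0],
])
composed = np.prod(q)  # q[0] * q[1]
print(qn.as_float_array(composed))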
Example #13
def test_quaternion_angle_recovery(angles):
    axis = sph2cart(angles[1], angles[0])
    angle = N.radians(angles[2])
    q1 = Q.from_rotation_vector(axis * angle)

    assert equal(N.abs(q1.angle()), N.abs(angle))
    assert q1.w == N.cos(q1.angle() / 2)
    assert equal(q1.vec, axis * N.sin(angle / 2))
    assert euler_equal(quaternion_to_euler(q1), angles)
Example #14
    def __init__(self, parent, *args, **kwargs):
        super().__init__(parent, *args, **kwargs)
        self.size = 800
        self.setMinimumSize(self.size, self.size)

        self.center = QPointF(self.size / 2, self.size / 2)
        self.orientation = quaternion.from_rotation_vector(
            0 * normalize(np.array([0, 1, 1])))
        self.data = FDAIData()
Example #15
def main(img_path):
    sess = tf.Session()
    model = RunModel(config, sess=sess)
    kps = get_people(img_path)

    input_img, proc_param, img = preprocess_image(img_path, kps)
    # Add batch dimension: 1 x D x D x 3
    input_img = np.expand_dims(input_img, 0)

    joints, verts, cams, joints3d, theta = model.predict(input_img,
                                                         get_theta=True)

    theta = theta[0, 3:75]
    q = quaternion.from_rotation_vector(theta[3:6])
    print(q)
    q = quaternion.from_rotation_vector(theta[6:9])
    print(q)
    get_angles(joints3d, cams[0], proc_param)
Example #16
def test_pointgoal_with_gps_compass_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = [
        "POINTGOAL_WITH_GPS_COMPASS_SENSOR",
        "COMPASS_SENSOR",
        "GPS_SENSOR",
        "POINTGOAL_SENSOR",
    ]
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.POINTGOAL_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.GPS_SENSOR.DIMENSIONALITY = 3

    config.freeze()
    with habitat.Env(config=config, dataset=None) as env:
        # start position is checked for validity for the specific test scene
        valid_start_position = [-1.3731, 0.08431, 8.60692]
        expected_pointgoal = [0.1, 0.2, 0.3]
        goal_position = np.add(valid_start_position, expected_pointgoal)

        # the starting quaternion is rotated 180 degrees about the z-axis, which
        # corresponds to the simulator using negative z as the forward direction
        start_rotation = [0, 0, 0, 1]

        env.episode_iterator = iter([
            NavigationEpisode(
                episode_id="0",
                scene_id=config.SIMULATOR.SCENE,
                start_position=valid_start_position,
                start_rotation=start_rotation,
                goals=[NavigationGoal(position=goal_position)],
            )
        ])

        env.reset()
        for _ in range(100):
            obs = env.step(sample_non_stop_action(env.action_space))
            pointgoal = obs["pointgoal"]
            pointgoal_with_gps_compass = obs["pointgoal_with_gps_compass"]
            compass = float(obs["compass"][0])
            gps = obs["gps"]
            # check to see if taking non-stop actions will affect static point_goal
            assert np.allclose(
                pointgoal_with_gps_compass,
                quaternion_rotate_vector(
                    quaternion.from_rotation_vector(
                        compass * np.array([0, 1, 0])).inverse(),
                    pointgoal - gps,
                ),
                atol=1e-5,
            )
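The assertion above rotates the world-frame goal offset into the agent frame by inverting the compass yaw; the same transform expressed with numpy-quaternion's own rotate_vectors (a sketch, independent of habitat's quaternion_rotate_vector helper):

import numpy as np
import quaternion

compass = np.deg2rad(30.0)                 # agent heading about +y
offset_world = np.array([1.0, 0.0, 0.0])   # pointgoal - gps in the world frame
yaw = quaternion.from_rotation_vector(compass * np.array([0, 1, 0]))
print(quaternion.rotate_vectors(yaw.inverse(), offset_world))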
Example #17
    def setUp(self):
        self._rigs = kapture.Rigs()
        looking_not_straight = quaternion.from_rotation_vector([0, np.deg2rad(5.), 0])
        self._rigs['rig', 'phone'] = kapture.PoseTransform()  # do a nested rig
        self._rigs['phone', 'cam1'] = kapture.PoseTransform(t=[-10, 0, 0], r=looking_not_straight).inverse()
        self._rigs['phone', 'cam2'] = kapture.PoseTransform(t=[+10, 0, 0], r=looking_not_straight.inverse()).inverse()
        self._trajectories_rigs = kapture.Trajectories()
        for timestamp, ratio in enumerate(np.linspace(0., 1., num=8)):
            looking_around = quaternion.from_rotation_vector([0, np.deg2rad(360. * ratio), 0])
            self._trajectories_rigs[timestamp, 'rig'] = kapture.PoseTransform(t=[0, 0, -100.], r=looking_around)

        self._trajectories_cams = kapture.Trajectories()
        for timestamp, rig_id, pose_rig_from_world in kapture.flatten(self._trajectories_rigs, is_sorted=True):
            for rig_id2, cam_id, pose_cam_from_rig in kapture.flatten(self._rigs):
                if cam_id == 'phone':
                    continue
                pose_cam_from_world = kapture.PoseTransform.compose([pose_cam_from_rig, pose_rig_from_world])
                self._trajectories_cams[timestamp, cam_id] = pose_cam_from_world
Example #18
def quat_from_angle_axis(theta: float, axis: np.ndarray) -> qt.quaternion:
    r"""Creates a quaternion from angle axis format

    :param theta: The angle to rotate about the axis by
    :param axis: The axis to rotate about
    :return: The quaternion
    """
    axis = axis.astype(float)
    axis /= np.linalg.norm(axis)
    return qt.from_rotation_vector(theta * axis)
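A quick usage sketch of the helper above (90 degrees about +z):

import numpy as np
import quaternion as qt

q = quat_from_angle_axis(np.pi / 2, np.array([0, 0, 1]))
print(qt.as_float_array(q))  # approximately [cos(pi/4), 0, 0, sin(pi/4)]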
Example #19
 def Λp(self):
     res = []
     for i in range(4):
         q = qn.from_rotation_vector([
             [np.deg2rad(45 + 90 * i), 0, 0],
             [0, 0, np.deg2rad(-4.5)],
             [0, self.δ[i], 0],
         ])
         res.append(np.prod(q))
     return res
Example #20
 def test_rotation_with_quat(self):
     v1 = np.random.rand(3)
     v2 = np.random.rand(3)
     v1 = v1 / np.linalg.norm(v1)
     v2 = v2 / np.linalg.norm(v2)
     vector, angle = helpers.get_rotation_vector_and_angle(v1, v2)
     quat = quaternion.from_rotation_vector(vector * angle)
     v2_bis = ft.rotate_by_quaternion(quat, v1)
     for i in range(vector.shape[0]):
         self.assertAlmostEqual(v2[i], v2_bis[i])
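The same alignment can be verified without the project helpers, building the rotation vector from the cross product and applying it with rotate_vectors (a sketch under that assumption):

import numpy as np
import quaternion

v1 = np.array([1.0, 0.0, 0.0])
v2 = np.array([0.0, 1.0, 0.0])
axis = np.cross(v1, v2)
axis /= np.linalg.norm(axis)
angle = np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))
quat = quaternion.from_rotation_vector(axis * angle)
print(quaternion.rotate_vectors(quat, v1))  # approximately v2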
Example #21
 def update_dx_and_P(self):
     self.dxapp = self.K.dot(self.epsilon[-1])
     # update also dx to keep a match between the two
     self.dx.p = self.dxapp[0:3]
     self.dx.v = self.dxapp[3:6]
     self.dx.q = quat.from_rotation_vector(self.dxapp[6:9])
     self.dx.ab = self.dxapp[9:12]
     self.dx.wb = self.dxapp[12:15]
     #
     self.P = (np.eye(15) - self.K.dot(self.H)).dot(self.P)
Example #22
    def _rotate(self, angles: _np.array) -> Frame:
        """

        Args:
            angles:

        Returns:
            the rotated frame (in place), allows method chaining
        """
        return self * _quaternion.from_rotation_vector(angles)
Example #23
 def test_rotation(self):
     r = quaternion.from_rotation_vector(
         [0, 0, np.pi])  # rotate 180° around Z axis
     # print(quaternion.as_rotation_matrix(r))
     pose_rotate = kapture.PoseTransform(r=r, t=[0, 0, 0])
     actual_points3d = pose_rotate.transform_points(self.expected_points3d)
     self.assertTrue(np.all(np.isclose(
         actual_points3d[:, 0:2], - self.expected_points3d[:, 0:2])))  # X, Y opposed
     self.assertTrue(np.all(np.isclose(
         actual_points3d[:, 2], self.expected_points3d[:, 2])))  # Z untouched
Example #24
    def get_observation(
        self,
        observations,
        *args: Any,
        episode: NavigationEpisode,
        **kwargs: Any,
    ) -> Optional[int]:

        episode_id = episode.episode_id
        scene_id = episode.scene_id
        if (self.curr_episode_id != episode_id) or (self.curr_scene_id !=
                                                    scene_id):
            self.curr_episode_id = episode_id
            self.curr_scene_id = scene_id
            self.goal_obs = []
            self.goal_pose = []
            for goal in episode.goals:
                position = goal.position
                euler = [0, 2 * np.pi * np.random.rand(), 0]
                rotation = q.from_rotation_vector(euler)
                obs = self._sim.get_observations_at(position, rotation)
                rgb_list = [
                    obs['rgb_%d' % (i)] for i in range(self.num_camera)
                ]
                rgb_array = np.concatenate(rgb_list, 1) / 255.
                if rgb_array.shape[1] > self.height * 4:
                    left = rgb_array.shape[1] - self.height * 4
                    slice = left // 2
                    rgb_array = rgb_array[:, slice:slice + self.height * 4]
                depth_list = [
                    obs['depth_%d' % (i)] for i in range(self.num_camera)
                ]
                depth_array = np.concatenate(depth_list, 1)
                if depth_array.shape[1] > self.height * 4:
                    left = depth_array.shape[1] - self.height * 4
                    slice = left // 2
                    depth_array = depth_array[:, slice:slice + self.height * 4]
                if 'semantic_0' in obs.keys():
                    semantic_list = [
                        obs['semantic_%d' % (i)]
                        for i in range(self.num_camera)
                    ]
                    semantic_array = np.expand_dims(
                        np.concatenate(semantic_list, 1), 2)
                #rgb_array = make_panoramic(obs['rgb_left'], obs['rgb'], obs['rgb_right'], self.torch)/255.
                #depth_array = make_panoramic(obs['depth_left'], obs['depth'], obs['depth_right'], self.torch)
                if self.torch:
                    goal_obs = torch.cat([rgb_array, depth_array], 2)
                else:
                    goal_obs = np.concatenate([rgb_array, depth_array], 2)
                if 'semantic_0' in obs.keys():
                    goal_obs = np.concatenate([goal_obs, semantic_array], 2)
                self.goal_obs.append(goal_obs)
                self.goal_pose.append([position, euler])
        return self.goal_obs
Example #25
def test_from_rotation_vector():
    np.random.seed(1234)
    n_tests = 1000
    vecs = np.random.uniform(high=math.pi/math.sqrt(3), size=n_tests*3).reshape((n_tests, 3))
    quats = np.zeros(vecs.shape[:-1]+(4,))
    quats[..., 1:] = vecs[...]
    quats = quaternion.as_quat_array(quats)
    quats = np.exp(quats/2)
    quat_vecs = quaternion.as_rotation_vector(quats)
    quats2 = quaternion.from_rotation_vector(quat_vecs)
    assert allclose(quats, quats2)
Example #26
 def return_as_3D_with_quat(self):
     p = np.append(self.p, 0.)  # append z position
     v = self.v * np.array((np.cos(self.theta), np.sin(self.theta), 0.))
     q = quat.from_rotation_vector(self.theta * np.array(
         (0, 0, 1)))  # rotation is only along the z-axis (2D model)
     w = np.array((0., 0., self.w))
     vd = self.vd * np.array(
         (np.cos(self.theta), np.sin(self.theta), 0.)) + skew(w).dot(
             v)  #Poisson relation
     wd = np.array((0., 0., self.wd))
     return p, v, q, w, vd, wd
Example #27
def quat_between_two_vectors(v, u) -> npq.quaternion:
    """ Computes rotation (represented by quaternion) that transforms unit vector v into the unit vector u.
        Based on: https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311
    """
    vec = np.cross(v, u)
    if np.all(np.isclose(vec, np.zeros(3))):
        return npq.one
    ang_sine = np.linalg.norm(vec)
    ang_cosine = np.dot(v, u)
    ang = np.arctan2(ang_sine, ang_cosine)
    return npq.from_rotation_vector(vec / ang_sine * ang)
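A quick check of the helper above on perpendicular unit vectors (numpy-quaternion imported as npq, as in the snippet):

import numpy as np
import quaternion as npq

v = np.array([1.0, 0.0, 0.0])
u = np.array([0.0, 0.0, 1.0])
rot = quat_between_two_vectors(v, u)
print(npq.rotate_vectors(rot, v))  # approximately u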
Example #28
    def orientationControl(self, target):
        r = quater.from_float_array(target)
        r_vect = quater.as_rotation_vector(r)

        cop_vect = copy.copy(r_vect)
        r_vect[0] = cop_vect[1]
        r_vect[1] = cop_vect[0]
        r_vect[2] *= -1

        r = quater.from_rotation_vector(r_vect)
        out = quater.as_float_array(r)
        return out
Example #29
def get_angular_FFoV_PFoV(sat, t):
    """
    return angular positions (alpha, delta) of the fields of view as a
    function of time.
    """
    z_axis = np.array([0, 0, 1])
    attitude = sat.func_attitude(t)

    quat_PFoV = quaternion.from_rotation_vector(z_axis * const.Gamma_c / 2)
    quat_FFoV = quaternion.from_rotation_vector(z_axis * (-const.Gamma_c / 2))

    PFoV_SRS = ft.rotate_by_quaternion(quat_PFoV, np.array([1, 0, 0]))
    FFoV_SRS = ft.rotate_by_quaternion(quat_FFoV, np.array([1, 0, 0]))

    PFoV_CoMRS = ft.xyz_to_lmn(attitude, PFoV_SRS)
    FFoV_CoMRS = ft.xyz_to_lmn(attitude, FFoV_SRS)

    alpha_PFoV, delta_PFoV = ft.vector_to_alpha_delta(PFoV_CoMRS)
    alpha_FFoV, delta_FFoV = ft.vector_to_alpha_delta(FFoV_CoMRS)

    return alpha_PFoV, delta_PFoV, alpha_FFoV, delta_FFoV
Example #30
    def __call__(self, feat, targ, **kwargs):
        rv = np.random.random(3)
        na = np.linalg.norm(rv)
        if na < 1e-06:
            return feat

        angle = np.random.random() * self.max_angle * math.pi / 180
        rv = rv / na * math.sin(angle / 2.0)
        rot = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rv))
        rows = feat.shape[0]
        feat = np.matmul(rot, feat.reshape([-1, 3]).T).T
        return feat.reshape([rows, -1]), targ
Example #31
File: dataset.py  Project: xianyubai/fpl
def accumulate_egomotion(rots, vels):
    # Accumulate translation and rotation
    egos = []
    qa = np.quaternion(1, 0, 0, 0)
    va = np.array([0., 0., 0.])
    for rot, vel in zip(rots, vels):
        vel_rot = quaternion.rotate_vectors(qa, vel)
        va += vel_rot
        qa = qa * quaternion.from_rotation_vector(rot)
        egos.append(
            np.concatenate((quaternion.as_rotation_vector(qa), va), axis=0))
    return egos
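A small usage sketch of the accumulator above (two steps of 90-degree yaw with a unit step velocity; numpy and numpy-quaternion imports as usual, accumulate_egomotion from the snippet in scope):

import numpy as np
import quaternion

rots = [np.array([0.0, 0.0, np.pi / 2])] * 2   # per-step rotation vectors
vels = [np.array([1.0, 0.0, 0.0])] * 2         # per-step velocity vectors
egos = accumulate_egomotion(rots, vels)
print(egos[-1])  # accumulated rotation vector (first 3) and translation (last 3)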