Example no. 1
def get_sim_location(env):
    agent_state = env._sim.get_agent_state(0)
    x = -agent_state.position[2]
    y = -agent_state.position[0]
    euler = quaternion.as_euler_angles(agent_state.rotation)
    axis = euler[0]
    if (axis % (2 * np.pi)) < 0.1 or (axis % (2 * np.pi)) > 2 * np.pi - 0.1:
        o = euler[1]
    else:
        o = 2 * np.pi - euler[1]
    if o > np.pi:
        o -= 2 * np.pi
    return x, y, o
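All of the examples in this listing use numpy together with the numpy-quaternion package, which registers a quaternion dtype and provides as_euler_angles / from_euler_angles in the z-y-z convention. A minimal sketch of that shared setup (the round-trip values are illustrative):

import numpy as np
import quaternion  # pip install numpy-quaternion; registers np.quaternion

q = quaternion.from_euler_angles(0.1, 0.2, 0.3)  # alpha, beta, gamma (z-y-z)
angles = quaternion.as_euler_angles(q)
print(angles)  # approximately [0.1, 0.2, 0.3]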
Example no. 2
    def __init__(self, qx, qy, qz, qb):
        self.idNum = StarList.idNum
        StarList.idNum += 1

        self.qx = float(qx)
        self.qy = float(qy)
        self.qz = float(qz)
        self.qq = float(qb)

        # note: np.quaternion takes components in (w, x, y, z) order
        q = np.quaternion(self.qx, self.qy, self.qz, self.qq)
        self.x, self.y, self.z = quaternion.as_euler_angles(q)
Example no. 3
    def get_sim_location(self):
        """Returns x, y, o pose of the agent in the Habitat simulator."""

        agent_state = super().habitat_env.sim.get_agent_state(0)
        x = -agent_state.position[2]
        y = -agent_state.position[0]
        euler = quaternion.as_euler_angles(agent_state.rotation)
        axis = euler[0]
        if (axis % (2 * np.pi)) < 0.1 or (axis % (2 * np.pi)) > 2 * np.pi - 0.1:
            o = euler[1]
        else:
            o = 2 * np.pi - euler[1]
        if o > np.pi:
            o -= 2 * np.pi
        return x, y, o
Example no. 4
    def get_sim_location(self):
        agent_state = super().habitat_env.sim.get_agent_state(0)
        # print("sim location, agent state ", agent_state)
        x = -agent_state.position[2]
        y = -agent_state.position[0]
        euler = quaternion.as_euler_angles(agent_state.rotation)
        axis = euler[0]
        if (axis % (2 * np.pi)) < 0.1 or (axis % (2 * np.pi)) > 2 * np.pi - 0.1:
            o = euler[1]
        else:
            o = 2 * np.pi - euler[1]
        if o > np.pi:
            o -= 2 * np.pi
        # print("sim location in map, ", x, y, o)
        return x, y, o
Example no. 5
    def integrate_gyro(self, initwindow=0.5):
        # convert gyro to rad/sec
        gyrorad = self.gyro * np.pi/180.0

        qorient = np.zeros_like(self.t, dtype=np.quaternion)
        gvec = np.zeros_like(self.gyro)

        dt = 1.0 / self.sampfreq

        isfirst = self.t <= self.t[0] + initwindow
        qorient[0] = self.orientation_from_accel(np.mean(self.acc[isfirst, :], axis=0))

        for i, gyro1 in enumerate(gyrorad[1:, :], start=1):
            qprev = qorient[i-1]

            # quaternion angular change from the gyro
            qdotgyro = 0.5 * (qprev * np.quaternion(0, *gyro1))
            qorient[i] = qprev + qdotgyro * dt

            g1 = qorient[i].conj() * np.quaternion(0, 0, 0, -1) * qorient[i]
            gvec[i, :] = g1.components[1:]

        self.qorient = qorient
        self.orient_sensor = quaternion.as_euler_angles(qorient)
        self.gvec = gvec
        self.accdyn_sensor = self.acc - gvec
Example no. 6
    def goto_pose(self, pose: Pose, linear: bool = True, relative: bool = False) -> None:
        """Commands the YuMi to goto the given pose.

        Parameters
        ----------
        pose : RigidTransform
        linear : bool, optional
            If True, will use MoveL in RAPID to ensure linear path.
            Otherwise use MoveJ in RAPID, which does not ensure linear path.
            Defaults to True
        relative : bool, optional
            If True, will use goto_pose_relative by computing the delta pose from current pose to target pose.
            Defaults to False
        """
        if relative:
            cur_pose = self.get_pose()
            delta_pose = Pose.from_tr_matrix(cur_pose.inversed().as_tr_matrix() * pose.as_tr_matrix())
            rot = np.rad2deg(quaternion.as_euler_angles(delta_pose.orientation.as_quaternion()))
            self.goto_pose_delta(delta_pose.position, rot)
        else:
            body = self._get_pose_body(pose)
            if linear:
                cmd = CmdCodes.goto_pose_linear
            else:
                cmd = CmdCodes.goto_pose
            req = self._construct_req(cmd, body)
            self._request(req, timeout=self._motion_timeout)
Example no. 7
def main() -> None:

    dm = DobotMagician("id", "name", Pose(), DobotSettings("/dev/ttyUSB0"))

    joints = dm.robot_joints()
    pose = dm.get_end_effector_pose("default")
    print(pose)
    print(joints)
    print(quaternion.as_euler_angles(pose.orientation.as_quaternion()))

    print("--- Forward Kinematics -----------------------------")

    fk_pose = dm.forward_kinematics("", joints)

    dx = pose.position.x - fk_pose.position.x
    dy = pose.position.y - fk_pose.position.y
    dz = pose.position.z - fk_pose.position.z

    print("Position error: {:+.09f}".format(math.sqrt(dx**2 + dy**2 + dz**2)))

    print("dx: {:+.06f}".format(dx))
    print("dy: {:+.06f}".format(dy))
    print("dz: {:+.06f}".format(dz))

    print(fk_pose.orientation)

    print("--- Inverse Kinematics -----------------------------")

    ik_joints = dm.inverse_kinematics("", pose)

    assert len(ik_joints) == len(joints)

    for idx, (joint, ik_joint) in enumerate(zip(joints, ik_joints)):
        print("j{}: {:+.06f}".format(idx + 1, joint.value - ik_joint.value))
Example no. 9
    def _gen_unnoisy_imu(self):
        """ Generates IMU data from visual trajectory, without noise. """

        self.vis_data.interpolate(self.num_imu_between_frames)
        interpolated = self.vis_data.interpolated
        dt = interpolated.t[1] - interpolated.t[0]

        # acceleration
        self.vx = np.gradient(interpolated.x, dt)
        self.vy = np.gradient(interpolated.y, dt)
        self.vz = np.gradient(interpolated.z, dt)

        self.ax = np.gradient(self.vx, dt)
        self.ay = np.gradient(self.vy, dt)
        self.az = np.gradient(self.vz, dt)

        # angular velocity
        euler = quaternion.as_euler_angles(interpolated.quats)
        rx, ry, rz = euler[:, 0], euler[:, 1], euler[:, 2]
        self.gx = np.gradient(rx, dt)
        self.gy = np.gradient(ry, dt)
        self.gz = np.gradient(rz, dt)

        self.t = interpolated.t

        if self.filepath:
            self._write_to_file()
        self._flag_gen_unnoisy_imu = True
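Differentiating the z-y-z Euler angles, as above, only approximates the angular rates and degrades near the beta = 0 singularity. A hedged alternative sketch (assuming a 1-D quaternion array sampled on a uniform time grid) recovers the world-frame angular velocity from consecutive relative rotations:

import numpy as np
import quaternion

def angular_velocity_from_quats(quats, dt):
    # relative rotation from sample i to sample i+1, in the world frame
    rel = quats[1:] * np.conjugate(quats[:-1])
    # rotation vector of each step divided by the time step, shape (N-1, 3)
    return quaternion.as_rotation_vector(rel) / dt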
Example no. 10
    def _generate_goal(self):
        original_obj_pos, original_obj_quat = self._p.getBasePositionAndOrientation(
            self.object_bodies[self.goal_object_key])

        while True:
            target_xyz = self.np_random.uniform(self.robot.target_bound_lower,
                                                self.robot.target_bound_upper)
            # on the table
            target_xyz[-1] = self.object_initial_pos[self.goal_object_key][2]
            if np.linalg.norm(target_xyz - original_obj_pos) > 0.06:
                break
        target_obj_euler = quat.as_euler_angles(
            quat.as_quat_array([1., 0., 0., 0.]))
        target_obj_euler[-1] = self.np_random.uniform(-1.0, 1.0) * np.pi
        target_obj_quat = quat.as_float_array(
            quat.from_euler_angles(target_obj_euler))
        target_obj_quat = np.concatenate(
            [target_obj_quat[1:], [target_obj_quat[0]]], axis=-1)

        self.desired_goal = np.concatenate((target_xyz, target_obj_euler))

        if self.visualize_target:
            self.set_object_pose(
                self.object_bodies[self.goal_object_key + '_target'],
                target_xyz, target_obj_quat)
Example no. 11
    def act(self, habitat_observation, goal_position):
        # Update internal state
        cc = 0

        update_is_ok = self.update_internal_state(habitat_observation)

        while not update_is_ok:
            update_is_ok = self.update_internal_state(habitat_observation)
            cc += 1
            if cc > 5:
                break

        current_pose = self.pose6D.detach().cpu().numpy().reshape(4, 4)
        self.position_history.append(current_pose)

        current_position = current_pose[0:3, -1]

        current_rotation = current_pose[0:3, 0:3]
        current_quat = quaternion.from_rotation_matrix(current_rotation)
        current_heading = quaternion.as_euler_angles(current_quat)[1]

        current_map = self.map2DObstacles.detach().cpu().numpy()

        best_action = self.follower.get_next_action(goal_pos=goal_position)

        return (current_position, current_heading, current_map, best_action)
Example no. 12
    def default_plotter(self,
                        data: xr.DataArray,
                        sup_title: Optional[str] = None,
                        figsize=(15, 7)):
        fig = plt.figure(figsize=figsize)
        data_values = data.values
        data_t = data.t.values
        plt.title(data.name)
        if sup_title:
            plt.suptitle(sup_title)

        line_styles = ["-", "--", ":", "-."]

        if np.ndim(data_values) != 1:
            # Shape of the values; the first dimension is time.
            shape = np.shape(data_values)[1:]
            legend = []
            for idx in product(*(range(dim_size) for dim_size in shape)):
                plt.plot(data_t, data_values[(slice(None), *idx)],
                         line_styles[idx[0] % 4])
                legend.append(", ".join(map(str, idx)))
            plt.legend(legend)
            return fig,
        else:
            if data.dtype == np.quaternion:
                plt.plot(data_t, quaternion.as_float_array(data_values))
                plt.legend(["q0", "q1", "q2", "q3"])
                fig2 = plt.figure(figsize=figsize)
                plt.title(f"{data.name} (euler)")
                plt.plot(data_t, quaternion.as_euler_angles(data_values))
                plt.legend(["x", "y", "z"])
                return fig, fig2
            else:
                plt.plot(data_t, data_values)
                return fig,
Example no. 13
def main():
    # rc = roslibpy.Ros('localhost', port=9090)
    client = roslibpy.Ros('10.244.1.176', port=9090)
    client.on_ready(lambda: print('is ROS connected: ', client.is_connected))
    client.run()

    # topic_o = '/odometry/filtered'
    topic_o = '/odom'
    ms = MobileClient(client, lambda r: print('reached goal', r), odom_topic=topic_o)
    ms.wait_for_ready(timeout=80)
    print('map_header: ', ms.map_header)
    print('odom:', ms.position, [math.degrees(v) for v in quaternion.as_euler_angles(ms.orientation)])

    ## You can set a goal at any time, not only after calling start().
    ms.start()  ## once started, queued goals become executable
#     ms.set_goal_relative_xy(0.5, 0, True) ## goal 0.5 ahead of the robot body
#     ms.set_goal_relative_xy(-0.5, 1) ## relative (x:front:-0.5, y:left:1)
    ms.set_goal(ms.get_vec_q(-0.4, -0.6, 0), ms.get_rot_q(0, 0, math.pi / 2))
    time.sleep(30)
#     ms.set_goal_relative_xy(0.5, 0, True)
#     time.sleep(30)
    ms.stop()
    print('finish')

    client.terminate()
Example no. 14
def get_map_location(env, resolution):
    bounds = env._sim.pathfinder.get_bounds()
    agent_state = env.sim.get_agent_state()
    agent_pos = agent_state.position

    x = int((agent_pos[2] - bounds[0][2]) / resolution)
    y = int((agent_pos[0] - bounds[0][0]) / resolution)

    euler = quaternion.as_euler_angles(agent_state.rotation)
    axis = euler[0]
    if (axis % (2 * np.pi)) < 0.1 or (axis % (2 * np.pi)) > 2 * np.pi - 0.1:
        o = euler[1]
    else:
        o = 2 * np.pi - euler[1]
    if o > np.pi:
        o -= 2 * np.pi
    return x, y, o
Example no. 15
    def _get_obs(self):
        # re-generate goals & images
        if self.regenerate_goal_when_step:
            self._generate_goal()
            if self.goal_image:
                self._generate_goal_image()

        assert self.desired_goal is not None

        state = []
        achieved_goal = []

        for object_key in self.manipulated_object_keys:
            # object state: (x, y, z), (a, b, c, w)
            obj_xyz, (a, b, c, w) = self._p.getBasePositionAndOrientation(self.object_bodies[object_key])
            obj_euler = quat.as_euler_angles(quat.as_quat_array([w, a, b, c]))
            state.append(obj_xyz)
            state.append(obj_euler)
            if object_key == self.goal_object_key:
                achieved_goal.append(obj_xyz)
                if self.orientation_informed_goal:
                    achieved_goal.append(obj_euler)

        state = np.concatenate(state)
        achieved_goal = np.concatenate(achieved_goal)
        assert achieved_goal.shape == self.desired_goal.shape

        obs_dict = {
            'observation': state.copy(),
            'policy_state': state.copy(),
            'achieved_goal': achieved_goal.copy(),
            'desired_goal': self.desired_goal.copy(),
        }

        if self.image_observation:
            self.robot.set_kuka_joint_state(self.robot.kuka_away_pose)

            images = []
            for cam_id in self.observation_cam_id:
                images.append(self.render(mode=self.render_mode, camera_id=cam_id))
            obs_dict['observation'] = images[0].copy()
            obs_dict['images'] = images
            obs_dict.update({'state': state.copy()})

            if self.goal_image:
                achieved_goal_img = self.render(mode=self.render_mode, camera_id=self.goal_cam_id)
                obs_dict.update({
                    'achieved_goal_img': achieved_goal_img.copy(),
                    'desired_goal_img': self.desired_goal_image.copy(),
                })

            if self.render_pcd:
                pcd = self.render(mode='pcd', camera_id=self.pcd_cam_id)
                obs_dict.update({'pcd': pcd.copy()})

            self.robot.set_kuka_joint_state(self.robot.kuka_rest_pose)

        return obs_dict
Example no. 16
    def _compute_subtask_reward(self, gripper_xyz):
        goal_object_xyz, (a, b, c, w) = self._p.getBasePositionAndOrientation(self.object_bodies['rectangle'])
        goal_object_euler = quat.as_euler_angles(quat.as_quat_array([w, a, b, c]))
        grasp_target_xyz, _, _, _, _, _ = self._p.getLinkState(self.object_bodies['rectangle'], 0)
        grasp_target_xyz = np.array(grasp_target_xyz)
        slot_target_xyz, (a, b, c, w), _, _, _, _ = self._p.getLinkState(self.object_bodies['slot'], 3)
        slot_target_euler = quat.as_euler_angles(quat.as_quat_array([w, a, b, c]))
        slot_target_xyz = np.array(slot_target_xyz)

        # pick-up reward
        # this reward specifies grasping point & goal object height
        d_goal_obj_grip = np.linalg.norm(grasp_target_xyz - gripper_xyz, axis=-1) + np.abs(0.15 - goal_object_xyz[-1])
        # this reward only specifies goal object height
        # d_goal_obj_grip = np.abs(0.15 - goal_object_xyz[-1])
        reward_pick_up = -d_goal_obj_grip
        achieved_pick_up = (d_goal_obj_grip < self.distance_threshold)
        # reach reward
        reach_target_xyz = slot_target_xyz.copy()
        reach_target_xyz[-1] += 0.06
        d_goal_obj_reach_slot = np.linalg.norm(goal_object_xyz - reach_target_xyz, axis=-1) + \
                                np.linalg.norm(goal_object_euler - slot_target_euler.copy(), axis=-1)
        reward_reach = -d_goal_obj_reach_slot
        achieved_reach = (d_goal_obj_reach_slot < self.distance_threshold)
        # insert reward
        insert_target_xyz = slot_target_xyz.copy()
        insert_target_xyz[-1] += 0.03
        d_goal_obj_insert_slot = np.linalg.norm(goal_object_xyz - insert_target_xyz, axis=-1) + \
                                 np.linalg.norm(goal_object_euler - slot_target_euler.copy(), axis=-1)
        reward_insert = -d_goal_obj_insert_slot
        achieved_insert = (d_goal_obj_insert_slot < self.distance_threshold)

        return {
            'pick_up': np.clip(reward_pick_up, -15.0, 0.0),
            'pick_up_done': achieved_pick_up,
            'pick_up_desired_goal': np.concatenate([grasp_target_xyz, [0.15]]),
            'pick_up_achieved_goal': np.concatenate([gripper_xyz, [goal_object_xyz[-1]]]),
            'reach': np.clip(reward_reach, -15.0, 0.0),
            'reach_done': achieved_reach,
            'reach_desired_goal': np.concatenate([reach_target_xyz, slot_target_euler]),
            'reach_achieved_goal': np.concatenate([goal_object_xyz, goal_object_euler]),
            'insert': np.clip(reward_insert, -15.0, 0.0),
            'insert_done': achieved_insert,
            'insert_desired_goal': np.concatenate([insert_target_xyz, slot_target_euler]),
            'insert_achieved_goal': np.concatenate([goal_object_xyz, goal_object_euler])
        }
Example no. 17
def get_sim_location(env):
    bounds = env._sim.pathfinder.get_bounds()

    agent_state = env.sim.get_agent_state()
    agent_pos = agent_state.position

    x = -agent_pos[2]
    y = -agent_pos[0]

    euler = quaternion.as_euler_angles(agent_state.rotation)
    axis = euler[0]
    if (axis % (2 * np.pi)) < 0.1 or (axis % (2 * np.pi)) > 2 * np.pi - 0.1:
        o = euler[1]
    else:
        o = 2 * np.pi - euler[1]
    if o > np.pi:
        o -= 2 * np.pi
    return x, y, o
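Examples 1, 3, 4, 14 and 17 all branch on the first Euler angle before reading the heading from the second. The reason: as_euler_angles uses the z-y-z convention, so a pure rotation about the vertical axis comes back either as (0, theta, 0) or, for negative headings, as an equivalent triple whose first angle is near pi. A standalone sketch of that behavior (not taken from any of the repos above):

import numpy as np
import quaternion

for theta in (0.8, -0.8):  # heading about the vertical (y) axis
    q = quaternion.from_rotation_vector([0.0, theta, 0.0])
    alpha, beta, gamma = quaternion.as_euler_angles(q)
    # positive theta -> roughly (0, theta, 0); negative theta -> alpha near pi
    # with beta = abs(theta), which is why the examples test euler[0]
    print(round(alpha, 3), round(beta, 3), round(gamma, 3))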
Example no. 18
    def _inverse_kinematics(self, pose: Pose) -> list[Joint]:
        """Computes inverse kinematics.

        Works with robot-relative pose.

        Inspired by DobotKinematics.py from open-dobot project and DobotInverseKinematics.py from BenchBot.

        :param pose: IK target pose (relative to robot)
        :return: Inverse kinematics
        """

        self._check_orientation(pose)

        # TODO this is probably not working properly (use similar solution as in _check_orientation?)
        _, _, yaw = quaternion.as_euler_angles(
            pose.orientation.as_quaternion())

        x = pose.position.x
        y = pose.position.y
        z = pose.position.z + self.end_effector_length

        # pre-compute distances
        # radial position of end effector in the x-y plane
        r = math.sqrt(x**2 + y**2)
        rho_sq = (r - self.link_4_length)**2 + z**2
        # distance between the ends of the links joined at the elbow
        rho = math.sqrt(rho_sq)

        l2_sq = self.link_2_length**2
        l3_sq = self.link_3_length**2

        # law of cosines
        try:
            alpha = math.acos(
                (l2_sq + rho_sq - l3_sq) / (2.0 * self.link_2_length * rho))
            gamma = math.acos((l2_sq + l3_sq - rho_sq) /
                              (2.0 * self.link_2_length * self.link_3_length))
        except ValueError as e:
            raise DobotException("Failed to compute IK.") from e

        beta = math.atan2(z, r - self.link_4_length)

        # joint angles
        baseAngle = math.atan2(y, x)
        rearAngle = math.pi / 2 - beta - alpha
        frontAngle = math.pi / 2 - gamma

        joints = [
            Joint(Joints.J1, baseAngle),
            Joint(Joints.J2, rearAngle),
            Joint(Joints.J3, frontAngle),
            Joint(Joints.J4, -rearAngle - frontAngle),
            Joint(Joints.J5, yaw - baseAngle),
        ]
        self.validate_joints(joints)
        return joints
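The two acos arguments above implement the law of cosines on the shoulder-elbow-wrist triangle whose sides are link_2_length, link_3_length and rho. A standalone sanity check with hypothetical link lengths (the law of sines must then hold across the same triangle):

import math

l2, l3 = 0.135, 0.147       # hypothetical link lengths
r_minus_l4, z = 0.20, 0.05  # hypothetical radial and vertical offsets
rho_sq = r_minus_l4**2 + z**2
rho = math.sqrt(rho_sq)

alpha = math.acos((l2**2 + rho_sq - l3**2) / (2.0 * l2 * rho))
gamma = math.acos((l2**2 + l3**2 - rho_sq) / (2.0 * l2 * l3))

# law of sines: side l3 is opposite alpha, side rho is opposite gamma
assert math.isclose(math.sin(alpha) / l3, math.sin(gamma) / rho, rel_tol=1e-9)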
Example no. 19
    def get_ground_truth(self):
        """
        self.rock_mod_cache set from self.mod_rocks

        Return 1d numpy array of 9 elements for positions of all 3 rocks including:
            - rock x dist from cam
            - rock y dist from cam
            - rock z height from arena floor
        """
        cam_pos = self.model.cam_pos[0]

        #line_pos = self.floor_offset + np.array([0.0, 0.75, 0.0])
        #self.viewer.add_marker(pos=line_pos)

        ##r0_pos = self.floor_offset + self.model.body_pos[self.model.body_name2id('rock0')]
        ##r1_pos = self.floor_offset + self.model.body_pos[self.model.body_name2id('rock1')]
        ##r2_pos = self.floor_offset + self.model.body_pos[self.model.body_name2id('rock2')]
        ##r0_diff = r0_pos - cam_pos
        ##r1_diff = r1_pos - cam_pos
        ##r2_diff = r2_pos - cam_pos

        ground_truth = np.zeros(9, dtype=np.float32)
        for i, slot in enumerate(self.rock_mod_cache):
            name = slot[0]
            z_height = slot[1]

            pos = self.floor_offset + self.model.body_pos[
                self.model.body_name2id(name)]
            diff = pos - cam_pos

            # Project difference into camera coordinate frame
            cam_angle = quaternion.as_euler_angles(
                np.quaternion(*self.model.cam_quat[0]))[0]
            cam_angle += np.pi / 2
            in_cam_frame = np.zeros_like(diff)
            x = diff[1]
            y = -diff[0]
            in_cam_frame[0] = x * np.cos(cam_angle) + y * np.sin(cam_angle)
            in_cam_frame[1] = -x * np.sin(cam_angle) + y * np.cos(cam_angle)
            in_cam_frame[2] = z_height
            # simple check that change of frame is mathematically valid
            assert (np.isclose(np.sum(np.square(diff[:2])),
                               np.sum(np.square(in_cam_frame[:2]))))
            # swap positions to match ROS standard coordinates
            ground_truth[3 * i + 0] = in_cam_frame[0]
            ground_truth[3 * i + 1] = in_cam_frame[1]
            ground_truth[3 * i + 2] = in_cam_frame[2]
            text = "{0} x: {1:.2f} y: {2:.2f} height:{3:.2f}".format(
                name, ground_truth[3 * i + 0], ground_truth[3 * i + 1],
                z_height)
            ##text = "x: {0:.2f} y: {1:.2f} height:{2:.2f}".format(ground_truth[3*i+0], ground_truth[3*i+1], z_height)
            ##text = "height:{0:.2f}".format(z_height)
            if self.visualize:
                self.viewer.add_marker(pos=pos, label=text, rgba=np.zeros(4))

        return ground_truth
Example no. 20
def euler_from_2_vectors(v1, v2):
    v1 = v1 / np.linalg.norm(v1)
    v2 = v2 / np.linalg.norm(v2)
    # np.dot gives cos(angle); take arccos to get the rotation angle itself
    angle = np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))
    axis = np.cross(v1, v2)
    qua = pq.Quaternion(axis=axis, radians=angle)
    qua = np.quaternion(qua.w, qua.x, qua.y, qua.z)
    return q.as_euler_angles(qua)
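A quick round-trip check of the fixed function (assuming, as the snippet implies, that q is the numpy-quaternion module and pq is pyquaternion):

import numpy as np
import quaternion as q

v1 = np.array([1.0, 0.0, 0.0])
v2 = np.array([0.0, 1.0, 0.0])
q_check = q.from_euler_angles(*euler_from_2_vectors(v1, v2))
# rotating v1 by the reconstructed quaternion should give back v2
assert np.allclose(q.rotate_vectors(q_check, v1), v2)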
Example no. 21
    def get_euler_angles(self, ref: Optional[Frame] = None) -> _np.ndarray:
        """Returns the Euler angles of the frame's orientation.

        Args:
            ref: optional reference frame, forwarded to get_quaternion.

        Returns:
            The Euler angles as computed by quaternion.as_euler_angles.
        """
        return _quaternion.as_euler_angles(self.get_quaternion(ref))
Example no. 23
def jitter_quaternions(original_quaternion, rnd, angle=30.0):
    original_euler = quaternion.as_euler_angles(original_quaternion)
    euler_angles = np.array([
        (rnd.rand() - 0.5) * np.pi * angle / 180.0 + original_euler[0],
        (rnd.rand() - 0.5) * np.pi * angle / 180.0 + original_euler[1],
        (rnd.rand() - 0.5) * np.pi * angle / 180.0 + original_euler[2],
    ])
    quaternions = quaternion.from_euler_angles(euler_angles)

    return quaternions
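A minimal invocation (the random stream and base orientation here are made up; each Euler angle is perturbed by at most ±angle/2 degrees):

import numpy as np
import quaternion

rnd = np.random.RandomState(0)
q0 = quaternion.from_euler_angles(0.0, 0.5, 0.0)
print(jitter_quaternions(q0, rnd, angle=30.0))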
Example no. 24
    def get_orientation(self,
                        method='madgwick',
                        initwindow=0.5,
                        beta=2.86,
                        lCa=(0.0, -0.3, -0.7),
                        Ca=None):
        if method.lower() == 'ekf':
            dt = np.mean(np.diff(self.t))
            if Ca is None:
                Ca = np.power(10, np.array(lCa)) / dt
            else:
                Ca = np.array(Ca) / dt
            self._get_orientation_ekf(Ca=Ca)

            inertialbasis = []

            for rpy in self.orient_sensor:
                QT = self._getQT(rpy)

                inertialbasis.append(QT.dot(np.eye(3)))

            inertialbasis = np.array(inertialbasis)

            self.worldbasis = np.matmul(self.chip2world_rot, inertialbasis)

            self.accdyn_world = np.matmul(self.chip2world_rot,
                                          self.accdyn_sensor.T).T
            self.accdyn = self.accdyn_world

        elif method.lower() in ['madgwick', 'integrate_gyro']:
            if method.lower() == 'integrate_gyro':
                beta = 0.0

            self._get_orientation_madgwick(initwindow=initwindow, beta=beta)

            qorient_world = (self.qchip2world.conj() * self.qorient *
                             self.qchip2world)
            self.qorient_world = qorient_world

            self.orient_world = np.array(
                [quaternion.as_euler_angles(q1) for q1 in qorient_world])
            self.orient = self.orient_world

            # make accdyn into a quaternion with zero real part
            qacc = np.zeros((self.accdyn_sensor.shape[0], 4))
            qacc[:, 1:] = self.accdyn_sensor

            # rotate accdyn into the world coordinate system
            qaccdyn_world = self.qchip2world.conj() * quaternion.as_quat_array(
                qacc) * self.qchip2world
            self.accdyn_world = np.array(
                [q.components[1:] for q in qaccdyn_world])
            self.accdyn = self.accdyn_world

        return self.orient_world
Example no. 25
    def _get_orientation_madgwick(self, initwindow=0.5, beta=2.86):
        gyrorad = self.gyro * np.pi / 180.0
        betarad = beta * np.pi / 180.0

        qorient = np.zeros_like(self.t, dtype=np.quaternion)
        qgyro = np.zeros_like(self.t, dtype=np.quaternion)
        gvec = np.zeros_like(self.gyro)

        dt = 1.0 / self.sampfreq

        isfirst = self.t <= self.t[0] + initwindow
        qorient[0] = self.orientation_from_accel(
            np.mean(self.acc[isfirst, :], axis=0))

        for i, gyro1 in enumerate(gyrorad[1:, :], start=1):
            qprev = qorient[i - 1]

            acc1 = self.acc[i, :]
            acc1 = -acc1 / np.linalg.norm(acc1)

            # quaternion angular change from the gyro
            qdotgyro = 0.5 * (qprev * np.quaternion(0, *gyro1))
            qgyro[i] = qprev + qdotgyro * dt

            if beta > 0:
                # gradient descent algorithm corrective step
                qp = qprev.components
                F = np.array([
                    2 * (qp[1] * qp[3] - qp[0] * qp[2]) - acc1[0],
                    2 * (qp[0] * qp[1] + qp[2] * qp[3]) - acc1[1],
                    2 * (0.5 - qp[1]**2 - qp[2]**2) - acc1[2]
                ])
                J = np.array([[-2 * qp[2], 2 * qp[3], -2 * qp[0], 2 * qp[1]],
                              [2 * qp[1], 2 * qp[0], 2 * qp[3], 2 * qp[2]],
                              [0, -4 * qp[1], -4 * qp[2], 0]])

                step = np.dot(J.T, F)
                step = step / np.linalg.norm(step)
                step = np.quaternion(*step)

                qdot = qdotgyro - betarad * step
            else:
                qdot = qdotgyro

            qorient[i] = qprev + qdot * dt

        # get the gravity vector
        # gravity is -Z
        gvec = [(q.conj() * np.quaternion(0, 0, 0, -1) * q).components[1:]
                for q in qorient]

        self.qorient = qorient
        self.orient_sensor = quaternion.as_euler_angles(qorient)
        self.gvec = gvec
        self.accdyn_sensor = self.acc - gvec
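For reference, F above is Madgwick's objective function: the difference between the gravity direction predicted from qprev and the normalized accelerometer reading, and J is its Jacobian with respect to the quaternion components. A standalone check that the analytic expression matches the world z axis rotated into the sensor frame:

import numpy as np
import quaternion

qp = np.quaternion(0.9, 0.1, -0.2, 0.3)
qp = qp / abs(qp)  # normalize to a unit quaternion
c = qp.components  # (w, x, y, z)
F_pred = np.array([2 * (c[1] * c[3] - c[0] * c[2]),
                   2 * (c[0] * c[1] + c[2] * c[3]),
                   2 * (0.5 - c[1]**2 - c[2]**2)])
g_sensor = (qp.conj() * np.quaternion(0, 0, 0, 1) * qp).components[1:]
assert np.allclose(F_pred, g_sensor)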
Example no. 26
def imu_callback(data):

    if init == 0:

        w = np.array([
            data.angular_velocity.x, data.angular_velocity.y,
            data.angular_velocity.z
        ])
        a = np.array([
            data.linear_acceleration.x, data.linear_acceleration.y,
            data.linear_acceleration.z
        ])
        t = data.header.stamp.secs + data.header.stamp.nsecs * 1e-9
        filter.prediction(a, w, t)
        x = filter.x_
        q = filter.q_
        # rospy.loginfo(filter.posOld)
        odom = Odometry()
        odom.pose.pose.position.x = x[0]
        odom.pose.pose.position.y = x[1]
        odom.pose.pose.position.z = x[2]
        odom.pose.pose.orientation.x = q[0]
        odom.pose.pose.orientation.y = q[1]
        odom.pose.pose.orientation.z = q[2]
        odom.pose.pose.orientation.w = q[3]
        odom.twist.twist.linear.x = x[3]
        odom.twist.twist.linear.y = x[4]
        odom.twist.twist.linear.z = x[5]
        odom.twist.twist.angular.x = 0
        odom.twist.twist.angular.y = 0
        odom.twist.twist.angular.z = 0
        pub_filter.publish(odom)

        bias = Vector3()
        bias.x = filter.x_[6, 0]
        bias.y = filter.x_[7, 0]
        bias.z = filter.x_[8, 0]
        pub_bias.publish(bias)

        heading = quaternion.as_euler_angles(quaternion.from_float_array(q))
        pub_heading.publish(heading[2])

        x_uwb = filter.posOld
        vel_uwb = filter.velOld
        odom_uwb = Odometry()
        odom_uwb.pose.pose.position.x = x_uwb[0]
        odom_uwb.pose.pose.position.y = x_uwb[1]
        odom_uwb.pose.pose.position.z = x_uwb[2]
        odom_uwb.twist.twist.linear.x = vel_uwb[0]
        odom_uwb.twist.twist.linear.y = vel_uwb[1]
        odom_uwb.twist.twist.linear.z = vel_uwb[2]
        odom_uwb.twist.twist.angular.x = 0
        odom_uwb.twist.twist.angular.y = 0
        odom_uwb.twist.twist.angular.z = 0
        pub_uwb.publish(odom_uwb)
Example no. 27
    def _task_reset(self, test=False):
        if not self.objects_urdf_loaded:
            # don't reload object urdf
            self.objects_urdf_loaded = True
            self.object_bodies['workspace'] = self._p.loadURDF(
                os.path.join(self.object_assets_path, "workspace.urdf"),
                basePosition=self.object_initial_pos['workspace'][:3],
                baseOrientation=self.object_initial_pos['workspace'][3:])

            for object_key in self.manipulated_object_keys:
                self.object_bodies[object_key] = self._p.loadURDF(
                    os.path.join(self.object_assets_path, object_key+".urdf"),
                    basePosition=self.object_initial_pos[object_key][:3],
                    baseOrientation=self.object_initial_pos[object_key][3:])

            self.object_bodies[self.goal_object_key+'_target'] = self._p.loadURDF(
                os.path.join(self.object_assets_path, self.goal_object_key+"_target.urdf"),
                basePosition=self.object_initial_pos['target'][:3],
                baseOrientation=self.object_initial_pos['target'][3:])

            if not self.visualize_target:
                self.set_object_pose(self.object_bodies[self.goal_object_key+'_target'],
                                     [0.0, 0.0, -3.0],
                                     self.object_initial_pos['target'][3:])

        # randomize object positions
        object_poses = []
        object_quats = []
        for object_key in self.manipulated_object_keys:
            done = False
            while not done:
                new_object_xy = self.np_random.uniform(self.robot.object_bound_lower[:-1],
                                                       self.robot.object_bound_upper[:-1])
                object_not_overlap = []
                for pos in object_poses + [self.robot.end_effector_tip_initial_position]:
                    object_not_overlap.append((np.linalg.norm(new_object_xy - pos[:-1]) > 0.06))
                if all(object_not_overlap):
                    object_poses.append(np.concatenate((new_object_xy.copy(), [self.object_initial_pos[object_key][2]])))
                    done = True

            orientation_euler = quat.as_euler_angles(quat.as_quat_array([1., 0., 0., 0.]))
            orientation_euler[-1] = self.np_random.uniform(-1.0, 1.0) * np.pi
            orientation_quat_new = quat.as_float_array(quat.from_euler_angles(orientation_euler))
            orientation_quat_new = np.concatenate([orientation_quat_new[1:], [orientation_quat_new[0]]], axis=-1)
            object_quats.append(orientation_quat_new.copy())

            self.set_object_pose(self.object_bodies[object_key],
                                 object_poses[-1],
                                 orientation_quat_new)

        # generate goals & images
        self._generate_goal()
        if self.goal_image:
            self._generate_goal_image()
Example no. 28
def test_as_euler_angles():
    np.random.seed(1843)
    random_angles = [[np.random.uniform(-np.pi, np.pi),
                      np.random.uniform(-np.pi, np.pi),
                      np.random.uniform(-np.pi, np.pi)]
                     for i in range(5000)]
    for alpha, beta, gamma in random_angles:
        R1 = quaternion.from_euler_angles(alpha, beta, gamma)
        R2 = quaternion.from_euler_angles(*list(quaternion.as_euler_angles(R1)))
        d = quaternion.rotation_intrinsic_distance(R1, R2)
        assert d < 4e3*eps, ((alpha, beta, gamma), R1, R2, d)  # Can't use allclose here; we don't care about rotor sign
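The eps used in the tolerance is defined elsewhere in the package's test module, presumably as double-precision machine epsilon:

import numpy as np
eps = np.finfo(float).eps  # about 2.22e-16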
Example no. 29
def to_euler(state):
    steps = state.shape[0]
    pos = state[:, 0:3, :]
    quats = np.squeeze(state[:, 3:7, :], axis=-1)
    euler = np.zeros(shape=(steps, 3, 1))
    for i, q in enumerate(quats):
        quat = np.quaternion(q[3], q[0], q[1], q[2])
        euler[i, :, :] = np.expand_dims(as_euler_angles(quat),
                                        axis=-1) * 180 / np.pi

    vel = state[:, 7:13, :]
    state_euler = np.concatenate([pos, euler, vel], axis=1)
    return state_euler
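A minimal invocation of to_euler (shapes inferred from the indexing above: state is (steps, 13, 1) with the quaternion stored as (x, y, z, w)):

import numpy as np
from quaternion import as_euler_angles

state = np.zeros((5, 13, 1))
state[:, 6, 0] = 1.0  # w = 1: identity rotation
print(to_euler(state).shape)  # (5, 12, 1): position, Euler angles (deg), velocity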
Example no. 31
    def _generate_goal(self):
        (x, y, z), (a, b, c, w), _, _, _, _ = self._p.getLinkState(
            self.object_bodies['slot'], 2)
        target_obj_quat = np.array([a, b, c, w])
        orientation_euler = quat.as_euler_angles(
            quat.as_quat_array([w, a, b, c]))

        self.desired_goal = np.concatenate(
            [np.array([x, y, z]), orientation_euler], axis=-1)

        if self.visualize_target:
            self.set_object_pose(
                self.object_bodies[self.goal_object_key + '_target'],
                self.desired_goal[:3], target_obj_quat)
Example no. 32
    def save_trajectory(self, epoch, y_true, y_pred, begin, end):
        true_xy, pred_xy = y_true[begin:end, :2], y_pred[begin:end, :2]

        true_q = quaternion.as_quat_array(y_true[begin:end, [6, 3, 4, 5]])
        true_q = quaternion.as_euler_angles(true_q)[1]

        pred_q = quaternion.as_quat_array(y_pred[begin:end, [6, 3, 4, 5]])
        pred_q = quaternion.as_euler_angles(pred_q)[1]

        plt.clf()

        plt.plot(true_xy[:, 0], true_xy[:, 1], 'g-')
        plt.plot(pred_xy[:, 0], pred_xy[:, 1], 'r-')

        for ((x1, y1), (x2, y2)) in zip(true_xy, pred_xy):
            plt.plot([x1, x2], [y1, y2],
                     color='k',
                     linestyle='-',
                     linewidth=0.3,
                     alpha=0.2)

        plt.grid(True)
        plt.xlabel('x [m]')
        plt.ylabel('y [m]')
        plt.title('Top-down view of trajectory')
        plt.axis('equal')

        x_range = (np.min(true_xy[:, 0]) - .2, np.max(true_xy[:, 0]) + .2)
        y_range = (np.min(true_xy[:, 1]) - .2, np.max(true_xy[:, 1]) + .2)

        plt.xlim(x_range)
        plt.ylim(y_range)

        filename = 'epoch={epoch:04d}_begin={begin:04d}_end={end:04d}_trajectory.pdf' \
          .format(epoch=epoch, begin=begin, end=end)
        filename = os.path.join(self.plot_dir, filename)
        plt.savefig(filename)
Example no. 33
    def _forward(self):
        """Advances the simulator one step (necessary to make camera and
        light modding work) and adds some visualization."""
        self.sim.forward()
        if self.viewer and self.gui:
            # Get the camera angle and display it
            quat = np.quaternion(*self.model.cam_quat[0])
            ypr = quaternion.as_euler_angles(quat) * 180 / np.pi
            cam_pos = self.model.cam_pos[0]
            cam_fovy = self.model.cam_fovy[0]
            #self.viewer.add_marker(pos=cam_pos, label="CAM: {}{}".format(cam_pos, ypr))
            #self.viewer.add_marker(pos=cam_pos, label="CAM: {}".format(ypr))
            #self.viewer.add_marker(pos=cam_pos, label="CAM: {}".format(cam_pos))
            self.viewer.add_marker(pos=cam_pos, label="FOVY: {}, CAM: {}".format(cam_fovy, cam_pos))
            self.viewer.render()