Example #1

    def ik_robot_eef_joint_cartesian_pose(self):
        """
        Returns the current cartesian poses of the right and left end effectors of the
        ik robot with respect to the base frame, as a flat list
        [right_pos, right_orn, left_pos, left_orn] where each orn is an x-y-z-w quaternion.
        """
        out = []
        for eff in [self.effector_right, self.effector_left]:
            eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, eff)[0])
            eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, eff)[1])
            eef_pose_in_world = T.pose2mat(
                (eef_pos_in_world, eef_orn_in_world))

            base_pos_in_world = np.array(
                p.getBasePositionAndOrientation(self.ik_robot)[0])
            base_orn_in_world = np.array(
                p.getBasePositionAndOrientation(self.ik_robot)[1])
            base_pose_in_world = T.pose2mat(
                (base_pos_in_world, base_orn_in_world))
            world_pose_in_base = T.pose_inv(base_pose_in_world)

            eef_pose_in_base = T.pose_in_A_to_pose_in_B(
                pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base)
            out.extend(T.mat2pose(eef_pose_in_base))

        return out
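
The frame change in this example reduces to a single left matrix multiply. A minimal self-contained sketch of that math, assuming robosuite's transform_utils is importable as T (the pose values below are arbitrary):

import numpy as np
import robosuite.utils.transform_utils as T

# World-frame poses as (position, x-y-z-w quaternion) pairs; values are made up.
eef_pose_in_world = T.pose2mat((np.array([0.5, 0.1, 1.0]),
                                np.array([0.0, 0.0, 0.0, 1.0])))
base_pose_in_world = T.pose2mat((np.array([0.0, 0.0, 0.9]),
                                 np.array([0.0, 0.0, 0.7071068, 0.7071068])))

world_pose_in_base = T.pose_inv(base_pose_in_world)
eef_pose_in_base = T.pose_in_A_to_pose_in_B(
    pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base)
# pose_in_A_to_pose_in_B(pose_A, pose_A_in_B) is just pose_A_in_B @ pose_A.
assert np.allclose(eef_pose_in_base, world_pose_in_base @ eef_pose_in_world)
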
Example #2

    def ik_robot_eef_joint_cartesian_pose(self):
        """
        Calculates the current cartesian pose of the last joint of the ik robot with respect to the base frame as
        a (pos, orn) tuple where orn is an x-y-z-w quaternion

        Returns:
            2-tuple:

                - (np.array) position
                - (np.array) orientation
        """
        eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, self.bullet_ee_idx,
                                                   physicsClientId=self.bullet_server_id)[0])
        eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, self.bullet_ee_idx,
                                                   physicsClientId=self.bullet_server_id)[1])
        eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))

        base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot,
                                                                     physicsClientId=self.bullet_server_id)[0])
        base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot,
                                                                     physicsClientId=self.bullet_server_id)[1])
        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
        world_pose_in_base = T.pose_inv(base_pose_in_world)

        # Update reference to inverse orientation offset from pybullet base frame to world frame
        self.base_orn_offset_inv = T.quat2mat(T.quat_inverse(base_orn_in_world))

        # Update reference target orientation
        self.reference_target_orn = T.quat_multiply(self.reference_target_orn, base_orn_in_world)

        eef_pose_in_base = T.pose_in_A_to_pose_in_B(
            pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base
        )

        return T.mat2pose(eef_pose_in_base)
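
Example #2 also inverts and composes quaternions directly. A quick check of those helpers under the same T-module assumption (values arbitrary):

import numpy as np
import robosuite.utils.transform_utils as T

q = np.array([0.0, 0.0, 0.3826834, 0.9238795])  # 45 degrees about z, x-y-z-w
# Inverting a unit quaternion transposes its rotation matrix.
assert np.allclose(T.quat2mat(T.quat_inverse(q)), T.quat2mat(q).T, atol=1e-6)
# quat_multiply composes rotations: q applied twice is 90 degrees about z.
q90 = np.array([0.0, 0.0, 0.7071068, 0.7071068])
assert np.allclose(T.quat2mat(T.quat_multiply(q, q)), T.quat2mat(q90), atol=1e-6)
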
Example #3
def get_real2sim_posquat(pos, quat):
    # Express a real-robot end effector pose in the simulation frame, which is
    # rotated 90 degrees about the z-axis relative to the real-world frame.
    real_eef_pose = transform_utils.pose2mat((pos, quat))
    angle = np.deg2rad(90)
    direction_axis = [0, 0, 1]
    rotation_matrix = transform_utils.rotation_matrix(angle, direction_axis)

    real_pose_rotated = np.dot(rotation_matrix, real_eef_pose)
    real_eef_pos_rotated = real_pose_rotated[:3, 3].copy()
    real_eef_quat_rotated = transform_utils.mat2quat(real_pose_rotated)
    return real_eef_pos_rotated, real_eef_quat_rotated
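
rotation_matrix returns a 4x4 homogeneous transform, which is why it can be matrix-multiplied against the full pose above. A small sanity check, again assuming robosuite's transform_utils as T:

import numpy as np
import robosuite.utils.transform_utils as T

R = T.rotation_matrix(np.deg2rad(90), [0, 0, 1])  # 4x4, rotation-only
x_axis = np.array([1.0, 0.0, 0.0, 1.0])           # homogeneous x unit vector
# A 90-degree turn about z maps +x onto +y.
assert np.allclose(R @ x_axis, [0.0, 1.0, 0.0, 1.0], atol=1e-7)
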
Example #4

    def ik_robot_eef_joint_cartesian_pose(self):
        """
        Returns the current cartesian pose of the last joint of the ik robot with respect to the base frame as
        a (pos, orn) tuple where orn is an x-y-z-w quaternion
        """
        eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, 6)[0])
        eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, 6)[1])
        eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))

        base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
        base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
        world_pose_in_base = T.pose_inv(base_pose_in_world)

        eef_pose_in_base = T.pose_in_A_to_pose_in_B(
            pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base
        )

        return T.mat2pose(eef_pose_in_base)
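
pose2mat and mat2pose are inverses of each other, which is what makes the matrix round trip through the base frame safe. A minimal round-trip check under the same assumption (arbitrary values):

import numpy as np
import robosuite.utils.transform_utils as T

pos = np.array([0.2, -0.1, 0.5])
quat = np.array([0.0, 0.0, 0.7071068, 0.7071068])  # x-y-z-w
new_pos, new_quat = T.mat2pose(T.pose2mat((pos, quat)))
assert np.allclose(new_pos, pos) and np.allclose(new_quat, quat, atol=1e-6)
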
Example #5
    def bullet_base_pose_to_world_pose(self, pose_in_base):
        """
        Convert a pose in the base frame to a pose in the world frame.

        Args:
            pose_in_base: a (pos, orn) tuple.

        Returns:
pose_in_world: a (pos, orn) tuple.
        """
        pose_in_base = T.pose2mat(pose_in_base)

        base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
        base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))

        pose_in_world = T.pose_in_A_to_pose_in_B(
            pose_A=pose_in_base, pose_A_in_B=base_pose_in_world
        )
        return T.mat2pose(pose_in_world)
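
Converting a pose into the world frame and then mapping it back through pose_inv recovers the original, which is a cheap way to validate this helper. A sketch under the same T-module assumption:

import numpy as np
import robosuite.utils.transform_utils as T

base_pose_in_world = T.pose2mat((np.array([0.0, 0.0, 0.9]),
                                 np.array([0.0, 0.0, 0.0, 1.0])))
pose_in_base = T.pose2mat((np.array([0.4, 0.0, 0.2]),
                           np.array([0.0, 0.0, 0.0, 1.0])))
pose_in_world = T.pose_in_A_to_pose_in_B(pose_in_base, base_pose_in_world)
# Mapping back with the inverted base pose returns the original pose.
back = T.pose_in_A_to_pose_in_B(pose_in_world, T.pose_inv(base_pose_in_world))
assert np.allclose(back, pose_in_base)
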
Example #6
    def bullet_base_pose_to_world_pose(self, pose_in_base):
        """
        Convert a pose in the base frame to a pose in the world frame.

        Args:
            pose_in_base (2-tuple): a (pos, orn) tuple.

        Returns:
            2-tuple: a (pos, orn) tuple reflecting robot pose in world coordinates
        """
        pose_in_base = T.pose2mat(pose_in_base)

        # getBasePositionAndOrientation returns a (pos, orn) tuple whose elements
        # have different lengths, so convert each one to an array separately
        base_pos_in_world, base_orn_in_world = p.getBasePositionAndOrientation(
            self.ik_robot, physicsClientId=self.bullet_server_id
        )
        base_pos_in_world = np.array(base_pos_in_world)
        base_orn_in_world = np.array(base_orn_in_world)

        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))

        pose_in_world = T.pose_in_A_to_pose_in_B(pose_A=pose_in_base, pose_A_in_B=base_pose_in_world)
        return T.mat2pose(pose_in_world)
Example #7
def do_physical_simulation(_sim, _object_list, _action, _next_object_list, _viewer=None, test_horizon=20000, reset=True):
    state = _sim.get_state()
    if reset:
        for _obj in _object_list:
            if 'custom_table' not in _obj.name:
                pos_arr, quat_arr = T.mat2pose(_obj.pose)
                obj_qpos_addr = _sim.model.get_joint_qpos_addr(_obj.name)
                state.qpos[obj_qpos_addr[0]:obj_qpos_addr[0] + 3] = pos_arr
                # reorder x-y-z-w -> w-x-y-z for MuJoCo's qpos convention
                state.qpos[obj_qpos_addr[0] + 3:obj_qpos_addr[0] + 7] = quat_arr[[3, 0, 1, 2]]

    if _action['type'] == "pick":
        pass
    elif _action['type'] == "place":
        held_obj_idx = get_held_object(_object_list)
        _obj = _next_object_list[held_obj_idx]
        pos_arr, quat_arr = T.mat2pose(deepcopy(_obj.pose))
        pos_arr[2] += 0.02  # drop the object from slightly above its target pose
        obj_qpos_addr = _sim.model.get_joint_qpos_addr(_obj.name)
        state.qpos[obj_qpos_addr[0]:obj_qpos_addr[0] + 3] = pos_arr
        # reorder x-y-z-w -> w-x-y-z for MuJoCo's qpos convention
        state.qpos[obj_qpos_addr[0] + 3:obj_qpos_addr[0] + 7] = quat_arr[[3, 0, 1, 2]]

    _sim.set_state(state)

    for _ in range(test_horizon):
        _sim.step()
        if _viewer is not None:
            _viewer.render()

    state = _sim.get_state()
    mis_posed_object_list = []
    sim_object_list = []
    for _obj in _next_object_list:
        sim_obj = deepcopy(_obj)
        if 'custom_table' not in _obj.name:
            pos_arr, quat_arr = T.mat2pose(_obj.pose)
            obj_qpos_addr = _sim.model.get_joint_qpos_addr(_obj.name)
            sim_pos_arr = state.qpos[obj_qpos_addr[0]:obj_qpos_addr[0] + 3]
            sim_quat_arr = state.qpos[obj_qpos_addr[0] + 3:obj_qpos_addr[0] + 7]
            # convert back from MuJoCo's w-x-y-z to x-y-z-w before rebuilding the pose
            sim_obj.pose = T.pose2mat([sim_pos_arr, sim_quat_arr[[1, 2, 3, 0]]])

            dist_diff = np.sqrt(np.sum((pos_arr - sim_pos_arr) ** 2))
            # rotation angle between the two quaternions: arccos(2 * <q1, q2>^2 - 1)
            ang_diff = np.arccos(
                np.maximum(np.minimum(2. * np.sum(quat_arr * sim_quat_arr[[1, 2, 3, 0]]) ** 2 - 1., 1.), -1.))
            if dist_diff > 0.05 or ang_diff > np.pi / 20.:
                mis_posed_object_list.append(_obj)

        sim_object_list.append(sim_obj)

    return mis_posed_object_list, sim_object_list
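
The quat_arr[[3, 0, 1, 2]] reordering in this example converts robosuite's x-y-z-w convention to MuJoCo's w-x-y-z qpos layout; convert_quat does the same thing by name. A short check, assuming T is robosuite's transform_utils:

import numpy as np
import robosuite.utils.transform_utils as T

q_xyzw = np.array([0.0, 0.0, 0.7071068, 0.7071068])
q_wxyz = q_xyzw[[3, 0, 1, 2]]                      # manual reorder, as above
assert np.allclose(T.convert_quat(q_xyzw, to="wxyz"), q_wxyz)
assert np.allclose(q_wxyz[[1, 2, 3, 0]], q_xyzw)   # and the inverse reorder
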
Example #8
def nut_to_eef_pos(obs_cache):
    # Immediately return default value if cache is empty
    if any([
            name not in obs_cache for name in [
                f"{nut_name}_pos", f"{nut_name}_quat",
                "world_pose_in_gripper"
            ]
    ]):
        return np.zeros(3)
    obj_pose = T.pose2mat(
        (obs_cache[f"{nut_name}_pos"], obs_cache[f"{nut_name}_quat"]))
    rel_pose = T.pose_in_A_to_pose_in_B(
        obj_pose, obs_cache["world_pose_in_gripper"])
    rel_pos, rel_quat = T.mat2pose(rel_pose)
    obs_cache[f"{nut_name}_to_{pf}eef_quat"] = rel_quat
    return rel_pos
Example #9

    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].

        Important keys:

            `'robot-state'`: contains robot-centric information.

            `'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information.

            `'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation.

            `'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True.
            Contains a rendered depth map from the simulation.

        Returns:
            OrderedDict: Observations from the environment
        """
        di = super()._get_observation()

        # low-level object information
        if self.use_object_obs:
            # Get robot prefix
            pr = self.robots[0].robot_model.naming_prefix

            # remember the keys to collect into object info
            object_state_keys = []

            # for conversion to relative gripper frame
            gripper_pose = T.pose2mat((di[pr + "eef_pos"], di[pr + "eef_quat"]))
            world_pose_in_gripper = T.pose_inv(gripper_pose)

            for i, obj in enumerate(self.objects):

                if self.single_object_mode == 2 and self.object_id != i:
                    # Skip adding to observations
                    continue

                obj_str = obj.name
                obj_pos = np.array(self.sim.data.body_xpos[self.obj_body_id[obj_str]])
                obj_quat = T.convert_quat(
                    self.sim.data.body_xquat[self.obj_body_id[obj_str]], to="xyzw"
                )
                di["{}_pos".format(obj_str)] = obj_pos
                di["{}_quat".format(obj_str)] = obj_quat

                # get relative pose of object in gripper frame
                object_pose = T.pose2mat((obj_pos, obj_quat))
                rel_pose = T.pose_in_A_to_pose_in_B(object_pose, world_pose_in_gripper)
                rel_pos, rel_quat = T.mat2pose(rel_pose)
                di["{}_to_{}eef_pos".format(obj_str, pr)] = rel_pos
                di["{}_to_{}eef_quat".format(obj_str, pr)] = rel_quat

                object_state_keys.append("{}_pos".format(obj_str))
                object_state_keys.append("{}_quat".format(obj_str))
                object_state_keys.append("{}_to_{}eef_pos".format(obj_str, pr))
                object_state_keys.append("{}_to_{}eef_quat".format(obj_str, pr))

            if self.single_object_mode == 1:
                # Zero out other objects observations
                for obj in self.objects:
                    if obj.name == self.obj_to_use:
                        continue
                    else:
                        di["{}_pos".format(obj.name)] *= 0.0
                        di["{}_quat".format(obj.name)] *= 0.0
                        di["{}_to_{}eef_pos".format(obj.name, pr)] *= 0.0
                        di["{}_to_{}eef_quat".format(obj.name, pr)] *= 0.0

            di["object-state"] = np.concatenate([di[k] for k in object_state_keys])

        return di
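
The relative-pose computation in the loop above is worth seeing in isolation: invert the gripper pose once, then map each object's world pose through it. A standalone sketch with made-up poses, assuming robosuite's transform_utils as T:

import numpy as np
import robosuite.utils.transform_utils as T

obj_pose = T.pose2mat((np.array([0.6, 0.0, 0.85]),
                       np.array([0.0, 0.0, 0.0, 1.0])))
gripper_pose = T.pose2mat((np.array([0.5, 0.0, 1.0]),
                           np.array([0.0, 0.0, 0.0, 1.0])))
world_pose_in_gripper = T.pose_inv(gripper_pose)
rel_pos, rel_quat = T.mat2pose(
    T.pose_in_A_to_pose_in_B(obj_pose, world_pose_in_gripper))
# With identity orientations this is just the position difference.
assert np.allclose(rel_pos, [0.1, 0.0, -0.15])
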
Example #10

    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].
        
        Important keys:
            robot-state: contains robot-centric information.
            object-state: requires @self.use_object_obs to be True.
                contains object-centric information.
            image: requires @self.use_camera_obs to be True.
                contains a rendered frame from the simulation.
            depth: requires @self.use_camera_obs and @self.camera_depth to be True.
                contains a rendered depth map from the simulation.
        """
        di = super()._get_observation()
        if self.use_camera_obs:
            camera_obs = self.sim.render(
                camera_name=self.camera_name,
                width=self.camera_width,
                height=self.camera_height,
                depth=self.camera_depth,
            )
            if self.camera_depth:
                di["image"], di["depth"] = camera_obs
            else:
                di["image"] = camera_obs

        # low-level object information
        if self.use_object_obs:

            # remember the keys to collect into object info
            object_state_keys = []

            # for conversion to relative gripper frame
            gripper_pose = T.pose2mat((di["eef_pos"], di["eef_quat"]))
            world_pose_in_gripper = T.pose_inv(gripper_pose)

            for i in range(len(self.item_names_org)):

                if self.single_object_mode == 2 and self.nut_id != i:
                    # skip observations
                    continue

                obj_str = str(self.item_names_org[i]) + "0"
                obj_pos = np.array(
                    self.sim.data.body_xpos[self.obj_body_id[obj_str]])
                obj_quat = T.convert_quat(
                    self.sim.data.body_xquat[self.obj_body_id[obj_str]],
                    to="xyzw")
                di["{}_pos".format(obj_str)] = obj_pos
                di["{}_quat".format(obj_str)] = obj_quat

                object_pose = T.pose2mat((obj_pos, obj_quat))
                rel_pose = T.pose_in_A_to_pose_in_B(object_pose,
                                                    world_pose_in_gripper)
                rel_pos, rel_quat = T.mat2pose(rel_pose)
                di["{}_to_eef_pos".format(obj_str)] = rel_pos
                di["{}_to_eef_quat".format(obj_str)] = rel_quat

                object_state_keys.append("{}_pos".format(obj_str))
                object_state_keys.append("{}_quat".format(obj_str))
                object_state_keys.append("{}_to_eef_pos".format(obj_str))
                object_state_keys.append("{}_to_eef_quat".format(obj_str))

            if self.single_object_mode == 1:
                # zero out other objs
                for obj_str, obj_mjcf in self.mujoco_objects.items():
                    if obj_str == self.obj_to_use:
                        continue
                    else:
                        di["{}_pos".format(obj_str)] *= 0.0
                        di["{}_quat".format(obj_str)] *= 0.0
                        di["{}_to_eef_pos".format(obj_str)] *= 0.0
                        di["{}_to_eef_quat".format(obj_str)] *= 0.0

            di["object-state"] = np.concatenate(
                [di[k] for k in object_state_keys])

        return di
Example #11
def world_pose_in_gripper(obs_cache):
    return (T.pose_inv(
        T.pose2mat((obs_cache[f"{pf}eef_pos"],
                    obs_cache[f"{pf}eef_quat"])))
            if f"{pf}eef_pos" in obs_cache
            and f"{pf}eef_quat" in obs_cache else np.eye(4))
Example #12

    def _update_orientation(self, name, component):
        """
        Update the position and orientation of an object or a robot in the renderer.

        Args:
            name (string): name of component
            component (nvisii entity or scene): object in the renderer plus other info
                                                for the object.
        """

        obj = component.obj
        parent_body_name = component.parent_body_name
        geom_pos = component.geom_pos
        geom_quat = component.geom_quat
        dynamic = component.dynamic

        if not dynamic:
            return

        self.body_tags = ['robot', 'pedestal', 'gripper', 'peg']

        if parent_body_name != 'worldbody':
            if self.tag_in_name(name):
                pos = self.env.sim.data.get_body_xpos(parent_body_name)
            else:
                pos = self.env.sim.data.get_geom_xpos(name)

            B = self.env.sim.data.body_xmat[self.env.sim.model.body_name2id(
                parent_body_name)].reshape((3, 3))
            quat_xyzw_body = mat2quat(B)
            quat_wxyz_body = np.array([
                quat_xyzw_body[3], quat_xyzw_body[0], quat_xyzw_body[1],
                quat_xyzw_body[2]
            ])  # wxyz
            nvisii_quat = nvisii.quat(*quat_wxyz_body) * nvisii.quat(
                *geom_quat)

            if self.tag_in_name(name):
                # Add a position offset if one is defined in the geom tag
                homo_mat = T.pose2mat((np.zeros(
                    (1, 3), dtype=np.float32), quat_xyzw_body))
                pos_offset = homo_mat @ np.array(
                    [geom_pos[0], geom_pos[1], geom_pos[2], 1.]).transpose()
                pos = pos + pos_offset[:3]

        else:
            pos = [0, 0, 0]
            nvisii_quat = nvisii.quat(1, 0, 0, 0)  # wxyz

        if isinstance(obj, nvisii.scene):

            # temp fix -- look into XML file for correct quat
            if 's_visual' in name:
                # single robot
                if len(self.env.robots) == 1:
                    nvisii_quat = nvisii.quat(0, 0.5, 0, 0)
                # two robots - 0
                elif len(self.env.robots) == 2 and 'robot_0' in name:
                    nvisii_quat = nvisii.quat(-0, 0.5, 0.5, 0)
                # two robots - 1
                else:
                    nvisii_quat = nvisii.quat(-0, 0.5, -0.5, 0)

            obj.transforms[0].set_position(nvisii.vec3(pos[0], pos[1], pos[2]))
            obj.transforms[0].set_rotation(nvisii_quat)
        else:
            obj.get_transform().set_position(
                nvisii.vec3(pos[0], pos[1], pos[2]))
            obj.get_transform().set_rotation(nvisii_quat)
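
The w-x-y-z reordering for nvisii mirrors the MuJoCo conversion seen in Example #7. A minimal check of that step alone, assuming robosuite's transform_utils as T (nvisii itself is not needed for the reorder):

import numpy as np
import robosuite.utils.transform_utils as T

B = np.eye(3)                        # identity body rotation matrix
quat_xyzw = T.mat2quat(B)            # robosuite returns x-y-z-w
quat_wxyz = quat_xyzw[[3, 0, 1, 2]]  # nvisii.quat expects w-x-y-z
assert np.allclose(quat_wxyz, [1.0, 0.0, 0.0, 0.0])
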