Example 1
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].
        
        Important keys:
            robot-state: contains robot-centric information.
            object-state: requires @self.use_object_obs to be True.
                contains object-centric information.
            image: requires @self.use_camera_obs to be True.
                contains a rendered frame from the simulation.
            depth: requires @self.use_camera_obs and @self.camera_depth to be True.
                contains a rendered depth map from the simulation.
        """
        di = super()._get_observation()

        # camera observations
        if self.use_camera_obs:
            camera_obs = self.sim.render(
                camera_name=self.camera_name,
                width=self.camera_width,
                height=self.camera_height,
                depth=self.camera_depth,
            )
            if self.camera_depth:
                di["image"], di["depth"] = camera_obs
            else:
                di["image"] = camera_obs

        # low-level object information
        if self.use_object_obs:
            # position and rotation of cylinder and hole
            hole_pos = self.sim.data.body_xpos[self.hole_body_id]
            hole_quat = T.convert_quat(
                self.sim.data.body_xquat[self.hole_body_id], to="xyzw")
            di["hole_pos"] = hole_pos
            di["hole_quat"] = hole_quat

            cyl_pos = self.sim.data.body_xpos[self.cyl_body_id]
            cyl_quat = T.convert_quat(
                self.sim.data.body_xquat[self.cyl_body_id], to="xyzw")
            di["cyl_to_hole"] = cyl_pos - hole_pos
            di["cyl_quat"] = cyl_quat

            # Relative orientation parameters
            t, d, cos = self._compute_orientation()
            di["angle"] = cos
            di["t"] = t
            di["d"] = d

            di["object-state"] = np.concatenate([
                di["hole_pos"],
                di["hole_quat"],
                di["cyl_to_hole"],
                di["cyl_quat"],
                [di["angle"]],
                [di["t"]],
                [di["d"]],
            ])

        return di
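A note on conventions: MuJoCo stores quaternions as (w, x, y, z), while these environments expose (x, y, z, w). A minimal sketch of the reordering that T.convert_quat performs (assuming a pure index permutation, as in robosuite's transform utilities):

import numpy as np

def convert_quat_sketch(q, to="xyzw"):
    """Reorder a quaternion between (w,x,y,z) and (x,y,z,w) layouts."""
    q = np.asarray(q)
    if to == "xyzw":
        return q[[1, 2, 3, 0]]  # (w,x,y,z) -> (x,y,z,w)
    if to == "wxyz":
        return q[[3, 0, 1, 2]]  # (x,y,z,w) -> (w,x,y,z)
    raise ValueError("to must be 'xyzw' or 'wxyz'")

print(convert_quat_sketch(np.array([1.0, 0.0, 0.0, 0.0])))  # identity -> [0. 0. 0. 1.]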
Example 2
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].

        Important keys:

            `'robot-state'`: contains robot-centric information.

            `'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information.

            `'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation.

            `'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True.
            Contains a rendered depth map from the simulation.

        Returns:
            OrderedDict: Observations from the environment
        """
        di = super()._get_observation()

        # low-level object information
        if self.use_object_obs:
            # Get robot prefix
            if self.env_configuration == "bimanual":
                pr0 = self.robots[0].robot_model.naming_prefix + "left_"
                pr1 = self.robots[0].robot_model.naming_prefix + "right_"
            else:
                pr0 = self.robots[0].robot_model.naming_prefix
                pr1 = self.robots[1].robot_model.naming_prefix

            # position and rotation of peg and hole
            hole_pos = np.array(self.sim.data.body_xpos[self.hole_body_id])
            hole_quat = T.convert_quat(
                self.sim.data.body_xquat[self.hole_body_id], to="xyzw")
            di["hole_pos"] = hole_pos
            di["hole_quat"] = hole_quat

            peg_pos = np.array(self.sim.data.body_xpos[self.peg_body_id])
            peg_quat = T.convert_quat(
                self.sim.data.body_xquat[self.peg_body_id], to="xyzw")
            di["peg_to_hole"] = peg_pos - hole_pos
            di["peg_quat"] = peg_quat

            # Relative orientation parameters
            t, d, cos = self._compute_orientation()
            di["angle"] = cos
            di["t"] = t
            di["d"] = d

            di["object-state"] = np.concatenate([
                di["hole_pos"],
                di["hole_quat"],
                di["peg_to_hole"],
                di["peg_quat"],
                [di["angle"]],
                [di["t"]],
                [di["d"]],
            ])

        return di
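The concatenation gives 'object-state' a fixed length: 3 (hole_pos) + 4 (hole_quat) + 3 (peg_to_hole) + 4 (peg_quat) + 3 scalars = 17 entries. A quick sanity check with hypothetical values:

import numpy as np

object_state = np.concatenate([
    np.zeros(3),                # hole_pos
    np.array([0, 0, 0, 1.0]),   # hole_quat, (x,y,z,w) identity
    np.zeros(3),                # peg_to_hole
    np.array([0, 0, 0, 1.0]),   # peg_quat
    [1.0], [0.0], [0.0],        # angle, t, d
])
assert object_state.shape == (17,)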
Example 3
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].

        Important keys:

            `'robot-state'`: contains robot-centric information.

            `'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information.

            `'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation.

            `'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True.
            Contains a rendered depth map from the simulation.

        Returns:
            OrderedDict: Observations from the environment
        """
        di = super()._get_observation()

        # low-level object information
        if self.use_object_obs:
            # Get robot prefix
            pr = self.robots[0].robot_model.naming_prefix

            # position and rotation of the first cube
            cubeA_pos = np.array(self.sim.data.body_xpos[self.cubeA_body_id])
            cubeA_quat = convert_quat(np.array(
                self.sim.data.body_xquat[self.cubeA_body_id]),
                                      to="xyzw")
            di["cubeA_pos"] = cubeA_pos
            di["cubeA_quat"] = cubeA_quat

            # position and rotation of the second cube
            cubeB_pos = np.array(self.sim.data.body_xpos[self.cubeB_body_id])
            cubeB_quat = convert_quat(np.array(
                self.sim.data.body_xquat[self.cubeB_body_id]),
                                      to="xyzw")
            di["cubeB_pos"] = cubeB_pos
            di["cubeB_quat"] = cubeB_quat

            # relative positions between gripper and cubes
            gripper_site_pos = np.array(
                self.sim.data.site_xpos[self.robots[0].eef_site_id])
            di[pr + "gripper_to_cubeA"] = gripper_site_pos - cubeA_pos
            di[pr + "gripper_to_cubeB"] = gripper_site_pos - cubeB_pos
            di["cubeA_to_cubeB"] = cubeA_pos - cubeB_pos

            di["object-state"] = np.concatenate([
                cubeA_pos,
                cubeA_quat,
                cubeB_pos,
                cubeB_quat,
                di[pr + "gripper_to_cubeA"],
                di[pr + "gripper_to_cubeB"],
                di["cubeA_to_cubeB"],
            ])

        return di
Example 4
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].
        
        Important keys:
            robot-state: contains robot-centric information.
        """
        di = super()._get_observation()
        # proprioceptive features
        di["joint_pos"] = np.array(
            [self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes])
        di["joint_vel"] = np.array(
            [self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes])
        robot_states = [
            np.sin(di["joint_pos"]),
            np.cos(di["joint_pos"]),
            di["joint_vel"],
        ]

        if self.has_gripper_right:
            di["right_gripper_qpos"] = np.array([
                self.sim.data.qpos[x]
                for x in self._ref_gripper_right_joint_pos_indexes
            ])
            di["right_gripper_qvel"] = np.array([
                self.sim.data.qvel[x]
                for x in self._ref_gripper_right_joint_vel_indexes
            ])
            di["right_eef_pos"] = np.array(
                self.sim.data.site_xpos[self.right_eef_site_id])
            di["right_eef_quat"] = T.convert_quat(
                self.sim.data.get_body_xquat("right_hand"), to="xyzw")
            robot_states.extend([
                di["right_gripper_qpos"], di["right_eef_pos"],
                di["right_eef_quat"]
            ])

        if self.has_gripper_left:
            di["left_gripper_qpos"] = np.array([
                self.sim.data.qpos[x]
                for x in self._ref_gripper_left_joint_pos_indexes
            ])
            di["left_gripper_qvel"] = np.array([
                self.sim.data.qvel[x]
                for x in self._ref_gripper_left_joint_vel_indexes
            ])
            di["left_eef_pos"] = np.array(
                self.sim.data.site_xpos[self.left_eef_site_id])
            di["left_eef_quat"] = T.convert_quat(
                self.sim.data.get_body_xquat("left_hand"), to="xyzw")
            robot_states.extend([
                di["left_gripper_qpos"], di["left_eef_pos"],
                di["left_eef_quat"]
            ])

        di["robot-state"] = np.concatenate(robot_states)
        return di
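Encoding joint angles as (sin q, cos q) keeps the robot state continuous at the angle wrap-around, where the raw value jumps by 2*pi. A small illustration:

import numpy as np

q_a, q_b = np.pi - 1e-3, -np.pi + 1e-3  # physically adjacent joint angles
print(abs(q_a - q_b))                   # ~2*pi: far apart as raw angles
print(np.hypot(np.sin(q_a) - np.sin(q_b),
               np.cos(q_a) - np.cos(q_b)))  # ~2e-3: close in the encoding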
Example 5
    def reward(self, action=None):
        """
        Reward function for the task.

        Args:
            action (np array): [NOT USED]

        Returns:
            float: reward value
        """

        reward = 0.

        ee_current_ori = convert_quat(self._eef_xquat,
                                      to="wxyz")  # (w, x, y, z) quaternion
        ee_desired_ori = convert_quat(self.goal_quat, to="wxyz")

        # position
        self.pos_error = np.square(self.pos_error_mul *
                                   (self._eef_xpos[0:-1] - self.traj_pt[0:-1]))
        self.pos_reward = self.pos_reward_mul * np.exp(
            -1 * np.linalg.norm(self.pos_error))

        # orientation
        self.ori_error = self.ori_error_mul * distance_quat(
            ee_current_ori, ee_desired_ori)
        self.ori_reward = self.ori_reward_mul * np.exp(-1 * self.ori_error)

        # velocity
        self.vel_error = np.square(
            self.vel_error_mul * (self.vel_running_mean - self.goal_velocity))
        self.vel_reward = self.vel_reward_mul * np.exp(
            -1 * np.linalg.norm(self.vel_error))

        # force
        self.force_error = np.square(
            self.force_error_mul *
            (self.z_contact_force_running_mean - self.goal_contact_z_force))
        self.force_reward = (self.force_reward_mul * np.exp(-1 * self.force_error)
                             if self._check_probe_contact_with_torso() else 0)

        # derivative force
        self.der_force_error = np.square(
            self.der_force_error_mul *
            (self.der_z_contact_force - self.goal_der_contact_z_force))
        self.der_force_reward = (self.der_force_reward_mul * np.exp(-1 * self.der_force_error)
                                 if self._check_probe_contact_with_torso() else 0)

        # add rewards
        reward += (self.pos_reward + self.ori_reward + self.vel_reward +
                   self.force_reward + self.der_force_reward)

        return reward
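Each term above follows the same template: a squared, scaled error is pushed through exp(-||.||), so every component lies in (0, reward_mul] and decays smoothly with error. A minimal sketch of that kernel (names are illustrative, not from the source):

import numpy as np

def exp_kernel(error, error_mul=1.0, reward_mul=1.0):
    err = np.square(error_mul * np.atleast_1d(error))
    return reward_mul * np.exp(-np.linalg.norm(err))

print(exp_kernel(0.0))  # 1.0 at zero error
print(exp_kernel(0.5))  # smaller, but never negative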
Example 6
    def put_raw_object_obs(self, di):
        # Extract position and velocity of the eef
        eef_pos_in_world = self.sim.data.get_body_xpos("right_hand")
        eef_xvelp_in_world = self.sim.data.get_body_xvelp("right_hand")

        # Get the position, velocity, rotation, and rotational velocity of the object in the world frame
        object_pos_in_world = self.sim.data.body_xpos[self.cube_body_id]
        object_xvelp_in_world = self.sim.data.get_body_xvelp('cube')
        object_rot_in_world = self.sim.data.get_body_xmat('cube')

        # (Kept for reference) z-angle relative to a reference frame with
        # sin-cosine encoding; the raw quaternion below is used instead:
        # world_rotation_in_reference = np.array([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])
        # object_rotation_in_ref = world_rotation_in_reference.dot(object_rot_in_world)
        # object_euler_in_ref = T.mat2euler(object_rotation_in_ref)
        # z_angle = object_euler_in_ref[2]

        object_quat = convert_quat(self.sim.data.body_xquat[self.cube_body_id],
                                   to='xyzw')

        # Get the goal position in the world
        goal_site_pos_in_world = np.array(
            self.sim.data.site_xpos[self.goal_site_id])

        # Record observations into a dictionary
        di['goal_pos_in_world'] = goal_site_pos_in_world
        di['eef_pos_in_world'] = eef_pos_in_world
        di['eef_vel_in_world'] = eef_xvelp_in_world
        di['object_pos_in_world'] = object_pos_in_world
        di['object_vel_in_world'] = object_xvelp_in_world
        # di["z_angle"] = np.array([z_angle])
        di['object_quat'] = object_quat
Example 7
    def put_raw_object_obs(self, di):
        di['cube_pos'] = np.array(self.sim.data.body_xpos[self.cube_body_id])
        di['cube_quat'] = convert_quat(
            np.array(self.sim.data.body_xquat[self.cube_body_id]), to="xyzw")
        di['gripper_site_pos'] = np.array(
            self.sim.data.site_xpos[self.eef_site_id])
Example 8
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].
        Important keys:
            `'robot-state'`: contains robot-centric information.
            `'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information.
            `'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation.
            `'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True.
            Contains a rendered depth map from the simulation.
        Returns:
            OrderedDict: Observations from the environment
        """
        di = super()._get_observation()

        robot = self.robots[0]

        if robot.has_gripper:

            # Checking if the UltrasoundProbeGripper is used
            if robot.gripper.dof == 0:
                # Remove unused keys (no joints in gripper)
                di.pop('robot0_gripper_qpos', None)
                di.pop('robot0_gripper_qvel', None)
                di['gripper_pos'] = np.array(
                    self.sim.data.get_body_xpos('gripper0_gripper_base'))
                di['gripper_velp'] = np.array(
                    self.sim.data.get_body_xvelp('gripper0_gripper_base'))
                di['gripper_quat'] = convert_quat(
                    self.sim.data.get_body_xquat('gripper0_gripper_base'),
                    to="xyzw")

            di['contact'] = self._check_gripper_contact()

        return di
Example 9
    def rotate_camera(self, point, axis, angle):
        """
        Rotate the camera view about a direction (in the camera frame).

        Args:
            point (None or 3-array): (x,y,z) cartesian coordinates about which to rotate camera in camera frame. If None,
                assumes the point is the current location of the camera
            axis (3-array): (ax,ay,az) axis about which to rotate camera in camera frame
            angle (float): how much to rotate about that direction (degrees)

        Returns:
            2-tuple:
                pos: (x,y,z) updated camera position
                quat: (x,y,z,w) updated camera quaternion orientation
        """
        # current camera rotation + pos
        camera_pos = np.array(self.env.sim.data.get_mocap_pos(self.mover_body_name))
        camera_rot = T.quat2mat(T.convert_quat(self.env.sim.data.get_mocap_quat(self.mover_body_name), to="xyzw"))

        # rotate by angle and direction to get new camera rotation
        rad = np.pi * angle / 180.0
        R = T.rotation_matrix(rad, axis, point=point)
        camera_pose = np.zeros((4, 4))
        camera_pose[:3, :3] = camera_rot
        camera_pose[:3, 3] = camera_pos
        camera_pose = camera_pose @ R

        # Update camera pose
        pos, quat = camera_pose[:3, 3], T.mat2quat(camera_pose[:3, :3])
        self.set_camera_pose(pos=pos, quat=quat)

        return pos, quat
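Post-multiplying the pose (camera_pose @ R) applies the rotation in the camera's local frame; pre-multiplying would rotate about the world origin instead. A sketch of the difference with standard homogeneous transforms:

import numpy as np

def rot_z(rad):
    c, s = np.cos(rad), np.sin(rad)
    T = np.eye(4)
    T[:2, :2] = [[c, -s], [s, c]]
    return T

pose = np.eye(4)
pose[:3, 3] = [1.0, 0.0, 0.0]        # camera at x = 1, axis-aligned
local = pose @ rot_z(np.pi / 2)      # position unchanged, heading rotated
world = rot_z(np.pi / 2) @ pose      # position swings around the world origin
print(local[:3, 3])                  # [1. 0. 0.]
print(world[:3, 3])                  # ~[0. 1. 0.]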
Example 10
    def _world_quat(self):
        """
        Grab the world orientation

        Returns:
            np.array: (x,y,z,w) world quaternion
        """
        return T.convert_quat(np.array([1, 0, 0, 0]), to="xyzw")
Example 11
    def _pot_quat(self):
        """
        Grab the orientation of the pot body.

        Returns:
            np.array: (x,y,z,w) quaternion of the pot body
        """
        return T.convert_quat(self.sim.data.body_xquat[self.pot_body_id], to="xyzw")
Example 12
    def get_observations(self, di: OrderedDict):
        """
        Returns an OrderedDict containing robot observations [(name_string, np.array), ...].

        Important keys:

            `'robot-state'`: contains robot-centric information.

        Args:
            di (OrderedDict): Current set of observations from the environment

        Returns:
            OrderedDict: Augmented set of observations that include this robot's proprioceptive observations
        """
        # Get prefix from robot model to avoid naming clashes for multiple robots
        pf = self.robot_model.naming_prefix

        # proprioceptive features
        di[pf + "joint_pos"] = np.array(
            [self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes])
        di[pf + "joint_vel"] = np.array(
            [self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes])

        robot_states = [
            np.sin(di[pf + "joint_pos"]),
            np.cos(di[pf + "joint_pos"]),
            di[pf + "joint_vel"],
        ]

        for arm in self.arms:
            # Add in eef info
            di[pf + "_{}_".format(arm) + "eef_pos"] = np.array(
                self.sim.data.site_xpos[self.eef_site_id[arm]])
            di[pf + "_{}_".format(arm) + "eef_quat"] = T.convert_quat(
                self.sim.data.get_body_xquat(self.robot_model.eef_name[arm]),
                to="xyzw")
            robot_states.extend([
                di[pf + "_{}_".format(arm) + "eef_pos"],
                di[pf + "_{}_".format(arm) + "eef_quat"]
            ])

            # add in gripper information
            if self.has_gripper[arm]:
                di[pf + "_{}_".format(arm) + "gripper_qpos"] = np.array([
                    self.sim.data.qpos[x]
                    for x in self._ref_gripper_joint_pos_indexes[arm]
                ])
                di[pf + "_{}_".format(arm) + "gripper_qvel"] = np.array([
                    self.sim.data.qvel[x]
                    for x in self._ref_gripper_joint_vel_indexes[arm]
                ])
                robot_states.extend([
                    di[pf + "_{}_".format(arm) + "gripper_qpos"],
                    di[pf + "_{}_".format(arm) + "gripper_qvel"]
                ])

        di[pf + "robot-state"] = np.concatenate(robot_states)
        return di
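The prefix matters as soon as two robots write into the same dictionary: without it, the second robot's keys would silently overwrite the first's. A toy illustration with hypothetical prefixes:

di = {}
for pf in ("robot0_", "robot1_"):
    di[pf + "joint_pos"] = [0.0]          # one entry per robot, no clash
print(sorted(di))  # ['robot0_joint_pos', 'robot1_joint_pos']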
Example 13
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].

        Important keys:
            robot-state: contains robot-centric information.
            object-state: requires @self.use_object_obs to be True.
                contains object-centric information.
            image: requires @self.use_camera_obs to be True.
                contains a rendered frame from the simulation.
            depth: requires @self.use_camera_obs and @self.camera_depth to be True.
                contains a rendered depth map from the simulation.
        """
        di = super()._get_observation()
        # camera observations
        if self.use_camera_obs:
            camera_obs = self.sim.render(
                camera_name=self.camera_name,
                width=self.camera_width,
                height=self.camera_height,
                depth=self.camera_depth,
            )
            if self.camera_depth:
                di["image"], di["depth"] = camera_obs
            else:
                di["image"] = camera_obs

        # low-level object information
        if self.use_object_obs:
            # position and rotation of object
            cube_pos = np.array(self.sim.data.body_xpos[self.cube_body_id])
            cube_quat = T.convert_quat(
                self.sim.data.body_xquat[self.cube_body_id], to="xyzw"
            )
            di["cube_pos"] = cube_pos
            di["cube_quat"] = cube_quat

            di["l_eef_xpos"] = np.array(self._l_eef_xpos)
            di["r_eef_xpos"] = np.array(self._r_eef_xpos)
            di["handle_1_xpos"] = np.array(self._handle_1_xpos)
            di["handle_2_xpos"] = np.array(self._handle_2_xpos)
            di["l_gripper_to_handle"] = np.array(self._l_gripper_to_handle)
            di["r_gripper_to_handle"] = np.array(self._r_gripper_to_handle)

            di["object-state"] = np.concatenate(
                [
                    di["cube_pos"],
                    di["cube_quat"],
                    di["l_eef_xpos"],
                    di["r_eef_xpos"],
                    di["handle_1_xpos"],
                    di["handle_2_xpos"],
                    di["l_gripper_to_handle"],
                    di["r_gripper_to_handle"],
                ]
            )

        return di
Example 14
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].

        Important keys:

            `'robot-state'`: contains robot-centric information.

            `'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information.

            `'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation.

            `'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True.
            Contains a rendered depth map from the simulation.

        Returns:
            OrderedDict: Observations from the environment
        """
        di = super()._get_observation()

        # low-level object information
        if self.use_object_obs:
            # Get robot prefix
            if self.env_configuration == "bimanual":
                pr0 = self.robots[0].robot_model.naming_prefix + "left_"
                pr1 = self.robots[0].robot_model.naming_prefix + "right_"
            else:
                pr0 = self.robots[0].robot_model.naming_prefix
                pr1 = self.robots[1].robot_model.naming_prefix

            # position and rotation of the pot (stored under legacy "cube" keys)
            cube_pos = np.array(self.sim.data.body_xpos[self.pot_body_id])
            cube_quat = T.convert_quat(
                self.sim.data.body_xquat[self.pot_body_id], to="xyzw")
            di["cube_pos"] = cube_pos
            di["cube_quat"] = cube_quat

            di[pr0 + "eef_xpos"] = self._eef0_xpos
            di[pr1 + "eef_xpos"] = self._eef1_xpos
            di["handle_0_xpos"] = np.array(self._handle_0_xpos)
            di["handle_1_xpos"] = np.array(self._handle_1_xpos)
            di[pr0 + "gripper_to_handle"] = np.array(self._gripper_0_to_handle)
            di[pr1 + "gripper_to_handle"] = np.array(self._gripper_1_to_handle)

            di["object-state"] = np.concatenate([
                di["cube_pos"],
                di["cube_quat"],
                di[pr0 + "eef_xpos"],
                di[pr1 + "eef_xpos"],
                di["handle_0_xpos"],
                di["handle_1_xpos"],
                di[pr0 + "gripper_to_handle"],
                di[pr1 + "gripper_to_handle"],
            ])

        return di
Example 15
    def _randomize_rotation(self, name):
        """
        Helper function to randomize orientation of a specific camera

        Args:
            name (str): Name of the camera to randomize for
        """
        # sample a small, random axis-angle delta rotation
        random_axis, random_angle = trans.random_axis_angle(angle_limit=self.rotation_perturbation_size, random_state=self.random_state)
        random_delta_rot = trans.quat2mat(trans.axisangle2quat(random_axis * random_angle))
        
        # compute new rotation and set it
        base_rot = trans.quat2mat(trans.convert_quat(self._defaults[name]['quat'], to='xyzw'))
        new_rot = random_delta_rot.T.dot(base_rot)
        new_quat = trans.convert_quat(trans.mat2quat(new_rot), to='wxyz')
        self.set_quat(
            name,
            new_quat,
        )
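The perturbation pipeline is: sample (axis, angle), convert to a quaternion, then to a 3x3 matrix, and compose it with the base rotation. A self-contained axis-angle-to-matrix sketch via Rodrigues' formula, in case the trans helpers are unfamiliar:

import numpy as np

def axisangle_to_mat(axis, angle):
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0, -axis[2], axis[1]],
                  [axis[2], 0, -axis[0]],
                  [-axis[1], axis[0], 0]])  # skew-symmetric cross-product matrix
    return np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * (K @ K)

print(axisangle_to_mat([0, 0, 1], np.pi / 2).round(3))  # 90-degree yaw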
Example 16
def rotate_camera(env, direction, angle, camera_id):
    """
    Rotate the camera view about a direction (in the camera frame).
    :param env: environment whose sim owns the "cameramover" mocap body
    :param direction: a 3-dim numpy array giving the axis to rotate about, in the camera frame
    :param angle: a float for how much to rotate about that axis (degrees)
    :param camera_id: which camera to modify (unused here; the "cameramover" mocap body is always moved)
    """

    # current camera rotation
    camera_rot = T.quat2mat(
        T.convert_quat(env.sim.data.get_mocap_quat("cameramover"), to='xyzw'))

    # rotate by angle and direction to get new camera rotation
    rad = np.pi * angle / 180.0
    R = T.rotation_matrix(rad, direction, point=None)
    camera_rot = camera_rot.dot(R[:3, :3])

    # set new rotation
    env.sim.data.set_mocap_quat(
        "cameramover", T.convert_quat(T.mat2quat(camera_rot), to='wxyz'))
    env.sim.forward()
Example 17
    def _get_body_pos(self, name, base=True):
        if base:
            pose = self.pose_in_base_from_name(name)
            body_pos = pose[:3, 3]
            # pose[:3, :3] is a rotation matrix; convert it so both branches
            # return an (x, y, z, w) quaternion (assumes a mat2quat helper is
            # imported alongside convert_quat)
            body_quat = mat2quat(pose[:3, :3])
        else:
            body_id = self.sim.model.body_name2id(name)
            body_pos = np.array(self.sim.data.body_xpos[body_id])
            body_quat = convert_quat(
                    np.array(self.sim.data.body_xquat[body_id]), to="xyzw")

        return body_pos, body_quat
Example 18
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].
        Important keys:
            `'robot-state'`: contains robot-centric information.
            `'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information.
            `'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation.
            `'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True.
            Contains a rendered depth map from the simulation.
        Returns:
            OrderedDict: Observations from the environment
        """
        di = super()._get_observation()

        # Get robot prefix
        pr = self.robots[0].robot_model.naming_prefix

        if self.robots[0].has_gripper:
            # Checking if the UltrasoundProbeGripper is used
            if self.robots[0].gripper.dof == 0:
                # Remove unused keys (no joints in gripper)
                di.pop('robot0_gripper_qpos', None)
                di.pop('robot0_gripper_qvel', None)

        # low-level object information
        if self.use_object_obs:

            # position and rotation of object
            goal_pos = np.array(
                self.sim.data.body_xpos[self.goal_cube_body_id])
            cube_pos = np.array(self.sim.data.body_xpos[self.cube_body_id])
            di["cube_pos"] = cube_pos
            di["goal_pos"] = goal_pos
            di[pr + "cube_to_goal"] = cube_pos - goal_pos

            cube_quat = convert_quat(np.array(
                self.sim.data.body_xquat[self.cube_body_id]),
                                     to="xyzw")

            gripper_site_pos = np.array(
                self.sim.data.site_xpos[self.robots[0].eef_site_id])
            di[pr + "gripper_to_cube"] = gripper_site_pos - cube_pos

            # Used for GymWrapper observations (robot state, e.g. eef position, is also added by default)
            di["object-state"] = np.concatenate([
                cube_pos,
                cube_quat,
                goal_pos,
                di[pr + "gripper_to_cube"],
                di[pr + "cube_to_goal"],
            ])
        return di
Example 19
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].

        Important keys:
            robot-state: contains robot-centric information.
            object-state: requires @self.use_object_obs to be True.
                contains object-centric information.
            image: requires @self.use_camera_obs to be True.
                contains a rendered frame from the simulation.
            depth: requires @self.use_camera_obs and @self.camera_depth to be True.
                contains a rendered depth map from the simulation.
        """
        di = super()._get_observation()
        # camera observations
        if self.use_camera_obs:
            camera_obs = self.sim.render(
                camera_name=self.camera_name,
                width=self.camera_width,
                height=self.camera_height,
                depth=self.camera_depth,
            )
            if self.camera_depth:
                di["image"], di["depth"] = camera_obs
            else:
                di["image"] = camera_obs

        # low-level object information
        if self.use_object_obs:
            # position and rotation of object
            cube_pos = np.array(self.sim.data.body_xpos[self.cube_body_id])
            cube_quat = convert_quat(np.array(
                self.sim.data.body_xquat[self.cube_body_id]),
                                     to="xyzw")
            di["cube_pos"] = cube_pos
            di["cube_quat"] = cube_quat

            # Normalize the quaternion and pin its sign, mirroring the robot-state normalization
            di["cube_quat"] = di["cube_quat"] / np.linalg.norm(di["cube_quat"])
            if (di["cube_quat"][3] < 0):
                di["cube_quat"] = -di["cube_quat"]

            gripper_site_pos = np.array(
                self.sim.data.site_xpos[self.eef_site_id])
            di["gripper_to_cube"] = gripper_site_pos - cube_pos

            di["object-state"] = np.concatenate(
                [cube_pos, cube_quat, di["gripper_to_cube"]])

            di["lift_reach_reward"] = self.box_end  # reward for reaching ie. lifting

        return di
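The sign flip works because q and -q encode the same rotation; pinning w >= 0 picks one canonical representative per orientation, and the renormalization guards against numerical drift away from unit length. As a standalone helper (a sketch, not from the source):

import numpy as np

def canonicalize_xyzw(q):
    q = np.asarray(q, dtype=float)
    q = q / np.linalg.norm(q)
    return -q if q[3] < 0 else q

print(canonicalize_xyzw([0.0, 0.7071, 0.0, -0.7071]))  # flips to w >= 0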
Example 20
def rotate_camera(env, direction, angle, cam_body_id):
    """
    Rotate the camera view about a direction (in the camera frame).

    Args:
        env: environment whose sim owns the camera body
        direction (np.array): 3-array giving the axis to rotate about, in the camera frame
        angle (float): how much to rotate about that direction (degrees)
    """

    # current camera rotation
    camera_rot = T.quat2mat(T.convert_quat(env.sim.model.body_quat[cam_body_id], to='xyzw'))

    # rotate by angle and direction to get new camera rotation
    rad = np.pi * angle / 180.0
    R = T.rotation_matrix(rad, direction, point=None)
    camera_rot = camera_rot.dot(R[:3, :3])

    # set new rotation
    env.sim.model.body_quat[cam_body_id] = T.convert_quat(T.mat2quat(camera_rot), to='wxyz')
    env.sim.forward()
Example 21
    def get_camera_pose(self):
        """
        Grab the current camera pose, which optionally includes position and / or quaternion

        Returns:
            2-tuple:
                - 3-array: (x,y,z) camera global cartesian pos
                - 4-array: (x,y,z,w) camera global quaternion orientation
        """
        # Grab values from sim
        pos = self.env.sim.data.get_mocap_pos(self.mover_body_name)
        quat = T.convert_quat(self.env.sim.data.get_mocap_quat(self.mover_body_name), to="xyzw")

        return pos, quat
Example 22
    def set_camera_pose(self, pos=None, quat=None):
        """
        Sets the camera pose, which optionally includes position and / or quaternion

        Args:
            pos (None or 3-array): If specified, should be the (x,y,z) global cartesian pos to set camera to
            quat (None or 4-array): If specified, should be the (x,y,z,w) global quaternion orientation to set camera to
        """
        if pos is not None:
            self.env.sim.data.set_mocap_pos(self.mover_body_name, pos)
        if quat is not None:
            self.env.sim.data.set_mocap_quat(self.mover_body_name, T.convert_quat(quat, to="wxyz"))

        # Make sure changes propagate in sim
        self.env.sim.forward()
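set_camera_pose stores the quaternion in MuJoCo's (w,x,y,z) layout and get_camera_pose converts back to (x,y,z,w), so a get after a set returns the quaternion that was passed in. A round-trip check using plain reordering:

import numpy as np

q_xyzw = np.array([0.0, 0.0, 0.7071, 0.7071])
q_wxyz = q_xyzw[[3, 0, 1, 2]]                      # what the mocap buffer holds
assert np.allclose(q_wxyz[[1, 2, 3, 0]], q_xyzw)   # reading it back round-trips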
Example 23
def move_camera(env, direction, scale, camera_id):
    """
    Move the camera view along a direction (in the camera frame).
    :param env: environment whose sim owns the "cameramover" mocap body
    :param direction: a 3-dim numpy array for where to move camera in camera frame
    :param scale: a float for how much to move along that direction
    :param camera_id: which camera to modify (unused here; the "cameramover" mocap body is always moved)
    """

    # current camera pose
    camera_pos = np.array(env.sim.data.get_mocap_pos("cameramover"))
    camera_rot = T.quat2mat(
        T.convert_quat(env.sim.data.get_mocap_quat("cameramover"), to='xyzw'))

    # move along camera frame axis and set new position
    camera_pos += scale * camera_rot.dot(direction)
    env.sim.data.set_mocap_pos("cameramover", camera_pos)
    env.sim.forward()
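The update p' = p + scale * R @ direction steps the camera along one of its own axes: with direction = [1, 0, 0] it moves along the camera's local x-axis no matter how the camera is oriented in the world. A hypothetical check:

import numpy as np

R = np.array([[0., -1., 0.],   # camera yawed 90 degrees about world z
              [1.,  0., 0.],
              [0.,  0., 1.]])
p = np.zeros(3)
p_new = p + 0.1 * R @ np.array([1., 0., 0.])  # step along camera x-axis
print(p_new)  # [0.  0.1 0. ] -- a world-frame y step, as expected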
Example 24
    def _get_observation(self):
        """
        Returns an OrderedDict containing observations [(name_string, np.array), ...].

        Important keys:
            robot-state: contains robot-centric information.
        """

        di = super()._get_observation()
        # proprioceptive features
        di["joint_pos"] = np.array(
            [self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes])
        di["joint_vel"] = np.array(
            [self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes])

        robot_states = [
            np.sin(di["joint_pos"]),
            np.cos(di["joint_pos"]),
            di["joint_vel"],
        ]

        if self.has_gripper:
            di["gripper_qpos"] = np.array([
                self.sim.data.qpos[x]
                for x in self._ref_gripper_joint_pos_indexes
            ])
            di["gripper_qvel"] = np.array([
                self.sim.data.qvel[x]
                for x in self._ref_gripper_joint_vel_indexes
            ])

            di["eef_pos"] = np.array(self.sim.data.site_xpos[self.eef_site_id])
            di["eef_quat"] = T.convert_quat(
                self.sim.data.get_body_xquat("right_hand"), to="xyzw")

            # Normalize the quaternion and flip its sign if w is negative, keeping a canonical representation
            di["eef_quat"] = di["eef_quat"] / np.linalg.norm(di["eef_quat"])
            if (di["eef_quat"][3] < 0):
                di["eef_quat"] = -di["eef_quat"]

            # add in gripper information
            robot_states.extend(
                [di["gripper_qpos"], di["eef_pos"], di["eef_quat"]])

        di["robot-state"] = np.concatenate(robot_states)
        return di
Example 25
def move_camera(env, direction, scale, cam_body_id):
    """
    Move the camera view along a direction (in the camera frame).

    Args:
        env: environment whose sim owns the camera body
        direction (np.array): 3-array for where to move camera in camera frame
        scale (float): how much to move along that direction
        cam_body_id (int): id corresponding to parent body of camera element
    """

    # current camera pose
    camera_pos = np.array(env.sim.model.body_pos[cam_body_id])
    camera_rot = T.quat2mat(T.convert_quat(env.sim.model.body_quat[cam_body_id], to='xyzw'))

    # move along camera frame axis and set new position
    camera_pos += scale * camera_rot.dot(direction)
    env.sim.model.body_pos[cam_body_id] = camera_pos
    env.sim.forward()
Example 26
    def move_camera(self, direction, scale):
        """
        Move the camera view along a direction (in the camera frame).

        Args:
            direction (3-array): direction vector for where to move camera in camera frame
            scale (float): how much to move along that direction

        Returns:
            2-tuple:
                - 3-array: updated (x,y,z) camera position
                - 4-array: (w,x,y,z) camera quaternion read from the mocap body
        """
        # current camera rotation + pos
        camera_pos = np.array(self.env.sim.data.get_mocap_pos(self.mover_body_name))
        camera_quat = self.env.sim.data.get_mocap_quat(self.mover_body_name)
        camera_rot = T.quat2mat(T.convert_quat(camera_quat, to="xyzw"))

        # move along camera frame axis and set new position
        camera_pos += scale * camera_rot.dot(direction)
        self.set_camera_pose(pos=camera_pos)

        return camera_pos, camera_quat
Example 27
    def _get_geom_attrs(self):
        """
        Creates geom elements that will be passed to superclass CompositeObject constructor

        Returns:
            dict: args to be used by CompositeObject to generate geoms
        """
        full_size = np.array((
            self.body_half_size,
            self.body_half_size + self.handle_length * 2,
            self.body_half_size,
        ))
        # Initialize dict of obj args that we'll pass to the CompositeObject constructor
        base_args = {
            "total_size": full_size / 2.0,
            "name": self.name,
            "locations_relative_to_center": True,
            "obj_types": "all",
        }
        site_attrs = []
        obj_args = {}

        # Initialize geom lists
        self._handle0_geoms = []
        self._handle1_geoms = []

        # Add main pot body
        # Base geom
        name = f"base"
        self.pot_base = [name]
        add_to_dict(
            dic=obj_args,
            geom_types="box",
            geom_locations=(0, 0,
                            -self.body_half_size[2] + self.thickness / 2),
            geom_quats=(1, 0, 0, 0),
            geom_sizes=np.array([
                self.body_half_size[0], self.body_half_size[1],
                self.thickness / 2
            ]),
            geom_names=name,
            geom_rgbas=None if self.use_texture else self.rgba_body,
            geom_materials="pot_mat" if self.use_texture else None,
            geom_frictions=None,
            density=self.density,
        )

        # Walls
        x_off = np.array([
            0, -(self.body_half_size[0] - self.thickness / 2), 0,
            self.body_half_size[0] - self.thickness / 2
        ])
        y_off = np.array([
            -(self.body_half_size[1] - self.thickness / 2), 0,
            self.body_half_size[1] - self.thickness / 2, 0
        ])
        w_vals = np.array([
            self.body_half_size[1], self.body_half_size[0],
            self.body_half_size[1], self.body_half_size[0]
        ])
        r_vals = np.array([np.pi / 2, 0, -np.pi / 2, np.pi])
        for i, (x, y, w, r) in enumerate(zip(x_off, y_off, w_vals, r_vals)):
            add_to_dict(
                dic=obj_args,
                geom_types="box",
                geom_locations=(x, y, 0),
                geom_quats=T.convert_quat(T.axisangle2quat(np.array([0, 0, r])), to="wxyz"),
                geom_sizes=np.array(
                    [self.thickness / 2, w, self.body_half_size[2]]),
                geom_names=f"body{i}",
                geom_rgbas=None if self.use_texture else self.rgba_body,
                geom_materials="pot_mat" if self.use_texture else None,
                geom_frictions=None,
                density=self.density,
            )

        # Add handles
        main_bar_size = np.array([
            self.handle_width / 2 + self.handle_radius,
            self.handle_radius,
            self.handle_radius,
        ])
        side_bar_size = np.array(
            [self.handle_radius, self.handle_length / 2, self.handle_radius])
        handle_z = self.body_half_size[2] - self.handle_radius
        for i, (g_list, handle_side, rgba) in enumerate(
                zip([self._handle0_geoms, self._handle1_geoms], [1.0, -1.0],
                    [self.rgba_handle_0, self.rgba_handle_1])):
            handle_center = np.array(
                (0,
                 handle_side * (self.body_half_size[1] + self.handle_length),
                 handle_z))
            # Solid handle case
            if self.solid_handle:
                name = f"handle{i}"
                g_list.append(name)
                add_to_dict(
                    dic=obj_args,
                    geom_types="box",
                    geom_locations=handle_center,
                    geom_quats=(1, 0, 0, 0),
                    geom_sizes=np.array([
                        self.handle_width / 2, self.handle_length / 2,
                        self.handle_radius
                    ]),
                    geom_names=name,
                    geom_rgbas=None if self.use_texture else rgba,
                    geom_materials=f"handle{i}_mat"
                    if self.use_texture else None,
                    geom_frictions=(self.handle_friction, 0.005, 0.0001),
                    density=self.density,
                )
            # Hollow handle case
            else:
                # Center bar
                name = f"handle{i}_c"
                g_list.append(name)
                add_to_dict(
                    dic=obj_args,
                    geom_types="box",
                    geom_locations=handle_center,
                    geom_quats=(1, 0, 0, 0),
                    geom_sizes=main_bar_size,
                    geom_names=name,
                    geom_rgbas=None if self.use_texture else rgba,
                    geom_materials=f"handle{i}_mat"
                    if self.use_texture else None,
                    geom_frictions=(self.handle_friction, 0.005, 0.0001),
                    density=self.density,
                )
                # Side bars
                for bar_side, suffix in zip([-1.0, 1.0], ["-", "+"]):
                    name = f"handle{i}_{suffix}"
                    g_list.append(name)
                    add_to_dict(
                        dic=obj_args,
                        geom_types="box",
                        geom_locations=(
                            bar_side * self.handle_width / 2,
                            handle_side *
                            (self.body_half_size[1] + self.handle_length / 2),
                            handle_z,
                        ),
                        geom_quats=(1, 0, 0, 0),
                        geom_sizes=side_bar_size,
                        geom_names=name,
                        geom_rgbas=None if self.use_texture else rgba,
                        geom_materials=f"handle{i}_mat"
                        if self.use_texture else None,
                        geom_frictions=(self.handle_friction, 0.005, 0.0001),
                        density=self.density,
                    )
            # Add relevant site
            handle_site = self.get_site_attrib_template()
            handle_name = f"handle{i}"
            handle_site.update({
                "name": handle_name,
                "pos": array_to_string(handle_center - handle_side * np.array([0, 0.005, 0])),
                "size": "0.005",
                "rgba": rgba,
            })
            site_attrs.append(handle_site)
            # Add to important sites
            self._important_sites[f"handle{i}"] = self.naming_prefix + handle_name

        # Add pot body site
        pot_site = self.get_site_attrib_template()
        center_name = "center"
        pot_site.update({
            "name": center_name,
            "size": "0.005",
        })
        site_attrs.append(pot_site)
        # Add to important sites
        self._important_sites["center"] = self.naming_prefix + center_name

        # Add back in base args and site args
        obj_args.update(base_args)
        obj_args["sites"] = site_attrs  # All sites are part of main (top) body

        # Return this dict
        return obj_args
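The four walls are thin boxes centered half a thickness inside each edge of the pot and yawed (via r_vals) so their long axis runs along that edge. A quick check that the offsets land on the perimeter, with hypothetical half-sizes:

import numpy as np

half = np.array([0.1, 0.15])   # body half-sizes in x and y (assumed values)
thick = 0.01
x_off = np.array([0, -(half[0] - thick / 2), 0, half[0] - thick / 2])
y_off = np.array([-(half[1] - thick / 2), 0, half[1] - thick / 2, 0])
print(np.stack([x_off, y_off], axis=1))  # one (x, y) wall center per side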
Example 28
    def pot_quat(obs_cache):
        return T.convert_quat(self.sim.data.body_xquat[self.pot_body_id], to="xyzw")
Example 29
    def eef_quat(obs_cache):
        return T.convert_quat(
            self.sim.data.get_body_xquat(self.robot_model.eef_name), to="xyzw")

    def cube_quat(obs_cache):
        return convert_quat(
            np.array(self.sim.data.body_xquat[self.cube_body_id]), to="xyzw")