def _load_model(self): """ Loads an xml model, puts it in self.model """ super()._load_model() self.mujoco_robot.set_base_xpos([0, 0, 0]) # load model for table top workspace self.mujoco_arena = EmptyArena() # if self.use_indicator_object: # self.mujoco_arena.add_pos_indicator() # The sawyer robot has a pedestal, we want to align it with the table # TODO (chongyi zheng) # self.mujoco_arena.set_origin([0.16 + self.table_full_size[0] / 2, 0, 0]) # initialize objects of interest # TODO (chongyi zheng) # cube = BoxObject( # size_min=[0.020, 0.020, 0.020], # [0.015, 0.015, 0.015], # size_max=[0.022, 0.022, 0.022], # [0.018, 0.018, 0.018]) # rgba=[1, 0, 0, 1], # ) # self.mujoco_objects = OrderedDict([("cube", cube)]) # self.mujoco_objects = OrderedDict([]) # TODO (chongyi zheng) self.model = MujocoWorldBase() self.model.merge(self.mujoco_arena) self.model.merge(self.mujoco_robot)
def _load_model(self): """ Loads the peg and the hole models. """ super()._load_model() self.mujoco_robot.set_base_xpos([0, 0, 0]) # Add arena and robot self.model = MujocoWorldBase() self.arena = EmptyArena() if self.use_indicator_object: self.arena.add_pos_indicator() self.model.merge(self.arena) self.model.merge(self.mujoco_robot) # Load hole object self.hole_obj = self.hole.get_collision(name="hole", site=True) self.hole_obj.set("quat", "0 0 0.707 0.707") self.hole_obj.set("pos", "0.11 0 0.18") self.model.merge_asset(self.hole) self.model.worldbody.find(".//body[@name='left_hand']").append( self.hole_obj) # Load cylinder object self.cyl_obj = self.cylinder.get_collision(name="cylinder", site=True) self.cyl_obj.set("pos", "0 0 0.15") self.model.merge_asset(self.cylinder) self.model.worldbody.find(".//body[@name='right_hand']").append( self.cyl_obj) self.model.worldbody.find(".//geom[@name='cylinder']").set( "rgba", "0 1 0 1")
def drop_cube_on_body_demo():
    world = MujocoWorldBase()
    arena = EmptyArena()
    arena.set_origin([0, 0, 0])
    world.merge(arena)

    soft_torso = SoftTorsoObject()
    obj = soft_torso.get_collision()
    box = BoxObject()
    box_obj = box.get_collision()

    obj.append(new_joint(name='soft_torso_free_joint', type='free'))
    box_obj.append(new_joint(name='box_free_joint', type='free'))

    world.merge_asset(soft_torso)
    world.worldbody.append(obj)
    world.worldbody.append(box_obj)

    # Place torso on ground
    collision_soft_torso = world.worldbody.find("./body")
    collision_soft_torso.set("pos", array_to_string(np.array([-0.1, 0, 0.1])))

    model = world.get_model(mode="mujoco_py")

    sim = MjSim(model)
    viewer = MjViewer(sim)

    for _ in range(10000):
        sim.step()
        viewer.render()
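# The demo above assumes its imports are already in scope. A possible preamble and
# entry point is sketched below; the import path of SoftTorsoObject varies between
# robosuite versions, so treat that line as an assumption rather than a fixed API.
import numpy as np
from mujoco_py import MjSim, MjViewer

from robosuite.models import MujocoWorldBase
from robosuite.models.arenas import EmptyArena
from robosuite.models.objects import BoxObject, SoftTorsoObject  # SoftTorsoObject path is an assumption
from robosuite.utils.mjcf_utils import array_to_string, new_joint

if __name__ == "__main__":
    drop_cube_on_body_demo()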
def _load_model(self): """ Loads the arena and pot object. """ super()._load_model() self.mujoco_robot.set_base_xpos([0, 0, 0]) self.mujoco_objects["Goal"].set_goal_xpos(self.goal_offset[0], self.goal_offset[1]) # load model for table top workspace self.model = MujocoWorldBase() self.mujoco_arena = EmptyArena() self.model = DoorTask( self.mujoco_arena, self.mujoco_robot, self.mujoco_objects, ) self.model.place_objects([0, 0, 0], [1, 0, 0, 0])
class JuggleEnv:
    def __init__(self):
        self.control_freq: float = 50.0
        self.control_timestep: float = 1.0 / self.control_freq
        self.viewer = None
        self.horizon = 1000
        self.target = np.array([0.8, 0.0, 1.9])

        # load model
        self.robot: Robot = None
        self.arena: Arena = None
        self.pingpong: MujocoGeneratedObject = None
        self.model: MujocoWorldBase = None
        self._load_model()

        # initialize simulation
        self.mjpy_model = None
        self.sim: MjSim = None
        self.model_timestep: float = 0.0
        self._initialize_sim()

        # reset robot, object and internal variables
        self.cur_time: float = 0.0
        self.timestep: int = 0
        self.done: bool = False
        self._pingpong_body_id: int = -1
        self._paddle_body_id: int = -1
        self._reset_internal()

        # internal variables for scoring
        self._below_plane = False
        self.plane_height = 1.5

    def _load_model(self):
        # Load the desired controller's default config as a dict
        controller_config = load_controller_config(
            default_controller="JOINT_VELOCITY")
        controller_config["output_max"] = 1.0
        controller_config["output_min"] = -1.0
        robot_noise = {"magnitude": [0.05] * 7, "type": "gaussian"}
        self.robot = SingleArm(
            robot_type="IIWA",
            idn=0,
            controller_config=controller_config,
            initial_qpos=[0.0, 0.7, 0.0, -1.4, 0.0, -0.56, 0.0],
            initialization_noise=robot_noise,
            gripper_type="PaddleGripper",
            gripper_visualization=True,
            control_freq=self.control_freq)
        self.robot.load_model()
        self.robot.robot_model.set_base_xpos([0, 0, 0])

        self.arena = EmptyArena()
        self.arena.set_origin([0.8, 0, 0])

        self.pingpong = BallObject(name="pingpong",
                                   size=[0.02],
                                   rgba=[0.8, 0.8, 0, 1],
                                   solref=[0.1, 0.03],
                                   solimp=[0, 0, 1],
                                   density=100)
        pingpong_model = self.pingpong.get_collision()
        pingpong_model.append(
            new_joint(name="pingpong_free_joint", type="free"))
        pingpong_model.set("pos", "0.8 0 2.0")

        # merge into one
        self.model = MujocoWorldBase()
        self.model.merge(self.robot.robot_model)
        self.model.merge(self.arena)
        self.model.worldbody.append(pingpong_model)

    def _initialize_sim(self):
        # if we have an xml string, use that to create the sim.
        # Otherwise, use the local model
        self.mjpy_model = self.model.get_model(mode="mujoco_py")

        # Create the simulation instance and run a single step to make sure
        # changes have propagated through sim state
        self.sim = MjSim(self.mjpy_model)
        self.sim.step()
        self.robot.reset_sim(self.sim)
        self.model_timestep = self.sim.model.opt.timestep

    def _reset_internal(self):
        # reset robot
        self.robot.setup_references()
        self.robot.reset(deterministic=False)

        # reset pingpong
        pingpong_pos = self.target + np.random.rand(3) * 0.08 - 0.04
        pingpong_quat = np.array([1.0, 0.0, 0.0, 0.0])
        self.sim.data.set_joint_qpos(
            "pingpong_free_joint",
            np.concatenate([pingpong_pos, pingpong_quat]))

        # get handles for important parts
        self._pingpong_body_id = self.sim.model.body_name2id("pingpong")
        self._paddle_body_id = self.sim.model.body_name2id(
            "gripper0_paddle_body")

        # Setup sim time based on control frequency
        self.cur_time = 0
        self.timestep = 0
        self.done = False

    def reset(self):
        self.sim.reset()
        self._reset_internal()
        self.sim.forward()
        return self._get_observation()

    def _get_observation(self):
        di = OrderedDict()

        # get robot observation
        di = self.robot.get_observations(di)

        # get pingpong observation
        pingpong_pos = np.array(
            self.sim.data.body_xpos[self._pingpong_body_id])
        di["pingpong_pos"] = pingpong_pos
        return di

    def step(self, action: np.ndarray):
        if self.done:
            raise ValueError("executing action in terminated episode")

        policy_step = True
        score = 0.0
        for _ in range(int(self.control_timestep / self.model_timestep)):
            self.sim.forward()
            self.robot.control(action=action, policy_step=policy_step)
            # self.sim.data.ctrl[:] = action*5.0
            self.sim.step()
            policy_step = False

            # check if the ball passes the scoring plane
            h = self.sim.data.body_xpos[self._pingpong_body_id][2]
            self._below_plane |= h < self.plane_height
            if self._below_plane and h > self.plane_height:
                score = 1.0
                self._below_plane = False

        self.timestep += 1
        self.cur_time += self.control_timestep

        observation = self._get_observation()
        dist_xy = np.linalg.norm(
            (observation["robot0_eef_pos"] - observation["pingpong_pos"])[:2])
        # paddle_height = observation["robot0_eef_pos"][2]
        self.done = self.timestep >= self.horizon or dist_xy > 0.2
        reward = score  # + 0 * (0.2 - dist_xy)
        return observation, reward, self.done, {}

    def render(self, mode="human"):
        if mode == "human":
            self._get_viewer().render()
        elif mode == "rgb_array":
            img = self.sim.render(1920, 1080)
            return img[::-1, :, ::-1]

    def _get_viewer(self):
        if self.viewer is None:
            self.viewer = MjViewer(self.sim)
            self.viewer.vopt.geomgroup[0] = 0
            self.viewer._hide_overlay = True
        return self.viewer

    def close(self):
        self._destroy_viewer()

    def _destroy_viewer(self):
        if self.viewer is not None:
            glfw.destroy_window(self.viewer.window)
            self.viewer = None

    def seed(self):
        pass
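# A minimal driver sketch for JuggleEnv (not part of the class above), assuming the
# robosuite names it relies on (SingleArm, EmptyArena, BallObject, new_joint,
# load_controller_config, MjSim, MjViewer, glfw) are imported in this module.
# The 7-dimensional random action matches the IIWA's JOINT_VELOCITY controller.
if __name__ == "__main__":
    env = JuggleEnv()
    obs = env.reset()
    done = False
    while not done:
        action = np.random.uniform(-0.1, 0.1, size=7)  # small random joint velocities
        obs, reward, done, info = env.step(action)
        env.render()
    env.close()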
from robosuite.models import MujocoWorldBase

world = MujocoWorldBase()

from robosuite.models.robots import Panda

mujoco_robot = Panda()

from robosuite.models.grippers import gripper_factory

gripper = gripper_factory('PandaGripper')
# gripper.hide_visualization()  # this doesn't work
mujoco_robot.add_gripper(gripper)

mujoco_robot.set_base_xpos([0, 0, 0])
world.merge(mujoco_robot)

from robosuite.models.arenas import TableArena

mujoco_arena = TableArena()
mujoco_arena.set_origin([0.8, 0, 0])
world.merge(mujoco_arena)

from robosuite.models.objects import BallObject
from robosuite.utils.mjcf_utils import new_joint

sphere = BallObject(name="sphere",
                    size=[0.04],
                    rgba=[0, 0.5, 0.5, 1]).get_obj()
sphere.set('pos', '1.0 0 1.0')
world.worldbody.append(sphere)
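# Follow-up sketch (not part of the snippet above): the usual next step is to compile
# the merged world into a mujoco_py model and run a passive simulation, mirroring the
# other demos in this collection. Treat the loop length and zero actuation as illustrative.
model = world.get_model(mode="mujoco_py")

from mujoco_py import MjSim, MjViewer

sim = MjSim(model)
viewer = MjViewer(sim)

for _ in range(10000):
    sim.data.ctrl[:] = 0  # no actuation; the arm and the free sphere just respond to gravity
    sim.step()
    viewer.render()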
def _load_model(self): """ Loads an xml model, puts it in self.model """ super()._load_model() # Adjust base pose(s) accordingly if self.env_configuration == "bimanual": xpos = self.robots[0].robot_model.base_xpos_offset["empty"] self.robots[0].robot_model.set_base_xpos(xpos) else: if self.env_configuration == "single-arm-opposed": # Set up robots facing towards each other by rotating them from their default position for robot, rotation in zip(self.robots, (np.pi / 2, -np.pi / 2)): xpos = robot.robot_model.base_xpos_offset["empty"] rot = np.array((0, 0, rotation)) xpos = T.euler2mat(rot) @ np.array(xpos) robot.robot_model.set_base_xpos(xpos) robot.robot_model.set_base_ori(rot) else: # "single-arm-parallel" configuration setting # Set up robots parallel to each other but offset from the center for robot, offset in zip(self.robots, (-0.25, 0.25)): xpos = robot.robot_model.base_xpos_offset["empty"] xpos = np.array(xpos) + np.array((0, offset, 0)) robot.robot_model.set_base_xpos(xpos) # Add arena and robot self.model = MujocoWorldBase() self.mujoco_arena = EmptyArena() if self.use_indicator_object: self.mujoco_arena.add_pos_indicator() self.model.merge(self.mujoco_arena) for robot in self.robots: self.model.merge(robot.robot_model) # initialize objects of interest self.hole = PlateWithHoleObject(name="hole", ) tex_attrib = { "type": "cube", } mat_attrib = { "texrepeat": "1 1", "specular": "0.4", "shininess": "0.1", } greenwood = CustomMaterial( texture="WoodGreen", tex_name="greenwood", mat_name="greenwood_mat", tex_attrib=tex_attrib, mat_attrib=mat_attrib, ) self.peg = CylinderObject( name="peg", size_min=(self.peg_radius[0], self.peg_length), size_max=(self.peg_radius[1], self.peg_length), material=greenwood, rgba=[0, 1, 0, 1], ) # Load hole object self.hole_obj = self.hole.get_collision(site=True) self.hole_obj.set("quat", "0 0 0.707 0.707") self.hole_obj.set("pos", "0.11 0 0.17") self.model.merge_asset(self.hole) # Load peg object self.peg_obj = self.peg.get_collision(site=True) self.peg_obj.set("pos", array_to_string((0, 0, self.peg_length))) self.model.merge_asset(self.peg) # Depending on env configuration, append appropriate objects to arms if self.env_configuration == "bimanual": self.model.worldbody.find(".//body[@name='{}']".format( self.robots[0].robot_model.eef_name["left"])).append( self.hole_obj) self.model.worldbody.find(".//body[@name='{}']".format( self.robots[0].robot_model.eef_name["right"])).append( self.peg_obj) else: self.model.worldbody.find(".//body[@name='{}']".format( self.robots[1].robot_model.eef_name)).append(self.hole_obj) self.model.worldbody.find(".//body[@name='{}']".format( self.robots[0].robot_model.eef_name)).append(self.peg_obj)
class TwoArmPegInHole(RobotEnv): """ This class corresponds to the peg-in-hole task for two robot arms. Args: robots (str or list of str): Specification for specific robot arm(s) to be instantiated within this env (e.g: "Sawyer" would generate one arm; ["Panda", "Panda", "Sawyer"] would generate three robot arms) Note: Must be either 2 single single-arm robots or 1 bimanual robot! env_configuration (str): Specifies how to position the robots within the environment. Can be either: :`'bimanual'`: Only applicable for bimanual robot setups. Sets up the (single) bimanual robot on the -x side of the table :`'single-arm-parallel'`: Only applicable for multi single arm setups. Sets up the (two) single armed robots next to each other on the -x side of the table :`'single-arm-opposed'`: Only applicable for multi single arm setups. Sets up the (two) single armed robots opposed from each others on the opposite +/-y sides of the table (Default option) controller_configs (str or list of dict): If set, contains relevant controller parameters for creating a custom controller. Else, uses the default controller for this specific task. Should either be single dict if same controller is to be used for all robots or else it should be a list of the same length as "robots" param gripper_types (str or list of str): type of gripper, used to instantiate gripper models from gripper factory. For this environment, setting a value other than the default (None) will raise an AssertionError, as this environment is not meant to be used with any gripper at all. gripper_visualizations (bool or list of bool): True if using gripper visualization. Useful for teleoperation. Should either be single bool if gripper visualization is to be used for all robots or else it should be a list of the same length as "robots" param initialization_noise (dict or list of dict): Dict containing the initialization noise parameters. The expected keys and corresponding value types are specified below: :`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial joint positions. Setting this value to `None` or 0.0 results in no noise being applied. If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied, If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range :`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform" Should either be single dict if same noise value is to be used for all robots or else it should be a list of the same length as "robots" param :Note: Specifying "default" will automatically use the default noise settings. Specifying None will automatically create the required dict with "magnitude" set to 0.0. use_camera_obs (bool or list of bool): if True, every observation for a specific robot includes a rendered image. Should either be single bool if camera obs value is to be used for all robots or else it should be a list of the same length as "robots" param use_object_obs (bool): if True, include object (cube) information in the observation. reward_scale (None or float): Scales the normalized reward function by the amount specified. If None, environment reward remains unnormalized reward_shaping (bool): if True, use dense rewards. peg_radius (2-tuple): low and high limits of the (uniformly sampled) radius of the peg peg_length (float): length of the peg use_indicator_object (bool): if True, sets up an indicator object that is useful for debugging. 
has_renderer (bool): If true, render the simulation state in a viewer instead of headless mode. has_offscreen_renderer (bool): True if using off-screen rendering render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None' will result in the default angle being applied, which is useful as it can be dragged / panned by the user using the mouse render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise. render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise. control_freq (float): how many control signals to receive in every second. This sets the amount of simulation time that passes between every action input. horizon (int): Every episode lasts for exactly @horizon timesteps. ignore_done (bool): True if never terminating the environment (ignore @horizon). hard_reset (bool): If True, re-loads model, sim, and render object upon a reset call, else, only calls sim.reset and resets all robosuite-internal variables camera_names (str or list of str): name of camera to be rendered. Should either be single str if same name is to be used for all cameras' rendering or else it should be a list of cameras to render. :Note: At least one camera must be specified if @use_camera_obs is True. :Note: To render all robots' cameras of a certain type (e.g.: "robotview" or "eye_in_hand"), use the convention "all-{name}" (e.g.: "all-robotview") to automatically render all camera images from each robot's camera list). camera_heights (int or list of int): height of camera frame. Should either be single int if same height is to be used for all cameras' frames or else it should be a list of the same length as "camera names" param. camera_widths (int or list of int): width of camera frame. Should either be single int if same width is to be used for all cameras' frames or else it should be a list of the same length as "camera names" param. camera_depths (bool or list of bool): True if rendering RGB-D, and RGB otherwise. Should either be single bool if same depth setting is to be used for all cameras or else it should be a list of the same length as "camera names" param. Raises: AssertionError: [Gripper specified] ValueError: [Invalid number of robots specified] ValueError: [Invalid env configuration] ValueError: [Invalid robots for specified env configuration] """ def __init__( self, robots, env_configuration="single-arm-opposed", controller_configs=None, gripper_types=None, gripper_visualizations=False, initialization_noise="default", use_camera_obs=True, use_object_obs=True, reward_scale=1.0, reward_shaping=False, peg_radius=(0.015, 0.03), peg_length=0.13, use_indicator_object=False, has_renderer=False, has_offscreen_renderer=True, render_camera="frontview", render_collision_mesh=False, render_visual_mesh=True, control_freq=10, horizon=1000, ignore_done=False, hard_reset=True, camera_names="agentview", camera_heights=256, camera_widths=256, camera_depths=False, ): # First, verify that correct number of robots are being inputted self.env_configuration = env_configuration self._check_robot_configuration(robots) # Assert that the gripper type is None assert gripper_types is None, "Tried to specify gripper other than None in TwoArmPegInHole environment!" 
# reward configuration self.reward_scale = reward_scale self.reward_shaping = reward_shaping # whether to use ground-truth object states self.use_object_obs = use_object_obs # Save peg specs self.peg_radius = peg_radius self.peg_length = peg_length super().__init__( robots=robots, controller_configs=controller_configs, gripper_types=gripper_types, gripper_visualizations=gripper_visualizations, initialization_noise=initialization_noise, use_camera_obs=use_camera_obs, use_indicator_object=use_indicator_object, has_renderer=has_renderer, has_offscreen_renderer=has_offscreen_renderer, render_camera=render_camera, render_collision_mesh=render_collision_mesh, render_visual_mesh=render_visual_mesh, control_freq=control_freq, horizon=horizon, ignore_done=ignore_done, hard_reset=hard_reset, camera_names=camera_names, camera_heights=camera_heights, camera_widths=camera_widths, camera_depths=camera_depths, ) def reward(self, action): """ Reward function for the task. Sparse un-normalized reward: - a discrete reward of 5.0 is provided if the peg is inside the plate's hole - Note that we enforce that it's inside at an appropriate angle (cos(theta) > 0.95). Un-normalized summed components if using reward shaping: - Reaching: in [0, 1], to encourage the arms to approach each other - Perpendicular Distance: in [0,1], to encourage the arms to approach each other - Parallel Distance: in [0,1], to encourage the arms to approach each other - Alignment: in [0, 1], to encourage having the right orientation between the peg and hole. - Placement: in {0, 1}, nonzero if the peg is in the hole with a relatively correct alignment Note that the final reward is normalized and scaled by reward_scale / 5.0 as well so that the max score is equal to reward_scale """ reward = 0 # Right location and angle if self._check_success(): reward = 1.0 # use a shaping reward if self.reward_shaping: # Grab relevant values t, d, cos = self._compute_orientation() # reaching reward hole_pos = self.sim.data.body_xpos[self.hole_body_id] gripper_site_pos = self.sim.data.body_xpos[self.peg_body_id] dist = np.linalg.norm(gripper_site_pos - hole_pos) reaching_reward = 1 - np.tanh(1.0 * dist) reward += reaching_reward # Orientation reward reward += 1 - np.tanh(d) reward += 1 - np.tanh(np.abs(t)) reward += cos # if we're not reward shaping, we need to scale our sparse reward so that the max reward is identical # to its dense version else: reward *= 5.0 if self.reward_scale is not None: reward *= self.reward_scale / 5.0 return reward def _load_model(self): """ Loads an xml model, puts it in self.model """ super()._load_model() # Adjust base pose(s) accordingly if self.env_configuration == "bimanual": xpos = self.robots[0].robot_model.base_xpos_offset["empty"] self.robots[0].robot_model.set_base_xpos(xpos) else: if self.env_configuration == "single-arm-opposed": # Set up robots facing towards each other by rotating them from their default position for robot, rotation in zip(self.robots, (np.pi / 2, -np.pi / 2)): xpos = robot.robot_model.base_xpos_offset["empty"] rot = np.array((0, 0, rotation)) xpos = T.euler2mat(rot) @ np.array(xpos) robot.robot_model.set_base_xpos(xpos) robot.robot_model.set_base_ori(rot) else: # "single-arm-parallel" configuration setting # Set up robots parallel to each other but offset from the center for robot, offset in zip(self.robots, (-0.25, 0.25)): xpos = robot.robot_model.base_xpos_offset["empty"] xpos = np.array(xpos) + np.array((0, offset, 0)) robot.robot_model.set_base_xpos(xpos) # Add arena and robot self.model = 
MujocoWorldBase() self.mujoco_arena = EmptyArena() if self.use_indicator_object: self.mujoco_arena.add_pos_indicator() self.model.merge(self.mujoco_arena) for robot in self.robots: self.model.merge(robot.robot_model) # initialize objects of interest self.hole = PlateWithHoleObject(name="hole", ) tex_attrib = { "type": "cube", } mat_attrib = { "texrepeat": "1 1", "specular": "0.4", "shininess": "0.1", } greenwood = CustomMaterial( texture="WoodGreen", tex_name="greenwood", mat_name="greenwood_mat", tex_attrib=tex_attrib, mat_attrib=mat_attrib, ) self.peg = CylinderObject( name="peg", size_min=(self.peg_radius[0], self.peg_length), size_max=(self.peg_radius[1], self.peg_length), material=greenwood, rgba=[0, 1, 0, 1], ) # Load hole object self.hole_obj = self.hole.get_collision(site=True) self.hole_obj.set("quat", "0 0 0.707 0.707") self.hole_obj.set("pos", "0.11 0 0.17") self.model.merge_asset(self.hole) # Load peg object self.peg_obj = self.peg.get_collision(site=True) self.peg_obj.set("pos", array_to_string((0, 0, self.peg_length))) self.model.merge_asset(self.peg) # Depending on env configuration, append appropriate objects to arms if self.env_configuration == "bimanual": self.model.worldbody.find(".//body[@name='{}']".format( self.robots[0].robot_model.eef_name["left"])).append( self.hole_obj) self.model.worldbody.find(".//body[@name='{}']".format( self.robots[0].robot_model.eef_name["right"])).append( self.peg_obj) else: self.model.worldbody.find(".//body[@name='{}']".format( self.robots[1].robot_model.eef_name)).append(self.hole_obj) self.model.worldbody.find(".//body[@name='{}']".format( self.robots[0].robot_model.eef_name)).append(self.peg_obj) def _get_reference(self): """ Sets up references to important components. A reference is typically an index or a list of indices that point to the corresponding elements in a flatten array, which is how MuJoCo stores physical simulation data. """ super()._get_reference() # Additional object references from this env self.hole_body_id = self.sim.model.body_name2id("hole") self.peg_body_id = self.sim.model.body_name2id("peg") def _reset_internal(self): """ Resets simulation internal configurations. """ super()._reset_internal() def _get_observation(self): """ Returns an OrderedDict containing observations [(name_string, np.array), ...]. Important keys: `'robot-state'`: contains robot-centric information. `'object-state'`: requires @self.use_object_obs to be True. Contains object-centric information. `'image'`: requires @self.use_camera_obs to be True. Contains a rendered frame from the simulation. `'depth'`: requires @self.use_camera_obs and @self.camera_depth to be True. 
Contains a rendered depth map from the simulation Returns: OrderedDict: Observations from the environment """ di = super()._get_observation() # low-level object information if self.use_object_obs: # Get robot prefix if self.env_configuration == "bimanual": pr0 = self.robots[0].robot_model.naming_prefix + "left_" pr1 = self.robots[0].robot_model.naming_prefix + "right_" else: pr0 = self.robots[0].robot_model.naming_prefix pr1 = self.robots[1].robot_model.naming_prefix # position and rotation of peg and hole hole_pos = np.array(self.sim.data.body_xpos[self.hole_body_id]) hole_quat = T.convert_quat( self.sim.data.body_xquat[self.hole_body_id], to="xyzw") di["hole_pos"] = hole_pos di["hole_quat"] = hole_quat peg_pos = np.array(self.sim.data.body_xpos[self.peg_body_id]) peg_quat = T.convert_quat( self.sim.data.body_xquat[self.peg_body_id], to="xyzw") di["peg_to_hole"] = peg_pos - hole_pos di["peg_quat"] = peg_quat # Relative orientation parameters t, d, cos = self._compute_orientation() di["angle"] = cos di["t"] = t di["d"] = d di["object-state"] = np.concatenate([ di["hole_pos"], di["hole_quat"], di["peg_to_hole"], di["peg_quat"], [di["angle"]], [di["t"]], [di["d"]], ]) return di def _check_success(self): """ Check if peg is successfully aligned and placed within the hole Returns: bool: True if peg is placed in hole correctly """ t, d, cos = self._compute_orientation() return d < 0.06 and -0.12 <= t <= 0.14 and cos > 0.95 def _compute_orientation(self): """ Helper function to return the relative positions between the hole and the peg. In particular, the intersection of the line defined by the peg and the plane defined by the hole is computed; the parallel distance, perpendicular distance, and angle are returned. Returns: 3-tuple: - (float): parallel distance - (float): perpendicular distance - (float): angle """ peg_mat = self.sim.data.body_xmat[self.peg_body_id] peg_mat.shape = (3, 3) peg_pos = self.sim.data.body_xpos[self.peg_body_id] hole_pos = self.sim.data.body_xpos[self.hole_body_id] hole_mat = self.sim.data.body_xmat[self.hole_body_id] hole_mat.shape = (3, 3) v = peg_mat @ np.array([0, 0, 1]) v = v / np.linalg.norm(v) center = hole_pos + hole_mat @ np.array([0.1, 0, 0]) t = (center - peg_pos) @ v / (np.linalg.norm(v)**2) d = np.linalg.norm(np.cross(v, peg_pos - center)) / np.linalg.norm(v) hole_normal = hole_mat @ np.array([0, 0, 1]) return ( t, d, abs( np.dot(hole_normal, v) / np.linalg.norm(hole_normal) / np.linalg.norm(v)), ) def _peg_pose_in_hole_frame(self): """ A helper function that takes in a named data field and returns the pose of that object in the base frame. 
Returns: np.array: (4,4) matrix corresponding to the pose of the peg in the hole frame """ # World frame peg_pos_in_world = self.sim.data.get_body_xpos("peg") peg_rot_in_world = self.sim.data.get_body_xmat("peg").reshape((3, 3)) peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world) # World frame hole_pos_in_world = self.sim.data.get_body_xpos("hole") hole_rot_in_world = self.sim.data.get_body_xmat("hole").reshape((3, 3)) hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world) world_pose_in_hole = T.pose_inv(hole_pose_in_world) peg_pose_in_hole = T.pose_in_A_to_pose_in_B(peg_pose_in_world, world_pose_in_hole) return peg_pose_in_hole def _check_robot_configuration(self, robots): """ Sanity check to make sure the inputted robots and configuration is acceptable Args: robots (str or list of str): Robots to instantiate within this env """ robots = robots if type(robots) == list or type(robots) == tuple else [ robots ] if self.env_configuration == "single-arm-opposed" or self.env_configuration == "single-arm-parallel": # Specifically two robots should be inputted! is_bimanual = False if type(robots) is not list or len(robots) != 2: raise ValueError( "Error: Exactly two single-armed robots should be inputted " "for this task configuration!") elif self.env_configuration == "bimanual": is_bimanual = True # Specifically one robot should be inputted! if type(robots) is list and len(robots) != 1: raise ValueError( "Error: Exactly one bimanual robot should be inputted " "for this task configuration!") else: # This is an unknown env configuration, print error raise ValueError( "Error: Unknown environment configuration received. Only 'bimanual'," "'single-arm-parallel', and 'single-arm-opposed' are supported. Got: {}" .format(self.env_configuration)) # Lastly, check to make sure all inputted robot names are of their correct type (bimanual / not bimanual) for robot in robots: if check_bimanual(robot) != is_bimanual: raise ValueError( "Error: For {} configuration, expected bimanual check to return {}; " "instead, got {}.".format(self.env_configuration, is_bimanual, check_bimanual(robot)))
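# Hedged usage sketch for the class above: environments like this are normally built
# through robosuite's registry rather than constructed directly. The robot names and
# keyword arguments below are assumptions based on the documented constructor args,
# and suite.make registration by class name is assumed to apply here as well.
import numpy as np
import robosuite as suite

env = suite.make(
    "TwoArmPegInHole",
    robots=["Sawyer", "Panda"],              # two single-arm robots
    env_configuration="single-arm-opposed",  # default documented above
    has_renderer=False,
    has_offscreen_renderer=False,
    use_camera_obs=False,
    reward_shaping=True,
)

obs = env.reset()
for _ in range(10):
    action = np.random.randn(env.action_dim) * 0.1  # small random actions, illustrative only
    obs, reward, done, info = env.step(action)
env.close()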
import robosuite as suite
from robosuite.models import MujocoWorldBase
from robosuite.models.robots import UR5e
from robosuite.models.grippers import gripper_factory
from robosuite.models.arenas import EmptyArena
from mujoco_py import MjSim, MjViewer

world = MujocoWorldBase()

ur5 = UR5e()
gripper = gripper_factory("RobotiqThreeFingerDexterousGripper")
ur5.add_gripper(gripper)
ur5.set_base_xpos([0, 0, 0])
world.merge(ur5)

arena = EmptyArena()
world.merge(arena)

model = world.get_model()
sim = MjSim(model)
viewer = MjViewer(sim)

while True:
    viewer.render()
from robosuite.models import MujocoWorldBase
from robosuite.models import grippers

world = MujocoWorldBase()

#------------------------------------------------------------------------
from robosuite.models.robots import Panda

mujoco_robot = Panda()
mujoco_robot.set_base_xpos([0, 0, 0])  # xyz
mujoco_robot.set_base_ori([0, 0, 0])   # rpy
world.merge(mujoco_robot)  # takes xml/list of xmls

#------------------------------------------------------------------------
from robosuite.models.grippers import gripper_factory, gripper_model

#------------------------------------------------------------------------
from robosuite.models.arenas import TableArena

mujoco_arena = TableArena()
mujoco_arena.set_origin([0.8, 0, 0])
world.merge(mujoco_arena)

#------------------------------------------------------------------------
# must be xml.etree

#------------------------------------------------------------------------
# model, simulate, view
model = world.get_model(mode="mujoco_py")

from mujoco_py import MjSim, MjViewer

sim = MjSim(model)
viewer = MjViewer(sim)
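# The snippet above stops after constructing the viewer. A minimal stepping loop for the
# sim/viewer it builds could look like this (iteration count and zero actuation are illustrative):
for _ in range(5000):
    sim.data.ctrl[:] = 0  # no actuation; the arm settles under gravity
    sim.step()
    viewer.render()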
class BaxterPegInHole(BaxterEnv): """ This class corresponds to the peg in hole task for the Baxter robot. There's a cylinder attached to one gripper and a hole attached to the other one. """ def __init__(self, cylinder_radius=(0.015, 0.03), cylinder_length=0.13, use_object_obs=True, reward_shaping=True, **kwargs): """ Args: cylinder_radius (2-tuple): low and high limits of the (uniformly sampled) radius of the cylinder cylinder_length (float): length of the cylinder use_object_obs (bool): if True, include object information in the observation. reward_shaping (bool): if True, use dense rewards Inherits the Baxter environment; refer to other parameters described there. """ # initialize objects of interest self.hole = PlateWithHoleObject() cylinder_radius = np.random.uniform(0.015, 0.03) self.cylinder = CylinderObject( size_min=(cylinder_radius, cylinder_length), size_max=(cylinder_radius, cylinder_length), ) self.mujoco_objects = OrderedDict() # whether to use ground-truth object states self.use_object_obs = use_object_obs # reward configuration self.reward_shaping = reward_shaping super().__init__(gripper_left=None, gripper_right=None, **kwargs) def _load_model(self): """ Loads the peg and the hole models. """ super()._load_model() self.mujoco_robot.set_base_xpos([0, 0, 0]) # Add arena and robot self.model = MujocoWorldBase() self.arena = EmptyArena() if self.use_indicator_object: self.arena.add_pos_indicator() self.model.merge(self.arena) self.model.merge(self.mujoco_robot) # Load hole object self.hole_obj = self.hole.get_collision(name="hole", site=True) self.hole_obj.set("quat", "0 0 0.707 0.707") self.hole_obj.set("pos", "0.11 0 0.18") self.model.merge_asset(self.hole) self.model.worldbody.find(".//body[@name='left_hand']").append( self.hole_obj) # Load cylinder object self.cyl_obj = self.cylinder.get_collision(name="cylinder", site=True) self.cyl_obj.set("pos", "0 0 0.15") self.model.merge_asset(self.cylinder) self.model.worldbody.find(".//body[@name='right_hand']").append( self.cyl_obj) self.model.worldbody.find(".//geom[@name='cylinder']").set( "rgba", "0 1 0 1") def _get_reference(self): """ Sets up references to important components. A reference is typically an index or a list of indices that point to the corresponding elements in a flattened array, which is how MuJoCo stores physical simulation data. """ super()._get_reference() self.hole_body_id = self.sim.model.body_name2id("hole") self.cyl_body_id = self.sim.model.body_name2id("cylinder") def _reset_internal(self): """ Resets simulation internal configurations. """ super()._reset_internal() def _compute_orientation(self): """ Helper function to return the relative positions between the hole and the peg. In particular, the intersection of the line defined by the peg and the plane defined by the hole is computed; the parallel distance, perpendicular distance, and angle are returned. 
""" cyl_mat = self.sim.data.body_xmat[self.cyl_body_id] cyl_mat.shape = (3, 3) cyl_pos = self.sim.data.body_xpos[self.cyl_body_id] hole_pos = self.sim.data.body_xpos[self.hole_body_id] hole_mat = self.sim.data.body_xmat[self.hole_body_id] hole_mat.shape = (3, 3) v = cyl_mat @ np.array([0, 0, 1]) v = v / np.linalg.norm(v) center = hole_pos + hole_mat @ np.array([0.1, 0, 0]) t = (center - cyl_pos) @ v / (np.linalg.norm(v)**2) d = np.linalg.norm(np.cross(v, cyl_pos - center)) / np.linalg.norm(v) hole_normal = hole_mat @ np.array([0, 0, 1]) return ( t, d, abs( np.dot(hole_normal, v) / np.linalg.norm(hole_normal) / np.linalg.norm(v)), ) def reward(self, action): """ Reward function for the task. The sparse reward is 0 if the peg is outside the hole, and 1 if it's inside. We enforce that it's inside at an appropriate angle (cos(theta) > 0.95). The dense reward has four components. Reaching: in [0, 1], to encourage the arms to get together. Perpendicular and parallel distance: in [0,1], for the same purpose. Cosine of the angle: in [0, 1], to encourage having the right orientation. """ reward = 0 t, d, cos = self._compute_orientation() # Right location and angle if d < 0.06 and t >= -0.12 and t <= 0.14 and cos > 0.95: reward = 1 # use a shaping reward if self.reward_shaping: # reaching reward hole_pos = self.sim.data.body_xpos[self.hole_body_id] gripper_site_pos = self.sim.data.body_xpos[self.cyl_body_id] dist = np.linalg.norm(gripper_site_pos - hole_pos) reaching_reward = 1 - np.tanh(1.0 * dist) reward += reaching_reward # Orientation reward reward += 1 - np.tanh(d) reward += 1 - np.tanh(np.abs(t)) reward += cos return reward def _peg_pose_in_hole_frame(self): """ A helper function that takes in a named data field and returns the pose of that object in the base frame. """ # World frame peg_pos_in_world = self.sim.data.get_body_xpos("cylinder") peg_rot_in_world = self.sim.data.get_body_xmat("cylinder").reshape( (3, 3)) peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world) # World frame hole_pos_in_world = self.sim.data.get_body_xpos("hole") hole_rot_in_world = self.sim.data.get_body_xmat("hole").reshape((3, 3)) hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world) world_pose_in_hole = T.pose_inv(hole_pose_in_world) peg_pose_in_hole = T.pose_in_A_to_pose_in_B(peg_pose_in_world, world_pose_in_hole) return peg_pose_in_hole def _get_observation(self): """ Returns an OrderedDict containing observations [(name_string, np.array), ...]. Important keys: robot-state: contains robot-centric information. object-state: requires @self.use_object_obs to be True. contains object-centric information. image: requires @self.use_camera_obs to be True. contains a rendered frame from the simulation. depth: requires @self.use_camera_obs and @self.camera_depth to be True. 
contains a rendered depth map from the simulation """ di = super()._get_observation() # camera observations if self.use_camera_obs: camera_obs = self.sim.render( camera_name=self.camera_name, width=self.camera_width, height=self.camera_height, depth=self.camera_depth, ) if self.camera_depth: di["image"], di["depth"] = camera_obs else: di["image"] = camera_obs # low-level object information if self.use_object_obs: # position and rotation of cylinder and hole hole_pos = self.sim.data.body_xpos[self.hole_body_id] hole_quat = T.convert_quat( self.sim.data.body_xquat[self.hole_body_id], to="xyzw") di["hole_pos"] = hole_pos di["hole_quat"] = hole_quat cyl_pos = self.sim.data.body_xpos[self.cyl_body_id] cyl_quat = T.convert_quat( self.sim.data.body_xquat[self.cyl_body_id], to="xyzw") di["cyl_to_hole"] = cyl_pos - hole_pos di["cyl_quat"] = cyl_quat # Relative orientation parameters t, d, cos = self._compute_orientation() di["angle"] = cos di["t"] = t di["d"] = d di["object-state"] = np.concatenate([ di["hole_pos"], di["hole_quat"], di["cyl_to_hole"], di["cyl_quat"], [di["angle"]], [di["t"]], [di["d"]], ]) return di def _check_contact(self): """ Returns True if gripper is in contact with an object. """ collision = False contact_geoms = (self.gripper_right.contact_geoms() + self.gripper_left.contact_geoms()) for contact in self.sim.data.contact[:self.sim.data.ncon]: if (self.sim.model.geom_id2name(contact.geom1) in contact_geoms or self.sim.model.geom_id2name( contact.geom2) in contact_geoms): collision = True break return collision def _check_success(self): """ Returns True if task is successfully completed. """ t, d, cos = self._compute_orientation() return d < 0.06 and t >= -0.12 and t <= 0.14 and cos > 0.95
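# The geometry inside _compute_orientation is easier to see in isolation: t is the signed
# distance along the peg/cylinder axis from the peg to the hole center, d is the perpendicular
# distance from the hole center to the peg's axis line, and the last value is |cos(theta)|
# between the peg axis and the hole normal. A standalone numpy sketch with made-up poses:
import numpy as np

peg_pos = np.array([0.0, 0.0, 1.0])
peg_axis = np.array([0.0, 0.1, 1.0])
peg_axis = peg_axis / np.linalg.norm(peg_axis)   # unit direction of the peg

hole_center = np.array([0.02, 0.0, 1.05])
hole_normal = np.array([0.0, 0.0, 1.0])          # unit normal of the hole plane

# parallel distance: how far along the peg axis the hole center lies
t = (hole_center - peg_pos) @ peg_axis
# perpendicular distance from the hole center to the peg's axis line
d = np.linalg.norm(np.cross(peg_axis, peg_pos - hole_center))
# alignment between the peg axis and the hole normal
cos = abs(hole_normal @ peg_axis)

print(t, d, cos)
# success test used by the environment: d < 0.06 and -0.12 <= t <= 0.14 and cos > 0.95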
""" import xml.etree.ElementTree as ET from mujoco_py import MjSim, MjViewer from robosuite.models import MujocoWorldBase from robosuite.models.arenas.table_arena import TableArena from robosuite.models.grippers import PandaGripper, RethinkGripper from robosuite.models.objects import BoxObject from robosuite.utils.mjcf_utils import new_actuator, new_joint if __name__ == "__main__": # start with an empty world world = MujocoWorldBase() # add a table arena = TableArena(table_full_size=(0.4, 0.4, 0.05), table_offset=(0, 0, 0.1), has_legs=False) world.merge(arena) # add a gripper gripper = RethinkGripper() # Create another body with a slider joint to which we'll add this gripper gripper_body = ET.Element("body", name="gripper_base") gripper_body.set("pos", "0 0 0.3") gripper_body.set("quat", "0 0 1 0") # flip z gripper_body.append( new_joint(name="gripper_z_joint",
class SawyerViz(SawyerEnv): """ Sawyer robot arm visualization. """ def __init__( self, gripper_type="TwoFingerGripper", use_camera_obs=True, use_object_obs=True, reward_shaping=False, gripper_visualization=False, has_renderer=False, has_offscreen_renderer=True, render_collision_mesh=False, render_visual_mesh=True, control_freq=10, horizon=1000, ignore_done=False, camera_name="frontview", camera_height=256, camera_width=256, camera_depth=False, ): """ Args: gripper_type (str): type of gripper, used to instantiate gripper models from gripper factory. use_camera_obs (bool): if True, every observation includes a rendered image. use_object_obs (bool): if True, include object (cube) information in the observation. reward_shaping (bool): if True, use dense rewards. gripper_visualization (bool): True if using gripper visualization. Useful for teleoperation. has_renderer (bool): If true, render the simulation state in a viewer instead of headless mode. has_offscreen_renderer (bool): True if using off-screen rendering. render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise. render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise. control_freq (float): how many control signals to receive in every second. This sets the amount of simulation time that passes between every action input. horizon (int): Every episode lasts for exactly @horizon timesteps. ignore_done (bool): True if never terminating the environment (ignore @horizon). camera_name (str): name of camera to be rendered. Must be set if @use_camera_obs is True. camera_height (int): height of camera frame. camera_width (int): width of camera frame. camera_depth (bool): True if rendering RGB-D, and RGB otherwise. """ # settings for table top # TODO (chongyi zheng) # self.table_full_size = table_full_size # self.table_friction = table_friction # whether to use ground-truth object states # self.use_object_obs = use_object_obs # reward configuration self.reward_shaping = reward_shaping # TODO (chongyi zheng) # object placement initializer # if placement_initializer: # self.placement_initializer = placement_initializer # else: # # (chongyi zheng): move the pesky colorful object outside the camera field of view. 
# self.placement_initializer = UniformRandomSampler( # x_range=[-0.03, 0.03], # y_range=[-0.03, 0.03], # ensure_object_boundary_in_range=False, # z_rotation=True, # ) super().__init__( gripper_type=gripper_type, gripper_visualization=gripper_visualization, use_indicator_object=False, has_renderer=has_renderer, has_offscreen_renderer=has_offscreen_renderer, render_collision_mesh=render_collision_mesh, render_visual_mesh=render_visual_mesh, control_freq=control_freq, horizon=horizon, ignore_done=ignore_done, use_camera_obs=use_camera_obs, camera_name=camera_name, camera_height=camera_height, camera_width=camera_width, camera_depth=camera_depth, ) def _load_model(self): """ Loads an xml model, puts it in self.model """ super()._load_model() self.mujoco_robot.set_base_xpos([0, 0, 0]) # load model for table top workspace self.mujoco_arena = EmptyArena() # if self.use_indicator_object: # self.mujoco_arena.add_pos_indicator() # The sawyer robot has a pedestal, we want to align it with the table # TODO (chongyi zheng) # self.mujoco_arena.set_origin([0.16 + self.table_full_size[0] / 2, 0, 0]) # initialize objects of interest # TODO (chongyi zheng) # cube = BoxObject( # size_min=[0.020, 0.020, 0.020], # [0.015, 0.015, 0.015], # size_max=[0.022, 0.022, 0.022], # [0.018, 0.018, 0.018]) # rgba=[1, 0, 0, 1], # ) # self.mujoco_objects = OrderedDict([("cube", cube)]) # self.mujoco_objects = OrderedDict([]) # TODO (chongyi zheng) self.model = MujocoWorldBase() self.model.merge(self.mujoco_arena) self.model.merge(self.mujoco_robot) # task includes arena, robot, and objects of interest # self.model = TableTopTask( # self.mujoco_arena, # self.mujoco_robot, # self.mujoco_objects, # ) # self.model.place_objects() def _get_reference(self): """ Sets up references to important components. A reference is typically an index or a list of indices that point to the corresponding elements in a flatten array, which is how MuJoCo stores physical simulation data. """ # TODO (chongyi zheng) super()._get_reference() # self.cube_body_id = self.sim.model.body_name2id("cube") # self.l_finger_geom_ids = [ # self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms # ] # self.r_finger_geom_ids = [ # self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms # ] # self.cube_geom_id = self.sim.model.geom_name2id("cube") def _reset_internal(self): """ Resets simulation internal configurations. """ super()._reset_internal() # TODO (chongyi zheng) # reset positions of objects # self.model.place_objects() # reset joint positions # init_pos = np.array([-0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 0.6482, 1.9628]) # init_pos += np.random.randn(init_pos.shape[0]) * 0.02 # self.sim.data.qpos[self._ref_joint_pos_indexes] = np.array(init_pos) def reward(self, action=None): """ Reward function for the task. The dense reward has three components. Reaching: in [0, 1], to encourage the arm to reach the cube Grasping: in {0, 0.25}, non-zero if arm is grasping the cube Lifting: in {0, 1}, non-zero if arm has lifted the cube The sparse reward only consists of the lifting component. Args: action (np array): unused for this task Returns: reward (float): the reward """ reward = 0. 
# TODO (chongyi zheng) # # sparse completion reward # if self._check_success(): # reward = 1.0 # # # use a shaping reward # if self.reward_shaping: # # # reaching reward # cube_pos = self.sim.data.body_xpos[self.cube_body_id] # gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id] # dist = np.linalg.norm(gripper_site_pos - cube_pos) # reaching_reward = 1 - np.tanh(10.0 * dist) # reward += reaching_reward # # # grasping reward # touch_left_finger = False # touch_right_finger = False # for i in range(self.sim.data.ncon): # c = self.sim.data.contact[i] # if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cube_geom_id: # touch_left_finger = True # if c.geom1 == self.cube_geom_id and c.geom2 in self.l_finger_geom_ids: # touch_left_finger = True # if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cube_geom_id: # touch_right_finger = True # if c.geom1 == self.cube_geom_id and c.geom2 in self.r_finger_geom_ids: # touch_right_finger = True # if touch_left_finger and touch_right_finger: # reward += 0.25 return reward def _get_observation(self): """ Returns an OrderedDict containing observations [(name_string, np.array), ...]. Important keys: robot-state: contains robot-centric information. object-state: requires @self.use_object_obs to be True. contains object-centric information. image: requires @self.use_camera_obs to be True. contains a rendered frame from the simulation. depth: requires @self.use_camera_obs and @self.camera_depth to be True. contains a rendered depth map from the simulation """ di = super()._get_observation() # camera observations if self.use_camera_obs: camera_obs = self.sim.render( camera_name=self.camera_name, width=self.camera_width, height=self.camera_height, depth=self.camera_depth, ) if self.camera_depth: di["image"], di["depth"] = camera_obs else: di["image"] = camera_obs # TODO (chongyi zheng) # low-level object information # if self.use_object_obs: # # position and rotation of object # cube_pos = np.array(self.sim.data.body_xpos[self.cube_body_id]) # cube_quat = convert_quat( # np.array(self.sim.data.body_xquat[self.cube_body_id]), to="xyzw" # ) # di["cube_pos"] = cube_pos # di["cube_quat"] = cube_quat # # gripper_site_pos = np.array(self.sim.data.site_xpos[self.eef_site_id]) # di["gripper_to_cube"] = gripper_site_pos - cube_pos # # di["object-state"] = np.concatenate( # [cube_pos, cube_quat, di["gripper_to_cube"]] # ) return di def _check_contact(self): """ Returns True if gripper is in contact with an object. """ collision = False for contact in self.sim.data.contact[:self.sim.data.ncon]: if (self.sim.model.geom_id2name( contact.geom1) in self.gripper.contact_geoms() or self.sim.model.geom_id2name( contact.geom2) in self.gripper.contact_geoms()): collision = True break return collision def _check_success(self): """ Returns True if task has been completed. """ # TODO (chongyi zheng) # cube_height = self.sim.data.body_xpos[self.cube_body_id][2] # table_height = self.table_full_size[2] # # # cube is higher than the table top above a margin # return cube_height > table_height + 0.04 raise NotImplementedError def _gripper_visualization(self): """ Do any needed visualization here. Overrides superclass implementations. 
""" # color the gripper site appropriately based on distance to cube if self.gripper_visualization: # TODO (chongyi zheng) # # get distance to cube # cube_site_id = self.sim.model.site_name2id("cube") # dist = np.sum( # np.square( # self.sim.data.site_xpos[cube_site_id] # - self.sim.data.get_site_xpos("grip_site") # ) # ) # set RGBA for the EEF site here # max_dist = 0.1 # scaled = (1.0 - min(dist / max_dist, 1.)) ** 15 scaled = 0.0 rgba = np.zeros(4) rgba[0] = 1 - scaled rgba[1] = scaled rgba[3] = 0.5 self.sim.model.site_rgba[self.eef_site_id] = rgba