Example #1
class RattlerCamera(object):
    def __init__(self):
        self._head_camera = VisionSensor('rattler_eye')
        self._head_camera_mask = None
        self._head_camera_state = 1

    def get_camera_state(self) -> int:
        return self._head_camera_state

    def set_camera_state(self, state: int) -> None:
        self._head_camera_state = state

    def set_camera_properties(self, conf: CameraConfig) -> None:
        if not (conf.rgb or conf.depth):
            self._head_camera.remove()
        else:
            self._head_camera.set_resolution(conf.image_size)
            self._head_camera.set_render_mode(conf.render_mode)

        if conf.mask:
            self._head_camera_mask = self._head_camera.copy()
            self._head_camera_mask.set_render_mode(
                RenderMode.OPENGL_COLOR_CODED)

    def capture_rgb(self) -> np.ndarray:
        return self._head_camera.capture_rgb()

    def capture_depth(self) -> np.ndarray:
        return self._head_camera.capture_depth()

    def mask(self) -> VisionSensor:
        return self._head_camera_mask
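
A minimal usage sketch for this class, assuming a scene containing the 'rattler_eye' sensor (the scene file name here is a hypothetical placeholder):

from pyrep import PyRep

pr = PyRep()
pr.launch('rattler_scene.ttt', headless=True)  # hypothetical scene file
pr.start()

camera = RattlerCamera()
rgb = camera.capture_rgb()      # float RGB image in [0, 1], shape (H, W, 3)
depth = camera.capture_depth()  # float depth map, shape (H, W)

pr.stop()
pr.shutdown()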
Example #2
class ArmECM:
    def __init__(self, pr):

        # The caller supplies a PyRep instance that has already been
        # launched and started, rather than creating one here.
        self.pr = pr
        self.base_handle = Shape('L0_respondable_ECM')
        self.j1_handle = Joint('J1_ECM')
        self.j2_handle = Joint('J2_ECM')
        self.j3_handle = Joint('J3_ECM')
        self.j4_handle = Joint('J4_ECM')

        self.left_cam = VisionSensor('Vision_sensor_left')
        self.right_cam = VisionSensor('Vision_sensor_right')

    def getJointAngles(self):

        pos1 = self.j1_handle.get_joint_position()
        pos2 = self.j2_handle.get_joint_position()
        pos3 = self.j3_handle.get_joint_position()
        pos4 = self.j4_handle.get_joint_position()
        return np.array([pos1, pos2, pos3, pos4])

    def setJointAngles(self, jointAngles):

        self.j1_handle.set_joint_position(jointAngles[0])
        self.j2_handle.set_joint_position(jointAngles[1])
        self.j3_handle.set_joint_position(jointAngles[2])
        self.j4_handle.set_joint_position(jointAngles[3])

    def getCurrentPose(self):

        return self.left_cam.get_pose(relative_to=self.base_handle)

    def getStereoImagePairs(self):
        return self.left_cam.capture_rgb(), self.right_cam.capture_rgb()

    def getDepthImagePairs(self):
        return self.left_cam.capture_depth(True), self.right_cam.capture_depth(
            True)
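
The original example ends with a truncated, commented-out stopSim method. A sketch of typical use, assuming a scene containing the ECM model named above (the scene file name is a placeholder), with the stop/shutdown calls that stopSim presumably wrapped:

import numpy as np
from pyrep import PyRep

pr = PyRep()
pr.launch('ecm_scene.ttt', headless=True)  # hypothetical scene file
pr.start()

arm = ArmECM(pr)
arm.setJointAngles(arm.getJointAngles() + 0.05)  # nudge all four joints
pr.step()                                        # advance the simulation
left_rgb, right_rgb = arm.getStereoImagePairs()

pr.stop()      # what the truncated stopSim presumably called
pr.shutdown()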

    """def stopSim(self):
class Plane3D:
    def __init__(self, headless=False):
        self._pr = PyRep()
        self._pr.launch(scene_file=scene_file, headless=headless)
        self._pr.start()

        self.workspace_base = Shape("workspace")
        self.workspace = self._get_workspace()

        self.camera = VisionSensor("camera")

        self.obstacles = []
        self.velocity_scale = 0
        self.respiration_cycle = 0

    def _get_workspace(self):
        base_pos = self.workspace_base.get_position()
        bbox = self.workspace_base.get_bounding_box()
        min_pos = [bbox[2 * i] + base_pos[i] for i in range(3)]
        max_pos = [bbox[2 * i + 1] + base_pos[i] for i in range(3)]
        return [min_pos, max_pos]

    def reset(self, obstacle_num, velocity_scale, respiration_cycle=0):
        for obstacle in self.obstacles:
            obstacle.remove()
        self._pr.step()

        self.velocity_scale = velocity_scale
        self.respiration_cycle = respiration_cycle

        self.obstacles = []
        for i in range(obstacle_num):
            obs = Obstacle2D.create_random_obstacle(
                workspace=self.workspace,
                velocity_scale=velocity_scale,
                respiration_cycle=respiration_cycle)
            self._pr.step()
            self.obstacles.append(obs)

    def get_image(self):
        rgb = self.camera.capture_rgb()
        return rgb

    def step(self):
        # update config
        for obs in self.obstacles:
            if self.respiration_cycle > 0:
                obs.respire()
            if self.velocity_scale > 0:
                obs.keep_velocity()
        self._pr.step()
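
A minimal usage sketch (scene_file and Obstacle2D come from the surrounding module, as in the class itself):

env = Plane3D(headless=True)
env.reset(obstacle_num=3, velocity_scale=0.5, respiration_cycle=10)
for _ in range(100):
    env.step()            # obstacles respire and keep their velocities
rgb = env.get_image()     # float RGB frame from the 'camera' sensor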
Example #4
def get_rgb_depth(sensor: VisionSensor, get_rgb: bool, get_depth: bool,
                  rgb_noise: NoiseModel, depth_noise: NoiseModel):
    rgb = depth = None
    if sensor is not None and (get_rgb or get_depth):
        sensor.handle_explicitly()
        if get_rgb:
            rgb = sensor.capture_rgb()
            if rgb_noise is not None:
                rgb = rgb_noise.apply(rgb)
        if get_depth:
            depth = sensor.capture_depth()
            if depth_noise is not None:
                depth = depth_noise.apply(depth)
    return rgb, depth
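
A minimal calling sketch; the sensor name is hypothetical, and passing None for both noise models simply skips the noise branches:

cam = VisionSensor('cam0')    # hypothetical sensor in the loaded scene
cam.set_explicit_handling(1)  # so handle_explicitly() can render it on demand
rgb, depth = get_rgb_depth(cam, get_rgb=True, get_depth=True,
                           rgb_noise=None, depth_noise=None)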
Example #5
class TestVisionSensors(TestCore):
    def setUp(self):
        super().setUp()
        [self.pyrep.step() for _ in range(10)]
        self.cam = VisionSensor('cam0')

    def test_capture_rgb(self):
        img = self.cam.capture_rgb()
        self.assertEqual(img.shape, (16, 16, 3))
        #  Check that it's not a blank image
        self.assertFalse(img.min() == img.max() == 0.0)

    def test_capture_depth(self):
        img = self.cam.capture_depth()
        self.assertEqual(img.shape, (16, 16))
        # Check that it's not a blank depth map
        self.assertFalse(img.min() == img.max() == 1.0)
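
These tests follow the standard unittest pattern, so the module can be run directly (assuming TestCore performs the PyRep launch and teardown):

import unittest

if __name__ == '__main__':
    unittest.main()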
Example #6
path_model = os.path.join(
    os.path.expanduser('~'),
    'robotics_drl/data/youbot_all_final_21-08-2019_22-32-00/model.pkl')
actor.load_state_dict(torch.load(path_model))

actor.eval()
state = env.reset()
for i in range(2):
    state = env.reset()
    steps = 0
    while True:
        steps += 1
        with torch.no_grad():
            # state['low'] = state['low'].to(device)
            # state['high'] = state['high'].to(device)
            img = resize(camera_side.capture_rgb())
            save_image(img, 'y_%s.png' % steps)
            action, _, gate = actor(state.cuda(),
                                    log_prob=False,
                                    deterministic=True)
            print(
                sqrt(
                    np.sum(
                        np.array(
                            env.target_base.get_position(
                                relative_to=env.tip))**2)), gate)
        state, reward, done = env.step(action.squeeze().cpu().numpy())
        if reward == env.reward_termination:
            print('Task %s completed' % (i))
            break
Example #7
class LoCoBotCamera(Camera):
    """docstring for SimpleCamera"""

    def __init__(self, configs, simulator):

        self.configs = configs  # kept; reset() reads CAMERA.RESET_PAN/TILT
        self.sim = simulator.sim
        self.rgb_cam = VisionSensor("kinect_rgb")
        self.depth_cam = VisionSensor("kinect_depth")
        self.rgb_cam.set_render_mode(RenderMode.OPENGL3)
        self.depth_cam.set_render_mode(RenderMode.OPENGL3)

        # Pan and tilt related variables.
        self.pan_joint = Joint("LoCoBot_head_pan_joint")
        self.tilt_joint = Joint("LoCoBot_head_tilt_joint")

    def get_rgb(self):

        return self.rgb_cam.capture_rgb()

    def get_depth(self):

        return self.depth_cam.capture_depth()

    def get_rgb_depth(self):

        return self.get_rgb(), self.get_depth()

    def get_intrinsics(self):

        # Todo: Remove this after we fix intrinsics
        raise NotImplementedError
        """
		Returns the intrinsic matrix of the camera

		:return: the intrinsic matrix (shape: :math:`[3, 3]`)
		:rtype: np.ndarray
		"""
        # fx = self.configs['Camera.fx']
        # fy = self.configs['Camera.fy']
        # cx = self.configs['Camera.cx']
        # cy = self.configs['Camera.cy']
        Itc = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        return Itc

    def pix_to_3dpt(self, rs, cs, in_cam=False):
        """
		Get the 3D points of the pixels in RGB images.

		:param rs: rows of interest in the RGB image.
		           It can be a list or 1D numpy array
		           which contains the row indices.
		           The default value is None,
		           which means all rows.
		:param cs: columns of interest in the RGB image.
		           It can be a list or 1D numpy array
		           which contains the column indices.
		           The default value is None,
		           which means all columns.
		:param in_cam: return points in camera frame,
		               otherwise, return points in base frame

		:type rs: list or np.ndarray
		:type cs: list or np.ndarray
		:type in_cam: bool

		:returns: tuple (pts, colors)

		          pts: point coordinates in world frame
		          (shape: :math:`[N, 3]`)

		          colors: rgb values for pts_in_cam
		          (shape: :math:`[N, 3]`)

		:rtype: tuple(np.ndarray, np.ndarray)
		"""

        raise NotImplementedError

    def get_current_pcd(self, in_cam=True):
        """
		Return the point cloud at current time step (one frame only)

		:param in_cam: return points in camera frame,
		               otherwise, return points in base frame

		:type in_cam: bool
		:returns: tuple (pts, colors)

		          pts: point coordinates in world frame (shape: :math:`[N, 3]`)

		          colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
		:rtype: tuple(np.ndarray, np.ndarray)
		"""

        raise NotImplementedError

    @property
    def state(self):
        """
		Return the current pan and tilt joint angles of the robot camera.

		:return:
		        pan_tilt: A list of the form [pan angle, tilt angle]
		:rtype: list
		"""
        return self.get_state()

    def get_state(self):
        """
		Return the current pan and tilt joint angles of the robot camera.

		:return:
		        pan_tilt: A list of the form [pan angle, tilt angle]
		:rtype: list
		"""
        return [self.get_pan(), self.get_tilt()]

    def get_pan(self):
        """
		Return the current pan joint angle of the robot camera.

		:return:
		        pan: Pan joint angle
		:rtype: float
		"""
        return self.pan_joint.get_joint_position()

    def get_tilt(self):
        """
		Return the current tilt joint angle of the robot camera.

		:return:
		        tilt: Tilt joint angle
		:rtype: float
		"""
        return self.tilt_joint.get_joint_position()

    def set_pan(self, pan, wait=True):
        """
		Sets the pan joint angle to the specified value.

		:param pan: value to be set for pan joint
		:param wait: wait until the pan angle is set to
		             the target angle.

		:type pan: float
		:type wait: bool
		"""

        self.pan_joint.set_joint_position(pan)
        # [self.sim.step() for _ in range(50)]

    def set_tilt(self, tilt, wait=True):
        """
		Sets the tilt joint angle to the specified value.

		:param tilt: value to be set for the tilt joint
		:param wait: wait until the tilt angle is set to
		             the target angle.

		:type tilt: float
		:type wait: bool
		"""

        self.tilt_joint.set_joint_position(tilt)

    def set_pan_tilt(self, pan, tilt, wait=True):
        """
		Sets both the pan and tilt joint angles to the specified values.

		:param pan: value to be set for pan joint
		:param tilt: value to be set for the tilt joint
		:param wait: wait until the pan and tilt angles are set to
		             the target angles.

		:type pan: float
		:type tilt: float
		:type wait: bool
		"""

        self.set_pan(pan)
        self.set_tilt(tilt)

    def reset(self):
        """
		This function resets the pan and tilt joints by actuating
		them to their home configuration.
		"""
        self.set_pan_tilt(self.configs.CAMERA.RESET_PAN, self.configs.CAMERA.RESET_TILT)
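
A minimal usage sketch; configs and simulator are supplied by the surrounding robot framework, so both names here are assumptions:

cam = LoCoBotCamera(configs, simulator)  # objects provided by the caller
rgb, depth = cam.get_rgb_depth()
cam.set_pan_tilt(0.3, -0.2)              # radians
print(cam.state)                         # [pan angle, tilt angle]
cam.reset()                              # back to the configured home pose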
Example #8
class Scene(object):
    """Controls what is currently in the vrep scene. This is used for making
    sure that the tasks are easily reachable. This may be just replaced by
    environment. Responsible for moving all the objects. """
    def __init__(self,
                 pyrep: PyRep,
                 robot: Robot,
                 obs_config=ObservationConfig()):
        self._pyrep = pyrep
        self._robot = robot
        self._obs_config = obs_config
        self._active_task = None
        self._initial_task_state = None
        self._start_arm_joint_pos = robot.arm.get_joint_positions()
        self._starting_gripper_joint_pos = robot.gripper.get_joint_positions()
        self._workspace = Shape('workspace')
        self._workspace_boundary = SpawnBoundary([self._workspace])
        self._cam_over_shoulder_left = VisionSensor('cam_over_shoulder_left')
        self._cam_over_shoulder_right = VisionSensor('cam_over_shoulder_right')
        self._cam_wrist = VisionSensor('cam_wrist')
        self._cam_over_shoulder_left_mask = VisionSensor(
            'cam_over_shoulder_left_mask')
        self._cam_over_shoulder_right_mask = VisionSensor(
            'cam_over_shoulder_right_mask')
        self._cam_wrist_mask = VisionSensor('cam_wrist_mask')
        self._has_init_task = self._has_init_episode = False
        self._variation_index = 0

        self._initial_robot_state = (robot.arm.get_configuration_tree(),
                                     robot.gripper.get_configuration_tree())

        # Set camera properties from observation config
        self._set_camera_properties()

    def load(self, task: Task) -> None:
        """Loads the task and positions at the centre of the workspace.

        :param task: The task to load in the scene.
        """
        task.load()  # Load the task in to the scene

        # Set at the centre of the workspace
        task.get_base().set_position(self._workspace.get_position())

        self._initial_task_state = task.get_state()
        self._active_task = task
        self._initial_task_pose = task.boundary_root().get_orientation()
        self._has_init_task = self._has_init_episode = False
        self._variation_index = 0

    def unload(self) -> None:
        """Clears the scene. i.e. removes all tasks. """
        if self._active_task is not None:
            self._robot.gripper.release()
            if self._has_init_task:
                self._active_task.cleanup_()
            self._active_task.unload()
        self._active_task = None
        self._variation_index = 0

    def init_task(self) -> None:
        self._active_task.init_task()
        self._has_init_task = True
        self._variation_index = 0

    def init_episode(self,
                     index: int,
                     randomly_place: bool = True,
                     max_attempts: int = 5) -> List[str]:
        """Calls the task init_episode and puts randomly in the workspace.
        """

        self._variation_index = index

        if not self._has_init_task:
            self.init_task()

        # Try a few times to init and place in the workspace
        attempts = 0
        descriptions = None
        while attempts < max_attempts:
            descriptions = self._active_task.init_episode(index)
            try:
                if (randomly_place
                        and not self._active_task.is_static_workspace()):
                    self._place_task()
                self._active_task.validate()
                break
            except (BoundaryError, WaypointError) as e:
                self._active_task.cleanup_()
                self._active_task.restore_state(self._initial_task_state)
                attempts += 1
                if attempts >= max_attempts:
                    raise e

        # Let objects come to rest
        [self._pyrep.step() for _ in range(STEPS_BEFORE_EPISODE_START)]
        self._has_init_episode = True
        return descriptions

    def reset(self) -> None:
        """Resets the joint angles. """
        self._robot.gripper.release()

        arm, gripper = self._initial_robot_state
        self._pyrep.set_configuration_tree(arm)
        self._pyrep.set_configuration_tree(gripper)
        self._robot.arm.set_joint_positions(self._start_arm_joint_pos)
        self._robot.arm.set_joint_target_velocities(
            [0] * len(self._robot.arm.joints))
        self._robot.gripper.set_joint_positions(
            self._starting_gripper_joint_pos)
        self._robot.gripper.set_joint_target_velocities(
            [0] * len(self._robot.gripper.joints))

        if self._active_task is not None and self._has_init_task:
            self._active_task.cleanup_()
            self._active_task.restore_state(self._initial_task_state)
        [self._pyrep.step_ui() for _ in range(20)]
        if self._active_task is not None:
            self._active_task.set_initial_objects_in_scene()

    def get_observation(self) -> Observation:
        tip = self._robot.arm.get_tip()

        joint_forces = None
        if self._obs_config.joint_forces:
            fs = self._robot.arm.get_joint_forces()
            vels = self._robot.arm.get_joint_target_velocities()
            joint_forces = self._obs_config.joint_forces_noise.apply(
                np.array([-f if v < 0 else f for f, v in zip(fs, vels)]))

        ee_forces_flat = None
        if self._obs_config.gripper_touch_forces:
            ee_forces = self._robot.gripper.get_touch_sensor_forces()
            ee_forces_flat = []
            for eef in ee_forces:
                ee_forces_flat.extend(eef)
            ee_forces_flat = np.array(ee_forces_flat)

        lsc_ob = self._obs_config.left_shoulder_camera
        rsc_ob = self._obs_config.right_shoulder_camera
        wc_ob = self._obs_config.wrist_camera

        obs = Observation(
            left_shoulder_rgb=(lsc_ob.rgb_noise.apply(
                self._cam_over_shoulder_left.capture_rgb())
                               if lsc_ob.rgb else None),
            left_shoulder_depth=(lsc_ob.depth_noise.apply(
                self._cam_over_shoulder_left.capture_depth())
                                 if lsc_ob.depth else None),
            right_shoulder_rgb=(rsc_ob.rgb_noise.apply(
                self._cam_over_shoulder_right.capture_rgb())
                                if rsc_ob.rgb else None),
            right_shoulder_depth=(rsc_ob.depth_noise.apply(
                self._cam_over_shoulder_right.capture_depth())
                                  if rsc_ob.depth else None),
            wrist_rgb=(wc_ob.rgb_noise.apply(self._cam_wrist.capture_rgb())
                       if wc_ob.rgb else None),
            wrist_depth=(wc_ob.depth_noise.apply(
                self._cam_wrist.capture_depth()) if wc_ob.depth else None),
            left_shoulder_mask=(
                self._cam_over_shoulder_left_mask.capture_rgb()
                if lsc_ob.mask else None),
            right_shoulder_mask=(
                self._cam_over_shoulder_right_mask.capture_rgb()
                if rsc_ob.mask else None),
            wrist_mask=(self._cam_wrist_mask.capture_rgb()
                        if wc_ob.mask else None),
            joint_velocities=(self._obs_config.joint_velocities_noise.apply(
                np.array(self._robot.arm.get_joint_velocities()))
                              if self._obs_config.joint_velocities else None),
            joint_positions=(self._obs_config.joint_positions_noise.apply(
                np.array(self._robot.arm.get_joint_positions()))
                             if self._obs_config.joint_positions else None),
            joint_forces=(joint_forces
                          if self._obs_config.joint_forces else None),
            gripper_open_amount=((
                1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0)
                                 if self._obs_config.gripper_open_amount else
                                 None),
            gripper_pose=(np.array(tip.get_pose())
                          if self._obs_config.gripper_pose else None),
            gripper_touch_forces=(ee_forces_flat
                                  if self._obs_config.gripper_touch_forces else
                                  None),
            gripper_joint_positions=(
                np.array(self._robot.gripper.get_joint_positions())
                if self._obs_config.gripper_joint_positions else None),
            task_low_dim_state=(self._active_task.get_low_dim_state() if
                                self._obs_config.task_low_dim_state else None))
        obs = self._active_task.decorate_observation(obs)
        return obs

    def step(self):
        self._pyrep.step()
        self._active_task.step()

    def get_demo(self,
                 record: bool = True,
                 func=None,
                 randomly_place: bool = True) -> List[Observation]:
        """Returns a demo (list of observations)"""

        if not self._has_init_task:
            self.init_task()
        if not self._has_init_episode:
            self.init_episode(self._variation_index,
                              randomly_place=randomly_place)
        self._has_init_episode = False

        waypoints = self._active_task.get_waypoints()
        if len(waypoints) == 0:
            raise NoWaypointsError('No waypoints were found.',
                                   self._active_task)

        demo = []
        if record:
            self._pyrep.step()  # Need this here or get_force doesn't work...
            demo.append(self.get_observation())
        while True:
            success = False
            for i, point in enumerate(waypoints):

                point.start_of_path()
                try:
                    path = point.get_path()
                except ConfigurationPathError as e:
                    raise DemoError(
                        'Could not get a path for waypoint %d.' % i,
                        self._active_task) from e
                ext = point.get_ext()
                path.visualize()

                done = False
                success = False
                while not done:
                    done = path.step()
                    self.step()
                    self._demo_record_step(demo, record, func)
                    success, term = self._active_task.success()
                    if success:
                        break

                point.end_of_path()

                path.clear_visualization()

                if success:
                    # We can quit early because we have finished the task
                    break

                # TODO: need to decide how I do the gripper actions
                if len(ext) > 0:
                    contains_param = False
                    start_of_bracket = -1
                    gripper = self._robot.gripper
                    if 'open_gripper(' in ext:
                        gripper.release()
                        start_of_bracket = ext.index('open_gripper(') + 13
                        contains_param = ext[start_of_bracket] != ')'
                        if not contains_param:
                            done = False
                            while not done:
                                done = gripper.actuate(1.0, 0.04)
                                self._pyrep.step()
                                self._active_task.step()
                                if self._obs_config.record_gripper_closing:
                                    self._demo_record_step(demo, record, func)
                    elif 'close_gripper(' in ext:
                        start_of_bracket = ext.index('close_gripper(') + 14
                        contains_param = ext[start_of_bracket] != ')'
                        if not contains_param:
                            done = False
                            while not done:
                                done = gripper.actuate(0.0, 0.04)
                                self._pyrep.step()
                                self._active_task.step()
                                if self._obs_config.record_gripper_closing:
                                    self._demo_record_step(demo, record, func)

                    if contains_param:
                        rest = ext[start_of_bracket:]
                        num = float(rest[:rest.index(')')])
                        done = False
                        while not done:
                            done = gripper.actuate(num, 0.04)
                            self._pyrep.step()
                            self._active_task.step()
                            if self._obs_config.record_gripper_closing:
                                self._demo_record_step(demo, record, func)

                    if 'close_gripper(' in ext:
                        for g_obj in self._active_task.get_graspable_objects():
                            gripper.grasp(g_obj)

                    self._demo_record_step(demo, record, func)

            if not self._active_task.should_repeat_waypoints() or success:
                break

        # Some tasks may need additional physics steps
        # (e.g. ball rolling to goal)
        if not success:
            for _ in range(10):
                self._pyrep.step()
                self._active_task.step()
                self._demo_record_step(demo, record, func)
                success, term = self._active_task.success()
                if success:
                    break

        success, term = self._active_task.success()
        if not success:
            raise DemoError('Demo was completed, but was not successful.',
                            self._active_task)

        return demo

    def get_observation_config(self) -> ObservationConfig:
        return self._obs_config

    def _demo_record_step(self, demo_list, record, func):
        if record:
            demo_list.append(self.get_observation())
        if func is not None:
            func()

    def _set_camera_properties(self) -> None:
        def _set_rgb_props(rgb_cam: VisionSensor, rgb: bool, depth: bool,
                           conf: CameraConfig):
            if not (rgb or depth):
                rgb_cam.remove()
            else:
                rgb_cam.set_resolution(conf.image_size)
                rgb_cam.set_render_mode(conf.render_mode)

        def _set_mask_props(mask_cam: VisionSensor, mask: bool,
                            conf: CameraConfig):
            if not mask:
                mask_cam.remove()
            else:
                mask_cam.set_resolution(conf.image_size)

        _set_rgb_props(self._cam_over_shoulder_left,
                       self._obs_config.left_shoulder_camera.rgb,
                       self._obs_config.left_shoulder_camera.depth,
                       self._obs_config.left_shoulder_camera)
        _set_rgb_props(self._cam_over_shoulder_right,
                       self._obs_config.right_shoulder_camera.rgb,
                       self._obs_config.right_shoulder_camera.depth,
                       self._obs_config.right_shoulder_camera)
        _set_rgb_props(self._cam_wrist, self._obs_config.wrist_camera.rgb,
                       self._obs_config.wrist_camera.depth,
                       self._obs_config.wrist_camera)
        _set_mask_props(self._cam_over_shoulder_left_mask,
                        self._obs_config.left_shoulder_camera.mask,
                        self._obs_config.left_shoulder_camera)
        _set_mask_props(self._cam_over_shoulder_right_mask,
                        self._obs_config.right_shoulder_camera.mask,
                        self._obs_config.right_shoulder_camera)
        _set_mask_props(self._cam_wrist_mask,
                        self._obs_config.wrist_camera.mask,
                        self._obs_config.wrist_camera)

    def _place_task(self) -> None:
        self._workspace_boundary.clear()
        # Find a place in the robot workspace for task
        self._active_task.boundary_root().set_orientation(
            self._initial_task_pose)
        min_rot, max_rot = self._active_task.base_rotation_bounds()
        self._workspace_boundary.sample(self._active_task.boundary_root(),
                                        min_rotation=min_rot,
                                        max_rotation=max_rot)
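
A minimal usage sketch; pyrep, robot and task come from the surrounding RLBench-style code, so all three names are assumptions:

scene = Scene(pyrep, robot, ObservationConfig())
scene.load(task)                    # centre the task in the workspace
scene.init_task()
descriptions = scene.init_episode(index=0)
obs = scene.get_observation()       # fields populated per the observation config
demo = scene.get_demo(record=True)  # list of Observation objects
scene.unload()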
Example #9
class SmartBot(NonHolonomicBase):
    velocity_limit = np.array([0.0, 15.0])
    nb_ultrasonic_sensor = 5
    ultrasonic_sensor_bound = np.array([0.02, 2.0])

    def __init__(self, count: int = 0, enable_vision: bool = False):
        super().__init__(count, 2, 'smartbot')

        self._enable_vision = enable_vision

        self._camera = VisionSensor('{}_camera'.format(self.get_name()))
        self._gyroscope = Gyroscope('{}_gyro_sensor'.format(self.get_name()))
        self._accelerometer = Accelerometer(
            '{}_accelerometer'.format(self.get_name()))

        self._ultrasonic_values = 2 * np.ones(self.nb_ultrasonic_sensor)
        self._ultrasonic_sensors = [ProximitySensor(
            '{}_ultrasonic_sensor_{}'.format(self.get_name(), i + 1))
            for i in range(self.nb_ultrasonic_sensor)]
        
        self._previous_joint_positions = self.get_joint_positions()

        self.initial_configuration = self.get_configuration_tree()

    @property
    def ultrasonic_distances(self) -> np.ndarray:
        """Reads proximity sensors values from robot.

        Returns:
            proximity_sensor_values: Array of proximity sensor values.
        """
        for i in range(self.nb_ultrasonic_sensor):
            dist = self._ultrasonic_sensors[i].read()
            if dist == -1.0:
                self._ultrasonic_values[i - 1] = self.ultrasonic_sensor_bound[1]
            else:
                self._ultrasonic_values[i - 1] = dist
        return self._ultrasonic_values

    @property
    def image(self) -> np.ndarray:
        """Reads image from camera mounted to robot.

        Returns:
            img: Image received from robot
        """
        return self._camera.capture_rgb()

    @property
    def accelerations(self) -> np.ndarray:
        """Reads accelerometer values from robot.

        Returns:
            accelerometer_values: Array of values received from accelerometer.
        """
        values = self._accelerometer.read()
        return np.round(values, 4)

    @property
    def angular_velocities(self) -> np.ndarray:
        """Reads gyroscope values from robot.

        Returns:
            gyroscope_values: Array of values received from gyroscope.
        """
        values = self._gyroscope.read()
        return np.round(values, 4)

    @property
    def encoder_ticks(self) -> np.ndarray:
        """Reads encoders ticks from robot.
        Returns:
            encoder_ticks: Current values of encoders ticks.
        """
        dphi = np.asarray(self.get_joint_positions()) \
               - np.asarray(self._previous_joint_positions)
        self._previous_joint_positions = self.get_joint_positions()

        def correct_angle(angle):
            # wrap the angle difference into [-pi, pi)
            if angle >= 0:
                new_angle = np.fmod((angle + np.pi), (2 * np.pi)) - np.pi
            else:
                new_angle = np.fmod((angle - np.pi), (2 * np.pi)) + np.pi
            return np.round(new_angle, 3)

        return np.asarray([correct_angle(angle) for angle in dphi])
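
A minimal usage sketch, assuming a launched scene containing the 'smartbot' model and its sensors:

bot = SmartBot(count=0, enable_vision=True)
print(bot.ultrasonic_distances)   # five ranges; upper bound when nothing is hit
print(bot.accelerations)          # rounded accelerometer reading
print(bot.angular_velocities)     # rounded gyroscope reading
img = bot.image                   # RGB frame from the mounted camera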
Example #10
class StereoVisionRobot:
    def __init__(self,
                 min_distance,
                 max_distance,
                 default_joint_limit_type="none"):
        self.cam_left = VisionSensor("vs_cam_left#")
        self.cam_right = VisionSensor("vs_cam_right#")
        self.tilt_left = Joint("vs_eye_tilt_left#")
        self.tilt_right = Joint("vs_eye_tilt_right#")
        self.pan_left = Joint("vs_eye_pan_left#")
        self.pan_right = Joint("vs_eye_pan_right#")
        self.min_distance = min_distance
        self.max_distance = max_distance
        self.default_joint_limit_type = default_joint_limit_type
        self._pan_max = rad(10)
        self._tilt_max = rad(10)
        self._tilt_speed = 0
        self._pan_speed = 0
        self.episode_reset()

    # def episode_reset(self, vergence=True):
    def episode_reset(self):
        # reset joint positions / speeds
        self.reset_speed()
        self.tilt_right.set_joint_position(0)
        self.tilt_left.set_joint_position(0)
        fixation_distance = np.random.uniform(self.min_distance,
                                              self.max_distance)
        random_vergence = rad(to_angle(fixation_distance))
        self.pan_left.set_joint_position(random_vergence / 2)
        self.pan_right.set_joint_position(-random_vergence / 2)

    def reset_speed(self):
        self._tilt_speed = 0
        self._pan_speed = 0

    def _check_pan_limit(self, left, right, joint_limit_type=None):
        joint_limit_type = self.default_joint_limit_type if joint_limit_type is None else joint_limit_type
        if joint_limit_type == "stick":
            return np.clip(left, 0,
                           self._pan_max), np.clip(right, -self._pan_max, 0)
        if joint_limit_type == "none":
            return left, right

    def _check_tilt_limit(self, left, right, joint_limit_type=None):
        joint_limit_type = self.default_joint_limit_type if joint_limit_type is None else joint_limit_type
        if joint_limit_type == "stick":
            return np.clip(left, -self._tilt_max,
                           self._tilt_max), np.clip(right, -self._tilt_max,
                                                    self._tilt_max)
        if joint_limit_type == "none":
            return left, right

    def reset_vergence_position(self, joint_limit_type=None):
        mean = (self.pan_right.get_joint_position() +
                self.pan_left.get_joint_position()) / 2
        left, right = self._check_pan_limit(mean, mean, joint_limit_type)
        self.pan_left.set_joint_position(left)
        self.pan_right.set_joint_position(right)

    def set_vergence_position(self, alpha, joint_limit_type=None):
        rad_alpha = rad(alpha)
        mean = (self.pan_right.get_joint_position() +
                self.pan_left.get_joint_position()) / 2
        left, right = self._check_pan_limit(mean + rad_alpha / 2,
                                            mean - rad_alpha / 2,
                                            joint_limit_type)
        self.pan_left.set_joint_position(left)
        self.pan_right.set_joint_position(right)

    def set_delta_vergence_position(self, delta, joint_limit_type=None):
        rad_delta = rad(delta)
        left, right = self._check_pan_limit(
            self.pan_left.get_joint_position() + rad_delta / 2,
            self.pan_right.get_joint_position() - rad_delta / 2,
            joint_limit_type)
        self.pan_left.set_joint_position(left)
        self.pan_right.set_joint_position(right)

    def set_pan_position(self, alpha, joint_limit_type=None):
        rad_alpha = rad(alpha)
        vergence = self.pan_left.get_joint_position(
        ) - self.pan_right.get_joint_position()
        left, right = self._check_pan_limit((vergence / 2) + rad_alpha,
                                            -(vergence / 2) + rad_alpha,
                                            joint_limit_type)
        self.pan_left.set_joint_position(left)
        self.pan_right.set_joint_position(right)

    def set_delta_pan_speed(self, delta, joint_limit_type=None):
        self._pan_speed += rad(delta)
        left, right = self._check_pan_limit(
            self.pan_left.get_joint_position() + self._pan_speed,
            self.pan_right.get_joint_position() + self._pan_speed,
            joint_limit_type)
        self.pan_left.set_joint_position(left)
        self.pan_right.set_joint_position(right)

    def set_tilt_position(self, alpha, joint_limit_type=None):
        rad_alpha = rad(alpha)
        left, right = self._check_tilt_limit(rad_alpha, rad_alpha,
                                             joint_limit_type)
        self.tilt_left.set_joint_position(left)
        self.tilt_right.set_joint_position(right)

    def set_delta_tilt_speed(self, delta, joint_limit_type=None):
        self._tilt_speed += rad(delta)
        left, right = self._check_tilt_limit(
            self.tilt_left.get_joint_position() -
            self._tilt_speed,  # minus to get natural upward movement
            self.tilt_right.get_joint_position() -
            self._tilt_speed,  # minus to get natural upward movement
            joint_limit_type)
        self.tilt_left.set_joint_position(left)
        self.tilt_right.set_joint_position(right)

    def get_tilt_position(self):
        return deg(self.tilt_right.get_joint_position())

    def get_pan_position(self):
        return deg((self.pan_right.get_joint_position() +
                    self.pan_left.get_joint_position()) / 2)

    def get_vergence_position(self):
        return deg(self.pan_left.get_joint_position() -
                   self.pan_right.get_joint_position())

    def get_position(self):
        return self.tilt_position, self.pan_position, self.vergence_position

    def get_vergence_error(self, other_distance):
        return self.vergence_position - to_angle(other_distance)

    def get_tilt_speed(self):
        return deg(self._tilt_speed)

    def get_pan_speed(self):
        return deg(self._pan_speed)

    def get_speed(self):
        return self.tilt_speed, self.pan_speed

    def set_action(self, action, joint_limit_type=None):
        self.set_delta_tilt_speed(float(action[0]), joint_limit_type)
        self.set_delta_pan_speed(float(action[1]), joint_limit_type)
        self.set_delta_vergence_position(float(action[2]), joint_limit_type)

    def set_position(self, position, joint_limit_type):
        self.set_tilt_position(position[0], joint_limit_type)
        self.set_pan_position(position[1], joint_limit_type)
        self.set_vergence_position(position[2], joint_limit_type)

    def get_vision(self):
        return self.cam_left.capture_rgb(), self.cam_right.capture_rgb()

    tilt_position = property(get_tilt_position)
    pan_position = property(get_pan_position)
    vergence_position = property(get_vergence_position)
    position = property(get_position)
    tilt_speed = property(get_tilt_speed)
    pan_speed = property(get_pan_speed)
    speed = property(get_speed)
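
A minimal usage sketch; rad, deg and to_angle are helpers from the surrounding module, and the distance bounds are assumptions:

robot = StereoVisionRobot(min_distance=0.5, max_distance=5.0)
robot.set_action([0.5, -0.2, 1.0])  # delta tilt speed, pan speed, vergence (deg)
left, right = robot.get_vision()    # stereo RGB pair
print(robot.position)               # (tilt, pan, vergence) in degrees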
Example #11
class EnvPollos(Env):
    def __init__(self, ep_length=100):
        """
        Pollos environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episodes in timesteps
        """
        # 5 points in 3D space forming the trajectory
        self.action_space = Box(low=-0.3, high=0.3, shape=(5,3), dtype=np.float32)
        self.observation_space = Box(low=np.array([-0.1, -0.2, 0, -1.5, 0.0, 0.0, -0.5, -0.2, -0.2]), 
                                     high=np.array([0.1, 1.7, 2.5, 0, 0.0, 0.0, 0.5, 0.2, 1.0]))
        
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.agent = UR10()
        self.agent.max_velocity = 1.2
        self.joints = [Joint('UR10_joint1'), Joint('UR10_joint2'), Joint('UR10_joint3'), Joint('UR10_joint4'), Joint('UR10_joint5'), Joint('UR10_joint6')]
        self.joints_limits = [[j.get_joint_interval()[1][0],j.get_joint_interval()[1][0]+j.get_joint_interval()[1][1]] 
                              for j in self.joints]
        print(self.joints_limits)
        self.agent_hand = Shape('hand')
        self.agent.set_control_loop_enabled(True)
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.initial_agent_tip_position = self.agent.get_tip().get_position()
        self.initial_agent_tip_quaternion = self.agent.get_tip().get_quaternion()
        self.target = Dummy('UR10_target')
        self.initial_joint_positions = self.agent.get_joint_positions()
        self.pollo_target = Dummy('pollo_target')
        self.pollo = Shape('pollo')
        self.table_target = Dummy('table_target')
        self.initial_pollo_position = self.pollo.get_position()
        self.initial_pollo_orientation = self.pollo.get_quaternion()
        self.table_target = Dummy('table_target')
        # objects
        self.scene_objects = [Shape('table0'), Shape('Plane'), Shape('Plane0'), Shape('ConcretBlock')]
        #self.agent_tip = self.agent.get_tip()
        self.initial_distance = np.linalg.norm(np.array(self.initial_pollo_position)-np.array(self.initial_agent_tip_position))
        
        # camera 
        self.camera = VisionSensor('kinect_depth')
        self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(self.camera.get_handle(),-1)
        self.np_camera_matrix_extrinsics = np.delete(np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
        width = 640.0
        height = 480.0
        angle = math.radians(57.0)
        focalx_px = (width/2.0) / math.tan(angle/2.0)
        focaly_px = (height/2.0) / math.tan(angle/2.0)
        self.np_camera_matrix_intrinsics = np.array([[-focalx_px, 0.0, 320.0],
                                                     [0.0, -focalx_px, 240.0],
                                                     [0.0, 0.0, 1.0]])
                                        
        self.reset()

    def reset(self):
        pos = list(np.random.uniform( [-0.1, -0.1, 0.0],  [0.1, 0.1, 0.1]) + self.initial_pollo_position)
        self.pollo.set_position(pos)
        self.pollo.set_quaternion(self.initial_pollo_orientation)
        self.agent.set_joint_positions(self.initial_joint_positions)
        self.initial_epoch_time = time.time()
        while True:         # wait for the arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1):  # every joint is (nearly) at rest
                break
        return self._get_state()

    def step(self, action): 
        print(action.shape, action)
        if action is None:
            self.pr.step()
            return self._get_state(), 0.0, False, {}
        
        if np.any(np.isnan(action)):
            print(action)
            return self._get_state(), -1.0, False, {}

        # create a path with tip and pollo at its endpoints and 5 adjustable points in the middle
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_robot_tip_position = np.array(self.agent.get_tip().get_position())
        np_robot_tip_orientation = np.array(self.agent.get_tip().get_orientation())
        c_path = CartesianPath.create()     
        c_path.insert_control_points(action[4])         # point after pollo to secure the grasp
        c_path.insert_control_points([list(np_pollo_target) + list(np_robot_tip_orientation)])       # pollo
        c_path.insert_control_points(action[0:3])   
        c_path.insert_control_points([list(np_robot_tip_position) + list(np_robot_tip_orientation)]) # robot hand
        try:
            self.path = self.agent.get_path_from_cartesian_path(c_path)
        except IKError as e:
            print('Agent::grasp    Could not find joint values')  
            return self._get_state(), -1, True, {}   
    
        # go through path
        reloj = time.time()
        while self.path.step():
            self.pr.step()                                   # Step the physics simulation
            if (time.time()-reloj) > 4:              
                return self._get_state(), -10.0, True, {}    # Too much time
            if any((self.agent_hand.check_collision(obj) for obj in self.scene_objects)):   # collision with the table
                return self._get_state(), -10.0, True, {}   
        
        # path ok, compute reward
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_robot_tip_position = np.array(self.agent.get_tip().get_position())
        dist = np.linalg.norm(np_pollo_target-np_robot_tip_position)
        altura = np.clip((np_pollo_target[2] - self.initial_pollo_position[2]), 0, 0.5)
        
        if altura > 0.3:                                                     # chicken in hand
            return self._get_state(), altura, True, {}
        elif dist > self.initial_distance:                                   # hand is moving away
            return self._get_state(), -10.0, True, {}
        if time.time() - self.initial_epoch_time > 5:                        # too much time
            return self._get_state(), -10.0, True, {}
            return self._get_state(), -10.0, True, {}
        
        # Reward: the height term decays from 1 (at 0 m) towards 0 as the chicken is lifted
        pollo_height = np.exp(-altura * 10)
        reward = - pollo_height - dist
        return self._get_state(), reward, False, {}

    def close(self):
        self.pr.stop()
        self.pr.shutdown()
    
    def render(self):
        print("RENDER")
        np_pollo_en_camara = self.getPolloEnCamara()
         
        # capture the depth camera's rendered image (via capture_rgb on 'kinect_depth')
        depth = self.camera.capture_rgb()
        circ = plt.Circle((int(np_pollo_en_camara[0]), int(np_pollo_en_camara[1])),10)
        plt.clf()
        fig = plt.gcf()
        ax = fig.gca()
        ax.add_artist(circ)
        ax.imshow(depth, cmap = 'hot')
        plt.pause(0.000001)

    # Aux

    # transform env.pollo_target.get_position() to camera coordinates and
    # project it to image coordinates
    def getPolloEnCamara(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_target_cam = self.np_camera_matrix_extrinsics.dot(np.append([np_pollo_target],1.0))
        np_pollo_en_camara = self.np_camera_matrix_intrinsics.dot(np_pollo_target_cam)
        np_pollo_en_camara = np_pollo_en_camara / np_pollo_en_camara[2]
        np_pollo_en_camara = np.delete(np_pollo_en_camara,2)
        return np_pollo_en_camara

    def getPolloEnCamaraEx(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_en_camara = self.np_camera_matrix_extrinsics.dot(np.append([np_pollo_target],1.0))
        return np_pollo_en_camara


    def _get_state(self):
        # Return state containing arm joint angles/velocities & target position
        # return (self.agent.get_joint_positions() + 
        #         self.agent.get_joint_velocities() +
        #         self.pollo_target.get_position())
        p = self.getPolloEnCamaraEx()
        j = self.agent.get_joint_positions()
        r = np.array([j[0],j[1],j[2],j[3],j[4],j[5],p[0],p[1],p[2]])
        return r
    
    def vrepToNP(self, c):
        return np.array([[c[0],c[4],c[8],c[3]],
                         [c[1],c[5],c[9],c[7]],
                         [c[2],c[6],c[10],c[11]],
                         [0,   0,   0,    1]])
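
A minimal interaction sketch following the Gym-style API this class exposes (SCENE_FILE and the scene contents come from the surrounding module):

env = EnvPollos()
state = env.reset()
for _ in range(10):
    action = env.action_space.sample()            # random 5x3 control points
    state, reward, done, info = env.step(action)
    if done:
        state = env.reset()
env.close()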
Example #12
class SphericalVisionSensor(Object):
    """An object able capture 360 degree omni-directional images.
    """

    def __init__(self, name):
        super().__init__(name)

        # final omni-directional sensors
        self._sensor_depth = VisionSensor('%s_sensorDepth' % (self.get_name()))
        self._sensor_rgb = VisionSensor('%s_sensorRGB' % (self.get_name()))

        # directed sub-sensors
        names = ['front', 'top', 'back', 'bottom', 'left', 'right']
        self._six_sensors = [VisionSensor('%s_%s' % (self.get_name(), n)) for n in names]
        self._front = self._six_sensors[0]

        # directed sub-sensor handles list
        self._six_sensor_handles = [vs.get_handle() for vs in self._six_sensors]

        # set all to explicit handling
        self._set_all_to_explicit_handling()

        # assert sensor properties are matching
        self._assert_matching_resolutions()
        self._assert_matching_render_modes()
        self._assert_matching_entities_to_render()
        self._assert_matching_window_sizes()
        self._assert_matching_near_clipping_planes()
        self._assert_matching_far_clipping_planes()

        # omni resolution
        self._omni_resolution = self._sensor_rgb.get_resolution()

        # near and far clipping plane
        self._near_clipping_plane = self.get_near_clipping_plane()
        self._far_clipping_plane = self.get_far_clipping_plane()
        self._clipping_plane_diff = self._far_clipping_plane - self._near_clipping_plane

        # projective cam region image
        self._planar_to_radial_depth_scalars = self._get_planar_to_radial_depth_scalars()

        # flip config
        self._flip_x = True

    # Private #
    # --------#

    def _set_all_to_explicit_handling(self):
        self._sensor_depth.set_explicit_handling(1)
        self._sensor_rgb.set_explicit_handling(1)
        for sensor in self._six_sensors:
            sensor.set_explicit_handling(1)

    def _assert_matching_resolutions(self):
        assert self._sensor_depth.get_resolution() == self._sensor_rgb.get_resolution()
        front_sensor_res = self._front.get_resolution()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_resolution() == front_sensor_res

    def _assert_matching_render_modes(self):
        assert self._sensor_depth.get_render_mode() == self._sensor_rgb.get_render_mode()
        front_sensor_render_mode = self._front.get_render_mode()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_render_mode() == front_sensor_render_mode

    def _assert_matching_entities_to_render(self):
        assert self._sensor_depth.get_entity_to_render() == self._sensor_rgb.get_entity_to_render()
        front_sensor_entity_to_render = self._front.get_entity_to_render()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_entity_to_render() == front_sensor_entity_to_render

    def _assert_matching_window_sizes(self):
        assert self._sensor_depth.get_windowed_size() == self._sensor_rgb.get_windowed_size()

    def _assert_matching_near_clipping_planes(self):
        front_sensor_near = self._front.get_near_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_near_clipping_plane() == front_sensor_near

    def _assert_matching_far_clipping_planes(self):
        front_sensor_far = self._front.get_far_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_far_clipping_plane() == front_sensor_far

    def _get_requested_type(self) -> ObjectType:
        return ObjectType(sim.simGetObjectType(self.get_handle()))

    @staticmethod
    def _create_uniform_pixel_coords_image(image_dims):
        pixel_x_coords = np.reshape(np.tile(np.arange(image_dims[1]), [image_dims[0]]),
                                    (image_dims[0], image_dims[1], 1)).astype(np.float32)
        pixel_y_coords_ = np.reshape(np.tile(np.arange(image_dims[0]), [image_dims[1]]),
                                     (image_dims[1], image_dims[0], 1)).astype(np.float32)
        pixel_y_coords = np.transpose(pixel_y_coords_, (1, 0, 2))
        return np.concatenate((pixel_x_coords, pixel_y_coords), -1)

    def _get_planar_to_radial_depth_scalars(self):

        # reference: https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates

        # polar coord image
        coord_img = self._create_uniform_pixel_coords_image([self._omni_resolution[1], self._omni_resolution[0]])
        pixels_per_rad = self._omni_resolution[1] / np.pi
        polar_angles = coord_img / pixels_per_rad
        phi = polar_angles[..., 0:1]
        theta = polar_angles[..., 1:2]

        # image borders wrt the 6 projective cameras
        phis_rel = ((phi + np.pi/4) % (np.pi/2)) / (np.pi/2)
        lower_borders = np.arccos(1/((phis_rel*2-1)**2 + 2)**0.5)
        upper_borders = np.pi - lower_borders

        # omni image regions from the 6 projective cameras
        xy_region = np.logical_and(theta <= upper_borders, theta >= lower_borders)
        pos_x = np.logical_and(xy_region, np.logical_or(phi <= 45 * np.pi/180, phi >= (45 + 270) * np.pi/180))
        pos_y = np.logical_and(xy_region,
                               np.logical_and(phi >= 45 * np.pi/180, phi <= (45 + 90) * np.pi/180))
        neg_x = np.logical_and(xy_region,
                               np.logical_and(phi >= (45 + 90) * np.pi/180, phi <= (45 + 180) * np.pi/180))
        neg_y = np.logical_and(xy_region,
                               np.logical_and(phi >= (45 + 180) * np.pi/180, phi <= (45 + 270) * np.pi/180))
        pos_z = theta <= lower_borders
        neg_z = theta >= upper_borders

        # trig terms for conversion
        sin_theta = np.sin(theta)
        cos_theta = np.cos(theta)
        sin_phi = np.sin(phi)
        cos_phi = np.cos(phi)

        # reciprocal terms
        recip_sin_theta_cos_phi = 1/(sin_theta * cos_phi + MIN_DIVISOR)
        recip_sin_theta_sin_phi = 1/(sin_theta * sin_phi + MIN_DIVISOR)
        recip_cos_theta = 1/(cos_theta + MIN_DIVISOR)

        # planar to radial depth scalars
        return np.where(pos_x, recip_sin_theta_cos_phi,
                        np.where(neg_x, -recip_sin_theta_cos_phi,
                                 np.where(pos_y, recip_sin_theta_sin_phi,
                                          np.where(neg_y, -recip_sin_theta_sin_phi,
                                                   np.where(pos_z, recip_cos_theta,
                                                            np.where(neg_z, -recip_cos_theta,
                                                                     np.zeros_like(recip_cos_theta)))))))[..., 0]

    # Public #
    # -------#

    def handle_explicitly(self) -> None:
        """Handle spherical vision sensor explicitly.

          This enables capturing an image (e.g., capture_rgb())
          without calling PyRep.step().
        """
        utils.script_call('handleSpherical@PyRep', PYREP_SCRIPT_TYPE,
                          [self._sensor_depth._handle, self._sensor_rgb._handle] + self._six_sensor_handles, [], [], [])

    def capture_rgb(self) -> np.ndarray:
        """Retrieves the rgb-image of a spherical vision sensor.

        :return: A numpy array of size (width, height, 3)
        """
        rgb = self._sensor_rgb.capture_rgb()
        if self._flip_x:
            return np.flip(rgb, 1)
        return rgb

    def capture_depth(self, in_meters=False) -> np.ndarray:
        """Retrieves the depth-image of a spherical vision sensor.

        :param in_meters: Whether the depth should be returned in meters.
        :return: A numpy array of size (width, height)
        """
        planar_depth = self._sensor_depth.capture_rgb()[:, :, 0]*self._clipping_plane_diff + self._near_clipping_plane
        radial_depth = planar_depth * self._planar_to_radial_depth_scalars
        if in_meters:
            if self._flip_x:
                return np.flip(radial_depth, 1)
            return radial_depth
        scaled_depth = (radial_depth - self._near_clipping_plane)/self._clipping_plane_diff
        if self._flip_x:
            return np.flip(scaled_depth, 1)
        return scaled_depth

    def get_resolution(self) -> List[int]:
        """ Return the spherical vision sensor's resolution.

        :return: Resolution [x, y]
        """
        return self._sensor_depth.get_resolution()

    def set_resolution(self, resolution: List[int]) -> None:
        """ Set the spherical vision sensor's resolution.

        :param resolution: New resolution [x, y]
        """
        if resolution[0] != 2*resolution[1]:
            raise Exception('Spherical vision sensors must have an X resolution 2x the Y resolution')
        if resolution[1] % 2 != 0:
            raise Exception('Spherical vision sensors must have an even Y resolution')
        box_sensor_resolution = [int(resolution[1]/2), int(resolution[1]/2)]
        for sensor in self._six_sensors:
            sensor.set_resolution(box_sensor_resolution)
        self._sensor_depth.set_resolution(resolution)
        self._sensor_rgb.set_resolution(resolution)
        self._omni_resolution = resolution
        self._planar_to_radial_depth_scalars = self._get_planar_to_radial_depth_scalars()

    def get_render_mode(self) -> RenderMode:
        """ Retrieves the spherical vision sensor's rendering mode

        :return: RenderMode for the current rendering mode.
        """
        return self._sensor_depth.get_render_mode()

    def set_render_mode(self, render_mode: RenderMode) -> None:
        """ Set the spherical vision sensor's rendering mode

        :param render_mode: The new sensor rendering mode, one of:
            RenderMode.OPENGL
            RenderMode.OPENGL_AUXILIARY
            RenderMode.OPENGL_COLOR_CODED
            RenderMode.POV_RAY
            RenderMode.EXTERNAL
            RenderMode.EXTERNAL_WINDOWED
            RenderMode.OPENGL3
            RenderMode.OPENGL3_WINDOWED
        """
        self._sensor_depth.set_render_mode(render_mode)
        self._sensor_rgb.set_render_mode(render_mode)
        for sensor in self._six_sensors:
            sensor.set_render_mode(render_mode)

    def get_windowed_size(self) -> Sequence[int]:
        """Get the size of windowed rendering for the omni-directional image.

        :return: The (x, y) resolution of the window. 0 for full-screen.
        """
        return self._sensor_depth.get_windowed_size()

    def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
        """Set the size of windowed rendering for the omni-directional image.

        :param resolution: The (x, y) resolution of the window.
            0 for full-screen.
        """
        self._sensor_depth.set_windowed_size(resolution)
        self._sensor_rgb.set_windowed_size(resolution)

    def get_near_clipping_plane(self) -> float:
        """ Get the spherical depth sensor's near clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_near_clipping_plane()

    def set_near_clipping_plane(self, near_clipping: float) -> None:
        """ Set the spherical depth sensor's near clipping plane.

        :param near_clipping: New near clipping plane (in metres)
        """
        self._near_clipping_plane = near_clipping
        self._clipping_plane_diff = self._far_clipping_plane - near_clipping
        for sensor in self._six_sensors:
            sensor.set_near_clipping_plane(near_clipping)

    def get_far_clipping_plane(self) -> float:
        """ Get the Sensor's far clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_far_clipping_plane()

    def set_far_clipping_plane(self, far_clipping: float) -> None:
        """ Set the spherical depth sensor's far clipping plane.

        :param far_clipping: New far clipping plane (in metres)
        """
        self._far_clipping_plane = far_clipping
        self._clipping_plane_diff = far_clipping - self._near_clipping_plane
        for sensor in self._six_sensors:
            sensor.set_far_clipping_plane(far_clipping)

    def set_entity_to_render(self, entity_to_render: int) -> None:
        """ Set the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection. -1 to render all objects in scene.

        :param entity_to_render: Handle of the entity to render
        """
        self._sensor_depth.set_entity_to_render(entity_to_render)
        self._sensor_rgb.set_entity_to_render(entity_to_render)

    def get_entity_to_render(self) -> int:
        """ Get the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection. -1 if all objects in scene are rendered.

        :return: Handle of the entity to render
        """
        return self._sensor_depth.get_entity_to_render()
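
A minimal capture sketch for the sensor class above (a SphericalVisionSensor variant with optional horizontal flipping); this is a usage illustration rather than part of the original example, and the scene object name is hypothetical. Explicit handling lets images be captured without advancing the simulation:

sensor = SphericalVisionSensor('sphericalVisionRGBAndDepth')  # hypothetical scene name
sensor.handle_explicitly()                       # render without PyRep.step()
rgb = sensor.capture_rgb()                       # float RGB in [0, 1]
depth_m = sensor.capture_depth(in_meters=True)   # radial depth in metres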
Example no. 13
class environment(object):
    def __init__(self,
                 continuous_control=True,
                 obs_lowdim=True,
                 rpa=3,
                 frames=4):
        self.pr = PyRep()
        SCENE_FILE = join(dirname(abspath(__file__)), 'reacher_v2.ttt')
        self.pr.launch(SCENE_FILE, headless=True)
        self.pr.start()

        self.continuous_control = continuous_control
        self.target = Shape('target')
        self.tip = Dummy('Reacher_tip')
        self.camera = VisionSensor('Vision_sensor')
        self.agent = Reacher()

        self.done = False
        self.rpa = rpa
        self.obs_lowdim = obs_lowdim
        self.frames = frames
        self.prev_obs = []
        self.steps_ep = 0
        self.increment = 4 * pi / 180  # 4 degrees, expressed in radians
        self.action_all = [[self.increment, self.increment],
                           [-self.increment, -self.increment],
                           [0, self.increment], [0, -self.increment],
                           [self.increment, 0], [-self.increment, 0],
                           [-self.increment, self.increment],
                           [self.increment, -self.increment]]

    def threshold_check(self):
        for _ in range(5):
            self.reset_target_position(random_=True)
            while True:
                self.pr.step()
                tip_pos = self.tip_position()
                dist_tip_target = sqrt((tip_pos[0] - self.target_pos[0])**2 + \
                (tip_pos[1] - self.target_pos[1])**2)
                if dist_tip_target < 0.07:
                    reward = 1
                    self.done = True
                    break
                else:
                    reward = -dist_tip_target / 5
                print('Reward:%s' % reward)

    def render(self):
        return self.camera.capture_rgb() * 255  # (dim, dim, 3), scaled to [0, 255]

    def get_observation(self):
        if self.obs_lowdim:
            joints_pos = self.agent.get_joint_positions()

            cos_joints = []
            sin_joints = []
            for theta in joints_pos:
                cos_joints.append(cos(theta))
                sin_joints.append(sin(theta))

            joints_vel = self.agent.get_joint_velocities()

            target_pos = self.target_position()
            tip_pos = self.tip_position()
            tip_target_vec = np.array(tip_pos) - np.array(target_pos)

            return torch.tensor(
                np.concatenate((cos_joints, sin_joints, joints_pos, joints_vel,
                                tip_target_vec[0:2]),
                               axis=0)).float()

        else:
            new_obs = resize(self.render()).view(-1, 64, 64)

            if self.steps_ep == 0:
                obs = new_obs.expand(self.frames, 64, 64)
            else:
                obs = self.prev_obs
                for i in range(self.frames - 1):
                    obs[i, :, :] = obs[i + 1, :, :]

                obs[self.frames - 1, :, :] = new_obs

            self.prev_obs = obs
            return obs.view(-1, 64, 64)

    def step(self, action):
        self.steps_ep += 1
        # TODO: switch directly from position to velocity control without changing the scene
        for action_rep in range(self.rpa):
            if self.continuous_control:
                self.agent.set_joint_target_velocities(action)  # radians/s

            else:
                position_all = self.action_all[action]
                joints_pos = self.agent.get_joint_positions()
                self.agent.set_joint_target_positions([
                    joints_pos[0] + position_all[0],
                    joints_pos[1] + position_all[1]
                ])  # radians

            self.pr.step()

        tip_pos = self.tip_position()
        tip_target_dist = sqrt((tip_pos[0] - self.target_pos[0])**2 + \
        (tip_pos[1] - self.target_pos[1])**2)

        if tip_target_dist < 0.07:
            reward = 1
            self.done = True
        else:
            reward = -tip_target_dist / 3

        state = self.get_observation()

        return state, reward, self.done

    def tip_position(self):
        return self.tip.get_position()

    def target_position(self):
        return self.target.get_position()

    def reset_target_position(self, random_=False, x=0.4, y=0.4):
        if random_:
            xy_min = 0.12
            xy_max = 0.5325
            x = random.random() * (xy_max - xy_min) + xy_min
            y_max = sqrt(xy_max**2 - x**2)
            y_min = 0
            y = random.random() * (y_max - y_min) + y_min

            quadrant = random.randint(1, 4)
            if quadrant == 1:
                x = -x
                y = -y
            elif quadrant == 2:
                x = -x
                y = y
            elif quadrant == 3:
                x = x
                y = -y
            elif quadrant == 4:
                x = x
                y = y

        self.target.set_position([x, y, 0.0275])
        self.target_pos = self.target_position()
        self.done = False

    def reset_robot_position(self, random_=False, joints_pos=[0, 0]):
        if random_:
            joints_pos = [random.random() * 2 * pi for _ in range(2)]

        self.agent.set_joint_positions(joints_pos)  # radians

        if self.continuous_control:
            for _ in range(2):
                self.agent.set_joint_target_velocities([0, 0])
                self.pr.step()

    def sample_action(self):
        return [
            1.3 * (3 * random.random() - 1.5),
            1.3 * (3 * random.random() - 1.5)
        ]

    def display(self):
        img = self.camera.capture_rgb()
        plt.imshow(img, interpolation='nearest')
        plt.axis('off')
        plt.show()
        plt.pause(0.01)

    def random_agent(self, episodes=10):
        steps_all = []
        for _ in range(episodes):
            steps = 0
            while True:
                action = random.randrange(len(self.action_all))
                state, reward, done = self.step(action)

                steps += 1
                if steps == 40:
                    break

                if reward == 1:
                    steps_all.append(steps)
                    break

        return sum(steps_all) / episodes

    def terminate(self):
        self.pr.stop()  # Stop the simulation
        self.pr.shutdown()

    def reset(self):
        self.steps_ep = 0
        self.reset_target_position(random_=True)
        self.reset_robot_position(random_=True)
        state = self.get_observation()
        return state

    def action_space(self):
        return 2

    def action_type(self):
        return 'continuous'

    def observation_space(self):
        return self.get_observation().shape

    def step_limit(self):
        return 60
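
A minimal episode loop for the reacher environment above (a sketch, not part of the original example); it uses only methods the class defines and ends on success or at the step limit:

env = environment(continuous_control=True, obs_lowdim=True)
state = env.reset()
for t in range(env.step_limit()):
    state, reward, done = env.step(env.sample_action())  # random continuous action
    if done:
        break
env.terminate()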
Example no. 14
                              interpolate=True,
                              decal_mode=False,
                              repeat_along_u=True,
                              repeat_along_v=True,
                              uv_scaling=[1, 1])
                j.set_color([1, 1, 1])

            texture_id, texture_object = _get_texture(pr)
            color = [random() for _ in range(3)]
            for i in obj2:
                i.set_texture(texture=texture_id,
                              mapping_mode=TextureMappingMode.PLANE,
                              interpolate=True,
                              decal_mode=False)
                i.set_color([1, 1, 1])

            for _ in range(30):
                pr.step()
        else:
            for j in walls:
                j.remove_texture()
            for i in obj2:
                i.remove_texture()
                i.set_color([176 / 255, 58 / 255, 46 / 255])
            floor.remove_texture()
            for _ in range(30):
                pr.step()

        rgb_img_rand = camera_arm.capture_rgb()
        img = Image.fromarray(np.uint8(rgb_img_rand * 255), 'RGB')
        img.save('test_rcan%s.png' % (k))
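
The fragment above starts mid-function and calls an undefined helper _get_texture(pr). A minimal sketch of that helper, under the assumption that it wraps PyRep's create_texture (which loads an image file and returns a (Shape, Texture) pair); the texture path is a placeholder:

def _get_texture(pr):
    # Load a texture and return it in the (texture, shape) order the
    # fragment expects; 'textures/random.png' is a placeholder path.
    texture_shape, texture = pr.create_texture('textures/random.png')
    return texture, texture_shape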
Example no. 15
class EnvPollos(Env):
    def __init__(self, ep_length=100):
        """
        Pollos environment for testing purposes
        :param dim: (int) the size of the dimensions you want to learn
        :param ep_length: (int) the length of each episodes in timesteps
        """
        self.vrep = vrep
        logging.basicConfig(level=logging.DEBUG)
        # they have to be symmetric; we interpret actions as angular increments
        #self.action_space = Box(low=np.array([-0.1, -1.7, -2.5, -1.5, 0.0, 0.0]),
        #                        high=np.array([0.1, 1.7, 2.5, 1.5, 0.0, 0.0]))
        inc = 0.1
        self.action_space = Box(low=np.array([-inc, -inc, -inc, -inc, 0,
                                              -inc]),
                                high=np.array([inc, inc, inc, inc, 0, inc]))

        self.observation_space = Box(low=np.array([-0.5, 0.0, -0.2]),
                                     high=np.array([0.5, 1.0, 1.0]))

        #
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.agent = UR10()
        self.agent.max_velocity = 1
        self.agent.set_control_loop_enabled(True)
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.joints = [
            Joint('UR10_joint1'),
            Joint('UR10_joint2'),
            Joint('UR10_joint3'),
            Joint('UR10_joint4'),
            Joint('UR10_joint5'),
            Joint('UR10_joint6')
        ]
        #self.joints_limits = [[j.get_joint_interval()[1][0],j.get_joint_interval()[1][0]+j.get_joint_interval()[1][1]]
        #                      for j in self.joints]
        self.high_joints_limits = [0.1, 1.7, 2.7, 0.0, 0.02, 0.3]
        self.low_joints_limits = [-0.1, -0.2, 0.0, -1.5, -0.02, -0.5]
        self.agent_hand = Shape('hand')  # needed by the collision check in step()

        self.initial_agent_tip_position = self.agent.get_tip().get_position()
        self.initial_agent_tip_quaternion = self.agent.get_tip(
        ).get_quaternion()
        self.initial_agent_tip_euler = self.agent.get_tip().get_orientation()
        self.target = Dummy('UR10_target')
        self.initial_target_orientation = self.target.get_orientation()
        self.initial_joint_positions = self.agent.get_joint_positions()
        self.pollo_target = Dummy('pollo_target')
        self.pollo = Shape('pollo')
        #self.table_target = Dummy('table_target')
        self.initial_pollo_position = self.pollo.get_position()
        self.initial_pollo_orientation = self.pollo.get_quaternion()
        #self.table_target = Dummy('table_target')

        #self.succion = Shape('suctionPad')
        self.waypoints = [Dummy('dummy_gancho%d' % i) for i in range(4)]

        # objects
        self.scene_objects = [Shape('table0'), Shape('Plane'), Shape('Plane0'), Shape('ConcretBlock')]  # collision objects checked in step()
        #self.agent_tip = self.agent.get_tip()
        self.initial_distance = np.linalg.norm(
            np.array(self.initial_pollo_position) -
            np.array(self.initial_agent_tip_position))

        # camera
        self.camera = VisionSensor('kinect_depth')
        self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(
            self.camera.get_handle(), -1)
        self.np_camera_matrix_extrinsics = np.delete(
            np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
        width = 640.0
        height = 480.0
        angle = math.radians(57.0)
        focalx_px = (width / 2.0) / math.tan(angle / 2.0)
        focaly_px = (height / 2.0) / math.tan(angle / 2.0)
        self.np_camera_matrix_intrinsics = np.array([[-focalx_px, 0.0, 320.0],
                                                     [0.0, -focaly_px, 240.0],
                                                     [0.0, 0.0, 1.0]])

        self.reset()

    def reset(self):
        pos = list(
            np.random.uniform([-0.1, -0.1, 0.0], [0.1, 0.1, 0.1]) +
            self.initial_pollo_position)
        self.pollo.set_position(pos)
        self.pollo.set_quaternion(self.initial_pollo_orientation)
        self.agent.set_joint_positions(self.initial_joint_positions)
        self.initial_epoch_time = time.time()
        while True:  # wait for arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1):  # all joints (nearly) stopped
                break
        print("ENV RESET DONE")
        return self._get_state()

    def step(self, action):
        if action is None:
            self.pr.step()
            return self._get_state(), 0.0, False, {}

        # check for nan
        if np.any(np.isnan(action)):
            print(action)
            return self._get_state(), -1.0, False, {}

        # action[1] = np.interp(action[1], [-1,7,1.7], [-0.2,1.7])
        # action[2] = np.interp(action[2], [-2.5,2.5], [0,2.5])
        # action[3] = np.interp(action[3], [-1.5,1.5], [-1.5,0])

        # add actions as increments to current joints value
        new_joints = np.array(
            self.agent.get_joint_positions()) + np.array(action)

        # check limits
        if np.any(np.greater(new_joints, self.high_joints_limits)) or np.any(
                np.less(new_joints, self.low_joints_limits)):
            logging.debug("New joints value out of limits r=-10")
            return self._get_state(), -10.0, True, {}

        # move the robot and wait to stop
        self.agent.set_joint_target_positions(new_joints)
        reloj = time.time()
        while True:  # wait for arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1) or (time.time() - reloj) > 3:
                break

        # compute relevant magnitudes
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_robot_tip_position = np.array(self.agent.get_tip().get_position())
        dist = np.linalg.norm(np_pollo_target - np_robot_tip_position)
        altura = np.clip((np_pollo_target[2] - self.initial_pollo_position[2]),
                         0, 0.5)

        for obj in self.scene_objects:  # collision with the table
            if self.agent_hand.check_collision(obj):
                logging.debug("Collision with objects r=-10")
                return self._get_state(), -10.0, True, {}
        if altura > 0.3:  # chicken in hand
            logging.debug("Height reached !!! r=30")
            return self._get_state(), 30.0, True, {}
        elif dist > self.initial_distance:  # hand moving away from the chicken
            logging.debug("Hand moving away from chicken r=-10")
            return self._get_state(), -10.0, True, {}
        if time.time() - self.initial_epoch_time > 5:  # too much time
            logging.debug("Time out. End of epoch r=-10")
            return self._get_state(), -10.0, True, {}

        # Reward
        #pollo_height = np.exp(-altura*20)  # for 0.3 m pollo_height = 0.002, for 0 m pollo_height = 1
        reward = altura - dist
        logging.debug("Reward: %s", reward)
        return self._get_state(), reward, False, {}

    def close(self):
        self.pr.stop()
        self.pr.shutdown()

    def render(self):
        print("RENDER")
        np_pollo_en_camara = self.getPolloEnCamara()

        # capture depth image
        depth = self.camera.capture_rgb()
        circ = plt.Circle(
            (int(np_pollo_en_camara[0]), int(np_pollo_en_camara[1])), 10)
        plt.clf()
        fig = plt.gcf()
        ax = fig.gca()
        ax.add_artist(circ)
        ax.imshow(depth, cmap='hot')
        plt.pause(0.000001)

    # Aux

    # transform env.pollo_target.get_position() to camera coordinates and project it to image coordinates
    def getPolloEnCamara(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_target_cam = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        np_pollo_en_camara = self.np_camera_matrix_intrinsics.dot(
            np_pollo_target_cam)
        np_pollo_en_camara = np_pollo_en_camara / np_pollo_en_camara[2]
        np_pollo_en_camara = np.delete(np_pollo_en_camara, 2)
        return np_pollo_en_camara

    def getPolloEnCamaraEx(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_en_camara = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        return np_pollo_en_camara

    def _get_state(self):
        # Return state containing arm joint angles/velocities & target position
        # return (self.agent.get_joint_positions() +
        #         self.agent.get_joint_velocities() +
        #         self.pollo_target.get_position())
        p = self.getPolloEnCamaraEx()
        j = self.agent.get_joint_positions()
        #r = np.array([p[0],p[1],p[2],j[0],j[1],j[2],j[3],j[4],j[5]])
        r = np.array([p[0], p[1], p[2]])
        return r

    def vrepToNP(self, c):
        return np.array([[c[0], c[4], c[8], c[3]], [c[1], c[5], c[9], c[7]],
                         [c[2], c[6], c[10], c[11]], [0, 0, 0, 1]])
Example no. 16
class PyrepDevice(object):
    """
    PyrepDevice handles communication with CoppeliaSim vision sensors via
    PyRep. Retrieved images are converted to cv2 format and passed to registered
    callback functions.
    """

    @staticmethod
    def autodetect_nicoeyes():
        """
        Returns a tuple containing the detected names of left and right eye
        sensor

        :return: Devicenames as (left, right) tuple (None if not found)
        :rtype: tuple
        """
        eye_sensors = ["left_eye", "right_eye"]
        for i in range(len(eye_sensors)):
            if not VisionSensor.exists(eye_sensors[i]):
                logger.warning(
                    "Could not find vision sensor named '%s'", eye_sensors[i]
                )
                eye_sensors[i] = None
        return tuple(eye_sensors)

    def __init__(
        self,
        sensor_name,
        width=640,
        height=480,
        near_clipping_plane=1e-2,
        far_clipping_plane=10.0,
        view_angle=60.0,
    ):
        """
        Connects to Vision Sensor with given name and updates parameters
        accordingly.

        :param sensor_name: name (or handle) of the vision sensor in the scene
        :type sensor_name: str
        :param width: horizontal camera resolution
        :type width: int
        :param height: vertical camera resolution
        :type height: int
        :param near_clipping_plane: the minimum distance from which the sensor will be able to detect.
        :type near_clipping_plane: float
        :param far_clipping_plane: the maximum distance from which the sensor will be able to detect.
        :type far_clipping_plane: float
        :param view_angle: field of view of the camera in degree.
        :type view_angle: float
        """
        self._sensor = VisionSensor(sensor_name)
        self.resolution = (width, height)
        self.near_clipping_plane = near_clipping_plane
        self.far_clipping_plane = far_clipping_plane
        self.view_angle = view_angle
        self._callback = []
        self._removed_ids = []
        ref = weakref.ref(self, StepListener.unregister)
        StepListener.register(ref)

    @property
    def sensor_name(self):
        """
        :return: name of the sensor in the scene
        :rtype: str
        """
        return self._sensor.get_name()

    @property
    def resolution(self):
        """
        :return: camera resolution (width, height)
        :rtype: tuple(int, int)
        """
        return tuple(self._sensor.get_resolution())

    @resolution.setter
    def resolution(self, res):
        """
        :param res: camera resolution (width, height)
        :type res: tuple(int, int)
        """
        self._sensor.set_resolution(res)

    @property
    def near_clipping_plane(self):
        """
        :return: the minimum distance from which the sensor will be able to detect.
        :rtype: float
        """
        return self._sensor.get_near_clipping_plane()

    @near_clipping_plane.setter
    def near_clipping_plane(self, val):
        """
        :param val: the minimum distance from which the sensor will be able to detect.
        :type val: float
        """
        self._sensor.set_near_clipping_plane(val)

    @property
    def far_clipping_plane(self):
        """
        :return: the maximum distance from which the sensor will be able to detect.
        :rtype: float
        """
        return self._sensor.get_far_clipping_plane()

    @far_clipping_plane.setter
    def far_clipping_plane(self, val):
        """
        :param val: the maximum distance from which the sensor will be able to detect.
        :type val: float
        """
        self._sensor.set_far_clipping_plane(val)

    @property
    def view_angle(self):
        """
        :return: field of view of the camera in degree.
        :rtype: float
        """
        return self._sensor.get_perspective_angle()

    @view_angle.setter
    def view_angle(self, val):
        """
        :param val: field of view of the camera in degree.
        :type val: float
        """
        self._sensor.set_perspective_angle(val)

    def get_image(self):
        """
        Captures an image for the current simulation step from the
        vision sensor and converts it to cv2 format

        :return: the image
        :rtype: cv2 image
        """
        frame = self._sensor.capture_rgb()
        frame = np.uint8(frame[:, :, [2, 1, 0]] * 255)
        return frame

    def step(self):
        """
        Executes all registered callbacks with the sensor image for the current
        simulation step.
        """
        frame = self.get_image()
        id_start = 0
        # iterate through callbacks while skipping removed ids (if any)
        for id_end in self._removed_ids:
            for id in range(id_start, id_end):
                self._callback[id](frame)
            id_start = id_end
        # iterate through (remaining) callbacks
        for id in range(id_start, len(self._callback)):
            self._callback[id](frame)

    def add_callback(self, function):
        """
        Adds a callback for the event loop

        Whenever step is called, all registered callbacks are executed.

        The callback must take exactly 1 argument through which it receives the
        frame.

        :param function: Function to add as callback
        :type function: function

        :return: id of the added callback
        :rtype: int
        """
        if not (inspect.isfunction(function) or inspect.ismethod(function)):
            logger.error("Trying to add non-function callback")
            raise ValueError("Trying to add non-function callback")
        if len(self._removed_ids) == 0:
            self._callback += [function]
            id = len(self._callback) - 1
        else:
            id = min(self._removed_ids)
            self._callback[id] = function
            self._removed_ids.remove(id)
        logger.info("Added callback with id %i", id)
        return id

    def remove_callback(self, id):
        """
        Removes callback with given id

        :param id: id of the callback
        :type id: int
        """
        if id in self._removed_ids or id not in range(0, len(self._callback)):
            logger.warning("Callback with id %i does not exist", id)
            return
        if id == len(self._callback) - 1:
            i = id
            for removed in reversed(self._removed_ids):
                if i - 1 != removed:
                    break
                self._removed_ids = self._removed_ids[:-1]
                i -= 1
            self._callback = self._callback[:i]
        else:
            self._callback[id] = None
            self._removed_ids += [id]
            self._removed_ids.sort()
        logger.info("Removed callback with id %i", id)

    def clean_callbacks(self):
        """
        Removes all saved callbacks
        """
        self._callback.clear()
        self._removed_ids.clear()
        logger.info("Cleared callbacks")
Example no. 17
class Env:
    def __init__(self, cfg):
        self.enabled_joints = cfg.agent.enabled_joints
        self._launch(cfg.env.env_path, cfg.env.headless)
        self._setup_robot()
        self._setup_vision("Vision_sensor")
        self._setup_actions(cfg.agent.n_discrete_actions, cfg.agent.vel_min,
                            cfg.agent.vel_max)
        self._setup_debug_cameras("debug_vis1", "debug_vis2")
        self._setup_target()
        self._setup_distractor()
        self._setup_table()

    def _setup_distractor(self):
        self.distractor = Shape('distractor')
        self.hide_distractor()

    def _setup_table(self):
        self.table = Shape("diningTable")
        self.table_near = False
        self.table_initial_pos = self.table.get_position()

    def _setup_target(self):
        self.target = Shape('target')

    def _setup_robot(self):
        self.robot = Panda()
        self.joint_init = self.robot.get_joint_positions()
        self.robot.set_control_loop_enabled(False)
        self.robot.set_motor_locked_at_zero_velocity(True)
        self.tip = self.robot.get_tip()

    def _setup_vision(self, vis_name):
        self.vision = VisionSensor(vis_name)

    def _launch(self, path, headless):
        self.pr = PyRep()
        self.pr.launch(path, headless=headless)
        self.pr.start()

    def _setup_actions(self, n_discrete_actions, vel_min, vel_max):
        self.poss_actions = np.linspace(vel_min, vel_max, n_discrete_actions)

    def _convert_action(self, action):
        return self.poss_actions[action]

    def _setup_debug_cameras(self, name0, name1):
        self.vis_debug0 = VisionSensor(name0)
        self.vis_debug1 = VisionSensor(name1)

    def step(self, action):
        converted_action = self._convert_action(action)
        action = np.zeros(7)
        action[self.enabled_joints] = converted_action
        self.robot.set_joint_target_velocities(action)
        self.pr.step()
        reward = self._calculate_reward()
        rgb = self.vision.capture_rgb()
        done = False

        # todo: change to include more meaningful info
        return rgb, reward, done, {}

    def show_distractor(self):
        self.distractor.set_position([0.15, 0., 1.])

    def hide_distractor(self):
        self.distractor.set_position([-1.5, 0., 1.])

    def toggle_table(self):
        if self.table_near:
            self._move_table_far()
        else:
            self._move_table_near()

    def _move_table_near(self):
        print("moving table near")
        pos = self.table_initial_pos
        pos[0] -= 1
        self.table.set_position(pos)

    def _move_table_far(self):
        self.table.set_position(self.table_initial_pos)

    def _calculate_reward(self):
        ax, ay, az = self.tip.get_position()
        tx, ty, tz = self.target.get_position()
        # Reward is negative distance to target
        reward = -np.sqrt((ax - tx)**2 + (ay - ty)**2 + (az - tz)**2)

        return reward

    def reset(self):
        self.robot.set_joint_positions(self.joint_init)
        rgb = self.vision.capture_rgb()

        return rgb

    def get_debug_images(self):
        debug0 = self.vis_debug0.capture_rgb()
        debug1 = self.vis_debug1.capture_rgb()

        return debug0, debug1

    def get_joint_positions(self):
        pos = np.array(self.robot.get_joint_positions())
        pos = pos[self.enabled_joints]

        return pos
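
A minimal driving loop for the Env above (a sketch, assuming a Hydra-style cfg with the fields the constructor reads; the scene path and values below are hypothetical):

from types import SimpleNamespace
import numpy as np

cfg = SimpleNamespace(
    env=SimpleNamespace(env_path='scene.ttt', headless=True),  # hypothetical scene
    agent=SimpleNamespace(enabled_joints=[0, 1, 2], n_discrete_actions=5,
                          vel_min=-1.0, vel_max=1.0))
env = Env(cfg)
rgb = env.reset()
for t in range(200):
    action = np.random.randint(cfg.agent.n_discrete_actions)  # discrete action index
    rgb, reward, done, info = env.step(action)
env.pr.stop()
env.pr.shutdown()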
Example no. 18
class PioneerEnv(object):
    def __init__(self,
                 escene_name='proximity_sensor.ttt',
                 start=[100, 100],
                 goal=[180, 500],
                 rand_area=[100, 450],
                 path_resolution=5.0,
                 margin=0.,
                 margin_to_goal=0.5,
                 action_max=[.5, 1.],
                 action_min=[0., 0.],
                 _load_path=True,
                 path_name="PathNodes",
                 type_of_planning="PID",
                 type_replay_buffer="random",
                 max_laser_range=1.0,
                 headless=False):

        SCENE_FILE = join(dirname(abspath(__file__)), escene_name)

        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=headless)
        self.pr.start()
        self.agent = Pioneer("Pioneer_p3dx",
                             type_of_planning=type_of_planning,
                             type_replay_buffer=type_replay_buffer)
        self.agent.set_control_loop_enabled(False)
        self.initial_joint_positions = self.agent.get_joint_positions()
        self.initial_position = self.agent.get_position()
        print(f"Agent initial position: {self.initial_position}")

        self.vision_map = VisionSensor("VisionMap")
        self.vision_map.handle_explicitly()
        self.vision_map_handle = self.vision_map.get_handle()

        self.floor = Shape("Floor_respondable")

        self.perspective_angle = self.vision_map.get_perspective_angle()  # degrees
        self.resolution = self.vision_map.get_resolution()  # list [resx, resy]
        self.margin = margin
        self.margin_to_goal = margin_to_goal
        self.rand_area = rand_area
        self.start = start
        self.goal = goal
        self.type_of_planning = type_of_planning
        self.max_laser_range = max_laser_range
        self.max_angle_orientation = np.pi
        scene_image = self.vision_map.capture_rgb() * 255
        scene_image = np.flipud(scene_image)

        self.rew_weights = [1, 10000, 5000]

        self.start_position = Dummy("Start").get_position()
        self.goal_position = Dummy("Goal").get_position()  # [x, y, z]

        self.local_goal_aux = Dummy("Local_goal_aux")

        self.local_goal_aux_pos_list = [[-3.125, 2.175, 0], [2.9, 3.575, 0],
                                        self.goal_position, self.goal_position]
        self.ind_local_goal_aux = 0

        self.max_distance = get_distance([-7.5, -4.1, 0.], self.goal_position)
        self.distance_to_goal_m1 = get_distance(self.start_position,
                                                self.goal_position)

        self.c_lin_vel = self.c_ang_vel = self.b_lin_vel = self.b_ang_vel = 0.

        self.action_max = action_max
        self.action_min = action_min

        if type_of_planning == 'PID':
            self.planning_info_logger = get_logger("./loggers",
                                                   "Planning_Info.log")
            self.image_path = None
            if exists("./paths/" + path_name + ".npy") and _load_path:
                print("Load Path..")
                self.image_path = np.load("./paths/" + path_name + ".npy",
                                          allow_pickle=True)
            else:
                print("Planning...")
                self.image_path = self.Planning(
                    scene_image,
                    self.start,
                    self.goal,
                    self.rand_area,
                    path_resolution=path_resolution,
                    logger=self.planning_info_logger)

            assert self.image_path is not None, "path should not be a Nonetype"

            self.real_path = self.path_image2real(self.image_path,
                                                  self.start_position)
            # project in coppelia sim
            sim_drawing_points = 0
            point_size = 10  #[pix]
            duplicate_tolerance = 0
            parent_obj_handle = self.floor.get_handle()
            max_iter_count = 999999
            ambient_diffuse = (255, 0, 0)
            blue_ambient_diffuse = (0, 0, 255)
            point_container = sim.simAddDrawingObject(
                sim_drawing_points,
                point_size,
                duplicate_tolerance,
                parent_obj_handle,
                max_iter_count,
                ambient_diffuse=ambient_diffuse)

            local_point_container = sim.simAddDrawingObject(
                sim_drawing_points,
                point_size,
                duplicate_tolerance,
                parent_obj_handle,
                max_iter_count,
                ambient_diffuse=blue_ambient_diffuse)

            # debug
            for point in self.real_path:
                point_data = (point[0], point[1], 0)
                sim.simAddDrawingObjectItem(point_container, point_data)
            # You need to get the real coord in the real world

            assert local_point_container is not None, "point container shouldn't be empty"
            self.agent.load_point_container(local_point_container)
            self.agent.load_path(self.real_path)

    def reset(self):
        self.pr.stop()
        if self.type_of_planning == 'PID':
            self.agent.local_goal_reset()
        self.pr.start()
        self.pr.step()
        sensor_state, distance_to_goal, orientation_to_goal = self._get_state()
        self.distance_to_goal_m1 = distance_to_goal
        self.orientation_to_goal_m1 = orientation_to_goal
        sensor_state_n, distance_to_goal, orientation_to_goal = self.normalize_states(
            sensor_state, distance_to_goal, orientation_to_goal)
        # pos_x, pos_y = self.agent.get_position()[:-1]
        return np.hstack((sensor_state_n, distance_to_goal,
                          orientation_to_goal)), sensor_state

    def step(self, action, pf_f=0):
        self.agent.set_joint_target_velocities(action)
        self.pr.step()  # Step the physics simulation
        scene_image = self.vision_map.capture_rgb() * 255  # numpy -> [w, h, 3]
        sensor_state, distance_to_goal, orientation_to_goal = self._get_state()
        self.b_lin_vel, self.b_ang_vel = self.c_lin_vel, self.c_ang_vel
        self.c_lin_vel, self.c_ang_vel = self.agent.get_velocity(
        )  # 3d coordinates
        self.c_lin_vel = np.linalg.norm(self.c_lin_vel)
        self.c_ang_vel = self.c_ang_vel[-1]  # w/r z
        reward, done = self._get_reward(sensor_state, distance_to_goal,
                                        orientation_to_goal, pf_f)
        sensor_state_n, distance_to_goal, orientation_to_goal = self.normalize_states(
            sensor_state, distance_to_goal, orientation_to_goal)
        # pos_x, pos_y = self.agent.get_position()[:-1]
        state = np.hstack(
            (sensor_state_n, distance_to_goal, orientation_to_goal))
        return state, reward, scene_image, done, sensor_state

    def _get_state(self):
        sensor_state = np.array([
            proxy_sensor.read()
            for proxy_sensor in self.agent.proximity_sensors
        ])  # list of distances. -1 if not detect anything
        goal_transformed = world_to_robot_frame(
            self.agent.get_position(), self.goal_position,
            self.agent.get_orientation()[-1])
        # distance_to_goal = get_distance(goal_transformed[:-1], np.array([0,0])) # robot frame
        distance_to_goal = get_distance(self.agent.get_position()[:-1],
                                        self.goal_position[:-1])
        orientation_to_goal = np.arctan2(goal_transformed[1],
                                         goal_transformed[0])
        return sensor_state, distance_to_goal, orientation_to_goal

    def _get_reward(self,
                    sensor_state,
                    distance_to_goal,
                    orientation_to_goal,
                    pf_f=0):
        done = False

        r_local_goal = self.update_local_goal_aux()

        r_target = -(distance_to_goal - self.distance_to_goal_m1)
        r_vel = self.c_lin_vel / self.action_max[0]
        # r_orientation = - (orientation_to_goal - self.orientation_to_goal_m1)
        reward = self.rew_weights[0] * (r_local_goal + r_vel)  # - pf_f/20)
        # distance_to_goal = get_distance(agent_position[:-1], goal[:-1])
        self.distance_to_goal_m1 = distance_to_goal
        self.orientation_to_goal_m1 = orientation_to_goal
        # collision check
        if self.collision_check(sensor_state, self.margin):
            reward = -1 * self.rew_weights[1]
            done = True
        # goal achievement
        if distance_to_goal < self.margin_to_goal:
            reward = self.rew_weights[2]
            done = True
        return reward, done

    def shutdown(self):
        self.pr.stop()
        self.pr.shutdown()

    def model_update(self, method="DDPG", pretraining_loop=False):

        if method == "DDPG": self.agent.trainer.update()
        elif method == "IL":
            loss = self.agent.trainer.IL_update()
            return loss
        elif method == "CoL":
            loss = self.agent.trainer.CoL_update(pretraining_loop)
        else:
            raise NotImplementedError()

    def normalize_states(self, sensor_state, distance_to_goal,
                         orientation_to_goal):
        sensor_state = self.normalize_laser(sensor_state, self.norm_func)
        distance_to_goal = self.norm_func(distance_to_goal, self.max_distance)
        orientation_to_goal = self.norm_func(orientation_to_goal,
                                             self.max_angle_orientation)
        return sensor_state, distance_to_goal, orientation_to_goal

    def normalize_laser(self, obs, norm_func):
        return np.array([
            norm_func(o, self.max_laser_range) if o >= 0 else -1 for o in obs
        ])

    def norm_func(self, x, max_value):
        return 2 * (1 - min(x, max_value) / max_value) - 1

    def set_margin(self, margin):
        self.margin = margin

    # def set_start_goal_position(self, start_pos, goal_pos):

    def update_local_goal_aux(self):
        pos = self.agent.get_position()[:-1]
        distance = get_distance(pos, self.local_goal_aux.get_position()[:-1])
        if distance < 0.5:
            self.local_goal_aux.set_position(
                self.local_goal_aux_pos_list[self.ind_local_goal_aux])
            self.ind_local_goal_aux += 1
        return -1 * distance**2

    @staticmethod
    def collision_check(observations, margin):
        if observations.sum() == -1 * observations.shape[0]:
            return False
        elif observations[observations >= 0].min() < margin:
            return True
        else:
            return False

    @staticmethod
    def Planning(Map,
                 start,
                 goal,
                 rand_area,
                 path_resolution=5.0,
                 logger=None):
        """
        :parameter Map(ndarray): Image that planning over with
        """
        Map = Image.fromarray(Map.astype(np.uint8)).convert('L')
        path, n_paths = main(Map,
                             start,
                             goal,
                             rand_area,
                             path_resolution=path_resolution,
                             logger=logger,
                             show_animation=False)

        if path is not None:
            np.save("./paths/PathNodes_" + str(n_paths) + ".npy", path)
            return path
        else:
            logger.info("Not found Path")
            return None

    @staticmethod
    def path_image2real(image_path, start):
        """
        image_path: np.array [pix], points of the image path
        start: np.array, world-frame start position
        """
        scale = 13.0 / 512  # [m/pix]

        x_init = start[0]
        y_init = start[1]
        deltas = [(image_path[i + 1][0] - image_path[i][0],
                   image_path[i + 1][1] - image_path[i][1])
                  for i in range(image_path.shape[0] - 1)]

        path = np.zeros((image_path.shape[0], 3))
        path[0, :] = np.array([x_init, y_init, 0])
        for i in range(1, image_path.shape[0]):
            path[i, :] = np.array([
                path[i - 1, 0] + deltas[i - 1][0] * scale,
                path[i - 1, 1] - deltas[i - 1][1] * scale, 0
            ])

        rot_mat = np.diagflat([-1, -1, 1])
        tras_mat = np.zeros_like(path)
        tras_mat[:, 0] = 0.3
        tras_mat[:, 1] = 4.65
        path = path @ rot_mat + tras_mat
        return np.flip(path, axis=0)
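
A worked sketch of path_image2real, which converts pixel waypoints to world coordinates at a 13.0/512 m/pix scale and then mirrors and offsets them into the scene frame; the two pixel waypoints below are hypothetical:

import numpy as np

image_path = np.array([[100, 100], [105, 100]])  # hypothetical pixel waypoints
real_path = PioneerEnv.path_image2real(image_path, start=[0.0, 0.0])
print(real_path)  # (2, 3) array of [x, y, 0] world points, in reversed order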
Example no. 19
class Camera():
    def __init__(self,
                 name,
                 resolution,
                 perspective_angle,
                 min_depth,
                 max_depth,
                 near_clipping=0.2,
                 far_clipping=3):
        self.base = Dummy(name)
        self.cam_rgb = VisionSensor(name + "_rgb")
        self.cam_depth = VisionSensor(name + "_depth")
        self.cam_mask = VisionSensor(name + "_mask")
        self.cam_all = [self.cam_rgb, self.cam_depth, self.cam_mask]
        self.min_depth = min_depth
        self.max_depth = max_depth
        self.set_resolution(resolution)
        self.set_perspective_angle(perspective_angle)
        self.set_near_clipping_plane(near_clipping)
        self.set_far_clipping_plane(far_clipping)

    def get_resolution(self):
        return self._resolution

    def set_resolution(self, resolution):
        for cam in self.cam_all:
            cam.set_resolution(resolution)
        self._resolution = resolution

    def get_perspective_angle(self):
        return self._perspective_angle

    def set_perspective_angle(self, angle: float):
        for cam in self.cam_all:
            cam.set_perspective_angle(angle)
        self._perspective_angle = angle

    def set_near_clipping_plane(self, near_clipping):
        self.cam_rgb.set_near_clipping_plane(near_clipping)
        self.cam_mask.set_near_clipping_plane(near_clipping)
        self.cam_depth.set_near_clipping_plane(self.min_depth)

    def set_far_clipping_plane(self, far_clipping):
        self.cam_rgb.set_far_clipping_plane(far_clipping)
        self.cam_mask.set_far_clipping_plane(far_clipping)
        self.cam_depth.set_far_clipping_plane(self.max_depth)

    def capture_rgb(self):
        return np.uint8(255 * self.cam_rgb.capture_rgb())[:, :, ::-1]

    def capture_depth(self):
        return np.uint8(255 * self.cam_depth.capture_depth())

    def capture_mask(self):
        mask = np.uint8(255 * self.cam_mask.capture_rgb())
        mask = mask[:, :, 1]
        mask[mask != 0] = 1
        return mask

    def set_pose(self, pose, relative_to=None):
        pos = pose[:3]
        qx, qy, qz, qw = pose[3:]
        quat_ori = Quaternion([qw, qx, qy, qz])
        quat_rotate_z = Quaternion(axis=[0, 0, 1], angle=np.radians(180))
        quat = quat_ori * quat_rotate_z
        qw, qx, qy, qz = quat.elements
        quat = [qx, qy, qz, qw]
        # quat = pose[3:]
        self.base.set_pose([*pos] + [*quat], relative_to=relative_to)

    def get_image(self):
        rgb = self.capture_rgb()
        depth = self.capture_depth()
        mask = self.capture_mask()
        return rgb, depth, mask
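
A minimal usage sketch for the Camera wrapper above (not part of the original example), assuming the scene contains the four objects the constructor looks up ('cam0', 'cam0_rgb', 'cam0_depth', 'cam0_mask'):

cam = Camera('cam0', resolution=[640, 480], perspective_angle=60.0,
             min_depth=0.2, max_depth=3.0)
rgb, depth, mask = cam.get_image()  # BGR uint8, scaled uint8 depth, binary {0, 1} mask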
Example no. 20
class SphericalVisionSensor(Object):
    """An object able capture 360 degree omni-directional images.
    """
    def __init__(self, name):
        super().__init__(name)

        # final omni-directional sensors
        self._sensor_depth = VisionSensor('%s_sensorDepth' % (self.get_name()))
        self._sensor_rgb = VisionSensor('%s_sensorRGB' % (self.get_name()))

        # directed sub-sensors
        names = ['front', 'top', 'back', 'bottom', 'left', 'right']
        self._six_sensors = [
            VisionSensor('%s_%s' % (self.get_name(), n)) for n in names
        ]
        self._front = self._six_sensors[0]

        # directed sub-sensor handles list
        self._six_sensor_handles = [
            vs.get_handle() for vs in self._six_sensors
        ]

        # set all to explicit handling
        self._set_all_to_explicit_handling()

        # assert sensor properties are matching
        self._assert_matching_resolutions()
        self._assert_matching_render_modes()
        self._assert_matching_entities_to_render()
        self._assert_matching_window_sizes()
        self._assert_matching_near_clipping_planes()
        self._assert_matching_far_clipping_planes()

        # near and far clipping plane
        self._near_clipping_plane = self.get_near_clipping_plane()
        self._far_clipping_plane = self.get_far_clipping_plane()
        self._clipping_plane_diff = self._far_clipping_plane - self._near_clipping_plane

    # Private #
    # --------#

    def _set_all_to_explicit_handling(self):
        self._sensor_depth.set_explicit_handling(1)
        self._sensor_rgb.set_explicit_handling(1)
        for sensor in self._six_sensors:
            sensor.set_explicit_handling(1)

    def _assert_matching_resolutions(self):
        assert self._sensor_depth.get_resolution(
        ) == self._sensor_rgb.get_resolution()
        front_sensor_res = self._front.get_resolution()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_resolution() == front_sensor_res

    def _assert_matching_render_modes(self):
        assert self._sensor_depth.get_render_mode(
        ) == self._sensor_rgb.get_render_mode()
        front_sensor_render_mode = self._front.get_render_mode()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_render_mode() == front_sensor_render_mode

    def _assert_matching_entities_to_render(self):
        assert self._sensor_depth.get_entity_to_render(
        ) == self._sensor_rgb.get_entity_to_render()
        front_sensor_entity_to_render = self._front.get_entity_to_render()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_entity_to_render(
            ) == front_sensor_entity_to_render

    def _assert_matching_window_sizes(self):
        assert self._sensor_depth.get_windowed_size(
        ) == self._sensor_rgb.get_windowed_size()

    def _assert_matching_near_clipping_planes(self):
        front_sensor_near = self._front.get_near_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_near_clipping_plane() == front_sensor_near

    def _assert_matching_far_clipping_planes(self):
        front_sensor_far = self._front.get_far_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_far_clipping_plane() == front_sensor_far

    def _get_requested_type(self) -> ObjectType:
        return ObjectType(sim.simGetObjectType(self.get_handle()))

    # Public #
    # -------#

    def handle_explicitly(self) -> None:
        """Handle spherical vision sensor explicitly.

          This enables capturing image (e.g., capture_rgb())
          without PyRep.step().
        """
        utils.script_call(
            'handleSpherical@PyRep', PYREP_SCRIPT_TYPE,
            [self._sensor_depth._handle, self._sensor_rgb._handle] +
            self._six_sensor_handles, [], [], [])

    def capture_rgb(self) -> np.ndarray:
        """Retrieves the rgb-image of a spherical vision sensor.

        :return: A numpy array of size (width, height, 3)
        """
        return self._sensor_rgb.capture_rgb()

    def capture_depth(self, in_meters=False) -> np.ndarray:
        """Retrieves the depth-image of a spherical vision sensor.

        :param in_meters: Whether the depth should be returned in meters.
        :return: A numpy array of size (width, height)
        """
        if in_meters:
            return self._sensor_depth.capture_rgb(
            )[:, :, 0] * self._clipping_plane_diff + self._near_clipping_plane
        return self._sensor_depth.capture_rgb()[:, :, 0]

    def get_resolution(self) -> List[int]:
        """ Return the spherical vision sensor's resolution.

        :return: Resolution [x, y]
        """
        return self._sensor_depth.get_resolution()

    def set_resolution(self, resolution: List[int]) -> None:
        """ Set the spherical vision sensor's resolution.

        :param resolution: New resolution [x, y]
        """
        if resolution[0] != 2 * resolution[1]:
            raise Exception(
                'Spherical vision sensors must have an X resolution 2x the Y resolution'
            )
        if resolution[1] % 2 != 0:
            raise Exception(
                'Spherical vision sensors must have an even Y resolution')
        box_sensor_resolution = [
            int(resolution[1] / 2),
            int(resolution[1] / 2)
        ]
        for sensor in self._six_sensors:
            sensor.set_resolution(box_sensor_resolution)
        self._sensor_depth.set_resolution(resolution)
        self._sensor_rgb.set_resolution(resolution)

    def get_render_mode(self) -> RenderMode:
        """ Retrieves the spherical vision sensor's rendering mode

        :return: RenderMode for the current rendering mode.
        """
        return self._sensor_depth.get_render_mode()

    def set_render_mode(self, render_mode: RenderMode) -> None:
        """ Set the spherical vision sensor's rendering mode

        :param render_mode: The new sensor rendering mode, one of:
            RenderMode.OPENGL
            RenderMode.OPENGL_AUXILIARY
            RenderMode.OPENGL_COLOR_CODED
            RenderMode.POV_RAY
            RenderMode.EXTERNAL
            RenderMode.EXTERNAL_WINDOWED
            RenderMode.OPENGL3
            RenderMode.OPENGL3_WINDOWED
        """
        self._sensor_depth.set_render_mode(render_mode)
        self._sensor_rgb.set_render_mode(render_mode)
        for sensor in self._six_sensors:
            sensor.set_render_mode(render_mode)

    def get_windowed_size(self) -> Sequence[int]:
        """Get the size of windowed rendering for the omni-directional image.

        :return: The (x, y) resolution of the window. 0 for full-screen.
        """
        return self._sensor_depth.get_windowed_size()

    def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
        """Set the size of windowed rendering for the omni-directional image.

        :param resolution: The (x, y) resolution of the window.
            0 for full-screen.
        """
        self._sensor_depth.set_windowed_size(resolution)
        self._sensor_rgb.set_windowed_size(resolution)

    def get_near_clipping_plane(self) -> float:
        """ Get the spherical depth sensor's near clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_near_clipping_plane()

    def set_near_clipping_plane(self, near_clipping: float) -> None:
        """ Set the spherical depth sensor's near clipping plane.

        :param near_clipping: New near clipping plane (in metres)
        """
        self._near_clipping_plane = near_clipping
        self._clipping_plane_diff = self._far_clipping_plane - near_clipping
        for sensor in self._six_sensors:
            sensor.set_near_clipping_plane(near_clipping)

    def get_far_clipping_plane(self) -> float:
        """ Get the Sensor's far clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_far_clipping_plane()

    def set_far_clipping_plane(self, far_clipping: float) -> None:
        """ Set the spherical depth sensor's far clipping plane.

        :param far_clipping: New far clipping plane (in metres)
        """
        self._far_clipping_plane = far_clipping
        self._clipping_plane_diff = far_clipping - self._near_clipping_plane
        for sensor in self._six_sensors:
            sensor.set_far_clipping_plane(far_clipping)

    def set_entity_to_render(self, entity_to_render: int) -> None:
        """ Set the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection. -1 to render all objects in scene.

        :param entity_to_render: Handle of the entity to render
        """
        self._sensor_depth.set_entity_to_render(entity_to_render)
        self._sensor_rgb.set_entity_to_render(entity_to_render)

    def get_entity_to_render(self) -> int:
        """ Get the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection. -1 if all objects in scene are rendered.

        :return: Handle of the entity to render
        """
        return self._sensor_depth.get_entity_to_render()
Example no. 21
class TestVisionSensors(TestCore):
    def setUp(self):
        super().setUp()
        for _ in range(10):
            self.pyrep.step()
        self.cam = VisionSensor('cam0')

    def test_handle_explicitly(self):
        cam = VisionSensor.create((640, 480))

        # blank image
        rgb = cam.capture_rgb()
        self.assertEqual(rgb.sum(), 0)

        # non-blank image
        cam.set_explicit_handling(value=1)
        cam.handle_explicitly()
        rgb = cam.capture_rgb()
        self.assertNotEqual(rgb.sum(), 0)

        cam.remove()

    def test_capture_rgb(self):
        img = self.cam.capture_rgb()
        self.assertEqual(img.shape, (16, 16, 3))
        #  Check that it's not a blank image
        self.assertFalse(img.min() == img.max() == 0.0)

    def test_capture_depth(self):
        img = self.cam.capture_depth()
        self.assertEqual(img.shape, (16, 16))
        # Check that it's not a blank depth map
        self.assertFalse(img.min() == img.max() == 1.0)

    def test_create(self):
        cam = VisionSensor.create([640, 480],
                                  perspective_mode=True,
                                  view_angle=35.0,
                                  near_clipping_plane=0.25,
                                  far_clipping_plane=5.0,
                                  render_mode=RenderMode.OPENGL3)
        self.assertEqual(cam.capture_rgb().shape, (480, 640, 3))
        self.assertEqual(cam.get_perspective_mode(),
                         PerspectiveMode.PERSPECTIVE)
        self.assertAlmostEqual(cam.get_perspective_angle(), 35.0, 3)
        self.assertEqual(cam.get_near_clipping_plane(), 0.25)
        self.assertEqual(cam.get_far_clipping_plane(), 5.0)
        self.assertEqual(cam.get_render_mode(), RenderMode.OPENGL3)

    def test_get_set_resolution(self):
        self.cam.set_resolution([320, 240])
        self.assertEqual(self.cam.get_resolution(), [320, 240])
        self.assertEqual(self.cam.capture_rgb().shape, (240, 320, 3))

    def test_get_set_perspective_mode(self):
        for perspective_mode in PerspectiveMode:
            self.cam.set_perspective_mode(perspective_mode)
            self.assertEqual(self.cam.get_perspective_mode(), perspective_mode)

    def test_get_set_render_mode(self):
        for render_mode in [RenderMode.OPENGL, RenderMode.OPENGL3]:
            self.cam.set_render_mode(render_mode)
            self.assertEqual(self.cam.get_render_mode(), render_mode)

    def test_get_set_perspective_angle(self):
        self.cam.set_perspective_angle(45.0)
        self.assertAlmostEqual(self.cam.get_perspective_angle(), 45.0, 3)

    def test_get_set_orthographic_size(self):
        self.cam.set_orthographic_size(1.0)
        self.assertEqual(self.cam.get_orthographic_size(), 1.0)

    def test_get_set_near_clipping_plane(self):
        self.cam.set_near_clipping_plane(0.5)
        self.assertEqual(self.cam.get_near_clipping_plane(), 0.5)

    def test_get_set_far_clipping_plane(self):
        self.cam.set_far_clipping_plane(0.5)
        self.assertEqual(self.cam.get_far_clipping_plane(), 0.5)

    def test_get_set_windowed_size(self):
        self.cam.set_windowed_size((640, 480))
        self.assertEqual(self.cam.get_windowed_size(), (640, 480))

    def test_get_set_entity_to_render(self):
        self.cam.set_entity_to_render(-1)
        self.assertEqual(self.cam.get_entity_to_render(), -1)
Exemplo n.º 22
0
class EnvPollos(Env):
    def __init__(self, ep_length=100):
        """
        Pollos environment for testing purposes.

        :param ep_length: (int) the length of each episode, in timesteps
        """
        logging.basicConfig(level=logging.DEBUG)

        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.agent = UR10()
        self.agent.max_velocity = 0.8
        self.agent.set_control_loop_enabled(True)
        #self.agent.set_motor_locked_at_zero_velocity(True)

        self.MAX_INC = 0.2
        self.joints = [
            Joint('UR10_joint1'),
            Joint('UR10_joint2'),
            Joint('UR10_joint3'),
            Joint('UR10_joint4'),
            Joint('UR10_joint5'),
            Joint('UR10_joint6')
        ]
        #self.joints_limits = [[j.get_joint_interval()[1][0],j.get_joint_interval()[1][0]+j.get_joint_interval()[1][1]]
        #                      for j in self.joints]
        self.high_joints_limits = [0.1, 1.7, 2.7, 0.0, 0.02, 0.3]
        self.low_joints_limits = [-0.1, -0.2, 0.0, -1.5, -0.02, -0.5]
        self.initial_joint_positions = self.agent.get_joint_positions()

        self.agent_hand = Shape('hand')
        self.initial_agent_tip_position = self.agent.get_tip().get_position()
        self.initial_agent_tip_quaternion = self.agent.get_tip(
        ).get_quaternion()

        self.target = Dummy('UR10_target')

        self.pollo_target = Dummy('pollo_target')
        self.pollo = Shape('pollo')
        self.initial_pollo_position = self.pollo.get_position()
        self.initial_pollo_orientation = self.pollo.get_quaternion()

        self.table_target = Dummy('table_target')

        # objects to check collisions
        self.scene_objects = [
            Shape('table0'),
            Shape('Plane'),
            Shape('Plane0'),
            Shape('ConcretBlock')
        ]

        self.initial_distance = np.linalg.norm(
            np.array(self.initial_pollo_position) -
            np.array(self.initial_agent_tip_position))

        # camera
        self.camera = VisionSensor('kinect_depth')
        self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(
            self.camera.get_handle(), -1)
        self.np_camera_matrix_extrinsics = np.delete(
            np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
        width = 640.0
        height = 480.0
        angle = math.radians(57.0)  # Kinect horizontal field of view
        # square pixels: the same focal length (in pixels) applies to both axes
        focal_px = (width / 2.0) / math.tan(angle / 2.0)
        self.np_camera_matrix_intrinsics = np.array(
            [[-focal_px, 0.0, width / 2.0],
             [0.0, -focal_px, height / 2.0],
             [0.0, 0.0, 1.0]])

        self.reset()

    def reset(self):
        pos = list(
            np.random.uniform([-0.1, -0.1, 0.0], [0.1, 0.1, 0.1]) +
            self.initial_pollo_position)
        self.pollo.set_position(pos)
        self.pollo.set_quaternion(self.initial_pollo_orientation)
        self.agent.set_joint_positions(self.initial_joint_positions)
        self.initial_epoch_time = time.time()
        while True:  # wait for the arm to stop
            self.pr.step()  # step the physics simulation
            velocities = self.agent.get_joint_velocities()
            if np.all(np.fabs(velocities) < 0.1):
                break

        return self._get_state()

    def step(self, action):
        if action is None:
            return self._get_state(), -10.0, True, {}

        # check for nan
        if np.any(np.isnan(action)):
            print("NAN values ", action)
            self.NANS_COMING = True
            return self._get_state(), -10.0, True, {}

        # check for strange values
        # if np.any(np.greater(action, self.MAX_INC)) or np.any(np.less(action, -self.MAX_INC)):
        #     print("Strange values ", action)
        #     self.NANS_COMING = True
        #     return self._get_state(), -10.0, True, {}

        # check for NAN in VREP get_position()
        if np.any(np.isnan(self.agent.get_tip().get_position())):
            print("NAN values in get_position()", action,
                  self.agent.get_tip().get_position())
            return self._get_state(), -10.0, True, {}

        self.pr.step()
        return self._get_state(), 0, True, {}

    def close(self):
        self.pr.stop()
        self.pr.shutdown()

    def render(self):
        print("RENDER")
        np_pollo_en_camara = self.getPolloEnCamara()

        # capture the depth image
        depth = self.camera.capture_depth()
        circ = plt.Circle(
            (int(np_pollo_en_camara[0]), int(np_pollo_en_camara[1])), 10)
        plt.clf()
        fig = plt.gcf()
        ax = fig.gca()
        ax.add_artist(circ)
        ax.imshow(depth, cmap='hot')
        plt.pause(0.000001)

    # Aux

    # Transform env.pollo_target.get_position() to camera coordinates and project it to image coordinates.
    def getPolloEnCamara(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_target_cam = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        np_pollo_en_camara = self.np_camera_matrix_intrinsics.dot(
            np_pollo_target_cam)
        np_pollo_en_camara = np_pollo_en_camara / np_pollo_en_camara[2]
        np_pollo_en_camara = np.delete(np_pollo_en_camara, 2)
        return np_pollo_en_camara

    def getPolloEnCamaraEx(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_en_camara = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        return np_pollo_en_camara

    def _get_state(self):
        # Return state containing arm joint angles/velocities & target position
        # return (self.agent.get_joint_positions() +
        #         self.agent.get_joint_velocities() +
        #         self.pollo_target.get_position())
        p = self.getPolloEnCamaraEx()
        j = self.agent.get_joint_positions()
        #r = np.array([p[0],p[1],p[2],j[0],j[1],j[2],j[3],j[4],j[5]])
        r = np.array([p[0], p[1], p[2]])
        return r

    def vrepToNP(self, c):
        return np.array([[c[0], c[4], c[8], c[3]], [c[1], c[5], c[9], c[7]],
                         [c[2], c[6], c[10], c[11]], [0, 0, 0, 1]])
Exemplo n.º 23
0
class ReacherEnv(object):
    def __init__(self,
                 headless,
                 control_mode='end_position',
                 visual_control=False):
        '''
        :param visual_control: (bool) if True, observations are visual states (images); otherwise vector states.
        '''
        self.reward_offset = 10.0
        self.reward_range = self.reward_offset  # reward range for registering the gym env when using a vectorized env wrapper
        self.fall_down_offset = 0.1  # for judging whether the target object fell off the table
        self.metadata = []  # gym env format
        self.visual_control = visual_control
        self.control_mode = control_mode
        self.pr = PyRep()
        if control_mode == 'end_position':  # needs a different scene: the one with all joints in inverse kinematics mode
            SCENE_FILE = join(dirname(abspath(__file__)),
                              './scenes/sawyer_reacher_rl_new_ik.ttt')
        elif control_mode == 'joint_velocity':  # the scene with all joints in force/torque mode for forward kinematics
            SCENE_FILE = join(dirname(abspath(__file__)),
                              './scenes/sawyer_reacher_rl_new.ttt')
        self.pr.launch(SCENE_FILE, headless=headless)
        self.pr.start()
        self.agent = Sawyer()
        self.gripper = BaxterGripper()
        self.gripper_left_pad = Shape(
            'BaxterGripper_leftPad')  # the pad on the gripper finger
        self.proximity_sensor = ProximitySensor(
            'BaxterGripper_attachProxSensor'
        )  # need the name of the sensor here
        self.vision_sensor = VisionSensor(
            'Vision_sensor')  # need the name of the sensor here
        if control_mode == 'end_position':
            self.agent.set_control_loop_enabled(True)  # if false, won't work
            self.action_space = np.zeros(
                4)  # 3 DOF end position control + 1 rotation of gripper
        elif control_mode == 'joint_velocity':
            self.agent.set_control_loop_enabled(False)
            self.action_space = np.zeros(
                8)  # 7 DOF velocity control + 1 rotation of gripper
        else:
            raise NotImplementedError
        if not self.visual_control:
            # position and velocity of 7 joints + position of the target
            self.observation_space = np.zeros(17)
        else:
            self.observation_space = np.zeros(100)  # dimension of the image observation
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.target = Shape('target')  # object
        self.tip_target = Dummy(
            'Sawyer_target')  # the target point of the tip to move towards
        # self.table = Shape('diningTable')
        self.agent_ee_tip = self.agent.get_tip()
        self.tip_pos = self.agent_ee_tip.get_position()
        self.tip_quat = self.agent_ee_tip.get_quaternion(
        )  # tip rotation as a quaternion, used when the rotation is not controlled

        # set a proper initial gesture/tip position
        if control_mode == 'end_position':
            # agent_position=self.agent.get_position()
            # initial_pos_offset = [0.4, 0.3, 0.2]  # initial relative position of gripper to the whole arm
            # initial_pos = [(a + o) for (a, o) in zip(agent_position, initial_pos_offset)]
            initial_pos = [0.3, 0.1, 0.9]
            self.tip_target.set_position(initial_pos)
            # one big step is enough for setting the rotation; with reset_dynamics=True it is applied instantaneously
            self.tip_target.set_orientation(
                [0, 3.1415, 1.5707],
                reset_dynamics=True)  # make gripper face downwards
            self.pr.step()
        elif control_mode == 'joint_velocity':
            self.initial_joint_positions = [
                0.001815199851989746, -1.4224984645843506, 0.704303503036499,
                2.54307222366333, 2.972468852996826, -0.4989511966705322,
                4.105560302734375
            ]
            self.agent.set_joint_positions(self.initial_joint_positions)

        self.initial_joint_positions = self.agent.get_joint_positions()
        self.initial_tip_positions = self.agent_ee_tip.get_position()
        self.initial_target_positions = self.target.get_position()

    def _get_state(self):
        # Return state containing arm joint angles/velocities & target position
        return np.array(self.agent.get_joint_positions() +
                        self.agent.get_joint_velocities() +
                        self.target.get_position())

    def _is_holding(self):
        '''
        Return whether the gripper is holding the target (bool).
        '''
        # Note that the collision check is not always accurate:
        # for a continuous collision, only the first 4-5 frames may be detected.
        pad_collide_object = self.gripper_left_pad.check_collision(self.target)
        return pad_collide_object and self.proximity_sensor.is_detected(self.target)

    def _get_visual_state(self):
        # Return an RGB image as a numpy array of shape (height, width, 3)
        return self.vision_sensor.capture_rgb()


    # def _move(self, action):
    #     '''
    #     Move the tip according to the action with inverse kinematics for 'end_position' control;
    #     with control of tip target in inverse kinematics mode instead of using .solve_ik() in forward kinematics mode.
    #     '''
    #     robot_moving_unit=0.01  # the amount of single step move of robot, not accurate
    #     moving_loop_itr=int(np.sum(np.abs(action[:3]))/robot_moving_unit)+1  # adaptive number of moving steps, with minimal of 1 step.
    #     # print(moving_loop_itr)
    #     small_step = list(1./moving_loop_itr*np.array(action))  # break the action into small steps, as the robot cannot move to the target position within one frame
    #     pos=self.agent_ee_tip.get_position()

    #     '''
    #     there is a mismatch between the object set_orientation() and get_orientation():
    #     the (x,y,z) in set_orientation() will be (y,x,-z) in get_orientation().
    #     '''
    #     ori_z=-self.agent_ee_tip.get_orientation()[2] # the minus is because the mismatch between the set and get
    #     assert len(small_step) == len(pos)+1  # 3 values for position, 1 value for rotation

    #     for _ in range(moving_loop_itr):
    #         for idx in range(len(pos)):
    #             pos[idx] += small_step[idx]
    #         self.tip_target.set_position(pos)
    #         self.pr.step()
    #         ''' deprecated! no need to use small steps for the rotation with reset_dynamics=True'''
    #         # ori_z+=small_step[3]  # change the orientation along z-axis with a small step
    #         # self.tip_target.set_orientation([0,3.1415,ori_z], reset_dynamics=True)  # make gripper face downwards
    #         # self.pr.step()
    #     ''' one big step for z-rotation is enough, with reset_dynamics=True, set the rotation instantaneously '''
    #     ori_z+=action[3]
    #     self.tip_target.set_orientation([0,3.1415,ori_z], reset_dynamics=True)  # make gripper face downwards
    #     self.pr.step()

    def _move(self, action):
        '''
        Move the tip according to the action, using inverse kinematics for 'end_position' control:
        the tip target is driven in inverse kinematics mode instead of calling .solve_ik() in forward kinematics mode.
        Mode 2: closed-loop control using IK.
        '''
        pos = self.gripper.get_position()
        bounding_offset = 0.1
        step_factor = 0.2  # small step factor multiplied by the gradient step calculated by inverse kinematics
        max_itr = 20  # maximum moving iterations
        max_error = 0.1  # upper bound of distance error for movement at each call
        rotation_norm = 5.  # factor for normalization of rotation values
        # check if state+action will be within of the bounding box, if so, move normally; else no action.
        #  x_min < x < x_max  and  y_min < y < y_max  and  z > z_min
        if pos[0]+action[0]>POS_MIN[0]-bounding_offset and pos[0]+action[0]<POS_MAX[0]+bounding_offset  \
            and pos[1]+action[1] > POS_MIN[1]-bounding_offset and pos[1]+action[1] < POS_MAX[1]+bounding_offset  \
            and pos[2]+action[2] > POS_MIN[2]-bounding_offset:
            ''' 
            there is a mismatch between the object set_orientation() and get_orientation():
            the (x,y,z) in set_orientation() will be (y,x,-z) in get_orientation().
            '''
            ori_z = -self.agent_ee_tip.get_orientation()[
                2]  # the minus sign compensates for the set/get mismatch
            target_pos = np.array(self.agent_ee_tip.get_position()) + np.array(
                action[:3])
            diff = 1
            itr = 0
            # print('before: ', ori_z)
            while np.sum(np.abs(diff)) > max_error and itr < max_itr:
                itr += 1
                # set pos in small step
                cur_pos = self.agent_ee_tip.get_position()
                diff = target_pos - cur_pos
                pos = cur_pos + step_factor * diff
                self.tip_target.set_position(pos.tolist())
                self.pr.step()
            ''' one big step for z-rotation is enough '''
            ori_z += rotation_norm * action[3]
            self.tip_target.set_orientation([0, np.pi, ori_z
                                             ])  # make gripper face downwards
            self.pr.step()

        else:
            # print("Potential Movement Out of the Bounding Box!")
            pass  # no action if potentially out of the bounding box

    def reset(self):
        # Get a random position within a cuboid and set the target position
        max_itr = 10
        pos = list(np.random.uniform(POS_MIN, POS_MAX))
        self.target.set_position(pos)
        self.target.set_orientation([0, 0, 0])
        self.pr.step()
        # changing the color or texture for domain randomization
        self.target.set_color(
            np.random.uniform(low=0, high=1, size=3).tolist()
        )  # set [r,g,b] 3 channel values of object color
        # set end position to be initialized
        if self.control_mode == 'end_position':  # JointMode.IK
            self.agent.set_control_loop_enabled(True)
            self.tip_target.set_position(
                self.initial_tip_positions
            )  # joint positions cannot be set directly in IK mode (only in force/torque mode)
            self.pr.step()
            # prevent stuck case
            itr = 0
            while np.sum(
                    np.abs(
                        np.array(self.agent_ee_tip.get_position()) -
                        np.array(self.initial_tip_positions))) > 0.1 and itr < max_itr:
                itr += 1
                self.step(np.random.uniform(
                    -0.2, 0.2,
                    4))  # take random actions for preventing the stuck cases
                self.pr.step()
        elif self.control_mode == 'joint_velocity':  # JointMode.FORCE
            self.agent.set_joint_positions(
                self.initial_joint_positions
            )  # sometimes the gripper is stuck, cannot get back to initial
            self.pr.step()

        # self.table.set_collidable(True)
        self.gripper_left_pad.set_collidable(
            True
        )  # set the pad on the gripper to be collidable, so as to check collision
        self.target.set_collidable(True)
        if np.sum(self.gripper.get_open_amount()) < 1.5:
            self.gripper.actuate(1, velocity=0.5)  # open the gripper
            self.pr.step()
        if self.visual_control:
            return self._get_visual_state()
        else:
            return self._get_state()

    def step(self, action):
        '''
        Move the robot arm according to the action.
        If control_mode=='joint_velocity', action is 7 dim of joint velocity values + 1 dim of gripper rotation.
        if control_mode=='end_position', action is 3 dim of tip (end of robot arm) position values + 1 dim of gripper rotation.
        '''
        if self.control_mode == 'end_position':
            if action is None or action.shape[0] != 4:
                action = list(np.random.uniform(-0.1, 0.1, 4))  # random
            self._move(action)
        elif self.control_mode == 'joint_velocity':
            if action is None or action.shape[0] != 7:
                print('No actions or wrong action dimensions!')
                action = list(np.random.uniform(-0.1, 0.1, 7))  # random
            self.agent.set_joint_target_velocities(
                action)  # Execute action on arm
            self.pr.step()
        else:
            raise NotImplementedError

        ax, ay, az = self.gripper.get_position()
        tx, ty, tz = self.target.get_position()
        # Reward is negative distance to target
        distance = (ax - tx)**2 + (ay - ty)**2 + (az - tz)**2
        done = False

        # print(self.proximity_sensor.is_detected(self.target))
        current_vision = self.vision_sensor.capture_rgb(
        )  # capture a screenshot of the view with vision sensor
        plt.imshow(current_vision)
        plt.savefig('./img/vision.png')

        reward = 0
        # close the gripper if close enough to the object and the object is detected with the proximity sensor
        if distance < 0.1 and self.proximity_sensor.is_detected(self.target):
            # make sure the gripper is open before grasping
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

            self.gripper.actuate(
                0, velocity=0.5
            )  # if done, close the hand, 0 for close and 1 for open.
            self.pr.step()  # Step the physics simulation

            if self._is_holding():
                # reward for hold here!
                reward += 10
                done = True

            else:
                self.gripper.actuate(1, velocity=0.5)
                self.pr.step()

        elif np.sum(
                self.gripper.get_open_amount()
        ) < 1.5:  # if the gripper is closed due to collision or else, open it; get_open_amount() returns a list of gripper joint values
            self.gripper.actuate(1, velocity=0.5)
            self.pr.step()

        else:
            pass

        reward -= np.sqrt(distance)

        if tz < self.initial_target_positions[
                2] - self.fall_down_offset:  # the object fell off the table
            done = True
            reward = -self.reward_offset

        if self.visual_control:
            return self._get_visual_state(), reward, done, {}
        else:
            return self._get_state(), reward, done, {}

    def shutdown(self):
        self.pr.stop()
        self.pr.shutdown()
Exemplo n.º 24
0
def simulate(scene_dir, cls_indices):
    # read 3d point cloud vertices npy (for visualization)
    vertex_npy = np.load("mesh_data/custom_vertex.npy")

    # loop over all scene files in scenes directory
    for scene_path in os.listdir(scene_dir):
        # process only scene files
        if not scene_path.endswith('.ttt'):
            continue

        # create an output directory for each scene
        scene_out_dir = os.path.join('./sim_data/', scene_path[:-4])
        if not os.path.isdir(scene_out_dir):
            os.mkdir(scene_out_dir)

        # launch scene file
        SCENE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.join(scene_dir, scene_path))
        pr = PyRep()
        pr.launch(SCENE_FILE, headless=True)
        pr.start()

        pr.step()

        # define vision sensor
        camera = VisionSensor('cam')
        # set camera absolute pose to world coordinates
        camera.set_pose([0,0,0,0,0,0,1])
        pr.step()

        # define scene shapes
        shapes = []
        shapes.append(Shape('Shape1'))
        shapes.append(Shape('Shape2'))
        shapes.append(Shape('Shape3'))
        shapes.append(Shape('Shape4'))
        pr.step()

        print("Getting vision sensor intrinsics ...")
        # get vision sensor parameters
        cam_res = camera.get_resolution()
        cam_per_angle = camera.get_perspective_angle()
        ratio = cam_res[0]/cam_res[1]
        cam_angle_x = 0.0
        cam_angle_y = 0.0
        if ratio > 1:
            cam_angle_x = cam_per_angle
            cam_angle_y = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) / ratio))
        else:
            cam_angle_x = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) / ratio))
            cam_angle_y = cam_per_angle
        # get vision sensor intrinsic matrix
        cam_focal_x = float(cam_res[0] / 2.0) / tan(radians(cam_angle_x / 2.0))
        cam_focal_y = float(cam_res[1] / 2.0) / tan(radians(cam_angle_y / 2.0))
        intrinsics = np.array([[cam_focal_x, 0.0, float(cam_res[0] / 2.0)],
                               [0.0, cam_focal_y, float(cam_res[1] / 2.0)],
                               [0.0, 0.0, 1.0]])

        # loop to get 5000 samples per scene
        for i in range(5000):
            print("Randomizing objects' poses ...")
            # set random pose to objects in the scene
            obj_colors = []
            for shape in shapes:
                shape.set_pose([
                        random.uniform(-0.25,0.25), random.uniform(-0.25,0.25), random.uniform(0.25,2),
                        random.uniform(-1,1), random.uniform(-1,1), random.uniform(-1,1),
                        random.uniform(-1,1)
                    ])
                obj_colors.append([random.uniform(0,1), random.uniform(0,1), random.uniform(0,1)])
                pr.step()

            print("Reading vision sensor RGB signal ...")
            # read vision sensor RGB image
            img = camera.capture_rgb()
            img = np.uint8(img * 255.0)

            print("Reading vision sensor depth signal ...")
            # read vision sensor depth image
            depth = camera.capture_depth()
            depth = np.uint8(depth * 255.0)

            print("Generating front mask for RGB signal ...")
            # generate RGB front mask
            front_mask = np.sum(img, axis=2)
            front_mask[front_mask != 0] = 255
            front_mask = Image.fromarray(np.uint8(front_mask))
            alpha_img = Image.fromarray(img)
            alpha_img.putalpha(front_mask)

            print("Saving sensor output ...")
            # save sensor output
            alpha_img.save(scene_out_dir+f'/{str(i).zfill(6)}-color.png')
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-depth.png', depth)
            
            print("Getting objects' poses ...")
            # get poses for all objects in scene
            poses = []
            for shape in shapes:
                poses.append(get_object_pose(shape, camera))
            pose_mat = np.stack(poses, axis=2)
            # save pose visualization on RGB image
            img_viz = visualize_predictions(poses, cls_indices[scene_path], img, vertex_npy, intrinsics)
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-viz.png', img_viz)

            print("Saving meta-data ...")
            # save meta-data .mat
            meta_dict = {
                'cls_indexes'      : np.array(cls_indices[scene_path]),
                'intrinsic_matrix' : intrinsics,
                'poses'            : pose_mat
            }
            savemat(scene_out_dir+f'/{str(i).zfill(6)}-meta.mat', meta_dict)

            print("Performing semantic segmentation of RGB signal ...")
            # perform semantic segmentation of RGB image
            seg_img = camera.capture_rgb()
            seg_img = perform_segmentation(seg_img, cls_indices[scene_path], poses, vertex_npy, intrinsics)
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-label.png', seg_img)

        # stop simulation
        pr.stop()
        pr.shutdown()
Exemplo n.º 25
0
def get_mask(sensor: VisionSensor, mask_fn):
    mask = None
    if sensor is not None:
        sensor.handle_explicitly()
        mask = mask_fn(sensor.capture_rgb())
    return mask
Exemplo n.º 26
0
class robotEnv(object):
    def __init__(self, scene_name, reward_dense, boundary):
        self.pr = PyRep()
        SCENE_FILE = join(dirname(abspath(__file__)), scene_name)
        self.pr.launch(SCENE_FILE, headless=True)
        self.pr.start()
        self.pr.set_simulation_timestep(0.05)

        if scene_name != 'robot_scene.ttt':  #youbot_navig2
            home_dir = os.path.expanduser('~')
            os.chdir(join(home_dir, 'robotics_drl'))
            self.pr.import_model('robot.ttm')

        # Vision sensor handles
        self.camera_top = VisionSensor('Vision_sensor')
        # self.camera_top.set_render_mode(RenderMode.OPENGL3)
        # self.camera_top.set_resolution([256,256])

        self.camera_arm = VisionSensor('Vision_sensor1')
        self.camera_arm.set_render_mode(RenderMode.OPENGL3)
        self.camera_arm.set_resolution([256, 256])

        self.reward_dense = reward_dense
        self.reward_termination = 1 if self.reward_dense else 0
        self.boundary = boundary

        # Robot links
        robot_links = []
        links_size = [3, 5, 5, 1]
        for i in range(len(links_size)):
            robot_links.append(
                [Shape('link%s_%s' % (i, j)) for j in range(links_size[i])])

        links_color = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
        color_i = 0
        for j in robot_links:
            if color_i > 2:
                color_i = 0
            for i in j:
                i.remove_texture()
                i.set_color(links_color[color_i])
            color_i += 1

    def render(self, view='top'):
        if view == 'top':
            img = self.camera_top.capture_rgb() * 256  # (dim,dim,3)
        elif view == 'arm':
            img = self.camera_arm.capture_rgb()
        return img

    def terminate(self):
        self.pr.stop()  # Stop the simulation
        self.pr.shutdown()

    def sample_action(self):
        return [(2 * random.random() - 1) for _ in range(self.action_space)]

    def rand_bound(self):
        x = random.uniform(-self.boundary, self.boundary)
        y = random.uniform(-self.boundary, self.boundary)
        orientation = random.random() * 2 * pi
        return x, y, orientation

    def action_type(self):
        return 'continuous'

    def observation_space(self):
        _, obs = self.get_observation()
        return obs.shape

    def step_limit(self):
        return 250