Example #1
def _set_rgb_props(rgb_cam: VisionSensor, rgb: bool, depth: bool,
                   conf: CameraConfig):
    if not (rgb or depth):
        rgb_cam.remove()
    else:
        rgb_cam.set_resolution(conf.image_size)
        rgb_cam.set_render_mode(conf.render_mode)
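
A minimal usage sketch for the helper above, assuming a launched PyRep scene containing a vision sensor named 'cam'; the CameraConfig fields shown mirror those the helper reads and are otherwise placeholders:

# Hypothetical usage; requires a running PyRep scene with a 'cam' sensor,
# and a CameraConfig class from the surrounding project (not shown here).
from pyrep.const import RenderMode
from pyrep.objects.vision_sensor import VisionSensor

rgb_cam = VisionSensor('cam')
conf = CameraConfig(rgb=True, depth=False,
                    image_size=(128, 128), render_mode=RenderMode.OPENGL3)
_set_rgb_props(rgb_cam, conf.rgb, conf.depth, conf)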
Example #2
class RattlerCamera(object):
    def __init__(self):
        self._head_camera = VisionSensor('rattler_eye')
        self._head_camera_mask = None
        self._head_camera_state = 1

    def get_camera_state(self) -> int:
        return self._head_camera_state

    def set_camera_state(self, state: int) -> None:
        self._head_camera_state = state

    def set_camera_properties(self, conf: CameraConfig) -> None:
        if not (conf.rgb or conf.depth):
            self._head_camera.remove()
        else:
            self._head_camera.set_resolution(conf.image_size)
            self._head_camera.set_render_mode(conf.render_mode)

        if conf.mask:
            self._head_camera_mask = self._head_camera.copy()
            self._head_camera_mask.set_render_mode(
                RenderMode.OPENGL_COLOR_CODED)

    def capture_rgb(self) -> np.ndarray:
        return self._head_camera.capture_rgb()

    def capture_depth(self) -> np.ndarray:
        return self._head_camera.capture_depth()

    def mask(self) -> VisionSensor:
        return self._head_camera_mask
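
A minimal usage sketch for RattlerCamera, assuming a launched PyRep scene with a 'rattler_eye' vision sensor and a CameraConfig instance `conf` providing the rgb/depth/mask/image_size/render_mode fields the class reads:

# Hypothetical usage; `conf` must be built by the surrounding code.
cam = RattlerCamera()
cam.set_camera_properties(conf)  # resolution, render mode, optional mask copy
rgb = cam.capture_rgb()          # RGB frame as np.ndarray
depth = cam.capture_depth()      # depth frame as np.ndarray
mask_sensor = cam.mask()         # VisionSensor, or None if conf.mask was False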
Example #3
class SphericalVisionSensor(Object):
    """An object able capture 360 degree omni-directional images.
    """

    def __init__(self, name):
        super().__init__(name)

        # final omni-directional sensors
        self._sensor_depth = VisionSensor('%s_sensorDepth' % (self.get_name()))
        self._sensor_rgb = VisionSensor('%s_sensorRGB' % (self.get_name()))

        # directed sub-sensors
        names = ['front', 'top', 'back', 'bottom', 'left', 'right']
        self._six_sensors = [VisionSensor('%s_%s' % (self.get_name(), n)) for n in names]
        self._front = self._six_sensors[0]

        # directed sub-sensor handles list
        self._six_sensor_handles = [vs.get_handle() for vs in self._six_sensors]

        # set all to explicit handling
        self._set_all_to_explicit_handling()

        # assert sensor properties are matching
        self._assert_matching_resolutions()
        self._assert_matching_render_modes()
        self._assert_matching_entities_to_render()
        self._assert_matching_window_sizes()
        self._assert_matching_near_clipping_planes()
        self._assert_matching_far_clipping_planes()

        # omni resolution
        self._omni_resolution = self._sensor_rgb.get_resolution()

        # near and far clipping plane
        self._near_clipping_plane = self.get_near_clipping_plane()
        self._far_clipping_plane = self.get_far_clipping_plane()
        self._clipping_plane_diff = self._far_clipping_plane - self._near_clipping_plane

        # projective cam region image
        self._planar_to_radial_depth_scalars = self._get_planar_to_radial_depth_scalars()

        # flip config
        self._flip_x = True

    # Private #
    # --------#

    def _set_all_to_explicit_handling(self):
        self._sensor_depth.set_explicit_handling(1)
        self._sensor_rgb.set_explicit_handling(1)
        for sensor in self._six_sensors:
            sensor.set_explicit_handling(1)

    def _assert_matching_resolutions(self):
        assert self._sensor_depth.get_resolution() == self._sensor_rgb.get_resolution()
        front_sensor_res = self._front.get_resolution()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_resolution() == front_sensor_res

    def _assert_matching_render_modes(self):
        assert self._sensor_depth.get_render_mode() == self._sensor_rgb.get_render_mode()
        front_sensor_render_mode = self._front.get_render_mode()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_render_mode() == front_sensor_render_mode

    def _assert_matching_entities_to_render(self):
        assert self._sensor_depth.get_entity_to_render() == self._sensor_rgb.get_entity_to_render()
        front_sensor_entity_to_render = self._front.get_entity_to_render()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_entity_to_render() == front_sensor_entity_to_render

    def _assert_matching_window_sizes(self):
        assert self._sensor_depth.get_windowed_size() == self._sensor_rgb.get_windowed_size()

    def _assert_matching_near_clipping_planes(self):
        front_sensor_near = self._front.get_near_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_near_clipping_plane() == front_sensor_near

    def _assert_matching_far_clipping_planes(self):
        front_sensor_far = self._front.get_far_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_far_clipping_plane() == front_sensor_far

    def _get_requested_type(self) -> ObjectType:
        return ObjectType(sim.simGetObjectType(self.get_handle()))

    @staticmethod
    def _create_uniform_pixel_coords_image(image_dims):
        pixel_x_coords = np.reshape(np.tile(np.arange(image_dims[1]), [image_dims[0]]),
                                    (image_dims[0], image_dims[1], 1)).astype(np.float32)
        pixel_y_coords_ = np.reshape(np.tile(np.arange(image_dims[0]), [image_dims[1]]),
                                     (image_dims[1], image_dims[0], 1)).astype(np.float32)
        pixel_y_coords = np.transpose(pixel_y_coords_, (1, 0, 2))
        return np.concatenate((pixel_x_coords, pixel_y_coords), -1)

    def _get_planar_to_radial_depth_scalars(self):

        # reference: https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates

        # polar coord image
        coord_img = self._create_uniform_pixel_coords_image([self._omni_resolution[1], self._omni_resolution[0]])
        pixels_per_rad = self._omni_resolution[1] / np.pi
        polar_angles = coord_img / pixels_per_rad
        phi = polar_angles[..., 0:1]
        theta = polar_angles[..., 1:2]

        # image borders wrt the 6 projective cameras
        phis_rel = ((phi + np.pi/4) % (np.pi/2)) / (np.pi/2)
        lower_borders = np.arccos(1/((phis_rel*2-1)**2 + 2)**0.5)
        upper_borders = np.pi - lower_borders

        # omni image regions from the 6 projective cameras
        xy_region = np.logical_and(theta <= upper_borders, theta >= lower_borders)
        pos_x = np.logical_and(xy_region, np.logical_or(phi <= 45 * np.pi/180, phi >= (45 + 270) * np.pi/180))
        pos_y = np.logical_and(xy_region,
                               np.logical_and(phi >= 45 * np.pi/180, phi <= (45 + 90) * np.pi/180))
        neg_x = np.logical_and(xy_region,
                               np.logical_and(phi >= (45 + 90) * np.pi/180, phi <= (45 + 180) * np.pi/180))
        neg_y = np.logical_and(xy_region,
                               np.logical_and(phi >= (45 + 180) * np.pi/180, phi <= (45 + 270) * np.pi/180))
        pos_z = theta <= lower_borders
        neg_z = theta >= upper_borders

        # trig terms for conversion
        sin_theta = np.sin(theta)
        cos_theta = np.cos(theta)
        sin_phi = np.sin(phi)
        cos_phi = np.cos(phi)

        # reciprocal terms
        recip_sin_theta_cos_phi = 1/(sin_theta * cos_phi + MIN_DIVISOR)
        recip_sin_theta_sin_phi = 1/(sin_theta * sin_phi + MIN_DIVISOR)
        recip_cos_theta = 1/(cos_theta + MIN_DIVISOR)

        # planar to radial depth scalars
        return np.where(pos_x, recip_sin_theta_cos_phi,
                        np.where(neg_x, -recip_sin_theta_cos_phi,
                                 np.where(pos_y, recip_sin_theta_sin_phi,
                                          np.where(neg_y, -recip_sin_theta_sin_phi,
                                                   np.where(pos_z, recip_cos_theta,
                                                            np.where(neg_z, -recip_cos_theta,
                                                                     np.zeros_like(recip_cos_theta)))))))[..., 0]
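
    # Note on the conversion above: for the sub-sensor whose optical axis is +x,
    # a planar depth d relates to the radial depth r via d = r * sin(theta) * cos(phi),
    # so r = d * recip_sin_theta_cos_phi; the other five faces follow by symmetry,
    # and MIN_DIVISOR guards against division by zero at the face borders.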

    # Public #
    # -------#

    def handle_explicitly(self) -> None:
        """Handle spherical vision sensor explicitly.

        This enables capturing an image (e.g., via capture_rgb())
        without calling PyRep.step().
        """
        utils.script_call('handleSpherical@PyRep', PYREP_SCRIPT_TYPE,
                          [self._sensor_depth._handle, self._sensor_rgb._handle] + self._six_sensor_handles, [], [], [])

    def capture_rgb(self) -> np.ndarray:
        """Retrieves the rgb-image of a spherical vision sensor.

        :return: A numpy array of size (width, height, 3)
        """
        rgb = self._sensor_rgb.capture_rgb()
        if self._flip_x:
            return np.flip(rgb, 1)
        return rgb

    def capture_depth(self, in_meters=False) -> np.ndarray:
        """Retrieves the depth-image of a spherical vision sensor.

        :param in_meters: Whether the depth should be returned in meters.
        :return: A numpy array of size (width, height)
        """
        planar_depth = self._sensor_depth.capture_rgb()[:, :, 0]*self._clipping_plane_diff + self._near_clipping_plane
        radial_depth = planar_depth * self._planar_to_radial_depth_scalars
        if in_meters:
            if self._flip_x:
                return np.flip(radial_depth, 1)
            return radial_depth
        scaled_depth = (radial_depth - self._near_clipping_plane)/self._clipping_plane_diff
        if self._flip_x:
            return np.flip(scaled_depth, 1)
        return scaled_depth

    def get_resolution(self) -> List[int]:
        """ Return the spherical vision sensor's resolution.

        :return: Resolution [x, y]
        """
        return self._sensor_depth.get_resolution()

    def set_resolution(self, resolution: List[int]) -> None:
        """ Set the spherical vision sensor's resolution.

        :param resolution: New resolution [x, y]
        """
        if resolution[0] != 2*resolution[1]:
            raise Exception('Spherical vision sensors must have an X resolution 2x the Y resolution')
        if resolution[1] % 2 != 0:
            raise Exception('Spherical vision sensors must have an even Y resolution')
        box_sensor_resolution = [int(resolution[1]/2), int(resolution[1]/2)]
        for sensor in self._six_sensors:
            sensor.set_resolution(box_sensor_resolution)
        self._sensor_depth.set_resolution(resolution)
        self._sensor_rgb.set_resolution(resolution)
        self._omni_resolution = resolution
        self._planar_to_radial_depth_scalars = self._get_planar_to_radial_depth_scalars()

    def get_render_mode(self) -> RenderMode:
        """ Retrieves the spherical vision sensor's rendering mode

        :return: RenderMode for the current rendering mode.
        """
        return self._sensor_depth.get_render_mode()

    def set_render_mode(self, render_mode: RenderMode) -> None:
        """ Set the spherical vision sensor's rendering mode

        :param render_mode: The new sensor rendering mode, one of:
            RenderMode.OPENGL
            RenderMode.OPENGL_AUXILIARY
            RenderMode.OPENGL_COLOR_CODED
            RenderMode.POV_RAY
            RenderMode.EXTERNAL
            RenderMode.EXTERNAL_WINDOWED
            RenderMode.OPENGL3
            RenderMode.OPENGL3_WINDOWED
        """
        self._sensor_depth.set_render_mode(render_mode)
        self._sensor_rgb.set_render_mode(render_mode)
        for sensor in self._six_sensors:
            sensor.set_render_mode(render_mode)

    def get_windowed_size(self) -> Sequence[int]:
        """Get the size of windowed rendering for the omni-directional image.

        :return: The (x, y) resolution of the window. 0 for full-screen.
        """
        return self._sensor_depth.get_windowed_size()

    def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
        """Set the size of windowed rendering for the omni-directional image.

        :param resolution: The (x, y) resolution of the window.
            0 for full-screen.
        """
        self._sensor_depth.set_windowed_size(resolution)
        self._sensor_rgb.set_windowed_size(resolution)

    def get_near_clipping_plane(self) -> float:
        """ Get the spherical depth sensor's near clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_near_clipping_plane()

    def set_near_clipping_plane(self, near_clipping: float) -> None:
        """ Set the spherical depth sensor's near clipping plane.

        :param near_clipping: New near clipping plane (in metres)
        """
        self._near_clipping_plane = near_clipping
        self._clipping_plane_diff = self._far_clipping_plane - near_clipping
        for sensor in self._six_sensors:
            sensor.set_near_clipping_plane(near_clipping)

    def get_far_clipping_plane(self) -> float:
        """ Get the Sensor's far clipping plane.

        :return: Far clipping plane (metres)
        """
        return self._front.get_far_clipping_plane()

    def set_far_clipping_plane(self, far_clipping: float) -> None:
        """ Set the spherical depth sensor's far clipping plane.

        :param far_clipping: New far clipping plane (in metres)
        """
        self._far_clipping_plane = far_clipping
        self._clipping_plane_diff = far_clipping - self._near_clipping_plane
        for sensor in self._six_sensors:
            sensor.set_far_clipping_plane(far_clipping)

    def set_entity_to_render(self, entity_to_render: int) -> None:
        """ Set the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection. -1 to render all objects in scene.

        :param entity_to_render: Handle of the entity to render
        """
        self._sensor_depth.set_entity_to_render(entity_to_render)
        self._sensor_rgb.set_entity_to_render(entity_to_render)

    def get_entity_to_render(self) -> int:
        """ Get the entity rendered by the spherical vision sensor;
        this can be an object or, more usefully, a collection. Returns -1 if all objects in the scene are rendered.

        :return: Handle of the entity to render
        """
        return self._sensor_depth.get_entity_to_render()
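
A brief usage sketch, assuming a scene that already defines the full rig this class expects ('<name>_sensorDepth', '<name>_sensorRGB' and the six directed sub-sensors); the object name below is a placeholder:

# Hypothetical usage; the scene must already contain the spherical rig.
sphere_cam = SphericalVisionSensor('sphericalVisionRGBAndDepth')
sphere_cam.set_resolution([512, 256])  # X must be 2x Y, and Y must be even
sphere_cam.handle_explicitly()         # render without stepping the simulation
rgb = sphere_cam.capture_rgb()                    # equirectangular RGB image
depth = sphere_cam.capture_depth(in_meters=True)  # radial depth in metres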
Example #4
class RLBenchEnv(gym.Env):
    """An gym wrapper for RLBench."""

    metadata = {'render.modes': ['human']}

    def __init__(self,
                 task_class,
                 observation_mode='state',
                 action_mode='joint',
                 multi_action_space=False,
                 discrete_action_space=False):
        self.num_skip_control = 20
        self.epi_obs = []
        self.obs_record = True
        self.obs_record_id = None

        self._observation_mode = observation_mode
        self.obs_config = ObservationConfig()
        if observation_mode == 'state':
            self.obs_config.set_all_high_dim(False)
            self.obs_config.set_all_low_dim(True)
        elif observation_mode == 'state-goal':
            self.obs_config.set_all_high_dim(False)
            self.obs_config.set_all_low_dim(True)
            self.obs_config.set_goal_info(True)
        elif observation_mode == 'vision':
            self.obs_config.set_all(False)
            self.obs_config.set_camera_rgb(True)
        elif observation_mode == 'both':
            self.obs_config.set_all(True)
        else:
            raise ValueError('Unrecognised observation_mode: %s.' %
                             observation_mode)

        self._action_mode = action_mode
        self.ac_config = None
        if action_mode == 'joint':
            self.ac_config = ActionConfig(
                SnakeRobotActionConfig.ABS_JOINT_POSITION)
        elif action_mode == 'trigon':
            self.ac_config = ActionConfig(
                SnakeRobotActionConfig.TRIGON_MODEL_PARAM,
                is_discrete=discrete_action_space)
        else:
            raise ValueError('Unrecognised action_mode: %s.' % action_mode)

        self.env = Environment(action_config=self.ac_config,
                               obs_config=self.obs_config,
                               headless=True)
        self.env.launch()
        self.task = self.env.get_task(task_class)
        self.max_episode_steps = self.task.episode_len
        _, obs = self.task.reset()

        if action_mode == 'joint':
            self.action_space = spaces.Box(
                low=-1.7,
                high=1.7,
                shape=(self.ac_config.action_size, ),
                dtype=np.float32)
        elif action_mode == 'trigon':
            if multi_action_space:
                # action_space1 = spaces.MultiBinary(n=1)
                low1 = np.array([-0.8, -0.8])
                high1 = np.array([0.8, 0.8])
                action_space1 = spaces.Box(low=low1,
                                           high=high1,
                                           dtype=np.float32)
                # low = np.array([-0.8, -0.8, 1.0, 3.0, -50, -10, -0.1, -0.1])
                # high = np.array([0.8, 0.8, 3.0, 5.0, 50, 10, 0.1, 0.1])
                low2 = np.array([1.0])
                high2 = np.array([3.0])
                action_space2 = spaces.Box(low=low2,
                                           high=high2,
                                           dtype=np.float32)
                self.action_space = spaces.Tuple(
                    (action_space1, action_space2))
            elif discrete_action_space:
                self.action_space = spaces.MultiDiscrete([4, 4, 4])
            else:
                # low = np.array([0.0, -0.8, -0.8, 1.0, 3.0, -50, -10, -0.1, -0.1])
                # high = np.array([1.0, 0.8, 0.8, 3.0, 5.0, 50, 10, 0.1, 0.1])
                low = np.array([-1, -1, -1])
                high = np.array([1, 1, 1])
                self.action_space = spaces.Box(low=low,
                                               high=high,
                                               dtype=np.float32)

        if observation_mode == 'state' or observation_mode == 'state-goal':
            self.observation_space = spaces.Box(
                low=-np.inf,
                high=np.inf,
                shape=(obs.get_low_dim_data().shape[0] *
                       self.num_skip_control, ))
            if observation_mode == 'state-goal':
                self.goal_dim = obs.get_goal_dim()
        elif observation_mode == 'vision':
            self.observation_space = spaces.Box(
                low=0, high=1, shape=obs.head_camera_rgb.shape)
        elif observation_mode == 'both':
            self.observation_space = spaces.Dict({
                "state":
                spaces.Box(low=-np.inf,
                           high=np.inf,
                           shape=obs.get_low_dim_data().shape),
                "rattler_eye_rgb":
                spaces.Box(low=0, high=1, shape=obs.head_camera_rgb.shape),
                "rattler_eye_depth":
                spaces.Box(low=0, high=1, shape=obs.head_camera_depth.shape),
            })

        self._gym_cam = None

    def _extract_obs(self, obs):
        if self._observation_mode == 'state':
            return obs.get_low_dim_data()
        elif self._observation_mode == 'state-goal':
            obs_goal = {'observation': obs.get_low_dim_data()}
            obs_goal.update(obs.get_goal_data())
            return obs_goal
        elif self._observation_mode == 'vision':
            return obs.head_camera_rgb
        elif self._observation_mode == 'both':
            return {
                "state": obs.get_low_dim_data(),
                "rattler_eye_rgb": obs.head_camera_rgb,
                "rattle_eye_depth": obs.head_camera_depth,
            }

    def render(self, mode='human'):
        if self._gym_cam is None:
            # # Add the camera to the scene
            self._gym_cam = VisionSensor('monitor')
            self._gym_cam.set_resolution([640, 640])
            self._gym_cam.set_render_mode(RenderMode.EXTERNAL_WINDOWED)

    def reset(self):
        obs_data_group = []
        obs_data_dict = {
            'observation': [],
            'desired_goal': None,
            'achieved_goal': None
        }
        descriptions, obs = self.task.reset()
        self.epi_obs.append(obs)
        obs_data = self._extract_obs(obs)
        for _ in range(self.num_skip_control):
            # obs_data_group.extend(obs_data)
            if isinstance(obs_data, list) or isinstance(obs_data, np.ndarray):
                obs_data_group.extend(obs_data)
            elif isinstance(obs_data, dict):
                obs_data_dict['observation'].extend(obs_data['observation'])
                obs_data_dict['desired_goal'] = obs_data['desired_goal']
                obs_data_dict['achieved_goal'] = obs_data['achieved_goal']
        ret_obs = obs_data_group if len(obs_data_group) else obs_data_dict
        del descriptions  # Not used
        return ret_obs

    def step(self, action):
        obs_data_group = []
        obs_data_dict = {
            'observation': [],
            'desired_goal': None,
            'achieved_goal': None
        }
        reward_group = []
        terminate = False
        for _ in range(self.num_skip_control):
            obs, reward, step_terminate, success = self.task.step(action)
            self.epi_obs.append(obs)
            obs_data = self._extract_obs(obs)
            if isinstance(obs_data, list) or isinstance(obs_data, np.ndarray):
                obs_data_group.extend(obs_data)
            elif isinstance(obs_data, dict):  # used for hierarchical reinforcement learning algorithms
                obs_data_dict['observation'].extend(obs_data['observation'])
                obs_data_dict['desired_goal'] = obs_data['desired_goal']
                obs_data_dict['achieved_goal'] = obs_data['achieved_goal']
            reward_group.append(reward)
            terminate |= step_terminate
            if terminate:
                if self.obs_record and success:  # record a successful experience
                    self.record_obs("RobotPos")
                self.epi_obs = []
                break
        ret_obs = obs_data_group if len(obs_data_group) else obs_data_dict
        return ret_obs, np.mean(reward_group), terminate, {}

    def close(self):
        self.env.shutdown()

    # def load_env_param(self):
    #     self.env.load_env_param()

    def compute_reward(self, achieved_goal=None, desired_goal=None, info=None):
        assert achieved_goal is not None
        assert desired_goal is not None
        return self.task.compute_reward(achieved_goal, desired_goal)

    def record_obs(self, obs_part):
        if self.obs_record_id is None:
            record_filenames = glob.glob("./obs_record/obs_record_*.txt")
            record_filenames.sort(key=lambda filename: int(
                filename.split('_')[-1].split('.')[0]))
            if len(record_filenames) == 0:
                self.obs_record_id = 1
            else:
                last_id = int(
                    record_filenames[-1].split('_')[-1].split('.')[0])
                self.obs_record_id = last_id + 1
        else:
            self.obs_record_id += 1
        filename = './obs_record/obs_record_' + str(
            self.obs_record_id) + '.txt'
        obs_record_file = open(filename, 'w')

        if obs_part == 'All':
            pass
        if obs_part == 'RobotPos':
            robot_pos_arr = []
            for obs in self.epi_obs:
                robot_pos = obs.get_2d_robot_pos()
                robot_pos_arr.append(robot_pos)
            target_pos = self.task.get_goal()
            robot_pos_arr.append(
                target_pos)  # The last line records the target position
            robot_pos_arr = np.array(robot_pos_arr)
            np.savetxt(obs_record_file, robot_pos_arr, fmt="%f")
        obs_record_file.close()
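
A hedged usage sketch for the wrapper above; `MyTaskClass` stands in for whatever task class the underlying Environment.get_task() accepts:

# Hypothetical usage; MyTaskClass is not defined in the example.
env = RLBenchEnv(MyTaskClass, observation_mode='state', action_mode='joint')
obs = env.reset()
for _ in range(100):
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()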
Example #5
class TestVisionSensors(TestCore):
    def setUp(self):
        super().setUp()
        for _ in range(10):
            self.pyrep.step()
        self.cam = VisionSensor('cam0')

    def test_handle_explicitly(self):
        cam = VisionSensor.create((640, 480))

        # blank image
        rgb = cam.capture_rgb()
        self.assertEqual(rgb.sum(), 0)

        # non-blank image
        cam.set_explicit_handling(value=1)
        cam.handle_explicitly()
        rgb = cam.capture_rgb()
        self.assertNotEqual(rgb.sum(), 0)

        cam.remove()

    def test_capture_rgb(self):
        img = self.cam.capture_rgb()
        self.assertEqual(img.shape, (16, 16, 3))
        #  Check that it's not a blank image
        self.assertFalse(img.min() == img.max() == 0.0)

    def test_capture_depth(self):
        img = self.cam.capture_depth()
        self.assertEqual(img.shape, (16, 16))
        # Check that it's not a blank depth map
        self.assertFalse(img.min() == img.max() == 1.0)

    def test_create(self):
        cam = VisionSensor.create([640, 480],
                                  perspective_mode=True,
                                  view_angle=35.0,
                                  near_clipping_plane=0.25,
                                  far_clipping_plane=5.0,
                                  render_mode=RenderMode.OPENGL3)
        self.assertEqual(cam.capture_rgb().shape, (480, 640, 3))
        self.assertEqual(cam.get_perspective_mode(),
                         PerspectiveMode.PERSPECTIVE)
        self.assertAlmostEqual(cam.get_perspective_angle(), 35.0, 3)
        self.assertEqual(cam.get_near_clipping_plane(), 0.25)
        self.assertEqual(cam.get_far_clipping_plane(), 5.0)
        self.assertEqual(cam.get_render_mode(), RenderMode.OPENGL3)

    def test_get_set_resolution(self):
        self.cam.set_resolution([320, 240])
        self.assertEqual(self.cam.get_resolution(), [320, 240])
        self.assertEqual(self.cam.capture_rgb().shape, (240, 320, 3))

    def test_get_set_perspective_mode(self):
        for perspective_mode in PerspectiveMode:
            self.cam.set_perspective_mode(perspective_mode)
            self.assertEqual(self.cam.get_perspective_mode(), perspective_mode)

    def test_get_set_render_mode(self):
        for render_mode in [RenderMode.OPENGL, RenderMode.OPENGL3]:
            self.cam.set_render_mode(render_mode)
            self.assertEqual(self.cam.get_render_mode(), render_mode)

    def test_get_set_perspective_angle(self):
        self.cam.set_perspective_angle(45.0)
        self.assertAlmostEqual(self.cam.get_perspective_angle(), 45.0, 3)

    def test_get_set_orthographic_size(self):
        self.cam.set_orthographic_size(1.0)
        self.assertEqual(self.cam.get_orthographic_size(), 1.0)

    def test_get_set_near_clipping_plane(self):
        self.cam.set_near_clipping_plane(0.5)
        self.assertEqual(self.cam.get_near_clipping_plane(), 0.5)

    def test_get_set_far_clipping_plane(self):
        self.cam.set_far_clipping_plane(0.5)
        self.assertEqual(self.cam.get_far_clipping_plane(), 0.5)

    def test_get_set_windowed_size(self):
        self.cam.set_windowed_size((640, 480))
        self.assertEqual(self.cam.get_windowed_size(), (640, 480))

    def test_get_set_entity_to_render(self):
        self.cam.set_entity_to_render(-1)
        self.assertEqual(self.cam.get_entity_to_render(), -1)
Example #6
class LoCoBotCamera(Camera):
    """docstring for SimpleCamera"""

    def __init__(self, configs, simulator):

        self.configs = configs
        self.sim = simulator.sim
        self.rgb_cam = VisionSensor("kinect_rgb")
        self.depth_cam = VisionSensor("kinect_depth")
        self.rgb_cam.set_render_mode(RenderMode.OPENGL3)
        self.depth_cam.set_render_mode(RenderMode.OPENGL3)

        # Pan and tilt related variables.
        self.pan_joint = Joint("LoCoBot_head_pan_joint")
        self.tilt_joint = Joint("LoCoBot_head_tilt_joint")

    def get_rgb(self):

        return self.rgb_cam.capture_rgb()

    def get_depth(self):

        return self.depth_cam.capture_depth()

    def get_rgb_depth(self):

        return self.get_rgb(), self.get_depth()

    def get_intrinsics(self):
        """
        Returns the intrinsic matrix of the camera

        :return: the intrinsic matrix (shape: :math:`[3, 3]`)
        :rtype: np.ndarray
        """
        # Todo: Remove this after we fix intrinsics
        raise NotImplementedError
        # The code below is unreachable until the intrinsics are fixed:
        # fx = self.configs['Camera.fx']
        # fy = self.configs['Camera.fy']
        # cx = self.configs['Camera.cx']
        # cy = self.configs['Camera.cy']
        # Itc = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        # return Itc

    def pix_to_3dpt(self, rs, cs, in_cam=False):
        """
		Get the 3D points of the pixels in RGB images.

		:param rs: rows of interest in the RGB image.
		           It can be a list or 1D numpy array
		           which contains the row indices.
		           The default value is None,
		           which means all rows.
		:param cs: columns of interest in the RGB image.
		           It can be a list or 1D numpy array
		           which contains the column indices.
		           The default value is None,
		           which means all columns.
		:param in_cam: return points in camera frame,
		               otherwise, return points in base frame

		:type rs: list or np.ndarray
		:type cs: list or np.ndarray
		:type in_cam: bool

		:returns: tuple (pts, colors)

		          pts: point coordinates in world frame
		          (shape: :math:`[N, 3]`)

		          colors: rgb values for pts_in_cam
		          (shape: :math:`[N, 3]`)

		:rtype: tuple(np.ndarray, np.ndarray)
		"""

        raise NotImplementedError

    def get_current_pcd(self, in_cam=True):
        """
		Return the point cloud at current time step (one frame only)

		:param in_cam: return points in camera frame,
		               otherwise, return points in base frame

		:type in_cam: bool
		:returns: tuple (pts, colors)

		          pts: point coordinates in world frame (shape: :math:`[N, 3]`)

		          colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
		:rtype: tuple(np.ndarray, np.ndarray)
		"""

        raise NotImplementedError

    @property
    def state(self):
        """
		Return the current pan and tilt joint angles of the robot camera.

		:return:
                pan_tilt: A list of the form [pan angle, tilt angle]
		:rtype: list
		"""
        return self.get_state()

    def get_state(self):
        """
		Return the current pan and tilt joint angles of the robot camera.

		:return:
                pan_tilt: A list of the form [pan angle, tilt angle]
		:rtype: list
		"""
        return [self.get_pan(), self.get_tilt()]

    def get_pan(self):
        """
		Return the current pan joint angle of the robot camera.

		:return:
		        pan: Pan joint angle
		:rtype: float
		"""
        return self.pan_joint.get_joint_position()

    def get_tilt(self):
        """
		Return the current tilt joint angle of the robot camera.

		:return:
		        tilt: Tilt joint angle
		:rtype: float
		"""
        return self.tilt_joint.get_joint_position()

    def set_pan(self, pan, wait=True):
        """
		Sets the pan joint angle to the specified value.

		:param pan: value to be set for pan joint
		:param wait: wait until the pan angle is set to
		             the target angle.

		:type pan: float
		:type wait: bool
		"""

        self.pan_joint.set_joint_position(pan)
        # [self.sim.step() for _ in range(50)]

    def set_tilt(self, tilt, wait=True):
        """
		Sets the tilt joint angle to the specified value.

		:param tilt: value to be set for the tilt joint
		:param wait: wait until the tilt angle is set to
		             the target angle.

		:type tilt: float
		:type wait: bool
		"""

        self.tilt_joint.set_joint_position(tilt)

    def set_pan_tilt(self, pan, tilt, wait=True):
        """
		Sets both the pan and tilt joint angles to the specified values.

		:param pan: value to be set for pan joint
		:param tilt: value to be set for the tilt joint
		:param wait: wait until the pan and tilt angles are set to
		             the target angles.

		:type pan: float
		:type tilt: float
		:type wait: bool
		"""

        self.set_pan(pan)
        self.set_tilt(tilt)

    def reset(self):
        """
		This function resets the pan and tilt joints by actuating
		them to their home configuration.
		"""
        self.set_pan_tilt(self.configs.CAMERA.RESET_PAN, self.configs.CAMERA.RESET_TILT)
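
A minimal usage sketch, assuming `configs` and `simulator` objects shaped as the constructor expects (a PyRep instance on simulator.sim, and CAMERA.RESET_PAN / CAMERA.RESET_TILT entries on configs):

# Hypothetical usage; configs and simulator come from the surrounding setup.
camera = LoCoBotCamera(configs, simulator)
camera.set_pan_tilt(0.2, -0.4)       # angles in radians
rgb, depth = camera.get_rgb_depth()  # RGB and depth frames
print(camera.state)                  # [pan angle, tilt angle]
camera.reset()                       # back to the home configuration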
Example #7
class CoppeliaEnv(gym.Env):
    def __init__(self, cnf, init=False):
        # Allows us to restart sim in different force_mode without recreating sim threads
        if not init:
            self._sim = self._start_sim(**cnf.sim)
        self._prepare_parameters(**cnf.params)
        self._prepare_robot(self._sub_mock, self._gripper_range)
        self._prepare_shapes(self._render, self._flat_agent)
        self._prepare_observation_space()
        self._prepare_action_space()
        self._prepare_subgoal_ranges(**cnf.subgoals)
        if cnf.params.ee_pos and cnf.params.ee_j_pos:
            raise Exception(f'Decide on your state space! ee_pos:{cnf.params.ee_pos} ee_j_pos:{cnf.params.ee_j_pos}')

    def step(self, action, reward_fn):
        if self._needs_reset:
            raise Exception('You should reset the environment before you step further.')
        self._apply_action(action)
        self._sim.step()
        reward = self._get_rew(action, self._task)
        done = self._get_done()
        observation = self._get_observation()
        info = self._get_info()
        self._timestep += 1
        self._total_it += 1
        self._reversal = bool(reward_fn)
        if self._render and self._double_buttons:
            if self._state_b1 == 1:
                self._target.set_color([1, 0, 1])
            if self._state_b2 == 1:
                self._target2.set_color([1, 0, 1])
        return observation, reward, done, info

    def reset(self, evalmode=False):
        '''Resets the environment to its initial state by setting all the object positions 
        explicitly.
        Configuration trees are used as an efficient way of recreating the robot on episode
        reset if a part breaks during simulation.
        :param evalmode: If True the target on the table will stay in a specific position.'''
        # This resets the gripper to its initial state, even if it broke during table touches
        self._sim.set_configuration_tree(self._initial_arm_conftree)
        self._sim.set_configuration_tree(self._initial_gripper_conftree)
        if self._render and self._double_buttons:
            self._reset_button_colors()
        state = self._reset(evalmode)
        # Control flow for task success
        self.success = False
        self.mega_reward = True
        self._state_b1 = 0
        self._state_b2 = 0 if self._double_buttons else 1
        self._stop_counter = COUNTER
        return state

    def render(self, mode='human'):
        '''gym render function. To render the simulator during simulation, call render(mode='human') once.
        To create rgb pictures, call the function every time you want to render a frame.'''
        if self._gym_cam is None:
            # Add the camera to the scene
            cam_placeholder = Dummy.create()
            cam_placeholder.set_pose([0, -0.5, 5, 1, 0, 0, 0])
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam2 = VisionSensor('Vision_sensor')
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            self._gym_cam2.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            if mode == "rgb_array":
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)
        if mode == "rgb_array":
            return self._gym_cam.capture_rgb()

    def close(self):
        '''Shuts simulator completely down'''
        self._sim.stop()
        self._sim.shutdown()

    def seed(self, seed):
        '''Would change the seed of the RNGs but this simulator is a physics simulator, so there should be no RNGs.'''
        pass
    
    def set_goal(self, goal):
        '''Set a visual sphere in the environment to visualize the goal of the meta-agent. Only works if *_render* is true.'''
        if not self._render:
            raise Exception('Do not set goal if you are not rendering. It will not even be present in the simulator.')
        self._meta_goal.set_position(goal, relative_to=None)

    def _start_sim(self, scene_file, render_scene_file, headless, sim_timestep, render):
        '''Starts the simulation. We use different scene files for training and rendering, as unused objects still 
        slow down the simulation.
        :param sim_timestep: The timestep between actionable frames. The physics simulation has a timestep of 5ms,
        but the control commands are repeated until the *sim_timestep* is reached'''
        sim = PyRep()
        scene_file = render_scene_file if render else scene_file
        scene_file = join(dirname(abspath(__file__)), scene_file)
        sim.launch(scene_file, headless=headless)
        # Need sim_timestep set to custom in CoppeliaSim Scene for this method to work.
        sim.set_simulation_timestep(dt=sim_timestep)
        sim.start()
        return sim  

    def _init_step(self):
        '''Need to take a first step in the created simulator so that observations can get created properly.'''
        self._robot.set_joint_target_velocities(np.zeros(shape=[self._max_vel.shape[0],], dtype=np.int32))
        self._sim.step()

    def _prepare_shapes(self, render, flat_agent):
        '''Create python handles for the objects in the scene.'''
        self._table = Shape('customizableTable')
        self._target = Shape('target')
        self._target.set_color([0.84,0.15,0.16])
        self._pos_b1 = self._target.get_position()
        self._target_init_pose = self._target.get_pose()
        if self._double_buttons:
            self._target2 = Shape('target1')
            self._pos_b2 = self._target2.get_position()
            self._target_init_pose2 = self._target2.get_pose()
        self._gym_cam = None
        self._target.set_renderable(True)
        self._target.set_dynamic(False)
        self._target.set_respondable(False)
        if self._double_buttons:
            self._target2.set_renderable(True)
            self._target2.set_dynamic(False)
            self._target2.set_respondable(False)
        if render and not flat_agent and not self._init:
            self._init = True
            print("RENDER")
            from pyrep.const import PrimitiveShape
            self._meta_goal = Shape.create(PrimitiveShape.SPHERE, [0.1,0.1,0.1], renderable=True,
                                           respondable=False, color=[0.22,0.34,0.14]) #color=[0,0.05,1])
            self._meta_goal.set_dynamic(False)

    def _prepare_observation_space(self):
        # Do initial step so that observations get generated 
        self._init_step()
        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=self._get_observation().shape)

    def _prepare_action_space(self):
        if self.force_mode:
            bound = np.hstack([self._max_torque, 1])
        else:
            bound = np.hstack([self._max_vel, 1])
        self.action_space = gym.spaces.Box(low=-bound, high=bound)
    
    def _prepare_robot(self, sub_mock, gripper_range):
        self._robot = robot.Robot(self.force_mode, self._max_torque, self._max_vel, gripper_range, sub_mock)
        self._init_pos = self._robot.get_joint_positions(gripper_special=False)
        self._initial_arm_conftree = self._robot.bot[0].get_configuration_tree()
        self._initial_gripper_conftree = self._robot.bot[1].get_configuration_tree()

    def _prepare_parameters(self, **kwargs):
        # Float or integer params
        kwargs = {f'_{key}': value for key, value in kwargs.items()}
        kwargs['force_mode'] = kwargs.pop('_force')
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])
        # Params that need special types
        self._max_vel = np.array(self._max_vel, np.float64) * (np.pi / 180)  # API uses rad / s
        self._max_torque = np.array(self._max_torque, np.float64)
        self._distance_fn = self._get_distance_fn(self._distance_function)
        self.time_limit = 600 if 'two' in self._task else 300
        self._state_max = np.array(self._state_max, np.float32)
        self._state_min = np.array(self._state_min, np.float32)
        # Control flow
        self._timestep = 0
        self._needs_reset = True
        self._init = False
        self._total_it = 0
        self._reversal = False
        self._double_buttons = bool('two' in self._task)
        # Need these before reset to get observation.shape
        self._state_b1 = 0
        self._state_b2 = 0
        self._init_gripper = [6.499e-1, -6.276e-1, 1.782]
        # For state normalization
        self._state_mean = np.load('./environments/state_mean_two_button.npy')
        self._state_std = np.load('./environments/state_sigma_two_button.npy')

    def _prepare_subgoal_ranges(self, ee_goal, j_goal, ej_goal):
        '''Generate subgoal ranges that the HIRO uses to determine its subgoal dimensions.  Not useful
        for other algorithms.'''
        if self._spherical_coord:
            self.subgoal_ranges = [1 for x in range(3)]
        elif self._ee_pos:
            self.subgoal_ranges = [ee_goal for x in range(3)]
        elif self._ee_j_pos:
            self.subgoal_ranges = [ej_goal[1] for x in range(ej_goal[0])]
        else:
            self.subgoal_ranges = [j_goal for x in range(7)]
        if not self._double_buttons:
            self.target_dim = self._pos_b1.shape[0] - 1
        else:
            #self.target_dim = (self._ep_target_pos.shape[0] - 1) * 2 + 2 + 1
            self.target_dim = (self._pos_b1.shape[0] - 1) * 2 + 2 
        self.subgoal_dim = len(self.subgoal_ranges)

    def _apply_action(self, action):
        ''' Assume action.shape to be [N,] where N-1 is the number of joints in the robotic arm, and action[N]
        indicates the open or close amount of the gripper. This should be changed for 2 arms.
        We treat the gripper as being underactuated. This means that we do not have control over the 2 joints,
        but can only move the two joints together as one control action. cf. Darmstadt Underactuated Sake Gripper Paper.'''
        try:
            assert tf.reduce_all(action <= self.action_space.high + 0.0001)
            assert tf.reduce_all(action >= self.action_space.low - 0.0001)
        except AssertionError:
            print("Attention: action out of action_space bounds. Your robot probably broke.")
        if self._sub_mock:
            # Use a simulated perfect sub-agent with PID controllers
            self._robot.set_joint_target_positions(action[:-1])
            return 
        if not self.force_mode:
            # velocity control mode
            self._robot.set_joint_target_velocities(action[:-1])
        else:
            # In Force Mode, set target_velocities to maximal possible value, then modulate maximal torques 
            signed_vels = np.sign(np.array(action[:-1])) * self._max_vel 
            self._robot.bot[0].set_joint_forces(np.abs(action[:-1]))
            self._robot.set_joint_target_velocities(signed_vels)
        # Gripper is handled with underactuation
        self._robot.actuate(action[-1])

    def _get_rew(self, action, task):
        if task == 'dense':
            return self._get_rew_dense(action)

        if task == 'sparse_one_button':
            return self._get_rew_sparse_one_button(action)

        if task == 'dense_two_button':
            if not self._reversal:
                self._state_b1 = False
                self._state_b2 = True
            else:
                self._state_b1 = True
                self._state_b2 = False

            return self._get_rew_dense_two_button(action)

        if task == 'sparse_two_button':
            if not self._reversal:
                self._state_b1 = False
                self._state_b2 = True
            else:
                self._state_b1 = True
                self._state_b2 = False

            return self._get_rew_sparse_two_button(action)

        if task == 'sparse_two_button_sequential':
            return self._get_rew_sparse_two_button_sequential_reset_counter_wrapper(action)

        raise Exception('''Pick one of the valid reward types:
                        1) dense
                        2) sparse_one_button
                        3) dense_two_button
                        4) sparse_two_button
                        5) sparse_two_button_sequential''')

    def _get_rew_dense(self, action):
        '''A dense reward based on the distance between the end-effector
        and the box is given. A success is recorded if 
        d < *self._touch_distance*'''
        dist = - self._get_distance(self._pos_b1)
        if tf.abs(dist) < self._touch_distance:
            self._state_b1 = True
        return  dist - self._action_regularizer * tf.square(tf.norm(action))

    def _get_rew_sparse_one_button(self, action):
        '''A sparse reward is given if the distance between the end-effector
        and the box is below *self._touch_distance*'''
        rew = -1.
        # One button touch task
        if self._get_distance(self._pos_b1) < self._touch_distance:
            rew += 1
            self._state_b1 = True
        return rew

    def _get_rew_dense_two_button(self, action):
        '''Compute a sparse reward based on two boxes where you just have
        to make contact with the right one. There is a negative reward
        for the incorrect contact and the episode only ends after correct 
        contact.'''
        if not self._reversal:
            rew = - self._get_distance(self._pos_b1)
            if self._get_distance(self._pos_b2) < self._touch_distance:
                rew -= 1000
        else:
            rew = - self._get_distance(self._pos_b2)
            if self._get_distance(self._pos_b1) < self._touch_distance:
                rew -= 1000
        if self._distance_query_switcher(0) < self._touch_distance:
            if not self._reversal:
                self._state_b1 = 1
            else:
                self._state_b2 = 1
            #print('button 1 pressed')
        if self._get_distance(self._pos_b2)  < self._touch_distance:
            pass
            #print('button 2 pressed')
        return rew

    def _get_rew_sparse_two_button(self, action):
        '''Compute a sparse reward based on two boxes where you just have
        to make contact with the right one. There is a negative reward
        for the incorrect contact and the episode only ends after correct 
        contact.'''
        rew = -1.
        if self._distance_query_switcher(0) < self._touch_distance and not self._state_b1 == 1:
            self._state_b1 = 1
            print('button 1 pressed')
            rew += 1
        if self._get_distance(self._pos_b2) < self._touch_distance:
            print('button 2 pressed')
            rew -= 1  # negative reward for touching the wrong button
        return rew

    def _get_rew_sparse_two_button_sequential_reset_counter_wrapper(self, action):
        '''If *self._stop_counter* is enabled, this wrapper blocks button
        touches and resulting rewards for *COUNTER* steps after every failure.'''
        rew = -1.  # default step penalty while touches are blocked
        if self._stop_counter >= COUNTER:
            rew, punishment = self._get_rew_sparse_two_button_sequential(action)
            if punishment and self._reset_on_wrong_sequence:
                self._reset_counter_settings()
        if self._reset_on_wrong_sequence:
            self._stop_counter += 1
        return rew

    def _reset_counter_settings(self):
        '''Resets the appropriate settings if *self._reset_on_wrong_sequence* is 
        enabled and an incorrect touching was detected.'''
        self._state_b1 = False
        self._state_b2 = False
        self.mega_reward = True
        self._stop_counter = 0
        if self._render:
            self._reset_button_colors()

    def _get_rew_sparse_two_button_sequential(self, action):
        '''A sparse reward is given if box 1 is touched before box 2.
        The success condition evaluates to *True* no matter what order the 
        boxes were touched in. This ensures the validity of the MDP.'''
        punishment = False
        rew = -1.
        if self._distance_query_switcher(0) < self._touch_distance and not self._state_b1 == 1:
            self._state_b1 = 1
            print('button 1 pressed')
        if self._distance_query_switcher(1) < self._touch_distance and not self._state_b2 == 1:
            self._state_b2 = 1
            print('button 2 pressed')
        if self._state_b2 == 1 and not self._state_b1 == 1:
            self.mega_reward = False
        if (self._state_b1 == 1 and self._state_b2 == 1) and self.mega_reward:
            rew += 50
            print('MEGA reward')
        if (self._state_b1 == 1 and self._state_b2 == 1) and not self.mega_reward:
            print('FAILURE Punishment')
            rew -= self._punishment
            punishment = True
        return rew, punishment

    def _distance_query_switcher(self, box):
        '''Switches the boxes used for distance and reward computation
        if the *self._reversal* signal is given.'''
        if not self._reversal:
            if box == 0:
                return self._get_distance(self._pos_b1)
            else:
                return self._get_distance(self._pos_b2)
        else:
            if box == 0:
                return self._get_distance(self._pos_b2)
            else:
                return self._get_distance(self._pos_b1)
    
    def _get_done(self):
        '''Compute a *done* which is returned and a *success* variable, which is internally saved.
        A proper MDP should distinguish between successful episodes where a certain condition
        was met (i.e. button touched) and episodes which ended because of a time limit. We use
        *done* to indicate that an episode should be reset and *success* to indicate if 
        the value of a terminal state should be set to zero. Alternatively, one could
        add the timestep to the state. cf. Time Limits in Reinforcement Learning, Pardo et al.'''
        self._needs_reset = True
        if self._state_b1 == 1 and self._state_b2 == 1:
            print("Success")
            self.success = True
        elif self._timestep >= self.time_limit - 1:
            pass
        else:
            self._needs_reset = False
        return self._needs_reset

    def _get_distance(self, target_pos):
        grip_pos = np.array(self._robot.get_ee_position(), dtype=np.float32)
        return self._distance_fn(grip_pos, target_pos)
    
    def _get_distance_fn(self, func_string):
        if func_string == 'euclid':
            return self._euclid_distance
        elif func_string == 'huber':
            return self._huber_distance
        else:
            raise Exception('Non valid distance measure specified. Allowed are: *euclid* and *huber*')

    def _euclid_distance(self, grip_pos, target_pos):
        '''Returns L2 distance between arm tip and target.'''
        return euclid(grip_pos - target_pos)

    def _huber_distance(self, grip_pos, target_pos, delta=1.):
        'Returns distance between arm tip and target using the Huber distance function.'
        dist = tf.constant(grip_pos - target_pos, dtype=tf.float32)
        return huber(dist, delta)

    def _get_observation(self):
        ''' Compute observation. 
        *ee_j_pos* means end-effector and joint positions and velocities are known.
        The *else* branch corresponds to the case that only the joint positions and 
        velocities are known.
        The returned observation is either [robot_state, box_position]
        or in the double button case
        [robot_state, box1_position, box1_active_status, box2_position, box2_active_status]'''
        if self._ee_j_pos:
            qpos = np.concatenate([self._robot.get_ee_position(), self._robot.get_joint_positions()])
            qvel = np.concatenate([self._robot.get_ee_velocity()[0], self._robot.get_joint_velocities()])
            #observation = np.array(np.concatenate([qpos, qvel, np.array([self._timestep])]), dtype=np.float32)
            observation = np.array(np.concatenate([qpos, qvel]), dtype=np.float32)
        else:
            qpos = self._robot.get_joint_positions()
            qvel = self._robot.get_joint_velocities() 
            observation = np.concatenate([qpos, qvel])
        if not self._double_buttons:
            target = self._pos_b1[:-1]
        else:
            target = np.concatenate([self._pos_b1[:-1], [self._state_b1], self._pos_b2[:-1],
                                     [self._state_b2]], axis=0)
            #target = np.concatenate([self._pos_b1[:-1], self._pos_b2[:-1]], axis=0)
        ret = np.array(np.concatenate([observation, target]), dtype=np.float32)
        if self._normalize:
            #ret = self._normalizer(ret)
            ret = self._linear_scaler(ret)
        return ret

    def _normalizer(self, x):
        return (x - self._state_mean) / (self._state_std)

    def _linear_scaler(self, x):
        return (x - self._state_min) / (0.5 * (self._state_max - self._state_min)) - 1

    def _reset_target(self, evalmode):
        pose = self._target_init_pose.copy()
        if self._double_buttons:
            pose2 = self._target_init_pose2.copy()
        if self._random_target and not evalmode or self._random_eval_target and evalmode:
            x, y = self._sample_in_circular_reach()
            pose[:2] = [x, y]
            self._target.set_pose(pose)
            if self._double_buttons:
                self._set_double_target(pose, pose2)
                self._target2.set_pose(pose2)
        else:
            self._target.set_pose(pose)
            if self._double_buttons:
                self._target2.set_pose(pose2)
        if not self._double_buttons:
            return np.array(self._target.get_position(), dtype=np.float32), None
        else:
            return (np.array(self._target.get_position(), dtype=np.float32),
                    np.array(self._target2.get_position(), dtype=np.float32))

    def _set_double_target(self, pose, pose2):
        iterations = 0
        while True:
            iterations += 1
            x, y = self._sample_in_circular_reach()
            pose2[:2] = [x, y]
            if euclid(pose[:2] - pose2[:2]) >= self._minimum_dist:
                break
            if iterations >= 10000:
                print('Limit exceeded for target setup. Look at *_reset_target()*')
                break

    def _reset_button_colors(self):
        self._target.set_color([0, 0, 1])
        self._target2.set_color([1, 0, 0])

    def _get_info(self):
        return ''
    
    def _reset(self, evalmode):
        ''' Reset target velocities BEFORE positions. As arm and gripper are reset independently 
         in *set_position()*
         there is an additional simulation timestep between them. If the velocities are not reset properly, the
         reset position of the robot will drift. This drift is small for the arm but can cause the gripper to
         explode and destabilize the simulation.'''
        self._robot.set_joint_target_velocities(np.zeros(shape=self._init_pos.shape))
        self._robot.set_position(self._init_pos)
        self._pos_b1, self._pos_b2 = self._reset_target(evalmode)
        self._sim.step()
        self._needs_reset = False
        self._timestep = 0
        return self._get_observation()
    
    def _sample_in_circular_reach(self):
        '''Uses polar coordinates to sample in a circular region around the arm. Radius was measured
        by letting the arm flail around for some time and then drawing a line from the two recorded points
        that were furthest apart.'''
        r = np.random.uniform(0.3, 1.867/2 - 0.1)
        theta = np.random.uniform(-np.pi, np.pi)
        x = r * np.cos(theta)
        y = r * np.sin(theta)
        return x + 0.622, y - 0.605 
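
A hedged usage sketch, assuming a config object `cnf` that provides the .sim, .params and .subgoals sections this constructor reads:

# Hypothetical usage; cnf must be assembled by the surrounding code.
env = CoppeliaEnv(cnf)
state = env.reset()
for _ in range(env.time_limit):
    action = env.action_space.sample()
    state, reward, done, info = env.step(action, reward_fn=False)
    if done:
        state = env.reset(evalmode=True)
env.close()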
Пример #8
0
class SphericalVisionSensor(Object):
    """An object able capture 360 degree omni-directional images.
    """
    def __init__(self, name):
        super().__init__(name)

        # final omni-directional sensors
        self._sensor_depth = VisionSensor('%s_sensorDepth' % (self.get_name()))
        self._sensor_rgb = VisionSensor('%s_sensorRGB' % (self.get_name()))

        # directed sub-sensors
        names = ['front', 'top', 'back', 'bottom', 'left', 'right']
        self._six_sensors = [
            VisionSensor('%s_%s' % (self.get_name(), n)) for n in names
        ]
        self._front = self._six_sensors[0]

        # directed sub-sensor handles list
        self._six_sensor_handles = [
            vs.get_handle() for vs in self._six_sensors
        ]

        # set all to explicit handling
        self._set_all_to_explicit_handling()

        # assert sensor properties are matching
        self._assert_matching_resolutions()
        self._assert_matching_render_modes()
        self._assert_matching_entities_to_render()
        self._assert_matching_window_sizes()
        self._assert_matching_near_clipping_planes()
        self._assert_matching_far_clipping_planes()

        # near and far clipping plane
        self._near_clipping_plane = self.get_near_clipping_plane()
        self._far_clipping_plane = self.get_far_clipping_plane()
        self._clipping_plane_diff = self._far_clipping_plane - self._near_clipping_plane

    # Private #
    # --------#

    def _set_all_to_explicit_handling(self):
        self._sensor_depth.set_explicit_handling(1)
        self._sensor_rgb.set_explicit_handling(1)
        for sensor in self._six_sensors:
            sensor.set_explicit_handling(1)

    def _assert_matching_resolutions(self):
        assert self._sensor_depth.get_resolution() == self._sensor_rgb.get_resolution()
        front_sensor_res = self._front.get_resolution()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_resolution() == front_sensor_res

    def _assert_matching_render_modes(self):
        assert self._sensor_depth.get_render_mode() == self._sensor_rgb.get_render_mode()
        front_sensor_render_mode = self._front.get_render_mode()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_render_mode() == front_sensor_render_mode

    def _assert_matching_entities_to_render(self):
        assert (self._sensor_depth.get_entity_to_render() ==
                self._sensor_rgb.get_entity_to_render())
        front_sensor_entity_to_render = self._front.get_entity_to_render()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_entity_to_render() == front_sensor_entity_to_render

    def _assert_matching_window_sizes(self):
        assert self._sensor_depth.get_windowed_size() == self._sensor_rgb.get_windowed_size()

    def _assert_matching_near_clipping_planes(self):
        front_sensor_near = self._front.get_near_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_near_clipping_plane() == front_sensor_near

    def _assert_matching_far_clipping_planes(self):
        front_sensor_far = self._front.get_far_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_far_clipping_plane() == front_sensor_far

    def _get_requested_type(self) -> ObjectType:
        return ObjectType(sim.simGetObjectType(self.get_handle()))

    # Public #
    # -------#

    def handle_explicitly(self) -> None:
        """Handle spherical vision sensor explicitly.

          This enables capturing image (e.g., capture_rgb())
          without PyRep.step().
        """
        utils.script_call(
            'handleSpherical@PyRep', PYREP_SCRIPT_TYPE,
            [self._sensor_depth._handle, self._sensor_rgb._handle] +
            self._six_sensor_handles, [], [], [])

    def capture_rgb(self) -> np.ndarray:
        """Retrieves the rgb-image of a spherical vision sensor.

        :return: A numpy array of size (width, height, 3)
        """
        return self._sensor_rgb.capture_rgb()

    def capture_depth(self, in_meters=False) -> np.ndarray:
        """Retrieves the depth-image of a spherical vision sensor.

        :param in_meters: Whether the depth should be returned in meters.
        :return: A numpy array of size (width, height)
        """
        # Depth is encoded in the red channel of the depth sensor, normalized to [0, 1].
        if in_meters:
            return (self._sensor_depth.capture_rgb()[:, :, 0] *
                    self._clipping_plane_diff + self._near_clipping_plane)
        return self._sensor_depth.capture_rgb()[:, :, 0]

    def get_resolution(self) -> List[int]:
        """ Return the spherical vision sensor's resolution.

        :return: Resolution [x, y]
        """
        return self._sensor_depth.get_resolution()

    def set_resolution(self, resolution: List[int]) -> None:
        """ Set the spherical vision sensor's resolution.

        :param resolution: New resolution [x, y]
        """
        if resolution[0] != 2 * resolution[1]:
            raise Exception(
                'Spherical vision sensors must have an X resolution 2x the Y resolution'
            )
        if resolution[1] % 2 != 0:
            raise Exception(
                'Spherical vision sensors must have an even Y resolution')
        box_sensor_resolution = [resolution[1] // 2, resolution[1] // 2]
        for sensor in self._six_sensors:
            sensor.set_resolution(box_sensor_resolution)
        self._sensor_depth.set_resolution(resolution)
        self._sensor_rgb.set_resolution(resolution)

    def get_render_mode(self) -> RenderMode:
        """ Retrieves the spherical vision sensor's rendering mode

        :return: RenderMode for the current rendering mode.
        """
        return self._sensor_depth.get_render_mode()

    def set_render_mode(self, render_mode: RenderMode) -> None:
        """ Set the spherical vision sensor's rendering mode

        :param render_mode: The new sensor rendering mode, one of:
            RenderMode.OPENGL
            RenderMode.OPENGL_AUXILIARY
            RenderMode.OPENGL_COLOR_CODED
            RenderMode.POV_RAY
            RenderMode.EXTERNAL
            RenderMode.EXTERNAL_WINDOWED
            RenderMode.OPENGL3
            RenderMode.OPENGL3_WINDOWED
        """
        self._sensor_depth.set_render_mode(render_mode)
        self._sensor_rgb.set_render_mode(render_mode)
        for sensor in self._six_sensors:
            sensor.set_render_mode(render_mode)

    def get_windowed_size(self) -> Sequence[int]:
        """Get the size of windowed rendering for the omni-directional image.

        :return: The (x, y) resolution of the window. 0 for full-screen.
        """
        return self._sensor_depth.get_windowed_size()

    def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
        """Set the size of windowed rendering for the omni-directional image.

        :param resolution: The (x, y) resolution of the window.
            0 for full-screen.
        """
        self._sensor_depth.set_windowed_size(resolution)
        self._sensor_rgb.set_windowed_size(resolution)

    def get_near_clipping_plane(self) -> float:
        """ Get the spherical depth sensor's near clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_near_clipping_plane()

    def set_near_clipping_plane(self, near_clipping: float) -> None:
        """ Set the spherical depth sensor's near clipping plane.

        :param near_clipping: New near clipping plane (in metres)
        """
        self._near_clipping_plane = near_clipping
        self._clipping_plane_diff = self._far_clipping_plane - near_clipping
        for sensor in self._six_sensors:
            sensor.set_near_clipping_plane(near_clipping)

    def get_far_clipping_plane(self) -> float:
        """ Get the Sensor's far clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_far_clipping_plane()

    def set_far_clipping_plane(self, far_clipping: float) -> None:
        """ Set the spherical depth sensor's far clipping plane.

        :param far_clipping: New far clipping plane (in metres)
        """
        self._far_clipping_plane = far_clipping
        self._clipping_plane_diff = far_clipping - self._near_clipping_plane
        for sensor in self._six_sensors:
            sensor.set_far_clipping_plane(far_clipping)

    def set_entity_to_render(self, entity_to_render: int) -> None:
        """ Set the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection. -1 to render all objects in scene.

        :param entity_to_render: Handle of the entity to render
        """
        self._sensor_depth.set_entity_to_render(entity_to_render)
        self._sensor_rgb.set_entity_to_render(entity_to_render)

    def get_entity_to_render(self) -> int:
        """ Get the entity rendered by the spherical vision sensor.
        This can be an object or, more usefully, a collection. -1 means all objects in the scene are rendered.

        :return: Handle of the entity to render
        """
        return self._sensor_depth.get_entity_to_render()
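
A minimal usage sketch for this class. The scene file and sensor name below are placeholders: it assumes a launched PyRep scene that already contains a spherical sensor rig with the expected child objects (the _sensorDepth, _sensorRGB and six directed sub-sensors).

from pyrep import PyRep

pr = PyRep()
pr.launch('scene_with_spherical_rig.ttt', headless=True)  # placeholder scene
pr.start()

cam = SphericalVisionSensor('sphericalVisionRGBAndDepth')  # placeholder name
cam.set_resolution([512, 256])  # X must be 2x Y, and Y must be even

# Explicit handling renders on demand, without pr.step().
cam.handle_explicitly()
rgb = cam.capture_rgb()                    # floats in [0, 1]
depth = cam.capture_depth(in_meters=True)  # rescaled using the clipping planes

pr.stop()
pr.shutdown()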
Example #9
0
        # a_all[idx].set_detectable(1)
        # a_all[idx].set_renderable(1)
        # a_all[idx].set_dynamic(0)
    return a_all, b


# pr = PyRep()
# SCENE_FILE = join(dirname(abspath(__file__)), 'test_rcan.ttt')
# pr.launch(SCENE_FILE, headless=True)
# pr.start()
# pr.set_simulation_timestep(0.5)
env = youBotAll(scene_name='scene1_5x5.ttt', boundary=2)
pr = env.pr
camera_arm = env.camera_arm
camera_arm = VisionSensor('Vision_sensor1')
camera_arm.set_render_mode(RenderMode.OPENGL3_WINDOWED)
camera_arm.set_resolution([512, 512])

# obj1 = Shape('Shape0')
# obj2 = Shape('Shape')
# pos = obj2.get_position()
# obj2.remove()
obj2, obj2_coll = add_object(1)
obj2_coll.set_position([1, 0, 0])

floor = Shape('floor')
walls = [Shape('wall%s' % i) for i in range(4)]

texture_id, texture_object = _get_texture(pr)
floor.set_texture(texture=texture_id,
                  mapping_mode=TextureMappingMode.PLANE,
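
The excerpt is cut off mid-call above, and _get_texture is a project-specific helper that is not shown. A plausible sketch, assuming a texture image exists on disk (the filename is ours) and that PyRep's create_texture is used, which loads an image into the scene and returns an auxiliary shape together with a Texture handle:

def _get_texture(pr, filename='floor_texture.png'):  # hypothetical filename
    # Assumption: create_texture returns (auxiliary Shape, Texture); the
    # Texture handle is what Shape.set_texture() expects above.
    shape, texture = pr.create_texture(filename)
    return texture, shape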
Example #10
0
class robotEnv(object):
    def __init__(self, scene_name, reward_dense, boundary):
        self.pr = PyRep()
        SCENE_FILE = join(dirname(abspath(__file__)), scene_name)
        self.pr.launch(SCENE_FILE, headless=True)
        self.pr.start()
        self.pr.set_simulation_timestep(0.05)

        if scene_name != 'robot_scene.ttt':  # youbot_navig2
            home_dir = os.path.expanduser('~')
            os.chdir(join(home_dir, 'robotics_drl'))
            self.pr.import_model('robot.ttm')

        # Vision sensor handles
        self.camera_top = VisionSensor('Vision_sensor')
        # self.camera_top.set_render_mode(RenderMode.OPENGL3)
        # self.camera_top.set_resolution([256,256])

        self.camera_arm = VisionSensor('Vision_sensor1')
        self.camera_arm.set_render_mode(RenderMode.OPENGL3)
        self.camera_arm.set_resolution([256, 256])

        self.reward_dense = reward_dense
        self.reward_termination = 1 if self.reward_dense else 0
        self.boundary = boundary

        # Robot links
        robot_links = []
        links_size = [3, 5, 5, 1]
        for i in range(len(links_size)):
            robot_links.append(
                [Shape('link%s_%s' % (i, j)) for j in range(links_size[i])])

        links_color = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
        for color_i, link_group in enumerate(robot_links):
            for link in link_group:
                link.remove_texture()
                link.set_color(links_color[color_i % len(links_color)])

    def render(self, view='top'):
        if view == 'top':
            img = self.camera_top.capture_rgb() * 256  # (dim, dim, 3)
        elif view == 'arm':
            img = self.camera_arm.capture_rgb()
        else:
            raise ValueError("view must be 'top' or 'arm'")
        return img

    def terminate(self):
        self.pr.stop()  # Stop the simulation
        self.pr.shutdown()

    def sample_action(self):
        return [(2 * random.random() - 1) for _ in range(self.action_space)]

    def rand_bound(self):
        x = random.uniform(-self.boundary, self.boundary)
        y = random.uniform(-self.boundary, self.boundary)
        orientation = random.random() * 2 * pi
        return x, y, orientation

    def action_type(self):
        return 'continuous'

    def observation_space(self):
        _, obs = self.get_observation()
        return obs.shape

    def step_limit(self):
        return 250
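
PyRep's capture_rgb() returns floats in [0, 1], which is why the 'top' view above is scaled by 256 while the 'arm' view is not. A small sketch for normalizing either view to a displayable 8-bit image (the helper name is ours, not part of the class):

import numpy as np

def to_uint8(img):
    # Accept both the raw [0, 1] 'arm' view and the pre-scaled 'top' view;
    # the clip guards against the 256 factor overshooting 255.
    if img.max() <= 1.0:
        img = img * 255.0
    return np.clip(img, 0, 255).astype(np.uint8)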