class RattlerCamera:
    def __init__(self):
        self._head_camera = VisionSensor('rattler_eye')
        self._head_camera_mask = None
        self._head_camera_state = 1

    def get_camera_state(self) -> int:
        return self._head_camera_state

    def set_camera_state(self, state: int) -> None:
        self._head_camera_state = state

    def set_camera_properties(self, conf: CameraConfig) -> None:
        if not (conf.rgb or conf.depth):
            self._head_camera.remove()
        else:
            self._head_camera.set_resolution(conf.image_size)
            self._head_camera.set_render_mode(conf.render_mode)

        if conf.mask:
            self._head_camera_mask = self._head_camera.copy()
            self._head_camera_mask.set_render_mode(
                RenderMode.OPENGL_COLOR_CODED)

    def capture_rgb(self) -> np.ndarray:
        return self._head_camera.capture_rgb()

    def capture_depth(self) -> np.ndarray:
        return self._head_camera.capture_depth()

    def mask(self) -> VisionSensor:
        return self._head_camera_mask
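
A minimal usage sketch, assuming a launched PyRep scene that contains a 'rattler_eye' vision sensor and an RLBench-style CameraConfig (the import path is an assumption):

# illustrative usage -- requires a running scene with a 'rattler_eye' sensor
from rlbench.observation_config import CameraConfig  # assumed import path

cam = RattlerCamera()
cam.set_camera_properties(CameraConfig(image_size=(128, 128)))
rgb = cam.capture_rgb()      # float RGB image in [0, 1], shape (H, W, 3)
depth = cam.capture_depth()  # float depth map, shape (H, W)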
Example #2
class ArmECM:
    def __init__(self, pr):
        # The simulation is launched and stepped externally, e.g.:
        #   pr = PyRep(); pr.launch(scenePath); pr.start(); pr.step()
        self.pr = pr
        self.base_handle = Shape('L0_respondable_ECM')
        self.j1_handle = Joint('J1_ECM')
        self.j2_handle = Joint('J2_ECM')
        self.j3_handle = Joint('J3_ECM')
        self.j4_handle = Joint('J4_ECM')

        self.left_cam = VisionSensor('Vision_sensor_left')
        self.right_cam = VisionSensor('Vision_sensor_right')

    def getJointAngles(self):

        pos1 = self.j1_handle.get_joint_position()
        pos2 = self.j2_handle.get_joint_position()
        pos3 = self.j3_handle.get_joint_position()
        pos4 = self.j4_handle.get_joint_position()
        return np.array([pos1, pos2, pos3, pos4])

    def setJointAngles(self, jointAngles):

        self.j1_handle.set_joint_position(jointAngles[0])
        self.j2_handle.set_joint_position(jointAngles[1])
        self.j3_handle.set_joint_position(jointAngles[2])
        self.j4_handle.set_joint_position(jointAngles[3])

    def getCurrentPose(self):

        return self.left_cam.get_pose(relative_to=self.base_handle)

    def getStereoImagePairs(self):
        return self.left_cam.capture_rgb(), self.right_cam.capture_rgb()

    def getDepthImagePairs(self):
        return (self.left_cam.capture_depth(in_meters=True),
                self.right_cam.capture_depth(in_meters=True))
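
A hedged usage sketch; the scene path is a placeholder and the joint/camera names must match an ECM scene:

# illustrative usage -- 'ecm_scene.ttt' is a placeholder scene file
from pyrep import PyRep

pr = PyRep()
pr.launch('ecm_scene.ttt', headless=True)
pr.start()
ecm = ArmECM(pr)
ecm.setJointAngles(ecm.getJointAngles() + 0.05)  # nudge all four joints
pr.step()
left_rgb, right_rgb = ecm.getStereoImagePairs()
pr.stop()
pr.shutdown()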

    """def stopSim(self):
Example #3
def get_rgb_depth(sensor: VisionSensor, get_rgb: bool, get_depth: bool,
                  rgb_noise: NoiseModel, depth_noise: NoiseModel):
    rgb = depth = None
    if sensor is not None and (get_rgb or get_depth):
        sensor.handle_explicitly()
        if get_rgb:
            rgb = sensor.capture_rgb()
            if rgb_noise is not None:
                rgb = rgb_noise.apply(rgb)
        if get_depth:
            depth = sensor.capture_depth()
            if depth_noise is not None:
                depth = depth_noise.apply(depth)
    return rgb, depth
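
A small call sketch, assuming a 'cam0' sensor exists in the loaded scene; passing None for the noise models skips noise:

# illustrative call -- 'cam0' must exist in the scene
sensor = VisionSensor('cam0')
sensor.set_explicit_handling(1)  # so handle_explicitly() controls rendering
rgb, depth = get_rgb_depth(sensor, get_rgb=True, get_depth=True,
                           rgb_noise=None, depth_noise=None)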
Example #4
class TestVisionSensors(TestCore):
    def setUp(self):
        super().setUp()
        [self.pyrep.step() for _ in range(10)]
        self.cam = VisionSensor('cam0')

    def test_capture_rgb(self):
        img = self.cam.capture_rgb()
        self.assertEqual(img.shape, (16, 16, 3))
        #  Check that it's not a blank image
        self.assertFalse(img.min() == img.max() == 0.0)

    def test_capture_depth(self):
        img = self.cam.capture_depth()
        self.assertEqual(img.shape, (16, 16))
        # Check that it's not a blank depth map
        self.assertFalse(img.min() == img.max() == 1.0)
Example #5
# current_pose = get_2d_pose(robot)
target_pose = get_2d_pose(goal)

# concatenate (x, y) with yaw; list() ensures + concatenates instead of
# adding element-wise in case the getters return numpy arrays
current_pose = list(robot.get_position(vision_sensor))[:2] + list(robot.get_orientation())[-1:]
current_pose = (-current_pose[0], current_pose[1], current_pose[2])

print(current_pose)
print(get_2d_pose(robot))

pr.step()
origin_x, _, origin_y, _, _, _ = vision_sensor.get_bounding_box()

# Setup occ_grid
occ_grid = OccupancyGrid(origin_x, origin_y)

depth = vision_sensor.capture_depth()
occ_grid.fromDepth(depth)

'''
dij_solver = Dijkstra(occ_grid, current_pose, target_pose)
moves = dij_solver.solve()
for i in moves:
  pr.step()
  set_2d_pose(robot, (i[0], i[1], current_pose[2]))
  time.sleep(0.05)
'''

'''
# RRT Solver
rrt_solver = RRT(pr, robot, current_pose, target_pose, [-0.5,0.5], [-0.5,0.5])
moves = rrt_solver.solve()
'''
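
The snippet is cut off after solve(); if the RRT solver returns (x, y) waypoints like the Dijkstra solver above, replaying them would presumably mirror that loop (hypothetical sketch):

# hypothetical replay loop, mirroring the Dijkstra block above
for i in moves:
    pr.step()
    set_2d_pose(robot, (i[0], i[1], current_pose[2]))
    time.sleep(0.05)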
Example #6
class EnvPollos(Env):
    def __init__(self, ep_length=100):
        """
        Pollos environment for testing purposes
        :param ep_length: (int) the length of each episode, in timesteps
        """
        logging.basicConfig(level=logging.DEBUG)
        # bounds must be symmetric; actions are interpreted as angular increments
        #self.action_space = Box(low=np.array([-0.1, -1.7, -2.5, -1.5, 0.0, 0.0]),
        #                        high=np.array([0.1, 1.7, 2.5, 1.5, 0.0, 0.0]))
        inc = 0.1
        self.action_space = Box(low=np.array(
            [-inc, -inc, -inc, -inc, 0.0, 0.0]),
                                high=np.array([inc, inc, inc, inc, 0.0, 0.0]))
        self.observation_space = Box(low=0,
                                     high=255,
                                     shape=(480, 640, 1),
                                     dtype=np.uint8)

        # self.observation_space = Box(low=np.array([-0.1, -0.2, 0, -1.5, 0.0, 0.0, -0.5, -0.2, -0.2]),
        #                              high=np.array([0.1, 1.7, 2.5, 0, 0.0, 0.0, 0.5, 0.2, 1.0]))

        #
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.agent = UR10()
        self.agent.max_velocity = 1.2
        self.agent.set_control_loop_enabled(True)
        self.agent.set_motor_locked_at_zero_velocity(True)
        self.joints = [
            Joint('UR10_joint1'),
            Joint('UR10_joint2'),
            Joint('UR10_joint3'),
            Joint('UR10_joint4'),
            Joint('UR10_joint5'),
            Joint('UR10_joint6')
        ]
        #self.joints_limits = [[j.get_joint_interval()[1][0],j.get_joint_interval()[1][0]+j.get_joint_interval()[1][1]]
        #                      for j in self.joints]
        self.high_joints_limits = [0.1, 1.7, 2.7, 0.0, 0.1, 0.1]
        self.low_joints_limits = [-0.1, -0.2, 0.0, -1.5, -0.1, -0.1]
        self.agent_hand = Shape('hand')

        self.initial_agent_tip_position = self.agent.get_tip().get_position()
        self.initial_agent_tip_quaternion = self.agent.get_tip(
        ).get_quaternion()
        self.target = Dummy('UR10_target')
        self.initial_joint_positions = self.agent.get_joint_positions()
        self.pollo_target = Dummy('pollo_target')
        self.pollo = Shape('pollo')
        self.table_target = Dummy('table_target')
        self.initial_pollo_position = self.pollo.get_position()
        self.initial_pollo_orientation = self.pollo.get_quaternion()
        self.table_target = Dummy('table_target')
        # objects
        self.scene_objects = [
            Shape('table0'),
            Shape('Plane'),
            Shape('Plane0'),
            Shape('ConcretBlock')
        ]
        #self.agent_tip = self.agent.get_tip()
        self.initial_distance = np.linalg.norm(
            np.array(self.initial_pollo_position) -
            np.array(self.initial_agent_tip_position))

        # camera
        self.camera = VisionSensor('kinect_depth')
        self.camera_matrix_extrinsics = vrep.simGetObjectMatrix(
            self.camera.get_handle(), -1)
        self.np_camera_matrix_extrinsics = np.delete(
            np.linalg.inv(self.vrepToNP(self.camera_matrix_extrinsics)), 3, 0)
        width = 640.0
        height = 480.0
        angle = math.radians(57.0)
        focalx_px = (width / 2.0) / math.tan(angle / 2.0)
        focaly_px = (height / 2.0) / math.tan(angle / 2.0)
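        # Pinhole model: f_px = (size_px / 2) / tan(fov / 2). The negated focal
        # lengths below presumably account for the sensor's image-axis flip.
        # Note: focalx_px is reused for the y entry (a square-pixel assumption),
        # so focaly_px is effectively unused.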
        self.np_camera_matrix_intrinsics = np.array([[-focalx_px, 0.0, 320.0],
                                                     [0.0, -focalx_px, 240.0],
                                                     [0.0, 0.0, 1.0]])

        self.reset()

    def reset(self):
        pos = list(
            np.random.uniform([-0.1, -0.1, 0.0], [0.1, 0.1, 0.1]) +
            self.initial_pollo_position)
        self.pollo.set_position(pos)
        self.pollo.set_quaternion(self.initial_pollo_orientation)
        self.agent.set_joint_positions(self.initial_joint_positions)
        self.initial_epoch_time = time.time()
        while True:  # wait for arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1):  # all joints (nearly) stopped
                break
        # after ~2000 resets the robot has to be reloaded because it falls apart
        #if self.num_resets > 2000:

        self.total_reward = 0.0
        print('----------------')

        return self._get_state()

    def step(self, action):
        if action is None:
            self.pr.step()
            return self._get_state(), 0.0, False, {}

        # check for nan
        if np.any(np.isnan(action)):
            print(action)
            return self._get_state(), -1.0, False, {}

        # action[1] = np.interp(action[1], [-1.7,1.7], [-0.2,1.7])
        # action[2] = np.interp(action[2], [-2.5,2.5], [0,2.5])
        # action[3] = np.interp(action[3], [-1.5,1.5], [-1.5,0])

        # add actions as increments to current joints value
        new_joints = np.array(
            self.agent.get_joint_positions()) + np.array(action)

        # check limits
        if np.any(np.greater(new_joints, self.high_joints_limits)) or np.any(
                np.less(new_joints, self.low_joints_limits)):
            logging.debug("New joints value out of limits r=-10")
            return self._get_state(), -10.0, True, {}

        # move the robot and wait to stop
        self.agent.set_joint_target_positions(new_joints)
        reloj = time.time()
        while True:  # wait for arm to stop
            self.pr.step()  # Step the physics simulation
            a = self.agent.get_joint_velocities()
            if np.all(np.fabs(a) < 0.1) or (time.time() - reloj) > 3:
                break

        # compute relevant magnitudes
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_robot_tip_position = np.array(self.agent.get_tip().get_position())
        dist = np.linalg.norm(np_pollo_target - np_robot_tip_position)
        altura = np.clip((np_pollo_target[2] - self.initial_pollo_position[2]),
                         0, 0.5)

        for obj in self.scene_objects:  # collision with the table
            if self.agent_hand.check_collision(obj):
                logging.debug("Collision with objects r=-10")
                self.total_reward = self.total_reward + -10.0
                print(self.total_reward)
                return self._get_state(), -10.0, True, {}
        if altura > 0.3:  # chicken in hand
            logging.debug("Height reached !!! r=30")
            self.total_reward = self.total_reward + 30.0
            print(self.total_reward)
            return self._get_state(), 30.0, True, {}
        elif dist > self.initial_distance:  # hand moves away from the chicken
            logging.debug("Hand moving away from chicken r=-10")
            self.total_reward = self.total_reward + -10.0
            print(self.total_reward)
            return self._get_state(), -10.0, True, {}
        used_time = time.time() - self.initial_epoch_time
        if used_time > 5:  # too much time
            logging.debug("Time out. End of epoch r=-10")
            if altura > 0.01:
                self.total_reward = self.total_reward + altura
            else:
                self.total_reward = self.total_reward - 10
            print(self.total_reward)
            return self._get_state(), -10.0, True, {}

        # Reward
        # pollo_height = np.exp(-altura*20)  # for 0.3 m -> 0.002, for 0 m -> 1
        reward = altura - dist - used_time
        self.total_reward = self.total_reward + reward
        #print(self.total_reward)
        return self._get_state(), reward, False, {}

    def close(self):
        self.pr.stop()
        self.pr.shutdown()

    def render(self):
        print("RENDER")
        np_pollo_en_camara = self.getPolloEnCamara()

        # capture the RGB buffer of the depth camera for visualization
        depth = self.camera.capture_rgb()
        circ = plt.Circle(
            (int(np_pollo_en_camara[0]), int(np_pollo_en_camara[1])), 10)
        plt.clf()
        fig = plt.gcf()
        ax = fig.gca()
        ax.add_artist(circ)
        ax.imshow(depth, cmap='hot')
        plt.pause(0.000001)

    # Aux

    # transform env.pollo_target.get_position() to camera coordinates and
    # project it to image coordinates
    def getPolloEnCamara(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_target_cam = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        np_pollo_en_camara = self.np_camera_matrix_intrinsics.dot(
            np_pollo_target_cam)
        np_pollo_en_camara = np_pollo_en_camara / np_pollo_en_camara[2]
        np_pollo_en_camara = np.delete(np_pollo_en_camara, 2)
        return np_pollo_en_camara

    def getPolloEnCamaraEx(self):
        np_pollo_target = np.array(self.pollo_target.get_position())
        np_pollo_en_camara = self.np_camera_matrix_extrinsics.dot(
            np.append([np_pollo_target], 1.0))
        return np_pollo_en_camara

    def _get_state(self):
        depth = self.camera.capture_depth()
        scaler = MinMaxScaler(feature_range=(0, 250))
        scaler = scaler.fit(depth)
        depth_scaled = scaler.transform(depth)
        depth_scaled = np.array(depth_scaled).reshape(480, 640, 1)
        return depth_scaled.astype('uint8')

    def vrepToNP(self, c):
        return np.array([[c[0], c[4], c[8], c[3]], [c[1], c[5], c[9], c[7]],
                         [c[2], c[6], c[10], c[11]], [0, 0, 0, 1]])
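
A hedged Gym-style rollout sketch; it assumes the UR10/chicken scene referenced by SCENE_FILE is available:

# illustrative random rollout
env = EnvPollos()
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
env.close()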
Example #7
class TestVisionSensors(TestCore):
    def setUp(self):
        super().setUp()
        [self.pyrep.step() for _ in range(10)]
        self.cam = VisionSensor('cam0')

    def test_handle_explicitly(self):
        cam = VisionSensor.create((640, 480))

        # blank image
        rgb = cam.capture_rgb()
        self.assertEqual(rgb.sum(), 0)

        # non-blank image
        cam.set_explicit_handling(value=1)
        cam.handle_explicitly()
        rgb = cam.capture_rgb()
        self.assertNotEqual(rgb.sum(), 0)

        cam.remove()

    def test_capture_rgb(self):
        img = self.cam.capture_rgb()
        self.assertEqual(img.shape, (16, 16, 3))
        #  Check that it's not a blank image
        self.assertFalse(img.min() == img.max() == 0.0)

    def test_capture_depth(self):
        img = self.cam.capture_depth()
        self.assertEqual(img.shape, (16, 16))
        # Check that it's not a blank depth map
        self.assertFalse(img.min() == img.max() == 1.0)

    def test_create(self):
        cam = VisionSensor.create([640, 480],
                                  perspective_mode=True,
                                  view_angle=35.0,
                                  near_clipping_plane=0.25,
                                  far_clipping_plane=5.0,
                                  render_mode=RenderMode.OPENGL3)
        self.assertEqual(cam.capture_rgb().shape, (480, 640, 3))
        self.assertEqual(cam.get_perspective_mode(),
                         PerspectiveMode.PERSPECTIVE)
        self.assertAlmostEqual(cam.get_perspective_angle(), 35.0, 3)
        self.assertEqual(cam.get_near_clipping_plane(), 0.25)
        self.assertEqual(cam.get_far_clipping_plane(), 5.0)
        self.assertEqual(cam.get_render_mode(), RenderMode.OPENGL3)

    def test_get_set_resolution(self):
        self.cam.set_resolution([320, 240])
        self.assertEqual(self.cam.get_resolution(), [320, 240])
        self.assertEqual(self.cam.capture_rgb().shape, (240, 320, 3))

    def test_get_set_perspective_mode(self):
        for perspective_mode in PerspectiveMode:
            self.cam.set_perspective_mode(perspective_mode)
            self.assertEqual(self.cam.get_perspective_mode(), perspective_mode)

    def test_get_set_render_mode(self):
        for render_mode in [RenderMode.OPENGL, RenderMode.OPENGL3]:
            self.cam.set_render_mode(render_mode)
            self.assertEqual(self.cam.get_render_mode(), render_mode)

    def test_get_set_perspective_angle(self):
        self.cam.set_perspective_angle(45.0)
        self.assertAlmostEqual(self.cam.get_perspective_angle(), 45.0, 3)

    def test_get_set_orthographic_size(self):
        self.cam.set_orthographic_size(1.0)
        self.assertEqual(self.cam.get_orthographic_size(), 1.0)

    def test_get_set_near_clipping_plane(self):
        self.cam.set_near_clipping_plane(0.5)
        self.assertEqual(self.cam.get_near_clipping_plane(), 0.5)

    def test_get_set_far_clipping_plane(self):
        self.cam.set_far_clipping_plane(0.5)
        self.assertEqual(self.cam.get_far_clipping_plane(), 0.5)

    def test_get_set_windowed_size(self):
        self.cam.set_windowed_size((640, 480))
        self.assertEqual(self.cam.get_windowed_size(), (640, 480))

    def test_get_set_entity_to_render(self):
        self.cam.set_entity_to_render(-1)
        self.assertEqual(self.cam.get_entity_to_render(), -1)
Example #8
class LoCoBotCamera(Camera):
    """docstring for SimpleCamera"""

    def __init__(self, configs, simulator):

        self.configs = configs  # stored because reset() reads self.configs.CAMERA.*
        self.sim = simulator.sim
        self.rgb_cam = VisionSensor("kinect_rgb")
        self.depth_cam = VisionSensor("kinect_depth")
        self.rgb_cam.set_render_mode(RenderMode.OPENGL3)
        self.depth_cam.set_render_mode(RenderMode.OPENGL3)

        # Pan and tilt related variables.
        self.pan_joint = Joint("LoCoBot_head_pan_joint")
        self.tilt_joint = Joint("LoCoBot_head_tilt_joint")

    def get_rgb(self):

        return self.rgb_cam.capture_rgb()

    def get_depth(self):

        return self.depth_cam.capture_depth()

    def get_rgb_depth(self):

        return self.get_rgb(), self.get_depth()

    def get_intrinsics(self):
        """
        Returns the intrinsic matrix of the camera

        :return: the intrinsic matrix (shape: :math:`[3, 3]`)
        :rtype: np.ndarray
        """
        # TODO: Remove this after we fix intrinsics
        raise NotImplementedError
        # fx = self.configs['Camera.fx']
        # fy = self.configs['Camera.fy']
        # cx = self.configs['Camera.cx']
        # cy = self.configs['Camera.cy']
        # Itc = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        # return Itc

    def pix_to_3dpt(self, rs, cs, in_cam=False):
        """
		Get the 3D points of the pixels in RGB images.

		:param rs: rows of interest in the RGB image.
		           It can be a list or 1D numpy array
		           which contains the row indices.
		           The default value is None,
		           which means all rows.
		:param cs: columns of interest in the RGB image.
		           It can be a list or 1D numpy array
		           which contains the column indices.
		           The default value is None,
		           which means all columns.
		:param in_cam: return points in camera frame,
		               otherwise, return points in base frame

		:type rs: list or np.ndarray
		:type cs: list or np.ndarray
		:type in_cam: bool

		:returns: tuple (pts, colors)

		          pts: point coordinates in world frame
		          (shape: :math:`[N, 3]`)

		          colors: rgb values for pts_in_cam
		          (shape: :math:`[N, 3]`)

		:rtype: tuple(np.ndarray, np.ndarray)
		"""

        raise NotImplementedError

    def get_current_pcd(self, in_cam=True):
        """
		Return the point cloud at current time step (one frame only)

		:param in_cam: return points in camera frame,
		               otherwise, return points in base frame

		:type in_cam: bool
		:returns: tuple (pts, colors)

		          pts: point coordinates in world frame (shape: :math:`[N, 3]`)

		          colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
		:rtype: tuple(np.ndarray, np.ndarray)
		"""

        raise NotImplementedError

    @property
    def state(self):
        """
		Return the current pan and tilt joint angles of the robot camera.

		:return:
		        pan_tilt: A list of the form [pan angle, tilt angle]
		:rtype: list
		"""
        return self.get_state()

    def get_state(self):
        """
		Return the current pan and tilt joint angles of the robot camera.

		:return:
		        pan_tilt: A list of the form [pan angle, tilt angle]
		:rtype: list
		"""
        return [self.get_pan(), self.get_tilt()]

    def get_pan(self):
        """
		Return the current pan joint angle of the robot camera.

		:return:
		        pan: Pan joint angle
		:rtype: float
		"""
        return self.pan_joint.get_joint_position()

    def get_tilt(self):
        """
		Return the current tilt joint angle of the robot camera.

		:return:
		        tilt: Tilt joint angle
		:rtype: float
		"""
        return self.tilt_joint.get_joint_position()

    def set_pan(self, pan, wait=True):
        """
		Sets the pan joint angle to the specified value.

		:param pan: value to be set for pan joint
		:param wait: wait until the pan angle is set to
		             the target angle.

		:type pan: float
		:type wait: bool
		"""

        self.pan_joint.set_joint_position(pan)
        # [self.sim.step() for _ in range(50)]

    def set_tilt(self, tilt, wait=True):
        """
		Sets the tilt joint angle to the specified value.

		:param tilt: value to be set for the tilt joint
		:param wait: wait until the tilt angle is set to
		             the target angle.

		:type tilt: float
		:type wait: bool
		"""

        self.tilt_joint.set_joint_position(tilt)

    def set_pan_tilt(self, pan, tilt, wait=True):
        """
		Sets both the pan and tilt joint angles to the specified values.

		:param pan: value to be set for pan joint
		:param tilt: value to be set for the tilt joint
		:param wait: wait until the pan and tilt angles are set to
		             the target angles.

		:type pan: float
		:type tilt: float
		:type wait: bool
		"""

        self.set_pan(pan)
        self.set_tilt(tilt)

    def reset(self):
        """
		This function resets the pan and tilt joints by actuating
		them to their home configuration.
		"""
        self.set_pan_tilt(self.configs.CAMERA.RESET_PAN, self.configs.CAMERA.RESET_TILT)
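
A usage sketch with placeholder configs/simulator objects; the Kinect sensor names must exist in the LoCoBot scene:

# illustrative usage -- 'configs' and 'simulator' are placeholder objects
camera = LoCoBotCamera(configs, simulator)
rgb, depth = camera.get_rgb_depth()
camera.set_pan_tilt(0.0, 0.5)
print(camera.state)  # [pan angle, tilt angle]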
Example #9
class Scene(object):
    """Controls what is currently in the vrep scene. This is used for making
    sure that the tasks are easily reachable. This may be just replaced by
    environment. Responsible for moving all the objects. """
    def __init__(self,
                 pyrep: PyRep,
                 robot: Robot,
                 obs_config=ObservationConfig()):
        self._pyrep = pyrep
        self._robot = robot
        self._obs_config = obs_config
        self._active_task = None
        self._inital_task_state = None
        self._start_arm_joint_pos = robot.arm.get_joint_positions()
        self._starting_gripper_joint_pos = robot.gripper.get_joint_positions()
        self._workspace = Shape('workspace')
        self._workspace_boundary = SpawnBoundary([self._workspace])
        self._cam_over_shoulder_left = VisionSensor('cam_over_shoulder_left')
        self._cam_over_shoulder_right = VisionSensor('cam_over_shoulder_right')
        self._cam_wrist = VisionSensor('cam_wrist')
        self._cam_over_shoulder_left_mask = VisionSensor(
            'cam_over_shoulder_left_mask')
        self._cam_over_shoulder_right_mask = VisionSensor(
            'cam_over_shoulder_right_mask')
        self._cam_wrist_mask = VisionSensor('cam_wrist_mask')
        self._has_init_task = self._has_init_episode = False
        self._variation_index = 0

        self._initial_robot_state = (robot.arm.get_configuration_tree(),
                                     robot.gripper.get_configuration_tree())

        # Set camera properties from observation config
        self._set_camera_properties()

    def load(self, task: Task) -> None:
        """Loads the task and positions at the centre of the workspace.

        :param task: The task to load in the scene.
        """
        task.load()  # Load the task in to the scene

        # Set at the centre of the workspace
        task.get_base().set_position(self._workspace.get_position())

        self._inital_task_state = task.get_state()
        self._active_task = task
        self._initial_task_pose = task.boundary_root().get_orientation()
        self._has_init_task = self._has_init_episode = False
        self._variation_index = 0

    def unload(self) -> None:
        """Clears the scene. i.e. removes all tasks. """
        if self._active_task is not None:
            self._robot.gripper.release()
            if self._has_init_task:
                self._active_task.cleanup_()
            self._active_task.unload()
        self._active_task = None
        self._variation_index = 0

    def init_task(self) -> None:
        self._active_task.init_task()
        self._has_init_task = True
        self._variation_index = 0

    def init_episode(self,
                     index: int,
                     randomly_place: bool = True,
                     max_attempts: int = 5) -> List[str]:
        """Calls the task init_episode and puts randomly in the workspace.
        """

        self._variation_index = index

        if not self._has_init_task:
            self.init_task()

        # Try a few times to init and place in the workspace
        attempts = 0
        descriptions = None
        while attempts < max_attempts:
            descriptions = self._active_task.init_episode(index)
            try:
                if (randomly_place
                        and not self._active_task.is_static_workspace()):
                    self._place_task()
                self._active_task.validate()
                break
            except (BoundaryError, WaypointError) as e:
                self._active_task.cleanup_()
                self._active_task.restore_state(self._inital_task_state)
                attempts += 1
                if attempts >= max_attempts:
                    raise e

        # Let objects come to rest
        [self._pyrep.step() for _ in range(STEPS_BEFORE_EPISODE_START)]
        self._has_init_episode = True
        return descriptions

    def reset(self) -> None:
        """Resets the joint angles. """
        self._robot.gripper.release()

        arm, gripper = self._initial_robot_state
        self._pyrep.set_configuration_tree(arm)
        self._pyrep.set_configuration_tree(gripper)
        self._robot.arm.set_joint_positions(self._start_arm_joint_pos)
        self._robot.arm.set_joint_target_velocities(
            [0] * len(self._robot.arm.joints))
        self._robot.gripper.set_joint_positions(
            self._starting_gripper_joint_pos)
        self._robot.gripper.set_joint_target_velocities(
            [0] * len(self._robot.gripper.joints))

        if self._active_task is not None and self._has_init_task:
            self._active_task.cleanup_()
            self._active_task.restore_state(self._inital_task_state)
        [self._pyrep.step_ui() for _ in range(20)]
        self._active_task.set_initial_objects_in_scene()

    def get_observation(self) -> Observation:
        tip = self._robot.arm.get_tip()

        joint_forces = None
        if self._obs_config.joint_forces:
            fs = self._robot.arm.get_joint_forces()
            vels = self._robot.arm.get_joint_target_velocities()
            joint_forces = self._obs_config.joint_forces_noise.apply(
                np.array([-f if v < 0 else f for f, v in zip(fs, vels)]))

        ee_forces_flat = None
        if self._obs_config.gripper_touch_forces:
            ee_forces = self._robot.gripper.get_touch_sensor_forces()
            ee_forces_flat = []
            for eef in ee_forces:
                ee_forces_flat.extend(eef)
            ee_forces_flat = np.array(ee_forces_flat)

        lsc_ob = self._obs_config.left_shoulder_camera
        rsc_ob = self._obs_config.right_shoulder_camera
        wc_ob = self._obs_config.wrist_camera

        obs = Observation(
            left_shoulder_rgb=(lsc_ob.rgb_noise.apply(
                self._cam_over_shoulder_left.capture_rgb())
                               if lsc_ob.rgb else None),
            left_shoulder_depth=(lsc_ob.depth_noise.apply(
                self._cam_over_shoulder_left.capture_depth())
                                 if lsc_ob.depth else None),
            right_shoulder_rgb=(rsc_ob.rgb_noise.apply(
                self._cam_over_shoulder_right.capture_rgb())
                                if rsc_ob.rgb else None),
            right_shoulder_depth=(rsc_ob.depth_noise.apply(
                self._cam_over_shoulder_right.capture_depth())
                                  if rsc_ob.depth else None),
            wrist_rgb=(wc_ob.rgb_noise.apply(self._cam_wrist.capture_rgb())
                       if wc_ob.rgb else None),
            wrist_depth=(wc_ob.depth_noise.apply(
                self._cam_wrist.capture_depth()) if wc_ob.depth else None),
            left_shoulder_mask=(
                self._cam_over_shoulder_left_mask.capture_rgb()
                if lsc_ob.mask else None),
            right_shoulder_mask=(
                self._cam_over_shoulder_right_mask.capture_rgb()
                if rsc_ob.mask else None),
            wrist_mask=(self._cam_wrist_mask.capture_rgb()
                        if wc_ob.mask else None),
            joint_velocities=(self._obs_config.joint_velocities_noise.apply(
                np.array(self._robot.arm.get_joint_velocities()))
                              if self._obs_config.joint_velocities else None),
            joint_positions=(self._obs_config.joint_positions_noise.apply(
                np.array(self._robot.arm.get_joint_positions()))
                             if self._obs_config.joint_positions else None),
            joint_forces=(joint_forces
                          if self._obs_config.joint_forces else None),
            gripper_open_amount=((
                1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0)
                                 if self._obs_config.gripper_open_amount else
                                 None),
            gripper_pose=(np.array(tip.get_pose())
                          if self._obs_config.gripper_pose else None),
            gripper_touch_forces=(ee_forces_flat
                                  if self._obs_config.gripper_touch_forces else
                                  None),
            gripper_joint_positions=(
                np.array(self._robot.gripper.get_joint_positions())
                if self._obs_config.gripper_joint_positions else None),
            task_low_dim_state=(self._active_task.get_low_dim_state() if
                                self._obs_config.task_low_dim_state else None))
        obs = self._active_task.decorate_observation(obs)
        return obs

    def step(self):
        self._pyrep.step()
        self._active_task.step()

    def get_demo(self,
                 record: bool = True,
                 func=None,
                 randomly_place: bool = True) -> List[Observation]:
        """Returns a demo (list of observations)"""

        if not self._has_init_task:
            self.init_task()
        if not self._has_init_episode:
            self.init_episode(self._variation_index,
                              randomly_place=randomly_place)
        self._has_init_episode = False

        waypoints = self._active_task.get_waypoints()
        if len(waypoints) == 0:
            raise NoWaypointsError('No waypoints were found.',
                                   self._active_task)

        demo = []
        if record:
            self._pyrep.step()  # Need this here or get_force doesn't work...
            demo.append(self.get_observation())
        while True:
            success = False
            for i, point in enumerate(waypoints):

                point.start_of_path()
                try:
                    path = point.get_path()
                except ConfigurationPathError as e:
                    raise DemoError(
                        'Could not get a path for waypoint %d.' % i,
                        self._active_task) from e
                ext = point.get_ext()
                path.visualize()

                done = False
                success = False
                while not done:
                    done = path.step()
                    self.step()
                    self._demo_record_step(demo, record, func)
                    success, term = self._active_task.success()
                    if success:
                        break

                point.end_of_path()

                path.clear_visualization()

                if success:
                    # We can quit early because we have finished the task
                    break

                # TODO: need to decide how I do the gripper actions
                if len(ext) > 0:
                    contains_param = False
                    start_of_bracket = -1
                    gripper = self._robot.gripper
                    if 'open_gripper(' in ext:
                        gripper.release()
                        start_of_bracket = ext.index('open_gripper(') + 13
                        contains_param = ext[start_of_bracket] != ')'
                        if not contains_param:
                            done = False
                            while not done:
                                done = gripper.actuate(1.0, 0.04)
                                self._pyrep.step()
                                self._active_task.step()
                                if self._obs_config.record_gripper_closing:
                                    self._demo_record_step(demo, record, func)
                    elif 'close_gripper(' in ext:
                        start_of_bracket = ext.index('close_gripper(') + 14
                        contains_param = ext[start_of_bracket] != ')'
                        if not contains_param:
                            done = False
                            while not done:
                                done = gripper.actuate(0.0, 0.04)
                                self._pyrep.step()
                                self._active_task.step()
                                if self._obs_config.record_gripper_closing:
                                    self._demo_record_step(demo, record, func)

                    if contains_param:
                        rest = ext[start_of_bracket:]
                        num = float(rest[:rest.index(')')])
                        done = False
                        while not done:
                            done = gripper.actuate(num, 0.04)
                            self._pyrep.step()
                            self._active_task.step()
                            if self._obs_config.record_gripper_closing:
                                self._demo_record_step(demo, record, func)

                    if 'close_gripper(' in ext:
                        for g_obj in self._active_task.get_graspable_objects():
                            gripper.grasp(g_obj)

                    self._demo_record_step(demo, record, func)

            if not self._active_task.should_repeat_waypoints() or success:
                break

        # Some tasks may need additional physics steps
        # (e.g. ball rolling to goal)
        if not success:
            for _ in range(10):
                self._pyrep.step()
                self._active_task.step()
                self._demo_record_step(demo, record, func)
                success, term = self._active_task.success()
                if success:
                    break

        success, term = self._active_task.success()
        if not success:
            raise DemoError('Demo was completed, but was not successful.',
                            self._active_task)

        return demo

    def get_observation_config(self) -> ObservationConfig:
        return self._obs_config

    def _demo_record_step(self, demo_list, record, func):
        if record:
            demo_list.append(self.get_observation())
        if func is not None:
            func()

    def _set_camera_properties(self) -> None:
        def _set_rgb_props(rgb_cam: VisionSensor, rgb: bool, depth: bool,
                           conf: CameraConfig):
            if not (rgb or depth):
                rgb_cam.remove()
            else:
                rgb_cam.set_resolution(conf.image_size)
                rgb_cam.set_render_mode(conf.render_mode)

        def _set_mask_props(mask_cam: VisionSensor, mask: bool,
                            conf: CameraConfig):
            if not mask:
                mask_cam.remove()
            else:
                mask_cam.set_resolution(conf.image_size)

        _set_rgb_props(self._cam_over_shoulder_left,
                       self._obs_config.left_shoulder_camera.rgb,
                       self._obs_config.left_shoulder_camera.depth,
                       self._obs_config.left_shoulder_camera)
        _set_rgb_props(self._cam_over_shoulder_right,
                       self._obs_config.right_shoulder_camera.rgb,
                       self._obs_config.right_shoulder_camera.depth,
                       self._obs_config.right_shoulder_camera)
        _set_rgb_props(self._cam_wrist, self._obs_config.wrist_camera.rgb,
                       self._obs_config.wrist_camera.depth,
                       self._obs_config.wrist_camera)
        _set_mask_props(self._cam_over_shoulder_left_mask,
                        self._obs_config.left_shoulder_camera.mask,
                        self._obs_config.left_shoulder_camera)
        _set_mask_props(self._cam_over_shoulder_right_mask,
                        self._obs_config.right_shoulder_camera.mask,
                        self._obs_config.right_shoulder_camera)
        _set_mask_props(self._cam_wrist_mask,
                        self._obs_config.wrist_camera.mask,
                        self._obs_config.wrist_camera)

    def _place_task(self) -> None:
        self._workspace_boundary.clear()
        # Find a place in the robot workspace for task
        self._active_task.boundary_root().set_orientation(
            self._initial_task_pose)
        min_rot, max_rot = self._active_task.base_rotation_bounds()
        self._workspace_boundary.sample(self._active_task.boundary_root(),
                                        min_rotation=min_rot,
                                        max_rotation=max_rot)
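
A hedged end-to-end sketch in RLBench style; the robot construction and task instance are illustrative placeholders:

# illustrative usage -- assumes an RLBench scene, arm/gripper and a Task subclass
pr = PyRep()
pr.launch(SCENE_FILE, headless=True)  # SCENE_FILE is a placeholder
pr.start()
scene = Scene(pr, Robot(arm, gripper))  # arm and gripper built elsewhere
scene.load(my_task)                     # my_task: a Task instance (placeholder)
scene.init_episode(0)
obs = scene.get_observation()
demo = scene.get_demo(record=True)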
Example #10
def simulate(scene_dir, cls_indices):
    # read 3d point cloud vertices npy (for visualization)
    vertex_npy = np.load("mesh_data/custom_vertex.npy")

    # loop over all scene files in scenes directory
    for scene_path in os.listdir(scene_dir):
        # skip anything that is not a CoppeliaSim scene file
        if not scene_path.endswith('.ttt'):
            continue

        # create an output directory for each scene
        scene_out_dir = os.path.join('./sim_data/', scene_path[:-4])
        if not os.path.isdir(scene_out_dir):
            os.mkdir(scene_out_dir)

        # launch scene file
        SCENE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.join(scene_dir, scene_path))
        pr = PyRep()
        pr.launch(SCENE_FILE, headless=True)
        pr.start()

        pr.step()

        # define vision sensor
        camera = VisionSensor('cam')
        # set camera absolute pose to world coordinates
        camera.set_pose([0,0,0,0,0,0,1])
        pr.step()

        # define scene shapes
        shapes = []
        shapes.append(Shape('Shape1'))
        shapes.append(Shape('Shape2'))
        shapes.append(Shape('Shape3'))
        shapes.append(Shape('Shape4'))
        pr.step()

        print("Getting vision sensor intrinsics ...")
        # get vision sensor parameters
        cam_res = camera.get_resolution()
        cam_per_angle = camera.get_perspective_angle()
        ratio = cam_res[0]/cam_res[1]
        # CoppeliaSim applies the perspective angle along the larger image
        # dimension; derive the other axis' angle from the aspect ratio
        if ratio > 1:
            cam_angle_x = cam_per_angle
            cam_angle_y = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) / ratio))
        else:
            cam_angle_x = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) * ratio))
            cam_angle_y = cam_per_angle
        # get vision sensor intrinsic matrix
        cam_focal_x = float(cam_res[0] / 2.0) / tan(radians(cam_angle_x / 2.0))
        cam_focal_y = float(cam_res[1] / 2.0) / tan(radians(cam_angle_y / 2.0))
        intrinsics = np.array([[cam_focal_x, 0.0, float(cam_res[0] / 2.0)],
                               [0.0, cam_focal_y, float(cam_res[1] / 2.0)],
                               [0.0, 0.0, 1.0]])

        # loop to get 5000 samples per scene
        for i in range(5000):
            print("Randomizing objects' poses ...")
            # set random pose to objects in the scene
            obj_colors = []
            for shape in shapes:
                shape.set_pose([
                        random.uniform(-0.25,0.25), random.uniform(-0.25,0.25), random.uniform(0.25,2),
                        random.uniform(-1,1), random.uniform(-1,1), random.uniform(-1,1),
                        random.uniform(-1,1)
                    ])
                obj_colors.append([random.uniform(0,1), random.uniform(0,1), random.uniform(0,1)])
                pr.step()

            print("Reading vision sensor RGB signal ...")
            # read vision sensor RGB image
            img = camera.capture_rgb()
            img = np.uint8(img * 255.0)

            print("Reading vision sensor depth signal ...")
            # read vision sensor depth image
            depth = camera.capture_depth()
            depth = np.uint8(depth * 255.0)

            print("Generating front mask for RGB signal ...")
            # generate RGB front mask
            front_mask = np.sum(img, axis=2)
            front_mask[front_mask != 0] = 255
            front_mask = Image.fromarray(np.uint8(front_mask))
            alpha_img = Image.fromarray(img)
            alpha_img.putalpha(front_mask)

            print("Saving sensor output ...")
            # save sensor output
            alpha_img.save(scene_out_dir+f'/{str(i).zfill(6)}-color.png')
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-depth.png', depth)
            
            print("Getting objects' poses ...")
            # get poses for all objects in scene
            poses = []
            for shape in shapes:
                poses.append(get_object_pose(shape, camera))
            pose_mat = np.stack(poses, axis=2)
            # save pose visualization on RGB image
            img_viz = visualize_predictions(poses, cls_indices[scene_path], img, vertex_npy, intrinsics)
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-viz.png', img_viz)

            print("Saving meta-data ...")
            # save meta-data .mat
            meta_dict = {
                'cls_indexes'      : np.array(cls_indices[scene_path]),
                'intrinsic_matrix' : intrinsics,
                'poses'            : pose_mat
            }
            savemat(scene_out_dir+f'/{str(i).zfill(6)}-meta.mat', meta_dict)

            print("Performing semantic segmentation of RGB signal ...")
            # perform semantic segmentation of RGB image
            seg_img = camera.capture_rgb()
            seg_img = perform_segmentation(seg_img, cls_indices[scene_path], poses, vertex_npy, intrinsics)
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-label.png', seg_img)

        # stop simulation
        pr.stop()
        pr.shutdown()
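
A call sketch; the scene directory and class-index mapping are illustrative placeholders:

# illustrative call -- paths and class indices are placeholders
cls_indices = {'scene_0.ttt': [1, 2, 3, 4]}
simulate('./scenes/', cls_indices)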

class Camera:
    def __init__(self,
                 name,
                 resolution,
                 perspective_angle,
                 min_depth,
                 max_depth,
                 near_clipping=0.2,
                 far_clipping=3):
        self.base = Dummy(name)
        self.cam_rgb = VisionSensor(name + "_rgb")
        self.cam_depth = VisionSensor(name + "_depth")
        self.cam_mask = VisionSensor(name + "_mask")
        self.cam_all = [self.cam_rgb, self.cam_depth, self.cam_mask]
        self.min_depth = min_depth
        self.max_depth = max_depth
        self.set_resolution(resolution)
        self.set_perspective_angle(perspective_angle)
        self.set_near_clipping_plane(near_clipping)
        self.set_far_clipping_plane(far_clipping)

    def get_resolution(self):
        return self._resolution

    def set_resolution(self, resolution):
        for cam in self.cam_all:
            cam.set_resolution(resolution)
        self._resolution = resolution

    def get_perspective_angle(self):
        return self._perspective_angle

    def set_perspective_angle(self, angle: float):
        for cam in self.cam_all:
            cam.set_perspective_angle(angle)
        self._perspective_angle = angle

    def set_near_clipping_plane(self, near_clipping):
        self.cam_rgb.set_near_clipping_plane(near_clipping)
        self.cam_mask.set_near_clipping_plane(near_clipping)
        self.cam_depth.set_near_clipping_plane(self.min_depth)

    def set_far_clipping_plane(self, far_clipping):
        self.cam_rgb.set_far_clipping_plane(far_clipping)
        self.cam_mask.set_far_clipping_plane(far_clipping)
        self.cam_depth.set_far_clipping_plane(self.max_depth)

    def capture_rgb(self):
        return np.uint8(255 * self.cam_rgb.capture_rgb())[:, :, ::-1]

    def capture_depth(self):
        return np.uint8(255 * self.cam_depth.capture_depth())

    def capture_mask(self):
        mask = np.uint8(255 * self.cam_mask.capture_rgb())
        mask = mask[:, :, 1]
        mask[mask != 0] = 1
        return mask

    def set_pose(self, pose, relative_to=None):
        pos = pose[:3]
        qx, qy, qz, qw = pose[3:]
        quat_ori = Quaternion([qw, qx, qy, qz])
        quat_rotate_z = Quaternion(axis=[0, 0, 1], angle=np.radians(180))
        quat = quat_ori * quat_rotate_z
        qw, qx, qy, qz = quat.elements
        quat = [qx, qy, qz, qw]
        # quat = pose[3:]
        self.base.set_pose([*pos] + [*quat], relative_to=relative_to)

    def get_image(self):
        rgb = self.capture_rgb()
        depth = self.capture_depth()
        mask = self.capture_mask()
        return rgb, depth, mask
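
A usage sketch; it assumes a scene containing a dummy named like the camera plus '<name>_rgb', '<name>_depth' and '<name>_mask' vision sensors (names here are illustrative):

# illustrative usage -- sensor names must match the loaded scene
cam = Camera('camera', resolution=[640, 480], perspective_angle=57.0,
             min_depth=0.1, max_depth=3.0)
rgb, depth, mask = cam.get_image()  # BGR uint8 image, uint8 depth, binary mask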