def setParams(self, params):
    """Launch the basic scene and initialise robot, drone and camera state.

    :param params: component configuration (unused here)
    """
    SCENE_FILE = '../../etc/basic_scene.ttt'
    self.pr = PyRep()
    self.pr.launch(SCENE_FILE, headless=False)
    self.pr.start()

    self.robot = TurtleBot()
    self.drone = Shape('Quadricopter')

    # Front camera: cache the sensor handle together with its intrinsics.
    self.cameras = {}
    self.front_camera_name = "frontCamera"
    sensor = VisionSensor(self.front_camera_name)
    width, height = sensor.get_resolution()
    fov = np.radians(sensor.get_perspective_angle())
    self.cameras["frontCamera"] = {
        "handle": sensor,
        "id": 0,
        "angle": fov,
        "width": width,
        "height": height,
        # pinhole focal length in pixels: (w / 2) / tan(fov / 2)
        "focal": (width / 2) / np.tan(fov / 2),
        "rgb": np.array(0),
        "depth": np.ndarray(0),
    }

    # communication / bookkeeping state
    self.joystick_newdata = []
    self.speed_robot = []
    self.speed_robot_ant = []
    self.last_received_data_time = 0
    self.once = False
def setParams(self, params):
    """Launch the gen3 scene and initialise the robot and arm camera.

    :param params: component configuration (unused here)
    """
    SCENE_FILE = '../etc/gen3-robolab.ttt'
    self.pr = PyRep()
    self.pr.launch(SCENE_FILE, headless=False)
    self.pr.start()

    self.mode = 0
    self.bloqueo = True
    self.robot_object = Shape("gen3")

    # Arm-mounted camera: cache the sensor handle together with its intrinsics.
    self.cameras = {}
    self.camera_arm_name = "camera_arm"
    sensor = VisionSensor(self.camera_arm_name)
    width, height = sensor.get_resolution()
    fov = np.radians(sensor.get_perspective_angle())
    self.cameras[self.camera_arm_name] = {
        "handle": sensor,
        "id": 0,
        "angle": fov,
        "width": width,
        "height": height,
        # pinhole focal length in pixels: (w / 2) / tan(fov / 2)
        "focal": (width / 2) / np.tan(fov / 2),
        "rgb": np.array(0),
        "depth": np.ndarray(0),
    }

    # communication / bookkeeping state
    self.joystick_newdata = []
    self.last_received_data_time = 0
class SphericalVisionSensor(Object):
    """An object able capture 360 degree omni-directional images.

    Wraps two "final" sensors (`<name>_sensorDepth`, `<name>_sensorRGB`)
    holding the equirectangular output, plus six directed projective
    sub-sensors (front/top/back/bottom/left/right) that supply the source
    images. The stitching itself is done sim-side by the
    'handleSpherical@PyRep' script (see :meth:`handle_explicitly`).
    """

    def __init__(self, name):
        super().__init__(name)
        # final omni-directional sensors
        self._sensor_depth = VisionSensor('%s_sensorDepth' % (self.get_name()))
        self._sensor_rgb = VisionSensor('%s_sensorRGB' % (self.get_name()))
        # directed sub-sensors
        names = ['front', 'top', 'back', 'bottom', 'left', 'right']
        self._six_sensors = [VisionSensor('%s_%s' % (self.get_name(), n)) for n in names]
        self._front = self._six_sensors[0]
        # directed sub-sensor handles list (passed to the sim-side script)
        self._six_sensor_handles = [vs.get_handle() for vs in self._six_sensors]
        # set all to explicit handling so they only render when requested
        self._set_all_to_explicit_handling()
        # assert sensor properties are matching across all involved sensors
        self._assert_matching_resolutions()
        self._assert_matching_render_modes()
        self._assert_matching_entities_to_render()
        self._assert_matching_window_sizes()
        self._assert_matching_near_clipping_planes()
        self._assert_matching_far_clipping_planes()
        # omni resolution (resolution of the stitched output image)
        self._omni_resolution = self._sensor_rgb.get_resolution()
        # near and far clipping plane, cached for depth (de)normalisation
        self._near_clipping_plane = self.get_near_clipping_plane()
        self._far_clipping_plane = self.get_far_clipping_plane()
        self._clipping_plane_diff = self._far_clipping_plane - self._near_clipping_plane
        # projective cam region image: per-pixel scalars that convert planar
        # depth (along each sub-camera's axis) into radial depth
        self._planar_to_radial_depth_scalars = self._get_planar_to_radial_depth_scalars()
        # flip config: when True, captured images are mirrored along axis 1
        self._flip_x = True

    # Private #
    # --------#

    def _set_all_to_explicit_handling(self):
        """Put every wrapped sensor into explicit-handling mode."""
        self._sensor_depth.set_explicit_handling(1)
        self._sensor_rgb.set_explicit_handling(1)
        for sensor in self._six_sensors:
            sensor.set_explicit_handling(1)

    def _assert_matching_resolutions(self):
        """Final RGB/depth must agree; all sub-sensors must match front."""
        assert self._sensor_depth.get_resolution() == self._sensor_rgb.get_resolution()
        front_sensor_res = self._front.get_resolution()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_resolution() == front_sensor_res

    def _assert_matching_render_modes(self):
        """All sensors must share one render mode."""
        assert self._sensor_depth.get_render_mode() == self._sensor_rgb.get_render_mode()
        front_sensor_render_mode = self._front.get_render_mode()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_render_mode() == front_sensor_render_mode

    def _assert_matching_entities_to_render(self):
        """All sensors must render the same entity/collection."""
        assert self._sensor_depth.get_entity_to_render() == self._sensor_rgb.get_entity_to_render()
        front_sensor_entity_to_render = self._front.get_entity_to_render()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_entity_to_render() == front_sensor_entity_to_render

    def _assert_matching_window_sizes(self):
        """The two final sensors must share one windowed size."""
        assert self._sensor_depth.get_windowed_size() == self._sensor_rgb.get_windowed_size()

    def _assert_matching_near_clipping_planes(self):
        """All six sub-sensors must share one near clipping plane."""
        front_sensor_near = self._front.get_near_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_near_clipping_plane() == front_sensor_near

    def _assert_matching_far_clipping_planes(self):
        """All six sub-sensors must share one far clipping plane."""
        front_sensor_far = self._front.get_far_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_far_clipping_plane() == front_sensor_far

    def _get_requested_type(self) -> ObjectType:
        """Return the sim object type of this object's own handle."""
        return ObjectType(sim.simGetObjectType(self.get_handle()))

    @staticmethod
    def _create_uniform_pixel_coords_image(image_dims):
        """Build an (H, W, 2) array whose channels are the x and y pixel
        indices of every pixel, as float32.

        :param image_dims: [height, width] of the target image.
        """
        pixel_x_coords = np.reshape(
            np.tile(np.arange(image_dims[1]), [image_dims[0]]),
            (image_dims[0], image_dims[1], 1)).astype(np.float32)
        pixel_y_coords_ = np.reshape(
            np.tile(np.arange(image_dims[0]), [image_dims[1]]),
            (image_dims[1], image_dims[0], 1)).astype(np.float32)
        pixel_y_coords = np.transpose(pixel_y_coords_, (1, 0, 2))
        return np.concatenate((pixel_x_coords, pixel_y_coords), -1)

    def _get_planar_to_radial_depth_scalars(self):
        """Compute per-pixel scalars that convert planar depth (measured
        along each projective sub-camera's optical axis) to radial depth
        (distance from the sensor centre), for the whole omni image.

        :return: Array of shape (H, W) of multiplicative scalars.
        """
        # reference: https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates
        # polar coord image: phi (azimuth) from x-pixels, theta from y-pixels
        coord_img = self._create_uniform_pixel_coords_image(
            [self._omni_resolution[1], self._omni_resolution[0]])
        pixels_per_rad = self._omni_resolution[1] / np.pi
        polar_angles = coord_img / pixels_per_rad
        phi = polar_angles[..., 0:1]
        theta = polar_angles[..., 1:2]
        # image borders wrt the 6 projective cameras
        phis_rel = ((phi + np.pi/4) % (np.pi/2)) / (np.pi/2)
        lower_borders = np.arccos(1/((phis_rel*2-1)**2 + 2)**0.5)
        upper_borders = np.pi - lower_borders
        # omni image regions from the 6 projective cameras: each pixel is
        # classified by which cube face (+-x, +-y, +-z) it falls on
        xy_region = np.logical_and(theta <= upper_borders, theta >= lower_borders)
        pos_x = np.logical_and(xy_region, np.logical_or(
            phi <= 45 * np.pi/180, phi >= (45 + 270) * np.pi/180))
        pos_y = np.logical_and(xy_region, np.logical_and(
            phi >= 45 * np.pi/180, phi <= (45 + 90) * np.pi/180))
        neg_x = np.logical_and(xy_region, np.logical_and(
            phi >= (45 + 90) * np.pi/180, phi <= (45 + 180) * np.pi/180))
        neg_y = np.logical_and(xy_region, np.logical_and(
            phi >= (45 + 180) * np.pi/180, phi <= (45 + 270) * np.pi/180))
        pos_z = theta <= lower_borders
        neg_z = theta >= upper_borders
        # trig terms for conversion
        sin_theta = np.sin(theta)
        cos_theta = np.cos(theta)
        sin_phi = np.sin(phi)
        cos_phi = np.cos(phi)
        # reciprocal terms; MIN_DIVISOR (module constant) guards divide-by-zero
        recip_sin_theta_cos_phi = 1/(sin_theta * cos_phi + MIN_DIVISOR)
        recip_sin_theta_sin_phi = 1/(sin_theta * sin_phi + MIN_DIVISOR)
        recip_cos_theta = 1/(cos_theta + MIN_DIVISOR)
        # planar to radial depth scalars, selected per cube-face region
        return np.where(pos_x, recip_sin_theta_cos_phi,
               np.where(neg_x, -recip_sin_theta_cos_phi,
               np.where(pos_y, recip_sin_theta_sin_phi,
               np.where(neg_y, -recip_sin_theta_sin_phi,
               np.where(pos_z, recip_cos_theta,
               np.where(neg_z, -recip_cos_theta,
               np.zeros_like(recip_cos_theta)))))))[..., 0]

    # Public #
    # -------#

    def handle_explicitly(self) -> None:
        """Handle spherical vision sensor explicitly.

        This enables capturing image (e.g., capture_rgb())
        without PyRep.step().
        """
        utils.script_call('handleSpherical@PyRep', PYREP_SCRIPT_TYPE,
                          [self._sensor_depth._handle, self._sensor_rgb._handle] +
                          self._six_sensor_handles, [], [], [])

    def capture_rgb(self) -> np.ndarray:
        """Retrieves the rgb-image of a spherical vision sensor.

        :return: A numpy array of size (width, height, 3)
        """
        rgb = self._sensor_rgb.capture_rgb()
        if self._flip_x:
            return np.flip(rgb, 1)
        return rgb

    def capture_depth(self, in_meters=False) -> np.ndarray:
        """Retrieves the depth-image of a spherical vision sensor.

        :param in_meters: Whether the depth should be returned in meters.
        :return: A numpy array of size (width, height)
        """
        # depth sensor stores normalised planar depth in its red channel;
        # de-normalise, then convert planar -> radial per pixel
        planar_depth = self._sensor_depth.capture_rgb()[:, :, 0]*self._clipping_plane_diff + self._near_clipping_plane
        radial_depth = planar_depth * self._planar_to_radial_depth_scalars
        if in_meters:
            if self._flip_x:
                return np.flip(radial_depth, 1)
            return radial_depth
        # re-normalise radial depth back into [0, 1] between clipping planes
        scaled_depth = (radial_depth - self._near_clipping_plane)/self._clipping_plane_diff
        if self._flip_x:
            return np.flip(scaled_depth, 1)
        return scaled_depth

    def get_resolution(self) -> List[int]:
        """ Return the spherical vision sensor's resolution.

        :return: Resolution [x, y]
        """
        return self._sensor_depth.get_resolution()

    def set_resolution(self, resolution: List[int]) -> None:
        """ Set the spherical vision sensor's resolution.

        :param resolution: New resolution [x, y]
        :raises Exception: If x is not exactly twice y, or y is odd.
        """
        if resolution[0] != 2*resolution[1]:
            raise Exception('Spherical vision sensors must have an X resolution 2x the Y resolution')
        if resolution[1] % 2 != 0:
            raise Exception('Spherical vision sensors must have an even Y resolution')
        # each cube-face sub-sensor is square with half the Y resolution
        box_sensor_resolution = [int(resolution[1]/2), int(resolution[1]/2)]
        for sensor in self._six_sensors:
            sensor.set_resolution(box_sensor_resolution)
        self._sensor_depth.set_resolution(resolution)
        self._sensor_rgb.set_resolution(resolution)
        self._omni_resolution = resolution
        # depth conversion scalars depend on resolution -> recompute
        self._planar_to_radial_depth_scalars = self._get_planar_to_radial_depth_scalars()

    def get_render_mode(self) -> RenderMode:
        """ Retrieves the spherical vision sensor's rendering mode

        :return: RenderMode for the current rendering mode.
        """
        return self._sensor_depth.get_render_mode()

    def set_render_mode(self, render_mode: RenderMode) -> None:
        """ Set the spherical vision sensor's rendering mode

        :param render_mode: The new sensor rendering mode, one of:
            RenderMode.OPENGL
            RenderMode.OPENGL_AUXILIARY
            RenderMode.OPENGL_COLOR_CODED
            RenderMode.POV_RAY
            RenderMode.EXTERNAL
            RenderMode.EXTERNAL_WINDOWED
            RenderMode.OPENGL3
            RenderMode.OPENGL3_WINDOWED
        """
        self._sensor_depth.set_render_mode(render_mode)
        self._sensor_rgb.set_render_mode(render_mode)
        for sensor in self._six_sensors:
            sensor.set_render_mode(render_mode)

    def get_windowed_size(self) -> Sequence[int]:
        """Get the size of windowed rendering for the omni-directional image.

        :return: The (x, y) resolution of the window. 0 for full-screen.
        """
        return self._sensor_depth.get_windowed_size()

    def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
        """Set the size of windowed rendering for the omni-directional image.

        :param resolution: The (x, y) resolution of the window.
            0 for full-screen.
        """
        self._sensor_depth.set_windowed_size(resolution)
        self._sensor_rgb.set_windowed_size(resolution)

    def get_near_clipping_plane(self) -> float:
        """ Get the spherical depth sensor's near clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_near_clipping_plane()

    def set_near_clipping_plane(self, near_clipping: float) -> None:
        """ Set the spherical depth sensor's near clipping plane.

        :param near_clipping: New near clipping plane (in metres)
        """
        self._near_clipping_plane = near_clipping
        self._clipping_plane_diff = self._far_clipping_plane - near_clipping
        for sensor in self._six_sensors:
            sensor.set_near_clipping_plane(near_clipping)

    def get_far_clipping_plane(self) -> float:
        """ Get the Sensor's far clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_far_clipping_plane()

    def set_far_clipping_plane(self, far_clipping: float) -> None:
        """ Set the spherical depth sensor's far clipping plane.

        :param far_clipping: New far clipping plane (in metres)
        """
        self._far_clipping_plane = far_clipping
        self._clipping_plane_diff = far_clipping - self._near_clipping_plane
        for sensor in self._six_sensors:
            sensor.set_far_clipping_plane(far_clipping)

    def set_entity_to_render(self, entity_to_render: int) -> None:
        """ Set the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection.
        -1 to render all objects in scene.

        :param entity_to_render: Handle of the entity to render
        """
        self._sensor_depth.set_entity_to_render(entity_to_render)
        self._sensor_rgb.set_entity_to_render(entity_to_render)

    def get_entity_to_render(self) -> int:
        """ Get the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection.
        -1 if all objects in scene are rendered.

        :return: Handle of the entity to render
        """
        return self._sensor_depth.get_entity_to_render()
class TestVisionSensors(TestCore):
    """Unit tests for VisionSensor capture, creation and get/set accessors."""

    def setUp(self):
        super().setUp()
        # Let the simulation settle so the sensor has something to render.
        # (Fixed: was a list comprehension used purely for side effects.)
        for _ in range(10):
            self.pyrep.step()
        self.cam = VisionSensor('cam0')

    def test_handle_explicitly(self):
        cam = VisionSensor.create((640, 480))

        # blank image before any explicit handling
        rgb = cam.capture_rgb()
        self.assertEqual(rgb.sum(), 0)

        # non-blank image after an explicit render
        cam.set_explicit_handling(value=1)
        cam.handle_explicitly()
        rgb = cam.capture_rgb()
        self.assertNotEqual(rgb.sum(), 0)

        cam.remove()

    def test_capture_rgb(self):
        img = self.cam.capture_rgb()
        self.assertEqual(img.shape, (16, 16, 3))
        # Check that it's not a blank image
        self.assertFalse(img.min() == img.max() == 0.0)

    def test_capture_depth(self):
        img = self.cam.capture_depth()
        self.assertEqual(img.shape, (16, 16))
        # Check that it's not a blank depth map
        self.assertFalse(img.min() == img.max() == 1.0)

    def test_create(self):
        cam = VisionSensor.create([640, 480],
                                  perspective_mode=True,
                                  view_angle=35.0,
                                  near_clipping_plane=0.25,
                                  far_clipping_plane=5.0,
                                  render_mode=RenderMode.OPENGL3)
        # every constructor argument must be reflected by the sensor
        self.assertEqual(cam.capture_rgb().shape, (480, 640, 3))
        self.assertEqual(cam.get_perspective_mode(), PerspectiveMode.PERSPECTIVE)
        self.assertAlmostEqual(cam.get_perspective_angle(), 35.0, 3)
        self.assertEqual(cam.get_near_clipping_plane(), 0.25)
        self.assertEqual(cam.get_far_clipping_plane(), 5.0)
        self.assertEqual(cam.get_render_mode(), RenderMode.OPENGL3)

    def test_get_set_resolution(self):
        self.cam.set_resolution([320, 240])
        self.assertEqual(self.cam.get_resolution(), [320, 240])
        self.assertEqual(self.cam.capture_rgb().shape, (240, 320, 3))

    def test_get_set_perspective_mode(self):
        for perspective_mode in PerspectiveMode:
            self.cam.set_perspective_mode(perspective_mode)
            self.assertEqual(self.cam.get_perspective_mode(), perspective_mode)

    def test_get_set_render_mode(self):
        for render_mode in [RenderMode.OPENGL, RenderMode.OPENGL3]:
            self.cam.set_render_mode(render_mode)
            self.assertEqual(self.cam.get_render_mode(), render_mode)

    def test_get_set_perspective_angle(self):
        self.cam.set_perspective_angle(45.0)
        self.assertAlmostEqual(self.cam.get_perspective_angle(), 45.0, 3)

    def test_get_set_orthographic_size(self):
        self.cam.set_orthographic_size(1.0)
        self.assertEqual(self.cam.get_orthographic_size(), 1.0)

    def test_get_set_near_clipping_plane(self):
        self.cam.set_near_clipping_plane(0.5)
        self.assertEqual(self.cam.get_near_clipping_plane(), 0.5)

    def test_get_set_far_clipping_plane(self):
        self.cam.set_far_clipping_plane(0.5)
        self.assertEqual(self.cam.get_far_clipping_plane(), 0.5)

    def test_get_set_windowed_size(self):
        self.cam.set_windowed_size((640, 480))
        self.assertEqual(self.cam.get_windowed_size(), (640, 480))

    def test_get_set_entity_to_render(self):
        self.cam.set_entity_to_render(-1)
        self.assertEqual(self.cam.get_entity_to_render(), -1)
def setParams(self, params):
    """Launch the grasping scene and cache camera intrinsics, graspable
    objects, point clouds and arm handles.

    :param params: component parameters; params["scene_dir"] is the .ttt path
    """
    SCENE_FILE = params["scene_dir"]
    self.pr = PyRep()
    self.pr.launch(SCENE_FILE, headless=False)
    self.pr.start()

    # shoulder camera and its cached intrinsics
    self.cameras = {}
    cam = VisionSensor("Camera_Shoulder")
    width, height = cam.get_resolution()
    angle = np.radians(cam.get_perspective_angle())
    self.cameras["Camera_Shoulder"] = {
        "handle": cam,
        "id": 1,
        "angle": angle,
        "width": width,
        "height": height,
        # FIX: standard pinhole focal length in pixels is (w/2)/tan(fov/2).
        # The original used w/tan(fov), inconsistent with every other
        # camera set-up in this file and with the pinhole model.
        "focal": (width / 2) / np.tan(angle / 2),
        "position": cam.get_position(),
        "rotation": cam.get_quaternion(),
        "image_rgb": np.array(0),
        "image_rgbd": np.array(0),
        # FIX: the original dict literal declared "depth" twice; the first
        # entry ("depth": 3) was dead code, silently overwritten by this one.
        "depth": np.ndarray(0),
    }

    # graspable objects with slots for simulated and predicted poses
    self.grasping_objects = {}
    can = Shape("can")
    self.grasping_objects["002_master_chef_can"] = {
        "handle": can,
        "sim_pose": None,
        "pred_pose_rgb": None,
        "pred_pose_rgbd": None,
    }

    # pre-computed object point clouds
    # NOTE(review): pickle.load on a file — only safe for trusted local data
    with open("objects_pcl.pickle", "rb") as file:
        self.object_pcl = pickle.load(file)

    # camera intrinsic matrix built from the cached parameters
    self.intrinsics = np.array(
        [[
            self.cameras["Camera_Shoulder"]["focal"], 0.00000000e+00,
            self.cameras["Camera_Shoulder"]["width"] / 2.0
        ],
         [
             0.00000000e+00, self.cameras["Camera_Shoulder"]["focal"],
             self.cameras["Camera_Shoulder"]["height"] / 2.0
         ], [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])

    # arm operation codes understood by the arm controller
    self.arm_ops = {
        "MoveToHome": 1,
        "MoveToObj": 2,
        "CloseGripper": 3,
        "OpenGripper": 4
    }
    self.grasping_iter = 10

    # arm handles
    self.arm_base = Shape("gen3")
    self.arm_target = Dummy("target")
    self.gripper = Joint("RG2_openCloseJoint")
def simulate(scene_dir, cls_indices, num_samples=5000):
    """Generate synthetic RGB-D training data from every scene in *scene_dir*.

    Each .ttt scene is launched headless; object poses are randomised
    repeatedly and, for every sample, the RGBA colour image, depth image,
    pose visualisation, segmentation label and meta-data (.mat) are written
    to ./sim_data/<scene_name>/.

    :param scene_dir: directory containing CoppeliaSim .ttt scene files
    :param cls_indices: mapping of scene file name -> class index list
    :param num_samples: randomised samples to generate per scene
        (default 5000, the previously hard-coded value)
    """
    # 3d point cloud vertices npy (for visualization / segmentation)
    vertex_npy = np.load("mesh_data/custom_vertex.npy")

    # loop over all scene files in scenes directory
    for scene_path in os.listdir(scene_dir):
        # FIX: was `scene_path[-3:] == 'ttt'`, which also accepted names
        # merely ending in "ttt" (e.g. 'foottt') — require the extension.
        if not scene_path.endswith('.ttt'):
            continue

        # create an output directory for each scene
        scene_out_dir = os.path.join('./sim_data/', scene_path[:-4])
        os.makedirs(scene_out_dir, exist_ok=True)

        # launch scene file
        SCENE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  os.path.join(scene_dir, scene_path))
        pr = PyRep()
        pr.launch(SCENE_FILE, headless=True)
        pr.start()
        pr.step()

        # define vision sensor, placed at the world origin
        camera = VisionSensor('cam')
        camera.set_pose([0, 0, 0, 0, 0, 0, 1])
        pr.step()

        # define scene shapes
        shapes = [Shape('Shape1'), Shape('Shape2'), Shape('Shape3'), Shape('Shape4')]
        pr.step()

        print("Getting vision sensor intrinsics ...")
        # derive per-axis view angles from the perspective angle and aspect
        # ratio (the sim's angle applies to the larger image dimension)
        cam_res = camera.get_resolution()
        cam_per_angle = camera.get_perspective_angle()
        ratio = cam_res[0] / cam_res[1]
        if ratio > 1:
            cam_angle_x = cam_per_angle
            cam_angle_y = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) / ratio))
        else:
            cam_angle_x = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) / ratio))
            cam_angle_y = cam_per_angle
        # pinhole intrinsic matrix: focal = (res/2) / tan(angle/2)
        cam_focal_x = float(cam_res[0] / 2.0) / tan(radians(cam_angle_x / 2.0))
        cam_focal_y = float(cam_res[1] / 2.0) / tan(radians(cam_angle_y / 2.0))
        intrinsics = np.array(
            [[cam_focal_x, 0.00000000e+00, float(cam_res[0] / 2.0)],
             [0.00000000e+00, cam_focal_y, float(cam_res[1] / 2.0)],
             [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])

        # generate `num_samples` samples for this scene
        for i in range(num_samples):
            print("Randomizing objects' poses ...")
            # scatter objects with random positions and (quaternion) rotations
            obj_colors = []
            for shape in shapes:
                shape.set_pose([
                    random.uniform(-0.25, 0.25), random.uniform(-0.25, 0.25),
                    random.uniform(0.25, 2),
                    random.uniform(-1, 1), random.uniform(-1, 1),
                    random.uniform(-1, 1), random.uniform(-1, 1)
                ])
                obj_colors.append([random.uniform(0, 1), random.uniform(0, 1),
                                   random.uniform(0, 1)])
            pr.step()

            print("Reading vision sensor RGB signal ...")
            img = camera.capture_rgb()
            img = np.uint8(img * 255.0)

            print("Reading vision sensor depth signal ...")
            depth = camera.capture_depth()
            depth = np.uint8(depth * 255.0)

            print("Generating front mask for RGB signal ...")
            # non-black pixels are foreground; used as the alpha channel
            front_mask = np.sum(img, axis=2)
            front_mask[front_mask != 0] = 255
            front_mask = Image.fromarray(np.uint8(front_mask))
            alpha_img = Image.fromarray(img)
            alpha_img.putalpha(front_mask)

            print("Saving sensor output ...")
            alpha_img.save(scene_out_dir + f'/{str(i).zfill(6)}-color.png')
            imsave(scene_out_dir + f'/{str(i).zfill(6)}-depth.png', depth)

            print("Getting objects' poses ...")
            # get poses for all objects in scene, in camera coordinates
            poses = [get_object_pose(shape, camera) for shape in shapes]
            pose_mat = np.stack(poses, axis=2)
            # save pose visualization on RGB image
            img_viz = visualize_predictions(poses, cls_indices[scene_path], img,
                                            vertex_npy, intrinsics)
            imsave(scene_out_dir + f'/{str(i).zfill(6)}-viz.png', img_viz)

            print("Saving meta-data ...")
            meta_dict = {
                'cls_indexes': np.array(cls_indices[scene_path]),
                'intrinsic_matrix': intrinsics,
                'poses': pose_mat
            }
            savemat(scene_out_dir + f'/{str(i).zfill(6)}-meta.mat', meta_dict)

            print("Performing semantic segmentation of RGB signal ...")
            seg_img = camera.capture_rgb()
            seg_img = perform_segmentation(seg_img, cls_indices[scene_path],
                                           poses, vertex_npy, intrinsics)
            imsave(scene_out_dir + f'/{str(i).zfill(6)}-label.png', seg_img)

        # stop this scene's simulation before moving to the next file
        pr.stop()
        pr.shutdown()
class PyrepDevice(object):
    """
    PyrepDevice handles communication with CoppeliaSim Vision Sensors via
    Pyrep. Retrieved images are converted to cv2 format and passed to
    registered callback functions.
    """

    @staticmethod
    def autodetect_nicoeyes():
        """
        Returns a tuple containing the detected names of left and right
        eye sensor

        :return: Devicenames as (left, right) tuple (None if not found)
        :rtype: tuple
        """
        eye_sensors = ["left_eye", "right_eye"]
        for i in range(len(eye_sensors)):
            if not VisionSensor.exists(eye_sensors[i]):
                logger.warning(
                    "Could not find vision sensor named '%s'", eye_sensors[i]
                )
                eye_sensors[i] = None
        return tuple(eye_sensors)

    def __init__(
        self,
        sensor_name,
        width=640,
        height=480,
        near_clipping_plane=1e-2,
        far_clipping_plane=10.0,
        view_angle=60.0,
    ):
        """
        Connects to Vision Sensor with given name and updates parameters
        accordingly.

        :param sensor_name: name (or handle) of the vision sensor in the scene
        :type sensor_name: str
        :param width: horizontal camera resolution
        :type width: int
        :param height: vertical camera resolution
        :type height: int
        :param near_clipping_plane: the minimum distance from which the sensor
                                    will be able to detect.
        :type near_clipping_plane: float
        :param far_clipping_plane: the maximum distance from which the sensor
                                   will be able to detect.
        :type far_clipping_plane: float
        :param view_angle: field of view of the camera in degree.
        :type view_angle: float
        """
        self._sensor = VisionSensor(sensor_name)
        self.resolution = (width, height)
        self.near_clipping_plane = near_clipping_plane
        self.far_clipping_plane = far_clipping_plane
        self.view_angle = view_angle
        # _callback holds registered functions; freed slots are set to None
        # and their indices tracked (sorted) in _removed_ids for reuse.
        self._callback = []
        self._removed_ids = []
        # unregister from the step listener once this device is collected
        ref = weakref.ref(self, StepListener.unregister)
        StepListener.register(ref)

    @property
    def sensor_name(self):
        """
        :return: name of the sensor in the scene
        :rtype: str
        """
        return self._sensor.get_name()

    @property
    def resolution(self):
        """
        :return: camera resolution (width, height)
        :rtype: tuple(int, int)
        """
        return tuple(self._sensor.get_resolution())

    @resolution.setter
    def resolution(self, res):
        """
        :param res: camera resolution (width, height)
        :type res: tuple(int, int)
        """
        self._sensor.set_resolution(res)

    @property
    def near_clipping_plane(self):
        """
        :return: the minimum distance from which the sensor will be able to
                 detect.
        :rtype: float
        """
        return self._sensor.get_near_clipping_plane()

    @near_clipping_plane.setter
    def near_clipping_plane(self, val):
        """
        :param val: the minimum distance from which the sensor will be able
                    to detect.
        :type val: float
        """
        self._sensor.set_near_clipping_plane(val)

    @property
    def far_clipping_plane(self):
        """
        :return: the maximum distance from which the sensor will be able to
                 detect.
        :rtype: float
        """
        return self._sensor.get_far_clipping_plane()

    @far_clipping_plane.setter
    def far_clipping_plane(self, val):
        """
        :param val: the maximum distance from which the sensor will be able
                    to detect.
        :type val: float
        """
        self._sensor.set_far_clipping_plane(val)

    @property
    def view_angle(self):
        """
        :return: field of view of the camera in degree.
        :rtype: float
        """
        return self._sensor.get_perspective_angle()

    @view_angle.setter
    def view_angle(self, val):
        """
        :param val: field of view of the camera in degree.
        :type val: float
        """
        self._sensor.set_perspective_angle(val)

    def get_image(self):
        """
        Captures an image for the current simulation step from the vision
        sensor and converts it to cv2 format

        :return: the image
        :rtype: cv2 image
        """
        frame = self._sensor.capture_rgb()
        # RGB float [0,1] -> BGR uint8, as used by cv2
        frame = np.uint8(frame[:, :, [2, 1, 0]] * 255)
        return frame

    def step(self):
        """
        Executes all registered callbacks with the sensor image for the
        current simulation step.
        """
        frame = self.get_image()
        id_start = 0
        # iterate through callbacks while skipping removed ids (if any)
        for id_end in self._removed_ids:
            for id in range(id_start, id_end):
                self._callback[id](frame)
            # FIX: resume AFTER the removed slot; the original set
            # `id_start = id_end`, so the next segment started on the
            # removed (None) entry and tried to call it.
            id_start = id_end + 1
        # iterate through (remaining) callbacks
        for id in range(id_start, len(self._callback)):
            self._callback[id](frame)

    def add_callback(self, function):
        """
        Adds a callback for the event loop

        Whenever step is called, all registered callbacks are executed.
        The callback must take exactly 1 argument through which it receives
        the frame.

        :param function: Function to add as callback
        :type function: function
        :return: id of the added callback
        :rtype: int
        """
        if not (inspect.isfunction(function) or inspect.ismethod(function)):
            logger.error("Trying to add non-function callback")
            raise ValueError("Trying to add non-function callback")
        if len(self._removed_ids) == 0:
            self._callback += [function]
            id = len(self._callback) - 1
        else:
            # reuse the smallest freed slot.
            # FIX: the original only *read* `self._callback[id]` and never
            # stored the function, leaving the slot as None.
            id = min(self._removed_ids)
            self._callback[id] = function
            self._removed_ids.remove(id)
        logger.info("Added callback with id %i", id)
        return id

    def remove_callback(self, id):
        """
        Removes callback with given id

        :param id: id of the callback
        :type id: int
        """
        if id in self._removed_ids or id not in range(0, len(self._callback)):
            logger.warning("Callback with id %i does not exist", id)
            return
        if id == len(self._callback) - 1:
            # removing the tail: also truncate any freed slots directly
            # before it. FIX: the original's `self._removed_ids[:-1]` was a
            # no-op expression and never removed the trailing freed ids.
            i = id
            while self._removed_ids and self._removed_ids[-1] == i - 1:
                self._removed_ids.pop()
                i -= 1
            self._callback = self._callback[:i]
        else:
            # keep indices stable: blank the slot and mark it reusable
            self._callback[id] = None
            self._removed_ids += [id]
            self._removed_ids.sort()
        logger.info("Removed callback with id %i", id)

    def clean_callbacks(self):
        """
        Removes all saved callbacks
        """
        self._callback.clear()
        self._removed_ids.clear()
        logger.info("Cleared callbacks")
def setParams(self, params):
    """Launch the Pioneer scene and initialise wheels, stereo cameras,
    pose structures and shared state.

    :param params: component configuration (unused here)
    """
    SCENE_FILE = '../../etc/informatica.ttt'
    self.pr = PyRep()
    self.pr.launch(SCENE_FILE, headless=False)
    self.pr.start()

    # robot body and the four P3-AT wheel joints
    self.robot_object = Shape("Pioneer")
    self.back_left_wheel = Joint("p3at_back_left_wheel_joint")
    self.back_right_wheel = Joint("p3at_back_right_wheel_joint")
    self.front_left_wheel = Joint("p3at_front_left_wheel_joint")
    self.front_right_wheel = Joint("p3at_front_right_wheel_joint")
    self.radius = 110  # mm
    self.semi_width = 140  # mm

    def camera_entry(name, cam_id):
        # Cache the sensor handle together with its pinhole intrinsics.
        sensor = VisionSensor(name)
        width, height = sensor.get_resolution()
        fov = np.radians(sensor.get_perspective_angle())
        return {
            "handle": sensor,
            "id": cam_id,
            "angle": fov,
            "width": width,
            "height": height,
            # focal length in pixels: (w / 2) / tan(fov / 2)
            "focal": (width / 2) / np.tan(fov / 2),
            "rgb": np.array(0),
            "depth": np.ndarray(0),
        }

    # stereo pair: write-side dicts plus a shallow read-side copy
    self.cameras_write = {}
    self.cameras_read = {}
    self.front_left_camera_name = "pioneer_camera_left"
    self.cameras_write[self.front_left_camera_name] = camera_entry(
        self.front_left_camera_name, 0)
    self.front_right_camera_name = "pioneer_camera_right"
    self.cameras_write[self.front_right_camera_name] = camera_entry(
        self.front_right_camera_name, 1)
    self.cameras_read = self.cameras_write.copy()
    self.mutex_c = Lock()

    # PoseEstimation: double-buffered full pose plus its lock
    self.robot_full_pose_write = RoboCompFullPoseEstimation.FullPoseEuler()
    self.robot_full_pose_read = RoboCompFullPoseEstimation.FullPoseEuler()
    self.mutex = Lock()

    # communication / bookkeeping state
    self.ldata = []
    self.joystick_newdata = []
    self.speed_robot = []
    self.speed_robot_ant = []
    self.last_received_data_time = 0
class SphericalVisionSensor(Object):
    """An object able capture 360 degree omni-directional images.

    Wraps two "final" sensors (`<name>_sensorDepth`, `<name>_sensorRGB`)
    holding the omni-directional output, plus six directed sub-sensors
    (front/top/back/bottom/left/right) that supply the source images. The
    combination is done sim-side by the 'handleSpherical@PyRep' script
    (see :meth:`handle_explicitly`).
    """

    def __init__(self, name):
        super().__init__(name)
        # final omni-directional sensors
        self._sensor_depth = VisionSensor('%s_sensorDepth' % (self.get_name()))
        self._sensor_rgb = VisionSensor('%s_sensorRGB' % (self.get_name()))
        # directed sub-sensors
        names = ['front', 'top', 'back', 'bottom', 'left', 'right']
        self._six_sensors = [
            VisionSensor('%s_%s' % (self.get_name(), n)) for n in names
        ]
        self._front = self._six_sensors[0]
        # directed sub-sensor handles list (passed to the sim-side script)
        self._six_sensor_handles = [
            vs.get_handle() for vs in self._six_sensors
        ]
        # set all to explicit handling so they only render when requested
        self._set_all_to_explicit_handling()
        # assert sensor properties are matching across all involved sensors
        self._assert_matching_resolutions()
        self._assert_matching_render_modes()
        self._assert_matching_entities_to_render()
        self._assert_matching_window_sizes()
        self._assert_matching_near_clipping_planes()
        self._assert_matching_far_clipping_planes()
        # near and far clipping plane, cached for depth de-normalisation
        self._near_clipping_plane = self.get_near_clipping_plane()
        self._far_clipping_plane = self.get_far_clipping_plane()
        self._clipping_plane_diff = self._far_clipping_plane - self._near_clipping_plane

    # Private #
    # --------#

    def _set_all_to_explicit_handling(self):
        """Put every wrapped sensor into explicit-handling mode."""
        self._sensor_depth.set_explicit_handling(1)
        self._sensor_rgb.set_explicit_handling(1)
        for sensor in self._six_sensors:
            sensor.set_explicit_handling(1)

    def _assert_matching_resolutions(self):
        """Final RGB/depth must agree; all sub-sensors must match front."""
        assert self._sensor_depth.get_resolution(
        ) == self._sensor_rgb.get_resolution()
        front_sensor_res = self._front.get_resolution()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_resolution() == front_sensor_res

    def _assert_matching_render_modes(self):
        """All sensors must share one render mode."""
        assert self._sensor_depth.get_render_mode(
        ) == self._sensor_rgb.get_render_mode()
        front_sensor_render_mode = self._front.get_render_mode()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_render_mode() == front_sensor_render_mode

    def _assert_matching_entities_to_render(self):
        """All sensors must render the same entity/collection."""
        assert self._sensor_depth.get_entity_to_render(
        ) == self._sensor_rgb.get_entity_to_render()
        front_sensor_entity_to_render = self._front.get_entity_to_render()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_entity_to_render(
            ) == front_sensor_entity_to_render

    def _assert_matching_window_sizes(self):
        """The two final sensors must share one windowed size."""
        assert self._sensor_depth.get_windowed_size(
        ) == self._sensor_rgb.get_windowed_size()

    def _assert_matching_near_clipping_planes(self):
        """All six sub-sensors must share one near clipping plane."""
        front_sensor_near = self._front.get_near_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_near_clipping_plane() == front_sensor_near

    def _assert_matching_far_clipping_planes(self):
        """All six sub-sensors must share one far clipping plane."""
        front_sensor_far = self._front.get_far_clipping_plane()
        for sensor in self._six_sensors[1:]:
            assert sensor.get_far_clipping_plane() == front_sensor_far

    def _get_requested_type(self) -> ObjectType:
        """Return the sim object type of this object's own handle."""
        return ObjectType(sim.simGetObjectType(self.get_handle()))

    # Public #
    # -------#

    def handle_explicitly(self) -> None:
        """Handle spherical vision sensor explicitly.

        This enables capturing image (e.g., capture_rgb())
        without PyRep.step().
        """
        utils.script_call(
            'handleSpherical@PyRep', PYREP_SCRIPT_TYPE,
            [self._sensor_depth._handle, self._sensor_rgb._handle] +
            self._six_sensor_handles, [], [], [])

    def capture_rgb(self) -> np.ndarray:
        """Retrieves the rgb-image of a spherical vision sensor.

        :return: A numpy array of size (width, height, 3)
        """
        return self._sensor_rgb.capture_rgb()

    def capture_depth(self, in_meters=False) -> np.ndarray:
        """Retrieves the depth-image of a spherical vision sensor.

        :param in_meters: Whether the depth should be returned in meters.
        :return: A numpy array of size (width, height)
        """
        # depth is stored normalised in [0, 1] in the depth sensor's red
        # channel; de-normalise between the clipping planes when requested
        if in_meters:
            return self._sensor_depth.capture_rgb(
            )[:, :, 0] * self._clipping_plane_diff + self._near_clipping_plane
        return self._sensor_depth.capture_rgb()[:, :, 0]

    def get_resolution(self) -> List[int]:
        """ Return the spherical vision sensor's resolution.

        :return: Resolution [x, y]
        """
        return self._sensor_depth.get_resolution()

    def set_resolution(self, resolution: List[int]) -> None:
        """ Set the spherical vision sensor's resolution.

        :param resolution: New resolution [x, y]
        :raises Exception: If x is not exactly twice y, or y is odd.
        """
        if resolution[0] != 2 * resolution[1]:
            raise Exception(
                'Spherical vision sensors must have an X resolution 2x the Y resolution'
            )
        if resolution[1] % 2 != 0:
            raise Exception(
                'Spherical vision sensors must have an even Y resolution')
        # each directed sub-sensor is square with half the Y resolution
        box_sensor_resolution = [
            int(resolution[1] / 2),
            int(resolution[1] / 2)
        ]
        for sensor in self._six_sensors:
            sensor.set_resolution(box_sensor_resolution)
        self._sensor_depth.set_resolution(resolution)
        self._sensor_rgb.set_resolution(resolution)

    def get_render_mode(self) -> RenderMode:
        """ Retrieves the spherical vision sensor's rendering mode

        :return: RenderMode for the current rendering mode.
        """
        return self._sensor_depth.get_render_mode()

    def set_render_mode(self, render_mode: RenderMode) -> None:
        """ Set the spherical vision sensor's rendering mode

        :param render_mode: The new sensor rendering mode, one of:
            RenderMode.OPENGL
            RenderMode.OPENGL_AUXILIARY
            RenderMode.OPENGL_COLOR_CODED
            RenderMode.POV_RAY
            RenderMode.EXTERNAL
            RenderMode.EXTERNAL_WINDOWED
            RenderMode.OPENGL3
            RenderMode.OPENGL3_WINDOWED
        """
        self._sensor_depth.set_render_mode(render_mode)
        self._sensor_rgb.set_render_mode(render_mode)
        for sensor in self._six_sensors:
            sensor.set_render_mode(render_mode)

    def get_windowed_size(self) -> Sequence[int]:
        """Get the size of windowed rendering for the omni-directional image.

        :return: The (x, y) resolution of the window. 0 for full-screen.
        """
        return self._sensor_depth.get_windowed_size()

    def set_windowed_size(self, resolution: Sequence[int] = (0, 0)) -> None:
        """Set the size of windowed rendering for the omni-directional image.

        :param resolution: The (x, y) resolution of the window.
            0 for full-screen.
        """
        self._sensor_depth.set_windowed_size(resolution)
        self._sensor_rgb.set_windowed_size(resolution)

    def get_near_clipping_plane(self) -> float:
        """ Get the spherical depth sensor's near clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_near_clipping_plane()

    def set_near_clipping_plane(self, near_clipping: float) -> None:
        """ Set the spherical depth sensor's near clipping plane.

        :param near_clipping: New near clipping plane (in metres)
        """
        self._near_clipping_plane = near_clipping
        self._clipping_plane_diff = self._far_clipping_plane - near_clipping
        for sensor in self._six_sensors:
            sensor.set_near_clipping_plane(near_clipping)

    def get_far_clipping_plane(self) -> float:
        """ Get the Sensor's far clipping plane.

        :return: Near clipping plane (metres)
        """
        return self._front.get_far_clipping_plane()

    def set_far_clipping_plane(self, far_clipping: float) -> None:
        """ Set the spherical depth sensor's far clipping plane.

        :param far_clipping: New far clipping plane (in metres)
        """
        self._far_clipping_plane = far_clipping
        self._clipping_plane_diff = far_clipping - self._near_clipping_plane
        for sensor in self._six_sensors:
            sensor.set_far_clipping_plane(far_clipping)

    def set_entity_to_render(self, entity_to_render: int) -> None:
        """ Set the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection.
        -1 to render all objects in scene.

        :param entity_to_render: Handle of the entity to render
        """
        self._sensor_depth.set_entity_to_render(entity_to_render)
        self._sensor_rgb.set_entity_to_render(entity_to_render)

    def get_entity_to_render(self) -> int:
        """ Get the entity to render to the spherical vision sensor,
        this can be an object or more usefully a collection.
        -1 if all objects in scene are rendered.

        :return: Handle of the entity to render
        """
        return self._sensor_depth.get_entity_to_render()