Example #1
    def setParams(self, params):
        SCENE_FILE = '../../etc/basic_scene.ttt'

        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        
        self.robot = TurtleBot()
        self.drone = Shape('Quadricopter')
        self.cameras = {}
        self.front_camera_name = "frontCamera"
        cam = VisionSensor(self.front_camera_name)
        self.cameras["frontCamera"] = {"handle": cam,
                                       "id": 0,
                                       "angle": np.radians(cam.get_perspective_angle()),
                                       "width": cam.get_resolution()[0],
                                       "height": cam.get_resolution()[1],
                                       "focal": (cam.get_resolution()[0] / 2) / np.tan(np.radians(cam.get_perspective_angle() / 2)),
                                       "rgb": np.array(0),
                                       "depth": np.ndarray(0)}

        self.joystick_newdata = []
        self.speed_robot = []
        self.speed_robot_ant = []
        self.last_received_data_time = 0

        self.once = False
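
The setParams snippets in these examples omit their imports. A minimal set that would make Example #1 self-contained might look like the following; the module paths follow PyRep's layout, so adjust them to the installed version:

import numpy as np
from pyrep import PyRep
from pyrep.objects.shape import Shape
from pyrep.objects.vision_sensor import VisionSensor
from pyrep.robots.mobiles.turtlebot import TurtleBot
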
Example #2
    def setParams(self, params):

        SCENE_FILE = '../etc/gen3-robolab.ttt'
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()
        self.mode = 0
        self.bloqueo = True

        #self.robot = Viriato()
        #self.robot = YouBot()
        self.robot_object = Shape("gen3")

        self.cameras = {}
        self.camera_arm_name = "camera_arm"
        cam = VisionSensor(self.camera_arm_name)
        self.cameras[self.camera_arm_name] = {
            "handle": cam,
            "id": 0,
            "angle": np.radians(cam.get_perspective_angle()),
            "width": cam.get_resolution()[0],
            "height": cam.get_resolution()[1],
            "focal": (cam.get_resolution()[0] / 2) / np.tan(np.radians(cam.get_perspective_angle() / 2)),
            "rgb": np.array(0),
            "depth": np.ndarray(0)
        }

        self.joystick_newdata = []
        self.last_received_data_time = 0
Example #3
class TestVisionSensors(TestCore):
    def setUp(self):
        super().setUp()
        for _ in range(10):
            self.pyrep.step()
        self.cam = VisionSensor('cam0')

    def test_handle_explicitly(self):
        cam = VisionSensor.create((640, 480))

        # blank image
        rgb = cam.capture_rgb()
        self.assertEqual(rgb.sum(), 0)

        # non-blank image
        cam.set_explicit_handling(value=1)
        cam.handle_explicitly()
        rgb = cam.capture_rgb()
        self.assertNotEqual(rgb.sum(), 0)

        cam.remove()

    def test_capture_rgb(self):
        img = self.cam.capture_rgb()
        self.assertEqual(img.shape, (16, 16, 3))
        #  Check that it's not a blank image
        self.assertFalse(img.min() == img.max() == 0.0)

    def test_capture_depth(self):
        img = self.cam.capture_depth()
        self.assertEqual(img.shape, (16, 16))
        # Check that it's not a blank depth map
        self.assertFalse(img.min() == img.max() == 1.0)

    def test_create(self):
        cam = VisionSensor.create([640, 480],
                                  perspective_mode=True,
                                  view_angle=35.0,
                                  near_clipping_plane=0.25,
                                  far_clipping_plane=5.0,
                                  render_mode=RenderMode.OPENGL3)
        self.assertEqual(cam.capture_rgb().shape, (480, 640, 3))
        self.assertEqual(cam.get_perspective_mode(),
                         PerspectiveMode.PERSPECTIVE)
        self.assertAlmostEqual(cam.get_perspective_angle(), 35.0, 3)
        self.assertEqual(cam.get_near_clipping_plane(), 0.25)
        self.assertEqual(cam.get_far_clipping_plane(), 5.0)
        self.assertEqual(cam.get_render_mode(), RenderMode.OPENGL3)

    def test_get_set_resolution(self):
        self.cam.set_resolution([320, 240])
        self.assertEqual(self.cam.get_resolution(), [320, 240])
        self.assertEqual(self.cam.capture_rgb().shape, (240, 320, 3))

    def test_get_set_perspective_mode(self):
        for perspective_mode in PerspectiveMode:
            self.cam.set_perspective_mode(perspective_mode)
            self.assertEqual(self.cam.get_perspective_mode(), perspective_mode)

    def test_get_set_render_mode(self):
        for render_mode in [RenderMode.OPENGL, RenderMode.OPENGL3]:
            self.cam.set_render_mode(render_mode)
            self.assertEqual(self.cam.get_render_mode(), render_mode)

    def test_get_set_perspective_angle(self):
        self.cam.set_perspective_angle(45.0)
        self.assertAlmostEqual(self.cam.get_perspective_angle(), 45.0, 3)

    def test_get_set_orthographic_size(self):
        self.cam.set_orthographic_size(1.0)
        self.assertEqual(self.cam.get_orthographic_size(), 1.0)

    def test_get_set_near_clipping_plane(self):
        self.cam.set_near_clipping_plane(0.5)
        self.assertEqual(self.cam.get_near_clipping_plane(), 0.5)

    def test_get_set_far_clipping_plane(self):
        self.cam.set_far_clipping_plane(0.5)
        self.assertEqual(self.cam.get_far_clipping_plane(), 0.5)

    def test_get_set_windowed_size(self):
        self.cam.set_windowed_size((640, 480))
        self.assertEqual(self.cam.get_windowed_size(), (640, 480))

    def test_get_set_entity_to_render(self):
        self.cam.set_entity_to_render(-1)
        self.assertEqual(self.cam.get_entity_to_render(), -1)
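
A detail worth keeping in mind when reading these tests: capture_rgb() returns a float array with values in [0, 1], which is why the blank-image checks compare against 0.0 and the blank-depth check against 1.0. Converting to a standard 8-bit image is a one-liner; a minimal sketch, assuming a running simulation with a sensor named 'cam0' and NumPy imported as np:

cam = VisionSensor('cam0')
rgb_float = cam.capture_rgb()                    # float values in [0.0, 1.0]
rgb_uint8 = (rgb_float * 255).astype(np.uint8)   # 8-bit image, as done in Examples 5 and 6
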
Example #4
    def setParams(self, params):
        SCENE_FILE = params["scene_dir"]
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()

        self.cameras = {}
        cam = VisionSensor("Camera_Shoulder")
        self.cameras["Camera_Shoulder"] = {
            "handle":
            cam,
            "id":
            1,
            "angle":
            np.radians(cam.get_perspective_angle()),
            "width":
            cam.get_resolution()[0],
            "height":
            cam.get_resolution()[1],
            "depth":
            3,
            "focal":
            cam.get_resolution()[0] /
            np.tan(np.radians(cam.get_perspective_angle())),
            "position":
            cam.get_position(),
            "rotation":
            cam.get_quaternion(),
            "image_rgb":
            np.array(0),
            "image_rgbd":
            np.array(0),
            "depth":
            np.ndarray(0)
        }

        self.grasping_objects = {}
        can = Shape("can")
        self.grasping_objects["002_master_chef_can"] = {
            "handle": can,
            "sim_pose": None,
            "pred_pose_rgb": None,
            "pred_pose_rgbd": None
        }

        with open("objects_pcl.pickle", "rb") as file:
            self.object_pcl = pickle.load(file)

        self.intrinsics = np.array(
            [[self.cameras["Camera_Shoulder"]["focal"], 0.0,
              self.cameras["Camera_Shoulder"]["width"] / 2.0],
             [0.0, self.cameras["Camera_Shoulder"]["focal"],
              self.cameras["Camera_Shoulder"]["height"] / 2.0],
             [0.0, 0.0, 1.0]])

        self.arm_ops = {
            "MoveToHome": 1,
            "MoveToObj": 2,
            "CloseGripper": 3,
            "OpenGripper": 4
        }

        self.grasping_iter = 10

        self.arm_base = Shape("gen3")
        self.arm_target = Dummy("target")
        self.gripper = Joint("RG2_openCloseJoint")
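
Given the intrinsic matrix built above, a pixel (u, v) with metric depth d back-projects to a camera-frame 3D point through the standard pinhole model. A minimal sketch; the helper name is illustrative, not part of the original component:

def backproject(u, v, d, K):
    # pinhole back-projection: X = (u - cx) * d / fx, Y = (v - cy) * d / fy, Z = d
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    return np.array([(u - cx) * d / fx, (v - cy) * d / fy, d])
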
Example #5
def simulate(scene_dir, cls_indices):
    # read 3d point cloud vertices npy (for visualization)
    vertex_npy = np.load("mesh_data/custom_vertex.npy")

    # loop over all scene files in scenes directory
    for scene_path in os.listdir(scene_dir):
        # check whether it's a scene file or not
        if not scene_path.endswith('.ttt'):
            continue

        # create an output directory for each scene
        scene_out_dir = os.path.join('./sim_data/', scene_path[:-4])
        if not os.path.isdir(scene_out_dir):
            os.mkdir(scene_out_dir)

        # launch scene file
        SCENE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  scene_dir, scene_path)
        pr = PyRep()
        pr.launch(SCENE_FILE, headless=True)
        pr.start()

        pr.step()

        # define vision sensor
        camera = VisionSensor('cam')
        # set camera absolute pose to world coordinates
        camera.set_pose([0,0,0,0,0,0,1])
        pr.step()

        # define scene shapes
        shapes = []
        shapes.append(Shape('Shape1'))
        shapes.append(Shape('Shape2'))
        shapes.append(Shape('Shape3'))
        shapes.append(Shape('Shape4'))
        pr.step()

        print("Getting vision sensor intrinsics ...")
        # get vision sensor parameters
        cam_res = camera.get_resolution()
        cam_per_angle = camera.get_perspective_angle()
        ratio = cam_res[0] / cam_res[1]
        if ratio > 1:
            cam_angle_x = cam_per_angle
            cam_angle_y = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) / ratio))
        else:
            # the perspective angle spans the larger (vertical) dimension here
            cam_angle_x = 2 * degrees(atan(tan(radians(cam_per_angle / 2)) * ratio))
            cam_angle_y = cam_per_angle
        # get vision sensor intrinsic matrix
        cam_focal_x = float(cam_res[0] / 2.0) / tan(radians(cam_angle_x / 2.0))
        cam_focal_y = float(cam_res[1] / 2.0) / tan(radians(cam_angle_y / 2.0))
        intrinsics = np.array([[cam_focal_x, 0.0, cam_res[0] / 2.0],
                               [0.0, cam_focal_y, cam_res[1] / 2.0],
                               [0.0, 0.0, 1.0]])

        # loop to get 5000 samples per scene
        for i in range(5000):
            print("Randomizing objects' poses ...")
            # set random pose to objects in the scene
            obj_colors = []
            for shape in shapes:
                shape.set_pose([
                        random.uniform(-0.25,0.25), random.uniform(-0.25,0.25), random.uniform(0.25,2),
                        random.uniform(-1,1), random.uniform(-1,1), random.uniform(-1,1),
                        random.uniform(-1,1)
                    ])
                obj_colors.append([random.uniform(0,1), random.uniform(0,1), random.uniform(0,1)])
                pr.step()

            print("Reading vision sensor RGB signal ...")
            # read vision sensor RGB image
            img = camera.capture_rgb()
            img = np.uint8(img * 255.0)

            print("Reading vision sensor depth signal ...")
            # read vision sensor depth image
            depth = camera.capture_depth()
            depth = np.uint8(depth * 255.0)

            print("Generating front mask for RGB signal ...")
            # generate RGB front mask
            front_mask = np.sum(img, axis=2)
            front_mask[front_mask != 0] = 255
            front_mask = Image.fromarray(np.uint8(front_mask))
            alpha_img = Image.fromarray(img)
            alpha_img.putalpha(front_mask)

            print("Saving sensor output ...")
            # save sensor output
            alpha_img.save(scene_out_dir+f'/{str(i).zfill(6)}-color.png')
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-depth.png', depth)
            
            print("Getting objects' poses ...")
            # get poses for all objects in scene
            poses = []
            for shape in shapes:
                poses.append(get_object_pose(shape, camera))
            pose_mat = np.stack(poses, axis=2)
            # save pose visualization on RGB image
            img_viz = visualize_predictions(poses, cls_indices[scene_path], img, vertex_npy, intrinsics)
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-viz.png', img_viz)

            print("Saving meta-data ...")
            # save meta-data .mat
            meta_dict = {
                'cls_indexes'      : np.array(cls_indices[scene_path]),
                'intrinsic_matrix' : intrinsics,
                'poses'            : pose_mat
            }
            savemat(scene_out_dir+f'/{str(i).zfill(6)}-meta.mat', meta_dict)

            print("Performing semantic segmentation of RGB signal ...")
            # perform semantic segmentation of RGB image
            seg_img = camera.capture_rgb()
            seg_img = perform_segmentation(seg_img, cls_indices[scene_path], poses, vertex_npy, intrinsics)
            imsave(scene_out_dir+f'/{str(i).zfill(6)}-label.png', seg_img)

        # stop simulation
        pr.stop()
        pr.shutdown()
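
The per-axis field-of-view computation above follows CoppeliaSim's convention that the perspective angle spans the larger image dimension, with the other axis given by tan(fov_x / 2) / tan(fov_y / 2) = width / height. The same logic as a reusable helper; a minimal sketch, with the function name being illustrative:

from math import atan, degrees, radians, tan

def split_fov(res_x, res_y, perspective_angle):
    # the perspective angle spans the larger dimension; derive the other axis
    ratio = res_x / res_y
    if ratio > 1:
        return perspective_angle, 2 * degrees(atan(tan(radians(perspective_angle / 2)) / ratio))
    return 2 * degrees(atan(tan(radians(perspective_angle / 2)) * ratio)), perspective_angle
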
Example #6
class PyrepDevice(object):
    """
    PyrepDevice handles communication with CoppeliaSim Vision Sensors via
    Pyrep. Retrieved images are converted to cv2 format and passed to registered
    callback functions.
    """

    @staticmethod
    def autodetect_nicoeyes():
        """
        Returns a tuple containing the detected names of left and right eye
        sensor

        :return: Devicenames as (left, right) tuple (None if not found)
        :rtype: tuple
        """
        eye_sensors = ["left_eye", "right_eye"]
        for i in range(len(eye_sensors)):
            if not VisionSensor.exists(eye_sensors[i]):
                logger.warning(
                    "Could not find vision sensor named '%s'", eye_sensors[i]
                )
                eye_sensors[i] = None
        return tuple(eye_sensors)

    def __init__(
        self,
        sensor_name,
        width=640,
        height=480,
        near_clipping_plane=1e-2,
        far_clipping_plane=10.0,
        view_angle=60.0,
    ):
        """
        Connects to Vision Sensor with given name and updates parameters
        accordingly.

        :param sensor_name: name (or handle) of the vision sensor in the scene
        :type sensor_name: str
        :param width: horizontal camera resolution
        :type width: int
        :param height: vertical camera resolution
        :type height: int
        :param near_clipping_plane: the minimum distance from which the sensor will be able to detect.
        :type near_clipping_plane: float
        :param far_clipping_plane: the maximum distance from which the sensor will be able to detect.
        :type far_clipping_plane: float
        :param view_angle: field of view of the camera in degree.
        :type view_angle: float
        """
        self._sensor = VisionSensor(sensor_name)
        self.resolution = (width, height)
        self.near_clipping_plane = near_clipping_plane
        self.far_clipping_plane = far_clipping_plane
        self.view_angle = view_angle
        self._callback = []
        self._removed_ids = []
        ref = weakref.ref(self, StepListener.unregister)
        StepListener.register(ref)

    @property
    def sensor_name(self):
        """
        :return: name of the sensor in the scene
        :rtype: str
        """
        return self._sensor.get_name()

    @property
    def resolution(self):
        """
        :return: camera resolution (width, height)
        :rtype: tuple(int, int)
        """
        return tuple(self._sensor.get_resolution())

    @resolution.setter
    def resolution(self, res):
        """
        :param res: camera resolution (width, height)
        :type res: tuple(int, int)
        """
        self._sensor.set_resolution(res)

    @property
    def near_clipping_plane(self):
        """
        :return: the minimum distance from which the sensor will be able to detect.
        :rtype: float
        """
        return self._sensor.get_near_clipping_plane()

    @near_clipping_plane.setter
    def near_clipping_plane(self, val):
        """
        :param val: the minimum distance from which the sensor will be able to detect.
        :type val: float
        """
        self._sensor.set_near_clipping_plane(val)

    @property
    def far_clipping_plane(self):
        """
        :return: the maximum distance from which the sensor will be able to detect.
        :rtype: float
        """
        return self._sensor.get_far_clipping_plane()

    @far_clipping_plane.setter
    def far_clipping_plane(self, val):
        """
        :param val: the maximum distance from which the sensor will be able to detect.
        :type val: float
        """
        self._sensor.set_far_clipping_plane(val)

    @property
    def view_angle(self):
        """
        :return: field of view of the camera in degree.
        :rtype: float
        """
        return self._sensor.get_perspective_angle()

    @view_angle.setter
    def view_angle(self, val):
        """
        :param val: field of view of the camera in degree.
        :type val: float
        """
        self._sensor.set_perspective_angle(val)

    def get_image(self):
        """
        Captures an image for the current simulation step from the
        vision sensor and converts it to cv2 format

        :return: the image
        :rtype: cv2 image
        """
        frame = self._sensor.capture_rgb()
        frame = np.uint8(frame[:, :, [2, 1, 0]] * 255)
        return frame

    def step(self):
        """
        Executes all registered callbacks with the sensor image for the current
        simulation step.
        """
        frame = self.get_image()
        id_start = 0
        # iterate through callbacks while skipping removed ids (if any)
        for id_end in self._removed_ids:
            for id in range(id_start, id_end):
                self._callback[id](frame)
            id_start = id_end + 1  # skip the removed (None) slot itself
        # iterate through (remaining) callbacks
        for id in range(id_start, len(self._callback)):
            self._callback[id](frame)

    def add_callback(self, function):
        """
        Adds a callback for the event loop

        Whenever step is called, all registered callbacks are executed.

        The callback must take exactly 1 argument through which it receives the
        frame.

        :param function: Function to add as callback
        :type function: function

        :return: id of the added callback
        :rtype: int
        """
        if not (inspect.isfunction(function) or inspect.ismethod(function)):
            logger.error("Trying to add non-function callback")
            raise ValueError("Trying to add non-function callback")
        if len(self._removed_ids) == 0:
            self._callback += [function]
            id = len(self._callback) - 1
        else:
            id = min(self._removed_ids)
            self._callback[id] = function  # fill the slot vacated by a removed callback
            self._removed_ids.remove(id)
        logger.info("Added callback with id %i", id)
        return id

    def remove_callback(self, id):
        """
        Removes callback with given id

        :param id: id of the callback
        :type id: int
        """
        if id in self._removed_ids or id not in range(0, len(self._callback)):
            logger.warning("Callback with id %i does not exist", id)
            return
        if id == len(self._callback) - 1:
            i = id
            for removed in reversed(self._removed_ids):
                if i - 1 != removed:
                    break
                # drop the trailing removed id so the callback list can shrink past it
                self._removed_ids = self._removed_ids[:-1]
                i -= 1
            self._callback = self._callback[:i]
        else:
            self._callback[id] = None
            self._removed_ids += [id]
            self._removed_ids.sort()
        logger.info("Removed callback with id %i", id)

    def clean_callbacks(self):
        """
        Removes all saved callbacks
        """
        self._callback.clear()
        self._removed_ids.clear()
        logger.info("Cleared callbacks")
Example #7
    def setParams(self, params):

        SCENE_FILE = '../../etc/informatica.ttt'

        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=False)
        self.pr.start()

        self.robot_object = Shape("Pioneer")
        self.back_left_wheel = Joint("p3at_back_left_wheel_joint")
        self.back_right_wheel = Joint("p3at_back_right_wheel_joint")
        self.front_left_wheel = Joint("p3at_front_left_wheel_joint")
        self.front_right_wheel = Joint("p3at_front_right_wheel_joint")
        self.radius = 110  # mm
        self.semi_width = 140  # mm

        # cameras
        self.cameras_write = {}
        self.cameras_read = {}

        self.front_left_camera_name = "pioneer_camera_left"
        cam = VisionSensor(self.front_left_camera_name)
        self.cameras_write[self.front_left_camera_name] = {
            "handle": cam,
            "id": 0,
            "angle": np.radians(cam.get_perspective_angle()),
            "width": cam.get_resolution()[0],
            "height": cam.get_resolution()[1],
            "focal": (cam.get_resolution()[0] / 2) / np.tan(np.radians(cam.get_perspective_angle() / 2)),
            "rgb": np.array(0),
            "depth": np.ndarray(0)
        }

        self.front_right_camera_name = "pioneer_camera_right"
        cam = VisionSensor(self.front_right_camera_name)
        self.cameras_write[self.front_right_camera_name] = {
            "handle": cam,
            "id": 1,
            "angle": np.radians(cam.get_perspective_angle()),
            "width": cam.get_resolution()[0],
            "height": cam.get_resolution()[1],
            "focal": (cam.get_resolution()[0] / 2) / np.tan(np.radians(cam.get_perspective_angle() / 2)),
            "rgb": np.array(0),
            "depth": np.ndarray(0)
        }

        self.cameras_read = self.cameras_write.copy()  # shallow copy: the per-camera dicts are shared
        self.mutex_c = Lock()

        # PoseEstimation
        self.robot_full_pose_write = RoboCompFullPoseEstimation.FullPoseEuler()
        self.robot_full_pose_read = RoboCompFullPoseEstimation.FullPoseEuler()
        self.mutex = Lock()

        self.ldata = []
        self.joystick_newdata = []
        self.speed_robot = []
        self.speed_robot_ant = []
        self.last_received_data_time = 0
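
The wheel radius and semi-width declared above are exactly what a differential-drive conversion needs, although the snippet does not show it. A minimal sketch of turning an (advance, rotation) command into per-side wheel speeds; the helper name and the mm / rad units are assumptions, not part of the original component:

def wheel_speeds(adv_mm_s, rot_rad_s, radius=110, semi_width=140):
    # differential drive: linear speed of each side, divided by wheel radius
    left = (adv_mm_s - semi_width * rot_rad_s) / radius    # rad/s
    right = (adv_mm_s + semi_width * rot_rad_s) / radius   # rad/s
    return left, right
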