    def test_stereoCamera(self, params):
        cam, p = params
        camGroup = ct.CameraGroup(cam.projection,
                                  (cam.orientation, ct.SpatialOrientation()))
        cam1 = camGroup[0]
        cam2 = camGroup[1]

        cam1.tilt_deg = 0
        cam1.roll_deg = 0
        cam1.heading_deg = 0

        cam2.tilt_deg = 0
        cam2.roll_deg = 0
        cam2.heading_deg = 0
        cam2.pos_x_m = 10

        p = np.array(p)
        # transform point
        p1, p2 = camGroup.imagesFromSpace(p)
        p3 = camGroup.spaceFromImages(p1, p2)
        # points behind the camera are allowed to be nan
        p1_single = camGroup[0].spaceFromImage(camGroup[0].imageFromSpace(p))
        p2_single = camGroup[1].spaceFromImage(camGroup[1].imageFromSpace(p))

        p[np.isnan(p1_single)] = np.nan
        p[np.isnan(p2_single)] = np.nan
        np.testing.assert_almost_equal(
            p,
            p3,
            4,
            err_msg=
            "Points from two cameras cannot be projected to the space and back"
        )
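For context, a minimal standalone sketch of the CameraGroup stereo workflow exercised by this test; the projection parameters, the poses, and the test point are illustrative, not taken from the source.

import numpy as np
import cameratransform as ct

# two cameras sharing one projection, the second shifted 10 m along x (illustrative values)
projection = ct.RectilinearProjection(focallength_mm=6.2, image_width_px=3264,
                                      image_height_px=2448, sensor_width_mm=6.17,
                                      sensor_height_mm=4.55)
camGroup = ct.CameraGroup(projection,
                          (ct.SpatialOrientation(elevation_m=10, tilt_deg=45),
                           ct.SpatialOrientation(elevation_m=10, tilt_deg=45, pos_x_m=10)))
point = np.array([2.0, 30.0, 0.0])
im1, im2 = camGroup.imagesFromSpace(point)      # project the point into both images
recovered = camGroup.spaceFromImages(im1, im2)  # triangulate it back to 3D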
Example #2
    def test_parameterSet(self):
        cam = ct.Camera(ct.RectilinearProjection(), ct.SpatialOrientation())
        cam.defaults.elevation_m = 99
        assert cam.elevation_m == 99
        assert cam.defaults.elevation_m == 99
        cam.elevation_m = 10
        assert cam.elevation_m == 10
        assert cam.defaults.elevation_m == 99
        cam.elevation_m = None
        assert cam.elevation_m == 99
        assert cam.defaults.elevation_m == 99
        # test if the parameter sets are linked properly
        cam.orientation.elevation_m = 900
        assert cam.elevation_m == 900
        cam.elevation_m = 800
        assert cam.orientation.elevation_m == 800
        # test a non-parameter attribute
        cam.foo = 99
        assert cam.foo == 99
        cam.defaults.foo = 99
        assert cam.defaults.foo == 99
        self.assertRaises(AttributeError, lambda: cam.bla)
        self.assertRaises(AttributeError, lambda: cam.parameters.bla)
        self.assertRaises(AttributeError, lambda: cam.projection.bla)
        self.assertRaises(AttributeError, lambda: cam.orientation.bla)
        self.assertRaises(AttributeError, lambda: cam.defaults.bla)
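The assertions above pin down the parameter lookup rules: a value set directly on the camera overrides cam.defaults, assigning None falls back to the default again, and elevation_m is shared between the camera and its orientation, so a write through either object is visible from the other. Attributes that are not registered parameters (foo) behave like plain Python attributes, while unknown names (bla) still raise AttributeError on every parameter container.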
Example #3
@st.composite
def orientation(draw,
                elevation=st.floats(0, 1000),
                tilt_deg=st.floats(0, 90),
                roll_deg=st.floats(-180, 180),
                heading_deg=st.floats(-360, 360),
                x_m=st.floats(-100, 100),
                y_m=st.floats(-100, 100)):
    return ct.SpatialOrientation(draw(elevation), draw(tilt_deg),
                                 draw(roll_deg), draw(heading_deg), draw(x_m),
                                 draw(y_m))
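A hedged usage sketch of how such a composite strategy is consumed by hypothesis; the test name, the projection values, and the test point are illustrative, not from the source.

import numpy as np
import cameratransform as ct
from hypothesis import given

@given(orientation=orientation())
def test_orientation_projects(orientation):
    cam = ct.Camera(ct.RectilinearProjection(focallength_px=3000, image_width_px=640,
                                             image_height_px=480),
                    orientation)
    im = cam.imageFromSpace(np.array([0.0, 20.0, 0.0]))
    # every drawn orientation should yield a 2-component image point (nan if not visible)
    assert np.asarray(im).shape[-1] == 2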
Example #4
    def test_init_cam(self):
        # intrinsic camera parameters
        f = 6.2                         # focal length in mm
        sensor_size = (6.17, 4.55)      # sensor width, height in mm
        image_size = (3264, 2448)       # image width, height in px

        # initialize the camera
        cam = ct.Camera(
            ct.RectilinearProjection(focallength_mm=f,
                                     image_width_px=image_size[0],
                                     image_height_px=image_size[1],
                                     sensor_width_mm=sensor_size[0],
                                     sensor_height_mm=sensor_size[1]),
            ct.SpatialOrientation())
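        # Hedged follow-up (not part of the original test): give the camera an
        # illustrative pose and check that a pixel maps to the ground plane and back.
        cam.elevation_m = 10
        cam.tilt_deg = 45
        ground_point = cam.spaceFromImage([1632, 2000])  # pixel -> point on the Z=0 plane
        np.testing.assert_almost_equal(cam.imageFromSpace(ground_point), [1632, 2000], 3)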
Example #5
@st.composite
def camera_down_with_world_points(draw,
                                  projection=projection(),
                                  n=st.one_of(st.integers(1, 1000),
                                              st.just(1))):
    camera = ct.Camera(projection=draw(projection),
                       orientation=ct.SpatialOrientation())
    n = draw(n)
    # the points can either be a single point or an array of points
    if n == 1:
        points = draw(
            st.tuples(st.floats(-100, 100), st.floats(-100, 100),
                      st.floats(-100, 100)))
    else:
        points = draw(
            st_np.arrays(dtype="float",
                         shape=st.tuples(st.just(n), st.just(3)),
                         elements=st.floats(-100, 100)))
    return camera, points
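A hedged sketch of how this strategy plugs into a hypothesis test; the test name is illustrative. It checks the image/space round trip for ground-plane points and ignores points the camera cannot see.

import numpy as np
from hypothesis import given

@given(params=camera_down_with_world_points())
def test_image_space_roundtrip(params):
    camera, points = params
    points = np.array(points, dtype=float)
    points[..., 2] = 0                        # keep the points on the Z=0 ground plane
    recovered = camera.spaceFromImage(camera.imageFromSpace(points))
    valid = ~np.isnan(recovered)              # points the camera cannot see come back as nan
    np.testing.assert_almost_equal(recovered[valid], points[valid], 2)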
Example #6
    def updateCameraParameters(self):
        """
        Update the camera dictionary used for the calculation with potentially user-modified data from the GUI.
        """

        # update current cam parameters
        self.cam = dotdict()
        self.cam.fov_h_deg = getFloat(self.leFOV_horizontal.text())
        self.cam.fov_v_deg = getFloat(self.leFOV_vertical.text())
        self.cam.sensor_w_mm = getFloat(self.leSensor_width.text())
        self.cam.sensor_h_mm = getFloat(self.leSensor_height.text())
        self.cam.focallength_mm = getFloat(self.leFocallength.text())
        self.cam.img_w_px = getInteger(self.leImage_width.text())
        self.cam.img_h_px = getInteger(self.leImage_height.text())
        self.cam.center_x_px = self.cam.img_w_px / 2. + getFloat(
            self.leOffsetX.text())
        self.cam.center_y_px = self.cam.img_h_px / 2. + getFloat(
            self.leOffsetY.text())

        self.cam.heading_deg = getFloat(self.leCamPan.text())
        self.cam.tilt_deg = getFloat(self.leCamTilt.text())
        self.cam.roll_deg = getFloat(self.leCamRoll.text())

        self.cam.elevation_m = getFloat(self.leCamElevation.text())
        self.cam.pos_x_m = getFloat(self.leCamPosX.text())
        self.cam.pos_y_m = getFloat(self.leCamPosY.text())

        self.cam.projection = self.cbProjection.itemText(
            self.cbProjection.currentIndex())

        self.cam.gps_lat = getFloat(self.leCamLat.text())
        self.cam.gps_lon = getFloat(self.leCamLon.text())

        if self.cam.sensor_h_mm is None or self.cam.sensor_w_mm is None:
            self.calcSensorDimensionsFromFOV()

        # save parameters as options to DB
        self.setOption('DM_focallength_mm', self.cam.focallength_mm)
        self.setOption('DM_fov_h_deg', self.cam.fov_h_deg)
        self.setOption('DM_fov_v_deg', self.cam.fov_v_deg)
        self.setOption('DM_sensor_w_mm', self.cam.sensor_w_mm)
        self.setOption('DM_sensor_h_mm', self.cam.sensor_h_mm)
        self.setOption('DM_img_w_px', self.cam.img_w_px)
        self.setOption('DM_img_h_px', self.cam.img_h_px)
        self.setOption('DM_offset_x_px', getFloat(self.leOffsetX.text()))
        self.setOption('DM_offset_y_px', getFloat(self.leOffsetY.text()))

        print("Camera:")
        print(json.dumps(self.cam, indent=4, sort_keys=True))

        self.position = dotdict()
        self.position.cam_elevation = getFloat(self.leCamElevation.text())
        self.position.plane_elevation = getFloat(self.lePlaneElevation.text())

        # save parameters to options
        self.setOption('DM_cam_elevation', self.position.cam_elevation)
        self.setOption('DM_plane_elevation', self.position.plane_elevation)

        print("Position:")
        print(json.dumps(self.position, indent=4, sort_keys=True))

        print(self.cam)

        # update camera parameters
        # self.camera = ct.CameraTransform(self.cam.focallength_mm, [self.cam.sensor_w_mm, self.cam.sensor_h_mm],[self.cam.img_w_px, self.cam.img_h_px])
        orientation = ct.SpatialOrientation(heading_deg=self.cam.heading_deg,
                                            tilt_deg=self.cam.tilt_deg,
                                            roll_deg=self.cam.roll_deg,
                                            elevation_m=self.cam.elevation_m,
                                            pos_x_m=self.cam.pos_x_m,
                                            pos_y_m=self.cam.pos_y_m)
        if self.cam.projection == "Cylindrical":
            projection = ct.CylindricalProjection(
                focallength_mm=self.cam.focallength_mm,
                image_width_px=self.cam.img_w_px,
                image_height_px=self.cam.img_h_px,
                sensor_width_mm=self.cam.sensor_w_mm,
                sensor_height_mm=self.cam.sensor_h_mm,
                center_x_px=self.cam.center_x_px,
                center_y_px=self.cam.center_y_px)
        elif self.cam.projection == "Equirectangular":
            projection = ct.EquirectangularProjection(
                focallength_mm=self.cam.focallength_mm,
                image_width_px=self.cam.img_w_px,
                image_height_px=self.cam.img_h_px,
                sensor_width_mm=self.cam.sensor_w_mm,
                sensor_height_mm=self.cam.sensor_h_mm,
                center_x_px=self.cam.center_x_px,
                center_y_px=self.cam.center_y_px)

        else:  # default to rectilinear projection
            print("Defaulting to rectiliniear")
            projection = ct.RectilinearProjection(
                focallength_mm=self.cam.focallength_mm,
                image_width_px=self.cam.img_w_px,
                image_height_px=self.cam.img_h_px,
                sensor_width_mm=self.cam.sensor_w_mm,
                sensor_height_mm=self.cam.sensor_h_mm,
                center_x_px=self.cam.center_x_px,
                center_y_px=self.cam.center_y_px)
        self.camera = ct.Camera(orientation=orientation, projection=projection)
        self.camera.setGPSpos(lat=self.cam.gps_lat,
                              lon=self.cam.gps_lon,
                              elevation=self.cam.elevation_m)
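For illustration, a standalone sketch (values are placeholders, not plugin defaults) of what a camera assembled like the one above is typically used for: mapping an image pixel to the ground plane and to GPS coordinates.

import cameratransform as ct

camera = ct.Camera(ct.RectilinearProjection(focallength_mm=6.2, image_width_px=3264,
                                            image_height_px=2448, sensor_width_mm=6.17,
                                            sensor_height_mm=4.55),
                   ct.SpatialOrientation(elevation_m=25, tilt_deg=80))
camera.setGPSpos(lat=52.52, lon=13.40, elevation=25)
pixel = [1632, 2000]                   # placeholder image coordinates
world = camera.spaceFromImage(pixel)   # intersection of the viewing ray with the Z=0 plane
gps = camera.gpsFromImage(pixel)       # the same point as (lat, lon, elevation)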
Example #7
    def __init__(self, camera_params):

        self.bridge = CvBridge()
        for camera_id in range(camera_params['n_cameras']):
            setattr(self, 'last_image_%02i' % (camera_id, ),
                    [[None, None, None]])
        self.last_model_states = ModelStates()
        self.spawned_objects = []

        # Camera IDs and corresponding variables holding images
        self.camera_type = camera_params['camera_type']
        self.update_rate = camera_params['update_rate']
        self.image_dims = camera_params['camera_resolution'] + (3, )
        self.camera_dict = {'%02i' % (camera_id,): getattr(self, 'last_image_%02i'\
            % (camera_id,)) for camera_id in range(camera_params['n_cameras'])}

        # ROS Subscribers, Publishers, Services
        for camera_id in range(camera_params['n_cameras']):
            if self.camera_type in ['rgb', 'both']:
                img_callback_rgb = self.__image_callback_wrapper(
                    camera_id, 'rgb')
                topic_name_rgb = '/robot/camera_rgb_%02i' % (camera_id, )
                setattr(
                    self, '__image_rgb_%02i_sub' % (camera_id, ),
                    rospy.Subscriber(topic_name_rgb,
                                     Image,
                                     img_callback_rgb,
                                     queue_size=1))
            if self.camera_type in ['dvs', 'both']:
                img_callback_dvs = self.__image_callback_wrapper(
                    camera_id, 'dvs')
                topic_name_dvs = '/robot/camera_dvs_%02i/events' % (
                    camera_id, )
                setattr(
                    self, '__image_dvs_%02i_sub' % (camera_id, ),
                    rospy.Subscriber(topic_name_dvs,
                                     EventArray,
                                     img_callback_dvs,
                                     queue_size=1))

        self.__set_model_state_pub = rospy.Publisher('/gazebo/set_model_state',
                                                     ModelState,
                                                     queue_size=1)

        rospy.wait_for_service('gazebo/get_model_state', 10.0)
        self.__get_model_state_srv = rospy.ServiceProxy(
            'gazebo/get_model_state', GetModelState)

        # 3D voxel to 2D camera pixel projection parameters (useful for segmentation dataset)
        self.cam_transform = []
        for camera_id in range(camera_params['n_cameras']):
            cam_pose = self.get_object_pose('camera_%02i' % (camera_id))
            cam_pos = cam_pose.position
            cam_ori = cam_pose.orientation
            roll, pitch, yaw = euler_from_quaternion(
                [cam_ori.x, cam_ori.y, cam_ori.z, cam_ori.w])
            (roll, pitch, yaw) = (angle_in_rad * 180 / np.pi
                                  for angle_in_rad in (roll, pitch, yaw))

            cam_projection = ct.RectilinearProjection(
                focallength_px=camera_params['focal_length_px'],
                image=camera_params['camera_resolution'])
            cam_orientation = ct.SpatialOrientation(pos_x_m=cam_pos.x,
                                                    pos_y_m=cam_pos.y,
                                                    elevation_m=cam_pos.z,
                                                    roll_deg=roll,
                                                    tilt_deg=90 - pitch,
                                                    heading_deg=90 - yaw)
            cam_lens = ct.BrownLensDistortion(k1=0.0,
                                              k2=0.0,
                                              k3=0.0,
                                              projection=cam_projection)
            self.cam_transform.append(
                ct.Camera(projection=cam_projection,
                          orientation=cam_orientation,
                          lens=cam_lens))

        rospy.init_node('cdp4_data_collection')
        rospy.wait_for_service('/gazebo/get_world_properties')

        self.__physics_state = rospy.ServiceProxy(
            '/gazebo/get_world_properties', GetWorldProperties)

        while self.__physics_state().sim_time < 2:
            print('Waiting for the simulation to start')
            rospy.sleep(2)

        self.__path_to_models = os.getenv('HOME') + '/.gazebo/models/'

        rospy.wait_for_service('gazebo/set_model_state', 10.0)
        self.__set_model_state_srv = rospy.ServiceProxy(
            'gazebo/set_model_state', SetModelState)

        rospy.wait_for_service('gazebo/spawn_sdf_entity')
        self.__spawn_model_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_entity',
                                                    SpawnEntity)

        rospy.wait_for_service('gazebo/set_visual_properties')
        self.__change_color_srv = rospy.ServiceProxy(
            'gazebo/set_visual_properties', SetVisualProperties)
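A hedged sketch of a helper (name and placement are illustrative, not from the source) that uses the per-camera transforms built above to project a 3D world point into pixel coordinates, e.g. to label the segmentation dataset mentioned in the comment.

    def project_point_to_camera(self, point_xyz, camera_id):
        """Return the (x, y) pixel coordinates of a world point for the given camera."""
        return self.cam_transform[camera_id].imageFromSpace(np.array(point_xyz))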
Example #8
@st.composite
def simple_orientation(draw,
                       elevation=st.floats(0, 1000),
                       tilt_deg=st.floats(0, 90)):
    return ct.SpatialOrientation(draw(elevation), draw(tilt_deg))