def _draw_clouds_actors(self):
    """Refresh every visible point-cloud actor from its latest sample.

    For each datasource registered in the viewport, this pushes the current
    sample into the actor's cloud, wires extrinsics-dirty signals so the
    cloud is invalidated when calibration changes, and applies per-point RGB
    colors for '-rgb' datasources.
    """
    # point cloud actors:
    for datasource, actor in self.viewport.pclActors.items():
        # Skip actors the user has toggled off in the controls panel.
        if not self.controls.showActor[datasource]:
            continue
        package = actor['packages']
        cloud = actor['cloud']
        sensor = self.platform.sensors[platform_utils.extract_sensor_id(
            datasource)]
        # Invalidate the cloud whenever this sensor's extrinsics change.
        sensor.extrinsics_dirty.connect(cloud.makeDirty)
        if datasource != self.ds_name:
            # For non-reference datasources, chain the reference sensor's
            # dirty signal so a reference-calibration change also marks
            # this sensor (and thus its cloud) dirty.
            ref_sensor = self.platform.sensors[
                platform_utils.extract_sensor_id(self.ds_name)]
            ref_sensor.extrinsics_dirty.connect(sensor.extrinsics_dirty)
        sample: PointCloud = self._get_sample(datasource)
        # NOTE(review): undistortion is anchored to self.sample (the
        # reference datasource's sample), not the local `sample` — presumably
        # so all clouds are motion-compensated to a common timestamp; confirm.
        cloud.undistortRefTs = int(self.sample.timestamp)
        cloud.sample.variant = sample
        if '-rgb' in datasource:
            #TODO: generalize how colors are obtained from the sample
            # Build an Nx4 RGBA array; alpha stays at 1.0 (from np.ones),
            # channels are normalized from [0,255] to [0,1].
            colors = np.ones((sample.size, 4))
            colors[:, 0] = sample.get_field('r') / 255
            colors[:, 1] = sample.get_field('g') / 255
            colors[:, 2] = sample.get_field('b') / 255
            cloud._colors.set_ndarray(colors)
        if isinstance(sample, Echo):
            # Echo samples expose a masked view used by the package actor.
            package.variant = sample.masked
    # FIXME: port 2d viewers to das.api too
def __init__(self, cfg: dict, use_test_set: bool = False):
    """Build the synchronized platform described by *cfg*.

    Args:
        cfg: full configuration dict; the 'DATASET' section selects the
            datasets, synchronization settings and optional virtual
            datasources / training-frame filtering.
        use_test_set: when True, load 'TEST_SET' instead of 'TRAIN_SET'
            and skip training-frame selection.
    """
    self.cfg = cfg
    self.use_test_set = use_test_set

    dataset_cfg = self.cfg['DATASET']
    if use_test_set:
        datasets = dataset_cfg['TEST_SET']
    else:
        datasets = dataset_cfg['TRAIN_SET']

    cfg_sync = dataset_cfg['SYNCHRONIZATION']
    # Sensors to include: every sensor referenced by a sync or interp label.
    sensors = [
        extract_sensor_id(label)
        for label in cfg_sync['sync_labels'] + cfg_sync['interp_labels']
    ]

    # Optional virtual datasources configuration (None when absent).
    cfg_vd = dataset_cfg.get('VIRTUAL_DATASOURCES')

    self.platform = platform.SynchronizedGroup(
        datasets,
        **cfg_sync,
        include=sensors,
        preload=True,
        virtual_datasources_config=cfg_vd)

    # Training-frame selection only applies to the training set.
    if not use_test_set and 'TRAIN_FRAME_SELECTION' in dataset_cfg:
        indices = filter_training_frames(self.cfg)
        self.platform = platform.Filtered(self.platform, indices)

    self.data_augmentation = False
    self.insert_files = {}
def get_rgb_from_camera_projection(self,
                                   camera: str,
                                   undistort: bool = False,
                                   return_mask: bool = False):
    """Returns the rgb data for each point from its projected position in camera.

    Args:
        camera: (str) name of the camera datasource (ex: 'flir_bbfc_flimg')
        undistort: (bool) if True, motion compensation is applied to the
            points before the projection (default is False)
        return_mask: (bool) if True, also returns the mask that only
            includes points inside the camera fov.

    Returns:
        rgb: A Nx3 array, where N is the number of points in the point
            cloud. RGB data is in the range [0,255]
        mask (optional): a Nx1 array of booleans. Values are True where
            points are inside the camera fov. False elsewhere.
    """
    # Camera frame closest in time to this sample.
    image_sample = self.datasource.sensor.pf[camera].get_at_timestamp(
        self.timestamp)

    # Points expressed in the camera referential.
    pcloud = self.point_cloud(
        referential=platform.extract_sensor_id(camera),
        undistort=undistort)

    # Project into pixel space; mask flags the points inside the fov.
    projection, mask = image_sample.project_pts(pcloud,
                                                mask_fov=True,
                                                output_mask=True)
    pixel_coords = projection.astype(int)

    # Sample the image at each projected pixel; out-of-fov points keep 0.
    image = image_sample.raw_image()
    rgb = np.zeros((pcloud.shape[0], 3))
    rgb[mask, :] = image[pixel_coords[:, 1], pixel_coords[:, 0]]

    return (rgb, mask) if return_mask else rgb
def _set_cylindrical_projection(self):
    """Build the CylindricalProjection for the cameras in ``self.dependencies``.

    Gathers, for each dependent camera, its intrinsic matrix, distortion
    coefficients and its extrinsics relative to the center camera, then
    stores the resulting projection on ``self.cylindrical_projection``.
    """
    pf = self.datasources[self.dependencies[0]].sensor.platform
    # Comprehensions instead of list(map(lambda ...)) — same results, clearer.
    cameras = [
        platform_utils.extract_sensor_id(camera)
        for camera in self.dependencies
    ]
    # Hoisted loop invariant: all extrinsics are expressed relative to the
    # center camera.
    center_camera = cameras[Pos.CENTER.value]
    intrinsics_calibrations = [
        pf.intrinsics[camera]['matrix'] for camera in cameras
    ]
    distortion_coefficients = [
        pf.intrinsics[camera]['distortion'] for camera in cameras
    ]
    extrinsic_calibrations = [
        pf.extrinsics[camera][center_camera] for camera in cameras
    ]
    self.cylindrical_projection = CylindricalProjection(
        intrinsics_calibrations, distortion_coefficients,
        extrinsic_calibrations, self.config)
def main(cfg, state, dataset, input_datasource):
    """Attach a DasPredictor virtual datasource to a platform and open the viewer."""
    # FIXME: crash if test set is a directory of multiple datasets
    if dataset == 'test_set':
        dataset = cfg['DATASET']['TEST_SET'][0]

    pf = Platform(dataset)
    dataset_cfg = cfg['DATASET']
    if 'VIRTUAL_DATASOURCES' in dataset_cfg:
        pf.add_virtual_datasources(dataset_cfg['VIRTUAL_DATASOURCES'])

    # Register the predictor output as a datasource on the labeled sensor.
    predictor = DasPredictor(cfg, state, input_datasource)
    sensor_name = extract_sensor_id(dataset_cfg['LABEL'])
    pf[sensor_name].add_datasource(predictor, predictor.ds_type)

    viewer = Viewer(None, pf)
    viewer.run()
def __init__(self, reference_sensor: str, dependencies: list, undistort: bool = False):
    """Constructor

    Args:
        reference_sensor (str): The name of the sensor (e.g. 'pixell_bfc').
        dependencies (list): A list of the datasource names.
            The first element should be a point cloud datasource
            (e.g. 'pixell_bfc_ech')
            The second element should be a camera image datasource
            (e.g. 'flir_bfc_img')
        undistort (bool): if True, motion compensation is applied to the
            pcloud before the camera projection. Doesn't affect the point
            positions, only the rgb data.
    """
    # Plain string literal: the original used an f-string with no placeholders.
    super(RGBCloud, self).__init__('xyzit-rgb', dependencies, None)
    self.reference_sensor = reference_sensor
    self.original_pcloud_datasource = dependencies[0]
    self.original_image_datasource = dependencies[1]
    self.camera_name = platform.extract_sensor_id(dependencies[1])
    self.undistort = undistort
    # NOTE(review): 'u8' is uint64 (8 bytes); 8-bit RGB channels would be
    # 'u1'. Kept as-is because downstream code may rely on this width —
    # confirm before narrowing.
    self.dtype = np.dtype([('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                           ('i', 'u2'), ('t', 'u8'), ('r', 'u8'),
                           ('g', 'u8'), ('b', 'u8')])