def __init__(self, window, platform, synchronized, ds_name):
    """Trace-viewer window bound to one trace datasource.

    Args:
        window: QML window wrapper exposing viewport/controls and the figure child.
        platform: platform object, indexable by datasource name.
        synchronized: synchronized sample accessor for the platform.
        ds_name: full datasource name (``<sensor>_<position>_<type>``).
    """
    super(TracesWindow, self).__init__(window, platform)
    self.window.setTitle(ds_name)
    self.synchronized = synchronized
    self.viewport = self.window.viewport
    self.controls = self.window.controls
    self.ds_name = ds_name
    # matplotlib figure embedded in the QML backend; two stacked axes (211/212)
    self.backend = self.window.findChild(QObject, "figure")
    self.figure = self.backend.getFigure()
    self.ax = [self.figure.add_subplot(s) for s in [211, 212]]
    self.datasource = self.platform[self.ds_name]
    sensor_name, sensor_pos, trr_ds_type = platform_utils.parse_datasource_name(
        self.ds_name)
    # companion echo datasources (real and virtual) for the same sensor/position
    self.ech_ds_name = f'{sensor_name}_{sensor_pos}_ech'
    self.has_echoes = self.ech_ds_name in self.platform.datasource_names()
    self.virtual_ech_ds_name = f'{sensor_name}_{sensor_pos}_ech-{trr_ds_type}'
    self.has_virtual_echoes = self.virtual_ech_ds_name in self.platform.datasource_names(
    )
    if self.has_virtual_echoes:
        self.window.useVirtualEchoes.visible = True
    # mutable UI state updated by hover/selection handlers
    self.helper = None
    self.image = None
    self.hover_coords = None
    self.hovering = False
    self.selection = []
    self.trace_processing = None
    self.drawn_traces = []
    # hide the 'remove static noise' control when the sensor has no static noise
    if self.datasource.sensor.static_noise is None or self.datasource.sensor.static_noise == 0:
        self.window.removeStaticVisible = False
def label_names(self):
    '''Converts the category numbers into their corresponding names
    (e.g. 0 -> 'pedestrian') and returns the list of names for all boxes
    in the sample.

    Returns:
        list of str, one name per box, or None (with a logged warning)
        when the label source has no category table.
    '''
    label_source_name = categories.get_source(
        platform_utils.parse_datasource_name(self.datasource.label)[2])
    try:
        return [
            categories.CATEGORIES[label_source_name][str(category_number)]['name']
            for category_number in self.raw['data']['classes']
        ]
    # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # unrelated bugs; a missing source/number lookup raises KeyError.
    except KeyError:
        LoggingManager.instance().warning(f"Can not find the CATEGORIES and NAMES of {label_source_name}.")
def __update_seg_2d(self, sample, image):
    """Overlay the enabled 2d-segmentation polygons on the image axes.

    Args:
        sample: image sample providing the reference timestamp.
        image: unused here; kept for signature compatibility with callers.
    """
    datasources = [
        ds_name for ds_name, show in self.show_seg_2d.items() if show
    ]
    for ds_name in datasources:
        seg_sample = self.platform[ds_name].get_at_timestamp(
            sample.timestamp)
        _, _, ds_type = parse_datasource_name(ds_name)
        annotation_source = categories.get_source(ds_type)
        # only draw annotations within 1 s (1e6 us) of the image sample
        if np.abs(np.int64(sample.timestamp) - seg_sample.timestamp) <= 1e6:
            if 'poly2d' in ds_name:
                raw = seg_sample.raw
                poly2d = raw['data']
                if 'confidence' in raw:
                    mask = raw['confidence'] > self.conf_threshold
                    poly2d = poly2d[mask]
            elif 'seg2d' in ds_name:
                poly2d = seg_sample.poly2d(self.conf_threshold)
            else:
                # FIX: previously fell through with `poly2d` unbound (or stale
                # from the previous iteration) for unrecognized datasource names
                continue
            for poly in poly2d:
                name, color = categories.get_name_color(
                    annotation_source, poly['classes'])
                # FIX: was `is not ''` (identity comparison on a literal) and
                # `break`, which dropped every remaining polygon once one was
                # filtered out; siblings use `continue`
                if self.category_filter != '' and name not in self.category_filter:
                    continue
                color = np.array(color) / 255
                patch = Polygon(poly['polygon'], closed=True, linewidth=1,
                                edgecolor=color, facecolor=list(color) + [0.15])
                self.ax.add_patch(patch)
def _draw_frustrum(self):
    """Add a frustum wireframe actor for echo datasources (built only once)."""
    _, _, ds_type = platform_utils.parse_datasource_name(self.ds_name)
    self.sample = self._get_sample(self.ds_name)
    # nothing to draw for non-echo datasources, and the actor is cached
    if 'ech' not in ds_type or hasattr(self, 'frustrum'):
        return
    sensor = self.sample.datasource.sensor
    specs = self.sample.specs
    if sensor.angle_chart:
        # per-channel corrected directions from the sensor's angle chart
        cached = self.sample.cache()
        corrected_angles = sensor.get_corrected_projection_data(
            self.sample.timestamp, cached, 'angles')
        v_size, h_size = clouds.v_h_cell_size_rad(specs)
        directions = clouds.custom_frustrum_directions(
            corrected_angles, v_size, h_size, dtype=np.float64)
        indices, vertices = clouds.frustrum(directions, 40)
    else:
        # simple rectangular field of view
        directions = clouds.frustrum_directions(
            specs['v_fov'], specs['h_fov'], dtype=np.float64)
        indices, vertices = clouds.frustrum(directions)
    if self.sample.orientation is not None:
        vertices = (self.sample.orientation @ vertices.T).T
    wireframe = CustomActors.lines(indices, vertices, color=QColor("lightgray"))
    self.frustrum = self.viewport.actors.addActor(wireframe)
def __get_datasource_to_show_seg3d(self, datasource_name):
    """Pick the point-cloud datasource on which 3d segmentation is displayed.

    Prefers the echo datasource of the same sensor/position, falling back
    to the xyzit one; raises when neither is available.
    """
    sensor_name, pos, ds_type = parse_datasource_name(datasource_name)
    for suffix in ('ech', 'xyzit'):
        candidate = f'{sensor_name}_{pos}_{suffix}'
        if candidate in self.show_actor:
            return candidate
    #TODO: Edge case, handle this better
    raise Exception('It is impossible to show 3d segmentation if there is not an echo or xyzit datasource in dataset')
def __update_bbox_3d(self, sample: Image):
    """Project the enabled 3d boxes (Box3d API) into the image and draw
    their faces as a PolyCollection plus optional text labels.

    Args:
        sample: image sample providing ``project_pts()`` and the timestamp.
    """
    for ds_name, show in self.show_bbox_3d.items():
        if not show:
            continue
        box_source = categories.get_source(parse_datasource_name(ds_name)[2])
        box3d_sample: Box3d = self.platform[ds_name].get_at_timestamp(sample.timestamp)
        # only draw annotations within 1 s (1e6 us) of the image sample
        if np.abs(float(box3d_sample.timestamp) - float(sample.timestamp)) > 1e6:
            continue
        box3d = box3d_sample.set_referential(self.datasource, ignore_orientation=True)
        category_numbers = box3d.get_category_numbers()
        poly_collection = []
        color_collection = []
        for box_index in range(len(box3d)):
            center = box3d.get_dimensions() and box3d.get_centers()[box_index]
            center = box3d.get_centers()[box_index]
            dimension = box3d.get_dimensions()[box_index]
            rotation = box3d.get_rotations()[box_index]
            confidence = box3d.get_confidences()[box_index]
            category_name, color = categories.get_name_color(
                box_source, category_numbers[box_index])
            box_id = box3d.get_ids()[box_index]  # renamed: `id` shadowed the builtin
            if confidence:
                if confidence < self.conf_threshold:
                    continue
            # FIX: was `is not ''` (identity comparison on a literal)
            if self.category_filter != '':
                if category_name not in self.category_filter:
                    continue
            color = np.array(color) / 255
            if self.use_box_colors:
                color = utils.to_numpy(QColor(self.box_3d_colors[ds_name]))[:3]
            vertices = linalg.bbox_to_8coordinates(center, dimension, rotation)
            p, mask_fov = sample.project_pts(vertices, mask_fov=False, output_mask=True,
                                             undistorted=self.undistortimage, margin=1000)
            if p[mask_fov].shape[0] < 8:
                continue  # box is partly outside the field of view
            faces = [[0, 1, 3, 2], [0, 1, 5, 4], [0, 2, 6, 4],
                     [7, 3, 1, 5], [7, 5, 4, 6], [7, 6, 2, 3]]
            for face in faces:
                poly = np.vstack([p[face[0]], p[face[1]], p[face[2]],
                                  p[face[3]], p[face[0]]])
                poly_collection.append(poly)
                color_collection.append(color)
            if self.box_labels_size > 0:
                text_label = category_name
                if box_id:
                    text_label += f" {box_id}"
                if confidence:
                    text_label += f" ({int(confidence*100)}%)"
                txt = self.ax.text(p[:, 0].min(), p[:, 1].min(), text_label,
                                   color='w', fontweight='bold',
                                   fontsize=self.box_labels_size, clip_on=True)
                txt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
        alpha = 0.05
        facecolors = [list(c) + [alpha] for c in color_collection]
        poly_collection = PolyCollection(poly_collection, linewidths=0.5,
                                         edgecolors=color_collection,
                                         facecolors=facecolors)
        self.ax.add_collection(poly_collection)
def __update_box2D(self, sample, image, box):
    """Draw the enabled 2d bounding boxes (raw-sample API) on the image axes.

    Args:
        sample: sample providing the reference timestamp.
        image: ndarray whose shape converts relative box coords to pixels.
        box: unused; kept for signature compatibility with existing callers.
    """
    datasources = [
        ds_name for ds_name, show in self.show_bbox_2d.items() if show
    ]
    for ds_name in datasources:
        _, _, ds_type = parse_datasource_name(ds_name)
        box_source = categories.get_source(ds_type)
        box2d_sample = self.platform[ds_name].get_at_timestamp(
            sample.timestamp)
        # only draw annotations within 1 s (1e6 us) of the sample
        if np.abs(np.int64(sample.timestamp) - box2d_sample.timestamp) <= 1e6:
            raw = box2d_sample.raw
            if 'confidence' in raw:
                mask = (raw['confidence'] > self.conf_threshold)
                box2d = raw['data'][mask]
            else:
                box2d = raw['data']
            # renamed loop variable: it used to shadow the `box` parameter
            for i, b in enumerate(box2d):
                # boxes are stored center-based in image-relative coordinates
                top = (b['x'] - b['h'] / 2) * image.shape[0]
                left = (b['y'] - b['w'] / 2) * image.shape[1]
                name, color = categories.get_name_color(box_source, b['classes'])
                # FIX: was `is not ''` (identity comparison on a literal)
                if self.category_filter != '' and name not in self.category_filter:
                    continue
                color = np.array(color) / 255
                if self.use_box_colors:
                    color = utils.to_numpy(QColor(self.box_2d_colors[ds_name]))[:3]
                if 'confidence' in raw:
                    conf = raw['confidence'][mask][i]
                    name = f"{name}({conf:.3f})"
                rect = Rectangle((left, top), b['w'] * image.shape[1],
                                 b['h'] * image.shape[0], linewidth=1,
                                 edgecolor=color, facecolor=list(color) + [0.15])
                self.ax.add_patch(rect)
                if self.box_labels_size > 0:
                    txt = self.ax.text(left, top, name + ':' + str(b['id']),
                                       color='w', fontweight='bold',
                                       fontsize=self.box_labels_size, clip_on=True)
                    txt.set_path_effects([
                        PathEffects.withStroke(linewidth=1, foreground='k')
                    ])
def _draw_sementic_segmentation_actors(self):
    """Feed each enabled 3d-segmentation sample and its matching point
    cloud into the viewport's segmentation actors."""
    # segmentation 3d actors:
    seg_actors = self.viewport.segActors
    for datasource, actor in seg_actors.items():
        if not self.controls.showSeg3D[datasource]:
            continue
        package = actor['packages']
        cloud = actor['cloud']
        ds_name, pos, _ = platform_utils.parse_datasource_name(datasource)
        # pick the point-cloud datasource that carries the same points
        if f'{ds_name}_{pos}_ech' in self.viewport.pclActors:
            pcl_ds = f'{ds_name}_{pos}_ech'
            cloud.method = 'quad_cloud'
        elif f'{ds_name}_{pos}_xyzit' in self.viewport.pclActors:
            pcl_ds = f'{ds_name}_{pos}_xyzit'
            cloud.method = 'get_point_cloud'
        elif f'{ds_name}_{pos}_xyzvcfar' in self.viewport.pclActors:
            pcl_ds = f'{ds_name}_{pos}_xyzvcfar'
            cloud.method = 'get_point_cloud'
        elif f'{ds_name}_{pos}_xyzvi' in self.viewport.pclActors:
            pcl_ds = f'{ds_name}_{pos}_xyzvi'
            cloud.method = 'get_point_cloud'
        else:
            # FIX: previously fell through with `pcl_ds` unbound (NameError on
            # the first iteration, stale value afterwards)
            continue
        pcl_sample = self._get_sample(pcl_ds)
        seg_sample = self._get_sample(datasource)
        len_seg3d = seg_sample.raw['data'].shape[0]
        len_pcl = pcl_sample.masked['data'].shape[0]
        if len_seg3d != len_pcl:
            # FIX: the two fragments used to concatenate without a space
            # ("...3D datadoes not match...")
            print(
                f'Warning. The length ({len_seg3d}) of the segmentation 3D data ' +
                f'does not match the length ({len_pcl}) of the point cloud.')
        # TODO: categoryFilter is not applied to segmentation3d here
        cloud.undistortRefTs = int(self.sample.timestamp)
        cloud.sample.variant = pcl_sample
        cloud.seg3DSample.variant = seg_sample
        if isinstance(pcl_sample, Echo):
            package.variant = pcl_sample.masked
def colored_image(self, resolution: tuple = None):
    """Rasterize the polygon annotations into an RGB image.

    Args:
        resolution: optional target resolution to resize the result to.

    Returns:
        np.ndarray of shape (H, W, 3), dtype uint8, with each polygon
        filled with its category color.
    """
    polygons = self.raw
    _, _, ds_type = platform.parse_datasource_name(self.datasource.label)
    poly_source = categories.get_source(ds_type)
    image = np.zeros(
        (polygons['resolution'][0], polygons['resolution'][1], 3),
        dtype=np.uint8)
    for poly in polygons['data']:
        name, color = categories.get_name_color(poly_source, poly['classes'])
        # FIX: the color used to be divided by 255 before filling a uint8
        # image, truncating every channel to 0 (a black image); keep the
        # original 0-255 channel values and pass them as a tuple, which is
        # what cv2.fillPoly expects for its color argument.
        cv2.fillPoly(image, [poly['polygon']], tuple(int(c) for c in color))
    return self.resize_mask(
        image, resolution) if resolution is not None else image
def mask_category(self, category):
    """Boolean mask of the points whose class matches `category`.

    Args:
        category: category number (int) or category name (str).

    Returns:
        np.ndarray of bool, one entry per annotated point. An unknown
        category name yields an all-False mask.

    Raises:
        ValueError: if `category` is neither an int nor a str.
    """
    classes = self.raw['data']['classes']
    if isinstance(category, int):
        return classes == category
    if isinstance(category, str):
        _, _, ds_type = platform.parse_datasource_name(self.datasource.label)
        seg_source = categories.get_source(ds_type)
        try:
            category_number = categories.get_category_number(seg_source, category)
        # FIX: was a bare `except:`; still broad because the helper's failure
        # mode is project-defined, but no longer catches KeyboardInterrupt
        except Exception:
            category_number = -1  # -1 never matches any class number
        return classes == category_number
    raise ValueError('The category must be either an integer or a string.')
def set_custom_viewports(self, ds_names, callbacks):
    """Create one QML viewport window per datasource and wire its cursor
    callback.

    Args:
        ds_names: datasource names, one custom viewport each.
        callbacks: cursor callbacks, parallel to `ds_names`.

    Returns:
        The dict of registered custom-viewport DasCallback objects.
    """
    QQmlProperty.write(self.leddar_vp.root, "customViewports", ds_names)
    self.custom_viewport_windows = {}
    # spin the Qt event loop until QML has instantiated every window
    while not all(k in self.custom_viewport_windows for k in ds_names):
        QApplication.processEvents()
        self.custom_viewport_windows = QQmlProperty.read(
            self.leddar_vp.root, "customViewportWindows").toVariant()
    self.custom_datasources = {}
    for i, ds in enumerate(ds_names):
        w = self.custom_viewport_windows[ds]
        cb = DasCallback(w, self.pf, ds)
        cb.vp = QQmlProperty.read(w, "viewport")
        cb.cursor_callback = callbacks[i]

        # closure reads everything through `context`, so it is safe to
        # define inside the loop (no late-binding capture of `i`/`ds`)
        def update(context):
            cursor = int(QQmlProperty.read(context.window, "cursor"))
            context.cursor_callback(cursor, context)
        cb.wrap_callback(update)
        cb.callback()  # fire once immediately so the viewport starts populated
        cb.connect_to(w.cursorChanged)
        cb.connect_to(w.visibleChanged)
        self.callbacks["custom_viewport_windows"][ds] = cb
        sensor_type, position, datasource = platform_utils.parse_datasource_name(
            ds)
        self.custom_datasources[ds] = {
            'sensor_name': f"{sensor_type}_{position}",
            'ds_name': datasource,
            'full_ds_name': ds,
            'size': len(self.synchronized)
        }
    QQmlProperty.write(self.leddar_vp.root, "customDatasources",
                       self.custom_datasources)
    return self.callbacks["custom_viewport_windows"]
def colors(self, mode=None):
    """Per-point RGBA colors (channels in [0, 1], alpha = 1) derived from
    the class labels; quad-stacked when mode == 'quad_cloud'."""
    _, _, ds_type = platform.parse_datasource_name(self.datasource.label)
    seg_source = categories.get_source(ds_type)
    classes = self.raw['data']['classes']
    rgba_per_point = np.zeros((len(classes), 4))
    for class_number in np.unique(classes):
        _, rgb = categories.get_name_color(seg_source, class_number)
        # scale channels to [0, 1] and append full opacity
        rgba = np.append(np.array(rgb) / 255., 1)
        rgba_per_point[classes == class_number] = rgba
    if mode == 'quad_cloud':
        rgba_per_point = clouds.quad_stack(rgba_per_point)
    return rgba_per_point
def _update_intervals(self):
    """Push the distance/amplitude rejection intervals edited in the UI
    into the sensor config, then invalidate caches and redraw."""
    sensor_type, pos, ds_type = platform_utils.parse_datasource_name(
        self.ds_name)
    lcax = self.platform[f'{sensor_type}_{pos}']

    def paired(values):
        # flat [a0, b0, a1, b1, ...] -> [[a0, b0], [a1, b1], ...]
        return [[values[2 * k], values[2 * k + 1]]
                for k in range(len(values) // 2)]

    lcax.config['dist_reject_intervals'] = paired(self.controls.distIntervals)
    lcax.config['amp_reject_intervals'] = paired(self.controls.ampIntervals)
    lcax[ds_type].invalidate_caches()
    self._update()
def __update_box2D(self, sample, image):
    """Draw the enabled 2d bounding boxes (Box2d API) on the image axes.

    Args:
        sample: sample providing the reference timestamp.
        image: ndarray whose shape converts relative box coords to pixels.
    """
    for ds_name, show in self.show_bbox_2d.items():
        if not show:
            continue
        box_source = categories.get_source(parse_datasource_name(ds_name)[2])
        box2d: Box2d = self.platform[ds_name].get_at_timestamp(sample.timestamp)
        # only draw annotations within 1 s (1e6 us) of the sample
        if np.abs(float(box2d.timestamp) - float(sample.timestamp)) > 1e6:
            continue
        category_numbers = box2d.get_category_numbers()
        for box_index in range(len(box2d)):
            center = box2d.get_centers()[box_index]
            dimension = box2d.get_dimensions()[box_index]
            confidence = box2d.get_confidences()[box_index]
            category_name, color = categories.get_name_color(
                box_source, category_numbers[box_index])
            box_id = box2d.get_ids()[box_index]  # renamed: `id` shadowed the builtin
            if confidence:
                if confidence < self.conf_threshold:
                    continue
            # FIX: was `is not ''` (identity comparison on a literal)
            if self.category_filter != '':
                if category_name not in self.category_filter:
                    continue
            color = np.array(color) / 255
            if self.use_box_colors:
                # NOTE(review): this reads box_3d_colors for 2d boxes — the raw
                # __update_box2D uses box_2d_colors; confirm which is intended.
                color = utils.to_numpy(QColor(self.box_3d_colors[ds_name]))[:3]
            # boxes are stored center-based in image-relative coordinates
            top = (center[0] - dimension[0] / 2) * image.shape[0]
            left = (center[1] - dimension[1] / 2) * image.shape[1]
            rect = Rectangle((left, top), dimension[1] * image.shape[1],
                             dimension[0] * image.shape[0], linewidth=1,
                             edgecolor=color, facecolor=list(color) + [0.15])
            self.ax.add_patch(rect)
            if self.box_labels_size > 0:
                text_label = category_name
                if box_id:
                    text_label += f" {box_id}"
                if confidence:
                    text_label += f" ({int(confidence*100)}%)"
                txt = self.ax.text(left, top, text_label, color='w',
                                   fontweight='bold',
                                   fontsize=self.box_labels_size, clip_on=True)
                txt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
def mask_category(self, category: str, resolution: tuple = None, confidence_threshold: float = 0.5):
    """Binary mask of every polygon belonging to `category`.

    Args:
        category: category name to keep.
        resolution: optional target resolution to resize the mask to.
        confidence_threshold: polygons below this confidence are skipped
            (only when the annotation carries confidences).

    Returns:
        np.ndarray of dtype uint8 (1 inside matching polygons, 0 elsewhere).
    """
    polygons = self.raw
    _, _, ds_type = platform.parse_datasource_name(self.datasource.label)
    poly_source = categories.get_source(ds_type)
    height, width = polygons['resolution'][0], polygons['resolution'][1]
    mask = np.zeros((height, width), dtype=np.uint8)
    has_confidence = 'confidence' in polygons
    for index, poly in enumerate(polygons['data']):
        if has_confidence and polygons['confidence'][index] < confidence_threshold:
            continue
        name, _ = categories.get_name_color(poly_source, poly['classes'])
        if name != category:
            continue
        cv2.fillPoly(mask, [poly['polygon']], 1)
    if resolution is None:
        return mask
    return self.resize_mask(mask, resolution)
def sensor_type(self):
    """Sensor type prefix parsed from the datasource label."""
    sensor, _position, _ds_type = platform.parse_datasource_name(
        self.datasource.label)
    return sensor
def _draw_bounding_box_actors(self):
    """Rebuild the 3d bounding-box actors (raw-sample API) for every
    enabled datasource, with optional confidence/IoU labels."""
    ## Attempt to add Box in Viewer API
    ## WIP to have a better implementation
    for datasource, actors in self.viewport.bboxActors.items():
        actors['actor'].clearActors()
        if not self.controls.showBBox3D[datasource]:
            continue
        sample = self._get_sample(datasource)
        # only draw annotations within 1 s (1e6 us) of the reference sample
        if np.abs(
                np.int64(self._ds_name_sample.timestamp) -
                sample.timestamp) <= 1e6:
            _, _, ds_type = platform_utils.parse_datasource_name(datasource)
            box_source = categories.get_source(ds_type)
            raw = sample.raw
            bbox = sample.mapto(self.ds_name, ignore_orientation=True)
            mask = (bbox['flags'] >= 0)
            if 'confidence' in raw:
                mask = mask & (sample.raw['confidence'] >
                               int(self.controls.confThreshold) / 100.0)
            # FIX (here and below): strings were compared with `is not` (identity)
            if self.controls.showIoU and self.controls.refdsIoU != '':
                scores_iou = sample.compute_iou(
                    box=self._get_sample(self.controls.refdsIoU),
                    return_max=True, map2yaw=None)
            if len(bbox[mask]) > 0:
                for i, box in enumerate(bbox[mask]):
                    c = box['c']
                    d = box['d']
                    r = box['r']
                    has_attributes = 'attributes' in raw
                    if has_attributes:
                        attributes = raw['attributes'][mask][i]
                    name, color = categories.get_name_color(
                        box_source, box['classes'])
                    if self.controls.categoryFilter != '':
                        if name not in self.controls.categoryFilter:
                            continue
                    color = QColor.fromRgb(*color)
                    text_color = QColor('white')
                    if self.controls.useBoxColors:
                        text_color = color = QColor(
                            self.controls.box3DColors[datasource])
                    if 'confidence' in raw:
                        conf = raw['confidence'][mask][i]
                        name = f'{name}({conf:.3f})'
                    if self.controls.showIoU and self.controls.refdsIoU != '':
                        name = f'{name}[IoU={scores_iou[mask][i]:.3f}]'
                    bbox_actor, text_anchor = CustomActors.bbox(
                        c, d, r, color=color, return_anchor=True)
                    bbox_actor.effect.lineWidth = 2
                    if has_attributes:
                        bbox_actor.hovered.connect(
                            self._update_cursor(actors['cursor'], attributes))
                    tf = linalg.tf_from_pos_euler(text_anchor)
                    actors['actor'].addActor(bbox_actor)
                    if self.controls.boxLabelsSize > 0:
                        text_actor = CustomActors.text(
                            name, color=text_color, origin=[0, 0, 0],
                            v=[0, -1, 0], matrix=utils.from_numpy(tf),
                            scale=0.1,
                            font_size=self.controls.boxLabelsSize,
                            line_width=3, is_billboard=True)
                        actors['actor'].addActor(text_actor)
def _draw_bounding_box_actors(self):
    """Rebuild the 3d bounding-box actors (Box3d API) for every enabled
    datasource, with optional id/confidence text labels."""
    for ds_name, actors in self.viewport.bboxActors.items():
        actors['actor'].clearActors()
        if not self.controls.showBBox3D[ds_name]:
            continue
        box_source = categories.get_source(
            platform_utils.parse_datasource_name(ds_name)[2])
        box3d_sample: Box3d = self.platform[ds_name].get_at_timestamp(
            self.sample.timestamp)
        # only draw annotations within 1 s (1e6 us) of the current sample
        if np.abs(
                float(box3d_sample.timestamp) -
                float(self.sample.timestamp)) > 1e6:
            continue
        box3d = box3d_sample.set_referential(self.ds_name,
                                             ignore_orientation=True)
        category_numbers = box3d.get_category_numbers()
        for box_index in range(len(box3d)):
            center = box3d.get_centers()[box_index]
            dimension = box3d.get_dimensions()[box_index]
            rotation = box3d.get_rotations()[box_index]
            confidence = box3d.get_confidences()[box_index]
            category_name, color = categories.get_name_color(
                box_source, category_numbers[box_index])
            box_id = box3d.get_ids()[box_index]  # renamed: `id` shadowed the builtin
            if confidence:
                # confThreshold is a percentage in the UI; confidence is [0, 1]
                if confidence < int(self.controls.confThreshold) / 100.0:
                    continue
            # FIX: was `is not ''` (identity comparison on a literal)
            if self.controls.categoryFilter != '':
                if category_name not in self.controls.categoryFilter:
                    continue
            color = QColor.fromRgb(*color)
            text_color = QColor('white')
            if self.controls.useBoxColors:
                text_color = color = QColor(
                    self.controls.box3DColors[ds_name])
            bbox_actor, text_anchor = CustomActors.bbox(center, dimension,
                                                        rotation, color=color,
                                                        return_anchor=True)
            bbox_actor.effect.lineWidth = 2
            tf = linalg.tf_from_pos_euler(text_anchor)
            actors['actor'].addActor(bbox_actor)
            if self.controls.boxLabelsSize > 0:
                text_label = category_name
                if box_id:
                    text_label += f" {box_id}"
                if confidence:
                    # FIX: confidence is in [0, 1], so int(confidence) was
                    # always 0; scale to a percentage like the other views
                    text_label += f" ({int(confidence*100)}%)"
                text_actor = CustomActors.text(
                    text_label,
                    color=text_color,
                    origin=[0, 0, 0],
                    v=[0, -1, 0],
                    matrix=utils.from_numpy(tf),
                    scale=0.1,
                    font_size=self.controls.boxLabelsSize,
                    line_width=3,
                    is_billboard=True,
                )
                actors['actor'].addActor(text_actor)
def connect(self):
    """Wire every UI signal (or live-datasource callback) to this window's
    update methods, seed interval/voxel controls from the current config,
    then trigger an initial update."""
    if self.platform.is_live():
        # live platforms push updates from the datasource itself
        if not isinstance(self.platform[self.ds_name], VirtualDatasource):
            self.platform[self.ds_name].ds.connect(self._update)
        else:
            # virtual datasources update when their first dependency does
            self.platform[self.platform[
                self.ds_name].dependencies[0]].ds.connect(self._update)
    else:
        # offline platforms update when the playback cursor moves
        self.add_connection(self.window.cursorChanged.connect(
            self._update))
    self.add_connection(self.window.visibleChanged.connect(self._update))
    self.add_connection(
        self.controls.showActorChanged.connect(self._update))
    self.add_connection(
        self.controls.showBBox3DChanged.connect(self._update))
    self.add_connection(
        self.controls.showSeg3DChanged.connect(self._update))
    self.add_connection(
        self.controls.showLanesChanged.connect(self._update))
    self.add_connection(
        self.controls.useBoxColorsChanged.connect(self._update))
    self.add_connection(
        self.controls.boxLabelsSizeChanged.connect(self._update))
    self.add_connection(self.controls.videoChanged.connect(self._update))
    self.add_connection(
        self.controls.confThresholdChanged.connect(self._update))
    self.add_connection(self.controls.showIoUChanged.connect(self._update))
    self.add_connection(
        self.controls.categoryFilterChanged.connect(self._update))
    self.add_connection(
        self.controls.submitVoxelMapMemory.clicked.connect(
            self._update_voxel_map))
    self.add_connection(
        self.controls.voxelSizeChanged.connect(self._update_voxel_map))
    self.add_connection(
        self.controls.amplitudeTypeChanged.connect(
            self._update_amplitude_type))
    sensor_type, pos, ds_type = platform_utils.parse_datasource_name(
        self.ds_name)
    if ds_type.startswith('ech'):
        lcax = self.platform[f'{sensor_type}_{pos}']
        # load actual values in UI (flatten [[a, b], ...] interval pairs)
        self.controls.distIntervals = [
            i for sub in lcax.config['dist_reject_intervals'] for i in sub
        ]
        self.controls.ampIntervals = [
            i for sub in lcax.config['amp_reject_intervals'] for i in sub
        ]
        self.add_connection(
            self.controls.distIntervalsChanged.connect(
                self._update_intervals))
        self.add_connection(
            self.controls.ampIntervalsChanged.connect(
                self._update_intervals))
    if ds_type.startswith('xyzit-voxmap'):
        # load actual values in UI
        self.controls.voxelMapMemory = str(
            self.platform[self.ds_name].memory)
        self.controls.voxelMapSkip = str(self.platform[self.ds_name].skip)
        self.controls.voxelSizeText = str(
            self.platform[self.ds_name].voxel_size)
        # the UI slider works in log10 of the voxel size
        self.controls.voxelSize = float(
            np.log10(self.platform[self.ds_name].voxel_size))
    if 'radar' in self.platform._sensors.keys():
        self.controls.amplitudeTypeVisible = True
    self._update()
def __update_bbox_3d(self, sample, image, box):
    """Project the enabled 3d boxes (raw-sample API) into the image and
    draw their faces as a PolyCollection plus optional text labels.

    Args:
        sample: image sample providing ``project_pts()`` and the timestamp.
        image: unused here; kept for signature compatibility with callers.
        box: unused here; kept for signature compatibility with callers.
    """
    datasources = [
        ds_name for ds_name, show in self.show_bbox_3d.items() if show
    ]
    for ds_name in datasources:
        _, _, ds_type = parse_datasource_name(ds_name)
        box_source = categories.get_source(ds_type)
        if box_source not in categories.CATEGORIES:
            #FIXME: this should not be here
            box_source = 'deepen'
        box3d_sample = self.platform[ds_name].get_at_timestamp(
            sample.timestamp)
        # only draw annotations within 1 s (1e6 us) of the image sample
        if np.abs(np.int64(sample.timestamp) - box3d_sample.timestamp) <= 1e6:
            raw = box3d_sample.raw
            box3d = box3d_sample.mapto(self.datasource, ignore_orientation=True)
            mask = (box3d['flags'] >= 0)
            if 'confidence' in raw:
                mask = mask & (raw['confidence'] > self.conf_threshold)
            if len(box3d[mask]) > 0:
                poly_collection = []
                color_collection = []
                # renamed loop variable: it used to shadow the `box` parameter
                for i, b in enumerate(box3d[mask]):
                    name, color = categories.get_name_color(
                        box_source, b['classes'])
                    # FIX: was `is not ''` (identity) and `break`, which
                    # dropped every remaining box once one was filtered out
                    if self.category_filter != '' and name not in self.category_filter:
                        continue
                    color = np.array(color) / 255
                    if self.use_box_colors:
                        color = utils.to_numpy(
                            QColor(self.box_3d_colors[ds_name]))[:3]
                    if 'confidence' in raw:
                        conf = raw['confidence'][mask][i]
                        name = f"{name}({conf:.3f})"
                    vertices = linalg.bbox_to_8coordinates(
                        b['c'], b['d'], b['r'])
                    p, mask_fov = sample.project_pts(
                        vertices, mask_fov=False, output_mask=True,
                        undistorted=self.undistortimage, margin=1000)
                    if p[mask_fov].shape[0] < 8:
                        continue  # box is partly outside the field of view
                    faces = [[0, 1, 3, 2], [0, 1, 5, 4], [0, 2, 6, 4],
                             [7, 3, 1, 5], [7, 5, 4, 6], [7, 6, 2, 3]]
                    for face in faces:
                        poly = np.vstack([
                            p[face[0]], p[face[1]], p[face[2]], p[face[3]],
                            p[face[0]]
                        ])
                        poly_collection.append(poly)
                        color_collection.append(color)
                    if self.box_labels_size > 0:
                        txt = self.ax.text(p[:, 0].min(), p[:, 1].min(),
                                           name + ':' + str(b['id']),
                                           color='w', fontweight='bold',
                                           fontsize=self.box_labels_size,
                                           clip_on=True)
                        txt.set_path_effects([
                            PathEffects.withStroke(linewidth=1, foreground='k')
                        ])
                alpha = 0.05
                facecolors = [list(c) + [alpha] for c in color_collection]
                poly_collection = PolyCollection(
                    poly_collection, linewidths=0.5,
                    edgecolors=color_collection, facecolors=facecolors)
                self.ax.add_collection(poly_collection)