def get_visualization(self, analysis, result_path, data_path, project, main_window):
    """
    This function should show the complete Visualization
    """
    w = QWidget()
    w.setLayout(QVBoxLayout(w))
    w.layout().addWidget(QLabel("Color CIE-Lab:".rjust(20) + str(analysis.get_adata()['color_lab']), w))
    w.layout().addWidget(QLabel("Color BGR:".rjust(20) + str(analysis.get_adata()['color_bgr']), w))
    w.layout().addWidget(QLabel("Saturation Luebbe:".rjust(20) + str(analysis.get_adata()['saturation_l']), w))
    w.layout().addWidget(QLabel("Saturation FilmCo:".rjust(20) + str(analysis.get_adata()['saturation_p']), w))

    # Render the analysed BGR color as a small 25 x 100 px swatch below the labels
    view = EGraphicsView(w)
    view.set_image(numpy_to_pixmap(np.array([[analysis.get_adata()['color_bgr']] * 100] * 25).astype(np.uint8)))
    w.layout().addWidget(view)

    return [VisualizationTab(widget=w, name="Color-Features", use_filter=False, controls=None)]
def set_img_movie(self, img):
    self.img_movie = img
    if img is None:
        return

    # In headless mode, skip resizing and pixmap emission
    if self.project is not None and self.project.headless_mode:
        return

    # Downscale the frame to the cache width, keeping the aspect ratio
    fx = CACHE_WIDTH / img.shape[1]
    img = cv2.resize(img, None, None, fx, fx, cv2.INTER_CUBIC)
    self.img_movie = img

    # Emit the frame as a pixmap, with or without an alpha channel
    if img.shape[2] == 3:
        self.onImageSet.emit(self, self.img_movie, numpy_to_pixmap(img))
    elif img.shape[2] == 4:
        self.onImageSet.emit(self, self.img_movie, numpy_to_pixmap(img, cvt=cv2.COLOR_BGRA2RGBA, with_alpha=True))
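# The helper numpy_to_pixmap used above is not part of this snippet. The sketch below is an
# assumption about its contract, not the actual implementation: a BGR (or, via cvt/with_alpha,
# BGRA) numpy frame is converted into a QPixmap, assuming a PyQt5 GUI stack.
import cv2
import numpy as np
from PyQt5.QtGui import QImage, QPixmap

def numpy_to_pixmap_sketch(frame, cvt=cv2.COLOR_BGR2RGB, with_alpha=False):
    # Convert OpenCV channel order to the order Qt expects
    rgb = cv2.cvtColor(frame, cvt)
    height, width = rgb.shape[:2]
    fmt = QImage.Format_RGBA8888 if with_alpha else QImage.Format_RGB888
    qimage = QImage(rgb.data, width, height, rgb.strides[0], fmt)
    return QPixmap.fromImage(qimage.copy())  # copy() detaches the pixmap from the numpy buffer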
def get_preview(self, analysis: IAnalysisJobAnalysis):
    """
    This should return the Widget that is shown in the Inspector when the analysis is selected
    """
    w = QWidget()
    w.setLayout(QVBoxLayout(w))
    w.layout().addWidget(QLabel("Color CIE-Lab:".rjust(20) + str(analysis.get_adata()['color_lab']), w))
    w.layout().addWidget(QLabel("Color BGR:".rjust(20) + str(analysis.get_adata()['color_bgr']), w))
    w.layout().addWidget(QLabel("Saturation Luebbe:".rjust(20) + str(analysis.get_adata()['saturation_l']), w))
    w.layout().addWidget(QLabel("Saturation FilmCo:".rjust(20) + str(analysis.get_adata()['saturation_p']), w))

    # Render the analysed BGR color as a small 25 x 100 px swatch below the labels
    view = EGraphicsView(w)
    view.set_image(numpy_to_pixmap(np.array([[analysis.get_adata()['color_bgr']] * 100] * 25).astype(np.uint8)))
    w.layout().addWidget(view)
    return w
def get_visualization(self, analysis, result_path, data_path, project, main_window):
    """
    This function should show the complete Visualization
    """
    widget = EGraphicsView(None, auto_frame=True, has_context_menu=True)

    n = 20
    colormap = get_colormap(n)
    data = analysis.get_adata()

    # Paint every label index of the segmentation mask with its colormap color
    img = np.zeros(shape=data.shape + (3,), dtype=np.float32)
    for i in range(n):
        img[data == i] = colormap[i][:3]
    img = (img * 255).astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # colormap values are RGB; convert to BGR

    # Blend the colored mask over the original screenshot if one is attached
    if isinstance(analysis.target_container, Screenshot):
        scr = analysis.target_container.get_img_movie_orig_size()
        scr = cv2.resize(scr, img.shape[:2][::-1], interpolation=cv2.INTER_CUBIC).astype(np.float32)
        img = ((scr * 0.7) + (img * 0.3)).astype(np.uint8)

    widget.set_image(numpy_to_pixmap(img))
    return [VisualizationTab(widget=widget, name="Semantic Segmentation Mask", use_filter=False, controls=None)]
def get_preview(self, analysis: IAnalysisJobAnalysis):
    """
    This should return the Widget that is shown in the Inspector when the analysis is selected
    """
    widget = EGraphicsView(None, auto_frame=True)

    n = 20
    colormap = get_colormap(n)
    data = analysis.get_adata()

    # Paint every label index of the segmentation mask with its colormap color
    img = np.zeros(shape=data.shape + (3,), dtype=np.float32)
    for i in range(n):
        img[data == i] = colormap[i][:3]
    img = (img * 255).astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # colormap values are RGB; convert to BGR

    # Blend the colored mask over the original screenshot if one is attached
    if isinstance(analysis.target_container, Screenshot):
        scr = analysis.target_container.get_img_movie_orig_size()
        scr = cv2.resize(scr, img.shape[:2][::-1], interpolation=cv2.INTER_CUBIC).astype(np.float32)
        img = ((scr * 0.7) + (img * 0.3)).astype(np.uint8)

    widget.set_image(numpy_to_pixmap(img))
    return widget
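# get_colormap(n) is not shown in this snippet. Assumption: it returns n RGBA tuples with
# float channels in [0, 1], which is why the two functions above scale by 255. A minimal
# matplotlib-based stand-in could look like this (the real helper may differ):
import numpy as np
import matplotlib.pyplot as plt

def get_colormap_sketch(n):
    # Sample a qualitative colormap at n positions -> array of shape (n, 4), values in [0, 1]
    return plt.get_cmap("tab20", n)(np.arange(n))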
def on_slider_change(self):
    if self.cap is None:
        return
    self.cap.set(cv2.CAP_PROP_POS_FRAMES, int(self.pos_slider.value() * (self.duration / 1000)))
    ret, frame = self.cap.read()
    if frame is not None:
        self.onFrameChanged.emit(numpy_to_pixmap(frame))
        self.get_rect()
def get_preview(self, analysis: IAnalysisJobAnalysis):
    """
    This should return the Widget that is shown in the Inspector when the analysis is selected
    """
    view = EGraphicsView(None)
    pixmap = numpy_to_pixmap(analysis.get_adata())
    view.set_image(pixmap)
    return view
def get_visualization(self, analysis, result_path, data_path, project, main_window):
    """
    This function should show the complete Visualization
    """
    view = EGraphicsView(None)
    pixmap = numpy_to_pixmap(analysis.get_adata())
    view.set_image(pixmap)
    return [VisualizationTab(widget=view, name="Z-Projection", use_filter=False, controls=None)]
def get_visualization(self, analysis, result_path, data_path, project, main_window):
    """
    This function should show the complete Visualization
    """
    widget = EGraphicsView(None, auto_frame=True)
    widget.set_image(numpy_to_pixmap(self.barcode_to_image(analysis.get_adata()['barcode'])))
    return [VisualizationTab(widget=widget, name="Barcode", use_filter=False, controls=None)]
def get_preview(self, analysis: IAnalysisJobAnalysis):
    """
    This should return the Widget that is shown in the Inspector when the analysis is selected
    """
    if analysis.parameters['interpolation'] == "Cubic":
        interpolation = cv2.INTER_CUBIC
    else:
        interpolation = cv2.INTER_LINEAR

    image = self.barcode_to_image(analysis.get_adata()['barcode'], 400,
                                  image_height=50, interpolation=interpolation)
    barcode_pixm = numpy_to_pixmap(image)

    view = QGraphicsView(QGraphicsScene())
    view.scene().addPixmap(barcode_pixm)
    return view
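# barcode_to_image is not shown in this snippet. Assumption based on its call sites above:
# it takes a sequence of per-segment BGR colors plus an output width/height and renders them
# as a color strip. A minimal sketch of that idea, not the actual implementation:
import cv2
import numpy as np

def barcode_to_image_sketch(barcode, image_width=400, image_height=50,
                            interpolation=cv2.INTER_LINEAR):
    # One row of colors, one column per segment, stretched to the requested output size
    strip = np.array(barcode, dtype=np.uint8).reshape(1, -1, 3)
    return cv2.resize(strip, (image_width, image_height), interpolation=interpolation)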
def set_classification_object(self, clobj, recompute=False, hdf5_cache=None):
    # Guard against a missing classification object before touching clobj.unique_id
    if clobj is not None and not recompute and clobj.unique_id in self.masked_cache:
        result = self.masked_cache[clobj.unique_id]
    elif clobj is None or clobj.semantic_segmentation_labels[0] == "":
        result = self.img_movie
    else:
        result = None
        cached = hdf5_cache.get_screenshot(clobj.get_id(), self.unique_id)
        if cached is None:
            a = self.get_semantic_segmentations(clobj.semantic_segmentation_labels[0])
            lbls = clobj.semantic_segmentation_labels[1]
            if len(clobj.semantic_segmentation_labels[0]) == len(lbls):
                result = self.img_movie

            if a is not None:
                # Apply the semantic segmentation mask to the screenshot
                mask = a.get_adata()
                masked = apply_mask(self.img_movie, mask, lbls)

                # Scale the masked image to the cache size and store it in the HDF5 cache
                h = CACHE_WIDTH / masked.shape[0] * masked.shape[1]
                masked = cv2.resize(masked, (int(h), CACHE_WIDTH), interpolation=cv2.INTER_CUBIC)
                hdf5_cache.dump_screenshot(clobj.get_id(), self.unique_id, masked)
                result = masked
        else:
            result = cached

    if result is not None:
        if clobj is not None:
            self.masked_cache[clobj.unique_id] = result
        self.onImageSet.emit(self, result, numpy_to_pixmap(result, cvt=cv2.COLOR_BGRA2RGBA, with_alpha=True))
    return result
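# apply_mask is not part of this snippet. Assumption: given a BGR frame and a per-pixel label
# mask of the same resolution, it keeps only the pixels whose label is in `labels` and makes
# the rest transparent, which would explain the BGRA -> RGBA conversion when the result is
# emitted above. A minimal sketch under that assumption:
import cv2
import numpy as np

def apply_mask_sketch(frame_bgr, mask, labels):
    keep = np.isin(mask, labels)                      # True where the pixel's label is selected
    bgra = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2BGRA)
    bgra[~keep] = 0                                   # zero color and alpha everywhere else
    return bgra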