def _analyze(self, cellanalyzer):
    """Pick sample objects for every frame of this position.

    Iterates the image container over all frames/channels of the current
    plate/position, registers the channels with *cellanalyzer* and lets it
    collect annotated objects (``byTime=True``); each rendered sample image
    is forwarded to the GUI via ``set_image``.
    """
    super(PositionPicker, self)._analyze()
    n_images = 0
    stopwatch = StopWatch(start=True)
    # Coordinate spans all frames and the distinct channels of the mapping.
    crd = Coordinate(self.plate_id, self.position, self._frames,
                     list(set(self.ch_mapping.values())))

    for frame, channels in self._imagecontainer( \
        crd, interrupt_channel=True, interrupt_zslice=True):
        if self.is_aborted():
            # user abort: report zero processed images
            return 0
        else:
            txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                   len(self._frames))
            self.update_status({'progress': self._frames.index(frame)+1,
                                'text': txt,
                                'interval': stopwatch.interim()})

        stopwatch.reset(start=True)
        # initTimepoint clears channel_registry
        cellanalyzer.initTimepoint(frame)
        self.register_channels(cellanalyzer, channels)
        image = cellanalyzer.collectObjects(self.plate_id,
                                            self.position,
                                            self.sample_readers,
                                            self.learner,
                                            byTime=True)

        # collectObjects may return None when nothing was sampled this frame
        if image is not None:
            n_images += 1
            msg = 'PL %s - P %s - T %05d' %(self.plate_id,
                                            self.position, frame)
            self.set_image(image, msg)
def _analyze(self, cellanalyzer):
    """Process all frames of this position and classify the objects.

    For each frame: initialise the time point, register the channels, run
    segmentation/feature extraction (``cellanalyzer.process``) and apply
    every configured classifier to its channel.
    """
    n_images = 0  # NOTE(review): incremented nowhere in this variant
    stopwatch = StopWatch(start=True)
    crd = Coordinate(self.plate_id, self.position, self._frames,
                     list(set(self.ch_mapping.values())))

    for frame, channels in self._imagecontainer( \
        crd, interrupt_channel=True, interrupt_zslice=True):
        if self.isAborted():
            # drop partial state before bailing out on user abort
            self.clear()
            return 0
        else:
            txt = 'T %d (%d/%d)' % (frame, self._frames.index(frame) + 1,
                                    len(self._frames))
            self.statusUpdate(progress=self._frames.index(frame) + 1,
                              text=txt, interval=stopwatch.interim())

        stopwatch.reset(start=True)
        # initTimepoint clears the channel registry of the analyzer
        cellanalyzer.initTimepoint(frame)
        self.register_channels(cellanalyzer, channels)
        cellanalyzer.process()

        self.setup_classifiers()
        for chname, clf in self.classifiers.iteritems():
            try:
                cellanalyzer.classify_objects(clf, chname)
            except KeyError as e:
                # best-effort: channel/region missing for this frame is
                # silently skipped — presumably intentional; verify
                pass
def _analyze(self, cellanalyzer):
    """Collect training samples for the classifier from this position.

    Reports progress to the current QThread (bounded by the total number
    of sample positions) and lets *cellanalyzer* collect the annotated
    objects frame by frame.
    """
    thread = QThread.currentThread()
    # progress maximum = total number of samples over all positions
    imax = sum([len(n) for n in self.sample_positions.values()])
    thread.statusUpdate(min=0, max=imax)

    stopwatch = StopWatch(start=True)
    crd = Coordinate(self.plate_id, self.position, self._frames,
                     list(set(self.ch_mapping.values())))

    for frame, channels in self._imagecontainer( \
        crd, interrupt_channel=True, interrupt_zslice=True):
        # raises if the user requested an abort
        thread.interruption_point()
        txt = '%s, %s, T %d, (%d/%d)' \
              %(self.plate_id, self.position, frame,
                self._frames.index(frame)+1, len(self._frames))
        thread.statusUpdate(meta="Classifier training: ",
                            text=txt, interval=stopwatch.interim(),
                            increment=True)

        stopwatch.reset(start=True)
        # initTimepoint clears channel_registry
        cellanalyzer.initTimepoint(frame)
        self.register_channels(cellanalyzer, channels)
        cellanalyzer.collectObjects(self.plate_id,
                                    self.position,
                                    self.sample_readers,
                                    self.learner)
def _on_anntable_changed(self, current, previous):
    """Slot: navigate the browser to the coordinate of the newly
    selected annotation-table row.

    :param current: QModelIndex of the newly selected row (may be None).
    :param previous: QModelIndex of the previously selected row (unused).
    """
    if current is not None:
        # With multiple plates the table carries an extra leading plate
        # column; without it every subsequent column shifts left by one.
        if self._imagecontainer.has_multiple_plates:
            offset = 0
            plate = self._ann_table.item(current.row(),
                                         self.COLUMN_ANN_PLATE).text()
        else:
            offset = 1
            plate = self._imagecontainer.plates[0]

        col = self.COLUMN_ANN_POSITION - offset
        position = self._ann_table.item(current.row(), col).text()
        col = self.COLUMN_ANN_TIME - offset
        time = int(self._ann_table.item(current.row(), col).text())

        coordinate = Coordinate(plate=plate, position=position, time=time)
        try:
            self.browser.set_coordinate(coordinate)
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; keep the user-facing error dialog but
            # only catch real errors.
            exception(self, "Selected coordinate was not found. "
                      "Make sure the data and annotation match and "
                      "that the data was scanned/imported correctly.")
def _analyze(self, cellanalyzer):
    """Run the full per-frame analysis pipeline for this position.

    Per frame: segmentation/feature extraction, optional tracking (with
    optional track visualization), optional supervised classification,
    and — unless running in minimal-effort mode — rendering/export of the
    result images. Returns the number of processed frames, or 0 on abort.
    """
    super(PositionAnalyzer, self)._analyze()
    n_images = 0
    stopwatch = StopWatch(start=True)
    crd = Coordinate(self.plate_id, self.position, self._frames,
                     list(set(self.ch_mapping.values())))
    # minimal effort skips all rendering/export when hdf5 results are reused
    minimal_effort = self.settings.get('Output', 'minimal_effort') and self.settings.get('Output', 'hdf5_reuse')

    for frame, channels in self._imagecontainer( \
        crd, interrupt_channel=True, interrupt_zslice=True):
        if self.is_aborted():
            self.clear()
            return 0
        else:
            txt = 'T %d (%d/%d)' %(frame, self._frames.index(frame)+1,
                                   len(self._frames))
            self.update_status({'progress': self._frames.index(frame)+1,
                                'text': txt,
                                'interval': stopwatch.interim()})

        stopwatch.reset(start=True)
        # initTimepoint clears the analyzer's channel registry
        cellanalyzer.initTimepoint(frame)
        self.register_channels(cellanalyzer, channels)
        cellanalyzer.process()
        self.logger.info(" - Frame %d, cellanalyzer.process (ms): %3d" \
                         %(frame, stopwatch.interval()*1000))

        n_images += 1
        images = []

        if self.settings('Processing', 'tracking'):
            region = self.settings('Tracking', 'region')
            samples = self.timeholder[frame][PrimaryChannel.NAME].get_region(region)
            self._tracker.track_next_frame(frame, samples)

            if self.settings('Tracking', 'tracking_visualization'):
                size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                nframes = self.settings('Tracking', 'tracking_visualize_track_length')
                radius = self.settings('Tracking', 'tracking_centroid_radius')
                img_conn, img_split = self._tracker.render_tracks(
                    frame, size, nframes, radius)
                # overlay colors: yellow = connections, cyan = splits
                images += [(img_conn, '#FFFF00', 1.0),
                           (img_split, '#00FFFF', 1.0)]
        self.logger.info(" - Frame %d, Tracking (ms): %3d" \
                         %(frame, stopwatch.interval()*1000))

        # can't cluster on a per frame basis
        if self.settings("EventSelection", "supervised_event_selection"):
            for clf in self.classifiers.itervalues():
                cellanalyzer.classify_objects(clf)
        self.logger.info(" - Frame %d, Classification (ms): %3d" \
                         % (frame, stopwatch.interval()*1000))

        self.settings.set_section('General')
        # want emit all images at once
        if not minimal_effort:
            imgs = {}
            imgs.update(self.render_classification_images(cellanalyzer,
                                                          images, frame))
            imgs.update(self.render_contour_images(cellanalyzer,
                                                   images, frame))
            msg = 'PL %s - P %s - T %05d' %(self.plate_id,
                                            self.position, frame)
            self.set_image(imgs, msg, 50)

            if self.settings('Output', 'rendering_channel_gallery'):
                self.render_channel_gallery(cellanalyzer, frame)

            if self.settings('Output', 'rendering_labels_discwrite'):
                cellanalyzer.exportLabelImages(self._labels_dir)

        # drop per-frame data no longer needed, keep exported features
        cellanalyzer.purge(features=self.export_features)
        self.logger.info(" - Frame %d, rest (ms): %3d" \
                         %(frame, stopwatch.interval()*1000))
        self.logger.info(" - Frame %d, duration (ms): %3d" \
                         %(frame, stopwatch.interim()*1000))

    return n_images
def full_tracks(self, timeholder, visitor_data, position, outdir):
    """Export every full track as a separate tab/CSV-style text file.

    One file per track, named ``P<pos>__T<frame>__O<objID>__B<branch>.txt``,
    containing three header lines (channel, region, feature key) followed
    by one data row per node of the track. *outdir* is wiped and recreated.
    """
    shutil.rmtree(outdir, True)
    makedirs(outdir)

    for start_id, data in visitor_data.iteritems():
        for idx, track in enumerate(data['_full_tracks']):
            has_header = False
            line1 = []
            line2 = []
            line3 = []

            frame, obj_label = Tracker.split_nodeid(start_id)[:2]
            filename = 'P%s__T%05d__O%04d__B%02d.txt' \
                       %(position, frame, obj_label, idx+1)
            # 'with open(...)' replaces the deprecated file() constructor
            # and guarantees the handle is closed even when an export
            # step below raises (the old code leaked it in that case).
            with open(join(outdir, filename), 'w') as f:
                for node_id in track:
                    frame, obj_id = Tracker.split_nodeid(node_id)
                    coordinate = Coordinate(position=position, time=frame)
                    prefix = [frame,
                              self.meta_data.get_timestamp_relative(coordinate),
                              obj_id]
                    prefix_names = ['frame', 'time', 'objID']
                    items = []

                    # gather values over all channels/regions holding the object
                    for channel in timeholder[frame].values():
                        for region_id in channel.region_names():
                            region = channel.get_region(region_id)
                            if obj_id in region:
                                flkp = self._map_feature_names(
                                    region.feature_names)
                                if not has_header:
                                    keys = ['classLabel', 'className']
                                    # absolute centers only for the primary channel
                                    if channel.NAME == 'Primary':
                                        keys += ['centerX', 'centerY']
                                    keys += flkp.keys()
                                    line1 += [channel.NAME.upper()] * len(keys)
                                    line2 += [str(region_id)] * len(keys)
                                    line3 += keys
                                obj = region[obj_id]
                                features = region.features_by_name(
                                    obj_id, flkp.values())
                                values = [x if x is not None else ''
                                          for x in [obj.iLabel,
                                                    obj.strClassName]]
                                if channel.NAME == 'Primary':
                                    values += [obj.oCenterAbs[0],
                                               obj.oCenterAbs[1]]
                                values += list(features)
                                items.extend(values)

                    # write the three header lines once, before the first row
                    if not has_header:
                        has_header = True
                        prefix_str = [''] * len(prefix)
                        line1 = prefix_str + line1
                        line2 = prefix_str + line2
                        line3 = prefix_names + line3
                        f.write('%s\n' % CSVParams.sep.join(line1))
                        f.write('%s\n' % CSVParams.sep.join(line2))
                        f.write('%s\n' % CSVParams.sep.join(line3))

                    f.write('%s\n' % CSVParams.sep.join(
                        [str(i) for i in prefix + items]))
def _data_per_channel(self, timeholder, event_data, filename, channel_name,
                      region_name, feature_names, position):
    """Write one CSV file with per-frame data of a single event track.

    Builds one row per time point of the event (frame, timestamp, event/
    split flags, object id, classification and feature columns) and dumps
    the table via csv.DictWriter. If any involved object id is missing
    from its region, the whole event is skipped and no file is written.
    """
    eventid = event_data['eventId']
    event_frame, _ = Tracker.split_nodeid(eventid)
    has_split = 'splitId' in event_data

    header_names = ['Frame', 'Timestamp', 'isEvent']
    if has_split:
        header_names.append('isSplit')
        if event_data['splitId'] is not None:
            split_frame, _ = Tracker.split_nodeid(event_data['splitId'])
        else:
            split_frame = None

    table = []
    # zip nodes with same time together
    for nodeids in zip(*event_data['tracks']):
        objids = []
        frame = None
        for nodeid in nodeids:
            node_frame, objid = Tracker.split_nodeid(nodeid)
            if frame is None:
                frame = node_frame
            else:
                # all parallel tracks must reference the same frame
                assert frame == node_frame
            objids.append(objid)

        channel = timeholder[frame][channel_name]
        sample_holder = channel.get_region(region_name)

        # fall back to all features of the region when none were requested
        if feature_names is None:
            feature_names = sample_holder.feature_names

        if CSVParams.objId not in header_names:
            # setup header line (done once, on the first iteration)
            header_names.append(CSVParams.objId)
            header_names += [CSVParams.class_ % x
                             for x in ['name', 'label', 'probability']]
            # only feature_names scales according to settings
            header_names += [CSVParams.feature % fn
                             for fn in feature_names]
            header_names += [CSVParams.tracking % tf
                             for tf in CSVParams.tracking_features]

        coordinate = Coordinate(position=position, time=frame)
        data = {'Frame': frame,
                'Timestamp': self.meta_data.get_timestamp_relative(coordinate),
                'isEvent': int(frame == event_frame)}
        if has_split:
            data['isSplit'] = int(frame == split_frame)

        #for iIdx, iObjId in enumerate(lstObjectIds):
        # NOTE(review): only the first object id of the zipped node group
        # is exported — presumably the main track; verify against callers
        objid = objids[0]
        if objid in sample_holder:
            sample = sample_holder[objid]
            data[CSVParams.objId] = objid

            # classification data
            if sample.iLabel is not None:
                data[CSVParams.class_ % 'label'] = sample.iLabel
                data[CSVParams.class_ % 'name'] = sample.strClassName
                data[CSVParams.class_ %'probability'] = \
                    ','.join(['%d:%.5f' % (int(x),y)
                              for x,y in sample.dctProb.iteritems()])

            # features present in both requested and computed sets
            common_ftr = [f for f in
                          set(sample_holder.feature_names).intersection(
                              feature_names)]
            features = sample_holder.features_by_name(objid, common_ftr)
            for feature, fname in zip(features, common_ftr):
                data[CSVParams.feature % fname] = feature

            # features not calculated are exported as NAN
            diff_ftr = [f for f in
                        set(feature_names).difference(
                            sample_holder.feature_names)]
            for df in diff_ftr:
                data[CSVParams.feature % df] = float("NAN")

            # object tracking data (absolute center)
            data[CSVParams.tracking % 'center_x'] = sample.oCenterAbs[0]
            data[CSVParams.tracking % 'center_y'] = sample.oCenterAbs[1]
            data[CSVParams.tracking % 'upperleft_x'] = sample.oRoi.upperLeft[0]
            data[CSVParams.tracking % 'upperleft_y'] = sample.oRoi.upperLeft[1]
            data[CSVParams.tracking % 'lowerright_x'] = sample.oRoi.lowerRight[0]
            data[CSVParams.tracking % 'lowerright_y'] = sample.oRoi.lowerRight[1]
        else:
            # we rather skip the entire event in case the object ID is not valid
            return
        table.append(data)

    if len(table) > 0:
        with open(filename, 'wb') as fp:
            writer = csv.DictWriter(fp, fieldnames=header_names,
                                    delimiter=CSVParams.sep)
            writer.writeheader()
            writer.writerows(table)
def __init__(self, settings, imagecontainer):
    """Set up the browser main window: viewer, time slider, actions,
    menus, toolbar and side modules (navigation/display/annotation),
    then render the first image.

    :param settings: application settings object
    :param imagecontainer: provides plates, channels and image data
    """
    super(Browser, self).__init__()

    frame = QFrame(self)
    self.setCentralWidget(frame)
    self._settings = settings
    self._imagecontainer = imagecontainer
    self._show_objects = False
    self._object_region = None
    self.coordinate = Coordinate()

    self.grabGesture(Qt.SwipeGesture)
    self.setStyleSheet("QStatusBar { border-top: 1px solid gray; }")

    layout = QVBoxLayout(frame)
    layout.setContentsMargins(0, 0, 0, 0)
    splitter = QSplitter(Qt.Horizontal, frame)
    #splitter.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,
    #                                   QSizePolicy.Expanding))
    layout.addWidget(splitter)

    # main image area (left) + stacked module panel (right)
    frame = QFrame(self)
    frame_side = QStackedWidget(splitter)
    #splitter.setChildrenCollapsible(False)
    splitter.addWidget(frame)
    splitter.addWidget(frame_side)
    splitter.setStretchFactor(0, 1)
    splitter.setStretchFactor(1, 0)
    splitter.setSizes([-1, 80])

    # start at the first plate/channel/position of the container
    self.coordinate.plate = self._imagecontainer.plates[0]
    self._imagecontainer.set_plate(self.coordinate.plate)
    self.coordinate.channel = self._imagecontainer.channels[0]
    meta_data = self._imagecontainer.get_meta_data()
    self.max_time = meta_data.times[-1]
    self.min_time = meta_data.times[0]
    self.max_frame = meta_data.dim_t - 1

    layout = QGridLayout(frame)
    layout.setContentsMargins(0, 0, 0, 0)
    self.image_viewer = ImageViewer(frame, auto_resize=True)
    layout.addWidget(self.image_viewer, 0, 0)
    #self.image_viewer.image_mouse_dblclk.connect(self._on_dbl_clk)
    self.image_viewer.zoom_info_updated.connect(self.on_zoom_info_updated)

    # time slider spans real time values, not frame indices
    self._t_slider = QSlider(Qt.Horizontal, frame)
    self._t_slider.setMinimum(self.min_time)
    self._t_slider.setMaximum(self.max_time)
    # self._t_slider.setMaximum(self.max_frame)
    self._t_slider.setTickPosition(QSlider.TicksBelow)
    self._t_slider.valueChanged.connect(self.on_time_changed_by_slider,
                                        Qt.DirectConnection)
    if self._imagecontainer.has_timelapse:
        self._t_slider.show()
    else:
        self._t_slider.hide()
    layout.addWidget(self._t_slider, 1, 0)

    self.coordinate.position = meta_data.positions[0]
    self.coordinate.time = self._t_slider.minimum()

    # menus
    act_next_t = self.create_action('Next Time-point',
                                    shortcut=QKeySequence('Right'),
                                    slot=self.on_act_next_t)
    act_prev_t = self.create_action('Previous Time-point',
                                    shortcut=QKeySequence('Left'),
                                    slot=self.on_act_prev_t)
    act_next_pos = self.create_action('Next Position',
                                      shortcut=QKeySequence('Shift+Down'),
                                      slot=self.on_act_next_pos)
    act_prev_pos = self.create_action('Previous Position',
                                      shortcut=QKeySequence('Shift+Up'),
                                      slot=self.on_act_prev_pos)
    act_next_plate = self.create_action(
        'Next Plate',
        shortcut=QKeySequence('Shift+Alt+Down'),
        slot=self.on_act_next_plate)
    act_prev_plate = self.create_action(
        'Previous Plate',
        shortcut=QKeySequence('Shift+Alt+Up'),
        slot=self.on_act_prev_plate)
    act_resize = self.create_action('Automatically Resize',
                                    shortcut=QKeySequence('SHIFT+CTRL+R'),
                                    slot=self.on_act_autoresize,
                                    signal='triggered(bool)',
                                    checkable=True,
                                    checked=True)
    self._act_resize = act_resize
    act_zoomfit = self.create_action('Zoom to Fit',
                                     shortcut=QKeySequence('CTRL+0'),
                                     slot=self.on_act_zoomfit)
    act_zoom100 = self.create_action('Actual Size',
                                     shortcut=QKeySequence('CTRL+1'),
                                     slot=self.on_act_zoom100)
    act_zoomin = self.create_action('Zoom In',
                                    shortcut=QKeySequence('CTRL++'),
                                    slot=self.on_act_zoomin)
    act_zoomout = self.create_action('Zoom Out',
                                     shortcut=QKeySequence('CTRL+-'),
                                     slot=self.on_act_zoomout)
    act_refresh = self.create_action('Refresh',
                                     shortcut=QKeySequence('F5'),
                                     slot=self.on_refresh)
    act_fullscreen = self.create_action('Full Screen',
                                        shortcut=QKeySequence('CTRL+F'),
                                        slot=self.on_act_fullscreen,
                                        signal='triggered(bool)',
                                        checkable=True,
                                        checked=False)
    self._act_fullscreen = act_fullscreen
    act_show_contours = self.create_action(
        'Show Object Contours',
        shortcut=QKeySequence('ALT+C'),
        slot=self.on_act_show_contours,
        signal='triggered(bool)',
        checkable=True,
        checked=self.image_viewer.show_contours)
    self._act_show_contours = act_show_contours
    act_anti = self.create_action('Antialiasing',
                                  shortcut=QKeySequence('CTRL+ALT+A'),
                                  slot=self.on_act_antialiasing,
                                  signal='triggered(bool)',
                                  checkable=True,
                                  checked=True)
    act_smooth = self.create_action('Smooth Transform',
                                    shortcut=QKeySequence('CTRL+ALT+S'),
                                    slot=self.on_act_smoothtransform,
                                    signal='triggered(bool)',
                                    checkable=True,
                                    checked=True)
    view_menu = self.menuBar().addMenu('&View')
    # None entries become menu separators
    self.add_actions(view_menu, (
        act_resize, None,
        act_zoom100, act_zoomfit,
        act_zoomin, act_zoomout,
        None,
        act_prev_t, act_next_t,
        act_prev_pos, act_next_pos,
        act_prev_plate, act_next_plate,
        None,
        act_refresh, act_fullscreen,
        None,
        act_show_contours,
        None,
        act_anti, act_smooth,
    ))

    self._statusbar = QStatusBar(self)
    self.setStatusBar(self._statusbar)

    # tool bar
    toolbar = self.addToolBar('Toolbar')
    toolbar.setMovable(False)
    toolbar.setFloatable(False)

    region_names = []
    for prefix in ['primary', 'secondary', 'tertiary']:
        region_names.extend(['%s - %s' % (prefix.capitalize(), name) \
                             for name in REGION_INFO.names[prefix]])
    # FIXME: something went wrong with setting up the current region
    self._object_region = region_names[0].split(' - ')

    # create a new ModuleManager with a QToolbar and QStackedFrame
    self._module_manager = ModuleManager(toolbar, frame_side)
    NavigationModule(self._module_manager, self, self._imagecontainer)
    DisplayModule(self._module_manager, self, self._imagecontainer,
                  region_names)
    AnnotationModule(self._module_manager, self, self._settings,
                     self._imagecontainer)

    # set the Navigation module activated
    self._module_manager.activate_tab(NavigationModule.NAME)

    # process and display the first image
    self._process_image()
def __init__(self, settings, imagecontainer, parent=None):
    """Set up the annotation browser window: image/gallery viewers, time
    slider, actions, menus, toolbar and side modules, restore the saved
    window geometry, then render the first image.

    :param settings: application settings object
    :param imagecontainer: provides plates, channels and image data
    :param parent: optional parent QWidget
    """
    super(Browser, self).__init__(parent)
    self.setWindowTitle('Annotation Browser')

    frame = QFrame(self)
    self.setCentralWidget(frame)
    self._settings = settings
    self._imagecontainer = imagecontainer

    # These params are used by process_image and contour visualization
    self._detect_objects = False
    self._show_objects_by = 'color'
    self._object_region = None
    self._contour_color = '#000000'
    self._show_objects = True

    self.coordinate = Coordinate()
    self.grabGesture(Qt.SwipeGesture)
    self.setStyleSheet("QStatusBar { border-top: 1px solid gray; }")

    layout = QVBoxLayout(frame)
    layout.setContentsMargins(0, 0, 0, 0)
    splitter = QSplitter(Qt.Horizontal, frame)
    layout.addWidget(splitter)

    # main image area (left) + stacked module panel (right)
    frame = QFrame(self)
    frame_side = QStackedWidget(splitter)
    splitter.addWidget(frame)
    splitter.addWidget(frame_side)
    splitter.setStretchFactor(0, 1)
    splitter.setStretchFactor(1, 0)
    splitter.setSizes([-1, 80])

    # start at the first plate/channel/position of the container
    self.coordinate.plate = self._imagecontainer.plates[0]
    self._imagecontainer.set_plate(self.coordinate.plate)
    self.coordinate.channel = self._imagecontainer.channels[0]
    meta_data = self._imagecontainer.get_meta_data()
    self.max_time = meta_data.times[-1]
    self.min_time = meta_data.times[0]
    self.max_frame = meta_data.dim_t - 1

    layout = QGridLayout(frame)
    layout.setContentsMargins(0, 0, 0, 0)
    # two interchangeable viewers; 'image' is active initially
    self.image_viewers = {'image': ImageViewer(frame, auto_resize=True),
                          'gallery': GalleryViewer(frame)}
    self.image_viewer = self.image_viewers['image']
    layout.addWidget(self.image_viewer, 0, 0)
    self.image_viewer.zoom_info_updated.connect(self.on_zoom_info_updated)

    # time slider spans real time values, not frame indices
    self._t_slider = TSlider(Qt.Horizontal, frame)
    self._t_slider.setMinimum(self.min_time)
    self._t_slider.setMaximum(self.max_time)
    self._t_slider.setTickPosition(QSlider.NoTicks)
    self._t_slider.newValue.connect(self.on_time_changed_by_slider,
                                    Qt.DirectConnection)
    self._t_slider.valueChanged.connect(self.timeToolTip)

    self._imagecontainer.check_dimensions()
    if self._imagecontainer.has_timelapse:
        self._t_slider.show()
    else:
        self._t_slider.hide()
    layout.addWidget(self._t_slider, 1, 0)

    self.coordinate.position = meta_data.positions[0]
    self.coordinate.time = self._t_slider.minimum()

    # menus
    act_close = self.create_action('Close',
                                   shortcut=QKeySequence('CTRL+C'),
                                   slot=self.close)
    act_next_t = self.create_action('Next Time-point',
                                    shortcut=QKeySequence('Right'),
                                    slot=self.on_act_next_t)
    act_prev_t = self.create_action('Previous Time-point',
                                    shortcut=QKeySequence('Left'),
                                    slot=self.on_act_prev_t)
    act_next_pos = self.create_action('Next Position',
                                      shortcut=QKeySequence('Shift+Down'),
                                      slot=self.on_act_next_pos)
    act_prev_pos = self.create_action('Previous Position',
                                      shortcut=QKeySequence('Shift+Up'),
                                      slot=self.on_act_prev_pos)
    act_next_plate = self.create_action(
        'Next Plate',
        shortcut=QKeySequence('Shift+Alt+Down'),
        slot=self.on_act_next_plate)
    act_prev_plate = self.create_action(
        'Previous Plate',
        shortcut=QKeySequence('Shift+Alt+Up'),
        slot=self.on_act_prev_plate)
    act_resize = self.create_action('Automatically Resize',
                                    shortcut=QKeySequence('SHIFT+CTRL+R'),
                                    slot=self.on_act_autoresize,
                                    signal='triggered(bool)',
                                    checkable=True,
                                    checked=True)
    self._act_resize = act_resize
    act_zoomfit = self.create_action('Zoom to Fit',
                                     shortcut=QKeySequence('CTRL+0'),
                                     slot=self.on_act_zoomfit)
    act_zoom100 = self.create_action('Actual Size',
                                     shortcut=QKeySequence('CTRL+1'),
                                     slot=self.on_act_zoom100)
    act_zoomin = self.create_action('Zoom In',
                                    shortcut=QKeySequence('CTRL++'),
                                    slot=self.on_act_zoomin)
    act_zoomout = self.create_action('Zoom Out',
                                     shortcut=QKeySequence('CTRL+-'),
                                     slot=self.on_act_zoomout)
    act_refresh = self.create_action('Refresh',
                                     shortcut=QKeySequence('F5'),
                                     slot=self.on_refresh)
    act_fullscreen = self.create_action('Full Screen',
                                        shortcut=QKeySequence('CTRL+F'),
                                        slot=self.on_act_fullscreen,
                                        signal='triggered(bool)',
                                        checkable=True,
                                        checked=False)
    self._act_fullscreen = act_fullscreen
    act_show_contours = self.create_action(
        'Show Object Contours',
        shortcut=QKeySequence('ALT+C'),
        slot=self.on_act_show_contours,
        signal='triggered(bool)',
        checkable=True,
        checked=self.image_viewer.show_contours)
    self._act_show_contours = act_show_contours
    act_anti = self.create_action('Antialiasing',
                                  shortcut=QKeySequence('CTRL+ALT+A'),
                                  slot=self.on_act_antialiasing,
                                  signal='triggered(bool)',
                                  checkable=True,
                                  checked=True)
    act_smooth = self.create_action('Smooth Transform',
                                    shortcut=QKeySequence('CTRL+ALT+S'),
                                    slot=self.on_act_smoothtransform,
                                    signal='triggered(bool)',
                                    checkable=True,
                                    checked=True)
    view_menu = self.menuBar().addMenu('&View')
    # None entries become menu separators
    self.add_actions(
        view_menu,
        (act_resize, None, act_zoom100, act_zoomfit, act_zoomin,
         act_zoomout, None, act_prev_t, act_next_t, act_prev_pos,
         act_next_pos, act_prev_plate, act_next_plate, None,
         act_refresh, act_fullscreen, None, act_show_contours,
         None, act_anti, act_smooth, None, act_close))

    self._statusbar = QStatusBar(self)
    self.setStatusBar(self._statusbar)

    toolbar = self.addToolBar('Toolbar')
    toolbar.setObjectName('Toolbar')
    toolbar.setMovable(False)
    toolbar.setFloatable(False)

    # fallback if no Segmentation plugins have been specified
    rdict = self._region_names()
    if len(rdict) > 0:
        self._object_region = rdict.keys()[0].split(' - ')
    else:
        self._object_region = ('Primary', 'primary')

    # create a new ModuleManager with a QToolbar and QStackedFrame
    self._module_manager = ModuleManager(toolbar, frame_side)
    NavigationModule(self._module_manager, self, self._imagecontainer)
    defautl_display_module = DisplayModule(self._module_manager, self,
                                           self._imagecontainer, rdict)
    self.set_display_module(defautl_display_module)
    AnnotationModule(self._module_manager, self, self._settings,
                     self._imagecontainer)
    try:
        CellH5EventModule(self._module_manager, self, self._settings,
                          self._imagecontainer)
    except Exception as e:
        # CellH5 module is optional; warn but keep the browser usable
        QMessageBox.warning(self, "Warning", str(e))

    # set the Navigation module activated
    self._module_manager.activate_tab(NavigationModule.NAME)
    self.layout = layout

    # process and display the first image
    self._restore_geometry()
    self._process_image()
def _analyze(self, cellanalyzer):
    """Run the per-frame analysis pipeline for this position.

    Per frame: segmentation/feature extraction, optional tracking (with
    optional visualization), classification, rendering and optional disk
    export. Returns the number of processed frames, or 0 on abort.
    """
    super(PositionAnalyzer, self)._analyze()
    n_images = 0
    stopwatch = StopWatch(start=True)
    crd = Coordinate(self.plate_id, self.position, self._frames,
                     list(set(self.ch_mapping.values())))

    for frame, channels in self._imagecontainer( \
        crd, interrupt_channel=True, interrupt_zslice=True):
        if self.is_aborted():
            # drop partial state before bailing out on user abort
            self.clear()
            return 0
        else:
            txt = 'T %d (%d/%d)' % (frame, self._frames.index(frame) + 1,
                                    len(self._frames))
            self.update_status({
                'progress': self._frames.index(frame) + 1,
                'text': txt,
                'interval': stopwatch.interim()
            })

        stopwatch.reset(start=True)
        # initTimepoint clears the analyzer's channel registry
        cellanalyzer.initTimepoint(frame)
        self.register_channels(cellanalyzer, channels)
        cellanalyzer.process()

        n_images += 1
        images = []

        if self.settings.get('Processing', 'tracking'):
            region = self.settings.get('Tracking', 'tracking_regionname')
            samples = self.timeholder[frame][
                PrimaryChannel.NAME].get_region(region)
            self._tracker.track_next_frame(frame, samples)

            if self.settings.get('Tracking', 'tracking_visualization'):
                size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                nframes = self.settings.get(
                    'Tracking', 'tracking_visualize_track_length')
                radius = self.settings.get('Tracking',
                                           'tracking_centroid_radius')
                img_conn, img_split = self._tracker.render_tracks(
                    frame, size, nframes, radius)
                # overlay colors: yellow = connections, cyan = splits
                images += [(img_conn, '#FFFF00', 1.0),
                           (img_split, '#00FFFF', 1.0)]

        for clf in self.classifiers.itervalues():
            cellanalyzer.classify_objects(clf)

        ##############################################################
        # FIXME - part for browser
        if self._myhack is not None:
            self.render_browser(cellanalyzer)
        ##############################################################

        self.settings.set_section('General')
        self.render_classification_images(cellanalyzer, images, frame)
        self.render_contour_images(cellanalyzer, images, frame)

        if self.settings.get('Output', 'rendering_channel_gallery'):
            self.render_channel_gallery(cellanalyzer, frame)

        if self.settings.get('Output', 'rendering_labels_discwrite'):
            cellanalyzer.exportLabelImages(self._labels_dir)

        self.logger.info(" - Frame %d, duration (ms): %3d" \
                         %(frame, stopwatch.interim()*1000))
        # drop per-frame data no longer needed, keep exported features
        cellanalyzer.purge(features=self.export_features)

    return n_images
def _analyze(self, cellanalyzer):
    """Run the per-frame analysis pipeline for this position.

    Per frame: segmentation/feature extraction, optional tracking (with
    app-preference-driven visualization), optional supervised
    classification, and emission of all rendered images at once.
    """
    thread = QThread.currentThread()
    stopwatch = StopWatch(start=True)
    crd = Coordinate(self.plate_id, self.position, self._frames,
                     list(set(self.ch_mapping.values())))

    for frame, channels in self._imagecontainer( \
        crd, interrupt_channel=True, interrupt_zslice=True):
        # raises if the user requested an abort
        self.interruptionPoint()
        txt = '%s, %s, T %d (%d/%d)' \
              %(self.plate_id, self.position, frame,
                self._frames.index(frame)+1, len(self._frames))
        self.statusUpdate(text=txt, interval=stopwatch.interim(),
                          increment=True)

        stopwatch.reset(start=True)
        # initTimepoint clears the analyzer's channel registry
        cellanalyzer.initTimepoint(frame)
        self.register_channels(cellanalyzer, channels)
        cellanalyzer.process()
        self.logger.debug(" - Frame %d, cellanalyzer.process (ms): %3d" \
                          %(frame, stopwatch.interval()*1000))

        images = []
        if self.settings('Processing', 'tracking'):
            # track visualization options come from app preferences
            apc = AppPreferences()
            region = self.settings('Tracking', 'region')
            samples = self.timeholder[frame][
                PrimaryChannel.NAME].get_region(region)
            self._tracker.track_next_frame(frame, samples)

            if apc.display_tracks:
                size = cellanalyzer.getImageSize(PrimaryChannel.NAME)
                img_conn, img_split = self._tracker.render_tracks(
                    frame, size, apc.track_length, apc.cradius)
                # overlay colors: yellow = connections, cyan = splits
                images += [(img_conn, '#FFFF00', 1.0),
                           (img_split, '#00FFFF', 1.0)]
        self.logger.debug(" - Frame %d, Tracking (ms): %3d" \
                          %(frame, stopwatch.interval()*1000))

        # can't cluster on a per frame basis
        if self.settings("EventSelection", "supervised_event_selection"):
            for channel, clf in self.classifiers.iteritems():
                cellanalyzer.classify_objects(clf, channel)
        self.logger.debug(" - Frame %d, Classification (ms): %3d" \
                          % (frame, stopwatch.interval()*1000))

        self.settings.set_section('General')
        # want emit all images at once
        imgs = {}
        imgs.update(
            self.render_classification_images(cellanalyzer, images, frame))
        imgs.update(self.render_contour_images(cellanalyzer, images, frame))
        msg = 'PL %s - P %s - T %05d' % (self.plate_id,
                                         self.position, frame)
        self.setImage(imgs, msg, 50)

        # drop per-frame data no longer needed, keep exported features
        cellanalyzer.purge(features=self.export_features)
        self.logger.debug(" - Frame %d, duration (ms): %3d" \
                          %(frame, stopwatch.interim()*1000))