def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings adjusted for a tracking (or full event) test run.

    For PROCESS_TRACKING only the primary channel is tracked and its
    contours rendered; otherwise classification and trajectory
    synchronization are enabled and class rendering is set up for all
    processed channels.
    """
    settings = BaseProcessorFrame._get_modified_settings(self, name,
                                                         has_timelapse)
    # (removed an unused 'prim_id' local and a dead
    #  set_section('ObjectDetection') that was immediately overridden)
    settings.set_section('Processing')
    settings.set2('tracking', True)
    settings.set2('tracking_synchronize_trajectories', False)
    settings.set_section('Tracking')
    region_name = settings.get2('tracking_regionname')
    settings.set_section('General')
    settings.set2('rendering_class', {})
    settings.set2('rendering', {})
    settings.set_section('Classification')
    settings.set2('collectsamples', False)
    settings.set('Output', 'hdf5_create_file', False)
    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')
    if name == self.PROCESS_TRACKING:
        # tracking only invokes the primary channel
        settings.set_section('Processing')
        settings.set2('primary_classification', False)
        settings.set2('primary_featureextraction', True)
        # NOTE(review): '*_processChannel' casing differs from the
        # lowercase option names used elsewhere in this file; the
        # settings parser presumably lowercases option names -- verify
        settings.set2('secondary_processChannel', False)
        settings.set2('secondary_featureextraction', False)
        settings.set2('secondary_classification', False)
        settings.set2('tertiary_processChannel', False)
        settings.set2('tertiary_featureextraction', False)
        settings.set2('tertiary_classification', False)
        settings.set2('merged_processChannel', False)
        settings.set2('merged_classification', False)
        settings.set('Output', 'events_export_gallery_images', False)
        settings.set('General', 'rendering',
                     {'primary_contours':
                      {PrimaryChannel.NAME:
                       {'raw': ('#FFFFFF', 1.0),
                        'contours': {region_name:
                                     ('#FF0000', 1, show_ids)}}}})
    else:
        settings.set_section('Processing')
        settings.set2('primary_featureextraction', True)
        settings.set2('primary_classification', True)
        settings.set2('tracking_synchronize_trajectories', True)
        cl_rnd = {'primary_classification':
                  {PrimaryChannel.NAME:
                   {'raw': ('#FFFFFF', 1.0),
                    'contours': [('primary', 'class_label', 1, False),
                                 ('primary', '#000000', 1,
                                  show_ids_class)]}}}
        settings.set('General', 'rendering_class', cl_rnd)
        settings.set_section('Processing')
        self._channel_render_settings(settings, SecondaryChannel.NAME,
                                      show_ids_class)
        self._channel_render_settings(settings, TertiaryChannel.NAME,
                                      show_ids_class)
        self._channel_render_settings(settings, MergedChannel.NAME,
                                      show_ids_class)
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings tweaked for tracking plus event selection.

    Only the primary channel is processed; the live rendering is chosen
    depending on whether event selection is supervised or unsupervised.
    """
    settings = BaseProcessorFrame._get_modified_settings(
        self, name, has_timelapse)

    # enable tracking/event selection, mute rendering and all exports
    for section, option, value in (
            ('Processing', 'tracking', True),
            ('Processing', 'eventselection', True),
            ('General', 'rendering_class', {}),
            ('General', 'rendering', {}),
            ('Classification', 'collectsamples', False),
            ('Output', 'hdf5_create_file', False),
            ('Output', 'events_export_gallery_images', False),
            # only primary channel for event selection
            ('Processing', 'secondary_featureextraction', False),
            ('Processing', 'secondary_classification', False),
            ('General', 'process_secondary', False),
            ('Processing', 'tertiary_featureextraction', False),
            ('Processing', 'tertiary_classification', False),
            ('General', 'process_tertiary', False),
            ('Processing', 'merged_classification', False),
            ('General', 'process_merged', False)):
        settings.set(section, option, value)

    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')

    contours_spec = {
        PrimaryChannel.NAME: {
            'raw': ('#FFFFFF', 1.0),
            'contours': {'primary': ('#FF0000', 1, show_ids)}}}
    class_spec = {
        PrimaryChannel.NAME: {
            'raw': ('#FFFFFF', 1.0),
            'contours': [('primary', 'class_label', 1, False),
                         ('primary', '#000000', 1, show_ids_class)]}}

    # primary channel setup and live rendering per selection mode
    if settings.get('EventSelection', 'unsupervised_event_selection'):
        settings.set('Processing', 'primary_featureextraction', True)
        settings.set('Processing', 'primary_classification', True)
        settings.set('Processing', 'secondary_classification', False)
        settings.set('Processing', 'tertiary_classification', False)
        settings.set('Processing', 'merged_classification', False)
        settings.set('General', 'rendering',
                     {'primary_contours': contours_spec})
    elif settings.get('EventSelection', 'supervised_event_selection'):
        settings.set('Processing', 'primary_featureextraction', True)
        settings.set('Processing', 'primary_classification', True)
        settings.set('General', 'rendering_class',
                     {'primary_classification': class_spec})
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings for a tracking-only run on the primary channel."""
    settings = BaseProcessorFrame._get_modified_settings(self, name,
                                                         has_timelapse)
    settings.set('Processing', 'tracking', True)
    settings.set('Processing', 'eventselection', False)
    settings.set('General', 'rendering_class', {})
    settings.set('General', 'rendering', {})
    settings.set('Classification', 'collectsamples', False)
    settings.set('Output', 'hdf5_create_file', False)
    # tracking only invokes the primary channel -- switch the rest off
    settings.set('Processing', 'primary_classification', False)
    settings.set('Processing', 'primary_featureextraction', True)
    for chan in ('secondary', 'tertiary'):
        settings.set('General', 'process_%s' % chan, False)
        settings.set('Processing', '%s_featureextraction' % chan, False)
        settings.set('Processing', '%s_classification' % chan, False)
    settings.set('General', 'process_merged', False)
    settings.set('Processing', 'merged_classification', False)
    settings.set('Output', 'events_export_gallery_images', False)

    tracked_region = settings.get('Tracking', 'region')
    draw_ids = settings.get('Output', 'rendering_contours_showids')
    channel_name = CH_PRIMARY[0].title()
    rendering = {'primary_contours':
                 {channel_name:
                  {'raw': ('#FFFFFF', 1.0),
                   'contours': {tracked_region:
                                ('#FF0000', 1, draw_ids)}}}}
    settings.set('General', 'rendering', rendering)
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings for a per-tab object-detection preview.

    Disables feature extraction, classification, tracking and all file
    exports, enables only the channels required by the currently
    selected tab, and sets up contour rendering for that channel.
    """
    settings = BaseProcessorFrame._get_modified_settings(self, name,
                                                         has_timelapse)
    # (removed a dead set_section('ObjectDetection') that was
    #  immediately overridden by set_section('Processing'))
    settings.set_section('Processing')
    for prefix in ['primary', 'secondary', 'tertiary']:
        settings.set2('%s_featureextraction' % prefix, False)
        settings.set2('%s_classification' % prefix, False)
    settings.set2('tracking', False)
    settings.set_section('Classification')
    settings.set2('collectsamples', False)
    settings.set_section('General')
    settings.set2('rendering_class', {})
    settings.set('Output', 'events_export_gallery_images', False)
    settings.set('Output', 'hdf5_create_file', False)
    settings.set('Output', 'export_object_counts', False)
    settings.set('Output', 'export_file_names', False)
    settings.set('Output', 'export_object_details', False)
    settings.set('Output', 'export_tracking_as_dot', False)
    settings.set('Output', 'export_track_data', False)
    show_ids = settings.get('Output', 'rendering_contours_showids')
    current_tab = self._tab.current_index
    # turn off merged channel; the tab decides the rest
    settings.set('General', 'process_merged', False)
    if current_tab == 0:
        settings.set('General', 'process_secondary', False)
        settings.set('General', 'process_tertiary', False)
        prefix = 'primary'
    elif current_tab == 1:
        settings.set('General', 'process_secondary', True)
        settings.set('General', 'process_tertiary', False)
        prefix = 'secondary'
    else:
        settings.set('General', 'process_secondary', True)
        settings.set('General', 'process_tertiary', True)
        prefix = 'tertiary'
    region_info = self.plugin_mgr.region_info
    colors = region_info.colors
    # one contour-rendering entry per region of the previewed channel
    rdn = dict(('%s_contours_%s' % (prefix, x),
                {prefix.capitalize():
                 {'raw': ('#FFFFFF', 1.0),
                  'contours': [(x, colors[x], 1, show_ids)]}})
               for x in region_info.names[prefix])
    settings.set('General', 'rendering', rdn)
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings for previewing object detection of the active tab."""
    settings = BaseProcessorFrame._get_modified_settings(
        self, name, has_timelapse)
    settings.set_section('ObjectDetection')
    settings.set_section('Processing')
    for channel in ('primary', 'secondary', 'tertiary'):
        settings.set2('%s_featureextraction' % channel, False)
        settings.set2('%s_classification' % channel, False)
    settings.set2('tracking', False)
    settings.set_section('Classification')
    settings.set2('collectsamples', False)
    settings.set_section('General')
    settings.set2('rendering_class', {})
    # silence every export target
    for option in ('events_export_gallery_images', 'hdf5_create_file',
                   'export_object_counts', 'export_file_names',
                   'export_object_details', 'export_tracking_as_dot',
                   'export_track_data'):
        settings.set('Output', option, False)
    show_ids = settings.get('Output', 'rendering_contours_showids')

    # the tab index decides which channel is previewed and which
    # upstream channels must therefore be enabled
    tab = self._tab.current_index
    # turn off merged channel
    settings.set('General', 'process_merged', False)
    if tab == 0:
        enable_secondary, enable_tertiary, prefix = False, False, 'primary'
    elif tab == 1:
        enable_secondary, enable_tertiary, prefix = True, False, 'secondary'
    else:
        enable_secondary, enable_tertiary, prefix = True, True, 'tertiary'
    settings.set('General', 'process_secondary', enable_secondary)
    settings.set('General', 'process_tertiary', enable_tertiary)

    region_info = self.plugin_mgr.region_info
    rendering = {}
    for region in region_info.names[prefix]:
        rendering['%s_contours_%s' % (prefix, region)] = {
            prefix.capitalize(): {
                'raw': ('#FFFFFF', 1.0),
                'contours': [(region, region_info.colors[region], 1,
                              show_ids)]}}
    settings.set('General', 'rendering', rendering)
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Couple error correction to the available classification results."""
    settings = BaseProcessorFrame._get_modified_settings(self, name,
                                                         has_timelapse)
    settings.set_section('Processing')
    # primary error correction requires primary classification
    if settings.get2('primary_classification'):
        settings.set2('primary_errorcorrection', True)
    if settings.get2('secondary_processchannel'):
        # secondary channel active: error-correct it when classified
        if settings.get2('secondary_classification'):
            settings.set2('secondary_errorcorrection', True)
    else:
        # no secondary channel: neither classify nor error-correct it
        settings.set2('secondary_classification', False)
        settings.set2('secondary_errorcorrection', False)
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings prepared for tracking with event selection.

    Restricts processing to the primary channel and wires up the
    appropriate live rendering for the chosen event-selection mode.
    """
    settings = BaseProcessorFrame._get_modified_settings(
        self, name, has_timelapse)

    def _set(section, option, value=False):
        settings.set(section, option, value)

    # turn on tracking and event selection
    _set('Processing', 'tracking', True)
    _set('Processing', 'eventselection', True)
    # clear live rendering, disable sample collection and exports
    _set('General', 'rendering_class', {})
    _set('General', 'rendering', {})
    _set('Classification', 'collectsamples')
    _set('Output', 'hdf5_create_file')
    _set('Output', 'events_export_gallery_images')
    # only primary channel for event selection
    for chan in ('secondary', 'tertiary'):
        _set('Processing', '%s_featureextraction' % chan)
        _set('Processing', '%s_classification' % chan)
        _set('General', 'process_%s' % chan)
    _set('Processing', 'merged_classification')
    _set('General', 'process_merged')

    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')
    render_contours = {PrimaryChannel.NAME:
                       {'raw': ('#FFFFFF', 1.0),
                        'contours': {'primary':
                                     ('#FF0000', 1, show_ids)}}}
    render_class = {PrimaryChannel.NAME:
                    {'raw': ('#FFFFFF', 1.0),
                     'contours': [('primary', 'class_label', 1, False),
                                  ('primary', '#000000', 1,
                                   show_ids_class)]}}

    unsupervised = settings.get('EventSelection',
                                'unsupervised_event_selection')
    supervised = settings.get('EventSelection',
                              'supervised_event_selection')
    if unsupervised:
        _set('Processing', 'primary_featureextraction', True)
        _set('Processing', 'primary_classification', True)
        _set('Processing', 'secondary_classification')
        _set('Processing', 'tertiary_classification')
        _set('Processing', 'merged_classification')
        _set('General', 'rendering', {'primary_contours': render_contours})
    elif supervised:
        _set('Processing', 'primary_featureextraction', True)
        _set('Processing', 'primary_classification', True)
        _set('General', 'rendering_class',
             {'primary_classification': render_class})
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings restricted to a per-tab object-detection preview.

    Feature extraction, classification, tracking and gallery export are
    switched off; the current tab decides which channel's contours are
    rendered and which process channels are enabled.
    """
    settings = BaseProcessorFrame._get_modified_settings(self, name,
                                                         has_timelapse)
    settings.set_section('ObjectDetection')
    prim_id = PrimaryChannel.NAME
    sec_id = SecondaryChannel.NAME
    # regions whose checkbox is enabled in the ObjectDetection section
    sec_regions = [v for k, v in SECONDARY_REGIONS.iteritems()
                   if settings.get2(k)]
    tert_id = TertiaryChannel.NAME
    tert_regions = [v for k, v in TERTIARY_REGIONS.iteritems()
                    if settings.get2(k)]
    settings.set_section('Processing')
    for prefix in ['primary', 'secondary', 'tertiary']:
        settings.set2('%s_featureextraction' % prefix, False)
        settings.set2('%s_classification' % prefix, False)
    settings.set2('tracking', False)
    settings.set_section('Classification')
    settings.set2('collectsamples', False)
    settings.set_section('General')
    settings.set2('rendering_class', {})
    settings.set('Output', 'events_export_gallery_images', False)
    show_ids = settings.get('Output', 'rendering_contours_showids')
    # (removed leftover debug 'print current_tab' and dead commented-out
    #  settings lines)
    current_tab = self._tab.currentIndex()
    if current_tab == 0:
        settings.set('Processing', 'secondary_processchannel', False)
        settings.set('Processing', 'tertiary_processchannel', False)
        settings.set('General', 'rendering',
                     {'primary_contours':
                      {prim_id: {'raw': ('#FFFFFF', 1.0),
                                 'contours': {'primary':
                                              ('#FF0000', 1,
                                               show_ids)}}}})
    elif current_tab == 1:
        settings.set('Processing', 'secondary_processchannel', True)
        settings.set('Processing', 'tertiary_processchannel', False)
        settings.set('General', 'rendering',
                     dict([('secondary_contours_%s' % x,
                            {sec_id: {'raw': ('#FFFFFF', 1.0),
                                      'contours': [(x, SECONDARY_COLORS[x],
                                                    1, show_ids)]}})
                           for x in sec_regions]))
    else:
        # fixed casing: was 'secondary_processChannel', inconsistent with
        # the lowercase option name used in the other branches
        settings.set('Processing', 'secondary_processchannel', True)
        settings.set('Processing', 'tertiary_processchannel', True)
        # NOTE(review): tertiary contours are colored via SECONDARY_COLORS;
        # this looks like it should be TERTIARY_COLORS -- confirm before
        # changing
        settings.set('General', 'rendering',
                     dict([('tertiary_contours_%s' % x,
                            {tert_id: {'raw': ('#FFFFFF', 1.0),
                                       'contours': [(x, SECONDARY_COLORS[x],
                                                     1, show_ids)]}})
                           for x in tert_regions]))
    return settings
def _process_image(self, ): self.image_viewer.remove_objects() settings = BaseProcessorFrame.get_special_settings(self._settings) settings.set_section('General') settings.set2('constrain_positions', True) settings.set2('positions', self.coordinate.position) settings.set2('skip_finished', False) settings.set2('framerange', True) settings.set2('framerange_begin', self.coordinate.time) settings.set2('framerange_end', self.coordinate.time) settings.set_section('Processing') _classify_objects = self._show_objects_by == 'classification' settings.set2('primary_classification', _classify_objects) settings.set2('secondary_classification', _classify_objects) settings.set2('tertiary_classification', _classify_objects) settings.set2('merged_classification', _classify_objects) settings.set2('primary_featureextraction', _classify_objects) settings.set2('secondary_featureextraction', _classify_objects) settings.set2('objectdetection', self._detect_objects) settings.set2('tracking', False) settings.set('Output', 'hdf5_create_file', False) settings.set('General', 'rendering', {}) settings.set('General', 'rendering_class', {}) nchannels = len(self._imagecontainer.channels) # XXX channel mapping unclear # processing channel <--> color channel # i.e problems if 2 processing channels have the same color if nchannels == 2: settings.set('General', 'process_secondary', True) elif nchannels >= 3: settings.set('General', 'process_secondary', True) settings.set('General', 'process_tertiary', True) settings.set('General', 'rendering', {}) analyzer = AnalyzerBrowser(self.coordinate.plate, settings, self._imagecontainer) res = None try: QApplication.setOverrideCursor(QCursor(Qt.WaitCursor)) res = analyzer() self.render_browser(res) except Exception, e: import traceback traceback.print_exc() QMessageBox.critical(self, "Error", str(e)) raise
def _process_image(self, ): self.image_viewer.remove_objects() settings = BaseProcessorFrame.get_special_settings(self._settings) settings.set_section('General') settings.set2('constrain_positions', True) settings.set2('positions', self.coordinate.position) settings.set2('skip_finished', False) settings.set2('framerange', True) settings.set2('framerange_begin', self.coordinate.time) settings.set2('framerange_end', self.coordinate.time) settings.set_section('Processing') _classify_objects = self._show_objects_by == 'classification' settings.set2('primary_classification', _classify_objects ) settings.set2('secondary_classification', _classify_objects) settings.set2('tertiary_classification', _classify_objects) settings.set2('merged_classification', _classify_objects) settings.set2('primary_featureextraction', _classify_objects) settings.set2('secondary_featureextraction', _classify_objects) settings.set2('objectdetection', self._detect_objects) settings.set2('tracking', False) settings.set('Output', 'hdf5_create_file', False) settings.set('General', 'rendering', {}) settings.set('General', 'rendering_class', {}) nchannels = len(self._imagecontainer.channels) # XXX channel mapping unclear # processing channel <--> color channel # i.e problems if 2 processing channels have the same color if nchannels == 2: settings.set('General', 'process_secondary', True) elif nchannels >= 3: settings.set('General', 'process_secondary', True) settings.set('General', 'process_tertiary', True) settings.set('General', 'rendering', {}) analyzer = AnalyzerBrowser(self.coordinate.plate, settings, self._imagecontainer) res = None try: QApplication.setOverrideCursor(QCursor(Qt.WaitCursor)) res = analyzer() self.render_browser(res) except Exception, e: import traceback traceback.print_exc() QMessageBox.critical(self, "Error", str(e)) raise
def _get_modified_settings(self, name, has_timelapse=True):
    """Configure a tracking + event-selection run on the primary channel."""
    settings = BaseProcessorFrame._get_modified_settings(self, name,
                                                         has_timelapse)
    # turn on tracking and event selection
    settings.set("Processing", "tracking", True)
    settings.set("Processing", "eventselection", True)
    # reset rendering, disable sample collection / hdf5 / galleries
    settings.set("General", "rendering_class", {})
    settings.set("General", "rendering", {})
    settings.set("Classification", "collectsamples", False)
    settings.set("Output", "hdf5_create_file", False)
    settings.set("Output", "events_export_gallery_images", False)
    # only the primary channel takes part in event selection
    for option in ("secondary_featureextraction",
                   "secondary_classification",
                   "tertiary_featureextraction",
                   "tertiary_classification",
                   "merged_classification"):
        settings.set("Processing", option, False)
    for option in ("process_secondary", "process_tertiary",
                   "process_merged"):
        settings.set("General", option, False)

    show_ids = settings.get("Output", "rendering_contours_showids")
    show_ids_class = settings.get("Output", "rendering_class_showids")

    contour_rendering = {
        PrimaryChannel.NAME: {
            "raw": ("#FFFFFF", 1.0),
            "contours": {"primary": ("#FF0000", 1, show_ids)}}}
    class_rendering = {
        PrimaryChannel.NAME: {
            "raw": ("#FFFFFF", 1.0),
            "contours": [("primary", "class_label", 1, False),
                         ("primary", "#000000", 1, show_ids_class)]}}

    # primary channel setup plus live rendering per selection mode
    if settings.get("EventSelection", "unsupervised_event_selection"):
        settings.set("Processing", "primary_featureextraction", True)
        settings.set("Processing", "primary_classification", True)
        settings.set("Processing", "secondary_classification", False)
        settings.set("Processing", "tertiary_classification", False)
        settings.set("Processing", "merged_classification", False)
        settings.set("General", "rendering",
                     {"primary_contours": contour_rendering})
    elif settings.get("EventSelection", "supervised_event_selection"):
        settings.set("Processing", "primary_featureextraction", True)
        settings.set("Processing", "primary_classification", True)
        settings.set("General", "rendering_class",
                     {"primary_classification": class_rendering})
    return settings
def get_export_settings(cls, settings, has_timelapse=True):
    """Rebuild the 'General' rendering dictionaries used for image export.

    Starts from the frame's special settings copy, then registers one
    contour-rendering entry per processed channel/region, one class
    rendering entry per classified region, the merged-channel entries,
    and (for timelapse runs) raw images for gallery export.  Returns
    the modified settings object.
    """
    settings = BaseProcessorFrame.get_special_settings(
        settings, has_timelapse)
    settings.set('General', 'version', VERSION)
    # start from empty rendering tables; they are filled incrementally below
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})
    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')
    # set propertys of merged channel to the same as for Primary
    # unfortunately REGION_INFO is like a global variable
    d = {}
    for prefix in CH_PRIMARY + CH_OTHER:
        # contour rendering: primary always, the others only when the
        # channel is actually processed
        if prefix == CH_PRIMARY[0] \
                or settings.get('Processing', '%s_processchannel' % prefix):
            for x in reginfo.names[prefix]:
                d = {'%s_contours_%s' % (prefix, x):
                     {prefix.capitalize():
                      {'raw': ('#FFFFFF', 1.0),
                       'contours': [(x, reginfo.colors[x], 1, show_ids)]}}}
                settings.get('General', 'rendering').update(d)
        # class rendering: only for the region selected for classification
        # NOTE(review): 'prefix == CH_VIRTUAL[0]' can only ever match if
        # the virtual prefix is part of CH_PRIMARY + CH_OTHER -- verify
        if settings.get('Processing', '%s_classification' % prefix):
            for x in reginfo.names[prefix]:
                if x == settings.get('Classification',
                                     '%s_classification_regionname'
                                     % prefix) or \
                        prefix == CH_VIRTUAL[0]:
                    d = {'%s_classification_%s' % (prefix, x):
                         {prefix.capitalize():
                          {'raw': ('#FFFFFF', 1.0),
                           'contours': [(x, 'class_label', 1, False),
                                        (x, '#000000', 1,
                                         show_ids_class)]}}}
                    settings.get('General', 'rendering_class').update(d)
    # setup rendering properties for merged channel
    # want the same rendering properties as for the primary channel!
    if settings.get('Processing', 'merged_processchannel'):
        regions = cls._merged_regions(settings)
        d = {'merged_contours_%s' % '-'.join(regions):
             {"Merged": {'raw': ('#FFFFFF', 1.0),
                         'contours': [(regions, reginfo.colors["primary"],
                                       1, show_ids)]}}}
        settings.get("General", "rendering").update(d)
        if settings.get('Processing', 'merged_classification'):
            d = {'merged_classification_%s' % '-'.join(regions):
                 {"Merged": {'raw': ('#FFFFFF', 1.0),
                             'contours': [(regions, 'class_label', 1,
                                           False),
                                          (regions, '#000000', 1,
                                           show_ids_class)]}}}
            settings.get("General", "rendering_class").update(d)
    if has_timelapse:
        # generate raw images of selected channels (later used for gallery images)
        if settings.get('Output', 'events_export_gallery_images'):
            for prefix in CHANNEL_PREFIX:
                if prefix == 'primary' or settings.get(
                        'Processing', '%s_processchannel' % prefix):
                    settings.get('General', 'rendering').update(
                        {prefix: {prefix.capitalize():
                                  {'raw': ('#FFFFFF', 1.0)}}})
    return settings
def _process_image(self, ): self.image_viewer.remove_objects() settings = BaseProcessorFrame.get_special_settings(self._settings) settings.set_section('General') settings.set2('constrain_positions', True) settings.set2('positions', self.coordinate.position) settings.set2('redofailedonly', False) settings.set2('framerange', True) settings.set2('framerange_begin', self.coordinate.time) settings.set2('framerange_end', self.coordinate.time) settings.set_section('Processing') _classify_objects = self._show_objects_by == 'classification' settings.set2('primary_classification', _classify_objects ) settings.set2('secondary_classification', _classify_objects) settings.set2('tertiary_classification', _classify_objects) settings.set2('merged_classification', _classify_objects) settings.set2('primary_featureextraction', _classify_objects) settings.set2('secondary_featureextraction', _classify_objects) settings.set2('objectdetection', self._detect_objects) settings.set2('tracking', False) settings.set_section('Output') settings.set2('rendering_contours_discwrite', False) settings.set2('rendering_class_discwrite', False) settings.set2('export_object_counts', False) settings.set2('export_object_details', False) settings.set2('export_track_data', False) settings.set2('hdf5_create_file', False) settings.set_section('Classification') settings.set2('collectsamples', False) settings.set('General', 'rendering', {}) settings.set('General', 'rendering_class', {}) settings.set('Output', 'events_export_gallery_images', False) # turn of output: settings.set('Output', 'export_object_counts', False) settings.set('Output', 'export_object_details', False) settings.set('Output', 'export_file_names', False) settings.set('Output', 'events_export_gallery_images', False) settings.set('Output', 'export_track_data', False) settings.set('Output', 'export_tracking_as_dot', False) nchannels = len(self._imagecontainer.channels) # XXX channel mapping unclear # processing channel <--> color channel # i.e 
problems if 2 processing channels have the same color if nchannels == 2: settings.set('General', 'process_secondary', True) elif nchannels >= 3: settings.set('General', 'process_secondary', True) settings.set('General', 'process_tertiary', True) settings.set('General', 'rendering', {}) analyzer = AnalyzerBrowser(self.coordinate.plate, settings, self._imagecontainer) res = None try: QApplication.setOverrideCursor(QCursor(Qt.WaitCursor)) res = analyzer.processPositions() self.render_browser(res) except Exception, e: import traceback from cecog.gui.util import exception traceback.print_exc() exception(self, str(e)) raise
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings for classifier sample collection or testing.

    The active tab selects the channel (primary/secondary/tertiary/
    merged).  For TESTING the classifier is applied and class rendering
    enabled; otherwise samples are collected.
    """
    settings = BaseProcessorFrame._get_modified_settings(
        self, name, has_timelapse)
    settings.set('Processing', 'primary_classification', False)
    settings.set('Processing', 'secondary_classification', False)
    settings.set('Processing', 'tracking', False)
    settings.set('Classification', 'collectsamples', False)
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})
    settings.set('Output', 'events_export_gallery_images', False)
    settings.set('Output', 'hdf5_create_file', False)
    settings.set('Output', 'rendering_channel_gallery', False)
    current_tab = self._tab.current_index
    if current_tab == 0:
        prefix = 'primary'
        settings.set('Processing', 'primary_featureextraction', True)
        settings.set('Processing', 'secondary_featureextraction', False)
        settings.set('Processing', 'tertiary_featureextraction', False)
        settings.set('General', 'process_secondary', False)
        settings.set('General', 'process_tertiary', False)
        settings.set('General', 'process_merged', False)
        rdn = {"%s_%s" % (prefix, settings.get(
            "Classification",
            "%s_classification_regionname" % prefix)): {}}
    elif current_tab == 1:
        prefix = 'secondary'
        settings.set('Processing', 'primary_featureextraction', False)
        settings.set('Processing', 'secondary_featureextraction', True)
        settings.set('General', 'process_secondary', True)
        settings.set('Processing', 'tertiary_featureextraction', False)
        settings.set('General', 'process_tertiary', False)
        settings.set('General', 'process_merged', False)
        # to setup the rendering of the image currently processed
        rdn = {"%s_%s" % (prefix, settings.get(
            "Classification",
            "%s_classification_regionname" % prefix)): {}}
    elif current_tab == 2:
        prefix = 'tertiary'
        # (removed unused 'seg_region' lookup)
        settings.set('Processing', 'primary_featureextraction', False)
        settings.set('Processing', 'secondary_featureextraction', False)
        settings.set('General', 'process_secondary', True)
        settings.set('Processing', 'tertiary_featureextraction', True)
        settings.set('General', 'process_tertiary', True)
        settings.set('General', 'process_merged', False)
        rdn = {"%s_%s" % (prefix, settings.get(
            "Classification",
            "%s_classification_regionname" % prefix)): {}}
    else:
        # checkboxes in merged channel tab
        pch = settings.get('Classification', 'merge_primary')
        sch = settings.get('Classification', 'merge_secondary')
        tch = settings.get('Classification', 'merge_tertiary')
        settings.set('Processing', 'primary_featureextraction', pch)
        settings.set('Processing', 'secondary_featureextraction', sch)
        settings.set('General', 'process_secondary', sch)
        settings.set('Processing', 'tertiary_featureextraction', tch)
        settings.set('General', 'process_tertiary', tch)
        settings.set('General', 'process_merged', True)
        prefix = 'merged'
        rdn = {}
        for pfx in (CH_PRIMARY + CH_OTHER):
            if settings.get("Classification", "merge_%s" % pfx):
                rdn["%s_%s" % (pfx, settings.get(
                    "Classification", "merged_%s_region" % pfx))] = {}
    settings.set('Classification', 'collectsamples_prefix', prefix)
    if name == self.TESTING:
        rdn = dict()
        settings.set('Processing', '%s_classification' % prefix, True)
        settings.set('General', 'rendering_class',
                     self._class_rendering_params(prefix, settings))
    else:
        settings.set('Classification', 'collectsamples', True)
        settings.set('General', 'positions', '')
    settings.set('General', 'rendering', rdn)
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings for classifier training (sample collection) or
    testing, scoped to the channel of the currently selected tab.

    For PROCESS_TESTING the classifier runs and class rendering is set
    up; in every other mode samples are collected instead.
    """
    settings = BaseProcessorFrame._get_modified_settings(
        self, name, has_timelapse)
    settings.set('Processing', 'primary_classification', False)
    settings.set('Processing', 'secondary_classification', False)
    settings.set('Processing', 'tracking', False)
    settings.set('Classification', 'collectsamples', False)
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})
    settings.set('Output', 'events_export_gallery_images', False)
    settings.set('Output', 'hdf5_create_file', False)
    settings.set('Output', 'rendering_channel_gallery', False)
    current_tab = self._tab.current_index
    if current_tab == 0:
        prefix = 'primary'
        settings.set('Processing', 'primary_featureextraction', True)
        settings.set('Processing', 'secondary_featureextraction', False)
        settings.set('Processing', 'tertiary_featureextraction', False)
        settings.set('General', 'process_secondary', False)
        settings.set('General', 'process_tertiary', False)
        settings.set('General', 'process_merged', False)
        rdn = {
            "%s_%s" % (prefix, settings.get(
                "Classification",
                "%s_classification_regionname" % prefix)): {}
        }
    elif current_tab == 1:
        prefix = 'secondary'
        settings.set('Processing', 'primary_featureextraction', False)
        settings.set('Processing', 'secondary_featureextraction', True)
        settings.set('General', 'process_secondary', True)
        settings.set('Processing', 'tertiary_featureextraction', False)
        settings.set('General', 'process_tertiary', False)
        settings.set('General', 'process_merged', False)
        # to setup the rendering of the image currently processed
        rdn = {
            "%s_%s" % (prefix, settings.get(
                "Classification",
                "%s_classification_regionname" % prefix)): {}
        }
    elif current_tab == 2:
        prefix = 'tertiary'
        # (removed unused 'seg_region' lookup)
        settings.set('Processing', 'primary_featureextraction', False)
        settings.set('Processing', 'secondary_featureextraction', False)
        settings.set('General', 'process_secondary', True)
        settings.set('Processing', 'tertiary_featureextraction', True)
        settings.set('General', 'process_tertiary', True)
        settings.set('General', 'process_merged', False)
        rdn = {
            "%s_%s" % (prefix, settings.get(
                "Classification",
                "%s_classification_regionname" % prefix)): {}
        }
    else:
        # checkboxes in merged channel tab
        pch = settings.get('Classification', 'merge_primary')
        sch = settings.get('Classification', 'merge_secondary')
        tch = settings.get('Classification', 'merge_tertiary')
        settings.set('Processing', 'primary_featureextraction', pch)
        settings.set('Processing', 'secondary_featureextraction', sch)
        settings.set('General', 'process_secondary', sch)
        settings.set('Processing', 'tertiary_featureextraction', tch)
        settings.set('General', 'process_tertiary', tch)
        settings.set('General', 'process_merged', True)
        prefix = 'merged'
        rdn = {}
        for pfx in (CH_PRIMARY + CH_OTHER):
            if settings.get("Classification", "merge_%s" % pfx):
                rdn["%s_%s" % (pfx, settings.get(
                    "Classification", "merged_%s_region" % pfx))] = {}
    settings.set('Classification', 'collectsamples_prefix', prefix)
    if name == self.PROCESS_TESTING:
        rdn = dict()
        settings.set('Processing', '%s_classification' % prefix, True)
        settings.set('General', 'rendering_class',
                     self._class_rendering_params(prefix, settings))
    else:
        settings.set('Classification', 'collectsamples', True)
        settings.set('General', 'positions', '')
    settings.set('General', 'rendering', rdn)
    return settings
def _process_image(self, ): self.image_viewer.remove_objects() settings = BaseProcessorFrame.get_special_settings(self._settings) settings.set_section('General') settings.set2('constrain_positions', True) settings.set2('positions', self.coordinate.position) settings.set2('redofailedonly', False) settings.set2('framerange', True) settings.set2('framerange_begin', self.coordinate.time) settings.set2('framerange_end', self.coordinate.time) settings.set_section('Processing') _classify_objects = self._show_objects_by == 'classification' settings.set2('primary_classification', _classify_objects) settings.set2('secondary_classification', _classify_objects) settings.set2('tertiary_classification', _classify_objects) settings.set2('merged_classification', _classify_objects) settings.set2('primary_featureextraction', _classify_objects) settings.set2('secondary_featureextraction', _classify_objects) settings.set2('objectdetection', self._detect_objects) settings.set2('tracking', False) settings.set_section('Output') settings.set2('rendering_contours_discwrite', False) settings.set2('rendering_class_discwrite', False) settings.set2('export_object_counts', False) settings.set2('export_object_details', False) settings.set2('export_track_data', False) settings.set2('hdf5_create_file', False) settings.set_section('Classification') settings.set2('collectsamples', False) settings.set('General', 'rendering', {}) settings.set('General', 'rendering_class', {}) settings.set('Output', 'events_export_gallery_images', False) # turn of output: settings.set('Output', 'export_object_counts', False) settings.set('Output', 'export_object_details', False) settings.set('Output', 'export_file_names', False) settings.set('Output', 'events_export_gallery_images', False) settings.set('Output', 'export_track_data', False) settings.set('Output', 'export_tracking_as_dot', False) nchannels = len(self._imagecontainer.channels) # XXX channel mapping unclear # processing channel <--> color channel # i.e 
problems if 2 processing channels have the same color if nchannels == 2: settings.set('General', 'process_secondary', True) elif nchannels >= 3: settings.set('General', 'process_secondary', True) settings.set('General', 'process_tertiary', True) settings.set('General', 'rendering', {}) analyzer = AnalyzerBrowser(self.coordinate.plate, settings, self._imagecontainer) res = None try: QApplication.setOverrideCursor(QCursor(Qt.WaitCursor)) res = analyzer.processPositions() self.render_browser(res) except Exception, e: import traceback from cecog.gui.util import exception traceback.print_exc() exception(self, str(e)) raise
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings adjusted for a tracking / event-selection run.

    For ``PROCESS_TRACKING`` only the primary channel is processed and
    its contours (tracking region) are rendered in red; for any other
    process name classification and trajectory synchronization are
    enabled and class-label rendering is configured per channel.
    """
    settings = BaseProcessorFrame._get_modified_settings(
        self, name, has_timelapse)

    settings.set_section('Processing')
    settings.set2('tracking', True)
    settings.set2('tracking_synchronize_trajectories', False)

    settings.set_section('Tracking')
    region_name = settings.get2('tracking_regionname')

    # start from empty rendering tables; they are filled below
    settings.set_section('General')
    settings.set2('rendering_class', {})
    settings.set2('rendering', {})

    settings.set_section('Classification')
    settings.set2('collectsamples', False)
    settings.set('Output', 'hdf5_create_file', False)

    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')

    if name == self.PROCESS_TRACKING:
        # tracking only invokes the primary channel
        settings.set_section('Processing')
        settings.set2('primary_classification', False)
        settings.set2('primary_featureextraction', True)
        settings.set2('secondary_processChannel', False)
        settings.set2('secondary_featureextraction', False)
        settings.set2('secondary_classification', False)
        settings.set2('tertiary_processChannel', False)
        settings.set2('tertiary_featureextraction', False)
        settings.set2('tertiary_classification', False)
        settings.set2('merged_processChannel', False)
        settings.set2('merged_classification', False)
        settings.set('Output', 'events_export_gallery_images', False)
        settings.set(
            'General', 'rendering',
            {'primary_contours':
                 {PrimaryChannel.NAME:
                      {'raw': ('#FFFFFF', 1.0),
                       'contours':
                           {region_name: ('#FF0000', 1, show_ids)}}}})
    else:
        settings.set_section('Processing')
        settings.set2('primary_featureextraction', True)
        settings.set2('primary_classification', True)
        settings.set2('tracking_synchronize_trajectories', True)
        cl_rnd = {
            'primary_classification':
                {PrimaryChannel.NAME:
                     {'raw': ('#FFFFFF', 1.0),
                      'contours': [('primary', 'class_label', 1, False),
                                   ('primary', '#000000', 1,
                                    show_ids_class)]}},
        }
        settings.set('General', 'rendering_class', cl_rnd)
        # class-color rendering for the remaining channels
        # (presumably _channel_render_settings skips inactive channels
        # itself -- confirm)
        self._channel_render_settings(settings, SecondaryChannel.NAME,
                                      show_ids_class)
        self._channel_render_settings(settings, TertiaryChannel.NAME,
                                      show_ids_class)
        self._channel_render_settings(settings, MergedChannel.NAME,
                                      show_ids_class)
    return settings
def get_export_settings(self, settings, has_timelapse=True):
    """Fill the rendering tables of *settings* for a full export run.

    Contour rendering is configured for every region of each active
    channel; class-label rendering only for the region the channel's
    classifier was trained on.  The merged channel reuses the color of
    the first primary region.
    """
    settings = BaseProcessorFrame.get_special_settings(settings,
                                                       has_timelapse)
    settings.set('General', 'version', version)
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})
    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')

    region_info = self.plugin_mgr.region_info
    # set properties of merged channel to the same as for Primary
    for prefix in CH_PRIMARY + CH_OTHER:
        if prefix == CH_PRIMARY[0] or \
                settings.get('General', 'process_%s' % prefix):
            d = {}
            # contour rendering: one entry per region of this channel
            for x in region_info.names[prefix]:
                d = {'%s_contours_%s' % (prefix, x):
                         {prefix.capitalize():
                              {'raw': ('#FFFFFF', 1.0),
                               'contours': [(x, region_info.colors[x],
                                             1, show_ids)]}}}
                settings.get('General', 'rendering').update(d)
            # class-label rendering: only the trained region (virtual
            # channels match every region)
            d = {}
            if (settings.get('General', 'process_%s' % prefix) and
                    settings.get('Processing',
                                 '%s_classification' % prefix)):
                for x in region_info.names[prefix]:
                    if x == settings.get(
                            'Classification',
                            '%s_classification_regionname' % prefix) or \
                            prefix == CH_VIRTUAL[0]:
                        d = {'%s_classification_%s' % (prefix, x):
                                 {prefix.capitalize():
                                      {'raw': ('#FFFFFF', 1.0),
                                       'contours':
                                           [(x, 'class_label', 1, False),
                                            (x, '#000000', 1,
                                             show_ids_class)]}}}
                        # NOTE(review): settings is *called* rather than
                        # settings.get() -- presumably __call__ is an
                        # alias; confirm against ConfigSettings.
                        if settings('EventSelection',
                                    'supervised_event_selection'):
                            settings.get('General',
                                         'rendering_class').update(d)

    # setup rendering properties for merged channel
    # want the same rendering properties as for the primary channel!
    if settings.get('General', 'process_merged'):
        # colors are defined per region (not per channel); use the color
        # of the first primary region, or magenta when there is none
        regions_primary = region_info.names[CH_PRIMARY[0]]
        if len(regions_primary) == 0:
            default_color = '#FF00FF'
        else:
            default_color = region_info.colors[regions_primary[0]]

        regions = self._merged_regions(settings)
        d = {'merged_contours_%s' % str(regions):
                 {"Merged": {'raw': ('#FFFFFF', 1.0),
                             'contours': [(regions, default_color, 1,
                                           show_ids)]}}}
        settings.get("General", "rendering").update(d)
        if settings.get('Processing', 'merged_classification'):
            d = {'merged_classification_%s' % str(regions):
                     {"Merged": {'raw': ('#FFFFFF', 1.0),
                                 'contours':
                                     [(regions, 'class_label', 1, False),
                                      (regions, '#000000', 1,
                                       show_ids_class)]}}}
            settings.get("General", "rendering_class").update(d)

    if has_timelapse:
        # raw images of selected channels (later used for gallery images)
        if settings.get('Output', 'events_export_gallery_images'):
            for prefix in CHANNEL_PREFIX:
                if prefix == 'primary' or \
                        settings.get('General', 'process_%s' % prefix):
                    settings.get('General', 'rendering').update(
                        {prefix: {prefix.capitalize():
                                      {'raw': ('#FFFFFF', 1.0)}}})
    return settings
def get_export_settings(self, settings, has_timelapse=True):
    """Fill the rendering tables of *settings* for an export run.

    Variant without object-ID overlays: all contour/label entries are
    created with the show-ids flag hard-wired to ``False``.
    """
    settings = BaseProcessorFrame.get_special_settings(
        settings, has_timelapse)
    settings.set('General', 'version', version)
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})

    region_info = self.plugin_mgr.region_info
    # set properties of merged channel to the same as for Primary
    for prefix in CH_PRIMARY + CH_OTHER:
        if prefix == CH_PRIMARY[0] or \
                settings.get('General', 'process_%s' % prefix):
            d = {}
            # contour rendering: one entry per region of this channel
            for x in region_info.names[prefix]:
                d = {'%s_contours_%s' % (prefix, x):
                         {prefix.capitalize():
                              {'raw': ('#FFFFFF', 1.0),
                               'contours': [(x, region_info.colors[x],
                                             1, False)]}}}
                settings.get('General', 'rendering').update(d)
            # class-label rendering: only the trained region (virtual
            # channels match every region)
            d = {}
            if (settings.get('General', 'process_%s' % prefix) and
                    settings.get('Processing',
                                 '%s_classification' % prefix)):
                for x in region_info.names[prefix]:
                    if x == settings.get(
                            'Classification',
                            '%s_classification_regionname' % prefix) or \
                            prefix == CH_VIRTUAL[0]:
                        d = {'%s_classification_%s' % (prefix, x):
                                 {prefix.capitalize():
                                      {'raw': ('#FFFFFF', 1.0),
                                       'contours':
                                           [(x, 'class_label', 1, False),
                                            (x, '#000000', 1, False)]}}}
                        # NOTE(review): settings is *called* rather than
                        # settings.get() -- presumably __call__ is an
                        # alias; confirm against ConfigSettings.
                        if settings('EventSelection',
                                    'supervised_event_selection'):
                            settings.get('General',
                                         'rendering_class').update(d)

    # setup rendering properties for merged channel
    # want the same rendering properties as for the primary channel!
    if settings.get('General', 'process_merged'):
        # colors are defined per region (not per channel); use the color
        # of the first primary region, or magenta when there is none
        regions_primary = region_info.names[CH_PRIMARY[0]]
        if len(regions_primary) == 0:
            default_color = '#FF00FF'
        else:
            default_color = region_info.colors[regions_primary[0]]

        regions = self._merged_regions(settings)
        d = {'merged_contours_%s' % str(regions):
                 {"Merged": {'raw': ('#FFFFFF', 1.0),
                             'contours': [(regions, default_color,
                                           1, False)]}}}
        settings.get("General", "rendering").update(d)
        if settings.get('Processing', 'merged_classification'):
            d = {'merged_classification_%s' % str(regions):
                     {"Merged": {'raw': ('#FFFFFF', 1.0),
                                 'contours':
                                     [(regions, 'class_label', 1, False),
                                      (regions, '#000000', 1, False)]}}}
            settings.get("General", "rendering_class").update(d)
    return settings
def get_export_settings(cls, settings, has_timelapse=True):
    """Configure contour and class-label rendering for an export run.

    Primary contours are always rendered; secondary/tertiary channels
    only when their process-channel flag is enabled.  With timelapse and
    gallery-image export, raw images of the active channels are added.
    """
    settings = BaseProcessorFrame.get_special_settings(settings,
                                                       has_timelapse)
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})

    additional_prefixes = [SecondaryChannel.PREFIX,
                           TertiaryChannel.PREFIX]

    settings.set_section('Classification')
    # classifier region per extra channel
    sec_class_regions = dict(
        [(prefix,
          settings.get2('%s_classification_regionname' % prefix))
         for prefix in additional_prefixes])

    settings.set_section('ObjectDetection')
    prim_id = PrimaryChannel.NAME
    sec_ids = dict([(x.PREFIX, x.NAME)
                    for x in [SecondaryChannel, TertiaryChannel]])
    # regions whose detection option is enabled, per extra channel
    sec_regions = dict(
        [(prefix, [v for k, v in regions.iteritems()
                   if settings.get2(k)])
         for prefix, regions in [(SecondaryChannel.PREFIX,
                                  SECONDARY_REGIONS),
                                 (TertiaryChannel.PREFIX,
                                  TERTIARY_REGIONS)]])

    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')

    settings.get('General', 'rendering').update(
        {'primary_contours':
             {prim_id: {'raw': ('#FFFFFF', 1.0),
                        'contours': {'primary':
                                         ('#FF0000', 1, show_ids)}}}})

    settings.set_section('Processing')
    if settings.get2('primary_classification'):
        settings.get('General', 'rendering_class').update(
            {'primary_classification':
                 {prim_id:
                      {'raw': ('#FFFFFF', 1.0),
                       'contours': [('primary', 'class_label', 1, False),
                                    ('primary', '#000000', 1,
                                     show_ids_class)]}}})

    for prefix in additional_prefixes:
        if settings.get2('%s_processchannel' % prefix):
            sec_id = sec_ids[prefix]
            settings.get('General', 'rendering').update(
                dict([('%s_contours_%s' % (prefix, x),
                       {sec_id:
                            {'raw': ('#FFFFFF', 1.0),
                             'contours': [(x, SECONDARY_COLORS[x],
                                           1, show_ids)]}})
                      for x in sec_regions[prefix]]))
        if settings.get2('%s_classification' % prefix):
            sec_id = sec_ids[prefix]
            sec_region = sec_class_regions[prefix]
            settings.get('General', 'rendering_class').update(
                {'%s_classification_%s' % (prefix, sec_region):
                     {sec_id:
                          {'raw': ('#FFFFFF', 1.0),
                           'contours':
                               [(sec_region, 'class_label', 1, False),
                                (sec_region, '#000000', 1,
                                 show_ids_class)]}}})

    if has_timelapse:
        # generate raw images of selected channels (later used for
        # gallery images)
        if settings.get('Output', 'events_export_gallery_images'):
            settings.get('General', 'rendering').update(
                {'primary': {prim_id: {'raw': ('#FFFFFF', 1.0)}}})
            for prefix in additional_prefixes:
                if settings.get2('%s_processchannel' % prefix):
                    sec_id = sec_ids[prefix]
                    settings.get('General', 'rendering').update(
                        {prefix: {sec_id: {'raw': ('#FFFFFF', 1.0)}}})
    return settings
def get_export_settings(cls, settings, has_timelapse=True):
    """Configure rendering tables for an export run (reginfo variant).

    Contours are rendered for every region of each processed channel,
    class labels only for the trained region; the merged channel borrows
    the primary channel's color.
    """
    settings = BaseProcessorFrame.get_special_settings(settings,
                                                       has_timelapse)
    settings.set('General', 'version', VERSION)
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})
    show_ids = settings.get('Output', 'rendering_contours_showids')
    show_ids_class = settings.get('Output', 'rendering_class_showids')

    # set properties of merged channel to the same as for Primary;
    # unfortunately REGION_INFO is like a global variable
    d = {}
    for prefix in CH_PRIMARY + CH_OTHER:
        if prefix == CH_PRIMARY[0] or \
                settings.get('Processing', '%s_processchannel' % prefix):
            # contour rendering: one entry per region of this channel
            for x in reginfo.names[prefix]:
                d = {'%s_contours_%s' % (prefix, x):
                         {prefix.capitalize():
                              {'raw': ('#FFFFFF', 1.0),
                               'contours': [(x, reginfo.colors[x],
                                             1, show_ids)]}}}
                settings.get('General', 'rendering').update(d)
            # class-label rendering: only the trained region (virtual
            # channels match every region)
            if settings.get('Processing', '%s_classification' % prefix):
                for x in reginfo.names[prefix]:
                    if x == settings.get(
                            'Classification',
                            '%s_classification_regionname' % prefix) or \
                            prefix == CH_VIRTUAL[0]:
                        d = {'%s_classification_%s' % (prefix, x):
                                 {prefix.capitalize():
                                      {'raw': ('#FFFFFF', 1.0),
                                       'contours':
                                           [(x, 'class_label', 1, False),
                                            (x, '#000000', 1,
                                             show_ids_class)]}}}
                        settings.get('General',
                                     'rendering_class').update(d)

    # setup rendering properties for merged channel
    # want the same rendering properties as for the primary channel!
    if settings.get('Processing', 'merged_processchannel'):
        regions = cls._merged_regions(settings)
        d = {'merged_contours_%s' % '-'.join(regions):
                 {"Merged": {'raw': ('#FFFFFF', 1.0),
                             'contours': [(regions,
                                           reginfo.colors["primary"],
                                           1, show_ids)]}}}
        settings.get("General", "rendering").update(d)
        if settings.get('Processing', 'merged_classification'):
            d = {'merged_classification_%s' % '-'.join(regions):
                     {"Merged": {'raw': ('#FFFFFF', 1.0),
                                 'contours':
                                     [(regions, 'class_label', 1, False),
                                      (regions, '#000000', 1,
                                       show_ids_class)]}}}
            settings.get("General", "rendering_class").update(d)

    if has_timelapse:
        # generate raw images of selected channels (later used for
        # gallery images)
        if settings.get('Output', 'events_export_gallery_images'):
            for prefix in CHANNEL_PREFIX:
                if prefix == 'primary' or \
                        settings.get('Processing',
                                     '%s_processchannel' % prefix):
                    settings.get('General', 'rendering').update(
                        {prefix: {prefix.capitalize():
                                      {'raw': ('#FFFFFF', 1.0)}}})
    return settings
def _get_modified_settings(self, name, has_timelapse=True):
    """Return settings tuned for two-channel classifier training/testing.

    Tab 0 handles the primary channel, any other tab the secondary one.
    ``PROCESS_TESTING`` enables classification with class-label
    rendering; otherwise sample collection is switched on.
    """
    settings = BaseProcessorFrame._get_modified_settings(
        self, name, has_timelapse)

    settings.set_section('ObjectDetection')
    prim_id = PrimaryChannel.NAME
    sec_id = SecondaryChannel.NAME
    #sec_regions = settings.get2('secondary_regions')

    # baseline: no classification, no tracking, no sample collection
    settings.set_section('Processing')
    settings.set2('primary_classification', False)
    settings.set2('secondary_classification', False)
    settings.set2('tracking', False)
    settings.set_section('Classification')
    settings.set2('collectsamples', False)
    settings.set('General', 'rendering', {})
    settings.set('General', 'rendering_class', {})
    settings.set('Output', 'events_export_gallery_images', False)
    show_ids_class = settings.get('Output', 'rendering_class_showids')

    current_tab = self._tab.currentIndex()
    if current_tab == 0:
        # primary-channel tab
        settings.set('Processing', 'primary_featureextraction', True)
        settings.set('Processing', 'secondary_featureextraction', False)
        settings.set_section('Classification')
        settings.set2('collectsamples_prefix', 'primary')
        settings.set('Processing', 'secondary_processChannel', False)
        if name == self.PROCESS_TESTING:
            settings.set('Processing', 'primary_classification', True)
            settings.set(
                'General', 'rendering_class',
                {'primary_classification':
                     {prim_id:
                          {'raw': ('#FFFFFF', 1.0),
                           'contours':
                               [('primary', 'class_label', 1, False),
                                ('primary', '#000000', 1,
                                 show_ids_class)]}}})
        else:
            settings.set2('collectsamples', True)
            settings.set('General', 'positions', '')
            settings.set('General', 'framerange_begin', 0)
            settings.set('General', 'framerange_end', 0)
    else:
        # secondary-channel tab
        settings.set('Processing', 'primary_featureextraction', False)
        settings.set('Processing', 'secondary_featureextraction', True)
        settings.set_section('Classification')
        sec_region = settings.get2('secondary_classification_regionname')
        settings.set2('collectsamples_prefix', 'secondary')
        # enable exactly the detection region the classifier uses
        for k, v in SECONDARY_REGIONS.iteritems():
            settings.set('ObjectDetection', k, v == sec_region)
        settings.set('Processing', 'secondary_processchannel', True)
        if name == self.PROCESS_TESTING:
            settings.set('Processing', 'secondary_classification', True)
            settings.set(
                'General', 'rendering_class',
                {'secondary_classification_%s' % sec_region:
                     {sec_id:
                          {'raw': ('#FFFFFF', 1.0),
                           'contours':
                               [(sec_region, 'class_label', 1, False),
                                (sec_region, '#000000', 1,
                                 show_ids_class)]}}})
        else:
            settings.set2('collectsamples', True)
            settings.set('General', 'positions', '')
            settings.set('General', 'framerange_begin', 0)
            settings.set('General', 'framerange_end', 0)
    return settings