def _on_open_classifier(self):
    """Let the user pick a classifier directory and load its class
    definitions and annotations.

    On an import error the classifier is reset to a fresh one; in every
    case the browser's current plate is restored afterwards, since the
    annotation import may switch plates.
    """
    path = self._learner.clf_dir
    result = QFileDialog.getExistingDirectory(self, 'Open classifier directory',
                                              os.path.abspath(path))
    if result:
        learner = self._load_classifier(result)
        # fixed: was "not learner is None"
        if learner is not None:
            self._learner = learner
            self._update_class_definition_table()
            # deactivate and clear current object annotations before import
            self._activate_objects_for_image(False, clear=True)
            path2 = learner.annotations_dir
            try:
                has_invalid = self._annotations.import_from_xml(
                    path2, learner.class_names, self._imagecontainer)
            except Exception:
                # was a bare "except:" — narrowed so KeyboardInterrupt /
                # SystemExit are no longer swallowed
                exception(self, "Problems loading annotation data...")
                self._learner = self._init_new_classifier()
            else:
                self._activate_objects_for_image(True)
                self._update_class_table()
                if self._class_table.rowCount() > 0:
                    self._class_table.setCurrentCell(0, self.COLUMN_CLASS_NAME)
                else:
                    self._current_class = None
                information(self, "Classifier successfully loaded",
                            "Class definitions and annotations "
                            "successfully loaded from '%s'." % result)
            finally:
                # restore the plate the browser was showing before the import
                coord = self.browser.get_coordinate()
                self._imagecontainer.set_plate(coord.plate)
def _on_process_finished(self):
    """Advance to the next queued process item or finalize the run.

    When more items are queued and neither abort nor error occurred, the
    next item is started; otherwise controls are re-enabled and a
    section-specific summary message is shown.
    """
    if (self._process_items is not None and
            self._current_process_item + 1 < len(self._process_items) and
            not self._is_abort and not self._has_error):
        self._current_process_item += 1
        self._on_process_start(self._current_process, start_again=True)
    else:
        self._is_running = False
        self._set_control_button_text(idx=0)
        self._toggle_control_buttons()
        self._toggle_tabs(True)
        # enable all section buttons of the main widget
        self.toggle_tabs.emit(self.get_name())
        if not self._is_abort and not self._has_error:
            # fallback prevents a NameError for sections not matched below
            msg = 'Process finished.'
            if self.SECTION_NAME == 'ObjectDetection':
                msg = 'Object detection successfully finished.'
            elif self.SECTION_NAME == 'Classification':
                if self._current_process == self.PROCESS_PICKING:
                    msg = 'Samples successfully picked.\n\n'\
                          'Please train the classifier now based on the '\
                          'newly picked samples.'
                    result_frame = self._get_result_frame(self._tab_name)
                    result_frame.load_classifier(check=False)
                    nr_removed = len(result_frame._learner.nan_features)
                    if nr_removed > 0:
                        msg += '\n\n%d features contained NA values and will be removed from training.' % nr_removed
                elif self._current_process == self.PROCESS_TRAINING:
                    # fixed: 'here''visually' rendered as "herevisually"
                    msg = 'Classifier successfully trained.\n\n'\
                          'You can test the classifier performance here '\
                          'visually or apply the classifier in the '\
                          'processing workflow.'
                elif self._current_process == self.PROCESS_TESTING:
                    msg = 'Classifier testing successfully finished.'
            elif self.SECTION_NAME == 'Tracking':
                if self._current_process == self.PROCESS_TRACKING:
                    msg = 'Tracking successfully finished.'
                elif self._current_process == self.PROCESS_SYNCING:
                    msg = 'Motif selection successfully finished.'
            elif self.SECTION_NAME == 'ErrorCorrection':
                msg = 'HMM error correction successfully finished.'
            elif self.SECTION_NAME == 'Processing':
                msg = 'Processing successfully finished.'
            elif self.SECTION_NAME == "PostProcessing":
                msg = 'Postprocessing successfully finished'
            information(self, 'Process finished', msg)
            status(msg)
        else:
            if self._is_abort:
                status('Process aborted by user.')
            elif self._has_error:
                status('Process aborted by error.')
        self._current_process = None
        self._process_items = None
def _on_saveas_classifier(self, path=None):
    """Save the current classifier (class definition + annotations).

    Parameters
    ----------
    path : str or None
        Target directory; if None the user is asked via a file dialog.

    Existing annotation XML files are moved into a timestamped backup
    subdirectory before the current annotations are exported.
    """
    learner = self._learner
    if path is None:
        path = os.path.expanduser("~")
        result = QFileDialog.getExistingDirectory(
            self, 'Save to classifier directory', os.path.abspath(path))
    else:
        result = path
    if result:
        if self._save_classifier(result):
            try:
                path2 = learner.annotations_dir
                filenames = os.listdir(path2)
                filenames = [os.path.join(path2, f) for f in filenames
                             if os.path.isfile(os.path.join(path2, f)) and
                             os.path.splitext(f)[1].lower() == '.xml']
                # move existing annotation XMLs into a timestamped backup dir
                fmt = time.strftime('_backup__%Y%m%d_%H%M%S')
                path_backup = os.path.join(path2, fmt)
                safe_mkdirs(path_backup)
                for filename in filenames:
                    shutil.copy2(filename, path_backup)
                    os.remove(filename)
                self._annotations.export_to_xml(path2, learner.class_labels,
                                                self._imagecontainer)
            except Exception:
                # was a bare "except:" — narrowed to real errors only
                exception(self, "Problems saving annotation data...")
            else:
                information(self, "Classifier successfully saved",
                            "Class definitions and annotations "
                            "successfully saved to '%s'." % result)
            finally:
                # exporting may switch plates; restore the browser's plate
                coord = self.browser.get_coordinate()
                self._imagecontainer.set_plate(coord.plate)
def _on_process_finished(self):
    """Advance to the next queued process item or finalize the run.

    When more items are queued and neither abort nor error occurred, the
    next item is started; otherwise controls are re-enabled and a
    section-specific summary message is shown.
    """
    if (self._process_items is not None and
            self._current_process_item + 1 < len(self._process_items) and
            not self._is_abort and not self._has_error):
        self._current_process_item += 1
        self._on_process_start(self._current_process, start_again=True)
    else:
        self._is_running = False
        self._set_control_button_text(idx=0)
        self._toggle_control_buttons()
        self._toggle_tabs(True)
        # enable all section buttons of the main widget
        self.toggle_tabs.emit(self.get_name())
        if not self._is_abort and not self._has_error:
            # fallback prevents a NameError for sections not matched below
            msg = 'Process finished.'
            if self.SECTION_NAME == 'ObjectDetection':
                msg = 'Object detection successfully finished.'
            elif self.SECTION_NAME == 'Classification':
                if self._current_process == self.PROCESS_PICKING:
                    msg = 'Samples successfully picked.\n\n'\
                          'Please train the classifier now based on the '\
                          'newly picked samples.'
                    result_frame = self._get_result_frame(self._tab_name)
                    result_frame.load_classifier(check=False)
                    nr_removed = len(result_frame._learner.nan_features)
                    if nr_removed > 0:
                        msg += '\n\n%d features contained NA values and will be removed from training.' % nr_removed
                elif self._current_process == self.PROCESS_TRAINING:
                    # fixed: 'here''visually' rendered as "herevisually"
                    msg = 'Classifier successfully trained.\n\n'\
                          'You can test the classifier performance here '\
                          'visually or apply the classifier in the '\
                          'processing workflow.'
                elif self._current_process == self.PROCESS_TESTING:
                    msg = 'Classifier testing successfully finished.'
            elif self.SECTION_NAME == 'Tracking':
                if self._current_process == self.PROCESS_TRACKING:
                    msg = 'Tracking successfully finished.'
                elif self._current_process == self.PROCESS_SYNCING:
                    msg = 'Motif selection successfully finished.'
            elif self.SECTION_NAME == 'ErrorCorrection':
                msg = 'HMM error correction successfully finished.'
            elif self.SECTION_NAME == 'Processing':
                msg = 'Processing successfully finished.'
            elif self.SECTION_NAME == "PostProcessing":
                msg = 'Postprocessing successfully finished'
            information(self, 'Process finished', msg)
            status(msg)
        else:
            if self._is_abort:
                status('Process aborted by user.')
            elif self._has_error:
                status('Process aborted by error.')
        self._current_process = None
        self._process_items = None
def detect_objects_toggled(self, state):
    """Handler for toggling object detection in the browser; refreshes
    the view when detection is switched on."""
    if not state:
        return
    # warn the user that cached HDF5 data bypasses parameter changes
    if self._settings.get('Output', 'hdf5_reuse'):
        information(
            self,
            'HDF5 reuse is enabled. Raw data and segmentation will be loaded from HDF5 files. Changes of normalization and segmentation parameters will have no effect in browser!'
        )
    self.on_refresh()
def _on_process_finished(self):
    """Advance to the next queued process item or finalize the run.

    Disconnects the image-update signal first; on completion a
    section-specific summary is shown via ``information`` and emitted on
    ``status_message``.
    """
    self._analyzer.image_ready.disconnect(self._on_update_image)
    if (self._process_items is not None and
            self._current_process_item + 1 < len(self._process_items) and
            not self._is_abort and not self._has_error):
        self._current_process_item += 1
        self._on_process_start(self._current_process, start_again=True)
    else:
        self._is_running = False
        self._set_control_button_text(idx=0)
        self._toggle_control_buttons()
        self._toggle_tabs(True)
        # enable all section buttons of the main widget
        self.toggle_tabs.emit(self.get_name())
        if not self._is_abort and not self._has_error:
            # fallback prevents a NameError for sections not matched below
            msg = 'Process finished.'
            if self.name == SECTION_NAME_OBJECTDETECTION:
                msg = 'Object detection successfully finished.'
            elif self.name == SECTION_NAME_CLASSIFICATION:
                if self._current_process == self.PROCESS_PICKING:
                    msg = 'Samples successfully picked.\n\n'\
                          'Please train the classifier now based on the '\
                          'newly picked samples.'
                    result_frame = self._get_result_frame(self._tab_name)
                    result_frame.load_classifier(check=False)
                    nr_removed = len(result_frame._learner.nan_features)
                    if nr_removed > 0:
                        msg += '\n\n%d features contained NA values and will be removed from training.' % nr_removed
                elif self._current_process == self.PROCESS_TRAINING:
                    # fixed: 'here''visually' rendered as "herevisually"
                    msg = 'Classifier successfully trained.\n\n'\
                          'You can test the classifier performance here '\
                          'visually or apply the classifier in the '\
                          'processing workflow.'
                elif self._current_process == self.PROCESS_TESTING:
                    msg = 'Classifier testing successfully finished.'
            elif self.name == SECTION_NAME_TRACKING:
                msg = 'Tracking successfully finished.'
            elif self.name == SECTION_NAME_EVENT_SELECTION:
                msg = 'event selection successfully finished.'
            elif self.name == SECTION_NAME_ERRORCORRECTION:
                msg = 'HMM error correction successfully finished.'
            elif self.name == SECTION_NAME_PROCESSING:
                msg = 'Processing successfully finished.'
            elif self.name == SECTION_NAME_POST_PROCESSING:
                msg = 'Postprocessing successfully finished'
            information(self, 'Process finished', msg)
            self.status_message.emit(msg)
        else:
            if self._is_abort:
                self.status_message.emit('Process aborted by user.')
            elif self._has_error:
                self.status_message.emit('Process aborted by error.')
        self._current_process = None
        self._process_items = None
def load_classifier(self, check=True):
    """Load the classifier configured for this channel and update the GUI.

    Parameters
    ----------
    check : bool
        If True, show an inspection dialog summarizing which classifier
        components (definition, annotations, ARFF, SVM model/range,
        samples) were found.
    """
    # resolve a per-channel setting key, e.g. 'primary_classification_envpath'
    _resolve = lambda x, y: self._settings.get(x, '%s_%s' % (self._channel, y))
    clfdir = CecogEnvironment.convert_package_path(
        _resolve('Classification', 'classification_envpath'))
    # XXX - where does the "." come
    if not isdir(clfdir) or clfdir == ".":
        return
    else:
        self._learner = CommonClassPredictor(
            clf_dir=clfdir, name=self._channel,
            channels={self._channel.title():
                      _resolve('Classification', 'classification_regionname')},
            color_channel=_resolve('ObjectDetection', 'channelid'))
        result = self._learner.check()
        if check:
            b = lambda x: 'Yes' if x else 'No'
            msg = 'Classifier path: %s\n' % result['path_env']
            msg += 'Found class definition: %s\n' % b(result['has_definition'])
            msg += 'Found annotations: %s\n' % b(result['has_path_annotations'])
            msg += 'Can you pick new samples? %s\n\n' % b(self.is_pick_samples())
            msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
            msg += 'Can you train a classifier? %s\n\n' % b(self.is_train_classifier())
            msg += 'Found SVM model: %s\n' % b(result['has_model'])
            msg += 'Found SVM range: %s\n' % b(result['has_range'])
            msg += 'Can you apply the classifier to images? %s\n\n' % b(self.is_apply_classifier())
            msg += 'Found samples: %s\n' % b(result['has_path_samples'])
            msg += 'Sample images are only used for visualization and annotation control at the moment.'
            txt = '%s classifier inspection results' % self._channel
            information(self, txt, info=msg)
        if result['has_arff']:
            # import training data and drop features containing NA values
            self._learner.importFromArff()
            nr_features_prev = len(self._learner.feature_names)
            removed_features = self._learner.filter_nans(apply=True)
            nr_features = nr_features_prev - len(removed_features)
            self._label_features.setText(self.LABEL_FEATURES % (nr_features, nr_features_prev))
            self._label_features.setToolTip(
                "removed %d features containing NA values:\n%s" %
                (len(removed_features), "\n".join(removed_features)))
        if result['has_definition']:
            self._learner.loadDefinition()
        if result['has_conf']:
            # confusion matrix available: show accuracy info and tables
            c, g, conf = self._learner.importConfusion()
            self._set_info(c, g, conf)
            self._init_conf_table(conf)
            self._update_conf_table(conf)
        else:
            conf = None
            self._init_conf_table(conf)
        self._set_info_table(conf)
def load_classifier(self, check=True):
    """Load the classifier for this channel (legacy ``dctCollectSamples``
    API) and update the GUI.

    Parameters
    ----------
    check : bool
        If True, show an inspection dialog summarizing which classifier
        components were found.
    """
    # resolve a per-channel setting key, e.g. 'primary_classification_envpath'
    _resolve = lambda x, y: self._settings.get(x, '%s_%s' % (self._channel, y))
    env_path = convert_package_path(_resolve('Classification',
                                             'classification_envpath'))
    classifier_infos = {'strEnvPath': env_path,
                        #'strModelPrefix' : _resolve('Classification', 'classification_prefix'),
                        'strChannelId': _resolve('ObjectDetection', 'channelid'),
                        'strRegionId': _resolve('Classification', 'classification_regionname'),
                        }
    try:
        self._learner = CommonClassPredictor(dctCollectSamples=classifier_infos)
    except Exception:
        # was a bare "except:" — narrowed so KeyboardInterrupt / SystemExit
        # are no longer swallowed
        exception(self, 'Error on loading classifier.')
    else:
        result = self._learner.check()
        if check:
            b = lambda x: 'Yes' if x else 'No'
            msg = 'Classifier path: %s\n' % result['path_env']
            msg += 'Found class definition: %s\n' % b(result['has_definition'])
            msg += 'Found annotations: %s\n' % b(result['has_path_annotations'])
            msg += 'Can you pick new samples? %s\n\n' % b(self.is_pick_samples())
            msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
            msg += 'Can you train a classifier? %s\n\n' % b(self.is_train_classifier())
            msg += 'Found SVM model: %s\n' % b(result['has_model'])
            msg += 'Found SVM range: %s\n' % b(result['has_range'])
            msg += 'Can you apply the classifier to images? %s\n\n' % b(self.is_apply_classifier())
            msg += 'Found samples: %s\n' % b(result['has_path_samples'])
            msg += 'Sample images are only used for visualization and annotation control at the moment.'
            txt = '%s classifier inspection results' % self._channel
            information(self, txt, info=msg)
        if result['has_arff']:
            # import training data and report features containing NA values
            self._learner.importFromArff()
            nr_features_prev = len(self._learner.lstFeatureNames)
            removed_features = self._learner.filterData(apply=False)
            nr_features = nr_features_prev - len(removed_features)
            self._label_features.setText(self.LABEL_FEATURES % (nr_features, nr_features_prev))
            self._label_features.setToolTip(
                "removed %d features containing NA values:\n%s" %
                (len(removed_features), "\n".join(removed_features)))
        if result['has_definition']:
            self._learner.loadDefinition()
        if result['has_conf']:
            # confusion matrix available: show accuracy info and tables
            c, g, conf = self._learner.importConfusion()
            self._set_info(c, g, conf)
            self._init_conf_table(conf)
            self._update_conf_table(conf)
        else:
            conf = None
            self._init_conf_table(conf)
        self._set_info_table(conf)
def load_classifier(self, check=True, quiet=False):
    """Load the classifier configured for this channel.

    Parameters
    ----------
    check : bool
        If True, build an inspection summary of the classifier state.
    quiet : bool
        If True, suppress the inspection dialog (the summary is still built).
    """
    clfdir = self._settings('Classification',
                            '%s_classification_envpath' % self._channel)
    if not isdir(clfdir):
        return
    else:
        chid = self._settings('ObjectDetection',
                              '%s_channelid' % self._channel)
        title = self._settings(
            'Classification',
            '%s_classification_regionname' % self._channel)
        self._learner = CommonClassPredictor(
            clf_dir=clfdir, name=self._channel,
            channels={self._channel.title(): title},
            color_channel=chid)
        state = self._learner.state
        if check:
            b = lambda x: 'Yes' if x else 'No'
            msg = 'Classifier path: %s\n' % state['path_env']
            msg += 'Found class definition: %s\n' % b(state['has_definition'])
            msg += 'Found annotations: %s\n' % b(state['has_path_annotations'])
            msg += 'Can you pick new samples? %s\n\n' % b(self._learner.is_annotated)
            msg += 'Found ARFF file: %s\n' % b(state['has_arff'])
            msg += 'Can you train a classifier? %s\n\n' % b(self._learner.is_trained)
            msg += 'Found SVM model: %s\n' % b(state['has_model'])
            msg += 'Found SVM range: %s\n' % b(state['has_range'])
            msg += 'Can you apply the classifier to images? %s\n\n' \
                   % b(self._learner.is_valid)
            msg += 'Found samples: %s\n' % b(state['has_path_samples'])
            msg += ('Sample images are only used for visualization and annotation '
                    ' control at the moment.')
            txt = '%s classifier inspection states' % self._channel
            if not quiet:
                information(self, txt, info=msg)
        if state['has_arff']:
            self._learner.importFromArff()
        if state['has_definition']:
            self._learner.loadDefinition()
def _on_submit_job(self):
    """Submit the current settings as a batch job to the cluster service
    and report the resulting job id."""
    self._submit_settings.set_section(SECTION_NAME_GENERAL)
    if not self._submit_settings.get2('constrain_positions'):
        # collect all positions of all plates as 'plate___position' tokens
        positions = []
        for plate_id in self.imagecontainer.plates:
            self.imagecontainer.set_plate(plate_id)
            meta_data = self.imagecontainer.get_meta_data()
            positions += ['%s___%s' % (plate_id, p)
                          for p in meta_data.positions]
        self._submit_settings.set2('positions', ','.join(positions))
        nr_items = len(positions)
    else:
        positions = self._submit_settings.get2('positions')
        nr_items = len(positions.split(','))
    # FIXME: we need to get the current value for 'position_granularity'
    settings_dummy = ProcessingFrame.get_special_settings(self._settings)
    position_granularity = settings_dummy.get(SECTION_NAME_CLUSTER,
                                              'position_granularity')
    path_out = self._submit_settings.get2('pathout')
    emails = str(self._txt_mail.text()).split(',')
    try:
        self.dlg = waitingProgressDialog(
            'Please wait until the job has been submitted...', self)
        self.dlg.setTarget(self._service.submit_job, 'cecog_batch',
                           self._submit_settings.to_string(), path_out,
                           emails, nr_items, position_granularity, VERSION)
        self.dlg.exec_()
        jobid = self.dlg.getTargetResult()
    except Exception:
        # was a bare "except:" — narrowed to real errors only
        exception(self, 'Error on job submission')
    else:
        # FIXME: no idea how DRMAA 1.0 compatible this is
        # fixed: was "type(jobid) == types.ListType" (Python-2-only)
        if isinstance(jobid, list):
            self._jobid = ','.join(jobid)
            main_jobid = jobid[0].split('.')[0]
        else:
            self._jobid = str(jobid)
            main_jobid = jobid
        self._txt_jobid.setText(self._jobid)
        self._update_job_status()
        information(
            self, 'Job submitted successfully',
            "Job successfully submitted to the cluster.\nJob ID: %s, items: %d"
            % (main_jobid, nr_items))
def msg_train_classifier(self, parent):
    """Show a dialog explaining why classifier training is not possible,
    listing the missing components."""
    clf_state = self._learner.state
    missing = 'Missing components:\n'
    if not clf_state['has_arff']:
        missing += "- Feature file '%s' not found.\n" % clf_state['arff']
    return information(parent,
                       'Classifier training is not possible',
                       'You need to pick samples first.',
                       missing)
def msg_apply_classifier(self, parent):
    """Show a dialog explaining why the classifier cannot be applied,
    listing the missing SVM files."""
    clf_state = self._learner.state
    missing = 'Missing components:\n'
    for flag, label, key in (('has_model', 'SVM model file', 'model'),
                             ('has_range', 'SVM range file', 'range')):
        if not clf_state[flag]:
            missing += "- %s '%s' not found.\n" % (label, clf_state[key])
    return information(parent,
                       'Classifier model not found',
                       'You need to train a classifier first.',
                       missing)
def _on_submit_job(self):
    """Submit the current settings as a batch job to the cluster service
    and report the resulting job id."""
    self._submit_settings.set_section(SECTION_NAME_GENERAL)
    if not self._submit_settings.get2('constrain_positions'):
        # collect all positions of all plates as 'plate___position' tokens
        positions = []
        for plate_id in self.imagecontainer.plates:
            self.imagecontainer.set_plate(plate_id)
            meta_data = self.imagecontainer.get_meta_data()
            positions += ['%s___%s' % (plate_id, p)
                          for p in meta_data.positions]
        self._submit_settings.set2('positions', ','.join(positions))
        nr_items = len(positions)
    else:
        positions = self._submit_settings.get2('positions')
        nr_items = len(positions.split(','))
    # FIXME: we need to get the current value for 'position_granularity'
    settings_dummy = self._clusterframe.get_special_settings(self._settings)
    position_granularity = settings_dummy.get('Cluster', 'position_granularity')
    path_out = self._submit_settings.get2('pathout')
    emails = str(self._txt_mail.text()).split(',')
    try:
        self.dlg = ProgressDialog("submitting jobs...", None, 0, 0, self)
        settings_str = self._submit_settings.to_string()
        func = lambda: self._service.submit_job(
            'cecog_batch', settings_str, path_out, emails, nr_items,
            position_granularity, VERSION)
        self.dlg.exec_(func)
        jobid = self.dlg.getTargetResult()
    except Exception as e:
        exception(self, 'Error on job submission (%s)' % str(e))
    else:
        # FIXME: no idea how DRMAA 1.0 compatible this is
        # fixed: was "type(jobid) == types.ListType" (Python-2-only)
        if isinstance(jobid, list):
            self._jobid = ','.join(jobid)
            main_jobid = jobid[0].split('.')[0]
        else:
            self._jobid = str(jobid)
            main_jobid = jobid
        self._txt_jobid.setText(self._jobid)
        self._update_job_status()
        information(
            self, 'Job submitted successfully',
            "Job successfully submitted to the cluster.\nJob ID: %s, items: %d"
            % (main_jobid, nr_items))
def _on_submit_job(self):
    """Submit the current settings as a batch job to the cluster service
    and report the resulting job id."""
    self._submit_settings.set_section(SECTION_NAME_GENERAL)
    if not self._submit_settings.get2('constrain_positions'):
        # FIXME:
        imagecontainer = qApp._main_window._imagecontainer
        # collect all positions of all plates as 'plate___position' tokens
        positions = []
        for plate_id in imagecontainer.plates:
            imagecontainer.set_plate(plate_id)
            meta_data = imagecontainer.get_meta_data()
            positions += ['%s___%s' % (plate_id, p)
                          for p in meta_data.positions]
        self._submit_settings.set2('positions', ','.join(positions))
        nr_items = len(positions)
    else:
        positions = self._submit_settings.get2('positions')
        nr_items = len(positions.split(','))
    # FIXME: we need to get the current value for 'position_granularity'
    settings_dummy = ProcessingFrame.get_special_settings(self._settings)
    position_granularity = settings_dummy.get(SECTION_NAME_CLUSTER,
                                              'position_granularity')
    path_out = self._submit_settings.get2('pathout')
    emails = str(self._txt_mail.text()).split(',')
    try:
        self.dlg = waitingProgressDialog(
            'Please wait until the job has been submitted...', self)
        self.dlg.setTarget(self._service.submit_job, 'cecog_batch',
                           self._submit_settings.to_string(), path_out,
                           emails, nr_items, position_granularity, VERSION)
        self.dlg.exec_()
        jobid = self.dlg.getTargetResult()
    except Exception:
        # was a bare "except:" — narrowed to real errors only
        exception(self, 'Error on job submission')
    else:
        # FIXME: no idea how DRMAA 1.0 compatible this is
        # fixed: was "type(jobid) == types.ListType" (Python-2-only)
        if isinstance(jobid, list):
            self._jobid = ','.join(jobid)
            main_jobid = jobid[0].split('.')[0]
        else:
            self._jobid = str(jobid)
            main_jobid = jobid
        self._txt_jobid.setText(self._jobid)
        self._update_job_status()
        information(
            self, 'Job submitted successfully',
            "Job successfully submitted to the cluster.\nJob ID: %s, items: %d"
            % (main_jobid, nr_items))
def load_classifier(self, check=True, quiet=False):
    """Load the classifier configured for this channel.

    Parameters
    ----------
    check : bool
        If True, build an inspection summary of the classifier state.
    quiet : bool
        If True, suppress the inspection dialog (the summary is still built).
    """
    clfdir = self._settings('Classification',
                            '%s_classification_envpath' % self._channel)
    if not isdir(clfdir):
        return
    else:
        chid = self._settings('ObjectDetection',
                              '%s_channelid' % self._channel)
        title = self._settings('Classification',
                               '%s_classification_regionname' % self._channel)
        self._learner = CommonClassPredictor(
            clf_dir=clfdir, name=self._channel,
            channels={self._channel.title(): title},
            color_channel=chid)
        state = self._learner.state
        if check:
            b = lambda x: 'Yes' if x else 'No'
            msg = 'Classifier path: %s\n' % state['path_env']
            msg += 'Found class definition: %s\n' % b(state['has_definition'])
            msg += 'Found annotations: %s\n' % b(state['has_path_annotations'])
            msg += 'Can you pick new samples? %s\n\n' % b(self._learner.is_annotated)
            msg += 'Found ARFF file: %s\n' % b(state['has_arff'])
            msg += 'Can you train a classifier? %s\n\n' % b(self._learner.is_trained)
            msg += 'Found SVM model: %s\n' % b(state['has_model'])
            msg += 'Found SVM range: %s\n' % b(state['has_range'])
            msg += 'Can you apply the classifier to images? %s\n\n' \
                   % b(self._learner.is_valid)
            msg += 'Found samples: %s\n' % b(state['has_path_samples'])
            msg += ('Sample images are only used for visualization and annotation '
                    ' control at the moment.')
            txt = '%s classifier inspection states' % self._channel
            if not quiet:
                information(self, txt, info=msg)
        if state['has_arff']:
            self._learner.importFromArff()
        if state['has_definition']:
            self._learner.loadDefinition()
def msg_pick_samples(self, parent):
    """Show a dialog explaining why sample picking is not possible,
    listing the missing components."""
    clf_state = self._learner.state
    missing = 'Missing components:\n'
    if not clf_state['has_path_annotations']:
        missing += "- Annotation path '%s' not found.\n" % clf_state['path_annotations']
    if not clf_state['has_definition']:
        missing += "- Class definition file '%s' not found.\n" % clf_state['definition']
    return information(parent,
                       'Sample picking is not possible',
                       'You need to provide a class definition '
                       'file and annotation files.',
                       missing)
def _on_file_open(self):
    """Ask for a config file, load it, and reset browser/module state.

    The file dialog starts at the previously loaded settings file if one
    exists; after loading, an information dialog reports the file's
    version (with a note when an old-format file was converted).
    """
    if self._check_settings_saved() != QMessageBox.Cancel:
        # renamed from "dir" (shadowed the builtin); fixed "not ... is None"
        start_dir = ""
        if self._settings_filename is not None:
            settings_filename = self.environ.convert_package_path(
                self._settings_filename)
            if os.path.isfile(settings_filename):
                start_dir = settings_filename
        filename = QtGui.QFileDialog.getOpenFileName(
            self, 'Open config file', start_dir,
            ';;'.join(self.NAME_FILTERS))
        if filename:
            self._read_settings(filename)
            if self._settings.was_old_file_format():
                # fixed user-facing typo: "was be updated" -> "was updated"
                information(self, ('Selected config file had an old '
                                   'version <= 1.3.0. The current version is %s. '
                                   'The config file was updated...' % self.version))
            else:
                information(self, "Config file version %s found"
                            % self._settings.get('General', 'version'))
            self._clear_browser()
            self.set_modules_active(state=False)
def msg_pick_samples(self, parent):
    """Show a dialog explaining why sample picking is not possible,
    listing the missing components."""
    clf_state = self._learner.state
    checks = (('has_path_annotations', "- Annotation path '%s' not found.\n",
               'path_annotations'),
              ('has_definition', "- Class definition file '%s' not found.\n",
               'definition'))
    missing = 'Missing components:\n'
    for flag, template, key in checks:
        if not clf_state[flag]:
            missing += template % clf_state[key]
    return information(parent,
                       'Sample picking is not possible',
                       'You need to provide a class definition '
                       'file and annotation files.',
                       missing)
def _on_file_open(self):
    """Ask for a config file, load it, and reset browser/module state.

    On load failure a critical dialog with the traceback is shown; the
    browser is cleared and modules are deactivated in every case
    (original behavior kept via ``finally``).
    """
    if self._check_settings_saved() != QMessageBox.Cancel:
        home = ""
        if self._settings_filename is not None:
            settings_filename = self.environ.demo_settings
            if os.path.isfile(settings_filename):
                home = settings_filename
        filename = QtGui.QFileDialog.getOpenFileName( \
            self, 'Open config file', home, ';;'.join(self.NAME_FILTERS))
        if not bool(filename):
            return
        try:
            self.load_settings(filename)
            if self._settings.was_old_file_format():
                information(self, ('Config file was updated to version %s'
                                   % self.version))
        except Exception:
            # fixed: traceback.format_exc(e) passed the exception as the
            # "limit" argument; format_exc() takes no exception object
            critical(self, "Could not load file!", traceback.format_exc())
        finally:
            self._clear_browser()
            self.set_modules_active(state=False)
def _load_image_container(self, plate_infos, scan_plates=None, show_dlg=True):
    """Scan/load the plate input structure into a new ImageContainer and
    reconcile the GUI settings (channel ids, tracking duration units)
    with the data found.

    Parameters
    ----------
    plate_infos : sequence
        Sequence whose items carry the plate id at index 0.
    scan_plates : dict or None
        Maps plate id -> rescan flag; defaults to no rescan for all plates.
    show_dlg : bool
        Whether to show the final success dialog.
    """
    self._clear_browser()
    imagecontainer = ImageContainer()
    self._imagecontainer = imagecontainer
    if scan_plates is None:
        scan_plates = dict((info[0], False) for info in plate_infos)

    def load(dlg):
        # renamed from "iter" (shadowed the builtin)
        plate_iter = imagecontainer.iter_import_from_settings(self._settings,
                                                              scan_plates)
        for idx, info in enumerate(plate_iter):
            dlg.targetSetValue.emit(idx + 1)
        if len(imagecontainer.plates) > 0:
            plate = imagecontainer.plates[0]
            imagecontainer.set_plate(plate)

    self.dlg = waitingProgressDialog(
        'Please wait until the input structure is scanned\n'
        'or the structure data loaded...', self, load, (0, len(scan_plates)))
    self.dlg.exec_(passDialog=True)
    if len(imagecontainer.plates) > 0:
        imagecontainer.check_dimensions()
        channels = imagecontainer.channels
        # do not report value changes to the main window
        self._settings.set_notify_change(False)
        self.set_image_crop_size()
        problems = []
        for prefix in ['primary', 'secondary', 'tertiary']:
            trait = self._settings.get_trait(SECTION_NAME_OBJECTDETECTION,
                                             '%s_channelid' % prefix)
            if trait.set_list_data(channels) is None:
                problems.append(prefix)
            self._tabs[1].get_widget('%s_channelid' % prefix).update()
        # report problems about a mismatch between channel IDs found in the
        # data and specified by the user
        if len(problems) > 0:
            critical(self, "Selected channel IDs not valid",
                     "The selected channel IDs for %s are not valid.\nValid IDs are %s."
                     % (", ".join(["'%s Channel'" % s.capitalize() for s in problems]),
                        ", ".join(["'%s'" % s for s in channels])))
            # a mismatch between settings and data will cause changed settings
            self.settings_changed(True)
        trait = self._settings.get_trait(SECTION_NAME_TRACKING,
                                         'tracking_duration_unit')
        # allow time-based tracking durations only if time-stamp
        # information is present
        meta_data = imagecontainer.get_meta_data()
        if meta_data.has_timestamp_info:
            result = trait.set_list_data(TRACKING_DURATION_UNITS_TIMELAPSE)
        else:
            result = trait.set_list_data(TRACKING_DURATION_UNITS_DEFAULT)
        if result is None:
            critical(self, "Could not set tracking duration units",
                     "The tracking duration units selected to match the load data. Please check your settings.")
            # a mismatch between settings and data will cause changed settings
            self.settings_changed(True)
        # activate change notification again
        self._settings.set_notify_change(True)
        self.set_modules_active(state=True)
        if show_dlg:
            information(self, "Plate(s) successfully loaded",
                        "%d plates loaded successfully." % len(imagecontainer.plates))
    else:
        # fixed: concatenated parts rendered "image dataor the coordinate"
        critical(self, "No valid image data found",
                 "The naming schema provided might not fit your image data "
                 "or the coordinate file is not correct.\n\nPlease modify "
                 "the values and scan the structure again.")
def _on_update_job_status(self):
    """Refresh the cluster job status and report the message in a dialog."""
    message = self._update_job_status()
    information(self, 'Cluster update', "Message: '%s'" % message)
def _load_image_container(self, plate_infos, scan_plates=None, show_dlg=True):
    """Scan/load the plate input structure into a new ImageContainer and
    reconcile the GUI settings (channel ids, duration units) with the
    data found.

    Parameters
    ----------
    plate_infos : sequence
        Sequence whose items carry the plate id at index 0.
    scan_plates : dict or None
        Maps plate id -> rescan flag; defaults to no rescan for all plates.
    show_dlg : bool
        Whether to show the final success dialog.
    """
    self._clear_browser()
    imagecontainer = ImageContainer()
    self._imagecontainer = imagecontainer
    if scan_plates is None:
        scan_plates = dict((info[0], False) for info in plate_infos)

    def load(emitter, icontainer, settings, splates):
        iter_ = icontainer.iter_import_from_settings(settings, splates)
        for idx, info in enumerate(iter_):
            emitter.setValue.emit(idx)
        emitter.setLabelText.emit("checking dimensions...")
        emitter.setRange.emit(0, 0)
        QtCore.QCoreApplication.processEvents()
        if len(icontainer.plates) > 0:
            icontainer.set_plate(icontainer.plates[0])
            icontainer.check_dimensions()

    label = ('Please wait until the input structure is scanned\n'
             'or the structure data loaded...')
    self._dlg = ProgressDialog(label, None, 0, len(scan_plates), self)
    emitter = ProgressObject()
    emitter.setRange.connect(self._dlg.setRange)
    emitter.setValue.connect(self._dlg.setValue)
    emitter.setLabelText.connect(self._dlg.setLabelText)
    try:
        func = lambda: load(emitter, imagecontainer, self._settings,
                            scan_plates)
        self._dlg.exec_(func, (emitter, ))
    except ImportError as e:
        # structure file from versions older than 1.3 contain pdk which is
        # removed
        if 'pdk' in str(e):
            critical(self, ("Your structure file format is outdated.\n"
                            "You have to rescan the plate(s)"))
        else:
            critical(self, traceback.format_exc())
        return
    except Exception as e:
        critical(self, str(e))
    try:
        # I hate lookup tables!
        self._tab_lookup['Cluster'][1].set_imagecontainer(imagecontainer)
    except KeyError:
        pass
    if len(imagecontainer.plates) > 0:
        channels = imagecontainer.channels
        # do not report value changes to the main window
        self._settings.set_notify_change(False)
        self.set_image_crop_size()
        problems = []
        for prefix in ['primary', 'secondary', 'tertiary']:
            trait = self._settings.get_trait(SECTION_NAME_OBJECTDETECTION,
                                             '%s_channelid' % prefix)
            if trait.set_list_data(channels) is None:
                problems.append(prefix)
            self._tabs[1].get_widget('%s_channelid' % prefix).update()
        # report problems about a mismatch between channel IDs found in the
        # data and specified by the user
        if len(problems) > 0:
            # a mismatch between settings and data will cause changed settings
            self.settings_changed(True)
        trait = self._settings.get_trait(SECTION_NAME_EVENT_SELECTION,
                                         'duration_unit')
        # allow time-based tracking durations only if time-stamp
        # information is present
        meta_data = imagecontainer.get_meta_data()
        if meta_data.has_timestamp_info:
            result = trait.set_list_data(TimeConverter.units)
        else:
            result = trait.set_list_data([TimeConverter.FRAMES])
        if result is None:
            critical(self, "Could not set tracking duration units",
                     ("The tracking duration units selected to match the "
                      "load data. Please check your settings."))
            # a mismatch between settings and data will cause changed settings
            self.settings_changed(True)
        # activate change notification again
        self._settings.set_notify_change(True)
        self.set_modules_active(state=True)
        if show_dlg:
            information(self, "Plate(s) successfully loaded",
                        "%d plates loaded successfully." % len(imagecontainer.plates))
    else:
        # fixed user-facing typos: "Verifiy" -> "Verify", "nameing" -> "naming"
        critical(self, "No images found",
                 "Verify your naming scheme and rescan the data.")
def load_classifier(self, check=True):
    """Load the classifier configured for this channel and update the GUI.

    Parameters
    ----------
    check : bool
        If True, show an inspection dialog summarizing which classifier
        components (definition, annotations, ARFF, SVM model/range,
        samples) were found.
    """
    # resolve a per-channel setting key, e.g. 'primary_classification_envpath'
    _resolve = lambda x, y: self._settings.get(
        x, '%s_%s' % (self._channel, y))
    clfdir = CecogEnvironment.convert_package_path(
        _resolve('Classification', 'classification_envpath'))
    # XXX - where does the "." come
    if not isdir(clfdir) or clfdir == ".":
        return
    else:
        self._learner = CommonClassPredictor( \
            clf_dir=clfdir, name=self._channel,
            channels={self._channel.title():
                      _resolve('Classification', 'classification_regionname')},
            color_channel=_resolve('ObjectDetection', 'channelid'))
        result = self._learner.check()
        if check:
            b = lambda x: 'Yes' if x else 'No'
            msg = 'Classifier path: %s\n' % result['path_env']
            msg += 'Found class definition: %s\n' % b(result['has_definition'])
            msg += 'Found annotations: %s\n' % b(result['has_path_annotations'])
            msg += 'Can you pick new samples? %s\n\n' % b(self.is_pick_samples())
            msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
            msg += 'Can you train a classifier? %s\n\n' % b(self.is_train_classifier())
            msg += 'Found SVM model: %s\n' % b(result['has_model'])
            msg += 'Found SVM range: %s\n' % b(result['has_range'])
            msg += 'Can you apply the classifier to images? %s\n\n' % b(self.is_apply_classifier())
            msg += 'Found samples: %s\n' % b(result['has_path_samples'])
            msg += 'Sample images are only used for visualization and annotation control at the moment.'
            txt = '%s classifier inspection results' % self._channel
            information(self, txt, info=msg)
        if result['has_arff']:
            # import training data and drop features containing NA values
            self._learner.importFromArff()
            nr_features_prev = len(self._learner.feature_names)
            removed_features = self._learner.filter_nans(apply=True)
            nr_features = nr_features_prev - len(removed_features)
            self._label_features.setText(self.LABEL_FEATURES %
                                         (nr_features, nr_features_prev))
            self._label_features.setToolTip(
                "removed %d features containing NA values:\n%s" %
                (len(removed_features), "\n".join(removed_features)))
        if result['has_definition']:
            self._learner.loadDefinition()
        if result['has_conf']:
            # confusion matrix available: show accuracy info and tables
            c, g, conf = self._learner.importConfusion()
            self._set_info(c, g, conf)
            self._init_conf_table(conf)
            self._update_conf_table(conf)
        else:
            conf = None
            self._init_conf_table(conf)
        self._set_info_table(conf)
def detect_objects_toggled(self, state):
    """Handler for toggling object detection in the browser; refreshes
    the view when detection is switched on."""
    if state:
        hdf5_reuse = self._settings.get('Output', 'hdf5_reuse')
        # warn the user that cached HDF5 data bypasses parameter changes
        if hdf5_reuse:
            information(self, 'HDF5 reuse is enabled. Raw data and segmentation will be loaded from HDF5 files. Changes of normalization and segmentation parameters will have no effect in browser!')
        self.on_refresh()