def _setupClassifier(self):
    """Create, import and load one classifier per channel that has an image.

    Channels whose image is None are skipped; for every other channel a
    CommonClassPredictor is built from the settings mapper and stored in
    self.classifiers under the channel name.
    """
    for channel, img in self.images.iteritems():
        # nothing to classify for channels without an image
        if img is None:
            continue
        predictor = CommonClassPredictor(
            clf_dir=self.mapper('Classification',
                                '%s_classification_envpath' % channel),
            name=channel,
            channels=channel,
            color_channel=self.mapper("ObjectDetection",
                                      "%s_channelid" % channel))
        predictor.importFromArff()
        predictor.loadClassifier()
        self.classifiers[channel] = predictor
def load_classifier(self, check=True, quiet=False):
    """Load this channel's classifier from its configured environment path.

    Does nothing when the configured classifier directory does not exist.
    Otherwise builds a CommonClassPredictor (stored in self._learner),
    optionally shows an inspection dialog summarizing the classifier
    state, and imports the ARFF data and class definition when present.

    Parameters:
        check -- build and show the inspection summary dialog.
        quiet -- suppress the dialog even when check is True.
    """
    clfdir = self._settings('Classification',
                            '%s_classification_envpath' % self._channel)
    if not isdir(clfdir):
        return
    else:
        chid = self._settings('ObjectDetection',
                              '%s_channelid' % self._channel)
        title = self._settings(
            'Classification',
            '%s_classification_regionname' % self._channel)
        self._learner = CommonClassPredictor( \
            clf_dir=clfdir, name=self._channel,
            channels = {self._channel.title(): title},
            color_channel=chid)
        # snapshot of what the classifier directory currently provides
        # (definition, annotations, ARFF, model, range, samples)
        state = self._learner.state
        if check:
            b = lambda x: 'Yes' if x else 'No'
            msg = 'Classifier path: %s\n' % state['path_env']
            msg += 'Found class definition: %s\n' % b(
                state['has_definition'])
            msg += 'Found annotations: %s\n' % b(
                state['has_path_annotations'])
            msg += 'Can you pick new samples? %s\n\n' % b(
                self._learner.is_annotated)
            msg += 'Found ARFF file: %s\n' % b(state['has_arff'])
            msg += 'Can you train a classifier? %s\n\n' % b(
                self._learner.is_trained)
            msg += 'Found SVM model: %s\n' % b(state['has_model'])
            msg += 'Found SVM range: %s\n' % b(state['has_range'])
            msg += 'Can you apply the classifier to images? %s\n\n' \
                %b(self._learner.is_valid)
            msg += 'Found samples: %s\n' % b(state['has_path_samples'])
            msg += (
                'Sample images are only used for visualization and annotation '
                ' control at the moment.')
            txt = '%s classifier inspection states' % self._channel
            if not quiet:
                information(self, txt, info=msg)
        # import whatever training data is actually available
        if state['has_arff']:
            self._learner.importFromArff()
        if state['has_definition']:
            self._learner.loadDefinition()
def setup_classifiers(self):
    """Build and load a classifier for every processing channel whose
    classification option is enabled in the settings.

    ch_mapping maps a processing channel to its color channel; results
    are stored in self.classifiers keyed by processing channel.
    """
    settings = self.settings
    for proc_ch, color_ch in self.ch_mapping.iteritems():
        settings.set_section('Processing')
        # skip channels with classification switched off
        if not settings.get2(self._resolve_name(proc_ch, 'classification')):
            continue
        settings.set_section('Classification')
        predictor = CommonClassPredictor(
            clf_dir=settings.get2(
                self._resolve_name(proc_ch, 'classification_envpath')),
            name=proc_ch,
            channels=self._channel_regions(proc_ch),
            color_channel=color_ch)
        predictor.importFromArff()
        predictor.loadClassifier()
        self.classifiers[proc_ch] = predictor
def setup_classifiers(self):
    """Build a classifier for every enabled processing channel.

    For unsupervised event selection a ClassDefinitionUnsup with the
    configured number of clusters is used; otherwise a trained
    CommonClassPredictor is imported and loaded. Results are stored in
    self.classifiers keyed by processing channel.
    """
    settings = self.settings
    for proc_ch, color_ch in self.ch_mapping.iteritems():
        settings.set_section('Processing')
        # skip channels with classification switched off
        if not settings.get2(self._resolve_name(proc_ch, 'classification')):
            continue
        regions = self._channel_regions(proc_ch)
        if settings("EventSelection", "unsupervised_event_selection"):
            nclusters = settings("EventSelection", "num_clusters")
            self.classifiers[proc_ch] = ClassDefinitionUnsup(
                nclusters, regions)
        else:
            settings.set_section('Classification')
            predictor = CommonClassPredictor(
                clf_dir=settings.get2(
                    self._resolve_name(proc_ch, 'classification_envpath')),
                name=proc_ch,
                channels=regions,
                color_channel=color_ch)
            predictor.importFromArff()
            predictor.loadClassifier()
            self.classifiers[proc_ch] = predictor
def _on_process_start(self, name, start_again=False):
    """Start (or restart) the processing thread behind control button *name*.

    Validates section-specific preconditions (classifier state for the
    Classification section, CPU-count dialog for multi-processing),
    instantiates the matching thread class, wires its signals to the
    frame's slots and starts it with lowest priority. If a process is
    already running and start_again is False, the running process is
    aborted instead.

    Parameters:
        name -- key into self._control_buttons identifying the process.
        start_again -- restart the next item of a multi-stage process.
    """
    if not self._is_running or start_again:
        is_valid = True
        self._is_abort = False
        self._has_error = False
        if self._process_items is None:
            # 'cls' is either a single thread class or a list of thread
            # classes (a multi-stage process run item by item).
            cls = self._control_buttons[name]['cls']
            if type(cls) == types.ListType:
                self._process_items = cls
                self._current_process_item = 0
                cls = cls[self._current_process_item]
            else:
                self._process_items = None
                self._current_process_item = 0
        else:
            # continue with the current item of a multi-stage process
            cls = self._process_items[self._current_process_item]

        if self.name == SECTION_NAME_CLASSIFICATION:
            result_frame = self._get_result_frame(self._tab_name)
            result_frame.load_classifier(check=False)
            # NOTE(review): 'learner' is only used by the TrainingThread
            # branch below; the validity checks here go through
            # result_frame.classifier — confirm both refer to the same
            # underlying classifier object.
            learner = result_frame._learner
            if name == self.PROCESS_PICKING:
                if not result_frame.classifier.is_annotated:
                    is_valid = False
                    result_frame.msg_pick_samples(self)
                elif result_frame.classifier.is_trained:
                    if not question(self, 'Samples already picked',
                                    'Do you want to pick samples again and '
                                    'overwrite previous '
                                    'results?'):
                        is_valid = False
            elif name == self.PROCESS_TRAINING:
                if not result_frame.classifier.is_trained:
                    is_valid = False
                    result_frame.msg_train_classifier(self)
                elif result_frame.classifier.is_valid:
                    if not question(self, 'Classifier already trained',
                                    'Do you want to train the classifier '
                                    'again?'):
                        is_valid = False
            elif name == self.PROCESS_TESTING and not result_frame.classifier.is_valid:
                is_valid = False
                result_frame.msg_apply_classifier(self)

        if cls is MultiAnalyzerThread:
            # let the user choose the number of worker processes
            # (defaults to the detected CPU count, capped at twice that)
            ncpu = cpu_count()
            (ncpu, ok) = QInputDialog.getInt(None,
                "On your machine are %d processers available." % ncpu, \
                "Select the number of processors", \
                ncpu, 1, ncpu*2)
            if not ok:
                self._process_items = None
                is_valid = False

        if is_valid:
            self._current_process = name
            if not start_again:
                self.parent().main_window.log_window.clear()
            self._is_running = True
            self._stage_infos = {}
            self._toggle_tabs(False)
            # disable all section button of the main widget
            self.toggle_tabs.emit(self.get_name())
            self._set_control_button_text(idx=1)
            self._toggle_control_buttons()
            imagecontainer = self.parent().main_window._imagecontainer
            if cls is PickerThread:
                self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings, imagecontainer)
                self._clear_image()
            elif cls is AnalyzerThread:
                self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings, imagecontainer)
                self._clear_image()
            elif cls is TrainingThread:
                # training works on an unmodified copy of the settings
                self._current_settings = self._settings.copy()
                self._analyzer = cls(self, self._current_settings, result_frame._learner)
                self._analyzer.setTerminationEnabled(True)
                self._analyzer.conf_result.connect(result_frame.on_conf_result,
                                                   Qt.QueuedConnection)
                result_frame.reset()
            elif cls is MultiAnalyzerThread:
                self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings, imagecontainer, ncpu)
            elif cls is ErrorCorrectionThread:
                self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings,
                                     self.parent().main_window._imagecontainer)
            elif cls is PostProcessingThread:
                # collect one trained classifier per channel; the
                # secondary channel is included only when processed
                learner_dict = {}
                for channel in ['primary', 'secondary']:
                    path = self._settings('Classification',
                                          '%s_classification_envpath' %channel)
                    if (self._settings('Processing', '%s_classification' %channel) and
                        (channel == 'primary' or
                         self._settings('General', 'process_secondary'))):
                        learner = CommonClassPredictor( \
                            path,
                            self._settings('ObjectDetection', '%s_channelid' %channel),
                            self._settings('Classification', '%s_classification_regionname' %channel))
                        learner.importFromArff()
                        learner_dict[channel] = learner
                self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings, learner_dict, imagecontainer)
            # common signal wiring for whichever thread was created
            self._analyzer.setTerminationEnabled(True)
            self._analyzer.finished.connect(self._on_process_finished)
            self._analyzer.stage_info.connect(self._on_update_stage_info,
                                              Qt.QueuedConnection)
            self._analyzer.analyzer_error.connect(self._on_error,
                                                  Qt.QueuedConnection)
            self._analyzer.image_ready.connect(self._on_update_image)
            self._analyzer.start(QThread.LowestPriority)
            if self._current_process_item == 0:
                self.status_message.emit('Process started...')
    else:
        self._abort_processing()
def load_classifier(self, check=True):
    """Load this channel's classifier from its configured environment path.

    Resolves the (possibly package-relative) classifier directory, builds
    a CommonClassPredictor (stored in self._learner), optionally shows an
    inspection dialog, imports ARFF data (filtering NaN features and
    updating the feature-count label), loads the class definition and
    fills the confusion-matrix table when a confusion file exists.

    Parameters:
        check -- show the inspection summary dialog.
    """
    # resolve '<channel>_<option>' settings keys for this channel
    _resolve = lambda x, y: self._settings.get(
        x, '%s_%s' % (self._channel, y))
    clfdir = CecogEnvironment.convert_package_path(
        _resolve('Classification', 'classification_envpath'))
    # XXX - where does the "." come
    if not isdir(clfdir) or clfdir == ".":
        return
    else:
        self._learner = CommonClassPredictor( \
            clf_dir=clfdir, name=self._channel,
            channels={self._channel.title(): _resolve('Classification',
                                                      'classification_regionname')},
            color_channel=_resolve('ObjectDetection', 'channelid'))
        # dict describing what the classifier directory provides
        # (definition, annotations, ARFF, model, range, samples, conf)
        result = self._learner.check()
        if check:
            b = lambda x: 'Yes' if x else 'No'
            msg = 'Classifier path: %s\n' % result['path_env']
            msg += 'Found class definition: %s\n' % b(
                result['has_definition'])
            msg += 'Found annotations: %s\n' % b(
                result['has_path_annotations'])
            msg += 'Can you pick new samples? %s\n\n' % b(
                self.is_pick_samples())
            msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
            msg += 'Can you train a classifier? %s\n\n' % b(
                self.is_train_classifier())
            msg += 'Found SVM model: %s\n' % b(result['has_model'])
            msg += 'Found SVM range: %s\n' % b(result['has_range'])
            msg += 'Can you apply the classifier to images? %s\n\n' % b(
                self.is_apply_classifier())
            msg += 'Found samples: %s\n' % b(result['has_path_samples'])
            msg += 'Sample images are only used for visualization and annotation control at the moment.'
            txt = '%s classifier inspection results' % self._channel
            information(self, txt, info=msg)
        if result['has_arff']:
            self._learner.importFromArff()
            # drop features containing NaNs and report the reduction
            # in the features label / tooltip
            nr_features_prev = len(self._learner.feature_names)
            removed_features = self._learner.filter_nans(apply=True)
            nr_features = nr_features_prev - len(removed_features)
            self._label_features.setText(self.LABEL_FEATURES %
                                         (nr_features, nr_features_prev))
            self._label_features.setToolTip(
                "removed %d features containing NA values:\n%s" %
                (len(removed_features), "\n".join(removed_features)))
        if result['has_definition']:
            self._learner.loadDefinition()
        if result['has_conf']:
            c, g, conf = self._learner.importConfusion()
            self._set_info(c, g, conf)
            self._init_conf_table(conf)
            self._update_conf_table(conf)
        else:
            conf = None
            self._init_conf_table(conf)
        self._set_info_table(conf)
def _on_process_start(self, name, start_again=False):
    """Start (or restart) the processing thread behind control button *name*.

    Validates preconditions (classifier state for the Classification
    section, R availability for error correction, CPU-count dialog for
    multi-processing), prunes HmmThread from multi-stage runs when error
    correction is disabled, instantiates the matching thread class,
    wires its signals and starts it with lowest priority. If a process
    is already running and start_again is False, it is aborted instead.

    Parameters:
        name -- key into self._control_buttons identifying the process.
        start_again -- restart the next item of a multi-stage process.
    """
    if not self._is_running or start_again:
        is_valid = True
        self._is_abort = False
        self._has_error = False
        if self._process_items is None:
            # 'cls' is either a single thread class or a list of thread
            # classes (a multi-stage process run item by item).
            cls = self._control_buttons[name]['cls']
            if type(cls) == types.ListType:
                self._process_items = cls
                self._current_process_item = 0
                cls = cls[self._current_process_item]
                # remove HmmThread if process is not first in list and
                # not valid error correction was activated
                if (HmmThread in self._process_items and
                    self._process_items.index(HmmThread) > 0 and
                    not (self._settings.get(
                            'Processing', 'primary_errorcorrection') or
                         (self._settings.get(
                             'Processing', 'secondary_errorcorrection') and
                          self._settings.get(
                              'Processing', 'secondary_processchannel')))):
                    self._process_items.remove(HmmThread)
            else:
                self._process_items = None
                self._current_process_item = 0
        else:
            # continue with the current item of a multi-stage process
            cls = self._process_items[self._current_process_item]

        if self.SECTION_NAME == 'Classification':
            result_frame = self._get_result_frame(self._tab_name)
            result_frame.load_classifier(check=False)
            # NOTE(review): 'learner' is only used by the TrainingThread
            # branch below; the validity checks here use the
            # result_frame.is_*() helpers.
            learner = result_frame._learner
            if name == self.PROCESS_PICKING:
                if not result_frame.is_pick_samples():
                    is_valid = False
                    result_frame.msg_pick_samples(self)
                elif result_frame.is_train_classifier():
                    if not question(
                            self, 'Samples already picked',
                            'Do you want to pick samples again and '
                            'overwrite previous '
                            'results?'):
                        is_valid = False
            elif name == self.PROCESS_TRAINING:
                if not result_frame.is_train_classifier():
                    is_valid = False
                    result_frame.msg_train_classifier(self)
                elif result_frame.is_apply_classifier():
                    if not question(
                            self, 'Classifier already trained',
                            'Do you want to train the classifier '
                            'again?'):
                        is_valid = False
            elif name == self.PROCESS_TESTING and not result_frame.is_apply_classifier(
                    ):
                is_valid = False
                result_frame.msg_apply_classifier(self)
        elif cls is HmmThread:
            # error correction requires a working R installation
            success, cmd = HmmThread.test_executable(
                self._settings.get('ErrorCorrection', 'filename_to_R'))
            if not success:
                critical(self, 'Error running R',
                         "The R command line program '%s' could not be executed.\n\n"\
                         "Make sure that the R-project is installed.\n\n"\
                         "See README.txt for details." % cmd)
                is_valid = False
        elif cls is MultiAnalyzerThread:
            # let the user choose the number of worker processes
            # (defaults to the detected CPU count, capped at twice that)
            ncpu = cpu_count()
            (ncpu, ok) = QInputDialog.getInt(None,
                "On your machine are %d processers available." % ncpu, \
                "Select the number of processors", \
                ncpu, 1, ncpu*2)
            if not ok:
                self._process_items = None
                is_valid = False

        if is_valid:
            self._current_process = name
            if not start_again:
                self.parent().main_window.log_window.clear()
            self._is_running = True
            self._stage_infos = {}
            self._toggle_tabs(False)
            # disable all section button of the main widget
            self.toggle_tabs.emit(self.get_name())
            self._set_control_button_text(idx=1)
            self._toggle_control_buttons()
            imagecontainer = self.parent().main_window._imagecontainer
            if cls is PickerThread:
                self._current_settings = self._get_modified_settings(
                    name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings,
                                     imagecontainer)
                self._set_display_renderer_info()
                self._clear_image()
            elif cls is AnalyzerThread:
                self._current_settings = self._get_modified_settings(
                    name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings,
                                     imagecontainer)
                self._set_display_renderer_info()
                self._clear_image()
            elif cls is TrainingThread:
                # training works on an unmodified copy of the settings
                self._current_settings = self._settings.copy()
                self._analyzer = cls(self, self._current_settings,
                                     result_frame._learner)
                self._analyzer.setTerminationEnabled(True)
                self._analyzer.conf_result.connect(
                    result_frame.on_conf_result, Qt.QueuedConnection)
                result_frame.reset()
            elif cls is MultiAnalyzerThread:
                self._current_settings = self._get_modified_settings(
                    name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings,
                                     imagecontainer, ncpu)
                self._set_display_renderer_info()
            elif cls is HmmThread:
                self._current_settings = self._get_modified_settings(
                    name, imagecontainer.has_timelapse)
                # FIXME: classifier handling needs revision!!!
                # collect one trained classifier per channel; the
                # secondary channel is included only when processed
                learner_dict = {}
                for kind in ['primary', 'secondary']:
                    # resolve '<kind>_<option>' settings keys
                    _resolve = lambda x, y: self._settings.get(
                        x, '%s_%s' % (kind, y))
                    env_path = CecogEnvironment.convert_package_path(
                        _resolve('Classification', 'classification_envpath'))
                    if (os.path.exists(env_path) and
                        (kind == 'primary' or self._settings.get(
                            'Processing', 'secondary_processchannel'))):
                        learner = CommonClassPredictor( \
                            env_path,
                            _resolve('ObjectDetection', 'channelid'),
                            _resolve('Classification',
                                     'classification_regionname'))
                        learner.importFromArff()
                        learner_dict[kind] = learner
                ### Whee, I like it... "self.parent().main_window._imagecontainer" crazy, crazy, michael... :-)
                self._analyzer = cls(
                    self, self._current_settings, learner_dict,
                    self.parent().main_window._imagecontainer)
                self._analyzer.setTerminationEnabled(True)
                lw = self.parent().main_window.log_window
                lw.show()
            elif cls is PostProcessingThread:
                # same classifier collection as the HmmThread branch,
                # but gated on the '<kind>_classification' flag
                learner_dict = {}
                for kind in ['primary', 'secondary']:
                    _resolve = lambda x, y: self._settings.get(
                        x, '%s_%s' % (kind, y))
                    env_path = CecogEnvironment.convert_package_path(
                        _resolve('Classification', 'classification_envpath'))
                    if (_resolve('Processing', 'classification') and
                        (kind == 'primary' or self._settings.get(
                            'Processing', 'secondary_processchannel'))):
                        learner = CommonClassPredictor( \
                            env_path,
                            _resolve('ObjectDetection', 'channelid'),
                            _resolve('Classification',
                                     'classification_regionname'))
                        learner.importFromArff()
                        learner_dict[kind] = learner
                self._current_settings = self._get_modified_settings(
                    name, imagecontainer.has_timelapse)
                self._analyzer = cls(self, self._current_settings,
                                     learner_dict, imagecontainer)
            # common signal wiring for whichever thread was created
            self._analyzer.setTerminationEnabled(True)
            self._analyzer.finished.connect(self._on_process_finished)
            self._analyzer.stage_info.connect(self._on_update_stage_info,
                                              Qt.QueuedConnection)
            self._analyzer.analyzer_error.connect(self._on_error,
                                                  Qt.QueuedConnection)
            self._analyzer.start(QThread.LowestPriority)
            if self._current_process_item == 0:
                status('Process started...')
    else:
        self._abort_processing()
import os import numpy import argparse from cecog.learning.learning import CommonClassPredictor import time if __name__ == "__main__": parser = argparse.ArgumentParser(description='Process some integers.') parser.add_argument('directory', type=str, help='Directory to the classifier') args = parser.parse_args() if os.path.isdir(args.directory): learner = CommonClassPredictor(args.directory, None, None) learner.importFromArff() t0 = time.time() n, c, g, conf = learner.gridSearch() print "Grid search took: ", time.time() - t0 #c, g, conf = learner.importConfusion() numpy.set_printoptions(linewidth=80) print "Confusion Matrix:" for row in conf.conf: print row else: raise IOError("%s\n is not a valid directory" % args.directory) #learner.statsFromConfusion(conf) # #benchmark