Example #1
    def load_classifier(self, check=True, quiet=False):

        clfdir = self._settings('Classification',
                                '%s_classification_envpath' % self._channel)

        if not isdir(clfdir):
            return
        else:
            chid = self._settings('ObjectDetection',
                                  '%s_channelid' % self._channel)
            title = self._settings(
                'Classification',
                '%s_classification_regionname' % self._channel)
            self._learner = CommonClassPredictor( \
                clf_dir=clfdir,
                name=self._channel,
                channels = {self._channel.title(): title},
                color_channel=chid)

            state = self._learner.state
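            # state reports which classifier artifacts were found on disk
            # (class definition, annotation path, ARFF file, SVM model and
            # range file, sample images); the checks below only summarise it.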
            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg = 'Classifier path: %s\n' % state['path_env']
                msg += 'Found class definition: %s\n' % b(
                    state['has_definition'])
                msg += 'Found annotations: %s\n' % b(
                    state['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(
                    self._learner.is_annotated)
                msg += 'Found ARFF file: %s\n' % b(state['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' % b(
                    self._learner.is_trained)
                msg += 'Found SVM model: %s\n' % b(state['has_model'])
                msg += 'Found SVM range: %s\n' % b(state['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' \
                    %b(self._learner.is_valid)
                msg += 'Found samples: %s\n' % b(state['has_path_samples'])
                msg += ('Sample images are only used for visualization and '
                        'annotation control at the moment.')

                txt = '%s classifier inspection states' % self._channel
                if not quiet:
                    information(self, txt, info=msg)

            if state['has_arff']:
                self._learner.importFromArff()

            if state['has_definition']:
                self._learner.loadDefinition()
Example #2
 def setup_classifiers(self):
     sttg = self.settings
     # processing channel, color channel
     for p_channel, c_channel in self.ch_mapping.iteritems():
         self.settings.set_section('Processing')
         if sttg.get2(self._resolve_name(p_channel, 'classification')):
             sttg.set_section('Classification')
             clf = CommonClassPredictor(
                 clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                      'classification_envpath')),
                 name=p_channel,
                 channels=self._channel_regions(p_channel),
                 color_channel=c_channel)
             clf.importFromArff()
             clf.loadClassifier()
             self.classifiers[p_channel] = clf
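
A minimal standalone sketch of the pattern these snippets share: point CommonClassPredictor at a classifier directory, then load its training data and model. The directory path, channel name, region name and color channel below are placeholders, not values taken from the examples.

from cecog.learning.learning import CommonClassPredictor

clf = CommonClassPredictor(
    clf_dir='/path/to/classifier_dir',    # placeholder directory
    name='primary',                       # processing channel (assumed)
    channels={'Primary': 'primary'},      # {channel title: region name} (assumed)
    color_channel='rfp')                  # color channel id (assumed)
clf.importFromArff()    # read the picked samples (ARFF feature data)
clf.loadClassifier()    # load the trained SVM model and range file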
Example #3
    def load_classifier(self, check=True):

        _resolve = lambda x,y: self._settings.get(x, '%s_%s' % (self._channel, y))
        env_path = convert_package_path(_resolve('Classification',
                                                 'classification_envpath'))
        classifier_infos = {'strEnvPath' : env_path,
                            #'strModelPrefix' : _resolve('Classification', 'classification_prefix'),
                            'strChannelId' : _resolve('ObjectDetection', 'channelid'),
                            'strRegionId' : _resolve('Classification', 'classification_regionname'),
                            }
        try:
            self._learner = CommonClassPredictor(dctCollectSamples=classifier_infos)
        except:
            exception(self, 'Error on loading classifier.')
        else:
            result = self._learner.check()
            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg =  'Classifier path: %s\n' % result['path_env']
                msg += 'Found class definition: %s\n' % b(result['has_definition'])
                msg += 'Found annotations: %s\n' % b(result['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(self.is_pick_samples())
                msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' % b(self.is_train_classifier())
                msg += 'Found SVM model: %s\n' % b(result['has_model'])
                msg += 'Found SVM range: %s\n' % b(result['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' % b(self.is_apply_classifier())
                msg += 'Found samples: %s\n' % b(result['has_path_samples'])
                msg += 'Sample images are only used for visualization and annotation control at the moment.'

                txt = '%s classifier inspection results' % self._channel
                information(self, txt, info=msg)

            if result['has_arff']:
                self._learner.importFromArff()
                nr_features_prev = len(self._learner.lstFeatureNames)
                removed_features = self._learner.filterData(apply=False)
                nr_features = nr_features_prev - len(removed_features)
                self._label_features.setText(self.LABEL_FEATURES % (nr_features, nr_features_prev))
                self._label_features.setToolTip("removed %d features containing NA values:\n%s" %
                                                (len(removed_features), "\n".join(removed_features)))

            if result['has_definition']:
                self._learner.loadDefinition()

            if result['has_conf']:
                c, g, conf = self._learner.importConfusion()
                self._set_info(c, g, conf)
                self._init_conf_table(conf)
                self._update_conf_table(conf)
            else:
                conf = None
                self._init_conf_table(conf)
            self._set_info_table(conf)
Example #4
    def load_classifier(self, check=True):
        _resolve = lambda x,y: self._settings.get(x, '%s_%s'
                                                  % (self._channel, y))
        clfdir = CecogEnvironment.convert_package_path(_resolve('Classification',
                                               'classification_envpath'))
        # XXX - where does the "." come from?
        if not isdir(clfdir) or clfdir == ".":
            return
        else:
            self._learner = CommonClassPredictor( \
                clf_dir=clfdir,
                name=self._channel,
                channels={self._channel.title(): _resolve('Classification', 'classification_regionname')},
                color_channel=_resolve('ObjectDetection', 'channelid'))
            result = self._learner.check()

            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg =  'Classifier path: %s\n' % result['path_env']
                msg += 'Found class definition: %s\n' % b(result['has_definition'])
                msg += 'Found annotations: %s\n' % b(result['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(self.is_pick_samples())
                msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' % b(self.is_train_classifier())
                msg += 'Found SVM model: %s\n' % b(result['has_model'])
                msg += 'Found SVM range: %s\n' % b(result['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' % b(self.is_apply_classifier())
                msg += 'Found samples: %s\n' % b(result['has_path_samples'])
                msg += 'Sample images are only used for visualization and annotation control at the moment.'

                txt = '%s classifier inspection results' % self._channel
                information(self, txt, info=msg)

            if result['has_arff']:
                self._learner.importFromArff()
                nr_features_prev = len(self._learner.feature_names)
                removed_features = self._learner.filter_nans(apply=True)
                nr_features = nr_features_prev - len(removed_features)
                self._label_features.setText(self.LABEL_FEATURES %(nr_features, nr_features_prev))
                self._label_features.setToolTip("removed %d features containing NA values:\n%s" %
                                                (len(removed_features), "\n".join(removed_features)))

            if result['has_definition']:
                self._learner.loadDefinition()

            if result['has_conf']:
                c, g, conf = self._learner.importConfusion()
                self._set_info(c, g, conf)
                self._init_conf_table(conf)
                self._update_conf_table(conf)
            else:
                conf = None
                self._init_conf_table(conf)
            self._set_info_table(conf)
Example #5
    def setup_classifiers(self):
        sttg = self.settings

        # processing channel, color channel
        for p_channel, c_channel in self.ch_mapping.iteritems():
            self.settings.set_section('Processing')
            if sttg.get2(self._resolve_name(p_channel, 'classification')):
                chreg = self._channel_regions(p_channel)
                if sttg("EventSelection", "unsupervised_event_selection"):
                    nclusters = sttg("EventSelection", "num_clusters")
                    self.classifiers[p_channel] = ClassDefinitionUnsup( \
                        nclusters, chreg)
                else:
                    sttg.set_section('Classification')
                    clf = CommonClassPredictor(
                        clf_dir=sttg.get2(self._resolve_name(p_channel,
                                                             'classification_envpath')),
                        name=p_channel,
                        channels=chreg,
                        color_channel=c_channel)
                    clf.importFromArff()
                    clf.loadClassifier()
                    self.classifiers[p_channel] = clf
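
Note that in the unsupervised branch the value stored in self.classifiers is a ClassDefinitionUnsup built from the configured cluster count, not a trained CommonClassPredictor, so code reading self.classifiers has to handle both types.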
Example #6
    def _setupClassifier(self):
        for name, image in self.images.iteritems():
            if image is None:
                continue
            self.classifiers[name] = CommonClassPredictor( \
                clf_dir=self.mapper('Classification',
                                    '%s_classification_envpath'
                                    %name),
                name=name,
                channels=name,
                color_channel=self.mapper("ObjectDetection", "%s_channelid" %name))

            self.classifiers[name].importFromArff()
            self.classifiers[name].loadClassifier()
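
This variant builds one predictor per image channel that is actually present (channels mapped to None are skipped) and keys the predictors by channel name.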
Example #7
    def load_classifier(self, check=True, quiet=False):

        clfdir = self._settings('Classification',
                                '%s_classification_envpath' %self._channel)

        if not isdir(clfdir):
            return
        else:
            chid = self._settings('ObjectDetection', '%s_channelid' %self._channel)
            title = self._settings('Classification',
                                   '%s_classification_regionname' %self._channel)
            self._learner = CommonClassPredictor( \
                clf_dir=clfdir,
                name=self._channel,
                channels = {self._channel.title(): title},
                color_channel=chid)

            state = self._learner.state
            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg =  'Classifier path: %s\n' % state['path_env']
                msg += 'Found class definition: %s\n' % b(state['has_definition'])
                msg += 'Found annotations: %s\n' % b(state['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(self._learner.is_annotated)
                msg += 'Found ARFF file: %s\n' % b(state['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' %b(self._learner.is_trained)
                msg += 'Found SVM model: %s\n' % b(state['has_model'])
                msg += 'Found SVM range: %s\n' % b(state['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' \
                    %b(self._learner.is_valid)
                msg += 'Found samples: %s\n' % b(state['has_path_samples'])
                msg += ('Sample images are only used for visualization and '
                        'annotation control at the moment.')

                txt = '%s classifier inspection states' % self._channel
                if not quiet:
                    information(self, txt, info=msg)

            if state['has_arff']:
                self._learner.importFromArff()

            if state['has_definition']:
                self._learner.loadDefinition()
Example #8
 def setup_classifiers(self):
     sttg = self.settings
     # processing channel, color channel
     for p_channel, c_channel in self.ch_mapping.iteritems():
         self.settings.set_section('Processing')
         if sttg.get2(self._resolve_name(p_channel, 'classification')):
             sttg.set_section('Classification')
             clf = CommonClassPredictor(
                 clf_dir=sttg.get2(
                     self._resolve_name(p_channel,
                                        'classification_envpath')),
                 name=p_channel,
                 channels=self._channel_regions(p_channel),
                 color_channel=c_channel)
             clf.importFromArff()
             clf.loadClassifier()
             self.classifiers[p_channel] = clf
Example #9
    def _on_process_start(self, name, start_again=False):
        if not self._is_running or start_again:
            is_valid = True
            self._is_abort = False
            self._has_error = False

            if self._process_items is None:
                cls = self._control_buttons[name]['cls']
                if type(cls) == types.ListType:
                    self._process_items = cls
                    self._current_process_item = 0
                    cls = cls[self._current_process_item]
                else:
                    self._process_items = None
                    self._current_process_item = 0
            else:
                cls = self._process_items[self._current_process_item]


            if self.name == SECTION_NAME_CLASSIFICATION:
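                # For the classification section, verify that the prerequisites
                # of the requested step (sample picking, training, testing)
                # are in place before the process is allowed to start.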
                result_frame = self._get_result_frame(self._tab_name)
                result_frame.load_classifier(check=False)
                learner = result_frame._learner

                if name == self.PROCESS_PICKING:
                    if not result_frame.classifier.is_annotated:
                        is_valid = False
                        result_frame.msg_pick_samples(self)
                    elif result_frame.classifier.is_trained:
                        if not question(self, 'Samples already picked',
                                    'Do you want to pick samples again and '
                                    'overwrite previous '
                                    'results?'):
                            is_valid = False

                elif name == self.PROCESS_TRAINING:
                    if not result_frame.classifier.is_trained:
                        is_valid = False
                        result_frame.msg_train_classifier(self)
                    elif result_frame.classifier.is_valid:
                        if not question(self, 'Classifier already trained',
                                    'Do you want to train the classifier '
                                    'again?'):
                            is_valid = False

                elif name == self.PROCESS_TESTING and not result_frame.classifier.is_valid:
                    is_valid = False
                    result_frame.msg_apply_classifier(self)

            if cls is MultiAnalyzerThread:
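                # Multi-process analysis: ask how many worker processes to use
                # (default: the detected CPU count, allowed range 1 .. 2*ncpu).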
                ncpu = cpu_count()
                (ncpu, ok) = QInputDialog.getInt(None, "Your machine has %d processors available." % ncpu,
                                                 "Select the number of processors",
                                                 ncpu, 1, ncpu*2)
                if not ok:
                    self._process_items = None
                    is_valid = False

            if is_valid:
                self._current_process = name

                if not start_again:
                    self.parent().main_window.log_window.clear()

                    self._is_running = True
                    self._stage_infos = {}

                    self._toggle_tabs(False)
                    # disable all section buttons of the main widget
                    self.toggle_tabs.emit(self.get_name())

                    self._set_control_button_text(idx=1)
                    self._toggle_control_buttons()

                imagecontainer = self.parent().main_window._imagecontainer

                if cls is PickerThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, imagecontainer)
                    self._clear_image()

                elif cls is AnalyzerThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, imagecontainer)
                    self._clear_image()

                elif cls is TrainingThread:
                    self._current_settings = self._settings.copy()

                    self._analyzer = cls(self, self._current_settings, result_frame._learner)
                    self._analyzer.setTerminationEnabled(True)

                    self._analyzer.conf_result.connect(result_frame.on_conf_result,
                                                       Qt.QueuedConnection)
                    result_frame.reset()

                elif cls is MultiAnalyzerThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, imagecontainer, ncpu)


                elif cls is ErrorCorrectionThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings,
                                         self.parent().main_window._imagecontainer)

                elif cls is PostProcessingThread:
                    learner_dict = {}
                    for channel in ['primary', 'secondary']:
                        path = self._settings('Classification', '%s_classification_envpath' %channel)
                        if (self._settings('Processing', '%s_classification' %channel) and
                            (channel == 'primary' or self._settings('General', 'process_secondary'))):
                            learner = CommonClassPredictor( \
                                path,
                                self._settings('ObjectDetection', '%s_channelid' %channel),
                                self._settings('Classification', '%s_classification_regionname' %channel))

                            learner.importFromArff()
                            learner_dict[channel] = learner
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, learner_dict, imagecontainer)
                    self._analyzer.setTerminationEnabled(True)

                self._analyzer.finished.connect(self._on_process_finished)
                self._analyzer.stage_info.connect(self._on_update_stage_info, Qt.QueuedConnection)
                self._analyzer.analyzer_error.connect(self._on_error, Qt.QueuedConnection)
                self._analyzer.image_ready.connect(self._on_update_image)

                self._analyzer.start(QThread.LowestPriority)
                if self._current_process_item == 0:
                    self.status_message.emit('Process started...')

        else:
            self._abort_processing()
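
Depending on the selected process, the frame instantiates the matching worker thread (PickerThread, AnalyzerThread, TrainingThread, MultiAnalyzerThread, ErrorCorrectionThread or PostProcessingThread), connects its signals and starts it at the lowest priority; only the post-processing branch constructs CommonClassPredictor instances directly from the settings.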
Example #10
class ClassifierResultFrame(QGroupBox):

    LABEL_FEATURES = '#Features: %d (%d)'
    LABEL_ACC = 'Overall accuracy: %.1f%%'
    LABEL_C = 'Log2(C) = %.1f'
    LABEL_G = 'Log2(g) = %.1f'

    def __init__(self, parent, channel, settings):
        super(ClassifierResultFrame, self).__init__(parent)

        self._channel = channel
        self._settings = settings

        layout = QVBoxLayout(self)
        layout.setContentsMargins(5, 5, 5, 5)

        splitter = QSplitter(Qt.Horizontal, self)
        splitter.setSizePolicy(\
            QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                        QSizePolicy.Expanding|QSizePolicy.Maximum))
        splitter.setStretchFactor(0, 2)
        layout.addWidget(splitter)

        frame_info = QFrame()
        layout_info = QVBoxLayout(frame_info)
        label = QLabel('Class & annotation info', frame_info)
        layout_info.addWidget(label)
        self._table_info = QTableWidget(frame_info)
        self._table_info.setEditTriggers(QTableWidget.NoEditTriggers)
        self._table_info.setSelectionMode(QTableWidget.NoSelection)
        self._table_info.setSizePolicy(\
            QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                        QSizePolicy.Expanding|QSizePolicy.Maximum))
        layout_info.addWidget(self._table_info)
        splitter.addWidget(frame_info)


        frame_conf = QFrame()
        layout_conf = QVBoxLayout(frame_conf)
        label = QLabel('Confusion matrix', frame_conf)
        layout_conf.addWidget(label)
        self._table_conf = QTableWidget(frame_conf)
        self._table_conf.setEditTriggers(QTableWidget.NoEditTriggers)
        self._table_conf.setSelectionMode(QTableWidget.NoSelection)
        self._table_conf.setSizePolicy(\
            QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                        QSizePolicy.Expanding|QSizePolicy.Maximum))
        layout_conf.addWidget(self._table_conf)
        splitter.addWidget(frame_conf)

        desc = QFrame(self)
        layout_desc = QHBoxLayout(desc)
        self._label_acc = QLabel(self.LABEL_ACC % float('NAN'), desc)
        layout_desc.addWidget(self._label_acc, Qt.AlignLeft)
        self._label_features = QLabel(self.LABEL_FEATURES % (0,0), desc)
        layout_desc.addWidget(self._label_features, Qt.AlignLeft)
        self._label_c = QLabel(self.LABEL_C % float('NAN'), desc)
        layout_desc.addWidget(self._label_c, Qt.AlignLeft)
        self._label_g = QLabel(self.LABEL_G % float('NAN'), desc)
        layout_desc.addWidget(self._label_g, Qt.AlignLeft)
        self.browserBtn = QPushButton('Open browser for annotation', desc)
        layout_desc.addWidget(self.browserBtn)
        layout.addWidget(desc)
        self._has_data = False
        self._learner = None

    @property
    def classifier(self):
        return self._learner

    def clear(self):
        self._table_conf.clear()
        self._table_info.clear()
        self._has_data = False

    def reset(self):
        self._has_data = False
        self._table_conf.clearContents()

    def on_load(self):
        self.load_classifier(check=True)
        self.update_frame()

    def load_classifier(self, check=True, quiet=False):

        clfdir = self._settings('Classification',
                                '%s_classification_envpath' %self._channel)

        if not isdir(clfdir):
            return
        else:
            chid = self._settings('ObjectDetection', '%s_channelid' %self._channel)
            title = self._settings('Classification',
                                   '%s_classification_regionname' %self._channel)
            self._learner = CommonClassPredictor( \
                clf_dir=clfdir,
                name=self._channel,
                channels = {self._channel.title(): title},
                color_channel=chid)

            state = self._learner.state
            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg =  'Classifier path: %s\n' % state['path_env']
                msg += 'Found class definition: %s\n' % b(state['has_definition'])
                msg += 'Found annotations: %s\n' % b(state['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(self._learner.is_annotated)
                msg += 'Found ARFF file: %s\n' % b(state['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' %b(self._learner.is_trained)
                msg += 'Found SVM model: %s\n' % b(state['has_model'])
                msg += 'Found SVM range: %s\n' % b(state['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' \
                    %b(self._learner.is_valid)
                msg += 'Found samples: %s\n' % b(state['has_path_samples'])
                msg += ('Sample images are only used for visualization and '
                        'annotation control at the moment.')

                txt = '%s classifier inspection states' % self._channel
                if not quiet:
                    information(self, txt, info=msg)

            if state['has_arff']:
                self._learner.importFromArff()

            if state['has_definition']:
                self._learner.loadDefinition()

    def update_frame(self):
        """Updates cass & annotation info and confusion matrix in the gui"""

        # update only if possible...
        # if samples were picked/annotated
        try:
            nftr_prev = len(self._learner.feature_names)
        except TypeError:
            pass
        except AttributeError: # ClassDefUnsupervised, nothing to draw
            return
        else:
            removed_features = self._learner.filter_nans(apply=True)
            nftr = nftr_prev - len(removed_features)
            self._label_features.setText(self.LABEL_FEATURES %(nftr, nftr_prev))
            self._label_features.setToolTip(
                "removed %d features containing NA values:\n%s" %
                (len(removed_features), "\n".join(removed_features)))

        # if classifier was trained
        try:
            c, g, conf = self._learner.importConfusion()
        except IOError as e:
            conf = None
            self._init_conf_table(conf)
        else:
            self._set_info(c, g, conf)
            self._init_conf_table(conf)
            self._update_conf_table(conf)
        self._set_info_table(conf)


    def msg_pick_samples(self, parent):
        state = self._learner.state
        text = 'Sample picking is not possible'
        info = 'You need to provide a class definition '\
               'file and annotation files.'
        detail = 'Missing components:\n'
        if not state['has_path_annotations']:
            detail += "- Annotation path '%s' not found.\n" %state['path_annotations']
        if not state['has_definition']:
            detail += "- Class definition file '%s' not found.\n" %state['definition']
        return information(parent, text, info, detail)

    def msg_train_classifier(self, parent):
        state = self._learner.state
        text = 'Classifier training is not possible'
        info = 'You need to pick samples first.'
        detail = 'Missing components:\n'
        if not state['has_arff']:
            detail += "- Feature file '%s' not found.\n" % state['arff']
        return information(parent, text, info, detail)

    def msg_apply_classifier(self, parent):
        state = self._learner.state
        text = 'Classifier model not found'
        info = 'You need to train a classifier first.'
        detail = 'Missing components:\n'
        if not state['has_model']:
            detail += "- SVM model file '%s' not found.\n" % state['model']
        if not state['has_range']:
            detail += "- SVM range file '%s' not found.\n" % state['range']
        return information(parent, text, info, detail)

    def _set_info_table(self, conf):
        rows = len(self._learner.class_labels)
        self._table_info.clear()
        names_horizontal = [('Name', 'class name'),
                            ('Samples', 'class samples'),
                            ('Color', 'class color'),
                            ('%PR', 'class precision in %'),
                            ('%SE', 'class sensitivity in %')]

        names_vertical = [str(k) for k in self._learner.class_names.keys()] + \
            ['', '#']
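        # the two extra vertical labels add a spacer row and a summary row
        # holding the overall sample count and aggregate precision/sensitivity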
        self._table_info.setColumnCount(len(names_horizontal))
        self._table_info.setRowCount(len(names_vertical))
        self._table_info.setVerticalHeaderLabels(names_vertical)
        self._table_info.setColumnWidth(1, 20)
        for c, (name, info) in enumerate(names_horizontal):
            item = QTableWidgetItem(name)
            item.setToolTip(info)
            self._table_info.setHorizontalHeaderItem(c, item)

        for r, label in enumerate(self._learner.class_names.keys()):
            self._table_info.setRowHeight(r, 20)
            name = self._learner.class_names[label]
            samples = self._learner.names2samples[name]
            self._table_info.setItem(r, 0, QTableWidgetItem(name))
            self._table_info.setItem(r, 1, QTableWidgetItem(str(samples)))
            item = QTableWidgetItem(' ')
            item.setBackground(QBrush(\
                    QColor(*hex2rgb(self._learner.hexcolors[name]))))
            self._table_info.setItem(r, 2, item)

            if not conf is None and r < len(conf):
                item = QTableWidgetItem('%.1f' % (conf.ppv[r] * 100.))
                item.setToolTip('"%s" precision' %  name)
                self._table_info.setItem(r, 3, item)

                item = QTableWidgetItem('%.1f' % (conf.se[r] * 100.))
                item.setToolTip('"%s" sensitivity' %  name)
                self._table_info.setItem(r, 4, item)

        if not conf is None:
            self._table_info.setRowHeight(r+1, 20)
            r += 2
            self._table_info.setRowHeight(r, 20)
            name = "overal"
            samples = sum(self._learner.names2samples.values())
            self._table_info.setItem(r, 0, QTableWidgetItem(name))
            self._table_info.setItem(r, 1, QTableWidgetItem(str(samples)))
            item = QTableWidgetItem(' ')
            item.setBackground(QBrush(QColor(*hex2rgb('#FFFFFF'))))
            self._table_info.setItem(r, 2, item)

            item = QTableWidgetItem('%.1f' % (conf.wav_ppv * 100.))
            item.setToolTip('%s per class precision' %  name)
            self._table_info.setItem(r, 3, item)

            item = QTableWidgetItem('%.1f' % (conf.wav_se * 100.))
            item.setToolTip('%s per class sensitivity' %  name)
            self._table_info.setItem(r, 4, item)

        self._table_info.resizeColumnsToContents()

    def _init_conf_table(self, conf):
        self._table_conf.clear()

        if not conf is None:
            conf_array = conf.conf
            rows, cols = conf_array.shape
            self._table_conf.setColumnCount(cols)
            self._table_conf.setRowCount(rows)

            for i, label in enumerate(self._learner.class_names):
                h_item = QTableWidgetItem(str(label))
                v_item = QTableWidgetItem(str(label))

                tooltip = '%d : %s' %(label, self._learner.class_names[label])
                v_item.setToolTip(tooltip)
                h_item.setToolTip(tooltip)

                self._table_conf.setHorizontalHeaderItem(i, h_item)
                self._table_conf.setVerticalHeaderItem(i, v_item)

                self._table_conf.setColumnWidth(i, 20)
                self._table_conf.setRowHeight(i, 20)

    def _update_conf_table(self, conf):
        conf_array = conf.conf
        rows, cols = conf_array.shape
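        # normalise each row by its sample count so that darker cells mark
        # larger fractions of that row's samples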
        conf_norm = conf_array.swapaxes(0,1) / numpy.array(numpy.sum(conf_array, 1), numpy.float)
        conf_norm = conf_norm.swapaxes(0,1)
        self._table_conf.clearContents()
        for r in range(rows):
            for c in range(cols):
                item = QTableWidgetItem()
                item.setToolTip('%d samples' % conf_array[r,c])
                if not numpy.isnan(conf_norm[r,c]):
                    col = int(255 * (1 - conf_norm[r,c]))
                    item.setBackground(QBrush(QColor(col, col, col)))
                self._table_conf.setItem(r, c, item)

    def _set_info(self, c, g, conf):
        self._label_acc.setText(self.LABEL_ACC % (conf.ac_sample*100.))
        self._label_c.setText(self.LABEL_C % c)
        self._label_g.setText(self.LABEL_G % g)

    def on_conf_result(self, c, g, conf):
        self._set_info(c, g, conf)

        if not self._has_data:
            self._has_data = True
            self._init_conf_table(conf)
        self._set_info_table(conf)
        self._update_conf_table(conf)
Example #11
class ClassifierResultFrame(QGroupBox):

    LABEL_FEATURES = '#Features: %d (%d)'
    LABEL_ACC = 'Overall accuracy: %.1f%%'
    LABEL_C = 'Log2(C) = %.1f'
    LABEL_G = 'Log2(g) = %.1f'

    def __init__(self, parent, channel, settings):
        super(ClassifierResultFrame, self).__init__(parent)

        self._channel = channel
        self._settings = settings

        layout = QVBoxLayout(self)
        layout.setContentsMargins(5, 5, 5, 5)

        splitter = QSplitter(Qt.Horizontal, self)
        splitter.setSizePolicy(\
            QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                        QSizePolicy.Expanding|QSizePolicy.Maximum))
        splitter.setStretchFactor(0, 2)
        layout.addWidget(splitter)

        frame_info = QFrame()
        layout_info = QVBoxLayout(frame_info)
        label = QLabel('Class & annotation info', frame_info)
        layout_info.addWidget(label)
        self._table_info = QTableWidget(frame_info)
        self._table_info.setEditTriggers(QTableWidget.NoEditTriggers)
        self._table_info.setSelectionMode(QTableWidget.NoSelection)
        self._table_info.setSizePolicy(\
            QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                        QSizePolicy.Expanding|QSizePolicy.Maximum))
        layout_info.addWidget(self._table_info)
        splitter.addWidget(frame_info)

        frame_conf = QFrame()
        layout_conf = QVBoxLayout(frame_conf)
        label = QLabel('Confusion matrix', frame_conf)
        layout_conf.addWidget(label)
        self._table_conf = QTableWidget(frame_conf)
        self._table_conf.setEditTriggers(QTableWidget.NoEditTriggers)
        self._table_conf.setSelectionMode(QTableWidget.NoSelection)
        self._table_conf.setSizePolicy(\
            QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                        QSizePolicy.Expanding|QSizePolicy.Maximum))
        layout_conf.addWidget(self._table_conf)
        splitter.addWidget(frame_conf)

        desc = QFrame(self)
        layout_desc = QHBoxLayout(desc)
        self._label_acc = QLabel(self.LABEL_ACC % float('NAN'), desc)
        layout_desc.addWidget(self._label_acc, Qt.AlignLeft)
        self._label_features = QLabel(self.LABEL_FEATURES % (0, 0), desc)
        layout_desc.addWidget(self._label_features, Qt.AlignLeft)
        self._label_c = QLabel(self.LABEL_C % float('NAN'), desc)
        layout_desc.addWidget(self._label_c, Qt.AlignLeft)
        self._label_g = QLabel(self.LABEL_G % float('NAN'), desc)
        layout_desc.addWidget(self._label_g, Qt.AlignLeft)
        self.browserBtn = QPushButton('Open browser for annotation', desc)
        layout_desc.addWidget(self.browserBtn)
        layout.addWidget(desc)
        self._has_data = False
        self._learner = None

    @property
    def classifier(self):
        return self._learner

    def clear(self):
        self._table_conf.clear()
        self._table_info.clear()
        self._has_data = False

    def reset(self):
        self._has_data = False
        self._table_conf.clearContents()

    def on_load(self):
        self.load_classifier(check=True)
        self.update_frame()

    def load_classifier(self, check=True, quiet=False):

        clfdir = self._settings('Classification',
                                '%s_classification_envpath' % self._channel)

        if not isdir(clfdir):
            return
        else:
            chid = self._settings('ObjectDetection',
                                  '%s_channelid' % self._channel)
            title = self._settings(
                'Classification',
                '%s_classification_regionname' % self._channel)
            self._learner = CommonClassPredictor( \
                clf_dir=clfdir,
                name=self._channel,
                channels = {self._channel.title(): title},
                color_channel=chid)

            state = self._learner.state
            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg = 'Classifier path: %s\n' % state['path_env']
                msg += 'Found class definition: %s\n' % b(
                    state['has_definition'])
                msg += 'Found annotations: %s\n' % b(
                    state['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(
                    self._learner.is_annotated)
                msg += 'Found ARFF file: %s\n' % b(state['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' % b(
                    self._learner.is_trained)
                msg += 'Found SVM model: %s\n' % b(state['has_model'])
                msg += 'Found SVM range: %s\n' % b(state['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' \
                    %b(self._learner.is_valid)
                msg += 'Found samples: %s\n' % b(state['has_path_samples'])
                msg += ('Sample images are only used for visualization and '
                        'annotation control at the moment.')

                txt = '%s classifier inspection states' % self._channel
                if not quiet:
                    information(self, txt, info=msg)

            if state['has_arff']:
                self._learner.importFromArff()

            if state['has_definition']:
                self._learner.loadDefinition()

    def update_frame(self):
        """Updates cass & annotation info and confusion matrix in the gui"""

        # update only if possible...
        # if samples were picked/annotated
        try:
            nftr_prev = len(self._learner.feature_names)
        except TypeError:
            pass
        except AttributeError:  # ClassDefUnsupervised, nothing to draw
            return
        else:
            removed_features = self._learner.filter_nans(apply=True)
            nftr = nftr_prev - len(removed_features)
            self._label_features.setText(self.LABEL_FEATURES %
                                         (nftr, nftr_prev))
            self._label_features.setToolTip(
                "removed %d features containing NA values:\n%s" %
                (len(removed_features), "\n".join(removed_features)))

        # if classifier was trained
        try:
            c, g, conf = self._learner.importConfusion()
        except IOError as e:
            conf = None
            self._init_conf_table(conf)
        else:
            self._set_info(c, g, conf)
            self._init_conf_table(conf)
            self._update_conf_table(conf)
        self._set_info_table(conf)

    def msg_pick_samples(self, parent):
        state = self._learner.state
        text = 'Sample picking is not possible'
        info = 'You need to provide a class definition '\
               'file and annotation files.'
        detail = 'Missing components:\n'
        if not state['has_path_annotations']:
            detail += "- Annotation path '%s' not found.\n" % state[
                'path_annotations']
        if not state['has_definition']:
            detail += "- Class definition file '%s' not found.\n" % state[
                'definition']
        return information(parent, text, info, detail)

    def msg_train_classifier(self, parent):
        state = self._learner.state
        text = 'Classifier training is not possible'
        info = 'You need to pick samples first.'
        detail = 'Missing components:\n'
        if not state['has_arff']:
            detail += "- Feature file '%s' not found.\n" % state['arff']
        return information(parent, text, info, detail)

    def msg_apply_classifier(self, parent):
        state = self._learner.state
        text = 'Classifier model not found'
        info = 'You need to train a classifier first.'
        detail = 'Missing components:\n'
        if not state['has_model']:
            detail += "- SVM model file '%s' not found.\n" % state['model']
        if not state['has_range']:
            detail += "- SVM range file '%s' not found.\n" % state['range']
        return information(parent, text, info, detail)

    def _set_info_table(self, conf):
        rows = len(self._learner.class_labels)
        self._table_info.clear()
        names_horizontal = [('Name', 'class name'),
                            ('Samples', 'class samples'),
                            ('Color', 'class color'),
                            ('%PR', 'class precision in %'),
                            ('%SE', 'class sensitivity in %')]

        names_vertical = [str(k) for k in self._learner.class_names.keys()] + \
            ['', '#']
        self._table_info.setColumnCount(len(names_horizontal))
        self._table_info.setRowCount(len(names_vertical))
        self._table_info.setVerticalHeaderLabels(names_vertical)
        self._table_info.setColumnWidth(1, 20)
        for c, (name, info) in enumerate(names_horizontal):
            item = QTableWidgetItem(name)
            item.setToolTip(info)
            self._table_info.setHorizontalHeaderItem(c, item)

        for r, label in enumerate(self._learner.class_names.keys()):
            self._table_info.setRowHeight(r, 20)
            name = self._learner.class_names[label]
            samples = self._learner.names2samples[name]
            self._table_info.setItem(r, 0, QTableWidgetItem(name))
            self._table_info.setItem(r, 1, QTableWidgetItem(str(samples)))
            item = QTableWidgetItem(' ')
            item.setBackground(QBrush(\
                    QColor(*hex2rgb(self._learner.hexcolors[name]))))
            self._table_info.setItem(r, 2, item)

            if not conf is None and r < len(conf):
                item = QTableWidgetItem('%.1f' % (conf.ppv[r] * 100.))
                item.setToolTip('"%s" precision' % name)
                self._table_info.setItem(r, 3, item)

                item = QTableWidgetItem('%.1f' % (conf.se[r] * 100.))
                item.setToolTip('"%s" sensitivity' % name)
                self._table_info.setItem(r, 4, item)

        if not conf is None:
            self._table_info.setRowHeight(r + 1, 20)
            r += 2
            self._table_info.setRowHeight(r, 20)
            name = "overal"
            samples = sum(self._learner.names2samples.values())
            self._table_info.setItem(r, 0, QTableWidgetItem(name))
            self._table_info.setItem(r, 1, QTableWidgetItem(str(samples)))
            item = QTableWidgetItem(' ')
            item.setBackground(QBrush(QColor(*hex2rgb('#FFFFFF'))))
            self._table_info.setItem(r, 2, item)

            item = QTableWidgetItem('%.1f' % (conf.wav_ppv * 100.))
            item.setToolTip('%s per class precision' % name)
            self._table_info.setItem(r, 3, item)

            item = QTableWidgetItem('%.1f' % (conf.wav_se * 100.))
            item.setToolTip('%s per class sensitivity' % name)
            self._table_info.setItem(r, 4, item)

        self._table_info.resizeColumnsToContents()

    def _init_conf_table(self, conf):
        self._table_conf.clear()

        if not conf is None:
            conf_array = conf.conf
            rows, cols = conf_array.shape
            self._table_conf.setColumnCount(cols)
            self._table_conf.setRowCount(rows)

            for i, label in enumerate(self._learner.class_names):
                h_item = QTableWidgetItem(str(label))
                v_item = QTableWidgetItem(str(label))

                tooltip = '%d : %s' % (label, self._learner.class_names[label])
                v_item.setToolTip(tooltip)
                h_item.setToolTip(tooltip)

                self._table_conf.setHorizontalHeaderItem(i, h_item)
                self._table_conf.setVerticalHeaderItem(i, v_item)

                self._table_conf.setColumnWidth(i, 20)
                self._table_conf.setRowHeight(i, 20)

    def _update_conf_table(self, conf):
        conf_array = conf.conf
        rows, cols = conf_array.shape
        conf_norm = conf_array.swapaxes(0, 1) / numpy.array(
            numpy.sum(conf_array, 1), numpy.float)
        conf_norm = conf_norm.swapaxes(0, 1)
        self._table_conf.clearContents()
        for r in range(rows):
            for c in range(cols):
                item = QTableWidgetItem()
                item.setToolTip('%d samples' % conf_array[r, c])
                if not numpy.isnan(conf_norm[r, c]):
                    col = int(255 * (1 - conf_norm[r, c]))
                    item.setBackground(QBrush(QColor(col, col, col)))
                self._table_conf.setItem(r, c, item)

    def _set_info(self, c, g, conf):
        self._label_acc.setText(self.LABEL_ACC % (conf.ac_sample * 100.))
        self._label_c.setText(self.LABEL_C % c)
        self._label_g.setText(self.LABEL_G % g)

    def on_conf_result(self, c, g, conf):
        self._set_info(c, g, conf)

        if not self._has_data:
            self._has_data = True
            self._init_conf_table(conf)
        self._set_info_table(conf)
        self._update_conf_table(conf)
Example #12
import os
import numpy
import argparse
from cecog.learning.learning import CommonClassPredictor
import time

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Run an SVM grid search on a CellCognition classifier directory.')
    parser.add_argument('directory',
                        type=str,
                        help='Directory to the classifier')
    args = parser.parse_args()

    if os.path.isdir(args.directory):
        learner = CommonClassPredictor(args.directory, None, None)
        learner.importFromArff()
        t0 = time.time()
        n, c, g, conf = learner.gridSearch()
        print "Grid search took: ", time.time() - t0
        #c, g, conf = learner.importConfusion()

        numpy.set_printoptions(linewidth=80)
        print "Confusion Matrix:"
        for row in conf.conf:
            print row
    else:
        raise IOError("%s\n is not a valid directory" % args.directory)
    #learner.statsFromConfusion(conf)

# #benchmark
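
With the argparse setup above, the script takes the classifier directory as its only argument, e.g. python grid_search_benchmark.py /path/to/classifier_dir (the script name here is illustrative). gridSearch() runs the parameter search over the imported ARFF data and returns the values unpacked above as n, c, g and conf; the raw confusion counts are then printed row by row.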
Example #13
class ClassifierResultFrame(QGroupBox):

    LABEL_FEATURES = '#Features: %d (%d)'
    LABEL_ACC = 'Overall accuracy: %.1f%%'
    LABEL_C = 'Log2(C) = %.1f'
    LABEL_G = 'Log2(g) = %.1f'

    def __init__(self, parent, channel, settings):
        QGroupBox.__init__(self, parent)

        self._channel = channel
        self._settings = settings

        layout = QVBoxLayout(self)
        layout.setContentsMargins(5, 5, 5, 5)

        #self._button = QPushButton('Load', self)
        #self.connect(self._button, SIGNAL('clicked()'), self._on_load)
        #layout.addWidget(self._button, 1, 2)

        splitter = QSplitter(Qt.Horizontal, self)
        splitter.setSizePolicy(QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                                           QSizePolicy.Expanding|QSizePolicy.Maximum))
        splitter.setStretchFactor(0, 2)
        layout.addWidget(splitter)

        frame_info = QFrame()
        layout_info = QVBoxLayout(frame_info)
        label = QLabel('Class & annotation info', frame_info)
        layout_info.addWidget(label)
        self._table_info = QTableWidget(frame_info)
        self._table_info.setEditTriggers(QTableWidget.NoEditTriggers)
        self._table_info.setSelectionMode(QTableWidget.NoSelection)
        self._table_info.setSizePolicy(QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                                                   QSizePolicy.Expanding|QSizePolicy.Maximum))
        layout_info.addWidget(self._table_info)
        splitter.addWidget(frame_info)


        frame_conf = QFrame()
        layout_conf = QVBoxLayout(frame_conf)
        label = QLabel('Confusion matrix', frame_conf)
        layout_conf.addWidget(label)
        self._table_conf = QTableWidget(frame_conf)
        self._table_conf.setEditTriggers(QTableWidget.NoEditTriggers)
        self._table_conf.setSelectionMode(QTableWidget.NoSelection)
        self._table_conf.setSizePolicy(QSizePolicy(QSizePolicy.Expanding|QSizePolicy.Maximum,
                                                   QSizePolicy.Expanding|QSizePolicy.Maximum))
        layout_conf.addWidget(self._table_conf)
        splitter.addWidget(frame_conf)


        desc = QFrame(self)
        layout_desc = QHBoxLayout(desc)
        self._label_acc = QLabel(self.LABEL_ACC % float('NAN'), desc)
        layout_desc.addWidget(self._label_acc, Qt.AlignLeft)
        self._label_features = QLabel(self.LABEL_FEATURES % (0,0), desc)
        layout_desc.addWidget(self._label_features, Qt.AlignLeft)
        self._label_c = QLabel(self.LABEL_C % float('NAN'), desc)
        layout_desc.addWidget(self._label_c, Qt.AlignLeft)
        self._label_g = QLabel(self.LABEL_G % float('NAN'), desc)
        layout_desc.addWidget(self._label_g, Qt.AlignLeft)
        btn = QPushButton('Show Browser', desc)
        btn.clicked.connect(qApp._main_window._on_browser_open)
        layout_desc.addWidget(btn)
        layout.addWidget(desc)

        self._has_data = False

    def clear(self):
        self._table_conf.clear()
        self._table_info.clear()
        self._has_data = False

    def reset(self):
        self._has_data = False
        self._table_conf.clearContents()

    def on_load(self):
        self.load_classifier(check=True)

    def load_classifier(self, check=True):

        _resolve = lambda x,y: self._settings.get(x, '%s_%s' % (self._channel, y))
        env_path = convert_package_path(_resolve('Classification',
                                                 'classification_envpath'))
        classifier_infos = {'strEnvPath' : env_path,
                            #'strModelPrefix' : _resolve('Classification', 'classification_prefix'),
                            'strChannelId' : _resolve('ObjectDetection', 'channelid'),
                            'strRegionId' : _resolve('Classification', 'classification_regionname'),
                            }
        try:
            self._learner = CommonClassPredictor(dctCollectSamples=classifier_infos)
        except Exception:
            exception(self, 'Error on loading classifier.')
        else:
            result = self._learner.check()
            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg =  'Classifier path: %s\n' % result['path_env']
                msg += 'Found class definition: %s\n' % b(result['has_definition'])
                msg += 'Found annotations: %s\n' % b(result['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(self.is_pick_samples())
                msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' % b(self.is_train_classifier())
                msg += 'Found SVM model: %s\n' % b(result['has_model'])
                msg += 'Found SVM range: %s\n' % b(result['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' % b(self.is_apply_classifier())
                msg += 'Found samples: %s\n' % b(result['has_path_samples'])
                msg += 'Sample images are only used for visualization and annotation control at the moment.'

                txt = '%s classifier inspection results' % self._channel
                information(self, txt, info=msg)

            if result['has_arff']:
                self._learner.importFromArff()
                nr_features_prev = len(self._learner.lstFeatureNames)
                removed_features = self._learner.filterData(apply=False)
                nr_features = nr_features_prev - len(removed_features)
                self._label_features.setText(self.LABEL_FEATURES % (nr_features, nr_features_prev))
                self._label_features.setToolTip("removed %d features containing NA values:\n%s" %
                                                (len(removed_features), "\n".join(removed_features)))

            if result['has_definition']:
                self._learner.loadDefinition()

            if result['has_conf']:
                c, g, conf = self._learner.importConfusion()
                self._set_info(c, g, conf)
                self._init_conf_table(conf)
                self._update_conf_table(conf)
            else:
                conf = None
                self._init_conf_table(conf)
            self._set_info_table(conf)

    def msg_pick_samples(self, parent):
        result = self._learner.check()
        text = 'Sample picking is not possible'
        info = 'You need to provide a class definition '\
               'file and annotation files.'
        detail = 'Missing components:\n'
        if not result['has_path_annotations']:
            detail += "- Annotation path '%s' not found.\n" % result['path_annotations']
        if not result['has_definition']:
            detail += "- Class definition file '%s' not found.\n" % result['definition']
        return information(parent, text, info, detail)

    def is_pick_samples(self):
        result = self._learner.check()
        return result['has_path_annotations'] and result['has_definition']

    def msg_train_classifier(self, parent):
        result = self._learner.check()
        text = 'Classifier training is not possible'
        info = 'You need to pick samples first.'
        detail = 'Missing components:\n'
        if not result['has_arff']:
            detail += "- Feature file '%s' not found.\n" % result['arff']
        return information(parent, text, info, detail)

    def is_train_classifier(self):
        result = self._learner.check()
        return result['has_arff']

    def msg_apply_classifier(self, parent):
        result = self._learner.check()
        text = 'Classifier model not found'
        info = 'You need to train a classifier first.'
        detail = 'Missing components:\n'
        if not result['has_model']:
            detail += "- SVM model file '%s' not found.\n" % result['model']
        if not result['has_range']:
            detail += "- SVM range file '%s' not found.\n" % result['range']
        return information(parent, text, info, detail)

    def is_apply_classifier(self):
        result = self._learner.check()
        return result['has_model'] and result['has_range']

    def _set_info_table(self, conf):
        rows = len(self._learner.lstClassLabels)
        self._table_info.clear()
        names_horizontal = [('Name', 'class name'),
                            ('Samples', 'class samples'),
                            ('Color', 'class color'),
                            ('%PR', 'class precision in %'),
                            ('%SE', 'class sensitivity in %'),
#                            ('AC%', 'class accuracy in %'),
#                            ('SE%', 'class sensitivity in %'),
#                            ('SP%', 'class specificity in %'),
#                            ('PPV%', 'class positive predictive value in %'),
#                            ('NPV%', 'class negative predictive value in %'),
                            ]
        names_vertical = [str(self._learner.nl2l[r]) for r in range(rows)] + ['','#']
        self._table_info.setColumnCount(len(names_horizontal))
        self._table_info.setRowCount(len(names_vertical))
        self._table_info.setVerticalHeaderLabels(names_vertical)
        self._table_info.setColumnWidth(1, 20)
        for c, (name, info) in enumerate(names_horizontal):
            item = QTableWidgetItem(name)
            item.setToolTip(info)
            self._table_info.setHorizontalHeaderItem(c, item)
        r = 0
        for r in range(rows):
            self._table_info.setRowHeight(r, 20)
            label = self._learner.nl2l[r]
            name = self._learner.dctClassNames[label]
            samples = self._learner.names2samples[name]
            self._table_info.setItem(r, 0, QTableWidgetItem(name))
            self._table_info.setItem(r, 1, QTableWidgetItem(str(samples)))
            item = QTableWidgetItem(' ')
            item.setBackground(QBrush(QColor(*hexToRgb(self._learner.dctHexColors[name]))))
            self._table_info.setItem(r, 2, item)

            if conf is not None and r < len(conf):
                item = QTableWidgetItem('%.1f' % (conf.ppv[r] * 100.))
                item.setToolTip('"%s" precision' %  name)
                self._table_info.setItem(r, 3, item)

                item = QTableWidgetItem('%.1f' % (conf.se[r] * 100.))
                item.setToolTip('"%s" sensitivity' %  name)
                self._table_info.setItem(r, 4, item)

#                item = QTableWidgetItem('%.1f' % (conf.ac[r] * 100.))
#                item.setToolTip('"%s" accuracy' %  name)
#                self._table_info.setItem(r, 3, item)
#
#                item = QTableWidgetItem('%.1f' % (conf.se[r] * 100.))
#                item.setToolTip('"%s" sensitivity' %  name)
#                self._table_info.setItem(r, 4, item)
#
#                item = QTableWidgetItem('%.1f' % (conf.sp[r] * 100.))
#                item.setToolTip('"%s" specificity' %  name)
#                self._table_info.setItem(r, 5, item)
#
#                item = QTableWidgetItem('%.1f' % (conf.ppv[r] * 100.))
#                item.setToolTip('"%s" positive predictive value' %  name)
#                self._table_info.setItem(r, 6, item)
#
#                item = QTableWidgetItem('%.1f' % (conf.npv[r] * 100.))
#                item.setToolTip('"%s" negative predictive value' %  name)
#                self._table_info.setItem(r, 7, item)

        if conf is not None:
            self._table_info.setRowHeight(r+1, 20)
            r += 2
            self._table_info.setRowHeight(r, 20)
            name = "overal"
            samples = sum(self._learner.names2samples.values())
            self._table_info.setItem(r, 0, QTableWidgetItem(name))
            self._table_info.setItem(r, 1, QTableWidgetItem(str(samples)))
            item = QTableWidgetItem(' ')
            item.setBackground(QBrush(QColor(*hexToRgb('#FFFFFF'))))
            self._table_info.setItem(r, 2, item)

            item = QTableWidgetItem('%.1f' % (conf.wav_ppv * 100.))
            item.setToolTip('%s per class precision' %  name)
            self._table_info.setItem(r, 3, item)

            item = QTableWidgetItem('%.1f' % (conf.wav_se * 100.))
            item.setToolTip('%s per class sensitivity' %  name)
            self._table_info.setItem(r, 4, item)

#            item = QTableWidgetItem('%.1f' % (conf.av_ac * 100.))
#            item.setToolTip('%s per class accuracy' %  name)
#            self._table_info.setItem(r, 3, item)
#
#            item = QTableWidgetItem('%.1f' % (conf.av_se * 100.))
#            item.setToolTip('%s per class sensitivity' %  name)
#            self._table_info.setItem(r, 4, item)
#
#            item = QTableWidgetItem('%.1f' % (conf.av_sp * 100.))
#            item.setToolTip('%s per class specificity' %  name)
#            self._table_info.setItem(r, 5, item)
#
#            item = QTableWidgetItem('%.1f' % (conf.av_ppv * 100.))
#            item.setToolTip('%s per class positive predictive value' %  name)
#            self._table_info.setItem(r, 6, item)
#
#            item = QTableWidgetItem('%.1f' % (conf.av_npv * 100.))
#            item.setToolTip('%s per class negative predictive value' %  name)
#            self._table_info.setItem(r, 7, item)

        self._table_info.resizeColumnsToContents()

    def _init_conf_table(self, conf):
        self._table_conf.clear()
        if conf is not None:
            conf_array = conf.conf
            rows, cols = conf_array.shape
            self._table_conf.setColumnCount(cols)
            self._table_conf.setRowCount(rows)
            #names2cols = self._learner.dctHexColors
            for c in range(cols):
                self._table_conf.setColumnWidth(c, 20)
                label = self._learner.nl2l[c]
                name = self._learner.dctClassNames[label]
                item = QTableWidgetItem(str(label))
                item.setToolTip('%d : %s' % (label, name))
                #item.setBackground(QBrush(QColor(*hexToRgb(names2cols[name]))))
                self._table_conf.setHorizontalHeaderItem(c, item)
            for r in range(rows):
                self._table_conf.setRowHeight(r, 20)
                label = self._learner.nl2l[r]
                name = self._learner.dctClassNames[label]
                item = QTableWidgetItem(str(label))
                item.setToolTip('%d : %s' % (label, name))
                #item.setForeground(QBrush(QColor(*hexToRgb(names2cols[name]))))
                self._table_conf.setVerticalHeaderItem(r, item)

    def _update_conf_table(self, conf):
        conf_array = conf.conf
        rows, cols = conf_array.shape
        conf_norm = conf_array.swapaxes(0,1) / numpy.array(numpy.sum(conf_array, 1), float)
        conf_norm = conf_norm.swapaxes(0,1)
        self._table_conf.clearContents()
        for r in range(rows):
            for c in range(cols):
                item = QTableWidgetItem()
                item.setToolTip('%d samples' % conf_array[r,c])
                if not numpy.isnan(conf_norm[r,c]):
                    col = int(255 * (1 - conf_norm[r,c]))
                    item.setBackground(QBrush(QColor(col, col, col)))
                self._table_conf.setItem(r, c, item)

    def _set_info(self, c, g, conf):
        self._label_acc.setText(self.LABEL_ACC % (conf.ac_sample*100.))
        self._label_c.setText(self.LABEL_C % c)
        self._label_g.setText(self.LABEL_G % g)

    def on_conf_result(self, c, g, conf):
        self._set_info(c, g, conf)

        if not self._has_data:
            self._has_data = True
            self._init_conf_table(conf)
        self._set_info_table(conf)
        self._update_conf_table(conf)
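
The grey shading of the confusion table in _update_conf_table comes from a row-wise normalisation of the raw confusion counts; below is a standalone numpy sketch of just that step, using made-up counts.

import numpy

# Made-up confusion counts: rows are true classes, columns are predicted classes.
conf_array = numpy.array([[40,  2,  1],
                          [ 3, 55,  0],
                          [ 0,  4, 20]])

# Same normalisation as in _update_conf_table: divide each row by its row sum so a
# cell holds the fraction of that true class, then map the fraction to a grey value
# (255 = white for 0%, 0 = black for 100%).
conf_norm = conf_array.swapaxes(0, 1) / numpy.array(numpy.sum(conf_array, 1), float)
conf_norm = conf_norm.swapaxes(0, 1)
grey = (255 * (1 - conf_norm)).astype(int)
print conf_norm
print grey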
Beispiel #16
0
    def load_classifier(self, check=True):
        _resolve = lambda x, y: self._settings.get(
            x, '%s_%s' % (self._channel, y))
        clfdir = CecogEnvironment.convert_package_path(
            _resolve('Classification', 'classification_envpath'))
        # XXX - where does the "." come from?
        if not isdir(clfdir) or clfdir == ".":
            return
        else:
            self._learner = CommonClassPredictor( \
                clf_dir=clfdir,
                name=self._channel,
                channels={self._channel.title(): _resolve('Classification', 'classification_regionname')},
                color_channel=_resolve('ObjectDetection', 'channelid'))
            result = self._learner.check()

            if check:
                b = lambda x: 'Yes' if x else 'No'
                msg = 'Classifier path: %s\n' % result['path_env']
                msg += 'Found class definition: %s\n' % b(
                    result['has_definition'])
                msg += 'Found annotations: %s\n' % b(
                    result['has_path_annotations'])
                msg += 'Can you pick new samples? %s\n\n' % b(
                    self.is_pick_samples())
                msg += 'Found ARFF file: %s\n' % b(result['has_arff'])
                msg += 'Can you train a classifier? %s\n\n' % b(
                    self.is_train_classifier())
                msg += 'Found SVM model: %s\n' % b(result['has_model'])
                msg += 'Found SVM range: %s\n' % b(result['has_range'])
                msg += 'Can you apply the classifier to images? %s\n\n' % b(
                    self.is_apply_classifier())
                msg += 'Found samples: %s\n' % b(result['has_path_samples'])
                msg += 'Sample images are only used for visualization and annotation control at the moment.'

                txt = '%s classifier inspection results' % self._channel
                information(self, txt, info=msg)

            if result['has_arff']:
                self._learner.importFromArff()
                nr_features_prev = len(self._learner.feature_names)
                removed_features = self._learner.filter_nans(apply=True)
                nr_features = nr_features_prev - len(removed_features)
                self._label_features.setText(self.LABEL_FEATURES %
                                             (nr_features, nr_features_prev))
                self._label_features.setToolTip(
                    "removed %d features containing NA values:\n%s" %
                    (len(removed_features), "\n".join(removed_features)))

            if result['has_definition']:
                self._learner.loadDefinition()

            if result['has_conf']:
                c, g, conf = self._learner.importConfusion()
                self._set_info(c, g, conf)
                self._init_conf_table(conf)
                self._update_conf_table(conf)
            else:
                conf = None
                self._init_conf_table(conf)
            self._set_info_table(conf)
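
For quick reference, a condensed sketch of how the three capability queries of ClassifierResultFrame map onto the keys of the learner's check() dictionary; the helper function itself is hypothetical.

def classifier_capabilities(learner):
    # Hypothetical helper mirroring is_pick_samples(), is_train_classifier()
    # and is_apply_classifier() from ClassifierResultFrame.
    result = learner.check()
    return {
        # sample picking needs the annotation path and the class definition file
        'pick_samples': result['has_path_annotations'] and result['has_definition'],
        # training needs the ARFF feature file written during picking
        'train_classifier': result['has_arff'],
        # applying the classifier needs both the SVM model and the SVM range file
        'apply_classifier': result['has_model'] and result['has_range'],
    }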
Beispiel #17
0
    def _on_process_start(self, name, start_again=False):
        if not self._is_running or start_again:
            is_valid = True
            self._is_abort = False
            self._has_error = False

            if self._process_items is None:
                cls = self._control_buttons[name]['cls']
                if type(cls) == types.ListType:
                    self._process_items = cls
                    self._current_process_item = 0
                    cls = cls[self._current_process_item]

                    # remove HmmThread if the process is not first in the list
                    # and no valid error correction was activated
                    if (HmmThread in self._process_items
                            and self._process_items.index(HmmThread) > 0 and
                            not (self._settings.get(
                                'Processing', 'primary_errorcorrection') or
                                 (self._settings.get(
                                     'Processing', 'secondary_errorcorrection')
                                  and self._settings.get(
                                      'Processing',
                                      'secondary_processchannel')))):
                        self._process_items.remove(HmmThread)

                else:
                    self._process_items = None
                    self._current_process_item = 0
            else:
                cls = self._process_items[self._current_process_item]

            if self.SECTION_NAME == 'Classification':
                result_frame = self._get_result_frame(self._tab_name)
                result_frame.load_classifier(check=False)
                learner = result_frame._learner

                if name == self.PROCESS_PICKING:
                    if not result_frame.is_pick_samples():
                        is_valid = False
                        result_frame.msg_pick_samples(self)
                    elif result_frame.is_train_classifier():
                        if not question(
                                self, 'Samples already picked',
                                'Do you want to pick samples again and '
                                'overwrite previous '
                                'results?'):
                            is_valid = False

                elif name == self.PROCESS_TRAINING:
                    if not result_frame.is_train_classifier():
                        is_valid = False
                        result_frame.msg_train_classifier(self)
                    elif result_frame.is_apply_classifier():
                        if not question(
                                self, 'Classifier already trained',
                                'Do you want to train the classifier '
                                'again?'):
                            is_valid = False

                elif name == self.PROCESS_TESTING and not result_frame.is_apply_classifier(
                ):
                    is_valid = False
                    result_frame.msg_apply_classifier(self)

            elif cls is HmmThread:

                success, cmd = HmmThread.test_executable(
                    self._settings.get('ErrorCorrection', 'filename_to_R'))
                if not success:
                    critical(self, 'Error running R',
                             "The R command line program '%s' could not be executed.\n\n"\
                             "Make sure that the R-project is installed.\n\n"\
                             "See README.txt for details." % cmd)
                    is_valid = False

            elif cls is MultiAnalyzerThread:
                ncpu = cpu_count()
                (ncpu, ok) = QInputDialog.getInt(None, "There are %d processors available on your machine." % ncpu, \
                                             "Select the number of processors", \
                                              ncpu, 1, ncpu*2)
                if not ok:
                    self._process_items = None
                    is_valid = False

            if is_valid:
                self._current_process = name

                if not start_again:
                    self.parent().main_window.log_window.clear()

                    self._is_running = True
                    self._stage_infos = {}

                    self._toggle_tabs(False)
                    # disable all section buttons of the main widget
                    self.toggle_tabs.emit(self.get_name())

                    self._set_control_button_text(idx=1)
                    self._toggle_control_buttons()

                imagecontainer = self.parent().main_window._imagecontainer

                if cls is PickerThread:
                    self._current_settings = self._get_modified_settings(
                        name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings,
                                         imagecontainer)
                    self._set_display_renderer_info()
                    self._clear_image()

                elif cls is AnalyzerThread:
                    self._current_settings = self._get_modified_settings(
                        name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings,
                                         imagecontainer)
                    self._set_display_renderer_info()
                    self._clear_image()

                elif cls is TrainingThread:
                    self._current_settings = self._settings.copy()

                    self._analyzer = cls(self, self._current_settings,
                                         result_frame._learner)
                    self._analyzer.setTerminationEnabled(True)

                    self._analyzer.conf_result.connect(
                        result_frame.on_conf_result, Qt.QueuedConnection)
                    result_frame.reset()

                elif cls is MultiAnalyzerThread:
                    self._current_settings = self._get_modified_settings(
                        name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings,
                                         imagecontainer, ncpu)

                    self._set_display_renderer_info()

                elif cls is HmmThread:
                    self._current_settings = self._get_modified_settings(
                        name, imagecontainer.has_timelapse)

                    # FIXME: classifier handling needs revision!!!
                    learner_dict = {}
                    for kind in ['primary', 'secondary']:
                        _resolve = lambda x, y: self._settings.get(
                            x, '%s_%s' % (kind, y))
                        env_path = CecogEnvironment.convert_package_path(
                            _resolve('Classification',
                                     'classification_envpath'))
                        if (os.path.exists(env_path) and
                            (kind == 'primary' or self._settings.get(
                                'Processing', 'secondary_processchannel'))):

                            learner = CommonClassPredictor( \
                                env_path,
                                _resolve('ObjectDetection', 'channelid'),
                                _resolve('Classification', 'classification_regionname'))
                            learner.importFromArff()
                            learner_dict[kind] = learner

                    ### Whee, I like it... "self.parent().main_window._imagecontainer" crazy, crazy, michael... :-)
                    self._analyzer = cls(
                        self, self._current_settings, learner_dict,
                        self.parent().main_window._imagecontainer)
                    self._analyzer.setTerminationEnabled(True)
                    lw = self.parent().main_window.log_window
                    lw.show()

                elif cls is PostProcessingThread:
                    learner_dict = {}
                    for kind in ['primary', 'secondary']:
                        _resolve = lambda x, y: self._settings.get(
                            x, '%s_%s' % (kind, y))
                        env_path = CecogEnvironment.convert_package_path(
                            _resolve('Classification',
                                     'classification_envpath'))
                        if (_resolve('Processing', 'classification') and
                            (kind == 'primary' or self._settings.get(
                                'Processing', 'secondary_processchannel'))):
                            learner = CommonClassPredictor( \
                                env_path,
                                _resolve('ObjectDetection', 'channelid'),
                                _resolve('Classification', 'classification_regionname'))

                            learner.importFromArff()
                            learner_dict[kind] = learner
                    self._current_settings = self._get_modified_settings(
                        name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings,
                                         learner_dict, imagecontainer)
                    self._analyzer.setTerminationEnabled(True)

                self._analyzer.finished.connect(self._on_process_finished)
                self._analyzer.stage_info.connect(self._on_update_stage_info,
                                                  Qt.QueuedConnection)
                self._analyzer.analyzer_error.connect(self._on_error,
                                                      Qt.QueuedConnection)

                self._analyzer.start(QThread.LowestPriority)
                if self._current_process_item == 0:
                    status('Process started...')

        else:
            self._abort_processing()
Beispiel #18
0
    def _on_process_start(self, name, start_again=False):
        if not self._is_running or start_again:
            is_valid = True
            self._is_abort = False
            self._has_error = False

            if self._process_items is None:
                cls = self._control_buttons[name]['cls']
                if type(cls) == types.ListType:
                    self._process_items = cls
                    self._current_process_item = 0
                    cls = cls[self._current_process_item]

                    # remove HmmThread if the process is not first in the list
                    # and no valid error correction was activated
                    if (HmmThread in self._process_items and
                        self._process_items.index(HmmThread) > 0 and
                        not (self._settings.get('Processing', 'primary_errorcorrection') or
                             (self._settings.get('Processing', 'secondary_errorcorrection') and
                              self._settings.get('Processing', 'secondary_processchannel')))):
                        self._process_items.remove(HmmThread)

                else:
                    self._process_items = None
                    self._current_process_item = 0
            else:
                cls = self._process_items[self._current_process_item]


            if self.SECTION_NAME == 'Classification':
                result_frame = self._get_result_frame(self._tab_name)
                result_frame.load_classifier(check=False)
                learner = result_frame._learner

                if name == self.PROCESS_PICKING:
                    if not result_frame.is_pick_samples():
                        is_valid = False
                        result_frame.msg_pick_samples(self)
                    elif result_frame.is_train_classifier():
                        if not question(self, 'Samples already picked',
                                    'Do you want to pick samples again and '
                                    'overwrite previous '
                                    'results?'):
                            is_valid = False

                elif name == self.PROCESS_TRAINING:
                    if not result_frame.is_train_classifier():
                        is_valid = False
                        result_frame.msg_train_classifier(self)
                    elif result_frame.is_apply_classifier():
                        if not question(self, 'Classifier already trained',
                                    'Do you want to train the classifier '
                                    'again?'):
                            is_valid = False

                elif name == self.PROCESS_TESTING and not result_frame.is_apply_classifier():
                    is_valid = False
                    result_frame.msg_apply_classifier(self)

            elif cls is HmmThread:

                success, cmd = HmmThread.test_executable(self._settings.get('ErrorCorrection', 'filename_to_R'))
                if not success:
                    critical(self, 'Error running R',
                             "The R command line program '%s' could not be executed.\n\n"\
                             "Make sure that the R-project is installed.\n\n"\
                             "See README.txt for details." % cmd)
                    is_valid = False

            elif cls is MultiAnalyzerThread:
                ncpu = cpu_count()
                (ncpu, ok) = QInputDialog.getInt(None, "There are %d processors available on your machine." % ncpu, \
                                             "Select the number of processors", \
                                              ncpu, 1, ncpu*2)
                if not ok:
                    self._process_items = None
                    is_valid = False

            if is_valid:
                self._current_process = name

                if not start_again:
                    self.parent().main_window.log_window.clear()

                    self._is_running = True
                    self._stage_infos = {}

                    self._toggle_tabs(False)
                    # disable all section buttons of the main widget
                    self.toggle_tabs.emit(self.get_name())

                    self._set_control_button_text(idx=1)
                    self._toggle_control_buttons()

                imagecontainer = self.parent().main_window._imagecontainer

                if cls is PickerThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, imagecontainer)
                    self._set_display_renderer_info()
                    self._clear_image()

                elif cls is AnalyzerThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, imagecontainer)
                    self._set_display_renderer_info()
                    self._clear_image()

                elif cls is TrainingThread:
                    self._current_settings = self._settings.copy()

                    self._analyzer = cls(self, self._current_settings, result_frame._learner)
                    self._analyzer.setTerminationEnabled(True)

                    self._analyzer.conf_result.connect(result_frame.on_conf_result,
                                                       Qt.QueuedConnection)
                    result_frame.reset()

                elif cls is MultiAnalyzerThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, imagecontainer, ncpu)

                    self._set_display_renderer_info()

                elif cls is HmmThread:
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)

                    # FIXME: classifier handling needs revision!!!
                    learner_dict = {}
                    for kind in ['primary', 'secondary']:
                        _resolve = lambda x,y: self._settings.get(x, '%s_%s' % (kind, y))
                        env_path = CecogEnvironment.convert_package_path(_resolve('Classification', 'classification_envpath'))
                        if (os.path.exists(env_path)
                              and (kind == 'primary' or self._settings.get('Processing', 'secondary_processchannel'))
                             ):

                            learner = CommonClassPredictor( \
                                env_path,
                                _resolve('ObjectDetection', 'channelid'),
                                _resolve('Classification', 'classification_regionname'))
                            learner.importFromArff()
                            learner_dict[kind] = learner

                    ### Whee, I like it... "self.parent().main_window._imagecontainer" crazy, crazy, michael... :-)
                    self._analyzer = cls(self, self._current_settings,
                                         learner_dict,
                                         self.parent().main_window._imagecontainer)
                    self._analyzer.setTerminationEnabled(True)
                    lw = self.parent().main_window.log_window
                    lw.show()


                elif cls is PostProcessingThread:
                    learner_dict = {}
                    for kind in ['primary', 'secondary']:
                        _resolve = lambda x,y: self._settings.get(x, '%s_%s' % (kind, y))
                        env_path = CecogEnvironment.convert_package_path(_resolve('Classification', 'classification_envpath'))
                        if (_resolve('Processing', 'classification') and
                            (kind == 'primary' or self._settings.get('Processing', 'secondary_processchannel'))):
                            learner = CommonClassPredictor( \
                                env_path,
                                _resolve('ObjectDetection', 'channelid'),
                                _resolve('Classification', 'classification_regionname'))

                            learner.importFromArff()
                            learner_dict[kind] = learner
                    self._current_settings = self._get_modified_settings(name, imagecontainer.has_timelapse)
                    self._analyzer = cls(self, self._current_settings, learner_dict, imagecontainer)
                    self._analyzer.setTerminationEnabled(True)

                self._analyzer.finished.connect(self._on_process_finished)
                self._analyzer.stage_info.connect(self._on_update_stage_info, Qt.QueuedConnection)
                self._analyzer.analyzer_error.connect(self._on_error, Qt.QueuedConnection)

                self._analyzer.start(QThread.LowestPriority)
                if self._current_process_item == 0:
                    status('Process started...')

        else:
            self._abort_processing()
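
A small illustration of the _resolve pattern used throughout the examples above: the option name is prefixed with the channel (or classifier kind) before the settings lookup. The settings object below is a stand-in, so the sketch stays self-contained.

class _FakeSettings(object):
    # Stand-in for the real settings object; get() just echoes the lookup key.
    def get(self, section, option):
        return '<%s/%s>' % (section, option)

settings = _FakeSettings()
kind = 'primary'
_resolve = lambda section, option: settings.get(section, '%s_%s' % (kind, option))

print _resolve('Classification', 'classification_envpath')
# -> '<Classification/primary_classification_envpath>'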