Example #1
    def predict_probabilities(self, X):
        logger.debug( "Predicting with parallel vigra RF" )
        X = numpy.asarray(X, dtype=numpy.float32)

        # As each forest completes, aggregate results in a shared array.
        # (Must put in a list so we can update it in this closure.)
        total_predictions = [None]
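        # RequestLock is lazyflow's request-compatible lock: unlike a plain
        # threading.Lock, waiting on it doesn't tie up a worker thread.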
        prediction_lock = RequestLock()
        def update_predictions(forest, forest_predictions):
            forest_predictions *= forest.treeCount()
            with prediction_lock:
                if total_predictions[0] is None:
                    total_predictions[0] = forest_predictions
                else:
                    total_predictions[0] += forest_predictions

        # Create a request for each forest
        pool = RequestPool()
        for forest in self._forests:
            req = Request( partial( forest.predictProbabilities, X ) )
            req.notify_finished( partial(update_predictions, forest) )
            pool.add( req )
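        # Release our reference to the last request so it can be freed once the pool is done.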
        del req
        pool.wait()

        total_predictions[0] /= self._num_trees
        return total_predictions[0]
Example #2
    def create_and_train(self, X, y):
        logger.debug( "Training parallel vigra RF" )
        # Save for future reference
        known_labels = numpy.unique(y)

        X = numpy.asarray(X, numpy.float32)
        y = numpy.asarray(y, numpy.uint32)
        if y.ndim == 1:
            y = y[:, numpy.newaxis]

        assert X.ndim == 2
        assert len(X) == len(y)

        # Create N forests
        forests = []
        for _ in range(self._num_forests):
            forest = vigra.learning.RandomForest(self._trees_per_forest, **self._kwargs)
            forests.append( forest )

        # Train them all in parallel
        oobs = [None] * len(forests)
        pool = RequestPool()
        for i, forest in enumerate(forests):
            req = Request( partial(forest.learnRF, X, y) )
            # save the oobs
            req.notify_finished( partial( oobs.__setitem__, i ) )
            pool.add( req )
        pool.wait()

        return ParallelVigraRfLazyflowClassifier( forests, oobs, known_labels )
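
All of these examples share one fan-out/fan-in idiom: wrap each unit of work in a Request, register a notify_finished callback to collect the result, add the request to a RequestPool, and block on pool.wait(). A minimal sketch of the idiom (the square function and results list are hypothetical; it assumes only that lazyflow.request is importable):

from functools import partial
from lazyflow.request import Request, RequestPool

def square(i):
    return i * i

results = [None] * 4
pool = RequestPool()
for i in range(4):
    req = Request(partial(square, i))
    req.notify_finished(partial(results.__setitem__, i))  # callback receives the request's return value
    pool.add(req)
pool.wait()  # blocks until every request in the pool has completed
assert results == [0, 1, 4, 9]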
Example #3
    def predict_probabilities(self, X):
        logger.debug( "Predicting with parallel vigra RF" )
        X = numpy.asarray(X, dtype=numpy.float32)
        assert X.ndim == 2

        if self._feature_names is not None:
            # For some reason, vigra doesn't seem to check this for us...
            assert X.shape[1] == len(self._feature_names), \
                "Feature count ({}) doesn't match the training feature count ({}).\n"\
                "Expected features: {}".format( X.shape[1], len(self._feature_names), self._feature_names )

        # As each forest completes, aggregate results in a shared array.
        # (Must put in a list so we can update it in this closure.)
        total_predictions = [None]
        prediction_lock = RequestLock()
        def update_predictions(forest, forest_predictions):
            forest_predictions *= forest.treeCount()
            with prediction_lock:
                if total_predictions[0] is None:
                    total_predictions[0] = forest_predictions
                else:
                    total_predictions[0] += forest_predictions

        # Create a request for each forest
        pool = RequestPool()
        for forest in self._forests:
            req = Request( partial( forest.predictProbabilities, X ) )
            req.notify_finished( partial(update_predictions, forest) )
            pool.add( req )
        del req
        pool.wait()

        total_predictions[0] /= self._num_trees
        return total_predictions[0]
Example #4
    def _triggerTableUpdate(self):
        # Check that object area is included in selected features
        featureNames = self.topLevelOperatorView.SelectedFeatures.value

        if ('Standard Object Features' not in featureNames
                or 'Count' not in featureNames['Standard Object Features']):
            box = QMessageBox(
                QMessageBox.Warning, 'Warning',
                'Object area is not a selected feature. Please select this feature on: \"Standard Object Features > Shape > Size in pixels\"',
                QMessageBox.NoButton, self)
            box.show()
            return

        # Clear table
        self.table.clearContents()
        self.table.setRowCount(0)
        self.table.setSortingEnabled(False)
        self.progressBar.show()
        self.computeButton.setEnabled(False)

        # Compute object features and number of labels per frame
        def compute_features():
            features = self.topLevelOperatorView.ObjectFeatures([]).wait()
            labels = self.topLevelOperatorView.LabelInputs([]).wait()
            return features, labels

        req = Request(compute_features)
        req.notify_finished(self._populateTable)
        req.submit()
Example #5
    def run_export(self):
        role_names = self.parentApplet.dataSelectionApplet.topLevelOperator.DatasetRoles.value

        # Prepare file lists in an OrderedDict
        role_path_dict = OrderedDict()
        role_path_dict[0] = BatchProcessingGui.get_all_item_strings(
            self.list_widgets[0])
        num_datasets = len(role_path_dict[0])

        for role_index, list_widget in enumerate(self.list_widgets[1:],
                                                 start=1):
            role_path_dict[role_index] = BatchProcessingGui.get_all_item_strings(
                self.list_widgets[role_index])
            assert len(role_path_dict[role_index]) <= num_datasets, \
                "Too many files given for role: '{}'".format( role_names[role_index] )
            if len(role_path_dict[role_index]) < num_datasets:
                role_path_dict[role_index] += [None] * (
                    num_datasets - len(role_path_dict[role_index]))

        # Run the export in a separate thread
        export_req = Request(
            partial(self.parentApplet.run_export, role_path_dict))
        export_req.notify_failed(self.handle_batch_processing_failure)
        export_req.notify_finished(self.handle_batch_processing_finished)
        export_req.notify_cancelled(self.handle_batch_processing_cancelled)
        self.export_req = export_req

        self.parentApplet.busy = True
        self.parentApplet.appletStateUpdateRequested.emit()
        self.cancel_button.setVisible(True)
        self.run_button.setEnabled(False)

        # Start the export
        export_req.submit()
Example #6
    def run_export(self):
        role_names = self.parentApplet.dataSelectionApplet.role_names

        # Prepare file lists in an OrderedDict
        role_inputs = {
            role_name: self._data_role_widgets[role_name].filepaths
            for role_name in role_names
        }
        if all(len(role_inp) == 0 for role_inp in role_inputs.values()):
            return

        # Run the export in a separate thread
        lane_configs = self.parentApplet.dataSelectionApplet.create_lane_configs(
            role_inputs=role_inputs)

        export_req = Request(
            partial(self.parentApplet.run_export, lane_configs=lane_configs))
        export_req.notify_failed(self.handle_batch_processing_failure)
        export_req.notify_finished(self.handle_batch_processing_finished)
        export_req.notify_cancelled(self.handle_batch_processing_cancelled)
        self.export_req = export_req

        self.parentApplet.busy = True
        self.parentApplet.appletStateUpdateRequested()
        self.cancel_button.setVisible(True)
        self.run_button.setEnabled(False)

        # Start the export
        export_req.submit()
Example #7
    def export_object_data(self, settings, selected_features, gui=None):
        """
        Initialize progress displays and start the actual export in a new thread using the lazyflow.request framework
        :param settings: the settings from the GUI export dialog
        :type settings: dict
        :param selected_features: the features to export from the GUI dialog
        :type selected_features: list
        :param gui: the Progress bar and callbacks for finish/fail/cancel see ExportingGui.show_export_dialog
        :type gui: dict
        """
        self.save_export_progress_dialog(None)
        if gui is None or "dialog" not in gui:
            progress_display = ProgressPrinter("Export Progress", xrange(100, -1, -5), 2)
        else:
            progress_display = gui["dialog"]
            self.save_export_progress_dialog(progress_display)

        export = partial(self.do_export, settings, selected_features, progress_display)
        request = Request(export)
        request.notify_failed(gui["fail"] if gui is not None and "fail" in gui else self.export_failed)
        request.notify_failed(self.export_failed)
        request.notify_finished(gui["ok"] if gui is not None and "ok" in gui else self.export_finished)
        request.notify_cancelled(gui["cancel"] if gui is not None and "cancel" in gui else self.export_cancelled)
        request.submit()

        if gui is not None and "dialog" in gui:
            progress_display.cancel.connect(request.cancel)
Example #8
    def run_export(self):
        role_names = self.parentApplet.dataSelectionApplet.topLevelOperator.DatasetRoles.value

        # Prepare file lists in an OrderedDict
        role_path_dict = OrderedDict()
        role_path_dict[0] = BatchProcessingGui.get_all_item_strings(self.list_widgets[0])
        num_datasets = len(role_path_dict[0])

        for role_index, list_widget in enumerate(self.list_widgets[1:], start=1):
            role_path_dict[role_index] = BatchProcessingGui.get_all_item_strings(self.list_widgets[role_index])
            assert len(role_path_dict[role_index]) <= num_datasets, \
                "Too many files given for role: '{}'".format( role_names[role_index] )
            if len(role_path_dict[role_index]) < num_datasets:
                role_path_dict[role_index] += [None] * (num_datasets-len(role_path_dict[role_index]))

        # Run the export in a separate thread
        export_req = Request(partial(self.parentApplet.run_export, role_path_dict))
        export_req.notify_failed(self.handle_batch_processing_failure)
        export_req.notify_finished(self.handle_batch_processing_finished)
        export_req.notify_cancelled(self.handle_batch_processing_cancelled)
        self.export_req = export_req

        self.parentApplet.busy = True
        self.parentApplet.appletStateUpdateRequested()
        self.cancel_button.setVisible(True)
        self.run_button.setEnabled(False)

        # Start the export        
        export_req.submit()
Example #9
    def predict_probabilities(self, X):
        logger.debug("Predicting with parallel vigra RF")
        X = numpy.asarray(X, dtype=numpy.float32)
        assert X.ndim == 2

        if self._feature_names is not None:
            # For some reason, vigra doesn't seem to check this for us...
            assert X.shape[1] == len(self._feature_names), \
                "Feature count doesn't match the training data."

        # As each forest completes, aggregate results in a shared array.
        # (Must put in a list so we can update it in this closure.)
        total_predictions = [None]
        prediction_lock = RequestLock()

        def update_predictions(forest, forest_predictions):
            forest_predictions *= forest.treeCount()
            with prediction_lock:
                if total_predictions[0] is None:
                    total_predictions[0] = forest_predictions
                else:
                    total_predictions[0] += forest_predictions

        # Create a request for each forest
        pool = RequestPool()
        for forest in self._forests:
            req = Request(partial(forest.predictProbabilities, X))
            req.notify_finished(partial(update_predictions, forest))
            pool.add(req)
        del req
        pool.wait()

        total_predictions[0] /= self._num_trees
        return total_predictions[0]
Example #10
    def _triggerTableUpdate(self):
        # Check that object area is included in selected features
        featureNames = self.topLevelOperatorView.SelectedFeatures.value

        if ('Standard Object Features' not in featureNames
                or 'Count' not in featureNames['Standard Object Features']):
            box = QMessageBox(
                QMessageBox.Warning, 'Warning',
                'Object area is not a selected feature. Please select this feature on: \"Standard Object Features > Shape > Size in pixels\"',
                QMessageBox.NoButton, self)
            box.show()
            return

        # Clear table
        self.table.clearContents()
        self.table.setRowCount(0)
        self.table.setSortingEnabled(False)
        self.progressBar.show()
        self.computeButton.setEnabled(False)

        def compute_features_for_frame(tIndex, t, features):
            # Compute features and labels (called in parallel from request pool)
            roi = [
                slice(None) for i in range(
                    len(self.topLevelOperatorView.LabelImages.meta.shape))
            ]
            roi[tIndex] = slice(t, t + 1)
            roi = tuple(roi)

            frame = self.topLevelOperatorView.SegmentationImages(roi).wait()
            frame = frame.squeeze().astype(numpy.uint32, copy=False)

            # Dirty trick: We don't care what we're passing here for the 'image' parameter,
            # but vigra insists that we pass *something*, so we'll cast the label image as float32.
            features[t] = vigra.analysis.extractRegionFeatures(
                frame.view(numpy.float32), frame, ['Count'], ignoreLabel=0)

        tIndex = self.topLevelOperatorView.SegmentationImages.meta.axistags.index('t')
        tMax = self.topLevelOperatorView.SegmentationImages.meta.shape[tIndex]

        features = {}
        labels = {}

        def compute_all_features():
            # Compute features in parallel
            pool = RequestPool()
            for t in range(tMax):
                pool.add(
                    Request(
                        partial(compute_features_for_frame, tIndex, t,
                                features)))
            pool.wait()

        # Compute labels
        labels = self.topLevelOperatorView.LabelInputs([]).wait()

        req = Request(compute_all_features)
        req.notify_finished(partial(self._populateTable, features, labels))
        req.submit()
Example #11
    def predict_probabilities(self, X):
        logger.debug( "Predicting with parallel vigra RF" )
        X = numpy.asarray(X, dtype=numpy.float32)

        # As each forest completes, aggregate results in a shared array.
        # (Must put in a list so we can update it in this closure.)
        total_predictions = [None]
        prediction_lock = RequestLock()
        def update_predictions(forest, forest_predictions):
            forest_predictions *= forest.treeCount()
            with prediction_lock:
                if total_predictions[0] is None:
                    total_predictions[0] = forest_predictions
                else:
                    total_predictions[0] += forest_predictions

        # Create a request for each forest
        pool = RequestPool()
        for forest in self._forests:
            req = Request( partial( forest.predictProbabilities, X ) )
            req.notify_finished( partial(update_predictions, forest) )
            pool.add( req )
        del req
        pool.wait()

        total_predictions[0] /= self._num_trees
        return total_predictions[0]
Example #12
    def addStack(self, roleIndex, laneIndex):
        """
        The user clicked the "Import Stack Files" button.
        """
        stackDlg = StackFileSelectionWidget(self)
        stackDlg.exec_()
        if stackDlg.result() != QDialog.Accepted :
            return
        files = stackDlg.selectedFiles
        sequence_axis = stackDlg.sequence_axis
        if len(files) == 0:
            return

        info = DatasetInfo()
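        # The whole stack is encoded as a single filePath string, with the
        # individual files joined by os.path.pathsep.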
        info.filePath = os.path.pathsep.join( files )
        prefix = os.path.commonprefix(files)
        info.nickname = PathComponents(prefix).filenameBase
        # Add an underscore for each wildcard digit
        num_wildcards = len(files[-1]) - len(prefix) - len( os.path.splitext(files[-1])[1] )
        info.nickname += "_"*num_wildcards

        # Allow labels by default if this gui isn't being used for batch data.
        info.allowLabels = ( self.guiMode == GuiMode.Normal )
        info.fromstack = True

        originalNumLanes = len(self.topLevelOperator.DatasetGroup)

        if laneIndex is None or laneIndex == -1:
            laneIndex = len(self.topLevelOperator.DatasetGroup)
        if len(self.topLevelOperator.DatasetGroup) < laneIndex+1:
            self.topLevelOperator.DatasetGroup.resize(laneIndex+1)

        def importStack():
            self.parentApplet.busy = True
            self.parentApplet.appletStateUpdateRequested.emit()

            # Serializer will update the operator for us, which will propagate to the GUI.
            try:
                self.serializer.importStackAsLocalDataset( info, sequence_axis )
                try:
                    self.topLevelOperator.DatasetGroup[laneIndex][roleIndex].setValue(info)
                except DatasetConstraintError as ex:
                    # Give the user a chance to repair the problem.
                    filename = files[0] + "\n...\n" + files[-1]
                    return_val = [False]
                    self.handleDatasetConstraintError( info, filename, ex, roleIndex, laneIndex, return_val )
                    if not return_val[0]:
                        # Not successfully repaired.  Roll back the changes and give up.
                        self.topLevelOperator.DatasetGroup.resize(originalNumLanes)
            finally:
                self.parentApplet.busy = False
                self.parentApplet.appletStateUpdateRequested.emit()

        req = Request( importStack )
        req.notify_finished( lambda result: self.showDataset(laneIndex, roleIndex) )
        req.notify_failed( partial(self.handleFailedStackLoad, files, originalNumLanes ) )
        req.submit()
Example #13
    def addStack(self, roleIndex, laneIndex):
        """
        The user clicked the "Import Stack Files" button.
        """
        stackDlg = StackFileSelectionWidget(self)
        stackDlg.exec_()
        if stackDlg.result() != QDialog.Accepted :
            return
        files = stackDlg.selectedFiles
        sequence_axis = stackDlg.sequence_axis
        if len(files) == 0:
            return

        info = DatasetInfo()
        info.filePath = os.path.pathsep.join( files )
        prefix = os.path.commonprefix(files)
        info.nickname = PathComponents(prefix).filenameBase
        # Add an underscore for each wildcard digit
        num_wildcards = len(files[-1]) - len(prefix) - len( os.path.splitext(files[-1])[1] )
        info.nickname += "_"*num_wildcards

        # Allow labels by default if this gui isn't being used for batch data.
        info.allowLabels = ( self.guiMode == GuiMode.Normal )
        info.fromstack = True

        originalNumLanes = len(self.topLevelOperator.DatasetGroup)

        if laneIndex is None or laneIndex == -1:
            laneIndex = len(self.topLevelOperator.DatasetGroup)
        if len(self.topLevelOperator.DatasetGroup) < laneIndex+1:
            self.topLevelOperator.DatasetGroup.resize(laneIndex+1)

        def importStack():
            self.parentApplet.busy = True
            self.parentApplet.appletStateUpdateRequested.emit()

            # Serializer will update the operator for us, which will propagate to the GUI.
            try:
                self.serializer.importStackAsLocalDataset( info, sequence_axis )
                try:
                    self.topLevelOperator.DatasetGroup[laneIndex][roleIndex].setValue(info)
                except DatasetConstraintError as ex:
                    # Give the user a chance to repair the problem.
                    filename = files[0] + "\n...\n" + files[-1]
                    return_val = [False]
                    self.handleDatasetConstraintError( info, filename, ex, roleIndex, laneIndex, return_val )
                    if not return_val[0]:
                        # Not successfully repaired.  Roll back the changes and give up.
                        self.topLevelOperator.DatasetGroup.resize(originalNumLanes)
            finally:
                self.parentApplet.busy = False
                self.parentApplet.appletStateUpdateRequested.emit()

        req = Request( importStack )
        req.notify_finished( lambda result: self.showDataset(laneIndex, roleIndex) )
        req.notify_failed( partial(self.handleFailedStackLoad, files, originalNumLanes ) )
        req.submit()
Example #14
    def addStack(self, roleIndex, laneIndex):
        """
        The user clicked the "Import Stack Files" button.
        """
        stackDlg = StackFileSelectionWidget(self)
        stackDlg.exec_()
        if stackDlg.result() != QDialog.Accepted:
            return
        files = stackDlg.selectedFiles
        sequence_axis = stackDlg.sequence_axis
        if len(files) == 0:
            return

        cwd = self.topLevelOperator.WorkingDirectory.value
        info = DatasetInfo(os.path.pathsep.join(files), cwd=cwd)

        originalNumLanes = len(self.topLevelOperator.DatasetGroup)

        if laneIndex is None or laneIndex == -1:
            laneIndex = len(self.topLevelOperator.DatasetGroup)
        if len(self.topLevelOperator.DatasetGroup) < laneIndex + 1:
            self.topLevelOperator.DatasetGroup.resize(laneIndex + 1)

        def importStack():
            self.parentApplet.busy = True
            self.parentApplet.appletStateUpdateRequested()

            # Serializer will update the operator for us, which will propagate to the GUI.
            try:
                self.serializer.importStackAsLocalDataset(info, sequence_axis)
                try:
                    self.topLevelOperator.DatasetGroup[laneIndex][roleIndex].setValue(info)
                except DatasetConstraintError as ex:
                    # Give the user a chance to repair the problem.
                    filename = files[0] + "\n...\n" + files[-1]
                    return_val = [False]
                    self.parentApplet.busy = False  # required for possible fixing dialogs from DatasetConstraintError
                    self.parentApplet.appletStateUpdateRequested()
                    self.handleDatasetConstraintError(info, filename, ex,
                                                      roleIndex, laneIndex,
                                                      return_val)
                    if not return_val[0]:
                        # Not successfully repaired.  Roll back the changes and give up.
                        self.topLevelOperator.DatasetGroup.resize(
                            originalNumLanes)
            finally:
                self.parentApplet.busy = False
                self.parentApplet.appletStateUpdateRequested()

        req = Request(importStack)
        req.notify_finished(
            lambda result: self.showDataset(laneIndex, roleIndex))
        req.notify_failed(
            partial(self.handleFailedStackLoad, files, originalNumLanes))
        req.submit()
Example #15
    def _triggerTableUpdate(self):
        # Check that object area is included in selected features
        featureNames = self.topLevelOperatorView.SelectedFeatures.value
        
        if 'Standard Object Features' not in featureNames or 'Count' not in featureNames['Standard Object Features']:
            box = QMessageBox(QMessageBox.Warning,
                  'Warning',
                  'Object area is not a selected feature. Please select this feature on: \"Standard Object Features > Shape > Size in pixels\"',
                  QMessageBox.NoButton,
                  self)
            box.show()
            return 
        
        # Clear table
        self.table.clearContents()
        self.table.setRowCount(0)
        self.table.setSortingEnabled(False)
        self.progressBar.show()
        self.computeButton.setEnabled(False)

        def compute_features_for_frame(tIndex, t, features): 
            # Compute features and labels (called in parallel from request pool)
            roi = [slice(None) for i in range(len(self.topLevelOperatorView.LabelImages.meta.shape))]
            roi[tIndex] = slice(t, t+1)
            roi = tuple(roi)

            frame = self.topLevelOperatorView.SegmentationImages(roi).wait()           
            frame = frame.squeeze().astype(numpy.uint32, copy=False)
            
            # Dirty trick: We don't care what we're passing here for the 'image' parameter,
            # but vigra insists that we pass *something*, so we'll cast the label image as float32.
            features[t] = vigra.analysis.extractRegionFeatures(frame.view(numpy.float32),
                                                               frame,
                                                               ['Count'],
                                                               ignoreLabel=0)
            
        tIndex = self.topLevelOperatorView.SegmentationImages.meta.axistags.index('t')
        tMax = self.topLevelOperatorView.SegmentationImages.meta.shape[tIndex]     
        
        features = {}
        labels = {}

        def compute_all_features():
            # Compute features in parallel
            pool = RequestPool()
            for t in range(tMax):
                pool.add( Request( partial(compute_features_for_frame, tIndex, t, features) ) )
            pool.wait()
            
        # Compute labels
        labels = self.topLevelOperatorView.LabelInputs([]).wait()
            
        req = Request(compute_all_features)
        req.notify_finished( partial(self._populateTable, features, labels) )
        req.submit()
Example #16
    def exportFinalSegmentation(self,
                                outputPath,
                                axisorder,
                                progressCallback=None):
        assert self.FinalSegmentation.ready(), \
            "Can't export yet: The final segmentation isn't ready!"

        logger.info("Starting Final Segmentation Export...")

        opTranspose = OpReorderAxes(parent=self)
        opTranspose.AxisOrder.setValue(axisorder)
        opTranspose.Input.connect(self.FinalSegmentation)

        f = h5py.File(outputPath, 'w')
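        # Every completion handler below (finished/failed/cancelled) is responsible
        # for closing this file handle.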
        opExporter = OpH5WriterBigDataset(parent=self)
        opExporter.hdf5File.setValue(f)
        opExporter.hdf5Path.setValue('split_result')
        opExporter.Image.connect(opTranspose.Output)
        if progressCallback is not None:
            opExporter.progressSignal.subscribe(progressCallback)

        req = Request(partial(self._runExporter, opExporter))

        def cleanOps():
            opExporter.cleanUp()
            opTranspose.cleanUp()

        def handleFailed(exc, exc_info):
            cleanOps()
            f.close()
            import traceback
            traceback.print_tb(exc_info[2])
            msg = "Final Segmentation export FAILED due to the following error:\n{}".format(
                exc)
            logger.error(msg)

        def handleFinished(result):
            try:
                cleanOps()
                logger.info("FINISHED Final Segmentation Export")
            finally:
                f.close()

        def handleCancelled():
            cleanOps()
            f.close()
            logger.info("Final Segmentation export was cancelled!")

        req.notify_failed(handleFailed)
        req.notify_finished(handleFinished)
        req.notify_cancelled(handleCancelled)

        req.submit()
        return req  # Returned in case the user wants to cancel it.
Example #17
    def _train_forests_with_feature_importance(forests,
                                               X,
                                               y,
                                               feature_names,
                                               export_path=None):
        """
        Train all RFs (in parallel) and compute feature importances while doing so.
        The importances table will be logged as INFO, and also exported to a file if export_path is given.

        Returns: oobs and importances
        """
        oobs = [None] * len(forests)
        importances = [None] * len(forests)

        def store_training_results(i, training_results):
            oob, importance_results = training_results
            oobs[i] = oob
            importances[i] = importance_results

        with Timer() as train_timer:
            pool = RequestPool()
            for i, forest in enumerate(forests):
                req = Request(partial(forest.learnRFWithFeatureSelection, X,
                                      y))
                # save the training results
                req.notify_finished(partial(store_training_results, i))
                pool.add(req)
            pool.wait()

        logger.info("Training took, {} seconds".format(train_timer.seconds()))

        # Forests may have different numbers of trees,
        # so take a weighted average of their importances
        tree_counts = [f.treeCount() for f in forests]
        weights = numpy.array(tree_counts).astype(float)
        weights /= weights.sum()

        named_importances = collections.OrderedDict(
            list(
                zip(feature_names,
                    numpy.average(importances, weights=weights, axis=0))))

        importance_table = generate_importance_table(named_importances,
                                                     sort="overall",
                                                     export_path=export_path)

        logger.info(
            "Feature importance measurements during training: \n{}".format(
                importance_table))

        return oobs, named_importances
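
As a sanity check of the weighted average above: numpy.average weights each forest's importance row by that forest's share of the total trees. A standalone sketch with hypothetical numbers:

import numpy

importances = [numpy.array([0.2, 0.8]), numpy.array([0.4, 0.6])]
weights = numpy.array([10.0, 30.0])  # e.g. trees per forest
weights /= weights.sum()             # -> [0.25, 0.75]
# 0.25 * [0.2, 0.8] + 0.75 * [0.4, 0.6] -> [0.35, 0.65]
print(numpy.average(importances, weights=weights, axis=0))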
Example #18
    def addStack(self, roleIndex, laneIndex):
        """
        The user clicked the "Import Stack Files" button.
        """
        stackDlg = StackFileSelectionWidget(self)
        stackDlg.exec_()
        if stackDlg.result() != QDialog.Accepted :
            return
        files = stackDlg.selectedFiles
        sequence_axis = stackDlg.sequence_axis
        if len(files) == 0:
            return

        cwd = self.topLevelOperator.WorkingDirectory.value
        info = DatasetInfo(os.path.pathsep.join(files), cwd=cwd)

        originalNumLanes = len(self.topLevelOperator.DatasetGroup)

        if laneIndex is None or laneIndex == -1:
            laneIndex = len(self.topLevelOperator.DatasetGroup)
        if len(self.topLevelOperator.DatasetGroup) < laneIndex+1:
            self.topLevelOperator.DatasetGroup.resize(laneIndex+1)

        def importStack():
            self.parentApplet.busy = True
            self.parentApplet.appletStateUpdateRequested()

            # Serializer will update the operator for us, which will propagate to the GUI.
            try:
                self.serializer.importStackAsLocalDataset( info, sequence_axis )
                try:
                    self.topLevelOperator.DatasetGroup[laneIndex][roleIndex].setValue(info)
                except DatasetConstraintError as ex:
                    # Give the user a chance to repair the problem.
                    filename = files[0] + "\n...\n" + files[-1]
                    return_val = [False]
                    self.parentApplet.busy = False  # required for possible fixing dialogs from DatasetConstraintError
                    self.parentApplet.appletStateUpdateRequested()
                    self.handleDatasetConstraintError( info, filename, ex, roleIndex, laneIndex, return_val )
                    if not return_val[0]:
                        # Not successfully repaired.  Roll back the changes and give up.
                        self.topLevelOperator.DatasetGroup.resize(originalNumLanes)
            finally:
                self.parentApplet.busy = False
                self.parentApplet.appletStateUpdateRequested()

        req = Request( importStack )
        req.notify_finished( lambda result: self.showDataset(laneIndex, roleIndex) )
        req.notify_failed( partial(self.handleFailedStackLoad, files, originalNumLanes ) )
        req.submit()
Example #19
    def exportFinalSegmentation(self, outputPath, axisorder, progressCallback=None):
        assert self.FinalSegmentation.ready(), "Can't export yet: The final segmentation isn't ready!"

        logger.info("Starting Final Segmentation Export...")
        
        opTranspose = OpReorderAxes( parent=self )
        opTranspose.AxisOrder.setValue( axisorder )
        opTranspose.Input.connect( self.FinalSegmentation )
        
        f = h5py.File(outputPath, 'w')
        opExporter = OpH5WriterBigDataset(parent=self)
        opExporter.hdf5File.setValue( f )
        opExporter.hdf5Path.setValue( 'split_result' )
        opExporter.Image.connect( opTranspose.Output )
        if progressCallback is not None:
            opExporter.progressSignal.subscribe( progressCallback )
        
        req = Request( partial(self._runExporter, opExporter) )

        def cleanOps():
            opExporter.cleanUp()
            opTranspose.cleanUp()
        
        def handleFailed( exc, exc_info ):
            cleanOps()        
            f.close()
            import traceback
            traceback.print_tb(exc_info[2])
            msg = "Final Segmentation export FAILED due to the following error:\n{}".format( exc )
            logger.error( msg )

        def handleFinished( result ):
            try:
                cleanOps()
                logger.info("FINISHED Final Segmentation Export")
            finally:
                f.close()

        def handleCancelled():
            cleanOps()
            f.close()
            logger.info( "Final Segmentation export was cancelled!" )

        req.notify_failed( handleFailed )
        req.notify_finished( handleFinished )
        req.notify_cancelled( handleCancelled )
        
        req.submit()
        return req # Returned in case the user wants to cancel it.
Example #20
    def _train_forests(forests, X, y):
        """
        Train all RFs (in parallel), and return the oobs.
        """
        oobs = [None] * len(forests)
        def store_oob_results(i, oob):
            oobs[i] = oob

        with Timer() as train_timer:
            pool = RequestPool()
            for i, forest in enumerate(forests):
                req = Request( partial(forest.learnRF, X, y) )
                # save the oob results
                req.notify_finished( partial( store_oob_results, i ) )
                pool.add( req )
            pool.wait()          
        logger.info("Training took, {} seconds".format( train_timer.seconds() ) )
        return oobs
Example #21
    def run(self, opExport):
        """
        Start the export and return immediately (after showing the progress dialog).
        
        :param opExport: The export object to execute.
                         It must have a 'run_export()' method and a 'progressSignal' member.
        """
        progressDlg = MultiStepProgressDialog(parent=self.parent())
        progressDlg.setNumberOfSteps(1)

        def _forwardProgressToGui(progress):
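            # Progress callbacks arrive on lazyflow worker threads, so forward
            # them to the GUI thread via the Qt signal.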
            self._forwardingSignal.emit(
                partial(progressDlg.setStepProgress, progress))

        opExport.progressSignal.subscribe(_forwardProgressToGui)

        def _onFinishExport(*args):  # Also called on cancel
            self._forwardingSignal.emit(progressDlg.finishStep)

        def _onFail(exc, exc_info):
            import traceback
            traceback.print_tb(exc_info[2])
            msg = "Failed to export layer due to the following error:\n{}".format(
                exc)
            self._forwardingSignal.emit(
                partial(QMessageBox.critical, self.parent(), "Export Failed",
                        msg))
            self._forwardingSignal.emit(progressDlg.setFailed)

        # Use a request to execute in the background
        req = Request(opExport.run_export)
        req.notify_cancelled(_onFinishExport)
        req.notify_finished(_onFinishExport)
        req.notify_failed(_onFail)

        # Allow cancel.
        progressDlg.rejected.connect(req.cancel)

        # Start the export
        req.submit()

        # Execute the progress dialog
        # (We can block the thread here because the QDialog spins up its own event loop.)
        progressDlg.exec_()
Example #22
    def create_and_train(self, X, y, feature_names=None):
        # Distribute trees as evenly as possible
        tree_counts = numpy.array([self._num_trees // self._num_forests] *
                                  self._num_forests)
        tree_counts[:self._num_trees % self._num_forests] += 1
        assert tree_counts.sum() == self._num_trees
        tree_counts = [int(tree_count) for tree_count in tree_counts if tree_count != 0]

        logger.debug("Training parallel vigra RF")
        # Save for future reference
        known_labels = numpy.unique(y)

        X = numpy.asarray(X, numpy.float32)
        y = numpy.asarray(y, numpy.uint32)
        if y.ndim == 1:
            y = y[:, numpy.newaxis]

        assert X.ndim == 2
        assert len(X) == len(y)

        # Create N forests
        forests = []
        for tree_count in tree_counts:
            forests.append(
                vigra.learning.RandomForest(tree_count, **self._kwargs))

        # Train them all in parallel
        oobs = [None] * len(forests)
        pool = RequestPool()
        for i, forest in enumerate(forests):
            req = Request(partial(forest.learnRF, X, y))
            # save the oobs
            req.notify_finished(partial(oobs.__setitem__, i))
            pool.add(req)

        with Timer() as timer:
            pool.wait()
        logger.info("Training completed in {} seconds. Average OOB: {}".format(
            timer.seconds(), numpy.average(oobs)))
        return ParallelVigraRfLazyflowClassifier(forests, oobs, known_labels,
                                                 feature_names)
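
The tree-distribution arithmetic at the top of create_and_train can be checked in isolation; a sketch with hypothetical counts:

num_trees, num_forests = 10, 4
tree_counts = [num_trees // num_forests] * num_forests  # [2, 2, 2, 2]
for i in range(num_trees % num_forests):                # spread the remainder
    tree_counts[i] += 1                                 # -> [3, 3, 2, 2]
assert sum(tree_counts) == num_trees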
Example #23
    def execute(self, slot, subindex, roi, result):
        assert slot == self.LabelAndFeatureMatrix
        self.progressSignal(0.0)

        # Technically, this could result in strange progress reporting if execute()
        #  is called by multiple threads in parallel.
        # This could be fixed with some fancier progress state, but
        # (1) We don't expect that to be typical, and
        # (2) progress reporting is merely informational.
        num_dirty_blocks = len(self._dirty_blocks)

        def update_progress(result):
            remaining_dirty = len(self._dirty_blocks)
            percent_complete = 95.0 * (num_dirty_blocks -
                                       remaining_dirty) / num_dirty_blocks
            self.progressSignal(percent_complete)

        # Update all dirty blocks in the cache
        logger.debug("Updating {} dirty blocks".format(num_dirty_blocks))
        pool = RequestPool()
        for block_start in self._dirty_blocks:
            req = Request(partial(self._update_block, block_start))
            req.notify_finished(update_progress)
            pool.add(req)
        pool.wait()

        # Concatenate all the blockwise results
        if self._blockwise_feature_matrices:
            total_feature_matrix = numpy.concatenate(
                self._blockwise_feature_matrices.values(), axis=0)
        else:
            # No label points at all.
            # Return an empty label&feature matrix (of the correct shape)
            num_feature_channels = self.FeatureImage.meta.shape[-1]
            total_feature_matrix = numpy.ndarray(shape=(0, 1 + num_feature_channels),
                                                 dtype=numpy.float64)

        self.progressSignal(100.0)
        logger.debug("After update, there are {} clean blocks".format(
            len(self._blockwise_feature_matrices)))
        result[0] = total_feature_matrix
Example #24
def test_cancellation_behavior():
    """
    If a request is cancelled while it was waiting on a lock,
    it should raise the CancellationException.
    """
    lock = RequestLock()
    lock.acquire()

    def f():
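        # The lock is already held, so this acquisition blocks; cancellation
        # should raise CancellationException here, and the body must never run.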
        try:
            with lock:
                assert False
        except Request.CancellationException:
            pass
        else:
            assert False

    finished = [False]
    cancelled = [False]
    failed = [False]

    def handle_finished(result):
        finished[0] = True

    def handle_cancelled():
        cancelled[0] = True

    def handle_failed(*args):
        failed[0] = True

    req = Request(f)
    req.notify_finished(handle_finished)
    req.notify_failed(handle_failed)
    req.notify_cancelled(handle_cancelled)

    req.submit()
    req.cancel()
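    # Crude synchronization: give the request time to block on the lock and
    # observe the cancellation before the lock is released.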
    time.sleep(0.1)
    lock.release()
    time.sleep(0.1)
    assert not finished[0] and not failed[0] and cancelled[0]
Example #25
def test_cancellation_behavior():
    """
    If a request is cancelled while it was waiting on a lock,
    it should raise the CancellationException.
    """
    lock = RequestLock()
    lock.acquire()

    def f():
        try:
            with lock:
                assert False
        except Request.CancellationException:
            pass
        else:
            assert False

    finished = [False]
    cancelled = [False]
    failed = [False]

    def handle_finished(result):
        finished[0] = True

    def handle_cancelled():
        cancelled[0] = True

    def handle_failed(*args):
        failed[0] = True

    req = Request(f)
    req.notify_finished(handle_finished)
    req.notify_failed(handle_failed)
    req.notify_cancelled(handle_cancelled)

    req.submit()
    req.cancel()
    time.sleep(0.1)
    lock.release()
    time.sleep(0.1)
    assert not finished[0] and not failed[0] and cancelled[0]
Example #26
    def _train_forests_with_feature_importance(forests, X, y, feature_names, export_path=None):
        """
        Train all RFs (in parallel) and compute feature importances while doing so.
        The importances table will be logged as INFO, and also exported to a file if export_path is given.

        Returns: oobs and importances
        """
        oobs = [None] * len(forests)
        importances = [None] * len(forests)

        def store_training_results(i, training_results):
            oob, importance_results = training_results
            oobs[i] = oob
            importances[i] = importance_results

        with Timer() as train_timer:
            pool = RequestPool()
            for i, forest in enumerate(forests):
                req = Request(partial(forest.learnRFWithFeatureSelection, X, y))
                # save the training results
                req.notify_finished(partial(store_training_results, i))
                pool.add(req)
            pool.wait()

        logger.info("Training took, {} seconds".format(train_timer.seconds()))

        # Forests may have different numbers of trees,
        # so take a weighted average of their importances
        tree_counts = [f.treeCount() for f in forests]
        weights = numpy.array(tree_counts).astype(float)
        weights /= weights.sum()

        named_importances = collections.OrderedDict(
            list(zip(feature_names, numpy.average(importances, weights=weights, axis=0)))
        )

        importance_table = generate_importance_table(named_importances, sort="overall", export_path=export_path)

        logger.info("Feature importance measurements during training: \n{}".format(importance_table))

        return oobs, named_importances
Example #27
    def run_export(self):
        role_names = self.parentApplet.dataSelectionApplet.topLevelOperator.DatasetRoles.value

        # Prepare file lists in an OrderedDict
        role_path_dict = OrderedDict(
            (role_name, self._data_role_widgets[role_name].filepaths)
            for role_name
            in role_names
        )
        dominant_role_name = role_names[0]
        num_paths = len(role_path_dict[dominant_role_name])

        if num_paths == 0:
            return

        for role_name in role_names[1:]:
            paths = role_path_dict[role_name]
            if len(paths) == 0:
                role_path_dict[role_name] = [None] * num_paths

            if len(role_path_dict[role_name]) != num_paths:
                raise BatchProcessingDataConstraintException(
                    f"Number of files for '{role_name!r}' does not match! "
                    f"Exptected {num_paths} files."
                )

        # Run the export in a separate thread
        export_req = Request(partial(self.parentApplet.run_export, role_path_dict))
        export_req.notify_failed(self.handle_batch_processing_failure)
        export_req.notify_finished(self.handle_batch_processing_finished)
        export_req.notify_cancelled(self.handle_batch_processing_cancelled)
        self.export_req = export_req

        self.parentApplet.busy = True
        self.parentApplet.appletStateUpdateRequested()
        self.cancel_button.setVisible(True)
        self.run_button.setEnabled(False)

        # Start the export        
        export_req.submit()
Example #28
    def create_and_train(self, X, y, feature_names=None):
        # Distribute trees as evenly as possible
        tree_counts = numpy.array( [self._num_trees // self._num_forests] * self._num_forests )
        tree_counts[:self._num_trees % self._num_forests] += 1
        assert tree_counts.sum() == self._num_trees
        tree_counts = [int(tree_count) for tree_count in tree_counts if tree_count != 0]
        
        logger.debug( "Training parallel vigra RF" )
        # Save for future reference
        known_labels = numpy.unique(y)

        X = numpy.asarray(X, numpy.float32)
        y = numpy.asarray(y, numpy.uint32)
        if y.ndim == 1:
            y = y[:, numpy.newaxis]

        assert X.ndim == 2
        assert len(X) == len(y)

        # Create N forests

        forests = []
        for tree_count in tree_counts:
            forests.append( vigra.learning.RandomForest(tree_count, **self._kwargs) )  # this call goes into vigra's C++ library

        # Train them all in parallel
        oobs = [None] * len(forests)
        pool = RequestPool()
        for i, forest in enumerate(forests):
            req = Request( partial(forest.learnRF, X, y) )
            # save the oobs
            req.notify_finished( partial( oobs.__setitem__, i ) )
            pool.add( req )

        with Timer() as timer:
            pool.wait()
        logger.info( "Training completed in {} seconds. Average OOB: {}".format( timer.seconds(), numpy.average(oobs) ) )
        return ParallelVigraRfLazyflowClassifier( forests, oobs, known_labels, feature_names )
Example #29
    def run(self, opExport):
        """
        Start the export and return immediately (after showing the progress dialog).
        
        :param opExport: The export object to execute.
                         It must have a 'run_export()' method and a 'progressSignal' member.
        """
        progressDlg = MultiStepProgressDialog(parent=self.parent())
        progressDlg.setNumberOfSteps(1)
        
        def _forwardProgressToGui(progress):
            self._forwardingSignal.emit( partial( progressDlg.setStepProgress, progress ) )
        opExport.progressSignal.subscribe( _forwardProgressToGui )
    
        def _onFinishExport( *args ): # Also called on cancel
            self._forwardingSignal.emit( progressDlg.finishStep )
    
        def _onFail( exc, exc_info ):
            import traceback
            traceback.print_tb(exc_info[2])
            msg = "Failed to export layer due to the following error:\n{}".format( exc )
            self._forwardingSignal.emit( partial(QMessageBox.critical, self.parent(), "Export Failed", msg) )
            self._forwardingSignal.emit( progressDlg.setFailed )

        # Use a request to execute in the background    
        req = Request( opExport.run_export )
        req.notify_cancelled( _onFinishExport )
        req.notify_finished( _onFinishExport )
        req.notify_failed( _onFail )

        # Allow cancel.
        progressDlg.rejected.connect( req.cancel )

        # Start the export
        req.submit()

        # Execute the progress dialog
        # (We can block the thread here because the QDialog spins up its own event loop.)
        progressDlg.exec_()
Example #30
    def export_object_data(self, lane_index, show_gui=False, filename_suffix=""):
        """
        Initialize progress displays and start the actual export in a new thread using the lazyflow.request framework
        :param settings: the settings from the GUI export dialog
        :type settings: dict
        :param selected_features: the features to export from the GUI dialog
        :type selected_features: list
        :param gui: the Progress bar and callbacks for finish/fail/cancel see ExportingGui.show_export_dialog
        :type gui: dict
        """
        settings, selected_features = self.get_table_export_settings()

        self.save_export_progress_dialog(None)
        if not show_gui:
            progress_display = ProgressPrinter("Export Progress", xrange(100, -1, -5), 2)
            gui = None
        else:
            from ilastik.widgets.progressDialog import ProgressDialog

            progress = ProgressDialog(["Feature Data", "Labeling Rois", "Raw Image", "Exporting"])
            progress.set_busy(True)
            progress.show()
            gui = {
                "dialog": progress,
                "ok": partial(progress.safe_popup, "information", "Information", "Export successful!"),
                "cancel": partial(progress.safe_popup, "information", "Information", "Export cancelled!"),
                "fail": partial(progress.safe_popup, "critical", "Critical", "Export failed!"),
                "unlock": self.unlock_gui,
                "lock": self.lock_gui,
            }
            progress_display = gui["dialog"]
            self.save_export_progress_dialog(progress_display)

        export = partial(self.do_export, settings, selected_features, progress_display, lane_index, filename_suffix)
        request = Request(export)
        if gui is not None:
            if "fail" in gui:
                request.notify_failed(gui["fail"])
            if "ok" in gui:
                request.notify_finished(gui["ok"])
            if "cancel" in gui:
                request.notify_cancelled(gui["cancel"])
            if "unlock" in gui:
                request.notify_cancelled(gui["unlock"])
                request.notify_failed(gui["unlock"])
                request.notify_finished(gui["unlock"])
            if "lock" in gui:
                lock = gui["lock"]
                lock()
        request.notify_failed(self.export_failed)
        request.notify_finished(self.export_finished)
        request.notify_cancelled(self.export_cancelled)
        request.submit()

        if gui is not None and "dialog" in gui:
            progress_display.cancel.connect(request.cancel)

        return request
Example #31
    def execute(self, slot, subindex, roi, result):
        assert slot == self.LabelAndFeatureMatrix
        self.progressSignal(0.0)

        # Technically, this could result in strange progress reporting if execute() 
        #  is called by multiple threads in parallel.
        # This could be fixed with some fancier progress state, but 
        # (1) We don't expect that to be typical, and
        # (2) progress reporting is merely informational.
        num_dirty_blocks = len( self._dirty_blocks )
        def update_progress( result ):
            remaining_dirty = len( self._dirty_blocks )
            percent_complete = 95.0*(num_dirty_blocks - remaining_dirty)/num_dirty_blocks
            self.progressSignal( percent_complete )

        # Update all dirty blocks in the cache
        logger.debug( "Updating {} dirty blocks".format(num_dirty_blocks) )
        pool = RequestPool()
        for block_start in self._dirty_blocks:
            req = Request( partial(self._update_block, block_start ) )
            req.notify_finished( update_progress )
            pool.add( req )
        pool.wait()

        # Concatenate all the blockwise results
        if self._blockwise_feature_matrices:
            total_feature_matrix = numpy.concatenate( self._blockwise_feature_matrices.values(), axis=0 )
        else:
            # No label points at all.
            # Return an empty label&feature matrix (of the correct shape)
            num_feature_channels = self.FeatureImage.meta.shape[-1]
            total_feature_matrix = numpy.ndarray( shape=(0, 1 + num_feature_channels), dtype=numpy.float64 )

        self.progressSignal(100.0)
        logger.debug( "After update, there are {} clean blocks".format( len(self._blockwise_feature_matrices) ) )
        result[0] = total_feature_matrix
Example #32
    def export_object_data(self, settings, selected_features, gui=None):
        """
        Initialize progress displays and start the actual export in a new thread using the lazyflow.request framework
        :param settings: the settings from the GUI export dialog
        :type settings: dict
        :param selected_features: the features to export from the GUI dialog
        :type selected_features: list
        :param gui: the Progress bar and callbacks for finish/fail/cancel see ExportingGui.show_export_dialog
        :type gui: dict
        """
        self.save_export_progress_dialog(None)
        if gui is None or "dialog" not in gui:
            progress_display = ProgressPrinter("Export Progress", xrange(100, -1, -5), 2)
        else:
            progress_display = gui["dialog"]
            self.save_export_progress_dialog(progress_display)

        export = partial(self.do_export, settings, selected_features, progress_display)
        request = Request(export)
        if gui is not None:
            if "fail" in gui:
                request.notify_failed(gui["fail"])
            if "ok" in gui:
                request.notify_finished(gui["ok"])
            if "cancel" in gui:
                request.notify_cancelled(gui["cancel"])
            if "unlock" in gui:
                request.notify_cancelled(gui["unlock"])
                request.notify_failed(gui["unlock"])
                request.notify_finished(gui["unlock"])
            if "lock" in gui:
                lock = gui["lock"]
                lock()
        request.notify_failed(self.export_failed)
        request.notify_finished(self.export_finished)
        request.notify_cancelled(self.export_cancelled)
        request.submit()

        if gui is not None and "dialog" in gui:
            progress_display.cancel.connect(request.cancel)
Example #33
    def _onTrackButtonPressed(self):
        if not self.mainOperator.ObjectFeatures.ready():
            self._criticalMessage("You have to compute object features first.")
            return

        def _track():
            self.applet.busy = True
            self.applet.appletStateUpdateRequested.emit()

            app = self._drawer.appSpinBox.value()
            dis = self._drawer.disSpinBox.value()
            opp = self._drawer.oppSpinBox.value()
            noiserate = self._drawer.noiseRateSpinBox.value()
            noiseweight = self._drawer.noiseWeightSpinBox.value()
            epGap = self._drawer.epGapSpinBox.value()
            n_neighbors = self._drawer.nNeighborsSpinBox.value()
            with_div = self._drawer.withDivisionsBox.isChecked()
            cplex_timeout = None
            if len(str(self._drawer.timeoutBox.text())):
                cplex_timeout = int(self._drawer.timeoutBox.text())

            from_t = self._drawer.from_time.value()
            to_t = self._drawer.to_time.value()
            from_x = self._drawer.from_x.value()
            to_x = self._drawer.to_x.value()
            from_y = self._drawer.from_y.value()
            to_y = self._drawer.to_y.value()
            from_z = self._drawer.from_z.value()
            to_z = self._drawer.to_z.value()
            from_size = self._drawer.from_size.value()
            to_size = self._drawer.to_size.value()

            try:
                self.mainOperator.track(time_range=range(from_t, to_t + 1),
                                        x_range=(from_x, to_x + 1),
                                        y_range=(from_y, to_y + 1),
                                        z_range=(from_z, to_z + 1),
                                        size_range=(from_size, to_size + 1),
                                        x_scale=self._drawer.x_scale.value(),
                                        y_scale=self._drawer.y_scale.value(),
                                        z_scale=self._drawer.z_scale.value(),
                                        app=app,
                                        dis=dis,
                                        noiserate=noiserate,
                                        noiseweight=noiseweight,
                                        opp=opp,
                                        ep_gap=epGap,
                                        n_neighbors=n_neighbors,
                                        with_div=with_div,
                                        cplex_timeout=cplex_timeout)
            except Exception:
                ex_type, ex, tb = sys.exc_info()
                log_exception(logger)
                self._criticalMessage("Exception(" + str(ex_type) + "): " +
                                      str(ex))
                return

        def _handle_finished(*args):
            self.applet.progressSignal.emit(100)
            self._drawer.TrackButton.setEnabled(True)
            self._drawer.exportButton.setEnabled(True)
            self._drawer.exportTifButton.setEnabled(True)
            self._setLayerVisible("Objects", False)
            self.applet.busy = False
            self.applet.appletStateUpdateRequested.emit()

        def _handle_failure(exc, exc_info):
            self.applet.progressSignal.emit(100)
            msg = "Exception raised during tracking.  See traceback above.\n"
            log_exception(logger, msg, exc_info)
            self._drawer.TrackButton.setEnabled(True)
            self.applet.busy = False
            self.applet.appletStateUpdateRequested.emit()

        self._drawer.TrackButton.setEnabled(False)
        self.applet.progressSignal.emit(0)
        self.applet.progressSignal.emit(-1)
        req = Request(_track)
        req.notify_failed(_handle_failure)
        req.notify_finished(_handle_finished)
        req.submit()
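
These handlers illustrate the callback signatures the request framework uses: finished callbacks receive the wrapped callable's result, failed callbacks receive (exc, exc_info), and cancelled callbacks receive nothing. A compact sketch (some_long_task is a stand-in):

# Callback arities as used throughout these examples:
#   notify_finished(f)  -> f(result)
#   notify_failed(f)    -> f(exc, exc_info)   # exc_info is a sys.exc_info() triple
#   notify_cancelled(f) -> f()
req = Request(some_long_task)                  # some_long_task is illustrative
req.notify_finished(lambda result: logger.info("done: %s", result))
req.notify_failed(lambda exc, exc_info: log_exception(logger, "task failed", exc_info))
req.notify_cancelled(lambda: logger.info("task cancelled"))
req.submit()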
Example #34
    def export_object_data(self,
                           lane_index,
                           show_gui=False,
                           filename_suffix=""):
        """
        Initialize progress displays and start the actual export in a new thread
        using the lazyflow.request framework

        Args:
            lane_index (int): Index of the lane to be exported
            show_gui (bool, optional): boolean to determine whether or not to
              show gui
            filename_suffix (str, optional): If provided, appended to the
              filename (before the extension)

        Returns:
            lazyflow.request.Request: Request object from which the result can
              be obtained.
        """
        settings, selected_features = self.get_table_export_settings()
        if not settings:
            return Request.with_value(None)

        self.save_export_progress_dialog(None)
        if not show_gui:
            progress_display = ProgressPrinter("Export Progress",
                                               range(100, -1, -5), 2)
            gui = None
        else:
            from ilastik.widgets.progressDialog import ProgressDialog

            progress = ProgressDialog(
                ["Feature Data", "Labeling Rois", "Raw Image", "Exporting"])
            progress.set_busy(True)
            progress.show()
            gui = {
                "dialog": progress,
                "ok": partial(progress.safe_popup, "information", "Information",
                              "Export successful!"),
                "cancel": partial(progress.safe_popup, "information", "Information",
                                  "Export cancelled!"),
                "fail": partial(progress.safe_popup, "critical", "Critical",
                                "Export failed!"),
                "unlock": self.unlock_gui,
                "lock": self.lock_gui,
            }
            progress_display = gui["dialog"]
            self.save_export_progress_dialog(progress_display)

        export = partial(self.do_export, settings, selected_features,
                         progress_display, lane_index, filename_suffix)
        request = Request(export)
        if gui is not None:
            if "fail" in gui:
                request.notify_failed(gui["fail"])
            if "ok" in gui:
                request.notify_finished(gui["ok"])
            if "cancel" in gui:
                request.notify_cancelled(gui["cancel"])
            if "unlock" in gui:
                request.notify_cancelled(gui["unlock"])
                request.notify_failed(gui["unlock"])
                request.notify_finished(gui["unlock"])
            if "lock" in gui:
                lock = gui["lock"]
                lock()
        request.notify_failed(self.export_failed)
        request.notify_finished(self.export_finished)
        request.notify_cancelled(self.export_cancelled)
        request.submit()

        if gui is not None and "dialog" in gui:
            progress_display.cancel.connect(request.cancel)

        return request
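
Note the early return above: Request.with_value(None) hands back an already-completed request, so callers can treat both paths uniformly. A sketch of the caller side (exporter is a stand-in for the object providing this method):

# Uniform caller-side handling, whether or not an export actually started.
req = exporter.export_object_data(lane_index=0, show_gui=False)
result = req.wait()   # None when there were no export settings to act on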
    def exportFinalSupervoxels(self, outputPath, axisorder, progressCallback=None):
        """
        Executes the export process within a request.
        The (already-running) request is returned, in case you want to wait for it or monitor its progress.
        """
        assert self.FinalSupervoxels.ready(), "Can't export yet: The final segmentation isn't ready!"

        logger.info("Starting Final Segmentation Export...")
        
        opTranspose = OpReorderAxes( parent=self )
        opTranspose.AxisOrder.setValue( axisorder )
        opTranspose.Input.connect( self.FinalSupervoxels )
        
        f = h5py.File(outputPath, 'w')
        opExporter = OpH5WriterBigDataset(parent=self)
        opExporter.hdf5File.setValue( f )
        opExporter.hdf5Path.setValue( 'stack' )
        opExporter.Image.connect( opTranspose.Output )
        if progressCallback is not None:
            opExporter.progressSignal.subscribe( progressCallback )
        
        req = Request( partial(self._runExporter, opExporter) )

        def cleanOps():
            opExporter.cleanUp()
            opTranspose.cleanUp()
        
        def handleFailed( exc, exc_info ):
            cleanOps()        
            f.close()
            import traceback
            traceback.print_tb(exc_info[2])
            msg = "Final Supervoxel export FAILED due to the following error:\n{}".format( exc )
            logger.error( msg )

        def handleFinished( result ):
            # Generate the mapping transforms dataset
            mapping = self._opAccumulateFinalImage.Mapping.value
            num_labels = list(mapping.keys())[-1][1]
            transform = numpy.zeros( shape=(num_labels, 2), dtype=numpy.uint32 )
            for (start, stop), body_id in mapping.items():
                for supervoxel_label in range(start, stop):
                    transform[supervoxel_label][0] = supervoxel_label
                    if body_id == -1:
                        # Special case: -1 means "identity transform" for this supervoxel
                        # (Which is really an untouched raveler body)
                        transform[supervoxel_label][1] = supervoxel_label
                    else:
                        transform[supervoxel_label][1] = body_id

            # Save the transform before closing the file
            f.create_dataset('transforms', data=transform)

            # Copy all other datasets from the original segmentation file.
            ravelerSegmentationInfo = self.DatasetInfos[2].value
            pathComponents = PathComponents(ravelerSegmentationInfo.filePath, self.WorkingDirectory.value)
            with h5py.File(pathComponents.externalPath, 'r') as originalFile:
                for k,dset in originalFile.items():
                    if k not in ['transforms', 'stack']:
                        f.copy(dset, k)
            
            try:
                cleanOps()
                logger.info("FINISHED Final Supervoxel Export")
            finally:
                f.close()

        def handleCancelled():
            cleanOps()
            f.close()
            logger.info( "Final Supervoxel export was cancelled!" )

        req.notify_failed( handleFailed )
        req.notify_finished( handleFinished )
        req.notify_cancelled( handleCancelled )
        
        req.submit()
        return req # Returned in case the user wants to cancel it.
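
handleFinished turns the (start, stop) -> body_id mapping into a two-column lookup table. A standalone sketch of the same construction with toy data (using max() over the range ends instead of relying on key order):

import numpy

# Toy stand-in for self._opAccumulateFinalImage.Mapping.value:
# half-open supervoxel label ranges mapped to body ids; -1 means identity.
mapping = {(0, 3): 7, (3, 5): -1}

num_labels = max(stop for (start, stop) in mapping.keys())
transform = numpy.zeros(shape=(num_labels, 2), dtype=numpy.uint32)
for (start, stop), body_id in mapping.items():
    for label in range(start, stop):
        transform[label] = (label, label if body_id == -1 else body_id)
# transform[:, 0] holds the supervoxel label, transform[:, 1] the body it maps to.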
Example #36
    def export_object_data(self,
                           lane_index,
                           show_gui=False,
                           filename_suffix=""):
        """
        Initialize progress displays and start the actual export in a new thread using the lazyflow.request framework
        :param settings: the settings from the GUI export dialog
        :type settings: dict
        :param selected_features: the features to export from the GUI dialog
        :type selected_features: list
        :param gui: the Progress bar and callbacks for finish/fail/cancel see ExportingGui.show_export_dialog
        :type gui: dict
        """
        settings, selected_features = self.get_table_export_settings()

        self.save_export_progress_dialog(None)
        if not show_gui:
            progress_display = ProgressPrinter("Export Progress",
                                               range(100, -1, -5), 2)
            gui = None
        else:
            from ilastik.widgets.progressDialog import ProgressDialog
            progress = ProgressDialog(
                ["Feature Data", "Labeling Rois", "Raw Image", "Exporting"])
            progress.set_busy(True)
            progress.show()
            gui = {
                "dialog": progress,
                "ok": partial(progress.safe_popup, "information", "Information",
                              "Export successful!"),
                "cancel": partial(progress.safe_popup, "information", "Information",
                                  "Export cancelled!"),
                "fail": partial(progress.safe_popup, "critical", "Critical",
                                "Export failed!"),
                "unlock": self.unlock_gui,
                "lock": self.lock_gui,
            }
            progress_display = gui["dialog"]
            self.save_export_progress_dialog(progress_display)

        export = partial(self.do_export, settings, selected_features,
                         progress_display, lane_index, filename_suffix)
        request = Request(export)
        if gui is not None:
            if "fail" in gui:
                request.notify_failed(gui["fail"])
            if "ok" in gui:
                request.notify_finished(gui["ok"])
            if "cancel" in gui:
                request.notify_cancelled(gui["cancel"])
            if "unlock" in gui:
                request.notify_cancelled(gui["unlock"])
                request.notify_failed(gui["unlock"])
                request.notify_finished(gui["unlock"])
            if "lock" in gui:
                lock = gui["lock"]
                lock()
        request.notify_failed(self.export_failed)
        request.notify_finished(self.export_finished)
        request.notify_cancelled(self.export_cancelled)
        request.submit()

        if gui is not None and "dialog" in gui:
            progress_display.cancel.connect(request.cancel)

        return request
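
Note the lock/unlock discipline above: "lock" is invoked synchronously before submit(), while "unlock" is registered on all three terminal notifications, so the GUI is re-enabled however the request ends. A minimal sketch with hypothetical helpers:

# export_button is a hypothetical widget; the helpers stand in for
# self.lock_gui / self.unlock_gui.
def lock_gui():
    export_button.setEnabled(False)

def unlock_gui(*args):                 # *args: finish and fail pass arguments
    export_button.setEnabled(True)

request = Request(do_work)             # do_work is illustrative
lock_gui()
request.notify_finished(unlock_gui)
request.notify_failed(unlock_gui)
request.notify_cancelled(unlock_gui)
request.submit()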
Example #37
    def _onTrackButtonPressed( self ):    
        if not self.mainOperator.ObjectFeatures.ready():
            self._criticalMessage("You have to compute object features first.")            
            return
        
        
        def _track():
            self.applet.busy = True
            self.applet.appletStateUpdateRequested.emit()
            
            app = self._drawer.appSpinBox.value()
            dis = self._drawer.disSpinBox.value()
            opp = self._drawer.oppSpinBox.value()
            noiserate = self._drawer.noiseRateSpinBox.value()
            noiseweight = self._drawer.noiseWeightSpinBox.value()
            epGap = self._drawer.epGapSpinBox.value()
            n_neighbors = self._drawer.nNeighborsSpinBox.value()
            with_div = self._drawer.withDivisionsBox.isChecked()
            cplex_timeout = None
            if len(str(self._drawer.timeoutBox.text())):
                cplex_timeout = int(self._drawer.timeoutBox.text())
                
            from_t = self._drawer.from_time.value()
            to_t = self._drawer.to_time.value()
            from_x = self._drawer.from_x.value()
            to_x = self._drawer.to_x.value()
            from_y = self._drawer.from_y.value()
            to_y = self._drawer.to_y.value()        
            from_z = self._drawer.from_z.value()
            to_z = self._drawer.to_z.value()        
            from_size = self._drawer.from_size.value()
            to_size = self._drawer.to_size.value()        
            
            try:
                self.mainOperator.track(
                            time_range = range(from_t, to_t + 1),
                            x_range = (from_x, to_x + 1),
                            y_range = (from_y, to_y + 1),
                            z_range = (from_z, to_z + 1),
                            size_range = (from_size, to_size + 1),
                            x_scale = self._drawer.x_scale.value(),
                            y_scale = self._drawer.y_scale.value(),
                            z_scale = self._drawer.z_scale.value(),
                            app=app,
                            dis=dis,
                            noiserate = noiserate,
                            noiseweight = noiseweight,
                            opp=opp,                        
                            ep_gap=epGap,
                            n_neighbors=n_neighbors,
                            with_div=with_div,
                            cplex_timeout=cplex_timeout)
            except Exception:
                ex_type, ex, tb = sys.exc_info()
                traceback.print_tb(tb)    
                self._criticalMessage("Exception(" + str(ex_type) + "): " + str(ex))                        
                return
    
        
        def _handle_finished(*args):
            self.applet.progressSignal.emit(100)
            self._drawer.TrackButton.setEnabled(True)
            self._drawer.exportButton.setEnabled(True)
            self._drawer.exportTifButton.setEnabled(True)
            self._setLayerVisible("Objects", False) 
            self.applet.busy = False            
            self.applet.appletStateUpdateRequested.emit()
            
        def _handle_failure( exc, exc_info ):
            self.applet.progressSignal.emit(100)
            traceback.print_exception(*exc_info)
            sys.stderr.write("Exception raised during tracking.  See traceback above.\n")
            self._drawer.TrackButton.setEnabled(True)
            self.applet.busy = False
            self.applet.appletStateUpdateRequested.emit()
        
        self._drawer.TrackButton.setEnabled(False)        
        self.applet.progressSignal.emit(0)
        self.applet.progressSignal.emit(-1)
        req = Request( _track )
        req.notify_failed( _handle_failure )
        req.notify_finished( _handle_finished )
        req.submit()
Example #38
    def export_object_data(self, lane_index, show_gui=False, filename_suffix=""):
        """
        Initialize progress displays and start the actual export in a new thread
        using the lazyflow.request framework

        Args:
            lane_index (int): Index of the lane to be exported
            show_gui (bool, optional): boolean to determine whether or not to
              show gui
            filename_suffix (str, optional): If provided, appended to the
              filename (before the extension)

        Returns:
            lazyflow.request.Request: Request object from which the result can
              be obtained.
        """
        settings, selected_features = self.get_table_export_settings()

        self.save_export_progress_dialog(None)
        if not show_gui:
            progress_display = ProgressPrinter("Export Progress", range(100, -1, -5), 2)
            gui = None
        else:
            from ilastik.widgets.progressDialog import ProgressDialog
            progress = ProgressDialog(["Feature Data", "Labeling Rois", "Raw Image", "Exporting"])
            progress.set_busy(True)
            progress.show()
            gui = {
                "dialog": progress,
                "ok": partial(progress.safe_popup, "information", "Information", "Export successful!"),
                "cancel": partial(progress.safe_popup, "information", "Information", "Export cancelled!"),
                "fail": partial(progress.safe_popup, "critical", "Critical", "Export failed!"),
                "unlock": self.unlock_gui,
                "lock": self.lock_gui
            }
            progress_display = gui["dialog"]
            self.save_export_progress_dialog(progress_display)

        export = partial(self.do_export, settings, selected_features, progress_display, lane_index, filename_suffix)
        request = Request(export)
        if gui is not None:
            if "fail" in gui:
                request.notify_failed(gui["fail"])
            if "ok" in gui:
                request.notify_finished(gui["ok"])
            if "cancel" in gui:
                request.notify_cancelled(gui["cancel"])
            if "unlock" in gui:
                request.notify_cancelled(gui["unlock"])
                request.notify_failed(gui["unlock"])
                request.notify_finished(gui["unlock"])
            if "lock" in gui:
                lock = gui["lock"]
                lock()
        request.notify_failed(self.export_failed)
        request.notify_finished(self.export_finished)
        request.notify_cancelled(self.export_cancelled)
        request.submit()

        if gui is not None and "dialog" in gui:
            progress_display.cancel.connect(request.cancel)

        return request
    def exportFinalSupervoxels(self,
                               outputPath,
                               axisorder,
                               progressCallback=None):
        """
        Executes the export process within a request.
        The (already-running) request is returned, in case you want to wait for it or monitor its progress.
        """
        assert self.FinalSupervoxels.ready(), \
            "Can't export yet: The final segmentation isn't ready!"

        logger.info("Starting Final Segmentation Export...")

        opTranspose = OpReorderAxes(parent=self)
        opTranspose.AxisOrder.setValue(axisorder)
        opTranspose.Input.connect(self.FinalSupervoxels)

        f = h5py.File(outputPath, 'w')
        opExporter = OpH5WriterBigDataset(parent=self)
        opExporter.hdf5File.setValue(f)
        opExporter.hdf5Path.setValue('stack')
        opExporter.Image.connect(opTranspose.Output)
        if progressCallback is not None:
            opExporter.progressSignal.subscribe(progressCallback)

        req = Request(partial(self._runExporter, opExporter))

        def cleanOps():
            opExporter.cleanUp()
            opTranspose.cleanUp()

        def handleFailed(exc, exc_info):
            cleanOps()
            f.close()
            msg = "Final Supervoxel export FAILED due to the following error:\n{}".format(
                exc)
            log_exception(logger, msg, exc_info)

        def handleFinished(result):
            # Generate the mapping transforms dataset
            mapping = self._opAccumulateFinalImage.Mapping.value
            num_labels = list(mapping.keys())[-1][1]
            transform = numpy.zeros(shape=(num_labels, 2), dtype=numpy.uint32)
            for (start, stop), body_id in mapping.items():
                for supervoxel_label in range(start, stop):
                    transform[supervoxel_label][0] = supervoxel_label
                    if body_id == -1:
                        # Special case: -1 means "identity transform" for this supervoxel
                        # (Which is really an untouched raveler body)
                        transform[supervoxel_label][1] = supervoxel_label
                    else:
                        transform[supervoxel_label][1] = body_id

            # Save the transform before closing the file
            f.create_dataset('transforms', data=transform)

            # Copy all other datasets from the original segmentation file.
            ravelerSegmentationInfo = self.DatasetInfos[2].value
            pathComponents = PathComponents(ravelerSegmentationInfo.filePath,
                                            self.WorkingDirectory.value)
            with h5py.File(pathComponents.externalPath, 'r') as originalFile:
                for k, dset in originalFile.items():
                    if k not in ['transforms', 'stack']:
                        f.copy(dset, k)

            try:
                cleanOps()
                logger.info("FINISHED Final Supervoxel Export")
            finally:
                f.close()

        def handleCancelled():
            cleanOps()
            f.close()
            logger.info("Final Supervoxel export was cancelled!")

        req.notify_failed(handleFailed)
        req.notify_finished(handleFinished)
        req.notify_cancelled(handleCancelled)

        req.submit()
        return req  # Returned in case the user wants to cancel it.
示例#40
0
    def execute(self, slot, subindex, roi, result):
        assert slot == self.LabelAndFeatureMatrix
        self.progressSignal(0.0)

        # Technically, this could result in strange progress reporting if execute()
        #  is called by multiple threads in parallel.
        # This could be fixed with some fancier progress state, but
        # (1) We don't expect that to be typical, and
        # (2) progress reporting is merely informational.
        num_dirty_blocks = len(self._dirty_blocks)
        remaining_dirty = [num_dirty_blocks]

        def update_progress(result):
            remaining_dirty[0] -= 1
            percent_complete = 95.0 * (num_dirty_blocks -
                                       remaining_dirty[0]) / num_dirty_blocks
            self.progressSignal(percent_complete)

        # Update all dirty blocks in the cache
        logger.debug("Updating {} dirty blocks".format(num_dirty_blocks))

        # Before updating the blocks, ensure that the necessary block locks exist
        # It's better to do this now instead of inside each request
        #  to avoid contention over self._lock
        with self._lock:
            for block_start in self._dirty_blocks:
                if block_start not in self._block_locks:
                    self._block_locks[block_start] = RequestLock()

        # Update each block in its own request.
        pool = RequestPool()
        reqs = {}
        for block_start in self._dirty_blocks:
            req = Request(partial(self._get_features_for_block, block_start))
            req.notify_finished(update_progress)
            reqs[block_start] = req
            pool.add(req)
        pool.wait()

        # Now store the results we got.
        # It's better to store the blocks here -- rather than within each request -- to
        #  avoid contention over self._lock from within every block's request.
        with self._lock:
            for block_start, req in reqs.items():
                if req.result is None:
                    # 'None' means the block wasn't dirty. No need to update.
                    continue
                labels_and_features_matrix = req.result
                self._dirty_blocks.remove(block_start)

                if labels_and_features_matrix.shape[0] > 0:
                    # Update the block entry with the new matrix.
                    self._blockwise_feature_matrices[
                        block_start] = labels_and_features_matrix
                else:
                    # All labels were removed from the block,
                    # So the new feature matrix is empty.
                    # Just delete its entry from our list.
                    try:
                        del self._blockwise_feature_matrices[block_start]
                    except KeyError:
                        pass

        # Concatenate all the blockwise results
        if self._blockwise_feature_matrices:
            total_feature_matrix = numpy.concatenate(
                list(self._blockwise_feature_matrices.values()), axis=0)
        else:
            # No label points at all.
            # Return an empty label&feature matrix (of the correct shape)
            num_feature_channels = self.FeatureImage.meta.shape[-1]
            total_feature_matrix = numpy.ndarray(shape=(0, 1 +
                                                        num_feature_channels),
                                                 dtype=numpy.float32)

        self.progressSignal(100.0)
        logger.debug("After update, there are {} clean blocks".format(
            len(self._blockwise_feature_matrices)))
        result[0] = total_feature_matrix
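
The block update above is an instance of the fan-out/aggregate pattern used throughout these examples: one Request per work item, all added to a RequestPool, results read back only after pool.wait() returns. Stripped to its skeleton (compute_block and work_items are stand-ins):

# Minimal fan-out/aggregate skeleton.
pool = RequestPool()
reqs = {}
for key in work_items:                 # work_items is illustrative
    req = Request(partial(compute_block, key))
    reqs[key] = req
    pool.add(req)
pool.wait()                            # blocks until every request completes

results = {key: req.result for key, req in reqs.items()}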
Example #41
    def execute(self, slot, subindex, roi, result):
        assert slot == self.LabelAndFeatureMatrix
        self.progressSignal(0.0)

        # Technically, this could result in strange progress reporting if execute() 
        #  is called by multiple threads in parallel.
        # This could be fixed with some fancier progress state, but 
        # (1) We don't expect that to be typical, and
        # (2) progress reporting is merely informational.
        num_dirty_blocks = len( self._dirty_blocks )
        remaining_dirty = [num_dirty_blocks]
        def update_progress( result ):
            remaining_dirty[0] -= 1
            percent_complete = 95.0*(num_dirty_blocks - remaining_dirty[0])/num_dirty_blocks
            self.progressSignal( percent_complete )

        # Update all dirty blocks in the cache
        logger.debug( "Updating {} dirty blocks".format(num_dirty_blocks) )

        # Before updating the blocks, ensure that the necessary block locks exist
        # It's better to do this now instead of inside each request
        #  to avoid contention over self._lock
        with self._lock:
            for block_start in self._dirty_blocks:
                if block_start not in self._block_locks:
                    self._block_locks[block_start] = RequestLock()

        # Update each block in its own request.
        pool = RequestPool()
        reqs = {}
        for block_start in self._dirty_blocks:
            req = Request( partial(self._get_features_for_block, block_start ) )
            req.notify_finished( update_progress )
            reqs[block_start] = req
            pool.add( req )
        pool.wait()

        # Now store the results we got.
        # It's better to store the blocks here -- rather than within each request -- to 
        #  avoid contention over self._lock from within every block's request.
        with self._lock:
            for block_start, req in reqs.items():
                if req.result is None:
                    # 'None' means the block wasn't dirty. No need to update.
                    continue
                labels_and_features_matrix = req.result
                self._dirty_blocks.remove(block_start)
                
                if labels_and_features_matrix.shape[0] > 0:
                    # Update the block entry with the new matrix.
                    self._blockwise_feature_matrices[block_start] = labels_and_features_matrix
                else:
                    # All labels were removed from the block,
                    # So the new feature matrix is empty.  
                    # Just delete its entry from our list.
                    try:
                        del self._blockwise_feature_matrices[block_start]
                    except KeyError:
                        pass

        # Concatenate all the blockwise results
        if self._blockwise_feature_matrices:
            total_feature_matrix = numpy.concatenate( list(self._blockwise_feature_matrices.values()), axis=0 )
        else:
            # No label points at all.
            # Return an empty label&feature matrix (of the correct shape)
            num_feature_channels = self.FeatureImage.meta.shape[-1]
            total_feature_matrix = numpy.ndarray( shape=(0, 1 + num_feature_channels), dtype=numpy.float32 )

        self.progressSignal(100.0)
        logger.debug( "After update, there are {} clean blocks".format( len(self._blockwise_feature_matrices) ) )
        result[0] = total_feature_matrix
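
The one-element-list counter (remaining_dirty = [num_dirty_blocks]) is a Python 2 idiom for mutating a closed-over variable; under Python 3, nonlocal expresses the same thing directly. A sketch:

# Python 3 equivalent of the one-element-list counter above.
def make_update_progress(progress_signal, num_blocks):
    remaining = num_blocks
    def update_progress(result):
        nonlocal remaining
        remaining -= 1
        # As in the original, concurrent callbacks make this best-effort only.
        progress_signal(95.0 * (num_blocks - remaining) / num_blocks)
    return update_progress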