Example #1
    def _augmentFeatureNames(self, features):
        # Take a dictionary of feature names, augment it by default features and set to Features() slot

        feature_names = features
        feature_names_with_default = deepcopy(feature_names)

        #expand the feature list by our default features
        logger.debug(
            "attaching default features {} to vigra features {}".format(
                default_features, feature_names))
        plugin = pluginManager.getPluginByName("Standard Object Features",
                                               "ObjectFeatures")
        all_default_props = plugin.plugin_object.fill_properties(
            default_features)  #fill in display name and such
        feature_names_with_default[default_features_key] = all_default_props

        if not "Standard Object Features" in list(feature_names.keys()):
            # The user has not selected any standard features. Add them now
            feature_names_with_default["Standard Object Features"] = {}

        for default_feature_name, default_feature_props in default_features.items(
        ):
            if default_feature_name not in feature_names_with_default[
                    "Standard Object Features"]:
                # this feature has not been selected by the user, add it now.
                feature_names_with_default["Standard Object Features"][
                    default_feature_name] = all_default_props[
                        default_feature_name]
                feature_names_with_default["Standard Object Features"][
                    default_feature_name]["selected"] = False

        return feature_names_with_default
Example #2
    def _export_with_plugin(self, lane_index: int, checkOverwriteFiles: bool, pluginName: str) -> bool:
        argsSlot = self.topLevelOperator.AdditionalPluginArguments
        pluginInfo = pluginManager.getPluginByName(pluginName, category="TrackingExportFormats")

        if pluginInfo is None:
            logger.error("Could not find selected plugin %s", pluginName)
            return False

        plugin = pluginInfo.plugin_object
        logger.info("Exporting tracking result using %s", pluginName)

        name_format = self.topLevelOperator.getLane(lane_index).OutputFilenameFormat.value
        filename = self.getPartiallyFormattedName(lane_index, name_format)

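        # If the plugin writes its own file but the name format only gives a directory, fall back to a default file name.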
        if plugin.exportsToFile and not os.path.basename(filename):
            filename = os.path.join(filename, 'pluginExport.txt')

        if not filename:
            logger.error("Cannot export from plugin with empty output filename")
            return False

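        # The try/finally below guarantees progressSignal(100) fires even if the plugin export raises.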
        self.progressSignal(-1)

        try:
            status = self._pluginExportFunc(lane_index, filename, plugin, checkOverwriteFiles, argsSlot)
        finally:
            self.progressSignal(100)

        if not status:
            return False

        logger.info("Export done")
        return True
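
The call sites on this page all share one shape: resolve the plugin by name and category with pluginManager.getPluginByName, guard against a failed lookup, and only then work with pluginInfo.plugin_object. Below is a condensed sketch of that pattern; the import path is the one ilastik is assumed to expose, and the helper name is hypothetical.

import logging

from ilastik.plugins import pluginManager  # assumed import path for ilastik's shared plugin manager

logger = logging.getLogger(__name__)


def resolve_export_plugin(plugin_name):
    # Hypothetical helper: return the plugin_object of a tracking export plugin,
    # or None if no plugin with that name is registered.
    plugin_info = pluginManager.getPluginByName(plugin_name, category="TrackingExportFormats")
    if plugin_info is None:
        logger.error("Could not find selected plugin %s", plugin_name)
        return None
    return plugin_info.plugin_object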
Example #3
    def post_process_lane_export(self, lane_index, checkOverwriteFiles=False):
        # The `checkOverwriteFiles` parameter ensures we only check once for files that could be overwritten:
        # if any are found, we pop up the MessageBox and do not export. On the next click of the export button
        # we really do want to export, so checkOverwriteFiles is then False.

        # Plugin export if selected
        logger.info(
            "Export source is: " +
            self.dataExportApplet.topLevelOperator.SelectedExportSource.value)

        if self.dataExportApplet.topLevelOperator.SelectedExportSource.value == OpTrackingBaseDataExport.PluginOnlyName:
            logger.info("Export source plugin selected!")
            selectedPlugin = self.dataExportApplet.topLevelOperator.SelectedPlugin.value
            additionalPluginArgumentsSlot = self.dataExportApplet.topLevelOperator.AdditionalPluginArguments

            exportPluginInfo = pluginManager.getPluginByName(
                selectedPlugin, category="TrackingExportFormats")
            if exportPluginInfo is None:
                logger.error("Could not find selected plugin %s" %
                             selectedPlugin)
            else:
                exportPlugin = exportPluginInfo.plugin_object
                logger.info("Exporting tracking result using %s" %
                            selectedPlugin)
                name_format = self.dataExportApplet.topLevelOperator.getLane(
                    lane_index).OutputFilenameFormat.value
                partially_formatted_name = self.getPartiallyFormattedName(
                    lane_index, name_format)

                if exportPlugin.exportsToFile:
                    filename = partially_formatted_name
                    if os.path.basename(filename) == '':
                        filename = os.path.join(filename, 'pluginExport.txt')
                else:
                    filename = partially_formatted_name

                if filename is None or len(str(filename)) == 0:
                    logger.error(
                        "Cannot export from plugin with empty output filename")
                    return True

                self.dataExportApplet.progressSignal(-1)
                exportStatus = self.trackingApplet.topLevelOperator.getLane(
                    lane_index).exportPlugin(filename, exportPlugin,
                                             checkOverwriteFiles,
                                             additionalPluginArgumentsSlot)
                self.dataExportApplet.progressSignal(100)

                if not exportStatus:
                    return False
                logger.info("Export done")

            return True
        return True
Example #4
    def post_process_lane_export(self, lane_index, checkOverwriteFiles=False):
        # Plugin export if selected
        logger.info("Export source is: " + self.dataExportTrackingApplet.
                    topLevelOperator.SelectedExportSource.value)

        print("in post_process_lane_export")
        if self.dataExportTrackingApplet.topLevelOperator.SelectedExportSource.value == OpTrackingBaseDataExport.PluginOnlyName:
            logger.info("Export source plugin selected!")
            selectedPlugin = self.dataExportTrackingApplet.topLevelOperator.SelectedPlugin.value

            exportPluginInfo = pluginManager.getPluginByName(
                selectedPlugin, category="TrackingExportFormats")
            if exportPluginInfo is None:
                logger.error("Could not find selected plugin %s" %
                             selectedPlugin)
            else:
                exportPlugin = exportPluginInfo.plugin_object
                logger.info("Exporting tracking result using %s" %
                            selectedPlugin)
                name_format = self.dataExportTrackingApplet.topLevelOperator.getLane(
                    lane_index).OutputFilenameFormat.value
                partially_formatted_name = self.getPartiallyFormattedName(
                    lane_index, name_format)

                if exportPlugin.exportsToFile:
                    filename = partially_formatted_name
                    if os.path.basename(filename) == '':
                        filename = os.path.join(filename, 'pluginExport.txt')
                else:
                    filename = partially_formatted_name

                if filename is None or len(str(filename)) == 0:
                    logger.error(
                        "Cannot export from plugin with empty output filename")
                    return True

                self.dataExportTrackingApplet.progressSignal(-1)
                exportStatus = self.trackingApplet.topLevelOperator.getLane(
                    lane_index).exportPlugin(filename, exportPlugin,
                                             checkOverwriteFiles)
                self.dataExportTrackingApplet.progressSignal(100)

                if not exportStatus:
                    return False
                logger.info("Export done")

            return True

        return True
Example #5
    def post_process_lane_export(self, lane_index, checkOverwriteFiles=False):
        # The `checkOverwriteFiles` parameter ensures we only check once for files that could be overwritten:
        # if any are found, we pop up the MessageBox and do not export. On the next click of the export button
        # we really do want to export, so checkOverwriteFiles is then False.
        
        # Plugin export if selected
        logger.info("Export source is: " + self.dataExportApplet.topLevelOperator.SelectedExportSource.value)

        if self.dataExportApplet.topLevelOperator.SelectedExportSource.value == OpTrackingBaseDataExport.PluginOnlyName:
            logger.info("Export source plugin selected!")
            selectedPlugin = self.dataExportApplet.topLevelOperator.SelectedPlugin.value

            exportPluginInfo = pluginManager.getPluginByName(selectedPlugin, category="TrackingExportFormats")
            if exportPluginInfo is None:
                logger.error("Could not find selected plugin %s" % selectedPlugin)
            else:
                exportPlugin = exportPluginInfo.plugin_object
                logger.info("Exporting tracking result using %s" % selectedPlugin)
                name_format = self.dataExportApplet.topLevelOperator.getLane(lane_index).OutputFilenameFormat.value
                partially_formatted_name = self.getPartiallyFormattedName(lane_index, name_format)

                if exportPlugin.exportsToFile:
                    filename = partially_formatted_name
                    if os.path.basename(filename) == '':
                        filename = os.path.join(filename, 'pluginExport.txt')
                else:
                    filename = partially_formatted_name

                if filename is None or len(str(filename)) == 0:
                    logger.error("Cannot export from plugin with empty output filename")
                    return True

                self.dataExportApplet.progressSignal(-1)
                exportStatus = self.trackingApplet.topLevelOperator.getLane(lane_index).exportPlugin(filename, exportPlugin, checkOverwriteFiles)
                self.dataExportApplet.progressSignal(100)

                if not exportStatus:
                    return False
                logger.info("Export done")

            return True
        return True
Example #6
    def post_process_lane_export(self, lane_index, checkOverwriteFiles=False):
        # Plugin export if selected
        logger.info("Export source is: " + self.dataExportTrackingApplet.topLevelOperator.SelectedExportSource.value)

        print("in post_process_lane_export")
        if self.dataExportTrackingApplet.topLevelOperator.SelectedExportSource.value == OpTrackingBaseDataExport.PluginOnlyName:
            logger.info("Export source plugin selected!")
            selectedPlugin = self.dataExportTrackingApplet.topLevelOperator.SelectedPlugin.value

            exportPluginInfo = pluginManager.getPluginByName(selectedPlugin, category="TrackingExportFormats")
            if exportPluginInfo is None:
                logger.error("Could not find selected plugin %s" % selectedPlugin)
            else:
                exportPlugin = exportPluginInfo.plugin_object
                logger.info("Exporting tracking result using %s" % selectedPlugin)
                name_format = self.dataExportTrackingApplet.topLevelOperator.getLane(lane_index).OutputFilenameFormat.value
                partially_formatted_name = self.getPartiallyFormattedName(lane_index, name_format)

                if exportPlugin.exportsToFile:
                    filename = partially_formatted_name
                    if os.path.basename(filename) == '':
                        filename = os.path.join(filename, 'pluginExport.txt')
                else:
                    filename = partially_formatted_name

                if filename is None or len(str(filename)) == 0:
                    logger.error("Cannot export from plugin with empty output filename")
                    return True

                self.dataExportTrackingApplet.progressSignal(-1)
                exportStatus = self.trackingApplet.topLevelOperator.getLane(lane_index).exportPlugin(filename, exportPlugin, checkOverwriteFiles)
                self.dataExportTrackingApplet.progressSignal(100)

                if not exportStatus:
                    return False
                logger.info("Export done")

            return True

        return True
Example #7
    def _augmentFeatureNames(self, features):
        # Take a dictionary of feature names, augment it by default features and set to Features() slot
        
        feature_names = features
        feature_names_with_default = deepcopy(feature_names)

        #expand the feature list by our default features
        logger.debug("attaching default features {} to vigra features {}".format(default_features, feature_names))
        plugin = pluginManager.getPluginByName("Standard Object Features", "ObjectFeatures")
        all_default_props = plugin.plugin_object.fill_properties(default_features) #fill in display name and such
        feature_names_with_default[default_features_key] = all_default_props

        if not "Standard Object Features" in list(feature_names.keys()):
            # The user has not selected any standard features. Add them now
            feature_names_with_default["Standard Object Features"] = {}

        for default_feature_name, default_feature_props in default_features.items():
            if default_feature_name not in feature_names_with_default["Standard Object Features"]:
                # this feature has not been selected by the user, add it now.
                feature_names_with_default["Standard Object Features"][default_feature_name] = all_default_props[default_feature_name]
                feature_names_with_default["Standard Object Features"][default_feature_name]["selected"] = False

        return feature_names_with_default
Example #8
    def _extract(self, image, labels):
        if not (image.ndim == labels.ndim == 4):
            raise Exception("both images must be 4D. raw image shape: {}"
                            " label image shape: {}".format(image.shape, labels.shape))

        # FIXME: maybe simplify? taggedShape should be easier here
        class Axes(object):
            x = image.axistags.index('x')
            y = image.axistags.index('y')
            z = image.axistags.index('z')
            c = image.axistags.index('c')
        axes = Axes()

        slc3d = [slice(None)] * 4 # FIXME: do not hardcode
        slc3d[axes.c] = 0

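        # Keep only channel 0 of the label image; features are computed on the 3D label volume.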
        labels = labels[slc3d]
        
        logger.debug("Computing default features")

        feature_names = deepcopy(self.Features([]).wait())

        # do global features
        logger.debug("computing global features")
        extra_features_computed = False
        global_features = {}
        selected_vigra_features = []
        for plugin_name, feature_dict in feature_names.iteritems():
            plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
            if plugin_name == "Standard Object Features":
                #expand the feature list by our default features
                logger.debug("attaching default features {} to vigra features {}".format(default_features, feature_dict))
                selected_vigra_features = feature_dict.keys()
                feature_dict.update(default_features)
                extra_features_computed = True
            global_features[plugin_name] = plugin.plugin_object.compute_global(image, labels, feature_dict, axes)
        
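        # Collect the default features into their own dict; if they were only computed as defaults (not user-selected), drop them from the plugin results.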
        extrafeats = {}
        if extra_features_computed:
            for feat_key in default_features:
                feature = None
                if feat_key in selected_vigra_features:
                    #we wanted that feature independently
                    feature = global_features["Standard Object Features"][feat_key]
                else:
                    feature = global_features["Standard Object Features"].pop(feat_key)
                    feature_names["Standard Object Features"].pop(feat_key)
                extrafeats[feat_key] = feature
        else:
            logger.debug("default features not computed, computing separately")
            extrafeats_acc = vigra.analysis.extractRegionFeatures(image[slc3d].squeeze().astype(np.float32), labels.squeeze(),
                                                        default_features.keys(),
                                                        ignoreLabel=0)
            #remove the 0th object, we'll add it again later
            for k, v in extrafeats_acc.iteritems():
                extrafeats[k]=v[1:]
                if len(v.shape)==1:
                    extrafeats[k]=extrafeats[k].reshape(extrafeats[k].shape+(1,))
        
        extrafeats = dict((k.replace(' ', ''), v)
                          for k, v in extrafeats.iteritems())
        
        mincoords = extrafeats["Coord<Minimum>"]
        maxcoords = extrafeats["Coord<Maximum>"]
        nobj = mincoords.shape[0]
        
        # local features: loop over all objects
        def dictextend(a, b):
            for key in b:
                a[key].append(b[key])
            return a
        

        local_features = defaultdict(lambda: defaultdict(list))
        margin = max_margin(feature_names)
        has_local_features = {}
        for plugin_name, feature_dict in feature_names.iteritems():
            has_local_features[plugin_name] = False
            for features in feature_dict.itervalues():
                if 'margin' in features:
                    has_local_features[plugin_name] = True
                    break
            
                            
        if np.any(margin) > 0:
            #starting from 0, we stripped 0th background object in global computation
            for i in range(0, nobj):
                logger.debug("processing object {}".format(i))
                extent = self.compute_extent(i, image, mincoords, maxcoords, axes, margin)
                rawbbox = self.compute_rawbbox(image, extent, axes)
                #it's i+1 here, because the background has label 0
                binary_bbox = np.where(labels[tuple(extent)] == i+1, 1, 0).astype(np.bool)
                for plugin_name, feature_dict in feature_names.iteritems():
                    if not has_local_features[plugin_name]:
                        continue
                    plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
                    feats = plugin.plugin_object.compute_local(rawbbox, binary_bbox, feature_dict, axes)
                    local_features[plugin_name] = dictextend(local_features[plugin_name], feats)

        logger.debug("computing done, removing failures")
        # remove local features that failed
        for pname, pfeats in local_features.iteritems():
            for key in pfeats.keys():
                value = pfeats[key]
                try:
                    pfeats[key] = np.vstack(list(v.reshape(1, -1) for v in value))
                except:
                    logger.warn('feature {} failed'.format(key))
                    del pfeats[key]

        # merge the global and local features
        logger.debug("removed failed, merging")
        all_features = {}
        plugin_names = set(global_features.keys()) | set(local_features.keys())
        for name in plugin_names:
            d1 = global_features.get(name, {})
            d2 = local_features.get(name, {})
            all_features[name] = dict(d1.items() + d2.items())
        all_features[default_features_key]=extrafeats

        # reshape all features
        for pfeats in all_features.itervalues():
            for key, value in pfeats.iteritems():
                if value.shape[0] != nobj:
                    raise Exception('feature {} does not have enough rows, {} instead of {}'.format(key, value.shape[0], nobj))

                # because object classification operator expects nobj to
                # include background. FIXME: we should change that assumption.
                value = np.vstack((np.zeros(value.shape[1]),
                                   value))
                value = value.astype(np.float32) #turn Nones into numpy.NaNs

                assert value.dtype == np.float32
                assert value.shape[0] == nobj+1
                assert value.ndim == 2

                pfeats[key] = value
        logger.debug("merged, returning")
        return all_features
Example #9
    def compute_for_one_plugin(plugin_name, feature_dict):
        plugin_inner = pluginManager.getPluginByName(
            plugin_name, "ObjectFeatures")
        global_features[
            plugin_name] = plugin_inner.plugin_object.compute_global(
                image, labels, feature_dict, axes)
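
In context (Example #12 below shows the enclosing _extract method) this nested helper is dispatched once per feature plugin on a lazyflow RequestPool, so the plugins' global features are computed concurrently. Roughly, using the names from Example #12 (Request and RequestPool come from lazyflow.request, partial from functools):

    pool = RequestPool()
    for plugin_name, feature_dict in feature_names.items():
        if plugin_name != default_features_key:
            # one Request per plugin; the closure stores its result in global_features
            pool.add(Request(partial(compute_for_one_plugin, plugin_name, feature_dict)))
    pool.wait()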
Example #10
    def _extract(self, image, labels):
        if not (image.ndim == labels.ndim == 4):
            raise Exception("both images must be 4D. raw image shape: {}"
                            " label image shape: {}".format(image.shape, labels.shape))

        # FIXME: maybe simplify?
        class Axes(object):
            x = image.axistags.index('x')
            y = image.axistags.index('y')
            z = image.axistags.index('z')
            c = image.axistags.index('c')
        axes = Axes()

        image = np.asarray(image, dtype=np.float32)
        labels = np.asarray(labels, dtype=np.uint32)

        slc3d = [slice(None)] * 4 # FIXME: do not hardcode
        slc3d[axes.c] = 0

        labels = labels[slc3d]

        
        logger.debug("Computing default features")

        #FIXME: clamp the global vigra features here
        extrafeats = vigra.analysis.extractRegionFeatures(image[slc3d], labels,
                                                        default_features,
                                                        ignoreLabel=0)
        logger.debug("computed default features")

        extrafeats = dict((k.replace(' ', ''), v)
                          for k, v in extrafeats.iteritems())

        mincoords = extrafeats["Coord<Minimum>"]
        maxcoords = extrafeats["Coord<Maximum>"]
        nobj = mincoords.shape[0]

        feature_names = self.Features([]).wait()

        # do global features
        global_features = {}
        for plugin_name, feature_list in feature_names.iteritems():
            plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
            global_features[plugin_name] = plugin.plugin_object.compute_global(image, labels, feature_list, axes)

        logger.debug("computing global features")
        # local features: loop over all objects
        def dictextend(a, b):
            for key in b:
                a[key].append(b[key])
            return a

        local_features = defaultdict(lambda: defaultdict(list))
        margin = max_margin(feature_names)
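        # Per-object (local) features are only computed when at least one selected feature requests a nonzero margin.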
        if np.any(margin) > 0:
            for i in range(1, nobj):
                logger.debug("processing object {}".format(i))
                extent = self.compute_extent(i, image, mincoords, maxcoords, axes, margin)
                rawbbox = self.compute_rawbbox(image, extent, axes)
                binary_bbox = np.where(labels[tuple(extent)] == i, 1, 0).astype(np.bool)
                for plugin_name, feature_list in feature_names.iteritems():
                    plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
                    feats = plugin.plugin_object.compute_local(rawbbox, binary_bbox, feature_list, axes)
                    local_features[plugin_name] = dictextend(local_features[plugin_name], feats)

        logger.debug("computing done, removing failures")
        # remove local features that failed
        for pname, pfeats in local_features.iteritems():
            for key in pfeats.keys():
                value = pfeats[key]
                try:
                    pfeats[key] = np.vstack(list(v.reshape(1, -1) for v in value))
                except:
                    logger.warn('feature {} failed'.format(key))
                    del pfeats[key]

        # merge the global and local features
        logger.debug("removed failed, merging")
        all_features = {}
        plugin_names = set(global_features.keys()) | set(local_features.keys())
        for name in plugin_names:
            d1 = global_features.get(name, {})
            d2 = local_features.get(name, {})
            all_features[name] = dict(d1.items() + d2.items())

        # reshape all features
        for pfeats in all_features.itervalues():
            for key, value in pfeats.iteritems():
                if value.shape[0] != nobj - 1:
                    raise Exception('feature {} does not have enough rows'.format(key))

                # because object classification operator expects nobj to
                # include background. we should change that assumption.
                value = np.vstack((np.zeros(value.shape[1]),
                                   value))

                value = value.astype(np.float32) #turn Nones into numpy.NaNs

                assert value.dtype == np.float32
                assert value.shape[0] == nobj
                assert value.ndim == 2

                pfeats[key] = value
        logger.debug("merged, returning")
        # add features needed by downstream applets. these should be
        # removed before classification.
        all_features[default_features_key] = extrafeats
        return all_features
Example #11
    def populate(self):
        #self.ui.treeWidget.setColumnCount(2)
        for pluginName, features in self.featureDict.items():
            if pluginName=="TestFeatures" and not ilastik_config.getboolean("ilastik", "debug"):
                continue
            parent = QTreeWidgetItem(self.ui.treeWidget)
            parent.setText(0, pluginName)

            parent.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
            # hack to ensure checkboxes visible
            parent.setCheckState(0, Qt.Checked)
            parent.setCheckState(0, Qt.Unchecked)
            parent.setExpanded(False)
            self.countChecked[pluginName]=0
            self.countAll[pluginName]=len(self.featureDict[pluginName])

            advanced_names = []
            simple_names = []
            selected_names = []

            groups = set()
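            # Look up the feature plugin so it can fill in display properties (display text, tooltips, groups) for its features.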
            plugin = pluginManager.getPluginByName(pluginName, "ObjectFeatures")
            features_with_props = deepcopy(features)
            if plugin is not None:
                plugin.plugin_object.fill_properties(features_with_props)

            for name in sorted(features.keys()):
                parameters = features[name]

                for prop, prop_value in features_with_props[name].items():
                    if not prop in list(parameters.keys()):
                        # this property has not been added yet (perhaps the feature dictionary has been read from file)
                        # set it now
                        parameters[prop] = prop_value

                try:
                    if parameters['advanced'] is True:
                        advanced_names.append(name)
                    else:
                        simple_names.append(name)
                except KeyError:
                    simple_names.append(name)
                try:
                    groups.add(parameters["group"])
                except KeyError:
                    pass

                if pluginName in self.selectedFeatures:
                    if name in self.selectedFeatures[pluginName]:
                        selected_names.append(name)
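            # Create one tree sub-item per feature group; individual features are attached to their group item (or directly to the plugin item) below.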
            gr_items = {}
            for gr in groups:
                gr_items[gr] = QTreeWidgetItem(parent)
                gr_items[gr].setText(0, gr)
                #gr_items[gr].setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
                gr_items[gr].setExpanded(True)
            
            for name in simple_names+advanced_names:
                if name in advanced_names and (not name in selected_names):
                    # do not display advanced features, if they have not been selected previously
                    continue
                parameters = features[name]
                if "group" in parameters:
                    item = QTreeWidgetItem(gr_items[parameters["group"]])
                    item.group_name = parameters["group"]
                else:
                    item = QTreeWidgetItem(parent)
                if 'displaytext' in parameters:
                    itemtext = parameters['displaytext']
                else:
                    itemtext = name
                item.setText(0, itemtext)
                item.feature_id = name

                item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
                if 'tooltip' in parameters:
                    item.setToolTip(0, parameters['tooltip'])

                # hack to ensure checkboxes visible
                item.setCheckState(0, Qt.Checked)
                item.setCheckState(0, Qt.Unchecked)
                if name in selected_names:
                    item.setCheckState(0, Qt.Checked)
                    self.countChecked[pluginName]+=1
            if self.countChecked[pluginName] == 0:
                parent.setCheckState(0, Qt.Unchecked)
            elif self.countChecked[pluginName] == self.countAll[pluginName]:
                parent.setCheckState(0, Qt.Checked)
            else:
                parent.setCheckState(0, Qt.PartiallyChecked)
            self.updateToolTip(parent)
        # facilitates switching of the CheckBox when clicking on the Text of a QTreeWidgetItem
        self.ui.treeWidget.setCurrentItem(None)
Example #12
    def _extract(self, image, labels, atlas=None):
        if not (image.ndim == labels.ndim == 4):
            raise Exception("both images must be 4D. raw image shape: {}"
                            " label image shape: {}".format(
                                image.shape, labels.shape))

        # FIXME: maybe simplify? taggedShape should be easier here
        class Axes(object):
            x = image.axistags.index('x')
            y = image.axistags.index('y')
            z = image.axistags.index('z')
            c = image.axistags.index('c')

        axes = Axes()

        slc3d = [slice(None)] * 4  # FIXME: do not hardcode
        slc3d[axes.c] = 0

        labels = labels[slc3d]

        #These are the feature names, selected by the user and the default feature names.
        feature_names = deepcopy(self.Features([]).wait())
        feature_names = self._augmentFeatureNames(feature_names)

        # do global features
        logger.debug("Computing global and default features")
        global_features = {}
        pool = RequestPool()

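        # Each plugin's global feature computation runs in its own lazyflow Request, so plugins are processed concurrently.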
        def compute_for_one_plugin(plugin_name, feature_dict):
            plugin_inner = pluginManager.getPluginByName(
                plugin_name, "ObjectFeatures")
            global_features[
                plugin_name] = plugin_inner.plugin_object.compute_global(
                    image, labels, feature_dict, axes)

        for plugin_name, feature_dict in feature_names.items():
            if plugin_name != default_features_key:
                pool.add(
                    Request(
                        partial(compute_for_one_plugin, plugin_name,
                                feature_dict)))

        pool.wait()

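        # Sort the default features into their own dict; unless the user explicitly selected them, they are removed from the plugin results.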
        extrafeats = {}
        for feat_key in default_features:
            try:
                sel = feature_names["Standard Object Features"][feat_key][
                    "selected"]
            except KeyError:
                # The 'selected' property is not always set to True; sometimes it is simply missing.
                # All that matters is that it is not explicitly False.
                sel = True
            if not sel:
                # This feature was not selected by the user: move it out of the computed dict
                # and into the special dict for default features.
                feature = global_features["Standard Object Features"].pop(
                    feat_key)
            else:
                feature = global_features["Standard Object Features"][feat_key]
            extrafeats[feat_key] = feature

        if atlas is not None:
            extrafeats['AtlasMapping'] = self._createAtlasMapping(
                extrafeats['RegionCenter'], atlas)

        extrafeats = dict(
            (k.replace(' ', ''), v) for k, v in extrafeats.items())

        mincoords = extrafeats["Coord<Minimum>"].astype(int)
        maxcoords = extrafeats["Coord<Maximum>"].astype(int)
        nobj = mincoords.shape[0]

        # local features: loop over all objects
        def dictextend(a, b):
            for key in b:
                a[key].append(b[key])
            return a

        local_features = collections.defaultdict(
            lambda: collections.defaultdict(list))
        margin = max_margin(feature_names)
        has_local_features = {}
        for plugin_name, feature_dict in feature_names.items():
            has_local_features[plugin_name] = False
            for features in feature_dict.values():
                if 'margin' in features:
                    has_local_features[plugin_name] = True
                    break

        if numpy.any(margin) > 0:
            #starting from 0, we stripped 0th background object in global computation
            for i in range(0, nobj):
                logger.debug("processing object {}".format(i))
                extent = self.compute_extent(i, image, mincoords, maxcoords,
                                             axes, margin)
                rawbbox = self.compute_rawbbox(image, extent, axes)
                #it's i+1 here, because the background has label 0
                binary_bbox = numpy.where(labels[tuple(extent)] == i + 1, 1,
                                          0).astype(numpy.bool)
                for plugin_name, feature_dict in feature_names.items():
                    if not has_local_features[plugin_name]:
                        continue
                    plugin = pluginManager.getPluginByName(
                        plugin_name, "ObjectFeatures")
                    feats = plugin.plugin_object.compute_local(
                        rawbbox, binary_bbox, feature_dict, axes)
                    local_features[plugin_name] = dictextend(
                        local_features[plugin_name], feats)

        logger.debug("computing done, removing failures")
        # remove local features that failed
        for pname, pfeats in local_features.items():
            for key in list(pfeats.keys()):
                value = pfeats[key]
                try:
                    pfeats[key] = numpy.vstack(
                        list(v.reshape(1, -1) for v in value))
                except:
                    logger.warning('feature {} failed'.format(key))
                    del pfeats[key]

        # merge the global and local features
        logger.debug("removed failed, merging")
        all_features = {}
        plugin_names = set(global_features.keys()) | set(local_features.keys())
        for name in plugin_names:
            d1 = global_features.get(name, {})
            d2 = local_features.get(name, {})
            all_features[name] = dict(list(d1.items()) + list(d2.items()))
        all_features[default_features_key] = extrafeats

        # reshape all features
        for pfeats in all_features.values():
            for key, value in pfeats.items():
                if value.shape[0] != nobj:
                    raise Exception(
                        'feature {} does not have enough rows, {} instead of {}'
                        .format(key, value.shape[0], nobj))

                # because object classification operator expects nobj to
                # include background. FIXME: we should change that assumption.
                value = numpy.vstack((numpy.zeros(value.shape[1]), value))
                value = value.astype(
                    numpy.float32)  #turn Nones into numpy.NaNs

                assert value.dtype == numpy.float32
                assert value.shape[0] == nobj + 1
                assert value.ndim == 2

                pfeats[key] = value
        logger.debug("merged, returning")
        return all_features
Example #13
    def _extract(self, image, labels, atlas=None):
        if not (image.ndim == labels.ndim == 4):
            raise Exception("both images must be 4D. raw image shape: {}"
                            " label image shape: {}".format(image.shape, labels.shape))

        # FIXME: maybe simplify? taggedShape should be easier here
        class Axes(object):
            x = image.axistags.index('x')
            y = image.axistags.index('y')
            z = image.axistags.index('z')
            c = image.axistags.index('c')
        axes = Axes()

        slc3d = [slice(None)] * 4 # FIXME: do not hardcode
        slc3d[axes.c] = 0

        labels = labels[slc3d]

        #These are the feature names, selected by the user and the default feature names.
        feature_names = deepcopy(self.Features([]).wait())
        feature_names = self._augmentFeatureNames(feature_names)

        # do global features
        logger.debug("Computing global and default features")
        global_features = {}
        pool = RequestPool()

        def compute_for_one_plugin(plugin_name, feature_dict):
            plugin_inner = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
            global_features[plugin_name] = plugin_inner.plugin_object.compute_global(image, labels, feature_dict, axes)

        for plugin_name, feature_dict in feature_names.items():
            if plugin_name != default_features_key:
                pool.add(Request(partial(compute_for_one_plugin, plugin_name, feature_dict)))

        pool.wait()

        extrafeats = {}
        for feat_key in default_features:
            try:
                sel = feature_names["Standard Object Features"][feat_key]["selected"]
            except KeyError:
                # The 'selected' property is not always set to True; sometimes it is simply missing.
                # All that matters is that it is not explicitly False.
                sel = True
            if not sel:
                # This feature was not selected by the user: move it out of the computed dict
                # and into the special dict for default features.
                feature = global_features["Standard Object Features"].pop(feat_key)
            else:
                feature = global_features["Standard Object Features"][feat_key]
            extrafeats[feat_key] = feature

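        # If an atlas was provided, derive an AtlasMapping feature from each object's RegionCenter.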
        if atlas is not None:
            extrafeats['AtlasMapping'] = self._createAtlasMapping(extrafeats['RegionCenter'], atlas)

        extrafeats = dict((k.replace(' ', ''), v)
                          for k, v in extrafeats.items())
        
        mincoords = extrafeats["Coord<Minimum>"].astype(int)
        maxcoords = extrafeats["Coord<Maximum>"].astype(int)
        nobj = mincoords.shape[0]
        
        # local features: loop over all objects
        def dictextend(a, b):
            for key in b:
                a[key].append(b[key])
            return a
        

        local_features = collections.defaultdict(lambda: collections.defaultdict(list))
        margin = max_margin(feature_names)
        has_local_features = {}
        for plugin_name, feature_dict in feature_names.items():
            has_local_features[plugin_name] = False
            for features in feature_dict.values():
                if 'margin' in features:
                    has_local_features[plugin_name] = True
                    break
            
                            
        if numpy.any(margin) > 0:
            #starting from 0, we stripped 0th background object in global computation
            for i in range(0, nobj):
                logger.debug("processing object {}".format(i))
                extent = self.compute_extent(i, image, mincoords, maxcoords, axes, margin)
                rawbbox = self.compute_rawbbox(image, extent, axes)
                #it's i+1 here, because the background has label 0
                binary_bbox = numpy.where(labels[tuple(extent)] == i+1, 1, 0).astype(numpy.bool)
                for plugin_name, feature_dict in feature_names.items():
                    if not has_local_features[plugin_name]:
                        continue
                    plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
                    feats = plugin.plugin_object.compute_local(rawbbox, binary_bbox, feature_dict, axes)
                    local_features[plugin_name] = dictextend(local_features[plugin_name], feats)

        logger.debug("computing done, removing failures")
        # remove local features that failed
        for pname, pfeats in local_features.items():
            for key in list(pfeats.keys()):
                value = pfeats[key]
                try:
                    pfeats[key] = numpy.vstack(list(v.reshape(1, -1) for v in value))
                except:
                    logger.warning('feature {} failed'.format(key))
                    del pfeats[key]

        # merge the global and local features
        logger.debug("removed failed, merging")
        all_features = {}
        plugin_names = set(global_features.keys()) | set(local_features.keys())
        for name in plugin_names:
            d1 = global_features.get(name, {})
            d2 = local_features.get(name, {})
            all_features[name] = dict(list(d1.items()) + list(d2.items()))
        all_features[default_features_key]=extrafeats

        # reshape all features
        for pfeats in all_features.values():
            for key, value in pfeats.items():
                if value.shape[0] != nobj:
                    raise Exception('feature {} does not have enough rows, {} instead of {}'.format(key, value.shape[0], nobj))

                # because object classification operator expects nobj to
                # include background. FIXME: we should change that assumption.
                value = numpy.vstack((numpy.zeros(value.shape[1]),
                                   value))
                value = value.astype(numpy.float32) #turn Nones into numpy.NaNs

                assert value.dtype == numpy.float32
                assert value.shape[0] == nobj+1
                assert value.ndim == 2

                pfeats[key] = value
        logger.debug("merged, returning")
        return all_features
Example #14
    def compute_for_one_plugin(plugin_name, feature_dict):
        plugin_inner = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
        global_features[plugin_name] = plugin_inner.plugin_object.compute_global(image, labels, feature_dict, axes)
Example #15
def flatten_ilastik_feature_table(table, selection, signal):
    selection = list(selection)
    frames = table.meta.shape[0]

    logger.info("Fetching object features for feature table...")
    computed_feature = table([]).wait()

    signal(0)

    feature_long_names = []  # For example, "Size in Pixels"
    feature_short_names = []  # For example, "Count"
    feature_plugins = []
    feature_channels = []
    feature_types = []

    for plugin_name, feature_dict in computed_feature[0].items():
        all_props = None

        if plugin_name == default_features_key:
            plugin = pluginManager.getPluginByName("Standard Object Features",
                                                   "ObjectFeatures")
        else:
            plugin = pluginManager.getPluginByName(plugin_name,
                                                   "ObjectFeatures")
        if plugin:
            plugin_feature_names = {el: {} for el in list(feature_dict.keys())}
            all_props = plugin.plugin_object.fill_properties(
                plugin_feature_names)  # fill in display name and such

        for feat_name, feat_array in feature_dict.items():
            if all_props:
                long_name = all_props[feat_name]["displaytext"]
            else:
                long_name = feat_name
            if (plugin_name == default_features_key or long_name in selection
                    or feat_name
                    in selection) and long_name not in feature_long_names:
                feature_long_names.append(long_name)
                feature_short_names.append(feat_name)
                feature_plugins.append(plugin_name)
                feature_channels.append((feat_array.shape[1]))
                feature_types.append(feat_array.dtype)

    signal(25)

    obj_count = []
    for t, cf in computed_feature.items():
        obj_count.append(cf[default_features_key]["Count"].shape[0] -
                         1)  # no background

    signal(50)

    dtype_names = []
    dtype_types = []
    dtype_to_key = {}

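    # One structured-array column per feature channel; dtype_to_key maps each column name back to (plugin, feature name, channel index).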
    for i, name in enumerate(feature_long_names):
        if feature_channels[i] > 1:
            for c in range(feature_channels[i]):
                dtype_names.append("%s_%i" % (name, c))
                dtype_types.append(feature_types[i].name)
                dtype_to_key[dtype_names[-1]] = (feature_plugins[i],
                                                 feature_short_names[i], c)
        else:
            dtype_names.append(name)
            dtype_types.append(feature_types[i].name)
            dtype_to_key[dtype_names[-1]] = (feature_plugins[i],
                                             feature_short_names[i], 0)

    feature_table = np.zeros((sum(obj_count), ), dtype=",".join(dtype_types))
    feature_table.dtype.names = list(map(str, dtype_names))

    signal(75)

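    # Copy each frame's feature values into the flat table, skipping row 0 (the background object) of every frame.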
    start = 0
    end = obj_count[0]
    for t, cf in computed_feature.items():
        for name in dtype_names:
            plugin, feat_name, index = dtype_to_key[name]
            data_len = len(cf[plugin][feat_name][1:, index])
            feature_table[name][start:start +
                                data_len] = cf[plugin][feat_name][1:, index]
        start = end
        try:
            end += obj_count[int(t) + 1]
        except IndexError:
            end = sum(obj_count)

    signal(100)

    return feature_table
Example #16
    def _extract(self, image, labels):
        if not (image.ndim == labels.ndim == 4):
            raise Exception("both images must be 4D. raw image shape: {}"
                            " label image shape: {}".format(image.shape, labels.shape))

        # FIXME: maybe simplify? taggedShape should be easier here
        class Axes(object):
            x = image.axistags.index('x')
            y = image.axistags.index('y')
            z = image.axistags.index('z')
            c = image.axistags.index('c')
        axes = Axes()

        slc3d = [slice(None)] * 4 # FIXME: do not hardcode
        slc3d[axes.c] = 0

        labels = labels[slc3d]
        
        logger.debug("Computing default features")

        feature_names = deepcopy(self.Features([]).wait())

        # do global features
        logger.debug("computing global features")
        extra_features_computed = False
        global_features = {}
        selected_vigra_features = []
        for plugin_name, feature_dict in feature_names.iteritems():
            plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
            if plugin_name == "Standard Object Features":
                #expand the feature list by our default features
                logger.debug("attaching default features {} to vigra features {}".format(default_features, feature_dict))
                selected_vigra_features = feature_dict.keys()
                feature_dict.update(default_features)
                extra_features_computed = True
            global_features[plugin_name] = plugin.plugin_object.compute_global(image, labels, feature_dict, axes)
        
        extrafeats = {}
        if extra_features_computed:
            for feat_key in default_features:
                feature = None
                if feat_key in selected_vigra_features:
                    #we wanted that feature independently
                    feature = global_features["Standard Object Features"][feat_key]
                else:
                    feature = global_features["Standard Object Features"].pop(feat_key)
                    feature_names["Standard Object Features"].pop(feat_key)
                extrafeats[feat_key] = feature
        else:
            logger.debug("default features not computed, computing separately")
            extrafeats_acc = vigra.analysis.extractRegionFeatures(image[slc3d].squeeze().astype(np.float32), labels.squeeze(),
                                                        default_features.keys(),
                                                        ignoreLabel=0)
            #remove the 0th object, we'll add it again later
            for k, v in extrafeats_acc.iteritems():
                extrafeats[k]=v[1:]
                if len(v.shape)==1:
                    extrafeats[k]=extrafeats[k].reshape(extrafeats[k].shape+(1,))
        
        extrafeats = dict((k.replace(' ', ''), v)
                          for k, v in extrafeats.iteritems())
        
        mincoords = extrafeats["Coord<Minimum>"]
        maxcoords = extrafeats["Coord<Maximum>"]
        nobj = mincoords.shape[0]
        
        # local features: loop over all objects
        def dictextend(a, b):
            for key in b:
                a[key].append(b[key])
            return a
        

        local_features = collections.defaultdict(lambda: collections.defaultdict(list))
        margin = max_margin(feature_names)
        has_local_features = {}
        for plugin_name, feature_dict in feature_names.iteritems():
            has_local_features[plugin_name] = False
            for features in feature_dict.itervalues():
                if 'margin' in features:
                    has_local_features[plugin_name] = True
                    break
            
                            
        if np.any(margin) > 0:
            #starting from 0, we stripped 0th background object in global computation
            for i in range(0, nobj):
                logger.debug("processing object {}".format(i))
                extent = self.compute_extent(i, image, mincoords, maxcoords, axes, margin)
                rawbbox = self.compute_rawbbox(image, extent, axes)
                #it's i+1 here, because the background has label 0
                binary_bbox = np.where(labels[tuple(extent)] == i+1, 1, 0).astype(np.bool)
                for plugin_name, feature_dict in feature_names.iteritems():
                    if not has_local_features[plugin_name]:
                        continue
                    plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
                    feats = plugin.plugin_object.compute_local(rawbbox, binary_bbox, feature_dict, axes)
                    local_features[plugin_name] = dictextend(local_features[plugin_name], feats)

        logger.debug("computing done, removing failures")
        # remove local features that failed
        for pname, pfeats in local_features.iteritems():
            for key in pfeats.keys():
                value = pfeats[key]
                try:
                    pfeats[key] = np.vstack(list(v.reshape(1, -1) for v in value))
                except:
                    logger.warn('feature {} failed'.format(key))
                    del pfeats[key]

        # merge the global and local features
        logger.debug("removed failed, merging")
        all_features = {}
        plugin_names = set(global_features.keys()) | set(local_features.keys())
        for name in plugin_names:
            d1 = global_features.get(name, {})
            d2 = local_features.get(name, {})
            all_features[name] = dict(d1.items() + d2.items())
        all_features[default_features_key]=extrafeats

        # reshape all features
        for pfeats in all_features.itervalues():
            for key, value in pfeats.iteritems():
                if value.shape[0] != nobj:
                    raise Exception('feature {} does not have enough rows, {} instead of {}'.format(key, value.shape[0], nobj))

                # because object classification operator expects nobj to
                # include background. FIXME: we should change that assumption.
                value = np.vstack((np.zeros(value.shape[1]),
                                   value))
                value = value.astype(np.float32) #turn Nones into numpy.NaNs

                assert value.dtype == np.float32
                assert value.shape[0] == nobj+1
                assert value.ndim == 2

                pfeats[key] = value
        logger.debug("merged, returning")
        return all_features
Example #17
    def _initMetaInfoText(self):
        ## meta-info display widgets
        plugin = pluginManager.getPluginByName(self.pluginName, category="TrackingExportFormats")
        self.metaInfoTextEdit.setHtml(plugin.description)
Example #18
    def _extract(self, image, labels):
        assert image.ndim == labels.ndim == 4, "Images must be 4D.  Shapes were: {} and {}".format(image.shape, labels.shape)

        class Axes(object):
            x = image.axistags.index('x')
            y = image.axistags.index('y')
            z = image.axistags.index('z')
            c = image.axistags.index('c')
        axes = Axes()

        image = np.asarray(image, dtype=np.float32)
        labels = np.asarray(labels, dtype=np.uint32)

        slc3d = [slice(None)] * 4 # FIXME: do not hardcode
        slc3d[axes.c] = 0

        labels = labels[slc3d]

        extrafeats = vigra.analysis.extractRegionFeatures(image[slc3d], labels,
                                                          gui_features,
                                                          ignoreLabel=0)
        mincoords = extrafeats["Coord<Minimum >"]
        maxcoords = extrafeats["Coord<Maximum >"]
        nobj = mincoords.shape[0]

        feature_names = self.Features([]).wait()

        # do global features
        global_features = defaultdict(list)
        for plugin_name, feature_list in feature_names.iteritems():
            plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
            feats = plugin.plugin_object.compute_global(image, labels, feature_list, axes)
            global_features = dict(global_features.items() + feats.items())

        # local features: loop over all objects
        def dictextend(a, b):
            for key in b:
                a[key].append(b[key])
            return a

        local_features = defaultdict(list)
        for i in range(1, nobj):
            print "processing object {}".format(i)
            mins, maxs = self.compute_minmax(i, image, mincoords, maxcoords, axes)
            rawbbox = self.compute_rawbbox(image, mins, maxs, axes)
            label_bboxes = self.compute_label_bboxes(i, labels, mins, maxs, axes)

            for plugin_name, feature_list in feature_names.iteritems():
                plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
                feats = plugin.plugin_object.compute_local(rawbbox, label_bboxes, feature_list, axes, mins, maxs)
                local_features = dictextend(local_features, feats)

        for key in local_features.keys():
            value = local_features[key]
            try:
                local_features[key] = np.vstack(list(v.reshape(1, -1) for v in value))
            except:
                print 'warning: feature {} failed'.format(key)
                del local_features[key]

        all_features = dict(global_features.items() + local_features.items())

        for key, value in all_features.iteritems():
            if value.shape[0] != nobj - 1:
                raise Exception('feature {} does not have enough rows'.format(key))

            # because object classification operator expects nobj to
            # include background. we should change that assumption.
            value = np.vstack((np.zeros(value.shape[1]),
                               value))

            value = value.astype(np.float32)

            assert value.dtype == np.float32
            assert value.shape[0] == nobj
            assert value.ndim == 2

            all_features[key] = value

        # add features needed by downstream applets. these should be
        # removed before classification.
        extrafeats = dict((k.replace(' ', '') + gui_features_suffix, v)
                          for k, v in extrafeats.iteritems())

        return dict(all_features.items() + extrafeats.items())
Example #19
    def post_process_lane_export(self, lane_index, checkOverwriteFiles=False):
        # FIXME: This probably only works for the non-blockwise export slot.
        #        We should assert that the user isn't using the blockwise slot.

        # Plugin export if selected
        logger.info("Export source is: " + self.dataExportTrackingApplet.
                    topLevelOperator.SelectedExportSource.value)

        logger.debug("in post_process_lane_export")
        if self.dataExportTrackingApplet.topLevelOperator.SelectedExportSource.value == OpTrackingBaseDataExport.PluginOnlyName:
            logger.info("Export source plugin selected!")
            selectedPlugin = self.dataExportTrackingApplet.topLevelOperator.SelectedPlugin.value

            exportPluginInfo = pluginManager.getPluginByName(
                selectedPlugin, category="TrackingExportFormats")
            if exportPluginInfo is None:
                logger.error("Could not find selected plugin %s", selectedPlugin)
            else:
                exportPlugin = exportPluginInfo.plugin_object
                logger.info("Exporting tracking result using %s" %
                            selectedPlugin)
                name_format = self.dataExportTrackingApplet.topLevelOperator.getLane(
                    lane_index).OutputFilenameFormat.value
                partially_formatted_name = self.getPartiallyFormattedName(
                    lane_index, name_format)

                if exportPlugin.exportsToFile:
                    filename = partially_formatted_name
                    if os.path.basename(filename) == '':
                        filename = os.path.join(filename, 'pluginExport.txt')
                else:
                    filename = os.path.dirname(partially_formatted_name)

                if filename is None or len(str(filename)) == 0:
                    logger.error(
                        "Cannot export from plugin with empty output filename")
                    return

                exportStatus = self.trackingApplet.topLevelOperator.getLane(
                    lane_index).exportPlugin(filename, exportPlugin,
                                             checkOverwriteFiles)
                if not exportStatus:
                    return False
                logger.info("Export done")

            return

        # CSV Table export (only if plugin was not selected)
        settings, selected_features = self.trackingApplet.topLevelOperator.getLane(
            lane_index).get_table_export_settings()
        from lazyflow.utility import PathComponents, make_absolute, format_known_keys

        if settings:
            self.dataExportTrackingApplet.progressSignal.emit(-1)
            raw_dataset_info = self.dataSelectionApplet.topLevelOperator.DatasetGroup[
                lane_index][0].value

            project_path = self.shell.projectManager.currentProjectPath
            project_dir = os.path.dirname(project_path)
            dataset_dir = PathComponents(
                raw_dataset_info.filePath).externalDirectory
            abs_dataset_dir = make_absolute(dataset_dir, cwd=project_dir)

            known_keys = {}
            known_keys['dataset_dir'] = abs_dataset_dir
            nickname = raw_dataset_info.nickname.replace('*', '')
            if os.path.pathsep in nickname:
                nickname = PathComponents(nickname.split(
                    os.path.pathsep)[0]).fileNameBase
            known_keys['nickname'] = nickname

            # use partial formatting to fill in non-coordinate name fields
            name_format = settings['file path']
            partially_formatted_name = format_known_keys(
                name_format, known_keys)
            settings['file path'] = partially_formatted_name

            req = self.trackingApplet.topLevelOperator.getLane(
                lane_index
            ).export_object_data(
                lane_index,
                # FIXME: Even in non-headless mode, we can't show the gui because we're running in a non-main thread.
                #        That's not a huge deal, because there's still a progress bar for the overall export.
                show_gui=False)

            req.wait()
            self.dataExportTrackingApplet.progressSignal.emit(100)
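
The CSV branch above only fills in the name fields it already knows ({dataset_dir}, {nickname}) and leaves the remaining placeholders for the export operator. A rough, self-contained illustration of that partial-formatting idea; partial_format is a hypothetical stand-in for lazyflow.utility.format_known_keys, whose exact behaviour may differ:

import string

def partial_format(name_format, known_keys):
    # fill in the fields we already know, leave the rest (e.g. coordinate
    # ranges) untouched for a later formatting pass
    class _Keep(dict):
        def __missing__(self, key):
            return "{" + key + "}"
    return string.Formatter().vformat(name_format, (), _Keep(known_keys))

name_format = "{dataset_dir}/{nickname}_tracking_{t_start}-{t_end}.csv"
known_keys = {"dataset_dir": "/data/project", "nickname": "movie01"}
print(partial_format(name_format, known_keys))
# /data/project/movie01_tracking_{t_start}-{t_end}.csv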
    def post_process_lane_export(self, lane_index, checkOverwriteFiles=False):
        # `checkOverwriteFiles` ensures we only warn once about files that would be overwritten:
        # when it is True, the MessageBox pops up and nothing is exported; on the next click of
        # the export button it is passed as False, so the export really happens. The default of
        # False keeps headless mode writing out unconditionally.

        # FIXME: This probably only works for the non-blockwise export slot.
        #        We should assert that the user isn't using the blockwise slot.

        # Plugin export if selected
        logger.info(
            "Export source is: " +
            self.dataExportApplet.topLevelOperator.SelectedExportSource.value)

        if self.dataExportApplet.topLevelOperator.SelectedExportSource.value == OpTrackingBaseDataExport.PluginOnlyName:
            logger.info("Export source plugin selected!")
            selectedPlugin = self.dataExportApplet.topLevelOperator.SelectedPlugin.value

            exportPluginInfo = pluginManager.getPluginByName(
                selectedPlugin, category="TrackingExportFormats")
            if exportPluginInfo is None:
                logger.error("Could not find selected plugin %s", selectedPlugin)
            else:
                exportPlugin = exportPluginInfo.plugin_object
                logger.info("Exporting tracking result using %s" %
                            selectedPlugin)
                name_format = self.dataExportApplet.topLevelOperator.getLane(
                    lane_index).OutputFilenameFormat.value
                partially_formatted_name = self.getPartiallyFormattedName(
                    lane_index, name_format)

                if exportPlugin.exportsToFile:
                    filename = partially_formatted_name
                    if os.path.basename(filename) == '':
                        filename = os.path.join(filename, 'pluginExport.txt')
                else:
                    filename = os.path.dirname(partially_formatted_name)

                if filename is None or len(str(filename)) == 0:
                    logger.error(
                        "Cannot export from plugin with empty output filename")
                    return

                exportStatus = self.trackingApplet.topLevelOperator.getLane(
                    lane_index).exportPlugin(filename, exportPlugin,
                                             checkOverwriteFiles)
                if not exportStatus:
                    return False
                logger.info("Export done")

            return

        # CSV Table export (only if plugin was not selected)
        settings, selected_features = self.trackingApplet.topLevelOperator.getLane(
            lane_index).get_table_export_settings()
        if settings:
            self.dataExportApplet.progressSignal.emit(0)
            name_format = settings['file path']
            partially_formatted_name = self.getPartiallyFormattedName(
                lane_index, name_format)
            settings['file path'] = partially_formatted_name

            req = self.trackingApplet.topLevelOperator.getLane(
                lane_index
            ).export_object_data(
                lane_index,
                # FIXME: Even in non-headless mode, we can't show the gui because we're running in a non-main thread.
                #        That's not a huge deal, because there's still a progress bar for the overall export.
                show_gui=False)

            req.wait()
            self.dataExportApplet.progressSignal.emit(100)

            # Restore option to bypass cache to false
            self.objectExtractionApplet.topLevelOperator[
                lane_index].BypassModeEnabled.setValue(False)

            # Restore state of axis ranges
            parameters = self.trackingApplet.topLevelOperator.Parameters.value
            parameters['time_range'] = self.prev_time_range
            parameters['x_range'] = self.prev_x_range
            parameters['y_range'] = self.prev_y_range
            parameters['z_range'] = self.prev_z_range
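
Both post_process_lane_export variants above derive the plugin output path the same way: plugins that export to a file keep the formatted name (falling back to pluginExport.txt when only a directory was given), while directory-style plugins receive the containing directory. A minimal sketch of just that rule; resolve_plugin_filename is a made-up helper name, not part of ilastik:

import os

def resolve_plugin_filename(partially_formatted_name, exports_to_file):
    # mirrors the branching in the export code above
    if exports_to_file:
        filename = partially_formatted_name
        if os.path.basename(filename) == '':
            # only a directory was given; pick a default file name inside it
            filename = os.path.join(filename, 'pluginExport.txt')
    else:
        # directory-style exporters receive the containing directory instead
        filename = os.path.dirname(partially_formatted_name)
    return filename

print(resolve_plugin_filename('/out/lane0/', True))        # /out/lane0/pluginExport.txt
print(resolve_plugin_filename('/out/lane0/result', False)) # /out/lane0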
    def _initMetaInfoText(self):
        ## meta-info display widgets
        plugin = pluginManager.getPluginByName(
            self.pluginName, category="TrackingExportFormats")
        self.metaInfoTextEdit.setHtml(plugin.description)
    def populate(self):
        #self.ui.treeWidget.setColumnCount(2)
        for pluginName, features in self.featureDict.items():
            if pluginName == "TestFeatures" and not ilastik_config.getboolean(
                    "ilastik", "debug"):
                continue
            parent = QTreeWidgetItem(self.ui.treeWidget)
            parent.setText(0, pluginName)

            parent.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
            # hack to ensure checkboxes visible
            parent.setCheckState(0, Qt.Checked)
            parent.setCheckState(0, Qt.Unchecked)
            parent.setExpanded(False)
            self.countChecked[pluginName] = 0
            self.countAll[pluginName] = len(self.featureDict[pluginName])

            advanced_names = []
            simple_names = []
            selected_names = []

            groups = set()
            plugin = pluginManager.getPluginByName(pluginName,
                                                   "ObjectFeatures")
            features_with_props = deepcopy(features)
            if plugin is not None:
                plugin.plugin_object.fill_properties(features_with_props)

            for name in sorted(features.keys()):
                parameters = features[name]

                for prop, prop_value in features_with_props[name].items():
                    if prop not in parameters:
                        # this property has not been added yet (perhaps the feature dictionary has been read from file)
                        # set it now
                        parameters[prop] = prop_value

                try:
                    if parameters['advanced'] is True:
                        advanced_names.append(name)
                    else:
                        simple_names.append(name)
                except KeyError:
                    simple_names.append(name)
                try:
                    groups.add(parameters["group"])
                except KeyError:
                    pass

                if pluginName in self.selectedFeatures:
                    if name in self.selectedFeatures[pluginName]:
                        selected_names.append(name)
            gr_items = {}
            for gr in groups:
                gr_items[gr] = QTreeWidgetItem(parent)
                gr_items[gr].setText(0, gr)
                #gr_items[gr].setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
                gr_items[gr].setExpanded(True)

            for name in simple_names + advanced_names:
                if name in advanced_names and name not in selected_names:
                    # do not display advanced features, if they have not been selected previously
                    continue
                parameters = features[name]
                if "group" in parameters:
                    item = QTreeWidgetItem(gr_items[parameters["group"]])
                    item.group_name = parameters["group"]
                else:
                    item = QTreeWidgetItem(parent)
                if 'displaytext' in parameters:
                    itemtext = parameters['displaytext']
                else:
                    itemtext = name
                item.setText(0, itemtext)
                item.feature_id = name

                item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
                if 'tooltip' in parameters:
                    item.setToolTip(0, parameters['tooltip'])

                # hack to ensure checkboxes visible
                item.setCheckState(0, Qt.Checked)
                item.setCheckState(0, Qt.Unchecked)
                if name in selected_names:
                    item.setCheckState(0, Qt.Checked)
                    self.countChecked[pluginName] += 1
            if self.countChecked[pluginName] == 0:
                parent.setCheckState(0, Qt.Unchecked)
            elif self.countChecked[pluginName] == self.countAll[pluginName]:
                parent.setCheckState(0, Qt.Checked)
            else:
                parent.setCheckState(0, Qt.PartiallyChecked)
            self.updateToolTip(parent)
        # facilitates switching of the CheckBox when clicking on the Text of a QTreeWidgetItem
        self.ui.treeWidget.setCurrentItem(None)
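
populate() above keeps each plugin's parent item in sync with its children by counting how many features are checked. A small sketch of that tri-state rule; parent_check_state is a made-up helper, only the PyQt5 Qt.CheckState constants are real:

from PyQt5.QtCore import Qt

def parent_check_state(count_checked, count_all):
    # unchecked if nothing is selected, checked if everything is,
    # partially checked otherwise -- the rule applied to each plugin row
    if count_checked == 0:
        return Qt.Unchecked
    if count_checked == count_all:
        return Qt.Checked
    return Qt.PartiallyChecked

assert parent_check_state(0, 5) == Qt.Unchecked
assert parent_check_state(5, 5) == Qt.Checked
assert parent_check_state(2, 5) == Qt.PartiallyChecked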
Exemplo n.º 23
0
def flatten_ilastik_feature_table(table, selection, signal):
    selection = list(selection)
    frames = table.meta.shape[0]

    logger.info('Fetching object features for feature table...')
    computed_feature = table([]).wait()
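    # computed_feature: {time step: {plugin name: {feature name: ndarray with one row per object, incl. background}}}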

    signal(0)

    feature_long_names = [] # For example, "Size in Pixels"
    feature_short_names = [] # For example, "Count"
    feature_plugins = []
    feature_channels = []
    feature_types = []

    for plugin_name, feature_dict in computed_feature[0].items():
        all_props = None
        
        if plugin_name == default_features_key:
            plugin = pluginManager.getPluginByName("Standard Object Features", "ObjectFeatures")
        else:
            plugin = pluginManager.getPluginByName(plugin_name, "ObjectFeatures")
        if plugin:
            plugin_feature_names = {el:{} for el in list(feature_dict.keys())}
            all_props = plugin.plugin_object.fill_properties(plugin_feature_names) #fill in display name and such

        for feat_name, feat_array in feature_dict.items():
            if all_props:
                long_name = all_props[feat_name]["displaytext"]
            else:
                long_name = feat_name
            if (plugin_name == default_features_key
                    or long_name in selection
                    or feat_name in selection) and long_name not in feature_long_names:
                feature_long_names.append(long_name)
                feature_short_names.append(feat_name)
                feature_plugins.append(plugin_name)
                feature_channels.append(feat_array.shape[1])
                feature_types.append(feat_array.dtype)

    signal(25)

    obj_count = []
    for t, cf in computed_feature.items():
        obj_count.append(cf[default_features_key]["Count"].shape[0] - 1)  # no background

    signal(50)

    dtype_names = []
    dtype_types = []
    dtype_to_key = {}

    for i, name in enumerate(feature_long_names):
        if feature_channels[i] > 1:
            for c in range(feature_channels[i]):
                dtype_names.append("%s_%i" % (name, c))
                dtype_types.append(feature_types[i].name)
                dtype_to_key[dtype_names[-1]] = (feature_plugins[i], feature_short_names[i], c)
        else:
            dtype_names.append(name)
            dtype_types.append(feature_types[i].name)
            dtype_to_key[dtype_names[-1]] = (feature_plugins[i], feature_short_names[i], 0)

    feature_table = np.zeros((sum(obj_count),), dtype=",".join(dtype_types))
    feature_table.dtype.names = list(map(str, dtype_names))

    signal(75)

    start = 0
    end = obj_count[0]
    for t, cf in computed_feature.items():
        for name in dtype_names:
            plugin, feat_name, index = dtype_to_key[name]
            data_len = len(cf[plugin][feat_name][1:, index])
            feature_table[name][start:start + data_len] = cf[plugin][feat_name][1:, index]
        start = end
        try:
            end += obj_count[int(t) + 1]
        except IndexError:
            end = sum(obj_count)

    signal(100)

    return feature_table
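
The table returned above is a numpy structured array: one named dtype field per (feature, channel) pair and one row per object. A short sketch of that construction with invented feature names and values:

import numpy as np

# two features, three objects; "Count" has one channel, "RegionCenter" has two
dtype_names = ["Count", "RegionCenter_0", "RegionCenter_1"]
dtype_types = ["float64", "float64", "float64"]

feature_table = np.zeros((3,), dtype=",".join(dtype_types))
feature_table.dtype.names = dtype_names

feature_table["Count"] = [10, 42, 7]
feature_table["RegionCenter_0"] = [1.5, 20.0, 3.25]
feature_table["RegionCenter_1"] = [2.5, 18.0, 9.75]

print(feature_table.dtype.names)  # ('Count', 'RegionCenter_0', 'RegionCenter_1')
print(feature_table[1])           # (42., 20., 18.)

Renaming the fields via dtype.names after creation mirrors what flatten_ilastik_feature_table does with its dtype_names list.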