Example #1
    def setupOutputs(self):
        if self._opReader is not None:
            self.Output.disconnect()
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None

        try:
            # Configure the reader
            dataReady = True
            self._opReader = OpInputDataReader(parent=self)
            self._opReader.WorkingDirectory.setValue(
                self.WorkingDirectory.value)
            self._opReader.FilePath.setValue(self.DatasetPath.value)

            # Since most file formats don't save meta-info,
            # the reader output's axis order may be incorrect.
            # (For example, if we export in npy format with zxy order,
            #  the Npy reader op will simply assume xyz order when it reads the data.)

            # Force the metadata back to the correct state by copying select items from Input.meta
            metadata = {}
            metadata['axistags'] = self.Input.meta.axistags
            metadata['drange'] = self.Input.meta.drange
            metadata['display_mode'] = self.Input.meta.display_mode
            self._opMetadataInjector = OpMetadataInjector(parent=self)
            self._opMetadataInjector.Input.connect(self._opReader.Output)
            self._opMetadataInjector.Metadata.setValue(metadata)

            dataReady &= self._opMetadataInjector.Output.meta.shape == self.Input.meta.shape
            dataReady &= self._opMetadataInjector.Output.meta.dtype == self.Input.meta.dtype
            if dataReady:
                self.Output.connect(self._opMetadataInjector.Output)
            else:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
                self._opReader.cleanUp()
                self._opReader = None
                self.Output.meta.NOTREADY = True

        #except OpInputDataReader.DatasetReadError:
        except Exception as ex:
            #logger.debug( "On-disk image can't be read: {}".format(ex) )
            # Note: If the data is exported as a 'sequence', then this will always be NOTREADY
            #       because the 'path' (e.g. 'myfile_{slice_index}.png') will be nonexistent.
            #       That's okay because a stack is probably too slow to be of use for a preview anyway.
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            if self._opReader:
                self._opReader.cleanUp()
                self._opReader = None
            # The dataset doesn't exist yet.
            self.Output.meta.NOTREADY = True
    def test(self):
        g = lazyflow.graph.Graph()
        op = OpMetadataInjector(graph=g)

        additionalMetadata = {"layertype": 7}
        op.Metadata.setValue(additionalMetadata)
        op.Input.setValue("Hello")

        # Output value should match input value
        assert op.Output.value == op.Input.value

        # Make sure all input metadata was copied to the output
        assert all((k, v) in op.Output.meta.items()
                   for k, v in op.Input.meta.items())

        # Check that the additional metadata was added to the output
        assert op.Output.meta.layertype == 7

        # Make sure dirtiness gets propagated to the output.
        dirtyList = []

        def handleDirty(*args):
            dirtyList.append(True)

        op.Output.notifyDirty(handleDirty)
        op.Input.setValue(8)
        assert len(dirtyList) == 1
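
The test above captures OpMetadataInjector's whole contract: output data equals input data, and the Metadata dict is merged over the input's meta. Below is a minimal standalone sketch of the same pattern. It assumes lazyflow and vigra are installed and that OpMetadataInjector is importable from lazyflow.operators (the usual location, though import paths can vary between lazyflow versions):

# Minimal sketch (see assumptions above): override the axis tags and display
# range of data flowing through the graph, leaving all other metadata intact.
import numpy
import vigra
import lazyflow.graph
from lazyflow.operators import OpMetadataInjector  # assumed import path

graph = lazyflow.graph.Graph()
op = OpMetadataInjector(graph=graph)
op.Input.setValue(numpy.zeros((10, 20, 30), dtype=numpy.uint8))
op.Metadata.setValue({
    'axistags': vigra.defaultAxistags('zyx'),  # correct a wrongly guessed order
    'drange': (0, 255),                        # display-range hint for the GUI
})

assert op.Output.meta.drange == (0, 255)  # injected item is visible downstream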
Example #4
    def get_provider_slot(
        self, meta: Optional[Dict] = None, parent: Optional[Operator] = None, graph: Optional[Graph] = None
    ) -> OutputSlot:
        metadata = {"display_mode": self.display_mode, "axistags": self.axistags}
        metadata.update(meta or {})

        if self.drange is not None:
            metadata["drange"] = self.drange
        elif self.laneDtype == numpy.uint8:
            metadata["drange"] = (0, 255)
        if self.normalizeDisplay is not None:
            metadata["normalizeDisplay"] = self.normalizeDisplay
        if self.subvolume_roi is not None:
            metadata["subvolume_roi"] = self.subvolume_roi

        opMetadataInjector = OpMetadataInjector(parent=parent, graph=graph)
        opMetadataInjector.Input.connect(self.get_non_transposed_provider_slot(parent=parent, graph=graph))
        opMetadataInjector.Metadata.setValue(metadata)
        return opMetadataInjector.Output
    def __init__(self, *args, **kwargs):
        super(OpFormattedDataExport, self).__init__(*args, **kwargs)
        self._dirty = True

        opSubRegion = OpSubRegion(parent=self)
        opSubRegion.Input.connect(self.Input)
        self._opSubRegion = opSubRegion

        # If normalization parameters are provided, we inject a 'drange'
        #  metadata item for downstream operators/gui to use.
        opDrangeInjection = OpMetadataInjector(parent=self)
        opDrangeInjection.Input.connect(opSubRegion.Output)
        self._opDrangeInjection = opDrangeInjection

        # Normalization and dtype conversion are performed in one step
        #  using an OpPixelOperator.
        opNormalizeAndConvert = OpPixelOperator(parent=self)
        opNormalizeAndConvert.Input.connect(opDrangeInjection.Output)
        self._opNormalizeAndConvert = opNormalizeAndConvert

        # ConvertedImage shows the full result but WITHOUT axis reordering.
        self.ConvertedImage.connect(self._opNormalizeAndConvert.Output)

        opReorderAxes = OpReorderAxes(parent=self)
        opReorderAxes.Input.connect(opNormalizeAndConvert.Output)
        self._opReorderAxes = opReorderAxes

        self.ImageToExport.connect(opReorderAxes.Output)

        self._opExportSlot = OpExportSlot(parent=self)
        self._opExportSlot.Input.connect(opReorderAxes.Output)
        self._opExportSlot.OutputFormat.connect(self.OutputFormat)

        self.ExportPath.connect(self._opExportSlot.ExportPath)
        self.FormatSelectionErrorMsg.connect(
            self._opExportSlot.FormatSelectionErrorMsg)
        self.progressSignal = self._opExportSlot.progressSignal
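
The drange injection plus OpPixelOperator pairing above amounts to a linear renormalization followed by a dtype cast. A rough plain-numpy illustration of that step (an illustrative stand-in for the pipeline, not the operators' actual code):

import numpy

def normalize_and_convert(data, input_drange, output_drange, output_dtype):
    """Map input_drange linearly onto output_drange, then cast (illustrative)."""
    in_lo, in_hi = map(float, input_drange)
    out_lo, out_hi = map(float, output_drange)
    scale = (out_hi - out_lo) / (in_hi - in_lo)
    normalized = (data.astype(numpy.float64) - in_lo) * scale + out_lo
    return normalized.astype(output_dtype)

raw = numpy.array([0, 512, 1023], dtype=numpy.uint16)
print(normalize_and_convert(raw, (0, 1023), (0, 255), numpy.uint8))  # [0 127 255]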
Example #6
class OpImageOnDiskProvider(Operator):
    """
    This simply wraps a lazyflow OpInputDataReader, but ensures that the metadata 
    (axistags, drange) on the output matches the metadata from the original data 
    (even if the output file format doesn't support metadata fields).
    """
    TransactionSlot = InputSlot()
    Input = InputSlot() # Used for dtype and shape only. Data is always provided directly from the file.

    WorkingDirectory = InputSlot()
    DatasetPath = InputSlot() # A TOTAL path (possibly including a dataset name, e.g. myfile.h5/volume/data)
    Dirty = InputSlot()
    
    Output = OutputSlot()

    def __init__(self, *args, **kwargs):
        super( OpImageOnDiskProvider, self ).__init__(*args, **kwargs)
        self._opReader = None
        self._opMetadataInjector = None
    
    # Block diagram:
    #
    # (Input.axistags, Input.drange)
    #                               \  
    # DatasetPath ---> opReader ---> opMetadataInjector --> Output
    #                 /
    # WorkingDirectory
    
    def setupOutputs( self ):
        if self._opReader is not None:
            self.Output.disconnect()
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None

        try:
            # Configure the reader
            dataReady = True
            self._opReader = OpInputDataReader( parent=self )
            self._opReader.WorkingDirectory.setValue( self.WorkingDirectory.value )
            self._opReader.FilePath.setValue( self.DatasetPath.value )

            # Since most file formats don't save meta-info,
            # the reader output's axis order may be incorrect.
            # (For example, if we export in npy format with zxy order, 
            #  the Npy reader op will simply assume xyz order when it reads the data.)

            # Force the metadata back to the correct state by copying select items from Input.meta
            metadata = {}
            metadata['axistags'] = self.Input.meta.axistags
            metadata['drange'] = self.Input.meta.drange
            self._opMetadataInjector = OpMetadataInjector( parent=self )
            self._opMetadataInjector.Input.connect( self._opReader.Output )
            self._opMetadataInjector.Metadata.setValue( metadata )

            dataReady &= self._opMetadataInjector.Output.meta.shape == self.Input.meta.shape
            dataReady &= self._opMetadataInjector.Output.meta.dtype == self.Input.meta.dtype
            if dataReady:
                self.Output.connect( self._opMetadataInjector.Output )
            else:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
                self._opReader.cleanUp()
                self._opReader = None
                self.Output.meta.NOTREADY = True

        except OpInputDataReader.DatasetReadError:
            # Note: If the data is exported as a 'sequence', then this will always be NOTREADY
            #       because the 'path' (e.g. 'myfile_{slice_index}.png') will be nonexistent.
            #       That's okay because a stack is probably too slow to be of use for a preview anyway.
            if self._opMetadataInjector:
                self._opMetadataInjector.cleanUp()
                self._opMetadataInjector = None
            self._opReader.cleanUp()
            self._opReader = None
            # The dataset doesn't exist yet.
            self.Output.meta.NOTREADY = True

    def execute(self, slot, subindex, roi, result):
        assert False, "Output is supposed to be directly connected to an internal operator."

    def propagateDirty(self, slot, subindex, roi):
        if slot == self.Input:
            self.Output.setDirty( roi )
        else:
            self.Output.setDirty( slice(None) )
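
A hedged usage sketch for the operator above, following the block diagram. The paths and shapes are purely illustrative, and OpArrayPiper merely stands in for whatever upstream slot supplies the reference metadata:

# Illustrative wiring only; slot names match the class definition above.
import numpy
import vigra
import lazyflow.graph
from lazyflow.operators import OpArrayPiper  # assumed import path

graph = lazyflow.graph.Graph()

# Stand-in for the original data source (used for shape/dtype/axistags only).
data = vigra.taggedView(numpy.zeros((4, 5, 6), dtype=numpy.uint8), 'zyx')
upstream = OpArrayPiper(graph=graph)
upstream.Input.setValue(data)

opProvider = OpImageOnDiskProvider(graph=graph)
opProvider.Input.connect(upstream.Output)
opProvider.WorkingDirectory.setValue('/tmp')              # hypothetical directory
opProvider.DatasetPath.setValue('export.h5/volume/data')  # hypothetical "total" path
opProvider.TransactionSlot.setValue(True)
opProvider.Dirty.setValue(False)

# If the exported file exists and matches Input's shape/dtype, Output serves
# the on-disk data with corrected axistags/drange; otherwise it is NOTREADY.
if opProvider.Output.ready():
    preview = opProvider.Output[:].wait()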
Example #7
def import_labeling_layer(labelLayer, labelingSlots, parent_widget=None):
    """
    Prompt the user for layer import settings, and perform the layer import.
    :param labelLayer: The top label layer source
    :param labelingSlots: An instance of LabelingGui.LabelingSlots
    :param parent_widget: The Qt GUI parent object
    """
    writeSeeds = labelingSlots.labelInput
    assert isinstance(
        writeSeeds,
        lazyflow.graph.Slot), "slot is of type %r" % (type(writeSeeds))
    opLabels = writeSeeds.getRealOperator()
    assert isinstance(opLabels, lazyflow.graph.Operator
                      ), "slot's operator is of type %r" % (type(opLabels))

    recentlyImported = PreferencesManager().get('labeling',
                                                'recently imported')
    mostRecentProjectPath = PreferencesManager().get('shell',
                                                     'recently opened')
    mostRecentImageFile = PreferencesManager().get('DataSelection',
                                                   'recent image')
    if recentlyImported:
        defaultDirectory = os.path.split(recentlyImported)[0]
    elif mostRecentProjectPath:
        defaultDirectory = os.path.split(mostRecentProjectPath)[0]
    elif mostRecentImageFile:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    fileNames = DataSelectionGui.getImageFileNamesToOpen(
        parent_widget, defaultDirectory)
    fileNames = list(map(str, fileNames))

    if not fileNames:
        return

    PreferencesManager().set('labeling', 'recently imported', fileNames[0])

    try:
        # Initialize operators
        opImport = OpInputDataReader(parent=opLabels.parent)
        opCache = OpArrayCache(parent=opLabels.parent)
        opMetadataInjector = OpMetadataInjector(parent=opLabels.parent)
        opReorderAxes = OpReorderAxes(parent=opLabels.parent)

        # Set up the pipeline as follows:
        #
        #   opImport --> opCache --> opMetadataInjector --------> opReorderAxes --(inject via setInSlot)--> labelInput
        #                           /                            /
        #   User-specified axisorder    labelInput.meta.axistags

        opImport.WorkingDirectory.setValue(defaultDirectory)
        opImport.FilePath.setValue(fileNames[0] if len(fileNames) ==
                                   1 else os.path.pathsep.join(fileNames))
        assert opImport.Output.ready()

        opCache.blockShape.setValue(opImport.Output.meta.shape)
        opCache.Input.connect(opImport.Output)
        assert opCache.Output.ready()

        opMetadataInjector.Input.connect(opCache.Output)
        metadata = opCache.Output.meta.copy()
        opMetadataInjector.Metadata.setValue(metadata)
        opReorderAxes.Input.connect(opMetadataInjector.Output)

        # Transpose the axes for assignment to the labeling operator.
        opReorderAxes.AxisOrder.setValue(writeSeeds.meta.getAxisKeys())

        # We'll show a little window with a busy indicator while the data is loading
        busy_dlg = QProgressDialog(parent=parent_widget)
        busy_dlg.setLabelText("Importing Label Data...")
        busy_dlg.setCancelButton(None)
        busy_dlg.setMinimum(100)
        busy_dlg.setMaximum(100)

        def close_busy_dlg(*args):
            QApplication.postEvent(busy_dlg, QCloseEvent())

        # Load the data from file into our cache
        # When it's done loading, close the progress dialog.
        req = opCache.Output[:]
        req.notify_finished(close_busy_dlg)
        req.notify_failed(close_busy_dlg)
        req.submit()
        busy_dlg.exec_()

        readData = req.result

        maxLabels = len(labelingSlots.labelNames.value)

        # Can't use return_counts feature because that requires numpy >= 1.9
        #unique_read_labels, readLabelCounts = numpy.unique(readData, return_counts=True)

        # This does the same as the above, albeit slower, and probably with more RAM.
        unique_read_labels = numpy.unique(readData)
        readLabelCounts = numpy.bincount(readData.flat)[unique_read_labels]

        labelInfo = (maxLabels, (unique_read_labels, readLabelCounts))
        del readData

        # Ask the user how to interpret the data.
        settingsDlg = LabelImportOptionsDlg(parent_widget, fileNames,
                                            opMetadataInjector.Output,
                                            labelingSlots.labelInput,
                                            labelInfo)

        def handle_updated_axes():
            # The user is specifying a new interpretation of the file's axes
            updated_axisorder = str(settingsDlg.axesEdit.text())
            metadata = opMetadataInjector.Metadata.value.copy()
            metadata.axistags = vigra.defaultAxistags(updated_axisorder)
            opMetadataInjector.Metadata.setValue(metadata)

        settingsDlg.axesEdit.editingFinished.connect(handle_updated_axes)

        dlg_result = settingsDlg.exec_()
        if dlg_result != LabelImportOptionsDlg.Accepted:
            return

        # Get user's chosen label mapping from dlg
        labelMapping = settingsDlg.labelMapping

        # Get user's chosen offsets.
        # Offsets in dlg only include the file axes, not the 5D axes expected by the label input,
        # so expand them to full 5D
        axes_5d = opReorderAxes.Output.meta.getAxisKeys()
        tagged_offsets = collections.OrderedDict(
            zip(axes_5d, [0] * len(axes_5d)))
        tagged_offsets.update(
            dict(
                zip(opMetadataInjector.Output.meta.getAxisKeys(),
                    settingsDlg.imageOffsets)))
        imageOffsets = list(tagged_offsets.values())

        # Optimization if mapping is identity
        if list(labelMapping.keys()) == list(labelMapping.values()):
            labelMapping = None

        # This will be fast (it's already cached)
        label_data = opReorderAxes.Output[:].wait()

        # Map input labels to output labels
        if labelMapping:
            # There are other ways to do a relabeling (e.g. skimage.segmentation.relabel_sequential).
            # But this supports potentially huge values of unique_read_labels (in the billions),
            # without needing GB of RAM.
            mapping_indexes = numpy.searchsorted(unique_read_labels,
                                                 label_data)
            new_labels = numpy.array(
                [labelMapping[x] for x in unique_read_labels])
            label_data[:] = new_labels[mapping_indexes]

        label_roi = numpy.array(roiFromShape(opReorderAxes.Output.meta.shape))
        label_roi += imageOffsets
        label_slice = roiToSlice(*label_roi)
        writeSeeds[label_slice] = label_data

    finally:
        opReorderAxes.cleanUp()
        opMetadataInjector.cleanUp()
        opCache.cleanUp()
        opImport.cleanUp()
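
The searchsorted-based relabeling at the end of this function is worth seeing in isolation. A self-contained sketch of the same trick: each pixel is mapped to its index within the sorted unique labels, then the replacement values are gathered, so memory scales with the number of distinct labels rather than with their numeric range.

import numpy

label_data = numpy.array([[0, 7, 7], [42, 0, 7]], dtype=numpy.uint32)
unique_read_labels = numpy.unique(label_data)  # [ 0  7 42]
label_mapping = {0: 0, 7: 1, 42: 2}            # the user's chosen mapping

# Index of each pixel's value within the sorted unique labels...
mapping_indexes = numpy.searchsorted(unique_read_labels, label_data)
# ...then gather the replacement value for each unique label.
new_labels = numpy.array([label_mapping[x] for x in unique_read_labels])
label_data[:] = new_labels[mapping_indexes]
print(label_data)  # [[0 1 1]
                   #  [2 0 1]]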
Example #8
def import_labeling_layer(labelLayer, labelingSlots, parent_widget=None):
    """
    Prompt the user for layer import settings, and perform the layer import.
    :param labelLayer: The top label layer source
    :param labelingSlots: An instance of LabelingGui.LabelingSlots
    :param parent_widget: The Qt GUI parent object
    """
    writeSeeds = labelingSlots.labelInput
    assert isinstance(writeSeeds, lazyflow.graph.Slot), "slot is of type %r" % (type(writeSeeds))
    opLabels = writeSeeds.getRealOperator()
    assert isinstance(opLabels, lazyflow.graph.Operator), "slot's operator is of type %r" % (type(opLabels))


    recentlyImported = PreferencesManager().get('labeling', 'recently imported')
    mostRecentProjectPath = PreferencesManager().get('shell', 'recently opened')
    mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )
    if recentlyImported:
        defaultDirectory = os.path.split(recentlyImported)[0]
    elif mostRecentProjectPath:
        defaultDirectory = os.path.split(mostRecentProjectPath)[0]
    elif mostRecentImageFile:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    fileNames = DataSelectionGui.getImageFileNamesToOpen(parent_widget, defaultDirectory)
    fileNames = list(map(str, fileNames))

    if not fileNames:
        return

    PreferencesManager().set('labeling', 'recently imported', fileNames[0])

    try:
        # Initialize operators
        opImport = OpInputDataReader( parent=opLabels.parent )
        opCache = OpArrayCache( parent=opLabels.parent )
        opMetadataInjector = OpMetadataInjector( parent=opLabels.parent )
        opReorderAxes = OpReorderAxes( parent=opLabels.parent )
    
        # Set up the pipeline as follows:
        #
        #   opImport --> opCache --> opMetadataInjector --------> opReorderAxes --(inject via setInSlot)--> labelInput
        #                           /                            /
        #   User-specified axisorder    labelInput.meta.axistags
    
        opImport.WorkingDirectory.setValue(defaultDirectory)
        opImport.FilePath.setValue(fileNames[0] if len(fileNames) == 1 else
                                   os.path.pathsep.join(fileNames))
        assert opImport.Output.ready()
    
        opCache.blockShape.setValue( opImport.Output.meta.shape )
        opCache.Input.connect( opImport.Output )
        assert opCache.Output.ready()

        opMetadataInjector.Input.connect( opCache.Output )
        metadata = opCache.Output.meta.copy()
        opMetadataInjector.Metadata.setValue( metadata )
        opReorderAxes.Input.connect( opMetadataInjector.Output )

        # Transpose the axes for assignment to the labeling operator.
        opReorderAxes.AxisOrder.setValue( writeSeeds.meta.getAxisKeys() )
    
        # We'll show a little window with a busy indicator while the data is loading
        busy_dlg = QProgressDialog(parent=parent_widget)
        busy_dlg.setLabelText("Importing Label Data...")
        busy_dlg.setCancelButton(None)
        busy_dlg.setMinimum(100)
        busy_dlg.setMaximum(100)
        def close_busy_dlg(*args):
            QApplication.postEvent(busy_dlg, QCloseEvent())
    
        # Load the data from file into our cache
        # When it's done loading, close the progress dialog.
        req = opCache.Output[:]
        req.notify_finished( close_busy_dlg )
        req.notify_failed( close_busy_dlg )
        req.submit()
        busy_dlg.exec_()

        readData = req.result
        
        maxLabels = len(labelingSlots.labelNames.value)

        # Can't use return_counts feature because that requires numpy >= 1.9
        #unique_read_labels, readLabelCounts = numpy.unique(readData, return_counts=True)

        # This does the same as the above, albeit slower, and probably with more RAM.
        unique_read_labels = numpy.unique(readData)
        readLabelCounts = vigra_bincount(readData)[unique_read_labels]

        labelInfo = (maxLabels, (unique_read_labels, readLabelCounts))
        del readData
    
        # Ask the user how to interpret the data.
        settingsDlg = LabelImportOptionsDlg( parent_widget,
                                             fileNames, opMetadataInjector.Output,
                                             labelingSlots.labelInput, labelInfo )

        def handle_updated_axes():
            # The user is specifying a new interpretation of the file's axes
            updated_axisorder = str(settingsDlg.axesEdit.text())
            metadata = opMetadataInjector.Metadata.value.copy()
            metadata.axistags = vigra.defaultAxistags(updated_axisorder)
            opMetadataInjector.Metadata.setValue( metadata )
            
            if opReorderAxes._invalid_axes:
                settingsDlg.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
                # Red background
                settingsDlg.axesEdit.setStyleSheet("QLineEdit { background: rgb(255, 128, 128);"
                                                   "selection-background-color: rgb(128, 128, 255); }")
        settingsDlg.axesEdit.editingFinished.connect( handle_updated_axes )
        
        # Initialize
        handle_updated_axes()

        dlg_result = settingsDlg.exec_()
        if dlg_result != LabelImportOptionsDlg.Accepted:
            return

        # Get user's chosen label mapping from dlg
        labelMapping = settingsDlg.labelMapping    

        # Get user's chosen offsets.
        # Offsets in dlg only include the file axes, not the 5D axes expected by the label input,
        # so expand them to full 5D 
        axes_5d = opReorderAxes.Output.meta.getAxisKeys()
        tagged_offsets = collections.OrderedDict( zip( axes_5d, [0]*len(axes_5d) ) )
        tagged_offsets.update( dict( zip( opMetadataInjector.Output.meta.getAxisKeys(), settingsDlg.imageOffsets ) ) )
        imageOffsets = list(tagged_offsets.values())

        # Optimization if mapping is identity
        if list(labelMapping.keys()) == list(labelMapping.values()):
            labelMapping = None

        # This will be fast (it's already cached)
        label_data = opReorderAxes.Output[:].wait()
        
        # Map input labels to output labels
        if labelMapping:
            # There are other ways to do a relabeling (e.g. skimage.segmentation.relabel_sequential).
            # But this supports potentially huge values of unique_read_labels (in the billions),
            # without needing GB of RAM.
            mapping_indexes = numpy.searchsorted(unique_read_labels, label_data)
            new_labels = numpy.array([labelMapping[x] for x in unique_read_labels])
            label_data[:] = new_labels[mapping_indexes]

        label_roi = numpy.array( roiFromShape(opReorderAxes.Output.meta.shape) )
        label_roi += imageOffsets
        label_slice = roiToSlice(*label_roi)
        writeSeeds[label_slice] = label_data

    finally:
        opReorderAxes.cleanUp()
        opMetadataInjector.cleanUp()
        opCache.cleanUp()
        opImport.cleanUp()
Example #9
    def connectLane(self, laneIndex):
        # Get a handle to each operator
        opData = self.dataSelectionApplet.topLevelOperator.getLane(laneIndex)
        opFirstFeatures = self.featureSelectionApplets[
            0].topLevelOperator.getLane(laneIndex)
        opFirstClassify = self.pcApplets[0].topLevelOperator.getLane(laneIndex)
        opFinalClassify = self.pcApplets[-1].topLevelOperator.getLane(
            laneIndex)
        opDataExport = self.dataExportApplet.topLevelOperator.getLane(
            laneIndex)

        def checkConstraints(*_):
            # if (opData.Image.meta.dtype in [np.uint8, np.uint16]) == False:
            #    msg = "The Autocontext Workflow only supports 8-bit images (UINT8 pixel type)\n"\
            #          "or 16-bit images (UINT16 pixel type)\n"\
            #          "Your image has a pixel type of {}.  Please convert your data to UINT8 and try again."\
            #          .format( str(np.dtype(opData.Image.meta.dtype)) )
            #    raise DatasetConstraintError( "Autocontext Workflow", msg, unfixable=True )
            pass

        opData.Image.notifyReady(checkConstraints)

        # Input Image -> Feature Op
        #         and -> Classification Op (for display)
        opFirstFeatures.InputImage.connect(opData.Image)
        opFirstClassify.InputImages.connect(opData.Image)

        # Feature Images -> Classification Op (for training, prediction)
        opFirstClassify.FeatureImages.connect(opFirstFeatures.OutputImage)
        opFirstClassify.CachedFeatureImages.connect(
            opFirstFeatures.CachedOutputImage)

        upstreamPcApplets = self.pcApplets[0:-1]
        downstreamFeatureApplets = self.featureSelectionApplets[1:]
        downstreamPcApplets = self.pcApplets[1:]

        for (upstreamPcApplet, downstreamFeaturesApplet,
             downstreamPcApplet) in zip(upstreamPcApplets,
                                        downstreamFeatureApplets,
                                        downstreamPcApplets):

            opUpstreamClassify = upstreamPcApplet.topLevelOperator.getLane(
                laneIndex)
            opDownstreamFeatures = downstreamFeaturesApplet.topLevelOperator.getLane(
                laneIndex)
            opDownstreamClassify = downstreamPcApplet.topLevelOperator.getLane(
                laneIndex)

            # Connect label inputs (all are connected together).
            # opDownstreamClassify.LabelInputs.connect( opUpstreamClassify.LabelInputs )

            # Connect data path
            assert opData.Image.meta.dtype == opUpstreamClassify.PredictionProbabilitiesAutocontext.meta.dtype, (
                "Probability dtype needs to match up with input image dtype, got: "
                f"input: {opData.Image.meta.dtype} "
                f"probabilities: {opUpstreamClassify.PredictionProbabilitiesAutocontext.meta.dtype}"
            )
            opStacker = OpMultiArrayStacker(parent=self)
            opStacker.Images.resize(2)
            opStacker.Images[0].connect(opData.Image)
            opStacker.Images[1].connect(
                opUpstreamClassify.PredictionProbabilitiesAutocontext)
            opStacker.AxisFlag.setValue("c")

            opDownstreamFeatures.InputImage.connect(opStacker.Output)
            opDownstreamClassify.InputImages.connect(opStacker.Output)
            opDownstreamClassify.FeatureImages.connect(
                opDownstreamFeatures.OutputImage)
            opDownstreamClassify.CachedFeatureImages.connect(
                opDownstreamFeatures.CachedOutputImage)

        # Data Export connections
        opDataExport.RawData.connect(opData.ImageGroup[self.DATA_ROLE_RAW])
        opDataExport.RawDatasetInfo.connect(
            opData.DatasetGroup[self.DATA_ROLE_RAW])
        opDataExport.ConstraintDataset.connect(
            opData.ImageGroup[self.DATA_ROLE_RAW])

        opDataExport.Inputs.resize(len(self.EXPORT_NAMES))
        for reverse_stage_index, (stage_index, pcApplet) in enumerate(
                reversed(list(enumerate(self.pcApplets)))):
            opPc = pcApplet.topLevelOperator.getLane(laneIndex)
            num_items_per_stage = len(self.EXPORT_NAMES_PER_STAGE)
            opDataExport.Inputs[num_items_per_stage * reverse_stage_index +
                                0].connect(
                                    opPc.HeadlessPredictionProbabilities)
            opDataExport.Inputs[num_items_per_stage * reverse_stage_index +
                                1].connect(opPc.SimpleSegmentation)
            opDataExport.Inputs[num_items_per_stage * reverse_stage_index +
                                2].connect(opPc.HeadlessUncertaintyEstimate)
            opDataExport.Inputs[num_items_per_stage * reverse_stage_index +
                                3].connect(opPc.FeatureImages)
            opDataExport.Inputs[num_items_per_stage * reverse_stage_index +
                                4].connect(opPc.LabelImages)
            opDataExport.Inputs[
                num_items_per_stage * reverse_stage_index + 5].connect(
                    opPc.InputImages
                )  # Input must come last due to an assumption in PixelClassificationDataExportGui

        # One last export slot for all probabilities, all stages
        opAllStageStacker = OpMultiArrayStacker(parent=self)
        opAllStageStacker.Images.resize(len(self.pcApplets))
        for stage_index, pcApplet in enumerate(self.pcApplets):
            opPc = pcApplet.topLevelOperator.getLane(laneIndex)
            opAllStageStacker.Images[stage_index].connect(
                opPc.HeadlessPredictionProbabilities)
            opAllStageStacker.AxisFlag.setValue("c")

        # The ideal_blockshape metadata field will be bogus, so just eliminate it
        # (Otherwise, the channels could be split up in an unfortunate way.)
        opMetadataOverride = OpMetadataInjector(parent=self)
        opMetadataOverride.Input.connect(opAllStageStacker.Output)
        opMetadataOverride.Metadata.setValue({"ideal_blockshape": None})

        opDataExport.Inputs[-1].connect(opMetadataOverride.Output)

        for slot in opDataExport.Inputs:
            assert slot.upstream_slot is not None
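
Conceptually, each OpMultiArrayStacker above with AxisFlag 'c' concatenates its inputs along the channel axis. A plain-numpy picture of what stacking raw data with predicted probabilities produces (shapes are illustrative):

import numpy

raw = numpy.zeros((100, 100, 1), dtype=numpy.float32)            # yxc, 1 channel
probabilities = numpy.zeros((100, 100, 3), dtype=numpy.float32)  # yxc, 3 classes

# What opStacker does with AxisFlag 'c': concatenate along the channel axis.
stacked = numpy.concatenate((raw, probabilities), axis=-1)
assert stacked.shape == (100, 100, 4)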
Example #11
def import_labeling_layer(labelLayer, labelingSlots, parent_widget=None):
    """
    Prompt the user for layer import settings, and perform the layer import.
    :param labelLayer: The top label layer source
    :param labelingSlots: An instance of LabelingGui.LabelingSlots
    :param parent_widget: The Qt GUI parent object
    """
    writeSeeds = labelingSlots.labelInput
    assert isinstance(
        writeSeeds,
        lazyflow.graph.Slot), "slot is of type %r" % (type(writeSeeds))
    opLabels = writeSeeds.getRealOperator()
    assert isinstance(opLabels, lazyflow.graph.Operator
                      ), "slot's operator is of type %r" % (type(opLabels))

    recentlyImported = PreferencesManager().get('labeling',
                                                'recently imported')
    mostRecentProjectPath = PreferencesManager().get('shell',
                                                     'recently opened')
    mostRecentImageFile = PreferencesManager().get('DataSelection',
                                                   'recent image')
    if recentlyImported:
        defaultDirectory = os.path.split(recentlyImported)[0]
    elif mostRecentProjectPath:
        defaultDirectory = os.path.split(mostRecentProjectPath)[0]
    elif mostRecentImageFile:
        defaultDirectory = os.path.split(mostRecentImageFile)[0]
    else:
        defaultDirectory = os.path.expanduser('~')

    fileNames = DataSelectionGui.getImageFileNamesToOpen(
        parent_widget, defaultDirectory)
    fileNames = list(map(str, fileNames))

    if not fileNames:
        return

    PreferencesManager().set('labeling', 'recently imported', fileNames[0])

    try:
        # Initialize operators
        opImport = OpInputDataReader(parent=opLabels.parent)
        opCache = OpBlockedArrayCache(parent=opLabels.parent)
        opMetadataInjector = OpMetadataInjector(parent=opLabels.parent)
        opReorderAxes = OpReorderAxes(parent=opLabels.parent)

        # Set up the pipeline as follows:
        #
        #   opImport --> (opCache) --> opMetadataInjector --------> opReorderAxes --(inject via setInSlot)--> labelInput
        #                             /                            /
        #     User-specified axisorder    labelInput.meta.axistags

        opImport.WorkingDirectory.setValue(defaultDirectory)
        opImport.FilePath.setValue(fileNames[0] if len(fileNames) ==
                                   1 else os.path.pathsep.join(fileNames))
        assert opImport.Output.ready()

        maxLabels = len(labelingSlots.labelNames.value)

        # We don't bother counting the label pixels
        # (or caching the data) if the data is big (over 1 GB)
        if numpy.prod(opImport.Output.meta.shape) > 1e9:
            reading_slot = opImport.Output

            # For huge data, we don't go through and search for the pixel values,
            # because that takes an annoyingly long amount of time.
            # Instead, we make the reasonable assumption that the input labels are already 1,2,3..N
            # and we don't tell the user what the label pixel counts are.
            unique_read_labels = numpy.array(range(maxLabels + 1))
            readLabelCounts = numpy.array([-1] * (maxLabels + 1))
            labelInfo = (maxLabels, (unique_read_labels, readLabelCounts))
        else:
            opCache.Input.connect(opImport.Output)
            opCache.CompressionEnabled.setValue(True)
            assert opCache.Output.ready()
            reading_slot = opCache.Output

            # We'll show a little window with a busy indicator while the data is loading
            busy_dlg = QProgressDialog(parent=parent_widget)
            busy_dlg.setLabelText("Scanning Label Data...")
            busy_dlg.setCancelButton(None)
            busy_dlg.setMinimum(100)
            busy_dlg.setMaximum(100)

            def close_busy_dlg(*args):
                QApplication.postEvent(busy_dlg, QCloseEvent())

            # Load the data from file into our cache
            # When it's done loading, close the progress dialog.
            req = reading_slot[:]
            req.notify_finished(close_busy_dlg)
            req.notify_failed(close_busy_dlg)
            req.submit()
            busy_dlg.exec_()

            readData = req.result

            # Can't use return_counts feature because that requires numpy >= 1.9
            #unique_read_labels, readLabelCounts = numpy.unique(readData, return_counts=True)

            # This does the same as the above, albeit slower, and probably with more RAM.
            bincounts = chunked_bincount(readData)
            unique_read_labels = bincounts.nonzero()[0].astype(readData.dtype,
                                                               copy=False)
            readLabelCounts = bincounts[unique_read_labels]

            labelInfo = (maxLabels, (unique_read_labels, readLabelCounts))
            del readData

        opMetadataInjector.Input.connect(reading_slot)
        metadata = reading_slot.meta.copy()
        opMetadataInjector.Metadata.setValue(metadata)
        opReorderAxes.Input.connect(opMetadataInjector.Output)

        # Transpose the axes for assignment to the labeling operator.
        opReorderAxes.AxisOrder.setValue(writeSeeds.meta.getAxisKeys())

        # Ask the user how to interpret the data.
        settingsDlg = LabelImportOptionsDlg(parent_widget, fileNames,
                                            opMetadataInjector.Output,
                                            labelingSlots.labelInput,
                                            labelInfo)

        def handle_updated_axes():
            # The user is specifying a new interpretation of the file's axes
            updated_axisorder = str(settingsDlg.axesEdit.text())
            metadata = opMetadataInjector.Metadata.value.copy()
            metadata.axistags = vigra.defaultAxistags(updated_axisorder)
            opMetadataInjector.Metadata.setValue(metadata)

            if opReorderAxes._invalid_axes:
                settingsDlg.buttonBox.button(
                    QDialogButtonBox.Ok).setEnabled(False)
                # Red background
                settingsDlg.axesEdit.setStyleSheet(
                    "QLineEdit { background: rgb(255, 128, 128);"
                    "selection-background-color: rgb(128, 128, 255); }")

        settingsDlg.axesEdit.editingFinished.connect(handle_updated_axes)

        # Initialize
        handle_updated_axes()

        dlg_result = settingsDlg.exec_()
        if dlg_result != LabelImportOptionsDlg.Accepted:
            return

        # Get user's chosen label mapping from dlg
        labelMapping = settingsDlg.labelMapping

        # Get user's chosen offsets, ordered by the 'write seeds' slot
        axes_5d = opReorderAxes.Output.meta.getAxisKeys()
        tagged_offsets = collections.OrderedDict(
            zip(axes_5d, [0] * len(axes_5d)))
        tagged_offsets.update(
            dict(
                zip(opReorderAxes.Output.meta.getAxisKeys(),
                    settingsDlg.imageOffsets)))
        imageOffsets = list(tagged_offsets.values())

        # Optimization if mapping is identity
        if list(labelMapping.keys()) == list(labelMapping.values()):
            labelMapping = None

        # If the data was already cached, this will be fast.
        label_data = opReorderAxes.Output[:].wait()

        # Map input labels to output labels
        if labelMapping:
            # There are other ways to do a relabeling (e.g. skimage.segmentation.relabel_sequential).
            # But this supports potentially huge values of unique_read_labels (in the billions),
            # without needing GB of RAM.
            mapping_indexes = numpy.searchsorted(unique_read_labels,
                                                 label_data)
            new_labels = numpy.array(
                [labelMapping[x] for x in unique_read_labels])
            label_data[:] = new_labels[mapping_indexes]

        label_roi = numpy.array(roiFromShape(opReorderAxes.Output.meta.shape))
        label_roi += imageOffsets
        label_slice = roiToSlice(*label_roi)
        writeSeeds[label_slice] = label_data

    finally:
        opReorderAxes.cleanUp()
        opMetadataInjector.cleanUp()
        opCache.cleanUp()
        opImport.cleanUp()
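
chunked_bincount is a helper from the surrounding codebase; the bincount-then-nonzero trick it feeds can be demonstrated with plain numpy.bincount (a simplified, non-chunked stand-in):

import numpy

read_data = numpy.array([0, 0, 3, 3, 3, 7], dtype=numpy.uint8)

bincounts = numpy.bincount(read_data.ravel())  # counts indexed by label value
unique_read_labels = bincounts.nonzero()[0].astype(read_data.dtype, copy=False)
readLabelCounts = bincounts[unique_read_labels]

print(unique_read_labels)  # [0 3 7]
print(readLabelCounts)     # [2 3 1]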
Example #12
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (
                datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value

            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingH5N5Reader(parent=self)
                opReader.H5N5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
            elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
                preloaded_array = datasetInfo.preloaded_array
                assert preloaded_array is not None
                if not hasattr(preloaded_array, 'axistags'):
                    axisorder = get_default_axisordering(preloaded_array.shape)
                    preloaded_array = vigra.taggedView(preloaded_array,
                                                       axisorder)

                opReader = OpArrayPiper(parent=self)
                opReader.Input.setValue(preloaded_array)
                providerSlot = opReader.Output
            else:
                if datasetInfo.realDataSource:
                    # Use a normal (filesystem) reader
                    opReader = OpInputDataReader(parent=self)
                    if datasetInfo.subvolume_roi is not None:
                        opReader.SubVolumeRoi.setValue(
                            datasetInfo.subvolume_roi)
                    opReader.WorkingDirectory.setValue(
                        self.WorkingDirectory.value)
                    opReader.SequenceAxis.setValue(datasetInfo.sequenceAxis)
                    opReader.FilePath.setValue(datasetInfo.filePath)
                else:
                    # Use fake reader: allows to run the project in a headless
                    # mode without the raw data
                    opReader = OpZeroDefault(parent=self)
                    opReader.MetaInput.meta = MetaDict(
                        shape=datasetInfo.laneShape,
                        dtype=datasetInfo.laneDtype,
                        drange=datasetInfo.drange,
                        axistags=datasetInfo.axistags)
                    opReader.MetaInput.setValue(
                        numpy.zeros(datasetInfo.laneShape,
                                    dtype=datasetInfo.laneDtype))
                providerSlot = opReader.Output
            self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255).
            metadata = {}
            metadata['display_mode'] = datasetInfo.display_mode
            role_name = self.RoleName.value
            if 'c' not in providerSlot.meta.getTaggedShape():
                num_channels = 0
            else:
                num_channels = providerSlot.meta.getTaggedShape()['c']
            if num_channels > 1:
                metadata['channel_names'] = [
                    "{}-{}".format(role_name, i) for i in range(num_channels)
                ]
            else:
                metadata['channel_names'] = [role_name]

            if datasetInfo.drange is not None:
                metadata['drange'] = datasetInfo.drange
            elif providerSlot.meta.dtype == numpy.uint8:
                # SPECIAL case for uint8 data: Provide a default drange.
                # The user can always override this herself if she wants.
                metadata['drange'] = (0, 255)
            if datasetInfo.normalizeDisplay is not None:
                metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
            if datasetInfo.axistags is not None:
                if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                    ts = providerSlot.meta.getTaggedShape()
                    if 'c' in ts and 'c' not in datasetInfo.axistags and len(
                            datasetInfo.axistags) + 1 == len(ts):
                        # provider has no channel axis, but template has => add channel axis to provider
                        # fixme: Optimize the axistag guess in BatchProcessingApplet instead of hoping for the best here
                        metadata['axistags'] = vigra.defaultAxistags(
                            ''.join(datasetInfo.axistags.keys()) + 'c')
                    else:
                        # This usually only happens when we copied a DatasetInfo from another lane,
                        # and used it as a 'template' to initialize this lane.
                        # This happens in the BatchProcessingApplet when it attempts to guess the axistags of
                        # batch images based on the axistags chosen by the user in the interactive images.
                        # If the interactive image tags don't make sense for the batch image, you get this error.
                        raise Exception(
                            "Your dataset's provided axistags ({}) do not have the "
                            "correct dimensionality for your dataset, which has {} dimensions."
                            .format(
                                "".join(tag.key
                                        for tag in datasetInfo.axistags),
                                len(providerSlot.meta.shape)))
                else:
                    metadata['axistags'] = datasetInfo.axistags
            if datasetInfo.original_axistags is not None:
                metadata['original_axistags'] = datasetInfo.original_axistags

            if datasetInfo.subvolume_roi is not None:
                metadata['subvolume_roi'] = datasetInfo.subvolume_roi

                # FIXME: We are overwriting the axistags metadata to intentionally allow
                #        the user to change our interpretation of which axis is which.
                #        That's okay, but technically there's a special corner case if
                #        the user redefines the channel axis index.
                #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.

            opMetadataInjector = OpMetadataInjector(parent=self)
            opMetadataInjector.Input.connect(providerSlot)
            opMetadataInjector.Metadata.setValue(metadata)
            providerSlot = opMetadataInjector.Output
            self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            # make sure that x and y axes are present in the selected axis order
            if 'x' not in providerSlot.meta.axistags or 'y' not in providerSlot.meta.axistags:
                raise DatasetConstraintError(
                    "DataSelection",
                    "Data must always have at leaset the axes x and y for ilastik to work."
                )

            if self.forceAxisOrder:
                assert isinstance(self.forceAxisOrder, list), \
                    "forceAxisOrder should be a *list* of preferred axis orders"

                # Before we re-order, make sure no non-singleton
                #  axes would be dropped by the forced order.
                tagged_provider_shape = providerSlot.meta.getTaggedShape()
                minimal_axes = [
                    k_v for k_v in list(tagged_provider_shape.items())
                    if k_v[1] > 1
                ]
                minimal_axes = set(k for k, v in minimal_axes)

                # Pick the shortest of the possible 'forced' orders that
                # still contains all the axes of the original dataset.
                candidate_orders = list(self.forceAxisOrder)
                candidate_orders = [
                    order for order in candidate_orders
                    if minimal_axes.issubset(set(order))
                ]

                if len(candidate_orders) == 0:
                    msg = "The axes of your dataset ({}) are not compatible with any of the allowed"\
                          " axis configurations used by this workflow ({}). Please fix them."\
                          .format(providerSlot.meta.getAxisKeys(), self.forceAxisOrder)
                    raise DatasetConstraintError("DataSelection", msg)

                output_order = sorted(candidate_orders,
                                      key=len)[0]  # the shortest one
                output_order = "".join(output_order)
            else:
                # No forced axis order was supplied. Use the original axis order
                # as the output order: the export applet assumes that an
                # OpReorderAxes operator is added at the beginning.
                output_order = "".join(
                    [x for x in providerSlot.meta.axistags.keys()])

            op5 = OpReorderAxes(parent=self)
            op5.AxisOrder.setValue(output_order)
            op5.Input.connect(providerSlot)
            providerSlot = op5.Output
            self._opReaders.append(op5)

            # If the channel axis is missing, add it as last axis
            if 'c' not in providerSlot.meta.axistags:
                op5 = OpReorderAxes(parent=self)
                keys = providerSlot.meta.getAxisKeys()

                # Append
                keys.append('c')
                op5.AxisOrder.setValue("".join(keys))
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)

            self.AllowLabels.setValue(datasetInfo.allowLabels)

            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname

            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)

        except:
            self.internalCleanup()
            raise
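
The forceAxisOrder handling above reduces to a small pure function: keep only the candidate orders that contain every non-singleton axis of the dataset, then pick the shortest. A standalone sketch of that selection logic (a hypothetical helper, not part of the operator):

def pick_output_order(tagged_shape, force_axis_order):
    """Shortest forced order that keeps all non-singleton axes (illustrative)."""
    minimal_axes = {k for k, v in tagged_shape.items() if v > 1}
    candidates = [order for order in force_axis_order
                  if minimal_axes.issubset(set(order))]
    if not candidates:
        raise ValueError("No allowed axis order contains axes %r" % sorted(minimal_axes))
    return "".join(sorted(candidates, key=len)[0])

# A 3D single-channel dataset fits 'zyx' even though 'tzyxc' would also work.
assert pick_output_order({'z': 10, 'y': 20, 'x': 30, 'c': 1},
                         ['zyx', 'tzyxc']) == 'zyx'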
Example #13
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value
    
            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
            elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
                preloaded_array = datasetInfo.preloaded_array
                assert preloaded_array is not None
                if not hasattr(preloaded_array, 'axistags'):
                    # Guess the axis order, since one was not provided.
                    axisorders = { 2 : 'yx',
                                   3 : 'zyx',
                                   4 : 'zyxc',
                                   5 : 'tzyxc' }

                    shape = preloaded_array.shape
                    ndim = preloaded_array.ndim
                    assert ndim != 0, "0-D data is not yet supported"
                    assert ndim != 1, "1-D data is not yet supported"
                    assert ndim <= 5, "Data with more than 5 dimensions is not supported."
        
                    axisorder = axisorders[ndim]
                    if ndim == 3 and shape[2] <= 4:
                        # Special case: If the 3rd dim is small, assume it's 'c', not 'z'
                        axisorder = 'yxc'
                    preloaded_array = vigra.taggedView(preloaded_array, axisorder)
                opReader = OpArrayPiper(parent=self)
                opReader.Input.setValue( preloaded_array )
                providerSlot = opReader.Output
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue( datasetInfo.subvolume_roi )
                opReader.WorkingDirectory.setValue( self.WorkingDirectory.value )
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
            self._opReaders.append(opReader)
            
            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            metadata = {}
            metadata['display_mode'] = datasetInfo.display_mode
            role_name = self.RoleName.value
            if 'c' not in providerSlot.meta.getTaggedShape():
                num_channels = 0
            else:
                num_channels = providerSlot.meta.getTaggedShape()['c']
            if num_channels > 1:
                metadata['channel_names'] = ["{}-{}".format(role_name, i) for i in range(num_channels)]
            else:
                metadata['channel_names'] = [role_name]
                 
            if datasetInfo.drange is not None:
                metadata['drange'] = datasetInfo.drange
            elif providerSlot.meta.dtype == numpy.uint8:
                # SPECIAL case for uint8 data: Provide a default drange.
                # The user can always override this herself if she wants.
                metadata['drange'] = (0,255)
            if datasetInfo.normalizeDisplay is not None:
                metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
            if datasetInfo.axistags is not None:
                if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                    # This usually only happens when we copied a DatasetInfo from another lane,
                    # and used it as a 'template' to initialize this lane.
                    # This happens in the BatchProcessingApplet when it attempts to guess the axistags of 
                    # batch images based on the axistags chosen by the user in the interactive images.
                    # If the interactive image tags don't make sense for the batch image, you get this error.
                    raise Exception( "Your dataset's provided axistags ({}) do not have the "
                                     "correct dimensionality for your dataset, which has {} dimensions."
                                     .format( "".join(tag.key for tag in datasetInfo.axistags), len(providerSlot.meta.shape) ) )
                metadata['axistags'] = datasetInfo.axistags
            if datasetInfo.subvolume_roi is not None:
                metadata['subvolume_roi'] = datasetInfo.subvolume_roi
                
                # FIXME: We are overwriting the axistags metadata to intentionally allow 
                #        the user to change our interpretation of which axis is which.
                #        That's okay, but technically there's a special corner case if 
                #        the user redefines the channel axis index.  
                #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.
            
            opMetadataInjector = OpMetadataInjector( parent=self )
            opMetadataInjector.Input.connect( providerSlot )
            opMetadataInjector.Metadata.setValue( metadata )
            providerSlot = opMetadataInjector.Output
            self._opReaders.append( opMetadataInjector )

            self._NonTransposedImage.connect(providerSlot)
            
            if self.forceAxisOrder:
                # Before we re-order, make sure no non-singleton 
                #  axes would be dropped by the forced order.
                output_order = "".join(self.forceAxisOrder)
                provider_order = "".join(providerSlot.meta.getAxisKeys())
                tagged_provider_shape = providerSlot.meta.getTaggedShape()
                dropped_axes = set(provider_order) - set(output_order)
                if any(tagged_provider_shape[a] > 1 for a in dropped_axes):
                    msg = "The axes of your dataset ({}) are not compatible with the axes used by this workflow ({}). Please fix them."\
                          .format(provider_order, output_order)
                    raise DatasetConstraintError("DataSelection", msg)

                op5 = OpReorderAxes(parent=self)
                op5.AxisOrder.setValue(self.forceAxisOrder)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)
            
            # If the channel axis is not last (or is missing),
            #  make sure the axes are re-ordered so that channel is last.
            if providerSlot.meta.axistags.index('c') != len( providerSlot.meta.axistags )-1:
                op5 = OpReorderAxes( parent=self )
                keys = list(providerSlot.meta.getTaggedShape().keys())
                try:
                    # Remove if present.
                    keys.remove('c')
                except ValueError:
                    pass
                # Append
                keys.append('c')
                op5.AxisOrder.setValue( "".join( keys ) )
                op5.Input.connect( providerSlot )
                providerSlot = op5.Output
                self._opReaders.append( op5 )
            
            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)
            
            # Set the image name and usage flag
            self.AllowLabels.setValue( datasetInfo.allowLabels )
            
            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname
            
            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)
        
        except:
            self.internalCleanup()
            raise
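
The axis-order guess for untagged preloaded arrays (the axisorders table above) is likewise self-contained. A sketch of just that heuristic (guess_axisorder is an illustrative name; the real code immediately applies the result via vigra.taggedView):

import numpy

def guess_axisorder(array):
    """Guess an axis-order string for an untagged array: 2-D -> 'yx',
    3-D -> 'zyx' (or 'yxc' if the third axis is small enough to look
    like channels), 4-D -> 'zyxc', 5-D -> 'tzyxc'."""
    orders = {2: 'yx', 3: 'zyx', 4: 'zyxc', 5: 'tzyxc'}
    if array.ndim not in orders:
        raise ValueError("Only 2-D through 5-D data is supported")
    if array.ndim == 3 and array.shape[2] <= 4:
        return 'yxc'  # special case: a small third axis is probably channels
    return orders[array.ndim]

assert guess_axisorder(numpy.zeros((100, 100, 3))) == 'yxc'   # RGB-like image
assert guess_axisorder(numpy.zeros((100, 100, 50))) == 'zyx'  # a real z-stack
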
Example #14
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            providerSlot = datasetInfo.get_provider_slot(parent=self)
            opReader = providerSlot.operator
            self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            metadata = {}
            metadata["display_mode"] = datasetInfo.display_mode

            role_name = self.RoleName.value
            if "c" not in providerSlot.meta.getTaggedShape():
                num_channels = 0
            else:
                num_channels = providerSlot.meta.getTaggedShape()["c"]
            if num_channels > 1:
                metadata["channel_names"] = [f"{role_name}-{i}" for i in range(num_channels)]
            else:
                metadata["channel_names"] = [role_name]

            if datasetInfo.drange is not None:
                metadata["drange"] = datasetInfo.drange
            elif providerSlot.meta.dtype == numpy.uint8:
                metadata["drange"] = (0, 255)
            if datasetInfo.normalizeDisplay is not None:
                metadata["normalizeDisplay"] = datasetInfo.normalizeDisplay
            if datasetInfo.axistags is not None:
                metadata["axistags"] = datasetInfo.axistags

            if datasetInfo.subvolume_roi is not None:
                metadata["subvolume_roi"] = datasetInfo.subvolume_roi

                # FIXME: We are overwriting the axistags metadata to intentionally allow
                #        the user to change our interpretation of which axis is which.
                #        That's okay, but technically there's a special corner case if
                #        the user redefines the channel axis index.
                #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.

            opMetadataInjector = OpMetadataInjector(parent=self)
            opMetadataInjector.Input.connect(providerSlot)
            opMetadataInjector.Metadata.setValue(metadata)
            providerSlot = opMetadataInjector.Output
            self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            # make sure that x and y axes are present in the selected axis order
            if "x" not in providerSlot.meta.axistags or "y" not in providerSlot.meta.axistags:
                raise DatasetConstraintError(
                    "DataSelection", "Data must always have at leaset the axes x and y for ilastik to work."
                )

            if self.forceAxisOrder:
                assert isinstance(
                    self.forceAxisOrder, list
                ), "forceAxisOrder should be a *list* of preferred axis orders"

                # Before we re-order, make sure no non-singleton
                #  axes would be dropped by the forced order.
                tagged_provider_shape = providerSlot.meta.getTaggedShape()
                minimal_axes = {k for k, v in tagged_provider_shape.items() if v > 1}

                # Pick the shortest of the possible 'forced' orders that
                # still contains all the axes of the original dataset.
                candidate_orders = list(self.forceAxisOrder)
                candidate_orders = [order for order in candidate_orders if minimal_axes.issubset(set(order))]

                if len(candidate_orders) == 0:
                    msg = (
                        f"The axes of your dataset ({providerSlot.meta.getAxisKeys()}) are not compatible with "
                        f"any of the allowed axis configurations used by this workflow ({self.forceAxisOrder})."
                    )
                    raise DatasetConstraintError("DataSelection", msg)

                output_order = sorted(candidate_orders, key=len)[0]  # the shortest one
                output_order = "".join(output_order)
            else:
                # No forced axis order was supplied. Use the original axis
                # order as the output order: the export applet assumes that
                # an OpReorderAxes operator is added at the beginning.
                output_order = "".join(providerSlot.meta.getAxisKeys())
            if "c" not in output_order:
                output_order += "c"

            op5 = OpReorderAxes(parent=self, AxisOrder=output_order, Input=providerSlot)
            self._opReaders.append(op5)

            self.Image.connect(op5.Output)

            self.AllowLabels.setValue(datasetInfo.allowLabels)

            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname

            self.ImageName.setValue(datasetInfo.nickname)

        except:
            self.internalCleanup()
            raise
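
Example #14's metadata defaulting (one channel name per channel, a (0, 255) drange for uint8 data) can be checked in isolation. A sketch with an illustrative helper name (build_channel_metadata is not an ilastik function):

import numpy

def build_channel_metadata(role_name, tagged_shape, dtype, drange=None):
    """Mirror the defaulting rules above: one name per channel when there
    are several, the bare role name otherwise, and a (0, 255) drange for
    uint8 data unless one was supplied."""
    metadata = {}
    num_channels = tagged_shape.get('c', 0)
    if num_channels > 1:
        metadata['channel_names'] = ['{}-{}'.format(role_name, i)
                                     for i in range(num_channels)]
    else:
        metadata['channel_names'] = [role_name]
    if drange is not None:
        metadata['drange'] = drange
    elif numpy.dtype(dtype) == numpy.uint8:
        metadata['drange'] = (0, 255)
    return metadata

meta = build_channel_metadata('Raw Data', {'y': 512, 'x': 512, 'c': 3}, numpy.uint8)
assert meta['channel_names'] == ['Raw Data-0', 'Raw Data-1', 'Raw Data-2']
assert meta['drange'] == (0, 255)
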
Example #15
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (
                datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value

            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
                self._opReaders.append(opReader)
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                opReader.WorkingDirectory.setValue(self.WorkingDirectory.value)
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
                self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            if datasetInfo.normalizeDisplay is not None or \
               datasetInfo.drange is not None or \
               datasetInfo.axistags is not None:
                metadata = {}
                if datasetInfo.drange is not None:
                    metadata['drange'] = datasetInfo.drange
                if datasetInfo.normalizeDisplay is not None:
                    metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
                if datasetInfo.axistags is not None:
                    metadata['axistags'] = datasetInfo.axistags
                opMetadataInjector = OpMetadataInjector(parent=self)
                opMetadataInjector.Input.connect(providerSlot)
                opMetadataInjector.Metadata.setValue(metadata)
                providerSlot = opMetadataInjector.Output
                self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            if self.force5d:
                op5 = OpReorderAxes(parent=self)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # If there is no channel axis, use an OpReorderAxes to append one.
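            #  (vigra's AxisTags.index() returns len(axistags) for a missing
            #   key, so this condition holds exactly when 'c' is absent.)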
            if providerSlot.meta.axistags.index('c') >= len(
                    providerSlot.meta.axistags):
                op5 = OpReorderAxes(parent=self)
                providerKeys = "".join(
                    providerSlot.meta.getTaggedShape().keys())
                op5.AxisOrder.setValue(providerKeys + 'c')
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # Most workflows can't handle replacing a dataset with one of a different dimensionality.
            # We guard against that by checking for errors NOW, before connecting our Image output,
            #  which is connected to the rest of the workflow.
            new_axiskeys = "".join(providerSlot.meta.getAxisKeys())
            if self._previous_output_axiskeys is not None and len(
                    new_axiskeys) != len(self._previous_output_axiskeys):
                msg =  "You can't replace an existing dataset with one of a different dimensionality.\n"\
                       "Your existing dataset was {}D ({}), but the new dataset is {}D ({}).\n"\
                       "Your original dataset entry has been reset.  "\
                       "Please remove it and then add your new dataset."\
                       "".format( len(self._previous_output_axiskeys), self._previous_output_axiskeys,
                                  len(new_axiskeys), new_axiskeys )
                raise OpDataSelection.InvalidDimensionalityError(msg)

            self._previous_output_axiskeys = new_axiskeys

            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)

            # Set the image name and usage flag
            self.AllowLabels.setValue(datasetInfo.allowLabels)

            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)

        except:
            self.internalCleanup()
            raise
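
The dimensionality guard in this variant is another self-contained rule: compare the new axis keys against the previous ones before the rest of the workflow sees the data. A sketch of the same check (check_same_dimensionality is an illustrative name; the real code raises OpDataSelection.InvalidDimensionalityError):

def check_same_dimensionality(previous_axiskeys, new_axiskeys):
    """Refuse to replace an existing dataset with one of a different
    dimensionality, mirroring the guard above."""
    if previous_axiskeys is not None and \
       len(new_axiskeys) != len(previous_axiskeys):
        raise ValueError(
            "You can't replace an existing dataset with one of a different "
            "dimensionality: the existing dataset was {}D ({}), but the new "
            "one is {}D ({}).".format(len(previous_axiskeys), previous_axiskeys,
                                      len(new_axiskeys), new_axiskeys))
    return new_axiskeys  # becomes the new 'previous' value

previous = check_same_dimensionality(None, 'zyxc')  # first dataset: anything goes
check_same_dimensionality(previous, 'tzyx')         # same rank: accepted
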
Example #16
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (
                datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value

            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
                self._opReaders.append(opReader)
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue(datasetInfo.subvolume_roi)
                opReader.WorkingDirectory.setValue(self.WorkingDirectory.value)
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
                self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            if datasetInfo.normalizeDisplay is not None or \
               datasetInfo.drange is not None or \
               datasetInfo.axistags is not None or \
               (providerSlot.meta.drange is None and providerSlot.meta.dtype == numpy.uint8):
                metadata = {}
                if datasetInfo.drange is not None:
                    metadata['drange'] = datasetInfo.drange
                elif providerSlot.meta.dtype == numpy.uint8:
                    # SPECIAL case for uint8 data: Provide a default drange.
                    # The user can always override this herself if she wants.
                    metadata['drange'] = (0, 255)
                if datasetInfo.normalizeDisplay is not None:
                    metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
                if datasetInfo.axistags is not None:
                    if len(datasetInfo.axistags) != len(
                            providerSlot.meta.shape):
                        raise Exception(
                            "Your dataset's provided axistags ({}) do not have the "
                            "correct dimensionality for your dataset, which has {} dimensions."
                            .format(
                                "".join(tag.key
                                        for tag in datasetInfo.axistags),
                                len(providerSlot.meta.shape)))
                    metadata['axistags'] = datasetInfo.axistags
                if datasetInfo.subvolume_roi is not None:
                    metadata['subvolume_roi'] = datasetInfo.subvolume_roi

                    # FIXME: We are overwriting the axistags metadata to intentionally allow
                    #        the user to change our interpretation of which axis is which.
                    #        That's okay, but technically there's a special corner case if
                    #        the user redefines the channel axis index.
                    #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                    #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.

                opMetadataInjector = OpMetadataInjector(parent=self)
                opMetadataInjector.Input.connect(providerSlot)
                opMetadataInjector.Metadata.setValue(metadata)
                providerSlot = opMetadataInjector.Output
                self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            if self.force5d:
                op5 = OpReorderAxes(parent=self)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # If the channel axis is not last (or is missing),
            #  make sure the axes are re-ordered so that channel is last.
            if providerSlot.meta.axistags.index('c') != len(
                    providerSlot.meta.axistags) - 1:
                op5 = OpReorderAxes(parent=self)
                keys = list(providerSlot.meta.getTaggedShape().keys())
                try:
                    # Remove if present.
                    keys.remove('c')
                except ValueError:
                    pass
                # Append
                keys.append('c')
                op5.AxisOrder.setValue("".join(keys))
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)

            # Set the image name and usage flag
            self.AllowLabels.setValue(datasetInfo.allowLabels)

            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname

            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)

        except:
            self.internalCleanup()
            raise
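
Every variant above ends with the same channel-last normalization, which reduces to a one-line reshuffle of the axis keys. A sketch with an illustrative helper name (channel_last_order), independent of OpReorderAxes:

def channel_last_order(axiskeys):
    """Return the axis-order string with 'c' moved (or appended) to the end,
    mirroring the OpReorderAxes setup used throughout these examples."""
    keys = [k for k in axiskeys if k != 'c']
    return "".join(keys) + 'c'

assert channel_last_order('czyx') == 'zyxc'   # channel moved to the end
assert channel_last_order('tzyx') == 'tzyxc'  # missing channel appended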