Code Example #1
    def setupOutputs(self):
        datasetInfo = self.Dataset.value
        internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId

        # Data only comes from the project file if the user said so AND it exists in the project
        datasetInProject = (datasetInfo.location == DatasetInfo.Location.ProjectInternal)
        datasetInProject &= self.ProjectFile.connected() and \
                            internalPath in self.ProjectFile.value

        if self._opReader is not None:
            self.Image.disconnect()
            self._opReader.cleanUp()
        
        # If we should find the data in the project file, use a dataset reader
        if datasetInProject:
            self._opReader = OpStreamingHdf5Reader(parent=self)
            self._opReader.Hdf5File.setValue(self.ProjectFile.value)
            self._opReader.InternalPath.setValue(internalPath)
            providerSlot = self._opReader.OutputImage
        else:
            # Use a normal (filesystem) reader
            self._opReader = OpInputDataReader(parent=self)
            if datasetInfo.axisorder is not None:
                self._opReader.DefaultAxisOrder.setValue( datasetInfo.axisorder )
            self._opReader.WorkingDirectory.connect( self.WorkingDirectory )
            self._opReader.FilePath.setValue(datasetInfo.filePath)
            providerSlot = self._opReader.Output        
        
        # Connect our external outputs to the internal operators we chose
        self.Image.connect(providerSlot)
        
        # Set the image name and usage flag
        self.AllowLabels.setValue( datasetInfo.allowLabels )
        self.ImageName.setValue(datasetInfo.filePath)
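
A note on the project-internal branch above: the check `internalPath in self.ProjectFile.value` relies on h5py's membership test for internal paths. A minimal sketch, with a hypothetical project file and dataset id:

    import h5py

    # h5py File/Group objects support 'in' for internal group/dataset paths.
    with h5py.File('project.ilp', 'r') as projectFile:  # hypothetical file
        internalPath = 'DataSelection/local_data/abc-123'  # hypothetical id
        print(internalPath in projectFile)  # True iff that path exists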
Code Example #2
    def setup_method(self, method):
        self.graph = Graph()
        self.testDataFileName = 'test.h5'
        self.op = OpStreamingHdf5Reader(graph=self.graph)

        self.h5File = h5py.File(self.testDataFileName, 'w')  # open explicitly in write mode (modern h5py requires a mode)
        self.h5File.create_group('volume')

        # Create a test dataset
        datashape = (1,2,3,4,5)
        self.data = numpy.indices(datashape).sum(0).astype(numpy.float32)
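
The setup is truncated here; a hedged sketch of how it presumably continues, writing the test volume into the file and reading it back through the operator (the internal path 'volume/data' is an assumption):

        self.h5File['volume'].create_dataset('data', data=self.data)
        self.h5File.flush()

        self.op.Hdf5File.setValue(self.h5File)
        self.op.InternalPath.setValue('volume/data')

        # Reading the full image back should reproduce the test data.
        assert (self.op.OutputImage[:].wait() == self.data).all()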
Code Example #3
    def _deserialize(self, group, slot):
        # Flush the GUI cache of any saved up dirty rois
        if self.operator.FreezePredictions.value:
            self.operator.FreezePredictions.setValue(False)
            self.operator.FreezePredictions.setValue(True)

        #self.operator.PredictionsFromDisk.resize(len(group))
        if len(group.keys()) > 0:
            assert len(group.keys()) == len(self.operator.PredictionsFromDisk), "Expected to find the same number of on-disk predictions as there are images loaded."
        else:
            for diskSlot in self.operator.PredictionsFromDisk:  # renamed to avoid shadowing the 'slot' argument
                diskSlot.disconnect()
        for imageIndex, datasetName in enumerate(group.keys()):
            opStreamer = OpStreamingHdf5Reader(graph=self.operator.graph, parent=self.operator.parent)
            opStreamer.Hdf5File.setValue(group)
            opStreamer.InternalPath.setValue(datasetName)
            self.operator.PredictionsFromDisk[imageIndex].connect(opStreamer.OutputImage)
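
One detail this loop depends on: h5py groups iterate over member names alphabetically by default, which is what makes the datasetName-to-imageIndex mapping stable across runs. A minimal sketch with a hypothetical file:

    import h5py

    with h5py.File('predictions.h5', 'w') as f:  # hypothetical file
        g = f.create_group('PredictionsGroup')
        g.create_dataset('image1', data=[0])
        g.create_dataset('image0', data=[0])
        print(list(g.keys()))  # -> ['image0', 'image1'] (name order)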
Code Example #4
    def _deserializePredictions(self, topGroup):
        self._predictionsPresent = 'Predictions' in topGroup.keys()
        if self._predictionsPresent:
            predictionGroup = topGroup['Predictions']

            # Flush the GUI cache of any saved up dirty rois
            if self.mainOperator.FreezePredictions.value:
                self.mainOperator.FreezePredictions.setValue(False)
                self.mainOperator.FreezePredictions.setValue(True)

            for imageIndex, datasetName in enumerate(predictionGroup.keys()):
                opStreamer = OpStreamingHdf5Reader(
                    graph=self.mainOperator.graph)
                opStreamer.Hdf5File.setValue(predictionGroup)
                opStreamer.InternalPath.setValue(datasetName)
                self.mainOperator.PredictionsFromDisk[imageIndex].connect(
                    opStreamer.OutputImage)
        self._dirtyFlags[Section.Predictions] = False
Code Example #5
    def _attemptOpenAsHdf5(self, filePath):
        # Check for an hdf5 extension
        pathComponents = PathComponents(filePath)
        ext = pathComponents.extension
        if ext not in (".%s" % x for x in OpInputDataReader.h5Exts):
            return ([], None)

        externalPath = pathComponents.externalPath
        internalPath = pathComponents.internalPath

        if not os.path.exists(externalPath):
            raise OpInputDataReader.DatasetReadError(
                "Input file does not exist: " + externalPath)

        # Open the h5 file in read-only mode
        try:
            h5File = h5py.File(externalPath, 'r')
        except OpInputDataReader.DatasetReadError:
            raise
        except Exception as e:
            msg = "Unable to open HDF5 File: {}\n{}".format(
                externalPath, str(e))
            raise OpInputDataReader.DatasetReadError(msg)
        else:
            if not internalPath:
                possible_internal_paths = self._get_hdf5_dataset_names(h5File)
                if len(possible_internal_paths) == 1:
                    internalPath = possible_internal_paths[0]
                elif len(possible_internal_paths) == 0:
                    h5File.close()
                    msg = "HDF5 file contains no datasets: {}".format(
                        externalPath)
                    raise OpInputDataReader.DatasetReadError(msg)
                else:
                    h5File.close()
                    msg = "When using hdf5, you must append the hdf5 internal path to the "\
                          "data set to your filename, e.g. myfile.h5/volume/data  "\
                          "No internal path provided for dataset in file: {}".format(
                              externalPath)
                    raise OpInputDataReader.DatasetReadError(msg)

            try:
                compression_setting = h5File[internalPath].compression
            except Exception as e:
                h5File.close()
                msg = "Error reading HDF5 File: {}\n{}".format(externalPath, e)
                raise OpInputDataReader.DatasetReadError(msg)

            # If the h5 dataset is compressed, we'll have better performance
            #  with a multi-process hdf5 access object.
            # (Otherwise, single-process is faster.)
            allow_multiprocess_hdf5 = "LAZYFLOW_MULTIPROCESS_HDF5" in os.environ and os.environ[
                "LAZYFLOW_MULTIPROCESS_HDF5"] != ""
            if compression_setting is not None and allow_multiprocess_hdf5:
                h5File.close()
                h5File = MultiProcessHdf5File(externalPath, 'r')

        self._file = h5File

        h5Reader = OpStreamingHdf5Reader(parent=self)
        h5Reader.Hdf5File.setValue(h5File)

        try:
            h5Reader.InternalPath.setValue(internalPath)
        except OpStreamingHdf5Reader.DatasetReadError as e:
            msg = "Error reading HDF5 File: {}\n{}".format(externalPath, e.msg)
            raise OpInputDataReader.DatasetReadError(msg)

        return ([h5Reader], h5Reader.OutputImage)
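
For reference, a minimal sketch of how PathComponents splits a combined external/internal path like the ones handled above (attribute names as used in this snippet; the example path is hypothetical):

    from lazyflow.utility import PathComponents

    pc = PathComponents('/tmp/myfile.h5/volume/data')
    print(pc.externalPath)  # -> '/tmp/myfile.h5'
    print(pc.internalPath)  # -> '/volume/data'
    print(pc.extension)     # -> '.h5'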
Code Example #6
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value
    
            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
            elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
                preloaded_array = datasetInfo.preloaded_array
                assert preloaded_array is not None
                if not hasattr(preloaded_array, 'axistags'):
                    # Guess the axis order, since one was not provided.
                    axisorders = { 2 : 'yx',
                                   3 : 'zyx',
                                   4 : 'zyxc',
                                   5 : 'tzyxc' }

                    shape = preloaded_array.shape
                    ndim = preloaded_array.ndim            
                    assert ndim != 0, "0-D data is not yet supported"
                    assert ndim != 1, "1-D data is not yet supported"
                    assert ndim <= 5, "No support for data with more than 5 dimensions."
        
                    axisorder = axisorders[ndim]
                    if ndim == 3 and shape[2] <= 4:
                        # Special case: If the 3rd dim is small, assume it's 'c', not 'z'
                        axisorder = 'yxc'
                    preloaded_array = vigra.taggedView(preloaded_array, axisorder)
                opReader = OpArrayPiper(parent=self)
                opReader.Input.setValue( preloaded_array )
                providerSlot = opReader.Output
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue( datasetInfo.subvolume_roi )
                opReader.WorkingDirectory.setValue( self.WorkingDirectory.value )
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
            self._opReaders.append(opReader)
            
            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            metadata = {}
            metadata['display_mode'] = datasetInfo.display_mode
            role_name = self.RoleName.value
            num_channels = providerSlot.meta.getTaggedShape().get('c', 0)
            if num_channels > 1:
                metadata['channel_names'] = ["{}-{}".format(role_name, i) for i in range(num_channels)]
            else:
                metadata['channel_names'] = [role_name]
                 
            if datasetInfo.drange is not None:
                metadata['drange'] = datasetInfo.drange
            elif providerSlot.meta.dtype == numpy.uint8:
                # SPECIAL case for uint8 data: Provide a default drange.
                # The user can always override this herself if she wants.
                metadata['drange'] = (0,255)
            if datasetInfo.normalizeDisplay is not None:
                metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
            if datasetInfo.axistags is not None:
                if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                    # This usually only happens when we copied a DatasetInfo from another lane,
                    # and used it as a 'template' to initialize this lane.
                    # This happens in the BatchProcessingApplet when it attempts to guess the axistags of 
                    # batch images based on the axistags chosen by the user in the interactive images.
                    # If the interactive image tags don't make sense for the batch image, you get this error.
                    raise Exception( "Your dataset's provided axistags ({}) do not have the "
                                     "correct dimensionality for your dataset, which has {} dimensions."
                                     .format( "".join(tag.key for tag in datasetInfo.axistags), len(providerSlot.meta.shape) ) )
                metadata['axistags'] = datasetInfo.axistags
            if datasetInfo.subvolume_roi is not None:
                metadata['subvolume_roi'] = datasetInfo.subvolume_roi
                
                # FIXME: We are overwriting the axistags metadata to intentionally allow 
                #        the user to change our interpretation of which axis is which.
                #        That's okay, but technically there's a special corner case if 
                #        the user redefines the channel axis index.  
                #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.
            
            opMetadataInjector = OpMetadataInjector( parent=self )
            opMetadataInjector.Input.connect( providerSlot )
            opMetadataInjector.Metadata.setValue( metadata )
            providerSlot = opMetadataInjector.Output
            self._opReaders.append( opMetadataInjector )

            self._NonTransposedImage.connect(providerSlot)
            
            if self.forceAxisOrder:
                # Before we re-order, make sure no non-singleton 
                #  axes would be dropped by the forced order.
                output_order = "".join(self.forceAxisOrder)
                provider_order = "".join(providerSlot.meta.getAxisKeys())
                tagged_provider_shape = providerSlot.meta.getTaggedShape()
                dropped_axes = set(provider_order) - set(output_order)
                if any(tagged_provider_shape[a] > 1 for a in dropped_axes):
                    msg = "The axes of your dataset ({}) are not compatible with the axes used by this workflow ({}). Please fix them."\
                          .format(provider_order, output_order)
                    raise DatasetConstraintError("DataSelection", msg)

                op5 = OpReorderAxes(parent=self)
                op5.AxisOrder.setValue(self.forceAxisOrder)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)
            
            # If the channel axis is not last (or is missing),
            #  make sure the axes are re-ordered so that channel is last.
            if providerSlot.meta.axistags.index('c') != len( providerSlot.meta.axistags )-1:
                op5 = OpReorderAxes( parent=self )
                keys = list(providerSlot.meta.getTaggedShape().keys())
                try:
                    # Remove if present.
                    keys.remove('c')
                except ValueError:
                    pass
                # Append
                keys.append('c')
                op5.AxisOrder.setValue( "".join( keys ) )
                op5.Input.connect( providerSlot )
                providerSlot = op5.Output
                self._opReaders.append( op5 )
            
            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)
            
            # Set the image name and usage flag
            self.AllowLabels.setValue( datasetInfo.allowLabels )
            
            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname
            
            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)
        
        except:
            self.internalCleanup()
            raise
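
The inline axis-order guess above can be read as a standalone helper; a minimal sketch of the same heuristic (later revisions call a similar helper, get_default_axisordering, as seen in Code Example #7):

    def guess_axisorder(shape):
        """Guess an axis order from a plain array shape (2-D to 5-D)."""
        axisorders = {2: 'yx', 3: 'zyx', 4: 'zyxc', 5: 'tzyxc'}
        ndim = len(shape)
        assert 2 <= ndim <= 5, "only 2-D to 5-D data is supported"
        if ndim == 3 and shape[2] <= 4:
            # Special case: if the 3rd dim is small, assume it's 'c', not 'z'
            return 'yxc'
        return axisorders[ndim]

    print(guess_axisorder((100, 100, 3)))   # -> 'yxc'
    print(guess_axisorder((10, 100, 100)))  # -> 'zyx'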
Code Example #7
File: opDataSelection.py  Project: jj-u/ilastik
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (
                datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value

            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
            elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
                preloaded_array = datasetInfo.preloaded_array
                assert preloaded_array is not None
                if not hasattr(preloaded_array, 'axistags'):
                    axisorder = get_default_axisordering(preloaded_array.shape)
                    preloaded_array = vigra.taggedView(preloaded_array,
                                                       axisorder)

                opReader = OpArrayPiper(parent=self)
                opReader.Input.setValue(preloaded_array)
                providerSlot = opReader.Output
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue(datasetInfo.subvolume_roi)
                opReader.WorkingDirectory.setValue(self.WorkingDirectory.value)
                opReader.SequenceAxis.setValue(datasetInfo.sequenceAxis)
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
            self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            metadata = {}
            metadata['display_mode'] = datasetInfo.display_mode
            role_name = self.RoleName.value
            num_channels = providerSlot.meta.getTaggedShape().get('c', 0)
            if num_channels > 1:
                metadata['channel_names'] = [
                    "{}-{}".format(role_name, i) for i in range(num_channels)
                ]
            else:
                metadata['channel_names'] = [role_name]

            if datasetInfo.drange is not None:
                metadata['drange'] = datasetInfo.drange
            elif providerSlot.meta.dtype == numpy.uint8:
                # SPECIAL case for uint8 data: Provide a default drange.
                # The user can always override this herself if she wants.
                metadata['drange'] = (0, 255)
            if datasetInfo.normalizeDisplay is not None:
                metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
            if datasetInfo.axistags is not None:
                if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                    ts = providerSlot.meta.getTaggedShape()
                    if 'c' in ts and 'c' not in datasetInfo.axistags and len(
                            datasetInfo.axistags) + 1 == len(ts):
                        # provider has no channel axis, but template has => add channel axis to provider
                        # fixme: Optimize the axistag guess in BatchProcessingApplet instead of hoping for the best here
                        metadata['axistags'] = vigra.defaultAxistags(
                            ''.join(datasetInfo.axistags.keys()) + 'c')
                    else:
                        # This usually only happens when we copied a DatasetInfo from another lane,
                        # and used it as a 'template' to initialize this lane.
                        # This happens in the BatchProcessingApplet when it attempts to guess the axistags of
                        # batch images based on the axistags chosen by the user in the interactive images.
                        # If the interactive image tags don't make sense for the batch image, you get this error.
                        raise Exception(
                            "Your dataset's provided axistags ({}) do not have the "
                            "correct dimensionality for your dataset, which has {} dimensions."
                            .format(
                                "".join(tag.key
                                        for tag in datasetInfo.axistags),
                                len(providerSlot.meta.shape)))
                else:
                    metadata['axistags'] = datasetInfo.axistags
            if datasetInfo.original_axistags is not None:
                metadata['original_axistags'] = datasetInfo.original_axistags

            if datasetInfo.subvolume_roi is not None:
                metadata['subvolume_roi'] = datasetInfo.subvolume_roi

                # FIXME: We are overwriting the axistags metadata to intentionally allow
                #        the user to change our interpretation of which axis is which.
                #        That's okay, but technically there's a special corner case if
                #        the user redefines the channel axis index.
                #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.

            opMetadataInjector = OpMetadataInjector(parent=self)
            opMetadataInjector.Input.connect(providerSlot)
            opMetadataInjector.Metadata.setValue(metadata)
            providerSlot = opMetadataInjector.Output
            self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            # make sure that x and y axes are present in the selected axis order
            if 'x' not in providerSlot.meta.axistags or 'y' not in providerSlot.meta.axistags:
                raise DatasetConstraintError(
                    "DataSelection",
                    "Data must always have at leaset the axes x and y for ilastik to work."
                )

            if self.forceAxisOrder:
                assert isinstance(self.forceAxisOrder, list), \
                    "forceAxisOrder should be a *list* of preferred axis orders"

                # Before we re-order, make sure no non-singleton
                #  axes would be dropped by the forced order.
                tagged_provider_shape = providerSlot.meta.getTaggedShape()
                minimal_axes = {
                    k for k, v in tagged_provider_shape.items() if v > 1
                }

                # Pick the shortest of the possible 'forced' orders that
                # still contains all the axes of the original dataset.
                candidate_orders = [
                    order for order in self.forceAxisOrder
                    if minimal_axes.issubset(set(order))
                ]

                if len(candidate_orders) == 0:
                    msg = "The axes of your dataset ({}) are not compatible with any of the allowed"\
                          " axis configurations used by this workflow ({}). Please fix them."\
                          .format(providerSlot.meta.getAxisKeys(), self.forceAxisOrder)
                    raise DatasetConstraintError("DataSelection", msg)

                output_order = min(candidate_orders, key=len)  # the shortest one
                output_order = "".join(output_order)

                op5 = OpReorderAxes(parent=self)
                op5.AxisOrder.setValue(output_order)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # If the channel axis is missing, add it as last axis
            if 'c' not in providerSlot.meta.axistags:
                op5 = OpReorderAxes(parent=self)
                keys = providerSlot.meta.getAxisKeys()

                # Append
                keys.append('c')
                op5.AxisOrder.setValue("".join(keys))
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)

            self.AllowLabels.setValue(datasetInfo.allowLabels)

            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname

            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)

        except:
            self.internalCleanup()
            raise
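
The forceAxisOrder handling above picks the shortest allowed order that still covers every non-singleton axis of the dataset; a minimal sketch of that selection with hypothetical sample values:

    tagged_provider_shape = {'z': 1, 'y': 512, 'x': 512, 'c': 3}
    force_axis_order = ['tzyxc', 'zyxc', 'yxc']

    minimal_axes = {k for k, v in tagged_provider_shape.items() if v > 1}
    candidate_orders = [order for order in force_axis_order
                        if minimal_axes.issubset(set(order))]
    assert candidate_orders, "axes incompatible with all allowed orders"

    output_order = min(candidate_orders, key=len)  # the shortest one
    print(output_order)  # -> 'yxc'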
Code Example #8
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (
                datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value

            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
                self._opReaders.append(opReader)
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                opReader.WorkingDirectory.setValue(self.WorkingDirectory.value)
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
                self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            if datasetInfo.normalizeDisplay is not None or \
               datasetInfo.drange is not None or \
               datasetInfo.axistags is not None:
                metadata = {}
                if datasetInfo.drange is not None:
                    metadata['drange'] = datasetInfo.drange
                if datasetInfo.normalizeDisplay is not None:
                    metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
                if datasetInfo.axistags is not None:
                    metadata['axistags'] = datasetInfo.axistags
                opMetadataInjector = OpMetadataInjector(parent=self)
                opMetadataInjector.Input.connect(providerSlot)
                opMetadataInjector.Metadata.setValue(metadata)
                providerSlot = opMetadataInjector.Output
                self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            if self.force5d:
                op5 = OpReorderAxes(parent=self)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # If there is no channel axis, use an OpReorderAxes to append one.
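            # (vigra's AxisTags.index() returns len(axistags) when the key is
            #  absent, so '>=' here detects a missing channel axis.)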
            if providerSlot.meta.axistags.index('c') >= len(
                    providerSlot.meta.axistags):
                op5 = OpReorderAxes(parent=self)
                providerKeys = "".join(
                    providerSlot.meta.getTaggedShape().keys())
                op5.AxisOrder.setValue(providerKeys + 'c')
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # Most workflows can't handle replacing a dataset with one of a different dimensionality.
            # We guard against that by checking for errors NOW, before connecting our Image output,
            #  which is connected to the rest of the workflow.
            new_axiskeys = "".join(providerSlot.meta.getAxisKeys())
            if self._previous_output_axiskeys is not None and len(
                    new_axiskeys) != len(self._previous_output_axiskeys):
                msg =  "You can't replace an existing dataset with one of a different dimensionality.\n"\
                       "Your existing dataset was {}D ({}), but the new dataset is {}D ({}).\n"\
                       "Your original dataset entry has been reset.  "\
                       "Please remove it and then add your new dataset."\
                       "".format( len(self._previous_output_axiskeys), self._previous_output_axiskeys,
                                  len(new_axiskeys), new_axiskeys )
                raise OpDataSelection.InvalidDimensionalityError(msg)

            self._previous_output_axiskeys = new_axiskeys

            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)

            # Set the image name and usage flag
            self.AllowLabels.setValue(datasetInfo.allowLabels)

            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)

        except:
            self.internalCleanup()
            raise
Code Example #9
    def setupOutputs(self):
        self.internalCleanup()
        datasetInfo = self.Dataset.value

        try:
            # Data only comes from the project file if the user said so AND it exists in the project
            datasetInProject = (
                datasetInfo.location == DatasetInfo.Location.ProjectInternal)
            datasetInProject &= self.ProjectFile.ready()
            if datasetInProject:
                internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
                datasetInProject &= internalPath in self.ProjectFile.value

            # If we should find the data in the project file, use a dataset reader
            if datasetInProject:
                opReader = OpStreamingHdf5Reader(parent=self)
                opReader.Hdf5File.setValue(self.ProjectFile.value)
                opReader.InternalPath.setValue(internalPath)
                providerSlot = opReader.OutputImage
                self._opReaders.append(opReader)
            else:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue(datasetInfo.subvolume_roi)
                opReader.WorkingDirectory.setValue(self.WorkingDirectory.value)
                opReader.FilePath.setValue(datasetInfo.filePath)
                providerSlot = opReader.Output
                self._opReaders.append(opReader)

            # Inject metadata if the dataset info specified any.
            # Also, inject if the dtype is uint8, which we can reasonably assume has drange (0,255)
            if datasetInfo.normalizeDisplay is not None or \
               datasetInfo.drange is not None or \
               datasetInfo.axistags is not None or \
               (providerSlot.meta.drange is None and providerSlot.meta.dtype == numpy.uint8):
                metadata = {}
                if datasetInfo.drange is not None:
                    metadata['drange'] = datasetInfo.drange
                elif providerSlot.meta.dtype == numpy.uint8:
                    # SPECIAL case for uint8 data: Provide a default drange.
                    # The user can always override this herself if she wants.
                    metadata['drange'] = (0, 255)
                if datasetInfo.normalizeDisplay is not None:
                    metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
                if datasetInfo.axistags is not None:
                    if len(datasetInfo.axistags) != len(
                            providerSlot.meta.shape):
                        raise Exception(
                            "Your dataset's provided axistags ({}) do not have the "
                            "correct dimensionality for your dataset, which has {} dimensions."
                            .format(
                                "".join(tag.key
                                        for tag in datasetInfo.axistags),
                                len(providerSlot.meta.shape)))
                    metadata['axistags'] = datasetInfo.axistags
                if datasetInfo.subvolume_roi is not None:
                    metadata['subvolume_roi'] = datasetInfo.subvolume_roi

                    # FIXME: We are overwriting the axistags metadata to intentionally allow
                    #        the user to change our interpretation of which axis is which.
                    #        That's okay, but technically there's a special corner case if
                    #        the user redefines the channel axis index.
                    #        Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
                    #        For most use-cases, that won't really matter, which is why I'm not worrying about it right now.

                opMetadataInjector = OpMetadataInjector(parent=self)
                opMetadataInjector.Input.connect(providerSlot)
                opMetadataInjector.Metadata.setValue(metadata)
                providerSlot = opMetadataInjector.Output
                self._opReaders.append(opMetadataInjector)

            self._NonTransposedImage.connect(providerSlot)

            if self.force5d:
                op5 = OpReorderAxes(parent=self)
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # If the channel axis is not last (or is missing),
            #  make sure the axes are re-ordered so that channel is last.
            if providerSlot.meta.axistags.index('c') != len(
                    providerSlot.meta.axistags) - 1:
                op5 = OpReorderAxes(parent=self)
                keys = list(providerSlot.meta.getTaggedShape().keys())
                try:
                    # Remove if present.
                    keys.remove('c')
                except ValueError:
                    pass
                # Append
                keys.append('c')
                op5.AxisOrder.setValue("".join(keys))
                op5.Input.connect(providerSlot)
                providerSlot = op5.Output
                self._opReaders.append(op5)

            # Connect our external outputs to the internal operators we chose
            self.Image.connect(providerSlot)

            # Set the image name and usage flag
            self.AllowLabels.setValue(datasetInfo.allowLabels)

            # If the reading operator provides a nickname, use it.
            if self.Image.meta.nickname is not None:
                datasetInfo.nickname = self.Image.meta.nickname

            imageName = datasetInfo.nickname
            if imageName == "":
                imageName = datasetInfo.filePath
            self.ImageName.setValue(imageName)

        except:
            self.internalCleanup()
            raise
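
A closing note on the channel-last reordering used in Code Examples #6 and #9: in Python 3, getTaggedShape().keys() returns a dict view, so it must be copied to a list before calling remove() or append(). A minimal sketch with a hypothetical tagged shape:

    tagged_shape = {'t': 1, 'z': 10, 'y': 100, 'x': 100, 'c': 2}

    keys = list(tagged_shape.keys())  # copy: dict views have no remove()
    if 'c' in keys:
        keys.remove('c')
    keys.append('c')
    print("".join(keys))  # -> 'tzyxc'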