def setupOutputs(self):
    """Configure the Output slot by memory-mapping the raw binary file named in FilePath.

    The dataset shape is inferred from the numeric fields of the filename
    (split on '.' and '-'); the dtype is inferred from an explicit dtype name
    embedded in the filename, defaulting to uint8.

    Raises:
        OpRawBinaryFileReader.DatasetReadError: if the filename does not
            encode a valid 3- to 5-dimensional shape, or if the file cannot
            be memory-mapped with the inferred shape/dtype.
    """
    self._memmap = None  # Dropping the old memmap closes the previously opened file
    filepath = self.FilePath.value
    filename = os.path.split(filepath)[1]

    # Infer the dimensions by parsing the filename.
    # We split on . and - characters; every purely numeric field becomes one axis length.
    # (Raw string for the regex: '\.' in a plain string is an invalid escape.)
    shape = ()
    for field in re.split(r"\.|-", filename):
        try:
            shape += (int(field),)
        except ValueError:
            pass  # Non-numeric fields (basename, extension, dtype name) are ignored
    if not (3 <= len(shape) <= 5):
        raise OpRawBinaryFileReader.DatasetReadError(
            "Binary filename does not include a valid shape: {}".format(filename))

    # Uint8 by default, but search for an explicit type in the filename.
    # NOTE: the order matters -- e.g. 'uint16' must be tested before 'int16',
    # because 'int16' is a substring of 'uint16'.
    dtype = numpy.uint8
    for d in 'uint8 uint16 uint32 uint64 int8 int16 int32 int64 float32 float64'.split():
        if d in filename:
            dtype = numpy.dtype(d).type
            break

    try:
        self._memmap = numpy.memmap(filepath, dtype=dtype, shape=shape, mode='r')
    except Exception as e:
        # numpy.memmap may raise OSError (unreadable file) or ValueError
        # (file size does not match shape*dtype); either way the dataset is unusable.
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate.
        raise OpRawBinaryFileReader.DatasetReadError(
            "Unable to open numpy dataset: {}".format(filepath)) from e

    axisorder = get_default_axisordering(shape)
    self.Output.meta.dtype = dtype
    self.Output.meta.axistags = vigra.defaultAxistags(axisorder)
    self.Output.meta.shape = shape
def setupOutputs(self):
    """Configure OutputImage metadata (dtype, shape, axistags, drange, display_mode,
    chunking hints) from the dataset at InternalPath inside the open H5/N5 file,
    without reading any pixel data.

    Raises:
        OpStreamingH5N5Reader.DatasetReadError: if InternalPath is not in the file.
    """
    # Read the dataset meta-info from the HDF5 dataset
    self._h5N5File = self.H5N5File.value
    internalPath = self.InternalPath.value
    if internalPath not in self._h5N5File:
        raise OpStreamingH5N5Reader.DatasetReadError(internalPath)
    dataset = self._h5N5File[internalPath]
    try:
        # Read the axistags property without actually importing the data
        # Throws KeyError if 'axistags' can't be found
        axistagsJson = self._h5N5File[internalPath].attrs["axistags"]
        axistags = vigra.AxisTags.fromJSON(axistagsJson)
        axisorder = "".join(tag.key for tag in axistags)
        if "?" in axisorder:
            # Stored tags contain an unknown axis key: treat them as unusable
            # and fall through to the fallback path below.
            raise KeyError("?")
    except KeyError:
        # No axistags found.
        if "axes" in dataset.attrs:
            # Presumably an N5-style 'axes' attribute stored in reverse order,
            # hence the [::-1] -- TODO(review): confirm against the writer side.
            axisorder = "".join(dataset.attrs["axes"][::-1]).lower()
        else:
            axisorder = get_default_axisordering(dataset.shape)
        axistags = vigra.defaultAxistags(str(axisorder))
    assert len(axistags) == len(
        dataset.shape
    ), f"Mismatch between shape {dataset.shape} and axisorder {axisorder}"
    # Configure our slot meta-info
    self.OutputImage.meta.dtype = dataset.dtype.type
    self.OutputImage.meta.shape = dataset.shape
    self.OutputImage.meta.axistags = axistags
    # If the dataset specifies a datarange, add it to the slot metadata
    if "drange" in self._h5N5File[internalPath].attrs:
        self.OutputImage.meta.drange = tuple(
            self._h5N5File[internalPath].attrs["drange"])
    # Same for display_mode
    if "display_mode" in self._h5N5File[internalPath].attrs:
        self.OutputImage.meta.display_mode = str(
            self._h5N5File[internalPath].attrs["display_mode"])
    total_volume = numpy.prod(
        numpy.array(self._h5N5File[internalPath].shape))
    chunks = self._h5N5File[internalPath].chunks
    # Unchunked datasets above ~1e8 voxels are flagged as inefficient for 3D access.
    if not chunks and total_volume > 1e8:
        self.OutputImage.meta.inefficient_format = True
        logger.warning(
            f"This dataset ({self._h5N5File.filename}{internalPath}) is NOT chunked. "
            f"Performance for 3D access patterns will be bad!")
    if chunks:
        self.OutputImage.meta.ideal_blockshape = chunks
def __init__(self, *, preloaded_array: numpy.ndarray, axistags: AxisTags = None, nickname: str = "", **info_kwargs):
    """Describe a dataset backed by an in-memory array.

    The array is wrapped as a tagged view using the supplied axistags, or a
    default axis ordering guessed from its shape when none are given.
    A nickname is derived from the array's dtype if the caller supplies none.
    """
    tags = axistags or get_default_axisordering(preloaded_array.shape)
    self.preloaded_array = vigra.taggedView(preloaded_array, tags)
    if not nickname:
        nickname = "preloaded-{}-array".format(self.preloaded_array.dtype.name)
    super().__init__(
        nickname=nickname,
        default_tags=self.preloaded_array.axistags,
        laneShape=preloaded_array.shape,
        laneDtype=preloaded_array.dtype,
        **info_kwargs,
    )
def from_h5_group(cls, data: h5py.Group, params: Dict = None):
    """Build an instance from an HDF5 group, filling in any missing params.

    Defaults are derived from the group's stored "shape" dataset:
    laneShape from the shape itself, default_tags from the guessed axis
    ordering, and laneDtype falling back to uint8.
    """
    stored_shape = tuple(data["shape"])
    params = params or {}
    # setdefault leaves caller-supplied entries untouched, as before.
    params.setdefault("laneShape", stored_shape)
    params.setdefault("default_tags", get_default_axisordering(stored_shape))
    params.setdefault("laneDtype", numpy.uint8)
    return super().from_h5_group(data, params)
def setupOutputs(self):
    """Configure OutputImage metadata (dtype, shape, axistags, drange, display_mode,
    chunking hints) from the HDF5 dataset at InternalPath, without reading pixel data.

    Raises:
        OpStreamingHdf5Reader.DatasetReadError: if InternalPath is not in the file.
    """
    # Read the dataset meta-info from the HDF5 dataset
    self._hdf5File = self.Hdf5File.value
    internalPath = self.InternalPath.value
    if internalPath not in self._hdf5File:
        raise OpStreamingHdf5Reader.DatasetReadError(internalPath)
    dataset = self._hdf5File[internalPath]
    try:
        # Read the axistags property without actually importing the data
        axistagsJson = self._hdf5File[internalPath].attrs[
            'axistags']  # Throws KeyError if 'axistags' can't be found
        axistags = vigra.AxisTags.fromJSON(axistagsJson)
        axisorder = ''.join(tag.key for tag in axistags)
        if '?' in axisorder:
            # Stored tags contain an unknown axis key: treat them as unusable.
            raise KeyError('?')
    except KeyError:
        # No axistags found -- guess a default ordering from the dataset rank.
        axisorder = get_default_axisordering(dataset.shape)
        axistags = vigra.defaultAxistags(str(axisorder))
    assert len(axistags) == len(
        dataset.shape
    ),\
        "Mismatch between shape {} and axisorder {}".format(
            dataset.shape, axisorder)
    # Configure our slot meta-info
    self.OutputImage.meta.dtype = dataset.dtype.type
    self.OutputImage.meta.shape = dataset.shape
    self.OutputImage.meta.axistags = axistags
    # If the dataset specifies a datarange, add it to the slot metadata
    if 'drange' in self._hdf5File[internalPath].attrs:
        self.OutputImage.meta.drange = tuple(
            self._hdf5File[internalPath].attrs['drange'])
    # Same for display_mode
    if 'display_mode' in self._hdf5File[internalPath].attrs:
        self.OutputImage.meta.display_mode = str(
            self._hdf5File[internalPath].attrs['display_mode'])
    total_volume = numpy.prod(
        numpy.array(self._hdf5File[internalPath].shape))
    chunks = self._hdf5File[internalPath].chunks
    # Unchunked datasets above ~1e8 voxels are flagged as inefficient for 3D access.
    if not chunks and total_volume > 1e8:
        self.OutputImage.meta.inefficient_format = True
        logger.warning(
            "This dataset ({}{}) is NOT chunked. "
            "Performance for 3D access patterns will be bad!".format(
                self._hdf5File.filename, internalPath))
    if chunks:
        self.OutputImage.meta.ideal_blockshape = chunks
def setupOutputs(self):
    """
    Load the file specified via our input slot and present its data on the output slot.
    """
    # Close any memmap left over from a previous configuration before reopening.
    if self._memmapFile is not None:
        self._memmapFile.close()
    fileName = self.FileName.value
    try:
        # Load the file in read-only "memmap" mode to avoid reading it from disk all at once.
        rawLoadedNumpyObject = numpy.load(str(fileName), mmap_mode="r", allow_pickle=False)
    except (ValueError, IOError):
        raise OpNpyFileReader.DatasetReadError(
            "Unable to open numpy dataset: {}".format(fileName))
    # .npy files:
    if isinstance(rawLoadedNumpyObject, numpy.ndarray):
        rawNumpyArray = rawLoadedNumpyObject
        # Keep a handle on the private mmap so we can close it explicitly later.
        # NOTE(review): relies on numpy's private ndarray._mmap attribute.
        self._memmapFile = rawNumpyArray._mmap
    # .npz files:
    elif isinstance(rawLoadedNumpyObject, numpy.lib.npyio.NpzFile):
        if self.InternalPath.ready():
            try:
                rawNumpyArray = rawLoadedNumpyObject[
                    self.InternalPath.value]
            except KeyError:
                raise OpNpyFileReader.DatasetReadError(
                    "InternalPath not found in file. Unable to open numpy npz dataset: "
                    "{fileName}: {internalPath}".format(
                        fileName=fileName,
                        internalPath=self.InternalPath.value))
        else:
            raise OpNpyFileReader.DatasetReadError(
                "InternalPath not given. Unable to open numpy npz dataset: {fileName}"
                .format(fileName=fileName))
    # NOTE(review): if numpy.load returned neither an ndarray nor an NpzFile,
    # rawNumpyArray would be unbound here -- presumably that cannot happen.
    shape = rawNumpyArray.shape
    axisorder = get_default_axisordering(shape)
    # Cast to vigra array
    self._rawVigraArray = rawNumpyArray.view(vigra.VigraArray)
    self._rawVigraArray.axistags = vigra.defaultAxistags(axisorder)
    self.Output.meta.dtype = self._rawVigraArray.dtype.type
    self.Output.meta.axistags = copy.copy(self._rawVigraArray.axistags)
    self.Output.meta.shape = self._rawVigraArray.shape
def setupOutputs(self):
    """Configure OutputImage metadata (dtype, shape, axistags, drange, display_mode,
    chunking hints) from the dataset at InternalPath inside the open H5/N5 file,
    without reading any pixel data.

    Raises:
        OpStreamingH5N5Reader.DatasetReadError: if InternalPath is not in the file.
    """
    # Read the dataset meta-info from the HDF5 dataset
    self._h5N5File = self.H5N5File.value
    internalPath = self.InternalPath.value
    if internalPath not in self._h5N5File:
        raise OpStreamingH5N5Reader.DatasetReadError(internalPath)
    dataset = self._h5N5File[internalPath]
    try:
        # Read the axistags property without actually importing the data
        # Throws KeyError if 'axistags' can't be found
        axistagsJson = self._h5N5File[internalPath].attrs["axistags"]
        axistags = vigra.AxisTags.fromJSON(axistagsJson)
        axisorder = "".join(tag.key for tag in axistags)
        if "?" in axisorder:
            # Stored tags contain an unknown axis key: treat them as unusable.
            raise KeyError("?")
    except KeyError:
        # No axistags found -- guess a default ordering from the dataset rank.
        axisorder = get_default_axisordering(dataset.shape)
        axistags = vigra.defaultAxistags(str(axisorder))
    assert len(axistags) == len(dataset.shape), f"Mismatch between shape {dataset.shape} and axisorder {axisorder}"
    # Configure our slot meta-info
    self.OutputImage.meta.dtype = dataset.dtype.type
    self.OutputImage.meta.shape = dataset.shape
    self.OutputImage.meta.axistags = axistags
    # If the dataset specifies a datarange, add it to the slot metadata
    if "drange" in self._h5N5File[internalPath].attrs:
        self.OutputImage.meta.drange = tuple(self._h5N5File[internalPath].attrs["drange"])
    # Same for display_mode
    if "display_mode" in self._h5N5File[internalPath].attrs:
        self.OutputImage.meta.display_mode = str(self._h5N5File[internalPath].attrs["display_mode"])
    total_volume = numpy.prod(numpy.array(self._h5N5File[internalPath].shape))
    chunks = self._h5N5File[internalPath].chunks
    # Unchunked datasets above ~1e8 voxels are flagged as inefficient for 3D access.
    if not chunks and total_volume > 1e8:
        self.OutputImage.meta.inefficient_format = True
        logger.warning(
            f"This dataset ({self._h5N5File.filename}{internalPath}) is NOT chunked. "
            f"Performance for 3D access patterns will be bad!"
        )
    if chunks:
        self.OutputImage.meta.ideal_blockshape = chunks
def __init__(self, *, inner_path: str, project_file: h5py.File, nickname: str = "", **info_kwargs):
    """Describe a dataset stored inside the project file itself.

    Axistags are read from the dataset's 'axistags' attribute when present;
    otherwise a default ordering is guessed from the dataset's shape.
    The nickname falls back to the last component of the inner path.
    """
    self.inner_path = inner_path
    self.project_file = project_file
    self.dataset = project_file[inner_path]
    attrs = self.dataset.attrs
    if "axistags" in attrs:
        default_tags = vigra.AxisTags.fromJSON(attrs["axistags"])
    else:
        guessed_order = get_default_axisordering(self.dataset.shape)
        default_tags = vigra.defaultAxistags(guessed_order)
    fallback_nickname = os.path.split(self.inner_path)[-1]
    super().__init__(
        default_tags=default_tags,
        laneShape=self.dataset.shape,
        laneDtype=self.dataset.dtype,
        nickname=nickname or fallback_nickname,
        **info_kwargs,
    )
    self.legacy_datasetId = Path(inner_path).name
def setupOutputs(self):
    """
    Load the file specified via our input slot and present its data on the output slot.
    """
    # Close any memmap left over from a previous configuration before reopening.
    if self._memmapFile is not None:
        self._memmapFile.close()
    fileName = self.FileName.value
    try:
        # Load the file in read-only "memmap" mode to avoid reading it from disk all at once.
        rawLoadedNumpyObject = numpy.load(str(fileName), mmap_mode="r", allow_pickle=False)
    except (ValueError, IOError):
        raise OpNpyFileReader.DatasetReadError("Unable to open numpy dataset: {}".format(fileName))
    # .npy files:
    if isinstance(rawLoadedNumpyObject, numpy.ndarray):
        rawNumpyArray = rawLoadedNumpyObject
        # Keep a handle on the private mmap so we can close it explicitly later.
        # NOTE(review): relies on numpy's private ndarray._mmap attribute.
        self._memmapFile = rawNumpyArray._mmap
    # .npz files:
    elif isinstance(rawLoadedNumpyObject, numpy.lib.npyio.NpzFile):
        if self.InternalPath.ready():
            try:
                rawNumpyArray = rawLoadedNumpyObject[self.InternalPath.value]
            except KeyError:
                raise OpNpyFileReader.DatasetReadError(
                    "InternalPath not found in file. Unable to open numpy npz dataset: "
                    "{fileName}: {internalPath}".format(fileName=fileName, internalPath=self.InternalPath.value)
                )
        else:
            raise OpNpyFileReader.DatasetReadError(
                "InternalPath not given. Unable to open numpy npz dataset: "
                "{fileName}".format(fileName=fileName)
            )
    # NOTE(review): if numpy.load returned neither an ndarray nor an NpzFile,
    # rawNumpyArray would be unbound here -- presumably that cannot happen.
    shape = rawNumpyArray.shape
    axisorder = get_default_axisordering(shape)
    # Cast to vigra array
    self._rawVigraArray = rawNumpyArray.view(vigra.VigraArray)
    self._rawVigraArray.axistags = vigra.defaultAxistags(axisorder)
    self.Output.meta.dtype = self._rawVigraArray.dtype.type
    self.Output.meta.axistags = copy.copy(self._rawVigraArray.axistags)
    self.Output.meta.shape = self._rawVigraArray.shape
def setupOutputs(self):
    """Read TIFF metadata (shape, axes, dtype, page layout) for the file named by
    Filepath and configure the Output slot accordingly.  Pixel data is not loaded.

    Raises RuntimeError for multi-series files and axis layouts we cannot interpret.
    """
    self._filepath = self.Filepath.value
    with tifffile.TiffFile(self._filepath) as tiff_file:
        series = tiff_file.series[0]
        if len(tiff_file.series) > 1:
            raise RuntimeError(
                "Don't know how to read TIFF files with more than one image series.\n"
                "(Your image has {} series".format(len(tiff_file.series)))
        axes = series.axes
        shape = series.shape
        pages = series.pages
        first_page = pages[0]
        dtype_code = first_page.dtype
        if first_page.is_palette:
            # For now, we don't support colormaps.
            # Drop the (last) channel axis
            # (Yes, there can be more than one :-/)
            last_C_pos = axes.rfind('C')
            assert axes[last_C_pos] == 'C'
            axes = axes[:last_C_pos] + axes[last_C_pos + 1:]
            shape = shape[:last_C_pos] + shape[last_C_pos + 1:]
            # first_page.dtype refers to the type AFTER colormapping.
            # We want the original type.
            key = (first_page.sample_format, first_page.bits_per_sample)
            dtype_code = self._dtype = tifffile.TIFF_SAMPLE_DTYPES.get(
                key, None)
        # From the tifffile.TiffPage code:
        # -----
        # The internal, normalized '_shape' attribute is 6 dimensional:
        #
        # 0. number planes (stk)
        # 1. planar samples_per_pixel
        # 2. image_depth Z (sgi)
        # 3. image_length Y
        # 4. image_width X
        # 5. contig samples_per_pixel
        # NOTE(review): _shape is a private tifffile attribute.
        (N, P, D, Y, X, S) = first_page._shape
        assert N == 1, "Don't know how to handle any number of planes except 1 (per page)"
        assert P == 1, "Don't know how to handle any number of planar samples per pixel except 1 (per page)"
        assert D == 1, "Don't know how to handle any image depth except 1"
        if S == 1:
            self._page_shape = (Y, X)
            self._page_axes = 'yx'
        else:
            assert shape[-3:] == (Y, X, S)
            self._page_shape = (Y, X, S)
            self._page_axes = 'yxc'
            assert 'C' not in axes, \
                "If channels are in separate pages, then each page can't have multiple channels itself.\n"\
                "(Don't know how to weave multi-channel pages together.)"
        self._non_page_shape = shape[:-len(self._page_shape)]
        assert shape == self._non_page_shape + self._page_shape
        assert self._non_page_shape or len(pages) == 1
        # Map tifffile axis letters onto vigra-style keys ('S' samples -> 'c' channel).
        axes = axes.lower().replace('s', 'c')
        if 'i' in axes:
            # 'I' (generic sequence) axis: reinterpret as the first of t/z/c not already present.
            for k in 'tzc':
                if k not in axes:
                    axes = axes.replace('i', k)
                    break
            if 'i' in axes:
                raise RuntimeError(
                    "Image has an 'I' axis, and I don't know what it represents. "
                    "(Separate T,Z,C axes already exist.)")
        if 'q' in axes:
            # in case of unknown axes, assume default axis order TZYXC
            if not all(elem == 'q' for elem in axes):
                raise RuntimeError(
                    "Image has SOME unknown ('Q') axes, which is currently not supported. "
                )
            logger.warning(
                'Unknown axistags detected - assuming default axis order.')
            axes = get_default_axisordering(shape)
        self.Output.meta.shape = shape
        self.Output.meta.axistags = vigra.defaultAxistags(str(axes))
        self.Output.meta.dtype = numpy.dtype(dtype_code).type
        self.Output.meta.ideal_blockshape = (
            (1, ) * len(self._non_page_shape)) + self._page_shape
def testValidShapes(self):
    """get_default_axisordering maps 2D-5D shapes onto yx/zyx/zyxc/tzyxc."""
    expected_by_shape = {
        (10, 20): "yx",
        (10, 20, 30): "zyx",
        (10, 20, 30, 3): "zyxc",
        (5, 10, 20, 30, 3): "tzyxc",
    }
    for shape, expected_axes in expected_by_shape.items():
        assert helpers.get_default_axisordering(shape) == expected_axes
def setupOutputs(self):
    """Wire up a reader pipeline for the lane described by the Dataset slot.

    Chooses a data provider (project-internal h5/n5 dataset, preloaded array,
    filesystem reader, or zero-filled placeholder for missing data), injects
    user-supplied metadata, reorders axes, ensures a channel axis exists, and
    connects the Image / ImageName / AllowLabels outputs.  On any failure the
    internal operators built so far are cleaned up and the exception re-raised.
    """
    self.internalCleanup()
    datasetInfo = self.Dataset.value
    try:
        # Data only comes from the project file if the user said so AND it exists in the project
        datasetInProject = (
            datasetInfo.location == DatasetInfo.Location.ProjectInternal)
        datasetInProject &= self.ProjectFile.ready()
        if datasetInProject:
            internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
            datasetInProject &= internalPath in self.ProjectFile.value
        # If we should find the data in the project file, use a dataset reader
        if datasetInProject:
            opReader = OpStreamingH5N5Reader(parent=self)
            opReader.H5N5File.setValue(self.ProjectFile.value)
            opReader.InternalPath.setValue(internalPath)
            providerSlot = opReader.OutputImage
        elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
            preloaded_array = datasetInfo.preloaded_array
            assert preloaded_array is not None
            if not hasattr(preloaded_array, 'axistags'):
                # Untagged array: guess an axis ordering from its rank.
                axisorder = get_default_axisordering(preloaded_array.shape)
                preloaded_array = vigra.taggedView(preloaded_array, axisorder)
            opReader = OpArrayPiper(parent=self)
            opReader.Input.setValue(preloaded_array)
            providerSlot = opReader.Output
        else:
            if datasetInfo.realDataSource:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue(
                        datasetInfo.subvolume_roi)
                opReader.WorkingDirectory.setValue(
                    self.WorkingDirectory.value)
                opReader.SequenceAxis.setValue(datasetInfo.sequenceAxis)
                opReader.FilePath.setValue(datasetInfo.filePath)
            else:
                # Use fake reader: allows to run the project in a headless
                # mode without the raw data
                opReader = OpZeroDefault(parent=self)
                opReader.MetaInput.meta = MetaDict(
                    shape=datasetInfo.laneShape,
                    dtype=datasetInfo.laneDtype,
                    drange=datasetInfo.drange,
                    axistags=datasetInfo.axistags)
                opReader.MetaInput.setValue(
                    numpy.zeros(datasetInfo.laneShape,
                                dtype=datasetInfo.laneDtype))
            providerSlot = opReader.Output
        self._opReaders.append(opReader)
        # Inject metadata if the dataset info specified any.
        # Also, inject if dtype is uint8, which we can reasonably assume has drange (0,255)
        metadata = {}
        metadata['display_mode'] = datasetInfo.display_mode
        role_name = self.RoleName.value
        if 'c' not in providerSlot.meta.getTaggedShape():
            num_channels = 0
        else:
            num_channels = providerSlot.meta.getTaggedShape()['c']
        if num_channels > 1:
            metadata['channel_names'] = [
                "{}-{}".format(role_name, i) for i in range(num_channels)
            ]
        else:
            metadata['channel_names'] = [role_name]
        if datasetInfo.drange is not None:
            metadata['drange'] = datasetInfo.drange
        elif providerSlot.meta.dtype == numpy.uint8:
            # SPECIAL case for uint8 data: Provide a default drange.
            # The user can always override this herself if she wants.
            metadata['drange'] = (0, 255)
        if datasetInfo.normalizeDisplay is not None:
            metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
        if datasetInfo.axistags is not None:
            if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                ts = providerSlot.meta.getTaggedShape()
                if 'c' in ts and 'c' not in datasetInfo.axistags and len(
                        datasetInfo.axistags) + 1 == len(ts):
                    # provider has no channel axis, but template has => add channel axis to provider
                    # fixme: Optimize the axistag guess in BatchProcessingApplet instead of hoping for the best here
                    metadata['axistags'] = vigra.defaultAxistags(
                        ''.join(datasetInfo.axistags.keys()) + 'c')
                else:
                    # This usually only happens when we copied a DatasetInfo from another lane,
                    # and used it as a 'template' to initialize this lane.
                    # This happens in the BatchProcessingApplet when it attempts to guess the axistags of
                    # batch images based on the axistags chosen by the user in the interactive images.
                    # If the interactive image tags don't make sense for the batch image, you get this error.
                    raise Exception(
                        "Your dataset's provided axistags ({}) do not have the "
                        "correct dimensionality for your dataset, which has {} dimensions."
                        .format(
                            "".join(tag.key
                                    for tag in datasetInfo.axistags),
                            len(providerSlot.meta.shape)))
            else:
                metadata['axistags'] = datasetInfo.axistags
        if datasetInfo.original_axistags is not None:
            metadata['original_axistags'] = datasetInfo.original_axistags
        if datasetInfo.subvolume_roi is not None:
            metadata['subvolume_roi'] = datasetInfo.subvolume_roi
        # FIXME: We are overwriting the axistags metadata to intentionally allow
        # the user to change our interpretation of which axis is which.
        # That's okay, but technically there's a special corner case if
        # the user redefines the channel axis index.
        # Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
        # For most use-cases, that won't really matter, which is why I'm not worrying about it right now.
        opMetadataInjector = OpMetadataInjector(parent=self)
        opMetadataInjector.Input.connect(providerSlot)
        opMetadataInjector.Metadata.setValue(metadata)
        providerSlot = opMetadataInjector.Output
        self._opReaders.append(opMetadataInjector)
        self._NonTransposedImage.connect(providerSlot)
        # make sure that x and y axes are present in the selected axis order
        # NOTE(review): typo "leaset" in the user-facing message below (left as-is here).
        if 'x' not in providerSlot.meta.axistags or 'y' not in providerSlot.meta.axistags:
            raise DatasetConstraintError(
                "DataSelection",
                "Data must always have at leaset the axes x and y for ilastik to work."
            )
        if self.forceAxisOrder:
            assert isinstance(self.forceAxisOrder, list), \
                "forceAxisOrder should be a *list* of preferred axis orders"
            # Before we re-order, make sure no non-singleton
            # axes would be dropped by the forced order.
            tagged_provider_shape = providerSlot.meta.getTaggedShape()
            minimal_axes = [
                k_v for k_v in list(tagged_provider_shape.items())
                if k_v[1] > 1
            ]
            minimal_axes = set(k for k, v in minimal_axes)
            # Pick the shortest of the possible 'forced' orders that
            # still contains all the axes of the original dataset.
            candidate_orders = list(self.forceAxisOrder)
            candidate_orders = [
                order for order in candidate_orders
                if minimal_axes.issubset(set(order))
            ]
            if len(candidate_orders) == 0:
                msg = "The axes of your dataset ({}) are not compatible with any of the allowed"\
                      " axis configurations used by this workflow ({}). Please fix them."\
                      .format(providerSlot.meta.getAxisKeys(), self.forceAxisOrder)
                raise DatasetConstraintError("DataSelection", msg)
            output_order = sorted(candidate_orders, key=len)[0]  # the shortest one
            output_order = "".join(output_order)
        else:
            # No forced axisorder is supplied. Use original axisorder as
            # output order: it is assumed by the export-applet, that the
            # an OpReorderAxes operator is added in the beginning
            output_order = "".join(
                [x for x in providerSlot.meta.axistags.keys()])
        op5 = OpReorderAxes(parent=self)
        op5.AxisOrder.setValue(output_order)
        op5.Input.connect(providerSlot)
        providerSlot = op5.Output
        self._opReaders.append(op5)
        # If the channel axis is missing, add it as last axis
        if 'c' not in providerSlot.meta.axistags:
            op5 = OpReorderAxes(parent=self)
            keys = providerSlot.meta.getAxisKeys()
            # Append
            keys.append('c')
            op5.AxisOrder.setValue("".join(keys))
            op5.Input.connect(providerSlot)
            providerSlot = op5.Output
            self._opReaders.append(op5)
        # Connect our external outputs to the internal operators we chose
        self.Image.connect(providerSlot)
        self.AllowLabels.setValue(datasetInfo.allowLabels)
        # If the reading operator provides a nickname, use it.
        if self.Image.meta.nickname is not None:
            datasetInfo.nickname = self.Image.meta.nickname
        imageName = datasetInfo.nickname
        if imageName == "":
            imageName = datasetInfo.filePath
        self.ImageName.setValue(imageName)
    except:
        # Bare except is deliberate here: clean up partial state, then re-raise
        # whatever happened (including non-Exception signals).
        self.internalCleanup()
        raise
def testValidShapes(self):
    """get_default_axisordering maps 2D-5D shapes onto yx/zyx/zyxc/tzyxc."""
    shapes = [(10, 20), (10, 20, 30), (10, 20, 30, 3), (5, 10, 20, 30, 3)]
    orderings = ["yx", "zyx", "zyxc", "tzyxc"]
    for shape, expected in zip(shapes, orderings):
        actual = helpers.get_default_axisordering(shape)
        assert actual == expected
def setupOutputs(self):
    """Wire up a reader pipeline for the lane described by the Dataset slot.

    Chooses a data provider (project-internal h5/n5 dataset, preloaded array,
    filesystem reader, or zero-filled placeholder for missing data), injects
    user-supplied metadata, reorders axes, ensures a channel axis exists, and
    connects the Image / ImageName / AllowLabels outputs.  On any failure the
    internal operators built so far are cleaned up and the exception re-raised.
    """
    self.internalCleanup()
    datasetInfo = self.Dataset.value
    try:
        # Data only comes from the project file if the user said so AND it exists in the project
        datasetInProject = (datasetInfo.location == DatasetInfo.Location.ProjectInternal)
        datasetInProject &= self.ProjectFile.ready()
        if datasetInProject:
            internalPath = self.ProjectDataGroup.value + '/' + datasetInfo.datasetId
            datasetInProject &= internalPath in self.ProjectFile.value
        # If we should find the data in the project file, use a dataset reader
        if datasetInProject:
            opReader = OpStreamingH5N5Reader(parent=self)
            opReader.H5N5File.setValue(self.ProjectFile.value)
            opReader.InternalPath.setValue(internalPath)
            providerSlot = opReader.OutputImage
        elif datasetInfo.location == DatasetInfo.Location.PreloadedArray:
            preloaded_array = datasetInfo.preloaded_array
            assert preloaded_array is not None
            if not hasattr(preloaded_array, 'axistags'):
                # Untagged array: guess an axis ordering from its rank.
                axisorder = get_default_axisordering(preloaded_array.shape)
                preloaded_array = vigra.taggedView(preloaded_array, axisorder)
            opReader = OpArrayPiper(parent=self)
            opReader.Input.setValue(preloaded_array)
            providerSlot = opReader.Output
        else:
            if datasetInfo.realDataSource:
                # Use a normal (filesystem) reader
                opReader = OpInputDataReader(parent=self)
                if datasetInfo.subvolume_roi is not None:
                    opReader.SubVolumeRoi.setValue(datasetInfo.subvolume_roi)
                opReader.WorkingDirectory.setValue(self.WorkingDirectory.value)
                opReader.SequenceAxis.setValue(datasetInfo.sequenceAxis)
                opReader.FilePath.setValue(datasetInfo.filePath)
            else:
                # Use fake reader: allows to run the project in a headless
                # mode without the raw data
                opReader = OpZeroDefault(parent=self)
                opReader.MetaInput.meta = MetaDict(
                    shape=datasetInfo.laneShape,
                    dtype=datasetInfo.laneDtype,
                    drange=datasetInfo.drange,
                    axistags=datasetInfo.axistags)
                opReader.MetaInput.setValue(numpy.zeros(datasetInfo.laneShape,
                                                        dtype=datasetInfo.laneDtype))
            providerSlot = opReader.Output
        self._opReaders.append(opReader)
        # Inject metadata if the dataset info specified any.
        # Also, inject if dtype is uint8, which we can reasonably assume has drange (0,255)
        metadata = {}
        metadata['display_mode'] = datasetInfo.display_mode
        role_name = self.RoleName.value
        if 'c' not in providerSlot.meta.getTaggedShape():
            num_channels = 0
        else:
            num_channels = providerSlot.meta.getTaggedShape()['c']
        if num_channels > 1:
            metadata['channel_names'] = ["{}-{}".format(role_name, i) for i in range(num_channels)]
        else:
            metadata['channel_names'] = [role_name]
        if datasetInfo.drange is not None:
            metadata['drange'] = datasetInfo.drange
        elif providerSlot.meta.dtype == numpy.uint8:
            # SPECIAL case for uint8 data: Provide a default drange.
            # The user can always override this herself if she wants.
            metadata['drange'] = (0, 255)
        if datasetInfo.normalizeDisplay is not None:
            metadata['normalizeDisplay'] = datasetInfo.normalizeDisplay
        if datasetInfo.axistags is not None:
            if len(datasetInfo.axistags) != len(providerSlot.meta.shape):
                ts = providerSlot.meta.getTaggedShape()
                if 'c' in ts and 'c' not in datasetInfo.axistags and len(datasetInfo.axistags) + 1 == len(ts):
                    # provider has no channel axis, but template has => add channel axis to provider
                    # fixme: Optimize the axistag guess in BatchProcessingApplet instead of hoping for the best here
                    metadata['axistags'] = vigra.defaultAxistags(''.join(datasetInfo.axistags.keys()) + 'c')
                else:
                    # This usually only happens when we copied a DatasetInfo from another lane,
                    # and used it as a 'template' to initialize this lane.
                    # This happens in the BatchProcessingApplet when it attempts to guess the axistags of
                    # batch images based on the axistags chosen by the user in the interactive images.
                    # If the interactive image tags don't make sense for the batch image, you get this error.
                    raise Exception("Your dataset's provided axistags ({}) do not have the "
                                    "correct dimensionality for your dataset, which has {} dimensions."
                                    .format("".join(tag.key for tag in datasetInfo.axistags),
                                            len(providerSlot.meta.shape)))
            else:
                metadata['axistags'] = datasetInfo.axistags
        if datasetInfo.original_axistags is not None:
            metadata['original_axistags'] = datasetInfo.original_axistags
        if datasetInfo.subvolume_roi is not None:
            metadata['subvolume_roi'] = datasetInfo.subvolume_roi
        # FIXME: We are overwriting the axistags metadata to intentionally allow
        # the user to change our interpretation of which axis is which.
        # That's okay, but technically there's a special corner case if
        # the user redefines the channel axis index.
        # Technically, it invalidates the meaning of meta.ram_usage_per_requested_pixel.
        # For most use-cases, that won't really matter, which is why I'm not worrying about it right now.
        opMetadataInjector = OpMetadataInjector(parent=self)
        opMetadataInjector.Input.connect(providerSlot)
        opMetadataInjector.Metadata.setValue(metadata)
        providerSlot = opMetadataInjector.Output
        self._opReaders.append(opMetadataInjector)
        self._NonTransposedImage.connect(providerSlot)
        # make sure that x and y axes are present in the selected axis order
        # NOTE(review): typo "leaset" in the user-facing message below (left as-is here).
        if 'x' not in providerSlot.meta.axistags or 'y' not in providerSlot.meta.axistags:
            raise DatasetConstraintError("DataSelection",
                                         "Data must always have at leaset the axes x and y for ilastik to work.")
        if self.forceAxisOrder:
            assert isinstance(self.forceAxisOrder, list), \
                "forceAxisOrder should be a *list* of preferred axis orders"
            # Before we re-order, make sure no non-singleton
            # axes would be dropped by the forced order.
            tagged_provider_shape = providerSlot.meta.getTaggedShape()
            minimal_axes = [k_v for k_v in list(tagged_provider_shape.items()) if k_v[1] > 1]
            minimal_axes = set(k for k, v in minimal_axes)
            # Pick the shortest of the possible 'forced' orders that
            # still contains all the axes of the original dataset.
            candidate_orders = list(self.forceAxisOrder)
            candidate_orders = [order for order in candidate_orders if minimal_axes.issubset(set(order))]
            if len(candidate_orders) == 0:
                msg = "The axes of your dataset ({}) are not compatible with any of the allowed"\
                      " axis configurations used by this workflow ({}). Please fix them."\
                      .format(providerSlot.meta.getAxisKeys(), self.forceAxisOrder)
                raise DatasetConstraintError("DataSelection", msg)
            output_order = sorted(candidate_orders, key=len)[0]  # the shortest one
            output_order = "".join(output_order)
        else:
            # No forced axisorder is supplied. Use original axisorder as
            # output order: it is assumed by the export-applet, that the
            # an OpReorderAxes operator is added in the beginning
            output_order = "".join([x for x in providerSlot.meta.axistags.keys()])
        op5 = OpReorderAxes(parent=self)
        op5.AxisOrder.setValue(output_order)
        op5.Input.connect(providerSlot)
        providerSlot = op5.Output
        self._opReaders.append(op5)
        # If the channel axis is missing, add it as last axis
        if 'c' not in providerSlot.meta.axistags:
            op5 = OpReorderAxes(parent=self)
            keys = providerSlot.meta.getAxisKeys()
            # Append
            keys.append('c')
            op5.AxisOrder.setValue("".join(keys))
            op5.Input.connect(providerSlot)
            providerSlot = op5.Output
            self._opReaders.append(op5)
        # Connect our external outputs to the internal operators we chose
        self.Image.connect(providerSlot)
        self.AllowLabels.setValue(datasetInfo.allowLabels)
        # If the reading operator provides a nickname, use it.
        if self.Image.meta.nickname is not None:
            datasetInfo.nickname = self.Image.meta.nickname
        imageName = datasetInfo.nickname
        if imageName == "":
            imageName = datasetInfo.filePath
        self.ImageName.setValue(imageName)
    except:
        # Bare except is deliberate here: clean up partial state, then re-raise
        # whatever happened (including non-Exception signals).
        self.internalCleanup()
        raise
def setupOutputs(self):
    """Read TIFF metadata (shape, axes, dtype, page layout) for the file named by
    Filepath and configure the Output slot accordingly.  Pixel data is not loaded.

    Raises RuntimeError for multi-series files and axis layouts we cannot interpret.
    """
    self._filepath = self.Filepath.value
    with tifffile.TiffFile(self._filepath) as tiff_file:
        series = tiff_file.series[0]
        if len(tiff_file.series) > 1:
            raise RuntimeError(
                "Don't know how to read TIFF files with more than one image series.\n"
                "(Your image has {} series".format(len(tiff_file.series))
            )
        axes = series.axes
        shape = series.shape
        pages = series.pages
        first_page = pages[0]
        dtype_code = first_page.dtype
        if first_page.is_palette:
            # For now, we don't support colormaps.
            # Drop the (last) channel axis
            # (Yes, there can be more than one :-/)
            last_C_pos = axes.rfind("C")
            assert axes[last_C_pos] == "C"
            axes = axes[:last_C_pos] + axes[last_C_pos + 1 :]
            shape = shape[:last_C_pos] + shape[last_C_pos + 1 :]
            # first_page.dtype refers to the type AFTER colormapping.
            # We want the original type.
            key = (first_page.sample_format, first_page.bits_per_sample)
            dtype_code = self._dtype = tifffile.TIFF_SAMPLE_DTYPES.get(key, None)
        # From the tifffile.TiffPage code:
        # -----
        # The internal, normalized '_shape' attribute is 6 dimensional:
        #
        # 0. number planes (stk)
        # 1. planar samples_per_pixel
        # 2. image_depth Z (sgi)
        # 3. image_length Y
        # 4. image_width X
        # 5. contig samples_per_pixel
        # NOTE(review): _shape is a private tifffile attribute.
        (N, P, D, Y, X, S) = first_page._shape
        assert N == 1, "Don't know how to handle any number of planes except 1 (per page)"
        assert P == 1, "Don't know how to handle any number of planar samples per pixel except 1 (per page)"
        assert D == 1, "Don't know how to handle any image depth except 1"
        if S == 1:
            self._page_shape = (Y, X)
            self._page_axes = "yx"
        else:
            assert shape[-3:] == (Y, X, S)
            self._page_shape = (Y, X, S)
            self._page_axes = "yxc"
            assert "C" not in axes, (
                "If channels are in separate pages, then each page can't have multiple channels itself.\n"
                "(Don't know how to weave multi-channel pages together.)"
            )
        self._non_page_shape = shape[: -len(self._page_shape)]
        assert shape == self._non_page_shape + self._page_shape
        assert self._non_page_shape or len(pages) == 1
        # Map tifffile axis letters onto vigra-style keys ('S' samples -> 'c' channel).
        axes = axes.lower().replace("s", "c")
        if "i" in axes:
            # 'I' (generic sequence) axis: reinterpret as the first of t/z/c not already present.
            for k in "tzc":
                if k not in axes:
                    axes = axes.replace("i", k)
                    break
            if "i" in axes:
                raise RuntimeError(
                    "Image has an 'I' axis, and I don't know what it represents. "
                    "(Separate T,Z,C axes already exist.)"
                )
        if "q" in axes:
            # in case of unknown axes, assume default axis order TZYXC
            if not all(elem == "q" for elem in axes):
                raise RuntimeError("Image has SOME unknown ('Q') axes, which is currently not supported. ")
            logger.warning("Unknown axistags detected - assuming default axis order.")
            axes = get_default_axisordering(shape)
        self.Output.meta.shape = shape
        self.Output.meta.axistags = vigra.defaultAxistags(str(axes))
        self.Output.meta.dtype = numpy.dtype(dtype_code).type
        self.Output.meta.ideal_blockshape = ((1,) * len(self._non_page_shape)) + self._page_shape
def setupOutputs(self):
    """Read TIFF metadata (shape, axes, dtype, page layout) for the file named by
    Filepath and configure the Output slot accordingly.  Pixel data is not loaded.

    This variant targets the newer tifffile API (TIFF.PHOTOMETRIC enum and the
    public 5-tuple TiffPage.shaped attribute).

    Raises RuntimeError for multi-series files and axis layouts we cannot interpret.
    """
    self._filepath = self.Filepath.value
    with tifffile.TiffFile(self._filepath) as tiff_file:
        series = tiff_file.series[0]
        if len(tiff_file.series) > 1:
            raise RuntimeError(
                "Don't know how to read TIFF files with more than one image series.\n"
                "(Your image has {} series".format(len(tiff_file.series)))
        axes = series.axes
        shape = series.shape
        pages = series.pages
        first_page = pages[0]
        dtype_code = first_page.dtype
        if first_page.photometric == tifffile.TIFF.PHOTOMETRIC.PALETTE:
            # For now, we don't support colormaps.
            # Drop the (last) channel axis
            # (Yes, there can be more than one :-/)
            last_C_pos = axes.rfind("C")
            assert axes[last_C_pos] == "C"
            axes = axes[:last_C_pos] + axes[last_C_pos + 1:]
            shape = shape[:last_C_pos] + shape[last_C_pos + 1:]
            # first_page.dtype refers to the type AFTER colormapping.
            # We want the original type.
            key = (first_page.sample_format, first_page.bits_per_sample)
            dtype_code = self._dtype = tifffile.TIFF_SAMPLE_DTYPES.get(
                key, None)
        # From the tifffile.TiffPage code:
        # shaped : tuple of int
        #     Normalized 5-dimensional shape of the image in IFD:
        #     0 : separate samplesperpixel or 1.
        #     1 : imagedepth Z or 1.
        #     2 : imagelength Y.
        #     3 : imagewidth X.
        #     4 : contig samplesperpixel or 1.
        (P, D, Y, X, S) = first_page.shaped
        assert P == 1, "Don't know how to handle any number of planar samples per pixel except 1 (per page)"
        assert D == 1, "Don't know how to handle any image depth except 1"
        if S == 1:
            self._page_shape = (Y, X)
            self._page_axes = "yx"
        else:
            assert shape[-3:] == (Y, X, S)
            self._page_shape = (Y, X, S)
            self._page_axes = "yxc"
            assert "C" not in axes, (
                "If channels are in separate pages, then each page can't have multiple channels itself.\n"
                "(Don't know how to weave multi-channel pages together.)")
        self._non_page_shape = shape[:-len(self._page_shape)]
        assert shape == self._non_page_shape + self._page_shape
        assert self._non_page_shape or len(pages) == 1
        # Map tifffile axis letters onto vigra-style keys ('S' samples -> 'c' channel).
        axes = axes.lower().replace("s", "c")
        if "i" in axes:
            # 'I' (generic sequence) axis: reinterpret as the first of t/z/c not already present.
            for k in "tzc":
                if k not in axes:
                    axes = axes.replace("i", k)
                    break
            if "i" in axes:
                raise RuntimeError(
                    "Image has an 'I' axis, and I don't know what it represents. "
                    "(Separate T,Z,C axes already exist.)")
        if "q" in axes:
            # Unknown axes: fall back to a default ordering guessed from the rank.
            old_axes = axes
            axes = get_default_axisordering(shape)
            logger.warning(
                f"Unknown axistags detected - assuming default axis order. Guessed {axes} from {old_axes}."
            )
        self.Output.meta.shape = shape
        self.Output.meta.axistags = vigra.defaultAxistags(str(axes))
        self.Output.meta.dtype = numpy.dtype(dtype_code).type
        self.Output.meta.ideal_blockshape = (
            (1, ) * len(self._non_page_shape)) + self._page_shape