def make_tiff(array):
    tiff = io.BytesIO()
    with TiffWriter(tiff) as writer:
        writer.save(array)
    tiff.seek(0)
    return tiff.read()
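
A minimal usage sketch for the helper above (imports repeated here for completeness; the original snippet assumes them). The array content is a hypothetical placeholder:

import io
import numpy as np
from tifffile import TiffWriter

arr = np.zeros((64, 64), dtype=np.uint16)  # hypothetical test image
tiff_bytes = make_tiff(arr)
print(len(tiff_bytes))  # size of the in-memory TIFF stream
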
    def write_image_by_plane(
        self,
        image_name: str,
        output_dir: Union[Path, str] = "",
        write_pyramid: bool = True,
        tile_size: int = 512,
        compression: Optional[str] = "default",
    ) -> str:
        """
        Write OME-TIFF image plane-by-plane to disk. WsiReg-compatible RegImages all
        provide methods to read an image channel by channel, so each channel is read, transformed,
        and written in turn to reduce memory use during writing.
        RGB images can have a large memory footprint because they are interleaved before writing;
        for RGB images, using the `OmeTiledTiffWriter` is recommended.

        Parameters
        ----------
        image_name: str
            Name to be written WITHOUT extension;
            for example, if image_name = "cool_image" the file
            would be written as "cool_image.ome.tiff"
        output_dir: Path or str
            Directory where the image will be saved
        write_pyramid: bool
            Whether to write the OME-TIFF with sub-resolutions or not
        tile_size: int
            What size to write OME-TIFF tiles to disk
        compression: str
            tifffile compression string passed to the compression argument; "default" resolves
            to "deflate" for minisblack images and "jpeg" for RGB-type images

        Returns
        -------
        output_file_name: str
            File path to the written OME-TIFF

        """

        output_file_name = str(Path(output_dir) / f"{image_name}.ome.tiff")
        self._prepare_image_info(
            image_name,
            reg_transform_seq=self.reg_transform_seq,
            write_pyramid=write_pyramid,
            tile_size=tile_size,
            compression=compression,
        )

        rgb_im_data = []

        print(f"saving to {output_file_name}")
        with TiffWriter(output_file_name, bigtiff=True) as tif:
            if self.reg_image.reader == "sitk":
                self.reg_image._read_full_image()

            for channel_idx in range(self.reg_image.n_ch):
                print(f"transforming : {channel_idx}")
                image = self.reg_image.read_single_channel(channel_idx)
                image = np.squeeze(image)
                image = sitk.GetImageFromArray(image)
                image.SetSpacing(
                    (self.reg_image.image_res, self.reg_image.image_res)
                )

                if self.reg_transform_seq:
                    image = self.reg_transform_seq.resampler.Execute(image)
                    # image = transform_plane(
                    #     image, final_transform, composite_transform
                    # )
                    print(f"transformed : {channel_idx}")

                if self.reg_image.is_rgb:
                    rgb_im_data.append(image)
                else:
                    print("saving")
                    if isinstance(image, sitk.Image):
                        image = sitk.GetArrayFromImage(image)

                    options = dict(
                        tile=(self.tile_size, self.tile_size),
                        compression=self.compression,
                        photometric="rgb"
                        if self.reg_image.is_rgb
                        else "minisblack",
                        metadata=None,
                    )
                    # write OME-XML to the ImageDescription tag of the first page
                    description = self.omexml if channel_idx == 0 else None
                    # write channel data
                    print(
                        f" writing channel {channel_idx} - shape: {image.shape}"
                    )
                    tif.write(
                        image,
                        subifds=self.subifds,
                        description=description,
                        **options,
                    )

                    if write_pyramid:
                        for pyr_idx in range(1, self.n_pyr_levels):
                            resize_shape = (
                                self.pyr_levels[pyr_idx][0],
                                self.pyr_levels[pyr_idx][1],
                            )
                            image = cv2.resize(
                                image,
                                resize_shape,
                                interpolation=cv2.INTER_LINEAR,
                            )
                            print(
                                f"pyramid index {pyr_idx} : channel {channel_idx} shape: {image.shape}"
                            )

                            tif.write(image, **options, subfiletype=1)

            if self.reg_image.is_rgb:
                rgb_im_data = sitk.Compose(rgb_im_data)
                rgb_im_data = sitk.GetArrayFromImage(rgb_im_data)

                options = dict(
                    tile=(self.tile_size, self.tile_size),
                    compression=self.compression,
                    photometric="rgb",
                    metadata=None,
                )
                # write OME-XML to the ImageDescription tag of the first page
                description = self.omexml

                # write channel data
                tif.write(
                    rgb_im_data,
                    subifds=self.subifds,
                    description=description,
                    **options,
                )

                print(f"RGB shape: {rgb_im_data.shape}")
                if write_pyramid:
                    for pyr_idx in range(1, self.n_pyr_levels):
                        resize_shape = (
                            self.pyr_levels[pyr_idx][0],
                            self.pyr_levels[pyr_idx][1],
                        )
                        rgb_im_data = cv2.resize(
                            rgb_im_data,
                            resize_shape,
                            interpolation=cv2.INTER_LINEAR,
                        )
                        tif.write(rgb_im_data, **options, subfiletype=1)
        return output_file_name
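
A hedged call sketch for the method above, assuming `writer` is an instance of the owning class (e.g. a wsireg OME-TIFF writer already configured with a reg_image and reg_transform_seq); all names and paths here are hypothetical:

out_path = writer.write_image_by_plane(
    "registered_image",          # written as registered_image.ome.tiff
    output_dir="output",
    write_pyramid=True,
    tile_size=512,
    compression="default",
)
print(out_path)
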
Example #3
def to_tiff(img,
            file,
            image_name: Union[str, bool, None] = None,
            image_date: Union[str, datetime, None] = None,
            channel_names: Union[Sequence[str], bool, None] = None,
            description: Optional[str] = None,
            profile: TiffProfile = TiffProfile.OME_TIFF,
            big_endian: Optional[bool] = None,
            big_tiff: Optional[bool] = None,
            big_tiff_threshold: int = 2**32 - 2**25,
            interleaved: bool = True,
            compression_type: Optional[str] = None,
            compression_level: int = 0,
            pixel_size: Optional[float] = None,
            pixel_depth: Optional[float] = None,
            software: str = 'xtiff',
            ome_xml_fun=get_ome_xml,
            **ome_xml_kwargs) -> None:
    """
    Writes an image as TIFF file with TZCYX channel order.

    :param img: The image to write, as xarray DataArray or numpy-compatible data structure.
        Supported shapes:
        - (y, x),
        - (c, y, x)
        - (z, c, y, x)
        - (t, z, c, y, x)
        - (t, z, c, y, x, s)
        Supported data types:
        - any numpy data type when using TiffProfile.TIFF
        - uint8, uint16, float32 when using TiffProfile.IMAGEJ (uint8 for RGB images)
        - bool, int8, int16, int32, uint8, uint16, uint32, float32, float64 when using TiffProfile.OME_TIFF
    :param file: File target supported by tifffile TiffWriter, e.g. path to file (str, pathlib.Path) or binary stream.
    :param image_name: Image name for OME-TIFF images. If True, the image name is determined using the DataArray name or
        the file name (in that order); if False, the image name is not set. If None, defaults to the behavior for True
        for named DataArrays and when the file path is provided, and to the behavior of False otherwise. Only relevant
        when writing OME-TIFF files, any value other than None or False will raise a warning for other TIFF profiles.
    :param image_date: Date and time of image creation in '%Y:%m:%d %H:%M:%S' format or as datetime object. Defaults to
        the current date and time if None. Note: this does not determine the OME-XML AcquisitionDate element value.
    :param channel_names: A list of channel names. If True, channel names are determined using the DataArray channel
        coordinate; if False, channel names are not set. If None, defaults to the behavior for True for DataArrays when
        writing multi-channel OME-TIFFs, and to the behavior for False otherwise. Only relevant when writing
        multi-channel OME-TIFF files, any value other than None or False will raise a warning for other TIFF profiles.
    :param description: TIFF description tag. Will default to the OME-XML header when writing OME-TIFF files. Any value
        other than None will raise a warning in this case.
    :param profile: TIFF specification of the written file.
        Supported TIFF profiles:
        - TIFF (no restrictions apply)
        - ImageJ (undocumented file format that is supported by the ImageJ software)
        - OME-TIFF (Open Microscopy Environment TIFF standard-compliant file format with minimal OME-XML header)
    :param big_endian: If true, stores data in big endian format, otherwise uses little endian byte order. If None, the
        byte order is set to True for the ImageJ TIFF profile and defaults to the system default otherwise.
    :param big_tiff: If True, enables support for writing files larger than 4GB. Not supported for TiffProfile.IMAGEJ.
    :param big_tiff_threshold: Threshold for enabling BigTIFF support when big_tiff is set to None, in bytes. Defaults
        to 4GB, minus 32MB for metadata.
    :param interleaved: If True, OME-TIFF images are saved as interleaved (this only affects OME-XML metadata). Always
        True for RGB(A) images (i.e., S=3 or 4) - a warning will be raised if explicitly set to False for RGB(A) images.
    :param compression_type: Compression algorithm, see tifffile.TIFF.COMPRESSION() for available values. Compression is
        not supported for TiffProfile.IMAGEJ. Note: Compression prevents memory-mapping of images and should therefore
        be avoided when images are compressed externally, e.g. when they are stored in compressed archives.
    :param compression_level: Compression level, between 0 and 9. Compression is not supported for TiffProfile.IMAGEJ.
        Note: Compression prevents memory-mapping of images and should therefore be avoided when images are compressed
        externally, e.g. when they are stored in compressed archives.
    :param pixel_size: Planar (x/y) size of one pixel, in micrometer.
    :param pixel_depth: Depth (z size) of one pixel, in micrometer. Only relevant when writing OME-TIFF files, any value
        other than None will raise a warning for other TIFF profiles.
    :param software: Name of the software used to create the file. Must be 7-bit ASCII. Saved with the first page only.
    :param ome_xml_fun: Function that will be used for generating the OME-XML header. See the default implementation for
        reference of the required signature. Only relevant when writing OME-TIFF files, ignored otherwise.
    :param ome_xml_kwargs: Optional arguments that are passed to the ome_xml_fun function. Only relevant when writing
        OME-TIFF files, will raise a warning if provided for other TIFF profiles.
    """
    # file
    if isinstance(file, str):
        file = Path(file)
    if isinstance(file, Path):
        if not file.suffix.lower() == '.tiff':
            warnings.warn(
                'The specified TIFF file name does not end with .tiff: {}'.
                format(file))
        if profile == TiffProfile.OME_TIFF:
            if not file.name.lower().endswith('.ome.tiff'):
                warnings.warn(
                    'The specified OME-TIFF file name does not end with .ome.tiff: {}'
                    .format(file))
        else:
            if file.name.lower().endswith('.ome.tiff'):
                warnings.warn(
                    'The specified non-OME-TIFF file name ends with .ome.tiff: {}'
                    .format(file))

    # image name
    data_array_has_image_name = (_is_data_array(img) and img.name)
    if image_name is None and (data_array_has_image_name or isinstance(
            file, Path)) and profile == TiffProfile.OME_TIFF:
        image_name = True
    if isinstance(image_name, bool):
        if image_name:
            if data_array_has_image_name:
                image_name = img.name
            elif isinstance(file, Path):
                image_name = file.name
            else:
                raise ValueError(
                    'Cannot determine image name from non-DataArray images written to unknown file names'
                )
        else:
            image_name = None
    if isinstance(image_name, str) and len(image_name) == 0:
        raise ValueError('Image name is empty')
    if image_name is not None and profile != TiffProfile.OME_TIFF:
        warnings.warn(
            'The provided TIFF profile does not support image names, ignoring image name'
        )
        image_name = None
    assert image_name is None or len(image_name) > 0

    # image date
    if image_date is None:
        image_date = datetime.now()

    # byte order
    if big_endian is None:
        big_endian = (profile == TiffProfile.IMAGEJ) or sys.byteorder == 'big'
    elif profile == TiffProfile.IMAGEJ and not big_endian:
        warnings.warn(
            'The ImageJ TIFF profile does not support the specified byte order, continuing with big endian'
        )
        big_endian = True
    assert big_endian is not None

    # compression
    if compression_type is not None and compression_type not in tifffile.TIFF.COMPRESSION(
    ):
        raise ValueError(
            'The specified compression type is not supported: {}'.format(
                compression_type))
    if not 0 <= compression_level <= 9:
        raise ValueError(
            'The specified compression level is not between 0 and 9: {:d}'.
            format(compression_level))
    compression = compression_level
    if compression_type is not None:
        compression = (compression_type, compression_level)
    if profile == TiffProfile.IMAGEJ and compression != 0:
        warnings.warn(
            'The ImageJ TIFF profile does not support compression, ignoring compression'
        )
        compression = 0
    assert isinstance(
        compression,
        int) or isinstance(compression, tuple) and len(compression) == 2

    # resolution
    resolution = None
    if pixel_size is not None:
        if pixel_size <= 0.:
            raise ValueError(
                'The specified pixel size is not larger than zero: {:f}'.
                format(pixel_size))
        pixels_per_centimeter = 10**4 / pixel_size
        resolution = (pixels_per_centimeter, pixels_per_centimeter,
                      'CENTIMETER')
    if pixel_depth is not None and profile != TiffProfile.OME_TIFF:
        warnings.warn(
            'Pixel depth information is supported for OME-TIFF only, ignoring pixel depth'
        )
        pixel_depth = None
    if pixel_depth is not None and pixel_depth <= 0:
        raise ValueError(
            'The specified pixel depth is not larger than zero: {:f}'.format(
                pixel_depth))

    # convert image to numpy array or xarray DataArray
    if not isinstance(img, np.ndarray) and not _is_data_array(img):
        img = np.asarray(img)
    if profile == TiffProfile.IMAGEJ and img.dtype not in (np.uint8, np.uint16,
                                                           np.float32):
        fmt = 'The ImageJ TIFF profile does not support the specified data type: {} (supported: uint8, uint16, float32)'
        raise ValueError(fmt.format(str(img.dtype)))
    assert isinstance(img, np.ndarray) or _is_data_array(img)

    # determine image shape
    channel_axis = None
    img_shape = img.shape
    if img.ndim == 2:  # YX
        img_shape = (1, 1, 1, img.shape[0], img.shape[1], 1)
    elif img.ndim == 3:  # CYX
        channel_axis = 0
        img_shape = (1, 1, img.shape[0], img.shape[1], img.shape[2], 1)
    elif img.ndim == 4:  # ZCYX
        channel_axis = 1
        img_shape = (1, img.shape[0], img.shape[1], img.shape[2], img.shape[3],
                     1)
    elif img.ndim == 5:  # TZCYX
        channel_axis = 2
        img_shape = (img.shape[0], img.shape[1], img.shape[2], img.shape[3],
                     img.shape[4], 1)
    elif img.ndim == 6:  # TZCYXS
        channel_axis = 2
        if img.shape[-1] > 1 and not interleaved:
            interleaved = True
            if profile == TiffProfile.OME_TIFF:
                warnings.warn(
                    'RGB(A) OME-TIFF images must be saved as interleaved, ignoring interleaved parameter'
                )
    else:
        raise ValueError(
            'Unsupported number of dimensions: {:d} (supported: 2, 3, 4, 5, 6)'
            .format(img.ndim))
    size_t, size_z, size_c, size_y, size_x, size_s = img_shape
    if profile == TiffProfile.IMAGEJ and size_s in (
            3, 4) and img.dtype != np.uint8:
        warnings.warn(
            'The ImageJ TIFF profile for RGB does not support the specified data type, casting to uint8'
        )
        img = img.astype(np.uint8)
    assert len(img_shape) == 6

    # determine channel names
    if channel_names is None and _is_data_array(
            img
    ) and channel_axis is not None and profile == TiffProfile.OME_TIFF:
        channel_names = True
    if isinstance(channel_names, bool):
        if channel_names:
            if not _is_data_array(img):
                raise ValueError(
                    'Cannot determine channel names from non-DataArray image')
            if channel_axis is None:
                raise ValueError(
                    'Cannot determine channel names from DataArrays without a channel dimension'
                )
            img: Any  # to "help" PyCharm dealing with DataArrays
            channel_names = img.coords[img.dims[channel_axis]].values
        else:
            channel_names = None
    if channel_names is not None and len(channel_names) != size_c:
        raise ValueError(
            'Invalid number of channel names: {:d} (expected: {:d})'.format(
                len(channel_names), size_c))
    if channel_names is not None and profile != TiffProfile.OME_TIFF:
        warnings.warn(
            'Channel names are supported for OME-TIFF only, ignoring channel names'
        )
        channel_names = None
    assert channel_names is None or len(channel_names) == size_c

    # convert image to TZCYXS numpy array
    if _is_data_array(img):
        img = img.values
    img = img.reshape(img_shape)
    assert isinstance(img, np.ndarray) and len(img.shape) == 6

    # determine BigTIFF status
    if big_tiff_threshold < 0:
        raise ValueError('The BigTIFF size threshold is negative: {:d}'.format(
            big_tiff_threshold))
    if big_tiff is None:
        big_tiff = (img.size * img.itemsize > big_tiff_threshold)
    if big_tiff and profile == TiffProfile.IMAGEJ:
        warnings.warn(
            'BigTIFF is not supported for ImageJ format, disabling BigTIFF')
        big_tiff = False
    assert big_tiff is not None

    # get description tag
    if description is not None and profile == TiffProfile.OME_TIFF:
        warnings.warn(
            'Custom TIFF description tags are not supported for OME-TIFF, ignoring description'
        )
        description = None
    if ome_xml_kwargs and profile != TiffProfile.OME_TIFF:
        warnings.warn(
            'Additional arguments are supported for OME-TIFF only, ignoring additional keyword arguments'
        )
        ome_xml_kwargs = {}
    if profile == TiffProfile.OME_TIFF:
        if ome_xml_fun is None:
            raise ValueError('No function provided for generating the OME-XML')
        ome_xml = ome_xml_fun(img,
                              image_name,
                              channel_names,
                              big_endian,
                              pixel_size,
                              pixel_depth,
                              interleaved=interleaved,
                              **ome_xml_kwargs)
        # While TIFF technically only supports ASCII, OME-XML requires UTF-8 encoding. In particular, the ASCII standard
        # does not support the micron (μ) character, which is used in PhysicalSizeX/Y/Z attributes. Therefore, the
        # OME-XML Description Tag is always encoded as UTF-8.
        with BytesIO() as description_buffer:
            ome_xml.write(description_buffer,
                          encoding='utf-8',
                          xml_declaration=True)
            description = description_buffer.getvalue(
            )  # do not decode byte string to skip tifffile's ASCII check

    # write image
    byte_order = '>' if big_endian else '<'
    imagej = (profile == TiffProfile.IMAGEJ)
    metadata = None if profile == TiffProfile.OME_TIFF else {}
    with TiffWriter(file,
                    bigtiff=big_tiff,
                    byteorder=byte_order,
                    imagej=imagej) as writer:
        # set photometric to 'MINISBLACK' to not treat three-channel images as RGB
        writer.save(data=img,
                    photometric='MINISBLACK',
                    compress=compression,
                    description=description,
                    datetime=image_date,
                    resolution=resolution,
                    software=software,
                    metadata=metadata)
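
A hedged usage sketch for to_tiff, using a synthetic (c, y, x) numpy array and the default OME-TIFF profile; the file name, channel names, and pixel size are hypothetical:

import numpy as np

img = np.random.randint(0, 65535, size=(3, 512, 512), dtype=np.uint16)
to_tiff(
    img,
    'example.ome.tiff',
    image_name='example',
    channel_names=['DAPI', 'GFP', 'RFP'],
    pixel_size=0.325,  # micrometers per pixel
)
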
Example #4
    def save_ome(self, file_path):
        with TiffWriter(file_path) as tif:
            tif.save(np.moveaxis(self.pixels, [3], [1]),
                     photometric='minisblack',
                     metadata={'axes': 'ZCYX'})  # need to handle metadata better.
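
For context, a small sketch of what the axis move above does, assuming self.pixels is stored as a (Z, Y, X, C) array; the shape is a hypothetical example:

import numpy as np

pixels = np.zeros((5, 128, 128, 2), dtype=np.uint16)  # hypothetical ZYXC stack
zcyx = np.moveaxis(pixels, [3], [1])
print(zcyx.shape)  # (5, 2, 128, 128) -> ZCYX, matching the 'axes' metadata
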
Example #5
def write_tiff(filepath: str, img: ndarray) -> None:
    """Write the image to a tiff file."""
    with TiffWriter(filepath) as tw:
        tw.write(img)
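
A minimal usage sketch, assuming numpy and tifffile are available; the file name and image content are hypothetical:

import numpy as np

img = (np.random.rand(256, 256) * 255).astype(np.uint8)
write_tiff("example.tif", img)
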
Example #6
    def predict(self, input_folder: Union[Path, str],
                target_folder: Union[Path, str], model_path: Union[Path, str]):
        """Run prediction.

        @param input_folder: Path|str
            Path to the folder containing the input images to predict.

        @param target_folder: Path|str
            Path to the folder where to store the predicted images.

        @param model_path: Path|str
            Full path to the model to use.

        @return True if the prediction was successful, False otherwise.
        """
        # Inform
        self._print_header("Prediction")

        # Get the device
        self._device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # If the model is not in memory, instantiate it first
        if self._model is None:
            self._define_model()

        # Try loading the model weights: they must be compatible
        # with the model in memory
        try:
            checkpoint = torch.load(model_path,
                                    map_location=torch.device('cpu'))
            self._model.load_state_dict(checkpoint)
            print(f"Loaded best metric model {model_path}.", file=self._stdout)
        except Exception as e:
            self._message = "Error: there was a problem loading the model! Aborting."
            return False

        # Make sure the target folder exists
        if type(target_folder) == str and target_folder == '':
            self._message = "Error: please specify a valid target folder! Aborting."
            return False

        target_folder = Path(target_folder)
        target_folder.mkdir(parents=True, exist_ok=True)

        # Get prediction dataloader
        if not self._define_prediction_data_loaders(input_folder):
            self._message = "Error: could not instantiate prediction dataloader! Aborting."
            return False

        # Switch to evaluation mode
        self._model.eval()

        indx = 0

        # Make sure not to update the gradients
        with torch.no_grad():
            for prediction_data in self._prediction_dataloader:

                # Get the next batch and move it to device
                prediction_images = prediction_data.to(self._device)

                # Apply sliding inference over ROI size
                prediction_outputs = sliding_window_inference(
                    prediction_images, self._roi_size,
                    self._sliding_window_batch_size, self._model)
                prediction_outputs = self._prediction_post_transforms(
                    prediction_outputs)

                # Retrieve the image from the GPU (if needed)
                pred = prediction_outputs.cpu().numpy().squeeze()

                # Prepare the output file name
                basename = os.path.splitext(
                    os.path.basename(self._prediction_image_names[indx]))[0]
                basename = "pred_" + basename

                # Convert to label image
                label_img = self._prediction_to_label_tiff_image(pred)

                # Save label image as tiff file
                label_file_name = os.path.join(str(target_folder),
                                               basename + '.tif')
                with TiffWriter(label_file_name) as tif:
                    tif.save(label_img)

                # Inform
                print(f"Saved {str(target_folder)}/{basename}.tif",
                      file=self._stdout)

                # Update the index
                indx += 1

        # Inform
        print(f"Prediction completed.", file=self._stdout)

        # Return success
        return True
Example #7
def transform_to_ome_tiff(
    tform_reg_im,
    image_name,
    output_dir,
    final_transform,
    composite_transform,
    tile_size=512,
    write_pyramid=True,
):

    y_size, x_size, y_spacing, x_spacing = get_final_yx_from_tform(
        tform_reg_im, final_transform)

    # protect against too large tile size
    while y_size / tile_size <= 1 or x_size / tile_size <= 1:
        tile_size = tile_size // 2

    n_ch = (tform_reg_im.im_dims[2]
            if tform_reg_im.is_rgb else tform_reg_im.im_dims[0])
    pyr_levels, pyr_shapes = get_pyramid_info(y_size, x_size, n_ch, tile_size)
    n_pyr_levels = len(pyr_levels)
    output_file_name = str(Path(output_dir) / image_name)
    channel_names = format_channel_names(tform_reg_im.channel_names, n_ch)

    if final_transform is not None:
        PhysicalSizeY = y_spacing
        PhysicalSizeX = x_spacing
    else:
        PhysicalSizeY = tform_reg_im.image_res
        PhysicalSizeX = tform_reg_im.image_res

    omexml = prepare_ome_xml_str(
        y_size,
        x_size,
        n_ch,
        tform_reg_im.im_dtype,
        tform_reg_im.is_rgb,
        PhysicalSizeX=PhysicalSizeX,
        PhysicalSizeY=PhysicalSizeY,
        PhysicalSizeXUnit="µm",
        PhysicalSizeYUnit="µm",
        Name=image_name,
        Channel=None if tform_reg_im.is_rgb else {"Name": channel_names},
    )
    subifds = n_pyr_levels - 1 if write_pyramid is True else None

    rgb_im_data = []

    if tform_reg_im.reader == "sitk":
        full_image = sitk.ReadImage(tform_reg_im.image_filepath)

    print(f"saving to {output_file_name}.ome.tiff")
    with TiffWriter(f"{output_file_name}.ome.tiff", bigtiff=True) as tif:
        for channel_idx in range(n_ch):
            print(f"transforming : {channel_idx}")
            if tform_reg_im.reader != "sitk":
                image = tform_reg_im.read_single_channel(channel_idx)
                image = np.squeeze(image)
                image = sitk.GetImageFromArray(image)
                image.SetSpacing(
                    (tform_reg_im.image_res, tform_reg_im.image_res))
            else:
                if tform_reg_im.is_rgb:
                    image = sitk.VectorIndexSelectionCast(
                        full_image, channel_idx)
                elif len(full_image.GetSize()) > 2:
                    image = full_image[:, :, channel_idx]
                else:
                    image = full_image

            if composite_transform is not None:
                image = transform_plane(image, final_transform,
                                        composite_transform)
                print(f"transformed : {channel_idx}")

            if tform_reg_im.is_rgb:
                rgb_im_data.append(image)
            else:
                print("saving")
                if isinstance(image, sitk.Image):
                    image = sitk.GetArrayFromImage(image)

                options = dict(
                    tile=(tile_size, tile_size),
                    compression="jpeg" if tform_reg_im.is_rgb else "deflate",
                    photometric="rgb" if tform_reg_im.is_rgb else "minisblack",
                    metadata=None,
                )
                # write OME-XML to the ImageDescription tag of the first page
                description = omexml if channel_idx == 0 else None
                # write channel data
                print(f" writing channel {channel_idx} - shape: {image.shape}")
                tif.write(
                    image,
                    subifds=subifds,
                    description=description,
                    **options,
                )

                if write_pyramid:
                    for pyr_idx in range(1, n_pyr_levels):
                        resize_shape = (
                            pyr_levels[pyr_idx][0],
                            pyr_levels[pyr_idx][1],
                        )
                        image = cv2.resize(
                            image,
                            resize_shape,
                            interpolation=cv2.INTER_LINEAR,
                        )
                        print(
                            f"pyr {pyr_idx} : channel {channel_idx} shape: {image.shape}"
                        )

                        tif.write(image, **options, subfiletype=1)

        if tform_reg_im.is_rgb:
            rgb_im_data = sitk.Compose(rgb_im_data)
            rgb_im_data = sitk.GetArrayFromImage(rgb_im_data)

            options = dict(
                tile=(tile_size, tile_size),
                compression="jpeg" if tform_reg_im.is_rgb else None,
                photometric="rgb" if tform_reg_im.is_rgb else "minisblack",
                metadata=None,
            )
            # write OME-XML to the ImageDescription tag of the first page
            description = omexml

            # write channel data
            tif.write(
                rgb_im_data,
                subifds=subifds,
                description=description,
                **options,
            )

            print(f"RGB shape: {rgb_im_data.shape}")
            if write_pyramid:
                for pyr_idx in range(1, n_pyr_levels):
                    resize_shape = (
                        pyr_levels[pyr_idx][0],
                        pyr_levels[pyr_idx][1],
                    )
                    rgb_im_data = cv2.resize(rgb_im_data, resize_shape,
                                             interpolation=cv2.INTER_LINEAR)
                    print(f"pyr {pyr_idx} : RGB , shape: {rgb_im_data.shape}")

                    tif.write(rgb_im_data, **options, subfiletype=1)

    return f"{output_file_name}.ome.tiff"
Example #8
def write(filename, image, sed=None, optical=None, ranges=None,
          multichannel=True, dtype=None, write_float=None):
    """Writes MIBI data to a multipage TIFF.

    Args:
        filename: The path to the target file if multi-channel, or the path to
            a folder if single-channel.
        image: A :class:`mibidata.mibi_image.MibiImage` instance.
        sed: Optional, an array of the SED image data. This is assumed to be
            grayscale even if 3-dimensional, in which case only one channel
            will be used.
        optical: Optional, an RGB array of the optical image data.
        ranges: A list of (min, max) tuples the same length as the number of
            channels. If None, the min will default to zero and the max to the
            max pixel value in that channel. This is used by some external
            software to calibrate the display.
        multichannel: Boolean for whether to create a single multi-channel TIFF,
            or a folder of single-channel TIFFs. Defaults to True; if False,
            the sed and optical options are ignored.
        dtype: One of (``np.float32``, ``np.uint16``) to force the dtype
            of the saved image data. Defaults to ``None``, which chooses the
            format based on the data's input type, and will convert to
            ``np.float32`` or ``np.uint16`` from other float or int types,
            respectively, if it can do so without a loss of data.
        write_float: Deprecated, will raise ValueError if specified. To
            specify the dtype of the saved image, please use the `dtype`
            argument instead.

    Raises:
        ValueError: Raised if

            * The image is not a :class:`mibidata.mibi_image.MibiImage`
              instance.
            * The :class:`mibidata.mibi_image.MibiImage` coordinates, size,
              fov_id, fov_name, run, folder, dwell, scans, mass_gain,
              mass_offset, time_resolution, masses or targets are None.
            * `dtype` is not one of ``np.float32`` or ``np.uint16``.
            * `write_float` has been specified.
            * Converting the native :class:`mibidata.mibi_image.MibiImage` dtype
              to the specified or inferred ``dtype`` results in a loss of data.
    """
    if not isinstance(image, mi.MibiImage):
        raise ValueError('image must be a mibidata.mibi_image.MibiImage '
                         'instance.')
    missing_required_metadata = [m for m in REQUIRED_METADATA_ATTRIBUTES
                                 if not getattr(image, m)]
    if missing_required_metadata:
        if len(missing_required_metadata) == 1:
            missing_metadata_error = (f'{missing_required_metadata[0]} is '
                                      f'required and may not be None.')
        else:
            missing_metadata_error = (f'{", ".join(missing_required_metadata)}'
                                      f' are required and may not be None.')
        raise ValueError(missing_metadata_error)

    if write_float is not None:
        raise ValueError('`write_float` has been deprecated. Please use the '
                         '`dtype` argument instead.')
    if dtype and not dtype in [np.float32, np.uint16]:
        raise ValueError('Invalid dtype specification.')

    if dtype == np.float32:
        save_dtype = np.float32
        range_dtype = 'd'
    elif dtype == np.uint16:
        save_dtype = np.uint16
        range_dtype = 'I'
    elif np.issubdtype(image.data.dtype, np.floating):
        save_dtype = np.float32
        range_dtype = 'd'
    else:
        save_dtype = np.uint16
        range_dtype = 'I'

    to_save = image.data.astype(save_dtype)
    if not np.all(np.equal(to_save, image.data)):
        raise ValueError('Cannot convert data from '
                         f'{image.data.dtype} to {save_dtype}')

    if ranges is None:
        ranges = [(0, m) for m in to_save.max(axis=(0, 1))]

    coordinates = [
        (286, '2i', 1, _micron_to_cm(image.coordinates[0])),  # x-position
        (287, '2i', 1, _micron_to_cm(image.coordinates[1])),  # y-position
    ]
    resolution = (image.data.shape[0] * 1e4 / float(image.size),
                  image.data.shape[1] * 1e4 / float(image.size),
                  'CENTIMETER')

    # The mibi. prefix is added to attributes defined in the spec.
    # Other user-defined attributes are included too but without the prefix.
    prefixed_attributes = mi.SPECIFIED_METADATA_ATTRIBUTES[1:]
    description = {}
    for key, value in image.metadata().items():
        if key in prefixed_attributes:
            description[f'mibi.{key}'] = value
        elif key in RESERVED_MIBITIFF_ATTRIBUTES:
            warnings.warn(f'Skipping writing user-defined {key} to the '
                          f'metadata as it is a reserved attribute.')
        elif key != 'date':
            description[key] = value
    # TODO: Decide if should filter out those that are None or convert to empty
    # string so that don't get saved as 'None'

    if multichannel:
        targets = list(image.targets)
        util.sort_channel_names(targets)
        indices = image.channel_inds(targets)
        with TiffWriter(filename) as infile:
            for i in indices:
                metadata = description.copy()
                metadata.update({
                    'image.type': 'SIMS',
                    'channel.mass': int(image.masses[i]),
                    'channel.target': image.targets[i],
                })

                page_name_string = _page_name_string(
                    image.targets[i], image.masses[i])
                page_name = (285, 's', 0, page_name_string)
                min_value = (340, range_dtype, 1, ranges[i][0])
                max_value = (341, range_dtype, 1, ranges[i][1])
                page_tags = coordinates + [page_name, min_value, max_value]

                # Adding rowsperstrip parameter to prevent using the
                # auto-calculated value. The auto-calculated value results in
                # the "STRIP_OFFSETS directory entry is the wrong type" error.
                infile.write(
                    to_save[:, :, i], compress=6, resolution=resolution,
                    extratags=page_tags, metadata=metadata, datetime=image.date,
                    software=SOFTWARE_VERSION, rowsperstrip=to_save.shape[0])
            if sed is not None:
                if sed.ndim > 2:
                    sed = sed[:, :, 0]

                sed_resolution = (sed.shape[0] * 1e4 / float(image.size),
                                  sed.shape[1] * 1e4 / float(image.size),
                                  'CENTIMETER')

                page_name = (285, 's', 0, 'SED')
                page_tags = coordinates + [page_name]
                infile.write(
                    sed, compress=6, resolution=sed_resolution,
                    extratags=page_tags, metadata={'image.type': 'SED'},
                    software=SOFTWARE_VERSION, rowsperstrip=sed.shape[0])
            if optical is not None:
                infile.write(optical, compress=6, software=SOFTWARE_VERSION,
                             metadata={'image.type': 'Optical'},
                             rowsperstrip=optical.shape[0])
                label_coordinates = (
                    _TOP_LABEL_COORDINATES if image.coordinates[1] > 0 else
                    _BOTTOM_LABEL_COORDINATES)
                slide_label = np.fliplr(np.moveaxis(
                    optical[label_coordinates[0][0]:label_coordinates[0][1],
                            label_coordinates[1][0]:label_coordinates[1][1]],
                    0, 1))
                infile.write(slide_label, compress=6, software=SOFTWARE_VERSION,
                             metadata={'image.type': 'Label'},
                             rowsperstrip=slide_label.shape[0])

    else:
        for i in range(image.data.shape[2]):
            metadata = description.copy()
            metadata.update({
                'image.type': 'SIMS',
                'channel.mass': int(image.masses[i]),
                'channel.target': image.targets[i],
            })
            # Converting to bytes string to support non-ascii characters
            page_name_string = _page_name_string(
                image.targets[i], image.masses[i])
            page_name = (285, 's', 0, page_name_string)
            min_value = (340, range_dtype, 1, ranges[i][0])
            max_value = (341, range_dtype, 1, ranges[i][1])
            page_tags = coordinates + [page_name, min_value, max_value]

            target_filename = os.path.join(
                filename, '{}.tiff'.format(
                    util.format_for_filename(image.targets[i])))

            with TiffWriter(target_filename) as infile:

                infile.write(
                    to_save[:, :, i], compress=6, resolution=resolution,
                    metadata=metadata, datetime=image.date,
                    extratags=page_tags, software=SOFTWARE_VERSION,
                    rowsperstrip=to_save.shape[0])
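
A hedged usage sketch for the MIBI writer above; `image` is assumed to be an existing, fully annotated mibidata.mibi_image.MibiImage instance (its construction is not shown here), and the file name is hypothetical:

import numpy as np

write('fov1.tiff', image, multichannel=True, dtype=np.uint16)
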
Example #9
def svs2tif(input_file,
            output_folder,
            tile_size,
            overlap,
            num_workers=os.cpu_count(),
            output_filename="image.tif"):
    output_folder = str(output_folder)

    logger.info("Parameters")
    logger.info("       input file: %s", input_file)
    logger.info("    output folder: %s", output_folder)
    logger.info("        tile size: %d", tile_size)
    logger.info("          overlap: %d", overlap)
    logger.info("      num_workers: %d", num_workers)
    logger.info("  output filename: %s", output_filename)

    with OpenSlide(input_file) as slide:
        properties = slide.properties
        slide_dimensions = slide.dimensions

        tiles = DeepZoomGenerator(slide,
                                  tile_size=tile_size,
                                  overlap=overlap,
                                  limit_bounds=False)

        output_file = Path(output_folder) / output_filename

        np_memmap = []
        width, height = slide_dimensions
        img_w, img_h = width, height
        for level in range(tiles.level_count):
            memmap_filename = Path(output_folder, "level{}.mmap".format(level))
            memmap_shape = (img_h, img_w, 3)
            np_memmap_arr = np.memmap(memmap_filename,
                                      dtype=np.uint8,
                                      mode="w+",
                                      shape=memmap_shape)
            np_memmap.append(np_memmap_arr)
            logger.info("  Created %s %s", memmap_filename, repr(memmap_shape))

            img_w = round(img_w / 2)
            img_h = round(img_h / 2)
            if max(img_w, img_h) < tile_size:
                break
        try:

            # Multithread processing for each tile in the largest
            # image (index 0)
            logger.info("Processing tiles...")
            dim_index = tiles.level_count - 1
            tile_pos_x, tile_pos_y = tiles.level_tiles[dim_index]
            index_iter = np.ndindex(tile_pos_x, tile_pos_y)
            with concurrent.futures.ThreadPoolExecutor(
                    max_workers=num_workers) as executor:
                executor.map(
                    filter_tile,
                    repeat(tiles),
                    repeat(dim_index),
                    index_iter,
                    repeat(tile_size),
                    repeat(np_memmap[0]),
                )

            logger.info("Storing low resolution images...")
            for index in range(1, len(np_memmap)):
                src_arr = np_memmap[index - 1]
                target_arr = np_memmap[index]
                target_arr[:] = cv2.resize(src_arr, (0, 0),
                                           fx=0.5,
                                           fy=0.5,
                                           interpolation=cv2.INTER_AREA)
                # th, tw = target_arr.shape[:2]
                # target_arr[:] = src_arr[
                #     : th * 2 : 2, : tw * 2 : 2, :
                # ]  # Fast resizing. No anti-aliasing.
                logger.info("  Level %d: %s", index, repr(target_arr.shape))

            # Calculate resolution
            if (properties.get("tiff.ResolutionUnit")
                    and properties.get("tiff.XResolution")
                    and properties.get("tiff.YResolution")):
                resolution_unit = properties.get("tiff.ResolutionUnit")
                x_resolution = float(properties.get("tiff.XResolution"))
                y_resolution = float(properties.get("tiff.YResolution"))
            else:
                resolution_unit = properties.get("tiff.ResolutionUnit", "inch")
                if properties.get("tiff.ResolutionUnit",
                                  "inch").lower() == "inch":
                    numerator = 25400  # Microns in Inch
                else:
                    numerator = 10000  # Microns in CM
                x_resolution = int(numerator //
                                   float(properties.get('openslide.mpp-x', 1)))
                y_resolution = int(numerator //
                                   float(properties.get('openslide.mpp-y', 1)))

            # Write TIFF file
            with TiffWriter(output_file, bigtiff=True) as tif:
                # Save from the largest image (openslide requires that)
                for level in range(len(np_memmap)):
                    src_arr = np_memmap[level]
                    height, width = src_arr.shape[:2]
                    logger.info("Saving Level %d image (%d x %d)...", level,
                                width, height)
                    if level:
                        subfiletype = SUBFILETYPE_REDUCEDIMAGE
                    else:
                        subfiletype = SUBFILETYPE_NONE

                    tif.save(
                        src_arr,
                        software="Glencoe/Faas pyramid",
                        metadata={"axes": "YXC"},
                        tile=(tile_size, tile_size),
                        photometric="RGB",
                        planarconfig="CONTIG",
                        resolution=(
                            x_resolution // 2**level,
                            y_resolution // 2**level,
                            resolution_unit,
                        ),
                        compress=("jpeg", 95),  # requires imagecodecs
                        subfiletype=subfiletype,
                    )
                logger.info("Done.")
        finally:
            # Remove memory-mapped file
            logger.info("Removing memmapped files...")
            src_arr = None
            target_arr = None
            np_memmap_arr = None
            del np_memmap
            gc.collect()
            mmap_file_iter = Path(output_folder).glob("*.mmap")
            for fp in mmap_file_iter:
                fp.unlink()
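
A hedged usage sketch for svs2tif; the input slide and output folder are hypothetical paths:

svs2tif(
    "slide.svs",
    "converted",
    tile_size=256,
    overlap=0,
    output_filename="pyramid.tif",
)
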
Example #10
    def event_page(self, doc):
        '''Add event page document information to a ".tiff" file.

        This method adds event_page document information to a ".tiff" file,
        creating it if necessary.

        .. warning::

            All non-2D 'image-like' data is explicitly ignored.

        .. note::

            The data in Events might be structured as an Event, an EventPage,
            or a "bulk event" (deprecated). The DocumentRouter base class takes
            care of first transforming the other representations into an
            EventPage and then routing them through here, so no further action
            is required in this class. We can assume we will always receive an
            EventPage.

        Parameters:
        -----------
        doc : dict
            EventPage document
        '''
        event_model.verify_filled(doc)
        descriptor = self._descriptors[doc['descriptor']]
        stream_name = descriptor.get('name')
        for field in doc['data']:
            for img in doc['data'][field]:
                # Check that the data is 2D or 3D; if not ignore it.
                data_key = descriptor['data_keys'][field]
                ndim = len(data_key['shape'] or [])
                if data_key['dtype'] == 'array' and 1 < ndim < 4:
                    # there is data to be written so
                    # create a file for this stream and field
                    # if one does not exist yet
                    if not self._tiff_writers.get(stream_name, {}).get(field):
                        filename = get_prefixed_filename(
                            file_prefix=self._file_prefix,
                            start_doc=self._start,
                            stream_name=stream_name,
                            field=field)
                        fname = self._manager.reserve_name(
                            'stream_data', filename)
                        Path(fname).parent.mkdir(parents=True, exist_ok=True)
                        tw = TiffWriter(fname, **self._init_kwargs)
                        self._tiff_writers[stream_name][field] = tw

                    # write the data
                    img_asarray = numpy.asarray(img, dtype=self._astype)
                    if ndim == 2:
                        # handle 2D data just like 3D data
                        # by adding a 3rd dimension
                        img_asarray = numpy.expand_dims(img_asarray, axis=0)
                    for i in range(img_asarray.shape[0]):
                        img_asarray_2d = img_asarray[i, :]
                        # append the image to the file
                        tw = self._tiff_writers[stream_name][field]
                        tw.write(img_asarray_2d,
                                 contiguous=True,
                                 **self._kwargs)
Example #11
laser = TopticaiBeam(port="COM1")

# initialise buffer as a queue
image_buffer = Queue()

# configure camera, pass the buffer queue and enable.
camera.set_client(image_buffer)
camera.exposure_time = exposure_seconds
camera.set_trigger(TriggerType.SOFTWARE, TriggerMode.ONCE)
camera.enable()

# configure laser
laser.power = power_level
laser.set_trigger(TriggerType.HIGH, TriggerMode.BULB)
laser.enable()

# main loop to collect images.
for i in range(n_repeats):
    camera.trigger()
    time.sleep(interval_seconds)

# shutdown hardware devices
laser.shutdown()
camera.shutdown()

# write out image data to a file.
writer = TiffWriter("data.tif")
for i in range(n_repeats):
    writer.save(image_buffer.get())
writer.close()
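
An equivalent sketch using TiffWriter as a context manager (same image_buffer queue and n_repeats as above); write() is the non-deprecated spelling of save() in recent tifffile versions:

with TiffWriter("data.tif") as writer:
    for i in range(n_repeats):
        writer.write(image_buffer.get())
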
Example #12
            data=Nodes,
            columns=["NodeID", "NodeArray", "NodeCells", "NodeIndex"])
        df_nodes.to_excel(writer, sheet_name="Nodes_t" + file[-7:-4])
    writer.save()
    writer.close()

    for file in sorted(
            glob.glob(image_j_dir)):  #loops over all the files in path
        read = imread(file)
        image_j_list.append(read)  #adds junction images to stack

    #saves the stacks to tiff
    image_c_stack = np.stack(image_c_list, axis=0)
    image_j_stack = np.stack(image_j_list, axis=0)
    image_n_stack = np.stack(image_n_list, axis=0)
    with TiffWriter(parent_dir + "/Segments_stack.tif") as tif:
        tif.save(image_c_stack)
    with TiffWriter(parent_dir + "/Outlines_stack.tif") as tif:
        tif.save(image_j_stack)
    with TiffWriter(parent_dir + "/Nodes_stack.tif") as tif:
        tif.save(image_n_stack)

    # makes stack of all stacks
    image_stack = np.stack([image_c_stack, image_j_stack, image_n_stack],
                           axis=1)
    with TiffWriter(parent_dir + "/Merged_stack.tif", imagej=True) as tif:
        tif.save(image_stack)
    print(
        "files written: Segments_stack.tif, Outlines_stack.tif, Nodes_stack.tif, Merged_stack.tif, Nodes.xlsx, Nodes folder"
    )
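
A hedged read-back sketch to confirm the merged ImageJ stack's layout (parent_dir as in the snippet above):

from tifffile import imread

merged = imread(parent_dir + "/Merged_stack.tif")
print(merged.shape)  # expected (T, 3, Y, X): one plane each for segments, outlines, nodes
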
Example #13
                               a_brain)
print(f'Your brain directory: {brain_directory}')
# Your brain directory: /Users/amcg0011/Data/InSituData/plane_of_section-1/entrez_id_14812_Grin2b/age_id-15_id-74988710
print(os.path.exists(brain_directory))
# True
name_pattern = r'image_id-\d*.jpeg'
# the amount of work required to get a brain directory is too much.
# This needs to be simplified. Should be as simple as:
# data_tree = get_data_tree(base_directory)
# data_tree.genes[0].planes[0].brains[0] --> absolute dir path
# or something equivalently easy
my_brain = InSituSeries(brain_directory, name_pattern)
target = my_brain._target_volume
save_at = os.path.join(gene_directory, 'InSituVolumes',
                       'Grin2b-age_id-15_id-74988710.tif')
with TiffWriter(save_at) as tiff:
    for i in range(target.shape[0]):
        tiff.save(target[i, :, :])

a_gene = genes[2]
print(f'results from gene lucky dip: {a_gene}')
# results from gene lucky dip: entrez_id_17202_Mc4r
coronal_brains = list(gene_data_tree[a_gene]['plane_of_section-1'].keys())
a_brain = coronal_brains[0]
print(f'Your brain: {a_brain}')
# Your brain: age_id-15_id-79556630
brain_directory = os.path.join(gene_directory, a_gene, 'plane_of_section-1',
                               a_brain)
print(f'Your brain directory: {brain_directory}')
# Your brain directory: /Users/amcg0011/Data/InSituData/entrez_id_17202_Mc4r/plane_of_section-1/age_id-15_id-79556630
print(os.path.exists(brain_directory))
Example #14
def main():
    parser = argparse.ArgumentParser(
        description=
        "boxcrop tool [using techniques of https://dx.doi.org/10.1371/journal.pone.0163453 ]"
    )

    parser.add_argument("input", type=str, help="input file name")
    parser.add_argument("--output",
                        type=str,
                        default="",
                        help="output file name")
    parser.add_argument("--channel",
                        type=int,
                        default=0,
                        help="bright field channel")

    args = parser.parse_args()
    if args.output == '':
        args.output = args.input + '_registered-%04d.tif'

    ims = MultiImageStack.open(args.input)

    mp = ims.get_meta('multipoints')

    channels = ims.get_meta('channels')

    bright_field_channel = args.channel

    for p in range(mp):
        first = ims.get_image(t=0, pos=p, channel=bright_field_channel)

        angle = find_rotation(first)

        rotated = rotate_image(first, angle)

        top, bottom, left, right = find_box(rotated, subsample=1, debug=False)

        first_box = rotated[top:bottom, left:right]

        buffer = numpy.zeros((channels, ) + first_box.shape, dtype=first.dtype)

        try:
            output_name = args.output % p
        except TypeError:
            output_name = args.output

        with TiffWriter(output_name, imagej=True) as tiff:
            for t in range(ims.get_meta('timepoints')):

                current = ims.get_image(t=t,
                                        pos=p,
                                        channel=bright_field_channel)
                shift, = translation_2x1d(first, current)

                for c in range(channels):
                    if c == bright_field_channel:
                        shifted = shift_image(current,
                                              shift,
                                              background='blank')
                    else:
                        shifted = shift_image(ims.get_image(t=t,
                                                            pos=p,
                                                            channel=c),
                                              shift,
                                              background='blank')

                    rotated = rotate_image(shifted, angle)

                    buffer[c, :, :] = rotated[top:bottom, left:right]

                tiff.save(buffer)
Example #15
def transform_to_ome_tiff_merge(
    tform_reg_im,
    image_name,
    output_dir,
    final_transform,
    composite_transform,
    tile_size=512,
    write_pyramid=True,
):

    y_size, x_size, y_spacing, x_spacing = get_final_yx_from_tform(
        tform_reg_im.images[0], final_transform[0])

    # protect against too large tile size
    while y_size / tile_size <= 1 or x_size / tile_size <= 1:
        tile_size = tile_size // 2

    n_ch = tform_reg_im.n_ch
    pyr_levels, pyr_shapes = get_pyramid_info(y_size, x_size, n_ch, tile_size)
    n_pyr_levels = len(pyr_levels)
    output_file_name = str(Path(output_dir) / image_name)
    channel_names = format_channel_names(tform_reg_im.channel_names, n_ch)

    if final_transform is not None:
        PhysicalSizeY = y_spacing
        PhysicalSizeX = x_spacing
    else:
        PhysicalSizeY = tform_reg_im.image_res
        PhysicalSizeX = tform_reg_im.image_res

    omexml = prepare_ome_xml_str(
        y_size,
        x_size,
        n_ch,
        tform_reg_im.images[0].im_dtype,
        tform_reg_im.images[0].is_rgb,
        PhysicalSizeX=PhysicalSizeX,
        PhysicalSizeY=PhysicalSizeY,
        PhysicalSizeXUnit="µm",
        PhysicalSizeYUnit="µm",
        Name=image_name,
        Channel={"Name": channel_names},
    )
    subifds = n_pyr_levels - 1 if write_pyramid is True else None

    print(f"saving to {output_file_name}.ome.tiff")
    with TiffWriter(f"{output_file_name}.ome.tiff", bigtiff=True) as tif:
        for m_idx, merge_image in enumerate(tform_reg_im.images):
            merge_n_ch = merge_image.n_ch
            for channel_idx in range(merge_n_ch):
                image = merge_image.read_single_channel(channel_idx)
                image = np.squeeze(image)
                image = sitk.GetImageFromArray(image)
                image.SetSpacing(
                    (merge_image.image_res, merge_image.image_res))

                if composite_transform[m_idx] is not None:
                    image = transform_plane(
                        image,
                        final_transform[m_idx],
                        composite_transform[m_idx],
                    )

                print("saving")
                if isinstance(image, sitk.Image):
                    image = sitk.GetArrayFromImage(image)

                options = dict(
                    tile=(tile_size, tile_size),
                    compression="jpeg" if merge_image.is_rgb else "deflate",
                    photometric="rgb" if merge_image.is_rgb else "minisblack",
                    metadata=None,
                )
                # write OME-XML to the ImageDescription tag of the first page
                description = omexml if channel_idx == 0 else None
                # write channel data
                print(f" writing channel {channel_idx} - shape: {image.shape}")
                tif.write(
                    image,
                    subifds=subifds,
                    description=description,
                    **options,
                )

                if write_pyramid:
                    for pyr_idx in range(1, n_pyr_levels):
                        resize_shape = (
                            pyr_levels[pyr_idx][0],
                            pyr_levels[pyr_idx][1],
                        )
                        image = cv2.resize(
                            image,
                            resize_shape,
                            interpolation=cv2.INTER_LINEAR,
                        )
                        print(
                            f"pyr {pyr_idx} : channel {channel_idx} shape: {image.shape}"
                        )

                        tif.write(image, **options, subfiletype=1)

        return f"{output_file_name}.ome.tiff"
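
Stripped of the registration and OME-XML details, the pyramid-writing pattern used above (base plane written with `subifds` reserving slots for the reduced levels, each downsampled level written with `subfiletype=1`) can be sketched on its own. The hypothetical `write_pyramidal_tiff` below is a minimal illustration assuming a single 2D plane and simple halving per level, not a drop-in replacement for the function above:

import numpy as np
import cv2
from tifffile import TiffWriter

def write_pyramidal_tiff(image: np.ndarray, path: str, tile_size: int = 512, levels: int = 3) -> None:
    # Base resolution goes in the main IFD; `subifds` reserves slots for the
    # reduced-resolution levels, which are then written with subfiletype=1.
    options = dict(tile=(tile_size, tile_size), compression="deflate", photometric="minisblack")
    with TiffWriter(path, bigtiff=True) as tif:
        tif.write(image, subifds=levels - 1, **options)
        level = image
        for _ in range(1, levels):
            level = cv2.resize(level, (level.shape[1] // 2, level.shape[0] // 2),
                               interpolation=cv2.INTER_LINEAR)
            tif.write(level, subfiletype=1, **options)

For example, write_pyramidal_tiff(np.zeros((4096, 4096), np.uint16), "pyramid.tiff") writes a full-resolution plane plus two halved sub-resolution levels.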
Example #16
    def event(self, doc):
        '''Add event document information to a ".tiff" file.

        This method adds event document information to a ".tiff" file,
        creating it if necessary.

        .. warning::

            All data that is not 2D or 3D 'image-like' data is explicitly ignored.

        .. note::

            The data in Events might be structured as an Event, an EventPage,
            or a "bulk event" (deprecated). The DocumentRouter base class takes
            care of first transforming the other representations into an
            EventPage and then routing them through here, as we require Event
            documents _in this case_ we overwrite both the `event` method and
            the `event_page` method so we can assume we will always receive an
            Event.

        Parameters
        ----------
        doc : dict
            Event document
        '''
        event_model.verify_filled(event_model.pack_event_page(*[doc]))
        descriptor = self._descriptors[doc['descriptor']]
        stream_name = descriptor.get('name')
        for field in doc['data']:
            img = doc['data'][field]
            # Check that the data is 2D or 3D; if not ignore it.
            data_key = descriptor['data_keys'][field]
            ndim = len(data_key['shape'] or [])
            if data_key['dtype'] == 'array' and 1 < ndim < 4:
                img_asarray = numpy.asarray(img, dtype=self._astype)
                if tuple(data_key['shape']) != img_asarray.shape:
                    warnings.warn(
                        f"The descriptor claims the data shape is {data_key['shape']} "
                        f"but the data is actual data shape is {img_asarray.shape}! "
                        f"This will be an error in the future.")
                    ndim = img_asarray.ndim

                if ndim == 2:
                    # handle 2D data just like 3D data
                    # by adding a 3rd dimension
                    img_asarray = numpy.expand_dims(img_asarray, axis=0)
                for i in range(img_asarray.shape[0]):
                    img_asarray_2d = img_asarray[i, :]
                    num = next(self._counter[stream_name][field])
                    filename = get_prefixed_filename(
                        file_prefix=self._file_prefix,
                        start_doc=self._start,
                        descriptor_doc=descriptor,
                        event_doc=doc,
                        num=num,
                        stream_name=stream_name,
                        field=field,
                        pad=self._event_num_pad)
                    fname = self._manager.reserve_name('stream_data', filename)
                    Path(fname).parent.mkdir(parents=True, exist_ok=True)
                    tw = TiffWriter(fname, **self._init_kwargs)
                    self._tiff_writers[stream_name][field + f'-{num}'] = tw
                    tw.write(img_asarray_2d, **self._kwargs)
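
The note in the docstring says `event_page` is also overridden so this handler only ever deals with single Event documents. A minimal sketch of such a companion method, assuming the standard `event_model.unpack_event_page` helper, could be:

    def event_page(self, doc):
        # Unpack the EventPage into individual Event documents and route each
        # one through `event`, so `event` never has to handle pages itself.
        for event_doc in event_model.unpack_event_page(doc):
            self.event(event_doc)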
Example #17
    def apply_adjustments(dir_in,
                          dir_out,
                          regex_in,
                          group_layername=0,
                          flatfield: Flatfield = None,
                          reflectance_cal: ReflectanceCalibration = None,
                          rotate=0,
                          rgb2gray=False,
                          medianfilter=0,
                          new_dpi=None,
                          skip_existing=True):
        """
        Applies adjustments on the developed images, in the order of the function parameters
        :param dir_in: input directory
        :param dir_out: output directory
        :param regex_in: both a filter for input files and a hint to the
                        part of the filename determining the layer type
        :param group_layername: the group of regex_in determining the layer type. If set to 0 (default),
                                every file gets its own group.
        :param flatfield: for flatfield calibration
        :param reflectance_cal: for reflectance calibration
        :param rotate: degrees of rotation (counter clockwise!); allowed values: 0, 90, 180, 270/-90
        :param rgb2gray: merge a 3 channel grayscale image to a single channel (via median)
        :param medianfilter: radius of an optional median filter applied after the other
                             adjustments (0 disables it), e.g. to suppress sensor noise
        :param new_dpi: set the new dpi
        :param skip_existing: if true and the output file for a given input file already exists, skip
        :return:
        """

        print('<<<TIFF ADJUSTMENTS starting>>>')

        if not os.path.exists(dir_out):
            os.makedirs(dir_out)

        # for efficiency in flatfield correction we group the input files by layer type
        files_grouped = {}
        total_files = 0
        for f in os.listdir(dir_in):
            result = re.search(regex_in, f)
            if result:
                layer_name = result.group(group_layername)
                total_files += 1
                if layer_name in files_grouped.keys():
                    files_grouped[layer_name].append(f)
                else:
                    files_grouped[layer_name] = [f]

        i = 0
        for layer_name, files in files_grouped.items():
            # prepare flatfield correction
            if flatfield is not None:
                flatfield.prepare(layer_name)

            for f in files:
                file_in = os.path.join(dir_in, f)
                file_out = os.path.join(dir_out, f)
                i += 1

                # check if already done
                if os.path.exists(file_out) and skip_existing:
                    print('skipping %s' % file_in)
                    continue

                print('adjusting %s (%d/%d)...' % (file_in, i, total_files))

                # read tiff file
                tiff = TiffFile(file_in)
                img = tiff.pages[0].asarray()
                dtype_in = img.dtype

                # flatfield correction
                if flatfield is not None:
                    img = flatfield.correct(img)

                # reflectance calibration
                if reflectance_cal is not None:
                    img = reflectance_cal.correct(img, layer_name)

                # rotate
                if rotate == 0:
                    pass
                elif rotate == 90:
                    img = np.rot90(img, 1)
                elif rotate == 180:
                    img = np.rot90(img, 2)
                elif rotate == -90 or rotate == 270:
                    img = np.rot90(img, 3)
                else:
                    print(
                        "WARNING: invalid rotation angle provided (%d). Not rotating."
                        % rotate)

                # rgb2gray
                if rgb2gray:
                    img = Tools.rgb2gray(img, Tools.Stat.MEDIAN)

                # apply medianfilter (e.g. for removing sensor noise)
                if medianfilter > 0:
                    img = ndimage.median_filter(
                        img, (medianfilter * 2 + 1, medianfilter * 2 + 1))

                # set dpi
                if new_dpi is not None:
                    resolution = (new_dpi, new_dpi)
                else:
                    resolution = (tiff.pages[0].tags['XResolution'].value,
                                  tiff.pages[0].tags['YResolution'].value)

                # save image
                with TiffWriter(file_out) as tw:
                    tw.save(img.astype(dtype_in),
                            resolution=resolution,
                            compress=6)
                # copy all metadata from original, except for Resolution (because we just set that to the right value)
                Tools.copy_exif(file_in, file_out, omit_dpi=True)

        print('<<<TIFF ADJUSTMENTS completed>>>')
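
`Flatfield` and `ReflectanceCalibration` are project-specific classes whose `prepare`/`correct` methods are not shown. As a generic sketch of what a flatfield correction typically does (divide by the flatfield rescaled to unit mean), independent of this project's implementation, the hypothetical `flatfield_correct` below illustrates the idea:

import numpy as np

def flatfield_correct(img: np.ndarray, flat: np.ndarray) -> np.ndarray:
    # Divide by the flatfield normalized to mean 1, removing vignetting and
    # uneven illumination while roughly preserving overall intensity.
    gain = flat.astype(np.float64) / flat.mean()
    corrected = img.astype(np.float64) / np.maximum(gain, 1e-6)
    if np.issubdtype(img.dtype, np.integer):
        corrected = np.clip(corrected, 0, np.iinfo(img.dtype).max)
    return corrected.astype(img.dtype)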
Example #18
def save_tiff(mov: np.ndarray, fname: str) -> None:
    """Save image stack array to tiff file."""
    with TiffWriter(fname) as tif:
        for frame in np.floor(mov).astype(np.int16):
            tif.save(frame)
Example #19
        plt.show()

        filename_saveimg = config.tmp_save_dir + os.sep + 'result_' + filenames[
            0] + 'result.tiff'
        filename_mask = config.tmp_save_dir + os.sep + 'result_' + filenames[
            0] + 'mask.tiff'

        folder_name = os.path.split(filename_saveimg)[0]
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)

        # res = (res > 0.5).astype(np.float32)

        res = np.transpose(res, (3, 0, 1, 2))

        with TiffWriter(filename_saveimg, bigtiff=True) as tif:

            for k in range(res.shape[0]):

                tif.write(res[k, :, :, :, ], compress=2)

        lbls = lbls[0, ...]
        lbls = np.transpose(lbls, (3, 0, 1, 2))

        with TiffWriter(filename_mask, bigtiff=True) as tif:

            for k in range(lbls.shape[0]):

                tif.write(lbls[k, :, :, :, ], compress=2)

        # res_loaded = imread(filename_saveimg,key = slice(None))
Example #20
    def test_predict(self,
                     target_folder: Union[Path, str] = '',
                     model_path: Union[Path, str] = '') -> bool:
        """Run prediction on predefined test data.

        @param target_folder: Path|str, optional: default = ''
            Path to the folder where to store the predicted images. If not specified,
            it defaults to '{working_dir}/tests'. See constructor.

        @param model_path: Path|str, optional: default = ''
            Full path to the model to use. If omitted and a training was
            just run, the path to the model with the best metric is
            already stored and will be used.

            @see get_best_model_path()

        @return True if the prediction was successful, False otherwise.
        """

        # Inform
        self._print_header("Test prediction")

        # Get the device
        self._device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # If the model is not in memory, instantiate it first
        if self._model is None:
            self._define_model()

        # If the path to the best model was not set, use current one (if set)
        if model_path == '':
            model_path = self.get_best_model_path()

        # Try loading the model weights: they must be compatible
        # with the model in memory
        try:
            checkpoint = torch.load(model_path,
                                    map_location=torch.device('cpu'))
            self._model.load_state_dict(checkpoint)
            print(f"Loaded best metric model {model_path}.", file=self._stdout)
        except Exception as e:
            self._message = "Error: there was a problem loading the model! Aborting."
            return False

        # If the target folder is not specified, set it to the standard predictions out
        if target_folder == '':
            target_folder = Path(self._working_dir) / "tests"
        else:
            target_folder = Path(target_folder)
        target_folder.mkdir(parents=True, exist_ok=True)

        # Switch to evaluation mode
        self._model.eval()

        indx = 0

        # Make sure not to update the gradients
        with torch.no_grad():
            for test_data in self._test_dataloader:

                # Get the next batch and move it to device
                test_images, test_masks = test_data[0].to(
                    self._device), test_data[1].to(self._device)

                # Apply sliding inference over ROI size
                test_outputs = sliding_window_inference(
                    test_images, self._roi_size,
                    self._sliding_window_batch_size, self._model)
                test_outputs = self._test_post_transforms(test_outputs)

                # Retrieve the image from the GPU (if needed)
                pred = test_outputs.cpu().numpy().squeeze()

                # Prepare the output file name
                basename = os.path.splitext(
                    os.path.basename(self._test_image_names[indx]))[0]
                basename = basename.replace('train_', 'pred_')

                # Convert to label image
                label_img = self._prediction_to_label_tiff_image(pred)

                # Save label image as tiff file
                label_file_name = os.path.join(str(target_folder),
                                               basename + '.tif')
                with TiffWriter(label_file_name) as tif:
                    tif.save(label_img)

                # Inform
                print(f"Saved {str(target_folder)}/{basename}.tif",
                      file=self._stdout)

                # Update the index
                indx += 1

        # Inform
        print(f"Test prediction completed.", file=self._stdout)

        # Return success
        return True
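
The prediction loop above relies on MONAI's `sliding_window_inference`, which splits a large input into ROI-sized patches, runs the model on batches of patches, and stitches the outputs back to the input size. A self-contained toy call (the convolution merely stands in for a real segmentation model) looks roughly like this:

import torch
from monai.inferers import sliding_window_inference

model = torch.nn.Conv2d(1, 2, kernel_size=3, padding=1)   # stand-in "segmentation" model
model.eval()

image = torch.rand(1, 1, 1024, 1024)                      # one large single-channel image
with torch.no_grad():
    # 256x256 patches, 4 patches per forward pass, outputs stitched back to 1024x1024
    logits = sliding_window_inference(image, roi_size=(256, 256), sw_batch_size=4, predictor=model)
print(logits.shape)   # torch.Size([1, 2, 1024, 1024])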
Example #21
 def open(self):
     self.tif = TiffWriter(self.tifpath.str, bigtiff=True)
     self.csv = self.csvpath.open('w')
Example #22
	def process(self, theSourceDir, theDestinationDir, **kwargs):
		for key, value in kwargs.items():
			setattr(self, key, value)

		if theSourceDir == theDestinationDir:
			self.log("Input and output directories cannot be the same.")
			return False
		fileList = glob.glob(theSourceDir+'/*.'+self.fileExtension)
		if len(fileList) == 0:
			self.log(" Zero files to process in source directory.")
			return False

		fileList = glob.glob(theSourceDir+'/*.'+self.fileExtension)
		self.log('Processing %i images' % len(fileList))
		progress = 0
		for file in fileList:
			#Load image
			data = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB)
			data = data.astype('float')
			self.currentMax = 0

			#Load original EXIF data
			inExif = pyexiv2.metadata.ImageMetadata(file)
			inExif.read()
			
			#Radiometric Calibration
			if self.radiometricCalibrator != None and self.radiometricCalibration:
				data = self.radiometricCalibrator.calibrate(data)
				self.currentMax = 1

			#compute index
			if self.index == 'NDVI':
				data = self.indexNdvi(data)

			#Scale
			if self.currentMax != 0:
				percentOfRange = (data - self.currentMin) / (self.currentMax - self.currentMin)
				data = (self.scaleFrom * (1 - percentOfRange)) + (self.scaleTo * percentOfRange)
			
			#Set dtype
			if self.scaleTo == 1:
				if data.dtype.name != 'float32':
					data = data.astype('float32')
			elif self.scaleTo > 255:
				#unsigned int (I:16)
				data = data.astype('uint16')
			elif self.index != 'None':
				if self.lut == 'None':
					#bit image (L)
					data = data.astype('uint8')
					pass
				else:
					#Paletted
					pass
			else:
				#RGB (RGB)
				data = data.astype('uint8')

			#Save processed data to new file
			path, baseName = os.path.split(file)
			baseName = re.sub(r''+re.escape(self.fileExtension)+'$', 'tiff', baseName)
			if self.index != 'None':
				baseName = self.index+'-'+baseName
			tif = TiffWriter(theDestinationDir+'/'+baseName)
			tif.save(data, compress=6)
			tif = None
			
			#Transfer EXIF data to new file
			outExif = pyexiv2.metadata.ImageMetadata(theDestinationDir+'/'+baseName)
			outExif.read()
			#inExif.copy(outExif, comment=False)
			#outExif.write()

			#Log and progress
			progress += 1
			self.log('%s created' % (baseName), progress=progress)
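
`indexNdvi` is a method of the surrounding class and is not reproduced here. For reference, NDVI is the standard normalized difference between the near-infrared and red bands; the hypothetical `ndvi` sketch below assumes NIR in channel 0 and red in channel 2, which may not match this project's band layout:

import numpy as np

def ndvi(data: np.ndarray, nir_band: int = 0, red_band: int = 2) -> np.ndarray:
    # Normalized Difference Vegetation Index: (NIR - Red) / (NIR + Red).
    # The band indices are illustrative assumptions, not the class's actual mapping.
    nir = data[..., nir_band].astype(np.float64)
    red = data[..., red_band].astype(np.float64)
    return (nir - red) / np.maximum(nir + red, 1e-9)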
Example #23
def procAll(inDirName, outDirName, dishId, prefix):
    global verbose, rWidth
    ofpath = "%s/%s/%s"%(outDirName, prefix.replace("apogwas","batch"), dishId)
    #reg_file="%s/plant-regions-%s.png"%(outDirName,dishId,dishId)
    reg_file="%s/plant-regions-%s.png"%(ofpath,dishId)
    if os.path.isfile(reg_file):
        if verbose:
            print("Skipping dish %s (all done, file %s exists)"%(dishId, reg_file))
        return
    now = time.strftime("%Y-%b-%d_%H:%M:%S")
    reportLogs = configparser.ConfigParser()
    try:
        reportLogs.read(f"{ofpath}/plateprocsplit.txt")
    except:
        pass
    if verbose:
        print("Input directory:  %s/%s"%(inDirName,prefix))
        print("Output directory/set: %s"%(ofpath))

    # align the dishes, first check if a file with aligned dishes exists
    plates_file = "%s/plates-%s.tif"%(ofpath,dishId)
    plates = loadTiff(plates_file)
    if plates is None:
        if verbose:
            print("Detecting and aligning dishes for set %s*%s in %s"%(prefix,dishId,inDirName))
        plates, reportLog = platealign.procPlateSet(inDirName, ofpath, dishId, prefix)
        with TiffWriter("%s/plates-%s.tif"%(ofpath, dishId)) as tif: tif.save(plates)
        imageio.imwrite("%s/plates-%s.png"%(ofpath, dishId), plates.max(axis=0)[::4,::4,:])
    else:
        if verbose:
            print("Reusing aligned plates: %s"%plates_file)
        reportLog = {"Reusing aligned plates": "%s"%plates_file}
    reportLogs["platealign %s"%now] = reportLog

    # identify seeds, first check if (inverted) file with seed masks exists
    mask_file = "%s/seeds-mask-%s.tif"%(ofpath,dishId)
    invmask = loadTiff(mask_file)
    if invmask is None:
        if verbose:
            print("Detecting seeds in %s"%plates_file)
        #ipdb.set_trace()
        mask, reportLog = platesegseed.procPlateSet(plates)
        with TiffWriter("%s/seeds-mask-%s.tif"%(ofpath,dishId)) as tif:
            tif.save(platesegseed.img3mask(plates[0]+1,1-mask),compress=5)
    else:
        if verbose:
            print("Reusing seed mask: %s"%mask_file)
        mask = invmask[...,0] > 0
        reportLog = ("Reusing seed mask: %s"%mask_file)
    #write in both cases, eventually to reflect manual changes in the seeds-mask file
    with TiffWriter("%s/seeds-%s.tif"%(ofpath,dishId)) as tif:
        tif.save(platesegseed.img3mask(plates[0],mask),compress=5)

    #Save regions
    if verbose:
        print("Saving regions to %s/%s/%s"%(outDirName, prefix, dishId))
    #ipdb.set_trace()
    reportLog = platesplit.procPlateSet(ofpath, dishId, plates, mask, rWidth)
    reportLogs["platesplit %s"%now] = reportLog

    with open("%s/plateprocsplit.txt"%(ofpath), 'w') as reportfile: reportLogs.write(reportfile)
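
`loadTiff`, used above to decide whether intermediate results can be reused, is defined elsewhere in the script. A plausible sketch, assuming it simply wraps tifffile's `imread` and returns `None` when the file is missing or unreadable, is:

from tifffile import imread

def loadTiff(path):
    # Return the stack stored at `path`, or None so the caller recomputes it.
    try:
        return imread(path)
    except (FileNotFoundError, OSError, ValueError):
        return None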