Code example #1
File: readers.py Project: d-v-b/thunder
    def getarray(idx_buffer_filename):
        idx, buf, fname = idx_buffer_filename
        fbuf = BytesIO(buf)
        tfh = TiffFile(fbuf)
        ary = tfh.asarray()
        pageCount = ary.shape[0]
        if nplanes is not None:
            extra = pageCount % nplanes
            if extra:
                if discard_extra:
                    pageCount = pageCount - extra
                    logging.getLogger('thunder').warn('Ignored %d pages in file %s' % (extra, fname))
                else:
                    raise ValueError("nplanes '%d' does not evenly divide '%d in file %s'" % (nplanes, pageCount,
                                                                                              fname))
            values = [ary[i:(i+nplanes)] for i in range(0, pageCount, nplanes)]
        else:
            values = [ary]
        tfh.close()

        if ary.ndim == 3:
            values = [val.squeeze() for val in values]

        nvals = len(values)
        keys = [(idx*nvals + timepoint,) for timepoint in range(nvals)]
        return zip(keys, values)
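The helper above is nested inside Thunder's reader, so `nplanes`, `discard_extra`, and the logger come from the enclosing scope. A minimal self-contained sketch of the same splitting idea, assuming a plain file path and argument names (illustrative only, not Thunder's API):

from tifffile import imread

def split_into_volumes(path, nplanes, discard_extra=True):
    ary = imread(path)              # all pages stacked along axis 0
    pages = ary.shape[0]
    extra = pages % nplanes
    if extra:
        if not discard_extra:
            raise ValueError("nplanes %d does not evenly divide %d pages" % (nplanes, pages))
        pages -= extra              # drop the trailing incomplete volume
    return [ary[i:i + nplanes] for i in range(0, pages, nplanes)]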
Code example #2
class LocalTiff(LocalTarget):
    tif = None

    def open(self):
        self.tif = TiffFile(self.path)
        return self

    def close(self):
        if self.tif is not None:
            self.tif.close()

    def __len__(self):
        return len(self.tif.pages)

    @property
    def shape(self):
        return (len(self), *self.tif.pages[0].shape)

    def __getitem__(self, item):
        if isinstance(item, tuple):
            if isinstance(item[0], int):
                return self[item[0]][item[1:]]
            else:
                return self[item[0]][item]
        else:
            out = self.tif.asarray(key=item)
            if isinstance(item, list) and len(item) == 1:
                return out[None]  # Add axis
            elif isinstance(item, slice) and len(
                    range(*item.indices(len(self)))) == 1:
                return out[None]  # Add axis
            else:
                return out
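A short usage sketch of the page-selection call the wrapper relies on (the file name is an assumption):

from tifffile import TiffFile

with TiffFile("stack.tif") as tif:
    n_pages = len(tif.pages)
    first = tif.asarray(key=0)          # one page, returned as a 2D array
    some = tif.asarray(key=[0, 1, 2])   # selected pages stacked along axis 0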
Code example #3
def get_info_from_path(path):
    tif = TiffFile(path)
    res = get_pixel_size(tif)
    time, time_unit = get_time(tif)
    tif.close()

    return res, time, time_unit
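The helpers `get_pixel_size` and `get_time` are not shown here. As a hedged sketch (not the project's actual implementation), a pixel size can be derived from the standard TIFF resolution tags:

from tifffile import TiffFile

def get_pixel_size_from_tags(tif: TiffFile):
    # XResolution is a (numerator, denominator) rational: pixels per resolution unit
    tags = tif.pages[0].tags
    if "XResolution" not in tags:
        return None
    num, denom = tags["XResolution"].value
    return denom / num if num else None   # size of one pixel in resolution units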
Code example #4
File: __init__.py Project: csachs/molyso
def test_image():
    """
    Returns a test image (first image of the small dataset).

    :return: image
    :rtype: numpy.ndarray
    """
    global _test_image
    if _test_image is None:
        t = TiffFile(os.path.join(os.path.dirname(__file__), 'example-frame.tif'))
        _test_image = t.pages[0].asarray()
        t.close()

    return _test_image
Code example #5
File: multipagetiff.py Project: mehta-lab/waveorder
    def _gather_index_maps(self):
        """
        Build self.coord_map, a dictionary of {coord: (filepath, page, byte_offset)}
        with one entry per image, to be queried later.
        """

        positions = 0
        frames = 0
        channels = 0
        slices = 0
        for file in self.files:
            tf = TiffFile(file)
            meta = tf.micromanager_metadata['IndexMap']
            tf.close()
            offsets = list(meta['Offset'])

            for page in range(len(meta['Channel'])):
                coord = [0, 0, 0, 0]
                coord[0] = meta['Position'][page]
                coord[1] = meta['Frame'][page]
                coord[2] = meta['Channel'][page]
                coord[3] = meta['Slice'][page]
                offset = self._get_byte_offset(offsets, page)
                self.coord_map[tuple(coord)] = (file, page, offset)

                # update dimensions as we go along, helps with incomplete datasets
                if coord[0] + 1 > positions:
                    positions = coord[0] + 1

                if coord[1] + 1 > frames:
                    frames = coord[1] + 1

                if coord[2] + 1 > channels:
                    channels = coord[2] + 1

                if coord[3] + 1 > slices:
                    slices = coord[3] + 1

        # update dimensions to the largest dimensions present in the saved data
        self.positions = positions
        self.frames = frames
        self.channels = channels
        self.slices = slices
Code example #6
    def open(self, filename):
        prefs = self.preferences
        # OPEN
        tif = TiffFile(str(filename))
        img = tif.asarray()
        # due to different conventions:
        #img = transpose(img)
        # crop
        if prefs.pCrop.value():
            r = (prefs.pCropX0.value(), prefs.pCropX1.value(),
                 prefs.pCropY0.value(), prefs.pCropY1.value())
            img = img[r[0]:r[1], r[2]:r[3]]
        # resize
        if prefs.pResize.value():
            img = cv2.resize(img,
                             (prefs.pResizeX.value(), prefs.pResizeY.value()))

        img = self.toFloat(img)

        try:
            # try to extract labels names set by imageJ:
            labels = tif.pages[0].imagej_tags['labels']
            # remove surplus information:
            for n, l in enumerate(labels):
                try:
                    i = l.index('\n')
                    if i != -1:
                        labels[n] = l[:i]
                except ValueError:
                    # no \n in label
                    pass

        except AttributeError:
            if img.ndim == 3:
                # color image
                labels = [str(r) for r in range(len(img))]
            else:
                labels = None

        tif.close()
        return img, labels
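The label extraction above goes through the older `page.imagej_tags` attribute; with current tifffile the same information is exposed on the file object. A hedged sketch (the file name is an assumption):

from tifffile import TiffFile

with TiffFile("stack.tif") as tif:
    labels = None
    if tif.is_imagej:
        labels = (tif.imagej_metadata or {}).get("Labels")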
Code example #7
File: geotiff.py Project: KipCrossing/geotiff
    def __init__(
        self,
        file: str,
        band: int = 0,
        as_crs: Optional[int] = 4326,
        crs_code: Optional[int] = None,
    ):
        """For representing a geotiff

        Args:
            file (str): Location of the geotiff file
            band (int): The band of the tiff file to use. Defaults to 0.
            as_crs (Optional[int]): The epsg crs code to read the data as.  Defaults to 4326 (WGS84).
            crs_code (Optional[int]): The epsg crs code of the tiff file. Include this if the crs code can't be detected.

        """
        self.file = file
        self._as_crs = crs_code if as_crs is None else as_crs
        tif = TiffFile(self.file)

        if not tif.is_geotiff:
            raise Exception("Not a geotiff file")

        store = tif.aszarr(key=band)
        self._z = zarr.open(store, mode="r")
        store.close()
        if isinstance(crs_code, int):
            self._crs_code: int = crs_code
        else:
            self._crs_code = self._get_crs_code(tif.geotiff_metadata)
        self._tif_shape: List[int] = self._z.shape
        scale: Tuple[float, float,
                     float] = tif.geotiff_metadata["ModelPixelScale"]
        tilePoint: List[float] = tif.geotiff_metadata["ModelTiepoint"]
        self._tifTrans: TifTransformer = TifTransformer(
            self._tif_shape[0], self._tif_shape[1], scale, tilePoint)
        tif.close()
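A hedged sketch of the lazy-access pattern used above, reading only a window of one band through the zarr store (file name and window size are assumptions):

import zarr
from tifffile import TiffFile

with TiffFile("image.tif") as tif:
    store = tif.aszarr(key=0)           # band 0 exposed as a zarr store
    z = zarr.open(store, mode="r")
    window = z[:256, :256]              # only the needed tiles/strips are read
    store.close()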
Code example #8
File: file_handler.py Project: iandobbie/CytoCensus
class Intermediate_handler:
    """ Wrapper for saving intermediate results to file and keeping track of them """
    def __init__(self, filename):
        try:
            os.remove(filename)
        except OSError:
            # the file may not exist yet
            pass
        self.plane = 0
        self.tif = None
        self.filename = filename
        self.refs = []
        self.array = None
        # mode flags so __enter__ works even before reader()/writer() is called
        self.read = False
        self.write = False

    def reader(self):
        self.read = True
        self.write = False
        return self

    def writer(self):
        self.write = True
        self.read = False
        return self

    def __enter__(self):
        if self.write:
            self.tif = TiffWriter(self.filename, bigtiff=True, append=True)
        if self.read:
            self.tif = TiffFile(self.filename)
            self.array = self.tif.asarray()  # memmap=True)

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.array = None
        self.tif.close()

    def close(self):
        self.array = None

        if self.tif is not None:
            self.tif.close()

    def write_plane(self, data, t, z, f):
        # works plane by plane
        # print 'writing plane' + str((t,z,f))
        # print data.shape

        self.tif.save(data, compress=0, contiguous=True)

        if len(data.shape) > 2:
            for i in range(data.shape[0]):
                self.plane += 1
                self.refs = self.refs + [(t, z + i)]
        else:
            self.plane += 1
            self.refs = self.refs + [(t, z)]

    def read_plane(self, t, z, f):
        # works plane by plane
        planeno = self.refs.index((t, z))
        return np.squeeze(self.tif.asarray(planeno))
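The handler above uses the older `TiffWriter.save(..., compress=0)` call; in current tifffile the equivalent is `write()`. A hedged sketch of appending planes to a BigTIFF (file name and data are assumptions):

import numpy as np
from tifffile import TiffWriter

plane = np.zeros((512, 512), dtype=np.uint16)
with TiffWriter("intermediate.tif", bigtiff=True, append=True) as tw:
    tw.write(plane, contiguous=True)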
Code example #9
File: image_reader.py Project: Anodarai/PartSeg
class TiffImageReader(BaseImageReader):
    """
    TIFF/LSM files reader. Base reading with :py:meth:`BaseImageReader.read_image`

    image_file: TiffFile
    mask_file: TiffFile
    """
    def __init__(self, callback_function=None):
        super().__init__(callback_function)
        self.image_file = None
        self.mask_file: typing.Optional[TiffFile] = None
        self.colors = None
        self.labels = None
        self.ranges = None

    def read(self,
             image_path: typing.Union[str, BytesIO, Path],
             mask_path=None,
             ext=None) -> Image:
        """
        Read a TIFF image from ``image_path``, optionally together with a mask from ``mask_path``
        """
        self.spacing, self.colors, self.labels, self.ranges = self.default_spacing, None, None, None
        self.image_file = TiffFile(image_path)
        total_pages_num = len(self.image_file.series[0])
        if mask_path is not None:
            self.mask_file = TiffFile(mask_path)
            total_pages_num += len(self.mask_file.series[0])
            self.verify_mask()
        else:
            self.mask_file = None

        # shape = self.image_file.series[0].shape
        axes = self.image_file.series[0].axes
        self.callback_function("max", total_pages_num)

        if self.image_file.is_lsm:
            self.read_lsm_metadata()
        elif self.image_file.is_imagej:
            self.read_imagej_metadata()
        elif self.image_file.is_ome:
            self.read_ome_metadata()
        else:
            x_spac, y_spac = self.read_resolution_from_tags()
            self.spacing = self.default_spacing[0], y_spac, x_spac
        mutex = Lock()
        count_pages = [0]

        def report_func():
            mutex.acquire()
            count_pages[0] += 1
            self.callback_function("step", count_pages[0])
            mutex.release()

        self.image_file.report_func = report_func
        try:
            image_data = self.image_file.asarray()
        except ValueError as e:  # pragma: no cover
            raise TiffFileException(*e.args)
        image_data = self.update_array_shape(image_data, axes)
        if self.mask_file is not None:
            self.mask_file.report_func = report_func
            mask_data = self.mask_file.asarray()
            mask_data = self.update_array_shape(
                mask_data, self.mask_file.series[0].axes)[..., 0]
        else:
            mask_data = None
        self.image_file.close()
        if self.mask_file is not None:
            self.mask_file.close()
        if not isinstance(image_path, str):
            image_path = ""
        return self.image_class(
            image_data,
            self.spacing,
            mask=mask_data,
            default_coloring=self.colors,
            labels=self.labels,
            ranges=self.ranges,
            file_path=os.path.abspath(image_path),
            axes_order=self.return_order(),
        )

    def verify_mask(self):
        """
        Verify that the mask shape is compatible with the image. Raises ValueError on mismatch.
        """
        if self.mask_file is None:  # pragma: no cover
            return
        image_series = self.image_file.pages[0]
        mask_series = self.mask_file.pages[0]
        for i, pos in enumerate(mask_series.axes):
            if mask_series.shape[i] == 1:  # pragma: no cover
                continue
            try:
                j = image_series.axes.index(pos)
            except ValueError:
                raise ValueError("Incompatible shape of mask and image (axes)")
                # TODO add verification if problem with T/Z/I
            if image_series.shape[j] != mask_series.shape[i]:
                raise ValueError("Incompatible shape of mask and image")
            # TODO Add verification if the mask has too few dimensions

    @staticmethod
    def decode_int(val: int):
        """
        Split a 32-bit int into four 8-bit ints.

        :param val: value to decode
        :return: list of four numbers in the range [0, 255]
        """
        return [(val >> x) & 255 for x in [24, 16, 8, 0]]

    def read_resolution_from_tags(self):
        tags = self.image_file.pages[0].tags
        try:

            if self.image_file.is_imagej:
                scalar = name_to_scalar[
                    self.image_file.imagej_metadata["unit"]]
            else:
                unit = tags["ResolutionUnit"].value
                if unit == 3:
                    scalar = name_to_scalar["centimeter"]
                elif unit == 2:
                    scalar = name_to_scalar["cal"]
                else:
                    raise KeyError(
                        f"wrong scalar {tags['ResolutionUnit']}, {tags['ResolutionUnit'].value}"
                    )

            x_spacing = tags["XResolution"].value[1] / tags[
                "XResolution"].value[0] * scalar
            y_spacing = tags["YResolution"].value[1] / tags[
                "YResolution"].value[0] * scalar
        except (KeyError, ZeroDivisionError):
            x_spacing, y_spacing = self.default_spacing[
                2], self.default_spacing[1]
        return x_spacing, y_spacing

    def read_imagej_metadata(self):
        try:
            z_spacing = (
                self.image_file.imagej_metadata["spacing"] *
                name_to_scalar[self.image_file.imagej_metadata["unit"]])
        except KeyError:
            z_spacing = self.default_spacing[0]
        x_spacing, y_spacing = self.read_resolution_from_tags()
        self.spacing = z_spacing, y_spacing, x_spacing
        self.colors = self.image_file.imagej_metadata.get("LUTs")
        self.labels = self.image_file.imagej_metadata.get("Labels")
        if "Ranges" in self.image_file.imagej_metadata:
            ranges = self.image_file.imagej_metadata["Ranges"]
            self.ranges = list(zip(ranges[::2], ranges[1::2]))

    def read_ome_metadata(self):
        if isinstance(self.image_file.ome_metadata, str):
            if hasattr(tifffile, "xml2dict"):
                meta_data = tifffile.xml2dict(
                    self.image_file.ome_metadata)["OME"]["Image"]["Pixels"]
            else:
                return
        else:
            meta_data = self.image_file.ome_metadata["Image"]["Pixels"]
        try:
            self.spacing = [
                meta_data[f"PhysicalSize{x}"] *
                name_to_scalar[meta_data[f"PhysicalSize{x}Unit"]]
                for x in ["Z", "Y", "X"]
            ]
        except KeyError:  # pragma: no cover
            pass
        if "Channel" in meta_data and isinstance(meta_data["Channel"],
                                                 (list, tuple)):
            try:
                self.labels = [ch["Name"] for ch in meta_data["Channel"]]
            except KeyError:
                pass
            try:
                self.colors = [
                    self.decode_int(ch["Color"])[:-1]
                    for ch in meta_data["Channel"]
                ]
            except KeyError:
                pass

    def read_lsm_metadata(self):
        self.spacing = [
            self.image_file.lsm_metadata[f"VoxelSize{x}"]
            for x in ["Z", "Y", "X"]
        ]
        if "ChannelColors" in self.image_file.lsm_metadata:
            if "Colors" in self.image_file.lsm_metadata["ChannelColors"]:
                self.colors = [
                    x[:3] for x in
                    self.image_file.lsm_metadata["ChannelColors"]["Colors"]
                ]
            if "ColorNames" in self.image_file.lsm_metadata["ChannelColors"]:
                self.labels = self.image_file.lsm_metadata["ChannelColors"][
                    "ColorNames"]
Code example #10
File: imagej_tiff.py Project: Yongcheng123/ChimeraX
def imagej_pixels(path):

    from tifffile import TiffFile, TIFF
    tif = TiffFile(path)
    pages = tif.pages
    page0 = pages[0]
    tags = page0.tags
    
    pixel_width = 1.0
    if 'XResolution' in tags:
        v = tags['XResolution'].value
        num,denom = v
        if num != 0:
            pixel_width = denom/num	# 1/value

    pixel_height = 1.0
    if 'YResolution' in tags:
        v = tags['YResolution'].value
        num,denom = v
        if num != 0:
            pixel_height = denom/num	# 1/value

    pixel_size = (pixel_width, pixel_height)

    header = None
    if 'ImageDescription' in tags:
        d = tags['ImageDescription'].value
        if d.startswith('ImageJ='):
            header = d

    if header is None:
        from os.path import basename
        raise TypeError('ImageJ TIFF file %s does not have an image description tag'
                        ' starting with "ImageJ=<version>"' % basename(path))
        
    h = {}
    lines = header.split('\n')
    for line in lines:
        kv = line.split('=')
        if len(kv) == 2:
            h[kv[0]] = kv[1]

    from os.path import basename
    name = basename(path)
    shape = page0.shape
    ysize, xsize = shape[:2]
    zsize = int(h['slices']) if 'slices' in h else len(pages)
    grid_size = (xsize, ysize, zsize)
    zspacing = float(h['spacing']) if 'spacing' in h else 1
    grid_spacing = (pixel_width, pixel_height, zspacing)
    nc = int(h['channels']) if 'channels' in h else 1
    nt = int(h['frames']) if 'frames' in h else 1
    value_type = page0.dtype
    ncolors = 3 if page0.photometric == TIFF.PHOTOMETRIC.RGB else 1
    multiframe = (zsize > 1)

    # Check for ImageJ hyperstack format for > 4 GB data where TIFF has only one page.
    if zsize > 1 or nc > 1 or nt > 1:
        try:
            pages[1]
        except IndexError:
            from chimerax.core.errors import UserError
            raise UserError('Cannot read ImageJ hyperstack TIFF file "%s".  ImageJ TIFF files larger than 4 Gbytes do not follow the TIFF standard.  They include only one TIFF page and append the rest of the raw 2d images to the file.  ChimeraX cannot currently handle these hacked TIFF files.  Contact the ChimeraX developers and we can discuss adding support for this format.' % path)

    tif.close()
    
    pi = ImageJ_Pixels(path, name, value_type, grid_size, grid_spacing, ncolors, nc, nt, multiframe)
    return pi
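With current tifffile the hyperstack dimensions parsed manually above are also available as a dict. A hedged sketch (the file name is an assumption):

from tifffile import TiffFile

with TiffFile("hyperstack.tif") as tif:
    meta = tif.imagej_metadata or {}
    slices = meta.get("slices", len(tif.pages))
    channels = meta.get("channels", 1)
    frames = meta.get("frames", 1)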
Code example #11
File: logo_maker.py Project: pjb7687/cellocity
    drawn_frames = flow_analysis.draw_all_flow_frames_superimposed(
        scalebarFlag=True, scalebarLength=10, scale=10, line_thicknes=1)
    channel_name = flow_analysis.getChannelName()

    fig = plt.figure()
    camera = Camera(fig)
    plt.title("A logo made from " + channel_name)
    plt.style.use('seaborn-dark')
    plt.axis('off')

    for i in range(drawn_frames.shape[0]):
        plt.imshow(drawn_frames[i])
        camera.snap()

    animation = camera.animate()
    file_name = channel_name + ".mp4"
    saveme = savepath / file_name

    animation.save(str(saveme), writer='ffmpeg')


a_list_of_analysis = [regular_analysis, median_analysis]

for a in a_list_of_analysis:
    animateAndSaveLogo(a)
    a.calculateAverageSpeeds()
    a.saveArrayAsTif(savepath)
    a.saveFlowAsTif(savepath)

tif.close()
Code example #12
def lsm2cmap(lsmfile: PathLike,
             cmapfile: PathLike | None = None,
             **kwargs) -> None:
    """Convert 5D TZCYX LSM file to Chimera MAP files, one per channel.

    Parameters
    ----------
    lsmfile : str
        Name of the LSM file to convert.
    cmapfile : str, optional
        Name of the output CMAP file. If None (default), the name is
        derived from lsmfile.
    **kwargs
        Optional extra arguments passed to the CmapFile.addmap function,
        e.g. verbose, step, origin, cell_angles, rotation_axis,
        rotation_angle, subsample, chunks, and compression.

    """
    verbose = kwargs.get('verbose', False)
    try:
        cmaps = []
        lsm = None
        # open LSM file
        lsm = TiffFile(lsmfile)
        series = lsm.series[0]  # first series contains the image data
        if hasattr(series, 'get_shape'):
            # tifffile > 2020.2.25 return squeezed shape and axes
            shape = series.get_shape(False)
            axes = series.get_axes(False)
            if axes[:2] == 'MP' and shape[:2] == (1, 1):
                axes = axes[2:]
                shape = shape[2:]
        else:
            shape = series.shape
            axes = series.axes
        if axes != 'TZCYX':
            raise ValueError(f'not a 5D LSM file (expected TZCYX, got {axes})')
        if verbose:
            print(lsm)
            print(shape, axes, flush=True)
        # create one CMAP file per channel
        if cmapfile:
            cmapfile = '{}.ch%04d{}'.format(*os.path.splitext(cmapfile))
        else:
            cmapfile = f'{lsmfile}.ch%04d.cmap'
        cmaps = [CmapFile(cmapfile % i) for i in range(shape[2])]
        # voxel/step sizes
        if not kwargs.get('step', None):
            try:
                attrs = lsm[0].cz_lsm_info
                kwargs['step'] = (
                    attrs['voxel_size_x'] / attrs['voxel_size_x'],
                    attrs['voxel_size_y'] / attrs['voxel_size_x'],
                    attrs['voxel_size_z'] / attrs['voxel_size_x'],
                )
            except Exception:
                pass
        # iterate over Tiff pages containing data
        pages = iter(series.pages)
        for _ in range(shape[0]):  # iterate over time axis
            datalist = []
            for _ in range(shape[1]):  # iterate over z slices
                datalist.append(next(pages).asarray())
            data = numpy.vstack(datalist).reshape(shape[1:])
            for c in range(shape[2]):  # iterate over channels
                # write datasets and attributes
                cmaps[c].addmap(data=data[:, c], **kwargs)
    finally:
        if lsm:
            lsm.close()
        for f in cmaps:
            f.close()
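A hedged sketch of the series-based access the converter builds on: reading the first series in one call instead of page by page (the file name is an assumption):

from tifffile import TiffFile

with TiffFile("timeseries.lsm") as lsm:
    series = lsm.series[0]
    print(series.axes, series.shape)    # e.g. 'TZCYX' for a 5D LSM stack
    data = series.asarray()             # whole series as one ndarray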
Code example #13
File: array.py Project: liamchalcroft/nitorch
class TiffArray(MappedArray):
    """
    MappedArray that uses `tifffile` under the hood.
    """
    def __init__(self, file_like, mode='r', keep_open=False, **hints):
        """

        Parameters
        ----------
        file_like : str or file object
        mode : {'r'}, default='r'
        keep_open : bool, default=False
            Whether to keep the file handle open
        hints : keyword arguments of the form `is_<format>=<True|False>`
            Tell the Tiff reader that a file is or isn't of a specific
            subformat. If not provided, it is guessed by the Tiff reader.
        """
        self._tiff = TiffFile(file_like, **hints)
        if not keep_open:
            self._tiff.close()

        self._series = 0
        self._level = 0
        self._cache = dict()
        super().__init__()

    _series: int = 0  # index of series to map
    _level: int = 0  # index of pyramid level to map
    _cache: dict = {}  # a cache of precomputed _shape, _spatial, etc

    @property
    def _shape(self):
        """Full shape of a series+level"""
        if '_shape' not in self._cache:
            with self.tiffobj() as tiff:
                shape = tiff.series[self.series].levels[self.level].shape
            self._cache['_shape'] = shape
        return self._cache['_shape']

    @property
    def _axes(self):
        """Axes names of a series+level"""
        if '_axes' not in self._cache:
            with self.tiffobj() as tiff:
                axes = tiff.series[self.series].levels[self.level].axes
            self._cache['_axes'] = axes
        return self._cache['_axes']

    @property
    def _spatial(self):
        """Mask of spatial axes of a series+level"""
        msk = [ax in 'XYZ' for ax in self._axes]
        return msk

    @property
    def _affine(self):
        """Affine orientation matrix of a series+level"""
        # TODO: I don't know yet how we should use GeoTiff to encode
        #   affine matrices. In the matrix/zooms, their voxels are ordered
        #   as [x, y, z] even though their dimensions in the returned array
        #   are ordered as [Z, Y, X]. If we want to keep the same convention
        #   as nitorch, I need to permute the matrix/zooms.
        if '_affine' not in self._cache:
            with self.tiffobj() as tiff:
                omexml = tiff.ome_metadata
                geotags = tiff.geotiff_metadata or {}
            zooms, units, axes = ome_zooms(omexml, self.series)
            if zooms:
                # convert to mm + drop non-spatial zooms
                units = [parse_unit(u) for u in units]
                zooms = [
                    z * (f / 1e-3) for z, (f, type) in zip(zooms, units)
                    if type == 'm'
                ]
                if 'ModelPixelScaleTag' in geotags:
                    warn("Both OME and GeoTiff pixel scales are present: "
                         "{} vs {}. Using OME.".format(
                             zooms, geotags['ModelPixelScaleTag']))
            elif 'ModelPixelScaleTag' in geotags:
                zooms = geotags['ModelPixelScaleTag']
                axes = 'XYZ'
            else:
                zooms = 1.
                axes = [ax for ax in self._axes if ax in 'XYZ']
            if 'ModelTransformation' in geotags:
                aff = geotags['ModelTransformation']
                aff = torch.as_tensor(aff, dtype=torch.double).reshape(4, 4)
                self._cache['_affine'] = aff
            elif ('ModelTiepointTag' in geotags):
                # copied from tifffile
                sx, sy, sz = py.make_list(zooms, n=3)
                tiepoints = torch.as_tensor(geotags['ModelTiepointTag'])
                affines = []
                for tiepoint in tiepoints:
                    i, j, k, x, y, z = tiepoint
                    affines.append(
                        torch.as_tensor(
                            [[sx, 0.0, 0.0, x - i * sx],
                             [0.0, -sy, 0.0, y + j * sy],
                             [0.0, 0.0, sz, z - k * sz], [0.0, 0.0, 0.0, 1.0]],
                            dtype=torch.double))
                affines = torch.stack(affines, dim=0)
                if len(tiepoints) == 1:
                    affines = affines[0]
                    self._cache['_affine'] = affines
            else:
                zooms = py.make_list(zooms, n=len(axes))
                ax2zoom = {ax: zoom for ax, zoom in zip(axes, zooms)}
                axes = [ax for ax in self._axes if ax in 'XYZ']
                shape = [
                    shp for shp, msk in zip(self._shape, self._spatial) if msk
                ]
                zooms = [ax2zoom.get(ax, 1.) for ax in axes]
                layout = [('R' if ax == 'Z' else 'P' if ax == 'Y' else 'S')
                          for ax in axes]
                aff = affine_default(shape, zooms, layout=''.join(layout))
                self._cache['_affine'] = aff
        return self._cache['_affine']

    @property
    def dtype(self):
        if 'dtype' not in self._cache:
            with self.tiffobj() as tiff:
                dt = tiff.series[self.series].levels[self.level].dtype
            self._cache['dtype'] = dt
        return self._cache['dtype']

    @property
    def series(self):
        """Series index (Tiff files can hold multiple series)"""
        return self._series

    @series.setter
    def series(self, val):
        if val != self.series and not all(is_fullslice(self.slicer)):
            raise RuntimeError("Cannot change series in a view")
        self._series = val
        self._cache = {}

    @property
    def level(self):
        """Level index (Tiff files can hold multiple spatial resolutions)"""
        return self._level

    @level.setter
    def level(self, val):
        if val != self.level and not all(is_fullslice(self.slicer)):
            raise RuntimeError("Cannot change resolution level in a view")
        self._level = val
        self._cache = {}

    @property
    def readable(self):
        # That's not exact: pseudo partial access in-plane
        return AccessType.TruePartial

    @property
    def writable(self):
        return AccessType.No

    @contextmanager
    def tiffobj(self):
        """Returns an *open* Tiff reader.

        Should be used in a `with` statement:
        ```python
        >>> with self.tiffobj() as tiff:
        >>>     # do stuff with `tiff`
        ```
        """
        closed = self._tiff.filehandle.closed
        if closed:
            self._tiff.filehandle.open()
        try:
            yield self._tiff
        finally:
            if closed:
                self._tiff.close()

    def __del__(self):
        # make sure we close all file objects
        self._tiff.close()

    @property
    def filename(self):
        with self.tiffobj() as f:
            return f.filename

    def data(self,
             dtype=None,
             device=None,
             casting='unsafe',
             rand=True,
             cutoff=None,
             dim=None,
             numpy=False):

        # --- sanity check before reading ---
        dtype = self.dtype if dtype is None else dtype
        dtype = dtypes.dtype(dtype)
        if not numpy and dtype.torch is None:
            raise TypeError(
                'Data type {} does not exist in PyTorch.'.format(dtype))

        # --- check that view is not empty ---
        if py.prod(self.shape) == 0:
            if numpy:
                return np.zeros(self.shape, dtype=dtype.numpy)
            else:
                return torch.zeros(self.shape,
                                   dtype=dtype.torch,
                                   device=device)

        # --- read native data ---
        slicer, perm, newdim = split_operation(self.permutation, self.slicer,
                                               'r')
        with self.tiffobj() as f:
            dat = self._read_data_raw(slicer, tiffobj=f)
        dat = dat.transpose(perm)[newdim]
        indtype = dtypes.dtype(self.dtype)

        # --- cutoff ---
        dat = volutils.cutoff(dat, cutoff, dim)

        # --- cast ---
        rand = rand and not indtype.is_floating_point
        if rand and not dtype.is_floating_point:
            tmpdtype = dtypes.float64
        else:
            tmpdtype = dtype
        dat, scale = volutils.cast(dat,
                                   tmpdtype.numpy,
                                   casting,
                                   with_scale=True)

        # --- random sample ---
        # uniform noise in the uncertainty interval
        if rand and not (scale == 1 and not dtype.is_floating_point):
            dat = volutils.addnoise(dat, scale)

        # --- final cast ---
        dat = volutils.cast(dat, dtype.numpy, 'unsafe')

        # convert to torch if needed
        if not numpy:
            dat = torch.as_tensor(dat, device=device)
        return dat

    # --------------
    #   LOW LEVEL
    # --------------

    def _read_data_raw(self, slicer=None, tiffobj=None):
        """Read native data

        Dispatch to `_read_data_raw_full` or `_read_data_raw_partial`.

        Parameters
        ----------
        slicer : tuple[index_like], optional
            A tuple of indices that describe the chunk of data to read.
            If None, read everything.
        tiffobj : file object, default=`self.fileobj('image', 'r')`
            A file object (with `seek`, `read`) from which to read

        Returns
        -------
        dat : np.ndarray

        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._read_data_raw(slicer, tiffobj)

        # load sub-array
        if slicer is None or all(is_fullslice(slicer, self._shape)):
            dat = self._read_data_raw_full(tiffobj)
        else:
            dat = self._read_data_raw_partial(slicer, tiffobj)

        return dat

    def _read_data_raw_partial(self, slicer, tiffobj=None):
        """Read a chunk of data from disk

        Parameters
        ----------
        slicer : tuple[slice or int]
        tiffobj : TiffFile

        Returns
        -------
        dat : np.ndarray

        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._read_data_raw_partial(slicer, tiffobj)

        # 1) split dimensions
        shape_feat, shape_stack, shape_page = self._shape_split(tiffobj)
        dim_feat = len(shape_feat)
        dim_stack = len(shape_stack)
        dim_page = len(shape_page)

        # 2) split slicer
        slicer_feat = slicer[:dim_feat]
        slicer_stack = slicer[dim_feat:dim_feat + dim_stack]
        slicer_page = slicer[dim_feat + dim_stack:]

        dim_feat_out = sum(isinstance(idx, slice) for idx in slicer_feat)
        dim_stack_out = sum(isinstance(idx, slice) for idx in slicer_stack)
        dim_page_out = sum(isinstance(idx, slice) for idx in slicer_page)

        # 3) ensure positive strides
        slicer_inv = [
            slice(None, None, -1) if idx.step and idx.step < 0 else slice(None)
            for idx in slicer_stack if isinstance(idx, slice)
        ]
        slicer_stack = [
            invert_slice(idx, shp)
            if isinstance(idx, slice) and idx.step and idx.step < 0 else idx
            for idx, shp in zip(slicer_stack, shape_stack)
        ]

        # 4) convert stack slice to list of linear indices
        #    (or to one slice if possible)
        index_stack = slicer_sub2ind(slicer_stack, shape_stack)

        # 5) read only pages in the substack
        dat = tiffobj.asarray(key=index_stack,
                              series=self.series,
                              level=self.level)
        dat = dat.reshape([*shape_feat, -1, *shape_page])

        # 6) apply slicers along the feature and page dimensions
        dat = dat[(*slicer_feat, slice(None), *slicer_page)]

        # 7) reshape
        dat = dat.reshape(self.shape)

        # 8) final slicers for negative strides along stack dimensions
        slicer = ([slice(None)] * dim_feat_out + slicer_inv
                  + [slice(None)] * dim_page_out)
        dat = dat[tuple(slicer)]

        return dat

    def _read_data_raw_full(self, tiffobj=None):
        """Read the full data from disk

        Parameters
        ----------
        tiffobj : TiffFile

        Returns
        -------
        dat : np.ndarray

        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._read_data_raw_full(tiffobj)

        return tiffobj.asarray(series=self.series, level=self.level)

    def _shape_split(self, tiffobj=None):
        """Split the shape into different components

        Returns
        -------
        shape_feat : tuple[int]
            Color features (belong to pages but end-up at the left-most axis)
        shape_collection : tuple[int]
            Shape of the collection of pages (usually Z, T, etc. axes)
        shape_page : tuple[int]
            Shape of one page -- with or without features (usually X, Y axes)
        """
        if tiffobj is None:
            with self.tiffobj() as tiffobj:
                return self._shape_split(tiffobj)

        if tiffobj.is_imagej:
            return self._shape_split_imagej(tiffobj)
        else:
            page = tiffobj.series[self.series].levels[self.level].pages[0]
            shape_page = page.shape
            page_dim = len(shape_page)
            shape_collection = self._shape[:-page_dim]
            return tuple(), tuple(shape_collection), tuple(shape_page)

    def _shape_split_imagej(self, tiffobj):
        """Split the shape into different components (ImageJ format).

        This is largely copied from tifffile.
        """

        pages = tiffobj.pages
        pages.useframes = True
        pages.keyframe = 0
        page = pages[0]
        meta = tiffobj.imagej_metadata

        def is_virtual():
            # ImageJ virtual hyperstacks store all image metadata in the first
            # page and image data are stored contiguously before the second
            # page, if any
            if not page.is_final:
                return False
            images = meta.get('images', 0)
            if images <= 1:
                return False
            offset, count = page.is_contiguous
            if (count != py.prod(page.shape) * page.bitspersample // 8
                    or offset + count * images > tiffobj.filehandle.size):
                raise ValueError()
            # check that next page is stored after data
            if len(pages) > 1 and offset + count * images > pages[1].offset:
                return False
            return True

        isvirtual = is_virtual()
        if isvirtual:
            # no need to read other pages
            pages = [page]
        else:
            pages = pages[:]

        images = meta.get('images', len(pages))
        frames = meta.get('frames', 1)
        slices = meta.get('slices', 1)
        channels = meta.get('channels', 1)

        # compute shape of the collection of pages
        shape = []
        axes = []
        if frames > 1:
            shape.append(frames)
            axes.append('T')
        if slices > 1:
            shape.append(slices)
            axes.append('Z')
        if channels > 1 and (py.prod(shape) if shape else 1) != images:
            shape.append(channels)
            axes.append('C')

        remain = images // (py.prod(shape) if shape else 1)
        if remain > 1:
            shape.append(remain)
            axes.append('I')

        if page.axes[0] == 'S' and 'C' in axes:
            # planar storage, S == C, saved by Bio-Formats
            return tuple(), tuple(shape), tuple(page.shape[1:])
        elif page.axes[0] == 'I':
            # contiguous multiple images
            return tuple(), tuple(shape), tuple(page.shape[1:])
        elif page.axes[:2] == 'SI':
            # color-mapped contiguous multiple images
            return tuple(page.shape[0:1]), tuple(shape), tuple(page.shape[2:])
        else:
            return tuple(), tuple(shape), tuple(page.shape)
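A hedged sketch of the partial read that `_read_data_raw_partial` builds on: selecting pages by key within a given series and pyramid level (file name and indices are assumptions):

from tifffile import TiffFile

with TiffFile("pyramid.ome.tif") as tif:
    sub = tif.asarray(key=[0, 2, 4], series=0, level=0)
    full = tif.asarray(series=0, level=1)   # next (smaller) pyramid level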