Code Example #1
File: czi_reader.py Project: zeroth/aicsimageio
    def _read_immediate(self) -> np.ndarray:
        # Init temp czi
        czi = CziFile(self._file)

        # Safely construct the numpy array or catch any exception
        try:
            # Get image dims indices
            image_dim_indices = czi.dims_shape()

            # Catch inconsistent scene dimension sizes
            if len(image_dim_indices) > 1:
                # Choose the provided scene
                log.info(
                    f"File contains variable dimensions per scene, "
                    f"selected scene: {self.specific_s_index} for data retrieval."
                )

                # Get the specific scene
                if self.specific_s_index < len(image_dim_indices):
                    data, _ = czi.read_image(
                        **{Dimensions.Scene: self.specific_s_index})
                else:
                    raise exceptions.InconsistentShapeError(
                        f"The CZI image provided has variable dimensions per scene. "
                        f"Please provide a valid index to the 'S' parameter to create "
                        f"a dask array for the index provided. "
                        f"Provided scene index: {self.specific_s_index}. "
                        f"Scene index range: 0-{len(image_dim_indices)}.")

            else:
                # If the list is length one that means that all the scenes in the image
                # have the same dimensions
                # Read all data in the image
                data, _ = czi.read_image()

            # A really bad way to close any connection to the CZI object
            czi._bytes = None
            czi.reader = None

        except Exception as e:
            # A really bad way to close any connection to the CZI object
            czi._bytes = None
            czi.reader = None

            raise e

        return data
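The scene handling above can be exercised directly with aicspylibczi; a minimal sketch, assuming a locally available CZI file (the path below is a placeholder):

from aicspylibczi import CziFile

czi = CziFile("image.czi")              # placeholder path
if len(czi.dims_shape()) > 1:           # one shape dictionary per scene -> shapes differ
    data, dims = czi.read_image(S=0)    # read one explicitly chosen scene
else:
    data, dims = czi.read_image()       # all scenes share the same shape
print(dims, data.shape)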
Code Example #2
        dims_dict[d] = dimstring.find(d)
        dimindex_list.append(dimstring.find(d))

    numvalid_dims = sum(i > 0 for i in dimindex_list)

    return dims_dict, dimindex_list, numvalid_dims
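# Note: the fragment above appears to be the tail of a helper that maps each known
# dimension character to its position inside a dimension string via str.find();
# characters that are not present in the string are reported with an index of -1.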


filename = r"C:\Temp\input\DTScan_ID4.czi"

md, addmd = imf.get_metadata(filename)

czi = CziFile(filename)

# Get the shape of the data, the coordinate pairs are (start index, size)
dimensions = czi.dims_shape()
print(dimensions)
print(czi.dims)
print(czi.size)
print(czi.is_mosaic())  # True
# Mosaic files ignore the S dimension and use an internal mIndex to reconstruct;
# the scale factor allows one to generate a manageable image
mosaic_data = czi.read_mosaic(C=0, scale_factor=1)
print('CZI Mosaic Data Shape : ', mosaic_data.shape)

md = {}
md['SizeS'] = 1
md['SizeT'] = 3
md['SizeZ'] = 5
md['SizeC'] = 2
md['SizeY'] = 100
md['SizeX'] = 200
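A quick sanity check that can follow the hard-coded metadata above (purely illustrative):

# derive the expected (S, T, Z, C, Y, X) array shape from the Size* entries
expected_shape = (md['SizeS'], md['SizeT'], md['SizeZ'], md['SizeC'], md['SizeY'], md['SizeX'])
print(expected_shape)  # (1, 3, 5, 2, 100, 200)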
Code Example #3
File: md_tools.py Project: sebi06/napari_zeiss
def get_metadata_czi(filename, dim2none=False,
                     forceDim=False,
                     forceDimname='SizeC',
                     forceDimvalue=2,
                     convert_scunit=True):
    """
    Returns a dictionary with CZI metadata.

    Information CZI Dimension Characters:
    - '0': 'Sample',  # e.g. RGBA
    - 'X': 'Width',
    - 'Y': 'Height',
    - 'C': 'Channel',
    - 'Z': 'Slice',  # depth
    - 'T': 'Time',
    - 'R': 'Rotation',
    - 'S': 'Scene',  # contiguous regions of interest in a mosaic image
    - 'I': 'Illumination',  # direction
    - 'B': 'Block',  # acquisition
    - 'M': 'Mosaic',  # index of tile for compositing a scene
    - 'H': 'Phase',  # e.g. Airy detector fibers
    - 'V': 'View',  # e.g. for SPIM

    :param filename: filename of the CZI image
    :type filename: str
    :param dim2none: option to set non-existing dimension to None, defaults to False
    :type dim2none: bool, optional
    :param forceDim: option to force a dimension value instead of reading it from the metadata, defaults to False
    :type forceDim: bool, optional
    :param forceDimname: name of the dimension to be forced, defaults to 'SizeC'
    :type forceDimname: str, optional
    :param forceDimvalue: value to use for the forced dimension, defaults to 2
    :type forceDimvalue: int, optional
    :param convert_scunit: convert scale unit string from 'µm' to 'micron', defaults to True
    :type convert_scunit: bool, optional
    :return: metadata - dictionary with the relevant CZI metainformation
    :rtype: dict
    """

    # get CZI object
    czi = zis.CziFile(filename)

    # parse the XML into a dictionary
    metadatadict_czi = czi.metadata(raw=False)

    # initialize metadata dictionary
    metadata = create_metadata_dict()

    # get directory and filename etc.
    metadata['Directory'] = os.path.dirname(filename)
    metadata['Filename'] = os.path.basename(filename)
    metadata['Extension'] = 'czi'
    metadata['ImageType'] = 'czi'

    # add axes and shape information using czifile package
    metadata['Axes_czifile'] = czi.axes
    metadata['Shape_czifile'] = czi.shape

    # add axes and shape information using aicsimageio package
    czi_aics = AICSImage(filename)
    metadata['Axes_aics'] = czi_aics.dims
    try:
        metadata['Shape_aics'] = czi_aics.shape
        metadata['SizeX_aics'] = czi_aics.size_x
        metadata['SizeY_aics'] = czi_aics.size_y
        metadata['SizeC_aics'] = czi_aics.size_c
        metadata['SizeZ_aics'] = czi_aics.size_z
        metadata['SizeT_aics'] = czi_aics.size_t
        metadata['SizeS_aics'] = czi_aics.size_s
    except KeyError as e:
        metadata['Shape_aics'] = None
        metadata['SizeX_aics'] = None
        metadata['SizeY_aics'] = None
        metadata['SizeC_aics'] = None
        metadata['SizeZ_aics'] = None
        metadata['SizeT_aics'] = None
        metadata['SizeS_aics'] = None

    # get additional data by using pylibczi directly
    # Get the shape of the data, the coordinate pairs are (start index, size)
    aics_czi = CziFile(filename)
    metadata['dims_aicspylibczi'] = aics_czi.dims_shape()[0]
    metadata['dimorder_aicspylibczi'] = aics_czi.dims
    metadata['size_aicspylibczi'] = aics_czi.size
    metadata['czi_isMosaic'] = aics_czi.is_mosaic()

    # determine pixel type for CZI array
    metadata['NumPy.dtype'] = czi.dtype

    # check if the CZI image is an RGB image depending
    # on the last dimension entry of axes
    if czi.shape[-1] == 3:
        metadata['czi_isRGB'] = True

    try:
        metadata['PixelType'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['PixelType']
    except KeyError as e:
        print('Key not found:', e)
        metadata['PixelType'] = None
    try:
        metadata['SizeX'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeX'])
    except KeyError as e:
        metadata['SizeX'] = None
    try:
        metadata['SizeY'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeY'])
    except KeyError as e:
        metadata['SizeY'] = None

    try:
        metadata['SizeZ'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeZ'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeZ'] = None
        if not dim2none:
            metadata['SizeZ'] = 1

    # for special cases do not read the SizeC from the metadata
    if forceDim and forceDimname == 'SizeC':
        metadata[forceDimname] = forceDimvalue

    if not forceDim:

        try:
            metadata['SizeC'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeC'])
        except Exception as e:
            # print('Exception:', e)
            if dim2none:
                metadata['SizeC'] = None
            if not dim2none:
                metadata['SizeC'] = 1

    # create empty lists for channel related information
    channels = []
    channels_names = []
    channels_colors = []

    # in case of only one channel
    if metadata['SizeC'] == 1:
        # get name for dye
        try:
            channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                            ['Channels']['Channel']['ShortName'])
        except KeyError as e:
            print('Exception:', e)
            try:
                channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                ['Channels']['Channel']['DyeName'])
            except KeyError as e:
                print('Exception:', e)
                channels.append('Dye-CH1')

        # get channel name
        try:
            channels_names.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                  ['Channels']['Channel']['Name'])
        except KeyError as e:
            print('Exception:', e)
            channels_names.append('CH1')

        # get channel color
        try:
            channels_colors.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                   ['Channels']['Channel']['Color'])
        except KeyError as e:
            print('Exception:', e)
            channels_colors.append('#80808000')

    # in case of two or more channels
    if metadata['SizeC'] > 1:
        # loop over all channels
        for ch in range(metadata['SizeC']):
            # get name for dyes
            try:
                channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                                ['Channels']['Channel'][ch]['ShortName'])
            except KeyError as e:
                print('Exception:', e)
                try:
                    channels.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                                    ['Channels']['Channel'][ch]['DyeName'])
                except KeyError as e:
                    print('Exception:', e)
                    channels.append('Dye-CH' + str(ch))

            # get channel names
            try:
                channels_names.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                      ['Channels']['Channel'][ch]['Name'])
            except KeyError as e:
                print('Exception:', e)
                channels_names.append('CH' + str(ch))

            # get channel colors
            try:
                channels_colors.append(metadatadict_czi['ImageDocument']['Metadata']['DisplaySetting']
                                       ['Channels']['Channel'][ch]['Color'])
            except KeyError as e:
                print('Exception:', e)
                # use grayscale instead
                channels_colors.append('80808000')

    # write channels information (as lists) into metadata dictionary
    metadata['Channels'] = channels
    metadata['ChannelNames'] = channels_names
    metadata['ChannelColors'] = channels_colors

    try:
        metadata['SizeT'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeT'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeT'] = None
        if not dim2none:
            metadata['SizeT'] = 1

    try:
        metadata['SizeM'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeM'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeM'] = None
        if not dim2none:
            metadata['SizeM'] = 1

    try:
        metadata['SizeB'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeB'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeB'] = None
        if not dim2none:
            metadata['SizeB'] = 1

    try:
        metadata['SizeS'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeS'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeS'] = None
        if not dim2none:
            metadata['SizeS'] = 1

    try:
        metadata['SizeH'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeH'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeH'] = None
        if not dim2none:
            metadata['SizeH'] = 1

    try:
        metadata['SizeI'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeI'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeI'] = None
        if not dim2none:
            metadata['SizeI'] = 1

    try:
        metadata['SizeV'] = np.int(metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['SizeV'])
    except Exception as e:
        # print('Exception:', e)
        if dim2none:
            metadata['SizeV'] = None
        if not dim2none:
            metadata['SizeV'] = 1

    # get the scaling information
    try:
        # metadata['Scaling'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']
        metadata['XScale'] = float(metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][0]['Value']) * 1000000
        metadata['YScale'] = float(metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][1]['Value']) * 1000000
        metadata['XScale'] = np.round(metadata['XScale'], 3)
        metadata['YScale'] = np.round(metadata['YScale'], 3)
        try:
            metadata['XScaleUnit'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][0]['DefaultUnitFormat']
            metadata['YScaleUnit'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][1]['DefaultUnitFormat']
        except KeyError as e:
            print('Key not found:', e)
            metadata['XScaleUnit'] = None
            metadata['YScaleUnit'] = None
        try:
            metadata['ZScale'] = float(metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][2]['Value']) * 1000000
            metadata['ZScale'] = np.round(metadata['ZScale'], 3)
            # additional check for faulty z-scaling
            if metadata['ZScale'] == 0.0:
                metadata['ZScale'] = 1.0
            try:
                metadata['ZScaleUnit'] = metadatadict_czi['ImageDocument']['Metadata']['Scaling']['Items']['Distance'][2]['DefaultUnitFormat']
            except KeyError as e:
                print('Key not found:', e)
                metadata['ZScaleUnit'] = metadata['XScaleUnit']
        except Exception as e:
            # print('Exception:', e)
            if dim2none:
                metadata['ZScale'] = None
                metadata['ZScaleUnit'] = None
            if not dim2none:
                # set to isotropic scaling if it was single plane only
                metadata['ZScale'] = metadata['XScale']
                metadata['ZScaleUnit'] = metadata['XScaleUnit']
    except Exception as e:
        print('Exception:', e)
        print('Scaling Data could not be found.')

    # try to get software version
    try:
        metadata['SW-Name'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Application']['Name']
        metadata['SW-Version'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Application']['Version']
    except KeyError as e:
        print('Key not found:', e)
        metadata['SW-Name'] = None
        metadata['SW-Version'] = None

    try:
        metadata['AcqDate'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['AcquisitionDateAndTime']
    except KeyError as e:
        print('Key not found:', e)
        metadata['AcqDate'] = None

    # check if Instrument metadata actually exists
    if pydash.objects.has(metadatadict_czi, ['ImageDocument', 'Metadata', 'Information', 'Instrument']):
        # get objective data
        if isinstance(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective'], list):
            num_obj = len(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective'])
        else:
            num_obj = 1

        # if there is only one objective found
        if num_obj == 1:
            try:
                metadata['ObjName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                           ['Instrument']['Objectives']['Objective']['Name'])
            except KeyError as e:
                print('Key not found:', e)
                metadata['ObjName'].append(None)

            try:
                metadata['ObjImmersion'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective']['Immersion']
            except KeyError as e:
                print('Key not found:', e)
                metadata['ObjImmersion'] = None

            try:
                metadata['ObjNA'] = np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                             ['Instrument']['Objectives']['Objective']['LensNA'])
            except KeyError as e:
                print('Key not found:', e)
                metadata['ObjNA'] = None

            try:
                metadata['ObjID'] = metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Objectives']['Objective']['Id']
            except KeyError as e:
                print('Key not found:', e)
                metadata['ObjID'] = None

            try:
                metadata['TubelensMag'] = np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                   ['Instrument']['TubeLenses']['TubeLens']['Magnification'])
            except KeyError as e:
                print('Key not found:', e, 'Using Default Value = 1.0 for Tubelens Magnification.')
                metadata['TubelensMag'] = 1.0

            try:
                metadata['ObjNominalMag'] = np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                     ['Instrument']['Objectives']['Objective']['NominalMagnification'])
            except KeyError as e:
                print('Key not found:', e, 'Using Default Value = 1.0 for Nominal Magnification.')
                metadata['ObjNominalMag'] = 1.0

            try:
                if metadata['TubelensMag'] is not None:
                    metadata['ObjMag'] = metadata['ObjNominalMag'] * metadata['TubelensMag']
                if metadata['TubelensMag'] is None:
                    print('No TubelensMag found. Using 1.0 instead.')
                    metadata['ObjMag'] = metadata['ObjNominalMag'] * 1.0

            except KeyError as e:
                print('Key not found:', e)
                metadata['ObjMag'] = None

        if num_obj > 1:
            for o in range(num_obj):

                try:
                    metadata['ObjName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                               ['Instrument']['Objectives']['Objective'][o]['Name'])
                except KeyError as e:
                    print('Key not found:', e)
                    metadata['ObjName'].append(None)

                try:
                    metadata['ObjImmersion'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                    ['Instrument']['Objectives']['Objective'][o]['Immersion'])
                except KeyError as e:
                    print('Key not found:', e)
                    metadata['ObjImmersion'].append(None)

                try:
                    metadata['ObjNA'].append(np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                      ['Instrument']['Objectives']['Objective'][o]['LensNA']))
                except KeyError as e:
                    print('Key not found:', e)
                    metadata['ObjNA'].append(None)

                try:
                    metadata['ObjID'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                             ['Instrument']['Objectives']['Objective'][o]['Id'])
                except KeyError as e:
                    print('Key not found:', e)
                    metadata['ObjID'].append(None)

                try:
                    metadata['TubelensMag'].append(np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                            ['Instrument']['TubeLenses']['TubeLens'][o]['Magnification']))
                except KeyError as e:
                    print('Key not found:', e, 'Using Default Value = 1.0 for Tubelens Magnification.')
                    metadata['TubelensMag'].append(1.0)

                try:
                    metadata['ObjNominalMag'].append(np.float(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                              ['Instrument']['Objectives']['Objective'][o]['NominalMagnification']))
                except KeyError as e:
                    print('Key not found:', e, 'Using Default Value = 1.0 for Nominal Magnification.')
                    metadata['ObjNominalMag'].append(1.0)

                try:
                    if metadata['TubelensMag'] is not None:
                        metadata['ObjMag'].append(metadata['ObjNominalMag'][o] * metadata['TubelensMag'][o])
                    if metadata['TubelensMag'] is None:
                        print('No TubelensMag found. Using 1.0 instead.')
                        metadata['ObjMag'].append(metadata['ObjNominalMag'][o] * 1.0)

                except KeyError as e:
                    print('Key not found:', e)
                    metadata['ObjMag'].append(None)

    # get detector information

    # check if there are any detector entries inside the dictionary
    if pydash.objects.has(metadatadict_czi, ['ImageDocument', 'Metadata', 'Information', 'Instrument', 'Detectors']):

        if isinstance(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Detectors']['Detector'], list):
            num_detectors = len(metadatadict_czi['ImageDocument']['Metadata']['Information']['Instrument']['Detectors']['Detector'])
        else:
            num_detectors = 1

        # if there is only one detector found
        if num_detectors == 1:

            # check for detector ID
            try:
                metadata['DetectorID'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                              ['Instrument']['Detectors']['Detector']['Id'])
            except KeyError as e:
                metadata['DetectorID'].append(None)

            # check for detector Name
            try:
                metadata['DetectorName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                ['Instrument']['Detectors']['Detector']['Name'])
            except KeyError as e:
                metadata['DetectorName'].append(None)

            # check for detector model
            try:
                metadata['DetectorModel'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                 ['Instrument']['Detectors']['Detector']['Manufacturer']['Model'])
            except KeyError as e:
                metadata['DetectorModel'].append(None)

            # check for detector type
            try:
                metadata['DetectorType'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                ['Instrument']['Detectors']['Detector']['Type'])
            except KeyError as e:
                metadata['DetectorType'].append(None)

        if num_detectors > 1:
            for d in range(num_detectors):

                # check for detector ID
                try:
                    metadata['DetectorID'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                  ['Instrument']['Detectors']['Detector'][d]['Id'])
                except KeyError as e:
                    metadata['DetectorID'].append(None)

                # check for detector Name
                try:
                    metadata['DetectorName'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                    ['Instrument']['Detectors']['Detector'][d]['Name'])
                except KeyError as e:
                    metadata['DetectorName'].append(None)

                # check for detector model
                try:
                    metadata['DetectorModel'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                     ['Instrument']['Detectors']['Detector'][d]['Manufacturer']['Model'])
                except KeyError as e:
                    metadata['DetectorModel'].append(None)

                # check for detector type
                try:
                    metadata['DetectorType'].append(metadatadict_czi['ImageDocument']['Metadata']['Information']
                                                    ['Instrument']['Detectors']['Detector'][d]['Type'])
                except KeyError as e:
                    metadata['DetectorType'].append(None)

    # check for well information
    metadata['Well_ArrayNames'] = []
    metadata['Well_Indices'] = []
    metadata['Well_PositionNames'] = []
    metadata['Well_ColId'] = []
    metadata['Well_RowId'] = []
    metadata['WellCounter'] = None
    metadata['SceneStageCenterX'] = []
    metadata['SceneStageCenterY'] = []

    try:
        print('Trying to extract Scene and Well information if existing ...')
        # extract well information from the dictionary
        allscenes = metadatadict_czi['ImageDocument']['Metadata']['Information']['Image']['Dimensions']['S']['Scenes']['Scene']

        # loop over all detected scenes
        for s in range(metadata['SizeS']):

            if metadata['SizeS'] == 1:
                well = allscenes
                try:
                    metadata['Well_ArrayNames'].append(allscenes['ArrayName'])
                except KeyError as e:
                    # print('Key not found in Metadata Dictionary:', e)
                    try:
                        metadata['Well_ArrayNames'].append(well['Name'])
                    except KeyError as e:
                        print('Key not found in Metadata Dictionary:', e, 'Using A1 instead')
                        metadata['Well_ArrayNames'].append('A1')

                try:
                    metadata['Well_Indices'].append(allscenes['Index'])
                except KeyError as e:
                    print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_Indices'].append(1)

                try:
                    metadata['Well_PositionNames'].append(allscenes['Name'])
                except KeyError as e:
                    print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_PositionNames'].append('P1')

                try:
                    metadata['Well_ColId'].append(np.int(allscenes['Shape']['ColumnIndex']))
                except KeyError as e:
                    print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_ColId'].append(0)

                try:
                    metadata['Well_RowId'].append(np.int(allscenes['Shape']['RowIndex']))
                except KeyError as e:
                    print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_RowId'].append(0)

                try:
                    # count the content of the list, e.g. how many time a certain well was detected
                    metadata['WellCounter'] = Counter(metadata['Well_ArrayNames'])
                except KeyError as e:
                    print('Key not found in Metadata Dictionary:', e)
                    metadata['WellCounter'] = Counter({'A1': 1})

                try:
                    # get the SceneCenter Position
                    sx = allscenes['CenterPosition'].split(',')[0]
                    sy = allscenes['CenterPosition'].split(',')[1]
                    metadata['SceneStageCenterX'].append(np.double(sx))
                    metadata['SceneStageCenterY'].append(np.double(sy))
                except KeyError as e:
                    metadata['SceneStageCenterX'].append(0.0)
                    metadata['SceneStageCenterY'].append(0.0)

            if metadata['SizeS'] > 1:
                try:
                    well = allscenes[s]
                    metadata['Well_ArrayNames'].append(well['ArrayName'])
                except KeyError as e:
                    # print('Key not found in Metadata Dictionary:', e)
                    try:
                        metadata['Well_ArrayNames'].append(well['Name'])
                    except KeyError as e:
                        print('Key not found in Metadata Dictionary:', e, 'Using A1 instead')
                        metadata['Well_ArrayNames'].append('A1')

                # get the well information
                try:
                    metadata['Well_Indices'].append(well['Index'])
                except KeyError as e:
                    # print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_Indices'].append(None)
                try:
                    metadata['Well_PositionNames'].append(well['Name'])
                except KeyError as e:
                    # print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_PositionNames'].append(None)

                try:
                    metadata['Well_ColId'].append(np.int(well['Shape']['ColumnIndex']))
                except KeyError as e:
                    print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_ColId'].append(None)

                try:
                    metadata['Well_RowId'].append(np.int(well['Shape']['RowIndex']))
                except KeyError as e:
                    print('Key not found in Metadata Dictionary:', e)
                    metadata['Well_RowId'].append(None)

                # count the content of the list, e.g. how many time a certain well was detected
                metadata['WellCounter'] = Counter(metadata['Well_ArrayNames'])

                # try:
                if isinstance(allscenes, list):
                    try:
                        # get the SceneCenter Position
                        sx = allscenes[s]['CenterPosition'].split(',')[0]
                        sy = allscenes[s]['CenterPosition'].split(',')[1]
                        metadata['SceneStageCenterX'].append(np.double(sx))
                        metadata['SceneStageCenterY'].append(np.double(sy))
                    except KeyError as e:
                        print('Key not found in Metadata Dictionary:', e)
                        metadata['SceneStageCenterX'].append(0.0)
                        metadata['SceneStageCenterY'].append(0.0)
                if not isinstance(allscenes, list):
                    metadata['SceneStageCenterX'].append(0.0)
                    metadata['SceneStageCenterY'].append(0.0)

            # count the number of different wells
            metadata['NumWells'] = len(metadata['WellCounter'].keys())

    except (KeyError, TypeError) as e:
        print('No valid Scene or Well information found:', e)

    # close CZI file
    czi.close()

    # close AICSImage object
    czi_aics.close()

    # convert scale unit to avoid encoding problems
    if convert_scunit:
        if metadata['XScaleUnit'] == 'µm':
            metadata['XScaleUnit'] = 'micron'
        if metadata['YScaleUnit'] == 'µm':
            metadata['YScaleUnit'] = 'micron'
        if metadata['ZScaleUnit'] == 'µm':
            metadata['ZScaleUnit'] = 'micron'

    return metadata
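A minimal usage sketch for get_metadata_czi (the import path and the CZI file path are assumptions, not taken from the repository):

from md_tools import get_metadata_czi

md = get_metadata_czi(r"C:\Temp\input\DTScan_ID4.czi", dim2none=False)
print(md['Axes_czifile'], md['Shape_czifile'])
print(md['SizeX'], md['SizeY'], md['SizeC'], md['SizeZ'])
print(md['XScale'], md['XScaleUnit'], 'Mosaic:', md['czi_isMosaic'])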
Code Example #4
        print('Using AICSImageIO to read the image (Dask Delayed Reader).')
        all_scenes_array = img.get_image_dask_data()
    if not use_dask_delayed:
        print('Using AICSImageIO to read the image.')
        all_scenes_array = img.get_image_data()

if not use_aicsimageio and use_pylibczi is True:

    # read CZI using aicspylibczi
    czi = CziFile(filename)

    # for testing
    # Get the shape of the data
    print('Dimensions   : ', czi.dims)
    print('Size         : ', czi.size)
    print('Shape        : ', czi.dims_shape())
    print('IsMosaic     : ', czi.is_mosaic())
    if czi.is_mosaic():
        print('Mosaic Size  : ', czi.read_mosaic_size())

    # get the required shape for all and single scenes
    shape_all, shape_single, same_shape = czt.get_shape_allscenes(czi, md)
    print('Required_Array Shape for all scenes: ', shape_all)
    for sh in shape_single:
        print('Required Array Shape for single scenes: ', sh)

    #array_type = 'dask'
    array_type = 'zarr'
    #array_type = 'numpy'

    if array_type == 'zarr':
Code Example #5
def test_image_shape(data_dir, fname, expects):
    with open(data_dir / fname, 'rb') as fp:
        czi = CziFile(czi_filename=fp)
        shape = czi.dims_shape()
    assert shape == expects
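A test like this is typically driven by a pytest parametrize decorator; a hedged sketch of what that could look like (the file name and expected shape dictionary are illustrative only, not taken from the repository):

import pytest

@pytest.mark.parametrize("fname, expects", [
    ("example.czi", [{'Y': (0, 100), 'X': (0, 200)}]),
])
def test_image_shape(data_dir, fname, expects):
    ...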
Code Example #6
def test_read_image_two(data_dir, fname, exp_str, exp_dict):
    czi = CziFile(str(data_dir / fname))
    dims = czi.dims
    dim_dict = czi.dims_shape()
    assert dims == exp_str
    assert dim_dict == exp_dict
Code Example #7
File: segment_objects.py Project: soyers/OAD
def execute(filepath,
            separator=';',
            filter_method='none',
            filter_size=3,
            threshold_method='triangle',
            min_objectsize=1000,
            max_holesize=100,
            saveformat='ome.tiff'):
    """Main function that executed the workflow.

    :param filepath: file path of the CZI image
    :type filepath: str
    :param separator: separator for the CSV table, defaults to ';'
    :type separator: str, optional
    :param filter_method: smoothing filter, defaults to 'none'
    :type filter_method: str, optional
    :param filter_size: kernel size or radius of filter element, defaults to 3
    :type filter_size: int, optional
    :param threshold_method: threshold method, defaults to 'triangle'
    :type threshold_method: str, optional
    :param min_objectsize: minimum object size, defaults to 1000
    :type min_objectsize: int, optional
    :param max_holesize: maximum hole size, defaults to 100
    :type max_holesize: int, optional
    :param saveformat: format to save the segmented image, defaults to 'ome.tiff'
    :type saveformat: str, optional
    :return: outputs
    :rtype: dict
    """

    print('--------------------------------------------------')
    print('FilePath : ', filepath)
    print(os.getcwd())
    print('File exists : ', os.path.exists(filepath))
    print('--------------------------------------------------')

    # define name for figure to be saved
    filename = os.path.basename(filepath)

    # get the metadata from the czi file
    md, additional_mdczi = imf.get_metadata(filepath)

    # to make it more readable, extract values from the metadata dictionary
    stageX = md['SceneStageCenterX']
    stageY = md['SceneStageCenterY']

    # define columns names for dataframe
    cols = ['S', 'T', 'Z', 'C', 'Number']
    objects = pd.DataFrame(columns=cols)

    # optional display of "some" results - empty list = no display
    show_image = [0]

    # scalefactor to read CZI
    sf = 1.0

    # index for channel - currently only single channel images are supported !
    chindex = 0

    # define maximum object sizes
    max_objectsize = 1000000000

    # define save format for mask
    adapt_dtype_mask = True
    dtype_mask = np.int8

    # make sure the minimum object size is not smaller than the maximum hole size
    if max_holesize > min_objectsize:
        min_objectsize = max_holesize

    # read the czi mosaic image
    czi = CziFile(filepath)

    # get the shape of the data using aicspylibczi
    print('Dimensions   : ', czi.dims)
    print('Size         : ', czi.size)
    print('Shape        : ', czi.dims_shape())
    print('IsMosaic     : ', czi.is_mosaic())

    # read the mosaic pixel data
    mosaic = czi.read_mosaic(C=0, scale_factor=sf)
    print('Mosaic Shape :', mosaic.shape)

    # get the mosaic as NumPy.Array - must fit in memory !!!
    image2d = np.squeeze(mosaic, axis=0)
    md['SizeX_readmosaic'] = image2d.shape[1]
    md['SizeY_readmosaic'] = image2d.shape[0]

    # create the savename for the OME-TIFF
    if saveformat == 'ome.tiff':
        savename_seg = filename.split('.')[0] + '.ome.tiff'
    if saveformat == 'tiff':
        savename_seg = filename.split('.')[0] + '.tiff'

    # initialize empty dataframe
    results = pd.DataFrame()

    # main loop over all T - Z - C slices
    for s in progressbar.progressbar(range(md['SizeS']), redirect_stdout=True):
        for t in range(md['SizeT']):
            for z in range(md['SizeZ']):

                values = {'S': s, 'T': t, 'Z': z, 'C': chindex, 'Number': 0}

        # preprocessing - filter the image
        if filter_method == 'none' or filter_method == 'None':
            image2d_filtered = image2d
        if filter_method == 'median':
            image2d_filtered = median(image2d, selem=disk(filter_size))
        if filter_method == 'gauss':
            image2d_filtered = gaussian(image2d,
                                        sigma=filter_size,
                                        mode='reflect')

        # threshold image
        binary = sgt.autoThresholding(image2d_filtered,
                                      method=threshold_method)

        # Remove contiguous holes smaller than the specified size
        mask = morphology.remove_small_holes(binary,
                                             area_threshold=max_holesize,
                                             connectivity=1,
                                             in_place=True)

        # remove small objects
        mask = morphology.remove_small_objects(mask,
                                               min_size=min_objectsize,
                                               in_place=True)

        # clear the border
        mask = segmentation.clear_border(mask, bgval=0, in_place=True)

        # label the objects
        mask = measure.label(binary)

        # adapt pixel type of mask
        if adapt_dtype_mask:
            mask = mask.astype(dtype_mask, copy=False)

        # measure region properties
        to_measure = ('label', 'area', 'centroid', 'bbox')

        # measure the specified parameters store in dataframe
        props = pd.DataFrame(
            measure.regionprops_table(
                mask, intensity_image=image2d,
                properties=to_measure)).set_index('label')

        # filter objects by size
        props = props[(props['area'] >= min_objectsize)
                      & (props['area'] <= max_objectsize)]

        # add well information for CZI metadata
        try:
            props['WellId'] = md['Well_ArrayNames'][s]
            props['Well_ColId'] = md['Well_ColId'][s]
            props['Well_RowId'] = md['Well_RowId'][s]
        except (IndexError, KeyError) as error:
            # handle expected IndexError or KeyError
            print('Key not found:', error)
            print('Well Information not found. Using S-Index.')
            props['WellId'] = s
            props['Well_ColId'] = s
            props['Well_RowId'] = s

        # add plane indices
        props['S'] = s
        props['T'] = t
        props['Z'] = z
        props['C'] = chindex

        # count the number of objects
        values['Number'] = props.shape[0]

        # update dataframe containing the number of objects
        objects = objects.append(pd.DataFrame(values, index=[0]),
                                 ignore_index=True)
        results = results.append(props, ignore_index=True)

    # make sure the array is 5D with order (T, Z, C, X, Y) to write a correct OME-TIFF
    mask = imf.expand_dims5d(mask, md)

    # write the OME-TIFF using the apeer-ometiff-library
    io.write_ometiff(savename_seg, mask, omexml_string=None)

    # rename columns in pandas datatable
    results.rename(columns={
        'bbox-0': 'ystart',
        'bbox-1': 'xstart',
        'bbox-2': 'yend',
        'bbox-3': 'xend'
    },
                   inplace=True)

    # calculate the bbox width and height in [pixel] and [micron]
    results['bbox_width'] = results['xend'] - results['xstart']
    results['bbox_height'] = results['yend'] - results['ystart']
    results['bbox_width_scaled'] = results['bbox_width'] * md['XScale']
    results['bbox_height_scaled'] = results['bbox_height'] * md['XScale']

    # calculate the bbox center StageXY
    results['bbox_center_stageX'], results[
        'bbox_center_stageY'] = bbox2stageXY(
            image_stageX=stageX,
            image_stageY=stageY,
            sizeX=md['SizeX'],
            sizeY=md['SizeY'],
            scale=md['XScale'],
            xstart=results['xstart'],
            ystart=results['ystart'],
            bbox_width=results['bbox_width'],
            bbox_height=results['bbox_height'])

    # show results
    print(results)
    print('Done.')

    # write the CSV data table
    print('Write to CSV File : ', filename)
    csvfile = os.path.splitext(filename)[0] + '_planetable.csv'
    results.to_csv(csvfile, sep=separator, index=False)

    # set the outputs
    outputs = {}
    outputs['segmented_image'] = savename_seg
    outputs['objects_table'] = csvfile

    return outputs
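A hedged usage sketch for the workflow function above (the file path is a placeholder):

outputs = execute(r"C:\Temp\input\DTScan_ID4.czi",
                  separator=';',
                  filter_method='median',
                  filter_size=3,
                  threshold_method='triangle',
                  min_objectsize=1000,
                  max_holesize=100,
                  saveformat='ome.tiff')
print(outputs['segmented_image'])
print(outputs['objects_table'])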
Code Example #8
def open_image_stack(filepath):
    """ Open a file using AICSImageIO and display it using napari

    :param filepath: filepath of the image
    :type filepath: str
    """

    if os.path.isfile(filepath):

        # remove existing layers from napari
        viewer.layers.select_all()
        viewer.layers.remove_selected()

        # get the metadata
        metadata, add_metadata = czt.get_metadata_czi(filepath)

        # add the global metadata and adapt the table display
        mdbrowser.update_metadata(metadata)
        mdbrowser.update_style()

        use_aicsimageio = True
        use_pylibczi = False

        # decide which tool to use to read the image
        if metadata['ImageType'] != 'czi':
            use_aicsimageio = True
        elif metadata['ImageType'] == 'czi' and metadata['isMosaic'] is False:
            use_aicsimageio = True
        elif metadata['ImageType'] == 'czi' and metadata['isMosaic'] is True:
            use_aicsimageio = False
            use_pylibczi = True

        """
        # check if CZI has T or Z dimension
        hasT = False
        hasZ = False

        if 'T' in metadata['dims_aicspylibczi']:
            hasT = True
        if 'Z' in metadata['dims_aicspylibczi']:
            hasZ = True
        """

        if use_aicsimageio:
            # get AICSImageIO object
            img = AICSImage(filepath)

            # check if the Dask Delayed Reader should be used
            if not checkboxes.cbox_dask.isChecked():
                print('Using AICSImageIO normal ImageReader.')
                all_scenes_array = img.get_image_data()
            if checkboxes.cbox_dask.isChecked():
                print('Using AICSImageIO Dask Delayed ImageReader')
                all_scenes_array = img.get_image_dask_data()

        if not use_aicsimageio and use_pylibczi is True:

            # read CZI using aicspylibczi
            czi = CziFile(filepath)

            # Get the shape of the data
            print('Dimensions   : ', czi.dims)
            print('Size         : ', czi.size)
            print('Shape        : ', czi.dims_shape())
            print('IsMosaic     : ', czi.is_mosaic())
            if czi.is_mosaic():
                print('Mosaic Size  : ', czi.read_mosaic_size())

            # get the required shape for all and single scenes
            shape_all, shape_single, same_shape = czt.get_shape_allscenes(czi, metadata)
            print('Required_Array Shape for all scenes: ', shape_all)
            for sh in shape_single:
                print('Required Array Shape for single scenes: ', sh)

            if not same_shape:
                print('Not all scenes have the same shape. Exiting ...')
                sys.exit()

            #array_type = 'dask'
            array_type = 'zarr'
            #array_type = 'numpy'

            if array_type == 'zarr':

                # define array to store all channels
                print('Using aicspylibCZI to read the image (ZARR array).')

                # option 1
                all_scenes_array = zarr.create(tuple(shape_all),
                                               dtype=metadata['NumPy.dtype'],
                                               chunks=True)

                # option 2
                # all_scenes_array = zarr.open(r'c:\Temp\czi_scene_all.zarr', mode='w',
                #                            shape=shape_all,
                #                            chunks=True,
                #                            dtype=md['NumPy.dtype'])

            if array_type == 'numpy':
                print('Using aicspylibCZI to read the image (Numpy.Array).')
                all_scenes_array = np.empty(shape_all, dtype=metadata['NumPy.dtype'])

            if array_type == 'zarr' or array_type == 'numpy':

                # loop over all scenes
                for s in range(metadata['SizeS']):
                    # get the CZIscene for the current scene
                    single_scene = czt.CZIScene(czi, metadata, sceneindex=s)
                    out = czt.read_czi_scene(czi, single_scene, metadata)
                    all_scenes_array[s, :, :, :, :, :] = np.squeeze(out, axis=0)

                print(all_scenes_array.shape)

            elif array_type == 'dask':

                def dask_load_sceneimage(czi, s, md):

                    # get the CZIscene for the current scene
                    single_scene = czt.CZIScene(czi, md, sceneindex=s)
                    out = czt.read_czi_scene(czi, single_scene, md)
                    return out

                sp = shape_all[1:]

                # create dask stack of lazy image readers
                print('Using aicspylibCZI to read the image (Dask.Array) + Delayed Reading.')
                lazy_process_image = dask.delayed(dask_load_sceneimage)  # lazy reader

                lazy_arrays = [lazy_process_image(czi, s, metadata) for s in range(metadata['SizeS'])]

                dask_arrays = [
                    da.from_delayed(lazy_array, shape=sp, dtype=metadata['NumPy.dtype'])
                    for lazy_array in lazy_arrays
                ]
                # Stack into one large dask.array
                all_scenes_array = da.stack(dask_arrays, axis=0)
                print(all_scenes_array.shape)

        do_scaling = checkboxes.cbox_autoscale.isChecked()

        # show the actual image stack
        nap.show_napari(viewer, all_scenes_array, metadata,
                        blending='additive',
                        adjust_contrast=do_scaling,
                        gamma=0.85,
                        add_mdtable=False,
                        rename_sliders=True)
Code Example #9
from aicspylibczi import CziFile
import xmltodict
from lxml import etree as ET
import imgfile_tools as imf
import numpy as np

filename = r"C:\Users\m1srh\OneDrive - Carl Zeiss AG\Smart_Microscopy_Workshop\datasets\OverViewScan.czi"
metadata = {}

# get metadata dictionary using aicspylibczi
czi_aicspylibczi = CziFile(filename)
metadatadict_czi = xmltodict.parse(ET.tostring(czi_aicspylibczi.meta))

# Get the shape of the data, the coordinate pairs are (start index, size)
metadata['dims_aicspylibczi'] = czi_aicspylibczi.dims_shape()[0]
metadata['axes_aicspylibczi'] = czi_aicspylibczi.dims
metadata['size_aicspylibczi'] = czi_aicspylibczi.size
metadata['czi_isMosaic'] = czi_aicspylibczi.is_mosaic()
print('CZI is Mosaic :', metadata['czi_isMosaic'])

metadata['SizeS'] = np.int(metadatadict_czi['ImageDocument']['Metadata']
                           ['Information']['Image']['SizeS'])
metadata['SizeT'] = np.int(metadatadict_czi['ImageDocument']['Metadata']
                           ['Information']['Image']['SizeT'])
metadata['SizeZ'] = np.int(metadatadict_czi['ImageDocument']['Metadata']
                           ['Information']['Image']['SizeZ'])
metadata['SizeC'] = np.int(metadatadict_czi['ImageDocument']['Metadata']
                           ['Information']['Image']['SizeC'])
metadata['SizeX'] = np.int(metadatadict_czi['ImageDocument']['Metadata']
                           ['Information']['Image']['SizeX'])
metadata['SizeY'] = np.int(metadatadict_czi['ImageDocument']['Metadata']
                           ['Information']['Image']['SizeY'])
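A quick follow-up check of the collected entries (illustrative only):

for k in ['SizeS', 'SizeT', 'SizeZ', 'SizeC', 'SizeX', 'SizeY']:
    print(k, ':', metadata[k])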
Code Example #10
def daread(img: Union[str, Path]) -> Tuple[da.core.Array, str]:
    """
    Read a CZI image file as a delayed dask array where each YX plane will be read on
    request.

    Parameters
    ----------
    img: Union[str, Path]
        The filepath to read.

    Returns
    -------
    img: dask.array.core.Array
        The constructed dask array where each YX plane is a delayed read.
    dims: str
        The dimension order as a string.
    """
    # Convert pathlike to CziFile
    if isinstance(img, (str, Path)):
        # Resolve path
        img = Path(img).expanduser().resolve(strict=True)

        # Check path
        if img.is_dir():
            raise IsADirectoryError(
                f"Please provide a single file to the `img` parameter. "
                f"Received directory: {img}")

    # Check that no other type was provided
    if not isinstance(img, Path):
        raise TypeError(
            f"Please provide a path to a file as a string, or an pathlib.Path, to the "
            f"`img` parameter. "
            f"Received type: {type(img)}")

    # Init temp czi
    czi = CziFile(img)

    # Get image dims shape
    image_dims = czi.dims_shape()

    # Setup the read dimensions dictionary for reading the first plane
    first_plane_read_dims = {}
    for dim, dim_info in image_dims.items():
        # Unpack dimension info
        dim_begin_index, dim_end_index = dim_info

        # Add to read dims
        first_plane_read_dims[dim] = dim_begin_index

    # Read first plane for information used by dask.array.from_delayed
    sample, sample_dims = czi.read_image(**first_plane_read_dims)

    # The Y and X dimensions are always the last two dimensions, in that order.
    # These dimensions cannot be operated over but the shape information is used
    # in multiple places so we pull them out for easier access.
    sample_YX_shape = sample.shape[-2:]

    # Create operating shape and dim order list
    operating_shape = czi.size[:-2]
    dims = [dim for dim in czi.dims[:-2]]

    # Create empty numpy array with the operating shape so that we can iter through
    # and use the multi_index to create the readers.
    # We add empty dimensions of size one to fake being the Y and X dimensions.
    lazy_arrays = np.ndarray(operating_shape + (1, 1), dtype=object)

    # We can enumerate over the multi-indexed array and construct read_dims
    # dictionaries by simply zipping together the ordered dims list and the current
    # multi-index plus the begin index for that plane.
    # We then set the value of the array at the same multi-index to
    # the delayed reader using the constructed read_dims dictionary.
    begin_indicies = tuple(image_dims[dim][0] for dim in dims)
    for i, _ in np.ndenumerate(lazy_arrays):
        this_plane_read_indicies = (current_dim_begin_index + curr_dim_index
                                    for current_dim_begin_index, curr_dim_index
                                    in zip(begin_indicies, i))
        this_plane_read_dims = dict(zip(dims, this_plane_read_indicies))
        lazy_arrays[i] = da.from_delayed(
            delayed(_imread)(img, this_plane_read_dims),
            shape=sample_YX_shape,
            dtype=sample.dtype,
        )

    # Convert the numpy array of lazy readers into a dask array
    merged = da.block(lazy_arrays.tolist())

    # Because dimensions outside of Y and X can be in any order and present or not
    # we also return the dimension order string.
    dims = dims + ["Y", "X"]
    return merged, "".join(dims)
Code Example #11
File: czi_reader.py Project: toloudis/aicsimageio
    def _daread(img: Path,
                czi: CziFile,
                chunk_by_dims: List[str] = [
                    Dimensions.SpatialZ, Dimensions.SpatialY,
                    Dimensions.SpatialX
                ],
                S: int = 0) -> Tuple[da.core.Array, str]:
        """
        Read a CZI image file as a delayed dask array where certain dimensions act as the chunk size.

        Parameters
        ----------
        img: Path
            The filepath to read.
        czi: CziFile
            The loaded CziFile object created from reading the filepath.
        chunk_by_dims: List[str]
            The dimensions to use for mapping the chunks / blocks.
            Default: [Dimensions.SpatialZ, Dimensions.SpatialY, Dimensions.SpatialX]
            Note: SpatialY and SpatialX will always be added to the list if not present.
        S: int
            If the image has different dimensions on any scene from another, the dask array construction will fail.
            In that case, use this parameter to specify a specific scene to construct a dask array for.
            Default: 0 (select the first scene)

        Returns
        -------
        img: dask.array.core.Array
            The constructed dask array where certain dimensions are chunked.
        dims: str
            The dimension order as a string.
        """
        # Get image dims indices
        image_dim_indices = czi.dims_shape()

        # Catch inconsistent scene dimension sizes
        if len(image_dim_indices) > 1:
            # Choose the provided scene
            try:
                image_dim_indices = image_dim_indices[S]
                log.info(
                    f"File contains variable dimensions per scene, selected scene: {S} for data retrieval."
                )
            except IndexError:
                raise exceptions.InconsistentShapeError(
                    f"The CZI image provided has variable dimensions per scene. "
                    f"Please provide a valid index to the 'S' parameter to create a dask array for the index provided. "
                    f"Provided scene index: {S}. Scene index range: 0-{len(image_dim_indices)}."
                )
        else:
            # If the list is length one that means that all the scenes in the image have the same dimensions
            # Just select the first dictionary in the list
            image_dim_indices = image_dim_indices[0]

        # Uppercase dimensions provided to chunk by dims
        chunk_by_dims = [d.upper() for d in chunk_by_dims]

        # Always add Y and X dims to chunk by dims because that is how CZI files work
        if Dimensions.SpatialY not in chunk_by_dims:
            log.info(
                f"Adding the Spatial Y dimension to chunk by dimensions as it was not found."
            )
            chunk_by_dims.append(Dimensions.SpatialY)
        if Dimensions.SpatialX not in chunk_by_dims:
            log.info(
                f"Adding the Spatial X dimension to chunk by dimensions as it was not found."
            )
            chunk_by_dims.append(Dimensions.SpatialX)

        # Setup read dimensions for an example chunk
        first_chunk_read_dims = {}
        for dim, (dim_begin_index, dim_end_index) in image_dim_indices.items():
            # Only add the dimension if the dimension isn't a part of the chunk
            if dim not in chunk_by_dims:
                # Add to read dims
                first_chunk_read_dims[dim] = dim_begin_index

        # Read first chunk for information used by dask.array.from_delayed
        sample, sample_dims = czi.read_image(**first_chunk_read_dims)

        # Get the shape for the chunk and operating shape for the dask array
        # We also collect the chunk and non chunk dimension ordering so that we can swap the dimensions after we
        # block the dask array together.
        sample_chunk_shape = []
        operating_shape = []
        non_chunk_dimension_ordering = []
        chunk_dimension_ordering = []
        for i, dim_info in enumerate(sample_dims):
            # Unpack dim info
            dim, size = dim_info

            # If the dim is part of the specified chunk dims then append it to the sample, and, append the dimension
            # to the chunk dimension ordering
            if dim in chunk_by_dims:
                sample_chunk_shape.append(size)
                chunk_dimension_ordering.append(dim)

            # Otherwise, append the dimension to the non chunk dimension ordering, and, append the true size of the
            # image at that dimension
            else:
                non_chunk_dimension_ordering.append(dim)
                operating_shape.append(image_dim_indices[dim][1] -
                                       image_dim_indices[dim][0])

        # Convert shapes to tuples and combine the non and chunked dimension orders as that is the order the data will
        # actually come out of the read data as
        sample_chunk_shape = tuple(sample_chunk_shape)
        blocked_dimension_order = non_chunk_dimension_ordering + chunk_dimension_ordering

        # Fill out the rest of the operating shape with dimension sizes of 1 to match the length of the sample chunk
        # When dask.block happens it fills the dimensions from inner-most to outer-most with the chunks as long as
        # the dimension is size 1
        # Basically, we are adding empty dimensions to the operating shape that will be filled by the chunks from dask
        operating_shape = tuple(
            operating_shape) + (1, ) * len(sample_chunk_shape)

        # Create empty numpy array with the operating shape so that we can iter through and use the multi_index to
        # create the readers.
        lazy_arrays = np.ndarray(operating_shape, dtype=object)

        # We can enumerate over the multi-indexed array and construct read_dims dictionaries by simply zipping together
        # the ordered dims list and the current multi-index plus the begin index for that plane. We then set the value
        # of the array at the same multi-index to the delayed reader using the constructed read_dims dictionary.
        dims = [d for d in czi.dims]
        begin_indicies = tuple(image_dim_indices[d][0] for d in dims)
        for i, _ in np.ndenumerate(lazy_arrays):
            # Add the czi file begin index for each dimension to the array dimension index
            this_chunk_read_indicies = (
                current_dim_begin_index + curr_dim_index
                for current_dim_begin_index, curr_dim_index in zip(
                    begin_indicies, i))

            # Zip the dims with the read indices
            this_chunk_read_dims = dict(
                zip(blocked_dimension_order, this_chunk_read_indicies))

            # Remove the dimensions that we want to chunk by from the read dims
            for d in chunk_by_dims:
                if d in this_chunk_read_dims:
                    this_chunk_read_dims.pop(d)

            # Add delayed array to lazy arrays at index
            lazy_arrays[i] = da.from_delayed(
                delayed(CziReader._imread)(img, this_chunk_read_dims),
                shape=sample_chunk_shape,
                dtype=sample.dtype,
            )

        # Convert the numpy array of lazy readers into a dask array and fill the inner-most empty dimensions with chunks
        merged = da.block(lazy_arrays.tolist())

        # Because we have set certain dimensions to be chunked and others not
        # we will need to transpose back to original dimension ordering
        # Example being, if the original dimension ordering was "SZYX" and we want to chunk by "S", "Y", and "X"
        # We created an array with dimensions ordering "ZSYX"
        transpose_indices = []
        transpose_required = False
        for i, d in enumerate(czi.dims):
            new_index = blocked_dimension_order.index(d)
            if new_index != i:
                transpose_required = True
                transpose_indices.append(new_index)
            else:
                transpose_indices.append(i)

        # Only run if the transpose is actually required
        # The default case is "Z", "Y", "X", which _usually_ doesn't need to be transposed because that is _usually_
        # The normal dimension order of the CZI file anyway
        if transpose_required:
            merged = da.transpose(merged, tuple(transpose_indices))

        # Because dimensions outside of Y and X can be in any order and present or not
        # we also return the dimension order string.
        return merged, "".join(dims)