def getMetadata(self):
    '''
    Open czi xml header, convert to dictionary,
    extract needed information and convert to correct units,
    return new dictionary
    '''
    with CziFile(self.directory) as c:
        cziMetadata = c.metadata()
        cziMetadata = parse(cziMetadata)

        # X, Y, & Z pixel size converted from meters to microns
        x_pixel_size = (10**6) * float(cziMetadata["ImageDocument"]["Metadata"]["Scaling"]["Items"]["Distance"][0]["Value"])
        y_pixel_size = (10**6) * float(cziMetadata["ImageDocument"]["Metadata"]["Scaling"]["Items"]["Distance"][1]["Value"])
        z_pixel_size = (10**6) * float(cziMetadata["ImageDocument"]["Metadata"]["Scaling"]["Items"]["Distance"][2]["Value"])
        voxel_size = x_pixel_size * y_pixel_size * z_pixel_size

        self.metadata = {'x_pixel_size': x_pixel_size,
                         'y_pixel_size': y_pixel_size,
                         'z_pixel_size': z_pixel_size,
                         'voxel_size': voxel_size}
        return self.metadata
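# getMetadata() above relies on a `parse` helper that is not shown; it is presumably
# xmltodict.parse (an assumption). A minimal standalone sketch of the same extraction,
# with a placeholder file name:
from czifile import CziFile
from xmltodict import parse

with CziFile('example.czi') as c:
    meta = parse(c.metadata())
    # the Distance entries are assumed to be ordered X, Y, Z, as in getMetadata() above
    distances = meta["ImageDocument"]["Metadata"]["Scaling"]["Items"]["Distance"]
    x_um, y_um, z_um = (float(d["Value"]) * 1e6 for d in distances[:3])
    print(x_um, y_um, z_um)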
def load_zeiss(path):
    _, img_name = os.path.split(path)
    with CziFile(path) as czi:
        xmltxt = czi.metadata()
        meta = xml.etree.ElementTree.fromstring(xmltxt)

        # next line is somewhat cryptic, but just extracts um/pix (calibration) of X and Y into res
        res = [float(i[0].text) for i in meta.findall('.//Scaling/Items/*')
               if i.attrib['Id'] == 'X' or i.attrib['Id'] == 'Y']
        assert np.isclose(res[0], res[1]), "Pixels are not square."
        # get first calibration value and convert it from meters to um
        res = res[0] * 1e6

        ts_ix = [k for k, a1 in enumerate(czi.attachment_directory)
                 if a1.filename[:10] == 'TimeStamps'][0]
        timestamps = list(czi.attachments())[ts_ix].data()
        dt = np.median(np.diff(timestamps))

        ax_dct = {n: k for k, n in enumerate(czi.axes)}
        n_frames = czi.shape[ax_dct['T']]
        n_channels = czi.shape[ax_dct['C']]
        n_X = czi.shape[ax_dct['X']]
        n_Y = czi.shape[ax_dct['Y']]

        images = list()
        for sb in czi.subblock_directory:
            images.append(sb.data_segment().data().reshape((n_X, n_Y)))

    return MetadataImage(image=np.array(images), pix_per_um=1. / res, um_per_pix=res,
                         time_interval=dt, frames=n_frames, channels=n_channels,
                         width=n_X, height=n_Y, series=None)
def openFile(img_f):
    from czifile import CziFile
    with CziFile(img_f) as czi:
        image_arrays = czi.asarray()
    print('Shape:', image_arrays.shape)
    # Returns several dimensions:
    # ?, colors, ?, Z, X, Y, ?
    return image_arrays
def read_czifile(czi_file, squeeze=True):
    """Reads czifile"""
    from czifile import CziFile
    with CziFile(czi_file) as czi:
        image_arrays = czi.asarray()
    if squeeze:
        image_arrays = np.squeeze(image_arrays)
    return image_arrays
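# Hedged usage sketch for read_czifile() above; the file name is a placeholder.
# czifile returns the full multidimensional block (axes such as 'BCZYX0', depending
# on the file); squeeze=True drops the singleton axes.
stack = read_czifile('sample.czi', squeeze=True)
print(stack.shape, stack.dtype)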
def get_czi_metadata(p_czi, p_out=None, stdout=False):
    """
    Convenience function to strip the metadata out of a CZI image and return.
    Requires CZI path. If `p_out` is provided, log the output to file. If
    `stdout` is True, print the metadata to standard output.
    """
    with CziFile(p_czi) as czi:
        meta = czi.metadata()
    if p_out:
        with open(p_out, 'w') as outfile:
            outfile.writelines(meta)
    if stdout:
        print(meta)
    return meta
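# Hedged usage sketch for get_czi_metadata() above; the paths are placeholders.
from xml.etree import ElementTree

xml_text = get_czi_metadata('sample.czi', p_out='sample_metadata.xml', stdout=False)
root = ElementTree.fromstring(xml_text)  # the returned string is plain XML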
def read_czi_image(czi_file, channel_names=None):
    from czifile import CziFile
    czi_img = CziFile(czi_file)
    # reorder the raw block to (channel, x, y, z)
    czi_channels = np.transpose(czi_img.asarray()[0, :, 0, :, :, :, 0], (0, 2, 3, 1))

    # scrape the voxel size (converted to microns) out of the raw XML metadata segment
    voxelsize = {}
    for s in czi_img.segments():
        if s.SID == "ZISRAWMETADATA":
            metadata = s.data().split('\n')
            for i, row in enumerate(metadata):
                if "Distance Id" in row:
                    value_row = metadata[i + 1]
                    start = value_row.find('>') + 1
                    end = value_row.find('<', start)
                    voxelsize[row.split('"')[1]] = np.around(
                        float(value_row[start:end]) * 1e6, decimals=3)
    voxelsize = tuple([voxelsize[dim] for dim in ['X', 'Y', 'Z']])

    n_channels = czi_channels.shape[0]
    print(czi_file.split('/')[-1], " : ", n_channels, " Channels ", voxelsize)

    # SpatialImage is assumed to be provided by the surrounding module (e.g. timagetk / openalea)
    if n_channels > 1:
        if channel_names is None:
            channel_names = ["CH" + str(i) for i in range(n_channels)]
        img = {}
        for i_channel, channel_name in enumerate(channel_names):
            img[channel_name] = SpatialImage(czi_channels[i_channel], voxelsize=voxelsize)
    else:
        img = SpatialImage(czi_channels[0], voxelsize=voxelsize)
    return img
def test_czifile():
    """Test JpegXR compressed CZI file."""
    from czifile import CziFile
    fname = datafiles('jpegxr.czi')
    if not os.path.exists(fname):
        pytest.skip('large file not included with source distribution')
    with CziFile(fname) as czi:
        assert czi.shape == (1, 1, 15, 404, 356, 1)
        assert czi.axes == 'BCZYX0'
        # verify data
        data = czi.asarray()
        assert data.flags['C_CONTIGUOUS']
        assert data.shape == (1, 1, 15, 404, 356, 1)
        assert data.dtype == 'uint16'
        assert data[0, 0, 14, 256, 146, 0] == 38086
def __init__(self, fpath, summary=True):
    '''
    Read in file using czifile

    Parameters
    ----------
    fpath : str
        Complete or relative file path to czi file
    '''
    with CziFile(fpath) as czi:
        self.raw_im = czi.asarray()
    if summary:
        self.print_summary()
    self.squeeze_data()
def get_im_info(image_filepath):
    """
    Use CziFile and tifffile to get image dimension and other information.

    Parameters
    ----------
    image_filepath: str
        filepath to the image file

    Returns
    -------
    im_dims: np.ndarray
        image dimensions in np.ndarray
    im_dtype: np.dtype
        data type of the image
    reader: str
        whether to use "czifile" or "tifffile" to read the image
    """
    if Path(image_filepath).suffix.lower() == ".czi":
        czi = CziFile(image_filepath)
        ch_dim_idx = czi.axes.index('C')
        y_dim_idx = czi.axes.index('Y')
        x_dim_idx = czi.axes.index('X')
        im_dims = np.array(czi.shape)[[ch_dim_idx, y_dim_idx, x_dim_idx]]
        im_dtype = czi.dtype
        reader = "czi"
    elif Path(image_filepath).suffix.lower() in TIFFFILE_EXTS:
        largest_series = tf_get_largest_series(image_filepath)
        zarr_im = zarr.open(
            imread(image_filepath, aszarr=True, series=largest_series))
        zarr_im = zarr_get_base_pyr_layer(zarr_im)
        im_dims = np.squeeze(zarr_im.shape)
        if len(im_dims) == 2:
            im_dims = np.concatenate([[1], im_dims])
        im_dtype = zarr_im.dtype
        reader = "tifffile"
    else:
        im_dims, im_dtype = get_sitk_image_info(image_filepath)
        reader = "sitk"
    return im_dims, im_dtype, reader
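# Hedged usage sketch for get_im_info() above; the path is a placeholder.
im_dims, im_dtype, reader = get_im_info('slide.czi')
print(reader, im_dims, im_dtype)  # e.g. "czi", array([n_channels, height, width]), uint16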
def load_zeiss(path):
    _, img_name = os.path.split(path)
    with CziFile(path) as czi:
        xmltxt = czi.metadata()
        meta = xml.etree.ElementTree.fromstring(xmltxt)

        # next line is somewhat cryptic, but just extracts um/pix (calibration) of X and Y into res
        res = [
            float(i[0].text) for i in meta.findall('.//Scaling/Items/*')
            if i.attrib['Id'] == 'X' or i.attrib['Id'] == 'Y'
        ]
        assert np.isclose(res[0], res[1]), "Pixels are not square."
        # get first calibration value and convert it from meters to um
        res = res[0] * 1e6

        ts_ix = [
            k for k, a1 in enumerate(czi.attachment_directory)
            if a1.filename[:10] == 'TimeStamps'
        ][0]
        timestamps = list(czi.attachments())[ts_ix].data()
        dt = np.median(np.diff(timestamps))

        ax_dct = {n: k for k, n in enumerate(czi.axes)}
        n_frames = czi.shape[ax_dct['T']]
        n_channels = czi.shape[ax_dct['C']]
        n_zstacks = czi.shape[ax_dct['Z']]
        n_x = czi.shape[ax_dct['X']]
        n_y = czi.shape[ax_dct['Y']]

        images = list()
        for sb in czi.subblock_directory:
            images.append(sb.data_segment().data().reshape((n_x, n_y)))

        logger.info(
            f"Loaded {czi._fh.name}. WxH({n_x},{n_y}), channels: {n_channels}, frames: {n_frames}, stacks: {n_zstacks}"
        )

    return np.array(images), 1 / res, dt, n_frames, n_channels  # , n_zstacks
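# Hedged usage sketch for load_zeiss() above; the path is a placeholder.
frames, pix_per_um, dt, n_frames, n_channels = load_zeiss('timelapse.czi')
print(frames.shape, pix_per_um, dt)  # stacked sub-blocks, calibration (pix/um), median frame interval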
import os

from czifile import CziFile
from pyvips import Image  # assumed: pyvips provides Image.new_from_array / write_to_file


class CZI:
    def __init__(self, path):
        self.filename = path
        self.czi = CziFile(path)

    def getPixelArray(self):
        im = self.czi.asarray()
        print('Image Width: {}'.format(im.shape[-1]))
        print('Image Height: {}'.format(im.shape[-2]))
        return im[0, 0, 0]

    def cvtStandardImgFormat(self, savePath, fmt, compression=False):
        im_arr = self.getPixelArray()
        # size checks use the array shape (the original compared pixel rows to scalars)
        if fmt.endswith('jpg') and (im_arr.shape[-1] > 65535 or im_arr.shape[-2] > 65535):
            print('Too large for JPEG format...')
            return
        elif fmt.endswith('png') and (im_arr.shape[-1] > 10000 or im_arr.shape[-2] > 10000):
            print('Too large for PNG format...')
            return
        fname = os.path.basename(self.filename)
        sname = os.path.splitext(fname)[0]  # base name without the .czi extension
        im = Image.new_from_array(im_arr)
        out_path = savePath + '/' + sname + '.' + fmt
        if fmt.endswith('tiff') and compression:
            im.write_to_file(out_path, compression='lzw')
        else:
            im.write_to_file(out_path)
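# Hedged usage sketch for the CZI wrapper above; paths and format are placeholders.
converter = CZI('slide.czi')
pixels = converter.getPixelArray()                                 # also prints width/height
converter.cvtStandardImgFormat('/tmp', 'tiff', compression=True)   # writes /tmp/slide.tiff via pyvips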
def read_z_slices_per_file(self, file_name: Path) -> int:
    with CziFile(file_name) as czi_file:
        return CziImageReader.find_count_of_axis(czi_file, "Z")
def read_sample_count(self, file_name: Path) -> int:
    with CziFile(file_name) as czi_file:
        return CziImageReader.find_count_of_axis(czi_file, "0")
def read_dimensions(self, file_name: Path) -> Tuple[int, int]:
    with CziFile(file_name) as czi_file:
        return (
            CziImageReader.find_count_of_axis(czi_file, "X"),
            CziImageReader.find_count_of_axis(czi_file, "Y"),
        )
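# read_z_slices_per_file / read_sample_count / read_dimensions above delegate to
# CziImageReader.find_count_of_axis, which is not shown here. A minimal sketch of what
# such a helper could look like (an assumption, not the project's actual code): look the
# axis letter up in czi_file.axes and return the matching entry of czi_file.shape,
# defaulting to 1 when the axis is absent.
from czifile import CziFile

def find_count_of_axis(czi_file: CziFile, axis: str) -> int:
    axes = czi_file.axes
    return czi_file.shape[axes.index(axis)] if axis in axes else 1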
def read_array( self, file_name: Path, dtype: Union[type, np.dtype], z_slice: int, channel_index: Optional[int], sample_index: Optional[int], ) -> np.ndarray: with CziFile(file_name) as czi_file: # Read metadata tile_shape = czi_file.filtered_subblock_directory[ # pylint: disable=unsubscriptable-object 0 ].shape dataset_shape: Tuple[int, ...] = tuple(czi_file.shape) count = 0 used_axes = {"Z", "C", "Y", "X", "0"} axes: Dict[str, Tuple[bool, int, int]] = {} for i, axis in enumerate(czi_file.axes): axes[axis] = (True, i, i - count) if axis not in used_axes: count += 1 for axis in used_axes: if axis not in axes: axes[axis] = (False, 0, 0) assert tile_shape is not None, "Cannot read tile shape format." if sample_index is not None: num_output_channel = 1 elif channel_index is not None: num_output_channel = dataset_shape[axes["0"][1]] else: channel_count = dataset_shape[axes["C"][1]] if axes["C"][0] else 1 num_output_channel = channel_count * dataset_shape[axes["0"][1]] output_shape = ( dataset_shape[axes["X"][1]], dataset_shape[axes["Y"][1]], num_output_channel, ) output = np.empty(output_shape, dtype) z_file_start = ( czi_file.start[axes["Z"][1]] # pylint: disable=unsubscriptable-object if axes["Z"][0] else 0 ) c_file_start = ( czi_file.start[axes["C"][1]] # pylint: disable=unsubscriptable-object if axes["C"][0] else 0 ) output_channel_offset = 0 # Read the data for tile_channel_index, ( tile_z_index, data_z_index, data_channel_index, data_sample_index, ) in CziImageReader._select_correct_tiles_for_czi( channel_index, sample_index, z_slice, axes, tile_shape, dataset_shape ).items(): # since the czi tiles are not sorted, we search linearly through them and check if the tile matches with the wanted coordinates # however, some axes might not exist, so the _matches_if_exist helper returns true if the axis does not exist for ( entry ) in ( czi_file.filtered_subblock_directory # pylint: disable=not-an-iterable ): if CziImageReader._matches_if_exist( "Z", entry.start[axes["Z"][1]] - z_file_start, tile_z_index, axes, ) and CziImageReader._matches_if_exist( "C", (entry.start[axes["C"][1]] - c_file_start) // tile_shape[axes["C"][1]], tile_channel_index, axes, ): data = entry.data_segment().data() data = to_target_datatype(data, dtype) # left axis are in order [(Z)(C)0XY] for axis in czi_file.axes: # pylint: disable=not-an-iterable if axis not in used_axes: data = data.take([0], axes[axis][2]) if data.ndim == 5: data = data.transpose( ( axes["Z"][2], axes["X"][2], axes["Y"][2], axes["0"][2], axes["C"][2], ) ) elif data.ndim == 4: if axes["Z"][0]: data = data.transpose( ( axes["Z"][2], axes["X"][2], axes["Y"][2], axes["0"][2], ) ) else: data = data.transpose( ( axes["X"][2], axes["Y"][2], axes["0"][2], axes["C"][2], ) ) elif data.ndim == 3: data = data.transpose( ( axes["X"][2], axes["Y"][2], axes["0"][2], ) ) else: raise Exception if data_z_index is not None: data = data.take([data_z_index], 0) # if z axis exist, data_z_index is set, so the access index for this is always 0 if data_channel_index is not None and axes["C"][0]: data = data.take([data_channel_index], -1) # if the sample index is set, the channel index is set, too, so access index is always 0 if data_sample_index is not None: data = data.take([data_sample_index], -1) # special case with C and 0 and no selected index set if data.ndim == 4: for c in range(tile_shape[axes["C"][1]]): next_output_channel_offset = ( output_channel_offset + data.shape[2] ) output[ :, :, output_channel_offset:next_output_channel_offset, ] = data[:, :, :, c] 
output_channel_offset = next_output_channel_offset elif data.ndim == 2: output[:, :, output_channel_offset] = data output_channel_offset += 1 else: output_channel_increment = ( 1 if data_sample_index is not None else data.shape[-1] ) next_output_channel_offset = ( output_channel_offset + output_channel_increment ) output[ :, :, output_channel_offset:next_output_channel_offset ] = data output_channel_offset = next_output_channel_offset # CZI stores pixels as BGR instead of RGB, so swap axes to ensure right color output if ( num_output_channel == 3 and dtype == np.uint8 and czi_file.filtered_subblock_directory[ # pylint: disable=unsubscriptable-object 0 ].pixel_type == "Bgr24" ): output[:, :, 2], output[:, :, 0] = output[:, :, 0], output[:, :, 2] output = output.reshape(output.shape + (1,)) return output
class ImageFileReader(object): def __init__(self, file_name, start_iter=0, stop_iter=None): self.file_name = file_name if '.nd2' in file_name: self.ext = 'nd2' self.reader = ND2Reader(file_name) elif ('.tif' in file_name) or ('.tiff' in file_name): self.ext = 'tif' self.reader = tifffile.TiffFile(file_name) elif ('.czi' in file_name): self.ext = 'czi' self.reader = CziFile(file_name) else: raise RuntimeError("Image format %s " \ "not recognized" % \ os.path.splitext(file_name)[1]) # Record the shape of the movie self.n_frames, self.height, self.width = \ self.get_shape() # Set the defaults for iterating over this # object self.start_iter = start_iter self.stop_iter = stop_iter if self.stop_iter is None: self.stop_iter = self.n_frames def __iter__(self): self.c_idx = self.start_iter return self def __next__(self): if self.c_idx < self.stop_iter: self.c_idx += 1 return self.get_frame(self.c_idx-1) else: raise StopIteration @property def shape(self): return self.get_shape() @property def dtype(self): return self.get_frame(0).dtype def _frame_valid(self, frame_idx): return (frame_idx < self.n_frames) def _frame_range_valid(self, frame_0, frame_1): return (frame_0 <= self.n_frames) and \ (frame_1 <= self.n_frames) and \ (frame_0 < frame_1) def _process_frame_range(self, start_frame, stop_frame): if start_frame is None: start_frame = 0 if stop_frame is None: stop_frame = self.n_frames assert self._frame_range_valid(start_frame, stop_frame) return start_frame, stop_frame def _subregion_valid(self, y0, y1, x0, x1): return (y0<=self.height) and (y1<=self.height) \ and (x0<=self.width) and (x1<=self.width) \ and (y0 < y1) and (x0 < x1) def _process_subregion(self, y0, y1, x0, x1): if y0 is None: y0 = 0 if x0 is None: x0 = 0 if y1 is None: y1 = self.height if x1 is None: x1 = self.width assert self._subregion_valid(y0, y1, x0, x1) return y0, y1, x0, x1 def close(self): self.reader.close() def get_shape(self): """ returns ------- (int n_frames, int height, int width) """ if self.ext == 'nd2': H = self.reader.metadata['height'] W = self.reader.metadata['width'] T = self.reader.metadata['total_images_per_channel'] elif self.ext == 'tif' or self.ext == 'czi': H, W = self.reader.pages[0].shape T = len(self.reader.pages) return (T, H, W) def get_frame(self, frame_idx): """ args ---- frame_idx : int returns ------- 2D ndarray (YX), dtype uint16 """ assert self._frame_valid(frame_idx) if self.ext == 'nd2': return self.reader.get_frame_2D(t=frame_idx) elif self.ext == 'tif' or self.ext == 'czi': return self.reader.pages[frame_idx].asarray() def get_frames(self, frame_indices): """ args ---- frame_indices : list of int returns ------- 3D ndarray (TYX), dtype uint16 """ n = len(frame_indices) result = np.empty((n, self.height, self.width), dtype='uint16') for i, idx in enumerate(frame_indices): result[i,:,:] = self.get_frame(idx) return result def get_frame_range(self, start_frame, stop_frame): """ args ---- start_frame, stop_frame : int, frame indices returns ------- 3D ndarray (TYX), dtype uint16 """ assert self._frame_range_valid(start_frame, stop_frame) n = stop_frame-start_frame result = np.empty((n, self.height, self.width), dtype='uint16') for j, idx in enumerate(range(start_frame, stop_frame)): result[j,:,:] = self.get_frame(idx) return result def sum_proj(self, start_frame=None, stop_frame=None): """ Return the sum intensity projection for the full movie. 
args ---- start_frame, stop_frame : int, limits for the frames to use, if desired returns ------- 2D ndarray (YX), dtype uint16 """ start_frame, stop_frame = self._process_frame_range( start_frame, stop_frame) result = np.zeros((self.height, self.width), dtype='float64') for frame_idx in range(start_frame, stop_frame): result = result + self.get_frame(frame_idx) return result def max_int_proj(self, start_frame=None, stop_frame=None): """ Return the maximum intensity projection for the full movie. args ---- start_frame, stop_frame : int, limits for the frames to use, if desired returns ------- 2D ndarray (YX), dtype uint16 """ start_frame, stop_frame = self._process_frame_range( start_frame, stop_frame) result = np.zeros((self.height, self.width), dtype='float64') for frame_idx in range(start_frame, stop_frame): result = np.maximum(result, self.get_frame(frame_idx)) return result def min_max(self, start_frame=None, stop_frame=None): """ args ---- start_frame, stop_frame : int returns ------- (int, int), the minimum and maximum pixel intensities in the stack """ start_frame, stop_frame = self._process_frame_range( start_frame, stop_frame) cmax, cmin = 0, 0 for frame_idx in range(start_frame, stop_frame): img = self.get_frame(frame_idx) cmax = max([img.max(), cmax]) cmin = min([img.min(), cmin]) return cmin, cmax def frame_subregion(self, frame_idx, y0=None, y1=None, x0=None, x1=None): """ Return a subregion of a single frame. args ---- frame_idx : int, the frame index y0, y1 : int, y limits of rectangular subregion x0, x1 : int, x limits of rectangular subregion returns ------- 2D ndarray (YX), dtype uint16, the desired subregion """ y0, y1, x0, x1 = self._process_subregion(y0, y1, x0, x1) return self.get_frame(frame_idx)[y0:y1,x0:x1] def subregion(self, y0=None, y1=None, x0=None, x1=None, start_frame=None, stop_frame=None): """ Return a subregion of the movie. args ---- y0, y1 : int, the y limits of the rectangular subregion x0, x1 : int, the x limits of the rectangular subregion start_frame, stop_frame : int, the temporal limits on the subregion returns ------- 3D ndarray (TYX), dtype uint16 """ y0, y1, x0, x1 = self._process_subregion(y0, y1, x0, x1) start_frame, stop_frame = self._process_frame_range( start_frame, stop_frame) T = stop_frame - start_frame N = y1 - y0 M = x1 - x0 result = np.empty((T, N, M), dtype='uint16') for j, frame_idx in enumerate(range(start_frame, stop_frame)): result[j,:,:] = self.get_frame(frame_idx)[y0:y1,x0:x1] return result def imsave(self, out_tif, **subregion_kwargs): """ Save a portion of the image to a TIF file. args ---- out_tif : str subregion_kwargs : to self.subregion(), which rectangular subregion to save returns ------- None """ tifffile.imsave( out_tif, self.subregion(**subregion_kwargs) )
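# Hedged usage sketch for ImageFileReader above; the path is a placeholder. Note that the
# 'czi' branch of get_shape()/get_frame() indexes self.reader.pages, which czifile's
# CziFile does not expose (tifffile's TiffFile does), so a TIFF stack is used here.
reader = ImageFileReader('movie.tif')
print(reader.shape)                 # (n_frames, height, width)
first = reader.get_frame(0)         # single 2D frame
mip = reader.max_int_proj()         # maximum-intensity projection over all frames
for frame in reader:                # frames can also be iterated directly
    pass
reader.close()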
def open_image(img_path, meta_path=None, z_first=False): """Open a multichannel 3D microscopy image. Open a multichannel 3D microscopy image and return the image as a 4-D numpy array. Accepted filetypes are CZI image (`.czi`) or OME-TIFF image (`.ome.tiff` or `.ome.tif`). Also return the metadata as an XML ElementTree object, either scraped from the CZI image or grabbed from the XML file provided at `meta_path`. By default, the image is returned with the dimensions [c, x, y, z]. For CZI images, the `czifile` package is required. For OME-TIFF images, the `bioformats` and `javabridge` packages are required, as well as Java. Parameters: img_path: str Path to the microscopy image. CZI and OME-TIFF files are accepted. Optional: meta_path: str (default None) Path to the metadata XML file. If a `img_path` points to a CZI file, this argument is not required, but if provided, it will be used over the CZI metadata. If `img_path` points to an OME-TIFF file, `meta_path` will be used to read the image's metadata. If `meta_path` is not provided, search the image directory for an XML file with the same basename as `img_path`. z_first: bool (default False) If True, return `img` with the dimensions [c, z, x, y]. """ # determine image filetype and open if img_path.lower().endswith('.czi'): img_name = os.path.splitext(os.path.basename(img_path))[0] # open CZI format using `czifile` library from czifile import CziFile with CziFile(img_path) as czi_file: img_czi = czi_file.asarray() if meta_path is not None: metatree = ElementTree.parse(meta_path) else: meta = czi_file.metadata() metatree = ElementTree.fromstring(meta) if z_first: img = img_czi[0, 0, :, 0, :, :, :, 0] # c, z, x, y else: img = img_czi[0, 0, :, 0, :, :, :, 0].transpose(0, 2, 3, 1) # c, x, y, z elif any( [img_path.lower().endswith(ext) for ext in ['.ome.tiff', '.ome.tif']]): img_name = os.path.splitext( os.path.splitext(os.path.basename(img_path))[0])[0] # open OME-TIFF format using `bioformats` library (requires Java) import javabridge import bioformats javabridge.start_vm(class_path=bioformats.JARS) # iterate over z-stack until end of file slices = [] for z in count(): try: s = bioformats.load_image(img_path, z=z) slices.append(s) except javabridge.jutil.JavaException: # final z-slice was read, stop reading javabridge.kill_vm() break img_ome = np.stack(slices) if z_first: img = img_ome.transpose(3, 0, 1, 2) # c, z, x, y else: img = img_ome.transpose(3, 1, 2, 0) # c, x, y, z # look for metadata .XML file with same filename if meta_path is None: meta_path = os.path.splitext( os.path.splitext(img_path)[0])[0] + '.xml' try: metatree = ElementTree.parse(meta_path) except IOError: # metadata file not found raise IOError('CZI metadata XML not found at expected path "' + meta_path + '" (required for OME-TIFF)') else: raise ValueError( 'Image filetype not recognized. Allowed: .CZI, .OME.TIFF') return img, img_name, metatree
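# Hedged usage sketch for open_image() above; the path is a placeholder.
img, img_name, metatree = open_image('embryo.czi', z_first=True)
print(img_name, img.shape)  # (c, z, x, y) when z_first=True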
#from scipy.misc import imresize
#import matplotlib as plt
from matplotlib import pylab as pl
import urllib.request
import os
import numpy as np           # needed for np.squeeze below
from czifile import CziFile  # needed to open the downloaded .czi

#fn='/home/pwatkins/Downloads/0.08 lead_continuous 150-50-50-03.czi'
#fn = '/data/pwatkins/kara/sample_alignment_data_from_Kara_20180125/2017/training-zebrafish-4-sections/' +\
#     'zebrafish_20171013_10-15-10/003_Region3.czi'
url = 'https://keeper.mpdl.mpg.de/f/ec98ecaff5674dfea904/?dl=1'
fn = 'face.czi'
urllib.request.urlretrieve(url, fn)

czi = CziFile(fn)
#print(czi.metadata, dir(czi.metadata))
iimg = czi.asarray()
print(iimg.shape)
os.remove(fn)

#shape = np.array(iimg.shape[3:5])
#img = np.zeros(shape//10 + 1,dtype=iimg.dtype)
#
#for scene in range(iimg.shape[1]):
#    cimg = np.squeeze(iimg[0,scene,0,:,:,0])[0::10,0::10]
#    sel = (cimg > 0)
#    img[sel] = cimg[sel]

img = np.squeeze(iimg)
import argparse

import numpy as np
from czifile import CziFile


def normalize_image(image, vmin=0.0, vmax=1.0):
    # signature reconstructed from the call sites below; vmin/vmax defaults are assumed
    return np.interp(image, (np.min(image), np.max(image)), (vmin, vmax))


# define arguments
parser = argparse.ArgumentParser()
parser.add_argument('image', type=str, nargs=1, help='Path to .CZI image file.')

# parse arguments
args = vars(parser.parse_args())
img_path = args['image'][0]

# open CZI image
with CziFile(img_path) as image_file:
    img_czi_format = image_file.asarray()
    # print(image_file.axes)
    # meta = image_file.metadata()
    # mtree = ElementTree.fromstring(meta)

img = img_czi_format[0, 0, :, 0, 0, :, :, 0]  # c, x, y
img_dapi = normalize_image(img[3, :, :])
img_fish = normalize_image(img[2, :, :])
img_dcas13 = img[1, :, :]

# fig, ax = plt.subplots(1,3)
# ax[0].imshow(img_dapi, cmap='binary_r')
# ax[0].set_title('DAPI')
# ax[1].imshow(img_fish, cmap='binary_r')
# ax[1].set_title('FISH')
def read_dtype(self, file_name: Path) -> str:
    with CziFile(file_name) as czi_file:
        return czi_file.dtype.name  # pylint: disable=no-member
def animate_zstack_from_czi(czi, gain=1., separate=False, **kwargs): """ Animate the z-stack in a given CZI file. Unless separate is True, plot \ all channels as an overlay, with the legend constructed from CZI file \ metadata. If separate is True, plot all channels on separate subplots. Currently only works with Airyscan images. """ def update_frame(f): for k, imxy in enumerate(imgs_xy): imxy.set_data(channels[k][:,:,f]) return imxy # interpret `czi` argument if isinstance(czi, str): # interpret as path to CZI file with CziFile(czi) as czi_file: meta = czi_file.metadata() img = czi_file.asarray() elif isinstance(czi, CziFile): # interpret as CziFile object meta = czi.metadata() img = czi.asarray() else: # unknown argument type raise TypeError('`czi` argument must be of type `str` or `CziFile`.') gain = float(gain) # attempt to get channels and make legend mtree = ElementTree.fromstring(meta) track_tree = mtree.find('Metadata').find('Experiment').find('ExperimentBlocks').find('AcquisitionBlock').find('MultiTrackSetup').findall('TrackSetup') tracks = [] for track in track_tree: name = track.get('Name') wavelen = float(track.find('Attenuators').find('Attenuator').find('Wavelength').text)*1E9 rgb = np.array(wavelength_to_rgb(wavelen))/256. # print rgb tracks.append([name, rgb]) channels = [] for t in range(len(tracks)): channels.append(img[0, 0, t, 0, :, :, :, 0].transpose(1, 2, 0)) # x, y, z # make colormaps cmaps = [] for j, track in enumerate(tracks): cmaps.append(LinearSegmentedColormap.from_list('cmap' + str(j), [list(tracks[j][1]) + [0.], list(tracks[j][1]) + [1.]])) frames = list(range(channels[0].shape[2])) vmins = [np.amin(channels[i]) for i in range(len(channels))] vmaxs = [np.amax(channels[i])/gain for i in range(len(channels))] if not separate: fig, ax = plt.subplots() ax.set_facecolor('k') imgs_xy = [] for i in range(len(channels)): imgs_xy.append(ax.imshow(channels[i][:,:,frames[0]], vmin=vmins[i], vmax=vmaxs[i], cmap=cmaps[i], **kwargs)) # ax.legend([l[0] for l in tracks]) else: fig, ax = plt.subplots(1, len(channels)) imgs_xy = [] for i in range(len(channels)): ax[i].set_facecolor('k') imgs_xy.append(ax[i].imshow(channels[i][:,:,frames[0]], vmin=vmins[i], vmax=vmaxs[i], cmap=cmaps[i], **kwargs)) ax[i].set_title(tracks[i][0]) anim = FuncAnimation(fig, update_frame, frames=frames, interval=200, blit=False) # plt.show() return True
def read_czi(czi_path):
    with CziFile(czi_path) as czi:
        image_arrays = czi.asarray()
    return image_arrays
import sys
sys.path.insert(0, '../../czifile/czifile/')
from czifile import CziFile
import matplotlib.pyplot as plt
import numpy as np

with CziFile('data/3500000427_100X_20170120_F05_P27.czi') as czi:
    image_arrays = czi.asarray()

print(image_arrays.shape)
images = [image_arrays[0, 0, 0, idx] for idx in range(39)]

for i in range(3):
    plt.figure(i + 1)
    ax = plt.subplot(311)
    plt.tight_layout()
    ax.set_title('sample #{:d}'.format(i * 3))
    ax.axis('off')
    plt.imshow(np.squeeze(images[i * 3]))

    ax = plt.subplot(312)
    plt.tight_layout()
    ax.set_title('sample #{:d}'.format(i * 3 + 1))
    ax.axis('off')
    plt.imshow(np.squeeze(images[i * 3 + 1]))

    ax = plt.subplot(313)
    plt.tight_layout()
    ax.set_title('sample #{:d}'.format(i * 3 + 2))
    ax.axis('off')
    plt.imshow(np.squeeze(images[i * 3 + 2]))
names = []
pathfile = []
for row in czifiles.index:
    names.append(czifiles.loc[row][:-4])
    pathfile.append(os.path.join(targetdir, czifiles.loc[row]))

#generate a pandas dataframe listing files and paths
xlFrame = pd.DataFrame(pathfile, index=names, columns=['Pathfile'])

#save xlsfile
xlspath = os.path.join(targetdir, 'SummaryFile.xlsx')
xlFrame.to_excel(xlspath)

#load images
xlFrame['Image'] = ''
for row in xlFrame.index:
    #xlFrame['Image'].loc[row] = tifffile.imread( xlFrame['Pathfile'].loc[row])
    with CziFile(xlFrame['Pathfile'].loc[row]) as czi:
        img = np.squeeze(czi.asarray())
    img = np.rollaxis(img, 2, 0)
    img = np.flipud(img)
    img = np.rollaxis(img, 0, 3)
    xlFrame['Image'].loc[row] = img

#normal intensity values within image
def normalizeImage(img):
    img = img - np.min(img)
    return img / np.max(img)

#determines the range
def setrange(x, xrange, limits):
class CziImageStack(ImageStack): extensions = ('.czi', ) priority = 500 def open(self, location, **kwargs): with warnings.catch_warnings(): warnings.simplefilter('ignore') self.czi = CziFile(location.path) self.frames = { _get_subblock_identifier(subblock): subblock for subblock in self.czi.filtered_subblock_directory } self.size = _dim_shape_to_dict(self.czi.axes, self.czi.shape) self.metadata = self.czi.metadata(raw=False) scaling = self.metadata['ImageDocument']['Metadata']['Scaling'][ 'Items']['Distance'] # /ImageDocument calibration_x = float( next(iter([scale for scale in scaling if scale['Id'] == 'X' ]))['Value']) * 1E6 calibration_y = float( next(iter([scale for scale in scaling if scale['Id'] == 'Y' ]))['Value']) * 1E6 assert calibration_x == calibration_y self.calibration = calibration_x timestamps = None for entry in self.czi.attachment_directory: if entry.name == 'TimeStamps': timestamps = entry.data_segment().data() break self.timestamps = timestamps positions = [] for scene in sorted( self.metadata['ImageDocument']['Metadata']['Information'] ['Image']['Dimensions']['S']['Scenes']['Scene'], key=lambda scene_: int(scene_['Index'])): x, y = scene['CenterPosition'].split(',') positions.append((float(x), float(y))) self.positions = positions self.set_dimensions_and_sizes([ Dimensions.Time, Dimensions.PositionXY, Dimensions.PositionZ, Dimensions.Channel ], [ self.size.get('T', 1), self.size.get('S', 1), 1, self.size.get('C', 1) ]) # noinspection PyProtectedMember def notify_fork(self): self.czi._fh.close() self.czi._fh.open() def get_data(self, what): channel = what.get(Dimensions.Channel, 0) position = what.get(Dimensions.PositionXY, 0) time = what.get(Dimensions.Time, 0) return _get_image_from_subblock(self.frames[_normalize( _only_existing_dim(self.size, dict(C=channel, S=position, T=time)))]) def get_meta(self, what): try: time = float(self.timestamps[what[Dimensions.Time]]) except TypeError: time = what[Dimensions.Time] try: position = ( self.positions[what[Dimensions.PositionXY]][0], self.positions[what[Dimensions.PositionXY]][1], 0.0, ) except KeyError: position = (float('nan'), float('nan'), 0.0) meta = self.__class__.Metadata(time=time, position=position, calibration=self.calibration) return meta
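# CziImageStack above relies on module-level helpers (_dim_shape_to_dict,
# _get_subblock_identifier, _only_existing_dim, ...) that are not shown here. A minimal
# sketch of _dim_shape_to_dict under the obvious assumption that it simply pairs the CZI
# axis letters with their sizes (hypothetical, not the package's actual implementation):
def _dim_shape_to_dict(axes, shape):
    # e.g. axes='BSTCYX0', shape=(1, 4, 100, 2, 512, 512, 1)
    #   -> {'B': 1, 'S': 4, 'T': 100, 'C': 2, 'Y': 512, 'X': 512, '0': 1}
    return dict(zip(axes, shape))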