def save_timepoint_as_tiff(dask_array, idx):
    # materialize the dask array for this timepoint and write it as OME-TIFF
    writer = OmeTiffWriter(
        f"//allen/aics/animated-cell/Dan/LLS2/T{idx}.ome.tif",
        overwrite_file=True)
    imgdata = dask_array.compute()
    writer.save(imgdata, dimension_order="CZYX")
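# A hedged usage sketch for save_timepoint_as_tiff above: iterate over the T axis
# of a dask-backed TCZYX array, saving one file per timepoint. The array shape and
# chunking are hypothetical placeholders, and the hard-coded output path in the
# function would need to be reachable for this to run.
import dask.array as da

timelapse = da.zeros((5, 2, 16, 256, 256), dtype="uint16",
                     chunks=(1, 2, 16, 256, 256))  # TCZYX
for t in range(timelapse.shape[0]):
    save_timepoint_as_tiff(timelapse[t], t)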
def save_segmentation(
    bw: np.ndarray,
    contour_flag: bool,
    output_path: Path,
    fn: str,
    suffix: str = "_struct_segmentation",
):
    """Save the segmentation into a tiff file.

    Parameters
    ----------
    bw: np.ndarray
        the segmentation to save
    contour_flag: bool
        whether to also save the segmentation contour
    output_path: Path
        the path to save to
    fn: str
        the core file name to use; for example, with fn="img_102" and a
        suffix of "_seg", the output file name is "img_102_seg.tiff"
    suffix: str
        the suffix to add to the output filename
    """
    with OmeTiffWriter(str(output_path / (fn + suffix + ".tiff"))) as writer:
        writer.save(bw)

    if contour_flag:
        bd = generate_segmentation_contour(bw)
        out_fn = str(output_path / (fn + suffix + "_contour.tiff"))
        with OmeTiffWriter(out_fn) as writer:
            writer.save(bd)
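# A minimal usage sketch for save_segmentation above, assuming numpy and
# pathlib.Path are imported as elsewhere in this listing; the mask shape, the
# output directory, and the file stem are hypothetical placeholders.
example_seg = np.zeros((16, 256, 256), dtype=np.uint8)  # ZYX binary mask
example_seg[4:8, 100:150, 100:150] = 255
example_out = Path("./example_output")
example_out.mkdir(exist_ok=True)
# writes img_102_struct_segmentation.tiff; with contour_flag=True it would also
# write img_102_struct_segmentation_contour.tiff via generate_segmentation_contour
save_segmentation(example_seg, contour_flag=False,
                  output_path=example_out, fn="img_102")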
def execute(self, args):
    if not args.data_type.startswith('.'):
        args.data_type = '.' + args.data_type

    filenames = glob(args.raw_path + os.sep + '*' + args.data_type)
    filenames.sort()

    existing_files = glob(args.train_path + os.sep + 'img_*.ome.tif')
    print(len(existing_files))
    training_data_count = len(existing_files) // 3

    for _, fn in enumerate(filenames):
        training_data_count += 1

        # load raw image
        reader = AICSImage(fn)
        struct_img = reader.get_image_data(
            "CZYX", S=0, T=0, C=[args.input_channel]).astype(np.float32)
        struct_img = input_normalization(struct_img, args)

        # load segmentation
        seg_fn = args.seg_path + os.sep + os.path.basename(
            fn)[:-1 * len(args.data_type)] + '_struct_segmentation.tiff'
        reader = AICSImage(seg_fn)
        seg = reader.get_image_data("ZYX", S=0, T=0, C=0) > 0.01
        seg = seg.astype(np.uint8)
        seg[seg > 0] = 1

        # excluding mask
        cmap = np.ones(seg.shape, dtype=np.float32)
        mask_fn = args.mask_path + os.sep + os.path.basename(
            fn)[:-1 * len(args.data_type)] + '_mask.tiff'
        if os.path.isfile(mask_fn):
            reader = AICSImage(mask_fn)
            mask = reader.get_image_data("ZYX", S=0, T=0, C=0)
            cmap[mask == 0] = 0

        with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                           f'{training_data_count:03}' + '.ome.tif') as writer:
            writer.save(struct_img)

        with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                           f'{training_data_count:03}' + '_GT.ome.tif') as writer:
            writer.save(seg)

        with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                           f'{training_data_count:03}' + '_CM.ome.tif') as writer:
            writer.save(cmap)
def save_segmentation(bw, contour_flag, output_path, fn):
    with OmeTiffWriter(str(output_path /
                           (fn + '_struct_segmentation.tiff'))) as writer:
        writer.save(bw)

    if contour_flag:
        bd = generate_segmentation_contour(bw)
        with OmeTiffWriter(str(output_path /
                               (fn + '_struct_contour.tiff'))) as writer:
            writer.save(bd)
def test_overwriteFile(resources_dir):
    """
    Test to check if save() can overwrite a file
    """
    with OmeTiffWriter(resources_dir / filename, overwrite_file=True) as writer:
        writer.save(image)
def combineFiles(files, out, channel_names=None):
    finalimage = None
    for f in files:
        ai = AICSImage(f)
        # ai.data is 6d.
        image = ai.data
        if image.dtype == numpy.float32:
            # normalize data into the range 0 - uint16 max
            image = image.clip(min=0.0)
            image = image / image.max()
            image = 65535 * image
            # convert float to uint16
            image = image.astype(numpy.uint16)
        if finalimage is None:
            finalimage = [image[0][0]]
        else:
            finalimage = numpy.append(finalimage, [image[0][0]], axis=1)

    print(finalimage.shape)
    finalimage = finalimage.transpose([0, 2, 1, 3, 4])
    with OmeTiffWriter(file_path=out, overwrite_file=True) as writer:
        writer.save(
            finalimage,
            channel_names=channel_names,
            pixels_physical_size=[0.108, 0.108, 0.290],
        )
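# The float-to-uint16 rescaling used in combineFiles (and repeated in several
# snippets below) can be factored into a small helper. This is a sketch under
# the same assumptions (negative values clipped, full-range scaling); it is not
# part of the original code.
def float_to_uint16(image):
    # clip negatives, scale to [0, 1], then expand to the full uint16 range
    image = image.clip(min=0.0)
    image = image / image.max()
    return (65535 * image).astype(numpy.uint16)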
def create_multichtiff_data(path_root: Path, dims_zyx: Sequence[int],
                            n_ch_in: int, n_ch_out: int,
                            n_items: int) -> Path:
    assert len(dims_zyx) == 3
    path_data = create_data_dir(path_root)
    records = []
    for idx in range(n_items):
        path_x = path_data / f"{idx:02}.tif"
        data_x = np.random.randint(128,
                                   size=[n_ch_in + n_ch_out] + list(dims_zyx),
                                   dtype=np.uint8)
        with OmeTiffWriter(path_x) as writer:
            writer.save(data_x, dimension_order="CZYX")  # should be a numpy array
        records.append({
            "dummy_id": idx,
            "path_tiff": path_x,
            "channel_signal": list(np.arange(0, n_ch_in)),
            "channel_target": list(np.arange(0, n_ch_out) + n_ch_in),
        })
    path_csv = path_root / "dummy.csv"
    pd.DataFrame(records).set_index("dummy_id").to_csv(path_csv)
    return path_csv
def test_dontOverwriteFile(resources_dir):
    """
    Test to check that save() raises an error when the user does not want to
    overwrite a file that already exists
    """
    with pytest.raises(Exception):
        with OmeTiffWriter(resources_dir / filename) as writer:
            writer.save(image)
def convert_combined():
    inroot = "\\\\allen\\aics\\modeling\\cheko\\for_others\\2018-02-14_dan_vday_mitosis\\timelapse_wt2_s2\\"
    for j in range(0, 20):
        finalimage = None
        for i in range(0, len(INFILES)):
            infilepath = inroot + str(j).zfill(2) + "\\" + INFILES[i]
            image = TiffReader(infilepath).data
            image = image.transpose([1, 0, 2, 3])
            # normalize data into the range 0 - uint16 max
            image = image.clip(min=0.0)
            image = image / image.max()
            image = 65535 * image
            # convert float to uint16
            image = image.astype(numpy.uint16)
            # axis=2 is the C axis
            if finalimage is None:
                finalimage = [image]
            else:
                finalimage = numpy.append(finalimage, [image], axis=2)

        with OmeTiffWriter(
                file_path=OUTROOT + "combined_frame_" + str(j).zfill(2) + ".ome.tiff",
                overwrite_file=True,
        ) as writer:
            writer.save(
                finalimage,
                channel_names=CHNAMES,
                pixels_physical_size=[0.290, 0.290, 0.290],
            )
def test_loadAssertionError(resources_dir):
    """
    Test to check that save() only accepts data with 3, 4, or 5 dimensions
    """
    image_to_save = np.ones((1, 2, 3, 4, 5, 6))
    with pytest.raises(Exception):
        with OmeTiffWriter(resources_dir / filename, overwrite_file=True) as writer:
            writer.save(image_to_save)
def output_hook(im, names, out_flag, output_path, fn):
    assert len(im) == len(names) and len(names) == len(out_flag)
    for i in range(len(out_flag)):
        if out_flag[i]:
            if names[i].startswith('bw_'):
                segmentation_type = names[i]
                bw = im[i].astype(np.uint8)
                bw[bw > 0] = 255
                with OmeTiffWriter(
                        str(output_path /
                            (fn + '_bw_' + segmentation_type[3:] + '.tiff'))) as writer:
                    writer.save(bw)
            else:
                with OmeTiffWriter(
                        str(output_path /
                            (fn + '_' + names[i] + '.tiff'))) as writer:
                    writer.save(im[i])
def output_hook(im, names, out_flag, output_path, fn):
    """
    general hook for customized output
    """
    assert len(im) == len(names) and len(names) == len(out_flag)
    for i in range(len(out_flag)):
        if out_flag[i]:
            if names[i].startswith("bw_"):
                segmentation_type = names[i]
                bw = im[i].astype(np.uint8)
                bw[bw > 0] = 255
                with OmeTiffWriter(
                        str(output_path /
                            (fn + "_bw_" + segmentation_type[3:] + ".tiff"))) as writer:
                    writer.save(bw)
            else:
                with OmeTiffWriter(
                        str(output_path /
                            (fn + "_" + names[i] + ".tiff"))) as writer:
                    writer.save(im[i])
def test_writerShapeComparison(resources_dir):
    """
    Test to check that OmeTiffWriter saves arrays that are reflexive with
    OmeTiffReader
    """
    with OmeTiffWriter(resources_dir / filename, overwrite_file=True) as writer:
        writer.save(image)

    output = OmeTiffReader(resources_dir / filename).data

    assert output.shape == image.shape[1:]
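# A stand-alone round-trip sketch of the write/read reflexivity exercised by
# test_writerShapeComparison above; the file path is a hypothetical temporary
# location and the array shape is arbitrary.
roundtrip_path = "roundtrip_example.ome.tif"
roundtrip_data = np.random.randint(0, 255, size=(1, 2, 8, 64, 64),
                                   dtype=np.uint8)
with OmeTiffWriter(roundtrip_path, overwrite_file=True) as writer:
    writer.save(roundtrip_data)
roundtrip_readback = OmeTiffReader(roundtrip_path).data
# the test above asserts the read-back shape matches the saved shape without
# its leading dimension
print(roundtrip_data.shape, roundtrip_readback.shape)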
def test_noopOverwriteFile(resources_dir):
    """
    Test to check that save() silently no-ops when the user does not want to
    overwrite an existing file
    """
    with open(resources_dir / filename, "w") as f:
        f.write("test")

    with OmeTiffWriter(resources_dir / filename, overwrite_file=False) as writer:
        writer.save(image)

    with open(resources_dir / filename, "r") as f:
        line = f.readline().strip()
    assert "test" == line
def convert_tiff_to_ome_tiff_1ch(filepathin, filepathout):
    image = TiffReader(filepathin).data
    image = image.transpose([1, 0, 2, 3])
    # normalize data into the range 0 - uint16 max
    image = image.clip(min=0.0)
    image = image / image.max()
    image = 65535 * image
    # convert float to uint16
    image = image.astype(numpy.uint16)
    with OmeTiffWriter(file_path=filepathout, overwrite_file=True) as writer:
        writer.save(image,
                    channel_names=["dna"],
                    pixels_physical_size=[0.290, 0.290, 0.290])
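# A hedged usage sketch for convert_tiff_to_ome_tiff_1ch above; both file paths
# are hypothetical placeholders, and the input is assumed to be a 4D float stack
# whose first two axes are swapped by the function's transpose.
convert_tiff_to_ome_tiff_1ch("raw_dna_stack.tiff", "raw_dna_stack.ome.tiff")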
def test_dimensionOrder(resources_dir, dims, expected_t, expected_c,
                        expected_z, expected_y, expected_x):
    with OmeTiffWriter(resources_dir / filename, overwrite_file=True) as writer:
        writer.save(image, dimension_order=dims)

    reader = OmeTiffReader(resources_dir / filename)
    output = reader.data
    t = reader.size_t()
    c = reader.size_c()
    z = reader.size_z()
    y = reader.size_y()
    x = reader.size_x()

    os.remove(resources_dir / filename)

    assert output.shape == image.shape[1:]
    assert x == expected_x
    assert y == expected_y
    assert z == expected_z
    assert c == expected_c
    assert t == expected_t
def _generate_single_cell_images(
    row_index: int,
    row: pd.Series,
    cell_ceiling_adjustment: int,
    bounding_box: np.ndarray,
    projection_method: str,
    cell_images_3d_dir: Path,
    cell_images_2d_all_proj_dir: Path,
    cell_images_2d_yx_proj_dir: Path,
    overwrite: bool,
) -> Union[CellImagesResult, CellImagesError]:
    # Don't use dask for image reading
    aicsimageio.use_dask(False)

    # Get the ultimate end save paths for this cell
    cell_image_3d_save_path = cell_images_3d_dir / f"{row.CellId}.ome.tiff"
    cell_image_2d_all_proj_save_path = (cell_images_2d_all_proj_dir /
                                        f"{row.CellId}.png")
    cell_image_2d_yx_proj_save_path = (cell_images_2d_yx_proj_dir /
                                       f"{row.CellId}.png")

    # Check skip
    if (not overwrite
            # Only skip if all images exist for this cell
            and all(p.is_file() for p in [
                cell_image_3d_save_path,
                cell_image_2d_all_proj_save_path,
                cell_image_2d_yx_proj_save_path,
            ])):
        log.info(
            f"Skipping single cell image generation for CellId: {row.CellId}")
        return CellImagesResult(
            row.CellId,
            cell_image_3d_save_path,
            cell_image_2d_all_proj_save_path,
            cell_image_2d_yx_proj_save_path,
        )

    # Overwrite or didn't exist
    log.info(
        f"Beginning single cell image generation for CellId: {row.CellId}")

    # Wrap errors for debugging later
    try:
        # Initialize image object with standardized FOV
        standardized_image = AICSImage(row.StandardizedFOVPath)
        channels = standardized_image.get_channel_names()

        # Preload image data
        standardized_image.data

        # Select and adjust cell shape ceiling for this cell
        image = image_utils.select_and_adjust_segmentation_ceiling(
            # Unlike most other operations, we can read in normal "CZYX" dimension
            # order here as all future operations are expecting it
            image=standardized_image.get_image_data("CYXZ", S=0, T=0),
            cell_index=row.CellIndex,
            cell_ceiling_adjustment=cell_ceiling_adjustment,
        )

        # Perform a rigid registration on the image
        image, _, _ = proc.cell_rigid_registration(
            image,
            # Reorder bounding box as image is currently CYXZ
            bbox_size=bounding_box[[0, 2, 3, 1]],
        )

        # Reduce size
        crop_3d = image * 255
        crop_3d = crop_3d.astype(np.uint8)

        # Transpose to CZYX for saving
        crop_3d = transforms.transpose_to_dims(crop_3d, "CYXZ", "CZYX")

        # Save to OME-TIFF
        with OmeTiffWriter(cell_image_3d_save_path,
                           overwrite_file=True) as writer:
            writer.save(
                crop_3d,
                dimension_order="CZYX",
                channel_names=standardized_image.get_channel_names(),
                pixels_physical_size=standardized_image.get_physical_pixel_size(),
            )

        # Generate 2d image projections
        # Crop raw channels using segmentations
        image = image_utils.crop_raw_channels_with_segmentation(image, channels)

        # Transpose to CZYX for projections
        image = transforms.transpose_to_dims(image, "CYXZ", "CZYX")

        # Select the DNA, Membrane, and Structure channels
        image = image[[
            channels.index(target)
            for target in [Channels.DNA, Channels.Membrane, Channels.Structure]
        ]]

        # Set RGB colors
        # This will set:
        # DNA to Blue
        # Membrane to Red
        # Structure to Green
        colors = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]

        # Get all axes projection image
        all_proj = proc.imgtoprojection(
            image,
            proj_all=True,
            proj_method=projection_method,
            local_adjust=False,
            global_adjust=True,
            colors=colors,
        )

        # Convert to YXC for PNG writing
        all_proj = transforms.transpose_to_dims(all_proj, "CYX", "YXC")

        # Drop size to uint8
        all_proj = all_proj.astype(np.uint8)

        # Save to PNG
        imwrite(cell_image_2d_all_proj_save_path, all_proj)

        # Get YX axes projection image
        yx_proj = proc.imgtoprojection(
            image,
            proj_all=False,
            proj_method=projection_method,
            local_adjust=False,
            global_adjust=True,
            colors=colors,
        )

        # Convert to YXC for PNG writing
        yx_proj = transforms.transpose_to_dims(yx_proj, "CYX", "YXC")

        # Drop size to uint8
        yx_proj = yx_proj.astype(np.uint8)

        # Save to PNG
        imwrite(cell_image_2d_yx_proj_save_path, yx_proj)

        log.info(
            f"Completed single cell image generation for CellId: {row.CellId}")

        # Return ready to save image
        return CellImagesResult(
            row.CellId,
            cell_image_3d_save_path,
            cell_image_2d_all_proj_save_path,
            cell_image_2d_yx_proj_save_path,
        )

    # Catch and return error
    except Exception as e:
        log.info(
            f"Failed single cell image generation for CellId: {row.CellId}. "
            f"Error: {e}")
        return CellImagesError(row.CellId, str(e))
img_rna_smooth = image_smoothing_gaussian_3d(imgs_rna[i], sigma=1)
s3_param = [[1, 0.75 * t_spots[i]], [2, 0.75 * t_spots[i]]]

print('Identifying spots...')
bw = dot_3d_wrapper(img_rna_smooth, s3_param)

# watershed
minArea = 4
Mask = morphology.remove_small_objects(bw > 0,
                                       min_size=minArea,
                                       connectivity=1,
                                       in_place=False)
labeled_mask = measure.label(Mask)

print('Performing watershed segmentation...')
peaks = feature.peak_local_max(imgs_rna[i],
                               labels=labeled_mask,
                               min_distance=2,
                               indices=False)
Seed = morphology.binary_dilation(peaks, selem=morphology.ball(1))
Watershed_Map = -1 * distance_transform_edt(bw)
seg = morphology.watershed(Watershed_Map,
                           measure.label(Seed),
                           mask=Mask,
                           watershed_line=True)
seg = morphology.remove_small_objects(seg > 0,
                                      min_size=minArea,
                                      connectivity=1,
                                      in_place=False)

print('Exporting mask...')
outname = img_name + '_mask^' + g + '.tiff'
p_out = os.path.join(outdir, 'masks', outname)
seg = seg > 0
out = seg.astype(np.uint8)
out[out > 0] = 255
writer = OmeTiffWriter(p_out, overwrite_file=True)
writer.save(out)

# output animation
print('Exporting animation...')
anim_name = img_name + '_mask^' + g + '.gif'
su.animate_zstacks(
    [imgs_rna[i].transpose(1, 2, 0),
     seg.astype(int).transpose(1, 2, 0)],
    vmax=[0.2, 1],
    gif_name=os.path.join(outdir, 'anim', anim_name))
def execute(self, args):
    global draw_mask

    # part 1: do sorting
    df = pd.read_csv(args.csv_name, index_col=False)

    for index, row in df.iterrows():
        if not np.isnan(row['score']) and (row['score'] == 1 or row['score'] == 0):
            continue

        reader = AICSImage(row['raw'])
        struct_img = reader.get_image_data("ZYX", S=0, T=0, C=args.input_channel)
        struct_img[struct_img > 5000] = struct_img.min()

        # adjust contrast
        raw_img = (struct_img - struct_img.min() + 1e-8) / \
            (struct_img.max() - struct_img.min() + 1e-8)
        raw_img = 255 * raw_img
        raw_img = raw_img.astype(np.uint8)

        seg = np.squeeze(imread(row['seg']))

        score = gt_sorting(raw_img, seg)
        if score == 1:
            df['score'].iloc[index] = 1

            need_mask = input('Do you need to add a mask for this image, enter y or n: ')
            if need_mask == 'y':
                create_mask(raw_img, seg.astype(np.uint8))
                mask_fn = args.mask_path + os.sep + \
                    os.path.basename(row['raw'])[:-5] + '_mask.tiff'
                crop_mask = np.zeros(seg.shape, dtype=np.uint8)
                for zz in range(crop_mask.shape[0]):
                    crop_mask[zz, :, :] = draw_mask[:crop_mask.shape[1], :crop_mask.shape[2]]

                crop_mask = crop_mask.astype(np.uint8)
                crop_mask[crop_mask > 0] = 255
                with OmeTiffWriter(mask_fn) as writer:
                    writer.save(crop_mask)
                df['mask'].iloc[index] = mask_fn
        else:
            df['score'].iloc[index] = 0

    df.to_csv(args.csv_name, index=False)

    #########################################
    # part 2: generate training data
    # (we do this step after "sorting" mainly because we want to keep
    # the sorting step as smooth as possible, even though this may
    # waste i/o time on reloading images)
    #########################################
    print('finish merging, start building the training data ...')
    existing_files = glob(args.train_path + os.sep + 'img_*.ome.tif')
    print(len(existing_files))
    training_data_count = len(existing_files) // 3

    for index, row in df.iterrows():
        if row['score'] == 1:
            training_data_count += 1

            # load raw image
            reader = AICSImage(row['raw'])
            img = reader.get_image_data("CZYX", S=0, T=0,
                                        C=[args.input_channel]).astype(np.float32)
            struct_img = input_normalization(img, args)
            struct_img = struct_img[0, :, :, :]

            # load segmentation ground truth
            seg = np.squeeze(imread(row['seg'])) > 0.01
            seg = seg.astype(np.uint8)
            seg[seg > 0] = 1

            # load the excluding mask, if any
            cmap = np.ones(seg.shape, dtype=np.float32)
            if os.path.isfile(str(row['mask'])):
                mask = np.squeeze(imread(row['mask']))
                cmap[mask > 0] = 0

            with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                               f'{training_data_count:03}' + '.ome.tif') as writer:
                writer.save(struct_img)

            with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                               f'{training_data_count:03}' + '_GT.ome.tif') as writer:
                writer.save(seg)

            with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                               f'{training_data_count:03}' + '_CM.ome.tif') as writer:
                writer.save(cmap)

    print('training data is ready')
def field_worker(
    image_path,
    image_dims="CYX",
    out_dir=None,
    contrast_method="simple_quantile",
    contrast_kwargs=DEFAULT_CONTRAST_KWARGS,
    channels=DEFAULT_CHANNELS,
    channel_groups=DEFAULT_CHANNEL_GROUPS,
    verbose=False,
):
    r"""
    Process an entire field -- autocontrast + save, log info to df

    Args:
        image_path (str): location of input tiff image
        image_dims (str): input image dimension ordering, default="CYX"
        out_dir (str): where to save output images, default=None
        contrast_method (str): method for autocontrasting, default="simple_quantile"
        contrast_kwargs (dict): default=DEFAULT_CONTRAST_KWARGS
        channels (dict): {"name":index} map for input tiff, default=DEFAULT_CHANNELS
        channel_groups (dict): fluor/bf/seg grouping, default=DEFAULT_CHANNEL_GROUPS
        verbose (bool): print info while processing or not, default=False

    Returns:
        field_info_df (pd.DataFrame): info for each field, merged with info for each cell
    """
    # early exit if file does not exist -- return df with just the image path
    image_path = Path(image_path)
    if not image_path.is_file():
        return pd.DataFrame({"field_image_path": [image_path]})

    # set out dir if needed
    if out_dir is None:
        out_dir = Path.cwd()
    else:
        out_dir = Path(out_dir)
    out_dir = out_dir.absolute()

    # set out dir for rescaled images and create if needed
    output_field_image_dir = out_dir.joinpath("output_field_images")
    output_field_image_dir.mkdir(exist_ok=True)

    # contrast stretch the field channels
    Cmaxs, Cautos = read_and_contrast_image(
        image_path,
        contrast_method=contrast_method,
        contrast_kwargs=contrast_kwargs,
        channel_groups=channel_groups,
        image_dims=image_dims,
        verbose=verbose,
    )

    # save original and rescaled field data
    rescaled_out_path = output_field_image_dir.joinpath(
        f"{image_path.stem}_rescaled.ome.tiff")
    field_info_df = pd.DataFrame({
        "2D_fov_tiff_path": [image_path],
        "rescaled_2D_fov_tiff_path": [rescaled_out_path],
    })

    # Reshape prior to sending to save
    out_data = np.array(Cautos)
    out_data = transforms.reshape_data(out_data, "CYX", "TZCYX", S=0)

    # Write with channel metadata
    channel_names = [
        k for k, v in sorted(channels.items(), key=lambda kv: kv[1])
    ]
    with OmeTiffWriter(rescaled_out_path, overwrite_file=True) as writer:
        writer.save(out_data, channel_names=channel_names)

    # extract napari annotation channel and grab unique labels for cells
    label_image = Cautos[channels["cell"]]
    labels = np.unique(label_image)
    cell_labels = np.sort(labels[labels > 0])
    assert (cell_labels[0], cell_labels[-1]) == (1, len(cell_labels))
    if verbose:
        print(f"processing {image_path}")
        print(f"found {len(cell_labels)} segmented cells")

    # partial function for iterating over all cells in an image with map
    _cell_worker_partial = partial(
        cell_worker,
        Cautos=Cautos,
        label_channel="cell",
        field_image_path=image_path,
        out_dir=out_dir,
        channels=channels,
        verbose=verbose,
    )

    # iterate through all cells in an image
    all_cell_info_df = pd.concat(map(_cell_worker_partial, cell_labels),
                                 axis="rows",
                                 ignore_index=True)

    # merge cell-wise data into field-wise data
    all_cell_info_df["2D_fov_tiff_path"] = image_path
    field_info_df = field_info_df.merge(all_cell_info_df, how="inner")

    return field_info_df
def create_random_source_image():
    random_array = np.random.rand(*BASE_IMAGE_DIM)
    # write numpy array to .tiff file
    with OmeTiffWriter(TEST_IMG_DIR + "random_input.tiff") as writer:
        writer.save(random_array)
def test_big_tiff():
    x = np.zeros((10, 10))
    assert OmeTiffWriter._size_of_ndarray(data=x) == 10 * 10 * x.itemsize
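# A small companion sketch mirroring the check above on a larger array:
# _size_of_ndarray is expected to report the raw byte size (element count times
# item size) of the data handed to the writer; the shape below is arbitrary.
y = np.zeros((4, 50, 512, 512), dtype=np.uint16)
assert OmeTiffWriter._size_of_ndarray(data=y) == y.size * y.itemsize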
def _generate_standardized_fov_array(
    row_index: int,
    row: pd.Series,
    current_pixel_sizes: Optional[Tuple[float]],
    desired_pixel_sizes: Optional[Tuple[float]],
    save_dir: Path,
    overwrite: bool,
) -> Union[StandardizeFOVArrayResult, StandardizeFOVArrayError]:
    # Don't use dask for image reading
    aicsimageio.use_dask(False)

    # Get the ultimate end save path for this cell
    save_path = save_dir / f"{row.FOVId}.ome.tiff"

    # Check skip
    if not overwrite and save_path.is_file():
        log.info(f"Skipping standardized FOV generation for FOVId: {row.FOVId}")
        return StandardizeFOVArrayResult(row.FOVId, save_path)

    # Overwrite or didn't exist
    log.info(f"Beginning standardized FOV generation for FOVId: {row.FOVId}")

    # Wrap errors for debugging later
    try:
        # Get normalized image array
        normalized_img, channels, pixel_sizes = image_utils.get_normed_image_array(
            raw_image=row.SourceReadPath,
            nucleus_seg_image=row.NucleusSegmentationReadPath,
            membrane_seg_image=row.MembraneSegmentationReadPath,
            dna_channel_index=row.ChannelIndexDNA,
            membrane_channel_index=row.ChannelIndexMembrane,
            structure_channel_index=row.ChannelIndexStructure,
            brightfield_channel_index=row.ChannelIndexBrightfield,
            nucleus_seg_channel_index=row.ChannelIndexNucleusSegmentation,
            membrane_seg_channel_index=row.ChannelIndexMembraneSegmentation,
            current_pixel_sizes=current_pixel_sizes,
            desired_pixel_sizes=desired_pixel_sizes,
        )

        # Reshape data for serialization
        reshaped = transforms.transpose_to_dims(normalized_img, "CYXZ", "CZYX")

        # Save array as OME-TIFF
        with OmeTiffWriter(save_path, overwrite_file=True) as writer:
            writer.save(
                data=reshaped,
                dimension_order="CZYX",
                channel_names=channels,
                pixels_physical_size=pixel_sizes,
            )

        log.info(f"Completed standardized FOV generation for FOVId: {row.FOVId}")
        return StandardizeFOVArrayResult(row.FOVId, save_path)

    # Catch and return error
    except Exception as e:
        log.info(
            f"Failed standardized FOV generation for FOVId: {row.FOVId}. Error: {e}"
        )
        return StandardizeFOVArrayError(row.FOVId, str(e))
def cell_worker(
    cell_label_value,
    Cautos=[],
    label_channel="cell",
    field_image_path="unnamed_image_field.tiff",
    out_dir=None,
    channels=DEFAULT_CHANNELS,
    verbose=False,
):
    r"""
    Segment single cells + save, log info to df

    Args:
        cell_label_value (int): integer mask value in segmentation for this cell
        Cautos (list): auto-contrasted images from a single image field, default=[]
        label_channel (str): name of channel to use as the image mask, default="cell"
        field_image_path (str): location of the input tiff image field,
            default="unnamed_image_field.tiff"
        out_dir (str): where to save output images, default=None
        channels (dict): {"name":index} map for input tiff, default=DEFAULT_CHANNELS
        verbose (bool): print info while processing or not, default=False

    Returns:
        cell_info_df (pd.DataFrame): info for each cell
    """
    field_image_path = Path(field_image_path)

    if out_dir is None:
        out_dir = Path.cwd()
    else:
        out_dir = Path(out_dir)
    out_dir = out_dir.absolute()

    img_out_dir = out_dir.joinpath("output_single_cell_images")
    img_out_dir.mkdir(exist_ok=True)

    # crop to single cell boundaries
    label_image = Cautos[channels[label_channel]]
    y, x = np.where(label_image == cell_label_value)
    crop_slice = np.s_[min(y):max(y) + 1, min(x):max(x) + 1]
    label_crop = label_image[crop_slice]
    mask = (label_crop == cell_label_value).astype(np.float64)
    out_data_sc = np.array([
        img_as_ubyte_nowarn(rescale_intensity(c[crop_slice] * mask))
        for c in Cautos
    ])

    # Reshape prior to sending to save
    out_data_sc = transforms.reshape_data(out_data_sc, "CYX", "TZCYX", S=0)

    # save input field path, cell label value, and output single cell path
    out_image_path = img_out_dir.joinpath(
        f"{field_image_path.stem}_rescaled_cell{cell_label_value}.ome.tiff")
    cell_info_df = pd.DataFrame({
        "2D_fov_tiff_path": [field_image_path],
        "cell_label_value": [cell_label_value],
        "rescaled_2D_single_cell_tiff_path": [out_image_path],
    })

    # Write tiff with channel metadata
    channel_names = [
        k for k, v in sorted(channels.items(), key=lambda kv: kv[1])
    ]
    with OmeTiffWriter(out_image_path, overwrite_file=True) as writer:
        writer.save(out_data_sc, channel_names=channel_names)

    if verbose:
        print(f"cell_label_value = {cell_label_value}")

    return cell_info_df