def apply_extract_features_cp(well_tile, filepattern):
    """Extract CellProfiler-style phenotype features for one (well, tile).

    Reads the stacked phenotype image plus the nuclei/cell segmentation
    masks located via `filepattern`, runs `Snake._extract_phenotype_cp`,
    and writes the resulting feature table to a CSV under 'process_ph'.

    Parameters
    ----------
    well_tile : sequence
        Two-element (well, tile) identifier used as filename wildcards.
    filepattern : dict
        Keyword template passed to `name()` to resolve input/output paths.
        Left unmodified; an updated copy is used internally.
    """
    wildcards = {'well': well_tile[0], 'tile': well_tile[1]}
    # Build an updated copy instead of mutating the caller's dict in place
    # (the original `filepattern.update(wildcards)` leaked the well/tile
    # keys back to the caller — hazardous when mapping over many tiles).
    filepattern = {**filepattern, **wildcards}

    stacked = read_hdf_image(name(filepattern))
    nuclei = read(name(filepattern, subdir='process_ph', tag='nuclei', ext='tif'))
    cells = read(name(filepattern, subdir='process_ph', tag='cells', ext='tif'))

    df_result = Snake._extract_phenotype_cp(
        data_phenotype=stacked,
        nuclei=nuclei,
        cells=cells,
        wildcards=wildcards,
        nucleus_channels=[0, 1, 2, 3],
        cell_channels=[0, 1, 2, 3],
        channel_names=['dapi', 'tubulin', 'gh2ax', 'phalloidin'],
    )
    df_result.to_csv(name(filepattern, subdir='process_ph', tag='cp_phenotype', ext='csv'))
def calculate_illumination_correction(files, smooth=None, rescale=True, threading=False, slicer=slice(None)):
    """Calculate an illumination correction field for use with the
    apply_illumination_correction Snake method.

    Equivalent to CellProfiler's CorrectIlluminationCalculate module with
    options "Regular", "All", "Median Filter".

    Note: algorithm originally benchmarked using ~250 images per plate to
    calculate plate-wise illumination correction functions
    (Singh et al. J Microscopy, 256(3):231-236, 2014).

    Parameters
    ----------
    files : list of str
        Image files to average into the correction field.
    smooth : int, optional
        Radius of the median-filter disk. Defaults (when falsy) to a disk
        whose area is ~1/20th of the image area.
    rescale : bool
        If True, divide each channel by a robust (2nd-percentile) minimum
        and clip below 1 so the field never darkens pixels.
    threading : bool
        If True, accumulate images with a shared-memory joblib thread pool.
    slicer : slice
        Applied to each image as it is read (e.g. to select channels).

    Returns
    -------
    numpy.ndarray
        The smoothed (and optionally rescaled) correction field.
    """
    from ops.io import read_stack as read

    N = len(files)
    # Accumulate the running mean in a closure variable rather than a module
    # global (the original used `global data`, which leaked state into the
    # module namespace and made concurrent calls unsafe). In-place `+=` keeps
    # a single shared array, which the sharedmem threading backend relies on.
    data = read(files[0])[slicer] / N

    def accumulate_image(file):
        nonlocal data
        data += read(file)[slicer] / N

    if threading:
        from joblib import Parallel, delayed
        # require='sharedmem' forces the threading backend so every worker
        # mutates the same accumulator array.
        Parallel(n_jobs=-1, require='sharedmem')(
            delayed(accumulate_image)(file) for file in files[1:])
    else:
        for file in files[1:]:
            accumulate_image(file)

    data = np.squeeze(data.astype(np.uint16))

    if not smooth:
        # default is ~1/20th area of image
        # smooth = (np.array(data.shape[-2:])/8).mean().astype(int)
        smooth = int(np.sqrt((data.shape[-1] * data.shape[-2]) / (np.pi * 20)))
    selem = skimage.morphology.disk(smooth)
    median_filter = ops.utils.applyIJ(skimage.filters.median)

    with warnings.catch_warnings():
        # skimage.filters.median with behavior='rank' warns on non-boolean
        # footprints / dtype conversions; suppress for a clean run.
        warnings.simplefilter("ignore")
        smoothed = median_filter(data, selem, behavior='rank')

    if rescale:
        @ops.utils.applyIJ
        def rescale_channels(data):
            # use 2nd percentile for robust minimum
            robust_min = np.quantile(data.reshape(-1), q=0.02)
            robust_min = 1 if robust_min == 0 else robust_min
            data = data / robust_min
            data[data < 1] = 1
            return data

        smoothed = rescale_channels(smoothed)

    return smoothed
def accumulate_image(file):
    # Add one image's per-file share into the module-level running mean.
    # NOTE(review): this module-level copy relies on globals `data`, `read`,
    # `slicer`, and `N` being set up by the caller (it mirrors the nested
    # accumulator in calculate_illumination_correction — confirm it is still
    # needed). `+=` mutates the shared array in place rather than rebinding.
    global data
    data += read(file)[slicer]/N
.askopenfilename(initialdir=dirname(label_file), title="Select intensity image (.tif)", filetypes=(("image files","*.tif"), ("all files","*.*") ) ) ) root_2.destroy() _,ext_2= splitext(filename_2) if ext_2 not in image_ext: raise Exception('intensity image file must be .tif or .tiff') else: img_file = filename_2 img_labels = read(label_file) df = (feature_table(img_labels, img_labels, features_basic) .assign(label_file=label_file,img_file=img_file) ) else: table_file = filename_1 raise Exception('input table file must be .csv or .hdf') elif len(sys.argv) == 2: filename_1 = sys.argv[1] _,ext_1= splitext(filename_1) if ext_1 not in table_ext: raise Exception('input table file must be .csv or .hdf') else: