def blend_lightsheets(flds, dst, cores, cleanup=False):
    """0=L, 1=R"""
    #make sure l and r are in the appropriate positions
    if np.any(["right_lightsheet" in xx for xx in flds]) and np.any(
            ["left_lightsheet" in xx for xx in flds]):
        l = [xx for xx in flds if "left_lightsheet" in xx][0]
        r = [xx for xx in flds if "right_lightsheet" in xx][0]
        flds = [l, r]
    st = time.time()
    name = os.path.basename(dst)
    ch = dst[-2:]
    sys.stdout.write("\nStarting blending of {}...".format(dst))
    sys.stdout.flush()
    ydim0, xdim0 = sitk.GetArrayFromImage(
        sitk.ReadImage(listall(flds[0], keyword=".tif")[0])).shape
    ydim1, xdim1 = sitk.GetArrayFromImage(
        sitk.ReadImage(listall(flds[1], keyword=".tif")[0])).shape
    #use the smaller of the two sheets' dimensions
    ydim = ydim0 if ydim0 < ydim1 else ydim1
    xdim = xdim0 if xdim0 < xdim1 else xdim1
    #alpha=np.tile(scipy.stats.logistic.cdf(np.linspace(-250, 250, num=xdim)), (ydim, 1))
    fls0 = listall(flds[0], keyword=".tif")
    fls0.sort()
    fls1 = listall(flds[1], keyword=".tif")
    fls1.sort()
    #iterate over the shorter list so every z plane has a partner
    fls = fls0 if len(fls0) < len(fls1) else fls1
    makedir(dst)
    #makedir(os.path.join(dst, name))
    iterlst = [{
        "xdim": xdim,
        "ydim": ydim,
        "fl0": fls0[i],
        "channel": ch,
        "fl1": fls1[i],
        "dst": dst,
        "name": name,
        "zplane": i
    } for i, fl0 in enumerate(fls)]
    if cores >= 2:
        p = mp.Pool(cores)
        p.map(blend, iterlst)
        p.terminate()
    else:
        [blend(dct) for dct in iterlst]
    if cleanup:
        [shutil.rmtree(xx) for xx in flds]
    sys.stdout.write("\n...finished in {} minutes.\n".format(
        np.round((time.time() - st) / 60, decimals=2)))
    sys.stdout.flush()
    return
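#Hypothetical usage sketch for the variant above: blend the left and right sheets of one
#channel into a single folder. The paths are placeholders; the function infers the channel
#tag from the last two characters of dst and pairs z planes by sorted filename order.
#left = "/scratch/brain1/left_lightsheet"    #hypothetical path
#right = "/scratch/brain1/right_lightsheet"  #hypothetical path
#blend_lightsheets([left, right], dst="/scratch/brain1/blended_ch00", cores=8)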
def resize(src, resizefactor, cores):
    """ src = fullsizedata_fld """
    #find files
    fls = listall(src, keyword=".tif")
    #calc resize
    y, x = tifffile.imread(fls[0], multifile=False).shape
    yr = int(y / resizefactor)
    xr = int(x / resizefactor)
    #set up dsts
    [
        makedir(
            os.path.join(os.path.dirname(src), xx[:-4] + "resized_" + xx[-4:]))
        for xx in os.listdir(src) if ".txt" not in xx
    ]
    #parallelize
    iterlst = [copy.deepcopy({"fl": fl, "xr": xr, "yr": yr}) for fl in fls]
    p = mp.Pool(cores)
    p.map(resize_helper, iterlst)
    p.terminate()
    return
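#Hypothetical usage sketch: downsample every .tif under a full-size data folder by a
#factor of 3 in x and y. The path is a placeholder; the per-file work is done in parallel
#by resize_helper (defined elsewhere in this module).
#resize("/scratch/brain1/full_sizedatafld/ch00", resizefactor=3, cores=8)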
def multiple_original_directory_structure(src, transfertype="move"):
    """
    src = "/home/wanglab/wang/pisano/tracing_output/retro_4x/20180312_jg_bl6f_prv_17/full_sizedatafld"
    """
    fls = listall(src, keyword="terastitcher_dct")
    for fl in fls:
        print(fl)
        original_directory_structure(fl, transfertype=transfertype)
    return
def move_images_after_stitching(ts_out, dst, channel):
    #make sure destination exists
    makedir(dst)
    imgs = listall(ts_out, '.tif')
    imgs.sort()
    #parallelize
    iterlst = [(i, img, dst, channel) for i, img in enumerate(imgs)]
    p = mp.Pool(6)
    p.starmap(move_images_after_stitching_par, iterlst)
    return dst
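#Hypothetical usage sketch: collect stitched terastitcher output planes into a destination
#folder for one channel. Both paths are placeholders; the per-image move is handled by
#move_images_after_stitching_par (defined elsewhere).
#stitched_dst = move_images_after_stitching("/scratch/brain1/terastitcher_output",
#                                           dst="/scratch/brain1/stitched_ch00",
#                                           channel=0)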
def blend_lightsheets(name, flds, dst, cores):
    '''Blend left (flds[0]) and right (flds[1]) lightsheet z planes for a single channel.'''
    sys.stdout.write('\nStarting blending of {}...'.format(name)); sys.stdout.flush()
    ydim, xdim = tifffile.imread(listall(flds[0], keyword='.tif')[0]).shape
    #logistic weighting across x: the left sheet dominates on the left, the right sheet on the right
    alpha = np.tile(scipy.stats.logistic.cdf(np.linspace(-250, 250, num=xdim)), (ydim, 1))
    fls0 = listall(flds[0], keyword='.tif'); fls0.sort()
    fls1 = listall(flds[1], keyword='.tif'); fls1.sort()
    assert set([os.path.basename(xx) for xx in fls0]) == set([os.path.basename(xx) for xx in fls1]), \
        'uneven number of z planes between L and R lightsheets'
    makedir(os.path.join(dst, name))
    iterlst = [{'alpha': alpha, 'fl0': fl0, 'fl1': fls1[i], 'dst': dst, 'name': name, 'zplane': i}
               for i, fl0 in enumerate(fls0)]
    if cores >= 2:
        p = mp.Pool(cores)
        p.map(blend, iterlst)
        p.terminate()
    else:
        [blend(dct) for dct in iterlst]
    [shutil.rmtree(xx) for xx in flds]
    sys.stdout.write('completed.\n'); sys.stdout.flush()
    return
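#Sketch of the per-plane blend implied by the alpha ramp above. alpha runs from ~0 at the
#left edge to ~1 at the right edge, so the left sheet dominates the left half of the image
#and the right sheet the right half, with a smooth logistic crossover in the middle. This
#is an illustrative assumption about what blend() does, not its verbatim implementation.
def _blend_plane_sketch(fl0, fl1, alpha):
    left = tifffile.imread(fl0)
    right = tifffile.imread(fl1)
    #crop both planes to a common size in case the sheets differ slightly
    ydim = min(left.shape[0], right.shape[0])
    xdim = min(left.shape[1], right.shape[1])
    blended = ((1 - alpha[:ydim, :xdim]) * left[:ydim, :xdim]
               + alpha[:ydim, :xdim] * right[:ydim, :xdim])
    return blended.astype(left.dtype)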
    print(e)

merged = np.stack([cnn_cellvolloaded, cellvolloaded,
                   np.zeros_like(cellvolloaded)], -1)
merged = np.swapaxes(merged, 0, 2)  #reorient to horizontal
tifffile.imsave(
    os.path.join(
        dst,
        'generate_downsized_overlay_{}_points_merged_resampled_for_elastix.tif'
        .format(os.path.basename(fld))), merged)

#EXAMPLE USING LIGHTSHEET - assumes marking centers in the 'raw' full sized cell channel.
#This will transform those centers into "atlas" space (in this case the moving image).
#In this case the inverse transform has the atlas as the moving image in the first step,
#and the autofluorescence channel as the moving image in the second step.
r2s0 = [
    xx for xx in listall(cellvol.inverse_elastixfld,
                         'reg2sig_TransformParameters.0.txt') if 'cellch' in xx
][0]
r2s1 = [
    xx for xx in listall(cellvol.inverse_elastixfld,
                         'reg2sig_TransformParameters.1.txt') if 'cellch' in xx
][0]
a2r0 = [
    xx for xx in listall(cellvol.inverse_elastixfld,
                         'atlas2reg2sig/atlas2reg_TransformParameters.0.txt') if 'cellch' in xx
][0]
a2r1 = [
    xx for xx in listall(cellvol.inverse_elastixfld,
                         'atlas2reg2sig/atlas2reg_TransformParameters.1.txt') if 'cellch' in xx
][0]
def overlay_qc(args):
    #unpacking this way for multiprocessing
    fld, folder_suffix, output_folder, verbose, doubletransform, make_volumes = args
    try:
        #get 3dunet cell dataframe csv file
        input_csv = listdirfull(os.path.join(fld, folder_suffix), ".csv")
        assert len(input_csv) == 1, "multiple csv files"
        dataframe = pd.read_csv(input_csv[0])

        #location to save out
        dst = os.path.join(output_folder, os.path.basename(fld)); makedir(dst)

        #EXAMPLE USING LIGHTSHEET - assumes marking centers in the "raw" full sized cell channel.
        #This will transform those centers into "atlas" space (in this case the moving image).
        #In this case the inverse transform has the atlas as the moving image in the first step,
        #and the autofluorescence channel as the moving image in the second step.
        #NOTE - it seems that the registration of cell to auto is failing on occasion....thus get new files...
        ################################
        cell_inverse_folder = listdirfull(os.path.join(fld, "elastix_inverse_transform"), "cellch")[0]
        a2r = listall(cell_inverse_folder, "atlas2reg_TransformParameters"); a2r.sort()
        r2s = listall(cell_inverse_folder, "reg2sig_TransformParameters"); r2s.sort()  #possibly remove

        #IMPORTANT. the idea is to apply cfos->auto->atlas
        transformfiles = r2s + a2r if doubletransform else a2r  #might get rid of r2s

        lightsheet_parameter_dictionary = os.path.join(fld, "param_dict.p")

        converted_points = generate_transformed_cellcount(dataframe, dst, transformfiles,
                                                          lightsheet_parameter_dictionary, verbose=verbose)

        #load and convert to single voxel loc
        zyx = np.asarray([str((int(xx[0]), int(xx[1]), int(xx[2])))
                          for xx in np.nan_to_num(np.load(converted_points))])
        from collections import Counter
        zyx_cnt = Counter(zyx)

        #check...
        if make_volumes:
            #manually call transformix
            kwargs = load_dictionary(lightsheet_parameter_dictionary)
            vol = [xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0].resampled_for_elastix_vol
            transformed_vol = os.path.join(dst, "transformed_volume"); makedir(transformed_vol)
            if not doubletransform:
                transformfiles = [os.path.join(fld, "elastix/TransformParameters.0.txt"),
                                  os.path.join(fld, "elastix/TransformParameters.1.txt")]
                transformfiles = modify_transform_files(transformfiles, transformed_vol)  #copy over elastix files
                transformix_command_line_call(vol, transformed_vol, transformfiles[-1])
            else:
                v = [xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0]
                #sig to reg
                tps = [listall(os.path.dirname(v.ch_to_reg_to_atlas), "/TransformParameters.0")[0],
                       listall(os.path.dirname(v.ch_to_reg_to_atlas), "/TransformParameters.1")[0]]
                #reg to atlas
                transformfiles = tps + [os.path.join(fld, "elastix/TransformParameters.0.txt"),
                                        os.path.join(fld, "elastix/TransformParameters.1.txt")]
                transformfiles = modify_transform_files(transformfiles, transformed_vol)  #copy over elastix files
                transformix_command_line_call(vol, transformed_vol, transformfiles[-1])

            #cell_registered channel
            cell_reg = tifffile.imread(os.path.join(transformed_vol, "result.tif"))
            tifffile.imsave(os.path.join(transformed_vol, "result.tif"), cell_reg, compress=1)
            cell_cnn = np.zeros_like(cell_reg)
            tarr = []; badlist = []
            for zyx, v in zyx_cnt.items():
                z, y, x = [int(xx) for xx in zyx.replace("(", "").replace(")", "").split(",")]
                tarr.append([z, y, x])
                try:
                    cell_cnn[z, y, x] = v*100
                except Exception:
                    badlist.append([z, y, x])
            #apply x y dilation
            r = 2
            selem = ball(r)[int(r/2)]
            cell_cnn = cell_cnn.astype("uint8")
            cell_cnn = np.asarray([cv2.dilate(cell_cnn[i], selem, iterations=1) for i in range(cell_cnn.shape[0])])
            tarr = np.asarray(tarr)
            if len(badlist) > 0:
                print("{} errors in mapping with cell_cnn shape {}, each max dim {}, "
                      "\npossibly due to a registration overshoot or not using double transform\n\n{}".format(
                          len(badlist), cell_cnn.shape, np.max(tarr, 0), badlist))
            merged = np.stack([cell_cnn, cell_reg, np.zeros_like(cell_reg)], -1)
            tifffile.imsave(os.path.join(transformed_vol, "merged.tif"), merged)  #, compress=1)
            #out = np.concatenate([cell_cnn, cell_reg, ], 0)

        #####check at the resampled for elastix phase before transform...this mapping looks good...
        if make_volumes:
            #make zyx numpy array
            zyx = dataframe[["z", "y", "x"]].values
            kwargs = load_dictionary(lightsheet_parameter_dictionary)
            vol = [xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0]
            fullsizedimensions = get_fullsizedims_from_kwargs(kwargs)  #don't get from kwargs["volumes"][0].fullsizedimensions; it's bad! use this instead
            zyx = fix_contour_orientation(zyx, verbose=verbose, **kwargs)  #now in orientation of resample
            zyx = points_resample(zyx,
                                  original_dims=fix_dimension_orientation(fullsizedimensions, **kwargs),
                                  resample_dims=tifffile.imread(vol.resampled_for_elastix_vol).shape,
                                  verbose=verbose)[:, :3]

            #cell channel
            cell_ch = tifffile.imread(vol.resampled_for_elastix_vol)
            cell_cnn = np.zeros_like(cell_ch)
            tarr = []; badlist = []
            for _zyx in zyx:
                z, y, x = [int(xx) for xx in _zyx]
                tarr.append([z, y, x])
                try:
                    cell_cnn[z, y, x] = 100
                except Exception:
                    badlist.append([z, y, x])
            tarr = np.asarray(tarr)
            merged = np.stack([cell_cnn, cell_ch, np.zeros_like(cell_ch)], -1)
            tifffile.imsave(os.path.join(transformed_vol, "resampled_merged.tif"), merged)  #, compress=1)

    except Exception as e:
        print(e)
        with open(error_file, "a") as err_fl:
            err_fl.write("\n\n{} {}\n\n".format(fld, e))
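#Hypothetical usage sketch: run overlay_qc over several brain folders in parallel. The
#source/output paths and the folder_suffix are placeholders; the tuple order matches the
#unpacking at the top of overlay_qc (fld, folder_suffix, output_folder, verbose,
#doubletransform, make_volumes).
#if __name__ == "__main__":
#    src = "/scratch/processed_brains"                                   #hypothetical
#    output_folder = "/scratch/overlay_qc"; makedir(output_folder)       #hypothetical
#    brain_folders = [os.path.join(src, xx) for xx in os.listdir(src)]
#    args_list = [(fld, "3dunet_output", output_folder, True, False, True)  #suffix is hypothetical
#                 for fld in brain_folders]
#    p = mp.Pool(6)
#    p.map(overlay_qc, args_list)
#    p.terminate()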
from collections import Counter
zyx_cnt = Counter(zyx)

#now overlay
for zyx, v in zyx_cnt.items():
    z, y, x = [int(xx) for xx in zyx.replace('(', '').replace(')', '').split(',')]
    try:
        cnn_cellvolloaded[z, y, x] = v*100
    except Exception as e:
        print(e)
merged = np.stack([cnn_cellvolloaded, cellvolloaded, np.zeros_like(cellvolloaded)], -1)
merged = np.swapaxes(merged, 0, 2)  #reorient to horizontal
tifffile.imsave(os.path.join(dst, '{}_points_merged_resampled_for_elastix.tif'.format(os.path.basename(fld))), merged)

#EXAMPLE USING LIGHTSHEET - assumes marking centers in the 'raw' full sized cell channel.
#This will transform those centers into "atlas" space (in this case the moving image).
#In this case the inverse transform has the atlas as the moving image in the first step,
#and the autofluorescence channel as the moving image in the second step.
r2s0 = [xx for xx in listall(cellvol.inverse_elastixfld, 'reg2sig_TransformParameters.0.txt') if 'cellch' in xx][0]
r2s1 = [xx for xx in listall(cellvol.inverse_elastixfld, 'reg2sig_TransformParameters.1.txt') if 'cellch' in xx][0]
a2r0 = [xx for xx in listall(cellvol.inverse_elastixfld, 'atlas2reg2sig/atlas2reg_TransformParameters.0.txt') if 'cellch' in xx][0]
a2r1 = [xx for xx in listall(cellvol.inverse_elastixfld, 'atlas2reg2sig/atlas2reg_TransformParameters.1.txt') if 'cellch' in xx][0]

if cnn_transform_type == 'all':
    transformfiles = [r2s0, r2s1, a2r0, a2r1]
elif cnn_transform_type == 'single':
    transformfiles = [a2r0, a2r1]
elif cnn_transform_type == 'affine_only_reg_to_sig':
    transformfiles = [r2s0, a2r0, a2r1]
transformfiles = modify_transform_files(transformfiles, dst=dst1)

#convert points
converted_points = generate_transformed_cellcount(dataframe, dst1, transformfiles,
                                                  lightsheet_parameter_dictionary=os.path.join(fld, 'param_dict.p'),
                                                  verbose=verbose)

#load and convert to single voxel loc
def find_location(src, dst=False, correspondence_type='post_elastix', verbose=False):
    '''
    Function to transform an excel sheet (e.g.: lightsheet/supp_files/sample_coordinate_to_location.xlsx)
    and output transformed locations. Suggestion is to use imagej to find XYZ coordinates to input into the excel sheet.

    Inputs
    ----------------
    src = excel sheet
    correspondence_type =
        'post_elastix': your coordinates are from the corresponding post-registered elastix file
                        (outputfolder/elastix/..../result....tif)
        'full_size_data': your coordinates are from the "full_sizedatafld" where:
                        Z == #### in 'file_name_Z####.tif'
                        X, Y are the pixel coordinates of that tif file

    Returns
    ----------------
    dst = (optional) output excel file. Ensure the path ends with '.xlsx'
    '''
    #from __future__ import division
    #import shutil, os, tifffile, cv2, numpy as np, pandas as pd, sys, SimpleITK as sitk
    #from tools.utils.io import listdirfull, load_kwargs, writer, makedir
    #from tools.conv_net.read_roi import read_roi, read_roi_zip
    from tools.registration.register import transformed_pnts_to_allen_helper_func
    from tools.registration.transform import structure_lister
    from tools.utils.io import load_kwargs, listdirfull, listall
    import SimpleITK as sitk
    import pandas as pd, numpy as np, os
    from skimage.external import tifffile

    if correspondence_type == 'post_elastix':
        print('This function assumes coordinates are from the corresponding post-registered elastix file.'
              '\nMake sure the excel file has number,<space>number,<space>number and not number,number,number')

        #inputs
        df = pd.read_excel(src)
        for brain in df.columns[1:]:
            print(brain)

            #load and find files
            kwargs = load_kwargs(df[brain][df['Inputs'] == 'Path to folder'][0])
            ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs['annotationfile']))

            #look up coordinates to pixel value
            xyz_points = np.asarray([(int(xx.split(',')[0]), int(xx.split(',')[1]), int(xx.split(',')[2]))
                                     for xx in df[brain][3:].tolist()])
            xyz_points = transformed_pnts_to_allen_helper_func(xyz_points, ann=ann, order='XYZ')

            #pixel id to structure
            if 'allen_id_table' in kwargs:
                structures = structure_lister(pd.read_excel(kwargs['allen_id_table']), *xyz_points)
            else:
                structures = structure_lister(pd.read_excel(kwargs['volumes'][0].allen_id_table), *xyz_points)

            #update dataframe
            df[brain + ' point transform'] = df[brain][:3].tolist() + [str(s.tolist()[0]) for s in structures]

        if not dst:
            dst = src[:-5] + '_output.xlsx'
        df.to_excel(dst)
        print('Saved as {}'.format(dst))

    if correspondence_type == 'full_size_data':
        from tools.imageprocessing.orientation import fix_dimension_orientation, fix_contour_orientation
        from tools.utils.directorydeterminer import pth_update
        from tools.registration.register import collect_points_post_transformix
        from tools.registration.transform import points_resample, points_transform
        print('This function assumes coordinates are from the corresponding "full_sizedatafld".'
              '\nMake sure the excel file has number,<space>number,<space>number and not number,number,number')

        #inputs
        df = pd.read_excel(src)
        for brain in df.columns[1:]:

            #load and find files
            kwargs = load_kwargs(df[brain][df['Inputs'] == 'Path to folder'][0])
            ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs['annotationfile']))
            ch_type = str(df[brain][df['Inputs'] == 'Channel Type'].tolist()[0])
            vol = [xx for xx in kwargs['volumes'] if xx.ch_type == ch_type][0]

            #look up coordinates to pixel value
            zyx_points = np.asarray([(int(xx.split(',')[2]), int(xx.split(',')[1]), int(xx.split(',')[0]))
                                     for xx in df[brain][3:].tolist()])

            #fix orientation
            zyx_points = fix_contour_orientation(np.asarray(zyx_points), verbose=verbose, **kwargs)

            #fix scaling
            trnsfmdpnts = points_resample(zyx_points,
                                          original_dims=fix_dimension_orientation(vol.fullsizedimensions, **kwargs),
                                          resample_dims=tifffile.imread(pth_update(vol.resampled_for_elastix_vol)).shape,
                                          verbose=verbose)

            #write out points for transformix
            transformfile = [xx for xx in listall(os.path.join(vol.inverse_elastixfld))
                             if os.path.basename(vol.full_sizedatafld_vol)[:-5] in xx
                             and 'atlas2reg2sig' in xx
                             and 'reg2sig_TransformParameters.1.txt' in xx][0]
            tmpdst = os.path.join(os.path.dirname(src), 'coordinate_to_location_tmp')
            output = points_transform(src=trnsfmdpnts[:, :3], dst=tmpdst, transformfile=transformfile, verbose=True)

            #collect from transformix
            xyz_points = collect_points_post_transformix(output)

            #now ID:
            pix_ids = transformed_pnts_to_allen_helper_func(xyz_points, ann=ann, order='XYZ')

            #pixel id to structure
            aid = kwargs['allen_id_table'] if 'allen_id_table' in kwargs else kwargs['volumes'][0].allen_id_table
            structures = structure_lister(pd.read_excel(aid), *pix_ids)

            #update dataframe
            df[brain + ' xyz points atlas space'] = df[brain][:3].tolist() + [str(tuple(zyx)) for zyx in xyz_points]
            df[brain + ' structures'] = df[brain][:3].tolist() + [str(s.tolist()[0]) for s in structures]

        if not dst:
            dst = src[:-5] + '_output.xlsx'
        df.to_excel(dst)
        print('Saved as {}'.format(dst))

    return
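#Hypothetical usage sketch: look up atlas structures for coordinates recorded against the
#post-registration elastix result. Both paths are placeholders; the sheet must follow the
#layout of lightsheet/supp_files/sample_coordinate_to_location.xlsx.
#find_location("/home/wanglab/coordinates/sample_coordinate_to_location.xlsx",
#              dst="/home/wanglab/coordinates/sample_coordinate_to_location_output.xlsx",
#              correspondence_type="post_elastix", verbose=True)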
jobid = int(os.environ["SLURM_ARRAY_TASK_ID"])

#list of brains
brains = [os.path.join(src, xx) for xx in os.listdir(src)]

#set brain name
brain = brains[jobid]
start = time.time()

kwargs = load_kwargs(brain)  #accessing parameter dictionary
cellvol = [xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0]
a2r0 = [xx for xx in listall(cellvol.inverse_elastixfld)
        if "atlas2reg_TransformParameters.0" in xx and "cellch" in xx][0]
a2r1 = [xx for xx in listall(cellvol.inverse_elastixfld)
        if "atlas2reg_TransformParameters.1" in xx and "cellch" in xx][0]
r2s0 = [xx for xx in listall(cellvol.inverse_elastixfld)
        if "reg2sig_TransformParameters.0" in xx and "cellch" in xx][0]
r2s1 = [xx for xx in listall(cellvol.inverse_elastixfld)
        if "reg2sig_TransformParameters.1" in xx and "cellch" in xx][0]

#set destination directory
braindst = os.path.join(scratch_dir, os.path.basename(brain) + "_dorsal")
makedir(braindst)
aldst = os.path.join(braindst, "transformed_annotations"); makedir(aldst)

#transformix
transformfiles = modify_transform_files(transformfiles=[a2r0, a2r1, r2s0, r2s1], dst=aldst)
[change_interpolation_order(xx, 0) for xx in transformfiles]  #change the parameter in the transform files that outputs 16bit images instead
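#Hedged sketch of the step this fragment is building toward: applying the chained inverse
#transforms to the annotation atlas with transformix so the labels land in cell-channel
#space. The annotation path taken from the parameter dictionary is an assumption here, and
#order-0 interpolation above is what keeps the integer label values intact; this is an
#assumed continuation, not the verbatim remainder of the script.
#annotation_vol = kwargs["annotationfile"]  #assumed source of the annotation volume
#transformix_command_line_call(annotation_vol, aldst, transformfiles[-1])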