def generate_transformed_cellcount(dataframe,
                                   dst,
                                   transformfiles,
                                   lightsheet_parameter_dictionary,
                                   verbose=False):
    '''Takes a dataframe of cell centers and generates an input for transformix

    Inputs
    ----------------
    dataframe = preloaded pandas dataframe of cell centers (z, y, x columns)
    dst = destination directory in which to save output files
    transformfiles = list of elastix transform files to apply, in the order of the original transform
    lightsheet_parameter_dictionary = .p file generated by the lightsheet package
    '''
    #set up locations
    transformed_dst = os.path.join(dst, 'transformed_points')
    makedir(transformed_dst)

    #make zyx numpy array
    zyx = dataframe[['z', 'y', 'x']].values

    #adjust for reorientation THEN rescaling; remember that the full-size data needs a dimension change relative to the resampled volume
    kwargs = load_dictionary(lightsheet_parameter_dictionary)
    vol = [xx for xx in kwargs['volumes'] if xx.ch_type == 'cellch'][0]
    fullsizedimensions = get_fullsizedims_from_kwargs(
        kwargs
    )  #do not use kwargs['volumes'][0].fullsizedimensions; it is unreliable - use this instead
    zyx = fix_contour_orientation(zyx, verbose=verbose,
                                  **kwargs)  #now in orientation of resample
    zyx = points_resample(
        zyx,
        original_dims=fix_dimension_orientation(fullsizedimensions, **kwargs),
        resample_dims=tifffile.imread(vol.resampled_for_elastix_vol).shape,
        verbose=verbose)[:, :3]

    #make into transformix-friendly text file
    pretransform_text_file = create_text_file_for_elastix(zyx, transformed_dst)

    #copy over elastix files
    transformfiles = modify_transform_files(transformfiles, transformed_dst)
    change_transform_parameter_initial_transform(transformfiles[0],
                                                 'NoInitialTransform')

    #run transformix on points
    points_file = point_transformix(pretransform_text_file, transformfiles[-1],
                                    transformed_dst)

    #convert registered points into structure counts
    converted_points = unpack_pnts(points_file, transformed_dst)

    return converted_points
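
# A minimal usage sketch (all paths below are hypothetical placeholders, not values from
# the original project; a real run uses the csv of detected cells, the
# elastix_inverse_transform parameter files, and the param_dict.p produced by the
# lightsheet pipeline, as in the overlay_qc example further down):
if __name__ == "__main__":
    import pandas as pd
    dataframe = pd.read_csv("/path/to/brain/cell_centers.csv")  # hypothetical csv with z, y, x columns
    transformfiles = ["/path/to/brain/elastix_inverse_transform/atlas2reg_TransformParameters.0.txt",
                      "/path/to/brain/elastix_inverse_transform/atlas2reg_TransformParameters.1.txt"]
    converted_points = generate_transformed_cellcount(dataframe,
                                                      "/path/to/output",
                                                      transformfiles,
                                                      "/path/to/brain/param_dict.p",
                                                      verbose=True)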
Example #2
def generate_transformed_cellcount(dataframe, dst, transformfiles, lightsheet_parameter_dictionary, verbose=False):
    """Function to take a csv file and generate an input to transformix
    
    Inputs
    ----------------
    dataframe = preloaded pandas dataframe
    dst = destination to save files
    transformfiles = list of all elastix transform files used, and in order of the original transform****
    lightsheet_parameter_file = .p file generated from lightsheet package
    """
    #set up locations
    transformed_dst = os.path.join(dst, "transformed_points"); makedir(transformed_dst)
    
    #make zyx numpy array
    zyx = dataframe[["z","y","x"]].values
    
    #adjust for reorientation THEN rescaling; remember that the full-size data needs a dimension change relative to the resampled volume
    fullsizedimensions = get_fullsizedimensions(lightsheet_parameter_dictionary)
    kwargs = load_kwargs(lightsheet_parameter_dictionary)
     
    zyx = fix_contour_orientation(zyx, verbose=verbose, **kwargs) #now in orientation of resample
    resampled_dims, resampled_vol = get_resampledvol_n_dimensions(lightsheet_parameter_dictionary)
    
    zyx = points_resample(zyx, original_dims = fix_dimension_orientation(fullsizedimensions, 
            **kwargs), resample_dims = resampled_dims, verbose = verbose)[:, :3]
         
    #make into transformix-friendly text file
    pretransform_text_file = create_text_file_for_elastix(zyx, transformed_dst)
        
    #copy over elastix files
    transformfiles = modify_transform_files(transformfiles, transformed_dst) 
    change_transform_parameter_initial_transform(transformfiles[0], "NoInitialTransform")
   
    #run transformix on points
    points_file = point_transformix(pretransform_text_file, transformfiles[-1], transformed_dst)
    
    #convert registered points into structure counts
    converted_points = unpack_pnts(points_file, transformed_dst)   
    
    return converted_points
Example #3
def generate_transformed_cellcount(points, dst, transformfiles):
    """ makes an input to transformix """
    #set up locations
    transformed_dst = os.path.join(dst, "transformed_points")
    makedir(transformed_dst)

    #make zyx numpy array
    zyx = np.asarray(points)

    transformfiles = modify_transform_files(transformfiles, transformed_dst)
    change_transform_parameter_initial_transform(transformfiles[0],
                                                 "NoInitialTransform")

    pretransform_text_file = create_text_file_for_elastix(zyx, transformed_dst)

    #run transformix on points
    points_file = point_transformix(pretransform_text_file, transformfiles[-1],
                                    transformed_dst)

    #convert registered points into structure counts
    converted_points = unpack_pnts(points_file, transformed_dst)

    return converted_points
    a2r0 = "/jukebox/scratch/zmd/save/contra_ipsi_projection_studies_20191125/20150804_tp_bl6_ts04/elastix_inverse_transform/TransformParameters.0.txt"
    a2r1 = "/jukebox/scratch/zmd/save/contra_ipsi_projection_studies_20191125/20150804_tp_bl6_ts04/elastix_inverse_transform/TransformParameters.1.txt"
    # r2s0 = "/jukebox/wang/pisano/tracing_output/bl6_ts/20150804_tp_bl6_ts04/elastix_inverse_transform/cellch_20150804_tp_bl6_ts04_555_z3um_70msec_3hfds/20150804_tp_bl6_ts04_555_z3um_70msec_3hfds_resized_ch00_resampledforelastix_atlas2reg2sig/reg2sig_TransformParameters.0.txt"
    # r2s1 = "/jukebox/LightSheetTransfer/tp/20200701_12_55_28_20170207_db_bl6_crii_rpv_01/elastix_inverse_transform/TransformParameters.1.txt"

    #set destination directory
    braindst = os.path.join(scratch_dir, os.path.basename(brain))

    makedir(braindst)

    aldst = os.path.join(braindst, "transformed_annotations")
    makedir(aldst)

    #transformix
    # transformfiles = modify_transform_files(transformfiles=[a2r0, a2r1, r2s0], dst = aldst)
    transformfiles = modify_transform_files(transformfiles=[a2r0, a2r1],
                                            dst=aldst)
    [change_interpolation_order(xx, 0) for xx in transformfiles]

    #change the parameter in the transform files that outputs 16bit images instead
    for fl in transformfiles:  # Read in the file
        with open(fl, "r") as file:
            filedata = file.read()
        # Replace the target string
        filedata = filedata.replace('(ResultImagePixelType "short")',
                                    '(ResultImagePixelType "float")')
        # Write the file out again
        with open(fl, "w") as file:
            file.write(filedata)
    #run transformix
    transformix_command_line_call(ann, aldst, transformfiles[-1])
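
# The read/replace/write pattern above reappears later in this collection with the pixel
# types reversed. A small helper along these lines (a sketch, not part of the original
# code) keeps the substitution in one place:
import re

def set_result_image_pixel_type(transformfile, pixel_type):
    """Rewrite the ResultImagePixelType entry of an elastix transform parameter file in place."""
    with open(transformfile, "r") as f:
        filedata = f.read()
    # swap whatever pixel type is currently set for the requested one
    filedata = re.sub(r'\(ResultImagePixelType "[^"]+"\)',
                      '(ResultImagePixelType "{}")'.format(pixel_type),
                      filedata)
    with open(transformfile, "w") as f:
        f.write(filedata)

# e.g. set_result_image_pixel_type(fl, "float") to keep float output,
# or set_result_image_pixel_type(fl, "short") for 16-bit output.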
Example #5
ann = tif.imread(
    "/jukebox/LightSheetTransfer/atlas/annotation_sagittal_atlas_20um_iso.tif")

dst = "/home/wanglab/Desktop/test1"

pixel_values = np.unique(ann).astype("uint16")

pixel_value = 136  #id that corresponds to structure

structure = np.asarray(
    np.where(ann == pixel_value)
)  #3 x N array of the z, y, x coordinates belonging to the structure

z = structure[0]
y = structure[1]
x = structure[2]

points = [[z[ii], y[ii], x[ii]]
          for ii in range(len(z))]  #these are the points you would want to map to atlas space

#transform files
r2atl0 = "/home/wanglab/mounts/wang/pisano/tracing_output/antero_4x/20160801_db_cri_02_1200rlow_52hr/elastix/20160801_db_cri_02_1200rlow_52hr_647_017na_1hfds_z7d5um_200msec_10povlp_resized_ch00/sig_to_reg/regtoatlas_TransformParameters.0.txt"
r2atl1 = "/home/wanglab/mounts/wang/pisano/tracing_output/antero_4x/20160801_db_cri_02_1200rlow_52hr/elastix/20160801_db_cri_02_1200rlow_52hr_647_017na_1hfds_z7d5um_200msec_10povlp_resized_ch00/sig_to_reg/regtoatlas_TransformParameters.1.txt"

cell2r0 = "/home/wanglab/mounts/wang/pisano/tracing_output/antero_4x/20160801_db_cri_02_1200rlow_52hr/elastix/20160801_db_cri_02_1200rlow_52hr_647_017na_1hfds_z7d5um_200msec_10povlp_resized_ch00/sig_to_reg/TransformParameters.0.txt"
cell2r1 = "/home/wanglab/mounts/wang/pisano/tracing_output/antero_4x/20160801_db_cri_02_1200rlow_52hr/elastix/20160801_db_cri_02_1200rlow_52hr_647_017na_1hfds_z7d5um_200msec_10povlp_resized_ch00/sig_to_reg/TransformParameters.1.txt"

transformfiles = [r2atl0, r2atl1, cell2r0, cell2r1]
transformfiles = modify_transform_files(transformfiles, dst)

converted_points = generate_transformed_cellcount(points, dst, transformfiles)
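
# A small follow-up sketch, mirroring what the overlay_qc example below does with this
# output: load the transformed points and tally how many land on each atlas voxel.
from collections import Counter
transformed_zyx = np.nan_to_num(np.load(converted_points))
voxel_counts = Counter((int(zz), int(yy), int(xx)) for zz, yy, xx in transformed_zyx[:, :3])
print("{} points mapped onto {} distinct atlas voxels".format(len(transformed_zyx), len(voxel_counts)))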
Example #6
def overlay_qc(args):  
    #unpacking this way for multiprocessing
    fld, folder_suffix, output_folder, verbose, doubletransform, make_volumes = args
    try:
        #get 3dunet cell dataframe csv file
        input_csv = listdirfull(os.path.join(fld, folder_suffix), ".csv")
        assert len(input_csv) == 1, "multiple csv files"
        dataframe = pd.read_csv(input_csv[0])
        
        #location to save out
        dst = os.path.join(output_folder, os.path.basename(fld)); makedir(dst)
    
        #EXAMPLE USING LIGHTSHEET - assumes cell centers were marked in the "raw" full-sized cell channel.
        #This will transform those centers into "atlas" space (in this case the moving image).
        #Here the "inverse" transform has the atlas as the moving image in the first step,
        #and the autofluorescence channel as the moving image in the second step.
        #NOTE - the registration of cell to auto seems to fail on occasion, hence the freshly generated files below...
        ################################
        cell_inverse_folder = listdirfull(os.path.join(fld, "elastix_inverse_transform"), "cellch")[0]
        a2r = listall(cell_inverse_folder, "atlas2reg_TransformParameters"); a2r.sort()
        r2s = listall(cell_inverse_folder, "reg2sig_TransformParameters"); r2s.sort() #possibly remove

        #IMPORTANT. the idea is to apply cfos->auto->atlas
        transformfiles = r2s + a2r if doubletransform else a2r #might get rid of r2s
    
        lightsheet_parameter_dictionary = os.path.join(fld, "param_dict.p")
            
        converted_points = generate_transformed_cellcount(dataframe, dst, transformfiles, 
                                                          lightsheet_parameter_dictionary, verbose=verbose)
    
        #load and convert to single voxel loc
        zyx = np.asarray([str((int(xx[0]), int(xx[1]), int(xx[2]))) for xx in np.nan_to_num(np.load(converted_points))])
        from collections import Counter
        zyx_cnt = Counter(zyx)
        
        #check...
        if make_volumes:
            #manually call transformix
            kwargs = load_dictionary(lightsheet_parameter_dictionary)
            vol = [xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0].resampled_for_elastix_vol
            transformed_vol = os.path.join(dst, "transformed_volume"); makedir(transformed_vol)
            if not doubletransform:
                transformfiles = [os.path.join(fld, "elastix/TransformParameters.0.txt"), os.path.join(fld, 
                                  "elastix/TransformParameters.1.txt")]
                transformfiles = modify_transform_files(transformfiles, transformed_vol) #copy over elastix files
                transformix_command_line_call(vol, transformed_vol, transformfiles[-1])
            else:
                v=[xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0]
                #sig to reg
                tps = [listall(os.path.dirname(v.ch_to_reg_to_atlas), "/TransformParameters.0")[0], 
                       listall(os.path.dirname(v.ch_to_reg_to_atlas), "/TransformParameters.1")[0]]
                #reg to atlas
                transformfiles = tps+[os.path.join(fld, "elastix/TransformParameters.0.txt"), 
                                      os.path.join(fld, "elastix/TransformParameters.1.txt")]
                transformfiles = modify_transform_files(transformfiles, transformed_vol) #copy over elastix files
                transformix_command_line_call(vol, transformed_vol, transformfiles[-1])
            

            #cell_registered channel
            cell_reg = tifffile.imread(os.path.join(transformed_vol, "result.tif"))
            tifffile.imsave(os.path.join(transformed_vol, "result.tif"), cell_reg, compress=1)
            cell_cnn = np.zeros_like(cell_reg)
            tarr = []; badlist=[]
            for zyx, v in zyx_cnt.items():
                z, y, x = [int(xx) for xx in zyx.replace("(", "").replace(")", "").split(",")]
                tarr.append([z, y, x])
                try:
                    cell_cnn[z, y, x] = v * 100
                except IndexError:
                    badlist.append([z, y, x])
                    
            #apply x y dilation
            r = 2
            selem = ball(r)[int(r/2)]
            cell_cnn = cell_cnn.astype("uint8")
            cell_cnn = np.asarray([cv2.dilate(cell_cnn[i], selem, iterations = 1) for i in range(cell_cnn.shape[0])])
            
            tarr=np.asarray(tarr)
            if len(badlist) > 0:
                print("{} errors in mapping with cell_cnn shape {}, each max dim {},\n"
                      "possibly due to a registration overshoot or not using double transform\n\n{}".format(
                          len(badlist), cell_cnn.shape, np.max(tarr, 0), badlist))
            merged = np.stack([cell_cnn, cell_reg, np.zeros_like(cell_reg)], -1)
            tifffile.imsave(os.path.join(transformed_vol, "merged.tif"), merged)#, compress=1)
            #out = np.concatenate([cell_cnn, cell_reg, ], 0)
        
        #####check at the resampled for elastix phase before transform...this mapping looks good...
        if make_volumes:
            #make zyx numpy arry
            zyx = dataframe[["z","y","x"]].values
            kwargs = load_dictionary(lightsheet_parameter_dictionary)
            vol = [xx for xx in kwargs["volumes"] if xx.ch_type =="cellch"][0]
            fullsizedimensions = get_fullsizedims_from_kwargs(kwargs) #do not use kwargs["volumes"][0].fullsizedimensions; it is unreliable - use this instead
            zyx = fix_contour_orientation(zyx, verbose=verbose, **kwargs) #now in orientation of resample
            zyx = points_resample(zyx, original_dims = fix_dimension_orientation(fullsizedimensions, **kwargs), 
                                  resample_dims = tifffile.imread(vol.resampled_for_elastix_vol).shape, verbose = verbose)[:, :3]
            
            #cell channel
            cell_ch = tifffile.imread(vol.resampled_for_elastix_vol)
            cell_cnn = np.zeros_like(cell_ch)
            tarr = []; badlist=[]
            for _zyx in zyx:
                z,y,x = [int(xx) for xx in _zyx]
                tarr.append([z,y,x])
                try:
                    cell_cnn[z,y,x] = 100
                except IndexError:
                    badlist.append([z,y,x])
            tarr=np.asarray(tarr)        
            merged = np.stack([cell_cnn, cell_ch, np.zeros_like(cell_ch)], -1)
            tifffile.imsave(os.path.join(transformed_vol, "resampled_merged.tif"), merged)#, compress=1)
            
    except Exception as e:
        print(e)
        with open(error_file, "a") as err_fl:
            err_fl.write("\n\n{} {}\n\n".format(fld, e))
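
# How overlay_qc is meant to be driven (a sketch; the folder list, suffix, and output
# location below are hypothetical placeholders, not values from the original script).
# The single "args" tuple matches the unpacking at the top of the function, which is
# what makes it convenient to fan out with multiprocessing:
if __name__ == "__main__":
    import multiprocessing as mp
    flds = ["/path/to/processed/brain_01", "/path/to/processed/brain_02"]  # hypothetical processed-brain folders
    folder_suffix = "3dunet_output"                                        # hypothetical subfolder containing the cell csv
    output_folder = "/path/to/qc_output"                                   # hypothetical destination
    # (fld, folder_suffix, output_folder, verbose, doubletransform, make_volumes)
    iterlst = [(fld, folder_suffix, output_folder, True, False, True) for fld in flds]
    with mp.Pool(processes=4) as p:
        p.map(overlay_qc, iterlst)
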
#separate script fragment: transform ClearMap-detected cell coordinates from PMA to ABA atlas space
#(brains and transformfiles are defined earlier in the original script)
atl_dst = "/jukebox/wang/pisano/tracing_output/cfos/201701_cfos/clearmap_analysis/pma_to_aba_transformed_cells"
if not os.path.exists(atl_dst): os.mkdir(atl_dst)
#collect
for br in brains:
    arr = np.load(
        os.path.join(br,
                     "clearmap_cluster_output/cells_transformed_to_Atlas.npy"))
    #ClearMap saves points as x,y,z - reorder to z,y,x
    arr = np.array([arr[:, 2], arr[:, 1], arr[:, 0]]).T
    #make into transformix-friendly text file
    transformed_dst = os.path.join(atl_dst, os.path.basename(br))
    if not os.path.exists(transformed_dst): os.mkdir(transformed_dst)
    pretransform_text_file = create_text_file_for_elastix(arr, transformed_dst)
    #copy over elastix files
    trfm_fl = modify_transform_files(transformfiles, transformed_dst)
    change_transform_parameter_initial_transform(trfm_fl[0],
                                                 'NoInitialTransform')
    #run transformix on points
    points_file = point_transformix(pretransform_text_file, trfm_fl[-1],
                                    transformed_dst)
    #convert registered points into structure counts
    converted_points = unpack_pnts(points_file, transformed_dst)


#make into dataframe
def transformed_cells_to_allen(fld, ann, dst, fl_nm, id_table=df_pth):
    """ consolidating to one function bc then no need to copy/paste """
    dct = {}
    for fl in fld:
        converted_points = os.path.join(fl, "posttransformed_zyx_voxels.npy")
watl_for_pra = zoom(watl, (zf, yf, xf), order=1)

#save out the zoomed annotation volume
print("\nsaving zoomed volume...")
tif.imsave(os.path.join(src, "WHS_SD_rat_atlas_v3_annotation_for_pra_reg.tif"),
           watl_for_pra.astype("uint16"))

reg = os.path.join(src, "waxholm_to_pra")
a2r = [os.path.join(reg, xx) for xx in os.listdir(reg) if "Transform" in xx]
a2r.sort()

dst = os.path.join(src, "transformed_annotation_volume")
makedir(dst)

#transformix
transformfiles = modify_transform_files(transformfiles=a2r, dst=dst)
[change_interpolation_order(xx, 0) for xx in transformfiles]

#change the output pixel type in the transform files so transformix writes 16-bit (short) images
for fl in transformfiles:  # Read in the file
    with open(fl, "r") as file:
        filedata = file.read()
    # Replace the target string
    filedata = filedata.replace('(ResultImagePixelType "float")',
                                '(ResultImagePixelType "short")')
    # Write the file out again
    with open(fl, "w") as file:
        file.write(filedata)
#run transformix
transformix_command_line_call(
    os.path.join(src, "WHS_SD_rat_atlas_v3_annotation_for_pra_reg.tif"), dst,