Example #1
def cell_detect_qc_wrapper(src=False, **kwargs):
    '''Function to take in a path to a brain folder and output QC data
    
    src = destination of package (main folder)
    '''
    #load
    if src: kwargs.update(load_kwargs(src))
    if not src: kwargs.update(load_kwargs(**kwargs))

    #run for each cellch
    for cellch in [xx for xx in kwargs['volumes'] if xx.ch_type == 'cellch']:
        pass  #placeholder; the cell_detect call below is commented out
        #cell_detect(src = cellch.full_sizedatafld_vol, dst = os.path.join(os.path.dirname(os.path.dirname(cellch.full_sizedatafld_vol)), 'cells', os.path.basename(cellch.full_sizedatafld_vol)), **kwargs)

    return
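A minimal usage sketch, with a hypothetical processed-brain path:

#QC the detected cells of one processed brain (path is hypothetical)
cell_detect_qc_wrapper(src="/jukebox/wang/example_brain")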
def run_downsizing(pth, cores=12):
    """
    Function to take a whole-brain volume processed using BrainPipe and downsize it to a
    specific resolution in XY.
    Useful if you are trying to make an atlas with a specific final resolution (e.g. 25um in XYZ).
    """
    print(os.path.basename(pth))
    kwargs = load_kwargs(
        pth
    )  #note: this function specifically relies on the parameter dictionary made when processing
    #if you move the processed directory to another location, the paths initially saved in this dictionary will be incorrect
    #so suggest you do not move processed directories, or see the BrainPipe scripts on how to correct it if you did
    regvol = kwargs["volumes"][0]
    print(kwargs["volumes"])
    fszdt = regvol.full_sizedatafld_vol
    dst = os.path.join(
        pth, "downsized_for_atlas")  #set destination for downsized planes
    if not os.path.exists(dst): os.mkdir(dst)  #make dest directory
    plns = [
        os.path.join(fszdt, xx) for xx in os.listdir(fszdt) if "tif" in xx
    ]
    plns.sort()
    iterlst = [(pln, dst) for pln in plns]
    p = mp.Pool(cores)
    p.starmap(downsize, iterlst)
    p.close(); p.join()  #release the worker pool
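A usage sketch, assuming a processed brain folder (the path is hypothetical):

#downsize every plane of one processed brain; tune cores to the machine
run_downsizing("/jukebox/wang/example_brain", cores=8)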
Example #3
def detect_contours_in_3d_checker(jobid, pln_chnk=50, **kwargs):
    '''Not utilized yet
    '''
    kwargs = load_kwargs(**kwargs)
    outdr = kwargs['outputdirectory']
    vols = kwargs['volumes']
    reg_vol = [xx for xx in vols if xx.ch_type == 'regch'][0]
    ###set volume to use
    vol = [xx for xx in vols if xx.ch_type != 'regch'][jobid]
    if vol.ch_type == 'cellch':
        detect3dfld = reg_vol.celldetect3dfld
        coordinatesfld = reg_vol.cellcoordinatesfld
    elif vol.ch_type == 'injch':
        detect3dfld = reg_vol.injdetect3dfld
        coordinatesfld = reg_vol.injcoordinatesfld

    zmax = vols[0].fullsizedimensions[0]
    if len(os.listdir(detect3dfld)) != int(ceil(zmax / pln_chnk)):
        writer(
            outdr,
            '\n\n***************************STEP 4 FAILED*********************\n{} files found in {}. Should have {}.'
            .format(len(os.listdir(detect3dfld)),
                    detect3dfld[detect3dfld.rfind('/') + 1:],
                    int(ceil(zmax / pln_chnk))))
    else:
        writer(
            outdr,
            '\n\n***************************STEP 4 SUCCESS*********************\n{} files found in {}. Should have {}.'
            .format(len(os.listdir(detect3dfld)),
                    detect3dfld[detect3dfld.rfind('/') + 1:],
                    int(ceil(zmax / pln_chnk))))
    return
Example #4
def elastix_wrapper(jobid, cores=5, **kwargs):
    '''Wrapper to handle most registration operations.
    
    jobid = 
        0: 'normal registration'
        1: 'cellchannel inverse'
        2: 'injchannel inverse'
    '''
    #inputs
    kwargs = load_kwargs(**kwargs)
    sys.stdout.write('\nElastix in:\n')
    sys.stdout.flush()
    os.system('which elastix')

    #'normal' registration
    if jobid == 0: elastix_registration(jobid, cores=cores, **kwargs)

    #cellchannel inverse
    if jobid == 1:
        make_inverse_transform(
            [xx for xx in kwargs['volumes'] if xx.ch_type == 'cellch'][0],
            cores=cores,
            **kwargs)

    #injchannel inverse -- ##FIXME think about limiting the search to only the cerebellum
    if jobid == 2:
        #make inverse transform
        transformfile = make_inverse_transform(
            [xx for xx in kwargs['volumes'] if xx.ch_type == 'injch'][0],
            cores=cores,
            **kwargs)

        #detect injection site  ##FIXME need to define image and pass in appropriate thresh/filter-kernels
        inj = [xx for xx in kwargs['volumes'] if xx.ch_type == 'injch'][0]
        #array = find_site(inj.ch_to_reg_to_atlas+'/result.1.tif', thresh=10, filter_kernel=(5,5,5))

        array = find_site(inj.resampled_for_elastix_vol,
                          thresh=10,
                          filter_kernel=(5, 5, 5)).astype(int)

        #old version
        #array = inj_detect_using_labels(threshold = .15, resampledforelastix = True, num_labels_to_keep=1, show = False, save = True, masking = True, **kwargs)

        #apply resizing point transform
        txtflnm = point_transform_due_to_resizing(array,
                                                  chtype='injch',
                                                  **kwargs)

        #run transformix on points
        points_file = point_transformix(txtflnm, transformfile)

        #convert registered points into structure counts
        transformed_pnts_to_allen(points_file,
                                  ch_type='injch',
                                  point_or_index=None,
                                  **kwargs)

    return
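A hedged invocation sketch; jobid would typically come from a SLURM array index, and the output directory is hypothetical:

#run the standard registration (0), then the cell-channel inverse transform (1)
for jobid in (0, 1):
    elastix_wrapper(jobid, cores=5, outputdirectory="/jukebox/wang/example_brain")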
Example #5
def fill_params(expt_name, stepid, jobid):

    params = {}

    # slurm params
    params["stepid"] = stepid
    params["jobid"] = jobid

    # experiment params
    # going one folder up to get to fullsizedata
    params["expt_name"] = os.path.basename(
        os.path.abspath(os.path.dirname(expt_name)))

    # find cell channel tiff directory from parameter dict
    kwargs = load_kwargs(os.path.dirname(expt_name))
    vol = [vol for vol in kwargs["volumes"] if vol.ch_type == "cellch"][0]
    src = vol.full_sizedatafld_vol
    assert os.path.isdir(src), "nonexistent data directory"
    print("\n\n data directory: {}".format(src))

    params["cellch_dir"] = src
    params["scratch_dir"] = "/jukebox/scratch/ejdennis"
    params["data_dir"] = os.path.join(params["scratch_dir"],
                                      params["expt_name"])

    # changed paths after cnn run
    params["cnn_data_dir"] = os.path.join(params["scratch_dir"],
                                          params["expt_name"])
    params["cnn_dir"] = os.path.join(params["cnn_data_dir"],
                                     "output_chnks")  # set cnn patch directory
    params["reconstr_arr"] = os.path.join(params["cnn_data_dir"],
                                          "reconstructed_array.npy")
    params["output_dir"] = expt_name

    # pre-processing params
    params["dtype"] = "float32"
    params["cores"] = 8
    params["verbose"] = True
    params["cleanup"] = False

    # cnn window size for lightsheet =
    # typically 20, 192, 192 for 4x, 20, 32, 32 for 1.3x
    params["patchsz"] = (60, 3840, 3328)
    params["stridesz"] = (40, 3648, 3136)
    params["window"] = (20, 192, 192)

    params["inputshape"] = get_dims_from_folder(src)
    params["patchlist"] = make_indices(params["inputshape"],
                                       params["stridesz"])

    # post-processing params
    params["threshold"] = (0.5, 1)  # h129 = 0.6; prv = 0.85
    params["zsplt"] = 30
    params["ovlp_plns"] = 30

    return params
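A usage sketch; the experiment path is hypothetical, and stepid/jobid usually come from the cluster scheduler:

params = fill_params("/jukebox/wang/example_brain/cells", stepid=0, jobid=0)
print(params["inputshape"], len(params["patchlist"]))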
Example #6
def fix_orientation(imstack, axes=None, verbose=False, **kwargs):
    '''Function to fix the orientation of an imaging series relative to the atlas. Note: assumes the axes input is a tuple of STRINGS, not ints.
    This allows for '-0', which is needed if one wants to reverse the x axis.
    Assumes XYZ orientation ('0','1','2'). To reverse the order of an axis, add a '-'. E.g., to flip the Y axis: ('0', '-1', '2').
    
    Order of operations is reversing of axes BEFORE swapping axes. E.g. ('2','-0','1') means: reverse the X axis, then move X->Y, Z->X, Y->Z.
    
    Inputs
    --------------------
        imstack: np.array of image
        axes (optional): tuple of strs ('0','1','2')
        verbose (optional): gives information about shape before and after
        kwargs (optional): will look for kwargs['finalorientation'] = ('0','1','2')
        
    Returns
    --------------------
        imstack: reoriented numpy array
        
    '''
    #handle inputs
    if not axes:
        try:
            axes = kwargs['finalorientation']
        except KeyError:
            #reload param dictionary from original run_tracing file and update kwargs
            import tools.utils.io as io
            sys.path.append(kwargs['packagedirectory'])
            import run_tracing
            kwargs.update(run_tracing.params)
            io.save_kwargs(**kwargs)
            #reload kwargs now that it has been updated
            kwargs = io.load_kwargs(kwargs['outputdirectory'])
            axes = kwargs['finalorientation']

    #verbosity:
    if verbose:
        shape_before = imstack.shape
        origax = axes

    #handle reversing of an axis
    imstack = reverse_axis(imstack, axes)

    #change from XYZ to ZYX (np's) convention and remove negatives from axes and change to ints now that axes have been flipped
    axes = [abs(2 - abs(int(xx))) for xx in axes][::-1]

    #swap axes:
    imstack = imstack.transpose(axes)

    #verbosity:
    if verbose:
        sys.stdout.write(
            '\n\nfix_orientation function:\n   "finalorientation" command (xyz): {}\n   Shape before(xyz): {}\n   Shape after(xyz): {}\n\n'
            .format(origax, shape_before[::-1], imstack.shape[::-1]))
        sys.stdout.flush()
    return imstack
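A small shape check of the axes convention above, assuming fix_orientation (and its reverse_axis helper) are importable:

import numpy as np

im = np.zeros((2, 3, 4))  #numpy ZYX order: z=2, y=3, x=4

#identity orientation leaves the stack untouched
assert fix_orientation(im, axes=("0", "1", "2")).shape == (2, 3, 4)

#('0','-1','2') only reverses Y before an identity transpose, so the shape is unchanged
assert fix_orientation(im, axes=("0", "-1", "2")).shape == (2, 3, 4)

#('2','1','0') swaps X and Z, so the shape reverses
assert fix_orientation(im, axes=("2", "1", "0")).shape == (4, 3, 2)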
Example #7
def terastitcher_from_params(**params):
    """
    """
    assert params["stitchingmethod"] in [
        "terastitcher", "Terastitcher", "TeraStitcher"
    ]
    kwargs = pth_update(load_kwargs(**params))
    kwargs["cores"] = params["cores"] if "cores" in params else 12
    terastitcher_wrapper(**kwargs)

    return
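A usage sketch with a hypothetical parameter dict:

params = {"stitchingmethod": "terastitcher",
          "outputdirectory": "/jukebox/wang/example_brain",  #hypothetical
          "cores": 12}
terastitcher_from_params(**params)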
Example #8
def get_resampledvol_n_dimensions(dct):
    """ works around param dict in case paths were missaved """
    try:
        kwargs = load_kwargs(dct)
        vol = [xx for xx in kwargs["volumes"] if xx.ch_type =="cellch"][0]
        resampled_vol = vol.resampled_for_elastix_vol
        resampled_dims = tifffile.imread(resampled_vol).shape        
    except FileNotFoundError:
        fls = listdirfull(os.path.dirname(dct), ".tif"); fls.sort()
        resampled_vol = fls[-1] #will be the last one, bc of the 647 channel
        resampled_dims = tifffile.imread(resampled_vol).shape
        
    return resampled_dims, resampled_vol
Example #9
def get_fullsizedimensions(dct):
    """ works around param dict in case paths were missaved """    
    try:
        kwargs = load_kwargs(dct)
        vol = [xx for xx in kwargs["volumes"] if xx.ch_type =="cellch"][0]
        zf = len(listdirfull(vol.full_sizedatafld_vol, ".tif"))
        yf,xf = tifffile.imread(listdirfull(vol.full_sizedatafld_vol, "tif")[0]).shape
        fullsizedimensions = (zf, yf, xf)
    except Exception: #if param dict is messed up
        fsz = os.path.join(os.path.dirname(dct), "full_sizedatafld")
        vols = os.listdir(fsz); vols.sort()
        src = os.path.join(fsz, vols[-1]) #hack - try to load param_dict instead?
        if not os.path.isdir(src): src = os.path.join(fsz, vols[-2])
        zf = len(listdirfull(src, ".tif"))
        yf,xf = tifffile.imread(listdirfull(src, "tif")[0]).shape
        fullsizedimensions = (zf, yf, xf)
    
    return fullsizedimensions
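A usage sketch for the two dimension helpers above (the param_dict path is hypothetical):

param_dict = "/jukebox/wang/example_brain/param_dict.p"
resampled_dims, resampled_vol = get_resampledvol_n_dimensions(param_dict)
zf, yf, xf = get_fullsizedimensions(param_dict)
print(resampled_dims, (zf, yf, xf))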
Example #10
def correct_kwargs(src): 
    '''Temporary adjustment to correct kwargs after setting up folders in step 0 locally.
    
    Input: source path of output directory
    '''
    #import kwargs
    kwargs=load_kwargs(src) 
    
    #change packagedirectory (which incorrectly defaults to a lightsheet_copy path)
    kwargs['packagedirectory'] = os.path.join(src,'lightsheet')
    
    kwargs['parameterfolder'] = os.path.join(src,'lightsheet/parameterfolder')
    
    #save kwargs
    save_kwargs(os.path.join(src, 'param_dict.p'), **kwargs)
    
    #return kwargs to check
    return kwargs
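A quick usage sketch (the output directory is hypothetical):

kwargs = correct_kwargs("/jukebox/wang/example_brain")
print(kwargs["packagedirectory"])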
Example #11
def generate_transformed_cellcount(dataframe, dst, transformfiles, lightsheet_parameter_dictionary, verbose=False):
    """Function to take a csv file and generate an input to transformix
    
    Inputs
    ----------------
    dataframe = preloaded pandas dataframe
    dst = destination to save files
    transformfiles = list of all elastix transform files used, and in order of the original transform****
    lightsheet_parameter_dictionary = path to the .p parameter file generated by the lightsheet package
    """
    #set up locations
    transformed_dst = os.path.join(dst, "transformed_points"); makedir(transformed_dst)
    
    #make zyx numpy array
    zyx = dataframe[["z","y","x"]].values
    
    #adjust for reorientation THEN rescaling; remember full size data needs a dimension change relative to the resampled volume
    fullsizedimensions = get_fullsizedimensions(lightsheet_parameter_dictionary)
    kwargs = load_kwargs(lightsheet_parameter_dictionary)
     
    zyx = fix_contour_orientation(zyx, verbose=verbose, **kwargs) #now in orientation of resample
    resampled_dims, resampled_vol = get_resampledvol_n_dimensions(lightsheet_parameter_dictionary)
    
    zyx = points_resample(zyx, original_dims = fix_dimension_orientation(fullsizedimensions, 
            **kwargs), resample_dims = resampled_dims, verbose = verbose)[:, :3]
         
    #make into transformix-friendly text file
    pretransform_text_file = create_text_file_for_elastix(zyx, transformed_dst)
        
    #copy over elastix files
    transformfiles = modify_transform_files(transformfiles, transformed_dst) 
    change_transform_parameter_initial_transform(transformfiles[0], "NoInitialTransform")
   
    #run transformix on points
    points_file = point_transformix(pretransform_text_file, transformfiles[-1], transformed_dst)
    
    #convert registered points into structure counts
    converted_points = unpack_pnts(points_file, transformed_dst)   
    
    return converted_points
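A hedged end-to-end sketch; all paths and the transform-file list are hypothetical:

import pandas as pd

dataframe = pd.read_csv("/jukebox/wang/example_brain/3dunet_output/cells.csv")
transformfiles = ["/jukebox/wang/example_brain/elastix/TransformParameters.0.txt",
                  "/jukebox/wang/example_brain/elastix/TransformParameters.1.txt"]
converted_points = generate_transformed_cellcount(
    dataframe, "/jukebox/wang/example_brain/qc", transformfiles,
    "/jukebox/wang/example_brain/param_dict.p", verbose=True)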
def transformed_pnts_to_allen(points_file,
                              ann,
                              ch_type="injch",
                              point_or_index=None,
                              allen_id_table_pth=False,
                              **kwargs):
    """function to take elastix point transform file and return anatomical locations of those points
    point_or_index=None/point/index: determines which transformix output to use: point is more accurate, index is pixel value(?)
    Elastix uses the xyz convention rather than the zyx numpy convention
    NOTE: this modification does not output out a single excel file, but a data frame
    
    Inputs
    -----------
    points_file = path to the transformix output points text file
    ch_type = "injch" or "cellch"
    allen_id_table_pth (optional) pth to allen_id_table
    ann = annotation file
    
    Returns
    -----------
    df = data frame containing voxel counts
    
    """
    kwargs = load_kwargs(**kwargs)
    #####inputs
    assert type(points_file) == str

    if point_or_index is None:
        point_or_index = "OutputPoint"
    elif point_or_index == "point":
        point_or_index = "OutputPoint"
    elif point_or_index == "index":
        point_or_index = "OutputIndexFixed"

    #
    vols = kwargs["volumes"]
    reg_vol = [xx for xx in vols if xx.ch_type == "regch"][0]

    ####load files
    if not allen_id_table_pth:
        allen_id_table = pd.read_excel(
            os.path.join(reg_vol.packagedirectory,
                         "supp_files/allen_id_table.xlsx")
        )  ##use for determining neuroanatomical locations according to allen
    else:
        allen_id_table = pd.read_excel(allen_id_table_pth)

    #get points
    with open(points_file, "r") as f:
        lines = f.readlines()
        f.close()

    #####populate post-transformed array of contour centers
    sys.stdout.write("\n\n{} points detected\n\n".format(len(lines)))
    arr = np.empty((len(lines), 3))
    for i in range(len(lines)):
        arr[i,
            ...] = lines[i].split()[lines[i].split().index(point_or_index) +
                                    3:lines[i].split().index(point_or_index) +
                                    6]  #x,y,z

    pnts = transformed_pnts_to_allen_helper_func(arr, ann)
    pnt_lst = [xx for xx in pnts if xx != 0]

    #check to see if any points were found
    if len(pnt_lst) == 0:
        raise ValueError("pnt_lst is empty")
    else:
        sys.stdout.write("\nlen of pnt_lst({})\n\n".format(len(pnt_lst)))

    #generate dataframe with column
    df = count_structure_lister(allen_id_table, *pnt_lst)

    return df
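A usage sketch, assuming the annotation volume is loaded the same way as elsewhere in this file (paths hypothetical):

import SimpleITK as sitk

ann = sitk.GetArrayFromImage(sitk.ReadImage("/jukebox/wang/example_annotation.nrrd"))
df = transformed_pnts_to_allen("/jukebox/wang/example_brain/outputpoints.txt", ann,
                               ch_type="injch",
                               outputdirectory="/jukebox/wang/example_brain")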
def pool_injections_inversetransform(**kwargs):
    """Function to pool several injection sites. 
    Assumes that the basic registration AND inverse transform using elastix has been run. 
    If not, runs inverse transform. Additions to analyze_injection.py and pool_injections_for_analysis().

    Inputs
    -----------
    kwargs:
      "inputlist": inputlist, #list of folders generated previously from software
      "channel": "01", 
      "channel_type": "injch",
      "filter_kernel": (5,5,5), #gaussian blur in pixels (if registered to ABA then 1px likely is 25um)
      "threshold": 10 (int, value to use for thresholding, this value represents the number of stand devs above the mean of the gblurred image)
      "num_sites_to_keep": #int, number of injection sites to keep, useful if multiple distinct sites
      "injectionscale": 45000, #use to increase intensity of injection site visualizations generated - DOES NOT AFFECT DATA
      "imagescale": 2, #use to increase intensity of background  site visualizations generated - DOES NOT AFFECT DATA
      "reorientation": ("2","0","1"), #use to change image orientation for visualization only
      "crop": #use to crop volume, values below assume horizontal imaging and sagittal atlas
                False
                cerebellum: "[:,390:,:]"
                caudal midbrain: "[:,300:415,:]"
                midbrain: "[:,215:415,:]"
                thalamus: "[:,215:345,:]"
                anterior cortex: "[:,:250,:]"
      
      "dst": "/home/wanglab/Downloads/test", #save location
      "save_individual": True, #optional to save individual images, useful to inspect brains, which you can then remove bad brains from list and rerun function
      "colormap": "plasma", 
      "atlas": "/jukebox/LightSheetTransfer/atlas/sagittal_atlas_20um_iso.tif", #whole brain atlas
      
      Optional:
          ----------
          "save_array": path to folder to save out numpy array per brain of binarized detected site
          "save_tif": saves out tif volume per brain of binarized detected site
          "dpi": dots per square inch to save at

      Returns
      ----------------
      a pooled image consisting of max IPs of the reorientations provided in kwargs.
      a list of structures (csv file) with pixel counts, pooling across brains.
      if save individual will save individual images, useful for inspection and/or visualization
    """

    inputlist = kwargs["inputlist"]
    dst = kwargs["dst"]
    makedir(dst)
    injscale = kwargs["injectionscale"] if "injectionscale" in kwargs else 1
    imagescale = kwargs["imagescale"] if "imagescale" in kwargs else 1
    axes = kwargs["reorientation"] if "reorientation" in kwargs else ("0", "1",
                                                                      "2")
    cmap = kwargs["colormap"] if "colormap" in kwargs else "plasma"
    save_array = kwargs["save_array"] if "save_array" in kwargs else False
    save_tif = kwargs["save_tif"] if "save_tif" in kwargs else False
    num_sites_to_keep = kwargs[
        "num_sites_to_keep"] if "num_sites_to_keep" in kwargs else 1
    ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs["annotation"]))
    #if kwargs["crop"]: (from original analyze injection function, no functionality here if points file exist)
    #    ann = eval("ann{}".format(kwargs["crop"]))
    nonzeros = []
    #not needed as mapped points from point_transformix used
    #id_table = kwargs["id_table"] if "id_table" in kwargs else "/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx"
    #allen_id_table = pd.read_excel(id_table)

    for i in range(len(inputlist)):  #to iterate through brains
        pth = inputlist[i]  #path of each processed brain
        print("  loading:\n     {}".format(pth))

        dct = load_kwargs(pth)  #load kwargs of brain as dct

        try:
            inj_vol = [xx for xx in dct["volumes"] if xx.ch_type == "injch"
                       ][0]  #set injection channel volume
            im = tifffile.imread(inj_vol.resampled_for_elastix_vol
                                 )  #load inj_vol as numpy array
            if kwargs["crop"]:
                im = eval("im{}".format(kwargs["crop"]))  #; print im.shape

            #run find site function to segment inj site using non-registered resampled for elastix volume - pulled directly from tools.registration.register.py and tools.analysis.analyze_injection.py
            array = find_site(im,
                              thresh=kwargs["threshold"],
                              filter_kernel=kwargs["filter_kernel"],
                              num_sites_to_keep=num_sites_to_keep) * injscale
            if save_array:
                np.save(
                    os.path.join(dst,
                                 "{}".format(os.path.basename(pth)) + ".npy"),
                    array.astype("uint16"))
            if save_tif:
                tifffile.imsave(
                    os.path.join(dst,
                                 "{}".format(os.path.basename(pth)) + ".tif"),
                    array.astype("uint16"))

            #optional "save_individual"
            if kwargs["save_individual"]:
                im = im * imagescale
                a = np.concatenate((np.max(
                    im, axis=0), np.max(array.astype("uint16"), axis=0)),
                                   axis=1)
                b = np.concatenate((np.fliplr(
                    np.rot90(np.max(fix_orientation(im, axes=axes), axis=0),
                             k=3)),
                                    np.fliplr(
                                        np.rot90(np.max(fix_orientation(
                                            array.astype("uint16"), axes=axes),
                                                        axis=0),
                                                 k=3))),
                                   axis=1)
                plt.figure()
                plt.imshow(np.concatenate((b, a), axis=0), cmap=cmap, alpha=1)
                plt.axis("off")
                plt.savefig(os.path.join(
                    dst, "{}".format(os.path.basename(pth)) + ".pdf"),
                            dpi=300,
                            transparent=True)
                plt.close()

            #find all nonzero pixels in resampled for elastix volume
            print("   finding nonzero pixels for voxel counts...\n")
            nz = np.nonzero(array)
            nonzeros.append(zip(*nz))  #<-for pooled image

            #find transform file
            inverse_fld = inj_vol.inverse_elastixfld
            inj_fld = listdirfull(inverse_fld, "inj")[0]
            atlas2reg2sig_fld = listdirfull(inj_fld, "atlas2reg2sig")[0]
            transformfile = os.path.join(atlas2reg2sig_fld,
                                         "reg2sig_TransformParameters.1.txt")

            if not os.path.exists(transformfile):  #if the inverse transform does not exist yet, make it
                print(
                    "Transform file not found. Running elastix inverse transform... \n"
                )
                transformfile = make_inverse_transform(
                    [xx for xx in dct["volumes"] if xx.ch_type == "injch"][0],
                    cores=6,
                    **dct)
            else:
                print("Inverse transform exists. \n")

            #apply resizing point transform
            txtflnm = point_transform_due_to_resizing(array,
                                                      chtype="injch",
                                                      **dct)
            #run transformix on points
            points_file = point_transformix(txtflnm, transformfile)

            tdf = transformed_pnts_to_allen(points_file,
                                            ann,
                                            ch_type="injch",
                                            point_or_index=None,
                                            **dct)  #map to allen atlas
            if i == 0:
                df = tdf.copy()
                countcol = "count" if "count" in df.columns else "cell_count"
                df.drop([countcol], axis=1, inplace=True)
            df[os.path.basename(pth)] = tdf[countcol]

        except Exception as e:
            print(
                "could not recover injection site ({}); inspect manually for parameter dictionary errors or missing inj channel \n\n".format(e)
            )

    #cell counts to csv
    df.to_csv(os.path.join(dst, "voxel_counts.csv"))
    print("\n\nCSV file of cell counts, saved as {}\n\n\n".format(
        os.path.join(dst, "voxel_counts.csv")))

    #condense nonzero pixels
    nzs = [
        str(x) for xx in nonzeros for x in xx
    ]  #this list has duplicates if two brains had the same voxel w label
    c = Counter(nzs)
    arr = np.zeros(im.shape)
    print("Collecting nonzero pixels for pooled image...")
    tick = 0
    #generating pooled array where voxel value = total number of brains with that voxel as positive
    for k, v in c.items():
        k = [int(xx) for xx in k.replace("(", "").replace(")", "").split(",")]
        arr[k[0], k[1], k[2]] = int(v)
        tick += 1
        if tick % 50000 == 0: print("   {}".format(tick))

    #load atlas and generate final figure
    print("Generating final figure...")
    atlas = tifffile.imread(kwargs["atlas"])  #reads atlas
    print(
        "Zooming in atlas..."
    )  #necessary to have a representative heat map as these segmentations are done from the resized volume, diff dimensions than atlas
    zoomed_atlas = zoom(
        atlas, 1.3)  #zooms atlas; different than original analyze_injection.py
    sites = fix_orientation(arr, axes=axes)

    #cropping
    if kwargs["crop"]:
        zoomed_atlas = eval("zoomed_atlas{}".format(kwargs["crop"]))
    zoomed_atlas = fix_orientation(zoomed_atlas, axes=axes)

    my_cmap = getattr(plt.cm, cmap)(np.arange(plt.cm.RdBu.N))  #avoid eval on the colormap name
    my_cmap[:1, :4] = 0.0
    my_cmap = mpl.colors.ListedColormap(my_cmap)
    my_cmap.set_under("w")
    plt.figure()
    plt.imshow(np.max(zoomed_atlas, axis=0), cmap="gray")
    plt.imshow(np.max(sites, axis=0), alpha=0.99, cmap=my_cmap)
    plt.colorbar()
    plt.axis("off")
    dpi = int(kwargs["dpi"]) if "dpi" in kwargs else 300
    plt.savefig(os.path.join(dst, "heatmap.pdf"), dpi=dpi, transparent=True)
    plt.close()

    print("Saved as {}".format(os.path.join(dst, "heatmap.pdf")))

    return df
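A hedged example of the kwargs dict this pooling function expects, mirroring the docstring (all paths hypothetical):

inputs = {
    "inputlist": ["/jukebox/wang/example_brain_01", "/jukebox/wang/example_brain_02"],
    "channel": "01", "channel_type": "injch",
    "filter_kernel": (5, 5, 5), "threshold": 10, "num_sites_to_keep": 1,
    "injectionscale": 45000, "imagescale": 2,
    "reorientation": ("2", "0", "1"), "crop": False,
    "dst": "/home/wanglab/Downloads/test", "save_individual": True,
    "colormap": "plasma",
    "atlas": "/jukebox/LightSheetTransfer/atlas/sagittal_atlas_20um_iso.tif",
    "annotation": "/jukebox/wang/example_annotation.tif",  #hypothetical; read via SimpleITK in the function
}
df = pool_injections_inversetransform(**inputs)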
Example #14
def overlay_qc(args):  
    #unpacking this way for multiprocessing
    fld, folder_suffix, output_folder, verbose, doubletransform, make_volumes = args
    
    #location to save out
    dst = os.path.join(output_folder, os.path.basename(fld)); makedir(dst)
    
    #init error file (inside dst, so the directory exists before any error is written)
    error_file = os.path.join(dst, "errors.txt")
    
    try:
        #get 3dunet cell dataframe csv file
        input_csv = listdirfull(os.path.join(fld, folder_suffix), ".csv")
        assert len(input_csv) == 1, "multiple csv files"
        dataframe = pd.read_csv(input_csv[0])
        
        #EXAMPLE USING LIGHTSHEET - assumes marking centers in the "raw" full sized cell channel. This will transform those 
        #centers into "atlas" space (in this case the moving image)
        #in this case the "inverse transform has the atlas as the moving image in the first step, 
        #and the autofluorescence channel as the moving image in the second step 
        #NOTE - it seems that the registration of cell to auto is failing on occasion....thus get new files...
        ################################
        cell_inverse_folder = listdirfull(os.path.join(fld, "elastix_inverse_transform"), "cellch")[0]
        a2r = listall(cell_inverse_folder, "atlas2reg_TransformParameters"); a2r.sort()
        r2s = listall(cell_inverse_folder, "reg2sig_TransformParameters"); r2s.sort() #possibly remove

        #IMPORTANT. the idea is to apply cfos->auto->atlas
        transformfiles = r2s + a2r if doubletransform else a2r #might get rid of r2s
    
        lightsheet_parameter_dictionary = os.path.join(fld, "param_dict.p")
            
        converted_points = generate_transformed_cellcount(dataframe, dst, transformfiles, 
                                                          lightsheet_parameter_dictionary, verbose=verbose)
    
        #load and convert to single voxel loc
        zyx = np.asarray([str((int(xx[0]), int(xx[1]), int(xx[2]))) for xx in np.nan_to_num(np.load(converted_points))])
        from collections import Counter
        zyx_cnt = Counter(zyx)
        
        #check...
        if make_volumes:
            #manually call transformix
            kwargs = load_kwargs(lightsheet_parameter_dictionary)
            resampled_dims, resampled_vol = get_resampledvol_n_dimensions(lightsheet_parameter_dictionary)
            transformed_vol = os.path.join(dst, "transformed_volume"); makedir(transformed_vol)
            
            if not doubletransform:
                transformfiles = [os.path.join(fld, "elastix/TransformParameters.0.txt"), os.path.join(fld, 
                                  "elastix/TransformParameters.1.txt")]
                transformfiles = modify_transform_files(transformfiles, transformed_vol) #copy over elastix files
                transformix_command_line_call(resampled_vol, transformed_vol, transformfiles[-1])
            else:
                v=[xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0]
                #sig to reg
                tps = [listall(os.path.dirname(v.ch_to_reg_to_atlas), "/TransformParameters.0")[0], 
                       listall(os.path.dirname(v.ch_to_reg_to_atlas), "/TransformParameters.1")[0]]
                #reg to atlas
                transformfiles = tps+[os.path.join(fld, "elastix/TransformParameters.0.txt"), 
                                      os.path.join(fld, "elastix/TransformParameters.1.txt")]
                transformfiles = modify_transform_files(transformfiles, transformed_vol) #copy over elastix files
                transformix_command_line_call(resampled_vol, transformed_vol, transformfiles[-1])
            

            #cell_registered channel
            cell_reg = tifffile.imread(os.path.join(transformed_vol, "result.tif"))
            tifffile.imsave(os.path.join(transformed_vol, "result.tif"), cell_reg, compress=1)
            cell_cnn = np.zeros_like(cell_reg)
            tarr = []; badlist=[]
            for zyx,v in zyx_cnt.items():
                z,y,x = [int(xx) for xx in zyx.replace("(","",).replace(")","").split(",")]
                tarr.append([z,y,x])
                try:
                    cell_cnn[z,y,x] = v*100
                except:
                    badlist.append([z,y,x])
                    
            #apply x y dilation
            r = 2
            selem = ball(r)[int(r/2)]
            cell_cnn = cell_cnn.astype("uint8")
            cell_cnn = np.asarray([cv2.dilate(cell_cnn[i], selem, iterations = 1) for i in range(cell_cnn.shape[0])])
            
            tarr=np.asarray(tarr)
            if len(badlist)>0: 
                print("{} errors in mapping with cell_cnn shape {}, each max dim {}, \npossibly due to a registration overshoot \
                      or not using double transform\n\n{}".format(len(badlist), cell_cnn.shape, np.max(tarr,0), badlist))
            merged = np.stack([cell_reg, cell_cnn, np.zeros_like(cell_reg)], -1)
            tifffile.imsave(os.path.join(transformed_vol, "merged.tif"), merged)#, compress=1)
            #out = np.concatenate([cell_cnn, cell_reg, ], 0)
        
            #####check at the resampled for elastix phase before transform
            #make zyx numpy arry
            zyx = dataframe[["z","y","x"]].values
            fullsizedimensions = get_fullsizedimensions(lightsheet_parameter_dictionary) 
            zyx = fix_contour_orientation(zyx, verbose=verbose, **kwargs) #now in orientation of resample
            zyx = points_resample(zyx, original_dims = fix_dimension_orientation(fullsizedimensions, **kwargs), 
                                  resample_dims = resampled_dims, verbose = verbose)[:, :3]
            
            #cell channel
            cell_ch = tifffile.imread(resampled_vol)
            cell_cnn = np.zeros_like(cell_ch)
            tarr = []; badlist=[]
            for _zyx in zyx:
                z,y,x = [int(xx) for xx in _zyx]
                tarr.append([z,y,x])
                try:
                    cell_cnn[z,y,x] = 100
                except:
                    badlist.append([z,y,x])
            tarr = np.asarray(tarr)        
            merged = np.stack([cell_ch, cell_cnn, np.zeros_like(cell_ch)], -1)
            tifffile.imsave(os.path.join(transformed_vol, "resampled_merged.tif"), merged)#, compress=1)
            
    except Exception as e:
        print(e)
        with open(error_file, "a") as err_fl:
            err_fl.write("\n\n{} {}\n\n".format(fld, e))
    return dst


if __name__ == "__main__":

    print(sys.argv)
    jobid = int(os.environ["SLURM_ARRAY_TASK_ID"])

    src = "/jukebox/wang/pisano/tracing_output/eaat4"
    dst = "/jukebox/wang/zahra/eaat4_screening/201910_analysis"

    brains = listdirfull(src)

    brain = brains[jobid]

    kwargs = load_kwargs(brain)
    cellvol = [
        vol for vol in kwargs["volumes"]
        if vol.ch_type == "cellch" or vol.ch_type == "injch"
    ][0]
    fullszfld = cellvol.full_sizedatafld_vol

    imgs = [os.path.join(fullszfld, xx) for xx in os.listdir(fullszfld)]
    imgs.sort()
    stk = np.array([tif.imread(img) for img in imgs])[:, 1700:, :]
    #stk = tif.imread(src).astype("uint16")

    clh = np.array([
        equalize_adapthist(img,
                           clip_limit=0.05,
                           kernel_size=(50, 100)) for img in stk
    ])  #the source listing is truncated here; closing this per-plane CLAHE call is an assumed completion
Example #16
def make_inverse_transform(vol_to_process, cores=5, **kwargs):
    '''Script to perform inverse transform and return path to elastix inverse parameter file
    
    Returns:
    ---------------
    transformfile
    '''

    sys.stdout.write('starting make_inverse_transform, this will take time...')
    ############inputs
    kwargs = load_kwargs(kwargs['outputdirectory'])
    outdr = kwargs['outputdirectory']
    vols = kwargs['volumes']
    reg_vol = [xx for xx in vols if xx.ch_type == 'regch'][0]
    AtlasFile = reg_vol.atlasfile
    parameterfolder = reg_vol.parameterfolder

    ###############
    ###images need to have been stitched, resized, and saved into single tiff stack ###
    ###resize to ~220% total size of atlas (1.3x/dim) ###
    reg_vol.add_resampled_for_elastix_vol(reg_vol.downsized_vol +
                                          '_resampledforelastix.tif')
    #resample_par(cores, reg_vol, AtlasFile, svlocname=reg_vol_resampled, singletifffile=True, resamplefactor=1.2)
    if not os.path.exists(reg_vol.resampled_for_elastix_vol):
        print('Resizing')
        #resample(reg_vol, AtlasFile, svlocname=reg_vol_resampled, singletifffile=True, resamplefactor=1.3)
        resample_par(cores,
                     reg_vol.downsized_vol + '.tif',
                     AtlasFile,
                     svlocname=reg_vol.resampled_for_elastix_vol,
                     singletifffile=True,
                     resamplefactor=1.3)
        print('Past Resizing')

    vol_to_process.add_resampled_for_elastix_vol(vol_to_process.downsized_vol +
                                                 '_resampledforelastix.tif')

    if not os.path.exists(vol_to_process.resampled_for_elastix_vol):
        print('Resizing')
        resample_par(cores,
                     vol_to_process.downsized_vol + '.tif',
                     AtlasFile,
                     svlocname=vol_to_process.resampled_for_elastix_vol,
                     singletifffile=True,
                     resamplefactor=1.3)
        print('Past Resizing')

    ####setup
    parameters = [
        os.path.join(parameterfolder, files)
        for files in os.listdir(parameterfolder)
        if files[0] != '.' and files[-1] != '~'
    ]
    parameters.sort()

    ###set up save locations
    svlc = os.path.join(outdr, 'elastix_inverse_transform')
    makedir(svlc)
    svlc = os.path.join(
        svlc, '{}_{}'.format(vol_to_process.ch_type, vol_to_process.brainname))
    makedir(svlc)

    ###Creating LogFile
    #writer(svlc, 'Starting elastix...AtlasFile: {}\n   parameterfolder: {}\n   svlc: {}\n'.format(AtlasFile, parameterfolder, svlc))
    writer(
        svlc,
        'Order of parameters used in Elastix:{}\n...\n\n'.format(parameters))

    ##register: 1) atlas->reg 2) reg->sig NOTE these are intentionally backwards so applying point transform can be accomplished
    #atlas(mv)->reg (fx)
    atlas2reg = os.path.join(
        svlc,
        reg_vol.resampled_for_elastix_vol[reg_vol.resampled_for_elastix_vol.
                                          rfind('/') + 1:-4] + '_atlas2reg')
    makedir(atlas2reg)
    e_out_file, e_transform_file = elastix_command_line_call(
        fx=reg_vol.resampled_for_elastix_vol,
        mv=AtlasFile,
        out=atlas2reg,
        parameters=parameters)

    #reg(mv)->sig(fx)
    reg2sig = os.path.join(
        svlc, vol_to_process.resampled_for_elastix_vol[
            vol_to_process.resampled_for_elastix_vol.rfind('/') + 1:-4] +
        '_reg2sig')
    makedir(reg2sig)
    e_out_file, e_transform_file = elastix_command_line_call(
        fx=vol_to_process.resampled_for_elastix_vol,
        mv=reg_vol.resampled_for_elastix_vol,
        out=reg2sig,
        parameters=parameters)

    ##set up transform series:
    atlas2reg2sig = os.path.join(
        svlc, vol_to_process.resampled_for_elastix_vol[
            vol_to_process.resampled_for_elastix_vol.rfind('/') + 1:-4] +
        '_atlas2reg2sig')
    makedir(atlas2reg2sig)
    #copy transform parameters
    for xx in os.listdir(reg2sig):
        if 'TransformParameters' in xx:
            shutil.copy(os.path.join(reg2sig, xx),
                        os.path.join(atlas2reg2sig, 'reg2sig_' + xx))
    for xx in os.listdir(atlas2reg):
        if 'TransformParameters' in xx:
            shutil.copy(os.path.join(atlas2reg, xx),
                        os.path.join(atlas2reg2sig, 'atlas2reg_' + xx))

    #connect transforms by setting regtoatlas TP0's initial transform to sig->reg transform
    tps = [
        os.path.join(atlas2reg2sig, xx) for xx in os.listdir(atlas2reg2sig)
        if 'TransformParameters' in xx
    ]
    tps.sort(
        reverse=True
    )  #now ordered most recent transform first, so tps[0] is reg2sig_TransformParameters.1.txt
    for x in range(len(tps)):
        if not x == len(tps) - 1:
            change_transform_parameter_initial_transform(tps[x], tps[x + 1])

    assert os.path.exists(tps[0])
    writer(svlc, '***Elastix Registration Successfully Completed***\n')
    writer(svlc, '\ne_transform_file is {}'.format(tps[0]))
    ####################
    sys.stdout.write('completed make_inverse_transform')
    return tps[0]
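A hedged invocation sketch (the brain path is hypothetical):

kwargs = load_kwargs("/jukebox/wang/example_brain")
cellvol = [xx for xx in kwargs["volumes"] if xx.ch_type == "cellch"][0]
transformfile = make_inverse_transform(cellvol, cores=5, **kwargs)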
Example #17
def identify_structures_w_contours(jobid,
                                   cores=5,
                                   make_color_images=False,
                                   overlay_on_original_data=False,
                                   consider_only_multipln_contours=False,
                                   **kwargs):
    '''function to take 3d detected contours and apply elastix transform
    '''
    #######################inputs and setup#################################################
    ###inputs
    kwargs = load_kwargs(**kwargs)
    outdr = kwargs['outputdirectory']
    vols = kwargs['volumes']
    reg_vol = [xx for xx in vols if xx.ch_type == 'regch'][0]

    ###get rid of extra jobs
    if jobid >= len([xx for xx in vols if xx.ch_type != 'regch'
                     ]):  ###used to end jobs if too many are called
        print('jobid({}) >= volumes {}'.format(
            jobid, len([xx for xx in vols if xx.ch_type != 'regch'])))
        return

    ###volumes to process: each job represents a different contour volume
    vol_to_process = [xx for xx in vols if xx.ch_type != 'regch'][jobid]
    ch = vol_to_process.channel
    print(vol_to_process.ch_type)

    #find appropriate folders for contours
    if vol_to_process.ch_type == 'cellch':
        detect3dfld = reg_vol.celldetect3dfld
        coordinatesfld = reg_vol.cellcoordinatesfld
    elif vol_to_process.ch_type == 'injch':
        detect3dfld = reg_vol.injdetect3dfld
        coordinatesfld = reg_vol.injcoordinatesfld

    #set scale and atlas
    xscl, yscl, zscl = reg_vol.xyz_scale  ###micron/pixel
    zmx, ymx, xmx = reg_vol.fullsizedimensions
    AtlasFile = reg_vol.atlasfile
    print('Using {} CORES'.format(cores))
    try:
        p
    except NameError:
        p = mp.Pool(cores)
    resizefactor = kwargs['resizefactor']
    brainname = reg_vol.brainname

    ############################################################################################################
    #######################use regex to sort np files by ch and then by zpln####################################
    ############################################################################################################
    fl = [f for f in os.listdir(detect3dfld)
          if '.p' in f and 'ch' in f]  #sorted for raw files
    reg = re.compile(r'(.*h+)(?P<ch>\d{2})(.*)(.p)')
    matches = map(reg.match, fl)

    ##load .np files
    sys.stdout.write(
        '\njobid({}), loading ch{} .p files to extract contour_class objects....'
        .format(jobid, ch))
    contour_class_lst = []
    for fl in [
            os.path.join(detect3dfld, ''.join(xx.groups())) for xx in matches
            if xx.group('ch')[-2:] in ch
    ]:
        tmpkwargs = {}
        pckl = open(fl, 'rb')
        tmpkwargs.update(pickle.load(pckl))
        pckl.close()
        if not consider_only_multipln_contours:
            tmplst = tmpkwargs['single']
            tmplst.extend(tmpkwargs['multi'])
        else:
            tmplst = tmpkwargs['multi']
        contour_class_lst.extend(tmplst)
    sys.stdout.write('\ndone loading contour_class objects.\n')

    #check for successful loading
    if len(contour_class_lst) == 0:
        print('Length of contours in ch{} was {}, ending process...'.format(
            ch, len(contour_class_lst)))
        try:
            p.terminate()
        except Exception:
            pass
        return

    ############################################################################################################
    ##############################make color files##############################################################
    ############################################################################################################
    if make_color_images:
        sys.stdout.write('\nmaking 3d planes...')
        sys.stdout.flush()
        valid_plns = range(0, zmx + 1)
        svlc = os.path.join(outdr, 'ch{}_3dcontours'.format(ch))
        removedir(svlc)
        ovly = overlay_on_original_data
        iterlst = [(ch, svlc, contour_class_lst, valid_plns, outdr,
                    vol_to_process, resizefactor, contour_class_lst,
                    consider_only_multipln_contours, ovly, core, cores)
                   for core in range(cores)]
        p.starmap(ovly_3d, iterlst)
        lst = os.listdir(svlc)
        lst1 = [os.path.join(svlc, xx) for xx in lst]
        lst1.sort()
        del lst
        del iterlst
        ###load ims and return dct of keys=str(zpln), values=np.array
        sys.stdout.write(
            '\n3d planes made, saved in {},\nnow compressing into single tifffile'
            .format(svlc))
        imstack = tifffile.imread(lst1)
        del lst1
        if len(imstack.shape) > 3:
            imstack = np.squeeze(imstack)
        try:  ###check for orientation differences, i.e. from horizontal scan to sagittal for atlas registration
            imstack = np.swapaxes(imstack, *kwargs['swapaxes'])
        except:
            pass
        tiffstackpth = os.path.join(
            outdr, '3D_contours_ch{}_{}'.format(ch, brainname))
        tifffile.imsave(tiffstackpth, imstack.astype('uint16'))
        del imstack
        gc.collect()
        shutil.rmtree(svlc)
        sys.stdout.write('\ncolor image stack made for ch{}'.format(ch))
    else:
        sys.stdout.write('\nmake_color_images=False, not creating images')
    ############################################################################################################
    ######################apply point transform and make transformix input file#################################
    ############################################################################################################
    ###find centers and add 1's to make nx4 array for affine matrix multiplication to account for downsizing
    ###everything is in PIXELS
    contourarr = np.empty((len(contour_class_lst), 3))
    for i in range(len(contour_class_lst)):
        contourarr[i, ...] = contour_class_lst[
            i].center  ###full sized dimensions: if 3x3 tiles z(~2000),y(7680),x(6480) before any rotation
    try:
        contourarr = swap_cols(
            contourarr, *kwargs['swapaxes']
        )  ###change columns to account for orientation changes between brain and atlas: if horizontal to sagittal==>x,y,z relative to horizontal; zyx relative to sagittal
        z, y, x = swap_cols(np.array([
            vol_to_process.fullsizedimensions
        ]), *kwargs['swapaxes'])[
            0]  ##convert full size coordinates into sagittal atlas coordinates
        sys.stdout.write('\nSwapping Axes')
    except:  ###if no swapaxes then just take normal z,y,x dimensions in original scan orientation
        z, y, x = vol_to_process.fullsizedimensions
        sys.stdout.write('\nNo Swapping of Axes')
    d1, d2 = contourarr.shape
    nx4centers = np.ones((d1, d2 + 1))
    nx4centers[:, :-1] = contourarr
    ###find resampled elastix file dim

    print(os.listdir(outdr))
    print([xx.channel for xx in vols if xx.ch_type == 'regch'])
    with tifffile.TiffFile([
            os.path.join(outdr, f) for f in os.listdir(outdr)
            if 'resampledforelastix' in f and 'ch{}'.format(
                [xx.channel for xx in vols if xx.ch_type == 'regch'][0]) in f
    ][0]) as tif:
        zr = len(tif.pages)
        yr, xr = tif.pages[0].shape
    ####create transformmatrix
    trnsfrmmatrix = np.identity(4) * (
        zr / z, yr / y, xr / x, 1)  ###downscale to "resampledforelastix size"
    sys.stdout.write('trnsfrmmatrix:\n{}\n'.format(trnsfrmmatrix))
    #nx4 * 4x4 to give transform
    trnsfmdpnts = nx4centers.dot(trnsfrmmatrix)  ##z,y,x
    sys.stdout.write('first three transformed pnts:\n{}\n'.format(
        trnsfmdpnts[0:3]))
    #create txt file, with elastix header, then populate points
    txtflnm = '{}_zyx_transformedpnts_ch{}.txt'.format(brainname, ch)
    pnts_fld = os.path.join(outdr, 'transformedpoints_pretransformix')
    makedir(pnts_fld)
    transforminput = os.path.join(pnts_fld, txtflnm)
    removedir(transforminput)  ###prevent adding to an already made file
    writer(pnts_fld, 'index\n{}\n'.format(len(trnsfmdpnts)), flnm=txtflnm)
    sys.stdout.write(
        '\nwriting centers to transformix input points text file: {}....'.
        format(transforminput))
    stringtowrite = '\n'.join([
        '{} {} {}'.format(i[2], i[1], i[0]) for i in trnsfmdpnts
    ])  ####this step converts from zyx to xyz*****
    writer(pnts_fld, stringtowrite, flnm=txtflnm)
    #[writer(pnts_fld, '{} {} {}\n'.format(i[2],i[1],i[0]), flnm=txtflnm, verbose=False) for i in trnsfmdpnts] ####this step converts from zyx to xyz*****
    sys.stdout.write('...done writing centers.')
    sys.stdout.flush()
    del trnsfmdpnts, trnsfrmmatrix, nx4centers, contourarr
    gc.collect()
    ############################################################################################################
    ####################################elastix for inverse transform###########################################
    ############################################################################################################
    transformfile = make_inverse_transform(vol_to_process, cores, **kwargs)
    assert os.path.exists(transformfile)
    sys.stdout.write(
        '\n***Elastix Inverse Transform File: {}***'.format(transformfile))
    ############################################################################################################
    ####################################transformix#############################################################
    ############################################################################################################
    if make_color_images:
        #apply transform to 3d_tiffstack:
        transformimageinput = tiffstackpth
        elastixpth = os.path.join(outdr, 'elastix')
        trnsfrm_outpath = os.path.join(
            elastixpth, '3D_contours_ch{}_{}'.format(ch, brainname))
        makedir(trnsfrm_outpath)
        writer(trnsfrm_outpath, '\nProcessing ch{} 3D...'.format(ch))
        #transformfiles=[os.path.join(elastixpth, xx) for xx in os.listdir(os.path.join(outdr, 'elastix')) if "TransformParameters" in xx]; mxx=max([xx[-5] for xx in transformfiles])
        #transformfile=os.path.join(elastixpth, 'TransformParameters.{}.txt'.format(mxx))
        trnsfrm_out_file = os.path.join(trnsfrm_outpath,
                                        'result.tif')  #output of transformix
        transformimageinput_resized = transformimageinput[:-4] + '_resampledforelastix.tif'
        print('Resizing {}'.format(transformimageinput_resized))
        resample_par(cores,
                     transformimageinput,
                     AtlasFile,
                     svlocname=transformimageinput_resized,
                     singletifffile=True,
                     resamplefactor=1.7)
        sp.call([
            'transformix', '-in', transformimageinput_resized, '-out',
            trnsfrm_outpath, '-tp', transformfile
        ])
        writer(trnsfrm_outpath,
               '\n   Transformix File Generated: {}'.format(trnsfrm_out_file))
        writer(
            trnsfrm_outpath, '\n   Passing colorcode: {} file as {}'.format(
                ch, os.path.join(trnsfrm_outpath, 'depthcoded.png')))
        ###depth coded image of transformix result; not functional yet
        #depth.colorcode(trnsfrm_out_file, trnsfrm_outpath)
        #getvoxels(trnsfrm_out_file, os.path.join(trnsfrm_outpath, 'zyx_voxels_{}.npy'.format(ch)))
        #allen_compare(AtlasFile, svlc, trnsfrm_outpath)
        ##if successful delete contour cooridnates and maybe contourdetect3d flds
    ############################################################

    ##############apply transform to points#####################
    elastixpth = os.path.join(outdr, 'elastix_inverse_transform')
    trnsfrm_outpath = os.path.join(elastixpth, 'ch{}_3dpoints'.format(ch))
    makedir(trnsfrm_outpath)
    writer(trnsfrm_outpath,
           '\n***********Starting Transformix for: {}***********'.format(ch))
    sys.stdout.flush()
    #transformfiles=[os.path.join(elastixpth, xx) for xx in os.listdir(os.path.join(outdr, 'elastix')) if "TransformParameters" in xx]; mxx=max([xx[-5] for xx in transformfiles])
    #transformfile=os.path.join(elastixpth, 'TransformParameters.{}.txt'.format(mxx))
    trnsfrm_out_file = os.path.join(
        trnsfrm_outpath, 'outputpoints.txt')  #MIGHT NEED TO CHANGE THIS
    sp.call([
        'transformix', '-def', transforminput, '-out', trnsfrm_outpath, '-tp',
        transformfile
    ])
    #sp.call(['transformix', '-def', 'all', '-out', trnsfrm_outpath, '-tp', transformfile]) ##displacement field
    writer(trnsfrm_outpath,
           '\n   Transformix File Generated: {}'.format(trnsfrm_out_file))
    ####################################################################################
    ##############generate list and image overlaid onto allen atlas#####################
    ####################################################################################
    name = 'job{}_{}'.format(jobid, vol_to_process.ch_type)
    transformed_pnts_to_allen(trnsfrm_out_file, ch, cores, name=name, **kwargs)
    writer(outdr, '*************STEP 5*************\n Finished')
    print('end of script')
    try:
        p.terminate()
    except Exception:
        pass
    return
Example #18
def check_registration_injection(pth, inputs, cerebellum_only = True, axis = 0):
    '''Function to output registered brain images from processed directories.
    Useful to determine 'bad' registration.
    
    Inputs:
        pth = pdf file path
        inputs = Directories containing processed data
    '''
    pdf_pages = PdfPages(pth) #compiles into multiple pdfs
    
    #iterate through inputs
    for src in inputs:
        
        #load kwargs
        dct = load_kwargs(src)    
        
        #set output directory
        outdr = dct['outputdirectory']
        
        #set atlas file path
        atl = tifffile.imread(dct['AtlasFile'])
        
        #determine elastix output path
        elastix_out = os.path.join(outdr, 'elastix')
            
        #set registration channel file path
        reg = tifffile.imread(os.path.join(elastix_out, 'result.1.tif'))
        
        vol = [xx for xx in dct['volumes'] if xx.ch_type == 'injch'][0] #get injection volume
    
        #read transformed injch image
        print('Reading registered injection channel image\n     {}'.format(outdr))
        im = tifffile.imread(os.path.dirname(vol.ch_to_reg_to_atlas)+'/result.tif')#.astype('uint8')
         
        print('\nPlotting images...\n')
        figs = plt.figure(figsize=(8.27, 11.69))
        #starting to plot figures
        plt.subplot(131)        
        #plot the result and atlas next to each other
        plt.imshow(atl[300], cmap = 'gray'); plt.axis('off'); plt.title('Atlas', fontsize = 10)        
        plt.subplot(132)
        plt.imshow(reg[300], cmap = 'gray'); plt.axis('off'); plt.title('Registered brain', fontsize = 10)
        
        #plot the max intensity zplane for the injection channel
        plt.subplot(133)
        a = np.max(im, axis = axis) # coronal view = 1; sagittal view = 0
        plt.imshow(a, cmap = 'plasma', alpha = 1); plt.axis('off'); plt.title('Injection site', fontsize = 10)
        #fix title

#        brainname = re.search('(?<=_)(\w+)(?=_1d3x)', vol.brainname)
        
        if cerebellum_only:
            #add title to page
            plt.text(0.5,0.65, os.path.basename(src), transform = figs.transFigure, size = 16) #.group(0)
        else:
            #add title to page
            plt.text(0.1,0.70, os.path.basename(src), transform = figs.transFigure, size = 16) 
        
        #done with the page
        pdf_pages.savefig(dpi = 300, bbox_inches = 'tight') 
        
    #write PDF document containing a composite of all brains
    pdf_pages.close()
    
    print('Saved as {}'.format(pth))
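A usage sketch (both paths are hypothetical):

inputs = ["/jukebox/wang/example_brain_01", "/jukebox/wang/example_brain_02"]
check_registration_injection("/home/wanglab/Downloads/registration_qc.pdf",
                             inputs, cerebellum_only=False, axis=0)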
def pool_injections_for_analysis(**kwargs):
    '''Function to pool several injection sites. Assumes that the basic registration using this software has been run.
    
   
    Inputs
    -----------
    kwargs:
      'inputlist': inputlist, #list of folders generated previously from software
      'channel': '01', 
      'channel_type': 'injch',
      'filter_kernel': (3,3,3), #gaussian blur in pixels (if registered to ABA then 1px likely is 25um)
      'threshold': 3 (int, value to use for thresholding; this value represents the number of standard deviations above the mean of the gblurred image)
      'num_sites_to_keep': int, number of injection sites to keep, useful if multiple distinct sites
      'injectionscale': 45000, #use to increase intensity of injection site visualizations generated - DOES NOT AFFECT DATA
      'imagescale': 2, #use to increase intensity of background  site visualizations generated - DOES NOT AFFECT DATA
      'reorientation': ('2','0','1'), #use to change image orientation for visualization only
      'crop': #use to crop volume, values below assume horizontal imaging and sagittal atlas
                False
                cerebellum: '[:,390:,:]'
                caudal midbrain: '[:,300:415,:]'
                midbrain: '[:,215:415,:]'
                thalamus: '[:,215:345,:]'
                anterior cortex: '[:,:250,:]'
      
      'dst': '/home/wanglab/Downloads/test', #save location
      'save_individual': True, #optional to save individual images, useful to inspect brains; you can then remove bad brains from the list and rerun the function
      'colormap': 'plasma', 
      'atlas': '/jukebox/wang/pisano/Python/allenatlas/average_template_25_sagittal_forDVscans.tif',
      'annotation':'/jukebox/wang/pisano/Python/allenatlas/annotation_25_ccf2015_forDVscans.nrrd',
      'id_table': '/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx',
      
      Optional:
          ----------
          'save_array': path to folder to save out numpy array per brain of binarized detected site
          'save_tif': saves out tif volume per brain of binarized detected site
          'dpi': dots per square inch to save at
          'crop_atlas':(not functional) similar to crop. Use when you would like to greatly restrain the cropping for injsite detection, but you want to display a larger area of overlay.
                      this will 0-pad the injection sites to accommodate the difference in size. Note this MUST be LARGER THAN crop.
          
      Returns
      ----------
      a pooled image consisting of max intensity projections of the reorientations provided in kwargs.
      a list of structures (csv file) with pixel counts, pooled across brains.
      if save_individual is set, saves individual images, useful for inspection and/or visualization
    '''

    inputlist = kwargs['inputlist']
    dst = kwargs['dst']
    makedir(dst)
    injscale = kwargs.get('injectionscale', 1)
    imagescale = kwargs.get('imagescale', 1)
    axes = kwargs.get('reorientation', ('0', '1', '2'))
    cmap = kwargs.get('colormap', 'plasma')
    id_table = kwargs.get('id_table', '/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx')
    count_threshold = kwargs.get('count_threshold', 10)
    save_array = kwargs.get('save_array', False)
    save_tif = kwargs.get('save_tif', False)
    num_sites_to_keep = kwargs.get('num_sites_to_keep', 1)
    nonzeros = []
    ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs['annotation']))
    if kwargs['crop']: ann = eval('ann{}'.format(kwargs['crop']))
    allen_id_table = pd.read_excel(id_table)

    for i in range(len(inputlist)):
        pth = inputlist[i]
        print('\n\n_______\n{}'.format(os.path.basename(pth)))
        dct = load_kwargs(pth)
        #print dct['AtlasFile']
        try:
            vol = [
                xx for xx in dct['volumes']
                if xx.ch_type == kwargs['channel_type']
                and xx.channel == kwargs['channel']
            ][0]
        except:
            vol = [
                xx for xx in dct['volumes']
                if xx.ch_type == "cellch" and xx.channel == kwargs['channel']
            ][0]
        #done to account for different versions
        if os.path.exists(vol.ch_to_reg_to_atlas + '/result.1.tif'):
            impth = vol.ch_to_reg_to_atlas + '/result.1.tif'
        elif os.path.exists(vol.ch_to_reg_to_atlas
                            ) and vol.ch_to_reg_to_atlas[-4:] == '.tif':
            impth = vol.ch_to_reg_to_atlas
        elif os.path.exists(
                os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.1.tif'):
            impth = os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.1.tif'
        elif os.path.exists(
                os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.tif'):
            impth = os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.tif'
        else:
            raise ValueError('no registered volume found for {}'.format(vol.ch_to_reg_to_atlas))

        print('  loading:\n     {}'.format(pth))
        im = tifffile.imread(impth)

        if kwargs['crop']:
            im = eval('im{}'.format(kwargs['crop']))  #; print im.shape

        #segment
        arr = find_site(im,
                        thresh=kwargs['threshold'],
                        filter_kernel=kwargs['filter_kernel'],
                        num_sites_to_keep=num_sites_to_keep) * injscale
        if save_array:
            np.save(
                os.path.join(dst, '{}'.format(os.path.basename(pth)) + '.npy'),
                arr.astype('float32'))
        if save_tif:
            tifffile.imsave(
                os.path.join(dst, '{}'.format(os.path.basename(pth)) + '.tif'),
                arr.astype('float32'))

        #optional 'save_individual'
        if kwargs['save_individual']:
            im = im * imagescale
            a = np.concatenate(
                (np.max(im, axis=0), np.max(arr.astype('uint16'), axis=0)),
                axis=1)
            b = np.concatenate((np.fliplr(
                np.rot90(np.max(fix_orientation(im, axes=axes), axis=0), k=3)),
                                np.fliplr(
                                    np.rot90(np.max(fix_orientation(
                                        arr.astype('uint16'), axes=axes),
                                                    axis=0),
                                             k=3))),
                               axis=1)
            plt.figure()
            plt.imshow(np.concatenate((b, a), axis=0), cmap=cmap, alpha=1)
            plt.axis('off')
            plt.savefig(os.path.join(
                dst, '{}'.format(os.path.basename(pth)) + '.pdf'),
                        dpi=300,
                        transparent=True)
            plt.close()

        #voxel counts to csv
        print('   finding nonzero pixels for voxel counts...')
        nz = np.nonzero(arr)
        nonzeros.append(list(zip(*nz)))  #<-for pooled image
        pos = transformed_pnts_to_allen_helper_func(
            np.asarray(list(zip(*[nz[2], nz[1], nz[0]]))), ann)
        tdf = count_structure_lister(allen_id_table, *pos)
        if i == 0:
            df = tdf.copy()
            countcol = 'count' if 'count' in df.columns else 'cell_count'
            df.drop([countcol], axis=1, inplace=True)
        df[os.path.basename(pth)] = tdf[countcol]

    df.to_csv(os.path.join(dst, 'voxel_counts.csv'))
    print('\n\nCSV file of voxel counts, saved as {}\n\n\n'.format(
        os.path.join(dst, 'voxel_counts.csv')))

    #condense nonzero pixels
    nzs = [
        str(x) for xx in nonzeros for x in xx
    ]  #this list has duplicates if two brains had the same voxel w label
    c = Counter(nzs)
    array = np.zeros(im.shape)
    print('Collecting nonzero pixels for pooled image...')
    tick = 0
    #generating pooled array where voxel value = total number of brains with that voxel as positive
    for k, v in c.items():
        k = [int(xx) for xx in k.replace('(', '').replace(')', '').split(',')]
        array[k[0], k[1], k[2]] = int(v)
        tick += 1
        if tick % 50000 == 0: print('   {}'.format(tick))

    #load atlas and generate final figure
    print('Generating final figure...')
    atlas = tifffile.imread(kwargs['atlas'])
    arr = fix_orientation(array, axes=axes)
    #cropping
    #if 'crop_atlas' not in kwargs:
    if kwargs['crop']: atlas = eval('atlas{}'.format(kwargs['crop']))
    atlas = fix_orientation(atlas, axes=axes)
    #elif 'crop_atlas' in kwargs:
    #if kwargs['crop_atlas']: atlas = eval('atlas{}'.format(kwargs['crop_atlas']))
    #atlas = fix_orientation(atlas, axes=axes)
    #accommodate for size difference
    #d0,d1,d2 = [(x-y)/2 for x,y in zip(atlas.shape, arr.shape)]
    #arr = np.pad(arr,((d0,d0),(d1,d1),(d2,d2)), mode='constant')
    ##allows for a single pixel shift - if needed
    #d0,d1,d2 = [(x-y) for x,y in zip(atlas.shape, arr.shape)]
    #arr = np.pad(arr,((d0,0),(d1,0),(d2,0)), mode='constant')

    my_cmap = eval('plt.cm.{}(np.arange(plt.cm.RdBu.N))'.format(cmap))
    my_cmap[:1, :4] = 0.0
    my_cmap = mpl.colors.ListedColormap(my_cmap)
    my_cmap.set_under('w')
    plt.figure()
    plt.imshow(np.max(atlas, axis=0), cmap='gray')
    plt.imshow(np.max(arr, axis=0), alpha=0.99, cmap=my_cmap)
    plt.colorbar()
    plt.axis('off')
    dpi = int(kwargs['dpi']) if 'dpi' in kwargs else 300
    plt.savefig(os.path.join(dst, 'heatmap.pdf'), dpi=dpi, transparent=True)
    plt.close()

    print('Saved as {}'.format(os.path.join(dst, 'heatmap.pdf')))

    return df
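
# Usage sketch (illustrative; the input folders are hypothetical, the atlas /
# annotation / id_table paths are the defaults quoted in the docstring above):
#
# params = {'inputlist': ['/jukebox/wang/pisano/tracing_output/brain01',
#                         '/jukebox/wang/pisano/tracing_output/brain02'],
#           'channel': '01', 'channel_type': 'injch',
#           'filter_kernel': (3, 3, 3), 'threshold': 3, 'num_sites_to_keep': 1,
#           'injectionscale': 45000, 'imagescale': 2,
#           'reorientation': ('2', '0', '1'),
#           'crop': '[:,390:,:]',  # cerebellum crop from the docstring
#           'dst': '/home/wanglab/Downloads/test',
#           'save_individual': True, 'colormap': 'plasma',
#           'atlas': '/jukebox/wang/pisano/Python/allenatlas/average_template_25_sagittal_forDVscans.tif',
#           'annotation': '/jukebox/wang/pisano/Python/allenatlas/annotation_25_ccf2015_forDVscans.nrrd',
#           'id_table': '/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx'}
# df = pool_injections_for_analysis(**params)
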
Example #20
def point_transform_due_to_resizing(array,
                                    chtype='injch',
                                    svlc=False,
                                    **kwargs):
    '''Function to take npy array, find nonzero pixels, apply point transform (due to resizing) and package them into a file for final elastix point transform
    
    
    Inputs
    -------------
    array = np array, tif, or path to numpy array from tools.objectdetection.injdetect.inj_detect_using_labels ZYX****
    chtype = 'injch' or 'cellch'
    svlc = 
        False: saves file into (outdr, 'transformedpoints_pretransformix'). Strongly recommended, as the rest of the package expects this location
        path: saves file into the given directory instead
    
    Returns
    ---------------
    txtflnm = pth to file containing transformed points BEFORE elastix transformation
    
    NOTE THIS FUNCTION ASSUMES ARRAY AND ATLAS HAVE SAME Z,Y,X (DOES NOT TAKE INTO ACCOUNT SWAPPING OF AXES)
    '''
    if type(array) == str:
        if array[-4:] == '.npy': array = np.load(array)
        if array[-4:] == '.tif': array = tifffile.imread(array)

    kwargs = load_kwargs(**kwargs)
    outdr = kwargs['outputdirectory']
    brainname = [xx for xx in kwargs['volumes']
                 if xx.ch_type == 'regch'][0].brainname
    vol = [xx for xx in kwargs['volumes'] if xx.ch_type == chtype][0]

    #array dimensions
    z, y, x = array.shape

    #compare size of array with 'resampledforelastixsize'
    with tifffile.TiffFile([
            os.path.join(outdr, f) for f in os.listdir(outdr)
            if 'resampledforelastix.tif' in f and not '3D_contours' in f
            and 'ch{}'.format([
                xx.channel for xx in kwargs['volumes'] if xx.ch_type == 'regch'
            ][0]) in f
    ][0]) as tif:
        zr = len(tif.pages)
        yr, xr = tif.pages[0].shape

    nonzeropixels = np.argwhere(array > 0)
    nx4centers = np.ones(
        (len(nonzeropixels), 4))  #FIXME: the logic needs to be checked
    nx4centers[:, :-1] = nonzeropixels

    ####create transformmatrix
    trnsfrmmatrix = np.identity(4) * (
        zr / z, yr / y, xr / x, 1)  ###downscale to "resampledforelastix size"
    sys.stdout.write('\n\n Transform matrix:\n{}\n'.format(trnsfrmmatrix))

    #nx4 * 4x4 to give transform
    trnsfmdpnts = nx4centers.dot(trnsfrmmatrix)  ##z,y,x
    sys.stdout.write('\nfirst three transformed pnts:\n{}\n'.format(
        trnsfmdpnts[0:3]))

    #create txt file, with elastix header, then populate points
    txtflnm = '{}_zyx_transformedpnts_{}.txt'.format(brainname, vol.ch_type)

    #
    if not svlc:
        pnts_fld = os.path.join(outdr, 'transformedpoints_pretransformix')
        makedir(pnts_fld)
    if svlc:
        pnts_fld = os.path.join(svlc)
        makedir(pnts_fld)

    transforminput = os.path.join(pnts_fld, txtflnm)
    removedir(transforminput)  ###prevent adding to an already made file
    writer(pnts_fld, 'index\n{}\n'.format(len(trnsfmdpnts)), flnm=txtflnm)
    sys.stdout.write(
        '\nwriting centers to transformix input points text file...')
    stringtowrite = '\n'.join([
        '\n'.join(['{} {} {}'.format(i[2], i[1], i[0])]) for i in trnsfmdpnts
    ])  ####this step converts from zyx to xyz*****
    writer(pnts_fld, stringtowrite, flnm=txtflnm)
    #[writer(pnts_fld, '{} {} {}\n'.format(i[2],i[1],i[0]), flnm=txtflnm, verbose=False) for i in trnsfmdpnts] ####this step converts from zyx to xyz*****
    sys.stdout.write('...done writing centers.\nSaved in {}'.format(pnts_fld))
    sys.stdout.flush()

    del trnsfmdpnts, trnsfrmmatrix, nx4centers

    return os.path.join(pnts_fld, txtflnm)
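
# Minimal worked example of the homogeneous-coordinate scaling above
# (self-contained; the dimensions are made up): two zyx points in a
# 1000x2000x2000 full-size volume mapped into a 500x500x500 resampled volume.
import numpy as np
pts = np.array([[100, 200, 300], [10, 20, 30]])       # z, y, x
nx4 = np.ones((len(pts), 4)); nx4[:, :-1] = pts       # homogeneous coordinates
m = np.identity(4) * (500 / 1000, 500 / 2000, 500 / 2000, 1)
print(nx4.dot(m)[:, :3])                              # -> [[50. 50. 75.], [5. 5. 7.5]]
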
Example #21
        #print newim.shape

    imstack = tifffile.imread(
        '/home/wanglab/LightSheetData/marm_ghazanfar/post_processed/marm_01/smallctx/marm_01_smallctx_488_647_90msec_z3um_20hfds_01na_resized_ch00_resampledforelastix.tif'
    )
    kwargs = {}
    #kwargs['finalorientation']= ('1', '-0','2')
    kwargs['finalorientation'] = ('2', '0', '-1')
    newim = fix_orientation(imstack, axes=None, **kwargs)

    sitk.Show(sitk.GetImageFromArray(imstack))
    sitk.Show(sitk.GetImageFromArray(newim))

    from tools.utils.io import load_kwargs
    pth = '/home/wanglab/wang/pisano/tracing_output/bl6_ts/20150804_tp_bl6_ts01'
    kwargs = load_kwargs(pth)

    axes = ('-2', '-0', '-1')
    contour_array = np.zeros((2, 3))
    contour_array[0] = np.asarray([3, 4, 5])
    contour_array[1] = np.asarray([300, 400, 500])

    contour_array = fix_contour_orientation(contour_array,
                                            axes=axes,
                                            verbose=True)

    zyx = (500, 4444, 3456)
    zyx = fix_dimension_orientation(zyx,
                                    axes=[int(xx) for xx in axes],
                                    verbose=True)
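
# Hedged sketch of the axes convention used above. fix_orientation itself
# lives in tools.imageprocessing.orientation; the reimplementation below is
# an assumption consistent with how it is called here: each entry of axes
# names the source axis that becomes that output axis, and a leading '-'
# flips the result along that axis.
import numpy as np

def fix_orientation_sketch(vol, axes=('2', '0', '-1')):
    order = [int(ax.strip('-')) for ax in axes]   # e.g. ('2','0','-1') -> [2, 0, 1]
    out = np.transpose(vol, order)
    for i, ax in enumerate(axes):
        if ax.startswith('-'):
            out = np.flip(out, axis=i)            # negative sign = flip that output axis
    return out
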
Example #22
def elastix_registration(jobid, cores=5, **kwargs):
    '''Function to take brainvolumes and register them to AtlasFiles using elastix parameters in parameterfolder.
    Inputs
    ---------------
    cores = for parallelization
    
    optional kwargs:
    secondary_registration (optional): True (default) - register other channel(s) to regch, then apply the transform determined from regch->atlas
                                       (useful if imaging conditions were different between channel and regch, i.e. added horizontal foci, sheet na...etc)
                                       False - apply transform determined from regch->atlas directly to other channels
                                       kwargs overrides the explicit 'secondary_registration' input to the function
    
    
    Required inputs via kwargs:
            brainname='AnimalID'
            brainpath= pathtofolder
            AtlasFile=pathtoatlas ###atlas is a tif stack
            parameterfolder ##contains parameter files: Order1_filename.txt, Order2_filename.txt,....
            svlc=pathtosavedirectory
            maskfile(optional)=creates a nested folder inside svlc and runs registration with mask
    To run in parallel use: parallel_elastixlooper 
  '''

    ###inputs
    outdr = kwargs['outputdirectory']
    kwargs = load_kwargs(outdr)
    #check to see if masking, cropping or normal atlas
    if 'maskatlas' in kwargs:
        AtlasFile = generate_masked_atlas(**kwargs)
    elif 'cropatlas' in kwargs:
        AtlasFile = generate_cropped_atlas(**kwargs)
    else:
        AtlasFile = kwargs['AtlasFile']

    ###make variables for volumes:
    vols = kwargs['volumes']
    reg_vol = [xx for xx in vols if xx.ch_type == 'regch'][0]

    #images need to have been stitched, resized, and saved into single tiff stack
    #resize to ~220% total size of atlas (1.3x/dim)
    sys.stdout.write('Beginning registration on {}'.format(reg_vol.brainname))
    sys.stdout.flush()
    reg_vol.add_resampled_for_elastix_vol(reg_vol.downsized_vol +
                                          '_resampledforelastix.tif')
    if not os.path.exists(reg_vol.resampled_for_elastix_vol):
        sys.stdout.write('\n   Resizing {}'.format(reg_vol.downsized_vol))
        sys.stdout.flush()
        resample_par(cores,
                     reg_vol.downsized_vol + '.tif',
                     AtlasFile,
                     svlocname=reg_vol.resampled_for_elastix_vol,
                     singletifffile=True,
                     resamplefactor=1.3)
        [
            vol.add_registration_volume(reg_vol.resampled_for_elastix_vol)
            for vol in vols
        ]
        sys.stdout.write('...completed resizing\n')
        sys.stdout.flush()

    ###find elastix parameters files and sort, set up parameters and logfiles
    parameters = []
    [
        parameters.append(os.path.join(reg_vol.parameterfolder, files))
        for files in os.listdir(reg_vol.parameterfolder)
        if files[0] != '.' and files[-1] != '~'
    ]
    parameters.sort()
    svlc = os.path.join(outdr, 'elastix')
    makedir(svlc)
    writer(
        svlc,
        'Starting elastix...AtlasFile: {}\n   parameterfolder: {}\n   svlc: {}\n'
        .format(AtlasFile, reg_vol.parameterfolder, svlc))
    writer(
        svlc,
        'Order of parameters used in Elastix:{}\n...\n\n'.format(parameters))

    #optionally generate MHD file for better scaling in elastix (make both mhds if present since one tiff and one mhd doesn't work well)
    resampled_zyx_dims = False
    if False and 'atlas_scale' in kwargs:
        atlasfilecopy = AtlasFile
        AtlasFile = convert_to_mhd(
            AtlasFile,
            dims=kwargs['atlas_scale'],
            dst=os.path.join(
                kwargs['outputdirectory'],
                os.path.splitext(os.path.basename(kwargs['AtlasFile']))[0]) +
            '.mhd')
        #copy reg vol and calculate effective distance/pixel scale
        reg_volcopy = reg_vol.resampled_for_elastix_vol
        resampled_zyx_dims = [
            cc * dd for cc, dd in zip(kwargs['xyz_scale'][::-1], [
                float(bb) / float(aa) for aa, bb in zip(
                    tifffile.imread(reg_vol.resampled_for_elastix_vol).shape,
                    reg_vol.fullsizedimensions)
            ])
        ]
        #note convert_to_mhd dims are in XYZ
        reg_vol.add_resampled_for_elastix_vol(
            convert_to_mhd(
                reg_vol.resampled_for_elastix_vol,
                dims=resampled_zyx_dims[::-1],
                dst=os.path.join(
                    kwargs['outputdirectory'],
                    os.path.splitext(
                        os.path.basename(
                            reg_vol.resampled_for_elastix_vol))[0]) + '.mhd'))

    #ELASTIX
    e_out_file, transformfile = elastix_command_line_call(
        AtlasFile, reg_vol.resampled_for_elastix_vol, svlc, parameters)

    #optionally generate MHD file for better scaling in elastix
    if False and 'atlas_scale' in kwargs:
        removedir(AtlasFile)
        removedir(AtlasFile[-3:] + '.raw')
        AtlasFile = atlasfilecopy
        removedir(reg_vol.resampled_for_elastix_vol)
        removedir(reg_vol.resampled_for_elastix_vol + '.raw')
        reg_vol.add_resampled_for_elastix_vol(reg_volcopy)

    #RG movie for visual inspection of image registration
    color_movie_merger(AtlasFile, e_out_file, svlc, reg_vol.brainname)

    #RGB movie with blue channel=pre-registered stack
    bluechannel = os.path.join(svlc, 'combinedmovies',
                               reg_vol.brainname + '_resized_bluechannel.tif')
    resample(
        reg_vol.downsized_vol + '.tif',
        AtlasFile,
        svlocname=bluechannel,
        singletifffile=True
    )  ##needs to be resample (not resample_par) as you need EXACT dimensions
    color_movie_merger(AtlasFile,
                       e_out_file,
                       svlc,
                       reg_vol.brainname + '_bluechanneloriginal',
                       movie5=bluechannel)

    #Make gridline transform file
    gridfld, tmpgridline = gridcompare(svlc, reg_vol)
    sp.call([
        'transformix', '-in', tmpgridline, '-out', gridfld, '-tp',
        transformfile
    ])
    combine_images(str(reg_vol.resampled_for_elastix_vol), AtlasFile,
                   os.path.join(gridfld, 'result.tif'), e_out_file, svlc,
                   reg_vol.brainname)
    shutil.rmtree(gridfld)

    #Apply transforms to other channels
    writer(
        svlc,
        '\n...\nStarting Transformix on channel files...\n\nChannels to process are {}\n*****\n\n'
        .format([x.downsized_vol for x in vols]))

    #type of transform and channels to apply transform to
    secondary_registration = kwargs[
        'secondary_registration'] if 'secondary_registration' in kwargs else True
    transform_function = apply_transformix_and_register if secondary_registration else apply_transformix
    vols_to_register = [xx for xx in vols if xx.ch_type != 'regch']

    #apply transform
    [
        transform_function(vol, reg_vol, svlc, cores, AtlasFile, parameters,
                           transformfile, resampled_zyx_dims)
        for vol in vols_to_register
    ]
    writer(svlc, '\nPast transformix step')

    ###make final output image if a cellch and injch exist
    if any([True
            for vol in vols_to_register if vol.ch_type == 'cellch']) and any(
                [True for vol in vols_to_register if vol.ch_type == 'injch']):
        injch = [
            vol.registration_volume for vol in vols_to_register
            if vol.ch_type == 'injch'
        ][0]
        cellch = [
            vol.registration_volume for vol in vols_to_register
            if vol.ch_type == 'cellch'
        ][0]
        inj_and_cells(svlc, cellch, injch, AtlasFile)

    ####### check to see if script finished due to an error
    if not os.path.exists(e_out_file):
        writer(
            svlc,
            '****ERROR**** REACHED END OF SCRIPT, BUT THIS ELASTIX OUTPUT FILE DOES NOT EXIST: {0}\n'
            .format(e_out_file))

    ######write out logfile describing parameters input into function
    writer(
        svlc,
        "\nelastixlooper has finished using:\nbrainname: {}\nAtlasFile: {}\nparameterfolder: {}\nparameter files {}\nsvlc: {}"
        .format(reg_vol.brainname, AtlasFile, reg_vol.parameterfolder,
                parameters, svlc))

    ###update volumes in kwargs and pickle
    vols_to_register.append(reg_vol)
    kwargs.update(dict([('volumes', vols_to_register)]))
    pckloc = os.path.join(outdr, 'param_dict.p')
    pckfl = open(pckloc, 'wb')
    pickle.dump(kwargs, pckfl)
    pckfl.close()
    writer(
        outdr,
        "\n\n*************STEP 3************************\nelastix has completed using:\nbrainname: {}\nAtlasFile: {}\nparameterfolder: {}\nparameter files {}\nsvlc: {}\n****************\n"
        .format(reg_vol.brainname, AtlasFile, reg_vol.parameterfolder,
                parameters, svlc))

    return
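
# For reference, elastix_command_line_call above ultimately shells out to the
# elastix binary; the command it builds is along these lines (a sketch only;
# the fixed/moving argument order is as documented for the elastix CLI, with
# one '-p' per ordered parameter file):
import subprocess as sp

def elastix_sketch(fixed, moving, outdir, parameters):
    cmd = ['elastix', '-f', fixed, '-m', moving, '-out', outdir]
    for p in parameters:        # Order1_*.txt, Order2_*.txt, ...
        cmd += ['-p', p]
    sp.check_call(cmd)          # writes result.N.tif and TransformParameters.N.txt into outdir
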
Example #23
    cb_roi[z, y, x] = 1  #make a mask of structure in annotation space

cb_roi = cb_roi.astype(bool)  #convert structure mask to boolean

#mask all the regions that are not cerebellum in the segmentation
for site in sites:
    site[~cb_roi] = 0

#%%
#visualization
atl = fix_orientation(tifffile.imread(atl_pth)[:, 450:, :], ("2", "0", "1"))
#masked_sites = np.array([fix_orientation(site[:, 450:, :], ("2", "0", "1")) for site in sites])

for i, site in enumerate(sites):
    masked_site = fix_orientation(site[:, 450:, :], ("2", "0", "1"))
    kwargs = load_kwargs(os.path.join(data, os.path.basename(imgs[i])[:-15]))
    cellvol = [
        vol for vol in kwargs["volumes"]
        if vol.ch_type == "cellch" or vol.ch_type == "injch"
    ][0]
    regvol = fix_orientation(
        tifffile.imread(cellvol.ch_to_reg_to_atlas)[:, 450:, :],
        ("2", "0", "1"))
    tifffile.imsave(
        os.path.join(dst,
                     os.path.basename(imgs[i])[:-15] + "maxproj_.tif"),
        np.max(regvol, axis=0))
#    merged = np.stack([regvol, masked_site, np.zeros_like(atl)], -1)
#    tifffile.imsave(os.path.join(dst, os.path.basename(imgs[i])), merged.astype("uint16"))
#
#my_cmap = eval("plt.cm.{}(np.arange(plt.cm.RdBu.N))".format("viridis"))
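
# The cb_roi mask above is filled voxel-by-voxel from structure coordinates;
# an equivalent vectorized construction with np.isin would be (toy values:
# the annotation volume and cerebellar IDs here are made up):
import numpy as np
ann_toy = np.array([[512, 0], [512, 1000]])          # toy annotation volume
cerebellum_ids = {512}                               # hypothetical Allen structure IDs
cb_roi_toy = np.isin(ann_toy, list(cerebellum_ids))  # bool mask, same shape as ann_toy
print(cb_roi_toy)                                    # -> [[ True False] [ True False]]
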
Example #24
def transformed_pnts_to_allen(trnsfrm_out_file,
                              ch,
                              cores,
                              point_or_index=None,
                              name=False,
                              **kwargs):
    '''function to take elastix point transform file and return anatomical locations of those points
    point_or_index=None/point/index: determines which transformix output to use: point is more accurate, index is pixel value(?)
    Elastix uses the xyz convention rather than the zyx numpy convention
    
    ###ASSUMES INPUT OF XYZ
    
    '''
    #####inputs
    assert type(trnsfrm_out_file) == str
    if point_or_index is None or point_or_index == 'point':
        point_or_index = 'OutputPoint'
    elif point_or_index == 'index':
        point_or_index = 'OutputIndexFixed'
    try:  #check to see if pool processes have already been spawned
        p
    except NameError:
        p = mp.Pool(cores)

    kwargs = load_kwargs(**kwargs)
    vols = kwargs['volumes']
    reg_vol = [xx for xx in vols if xx.ch_type == 'regch'][0]

    ####load files
    id_table = pd.read_excel(
        os.path.join(kwargs['packagedirectory'], 'supp_files/id_table.xlsx')
    )  ##use for determining neuroanatomical locations according to allen
    ann = sitk.GetArrayFromImage(sitk.ReadImage(
        kwargs['annotationfile']))  ###zyx
    with open(trnsfrm_out_file, "r") as f:
        lines = f.readlines()

    #####populate post-transformed array of contour centers
    sys.stdout.write('\n{} points detected'.format(len(lines)))
    arr = np.empty((len(lines), 3))
    for i in range(len(lines)):
        arr[i,
            ...] = lines[i].split()[lines[i].split().index(point_or_index) +
                                    3:lines[i].split().index(point_or_index) +
                                    6]  #x,y,z

    #optional save out of points
    np.save(kwargs['outputdirectory'] + '/injection/zyx_voxels.npy',
            np.asarray([(z, y, x) for x, y, z in arr]))

    pnts = transformed_pnts_to_allen_helper_func(arr, ann)
    pnt_lst = [xx for xx in pnts if xx != 0]
    if len(pnt_lst) == 0:
        raise ValueError('pnt_lst is empty')
    else:
        sys.stdout.write('\nlen of pnt_lst({})'.format(len(pnt_lst)))
    imstack = brain_structure_keeper(
        ann, True,
        *pnt_lst)  ###annotation file, true=to depict density, list of pnts
    df = count_structure_lister(id_table, *pnt_lst)
    #########save out imstack and df
    nametosave = '{}{}_{}'.format(name, ch, reg_vol.brainname)
    tifffile.imsave(
        os.path.join(kwargs['outputdirectory'],
                     nametosave + '_structure_density_map.tif'), imstack)
    excelfl = os.path.join(kwargs['outputdirectory'],
                           nametosave + '_structures_table.xlsx')
    df.to_excel(excelfl)
    print('file saved as: {}'.format(excelfl))
    try:
        p.terminate()
    except Exception:
        pass
    return
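
# The slicing above (index of 'OutputPoint' + 3 : + 6) matches transformix's
# outputpoints.txt format, where '=' and '[' sit between the keyword and the
# three coordinates. A self-contained check on an abbreviated sample line:
line = ('Point 0 ; InputIndex = [ 10 20 3 ] ; OutputIndexFixed = [ 11 21 3 ] '
        '; OutputPoint = [ 11.2 21.5 3.0 ] ; Deformation = [ 1.2 1.5 0.0 ]')
tok = line.split()
i = tok.index('OutputPoint')
print(tok[i + 3:i + 6])  # -> ['11.2', '21.5', '3.0']
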
Example #25
def inj_detect_using_labels(threshold = .1, resampledforelastix = False, num_labels_to_keep=100, show = False, save = True, masking = False, **kwargs):
    '''Loads, thresholds and then applies scipy.ndimage.label to find connected structures.
    
    Inputs
    -------
    threshold = fraction of the maximum pixel intensity; pixels at or below threshold*max are zeroed
    resampledforelastix = False: use downsized image (still fairly large); True: use 'resampledforelastix' image (smaller and thus faster)
    num_labels_to_keep = optional; keep only the top xx largest labels    
    show = optional, displays the thresholded image and the image after scipy.ndimage.label has been applied    
    save = True: saves tif volume; False: returns the numpy array
    masking = (optional) if True: apply BD's masking algorithm *BEFORE* thresholding
    
    Returns
    --------
    if save=False: returns thresholded numpy array
    if save=True: returns path to the saved tif volume
        
    '''
    sys.stdout.write('\n\nStarting Injection site detection, you need ~3x Memory of size of volume to run\n\n'); sys.stdout.flush()
    
    kwargs = load_kwargs(**kwargs)
        
    #load inj vol
    if not resampledforelastix: injvol = tifffile.imread([xx for xx in kwargs['volumes'] if xx.ch_type == 'injch'][0].downsized_vol+'.tif')
    if resampledforelastix: injvol = tifffile.imread([xx for xx in kwargs['volumes'] if xx.ch_type == 'injch'][0].downsized_vol+'_resampledforelastix.tif')
    injvolcp = injvol.copy()

    #bd's masking using regression    
    if masking:
        sys.stdout.write('\nStarting Masking Step...'); sys.stdout.flush()        
        #load reg vol
        if not resampledforelastix: regvol = [xx for xx in kwargs['volumes'] if xx.ch_type == 'regch'][0].downsized_vol+'.tif'
        if resampledforelastix: regvol = [xx for xx in kwargs['volumes'] if xx.ch_type == 'regch'][0].downsized_vol+'_resampledforelastix.tif'

        #masking regression
        maskfld = os.path.join(kwargs['outputdirectory'], 'injection', 'mask'); makedir(maskfld)
        injvol = make_mask(injvol, regvol, step=0.05, slope_thresh=0.4, init_window=200, out=maskfld, despeckle_kernel=5, imsave_kwargs={'compress': 5}, save_plots=True, verbose=True, **kwargs)
        sys.stdout.write('\n\nCompleted Masking Step'); sys.stdout.flush()
        
    #threshold bottom xx% of pixels
    injvol[injvol <= np.max(injvol)*threshold] = 0 
    
    
    #look for connected pixels        
    sys.stdout.write('\nLooking for connected pixels....'); sys.stdout.flush()
    lbl, numfeat=label(injvol)
    if show: sitk.Show(sitk.GetImageFromArray(injvol))
    if show: sitk.Show(sitk.GetImageFromArray(lbl))
    del injvol
    
    sys.stdout.write('\n      {} number of unique labels detected, if a large number, increase the threshold value'.format(numfeat)); sys.stdout.flush()
    
    #get pixelid, pixelcounts
    pxvl_num = Counter([lbl[tuple(xx)] for xx in np.argwhere(lbl>0)])
    [sys.stdout.write('\n{} pixels at value {}'.format(num, pxvl)) for pxvl, num in pxvl_num.items()]; sys.stdout.flush()

    #format into list
    num_pxvl = [[num, pxvl] for pxvl, num in pxvl_num.items()]

    sys.stdout.write('\nKeeping the {} largest label(s)'.format(num_labels_to_keep)); sys.stdout.flush()
    #remove smaller labels     
    num_pxvl.sort(reverse=True)
    rmlst=num_pxvl[num_labels_to_keep:]
    for n, pxvl in rmlst:
        lbl[lbl==pxvl] = 0
    [sys.stdout.write('\n    Kept {} of pixel id({})'.format(xx[0], xx[1])) for xx in num_pxvl[:num_labels_to_keep]]; sys.stdout.flush()

    #remove nonzero pixels from original (preserving their original values)        
    injvol = injvolcp * lbl.astype('bool'); del injvolcp
    
    #save out points:
    if save:    
        if not resampledforelastix: svpth = os.path.join(kwargs['outputdirectory'], 'injection', '{}labelskept_{}threshold_injectionpixels_downsized.tif').format(num_labels_to_keep, threshold)        
        if resampledforelastix: svpth = os.path.join(kwargs['outputdirectory'], 'injection','{}labelskept_{}threshold_injectionpixels_resampledforelastix.tif').format(num_labels_to_keep, threshold)
        tifffile.imsave(svpth,injvol)        
        return svpth
    else:            
        return injvol
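
# Toy run of the threshold-then-label bookkeeping above (self-contained):
import numpy as np
from collections import Counter
from scipy.ndimage import label

vol = np.zeros((6, 6)); vol[0, 0] = 100; vol[3:6, 3:6] = 40
vol[vol <= np.max(vol) * .1] = 0                  # default threshold of .1
lbl, numfeat = label(vol)
pxvl_num = Counter(lbl[tuple(xx)] for xx in np.argwhere(lbl > 0))
print(numfeat, dict(pxvl_num))                    # -> 2 {1: 1, 2: 9}
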
Example #26
def find_location(src,
                  dst=False,
                  correspondence_type='post_elastix',
                  verbose=False):
    '''
    Function to transform an excel sheet (e.g.: lightsheet/supp_files/sample_coordinate_to_location.xlsx) and output transformed locations.
    
    Suggestion is to use imagej to find XYZ coordinates to input into excel sheet.
        
    Inputs
    ----------------
    src = excel sheet
    dst = (optional) output excel file path; ensure it ends with '.xlsx'
    correspondence_type = 
                    'post_elastix': your coordinates are from the corresponding post-registered elastix file (outputfolder/elastix/..../result....tif)
                    'full_size_data': your coordinates are from the "full_sizedatafld" where:
                        Z == #### in 'file_name_Z####.tif'
                        X,Y are the pixel coordinates within that tif file
    
    
    Returns
    ----------------
    writes an excel file of transformed locations to dst (default: src with an '_output.xlsx' suffix)
    
    '''
    #from __future__ import division
    #import shutil, os, tifffile, cv2, numpy as np, pandas as pd, sys, SimpleITK as sitk
    #from tools.utils.io import listdirfull, load_kwargs, writer, makedir
    #from tools.conv_net.read_roi import read_roi, read_roi_zip
    from tools.registration.register import transformed_pnts_to_allen_helper_func
    from tools.registration.transform import structure_lister
    from tools.utils.io import load_kwargs, listdirfull, listall
    import SimpleITK as sitk
    import pandas as pd, numpy as np, os
    from skimage.external import tifffile

    if correspondence_type == 'post_elastix':
        print(
            'This function assumes coordinates are from the corresponding post-registered elastix file. \nMake sure the excel file has number,<space>number,<space>number and not number,number,number'
        )

        #inputs
        df = pd.read_excel(src)

        for brain in df.columns[1:]:
            print(brain)

            #load and find files
            kwargs = load_kwargs(
                df[brain][df['Inputs'] == 'Path to folder'][0])
            ann = sitk.GetArrayFromImage(
                sitk.ReadImage(kwargs['annotationfile']))

            #Look up coordinates to pixel value
            xyz_points = np.asarray([(int(xx.split(',')[0]),
                                      int(xx.split(',')[1]),
                                      int(xx.split(',')[2]))
                                     for xx in df[brain][3:].tolist()])
            xyz_points = transformed_pnts_to_allen_helper_func(xyz_points,
                                                               ann=ann,
                                                               order='XYZ')

            #pixel id to transform
            if 'allen_id_table' in kwargs:
                structures = structure_lister(
                    pd.read_excel(kwargs['allen_id_table']), *xyz_points)
            else:
                structures = structure_lister(
                    pd.read_excel(kwargs['volumes'][0].allen_id_table),
                    *xyz_points)

            #update dataframe
            df[brain + ' point transform'] = df[brain][:3].tolist() + [
                str(s.tolist()[0]) for s in structures
            ]

        if not dst: dst = src[:-5] + '_output.xlsx'
        df.to_excel(dst)
        print('Saved as {}'.format(dst))

    if correspondence_type == 'full_size_data':
        from tools.imageprocessing.orientation import fix_dimension_orientation, fix_contour_orientation
        from tools.utils.directorydeterminer import pth_update
        from tools.registration.register import collect_points_post_transformix
        from tools.registration.transform import points_resample, points_transform
        print(
            'This function assumes coordinates are from the corresponding "full_sizedatafld". \nMake sure the excel file has number,<space>number,<space>number and not number,number,number'
        )

        #inputs
        df = pd.read_excel(src)

        for brain in df.columns[1:]:

            #load and find files
            kwargs = load_kwargs(
                df[brain][df['Inputs'] == 'Path to folder'][0])
            ann = sitk.GetArrayFromImage(
                sitk.ReadImage(kwargs['annotationfile']))
            ch_type = str(
                df[brain][df['Inputs'] == 'Channel Type'].tolist()[0])
            vol = [xx for xx in kwargs['volumes'] if xx.ch_type == ch_type][0]

            #Look up coordinates to pixel value
            zyx_points = np.asarray([(int(xx.split(',')[2]),
                                      int(xx.split(',')[1]),
                                      int(xx.split(',')[0]))
                                     for xx in df[brain][3:].tolist()])

            #Fix orientation
            zyx_points = fix_contour_orientation(np.asarray(zyx_points),
                                                 verbose=verbose,
                                                 **kwargs)

            #Fix Scaling
            trnsfmdpnts = points_resample(
                zyx_points,
                original_dims=fix_dimension_orientation(
                    vol.fullsizedimensions, **kwargs),
                resample_dims=tifffile.imread(
                    pth_update(vol.resampled_for_elastix_vol)).shape,
                verbose=verbose)

            #write out points for transformix
            transformfile = [
                xx for xx in listall(os.path.join(vol.inverse_elastixfld))
                if os.path.basename(vol.full_sizedatafld_vol)[:-5] in xx
                and 'atlas2reg2sig' in xx
                and 'reg2sig_TransformParameters.1.txt' in xx
            ][0]
            tmpdst = os.path.join(os.path.dirname(src),
                                  'coordinate_to_location_tmp')
            output = points_transform(src=trnsfmdpnts[:, :3],
                                      dst=tmpdst,
                                      transformfile=transformfile,
                                      verbose=True)

            #collect from transformix
            xyz_points = collect_points_post_transformix(output)

            #now ID:
            pix_ids = transformed_pnts_to_allen_helper_func(xyz_points,
                                                            ann=ann,
                                                            order='XYZ')

            #pixel id to transform
            aid = kwargs[
                'allen_id_table'] if 'allen_id_table' in kwargs else kwargs[
                    'volumes'][0].allen_id_table
            structures = structure_lister(pd.read_excel(aid), *pix_ids)

            #update dataframe
            df[brain + ' xyz points atlas space'] = df[brain][:3].tolist() + [
                str(zyx) for zyx in xyz_points
            ]
            df[brain + ' structures'] = df[brain][:3].tolist() + [
                str(s.tolist()[0]) for s in structures
            ]

        if not dst: dst = src[:-5] + '_output.xlsx'
        df.to_excel(dst)
        print('Saved as {}'.format(dst))

    return
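
# Usage sketch (hypothetical paths; the excel sheet follows the template in
# lightsheet/supp_files/sample_coordinate_to_location.xlsx, one column per
# brain with coordinates written as 'x, y, z'):
#
# find_location('/home/wanglab/coordinate_to_location.xlsx',
#               dst='/home/wanglab/coordinate_to_location_output.xlsx',
#               correspondence_type='post_elastix', verbose=True)
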
def pool_injections_for_analysis(**kwargs):
    """Function to pool several injection sites. Assumes that the basic registration using this software has been run.
       
    Inputs
    -----------
    kwargs:
      "inputlist": inputlist, #list of folders generated previously from software
      "inputtype": "main_folder", "tiff" #specify the type of input. main_folder is the lightsheetpackage"s folder, tiff is the file 
      to use for injection site segmentation
      "channel": "01", 
      "channel_type": "injch",
      "filter_kernel": (3,3,3), #gaussian blur in pixels (if registered to ABA then 1px likely is 25um)
      "threshold": 3 (int, value to use for thresholding, this value represents the number of stand devs above the mean of the gblurred
      image)
      "num_sites_to_keep": int, number of injection sites to keep, useful if multiple distinct sites
      "injectionscale": 45000, #use to increase intensity of injection site visualizations generated - DOES NOT AFFECT DATA
      "imagescale": 2, #use to increase intensity of background  site visualizations generated - DOES NOT AFFECT DATA
      "reorientation": ("2","0","1"), #use to change image orientation for visualization only
      "crop": #use to crop volume, values below assume horizontal imaging and sagittal atlas
                False
                cerebellum: "[:,390:,:]"
                caudal midbrain: "[:,300:415,:]"
                midbrain: "[:,215:415,:]"
                thalamus: "[:,215:345,:]"
                anterior cortex: "[:,:250,:]"
      
      "dst": "/home/wanglab/Downloads/test", #save location
      "save_individual": True, #optional to save individual images, useful to inspect brains, which you can then remove bad brains from 
      list and rerun function
      "colormap": "plasma", 
      "atlas": "/jukebox/wang/pisano/Python/allenatlas/average_template_25_sagittal_forDVscans.tif",
      "annotation":"/jukebox/wang/pisano/Python/allenatlas/annotation_25_ccf2015_forDVscans.nrrd",
      "id_table": "/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx",
      
      Optional:
          ----------
          "save_array": path to folder to save out numpy array per brain of binarized detected site
          "save_tif": saves out tif volume per brain of binarized detected site
          "dpi": dots per square inch to save at
          "crop_atlas":(notfunctional) similiar to crop. Use when you would like to greatly restrain the cropping for injsite detection,
          but you want to display a larger area of overlay.
                      this will 0 pad the injection sites to accomodate the difference in size. Note this MUST be LARGER THAN crop.
          
      Returns
      ----------
      a pooled image consisting of max intensity projections of the reorientations provided in kwargs.
      a list of structures (csv file) with pixel counts, pooled across brains.
      if save_individual is set, saves individual images, useful for inspection and/or visualization
    """

    inputlist = kwargs["inputlist"]
    inputtype = kwargs.get("inputtype", "main_folder")
    dst = kwargs["dst"]
    makedir(dst)
    injscale = kwargs.get("injectionscale", 1)
    imagescale = kwargs.get("imagescale", 1)
    axes = kwargs.get("reorientation", ("0", "1", "2"))
    cmap = kwargs.get("colormap", "plasma")
    id_table = kwargs.get("id_table", "/jukebox/LightSheetTransfer/atlas/ls_id_table_w_voxelcounts.xlsx")
    save_array = kwargs.get("save_array", False)
    save_tif = kwargs.get("save_tif", False)
    num_sites_to_keep = kwargs.get("num_sites_to_keep", 1)
    nonzeros = []
    ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs["annotation"]))
    if kwargs["crop"]: ann = eval("ann{}".format(kwargs["crop"]))
    allen_id_table = pd.read_excel(id_table)

    for i in range(len(inputlist)):
        pth = inputlist[i]
        print("\n\n_______\n{}".format(os.path.basename(pth)))
        #find the volume by loading the param dictionary generated using the light-sheet package
        if inputtype == "main_folder":
            dct = load_kwargs(pth)
            #print dct["AtlasFile"]
            try:
                vol = [
                    xx for xx in dct["volumes"]
                    if xx.ch_type in kwargs["channel_type"]
                    and xx.channel == kwargs["channel"]
                ][0]
            except:
                vol = [xx for xx in dct["volumes"] if xx.ch_type != "regch"][0]
                #risky for 3 channel images, but needed if param dict initially is mislabelled
            #done to account for different versions
            if os.path.exists(vol.ch_to_reg_to_atlas + "/result.1.tif"):
                impth = vol.ch_to_reg_to_atlas + "/result.1.tif"
            elif os.path.exists(vol.ch_to_reg_to_atlas
                                ) and vol.ch_to_reg_to_atlas[-4:] == ".tif":
                impth = vol.ch_to_reg_to_atlas
            elif os.path.exists(
                    os.path.dirname(vol.ch_to_reg_to_atlas) + "/result.1.tif"):
                impth = os.path.dirname(
                    vol.ch_to_reg_to_atlas) + "/result.1.tif"
            elif os.path.exists(
                    os.path.dirname(vol.ch_to_reg_to_atlas) + "/result.tif"):
                impth = os.path.dirname(vol.ch_to_reg_to_atlas) + "/result.tif"
            else:
                raise ValueError("no registered volume found for {}".format(vol.ch_to_reg_to_atlas))
        else:  #"tiff", use the file given
            impth = pth

        print("  loading:\n     {}".format(pth))
        im = tifffile.imread(impth)

        if kwargs["crop"]:
            im = eval("im{}".format(kwargs["crop"]))  #; print im.shape

        #segment
        arr = find_site(im,
                        thresh=kwargs["threshold"],
                        filter_kernel=kwargs["filter_kernel"],
                        num_sites_to_keep=num_sites_to_keep) * injscale
        if save_array:
            np.save(
                os.path.join(save_array,
                             "{}".format(os.path.basename(pth)) + ".npy"),
                arr.astype("float32"))
        if save_tif:
            tifffile.imsave(
                os.path.join(save_tif,
                             "{}".format(os.path.basename(pth)) + ".tif"),
                arr.astype("float32"))

        #optional "save_individual"
        if kwargs["save_individual"]:
            im = im * imagescale
            a = np.concatenate(
                (np.max(im, axis=0), np.max(arr.astype("uint16"), axis=0)),
                axis=1)
            b = np.concatenate((np.fliplr(
                np.rot90(np.max(fix_orientation(im, axes=axes), axis=0), k=3)),
                                np.fliplr(
                                    np.rot90(np.max(fix_orientation(
                                        arr.astype("uint16"), axes=axes),
                                                    axis=0),
                                             k=3))),
                               axis=1)
            plt.figure()
            plt.imshow(np.concatenate((b, a), axis=0), cmap=cmap, alpha=1)
            plt.axis("off")
            plt.savefig(os.path.join(
                dst, "{}".format(os.path.basename(pth)) + ".pdf"),
                        dpi=300,
                        transparent=True)
            plt.close()

        #voxel counts to csv
        print("   finding nonzero pixels for voxel counts...")
        nz = np.nonzero(arr)
        nonzeros.append(list(zip(*nz)))  #<-for pooled image
        pos = transformed_pnts_to_allen_helper_func(
            np.asarray(list(zip(*[nz[2], nz[1], nz[0]]))), ann)
        tdf = count_structure_lister(allen_id_table, *pos)
        if i == 0:
            df = tdf.copy()
            countcol = "count" if "count" in df.columns else "cell_count"
            df.drop([countcol], axis=1, inplace=True)
        df[os.path.basename(pth)] = tdf[countcol]

    df.to_csv(os.path.join(dst, "voxel_counts.csv"), index=False)
    print("\n\nCSV file of cell counts, saved as {}\n\n\n".format(
        os.path.join(dst, "voxel_counts.csv")))

    #condense nonzero pixels
    nzs = [
        str(x) for xx in nonzeros for x in xx
    ]  #this list has duplicates if two brains had the same voxel w label
    c = Counter(nzs)
    array = np.zeros(im.shape)
    print("Collecting nonzero pixels for pooled image...")
    tick = 0
    #generating pooled array where voxel value = total number of brains with that voxel as positive
    for k, v in c.items():
        k = [int(xx) for xx in k.replace("(", "").replace(")", "").split(",")]
        array[k[0], k[1], k[2]] = int(v)
        tick += 1
        if tick % 50000 == 0: print("   {}".format(tick))

    #load atlas and generate final figure
    print("Generating final figure...")
    atlas = tifffile.imread(kwargs["atlas"])
    arr = fix_orientation(array, axes=axes)
    #cropping
    if kwargs["crop"]: atlas = eval("atlas{}".format(kwargs["crop"]))
    atlas = fix_orientation(atlas, axes=axes)

    my_cmap = eval("plt.cm.{}(np.arange(plt.cm.RdBu.N))".format(cmap))
    my_cmap[:1, :4] = 0.0
    my_cmap = mpl.colors.ListedColormap(my_cmap)
    my_cmap.set_under("w")
    plt.figure()
    plt.imshow(np.max(atlas, axis=0), cmap="gray")
    plt.imshow(np.max(arr, axis=0), alpha=0.99, cmap=my_cmap)
    plt.colorbar()
    plt.axis("off")
    dpi = int(kwargs["dpi"]) if "dpi" in kwargs else 300
    plt.savefig(os.path.join(dst, "heatmap.pdf"), dpi=dpi, transparent=True)
    plt.close()

    print("Saved as {}".format(os.path.join(dst, "heatmap.pdf")))

    return df
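
# find_site is imported from elsewhere in the package and is not shown in this
# listing. Based on the docstring above ('filter_kernel' = gaussian blur,
# 'threshold' = standard deviations above the mean of the blurred image,
# 'num_sites_to_keep' largest connected components), a minimal sketch of it
# might look like this; an assumption, not the original implementation:
import numpy as np
from scipy.ndimage import gaussian_filter, label

def find_site_sketch(im, thresh=3, filter_kernel=(3, 3, 3), num_sites_to_keep=1):
    filtered = gaussian_filter(im.astype('float64'), filter_kernel)  # gaussian blur
    binary = filtered > filtered.mean() + thresh * filtered.std()    # mean + N*std threshold
    lbl, nlab = label(binary)
    if nlab == 0:
        return np.zeros_like(binary)
    sizes = np.bincount(lbl.ravel()); sizes[0] = 0                   # ignore background
    keep = np.argsort(sizes)[::-1][:num_sites_to_keep]               # largest component(s)
    return np.isin(lbl, keep)                                        # bool mask of kept site(s)
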
Example #28
    flds = [
        os.path.join(src, xx) for xx in os.listdir(src)
        if os.path.isdir(os.path.join(src, xx))
    ]

    for fld in flds:
        start = time.time()

        brain = os.path.basename(fld)
        #load in ROIS - clicked in SAGITTAL volume
        roi_pth = fld + "/{}_20190602_fiber_points_RoiSet.zip".format(brain)
        #check if path exists obv
        if os.path.exists(roi_pth):

            #have to load kwargs...
            kwargs = load_kwargs(fld)

            #get rois
            zyx_rois_sag = np.asarray(
                [[int(yy) for yy in xx.replace(".roi", "").split("-")]
                 for xx in read_roi_zip(roi_pth, include_roi_name=True)])
            #slice into diff orientations
            zyx_rois_cor = np.asarray([[xx[1], xx[2], xx[0]]
                                       for xx in zyx_rois_sag])
            zyx_rois_hor = np.asarray([[xx[2], xx[1], xx[0]]
                                       for xx in zyx_rois_sag])

            #make destination path
            dst = os.path.join(fld, "points_merged_to_atlas")
            if not os.path.exists(dst): os.mkdir(dst)
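
# Quick check of the reslicing above (illustrative): a point clicked in the
# sagittal volume at (z, y, x) maps to (y, x, z) in the coronal volume and
# (x, y, z) in the horizontal volume, matching the index shuffles used here.
import numpy as np
zyx_sag = np.array([[10, 20, 30]])
print(np.array([[p[1], p[2], p[0]] for p in zyx_sag]))  # coronal    -> [[20 30 10]]
print(np.array([[p[2], p[1], p[0]] for p in zyx_sag]))  # horizontal -> [[30 20 10]]
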
Example #29
def transformed_pnts_to_allen(points_file,
                              ch_type='injch',
                              point_or_index=None,
                              allen_id_table_pth=False,
                              **kwargs):
    '''function to take elastix point transform file and return anatomical locations of those points
    point_or_index=None/point/index: determines which transformix output to use: point is more accurate, index is pixel value(?)
    Elastix uses the xyz convention rather than the zyx numpy convention
    
    Inputs
    -----------
    points_file = 
    ch_type = 'injch' or 'cellch'
    allen_id_table_pth (optional) pth to allen_id_table
    
    Returns
    -----------
    excelfl = path to excel file
    
    '''
    kwargs = load_kwargs(**kwargs)
    #####inputs
    assert type(points_file) == str

    if point_or_index is None or point_or_index == 'point':
        point_or_index = 'OutputPoint'
    elif point_or_index == 'index':
        point_or_index = 'OutputIndexFixed'

    #
    vols = kwargs['volumes']
    reg_vol = [xx for xx in vols if xx.ch_type == 'regch'][0]

    ####load files
    if not allen_id_table_pth:
        allen_id_table = pd.read_excel(
            os.path.join(reg_vol.packagedirectory,
                         'supp_files/allen_id_table.xlsx')
        )  ##use for determining neuroanatomical locations according to allen
    else:
        allen_id_table = pd.read_excel(allen_id_table_pth)
    ann = sitk.GetArrayFromImage(sitk.ReadImage(
        kwargs['annotationfile']))  ###zyx
    with open(points_file, "r") as f:
        lines = f.readlines()

    #####populate post-transformed array of contour centers
    sys.stdout.write('\n{} points detected\n\n'.format(len(lines)))
    arr = np.empty((len(lines), 3))
    for i in range(len(lines)):
        arr[i,
            ...] = lines[i].split()[lines[i].split().index(point_or_index) +
                                    3:lines[i].split().index(point_or_index) +
                                    6]  #x,y,z

    #optional save out of points
    np.save(kwargs['outputdirectory'] + '/injection/zyx_voxels.npy',
            np.asarray([(z, y, x) for x, y, z in arr]))

    pnts = transformed_pnts_to_allen_helper_func(arr, ann)
    pnt_lst = [xx for xx in pnts if xx != 0]

    #check to see if any points where found
    if len(pnt_lst) == 0:
        raise ValueError('pnt_lst is empty')
    else:
        sys.stdout.write('\nlen of pnt_lst({})\n\n'.format(len(pnt_lst)))

    #generate dataframe with column
    df = count_structure_lister(allen_id_table, *pnt_lst)

    #save df
    nametosave = '{}_{}'.format(reg_vol.brainname, ch_type)
    excelfl = os.path.join(kwargs['outputdirectory'],
                           nametosave + '_structures_table.xlsx')
    df.to_excel(excelfl)
    print('\n\nfile saved as: {}'.format(excelfl))

    return excelfl