Example #1
                "{}_points_merged_to_registered_image_coronal_site1_maxip_z{}_{}.pdf"
                .format(brain, min(zrange_site1), max(zrange_site1))),
                        dpi=300)

            plt.imshow(maxip2[..., 0], "gist_yarg")
            plt.imshow(maxip2[..., 1], cmap, alpha=alpha)
            plt.title("Points overlaid on registered volume", fontsize="small")
            plt.savefig(os.path.join(
                dst,
                "{}_points_merged_to_registered_image_coronal_site2_maxip_z{}_{}.pdf"
                .format(brain, min(zrange_site2), max(zrange_site2))),
                        dpi=300)

            print("\n\ntook {} seconds to make merged maps for {}\n".format(
                time.time() - start, brain))

            #make allen structure LUT
            zyx_rois = zyx_rois_sag

            #convert to structure
            annotation_file = "/jukebox/LightSheetTransfer/atlas/allen_atlas/annotation_template_25_sagittal_forDVscans.tif"
            ann = tifffile.imread(annotation_file)
            points = transformed_pnts_to_allen_helper_func(list(zyx_rois),
                                                           ann,
                                                           order="ZYX")

            #make dataframe
            lut_path = "/jukebox/LightSheetTransfer/atlas/allen_atlas/allen_id_table.xlsx"
            df = count_structure_lister(lut_path, *points)
            df.to_excel(
                os.path.join(dst, "{}_allen_structures.xlsx".format(brain)))
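For intuition only: mapping ZYX points onto Allen structure IDs conceptually reduces to indexing the annotation volume at each coordinate, roughly as sketched below (placeholder shapes and values; the real helper additionally handles bounds checking and axis-order conventions).

import numpy as np

#illustrative-only sketch of the idea behind transformed_pnts_to_allen_helper_func:
#look up the annotation label at each (z, y, x) coordinate
ann = np.zeros((456, 528, 320), dtype=np.uint32)   #placeholder annotation volume
zyx_rois = np.array([[100, 200, 150], [120, 210, 160]])
ids = [ann[z, y, x] for z, y, x in zyx_rois.astype(int)]
print(ids)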
Example #2
zr = ann.shape[0]  #number of sagittal (z) planes in the annotation volume (presumed definition; the fragment starts mid-script)
#keep only right-hemisphere labels (second half of the z axis)
ann_right = np.zeros_like(ann)
ann_right[int(zr/2):,:,:] = ann[int(zr/2):,:,:]
#mirror image for the left hemisphere (used below)
ann_left = np.zeros_like(ann)
ann_left[:int(zr/2),:,:] = ann[:int(zr/2),:,:]

plt.imshow(ann_right[300])
plt.imshow(ann_left[400])
#%%
side = "right"
#grab inj vols
nonzeros = []
vols = [os.path.join(src, xx) for xx in os.listdir(src) 
        if xx[-7:] == "inj.tif" and "crus1" in xx]
for i,vol in enumerate(vols):
    print("\n*******"+os.path.basename(vol)+"*******\n")
    nz = np.nonzero(tifffile.imread(vol))
    nonzeros.append(list(zip(*nz))) #<-for pooled image
    pos = transformed_pnts_to_allen_helper_func(np.asarray(list(zip(*[nz[2], 
                  nz[1], nz[0]]))), ann_right)
    tdf = count_structure_lister(allen_id_table, *pos)
    if i == 0: 
        df = tdf.copy()
        countcol = "count" if "count" in df.columns else "cell_count"
        df.drop([countcol], axis=1, inplace=True)
    df[os.path.basename(vol[:-7])] = tdf[countcol]

df.to_csv(os.path.join(dst,"voxel_counts_%s.csv" % side))
print("\n\nCSV file of cell counts, saved as {}\n\n\n".format(os.path.join(dst,
  "voxel_counts_%s.csv" % side)))  

side = "left"
#grab inj vols
nonzeros = []
vols = [os.path.join(src, xx) for xx in os.listdir(src) if xx[-7:] == "inj.tif"]
Example #3
def pool_injections_for_analysis(**kwargs):
    
    inputlist = kwargs['inputlist']
    dst = kwargs['dst']; makedir(dst)
    injscale = kwargs['injectionscale'] if 'injectionscale' in kwargs else 1
    imagescale = kwargs['imagescale'] if 'imagescale' in kwargs else 1
    axes = kwargs['reorientation'] if 'reorientation' in kwargs else ('0','1','2')
    cmap = kwargs['colormap'] if 'colormap' in kwargs else 'plasma'
    id_table = kwargs['id_table'] if 'id_table' in kwargs else '/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx'
    save_tif = kwargs['save_tif'] if 'save_tif' in kwargs else False
    num_sites_to_keep = kwargs['num_sites_to_keep'] if 'num_sites_to_keep' in kwargs else 1
    nonzeros = []
    ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs['annotation']))
    if kwargs['crop']: ann = eval('ann{}'.format(kwargs['crop']))   
    allen_id_table=pd.read_excel(id_table)
    
    for i in range(len(inputlist)):
        impth = inputlist[i]
        animal = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(impth))))
        
        print('\n\n_______\n{}'.format(animal))
        
        print('  loading:\n     {}'.format(animal))
        im = tifffile.imread(impth)
            
        if kwargs['crop']: im = eval('im{}'.format(kwargs['crop']))#; print im.shape
        
        #reorient to coronal?
        
        #segment
        arr = find_site(im, thresh=kwargs['threshold'], filter_kernel=kwargs['filter_kernel'], num_sites_to_keep=num_sites_to_keep)*injscale
        if save_tif: tifffile.imsave(os.path.join(dst, '{}_inj.tif'.format(animal)), arr.astype('float32'))  #use the animal name; joining a full dirname here would produce an absolute path and discard dst
        
        #optional 'save_individual'
        if kwargs['save_individual']:
            im = im*imagescale
            a=np.concatenate((np.max(im, axis=0), np.max(arr.astype('uint16'), axis=0)), axis=1)
            b=np.concatenate((np.fliplr(np.rot90(np.max(fix_orientation(im, axes=axes), axis=0),k=3)), np.fliplr(np.rot90(np.max(fix_orientation(arr.astype('uint16'), axes=axes), axis=0),k=3))), axis=1)
            plt.figure()
            plt.imshow(np.concatenate((b,a), axis=0), cmap=cmap, alpha=1);  plt.axis('off')
            plt.savefig(os.path.join(dst,'{}'.format(animal)+'.pdf'), dpi=300, transparent=True)
            plt.close()

        #cell counts to csv
        print('   finding nonzero pixels for voxel counts...')      
        nz = np.nonzero(arr)
        nonzeros.append(list(zip(*nz))) #<-for pooled image; materialize the zip for Python 3
        pos = transformed_pnts_to_allen_helper_func(np.asarray(list(zip(*[nz[2], nz[1], nz[0]]))), ann)
        tdf = count_structure_lister(allen_id_table, *pos)
        if i == 0: 
            df = tdf.copy()
            countcol = 'count' if 'count' in df.columns else 'cell_count'
            df.drop([countcol], axis=1, inplace=True)
        df[animal] = tdf[countcol]
        
    df.to_csv(os.path.join(dst,'voxel_counts.csv'))
    print('\n\nCSV file of cell counts, saved as {}\n\n\n'.format(os.path.join(dst,'voxel_counts.csv')))  
            
    #condense nonzero pixels
    nzs = [str(x) for xx in nonzeros for x in xx] #this list has duplicates if two brains had the same voxel w label
    c = Counter(nzs)
    array = np.zeros(im.shape)
    print('Collecting nonzero pixels for pooled image...')
    tick = 0
    #generating pooled array where voxel value = total number of brains with that voxel as positive
    for k,v in c.items(): #Counter has no iteritems() in Python 3
        k = [int(xx) for xx in k.replace('(','').replace(')','').split(',')]
        array[k[0], k[1], k[2]] = int(v)
        tick+=1
        if tick % 50000 == 0: print('   {}'.format(tick))
        
    #load atlas and generate final figure
    print('Generating final figure...')      
    atlas = tifffile.imread(kwargs['atlas'])
    arr = fix_orientation(array, axes=axes)
    #cropping
    #if 'crop_atlas' not in kwargs:
    if kwargs['crop']: atlas = eval('atlas{}'.format(kwargs['crop']))
    atlas = fix_orientation(atlas, axes=axes)
    
    my_cmap = eval('plt.cm.{}(np.arange(plt.cm.RdBu.N))'.format(cmap))
    my_cmap[:1,:4] = 0.0  
    my_cmap = mpl.colors.ListedColormap(my_cmap)
    my_cmap.set_under('w')
    plt.figure()
    plt.imshow(np.max(atlas, axis=0), cmap='gray')
    plt.imshow(np.max(arr, axis=0), alpha=0.99, cmap=my_cmap); plt.colorbar(); plt.axis('off')
    dpi = int(kwargs['dpi']) if 'dpi' in kwargs else 300
    plt.savefig(os.path.join(dst,'heatmap.pdf'), dpi=dpi, transparent=True);
    plt.close()
    
    print('Saved as {}'.format(os.path.join(dst,'heatmap.pdf')))  
        
    return df
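As a usage note: a minimal, hypothetical invocation of the function above might look like the sketch below; every path and value is a placeholder, not taken from the original pipeline.

#hypothetical invocation of pool_injections_for_analysis; all paths/values below are placeholders
params = {
    "inputlist": ["/path/to/brain1/reg_to_atlas/result.1.tif",
                  "/path/to/brain2/reg_to_atlas/result.1.tif"],
    "dst": "/path/to/output",
    "injectionscale": 45000,
    "imagescale": 2,
    "reorientation": ("2", "0", "1"),  #view sagittal stacks coronally
    "crop": "[:,390:,:]",              #cerebellum crop for a sagittal atlas
    "threshold": 3,
    "filter_kernel": (3, 3, 3),
    "num_sites_to_keep": 1,
    "save_individual": True,
    "save_tif": False,
    "colormap": "plasma",
    "atlas": "/path/to/average_template_25_sagittal.tif",
    "annotation": "/path/to/annotation_25_sagittal.nrrd",
    "id_table": "/path/to/allen_id_table.xlsx",
}
df = pool_injections_for_analysis(**params)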
def transformed_pnts_to_allen(points_file,
                              ann,
                              ch_type="injch",
                              point_or_index=None,
                              allen_id_table_pth=False,
                              **kwargs):
    """function to take elastix point transform file and return anatomical locations of those points
    point_or_index=None/point/index: determines which transformix output to use: point is more accurate, index is pixel value(?)
    Elastix uses the xyz convention rather than the zyx numpy convention
    NOTE: this modification does not output out a single excel file, but a data frame
    
    Inputs
    -----------
    points_file = path to the transformix output points file
    ch_type = "injch" or "cellch"
    allen_id_table_pth (optional) path to allen_id_table
    ann = annotation file
    
    Returns
    -----------
    df = data frame containing voxel counts
    
    """
    kwargs = load_kwargs(**kwargs)
    #####inputs
    assert type(points_file) == str

    if point_or_index == None:
        point_or_index = "OutputPoint"
    elif point_or_index == "point":
        point_or_index = "OutputPoint"
    elif point_or_index == "index":
        point_or_index = "OutputIndexFixed"

    #
    vols = kwargs["volumes"]
    reg_vol = [xx for xx in vols if xx.ch_type == "regch"][0]

    ####load files
    if not allen_id_table_pth:
        allen_id_table = pd.read_excel(
            os.path.join(reg_vol.packagedirectory,
                         "supp_files/allen_id_table.xlsx")
        )  ##use for determining neuroanatomical locations according to allen
    else:
        allen_id_table = pd.read_excel(allen_id_table_pth)


    #get points
    with open(points_file, "r") as f:
        lines = f.readlines()

    #####populate post-transformed array of contour centers
    sys.stdout.write("\n\n{} points detected\n\n".format(len(lines)))
    arr = np.empty((len(lines), 3))
    for i in range(len(lines)):
        arr[i,
            ...] = lines[i].split()[lines[i].split().index(point_or_index) +
                                    3:lines[i].split().index(point_or_index) +
                                    6]  #x,y,z

    pnts = transformed_pnts_to_allen_helper_func(arr, ann)
    pnt_lst = [xx for xx in pnts if xx != 0]

    #check to see if any points were found
    if len(pnt_lst) == 0:
        raise ValueError("pnt_lst is empty")
    else:
        sys.stdout.write("\nlen of pnt_lst({})\n\n".format(len(pnt_lst)))

    #generate dataframe with column
    df = count_structure_lister(allen_id_table, *pnt_lst)

    return df
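For reference, a small standalone sketch of the parsing step used above: one transformix output line is split into tokens and the three values after "OutputPoint" are taken as x, y, z (the sample line is illustrative, not from the original data).

import numpy as np

#illustrative transformix output line (not from the original data)
line = ("Point 0 ; InputIndex = [ 10 20 30 ] ; InputPoint = [ 10.0 20.0 30.0 ] ; "
        "OutputIndexFixed = [ 12 22 32 ] ; OutputPoint = [ 12.4 22.1 31.9 ] ; "
        "Deformation = [ 2.4 2.1 1.9 ]")

tokens = line.split()
idx = tokens.index("OutputPoint")
#the three coordinates sit after "OutputPoint", "=", "[" -- hence the +3:+6 slice used above
xyz = np.asarray(tokens[idx + 3:idx + 6], dtype=float)
print(xyz)  #[12.4 22.1 31.9]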
def pool_injections_for_analysis(**kwargs):
    """Function to pool several injection sites. Assumes that the basic registration using this software has been run.
       
    Inputs
    -----------
    kwargs:
      "inputlist": inputlist, #list of folders generated previously from software
      "inputtype": "main_folder", "tiff" #specify the type of input. main_folder is the lightsheetpackage"s folder, tiff is the file 
      to use for injection site segmentation
      "channel": "01", 
      "channel_type": "injch",
      "filter_kernel": (3,3,3), #gaussian blur in pixels (if registered to ABA then 1px likely is 25um)
      "threshold": 3 (int, value to use for thresholding, this value represents the number of stand devs above the mean of the gblurred
      image)
      "num_sites_to_keep": int, number of injection sites to keep, useful if multiple distinct sites
      "injectionscale": 45000, #use to increase intensity of injection site visualizations generated - DOES NOT AFFECT DATA
      "imagescale": 2, #use to increase intensity of background  site visualizations generated - DOES NOT AFFECT DATA
      "reorientation": ("2","0","1"), #use to change image orientation for visualization only
      "crop": #use to crop volume, values below assume horizontal imaging and sagittal atlas
                False
                cerebellum: "[:,390:,:]"
                caudal midbrain: "[:,300:415,:]"
                midbrain: "[:,215:415,:]"
                thalamus: "[:,215:345,:]"
                anterior cortex: "[:,:250,:]"
      
      "dst": "/home/wanglab/Downloads/test", #save location
      "save_individual": True, #optional to save individual images, useful to inspect brains, which you can then remove bad brains from 
      list and rerun function
      "colormap": "plasma", 
      "atlas": "/jukebox/wang/pisano/Python/allenatlas/average_template_25_sagittal_forDVscans.tif",
      "annotation":"/jukebox/wang/pisano/Python/allenatlas/annotation_25_ccf2015_forDVscans.nrrd",
      "id_table": "/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx",
      
      Optional:
          ----------
          "save_array": path to folder to save out numpy array per brain of binarized detected site
          "save_tif": saves out tif volume per brain of binarized detected site
          "dpi": dots per square inch to save at
          "crop_atlas":(notfunctional) similiar to crop. Use when you would like to greatly restrain the cropping for injsite detection,
          but you want to display a larger area of overlay.
                      this will 0 pad the injection sites to accomodate the difference in size. Note this MUST be LARGER THAN crop.
          
      Returns
      ----------------
      a pooled image consisting of the max intensity projection of the reorientations provided in kwargs.
      a list of structures (csv file) with pixel counts, pooling across brains.
      if save_individual is set, individual images are saved, useful for inspection and/or visualization
    """

    inputlist = kwargs["inputlist"]
    inputtype = kwargs["inputtype"] if "inputtype" in kwargs else "main_folder"
    dst = kwargs["dst"]
    makedir(dst)
    injscale = kwargs["injectionscale"] if "injectionscale" in kwargs else 1
    imagescale = kwargs["imagescale"] if "imagescale" in kwargs else 1
    axes = kwargs["reorientation"] if "reorientation" in kwargs else ("0", "1",
                                                                      "2")
    cmap = kwargs["colormap"] if "colormap" in kwargs else "plasma"
    id_table = kwargs[
        "id_table"] if "id_table" in kwargs else "/jukebox/LightSheetTransfer/atlas/ls_id_table_w_voxelcounts.xlsx"
    save_array = kwargs["save_array"] if "save_array" in kwargs else False
    save_tif = kwargs["save_tif"] if "save_tif" in kwargs else False
    num_sites_to_keep = kwargs[
        "num_sites_to_keep"] if "num_sites_to_keep" in kwargs else 1
    nonzeros = []
    ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs["annotation"]))
    if kwargs["crop"]: ann = eval("ann{}".format(kwargs["crop"]))
    allen_id_table = pd.read_excel(id_table)

    for i in range(len(inputlist)):
        pth = inputlist[i]
        print("\n\n_______\n{}".format(os.path.basename(pth)))
        #find the volume by loading the param dictionary generated using the light-sheet package
        if inputtype == "main_folder":
            dct = load_kwargs(pth)
            #print dct["AtlasFile"]
            try:
                vol = [
                    xx for xx in dct["volumes"]
                    if xx.ch_type in kwargs["channel_type"]
                    and xx.channel == kwargs["channel"]
                ][0]
            except:
                vol = [xx for xx in dct["volumes"] if xx.ch_type != "regch"][0]
                #risky for 3 channel images, but needed if param dict initially is mislabelled
            #done to account for different versions
            if os.path.exists(vol.ch_to_reg_to_atlas + "/result.1.tif"):
                impth = vol.ch_to_reg_to_atlas + "/result.1.tif"
            elif os.path.exists(vol.ch_to_reg_to_atlas
                                ) and vol.ch_to_reg_to_atlas[-4:] == ".tif":
                impth = vol.ch_to_reg_to_atlas
            elif os.path.exists(
                    os.path.dirname(vol.ch_to_reg_to_atlas) + "/result.1.tif"):
                impth = os.path.dirname(
                    vol.ch_to_reg_to_atlas) + "/result.1.tif"
            elif os.path.exists(
                    os.path.dirname(vol.ch_to_reg_to_atlas) + "/result.tif"):
                impth = os.path.dirname(vol.ch_to_reg_to_atlas) + "/result.tif"
        else:  #"tiff", use the file given
            impth = pth

        print("  loading:\n     {}".format(pth))
        im = tifffile.imread(impth)

        if kwargs["crop"]:
            im = eval("im{}".format(kwargs["crop"]))  #; print im.shape

        #segment
        arr = find_site(im,
                        thresh=kwargs["threshold"],
                        filter_kernel=kwargs["filter_kernel"],
                        num_sites_to_keep=num_sites_to_keep) * injscale
        if save_array:
            np.save(
                os.path.join(save_array,
                             "{}".format(os.path.basename(pth)) + ".npy"),
                arr.astype("float32"))
        if save_tif:
            tifffile.imsave(
                os.path.join(save_tif,
                             "{}".format(os.path.basename(pth)) + ".tif"),
                arr.astype("float32"))

        #optional "save_individual"
        if kwargs["save_individual"]:
            im = im * imagescale
            a = np.concatenate(
                (np.max(im, axis=0), np.max(arr.astype("uint16"), axis=0)),
                axis=1)
            b = np.concatenate((np.fliplr(
                np.rot90(np.max(fix_orientation(im, axes=axes), axis=0), k=3)),
                                np.fliplr(
                                    np.rot90(np.max(fix_orientation(
                                        arr.astype("uint16"), axes=axes),
                                                    axis=0),
                                             k=3))),
                               axis=1)
            plt.figure()
            plt.imshow(np.concatenate((b, a), axis=0), cmap=cmap, alpha=1)
            plt.axis("off")
            plt.savefig(os.path.join(
                dst, "{}".format(os.path.basename(pth)) + ".pdf"),
                        dpi=300,
                        transparent=True)
            plt.close()

        #cell counts to csv
        print("   finding nonzero pixels for voxel counts...")
        nz = np.nonzero(arr)
        nonzeros.append(list(zip(*nz)))  #<-for pooled image
        pos = transformed_pnts_to_allen_helper_func(
            np.asarray(list(zip(*[nz[2], nz[1], nz[0]]))), ann)
        tdf = count_structure_lister(allen_id_table, *pos)
        if i == 0:
            df = tdf.copy()
            countcol = "count" if "count" in df.columns else "cell_count"
            df.drop([countcol], axis=1, inplace=True)
        df[os.path.basename(pth)] = tdf[countcol]

    df.to_csv(os.path.join(dst, "voxel_counts.csv"), index=False)
    print("\n\nCSV file of cell counts, saved as {}\n\n\n".format(
        os.path.join(dst, "voxel_counts.csv")))

    #condense nonzero pixels
    nzs = [
        str(x) for xx in nonzeros for x in xx
    ]  #this list has duplicates if two brains had the same voxel w label
    c = Counter(nzs)
    array = np.zeros(im.shape)
    print("Collecting nonzero pixels for pooled image...")
    tick = 0
    #generating pooled array where voxel value = total number of brains with that voxel as positive
    for k, v in c.items():
        k = [int(xx) for xx in k.replace("(", "").replace(")", "").split(",")]
        array[k[0], k[1], k[2]] = int(v)
        tick += 1
        if tick % 50000 == 0: print("   {}".format(tick))

    #load atlas and generate final figure
    print("Generating final figure...")
    atlas = tifffile.imread(kwargs["atlas"])
    arr = fix_orientation(array, axes=axes)
    #cropping
    if kwargs["crop"]: atlas = eval("atlas{}".format(kwargs["crop"]))
    atlas = fix_orientation(atlas, axes=axes)

    my_cmap = eval("plt.cm.{}(np.arange(plt.cm.RdBu.N))".format(cmap))
    my_cmap[:1, :4] = 0.0
    my_cmap = mpl.colors.ListedColormap(my_cmap)
    my_cmap.set_under("w")
    plt.figure()
    plt.imshow(np.max(atlas, axis=0), cmap="gray")
    plt.imshow(np.max(arr, axis=0), alpha=0.99, cmap=my_cmap)
    plt.colorbar()
    plt.axis("off")
    dpi = int(kwargs["dpi"]) if "dpi" in kwargs else 300
    plt.savefig(os.path.join(dst, "heatmap.pdf"), dpi=dpi, transparent=True)
    plt.close()

    print("Saved as {}".format(os.path.join(dst, "heatmap.pdf")))

    return df
def pool_injections_for_analysis(**kwargs):
    '''Function to pool several injection sites. Assumes that the basic registration using this software has been run.
    
   
    Inputs
    -----------
    kwargs:
      'inputlist': inputlist, #list of folders generated previously from software
      'channel': '01', 
      'channel_type': 'injch',
      'filter_kernel': (3,3,3), #gaussian blur in pixels (if registered to ABA then 1px likely is 25um)
      'threshold': 3 (int, value to use for thresholding; this value represents the number of standard deviations above the mean of the Gaussian-blurred image)
      'num_sites_to_keep': int, number of injection sites to keep, useful if multiple distinct sites
      'injectionscale': 45000, #use to increase intensity of injection site visualizations generated - DOES NOT AFFECT DATA
      'imagescale': 2, #use to increase intensity of background  site visualizations generated - DOES NOT AFFECT DATA
      'reorientation': ('2','0','1'), #use to change image orientation for visualization only
      'crop': #use to crop volume, values below assume horizontal imaging and sagittal atlas
                False
                cerebellum: '[:,390:,:]'
                caudal midbrain: '[:,300:415,:]'
                midbrain: '[:,215:415,:]'
                thalamus: '[:,215:345,:]'
                anterior cortex: '[:,:250,:]'
      
      'dst': '/home/wanglab/Downloads/test', #save location
      'save_individual': True, #optionally save individual images, useful for inspecting brains; bad brains can then be removed from the list and the function rerun
      'colormap': 'plasma', 
      'atlas': '/jukebox/wang/pisano/Python/allenatlas/average_template_25_sagittal_forDVscans.tif',
      'annotation':'/jukebox/wang/pisano/Python/allenatlas/annotation_25_ccf2015_forDVscans.nrrd',
      'id_table': '/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx',
      
      Optional:
          ----------
          'save_array': path to folder to save out numpy array per brain of binarized detected site
          'save_tif': saves out tif volume per brain of binarized detected site
          'dpi': dots per square inch to save at
          'crop_atlas': (not functional) similar to crop. Use when you would like to greatly restrict the cropping for injection site detection, but you want to display a larger area of overlay.
                      this will zero-pad the injection sites to accommodate the difference in size. Note this MUST be LARGER THAN crop.
          
      Returns
      ----------------
      a pooled image consisting of the max intensity projection of the reorientations provided in kwargs.
      a list of structures (csv file) with pixel counts, pooling across brains.
      if save_individual is set, individual images are saved, useful for inspection and/or visualization
    '''

    inputlist = kwargs['inputlist']
    dst = kwargs['dst']
    makedir(dst)
    injscale = kwargs['injectionscale'] if 'injectionscale' in kwargs else 1
    imagescale = kwargs['imagescale'] if 'imagescale' in kwargs else 1
    axes = kwargs['reorientation'] if 'reorientation' in kwargs else ('0', '1',
                                                                      '2')
    cmap = kwargs['colormap'] if 'colormap' in kwargs else 'plasma'
    id_table = kwargs[
        'id_table'] if 'id_table' in kwargs else '/jukebox/temp_wang/pisano/Python/lightsheet/supp_files/allen_id_table.xlsx'
    count_threshold = kwargs[
        'count_threshold'] if 'count_threshold' in kwargs else 10
    save_array = kwargs['save_array'] if 'save_array' in kwargs else False
    save_tif = kwargs['save_tif'] if 'save_tif' in kwargs else False
    num_sites_to_keep = kwargs[
        'num_sites_to_keep'] if 'num_sites_to_keep' in kwargs else 1
    nonzeros = []
    ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs['annotation']))
    if kwargs['crop']: ann = eval('ann{}'.format(kwargs['crop']))
    allen_id_table = pd.read_excel(id_table)

    for i in range(len(inputlist)):
        pth = inputlist[i]
        print('\n\n_______\n{}'.format(os.path.basename(pth)))
        dct = load_kwargs(pth)
        #print dct['AtlasFile']
        try:
            vol = [
                xx for xx in dct['volumes']
                if xx.ch_type == kwargs['channel_type']
                and xx.channel == kwargs['channel']
            ][0]
        except:
            vol = [
                xx for xx in dct['volumes']
                if xx.ch_type == "cellch" and xx.channel == kwargs['channel']
            ][0]
        #done to account for different versions
        if os.path.exists(vol.ch_to_reg_to_atlas + '/result.1.tif'):
            impth = vol.ch_to_reg_to_atlas + '/result.1.tif'
        elif os.path.exists(vol.ch_to_reg_to_atlas
                            ) and vol.ch_to_reg_to_atlas[-4:] == '.tif':
            impth = vol.ch_to_reg_to_atlas
        elif os.path.exists(
                os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.1.tif'):
            impth = os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.1.tif'
        elif os.path.exists(
                os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.tif'):
            impth = os.path.dirname(vol.ch_to_reg_to_atlas) + '/result.tif'

        print('  loading:\n     {}'.format(pth))
        im = tifffile.imread(impth)

        if kwargs['crop']:
            im = eval('im{}'.format(kwargs['crop']))  #; print im.shape

        #segment
        arr = find_site(im,
                        thresh=kwargs['threshold'],
                        filter_kernel=kwargs['filter_kernel'],
                        num_sites_to_keep=num_sites_to_keep) * injscale
        if save_array:
            np.save(
                os.path.join(dst, '{}'.format(os.path.basename(pth)) + '.npy'),
                arr.astype('float32'))
        if save_tif:
            tifffile.imsave(
                os.path.join(dst, '{}'.format(os.path.basename(pth)) + '.tif'),
                arr.astype('float32'))

        #optional 'save_individual'
        if kwargs['save_individual']:
            im = im * imagescale
            a = np.concatenate(
                (np.max(im, axis=0), np.max(arr.astype('uint16'), axis=0)),
                axis=1)
            b = np.concatenate((np.fliplr(
                np.rot90(np.max(fix_orientation(im, axes=axes), axis=0), k=3)),
                                np.fliplr(
                                    np.rot90(np.max(fix_orientation(
                                        arr.astype('uint16'), axes=axes),
                                                    axis=0),
                                             k=3))),
                               axis=1)
            plt.figure()
            plt.imshow(np.concatenate((b, a), axis=0), cmap=cmap, alpha=1)
            plt.axis('off')
            plt.savefig(os.path.join(
                dst, '{}'.format(os.path.basename(pth)) + '.pdf'),
                        dpi=300,
                        transparent=True)
            plt.close()

        #cell counts to csv
        print('   finding nonzero pixels for voxel counts...')
        nz = np.nonzero(arr)
        nonzeros.append(list(zip(*nz)))  #<-for pooled image; materialize the zip for Python 3
        pos = transformed_pnts_to_allen_helper_func(
            np.asarray(list(zip(*[nz[2], nz[1], nz[0]]))), ann)
        tdf = count_structure_lister(allen_id_table, *pos)
        if i == 0:
            df = tdf.copy()
            countcol = 'count' if 'count' in df.columns else 'cell_count'
            df.drop([countcol], axis=1, inplace=True)
        df[os.path.basename(pth)] = tdf[countcol]

    df.to_csv(os.path.join(dst, 'voxel_counts.csv'))
    print('\n\nCSV file of cell counts, saved as {}\n\n\n'.format(
        os.path.join(dst, 'voxel_counts.csv')))

    #condense nonzero pixels
    nzs = [
        str(x) for xx in nonzeros for x in xx
    ]  #this list has duplicates if two brains had the same voxel w label
    c = Counter(nzs)
    array = np.zeros(im.shape)
    print('Collecting nonzero pixels for pooled image...')
    tick = 0
    #generating pooled array where voxel value = total number of brains with that voxel as positive
    for k, v in c.items():  #Counter has no iteritems() in Python 3
        k = [int(xx) for xx in k.replace('(', '').replace(')', '').split(',')]
        array[k[0], k[1], k[2]] = int(v)
        tick += 1
        if tick % 50000 == 0: print('   {}'.format(tick))

    #load atlas and generate final figure
    print('Generating final figure...')
    atlas = tifffile.imread(kwargs['atlas'])
    arr = fix_orientation(array, axes=axes)
    #cropping
    #if 'crop_atlas' not in kwargs:
    if kwargs['crop']: atlas = eval('atlas{}'.format(kwargs['crop']))
    atlas = fix_orientation(atlas, axes=axes)
    #elif 'crop_atlas' in kwargs:
    #if kwargs['crop_atlas']: atlas = eval('atlas{}'.format(kwargs['crop_atlas']))
    #atlas = fix_orientation(atlas, axes=axes)
    #accomodate for size difference
    #d0,d1,d2 = [(x-y)/2 for x,y in zip(atlas.shape, arr.shape)]
    #arr = np.pad(arr,((d0,d0),(d1,d1),(d2,d2)), mode='constant')
    ##allows for a single pixel shift - if needed
    #d0,d1,d2 = [(x-y) for x,y in zip(atlas.shape, arr.shape)]
    #arr = np.pad(arr,((d0,0),(d1,0),(d2,0)), mode='constant')

    my_cmap = eval('plt.cm.{}(np.arange(plt.cm.RdBu.N))'.format(cmap))
    my_cmap[:1, :4] = 0.0
    my_cmap = mpl.colors.ListedColormap(my_cmap)
    my_cmap.set_under('w')
    plt.figure()
    plt.imshow(np.max(atlas, axis=0), cmap='gray')
    plt.imshow(np.max(arr, axis=0), alpha=0.99, cmap=my_cmap)
    plt.colorbar()
    plt.axis('off')
    dpi = int(kwargs['dpi']) if 'dpi' in kwargs else 300
    plt.savefig(os.path.join(dst, 'heatmap.pdf'), dpi=dpi, transparent=True)
    plt.close()

    print('Saved as {}'.format(os.path.join(dst, 'heatmap.pdf')))

    return df
Example #7
tifffile.imsave(os.path.join(brain, "%s_probe_track_overlay.tif" % brainname),
                merged.astype("uint16"))

#export coordinates
if os.path.exists(
        os.path.join(brain, "{}_allen_coordinates.txt".format(brainname))):
    os.remove(os.path.join(brain,
                           "{}_allen_coordinates.txt".format(brainname)))
with open(os.path.join(brain, "{}_allen_coordinates.txt".format(brainname)),
          "a") as txt:
    txt.write("\nAtlas coordinates (zyx) in the saggital orientation:\n%s\n" %
              pnts_sag)
    txt.write("\nAtlas coordinates (zyx) in the coronal orientation:\n%s" %
              pnts)

#convert to structure
annotation_file = kwargs["annotationfile"]
ann = tifffile.imread(annotation_file)
zpnts, ypnts, xpnts = np.nonzero(fix_orientation(
    track, ("1", "2", "0")))  #make it back to sagittal for mapping
points = transformed_pnts_to_allen_helper_func([(zi, ypnts[i], xpnts[i])
                                                for i, zi in enumerate(zpnts)],
                                               ann,
                                               order="ZYX")

#make dataframe
lut_path = "/jukebox/LightSheetTransfer/atlas/ls_id_table_w_voxelcounts.xlsx"  #corresponds with the atlas, change if changing atlas
df = count_structure_lister(lut_path, *points)
df.to_excel(os.path.join(brain, "%s_allen_structures.xlsx" % brainname))
def pool_injections_for_analysis(**kwargs):
    """

    Parameters
    ----------
    **kwargs : parameter dictionary consisting of
        'inputlist' --> list of strings; path to image to be segmented
        'filter_kernel' --> tuple; 3D Gaussian kernel used for segmentation
        (see https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.gaussian_filter.html)
        'threshold' --> int; threshold for making final segmentation volume in find_site() function
        'num_sites_to_keep' --> int; number of segmentation sites to keep, depends on injection sites in volume
        'injectionscale' --> int; used for visualization, typically can be 45000 
        'imagescale' --> int; used for visualization, typically can be 3
        'reorientation' --> tuple of strings; reorientation for visualization, sagittal to coronal=('2','0','1'),
                            sagittal to horizontal=('2','1','0')
                            default maintains current orientation
        'crop' --> string; if volume needs to be cropped before segmentation; for cerebellum, you can typically
                    crop in y as such = '[:, 450:, :]'
                    default does not crop
        'crop_atlas' --> string; if atlas needs to be cropped the same way for final 2D visualization
                         default does not crop
        'dst' --> destination directory
        'save_individual' --> boolean; if you want to save 2D image of segmentation for each brain
        'save_tif' --> boolean; if you want to save the segmented volume for each brain for later use
        'colormap' --> string; matplotlib colormap used for visualization, default is plasma
        'atlas' --> string; path to atlas file the volumes are registered to
        'annotation' --> string; path to annotation file corresponding to the atlas
        'id_table' --> annotation look-up table corresponding to the annotation volume
                        default is '/jukebox/LightSheetTransfer/atlas/allen_atlas/allen_id_table.xlsx'

    Returns
    -------
    df : pandas dataframe
        pandas dataframe of brain (column) x structures (row) of injection voxels detected

    """

    inputlist = kwargs["inputlist"]
    dst = kwargs["dst"]
    makedir(dst)
    injscale = kwargs["injectionscale"] if "injectionscale" in kwargs else 1
    imagescale = kwargs["imagescale"] if "imagescale" in kwargs else 1
    axes = kwargs["reorientation"] if "reorientation" in kwargs else ("0", "1",
                                                                      "2")
    cmap = kwargs["colormap"] if "colormap" in kwargs else "plasma"
    id_table = kwargs[
        "id_table"] if "id_table" in kwargs else "/jukebox/LightSheetTransfer/atlas/allen_atlas/allen_id_table.xlsx"
    save_tif = kwargs["save_tif"] if "save_tif" in kwargs else False
    num_sites_to_keep = kwargs[
        "num_sites_to_keep"] if "num_sites_to_keep" in kwargs else 1
    nonzeros = []
    ann = sitk.GetArrayFromImage(sitk.ReadImage(kwargs["annotation"]))
    if kwargs["crop"]: ann = eval("ann{}".format(kwargs["crop"]))
    allen_id_table = pd.read_excel(id_table)

    for i in range(len(inputlist)):
        impth = inputlist[i]
        animal = os.path.basename(
            os.path.dirname(os.path.dirname(os.path.dirname(impth))))

        print("\n\n_______\n{}".format(animal))

        print("  loading:\n     {}".format(animal))
        im = tifffile.imread(impth)

        if kwargs["crop"]:
            im = eval("im{}".format(kwargs["crop"]))  #; print im.shape

        #segment
        arr = find_site(im,
                        thresh=kwargs["threshold"],
                        filter_kernel=kwargs["filter_kernel"],
                        num_sites_to_keep=num_sites_to_keep) * injscale
        if save_tif:
            tifffile.imsave(
                os.path.join(dst, "{}".format(animal) + "_inj.tif"),
                arr.astype("float32"))

        #optional "save_individual"
        if kwargs["save_individual"]:
            im = im * imagescale
            a = np.concatenate(
                (np.max(im, axis=0), np.max(arr.astype("uint16"), axis=0)),
                axis=1)
            b = np.concatenate((np.fliplr(
                np.rot90(np.max(fix_orientation(im, axes=axes), axis=0), k=3)),
                                np.fliplr(
                                    np.rot90(np.max(fix_orientation(
                                        arr.astype("uint16"), axes=axes),
                                                    axis=0),
                                             k=3))),
                               axis=1)
            plt.figure()
            plt.imshow(np.concatenate((b, a), axis=0), cmap=cmap, alpha=1)
            plt.axis("off")
            plt.savefig(os.path.join(dst, "{}".format(animal) + ".pdf"),
                        dpi=300,
                        transparent=True)
            plt.close()

        #cell counts to csv
        print("   finding nonzero pixels for voxel counts...")
        nz = np.nonzero(arr)
        nonzeros.append(list(zip(*nz)))  #<-for pooled image
        pos = transformed_pnts_to_allen_helper_func(
            np.asarray(list(zip(*[nz[2], nz[1], nz[0]]))), ann)
        tdf = count_structure_lister(allen_id_table, *pos)
        if i == 0:
            df = tdf.copy()
            countcol = "count" if "count" in df.columns else "cell_count"
            df.drop([countcol], axis=1, inplace=True)
        df[animal] = tdf[countcol]

    df.to_csv(os.path.join(dst, "voxel_counts.csv"))
    print("\n\nCSV file of cell counts, saved as {}\n\n\n".format(
        os.path.join(dst, "voxel_counts.csv")))

    #load atlas and generate final figure
    print("Generating final figure...")
    atlas = tifffile.imread(kwargs["atlas"])
    #cropping
    #if "crop_atlas" not in kwargs:
    if kwargs["crop_atlas"]:
        atlas = eval("atlas{}".format(kwargs["crop_atlas"]))

    #condense nonzero pixels
    nzs = [
        str(x) for xx in nonzeros for x in xx
    ]  #this list has duplicates if two brains had the same voxel w label
    c = Counter(nzs)
    array = np.zeros_like(atlas)
    print("Collecting nonzero pixels for pooled image...")
    tick = 0
    #generating pooled array where voxel value = total number of brains with that voxel as positive
    for k, v in c.items():
        k = [int(xx) for xx in k.replace("(", "").replace(")", "").split(",")]
        array[k[0], k[1], k[2]] = int(v)
        tick += 1
        if tick % 50000 == 0: print("   {}".format(tick))

    #reslice
    atlas = fix_orientation(atlas, axes=axes)
    arr = fix_orientation(array, axes=axes)

    my_cmap = eval("plt.cm.{}(np.arange(plt.cm.RdBu.N))".format(cmap))
    my_cmap[:1, :4] = 0.0
    my_cmap = mpl.colors.ListedColormap(my_cmap)
    my_cmap.set_under("w")
    plt.figure()
    plt.imshow(np.max(atlas, axis=0), cmap="gray")
    plt.imshow(np.max(arr, axis=0), alpha=0.99, cmap=my_cmap)
    cb = plt.colorbar()
    cb.set_label("# Brains expressing", fontsize="small", labelpad=3)
    cb.ax.tick_params(labelsize="x-small")
    cb.ax.set_visible(True)
    plt.axis("off")
    plt.savefig(os.path.join(dst, "heatmap.pdf"), dpi=300, transparent=True)
    plt.close()

    print("Saved as {}".format(os.path.join(dst, "heatmap.pdf")))

    return df
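fix_orientation comes from the lightsheet package; as a rough sketch (an assumption about its behavior, not its implementation), a reorientation tuple such as ('2','0','1') can be read as a NumPy axis permutation:

import numpy as np

def reorder_axes(vol, axes=("0", "1", "2")):
    #illustrative stand-in for fix_orientation: permute axes given as a tuple of string indices
    return np.transpose(vol, [int(ax) for ax in axes])

vol = np.zeros((528, 320, 456))             #placeholder sagittal volume (z, y, x)
coronal = reorder_axes(vol, ("2", "0", "1"))
print(coronal.shape)                        #(456, 528, 320)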