Example #1
    def __init__(self, all_folders):

        self.all_folders = all_folders
        self.all_save_folders = List_Op.List_Annex(self.all_folders, ['Results'])
        self.Aligned_frame_folders = List_Op.List_Annex(self.all_save_folders, ['Aligned_Frames'])
        for i in range(len(self.all_save_folders)):
            OS_Tools.mkdir(self.all_save_folders[i])
            OS_Tools.mkdir(self.Aligned_frame_folders[i])
        self.Before_Align_Tif_Name = []
        for i in range(len(self.all_folders)):
            current_run_tif = OS_Tools.Get_File_Name(self.all_folders[i])
            self.Before_Align_Tif_Name.append(current_run_tif)
Example #2
def Intensity_Selector(data_folder,
                       graph_type='.tif',
                       mode='biggest',
                       propotion=0.05,
                       list_write=True):
    '''
    Select the frames with the biggest or smallest average intensity (a.i.), and generate an averaged graph.

    Parameters
    ----------
    data_folder : (str)
        Data folder.
    graph_type : (str), optional
        File type of input graphs. The default is '.tif'.
    mode : ('biggest' or 'smallest'), optional
        Type of frame selection. The default is 'biggest'.
    propotion : (float), optional
        Proportion of frames to select. The default is 0.05.
    list_write : (bool), optional
        Whether we write down the graph intensity data. The default is True.

    Returns
    -------
    averaged_graph : (2D Array)
        Averaged graph of selected frames.
    selected_graph_name : (ND List)
        List of selected graph names.

    '''
    all_graph_name = np.array(
        OS_Tools.Get_File_Name(data_folder, file_type=graph_type))
    graph_Num = len(all_graph_name)
    bright_data = np.zeros(graph_Num, dtype='f8')
    for i in range(graph_Num):
        current_graph = cv2.imread(all_graph_name[i], -1)
        bright_data[i] = np.mean(current_graph)
        # write bright data if required.
    if list_write == True:
        OS_Tools.Save_Variable(data_folder, 'brightness_info', bright_data)
    # Then select given mode frames.
    used_graph_num = int(graph_Num * propotion)
    if mode == 'biggest':
        used_graph_id = np.argpartition(bright_data,
                                        -used_graph_num)[-used_graph_num:]
    elif mode == 'smallest':
        used_graph_id = np.argpartition(bright_data,
                                        used_graph_num)[0:used_graph_num]
    else:
        raise IOError('Invalid selection mode, check please.\n')
    selected_graph_name = all_graph_name[used_graph_id]
    averaged_graph = Graph_Tools.Average_From_File(selected_graph_name)
    return averaged_graph, selected_graph_name
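# A minimal usage sketch (not from the original source): the folder path below is a
# hypothetical placeholder, and Graph_Tools is assumed to be imported as in the other examples.
bright_avg, bright_names = Intensity_Selector(r'G:\Test_Data\Example_Run\Results\Aligned_Frames',
                                              mode='biggest',
                                              propotion=0.05)
bright_avg = Graph_Tools.Clip_And_Normalize(bright_avg, clip_std=5)
Graph_Tools.Show_Graph(bright_avg, 'Brightest_Frames_Average',
                       r'G:\Test_Data\Example_Run\Results')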
Example #3
def Cell_Find(run_folder):
    output_folder = run_folder+r'\Results'
    aligned_frame_folder = output_folder+r'\Aligned_Frames'
    all_tif_name = OS_Tools.Get_File_Name(aligned_frame_folder)
    Stim_Frame_Dic = OS_Tools.Load_Variable(output_folder,'Stim_Frame_Align.pkl')
    on_off_graph,Finded_Cells = On_Off_Cell_Finder(all_tif_name, Stim_Frame_Dic,shape_boulder=[20,20,20,35],filter_method = 'Gaussian',LP_Para = ((5,5),1.5))
    cell_folder = output_folder+r'\Cells'
    OS_Tools.Save_Variable(cell_folder, 'Finded_Cells', Finded_Cells,'.cell')
    Graph_tools.Show_Graph(on_off_graph, 'on-off_graph', cell_folder)
    all_keys = list(Finded_Cells.keys())
    all_keys.remove('All_Cell_Information')
    for i in range(len(all_keys)):
        Graph_tools.Show_Graph(Finded_Cells[all_keys[i]], all_keys[i], cell_folder)
    return True
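# A minimal usage sketch (assumption, not from the original source): the run folder is a
# hypothetical placeholder and must already contain aligned frames and Stim_Frame_Align.pkl
# under its Results folder.
Cell_Find(r'G:\Test_Data\Example_Run')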
Example #4
    def After_Align_Average(self):
        """
        This function will generate the after-align average graph of each Run and the Global average, and then save them.
        
        Returns
        -------
        None.

        """
        print('Aligning done. ')
        self.After_Align_Graphs = {} # Initialize a dictionary, will record all aligned graphs averages and graph nums.
        # Fill After Align Graph Dictionary first
        total_graph_num = 0
        for i in range(len(self.Aligned_frame_folders)):
            current_run_names = OS_Tools.Get_File_Name(self.Aligned_frame_folders[i])
            temp_average = Graph_Tools.Average_From_File(current_run_names) # This will generate an average graph in 'f8' format.
            current_graph_aligned = Graph_Tools.Clip_And_Normalize(temp_average,clip_std = 5)
            Graph_Tools.Show_Graph(current_graph_aligned, 'Run_Average_After_Align', self.all_save_folders[i])
            current_run_Frame_Num = len(current_run_names)
            total_graph_num += current_run_Frame_Num
            self.After_Align_Graphs[i] = (current_graph_aligned,current_run_Frame_Num)
        global_average_after_align = np.zeros(np.shape(current_graph_aligned),dtype = 'f8')
        
        # Then calculate global average in each run.
        for i in range(len(self.all_save_folders)):
            global_average_after_align += self.After_Align_Graphs[i][0].astype('f8')*self.After_Align_Graphs[i][1]/total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(global_average_after_align,clip_std = 5)
        
        # Then save global graph into each folder.
        for i in range(len(self.all_save_folders)):
            if i == 0:
                Graph_Tools.Show_Graph(global_average_after_align, 'Global_Average_After_Align', self.all_save_folders[i])
            else:
                Graph_Tools.Show_Graph(global_average_after_align, 'Global_Average_After_Align', self.all_save_folders[i],show_time = 0)
Example #5
def Least_Tremble_Average_Graph(data_folder,
                                average_prop=0.1,
                                cut_shape=(9, 9)):
    all_tif_name = np.array(OS_Tools.Get_File_Name(data_folder))
    _, frac_disps = Tremble_Evaluator(data_folder, cut_shape=cut_shape)
    frac_num, frame_num, _ = frac_disps.shape
    # Then calculate average center and least error graph.
    frac_centers = np.zeros(shape=(frac_num, 2), dtype='f8')
    for i in range(frac_num):
        frac_centers[i, 0] = frac_disps[i, :, 0].mean()
        frac_centers[i, 1] = frac_disps[i, :, 1].mean()
    # And all frac_total movings
    total_movings = np.zeros(frame_num, dtype='f8')
    for i in range(frame_num):
        c_dist = 0
        for j in range(frac_num):
            c_dist += (frac_centers[j][0] - frac_disps[j, i, 0])**2 + (
                frac_centers[j][1] - frac_disps[j, i, 1])**2
        total_movings[i] = c_dist
    # Then find least props.
    used_num = int(frame_num * average_prop)
    if used_num < 300:  # least num of average is set to 300 to avoid problem.
        used_num = min(300, frame_num)
    print('Average of most stable ' + str(used_num) + ' Frames.')
    if used_num >= frame_num:  # meaning all frames are used
        graph_names = all_tif_name
    else:
        used_frame_ind = np.argpartition(total_movings, used_num)[0:used_num]
        graph_names = all_tif_name[used_frame_ind]
    averaged_graph = Graph_Tools.Average_From_File(graph_names)

    return averaged_graph, graph_names
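# A minimal usage sketch (assumption): the data folder is a hypothetical placeholder, and
# Graph_Tools is assumed to be imported as in the other examples.
stable_avg, stable_names = Least_Tremble_Average_Graph(
    r'G:\Test_Data\Example_Run\Results\Aligned_Frames',
    average_prop=0.1,
    cut_shape=(9, 9))
stable_avg = Graph_Tools.Clip_And_Normalize(stable_avg, clip_std=5)
Graph_Tools.Show_Graph(stable_avg, 'Least_Tremble_Average',
                       r'G:\Test_Data\Example_Run\Results')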
Example #6
def Partial_Average_From_File(data_folder,
                              start_frame,
                              stop_frame,
                              graph_type='.tif',
                              LP_Para=False,
                              HP_Para=False,
                              filter_method=False):
    '''
    Average specific part of graphs in the folder.

    Parameters
    ----------
    data_folder : (str)
        Data folder.
    start_frame : (int)
        Start ID of frame selection.
    stop_frame : (int)
        Stop ID of frame selection.
    graph_type : (str), optional
        Frame dtype. The default is '.tif'.
    LP_Para / HP_Para / filter_method : optional
        Filter parameters. The default is False (no filtering).

    Returns
    -------
    Averaged_Graph : (2D Array)
        Averaged graph of the selected frames.

    '''
    all_tif_name = np.array(
        OS_Tools.Get_File_Name(data_folder, file_type=graph_type))
    used_tif_name = all_tif_name[start_frame:stop_frame]
    Averaged_Graph = Graph_Tools.Average_From_File(used_tif_name, LP_Para,
                                                   HP_Para, filter_method)
    return Averaged_Graph
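# A minimal usage sketch (assumption): average the first 1000 aligned frames of a
# hypothetical run folder, then clip, normalize and save the result.
partial_avg = Partial_Average_From_File(
    r'G:\Test_Data\Example_Run\Results\Aligned_Frames',
    start_frame=0,
    stop_frame=1000)
partial_avg = Graph_Tools.Clip_And_Normalize(partial_avg, clip_std=5)
Graph_Tools.Show_Graph(partial_avg, 'Partial_Average_0_1000',
                       r'G:\Test_Data\Example_Run\Results')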
Example #7
def Global_Averagor(all_folder_list, sub_folders=r'\Results\Affined_Frames'):
    '''
    Average a global graph from all runs' sub-graphs.

    Parameters
    ----------
    all_folder_list : (list)
        List of run folders, elements are strs.
    sub_folders : (str), optional
        Sub-folder (under each run folder) that contains the frames to average.
        The default is r'\Results\Affined_Frames'.

    Returns
    -------
    global_averaged_graph : (2D Array)
        Averaged graph of all frames in all given folders.

    '''
    all_folders = lt.List_Annex(all_folder_list, [sub_folders])
    all_tif_name = []
    for i in range(len(all_folders)):
        current_tif_name = ot.Get_File_Name(all_folders[i])
        all_tif_name.extend(current_tif_name)
    global_averaged_graph = Average_From_File(all_tif_name)

    return global_averaged_graph
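# A minimal usage sketch (assumption): the run folders are hypothetical placeholders;
# sub_folders keeps its default r'\Results\Affined_Frames'.
run_folders = [r'G:\Test_Data\Example_Day\1-001',
               r'G:\Test_Data\Example_Day\1-002']
day_global_average = Global_Averagor(run_folders)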
Example #8
    def Do_Align(self):
        """
        Main function. Calling this will finish the whole alignment work; useful when this class is used as a module.

        Returns
        -------
        Align_Properties : (dic)
            Properties of this alignment, including useful paths and useful tif names.

        """
        start_time = time.time() # Processing Start time
        self.Before_Run_Average()
        self.Align_Cores()
        self.After_Align_Average()
        finish_time = time.time()
        time_cost = finish_time-start_time
        print('Alignment Done, time cost = '+str(time_cost) +'s')
        
        # Output a dictionary recording useful save folders and aligned tif names.
        Align_Properties = {}
        Align_Properties['all_save_folders'] = self.all_save_folders
        all_tif_name = []
        for i in range(len(self.Aligned_frame_folders)):
            current_tif_list = OS_Tools.Get_File_Name(self.Aligned_frame_folders[i],file_type = '.tif')
            all_tif_name.append(current_tif_list)
        Align_Properties['all_tif_name'] = all_tif_name
        return Align_Properties
def One_Key_Frame_Graphs(
        data_folder,
        sub_dic,
        show_clip=3,
        alinged_sub_folder=r'\Results\Final_Aligned_Frames',
        Stim_Align_sub_folder=r'\Results\Stim_Frame_Align.pkl'):
    result_folder = data_folder + r'\Results'
    graph_save_folder = result_folder + r'\Only_Frame_SubGraphs'
    OS_Tools.mkdir(result_folder)
    OS_Tools.mkdir(graph_save_folder)
    stim_path = data_folder + Stim_Align_sub_folder
    stim_dic = OS_Tools.Load_Variable(stim_path)
    all_tif_name = OS_Tools.Get_File_Name(data_folder + alinged_sub_folder)
    graph_num = len(sub_dic)
    all_sub_graph_names = list(sub_dic.keys())
    for i in range(graph_num):
        current_name = all_sub_graph_names[i]
        current_a = Frame_ID_Extractor(stim_dic, sub_dic[current_name][0])
        current_b = Frame_ID_Extractor(stim_dic, sub_dic[current_name][1])
        current_sub_graph, current_t_graph, current_info_dic = Single_Subgraph_Generator(
            all_tif_name, current_a, current_b)
        current_sub_graph = Graph_Tools.Clip_And_Normalize(
            current_sub_graph, show_clip)
        current_t_graph = Graph_Tools.Clip_And_Normalize(
            current_t_graph, show_clip)
        # Save graphs
        Graph_Tools.Show_Graph(current_sub_graph, current_name + '_Sub_Graph',
                               graph_save_folder)
        Graph_Tools.Show_Graph(current_t_graph, current_name + '_t_Graph',
                               graph_save_folder)
        OS_Tools.Save_Variable(graph_save_folder, current_name + r'_Sub_Info',
                               current_info_dic, '.info')
    return True
def Tremble_Calculator_From_File(
        data_folder,
        graph_type='.tif',
        cut_shape=(8, 8),
        boulder=20,
        base_method='former',
        base=[],
):
    '''
    Calculate align tremble from graphs. This program is used to evaluate align quality.

    Parameters
    ----------
    data_folder : (str)
        Data folder of graphs.
    graph_type : (str), optional
        Extension name of input graphs. The default is '.tif'.
    cut_shape : (tuple), optional
        Shape of fracture cut. The default is (8, 8).
    boulder : (int), optional
        Boulder of graph. Cut and not used in the following calculation. The default is 20.
    base_method : ('average' or 'former' or 'input'), optional
        Method of base calculation. The default is 'former'.
        'average' uses the overall average as base; 'former' uses the former frame; 'input' means the base needs to be given.
    base : (2D_NdArray), optional
        If base_method == 'input', base should be given here. The default is [].

    Returns
    -------
    mass_center_maps : (Graph)
        A plotted graph, showing the movement trace of the mass center.
    tremble_plots : (List)
        List of tremble distances for every fracture graph.
    tremble_information : (Dic)
        Dictionary of tremble information.
    '''
    all_tif_name = OS_Tools.Get_File_Name(data_folder, file_type=graph_type)
    average_graph = Graph_Tools.Average_From_File(all_tif_name)
    tremble_information = {}
    #1. Get base graph first.
    if base_method == 'input':
        base_graph = base
    elif base_method == 'average':
        base_graph = average_graph
    elif base_method == 'former':
        base_graph = cv2.imread(all_tif_name[0], -1)  # First input graph.
    else:
        raise IOError('Invalid Base Method, check please.\n')
Example #11
def Tremble_Evaluator(data_folder,
                      ftype='.tif',
                      boulder_ignore=20,
                      cut_shape=(9, 9),
                      mask_thres=0):
    all_file_name = OS_Tools.Get_File_Name(data_folder, ftype)
    template = cv2.imread(all_file_name[0], -1)
    origin_dtype = template.dtype
    graph_shape = template.shape
    graph_num = len(all_file_name)
    origin_graph_matrix = np.zeros(shape=graph_shape + (graph_num, ),
                                   dtype=origin_dtype)
    for i in range(graph_num):
        origin_graph_matrix[:, :, i] = cv2.imread(all_file_name[i], -1)
    average_graph = origin_graph_matrix.mean(axis=2).astype('u2')
    # Show schematic of cutted graph.
    schematic, _, _, _ = Graph_Cutter(average_graph, boulder_ignore, cut_shape)
    # Then,save cutted graphs into dics.
    cutted_graph_dic = {}
    fracture_num = cut_shape[0] * cut_shape[1]
    for i in range(fracture_num):  # initialize cut dics.
        cutted_graph_dic[i] = []
    for i in range(graph_num):  # Cycle all graphs
        current_graph = origin_graph_matrix[:, :, i]
        _, _, _, cutted_graphs = Graph_Cutter(current_graph, boulder_ignore,
                                              cut_shape)
        for j in range(fracture_num):  # save each fracture
            cutted_graph_dic[j].append(cutted_graphs[j])
    # Calculate graph center of each fracture trains. Use weighted center.
    all_frac_center = np.zeros(shape=(fracture_num, graph_num, 2), dtype='f8')
    for i in range(fracture_num):
        current_frac = cutted_graph_dic[i]
        for j in range(graph_num):
            current_graph = current_frac[j]
            if mask_thres == 'otsu':
                thres = filters.threshold_otsu(current_graph)
            elif (type(mask_thres) == int or type(mask_thres) == float):
                thres = mask_thres
            else:
                raise IOError('Invalid mask threshold.')
            mask = (current_graph > thres).astype(int)
            properties = regionprops(mask, current_graph)
            current_mc = properties[0].weighted_centroid
            all_frac_center[i, j, :] = current_mc  #In sequence YX
    return schematic, all_frac_center
Example #12
def Cell_Find_And_Plot(
        graph_folder,
        graph_name,
        Cell_Label,
        find_thres = 2.5,
        max_pix = 1000,
        min_pix = 20,
        shape_boulder = [20,20,20,20], 
        sharp_gauss = ([7,7],1.5),
        back_gauss = ([15,15],7),
        size_limit = 20    
        ):
    """
    Cell find from file.

    Parameters
    ----------
    graph_folder : (str)
        Graph folder.
    graph_name : (str)
        Graph name. The extension name shall be included.
    Cell_Label : (str)
        Save sub-folder. Cell data and cell graphs will be saved in this sub-folder.
    find_thres, max_pix, min_pix, shape_boulder, sharp_gauss, back_gauss, size_limit : optional
        Same as in Cell_Find_From_Graph.

    Returns
    -------
    Finded_Cells : (dic)
        Dictionary of found cells, including cell information and cell graphs.

    """
    Base_Graph = cv2.imread(graph_folder + r'\\' + graph_name,-1)
    graph_save_folder = graph_folder + r'\\' + Cell_Label
    Finded_Cells = Cell_Find_From_Graph(Base_Graph,find_thres,max_pix,min_pix,shape_boulder,sharp_gauss,back_gauss,size_limit)
    OS_Tools.Save_Variable(graph_save_folder,Cell_Label,Finded_Cells,extend_name = '.cell')
    all_keys = list(Finded_Cells.keys())
    all_keys.remove('All_Cell_Information')
    for i in range(len(all_keys)):
        Graph_Tools.Show_Graph(Finded_Cells[all_keys[i]],graph_name = all_keys[i],save_path = graph_save_folder,show_time = 2000,write = True)
    return Finded_Cells
Example #13
def AI_Calculator(graph_folder, start_frame=0, end_frame=-1, masks='No_Mask'):
    '''
    This function is used to calculate the average intensity variation. Masks can be given to restrict the calculation to cell areas.

    Parameters
    ----------
    graph_folder : (str)
        All graphs folder.
    start_frame : (int,optional)
        Start frame num. The default is 0.
    end_frame : (int,optional)
        End frame. The default is -1.
    masks : (2D_Array, optional)
        2D mask array. The input will be binarized, so be careful. The default is 'No_Mask' (use the whole frame).

    Returns
    -------
    intensity_series : (Array)
        Return average intensity.

    '''
    #initialize
    all_tif_name = np.array(OS_Tools.Get_File_Name(graph_folder))
    used_tif_name = all_tif_name[start_frame:end_frame]
    frame_Num = len(used_tif_name)
    intensity_series = np.zeros(frame_Num, dtype='f8')
    graph_shape = np.shape(cv2.imread(used_tif_name[0], -1))
    #calculate mask
    if type(masks) == str:
        masks = np.ones(graph_shape, dtype='bool')
    elif masks.dtype != 'bool':
        masks = masks > (masks // 2)
    pix_num = masks.sum()
    #calculate ai trains
    for i in range(frame_Num):
        current_graph = cv2.imread(used_tif_name[i], -1)
        masked_graph = current_graph * masks
        current_ai = masked_graph.sum() / pix_num
        intensity_series[i] = current_ai
    return intensity_series
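# A minimal usage sketch (assumption): calculate the whole-frame intensity train of a
# hypothetical aligned-frame folder. A binary cell mask (e.g. taken from a Cell_Find_And_Plot
# result as (cell_dic['Cell_Graph'][:, :, 0]) > 0) can be passed via masks to restrict the
# calculation to cell areas.
frame_ai_train = AI_Calculator(r'G:\Test_Data\Example_Run\Results\Aligned_Frames',
                               start_frame=0,
                               end_frame=-1)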
def One_Key_Stim_Maps(data_folder,
                      cell_folder,
                      sub_dic,
                      have_blank=None,
                      alinged_sub_folder=r'\Results\Aligned_Frames',
                      Stim_Align_sub_folder=r'\Results\Stim_Frame_Align.pkl'):
    '''
    One-key generation of stim maps. Before using this, you need to:
        1. Align the graphs.
        2. Give the cell file path.
        3. Finish stim frame align.
    '''
    result_folder = data_folder + r'\Results'
    stim_path = data_folder + Stim_Align_sub_folder
    cell_path = OS_Tools.Get_File_Name(cell_folder, '.cell')[0]
    cell_dic = OS_Tools.Load_Variable(cell_path)
    # Then generate spiketrain
    stim_train = OS_Tools.Load_Variable(stim_path)['Original_Stim_Train']
    all_tif_name = OS_Tools.Get_File_Name(data_folder + alinged_sub_folder)
    cell_information = cell_dic['All_Cell_Information']
    if have_blank != None:
        warnings.warn(
            'Have blank is detected automatically, this API is useless now.',
            FutureWarning)
    have_blank = (0 in stim_train)
    if have_blank == True:
        F_train, dF_F_train = Spike_Train_Generator(all_tif_name,
                                                    cell_information,
                                                    Base_F_type='nearest_0',
                                                    stim_train=stim_train)
    else:
        print('No blank, use previous ISI to calculate trains')
        F_train, dF_F_train = Spike_Train_Generator(all_tif_name,
                                                    cell_information,
                                                    Base_F_type='before_ISI',
                                                    stim_train=stim_train)
    # Then save F and dF/F trains
    OS_Tools.Save_Variable(result_folder, 'F_Trains', F_train)
    OS_Tools.Save_Variable(result_folder, 'dF_F_Trains', dF_F_train)
    # At last, calculate Maps.
    Standard_Stim_Processor(data_folder,
                            stim_path,
                            sub_dic,
                            cell_method=cell_path,
                            spike_train_path=result_folder +
                            r'\dF_F_Trains.pkl')
Example #15
import My_Wheels.Graph_Operation_Kit as Graph_Tools
import My_Wheels.OS_Tools_Kit as OS_Tools
from My_Wheels.Translation_Align_Function import Translation_Alignment
from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
from My_Wheels.Cell_Find_From_Graph import Cell_Find_And_Plot
import numpy as np
import cv2
#%% First, read in the config file.
# All read-ins shall be done in this part to avoid bugs.
f = open('Config.punch','r')
config_info = f.readlines()
del f
frame_folder = config_info[3][:-1]# Remove '\n'
stim_folder = config_info[6][:-1]# Remove '\n'
cap_freq = float(config_info[9])
frame_thres = float(config_info[12])
#%% Second do graph align.
save_folder = frame_folder+r'\Results'
aligned_tif_folder = save_folder+r'\Aligned_Frames'
all_tif_name = OS_Tools.Get_File_Name(frame_folder)
graph_size = np.shape(cv2.imread(all_tif_name[0],-1))
Translation_Alignment([frame_folder],align_range = 10,align_boulder = 40,big_memory_mode=True,graph_shape = graph_size)
aligned_all_tif_name = np.array(OS_Tools.Get_File_Name(aligned_tif_folder))
#%% Third, Stim Frame Align
jmp_step = int(5000//cap_freq)
_,Frame_Stim_Dic = Stim_Frame_Align(stim_folder,frame_thres = frame_thres,jmp_step = jmp_step)
#%% Forth, generate Morpho graph and find cell.
cell_Dic = Cell_Find_And_Plot(save_folder, 'Run_Average_After_Align.tif', 'Morpho_Cell')
cell_mask = (cell_Dic['Cell_Graph'][:,:,0])>0
#%% Fifth, calculate RF reaction.
RF_Data = np.zeros(shape = (5,5,2),dtype = 'f8')# use 5*5 matrix, set 0 are frames, set 1 are cells
loc_ids = np.array([1,26,51,76,101,126,151,176,201,226,251,276])
for i in range(5):# i as vector1
    for j in range(5):# j as vector2
        start_id = i*5+j
Example #16
from My_Wheels.Cell_Find_From_Graph import Cell_Find_And_Plot
from Stim_Frame_Align import Stim_Frame_Align
import My_Wheels.OS_Tools_Kit as OS_Tools
all_stim_cell = Cell_Find_And_Plot(r'E:\Test_Data\2P\210112_L76_2P\1-007\Results', 'Global_Average_After_Align.tif','All_Stim',find_thres = 1.5)
all_stim_folders = [
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run07_2P_OD8_auto',
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run08_2P_G8',
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run09_2P_G8_RF',
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run11_2P_G8_RF',
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run13_color7_dir8_grating_squarewave_prefsize_BG',
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run14_2P_RGLum4_RF',
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run15_2P_RGLum4',
    r'E:\Test_Data\2P\210112_L76_2P\210112_L76_stimuli\Run16_shape3_dir8_modified_WJY_201228'
    ]
for i in range(8):
    _,current_stim_dic = Stim_Frame_Align(all_stim_folders[i])
    OS_Tools.Save_Variable(all_stim_folders[i], 'Stim_Frame_Align', current_stim_dic)
#%% Get F and dF trains here.
cell_folder = r'H:\Test_Data\2P\210112_L76_2P\1-007\Results\All_Stim'
from Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
from Standard_Stim_Processor import One_Key_Stim_Maps
# Run07,OD
OD_Para = Sub_Dic_Generator('OD_2P')
One_Key_Stim_Maps(r'E:\Test_Data\2P\210112_L76_2P\1-007', cell_folder, OD_Para)
# Run08_G8
G8_Para = Sub_Dic_Generator('G8+90')
One_Key_Stim_Maps(r'E:\Test_Data\2P\210112_L76_2P\1-008', cell_folder, G8_Para)
One_Key_Stim_Maps(r'E:\Test_Data\2P\210112_L76_2P\1-009', cell_folder, G8_Para)
One_Key_Stim_Maps(r'E:\Test_Data\2P\210112_L76_2P\1-011', cell_folder, G8_Para)
RG_Para = Sub_Dic_Generator('RGLum4')
One_Key_Stim_Maps(r'E:\Test_Data\2P\210112_L76_2P\1-014', cell_folder, RG_Para)
One_Key_Stim_Maps(r'E:\Test_Data\2P\210112_L76_2P\1-015', cell_folder, RG_Para)
Example #17
def Translation_Alignment(all_folders,
                          base_mode='global',
                          input_base=np.array([[0, 0], [0, 0]]),
                          align_range=20,
                          align_boulder=20,
                          before_average=True,
                          average_std=5,
                          big_memory_mode=False,
                          save_aligned_data=False,
                          graph_shape=(512, 512),
                          timer=True):
    '''
    
    This function will align all tif graphs in the input folders. Only translation transformation is done here; affine transformation needs further discussion.

    Parameters
    ----------
    all_folders : (list)
        List of all tif folders, elements are strs.

    base_mode : ('global', int, 'input', optional. The default is 'global')
        How to select the base frame. 'global': use the global average as base. int: use the average of a specific run as base. 'input': manually input a base graph, which needs to be a 2D-Ndarray.

    input_base : (2D-Ndarray, optional. The default is np.array([[0, 0], [0, 0]]))
        If base_mode = 'input', input_base must be given. This will be the base for alignment.

    align_range : (int, optional. The default is 20)
        Max pixel shift of alignment.

    align_boulder : (int, optional. The default is 20)
        Boulder cut for alignment. For different graph sizes, this variable shall be changed.

    before_average : (bool, optional. The default is True)
        Whether the before-align average is done. It can be set to False to save time; in this case a base graph shall be given.

    average_std : (float, optional. The default is 5)
        How many stds you want for average graph generation. Different std values affect graph appearance.

    big_memory_mode : (bool, optional. The default is False)
        If memory is big enough, this mode is faster.

    save_aligned_data : (bool, optional. The default is False)
        Can be True only in big memory mode. This will save all aligned graphs in a single variable file. The save folder is the first folder.

    graph_shape : (2-element-tuple, optional. The default is (512, 512))
        Shape of graphs. All input graphs must have the same shape.

    timer : (bool, optional. The default is True)
        Show runtime of the function and each procedure.

    Returns
    -------
    bool
        Whether new folders are generated.
    
    '''
    time_tic_start = time.time()
    #%% Step1, generate folders and file cycle.
    all_save_folders = List_Op.List_Annex(all_folders, ['Results'])
    Aligned_frame_folders = List_Op.List_Annex(all_save_folders,
                                               ['Aligned_Frames'])
    for i in range(len(all_save_folders)):
        OS_Tools.mkdir(all_save_folders[i])
        OS_Tools.mkdir(Aligned_frame_folders[i])
    Before_Align_Tif_Name = []
    for i in range(len(all_folders)):
        current_run_tif = OS_Tools.Get_File_Name(all_folders[i])
        Before_Align_Tif_Name.append(current_run_tif)

    #%% Step2, Generate average map before align.
    if before_average == True:
        print('Before run averaging ...')
        Before_Align_Dics = {
        }  # This is the dictionary of all run averages. Keys are run id.
        total_graph_num = 0  # Counter of graph numbers.
        for i in range(len(Before_Align_Tif_Name)):
            current_run_graph_num = len(Before_Align_Tif_Name[i])
            total_graph_num += current_run_graph_num
            current_run_average = Graph_Tools.Average_From_File(
                Before_Align_Tif_Name[i])
            current_run_average = Graph_Tools.Clip_And_Normalize(
                current_run_average, clip_std=average_std)
            Before_Align_Dics[i] = (
                current_run_average, current_run_graph_num
            )  # Attention here, data recorded as turple.
            Graph_Tools.Show_Graph(
                current_run_average, 'Run_Average',
                all_save_folders[i])  # Show and save Run Average.
        # Then Use Weighted average method to generate global tif.
        global_average_graph = np.zeros(shape=np.shape(
            Before_Align_Dics[0][0]),
                                        dtype='f8')  # Base on shape of graph
        for i in range(len(Before_Align_Tif_Name)):
            global_average_graph += Before_Align_Dics[i][0].astype(
                'f8') * Before_Align_Dics[i][1] / total_graph_num
        global_average_graph = Graph_Tools.Clip_And_Normalize(
            global_average_graph, clip_std=average_std)
        # Then save global average in each run folder.
        if len(all_folders) > 1:
            for i in range(len(Before_Align_Tif_Name)):
                Graph_Tools.Show_Graph(global_average_graph,
                                       'Global_Average',
                                       all_save_folders[i],
                                       show_time=0)
        else:
            print('Only One run, no global average.')
    else:
        print('Before average Skipped.')
    time_tic_average0 = time.time()

    #%% Step3, Core Align Function.
    print('Aligning...')
    if base_mode == 'global':
        base = global_average_graph
    elif base_mode == 'input':
        base = input_base
    elif type(base_mode) == int:
        base = Before_Align_Dics[base_mode][0]
    else:
        raise IOError('Invalid base mode.')
    # In big memory mode, save aligned_data in a dictionary file.
    if big_memory_mode == True:
        All_Aligned_Frame = {}
        for i in range(len(Before_Align_Tif_Name)):
            All_Aligned_Frame[i] = np.zeros(
                shape=(graph_shape + (len(Before_Align_Tif_Name[i]), )),
                dtype='u2')  # Generate empty graph matrix.
    for i in range(len(Before_Align_Tif_Name)):  # Cycle all runs
        for j in range(len(
                Before_Align_Tif_Name[i])):  # Cycle all graphs in current run
            current_graph = cv2.imread(Before_Align_Tif_Name[i][j],
                                       -1)  # Read in current graph.
            _, _, current_aligned_graph = Alignment(base,
                                                    current_graph,
                                                    boulder=align_boulder,
                                                    align_range=align_range)
            graph_name = Before_Align_Tif_Name[i][j].split(
                '\\')[-1][:-4]  # Ignore extend name'.tif'.
            Graph_Tools.Show_Graph(current_aligned_graph,
                                   graph_name,
                                   Aligned_frame_folders[i],
                                   show_time=0)
            if big_memory_mode == True:
                All_Aligned_Frame[i][:, :, j] = current_aligned_graph
    print('Align Finished, generating average graphs...')
    time_tic_align_finish = time.time()

    #%% Step4, After Align Average
    After_Align_Graphs = {}
    if big_memory_mode == True:  # Average can be faster.
        temp_global_average_after_align = np.zeros(shape=graph_shape,
                                                   dtype='f8')
        for i in range(len(All_Aligned_Frame)):
            current_run_average = Graph_Tools.Clip_And_Normalize(
                np.mean(All_Aligned_Frame[i], axis=2),
                clip_std=average_std)  # Average run graphs, in type 'u2'
            After_Align_Graphs[i] = (current_run_average,
                                     len(All_Aligned_Frame[i][0, 0, :]))
            temp_global_average_after_align += After_Align_Graphs[i][0].astype(
                'f8') * After_Align_Graphs[i][1] / total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(
            temp_global_average_after_align, clip_std=average_std)
    else:  # Traditional ways.
        temp_global_average_after_align = np.zeros(shape=graph_shape,
                                                   dtype='f8')
        for i in range(len(Aligned_frame_folders)):
            current_run_names = OS_Tools.Get_File_Name(
                Aligned_frame_folders[i])
            current_run_average = Graph_Tools.Average_From_File(
                current_run_names)
            current_run_average = Graph_Tools.Clip_And_Normalize(
                current_run_average, clip_std=average_std)
            After_Align_Graphs[i] = (current_run_average,
                                     len(current_run_names))
            temp_global_average_after_align += After_Align_Graphs[i][0].astype(
                'f8') * After_Align_Graphs[i][1] / total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(
            temp_global_average_after_align, clip_std=average_std)
    # After average, save aligned graph in each save folder.
    for i in range(len(all_save_folders)):
        current_save_folder = all_save_folders[i]
        Graph_Tools.Show_Graph(After_Align_Graphs[i][0],
                               'Run_Average_After_Align', current_save_folder)
        if i == 0:  # Show global average only once.
            global_show_time = 5000
        else:
            global_show_time = 0
        if len(all_folders) > 1:
            Graph_Tools.Show_Graph(global_average_after_align,
                                   'Global_Average_After_Align',
                                   current_save_folder,
                                   show_time=global_show_time)
    time_tic_average1 = time.time()

    #%% Step5, save and timer
    if save_aligned_data == True:
        OS_Tools.Save_Variable(all_save_folders[0], 'All_Aligned_Frame_Data',
                               All_Aligned_Frame)

    if timer == True:
        whole_time = time_tic_average1 - time_tic_start
        before_average_time = time_tic_average0 - time_tic_start
        align_time = time_tic_align_finish - time_tic_average0
        after_average_time = time_tic_average1 - time_tic_align_finish
        print('Total Time = ' + str(whole_time) + ' s.')
        print('Before Average Time = ' + str(before_average_time) + ' s.')
        print('Align Time = ' + str(align_time) + ' s.')
        print('After Average Time = ' + str(after_average_time) + ' s.')

    return True
def Standard_Cell_Processor(
    animal_name,
    date,
    day_folder,
    cell_file_path,
    #average_graph_path, # not necessary.
    run_id_lists,
    location='A',  # Location flag, for days with more than one imaging location.
    Stim_Frame_Align_name='_All_Stim_Frame_Infos.sfa',
    #Stim_Frame_Align_subfolder = r'\Results\Stim_Frame_Align.pkl',# API changed.
    align_subfolder=r'\Results\Aligned_Frames',
    response_head_extend=3,
    response_tail_extend=3,
    base_frame=[0, 1, 2],
    filter_para=(0.02, False)):
    # Folder and name initialization
    print('Just make sure average and cell find is already done.')
    cell_dic = ot.Load_Variable(cell_file_path)
    cell_info = cell_dic['All_Cell_Information']
    cell_name_prefix = animal_name + '_' + str(date) + location + '_'
    all_cell_num = len(cell_info)
    all_run_subfolders = lt.List_Annex([day_folder],
                                       lt.Run_Name_Producer_2P(run_id_lists))
    save_folder = day_folder
    all_Stim_Frame_Align = ot.Load_Variable(day_folder + r'\\' +
                                            Stim_Frame_Align_name)
    # Set cell data formats.
    all_cell_list = []
    for i in range(all_cell_num):
        current_cell_name = cell_name_prefix + ot.Bit_Filler(i, 4)
        current_cell_dic = {}
        current_cell_dic['Name'] = current_cell_name
        current_cell_dic['Cell_Info'] = cell_info[i]
        # Cycle all runs for F and dF trains.
        current_cell_dic['dF_F_train'] = {}
        current_cell_dic['F_train'] = {}
        current_cell_dic['Raw_CR_trains'] = {}
        current_cell_dic['CR_trains'] = {}
        all_cell_list.append(current_cell_dic)
    # Then cycle all runs, fill in
    for i in range(len(all_run_subfolders)):
        current_runid = 'Run' + (all_run_subfolders[i][-3:]
                                 )  # Use origin run id to avoid bugs.
        current_all_tif_name = ot.Get_File_Name(
            all_run_subfolders[i] + align_subfolder, '.tif')
        current_Stim_Frame_Align = all_Stim_Frame_Align[current_runid]
        if current_Stim_Frame_Align == None or len(
                current_Stim_Frame_Align
        ) == 302:  # meaning this run is spon or RF25.
            current_run_Fs, current_run_dF_Fs = Spike_Train_Generator(
                current_all_tif_name, cell_info, 'most_unactive', None)
        else:
            current_run_stim_train = current_Stim_Frame_Align[
                'Original_Stim_Train']
            if 0 in current_run_stim_train:  # having 0
                current_run_Fs, current_run_dF_Fs = Spike_Train_Generator(
                    current_all_tif_name,
                    cell_info,
                    Base_F_type='nearest_0',
                    stim_train=current_run_stim_train)
            else:
                current_run_Fs, current_run_dF_Fs = Spike_Train_Generator(
                    current_all_tif_name,
                    cell_info,
                    Base_F_type='before_ISI',
                    stim_train=current_run_stim_train)
        # Then put trains above into each cell files.
        for j in range(all_cell_num):
            all_cell_list[j]['dF_F_train'][current_runid] = current_run_dF_Fs[
                j]
            all_cell_list[j]['F_train'][current_runid] = current_run_Fs[j]
        # Then, we generate Condition Reaction Train for each cell and each condition.
        if current_Stim_Frame_Align == None:
            for j in range(all_cell_num):  # Fill None for every cell, not only the last one.
                all_cell_list[j]['CR_trains'][current_runid] = None
                all_cell_list[j]['Raw_CR_trains'][current_runid] = None
        else:
            for j in range(all_cell_num):
                all_cell_list[j]['CR_trains'][current_runid], all_cell_list[j][
                    'Raw_CR_trains'][
                        current_runid] = Single_Condition_Train_Generator(
                            current_run_Fs[j], current_Stim_Frame_Align,
                            response_head_extend, response_tail_extend,
                            base_frame, filter_para)
    # Till now, all cell data of all runs is saved in 'all_cell_list'.
    # Last part, saving files. All cells in one file, dtype = dic.
    all_cell_dic = {}
    for i in range(all_cell_num):
        all_cell_dic[all_cell_list[i]['Name']] = all_cell_list[i]
    ot.Save_Variable(save_folder,
                     '_' + animal_name + '_' + str(date) + location + '_All_Cells',
                     all_cell_dic, '.ac')
    return True
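# A minimal usage sketch (assumption): every path, name, date and run id below is a
# hypothetical placeholder; the cell file and the '_All_Stim_Frame_Infos.sfa' file are
# assumed to exist already.
Standard_Cell_Processor(animal_name='L00',
                        date='210101',
                        day_folder=r'K:\Test_Data\2P\Example_Day',
                        cell_file_path=r'K:\Test_Data\2P\Example_Day\_Global_Morpho.cell',
                        run_id_lists=[1, 2, 3],
                        location='A')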
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 13:41:07 2020

@author: ZR
Codes to process L76 Data
"""

import My_Wheels.Graph_Operation_Kit as Graph_Tools
import My_Wheels.OS_Tools_Kit as OS_Tools
#%% Cell1, Average Graph.
graph_folder = r'I:\Test_Data\201023_L76_LM\1-003'
save_path = graph_folder + r'\Results'
OS_Tools.mkdir(save_path)
all_tif_name = OS_Tools.Get_File_Name(graph_folder)
average_graph = Graph_Tools.Average_From_File(all_tif_name)
norm_average_graph = Graph_Tools.Clip_And_Normalize(average_graph, clip_std=3)
Graph_Tools.Show_Graph(norm_average_graph, 'Average_Graph', save_path)
#%% Then Calculate Runs
graph_folder = r'I:\Test_Data\201023_L76_LM\1-013'
import My_Wheels.Translation_Align_Function as Align
Align.Translation_Alignment([graph_folder])
#%% Align Stim and Frame
import My_Wheels.Stim_Frame_Align as Stim_Frame_Align
stim_folder = r'I:\Test_Data\201023_L76_LM\201023_L76_LM_Stimulus\Run13_RGLum4'
Frame_Stim_Sequence, Frame_Stim_Dictionary = Stim_Frame_Align.Stim_Frame_Align(
    stim_folder)
aligned_tif_name = OS_Tools.Get_File_Name(
    r'I:\Test_Data\201023_L76_LM\1-013\Results\Aligned_Frames')
#%% Generate On-Off Map
on_id = []
Example #20
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 30 14:37:51 2021

@author: ZR
"""

from My_Wheels.Standard_Parameters.Stim_Name_Tools import Stim_ID_Combiner
from My_Wheels.Cell_Processor import Cell_Processor
import My_Wheels.OS_Tools_Kit as ot
import matplotlib.pyplot as plt

day_folder = r'K:\Test_Data\2P\210320_L76_2P'
save_folder = day_folder + r'\_All_Results'
ot.mkdir(save_folder)
#%% Analyze Run05-G16 First.
G16_CP = Cell_Processor(day_folder, 'Run005')
all_cell_name = G16_CP.all_cell_names
Ori_IDs = Stim_ID_Combiner('G16_Oriens')
sub_sf = save_folder + r'\G16_Oriens'
ot.mkdir(sub_sf)
for i in range(len(all_cell_name)):
    _, raw_data, _ = G16_CP.Single_Cell_Response_Data(Ori_IDs,
                                                      all_cell_name[i])
    ot.Save_Variable(sub_sf, all_cell_name[i], raw_data, '.raw')
    test_fig = G16_CP.Average_Response_Map()
    test_fig.savefig(sub_sf + r'\\' + all_cell_name[i] + '_Response.png',
                     dpi=180)
    plt.clf()
#%% Then directions
Dir_IDs = Stim_ID_Combiner('G16_Dirs')
Example #21
def Tremble_Comparision(before_folder,
                        after_folder,
                        boulder_ignore=20,
                        cut_shape=(9, 9),
                        mask_thres=0):
    # Initialization
    save_folder = after_folder + r'\Results'
    OS_Tools.mkdir(save_folder)
    save_folder = save_folder + r'\Tremble_Compare'
    OS_Tools.mkdir(save_folder)
    row_num = cut_shape[0]
    col_num = cut_shape[1]
    frac_num = row_num * col_num
    cov_matrix_dic = {}
    var_matrix_dic = {}
    variation = np.zeros(shape=(row_num, col_num, 2), dtype='f8')
    variation_change = np.zeros(shape=(row_num, col_num), dtype='f8')
    variation_prop = np.zeros(shape=(row_num, col_num), dtype='f8')
    # Calculation Begins
    before_schematic, before_frac_center = Tremble_Evaluator(
        before_folder,
        boulder_ignore=boulder_ignore,
        cut_shape=cut_shape,
        mask_thres=mask_thres)
    after_schematic, after_frac_center = Tremble_Evaluator(
        after_folder,
        boulder_ignore=boulder_ignore,
        cut_shape=cut_shape,
        mask_thres=mask_thres)
    fig, ax = plt.subplots(row_num, col_num,
                           figsize=(30, 28))  # Initialize graphs
    fig.suptitle('Mass Center Distribution', fontsize=54)
    # Cycle all fracture,get scatter map and variance
    for i in range(frac_num):
        # Graph_Plot
        current_row = i % row_num
        current_col = i // row_num
        ax[current_row, current_col].scatter(before_frac_center[i, :, 1],
                                             before_frac_center[i, :, 0],
                                             s=1,
                                             c='r')
        ax[current_row, current_col].scatter(after_frac_center[i, :, 1],
                                             after_frac_center[i, :, 0],
                                             s=1,
                                             c='g')
        # After plot, calculate cov matrix and variance.
        before_cov = np.cov(before_frac_center[i, :, :].T)
        after_cov = np.cov(after_frac_center[i, :, :].T)
        cov_matrix_dic[i] = (before_cov, after_cov)
        before_eig, _ = np.linalg.eig(before_cov)
        after_eig, _ = np.linalg.eig(after_cov)
        before_var = np.round(before_eig.sum(), 4)
        after_var = np.round(after_eig.sum(), 4)
        variation[current_row, current_col, 0] = before_var
        variation[current_row, current_col, 1] = after_var
        variation_change[current_row, current_col] = before_var - after_var
        variation_prop[current_row,
                       current_col] = (before_var - after_var) / before_var
        # Text annotate
        anchored_text = AnchoredText('Before variance:' + str(before_var) +
                                     '\n After variance:' + str(after_var),
                                     loc='lower left')
        ax[current_row, current_col].add_artist(anchored_text)

    # After this, save figures and matrixs.
    var_matrix_dic['Before'] = variation[:, :, 0]
    var_matrix_dic['After'] = variation[:, :, 1]
    Graph_Tools.Show_Graph(before_schematic, 'Before_Schematic', save_folder)
    Graph_Tools.Show_Graph(after_schematic, 'After_Schematic', save_folder)
    fig.savefig(save_folder + r'\Scatter Plots.png', dpi=330)
    OS_Tools.Save_Variable(save_folder, 'cov_matrix', cov_matrix_dic)
    OS_Tools.Save_Variable(save_folder, 'variance_matrix', var_matrix_dic)
    # Calculate variance change and plot variance map.
    # Before variance map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('Before Align Variance', fontsize=36)
    fig2 = sns.heatmap(variation[:, :, 0],
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\Before_Variance.png', dpi=330)
    # After variance map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('After Align Variance', fontsize=36)
    fig2 = sns.heatmap(variation[:, :, 1],
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\After_Variance.png', dpi=330)
    # Variance change map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('Variance Change', fontsize=36)
    fig2 = sns.heatmap(variation_change,
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\Variance_Change.png', dpi=330)
    # Variance change proportion map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('Variance Change Proportion', fontsize=36)
    fig2 = sns.heatmap(variation_prop,
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\Variance_Change_Prop.png', dpi=330)
    return cov_matrix_dic, var_matrix_dic
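# A minimal usage sketch (assumption): compare the raw frames of a hypothetical run with
# its aligned frames; both folder paths are placeholders.
cov_dic, var_dic = Tremble_Comparision(r'G:\Test_Data\Example_Run',
                                       r'G:\Test_Data\Example_Run\Results\Aligned_Frames',
                                       cut_shape=(9, 9))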
Example #22
def Tremble_Calculator_From_File(data_folder,
                                 graph_type='.tif',
                                 cut_shape=(10, 5),
                                 boulder=20,
                                 move_method='former',
                                 base=[],
                                 center_method='weight'):
    '''
    Calculate align tremble from graphs. This program is used to evaluate align quality.

    Parameters
    ----------
    data_folder : (str)
        Data folder of graphs.
    graph_type : (str), optional
        Extension name of input graphs. The default is '.tif'.
    cut_shape : (tuple), optional
        Shape of fracture cut. The default is (10, 5).
    boulder : (int), optional
        Boulder of graph. Cut and not used in the following calculation. The default is 20.
    move_method : ('average' or 'former' or 'input'), optional
        Method of base calculation. The default is 'former'.
        'average' uses the overall average as base; 'former' uses the former frame; 'input' means the base needs to be given.
    base : (2D_NdArray), optional
        If move_method == 'input', base should be given here. The default is [].
    center_method : ('weight' or 'binary'), optional
        Method of center finding, i.e. whether weighted intensity is used. The default is 'weight'.

    Returns
    -------
    mass_center_maps : (Graph)
        A plotted graph, showing the movement trace of the mass center.
    tremble_plots : (List)
        List of tremble distances for every fracture graph.
    tremble_information : (Dic)
        Dictionary of tremble information.
        Data structure of tremble_information:
            keys are frame IDs;
            values are lists, every element of which corresponds to one fracture graph (indexed by its ID in the cut graph);
            list elements are tuples, with tuple[0] the move vector and tuple[1] the move distance.
            

    '''
    all_tif_name = OS_Tools.Get_File_Name(data_folder, graph_type)
    average_graph = Graph_Tools.Average_From_File(all_tif_name)
    tremble_information = {}
    # get base of align first.
    if move_method == 'average':
        base_graph = average_graph
    elif move_method == 'input':
        base_graph = base
    elif move_method == 'former':
        base_graph = cv2.imread(all_tif_name[0],
                                -1)  # Use first frame as base.

    # cycle all graph to generate tremble plots.
    for i in range(len(all_tif_name)):
        # Read in the current graph. No extra cell processing is done here; the raw frame is used directly.
        current_graph = cv2.imread(all_tif_name[i], -1)
        # Cut graph as described.
        _, _, _, cutted_current_graph = Graph_Cutter(current_graph, boulder,
                                                     cut_shape)
        _, _, _, cutted_base = Graph_Cutter(base_graph, boulder, cut_shape)
        # Renew base if former mode.
        if move_method == 'former':
            base_graph = cv2.imread(all_tif_name[i], -1)
        # Then cycle all cutted_fracture, to calculate movement of every fracture graph.
        current_frame_move_list = []
        for j in range(len(cutted_current_graph)):
            temp_graph_part = cutted_current_graph[j]
            temp_base_part = cutted_base[j]
            temp_graph_center, _ = Graph_Tools.Graph_Center_Calculator(
                temp_graph_part, center_mode=center_method)
            temp_base_center, _ = Graph_Tools.Graph_Center_Calculator(
                temp_base_part, center_mode=center_method)
            temp_tremble_vector, temp_tremble_dist = Calculator.Vector_Calculate(
                temp_base_center, temp_graph_center)
            current_frame_move_list.append(
                (temp_tremble_vector, temp_tremble_dist))
        tremble_information[i] = current_frame_move_list

    # Then, plot mass center plots. This will show change of mass center position.
    if move_method == 'input':
        print('No Mass Center plot Generated.')
        mass_center_maps = False
    elif move_method == 'average':  # If average, use current location
        mass_center_maps = []
        for i in range(len(tremble_information[0])):  # Cycle all fracture
            fig = plt.figure()
            ax = plt.subplot()
            for j in range(len(tremble_information)):  # Cycle all frame
                current_point = tremble_information[j][i][0]
                ax.scatter(current_point[1], current_point[0], alpha=1, s=5)
            mass_center_maps.append(fig)
            plt.close()
    elif move_method == 'former':
        mass_center_maps = []
        for i in range(len(tremble_information[0])):  # Cycle all fracture
            fig = plt.figure()
            ax = plt.subplot()
            current_point = (0, 0)
            for j in range(len(tremble_information)):  # Cycle all frame
                current_point = (current_point[0] +
                                 tremble_information[j][i][0][0],
                                 current_point[1] +
                                 tremble_information[j][i][0][1])
                ax.scatter(current_point[1], current_point[0], alpha=1, s=5)
            mass_center_maps.append(fig)
            plt.close()

    # At last, plot tremble dist plots. Each fracture have a plot.
    tremble_plots = {}
    for i in range(len(tremble_information[0])):  # Cycle all fractures
        current_tremble_plot = []
        for j in range(len(tremble_information)):  # Cycle all frame
            current_dist = tremble_information[j][i][1]
            current_tremble_plot.append(current_dist)
        tremble_plots[i] = np.asarray(current_tremble_plot)
    return mass_center_maps, tremble_plots, tremble_information
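# A minimal usage sketch (assumption): evaluate the tremble of a hypothetical aligned-frame
# folder against its overall average graph.
center_maps, tremble_plots, tremble_info = Tremble_Calculator_From_File(
    r'G:\Test_Data\Example_Run\Results\Aligned_Frames',
    cut_shape=(10, 5),
    move_method='average')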
Example #23
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 12 10:34:23 2021

@author: ZR
"""

from Standard_Aligner import Standard_Aligner
Sa = Standard_Aligner(r'K:\Test_Data\2P\210708_L76_2P',
                      list(range(1, 21)),
                      final_base='1-017')
Sa.One_Key_Aligner()
from My_Wheels.Stim_Frame_Align import One_Key_Stim_Align
One_Key_Stim_Align(r'K:\Test_Data\2P\210708_L76_2P\210709_L76_2P_stimuli')
from My_Wheels.Standard_Stim_Processor import One_Key_Frame_Graphs
from My_Wheels.Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
OD_Para = Sub_Dic_Generator('OD_2P')
One_Key_Frame_Graphs(r'K:\Test_Data\2P\210708_L76_2P\1-015', OD_Para)
G8_Para = Sub_Dic_Generator('G8+90')
One_Key_Frame_Graphs(r'K:\Test_Data\2P\210708_L76_2P\1-018', G8_Para)
RG_Para = Sub_Dic_Generator('RGLum4')
One_Key_Frame_Graphs(r'K:\Test_Data\2P\210708_L76_2P\1-020', RG_Para)

import My_Wheels.OS_Tools_Kit as ot
all_cell_dic = ot.Load_Variable(
    r'K:\Test_Data\2P\210629_L76_2P\L76_210629A_All_Cells.ac')
Example #24
Translation_Alignment(all_folders,base_mode=1,align_range=35,align_boulder=35)
'''Attention here: 1-012 and 1-013 have more movement than 20 pix, making this hard to use.'''

#%% Align Stim and frame of each stim folder.
from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
all_stim_folders = [
    r'G:\Test_Data\2P\201111_L76_LM\201111_L76_2P_stimuli\Run02_2P_G8',
    r'G:\Test_Data\2P\201111_L76_LM\201111_L76_2P_stimuli\Run03_2P_manual_OD8',
    r'G:\Test_Data\2P\201111_L76_LM\201111_L76_2P_stimuli\Run07_2P_RGLum4',
    r'G:\Test_Data\2P\201111_L76_LM\201111_L76_2P_stimuli\Run08_2P_RGLum4_RG',
    r'G:\Test_Data\2P\201111_L76_LM\201111_L76_2P_stimuli\Run09_2P_RGLum4',
    ]
for i in range(len(all_stim_folders)):
    current_stim_folder = all_stim_folders[i]
    _,current_Stim_Frame_Align = Stim_Frame_Align(current_stim_folder)
    OS_Tools.Save_Variable(current_stim_folder, 'Stim_Frame_Align', current_Stim_Frame_Align)
#%% Cell Find.
from My_Wheels.Cell_Find_From_Graph import On_Off_Cell_Finder
import My_Wheels.Graph_Operation_Kit as Graph_tools
def Cell_Find(run_folder):
    output_folder = run_folder+r'\Results'
    aligned_frame_folder = output_folder+r'\Aligned_Frames'
    all_tif_name = OS_Tools.Get_File_Name(aligned_frame_folder)
    Stim_Frame_Dic = OS_Tools.Load_Variable(output_folder,'Stim_Frame_Align.pkl')
    on_off_graph,Finded_Cells = On_Off_Cell_Finder(all_tif_name, Stim_Frame_Dic,shape_boulder=[20,20,20,35],filter_method = 'Gaussian',LP_Para = ((5,5),1.5))
    cell_folder = output_folder+r'\Cells'
    OS_Tools.Save_Variable(cell_folder, 'Finded_Cells', Finded_Cells,'.cell')
    Graph_tools.Show_Graph(on_off_graph, 'on-off_graph', cell_folder)
    all_keys = list(Finded_Cells.keys())
    all_keys.remove('All_Cell_Information')
    for i in range(len(all_keys)):
Example #25
def Video_From_File(data_folder,
                    plot_range=(0, 9999),
                    graph_size=(472, 472),
                    file_type='.tif',
                    fps=15,
                    gain=20,
                    LP_Gaussian=([5, 5], 1.5),
                    frame_annotate=True,
                    cut_boulder=[20, 20, 20, 20]):
    '''
    Write all files in a folder as a video.

    Parameters
    ----------
    data_folder : (str)
        Frame folder. All frames in this folder will be written into the video. Dtype shall be 'u2' or there will be a problem.
    plot_range : (2-element-tuple), optional
        Range of frame IDs to be written. The default is (0, 9999).
    graph_size : (2-element-tuple), optional
        Frame size AFTER cut. The default is (472, 472).
    file_type : (str), optional
        Data type of graph files. The default is '.tif'.
    fps : (int), optional
        Frames per second. The default is 15.
    gain : (int), optional
        Show gain. The default is 20.
    LP_Gaussian : (tuple), optional
        LP Gaussian filter parameter. Only low pass is done. The default is ([5, 5], 1.5).
    frame_annotate : (bool), optional
        Whether we annotate the frame number on the video. The default is True.
    cut_boulder : (list), optional
        Boulder cut of graphs, in UDLR order. The default is [20, 20, 20, 20].


    Returns
    -------
    bool
        True if function processed.

    '''

    all_tif_name = OS_Tools.Get_File_Name(path=data_folder,
                                          file_type=file_type)
    start_frame = plot_range[0]
    end_frame = min(plot_range[1], len(all_tif_name))
    all_tif_name = all_tif_name[start_frame:end_frame]
    graph_num = len(all_tif_name)
    # fourcc must match the container: 'mp4v' pairs with the .mp4 extension.
    video_writer = cv2.VideoWriter(data_folder + r'\Video.mp4',
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   fps, graph_size, 0)
    #video_writer = cv2.VideoWriter(data_folder+r'\\Video.avi',-1,fps,graph_size,0)
    for i in range(graph_num):
        raw_graph = cv2.imread(all_tif_name[i], -1).astype('f8')
        # Cut graph boulder.
        raw_graph = Graph_Tools.Graph_Cut(raw_graph, cut_boulder)
        # Apply display gain and rescale to 8-bit.
        gained_graph = np.clip(raw_graph.astype('f8') * gain / 256, 0,
                               255).astype('u1')
        # Then low-pass filter if required.
        if LP_Gaussian:
            u1_writable_graph = Filters.Filter_2D(gained_graph, LP_Gaussian,
                                                  False)
        else:
            u1_writable_graph = gained_graph
        # Annotate the frame number if required.
        if frame_annotate:
            cv2.putText(u1_writable_graph, 'Frame ID = ' + str(i), (250, 30),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255), 1)
        video_writer.write(u1_writable_graph)
    video_writer.release()  # Flush and close the video file.
    return True
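# A minimal usage sketch for the writer above; the frame folder is a hypothetical path,
# and only the frame range and fps deviate from the defaults:
Video_From_File(r'K:\Test_Data\2P\Example_Run\Results\Aligned_Frames',
                plot_range=(0, 500), fps=15)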
Example #26
    ]
all_run_folder = List_Tools.List_Annex(data_folder, run_folder)
Align.Translation_Alignment(all_run_folder,base_mode = 1,align_range=50,align_boulder=50,big_memory_mode=True)
#%% Then find cells from the post-alignment spontaneous average graph.
from My_Wheels.Cell_Find_From_Graph import Cell_Find_And_Plot
Cell_Find_And_Plot(r'G:\Test_Data\2P\201211_L76_2P\1-001\Results', 'Global_Average_After_Align.tif', 'Global_Morpho',find_thres= 1.5,shape_boulder = [20,20,30,20])
#%% Then calculate the stim train of each stim series.
from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
all_stim_folder = [
    r'G:\Test_Data\2P\201211_L76_2P\201211_L76_2P_stimuli\Run10_2P_G8',
    r'G:\Test_Data\2P\201211_L76_2P\201211_L76_2P_stimuli\Run12_2P_OD8_auto',
    r'G:\Test_Data\2P\201211_L76_2P\201211_L76_2P_stimuli\Run14_2P_RGLum4',
    ]
for i in range(3):
    _,current_stim_dic = Stim_Frame_Align(all_stim_folder[i])
    OS_Tools.Save_Variable(all_stim_folder[i], 'Stim_Frame_Align', current_stim_dic)
#%% Then calculate spike trains for the different runs.
from My_Wheels.Spike_Train_Generator import Spike_Train_Generator
# Cycle over the basic stim runs (G8, OD8, RGLum4).
for i,index in enumerate([1,2,4]):
    current_aligned_tif_name  = OS_Tools.Get_File_Name(all_run_folder[index]+r'\Results\Aligned_Frames')
    current_stim = OS_Tools.Load_Variable(all_stim_folder[i],file_name='Stim_Frame_Align.pkl')['Original_Stim_Train']
    current_cell_info = OS_Tools.Load_Variable(all_run_folder[index]+r'\Results\Global_Morpho\Global_Morpho.cell')['All_Cell_Information']
    F_train,dF_F_train = Spike_Train_Generator(current_aligned_tif_name, current_cell_info,Base_F_type= 'nearest_0',stim_train = current_stim)
    OS_Tools.Save_Variable(all_run_folder[index]+r'\Results', 'F_train', F_train)
    OS_Tools.Save_Variable(all_run_folder[index]+r'\Results', 'dF_F_train', dF_F_train)
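# The saved trains can be reloaded later with the same kit; a minimal sketch for the
# G8 run (run index 1 in all_run_folder):
reloaded_dF_F = OS_Tools.Load_Variable(all_run_folder[1] + r'\Results', 'dF_F_train.pkl')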

#%% Then calculate standard stim map.
from My_Wheels.Standard_Stim_Processor import Standard_Stim_Processor
from My_Wheels.Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
Standard_Stim_Processor(r'G:\Test_Data\2P\201211_L76_2P\1-010',
Example #27
base_graph = cv2.imread(r'E:\Test_Data\2P\210101_L76_2P\1-002\Results\Global_Average_After_Align.tif',-1)
Translation_Alignment(after_spons,base_mode = 'input',input_base = base_graph,graph_shape = (376,352))
#%% Then analyze full frame stim maps.
basic_stim_folders = [r'I:\Test_Data\2P\210101_L76_2P\1-014',
                      r'I:\Test_Data\2P\210101_L76_2P\1-016',
                      r'I:\Test_Data\2P\210101_L76_2P\1-017'
                      ]
Translation_Alignment(basic_stim_folders,base_mode = 0)
# Get stim-frame alignment for each stim run.
all_stim_folders = [r'I:\Test_Data\2P\210101_L76_2P\210101_L76_2P_stimuli\Run14_2P_OD8_auto',
                    r'I:\Test_Data\2P\210101_L76_2P\210101_L76_2P_stimuli\Run16_2P_G8',
                    r'I:\Test_Data\2P\210101_L76_2P\210101_L76_2P_stimuli\Run17_2P_RGLum4'
                    ]
for i in range(3):
    _,Stim_Frame_Dic = Stim_Frame_Align(all_stim_folders[i])
    OS_Tools.Save_Variable(all_stim_folders[i], 'Stim_Frame_Align', Stim_Frame_Dic)
#%% Then cell find for all stim maps.
from My_Wheels.Cell_Find_From_Graph import Cell_Find_And_Plot
Cell_Find_And_Plot(r'I:\Test_Data\2P\210101_L76_2P\1-014\Results','Global_Average_After_Align.tif','Stim_Global',find_thres = 1.5) 
cell_folder = r'I:\Test_Data\2P\210101_L76_2P\1-014\Results\Stim_Global'
#%% Then calculate all stim graphs.
from My_Wheels.Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
from My_Wheels.Standard_Stim_Processor import One_Key_Stim_Maps
OD_para = Sub_Dic_Generator('OD_2P')
One_Key_Stim_Maps(r'I:\Test_Data\2P\210101_L76_2P\1-014', cell_folder, OD_para)
# Then G8
G8_para = Sub_Dic_Generator('G8+90')
One_Key_Stim_Maps(r'I:\Test_Data\2P\210101_L76_2P\1-016', cell_folder, G8_para)
# Then RGLum4 
RG_para = Sub_Dic_Generator('RGLum4')
One_Key_Stim_Maps(r'I:\Test_Data\2P\210101_L76_2P\1-017', cell_folder, RG_para)
Example #28
import cv2
import numpy as np
import skimage.measure
import skimage.morphology
import My_Wheels.OS_Tools_Kit as OS_Tools
import My_Wheels.Graph_Operation_Kit as Graph_Tools
# Note: 'Visualize' used below is the project's cell-visualization helper; its exact
# module path is not shown in this snippet, so its import is omitted here.

def Cell_Find_From_Mannual(mask_graph_path,average_graph_path = None,boulder = 8,save = True):
    '''
    Find cells from a manually drawn mask.

    Parameters
    ----------
    mask_graph_path : (str)
        Path of the manually plotted mask graph.
    average_graph_path : (str), optional
        Path of the average graph. If not given, no combined graph will be produced.
    boulder : (int), optional
        Boulder width in pixels. Cells whose centroid falls inside this boulder are ignored.
        The default is 8.
    save : (bool), optional
        Whether we save cell graphs into the mask folder. The default is True.

    Returns
    -------
    Cell_Finded : (dict)
        Cell dictionary in the same format as the other cell finders.

    '''
    save_path = OS_Tools.CDdotdot(mask_graph_path)
    mask_graph = cv2.imread(mask_graph_path,0)
    height,width = mask_graph.shape
    thres = mask_graph.max()/2
    thres_graph = mask_graph>thres# Get binary cell graph
    washed_thres_graph = skimage.morphology.remove_small_objects(thres_graph,5,connectivity = 1)# remove draw errors.
    cell_label = skimage.measure.label(washed_thres_graph)
    All_Cells = skimage.measure.regionprops(cell_label)
    # Delete cells whose centroid falls inside the boulder.
    for i in range(len(All_Cells)-1,-1,-1):
        current_cell = All_Cells[i]
        current_height,current_width = current_cell.centroid
        if current_height<boulder or (height-current_height)<boulder:
            All_Cells.pop(i)
        elif current_width<boulder or (width-current_width)<boulder:
            All_Cells.pop(i)
    # Visualization here.
    visual_cell_graph = Visualize.Cell_Visualize(All_Cells)
    annotated_graph = Visualize.Label_Cell(visual_cell_graph,All_Cells,color = (255,255,0))
    if average_graph_path is None:
        print('Average graph not given, no combined graph.')
        combined_graph = []
        labled_combined_graph = []
    else:
        average_graph = cv2.imread(average_graph_path,1)# Read in 8 bit color map
        # Then annotate cell mask on average graph.
        cell_mask = visual_cell_graph[:,:,0]/2
        combined_graph = average_graph.astype('f8')*0.7
        combined_graph[:,:,1] = np.clip((combined_graph[:,:,1]+cell_mask),0,255)
        combined_graph = combined_graph.astype('u1')
        labled_combined_graph = Visualize.Label_Cell(combined_graph, All_Cells,color = (255,255,0))
    # At last, save all cell information and cell maps,
    Cell_Finded = {}
    Cell_Finded['All_Cell_Information'] = All_Cells
    Cell_Finded['Cell_Graph'] = visual_cell_graph
    Cell_Finded['Annotate_Cell_Graph'] = annotated_graph
    Cell_Finded['Combined_Graph'] = combined_graph
    Cell_Finded['Combined_Graph_With_Number'] = labled_combined_graph
    if save:
        OS_Tools.Save_Variable(save_path, 'Manuall_Cells', Cell_Finded,'.cell')
        Graph_Tools.Show_Graph(visual_cell_graph, 'Cell_Graph', save_path)
        Graph_Tools.Show_Graph(annotated_graph, 'Annotate_Cell_Graph', save_path)
        if not isinstance(combined_graph, list):
            Graph_Tools.Show_Graph(combined_graph, 'Combined_Graph', save_path)
            Graph_Tools.Show_Graph(labled_combined_graph, 'Combined_Graph_With_Number', save_path)

    return Cell_Finded
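# A minimal usage sketch for the manual cell finder above; both paths are hypothetical
# placeholders for a hand-drawn mask and its matching average graph:
manual_cells = Cell_Find_From_Mannual(
    r'K:\Test_Data\2P\Example_Run\Results\Manual_Mask.png',
    average_graph_path=r'K:\Test_Data\2P\Example_Run\Results\Global_Average_After_Align.tif',
    boulder=8)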
Example #29
import My_Wheels.List_Operation_Kit as List_Tools
import My_Wheels.Graph_Operation_Kit as Graph_Tools
import My_Wheels.OS_Tools_Kit as OS_Tools
import cv2
data_folder = [r'E:\Test_Data\2P\201222_L76_2P']
run_list = [
    '1-001',  # Spon
    '1-008',  # OD
    '1-010',  # G8
    '1-011',  # RGLum4
    '1-014'  # Spon After
]
all_runs = List_Tools.List_Annex(data_folder, run_list)
#%% Extend Run01 frames by 3 pixels on the right to match the ROI change.
run_1 = all_runs[0]
run1_all_tif = OS_Tools.Get_File_Name(run_1)
save_path = run_1 + r'\shape_extended'
OS_Tools.mkdir(save_path)
for i in range(len(run1_all_tif)):
    current_graph = cv2.imread(run1_all_tif[i], -1)
    extended_graph = Graph_Tools.Boulder_Extend(
        current_graph, [0, 0, 0, 3])  # 3 pix on the right.
    current_graph_name = run1_all_tif[i].split('\\')[-1]
    Graph_Tools.Show_Graph(extended_graph,
                           current_graph_name,
                           save_path,
                           show_time=0)
#%% Then align Run01_Spon.
from My_Wheels.Translation_Align_Function import Translation_Alignment
Translation_Alignment([all_runs[0] + r'\shape_extended'],
                      graph_shape=(325, 324))
Example #30
All0_Graph[:,:,2] += ROI_boulder_prev
All0_Graph_Annotate = np.clip(All0_Graph,0,255).astype('u1')
Graph_Tools.Show_Graph(All0_Graph_Annotate, 'All-0_Annotate', save_folder)
#%% Last, RG-Lum
RG_Graph = cv2.imread(r'I:\Test_Data\2P\201211_L76_2P\1-014\Results\Subtraction_Graphs\RG-Lum_SubGraph.tif').astype('f8')
RG_Graph[:,:,2] += ROI_boulder_prev
RG_Graph_Annotate = np.clip(RG_Graph,0,255).astype('u1')
Graph_Tools.Show_Graph(RG_Graph_Annotate, 'RG_Annotate', save_folder)
#%% Next job: compare videos of the ROI run and the full-frame run.
import My_Wheels.Video_Writer as Video_Writer
roi_spon_folder = r'I:\Test_Data\2P\201222_L76_2P\1-001\Results\Aligned_Frames'
Video_Writer.Video_From_File(roi_spon_folder, graph_size=(325, 324), fps=30, cut_boulder=[0, 0, 0, 0])
full_frame_folder = r'I:\Test_Data\2P\201211_L76_2P\1-001\Results\Aligned_Frames'
Video_Writer.Video_From_File(full_frame_folder, graph_size=(492, 492), fps=15, cut_boulder=[10, 10, 10, 10])
#%% Then find an example cell in both the ROI and the full-frame data.
roi_cell_dic = OS_Tools.Load_Variable(r'I:\Test_Data\2P\201222_L76_2P\1-008\Results\Global_Morpho','Global_Morpho.cell')
ROI_G8_F_Train = OS_Tools.Load_Variable(r'I:\Test_Data\2P\201222_L76_2P\1-010\Results\F_Trains.pkl')
ROI_G8_Stim_Dic = OS_Tools.Load_Variable(r'I:\Test_Data\2P\201222_L76_2P\1-010\Results\Stim_Frame_Align.pkl')

full_cell_dic = OS_Tools.Load_Variable(r'I:\Test_Data\2P\201211_L76_2P\1-010\Results\Global_Morpho\Global_Morpho.cell')
full_G8_F_Train = OS_Tools.Load_Variable(r'I:\Test_Data\2P\201211_L76_2P\1-010\Results\F_train.pkl')
full_Stim_Dic = OS_Tools.Load_Variable(r'I:\Test_Data\2P\201211_L76_2P\1-010\Results\Stim_Frame_Align.pkl')
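# To eyeball one example cell, its G8 fluorescence trace from both recordings can be
# overlaid. A minimal sketch, assuming each loaded train can be indexed by cell id and
# yields a 1-D array (the exact structure is not shown in this snippet); matplotlib is
# an assumed extra dependency:
import matplotlib.pyplot as plt
example_cell = 0  # hypothetical cell index, not taken from the original analysis
plt.figure(figsize=(10, 4))
plt.plot(ROI_G8_F_Train[example_cell], label='ROI run (201222, 1-010)')
plt.plot(full_G8_F_Train[example_cell], label='Full-frame run (201211, 1-010)')
plt.legend()
plt.title('Example cell F train, ROI vs. full frame')
plt.show()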
#%% Run01 brightest and dimmest frame averages.
aligned_folder = r'I:\Test_Data\2P\201222_L76_2P\1-001\Results\Aligned_Frames'
save_path = r'I:\Test_Data\2P\201222_L76_2P\1-001\Results'
bright_graph,_ = Selector.Intensity_Selector(aligned_folder,list_write= False)
bright_graph = np.clip(bright_graph.astype('f8')*32,0,65535).astype('u2')
Graph_Tools.Show_Graph(bright_graph, 'Brightest_Graph', save_path)

low_graph,_ = Selector.Intensity_Selector(aligned_folder,list_write= False,mode = 'smallest')
low_graph = np.clip(low_graph.astype('f8')*32,0,65535).astype('u2')
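# To mirror the brightest-frame average above, the dim average can be saved the same
# way; 'Dimmest_Graph' is an assumed file name (not in the original):
Graph_Tools.Show_Graph(low_graph, 'Dimmest_Graph', save_path)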