def One_Key_Frame_Graphs(
        data_folder,
        sub_dic,
        show_clip=3,
        alinged_sub_folder=r'\Results\Final_Aligned_Frames',
        Stim_Align_sub_folder=r'\Results\Stim_Frame_Align.pkl'):
    result_folder = data_folder + r'\Results'
    graph_save_folder = result_folder + r'\Only_Frame_SubGraphs'
    OS_Tools.mkdir(result_folder)
    OS_Tools.mkdir(graph_save_folder)
    stim_path = data_folder + Stim_Align_sub_folder
    stim_dic = OS_Tools.Load_Variable(stim_path)
    all_tif_name = OS_Tools.Get_File_Name(data_folder + alinged_sub_folder)
    graph_num = len(sub_dic)
    all_sub_graph_names = list(sub_dic.keys())
    for i in range(graph_num):
        current_name = all_sub_graph_names[i]
        current_a = Frame_ID_Extractor(stim_dic, sub_dic[current_name][0])
        current_b = Frame_ID_Extractor(stim_dic, sub_dic[current_name][1])
        current_sub_graph, current_t_graph, current_info_dic = Single_Subgraph_Generator(
            all_tif_name, current_a, current_b)
        current_sub_graph = Graph_Tools.Clip_And_Normalize(
            current_sub_graph, show_clip)
        current_t_graph = Graph_Tools.Clip_And_Normalize(
            current_t_graph, show_clip)
        # Save graphs
        Graph_Tools.Show_Graph(current_sub_graph, current_name + '_Sub_Graph',
                               graph_save_folder)
        Graph_Tools.Show_Graph(current_t_graph, current_name + '_t_Graph',
                               graph_save_folder)
        OS_Tools.Save_Variable(graph_save_folder, current_name + r'_Sub_Info',
                               current_info_dic, '.info')
    return True
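A minimal usage sketch for the function above. The run folder and condition IDs below are hypothetical placeholders, and the sub_dic layout (a name mapped to a pair of A/B condition lists) is inferred from how sub_dic is indexed in the body.
example_run_folder = r'D:\Test_Data\200101_L00_2P\1-001'  # hypothetical run folder
example_sub_dic = {'OD': ([1, 3, 5, 7], [2, 4, 6, 8])}  # A-condition IDs vs. B-condition IDs
One_Key_Frame_Graphs(example_run_folder, example_sub_dic, show_clip=3)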
Example #2
 def Before_Run_Average(self):
     """
     Generate global and per-run average graphs.
     This part is automatic: averaged graphs are written into the output folders, nothing is returned.
     
     Returns
     -------
     None.
     """
     print('Averaging before align...')
     #Get Run Average First
     self.Before_Align_Dics = {}# Define a dictionary to save before align graphs.
     total_graph_num = 0 # counter of graph numbers
     for i in range(len(self.Before_Align_Tif_Name)):
         run_graph_num = len(self.Before_Align_Tif_Name[i])# How many graphs in this run
         total_graph_num += run_graph_num
         current_run_average = Graph_Tools.Average_From_File(self.Before_Align_Tif_Name[i])
         current_run_average = Graph_Tools.Clip_And_Normalize(current_run_average,clip_std = 5)
         self.Before_Align_Dics[i] = (current_run_average,run_graph_num) # Write Current dict as a Tuple.
         Graph_Tools.Show_Graph(current_run_average, 'Run_Average',self.all_save_folders[i])# Show and save Run Average.
         
     # Then use the weighted average method to generate the global tif. This method can be faster than the original one.
     global_average_graph = np.zeros(shape = np.shape(self.Before_Align_Dics[0][0]),dtype = 'f8')# Base on shape of graph
     for i in range(len(self.Before_Align_Tif_Name)):
         global_average_graph += self.Before_Align_Dics[i][0].astype('f8')*self.Before_Align_Dics[i][1]/total_graph_num
     global_average_graph = Graph_Tools.Clip_And_Normalize(global_average_graph,clip_std = 5)
     
     # At last, save global average graph in every run folders.
     for i in range(len(self.all_save_folders)):
         Graph_Tools.Show_Graph(global_average_graph, 'Global_Average', self.all_save_folders[i],show_time = 0)
     self.Align_Base = global_average_graph # Define Base of Alignment, use this to do the job.
Example #3
def Alignment(base_graph, target_graph, boulder=20, align_range=20):
    """
    Move target graph to match base graph. Fill blanks with the line median.

    Parameters
    ----------
    base_graph : (2D Array)
        Base graph. All targets will be aligned to this one; the global average is usually used.
    target_graph : (2D Array)
        Current graph. This graph will be moved.
    boulder : (int), optional
        Width of the boulder cut from the base graph, so only the center region is used for alignment. The default is 20.
    align_range : (int), optional
        Maximum pixel shift of alignment. The default is 20.

    Returns
    -------
    x_bais : (int)
        X bias. A positive x_bais means the target graph shall move right to match the base.
    y_bais : (int)
        Y bias. A positive y_bais means the target graph shall move down to match the base.
    aligned_graph : (ndarray)
        Moved graph.

    """

    target_boulder = int(
        boulder +
        np.floor(align_range * 1.5))  # Target is cut with a bigger boulder.
    cutted_target = Graph_Tools.Graph_Cut(
        target_graph,
        [target_boulder, target_boulder, target_boulder, target_boulder])
    target_height, target_width = np.shape(cutted_target)
    cutted_base = Graph_Tools.Graph_Cut(base_graph,
                                        [boulder, boulder, boulder, boulder])
    base_height, base_width = np.shape(cutted_base)
    extended_target = np.pad(
        np.rot90(cutted_target,
                 2), ((0, base_height - 1), (0, base_width - 1)), 'constant'
    )  # Extend graph here to make sure the returned FFT matrix has the same shape, making comparison easier.
    extended_base = np.pad(cutted_base,
                           ((0, target_height - 1), (0, target_width - 1)),
                           'constant')
    x_bais, y_bais = Bais_Correlation(extended_base, extended_target,
                                      align_range)
    temp_aligned_graph = np.pad(
        target_graph, ((align_range + y_bais, align_range - y_bais),
                       (align_range + x_bais, align_range - x_bais)),
        'median')  # Pad the target graph border with line medians.
    aligned_graph = temp_aligned_graph[
        align_range:-align_range,
        align_range:-align_range]  # Cut Boulder, return moved graph.

    return x_bais, y_bais, aligned_graph
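A usage sketch for Alignment with hypothetical file paths; both frames are assumed to be same-sized tif graphs read with cv2.
base = cv2.imread(r'D:\Test_Data\Run01\Results\Global_Average.tif', -1)  # hypothetical path
target = cv2.imread(r'D:\Test_Data\Run01\Spon-001.tif', -1)  # hypothetical path
x_bais, y_bais, moved_frame = Alignment(base, target, boulder=20, align_range=20)
print('Target shall move x = ' + str(x_bais) + ', y = ' + str(y_bais) + ' pixels.')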
Example #4
def Cell_Find(run_folder):
    output_folder = run_folder+r'\Results'
    aligned_frame_folder = output_folder+r'\Aligned_Frames'
    all_tif_name = OS_Tools.Get_File_Name(aligned_frame_folder)
    Stim_Frame_Dic = OS_Tools.Load_Variable(output_folder,'Stim_Frame_Align.pkl')
    on_off_graph,Finded_Cells = On_Off_Cell_Finder(all_tif_name, Stim_Frame_Dic,shape_boulder=[20,20,20,35],filter_method = 'Gaussian',LP_Para = ((5,5),1.5))
    cell_folder = output_folder+r'\Cells'
    OS_Tools.Save_Variable(cell_folder, 'Finded_Cells', Finded_Cells,'.cell')
    Graph_Tools.Show_Graph(on_off_graph, 'on-off_graph', cell_folder)
    all_keys = list(Finded_Cells.keys())
    all_keys.remove('All_Cell_Information')
    for i in range(len(all_keys)):
        Graph_Tools.Show_Graph(Finded_Cells[all_keys[i]], all_keys[i], cell_folder)
    return True
def On_Off_Cell_Finder(
        all_tif_name,
        Stim_Frame_Dic,
        find_thres = 1.5,
        max_pix = 1000,
        min_pix = 20,
        filter_method = 'Gaussian',
        LP_Para = ((5,5),1.5),
        HP_Para = False,
        shape_boulder = [20,20,20,20], 
        sharp_gauss = ([7,7],1.5),
        back_gauss = ([15,15],7),
        size_limit = 20
        ):
    # Generate On-Off graph.
    off_list = Stim_Frame_Dic[0]
    all_keys = list(Stim_Frame_Dic.keys())
    all_keys.remove('Original_Stim_Train')
    all_keys.remove(-1)
    all_keys.remove(0)
    on_list = []
    for i in range(len(all_keys)):
        on_list.extend(Stim_Frame_Dic[all_keys[i]])
    on_off_graph,_,_ = Single_Subgraph_Generator(all_tif_name, on_list, off_list,filter_method,LP_Para,HP_Para,t_map = False)
    on_off_graph = Graph_Tools.Clip_And_Normalize(on_off_graph,clip_std = 2.5)
    Finded_Cells = Cell_Find_From_Graph(on_off_graph,find_thres,max_pix,min_pix,shape_boulder,sharp_gauss,back_gauss,size_limit)
    return on_off_graph,Finded_Cells
Example #6
def Least_Tremble_Average_Graph(data_folder,
                                average_prop=0.1,
                                cut_shape=(9, 9)):
    all_tif_name = np.array(OS_Tools.Get_File_Name(data_folder))
    _, frac_disps = Tremble_Evaluator(data_folder, cut_shape=cut_shape)
    frac_num, frame_num, _ = frac_disps.shape
    # Then calculate average center and least error graph.
    frac_centers = np.zeros(shape=(frac_num, 2), dtype='f8')
    for i in range(frac_num):
        frac_centers[i, 0] = frac_disps[i, :, 0].mean()
        frac_centers[i, 1] = frac_disps[i, :, 1].mean()
    # And all frac_total movings
    total_movings = np.zeros(frame_num, dtype='f8')
    for i in range(frame_num):
        c_dist = 0
        for j in range(frac_num):
            c_dist += (frac_centers[j][0] - frac_disps[j, i, 0])**2 + (
                frac_centers[j][1] - frac_disps[j, i, 1])**2
        total_movings[i] = c_dist
    # Then find least props.
    used_num = int(frame_num * average_prop)
    if used_num < 300:  # least num of average is set to 300 to avoid problem.
        used_num = min(300, frame_num)
    print('Average of most stable ' + str(used_num) + ' Frames.')
    if used_num >= frame_num:  # meaning all frames are used
        graph_names = all_tif_name
    else:
        used_frame_ind = np.argpartition(total_movings, used_num)[0:used_num]
        graph_names = all_tif_name[used_frame_ind]
    averaged_graph = Graph_Tools.Average_From_File(graph_names)

    return averaged_graph, graph_names
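A usage sketch with a hypothetical aligned-frame folder; the most stable ~10% of frames (at least 300) are kept for averaging.
stable_avg, used_frames = Least_Tremble_Average_Graph(
    r'D:\Test_Data\Run01\Results\Aligned_Frames', average_prop=0.1, cut_shape=(9, 9))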
Example #7
def Partial_Average_From_File(data_folder,
                              start_frame,
                              stop_frame,
                              graph_type='.tif',
                              LP_Para=False,
                              HP_Para=False,
                              filter_method=False):
    '''
    Average specific part of graphs in the folder.

    Parameters
    ----------
    data_folder : (str)
        Data folder.
    start_frame : (int)
        Start ID of frame selection.
    stop_frame : (int)
        Stop ID of frame selection.
    graph_type : (str), optional
        File extension of frames. The default is '.tif'.
    LP_Para / HP_Para / filter_method : optional
        Filter parameters. The default is False.

    Returns
    -------
    Averaged_Graph : (2D Array)
        Averaged graphs.

    '''
    all_tif_name = np.array(
        OS_Tools.Get_File_Name(data_folder, file_type=graph_type))
    used_tif_name = all_tif_name[start_frame:stop_frame]
    Averaged_Graph = Graph_Tools.Average_From_File(used_tif_name, LP_Para,
                                                   HP_Para, filter_method)
    return Averaged_Graph
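A usage sketch with a hypothetical folder; frames 0-499 are averaged without any filtering.
partial_avg = Partial_Average_From_File(
    r'D:\Test_Data\Run01\Results\Aligned_Frames', 0, 500)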
def Cell_Find_From_active(
        aligned_data_folder,
        find_thres = 1,
        active_mode = 'biggest',
        propotion = 0.05,
        max_pix = 1000,
        min_pix = 20,
        shape_boulder = [20,20,20,20], 
        sharp_gauss = ([7,7],1.5),
        back_gauss = ([15,15],7),
        size_limit = 20  
        ):
    intensity_selected_graph,_ = Graph_Selector.Intensity_Selector(aligned_data_folder,mode = active_mode,propotion=propotion,list_write=False)
    save_folder = '\\'.join(aligned_data_folder.split('\\')[:-1])
    Graph_Tools.Show_Graph(Graph_Tools.Clip_And_Normalize(intensity_selected_graph,clip_std = 5), 'Cell_Find_Base', save_folder)
    Cell_Finded = Cell_Find_And_Plot(save_folder, 'Cell_Find_Base.tif', 'Active_Cell',find_thres,max_pix,min_pix,shape_boulder,sharp_gauss,back_gauss,size_limit)
    return Cell_Finded
Example #9
    def After_Align_Average(self):
        """
        This function will generate after-align average graphs for each run and globally, and then save them.
        
        Returns
        -------
        None.

        """
        print('Aligning done. ')
        self.After_Align_Graphs = {} # Initialize a dictionary, will record all aligned graphs averages and graph nums.
        # Fill After Align Graph Dictionary first
        total_graph_num = 0
        for i in range(len(self.Aligned_frame_folders)):
            current_run_names = OS_Tools.Get_File_Name(self.Aligned_frame_folders[i])
            temp_average = Graph_Tools.Average_From_File(current_run_names) # This will generate an average graph in 'f8' format.
            current_graph_aligned = Graph_Tools.Clip_And_Normalize(temp_average,clip_std = 5)
            Graph_Tools.Show_Graph(current_graph_aligned, 'Run_Average_After_Align', self.all_save_folders[i])
            current_run_Frame_Num = len(current_run_names)
            total_graph_num += current_run_Frame_Num
            self.After_Align_Graphs[i] = (current_graph_aligned,current_run_Frame_Num)
        global_average_after_align = np.zeros(np.shape(current_graph_aligned),dtype = 'f8')
        
        # Then calculate the global average as a weighted sum over runs.
        for i in range(len(self.all_save_folders)):
            global_average_after_align += self.After_Align_Graphs[i][0].astype('f8')*self.After_Align_Graphs[i][1]/total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(global_average_after_align,clip_std = 5)
        
        # Then save global graph into each folder.
        for i in range(len(self.all_save_folders)):
            if i == 0:
                Graph_Tools.Show_Graph(global_average_after_align, 'Global_Average_After_Align', self.all_save_folders[i])
            else:
                Graph_Tools.Show_Graph(global_average_after_align, 'Global_Average_After_Align', self.all_save_folders[i],show_time = 0)
Example #10
def Intensity_Selector(data_folder,
                       graph_type='.tif',
                       mode='biggest',
                       propotion=0.05,
                       list_write=True):
    '''
    Select frames with the biggest or smallest average intensity, and generate an average graph.

    Parameters
    ----------
    data_folder : (str)
        Data folder.
    graph_type : (str), optional
        File type of graphs. The default is '.tif'.
    mode : ('biggest' or 'smallest'), optional
        Type of frame selection. The default is 'biggest'.
    propotion : (float), optional
        Proportion of graphs selected. The default is 0.05.
    list_write : (bool), optional
        Whether we write down graph intensity data. The default is True.

    Returns
    -------
    averaged_graph : (2D Array)
        Averaged graph of selected frames.
    selected_graph_name : (ND List)
        List of selected graph names.

    '''
    all_graph_name = np.array(
        OS_Tools.Get_File_Name(data_folder, file_type=graph_type))
    graph_Num = len(all_graph_name)
    bright_data = np.zeros(graph_Num, dtype='f8')
    for i in range(graph_Num):
        current_graph = cv2.imread(all_graph_name[i], -1)
        bright_data[i] = np.mean(current_graph)
        # write bright data if required.
    if list_write == True:
        OS_Tools.Save_Variable(data_folder, 'brightness_info', bright_data)
    # Then select given mode frames.
    used_graph_num = int(graph_Num * propotion)
    if mode == 'biggest':
        used_graph_id = np.argpartition(bright_data,
                                        -used_graph_num)[-used_graph_num:]
    elif mode == 'smallest':
        used_graph_id = np.argpartition(bright_data,
                                        used_graph_num)[0:used_graph_num]
    selected_graph_name = all_graph_name[used_graph_id]
    averaged_graph = Graph_Tools.Average_From_File(selected_graph_name)
    return averaged_graph, selected_graph_name
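A usage sketch with a hypothetical folder; the brightest 5% of frames are averaged and the brightness list is not written to disk.
bright_avg, bright_names = Intensity_Selector(
    r'D:\Test_Data\Run01\Results\Aligned_Frames',
    mode='biggest', propotion=0.05, list_write=False)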
def Tremble_Calculator_From_File(
        data_folder,
        graph_type='.tif',
        cut_shape=(8, 8),
        boulder=20,
        base_method='former',
        base=[],
):
    '''
    Calculate align tremble from graph. This program is used to evaluate align quality.
    
    Parameters
    ----------
    data_folder : (str)
        Data folder of graphs.
    graph_type : (str),optional
        Extension name of input graphs. The default is '.tif'.
    cut_shape : (tuple), optional
        Shape of fracture cut. The default is (8, 8).
    boulder : (int),optional
        Boulder of graph. Cut off and not used in the following calculation. The default is 20.
    base_method : ('average' or 'former' or 'input'), optional
        Method of base selection. The default is 'former'.
        'average' uses the overall average; 'former' uses the former frame; 'input' requires base to be given.
    base : (2D_NdArray), optional
        If base_method == 'input', base should be given here. The default is [].

    Returns
    -------
    mass_center_maps(Graph)
        A plotted graph, showing movement trace of mass center.
    tremble_plots : (List)
        List of tremble distances for each fracture graph.
    tremble_information : (Dic)
        Dictionary of tremble information.
    '''
    all_tif_name = OS_Tools.Get_File_Name(data_folder, file_type=graph_type)
    average_graph = Graph_Tools.Average_From_File(all_tif_name)
    tremble_information = {}
    #1. Get base graph first.
    if base_method == 'input':
        base_graph = base
    elif base_method == 'average':
        base_graph = average_graph
    elif base_method == 'former':
        base_graph = cv2.imread(all_tif_name[0], -1)  # First input graph.
    else:
        raise IOError('Invalid Base Method, check please.\n')
Example #12
 def Align_Cores(self):
     """
     This function will align every graph and save them in the 'Aligned_Frames' folder.
     
     Returns
     -------
     None.
     """
     print('Pre work done. Aligning graphs...')
     for i in range(len(self.Before_Align_Tif_Name)): # Cycle all runs
         for j in range(len(self.Before_Align_Tif_Name[i])): # Cycle current run, between all graph.
             base = self.Align_Base # Use global average as base graph
             current_graph = cv2.imread(self.Before_Align_Tif_Name[i][j],-1)
             _,_,current_aligned_graph = Alignment(base,current_graph) # Calculate aligned graph 
             # Then save graph.
             graph_name = self.Before_Align_Tif_Name[i][j].split('\\')[-1][:-4]
             Graph_Tools.Show_Graph(current_aligned_graph,graph_name,self.Aligned_frame_folders[i],show_time = 0)
Example #13
def Cell_Find_And_Plot(
        graph_folder,
        graph_name,
        Cell_Label,
        find_thres = 2.5,
        max_pix = 1000,
        min_pix = 20,
        shape_boulder = [20,20,20,20], 
        sharp_gauss = ([7,7],1.5),
        back_gauss = ([15,15],7),
        size_limit = 20    
        ):
    """
    Cell find from file.

    Parameters
    ----------
    graph_folder : (str)
        Graph folder.
    graph_name : (str)
        Graph name. The extension name shall be included.
    Cell_Label : (str)
        Save sub Folder. Cell data and cell graphs will be saved in this sub folder.
    find_thres,max_pix,min_pix,shape_boulder,sharp_gauss,back_gauss,size_limit : As Function 1, optional
        As Function 1.

    Returns
    -------
    Cell_Finded : (Dic)
        Dictionary of found cells and cell graphs, as returned by Cell_Find_From_Graph.

    """
    Base_Graph = cv2.imread(graph_folder + r'\\' + graph_name,-1)
    graph_save_folder = graph_folder + r'\\' + Cell_Label
    Finded_Cells = Cell_Find_From_Graph(Base_Graph,find_thres,max_pix,min_pix,shape_boulder,sharp_gauss,back_gauss,size_limit)
    OS_Tools.Save_Variable(graph_save_folder,Cell_Label,Finded_Cells,extend_name = '.cell')
    all_keys = list(Finded_Cells.keys())
    all_keys.remove('All_Cell_Information')
    for i in range(len(all_keys)):
        Graph_Tools.Show_Graph(Finded_Cells[all_keys[i]],graph_name = all_keys[i],save_path = graph_save_folder,show_time = 2000,write = True)
    return Finded_Cells
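A usage sketch with hypothetical folder and graph names; found cells are saved into the 'Morphology_Cells' sub folder.
found_cells = Cell_Find_And_Plot(r'D:\Test_Data\Run01\Results',
                                 'Global_Average_After_Align.tif',
                                 'Morphology_Cells', find_thres=2.5)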
Example #14
def Graph_Matcher(
    full_graph,
    ROI_graph,
):
    if full_graph.dtype == 'u2':
        full_graph = (full_graph // 256).astype('u1')
    aligned_ROI, h = Affine_Core_Point_Equal(ROI_graph,
                                             full_graph,
                                             targ_gain=1)
    aligned_ROI = (aligned_ROI // 256).astype('u1')
    # Here we get affine matrix h and aligned ROI graph.
    merged_graph = cv2.cvtColor(full_graph, cv2.COLOR_GRAY2RGB).astype('f8')
    location_graph = cv2.cvtColor(full_graph, cv2.COLOR_GRAY2RGB).astype(
        'f8')  # Define location here.
    merged_graph[:, :, 1] += aligned_ROI
    merged_graph = np.clip(merged_graph, 0, 255).astype('u1')
    # Then we annotate graph boulder in graph.
    rectangle = np.zeros(np.shape(ROI_graph), dtype='u1')
    rectangle = Graph_Tools.Boulder_Fill(rectangle, [3, 3, 3, 3], 255)
    ROI_boulder = cv2.warpPerspective(rectangle, h, (512, 512))
    location_graph[:, :, 2] += ROI_boulder
    location_graph = np.clip(location_graph, 0, 255).astype('u1')
    return merged_graph, location_graph, ROI_boulder, h
Example #15
from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
stim_folder = r'D:\ZR\Data_Temp\191101_L77_2P\191101_L77_2P_stimuli\Run02_RFloca25_shape3_dir4_noISI'
_, Stim_Frame_Align = Stim_Frame_Align(stim_folder,
                                       jmp_step=50,
                                       frame_thres=1.5)

#%% Step3, On Off Map Generation.
Num_Run = 1
off_list = Stim_Frame_Align[-1]
on_list = []
on_stim_ids = list(Stim_Frame_Align.keys())
on_stim_ids.remove(-1)
for i in range(len(on_stim_ids)):
    on_list.extend(Stim_Frame_Align[on_stim_ids[i]])
sub_graph, dF_F = Graph_Tools.Graph_Subtractor(all_tif_name,
                                               on_list,
                                               off_list,
                                               clip_std=5)
#Graph_Tools.Show_Graph(sub_graph,'On_Off_Graph',Align_Property['all_save_folders'][Num_Run])
print('dF/F Value is:' + str(dF_F))
#%% Step4, Cell find From Morphology and On-Off
from My_Wheels.Cell_Find_From_Graph import Cell_Find_And_Plot
Morphology_Graph_Name = r'Global_Average_After_Align.tif'
Morphology_Cells = Cell_Find_And_Plot(save_folder,
                                      Morphology_Graph_Name,
                                      'Morphology_Cells',
                                      find_thres=2)
On_Off_Graph_Name = r'On_Off_Graph.tif'
On_Off_Cells = Cell_Find_And_Plot(save_folder,
                                  On_Off_Graph_Name,
                                  'On_Off_Cells',
                                  find_thres=2)
Example #16
def Graph_Cutter(
        input_graph,
        boulder=20,
        cut_shape=(4, 4),
):
    """
    Cut a whole graph into small pieces.

    Parameters
    ----------
    input_graph : (2D_NdArray)
        Input graph of calculation. 
    boulder : (int), optional
        Boulder cut to get rid of move error. The default is 20.
    cut_shape : (tuple), optional
        Shape you want to cut the graph into. The default is (4,4).


    Returns
    -------
    schametic : (2D-NdArray,dtype = 'u1')
        Schematic graph of the cut method, with piece IDs drawn on it.
    graph_location_list : (list)
        List of the upper-left coordinate of each piece.
    after_size : (tuple)
        Size of each small graph after cut. 2-element tuple, (y, x).
    cutted_graph_dics : (Dic)
        Dictionary of cut graphs.
    """
    length, width = np.shape(input_graph)
    length_after_cut = length - boulder * 2
    width_after_cut = width - boulder * 2
    # Get shape of cutted graph
    cutted_length = length_after_cut // cut_shape[0]
    cutted_width = width_after_cut // cut_shape[1]
    after_size = (cutted_length, cutted_width)
    graph_location_list = []
    # cycle all fractions.
    cutted_graph_dics = {}
    current_fraction_id = 0
    for i in range(cut_shape[1]):
        for j in range(cut_shape[0]):
            left_up_point = (boulder + j * cutted_length,
                             boulder + i * cutted_width)
            graph_location_list.append(left_up_point)
            current_image = input_graph[left_up_point[0]:left_up_point[0] +
                                        cutted_length,
                                        left_up_point[1]:left_up_point[1] +
                                        cutted_width]
            cutted_graph_dics[current_fraction_id] = current_image
            current_fraction_id += 1

    # Then draw the schematic graph and show IDs on it. Note cv2 point order is (x, y)!
    schametic = Graph_Tools.Clip_And_Normalize(input_graph,
                                               clip_std=10,
                                               bit='u1')
    for i in range(cut_shape[0] + 1):  # Draw horizontal lines
        cv2.line(schametic, (boulder, boulder + i * cutted_length),
                 (width - boulder, boulder + i * cutted_length), (255), 2)
    for i in range(cut_shape[1] + 1):  #...And vertical lines
        cv2.line(schametic, (boulder + i * cutted_width, boulder),
                 (boulder + i * cutted_width, length - boulder), (255), 2)
    for i in range(len(graph_location_list)):  # And put graph id on them.
        text_loc = (graph_location_list[i][1] + cutted_width // 2,
                    graph_location_list[i][0] + cutted_length // 2)
        cv2.putText(schametic, str(i), text_loc,
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255), 1)

    return schametic, graph_location_list, after_size, cutted_graph_dics
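A usage sketch with a hypothetical frame path; the central region is cut into a 4x4 grid of fractures.
frame = cv2.imread(r'D:\Test_Data\Run01\Spon-001.tif', -1)  # hypothetical path
schem, piece_locations, piece_size, pieces = Graph_Cutter(frame, boulder=20, cut_shape=(4, 4))
print('Got ' + str(len(pieces)) + ' pieces of size ' + str(piece_size))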
Example #17
def Tremble_Comparision(before_folder,
                        after_folder,
                        boulder_ignore=20,
                        cut_shape=(9, 9),
                        mask_thres=0):
    # Initialization
    save_folder = after_folder + r'\Results'
    OS_Tools.mkdir(save_folder)
    save_folder = save_folder + r'\Tremble_Compare'
    OS_Tools.mkdir(save_folder)
    row_num = cut_shape[0]
    col_num = cut_shape[1]
    frac_num = row_num * col_num
    cov_matrix_dic = {}
    var_matrix_dic = {}
    variation = np.zeros(shape=(row_num, col_num, 2), dtype='f8')
    variation_change = np.zeros(shape=(row_num, col_num), dtype='f8')
    variation_prop = np.zeros(shape=(row_num, col_num), dtype='f8')
    # Calculation Begins
    before_schematic, before_frac_center = Tremble_Evaluator(
        before_folder,
        boulder_ignore=boulder_ignore,
        cut_shape=cut_shape,
        mask_thres=mask_thres)
    after_schematic, after_frac_center = Tremble_Evaluator(
        after_folder,
        boulder_ignore=boulder_ignore,
        cut_shape=cut_shape,
        mask_thres=mask_thres)
    fig, ax = plt.subplots(row_num, col_num,
                           figsize=(30, 28))  # Initialize graphs
    fig.suptitle('Mass Center Distribution', fontsize=54)
    # Cycle all fracture,get scatter map and variance
    for i in range(frac_num):
        # Graph_Plot
        current_row = i % row_num
        current_col = i // row_num
        ax[current_row, current_col].scatter(before_frac_center[i, :, 1],
                                             before_frac_center[i, :, 0],
                                             s=1,
                                             c='r')
        ax[current_row, current_col].scatter(after_frac_center[i, :, 1],
                                             after_frac_center[i, :, 0],
                                             s=1,
                                             c='g')
        # After plot, calculate cov matrix and variance.
        before_cov = np.cov(before_frac_center[i, :, :].T)
        after_cov = np.cov(after_frac_center[i, :, :].T)
        cov_matrix_dic[i] = (before_cov, after_cov)
        before_eig, _ = np.linalg.eig(before_cov)
        after_eig, _ = np.linalg.eig(after_cov)
        before_var = np.round(before_eig.sum(), 4)
        after_var = np.round(after_eig.sum(), 4)
        variation[current_row, current_col, 0] = before_var
        variation[current_row, current_col, 1] = after_var
        variation_change[current_row, current_col] = before_var - after_var
        variation_prop[current_row,
                       current_col] = (before_var - after_var) / before_var
        # Text annotate
        anchored_text = AnchoredText('Before variance:' + str(before_var) +
                                     '\n After variance:' + str(after_var),
                                     loc='lower left')
        ax[current_row, current_col].add_artist(anchored_text)

    # After this, save figures and matrixs.
    var_matrix_dic['Before'] = variation[:, :, 0]
    var_matrix_dic['After'] = variation[:, :, 1]
    Graph_Tools.Show_Graph(before_schematic, 'Before_Schematic', save_folder)
    Graph_Tools.Show_Graph(after_schematic, 'After_Schematic', save_folder)
    fig.savefig(save_folder + r'\Scatter Plots.png', dpi=330)
    OS_Tools.Save_Variable(save_folder, 'cov_matrix', cov_matrix_dic)
    OS_Tools.Save_Variable(save_folder, 'variance_matrix', var_matrix_dic)
    # Calculate variance change and plot variance map.
    # Before variance map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('Before Align Variance', fontsize=36)
    fig2 = sns.heatmap(variation[:, :, 0],
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\Before_Variance.png', dpi=330)
    # After variance map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('After Align Variance', fontsize=36)
    fig2 = sns.heatmap(variation[:, :, 1],
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\After_Variance.png', dpi=330)
    # Variance change map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('Variance Change', fontsize=36)
    fig2 = sns.heatmap(variation_change,
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\Variance_Change.png', dpi=330)
    # Variance change proportion map
    plt.clf()
    fig2 = plt.figure(figsize=(15, 15))
    plt.title('Variance Change Proportion', fontsize=36)
    fig2 = sns.heatmap(variation_prop,
                       cmap='bwr',
                       annot=True,
                       annot_kws={"size": 20},
                       square=True,
                       yticklabels=False,
                       xticklabels=False,
                       center=0)
    fig2.figure.savefig(save_folder + r'\Variance_Change_Prop.png', dpi=330)
    return cov_matrix_dic, var_matrix_dic
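A usage sketch with hypothetical before/after folders of the same run; scatter plots and variance heatmaps are written under the after folder's Results\Tremble_Compare sub folder.
cov_dic, var_dic = Tremble_Comparision(r'D:\Test_Data\Run01',
                                       r'D:\Test_Data\Run01\Results\Aligned_Frames',
                                       cut_shape=(9, 9))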
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 13:41:07 2020

@author: ZR
Codes to process L76 Data
"""

import My_Wheels.Graph_Operation_Kit as Graph_Tools
import My_Wheels.OS_Tools_Kit as OS_Tools
#%% Cell1, Average Graph.
graph_folder = r'I:\Test_Data\201023_L76_LM\1-003'
save_path = graph_folder + r'\Results'
OS_Tools.mkdir(save_path)
all_tif_name = OS_Tools.Get_File_Name(graph_folder)
average_graph = Graph_Tools.Average_From_File(all_tif_name)
norm_average_graph = Graph_Tools.Clip_And_Normalize(average_graph, clip_std=3)
Graph_Tools.Show_Graph(norm_average_graph, 'Average_Graph', save_path)
#%% Then Calculate Runs
graph_folder = r'I:\Test_Data\201023_L76_LM\1-013'
import My_Wheels.Translation_Align_Function as Align
Align.Translation_Alignment([graph_folder])
#%% Align Stim and Frame
import My_Wheels.Stim_Frame_Align as Stim_Frame_Align
stim_folder = r'I:\Test_Data\201023_L76_LM\201023_L76_LM_Stimulus\Run13_RGLum4'
Frame_Stim_Sequence, Frame_Stim_Dictionary = Stim_Frame_Align.Stim_Frame_Align(
    stim_folder)
aligned_tif_name = OS_Tools.Get_File_Name(
    r'I:\Test_Data\201023_L76_LM\1-013\Results\Aligned_Frames')
#%% Generate On-Off Map
on_id = []
Example #19
def Tremble_Calculator_From_File(data_folder,
                                 graph_type='.tif',
                                 cut_shape=(10, 5),
                                 boulder=20,
                                 move_method='former',
                                 base=[],
                                 center_method='weight'):
    '''
    Calculate align tremble from graph. This program is used to evaluate align quality.
    
    Parameters
    ----------
    data_folder : (str)
        Data folder of graphs.
    graph_type : (str),optional
        Extension name of input graphs. The default is '.tif'.
    cut_shape : (tuple), optional
        Shape of fracture cut. The default is (10, 5).
    boulder : (int),optional
        Boulder of graph. Cut off and not used in the following calculation. The default is 20.
    move_method : ('average' or 'former' or 'input'), optional
        Method of base selection. The default is 'former'.
        'average' uses the overall average; 'former' uses the former frame; 'input' requires base to be given.
    base : (2D_NdArray), optional
        If move_method == 'input', base should be given here. The default is [].
    center_method : ('weight' or 'binary'), optional
        Method of center finding. Whether we use weighted intensity. The default is 'weight'.

    Returns
    -------
    mass_center_maps(Graph)
        A plotted graph, showing movement trace of mass center.
    tremble_plots : (List)
        List of tremble distances for each fracture graph.
    tremble_information : (Dic)
        Dictionary of tremble information.
        Data structure of tremble_information:
            keys: frame ID.
            values are lists; every element corresponds to one fracture graph (ID as in the cut graph).
            list elements are tuples: tuple[0] is the move vector, tuple[1] is the move distance.
            

    '''
    all_tif_name = OS_Tools.Get_File_Name(data_folder, graph_type)
    average_graph = Graph_Tools.Average_From_File(all_tif_name)
    tremble_information = {}
    # get base of align first.
    if move_method == 'average':
        base_graph = average_graph
    elif move_method == 'input':
        base_graph = base
    elif move_method == 'former':
        base_graph = cv2.imread(all_tif_name[0],
                                -1)  # Use first frame as base.

    # cycle all graph to generate tremble plots.
    for i in range(len(all_tif_name)):
        # Read in current graph. No extra cell preprocessing is done here.
        current_graph = cv2.imread(all_tif_name[i], -1)
        # Cut graph as described.
        _, _, _, cutted_current_graph = Graph_Cutter(current_graph, boulder,
                                                     cut_shape)
        _, _, _, cutted_base = Graph_Cutter(base_graph, boulder, cut_shape)
        # Renew base if former mode.
        if move_method == 'former':
            base_graph = cv2.imread(all_tif_name[i], -1)
        # Then cycle all cutted_fracture, to calculate movement of every fracture graph.
        current_frame_move_list = []
        for j in range(len(cutted_current_graph)):
            temp_graph_part = cutted_current_graph[j]
            temp_base_part = cutted_base[j]
            temp_graph_center, _ = Graph_Tools.Graph_Center_Calculator(
                temp_graph_part, center_mode=center_method)
            temp_base_center, _ = Graph_Tools.Graph_Center_Calculator(
                temp_base_part, center_mode=center_method)
            temp_tremble_vector, temp_tremble_dist = Calculator.Vector_Calculate(
                temp_base_center, temp_graph_center)
            current_frame_move_list.append(
                (temp_tremble_vector, temp_tremble_dist))
        tremble_information[i] = current_frame_move_list

    # Then, plot mass center plots. This will show change of mass center position.
    if move_method == 'input':
        print('No Mass Center plot Generated.')
        mass_center_maps = False
    elif move_method == 'average':  # If average, use current location
        mass_center_maps = []
        for i in range(len(tremble_information[0])):  # Cycle all fracture
            fig = plt.figure()
            ax = plt.subplot()
            for j in range(len(tremble_information)):  # Cycle all frame
                current_point = tremble_information[j][i][0]
                ax.scatter(current_point[1], current_point[0], alpha=1, s=5)
            mass_center_maps.append(fig)
            plt.close()
    elif move_method == 'former':
        mass_center_maps = []
        for i in range(len(tremble_information[0])):  # Cycle all fracture
            fig = plt.figure()
            ax = plt.subplot()
            current_point = (0, 0)
            for j in range(len(tremble_information)):  # Cycle all frame
                current_point = (current_point[0] +
                                 tremble_information[j][i][0][0],
                                 current_point[1] +
                                 tremble_information[j][i][0][1])
                ax.scatter(current_point[1], current_point[0], alpha=1, s=5)
            mass_center_maps.append(fig)
            plt.close()

    # At last, plot tremble dist plots. Each fracture have a plot.
    tremble_plots = {}
    for i in range(len(tremble_information[0])):  # Cycle all fractures
        current_tremble_plot = []
        for j in range(len(tremble_information)):  # Cycle all frame
            current_dist = tremble_information[j][i][1]
            current_tremble_plot.append(current_dist)
        tremble_plots[i] = np.asarray(current_tremble_plot)
    return mass_center_maps, tremble_plots, tremble_information
Example #20
def Video_From_File(data_folder,
                    plot_range=(0, 9999),
                    graph_size=(472, 472),
                    file_type='.tif',
                    fps=15,
                    gain=20,
                    LP_Gaussian=([5, 5], 1.5),
                    frame_annotate=True,
                    cut_boulder=[20, 20, 20, 20]):
    '''
    Write all files in a folder as a video.

    Parameters
    ----------
    data_folder : (str)
        Frame folder. All frames in this folder will be written into the video. Dtype shall be u2 or there will be a problem.
    graph_size : (2-element-turple), optional
        Frame size AFTER cut. The default is (472,472).
    file_type : (str), optional
        Data type of graph file. The default is '.tif'.
    fps : (int), optional
        Frame per second. The default is 15.
    gain : (int), optional
        Show gain. The default is 20.
    LP_Gaussian : (tuple), optional
        LP Gaussian filter parameter. Only low pass is done. The default is ([5,5],1.5).
    frame_annotate : (bool), optional
        Whether we annotate the frame number on it. The default is True.
    cut_boulder : (list), optional
        Boulder cut of graphs, UDLR. The default is [20,20,20,20].


    Returns
    -------
    bool
        True if the function completed.

    '''

    all_tif_name = OS_Tools.Get_File_Name(path=data_folder,
                                          file_type=file_type)
    start_frame = plot_range[0]
    end_frame = min(plot_range[1], len(all_tif_name))
    all_tif_name = all_tif_name[start_frame:end_frame]
    graph_num = len(all_tif_name)
    video_writer = cv2.VideoWriter(data_folder + r'\\Video.mp4',
                                   cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'),
                                   fps, graph_size, 0)
    #video_writer = cv2.VideoWriter(data_folder+r'\\Video.avi',-1,fps,graph_size,0)
    for i in range(graph_num):
        raw_graph = cv2.imread(all_tif_name[i], -1).astype('f8')
        # Cut graph boulder.
        raw_graph = Graph_Tools.Graph_Cut(raw_graph, cut_boulder)
        # Do gain then
        gained_graph = np.clip(raw_graph.astype('f8') * gain / 256, 0,
                               255).astype('u1')
        # Then do filter, then
        if LP_Gaussian != False:
            u1_writable_graph = Filters.Filter_2D(gained_graph, LP_Gaussian,
                                                  False)
        else:
            u1_writable_graph = gained_graph
        if frame_annotate == True:
            cv2.putText(u1_writable_graph, 'Frame ID = ' + str(i), (250, 30),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255), 1)
        video_writer.write(u1_writable_graph)
    video_writer.release()  # Properly close the video file.
    return True
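A usage sketch with a hypothetical aligned-frame folder; at most the first 1000 frames are written as a 15 fps video.
Video_From_File(r'D:\Test_Data\Run01\Results\Aligned_Frames',
                plot_range=(0, 1000), fps=15, gain=20)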
Example #21
#                                                 Unpaired_B_Cell_Graph
#                                                 ),all_colors = ['c','g','r','b'])
#     Compare_Dictionary['Paired_A_Cell_Graph'] = Paired_A_Cell_Graph
#     Compare_Dictionary['Paired_B_Cell_Graph'] = Paired_B_Cell_Graph
#     Compare_Dictionary['Unpaired_A_Cell_Graph'] = Unpaired_A_Cell_Graph
#     Compare_Dictionary['Unpaired_B_Cell_Graph'] = Unpaired_B_Cell_Graph
#     Compare_Dictionary['Match_Graph_Combine'] = combine_graph
#     # if plot is true, plot all graphs in specific folder.
#     if plot == True:
#         OS_Tools.Save_Variable(save_folder,'Compare_Matrix',Compare_Dictionary)
#         all_keys = list(Compare_Dictionary.keys())
#         all_keys.remove('Cell_Areas')
#         all_keys.remove('Compare_Information')
#         all_keys.remove('Paired_Cell_Num')
#         for i in range(8):
#             Graph_Tools.Show_Graph(Compare_Dictionary[all_keys[i]],
#                                    all_keys[i],
#                                    save_path = save_folder,
#                                    show_time = show_time)
#     return Compare_Dictionary
# =============================================================================
#%% Test Runs
if __name__ == '__main__':
    from My_Wheels.Cell_Find_From_Graph import Cell_Find_From_Graph
    input_graph = cv2.imread(
        r'E:\ZR\Data_Temp\191215_L77_2P\Run01_V4_L11U_D210_GA_RFlocation_shape3_Sti2degStep2deg\Results\Global_Average_After_Align.tif',
        -1)
    test = Cell_Find_From_Graph(input_graph)['All_Cell_Information']
    visualized_graph = Cell_Visualize(test, color=[0, 0, 255], mode='Fill')
    Graph_Tools.Show_Graph(visualized_graph, 'test', save_path='', write=False)
def Standard_Stim_Processor(data_folder,
                            stim_folder,
                            sub_dic,
                            alinged_sub_folder=r'\Results\Aligned_Frames',
                            show_clip=3,
                            tuning_graph=False,
                            cell_method='Default',
                            filter_method='Gaussian',
                            LP_Para=((5, 5), 1.5),
                            HP_Para=False,
                            spike_train_path='Default',
                            spike_train_filter_para=(False, False),
                            spike_train_filter_method=False):
    '''
    Generate subtraction graphs, cell graphs, and tuning graphs if required.

    Parameters
    ----------
    data_folder : (str)
        Run folder.
    stim_folder : (str)
        Stim file folder or Frame_Stim_Align File folder. Pre align is advised.
    sub_dic : (Dic)
        Subtraction dictionary. This can be generated from My_Wheels.Standard_Parameters.
    show_clip : (float), optional
        Clip of graph show. The default is 3.
    tuning_graph : (bool), optional
        Whether we generate tuning graph of each cells. The default is False.
    cell_method : (str), optional
        Cell find method. You can input cell file path here. The default is 'Default'.
    filter_method : (str), optional
        False to skip filter. Kernel function of graph filtering. The default is 'Gaussian'.
    LP_Para : (tuple), optional
        False to skip. Low pass filter of graph. The default is ((5,5),1.5).
    HP_Para : (tuple), optional
        False to skip. High pass filter of graph. A big HP can be very slow! The default is False.
    spike_train_path : (str), optional
        Path of spike train.'Default' will generate spike train directly. The default is 'Default'.
    spike_train_filter_para : (tuple), optional
        Band-pass proportion for spike train filtering. Be sure you really need this. The default is (False,False).
    spike_train_filter_method : (str), optional
        False to skip. Method of signal filtering. The default is False.

    Returns
    -------
    None.

    '''
    # Path Cycle.
    from Cell_Find_From_Graph import On_Off_Cell_Finder
    work_folder = data_folder + r'\Results'
    OS_Tools.mkdir(work_folder)
    aligned_frame_folder = data_folder + alinged_sub_folder
    OS_Tools.mkdir(aligned_frame_folder)

    # Step1, align graphs. If already aligned, just read
    if not os.listdir(aligned_frame_folder):  # if this is a new folder
        print('Aligned data not found. Aligning here..')
        Translation_Alignment([data_folder])
    aligned_all_tif_name = np.array(
        OS_Tools.Get_File_Name(aligned_frame_folder)
    )  # Use numpy array, this is easier for slice.

    # Step2, get stim frame align matrix. If already aligned, just read in the aligned dictionary.
    file_detector = len(stim_folder.split('.'))
    if file_detector == 1:  # Which means input is a folder
        print('Frame Stim not Aligned, aligning here...')
        from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
        _, Frame_Stim_Dic = Stim_Frame_Align(stim_folder)
    else:  # Input is a file
        Frame_Stim_Dic = OS_Tools.Load_Variable(stim_folder)

    # Step3, get cell information
    if cell_method == 'Default':  # meaning we will use On-Off graph to find cell.
        print('Cell information not found. Finding here..')
        cell_dic = On_Off_Cell_Finder(aligned_all_tif_name,
                                      Frame_Stim_Dic,
                                      filter_method=filter_method,
                                      LP_Para=LP_Para,
                                      HP_Para=HP_Para)
    else:
        cell_dic = OS_Tools.Load_Variable(cell_method)

    # Step4, calculate spike_train.
    if spike_train_path != 'Default':
        dF_F_train = OS_Tools.Load_Variable(spike_train_path)
    else:  # meaning we need to calculate spike train from the very begining.

        _, dF_F_train = Spike_Train_Generator(
            aligned_all_tif_name,
            cell_dic['All_Cell_Information'],
            Base_F_type='nearest_0',
            stim_train=Frame_Stim_Dic['Original_Stim_Train'],
            LP_Para=LP_Para,
            HP_Para=HP_Para,
            filter_method=filter_method)
    # Step5, filter spike trains.
    if spike_train_filter_method != False:  # Meaning we need to do train filter.
        for i in range(len(dF_F_train)):
            dF_F_train[i] = My_Filter.Signal_Filter(dF_F_train[i],
                                                    spike_train_filter_method,
                                                    spike_train_filter_para)
    # Step6, get each frame graph and cell graph.
    all_graph_keys = list(sub_dic.keys())
    for i in range(len(sub_dic)):
        output_folder = work_folder + r'\Subtraction_Graphs'
        current_key = all_graph_keys[i]
        current_sub_list = sub_dic[current_key]
        A_conds = current_sub_list[0]  # condition of A graph
        B_conds = current_sub_list[1]  # condition of B graph
        A_IDs = []
        B_IDs = []
        for j in range(len(A_conds)):
            A_IDs.extend(Frame_Stim_Dic[A_conds[j]])
        for j in range(len(B_conds)):
            B_IDs.extend(Frame_Stim_Dic[B_conds[j]])
        # Get frame maps.
        current_sub_graph, current_t_graph, current_F_info = Single_Subgraph_Generator(
            aligned_all_tif_name, A_IDs, B_IDs, filter_method, LP_Para,
            HP_Para)

        current_sub_graph = Graph_Tools.Clip_And_Normalize(
            current_sub_graph, show_clip)
        Graph_Tools.Show_Graph(current_sub_graph, current_key + '_SubGraph',
                               output_folder)
        current_t_graph = Graph_Tools.Clip_And_Normalize(
            current_t_graph, show_clip)
        Graph_Tools.Show_Graph(current_t_graph, current_key + '_T_Graph',
                               output_folder)
        OS_Tools.Save_Variable(output_folder,
                               current_key + '_Sub_Info',
                               current_F_info,
                               extend_name='.info')
        # Get cell maps
        cell_info = cell_dic['All_Cell_Information']
        current_cell_sub_graph, current_cell_t_graph, current_cell_info = Single_Cellgraph_Generator(
            dF_F_train, cell_info, show_clip, A_IDs, B_IDs)
        Graph_Tools.Show_Graph(current_cell_sub_graph,
                               current_key + '_Cell_SubGraph', output_folder)
        Graph_Tools.Show_Graph(current_cell_t_graph,
                               current_key + '_Cell_T_Graph', output_folder)
        OS_Tools.Save_Variable(output_folder,
                               current_key + '_Cell_Info',
                               current_cell_info,
                               extend_name='.info')
    #Step7, calculate cell tuning graph.
    if tuning_graph == True:
        print('Not finished yet.')
Example #23
def Translation_Alignment(all_folders,
                          base_mode='global',
                          input_base=np.array([[0, 0], [0, 0]]),
                          align_range=20,
                          align_boulder=20,
                          before_average=True,
                          average_std=5,
                          big_memory_mode=False,
                          save_aligned_data=False,
                          graph_shape=(512, 512),
                          timer=True):
    '''
    
    This function will align all tif graphs in the input folders. Only translation transformation is done here; affine transformation needs further discussion.
    
    Parameters
    ----------
    all_folders:(list)
        List of all tif folders, elements are strs.
    
    base_mode:('global',int,'input',optional. The default is 'global')
        How to select base frame. 'global': use global average as base. int: use average of specific run as base. 'input':Manually input base graph, need to be a 2D-Ndarray.
        
    input_base:(2D-Ndarray,optional. The default is none.)
        If base_mode = 'input', input_base must be given. This will be the base for alignment.
        
    align_range:(int,optional. The default is 20)
        Max pixel of alignment. 
        
    align_boulder:(int,optional. The default is 20)
        Boulder cut for align. For different graph sizes, this variable shall be changed.
        
    before_average:(bool,optional. The default is True)
        Whether the before-align average is done. It can be set False to save time; in this case the base graph shall be given.
        
    average_std:(float,optional. The default is 5)
        How many stds you want for average graph generation. Different stds affect the graph contrast.
    
    big_memory_mode:(bool,optional. The default is False)
        If memory is big enough, using this mode is faster.
        
    save_aligned_data:(bool,optional. The default is False)
        Can be True only in big memory mode. This will save all aligned graphs in a single 4D-Ndarray file. The save folder is the first folder.
        
    graph_shape:(2-element-tuple,optional. The default is (512,512))
        Shape of graphs. All input graphs must share the same shape.
        
    timer:(bool,optional. The default is True)
        Show runtime of the function and each procedure.
    
        
    Returns
    -------
    bool
        Whether new folder is generated.
    
    '''
    time_tic_start = time.time()
    #%% Step1, generate folders and file cycle.
    all_save_folders = List_Op.List_Annex(all_folders, ['Results'])
    Aligned_frame_folders = List_Op.List_Annex(all_save_folders,
                                               ['Aligned_Frames'])
    for i in range(len(all_save_folders)):
        OS_Tools.mkdir(all_save_folders[i])
        OS_Tools.mkdir(Aligned_frame_folders[i])
    Before_Align_Tif_Name = []
    for i in range(len(all_folders)):
        current_run_tif = OS_Tools.Get_File_Name(all_folders[i])
        Before_Align_Tif_Name.append(current_run_tif)

    #%% Step2, Generate average map before align.
    if before_average == True:
        print('Before run averaging ...')
        Before_Align_Dics = {
        }  # This is the dictionary of all run averages. Keys are run id.
        total_graph_num = 0  # Counter of graph numbers.
        for i in range(len(Before_Align_Tif_Name)):
            current_run_graph_num = len(Before_Align_Tif_Name[i])
            total_graph_num += current_run_graph_num
            current_run_average = Graph_Tools.Average_From_File(
                Before_Align_Tif_Name[i])
            current_run_average = Graph_Tools.Clip_And_Normalize(
                current_run_average, clip_std=average_std)
            Before_Align_Dics[i] = (
                current_run_average, current_run_graph_num
            )  # Attention here, data recorded as tuple.
            Graph_Tools.Show_Graph(
                current_run_average, 'Run_Average',
                all_save_folders[i])  # Show and save Run Average.
        # Then Use Weighted average method to generate global tif.
        global_average_graph = np.zeros(shape=np.shape(
            Before_Align_Dics[0][0]),
                                        dtype='f8')  # Base on shape of graph
        for i in range(len(Before_Align_Tif_Name)):
            global_average_graph += Before_Align_Dics[i][0].astype(
                'f8') * Before_Align_Dics[i][1] / total_graph_num
        global_average_graph = Graph_Tools.Clip_And_Normalize(
            global_average_graph, clip_std=average_std)
        # Then save global average in each run folder.
        if len(all_folders) > 1:
            for i in range(len(Before_Align_Tif_Name)):
                Graph_Tools.Show_Graph(global_average_graph,
                                       'Global_Average',
                                       all_save_folders[i],
                                       show_time=0)
        else:
            print('Only One run, no global average.')
    else:
        print('Before average Skipped.')
    time_tic_average0 = time.time()

    #%% Step3, Core Align Function.
    print('Aligning...')
    if base_mode == 'global':
        base = global_average_graph
    elif base_mode == 'input':
        base = input_base
    elif type(base_mode) == int:
        base = Before_Align_Dics[base_mode][0]
    else:
        raise IOError('Invalid base mode.')
    # In big memory mode, save aligned_data in a dictionary file.
    if big_memory_mode == True:
        All_Aligned_Frame = {}
        for i in range(len(Before_Align_Tif_Name)):
            All_Aligned_Frame[i] = np.zeros(
                shape=(graph_shape + (len(Before_Align_Tif_Name[i]), )),
                dtype='u2')  # Generate empty graph matrix.
    for i in range(len(Before_Align_Tif_Name)):  # Cycle all runs
        for j in range(len(
                Before_Align_Tif_Name[i])):  # Cycle all graphs in current run
            current_graph = cv2.imread(Before_Align_Tif_Name[i][j],
                                       -1)  # Read in current graph.
            _, _, current_aligned_graph = Alignment(base,
                                                    current_graph,
                                                    boulder=align_boulder,
                                                    align_range=align_range)
            graph_name = Before_Align_Tif_Name[i][j].split(
                '\\')[-1][:-4]  # Ignore extend name'.tif'.
            Graph_Tools.Show_Graph(current_aligned_graph,
                                   graph_name,
                                   Aligned_frame_folders[i],
                                   show_time=0)
            if big_memory_mode == True:
                All_Aligned_Frame[i][:, :, j] = current_aligned_graph
    print('Align Finished, generating average graphs...')
    time_tic_align_finish = time.time()

    #%% Step4, After Align Average
    After_Align_Graphs = {}
    if big_memory_mode == True:  # Average can be faster.
        temp_global_average_after_align = np.zeros(shape=graph_shape,
                                                   dtype='f8')
        for i in range(len(All_Aligned_Frame)):
            current_run_average = Graph_Tools.Clip_And_Normalize(
                np.mean(All_Aligned_Frame[i], axis=2),
                clip_std=average_std)  # Average run graphs, in type 'u2'
            After_Align_Graphs[i] = (current_run_average,
                                     len(All_Aligned_Frame[i][0, 0, :]))
            temp_global_average_after_align += After_Align_Graphs[i][0].astype(
                'f8') * After_Align_Graphs[i][1] / total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(
            temp_global_average_after_align, clip_std=average_std)
    else:  # Traditional way: read aligned frames back from disk.
        temp_global_average_after_align = np.zeros(shape=graph_shape,
                                                   dtype='f8')
        for i in range(len(Aligned_frame_folders)):
            current_run_names = OS_Tools.Get_File_Name(
                Aligned_frame_folders[i])
            current_run_average = Graph_Tools.Average_From_File(
                current_run_names)
            current_run_average = Graph_Tools.Clip_And_Normalize(
                current_run_average, clip_std=average_std)
            After_Align_Graphs[i] = (current_run_average,
                                     len(current_run_names))
            temp_global_average_after_align += After_Align_Graphs[i][0].astype(
                'f8') * After_Align_Graphs[i][1] / total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(
            temp_global_average_after_align, clip_std=average_std)
    # After averaging, save run and global averages in every save folder.
    for i in range(len(all_save_folders)):
        current_save_folder = all_save_folders[i]
        Graph_Tools.Show_Graph(After_Align_Graphs[i][0],
                               'Run_Average_After_Align', current_save_folder)
        if i == 0:  # Show global average only once.
            global_show_time = 5000
        else:
            global_show_time = 0
        if len(all_folders) > 1:
            Graph_Tools.Show_Graph(global_average_after_align,
                                   'Global_Average_After_Align',
                                   current_save_folder,
                                   show_time=global_show_time)
    time_tic_average1 = time.time()

    #%% Step5, save and timer
    if save_aligned_data == True:
        OS_Tools.Save_Variable(all_save_folders[0], 'All_Aligned_Frame_Data',
                               All_Aligned_Frame)

    if timer == True:
        whole_time = time_tic_average1 - time_tic_start
        before_average_time = time_tic_average0 - time_tic_start
        align_time = time_tic_align_finish - time_tic_average0
        after_average_time = time_tic_average1 - time_tic_align_finish
        print('Total Time = ' + str(whole_time) + ' s.')
        print('Before Average Time = ' + str(before_average_time) + ' s.')
        print('Align Time = ' + str(align_time) + ' s.')
        print('After Average Time = ' + str(after_average_time) + ' s.')

    return True
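# Minimal standalone sketch (added for illustration, not part of the original pipeline) of the
# weighted-average trick used in the averaging steps above: the global mean frame can be
# rebuilt from per-run mean frames weighted by each run's frame count, so every frame only
# needs to be read once.
import numpy as np

demo_run_means = [np.random.rand(4, 4) for _ in range(3)]  # stand-ins for per-run average frames
demo_run_counts = [120, 80, 200]                            # stand-ins for frames per run
demo_total = sum(demo_run_counts)
demo_global_mean = np.zeros_like(demo_run_means[0], dtype='f8')
for run_mean, run_count in zip(demo_run_means, demo_run_counts):
    demo_global_mean += run_mean.astype('f8') * run_count / demo_total  # same weighting as above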
Пример #24
0
def Cell_Find_From_Mannual(mask_graph_path,average_graph_path = None,boulder = 8,save = True):
    '''
    Find cells from a manually drawn mask.

    Parameters
    ----------
    mask_graph_path : (str)
        Save path of the manually plotted mask.
    average_graph_path : (str, optional)
        If not given, the combined graph will not be produced. The default is None.
    boulder : int, optional
        Boulder width in pixels. Cells whose centroid lies within this distance of the frame edge are ignored. The default is 8.
    save : bool, optional
        Whether to save cell graphs into the mask's folder. The default is True.

    Returns
    -------
    Cell_Finded : (Dict)
        Same cell data structure as before.

    '''
    save_path = OS_Tools.CDdotdot(mask_graph_path)
    mask_graph = cv2.imread(mask_graph_path,0)
    height,width = mask_graph.shape
    thres = mask_graph.max()/2
    thres_graph = mask_graph>thres# Get binary cell graph
    washed_thres_graph = skimage.morphology.remove_small_objects(thres_graph,5,connectivity = 1)# remove draw errors.
    cell_label = skimage.measure.label(washed_thres_graph)
    All_Cells = skimage.measure.regionprops(cell_label)
    # Delete cells whose centroid lies within the boulder.
    for i in range(len(All_Cells)-1,-1,-1):
        current_cell = All_Cells[i]
        current_height,current_width = current_cell.centroid
        if current_height<boulder or (height-current_height)<boulder:
            All_Cells.pop(i)
        elif current_width<boulder or (width-current_width)<boulder:
            All_Cells.pop(i)
    # Visualization here.
    visual_cell_graph = Visualize.Cell_Visualize(All_Cells)
    annotated_graph = Visualize.Label_Cell(visual_cell_graph,All_Cells,color = (255,255,0))
    if average_graph_path == None:
        print('Average graph not given, no combined graph.')
        combined_graph = []
        labled_combined_graph = []
    else:
        average_graph = cv2.imread(average_graph_path,1)# Read in 8 bit color map
        # Then annotate cell mask on average graph.
        cell_mask = visual_cell_graph[:,:,0]/2
        combined_graph = average_graph.astype('f8')*0.7
        combined_graph[:,:,1] = np.clip((combined_graph[:,:,1]+cell_mask),0,255)
        combined_graph = combined_graph.astype('u1')
        labled_combined_graph = Visualize.Label_Cell(combined_graph, All_Cells,color = (255,255,0))
    # At last, save all cell information and cell maps,
    Cell_Finded = {}
    Cell_Finded['All_Cell_Information'] = All_Cells
    Cell_Finded['Cell_Graph'] = visual_cell_graph
    Cell_Finded['Annotate_Cell_Graph'] = annotated_graph
    Cell_Finded['Combined_Graph'] = combined_graph
    Cell_Finded['Combined_Graph_With_Number'] = labled_combined_graph
    if save == True:
        OS_Tools.Save_Variable(save_path, 'Manuall_Cells', Cell_Finded,'.cell')
        Graph_Tools.Show_Graph(visual_cell_graph, 'Cell_Graph', save_path)
        Graph_Tools.Show_Graph(annotated_graph, 'Annotate_Cell_Graph', save_path)
        if type(combined_graph) != type([]):
            Graph_Tools.Show_Graph(combined_graph, 'Combined_Graph', save_path)
            Graph_Tools.Show_Graph(labled_combined_graph, 'Combined_Graph_With_Number', save_path)

    return Cell_Finded
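# Hedged usage sketch for the function above (added for illustration; the file paths are
# hypothetical placeholders, not from the original data set).
demo_cells = Cell_Find_From_Mannual(r'D:\Demo_Data\Manual_Mask.tif',
                                    average_graph_path=r'D:\Demo_Data\Run_Average.tif',
                                    boulder=8,
                                    save=False)
print(len(demo_cells['All_Cell_Information']), 'cells kept after boulder filtering.')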
Пример #25
0
run_list = [
    '1-001',  # Spon
    '1-008',  # OD
    '1-010',  # G8
    '1-011',  # RGLum4
    '1-014'  # Spon After
]
all_runs = List_Tools.List_Annex(data_folder, run_list)
#%% Extend run01 frames by 3 pixels to fit the ROI change.
run_1 = all_runs[0]
run1_all_tif = OS_Tools.Get_File_Name(run_1)
save_path = run_1 + r'\shape_extended'
OS_Tools.mkdir(save_path)
for i in range(len(run1_all_tif)):
    current_graph = cv2.imread(run1_all_tif[i], -1)
    extended_graph = Graph_Tools.Boulder_Extend(
        current_graph, [0, 0, 0, 3])  # 3 pix on the right.
    current_graph_name = run1_all_tif[i].split('\\')[-1]
    Graph_Tools.Show_Graph(extended_graph,
                           current_graph_name,
                           save_path,
                           show_time=0)
#%% Then align Run01_Spon.
from My_Wheels.Translation_Align_Function import Translation_Alignment
Translation_Alignment([all_runs[0] + r'\shape_extended'],
                      graph_shape=(325, 324))
#%% Then Use this base to align other runs.
base = cv2.imread(
    r'I:\Test_Data\2P\201222_L76_2P\1-001\Results\Run_Average_After_Align.tif',
    -1)
Translation_Alignment(all_runs[1:5],
                      base_mode='input',
Пример #26
0
def Cell_Find_From_Graph(
        input_graph,
        find_thres = 'otsu',
        max_pix = 1000,
        min_pix = 20,
        shape_boulder = [20,20,20,20], 
        sharp_gauss = ([7,7],1.5),
        back_gauss = ([15,15],7),
        size_limit = 20
        ):
    """
    Find cells from a graph. The graph can be an average intensity map or an On-Off subtraction map.

    Parameters
    ----------
    input_graph : (2D Array)
        Input graph. Use this graph to find cell.
    find_thres : (float or 'otsu'), optional
        How many stds above the mean are used as the cell threshold. The default is 'otsu', meaning Otsu's method is used to find the threshold.
    max_pix : (int), optional
        Max cell area. Bigger than this will be ignored. The default is 1000.
    min_pix : (int), optional
        Minimum cell area. Smaller than this will be ignored. The default is 20.
    shape_boulder : (4-element list), optional
        Cell find boulder in [Up, Down, Left, Right] order, usually the same as the align range. The default is [20,20,20,20].
    sharp_gauss : (tuple), optional
        Sharp gaussian blur. Cell data will be found there. The default is ([7,7],1.5).
    back_gauss : (tuple), optional
        Coarse gaussian blur; this value determines the local average areas. The default is ([15,15],7).
    size_limit : (int), optional
        Width and length limitation. Cells longer than this in either dimension will be ignored. The default is 20.

    Returns
    -------
    Cell_Finded : (Dictionary)
        All value will return to this dictionary. Keys are shown below:
            ['All_Cell_Information']: skimage data, contains all cell location and areas.
            ['Cell_Graph']:Cell location graph.
            ['Annotate_Cell_Graph']:Annotated cell graph, cell graph with number.
            ['Combine_Graph']:Circle cell on input graph.
            ['Combine_Graph_With_Number']:Circle Cell on input graph, and annotate cell number on input graph.

    """
    # Get Original Cell Graph
    Cell_Finded = {}
    sharp_mask = Calculator.Normalized_2D_Gaussian_Generator(sharp_gauss)
    back_mask = Calculator.Normalized_2D_Gaussian_Generator(back_gauss)
    sharp_blur = scipy.ndimage.correlate(input_graph,sharp_mask,mode = 'reflect').astype('f8')
    back_blur = scipy.ndimage.correlate(input_graph,back_mask,mode = 'reflect').astype('f8')
    im_cell = (sharp_blur-back_blur)/np.max(sharp_blur-back_blur) # Local Max value shown in this graph.
    if find_thres == 'otsu':# Add otsu method here.
        level = filters.threshold_otsu(im_cell)
    else:
        level = np.mean(im_cell)+find_thres*np.std(im_cell)
    origin_cells = im_cell>level # Original cell graphs, need to be filtered later.
    # Remove the boulder area.
    origin_cells = Graph_Tools.Boulder_Fill(origin_cells, shape_boulder, 0)
    # Then remove small areas; the other removals have no direct function, so they are done later.
    cell_washed = skimage.morphology.remove_small_objects(origin_cells,min_pix,connectivity = 1)
    # Get Cell Group Description here.
    cell_label = skimage.measure.label(cell_washed)
    All_Cells = skimage.measure.regionprops(cell_label)
    
    # Then wash away too-big and oversized cells.
    for i in range(len(All_Cells)-1,-1,-1): # reverse order so pop() does not shift remaining indices.
        current_cell = All_Cells[i]
        area = current_cell.convex_area # Cell areas
        # wash too big cell
        if area > max_pix:
            All_Cells.pop(i)
        # wash oversize cell
        else:
            size = np.shape(All_Cells[i].image)
            if np.max(size)>size_limit:
                All_Cells.pop(i)
    # Cell finding is done; now generate the graphs for display.
    Cell_Finded['All_Cell_Information'] = All_Cells
    Cell_Finded['Cell_Graph'] = Visualize.Cell_Visualize(All_Cells)
    Cell_Finded['Annotate_Cell_Graph'] = Visualize.Label_Cell(Cell_Finded['Cell_Graph'],All_Cells)
    # Then draw cell on input graph.
    circled_cell = Visualize.Cell_Visualize(All_Cells,color = [0,255,100],mode = 'Boulder')
    input_8bit = Graph_Tools.Graph_Depth_Change(input_graph,output_bit = 'u1')
    input_8bit = cv2.cvtColor(input_8bit,cv2.COLOR_GRAY2RGB)
    Cell_Finded['Combine_Graph'] = Graph_Tools.Graph_Combine(input_8bit,circled_cell)
    Cell_Finded['Combine_Graph_With_Number'] = Visualize.Label_Cell(Cell_Finded['Combine_Graph'], All_Cells)
    
    return Cell_Finded
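# Standalone sketch (illustration only, with synthetic data) of the sharp-minus-background
# idea the function above relies on: a fine Gaussian blur keeps cell-sized detail, a coarse
# blur estimates the local background, and their difference highlights blobs before
# Otsu thresholding.
import numpy as np
from scipy import ndimage
from skimage import filters as demo_filters

demo_frame = np.random.rand(128, 128)                        # stand-in for an average 2P frame
demo_sharp = ndimage.gaussian_filter(demo_frame, sigma=1.5)  # comparable to the sharp_gauss default
demo_back = ndimage.gaussian_filter(demo_frame, sigma=7)     # comparable to the back_gauss default
demo_dog = (demo_sharp - demo_back) / np.max(demo_sharp - demo_back)
demo_cells_mask = demo_dog > demo_filters.threshold_otsu(demo_dog)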
Пример #27
0
# Then align all remaining runs.
base_graph = cv2.imread(
    r'K:\Test_Data\2P\210320_L76_2P\1-001\Results\Global_Average_After_Align.tif',
    -1)
Translation_Alignment(all_run_folder[2:],
                      base_mode='input',
                      input_base=base_graph,
                      align_range=35)
# Small mismatches remain, so try affine alignment.
affine_base = cv2.imread(
    r'K:\Test_Data\2P\210320_L76_2P\_Affine_Affairs\Affine_Base.tif', -1)
Affine_Aligner_Gaussian(all_run_folder[0], affine_base, write_file=False)
for i in range(2, 15):
    Affine_Aligner_Gaussian(all_run_folder[i], affine_base, write_file=False)
# Get all aligned_average
all_avr = gt.Global_Averagor(all_run_folder)
gt.Show_Graph(gt.Clip_And_Normalize(all_avr, 5), 'Global_Average_After_Affine',
              r'K:\Test_Data\2P\210320_L76_2P\_Affine_Affairs')
# Then align all stim runs
from My_Wheels.Stim_Frame_Align import One_Key_Stim_Align
One_Key_Stim_Align(r'K:\Test_Data\2P\210320_L76_2P\210320_L76_2P_stimuli')
# calculate frame submaps.
from Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
from Standard_Stim_Processor import One_Key_Frame_Graphs
G16_Para = Sub_Dic_Generator('G16_2P')
One_Key_Frame_Graphs(r'K:\Test_Data\2P\210320_L76_2P\1-005',
                     G16_Para,
                     alinged_sub_folder=r'\Results\Affined_Frames')
OD_Para = Sub_Dic_Generator('OD_2P')
One_Key_Frame_Graphs(r'K:\Test_Data\2P\210320_L76_2P\1-010',
                     OD_Para,
Пример #28
0
"""

import My_Wheels.ROI_Matcher as Matcher
import cv2
import My_Wheels.Graph_Operation_Kit as Graph_Tools
import numpy as np
import My_Wheels.OS_Tools_Kit as OS_Tools
import My_Wheels.Graph_Selector as Selector
#%% Get ROI 
ROI_graph = cv2.imread(r'I:\Test_Data\2P\201222_L76_2P\ROI_Analyzer\ROI_Run01.tif',-1)
ful_graph_today = cv2.imread(r'I:\Test_Data\2P\201222_L76_2P\ROI_Analyzer\1222_Full_Graph_Run05.tif',-1)
ful_graph_prev = cv2.imread(r'I:\Test_Data\2P\201222_L76_2P\ROI_Analyzer\1211_Full_Graph_Run01.tif',-1)
#%% Match today's data
save_folder = r'I:\Test_Data\2P\201222_L76_2P\ROI_Analyzer'
merged_graph_today,loc_graph_today,ROI_boulder_today = Matcher.ROI_Matcher(ful_graph_today, ROI_graph)
Graph_Tools.Show_Graph(merged_graph_today, 'Today_Merged', save_folder)
Graph_Tools.Show_Graph(loc_graph_today, 'Today_Local', save_folder)
#%% Then Match last day.
merged_graph_prev,loc_graph_prev,ROI_boulder_prev = Matcher.ROI_Matcher(ful_graph_prev, ROI_graph)
Graph_Tools.Show_Graph(merged_graph_prev, 'Prev_Merged', save_folder)
Graph_Tools.Show_Graph(loc_graph_prev, 'Prev_Local', save_folder)
#%% Then match ROI data with stim full graph data.
OD_Graph = cv2.imread(r'I:\Test_Data\2P\201211_L76_2P\1-012\Results\Subtraction_Graphs\OD_SubGraph.tif').astype('f8')
OD_Graph[:,:,2] += ROI_boulder_prev
OD_Graph_Annotate = np.clip(OD_Graph,0,255).astype('u1')
Graph_Tools.Show_Graph(OD_Graph_Annotate, 'OD_Annotate', save_folder)
#%% Then H-V
HV_Graph = cv2.imread(r'I:\Test_Data\2P\201211_L76_2P\1-010\Results\Subtraction_Graphs\H-V_SubGraph.tif').astype('f8')
HV_Graph[:,:,2] += ROI_boulder_prev
HV_Graph_Annotate = np.clip(HV_Graph,0,255).astype('u1')
Graph_Tools.Show_Graph(HV_Graph_Annotate, 'HV_Annotate', save_folder)
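#%% Hedged helper sketch (illustration only): the OD and H-V annotation blocks above repeat the
# same steps, which could be wrapped like this; Annotate_With_ROI is a hypothetical name,
# not a function from My_Wheels.
def Annotate_With_ROI(sub_graph_path, ROI_boulder):
    graph = cv2.imread(sub_graph_path).astype('f8')
    graph[:, :, 2] += ROI_boulder            # draw the ROI boulder into the red channel
    return np.clip(graph, 0, 255).astype('u1')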
Пример #29
0
_,Frame_Stim_Dic = Stim_Frame_Align(stim_folder,frame_thres = frame_thres,jmp_step = jmp_step)
#%% Fourth, generate morpho graph and find cells.
cell_Dic = Cell_Find_And_Plot(save_folder, 'Run_Average_After_Align.tif', 'Morpho_Cell')
cell_mask = (cell_Dic['Cell_Graph'][:,:,0])>0
#%% Fifth, calculate RF reaction.
RF_Data = np.zeros(shape = (5,5,2),dtype = 'f8') # 5x5 grid; layer 0 holds whole-frame means, layer 1 holds cell-masked means.
loc_ids = np.array([1,26,51,76,101,126,151,176,201,226,251,276])
for i in range(5):# i as vector1
    for j in range(5):# j as vector2
        start_id = i*5+j
        current_keys = loc_ids+start_id
        current_loc_frame_id = []
        for k in range(len(current_keys)):
            current_loc_frame_id.extend(Frame_Stim_Dic[current_keys[k]])
        current_loc_graph_name = aligned_all_tif_name[current_loc_frame_id]
        current_graph = Graph_Tools.Average_From_File(current_loc_graph_name)
        all_cells = current_graph*cell_mask
        RF_Data[i,j,0] = current_graph.mean()
        RF_Data[i,j,1] = all_cells.mean()
# Then normalize to the minimum (baseline) value.
frame_average = RF_Data[:,:,0].min()
cell_average = RF_Data[:,:,1].min()
prop_RF_Data = np.zeros(shape = (5,5,2),dtype = 'f8')
prop_RF_Data[:,:,0] = RF_Data[:,:,0]/frame_average -1
prop_RF_Data[:,:,1] = RF_Data[:,:,1]/cell_average -1
OS_Tools.Save_Variable(save_folder, 'Origin_RF_Data', RF_Data)
#%% Then do Spline interpolation here.
import pylab as pl
# Show graph first.
x = np.array([1,2,3,4,5])
y = np.array([1,2,3,4,5])
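# Hedged sketch (illustration only) of one way to spline-interpolate the 5x5 RF map onto a
# finer grid; RectBivariateSpline and the 0.1-step target grid are assumptions, not the
# original author's code.
from scipy.interpolate import RectBivariateSpline
spline = RectBivariateSpline(x, y, prop_RF_Data[:, :, 1], kx=3, ky=3)
fine_axis = np.arange(1, 5.01, 0.1)
fine_RF = spline(fine_axis, fine_axis)   # interpolated cell-response RF map
pl.imshow(fine_RF, cmap='jet')
pl.colorbar()
pl.show()
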
def Single_Subgraph_Generator(all_tif_name,
                              A_IDs,
                              B_IDs,
                              filter_method='Gaussian',
                              LP_Para=((5, 5), 1.5),
                              HP_Para=False,
                              t_map=True,
                              t_sig=1):
    '''
    Generate single subtraction map of 2P data. A-B graph is generated.

    Parameters
    ----------
    all_tif_name : (list or nparray)
        All graph names, usually the aligned tif names.
    A_IDs : (list)
        List of A ID.
    B_IDs : (list)
        List of B ID.
    filter_method : (str), optional
        Can be set False to skip. Filter used before and after subtraction. The default is 'Gaussian'.
    LP_Para : (tuple), optional
        Can be set False to skip. Low pass filter parameter. The default is ((5,5),1.5).
    HP_Para : (tuple), optional
        Can be set False to skip. High pass filter parameter. The default is False.
    t_map : (bool), optional
        Whether t map is generated. The default is True.
    t_sig : (0~1), optional
        p-value threshold used to mask the t map. The default is 1 (no masking).

    Returns
    -------
    sub_graph : (2D array)
        Subtraction (dF) graph. Raw data; clip and normalize before plotting.
    t_graph : (2D array)
        T test graph: t values masked where p >= t_sig. If t_map == False, this will be None.
    F_info_Dics : (Dic)
        Information dictionary. Including origin F & dF/F information of input graph.

    '''
    warnings.filterwarnings('ignore')
    F_info_Dics = {}
    all_tif_name = np.array(all_tif_name)  # Change into nparray to slice.
    A_Set_Graph_names = all_tif_name[A_IDs]
    B_Set_Graph_names = all_tif_name[B_IDs]
    # Calculate sub graph.
    F_info_Dics['Graph_Shape'] = np.shape(cv2.imread(all_tif_name[0], -1))
    F_info_Dics['Origin_Data_Type'] = str((cv2.imread(all_tif_name[0],
                                                      -1)).dtype)
    F_info_Dics['Average_A_Graph'] = Graph_Tools.Average_From_File(
        A_Set_Graph_names, LP_Para, HP_Para, filter_method)
    F_info_Dics['Average_B_Graph'] = Graph_Tools.Average_From_File(
        B_Set_Graph_names, LP_Para, HP_Para, filter_method)
    F_info_Dics['dF_Map'] = (F_info_Dics['Average_A_Graph'].astype('f8') -
                             F_info_Dics['Average_B_Graph'].astype('f8'))
    F_info_Dics['Average_dF_value'] = abs(
        F_info_Dics['dF_Map']).mean()  # Average dF value.
    F_info_Dics['Average_dF/F_value'] = F_info_Dics['Average_dF_value'] / (
        F_info_Dics['Average_B_Graph'].mean())
    F_info_Dics['dF/F_Graph'] = np.nan_to_num(
        F_info_Dics['dF_Map'] / F_info_Dics['Average_B_Graph'].astype('f8'))
    sub_graph = F_info_Dics['dF_Map']

    # Then calculate the t value graph.
    if t_map == False:
        F_info_Dics['t_value_map'] = None
        F_info_Dics['p_value_map'] = None
        t_graph = None
    else:
        import random
        sample_size = min(len(A_Set_Graph_names), len(B_Set_Graph_names))
        selected_A_name = np.array(
            random.sample(list(A_Set_Graph_names), sample_size))
        selected_B_name = np.array(
            random.sample(list(B_Set_Graph_names), sample_size))
        A_graph_arrays = np.zeros(shape=(F_info_Dics['Graph_Shape'] +
                                         (sample_size, )),
                                  dtype='f8')
        B_graph_arrays = np.zeros(shape=(F_info_Dics['Graph_Shape'] +
                                         (sample_size, )),
                                  dtype='f8')
        # Then we will fill filtered data into graph.
        # First, we will read in AB graphs together.
        for i in range(sample_size):
            current_a_graph = cv2.imread(selected_A_name[i], -1)
            current_b_graph = cv2.imread(selected_B_name[i], -1)
            if filter_method != False:
                A_graph_arrays[:, :, i] = My_Filter.Filter_2D(
                    current_a_graph, LP_Para, HP_Para, filter_method)
                B_graph_arrays[:, :, i] = My_Filter.Filter_2D(
                    current_b_graph, LP_Para, HP_Para, filter_method)
            else:  # Without filtering, fill in the raw graphs so the arrays are not left all-zero.
                A_graph_arrays[:, :, i] = current_a_graph
                B_graph_arrays[:, :, i] = current_b_graph
        # After that, we calculate t and p value pix by pix.
        t_value_graph = np.zeros(shape=F_info_Dics['Graph_Shape'], dtype='f8')
        p_value_graph = np.zeros(shape=F_info_Dics['Graph_Shape'], dtype='f8')
        from scipy.stats import ttest_rel
        for i in range(F_info_Dics['Graph_Shape'][0]):
            for j in range(F_info_Dics['Graph_Shape'][1]):
                t_value_graph[i, j], p_value_graph[i, j] = ttest_rel(
                    A_graph_arrays[i, j, :], B_graph_arrays[i, j, :])
        # avoid nan
        t_graph_origin = np.nan_to_num(t_value_graph)
        p_value_graph = np.nan_to_num(p_value_graph)
        F_info_Dics['t_graph_origin'] = t_graph_origin
        F_info_Dics['p_value_of_t_test'] = p_value_graph
        t_graph = t_graph_origin * (p_value_graph < t_sig)
        F_info_Dics['t_graph'] = t_graph

    return sub_graph, t_graph, F_info_Dics
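# Hedged aside (illustration only, with synthetic data): scipy.stats.ttest_rel accepts an
# axis argument, so the pixel-by-pixel double loop above could also be written as one
# vectorized call; this is an alternative sketch, not the original implementation.
import numpy as np
from scipy.stats import ttest_rel

demo_A = np.random.rand(64, 64, 30)    # stand-in for the filtered A frame stack
demo_B = np.random.rand(64, 64, 30)    # stand-in for the filtered B frame stack
demo_t, demo_p = ttest_rel(demo_A, demo_B, axis=2)
demo_t_graph = np.nan_to_num(demo_t) * (np.nan_to_num(demo_p) < 0.05)  # keep significant pixels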