Code Example #1
    def Do_Align(self):
        """
        Main Function. Use this function will finish align work, useful for module using

        Returns
        -------
        Align Properties(Dic):
            Property of this alignment, including useful path and useful names.

        """
        start_time = time.time() # Processing Start time
        self.Before_Run_Average()
        self.Align_Cores()
        self.After_Align_Average()
        finish_time = time.time()
        time_cost = finish_time-start_time
        print('Alignment Done, time cost = '+str(time_cost) +'s')
        
        # Output a dictionary recording save folders and aligned tif names.
        Align_Properties = {}
        Align_Properties['all_save_folders'] = self.all_save_folders
        all_tif_name = []
        for i in range(len(self.Aligned_frame_folders)):
            current_tif_list = OS_Tools.Get_File_Name(self.Aligned_frame_folders[i],file_type = '.tif')
            all_tif_name.append(current_tif_list)
        Align_Properties['all_tif_name'] = all_tif_name
        return Align_Properties
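A minimal usage sketch (not part of the original file): `Aligner` below is a hypothetical name standing in for the class that owns Do_Align, constructed with a run-folder list as in the `__init__` of Code Example #8; the folder paths are illustrative.

# Usage sketch. 'Aligner' is a hypothetical class name; paths are examples only.
run_folders = [
    r'G:\Test_Data\2P\201111_L76_LM\1-002',
    r'G:\Test_Data\2P\201111_L76_LM\1-003',
]
aligner = Aligner(run_folders)      # constructed as in Code Example #8
align_props = aligner.Do_Align()    # runs the whole before/align/after pipeline
print(align_props['all_save_folders'])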
Code Example #2
def Global_Averagor(all_folder_list, sub_folders=r'\Results\Affined_Frames'):
    '''
    Average global graph from all subgraphs.

    Parameters
    ----------
    all_folder_list : (list)
        List of run folders to be averaged together.
    sub_folders : (str), optional
        Sub path of aligned frames inside each run folder. The default is r'\Results\Affined_Frames'.

    Returns
    -------
    global_averaged_graph : (2D Array)
        Average graph of all frames in the listed folders.

    '''
    all_folders = lt.List_Annex(all_folder_list, [sub_folders])
    all_tif_name = []
    for i in range(len(all_folders)):
        current_tif_name = ot.Get_File_Name(all_folders[i])
        all_tif_name.extend(current_tif_name)
    global_averaged_graph = Average_From_File(all_tif_name)

    return global_averaged_graph
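A short usage sketch, assuming each listed run folder already contains affine-aligned frames under r'\Results\Affined_Frames'; the paths are illustrative.

# Usage sketch; folder paths are illustrative.
run_folders = [
    r'G:\Test_Data\2P\201111_L76_LM\1-002',
    r'G:\Test_Data\2P\201111_L76_LM\1-003',
]
global_graph = Global_Averagor(run_folders)  # averages every tif under ...\Results\Affined_Frames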
Code Example #3
    def After_Align_Average(self):
        """
        This function generates the after-align average graphs of each run and the global average, then saves them.
        
        Returns
        -------
        None.

        """
        print('Aligning done. ')
        self.After_Align_Graphs = {} # Initialize a dictionary, will record all aligned graphs averages and graph nums.
        # Fill After Align Graph Dictionary first
        total_graph_num = 0
        for i in range(len(self.Aligned_frame_folders)):
            current_run_names = OS_Tools.Get_File_Name(self.Aligned_frame_folders[i])
            temp_average = Graph_Tools.Average_From_File(current_run_names) # This will generate an average graph in 'f8' format.
            current_graph_aligned = Graph_Tools.Clip_And_Normalize(temp_average,clip_std = 5)
            Graph_Tools.Show_Graph(current_graph_aligned, 'Run_Average_After_Align', self.all_save_folders[i])
            current_run_Frame_Num = len(current_run_names)
            total_graph_num += current_run_Frame_Num
            self.After_Align_Graphs[i] = (current_graph_aligned,current_run_Frame_Num)
        global_average_after_align = np.zeros(np.shape(current_graph_aligned),dtype = 'f8')
        
        # Then calculate the global average across runs, weighted by frame number.
        for i in range(len(self.all_save_folders)):
            global_average_after_align += self.After_Align_Graphs[i][0].astype('f8')*self.After_Align_Graphs[i][1]/total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(global_average_after_align,clip_std = 5)
        
        # Then save global graph into each folder.
        for i in range(len(self.all_save_folders)):
            if i == 0:
                Graph_Tools.Show_Graph(global_average_after_align, 'Global_Average_After_Align', self.all_save_folders[i])
            else:
                Graph_Tools.Show_Graph(global_average_after_align, 'Global_Average_After_Align', self.all_save_folders[i],show_time = 0)
Code Example #4
def Least_Tremble_Average_Graph(data_folder,
                                average_prop=0.1,
                                cut_shape=(9, 9)):
    all_tif_name = np.array(OS_Tools.Get_File_Name(data_folder))
    _, frac_disps = Tremble_Evaluator(data_folder, cut_shape=cut_shape)
    frac_num, frame_num, _ = frac_disps.shape
    # Then calculate average center and least error graph.
    frac_centers = np.zeros(shape=(frac_num, 2), dtype='f8')
    for i in range(frac_num):
        frac_centers[i, 0] = frac_disps[i, :, 0].mean()
        frac_centers[i, 1] = frac_disps[i, :, 1].mean()
    # And all frac_total movings
    total_movings = np.zeros(frame_num, dtype='f8')
    for i in range(frame_num):
        c_dist = 0
        for j in range(frac_num):
            c_dist += (frac_centers[j][0] - frac_disps[j, i, 0])**2 + (
                frac_centers[j][1] - frac_disps[j, i, 1])**2
        total_movings[i] = c_dist
    # Then find least props.
    used_num = int(frame_num * average_prop)
    if used_num < 300:  # least num of average is set to 300 to avoid problems.
        used_num = min(300, frame_num)
    print('Average of most stable ' + str(used_num) + ' Frames.')
    if used_num >= frame_num:  # meaning all frames are used
        graph_names = all_tif_name
    else:
        used_frame_ind = np.argpartition(total_movings, used_num)[0:used_num]
        graph_names = all_tif_name[used_frame_ind]
    averaged_graph = Graph_Tools.Average_From_File(graph_names)

    return averaged_graph, graph_names
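A short usage sketch, assuming the folder below holds aligned tif frames; the path is illustrative.

# Usage sketch; the data folder is illustrative.
stable_avg, used_names = Least_Tremble_Average_Graph(
    r'I:\Test_Data\201023_L76_LM\1-013\Results\Aligned_Frames',
    average_prop=0.1,
    cut_shape=(9, 9))
print(str(len(used_names)) + ' most stable frames were averaged.')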
Code Example #5
def One_Key_Frame_Graphs(
        data_folder,
        sub_dic,
        show_clip=3,
        alinged_sub_folder=r'\Results\Final_Aligned_Frames',
        Stim_Align_sub_folder=r'\Results\Stim_Frame_Align.pkl'):
    result_folder = data_folder + r'\Results'
    graph_save_folder = result_folder + r'\Only_Frame_SubGraphs'
    OS_Tools.mkdir(result_folder)
    OS_Tools.mkdir(graph_save_folder)
    stim_path = data_folder + Stim_Align_sub_folder
    stim_dic = OS_Tools.Load_Variable(stim_path)
    all_tif_name = OS_Tools.Get_File_Name(data_folder + alinged_sub_folder)
    graph_num = len(sub_dic)
    all_sub_graph_names = list(sub_dic.keys())
    for i in range(graph_num):
        current_name = all_sub_graph_names[i]
        current_a = Frame_ID_Extractor(stim_dic, sub_dic[current_name][0])
        current_b = Frame_ID_Extractor(stim_dic, sub_dic[current_name][1])
        current_sub_graph, current_t_graph, current_info_dic = Single_Subgraph_Generator(
            all_tif_name, current_a, current_b)
        current_sub_graph = Graph_Tools.Clip_And_Normalize(
            current_sub_graph, show_clip)
        current_t_graph = Graph_Tools.Clip_And_Normalize(
            current_t_graph, show_clip)
        # Save graphs
        Graph_Tools.Show_Graph(current_sub_graph, current_name + '_Sub_Graph',
                               graph_save_folder)
        Graph_Tools.Show_Graph(current_t_graph, current_name + '_t_Graph',
                               graph_save_folder)
        OS_Tools.Save_Variable(graph_save_folder, current_name + r'_Sub_Info',
                               current_info_dic, '.info')
    return True
Code Example #6
def Partial_Average_From_File(data_folder,
                              start_frame,
                              stop_frame,
                              graph_type='.tif',
                              LP_Para=False,
                              HP_Para=False,
                              filter_method=False):
    '''
    Average specific part of graphs in the folder.

    Parameters
    ----------
    data_folder : (str)
        Data folder.
    start_frame : (int)
        Start ID of frame selection.
    stop_frame : (int)
        Stop ID of frame selection.
    graph_type : (str), optional
        Frame dtype. The default is '.tif'.
    LP_Para / HP_Para / filter_method : optional
        Filter parameters. The default is False.

    Returns
    -------
    Averaged_Graph : (2D Array)
        Averaged graphs.

    '''
    all_tif_name = np.array(
        OS_Tools.Get_File_Name(data_folder, file_type=graph_type))
    used_tif_name = all_tif_name[start_frame:stop_frame]
    Averaged_Graph = Graph_Tools.Average_From_File(used_tif_name, LP_Para,
                                                   HP_Para, filter_method)
    return Averaged_Graph
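A short usage sketch, averaging the first 500 aligned frames of a run and saving the clipped result as in Code Example #15; the paths are illustrative.

# Usage sketch; paths are illustrative.
partial_avg = Partial_Average_From_File(
    r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Aligned_Frames',
    start_frame=0,
    stop_frame=500)
partial_avg = Graph_Tools.Clip_And_Normalize(partial_avg, clip_std=5)
Graph_Tools.Show_Graph(partial_avg, 'Partial_Average',
                       r'G:\Test_Data\2P\201111_L76_LM\1-002\Results')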
Code Example #7
def One_Key_Stim_Maps(data_folder,
                      cell_folder,
                      sub_dic,
                      have_blank=None,
                      alinged_sub_folder=r'\Results\Aligned_Frames',
                      Stim_Align_sub_folder=r'\Results\Stim_Frame_Align.pkl'):
    '''
    One-key generation of stim maps. Before using this, you need to:
        1. Align graphs.
        2. Give the cell file path.
        3. Finish stim frame align.
    '''
    result_folder = data_folder + r'\Results'
    stim_path = data_folder + Stim_Align_sub_folder
    cell_path = OS_Tools.Get_File_Name(cell_folder, '.cell')[0]
    cell_dic = OS_Tools.Load_Variable(cell_path)
    # Then generate spiketrain
    stim_train = OS_Tools.Load_Variable(stim_path)['Original_Stim_Train']
    all_tif_name = OS_Tools.Get_File_Name(data_folder + alinged_sub_folder)
    cell_information = cell_dic['All_Cell_Information']
    if have_blank != None:
        warnings.warn(
            'Have blank is detected automatically, this API is useless now.',
            FutureWarning)
    have_blank = (0 in stim_train)
    if have_blank == True:
        F_train, dF_F_train = Spike_Train_Generator(all_tif_name,
                                                    cell_information,
                                                    Base_F_type='nearest_0',
                                                    stim_train=stim_train)
    else:
        print('No blank, use previous ISI to calculate trains')
        F_train, dF_F_train = Spike_Train_Generator(all_tif_name,
                                                    cell_information,
                                                    Base_F_type='before_ISI',
                                                    stim_train=stim_train)
    # Then save F and dF/F trains
    OS_Tools.Save_Variable(result_folder, 'F_Trains', F_train)
    OS_Tools.Save_Variable(result_folder, 'dF_F_Trains', dF_F_train)
    # At last, calculate Maps.
    Standard_Stim_Processor(data_folder,
                            stim_path,
                            sub_dic,
                            cell_method=cell_path,
                            spike_train_path=result_folder +
                            r'\dF_F_Trains.pkl')
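A short usage sketch, assuming alignment, cell finding and stim frame align are already done (see the docstring above); the paths are illustrative and the cell folder must contain a '.cell' file.

# Usage sketch; paths are illustrative.
from My_Wheels.Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
G8_sub_dic = Sub_Dic_Generator('G8+90')
One_Key_Stim_Maps(r'G:\Test_Data\2P\201211_L76_2P\1-010',
                  r'G:\Test_Data\2P\201211_L76_2P\1-010\Results\Global_Morpho',
                  G8_sub_dic)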
Code Example #8
    def __init__(self,all_folders):

        self.all_folders = all_folders
        self.all_save_folders = List_Op.List_Annex(self.all_folders,['Results'])
        self.Aligned_frame_folders = List_Op.List_Annex(self.all_save_folders,['Aligned_Frames'])
        for i in range(len(self.all_save_folders)):
            OS_Tools.mkdir(self.all_save_folders[i])
            OS_Tools.mkdir(self.Aligned_frame_folders[i])
        self.Before_Align_Tif_Name = []
        for i in range(len(self.all_folders)):
            current_run_tif = OS_Tools.Get_File_Name(self.all_folders[i])
            self.Before_Align_Tif_Name.append(current_run_tif)
Code Example #9
def Intensity_Selector(data_folder,
                       graph_type='.tif',
                       mode='biggest',
                       propotion=0.05,
                       list_write=True):
    '''
    Select frames with the biggest or smallest average intensity, and generate an average graph.

    Parameters
    ----------
    data_folder : (str)
        Data folder.
    graph_type : (str), optional
        Data type of graph files. The default is '.tif'.
    mode : ('biggest' or 'smallest'), optional
        Type of frame selection. The default is 'biggest'.
    propotion : (float), optional
        Proportion of graphs selected. The default is 0.05.
    list_write : (bool), optional
        Whether we write down graph intensity data. The default is True.

    Returns
    -------
    averaged_graph : (2D Array)
        Averaged graph of selected frames.
    selected_graph_name : (ND List)
        List of selected graph names.

    '''
    all_graph_name = np.array(
        OS_Tools.Get_File_Name(data_folder, file_type=graph_type))
    graph_Num = len(all_graph_name)
    bright_data = np.zeros(graph_Num, dtype='f8')
    for i in range(graph_Num):
        current_graph = cv2.imread(all_graph_name[i], -1)
        bright_data[i] = np.mean(current_graph)
        # write bright data if required.
    if list_write == True:
        OS_Tools.Save_Variable(data_folder, 'brightness_info', bright_data)
    # Then select given mode frames.
    used_graph_num = int(graph_Num * propotion)
    if mode == 'biggest':
        used_graph_id = np.argpartition(bright_data,
                                        -used_graph_num)[-used_graph_num:]
    elif mode == 'smallest':
        used_graph_id = np.argpartition(bright_data,
                                        used_graph_num)[0:used_graph_num]
    selected_graph_name = all_graph_name[used_graph_id]
    averaged_graph = Graph_Tools.Average_From_File(selected_graph_name)
    return averaged_graph, selected_graph_name
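A short usage sketch, averaging the brightest 5% of frames in an aligned run folder; the path is illustrative.

# Usage sketch; the folder is illustrative.
bright_avg, bright_names = Intensity_Selector(
    r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Aligned_Frames',
    mode='biggest',
    propotion=0.05)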
Code Example #10
File: main_file.py  Project: adolescent/2P_Analysis
def Cell_Find(run_folder):
    output_folder = run_folder+r'\Results'
    aligned_frame_folder = output_folder+r'\Aligned_Frames'
    all_tif_name = OS_Tools.Get_File_Name(aligned_frame_folder)
    Stim_Frame_Dic = OS_Tools.Load_Variable(output_folder,'Stim_Frame_Align.pkl')
    on_off_graph,Finded_Cells = On_Off_Cell_Finder(all_tif_name, Stim_Frame_Dic,shape_boulder=[20,20,20,35],filter_method = 'Gaussian',LP_Para = ((5,5),1.5))
    cell_folder = output_folder+r'\Cells'
    OS_Tools.Save_Variable(cell_folder, 'Finded_Cells', Finded_Cells,'.cell')
    Graph_tools.Show_Graph(on_off_graph, 'on-off_graph', cell_folder)
    all_keys = list(Finded_Cells.keys())
    all_keys.remove('All_Cell_Information')
    for i in range(len(all_keys)):
        Graph_tools.Show_Graph(Finded_Cells[all_keys[i]], all_keys[i], cell_folder)
    return True
Code Example #11
def Tremble_Calculator_From_File(
        data_folder,
        graph_type='.tif',
        cut_shape=(8, 8),
        boulder=20,
        base_method='former',
        base=[],
):
    '''
    Calculate align tremble from graph. This program is used to evaluate align quality.
    
    Parameters
    ----------
    data_folder : (str)
        Data folder of graphs.
    graph_type : (str), optional
        Extension name of input graphs. The default is '.tif'.
    cut_shape : (tuple), optional
        Shape of fracture cut. The default is (8, 8).
    boulder : (int), optional
        Boulder of graph. Cut and not used in following calculation. The default is 20.
    base_method : ('average' or 'former' or 'input'), optional
        Method of base calculation. The default is 'former'.
        'average' uses the average of all frames as base; 'former' uses the former frame; 'input' requires a base to be given.
    base : (2D_NdArray), optional
        If base_method == 'input', base should be given here. The default is [].

    Returns
    -------
    mass_center_maps : (Graph)
        A plotted graph, showing the movement trace of the mass center.
    tremble_plots : (List)
        List of tremble distances for every fracture graph.
    tremble_information : (Dic)
        Dictionary of tremble information.
    '''
    all_tif_name = OS_Tools.Get_File_Name(data_folder, file_type=graph_type)
    average_graph = Graph_Tools.Average_From_File(all_tif_name)
    tremble_information = {}
    #1. Get base graph first.
    if base_method == 'input':
        base_graph = base
    elif base_method == 'average':
        base_graph = average_graph
    elif base_method == 'former':
        base_graph = cv2.imread(all_tif_name[0], -1)  # First input graph.
    else:
        raise IOError('Invalid Base Method, check please.\n')
Code Example #12
def Tremble_Evaluator(data_folder,
                      ftype='.tif',
                      boulder_ignore=20,
                      cut_shape=(9, 9),
                      mask_thres=0):
    all_file_name = OS_Tools.Get_File_Name(data_folder, ftype)
    template = cv2.imread(all_file_name[0], -1)
    origin_dtype = template.dtype
    graph_shape = template.shape
    graph_num = len(all_file_name)
    origin_graph_matrix = np.zeros(shape=graph_shape + (graph_num, ),
                                   dtype=origin_dtype)
    for i in range(graph_num):
        origin_graph_matrix[:, :, i] = cv2.imread(all_file_name[i], -1)
    average_graph = origin_graph_matrix.mean(axis=2).astype('u2')
    # Show schematic of the cut graph.
    schematic, _, _, _ = Graph_Cutter(average_graph, boulder_ignore, cut_shape)
    # Then, save cut graphs into dics.
    cutted_graph_dic = {}
    fracture_num = cut_shape[0] * cut_shape[1]
    for i in range(fracture_num):  # initialize cut dics.
        cutted_graph_dic[i] = []
    for i in range(graph_num):  # Cycle all graphs
        current_graph = origin_graph_matrix[:, :, i]
        _, _, _, cutted_graphs = Graph_Cutter(current_graph, boulder_ignore,
                                              cut_shape)
        for j in range(fracture_num):  # save each fracture
            cutted_graph_dic[j].append(cutted_graphs[j])
    # Calculate graph center of each fracture trains. Use weighted center.
    all_frac_center = np.zeros(shape=(fracture_num, graph_num, 2), dtype='f8')
    for i in range(fracture_num):
        current_frac = cutted_graph_dic[i]
        for j in range(graph_num):
            current_graph = current_frac[j]
            if mask_thres == 'otsu':
                thres = filters.threshold_otsu(current_graph)
            elif (type(mask_thres) == int or type(mask_thres) == float):
                thres = mask_thres
            else:
                raise IOError('Invalid mask threshold.')
            mask = (current_graph > thres).astype(int)
            properties = regionprops(mask, current_graph)
            current_mc = properties[0].weighted_centroid
            all_frac_center[i, j, :] = current_mc  #In sequence YX
    return schematic, all_frac_center
Code Example #13
def AI_Calculator(graph_folder, start_frame=0, end_frame=-1, masks='No_Mask'):
    '''
    This function calculates the average intensity variation over frames. Masks can be given to calculate only the cell regions.

    Parameters
    ----------
    graph_folder : (str)
        All graphs folder.
    start_frame : (int,optional)
        Start frame num. The default is 0.
    end_frame : (int,optional)
        End frame. The default is -1.
    masks : (2D_Array, optional)
        2D mask array. The input will be binarized, so be careful. The default is 'No_Mask', meaning every pixel is used.

    Returns
    -------
    intensity_series : (Array)
        Return average intensity.

    '''
    #initialize
    all_tif_name = np.array(OS_Tools.Get_File_Name(graph_folder))
    used_tif_name = all_tif_name[start_frame:end_frame]
    frame_Num = len(used_tif_name)
    intensity_series = np.zeros(frame_Num, dtype='f8')
    graph_shape = np.shape(cv2.imread(used_tif_name[0], -1))
    #calculate mask
    if type(masks) == str:
        masks = np.ones(graph_shape, dtype='bool')
    elif masks.dtype != 'bool':
        masks = masks > (masks // 2)
    pix_num = masks.sum()
    #calculate ai trains
    for i in range(frame_Num):
        current_graph = cv2.imread(used_tif_name[i], -1)
        masked_graph = current_graph * masks
        current_ai = masked_graph.sum() / pix_num
        intensity_series[i] = current_ai
    return intensity_series
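A short usage sketch, first over the whole frame, then restricted to a cell mask as produced in Code Example #24; paths and mask are illustrative.

# Usage sketch; the folder is illustrative.
ai_series = AI_Calculator(r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Aligned_Frames')
# Any same-sized 2D array works as a mask, e.g. a cell mask as in Code Example #24:
# cell_mask = (cell_Dic['Cell_Graph'][:, :, 0]) > 0
# masked_ai = AI_Calculator(frame_folder, masks=cell_mask)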
Code Example #14
def Translation_Alignment(all_folders,
                          base_mode='global',
                          input_base=np.array([[0, 0], [0, 0]]),
                          align_range=20,
                          align_boulder=20,
                          before_average=True,
                          average_std=5,
                          big_memory_mode=False,
                          save_aligned_data=False,
                          graph_shape=(512, 512),
                          timer=True):
    '''
    
    This function will align all tif graphs in the input folders. Only translation transformation is done here; affine transformation needs further discussion.
    
    Parameters
    ----------
    all_folders:(list)
        List of all tif folders, elements are strs.
    
    base_mode:('global', int, 'input', optional. The default is 'global')
        How to select the base frame. 'global': use the global average as base. int: use the average of that specific run as base. 'input': manually input a base graph, which needs to be a 2D-Ndarray.

    input_base:(2D-Ndarray, optional. The default is a 2x2 zero array.)
        If base_mode = 'input', input_base must be given. This will be the base for alignment.

    align_range:(int, optional. The default is 20)
        Max pixel shift of alignment.

    align_boulder:(int, optional. The default is 20)
        Boulder cut for align. For different graph sizes, this variable shall be changed.

    before_average:(bool, optional. The default is True)
        Whether the before-align average is done. It can be set False to save time; in this case a base graph shall be given.

    average_std:(float, optional. The default is 5)
        How many stds you want for average graph generation. Different std values affect graph appearance.

    big_memory_mode:(bool, optional. The default is False)
        If memory is big enough, using this mode is faster.

    save_aligned_data:(bool, optional. The default is False)
        Can be True only in big memory mode. This will save all aligned graphs in a single 4D-Ndarray file. The save folder is the first folder.

    graph_shape:(2-element-tuple, optional. The default is (512,512))
        Shape of graphs. All input graphs must be the same shape.

    timer:(bool, optional. The default is True)
        Show runtime of the function and each procedure.
    
        
    Returns
    -------
    bool
        Whether new folder is generated.
    
    '''
    time_tic_start = time.time()
    #%% Step1, generate folders and file cycle.
    all_save_folders = List_Op.List_Annex(all_folders, ['Results'])
    Aligned_frame_folders = List_Op.List_Annex(all_save_folders,
                                               ['Aligned_Frames'])
    for i in range(len(all_save_folders)):
        OS_Tools.mkdir(all_save_folders[i])
        OS_Tools.mkdir(Aligned_frame_folders[i])
    Before_Align_Tif_Name = []
    for i in range(len(all_folders)):
        current_run_tif = OS_Tools.Get_File_Name(all_folders[i])
        Before_Align_Tif_Name.append(current_run_tif)

    #%% Step2, Generate average map before align.
    if before_average == True:
        print('Before run averaging ...')
        Before_Align_Dics = {
        }  # This is the dictionary of all run averages. Keys are run id.
        total_graph_num = 0  # Counter of graph numbers.
        for i in range(len(Before_Align_Tif_Name)):
            current_run_graph_num = len(Before_Align_Tif_Name[i])
            total_graph_num += current_run_graph_num
            current_run_average = Graph_Tools.Average_From_File(
                Before_Align_Tif_Name[i])
            current_run_average = Graph_Tools.Clip_And_Normalize(
                current_run_average, clip_std=average_std)
            Before_Align_Dics[i] = (
                current_run_average, current_run_graph_num
            )  # Attention here, data recorded as tuple.
            Graph_Tools.Show_Graph(
                current_run_average, 'Run_Average',
                all_save_folders[i])  # Show and save Run Average.
        # Then Use Weighted average method to generate global tif.
        global_average_graph = np.zeros(shape=np.shape(
            Before_Align_Dics[0][0]),
                                        dtype='f8')  # Base on shape of graph
        for i in range(len(Before_Align_Tif_Name)):
            global_average_graph += Before_Align_Dics[i][0].astype(
                'f8') * Before_Align_Dics[i][1] / total_graph_num
        global_average_graph = Graph_Tools.Clip_And_Normalize(
            global_average_graph, clip_std=average_std)
        # Then save global average in each run folder.
        if len(all_folders) > 1:
            for i in range(len(Before_Align_Tif_Name)):
                Graph_Tools.Show_Graph(global_average_graph,
                                       'Global_Average',
                                       all_save_folders[i],
                                       show_time=0)
        else:
            print('Only One run, no global average.')
    else:
        print('Before average Skipped.')
    time_tic_average0 = time.time()

    #%% Step3, Core Align Function.
    print('Aligning...')
    if base_mode == 'global':
        base = global_average_graph
    elif base_mode == 'input':
        base = input_base
    elif type(base_mode) == int:
        base = Before_Align_Dics[base_mode][0]
    else:
        raise IOError('Invalid base mode.')
    # In big memory mode, save aligned_data in a dictionary file.
    if big_memory_mode == True:
        All_Aligned_Frame = {}
        for i in range(len(Before_Align_Tif_Name)):
            All_Aligned_Frame[i] = np.zeros(
                shape=(graph_shape + (len(Before_Align_Tif_Name[i]), )),
                dtype='u2')  # Generate empty graph matrix.
    for i in range(len(Before_Align_Tif_Name)):  # Cycle all runs
        for j in range(len(
                Before_Align_Tif_Name[i])):  # Cycle all graphs in current run
            current_graph = cv2.imread(Before_Align_Tif_Name[i][j],
                                       -1)  # Read in current graph.
            _, _, current_aligned_graph = Alignment(base,
                                                    current_graph,
                                                    boulder=align_boulder,
                                                    align_range=align_range)
            graph_name = Before_Align_Tif_Name[i][j].split(
                '\\')[-1][:-4]  # Ignore extension name '.tif'.
            Graph_Tools.Show_Graph(current_aligned_graph,
                                   graph_name,
                                   Aligned_frame_folders[i],
                                   show_time=0)
            if big_memory_mode == True:
                All_Aligned_Frame[i][:, :, j] = current_aligned_graph
    print('Align Finished, generating average graphs...')
    time_tic_align_finish = time.time()

    #%% Step4, After Align Average
    After_Align_Graphs = {}
    if big_memory_mode == True:  # Average can be faster.
        temp_global_average_after_align = np.zeros(shape=graph_shape,
                                                   dtype='f8')
        for i in range(len(All_Aligned_Frame)):
            current_run_average = Graph_Tools.Clip_And_Normalize(
                np.mean(All_Aligned_Frame[i], axis=2),
                clip_std=average_std)  # Average run graphs, in type 'u2'
            After_Align_Graphs[i] = (current_run_average,
                                     len(All_Aligned_Frame[i][0, 0, :]))
            temp_global_average_after_align += After_Align_Graphs[i][0].astype(
                'f8') * After_Align_Graphs[i][1] / total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(
            temp_global_average_after_align, clip_std=average_std)
    else:  # Traditional ways.
        temp_global_average_after_align = np.zeros(shape=graph_shape,
                                                   dtype='f8')
        for i in range(len(Aligned_frame_folders)):
            current_run_names = OS_Tools.Get_File_Name(
                Aligned_frame_folders[i])
            current_run_average = Graph_Tools.Average_From_File(
                current_run_names)
            current_run_average = Graph_Tools.Clip_And_Normalize(
                current_run_average, clip_std=average_std)
            After_Align_Graphs[i] = (current_run_average,
                                     len(current_run_names))
            temp_global_average_after_align += After_Align_Graphs[i][0].astype(
                'f8') * After_Align_Graphs[i][1] / total_graph_num
        global_average_after_align = Graph_Tools.Clip_And_Normalize(
            temp_global_average_after_align, clip_std=average_std)
    # After average, save aligned graph in each save folder.
    for i in range(len(all_save_folders)):
        current_save_folder = all_save_folders[i]
        Graph_Tools.Show_Graph(After_Align_Graphs[i][0],
                               'Run_Average_After_Align', current_save_folder)
        if i == 0:  # Show global average only once.
            global_show_time = 5000
        else:
            global_show_time = 0
        if len(all_folders) > 1:
            Graph_Tools.Show_Graph(global_average_after_align,
                                   'Global_Average_After_Align',
                                   current_save_folder,
                                   show_time=global_show_time)
    time_tic_average1 = time.time()

    #%% Step5, save and timer
    if save_aligned_data == True:
        OS_Tools.Save_Variable(all_save_folders[0], 'All_Aligned_Frame_Data',
                               All_Aligned_Frame)

    if timer == True:
        whole_time = time_tic_average1 - time_tic_start
        before_average_time = time_tic_average0 - time_tic_start
        align_time = time_tic_align_finish - time_tic_average0
        after_average_time = time_tic_average1 - time_tic_align_finish
        print('Total Time = ' + str(whole_time) + ' s.')
        print('Before Average Time = ' + str(before_average_time) + ' s.')
        print('Align Time = ' + str(align_time) + ' s.')
        print('After Average Time = ' + str(after_average_time) + ' s.')

    return True
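A short sketch of the three base_mode options documented above; the folders are illustrative, and `my_base` is a hypothetical 2D array supplied by the caller.

# Usage sketch; folders are illustrative.
run_folders = [
    r'G:\Test_Data\2P\201111_L76_LM\1-002',
    r'G:\Test_Data\2P\201111_L76_LM\1-003',
]
Translation_Alignment(run_folders)               # 'global': global average as base
Translation_Alignment(run_folders, base_mode=0)  # int: average of run 0 as base
# 'input': supply your own base graph (my_base, a 2D-Ndarray).
# Translation_Alignment(run_folders, base_mode='input', input_base=my_base)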
Code Example #15
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 13:41:07 2020

@author: ZR
Codes to process L76 Data
"""

import My_Wheels.Graph_Operation_Kit as Graph_Tools
import My_Wheels.OS_Tools_Kit as OS_Tools
#%% Cell1, Average Graph.
graph_folder = r'I:\Test_Data\201023_L76_LM\1-003'
save_path = graph_folder + r'\Results'
OS_Tools.mkdir(save_path)
all_tif_name = OS_Tools.Get_File_Name(graph_folder)
average_graph = Graph_Tools.Average_From_File(all_tif_name)
norm_average_graph = Graph_Tools.Clip_And_Normalize(average_graph, clip_std=3)
Graph_Tools.Show_Graph(norm_average_graph, 'Average_Graph', save_path)
#%% Then Calculate Runs
graph_folder = r'I:\Test_Data\201023_L76_LM\1-013'
import My_Wheels.Translation_Align_Function as Align
Align.Translation_Alignment([graph_folder])
#%% Align Stim and Frame
import My_Wheels.Stim_Frame_Align as Stim_Frame_Align
stim_folder = r'I:\Test_Data\201023_L76_LM\201023_L76_LM_Stimulus\Run13_RGLum4'
Frame_Stim_Sequence, Frame_Stim_Dictionary = Stim_Frame_Align.Stim_Frame_Align(
    stim_folder)
aligned_tif_name = OS_Tools.Get_File_Name(
    r'I:\Test_Data\201023_L76_LM\1-013\Results\Aligned_Frames')
#%% Generate On-Off Map
on_id = []
Code Example #16
def Tremble_Calculator_From_File(data_folder,
                                 graph_type='.tif',
                                 cut_shape=(10, 5),
                                 boulder=20,
                                 move_method='former',
                                 base=[],
                                 center_method='weight'):
    '''
    Calculate align tremble from graph. This program is used to evaluate align quality.
    
    Parameters
    ----------
    data_folder : (str)
        Data folder of graphs.
    graph_type : (str), optional
        Extension name of input graphs. The default is '.tif'.
    cut_shape : (tuple), optional
        Shape of fracture cut. The default is (10,5).
    boulder : (int), optional
        Boulder of graph. Cut and not used in following calculation. The default is 20.
    move_method : ('average' or 'former' or 'input'), optional
        Method of base calculation. The default is 'former'.
        'average' uses the average of all frames as base; 'former' uses the former frame; 'input' requires a base to be given.
    base : (2D_NdArray), optional
        If move_method == 'input', base should be given here. The default is [].
    center_method : ('weight' or 'binary'), optional
        Method of center finding, i.e. whether weighted intensity is used. The default is 'weight'.

    Returns
    -------
    mass_center_maps : (Graph)
        A plotted graph, showing the movement trace of the mass center.
    tremble_plots : (List)
        List of tremble distances for every fracture graph.
    tremble_information : (Dic)
        Dictionary of tremble information.
        Data layout of tremble_information:
            keys: frame ID.
            data are lists; every element of a list corresponds to one fracture graph, indexed by its ID in the cut graph.
            list elements are tuples: tuple[0] is the move vector, tuple[1] is the move distance.
            

    '''
    all_tif_name = OS_Tools.Get_File_Name(data_folder, graph_type)
    average_graph = Graph_Tools.Average_From_File(all_tif_name)
    tremble_information = {}
    # get base of align first.
    if move_method == 'average':
        base_graph = average_graph
    elif move_method == 'input':
        base_graph = base
    elif move_method == 'former':
        base_graph = cv2.imread(all_tif_name[0],
                                -1)  # Use first frame as base.
    else:
        raise IOError('Invalid move method, check please.\n')

    # cycle all graph to generate tremble plots.
    for i in range(len(all_tif_name)):
        # Read in the current graph.
        current_graph = cv2.imread(all_tif_name[i], -1)
        # Cut graph as described.
        _, _, _, cutted_current_graph = Graph_Cutter(current_graph, boulder,
                                                     cut_shape)
        _, _, _, cutted_base = Graph_Cutter(base_graph, boulder, cut_shape)
        # Renew base if former mode.
        if move_method == 'former':
            base_graph = cv2.imread(all_tif_name[i], -1)
        # Then cycle all cutted_fracture, to calculate movement of every fracture graph.
        current_frame_move_list = []
        for j in range(len(cutted_current_graph)):
            temp_graph_part = cutted_current_graph[j]
            temp_base_part = cutted_base[j]
            temp_graph_center, _ = Graph_Tools.Graph_Center_Calculator(
                temp_graph_part, center_mode=center_method)
            temp_base_center, _ = Graph_Tools.Graph_Center_Calculator(
                temp_base_part, center_mode=center_method)
            temp_tremble_vector, temp_tremble_dist = Calculator.Vector_Calculate(
                temp_base_center, temp_graph_center)
            current_frame_move_list.append(
                (temp_tremble_vector, temp_tremble_dist))
        tremble_information[i] = current_frame_move_list

    # Then, plot mass center plots. This will show change of mass center position.
    if move_method == 'input':
        print('No Mass Center plot Generated.')
        mass_center_maps = False
    elif move_method == 'average':  # If average, use current location
        mass_center_maps = []
        for i in range(len(tremble_information[0])):  # Cycle all fracture
            fig = plt.figure()
            ax = plt.subplot()
            for j in range(len(tremble_information)):  # Cycle all frame
                current_point = tremble_information[j][i][0]
                ax.scatter(current_point[1], current_point[0], alpha=1, s=5)
            mass_center_maps.append(fig)
            plt.close()
    elif move_method == 'former':
        mass_center_maps = []
        for i in range(len(tremble_information[0])):  # Cycle all fracture
            fig = plt.figure()
            ax = plt.subplot()
            current_point = (0, 0)
            for j in range(len(tremble_information)):  # Cycle all frame
                current_point = (current_point[0] +
                                 tremble_information[j][i][0][0],
                                 current_point[1] +
                                 tremble_information[j][i][0][1])
                ax.scatter(current_point[1], current_point[0], alpha=1, s=5)
            mass_center_maps.append(fig)
            plt.close()

    # At last, plot tremble dist plots. Each fracture have a plot.
    tremble_plots = {}
    for i in range(len(tremble_information[0])):  # Cycle all fractures
        current_tremble_plot = []
        for j in range(len(tremble_information)):  # Cycle all frame
            current_dist = tremble_information[j][i][1]
            current_tremble_plot.append(current_dist)
        tremble_plots[i] = np.asarray(current_tremble_plot)
    return mass_center_maps, tremble_plots, tremble_information
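A short usage sketch, evaluating alignment quality of an aligned run; the folder is illustrative.

# Usage sketch; the folder is illustrative.
mc_maps, tremble_plots, tremble_info = Tremble_Calculator_From_File(
    r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Aligned_Frames',
    cut_shape=(10, 5),
    move_method='former')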
Code Example #17
File: main_file.py  Project: adolescent/2P_Analysis
run_list = [
    r'G:\Test_Data\2P\201111_L76_LM\1-002',
    r'G:\Test_Data\2P\201111_L76_LM\1-003',
    r'G:\Test_Data\2P\201111_L76_LM\1-009'
    ]
for i in range(3):
    Cell_Find(run_list[i])
#%% Calculate spike trains of all found cells.
from My_Wheels.Spike_Train_Generator import Spike_Train_Generator
run_list = [
    r'G:\Test_Data\2P\201111_L76_LM\1-002',
    r'G:\Test_Data\2P\201111_L76_LM\1-003',
    r'G:\Test_Data\2P\201111_L76_LM\1-009'
    ]
for i in range(3):
    cell_dic = OS_Tools.Load_Variable(run_list[i]+r'\Results\Cells\Finded_Cells.cell')
    all_tif_name = OS_Tools.Get_File_Name(run_list[i]+r'\Results\Aligned_Frames')
    stim_train = OS_Tools.Load_Variable(run_list[i]+r'\Results\Stim_Frame_Align.pkl')['Original_Stim_Train']
    F_train,dF_F_train = Spike_Train_Generator(all_tif_name, cell_dic['All_Cell_Information'])
    OS_Tools.Save_Variable(run_list[i]+r'\Results\Cells', 'F_train', F_train)
    OS_Tools.Save_Variable(run_list[i]+r'\Results\Cells', 'dF_F_train', dF_F_train)
#%% Calculate subgraph one by one.
from My_Wheels.Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
from My_Wheels.Standard_Stim_Processor import Standard_Stim_Processor

G8_Subdic = Sub_Dic_Generator('G8+90')
Standard_Stim_Processor(r'G:\Test_Data\2P\201111_L76_LM\1-002',
                        stim_folder = r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Stim_Frame_Align.pkl',
                        sub_dic = G8_Subdic,
                        tuning_graph=False,
                        cell_method = r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Cells\Finded_Cells.cell',
                        spike_train_path=r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Cells\dF_F_train.pkl',
Code Example #18
Cell_Find_And_Plot(r'G:\Test_Data\2P\201211_L76_2P\1-001\Results', 'Global_Average_After_Align.tif', 'Global_Morpho',find_thres= 1.5,shape_boulder = [20,20,30,20])
#%% Then calculate the stim train of each stim series.
from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
all_stim_folder = [
    r'G:\Test_Data\2P\201211_L76_2P\201211_L76_2P_stimuli\Run10_2P_G8',
    r'G:\Test_Data\2P\201211_L76_2P\201211_L76_2P_stimuli\Run12_2P_OD8_auto',
    r'G:\Test_Data\2P\201211_L76_2P\201211_L76_2P_stimuli\Run14_2P_RGLum4',
    ]
for i in range(3):
    _,current_stim_dic = Stim_Frame_Align(all_stim_folder[i])
    OS_Tools.Save_Variable(all_stim_folder[i], 'Stim_Frame_Align', current_stim_dic)
#%% Then calculate spike train of different runs.
from My_Wheels.Spike_Train_Generator import Spike_Train_Generator
#Cycle basic stim maps.
for i,index in enumerate([1,2,4]):
    current_aligned_tif_name  = OS_Tools.Get_File_Name(all_run_folder[index]+r'\Results\Aligned_Frames')
    current_stim = OS_Tools.Load_Variable(all_stim_folder[i],file_name='Stim_Frame_Align.pkl')['Original_Stim_Train']
    current_cell_info = OS_Tools.Load_Variable(all_run_folder[index]+r'\Results\Global_Morpho\Global_Morpho.cell')['All_Cell_Information']
    F_train,dF_F_train = Spike_Train_Generator(current_aligned_tif_name, current_cell_info,Base_F_type= 'nearest_0',stim_train = current_stim)
    OS_Tools.Save_Variable(all_run_folder[index]+r'\Results', 'F_train', F_train)
    OS_Tools.Save_Variable(all_run_folder[index]+r'\Results', 'dF_F_train', dF_F_train)

#%% Then calculate standard stim map.
from My_Wheels.Standard_Stim_Processor import Standard_Stim_Processor
from My_Wheels.Standard_Parameters.Sub_Graph_Dics import Sub_Dic_Generator
Standard_Stim_Processor(r'G:\Test_Data\2P\201211_L76_2P\1-010',
                        r'G:\Test_Data\2P\201211_L76_2P\1-010\Results\Stim_Frame_Align.pkl',
                        Sub_Dic_Generator('G8+90'),
                        cell_method = r'G:\Test_Data\2P\201211_L76_2P\1-010\Results\Global_Morpho\Global_Morpho.cell',
                        spike_train_path=r'G:\Test_Data\2P\201211_L76_2P\1-010\Results\dF_F_train.pkl'
                        )
Code Example #19
def Standard_Cell_Processor(
    animal_name,
    date,
    day_folder,
    cell_file_path,
    #average_graph_path, # not necessary.
    run_id_lists,
    location='A',  # For runs have 
    Stim_Frame_Align_name='_All_Stim_Frame_Infos.sfa',
    #Stim_Frame_Align_subfolder = r'\Results\Stim_Frame_Align.pkl',# API changed.
    align_subfolder=r'\Results\Aligned_Frames',
    response_head_extend=3,
    response_tail_extend=3,
    base_frame=[0, 1, 2],
    filter_para=(0.02, False)):
    # Folder and name initialization
    print('Just make sure average and cell find is already done.')
    cell_dic = ot.Load_Variable(cell_file_path)
    cell_info = cell_dic['All_Cell_Information']
    cell_name_prefix = animal_name + '_' + str(date) + location + '_'
    all_cell_num = len(cell_info)
    all_run_subfolders = lt.List_Annex([day_folder],
                                       lt.Run_Name_Producer_2P(run_id_lists))
    save_folder = day_folder
    all_Stim_Frame_Align = ot.Load_Variable(day_folder + r'\\' +
                                            Stim_Frame_Align_name)
    # Set cell data formats.
    all_cell_list = []
    for i in range(all_cell_num):
        current_cell_name = cell_name_prefix + ot.Bit_Filler(i, 4)
        current_cell_dic = {}
        current_cell_dic['Name'] = current_cell_name
        current_cell_dic['Cell_Info'] = cell_info[i]
        # Cycle all runs for F and dF trains.
        current_cell_dic['dF_F_train'] = {}
        current_cell_dic['F_train'] = {}
        current_cell_dic['Raw_CR_trains'] = {}
        current_cell_dic['CR_trains'] = {}
        all_cell_list.append(current_cell_dic)
    # Then cycle all runs, fill in
    for i in range(len(all_run_subfolders)):
        current_runid = 'Run' + (all_run_subfolders[i][-3:]
                                 )  # Use origin run id to avoid bugs.
        current_all_tif_name = ot.Get_File_Name(
            all_run_subfolders[i] + align_subfolder, '.tif')
        current_Stim_Frame_Align = all_Stim_Frame_Align[current_runid]
        if current_Stim_Frame_Align == None or len(
                current_Stim_Frame_Align
        ) == 302:  # meaning this run is spon or RF25.
            current_run_Fs, current_run_dF_Fs = Spike_Train_Generator(
                current_all_tif_name, cell_info, 'most_unactive', None)
        else:
            current_run_stim_train = current_Stim_Frame_Align[
                'Original_Stim_Train']
            if 0 in current_run_stim_train:  # having 0
                current_run_Fs, current_run_dF_Fs = Spike_Train_Generator(
                    current_all_tif_name,
                    cell_info,
                    Base_F_type='nearest_0',
                    stim_train=current_run_stim_train)
            else:
                current_run_Fs, current_run_dF_Fs = Spike_Train_Generator(
                    current_all_tif_name,
                    cell_info,
                    Base_F_type='before_ISI',
                    stim_train=current_run_stim_train)
        # Then put trains above into each cell files.
        for j in range(all_cell_num):
            all_cell_list[j]['dF_F_train'][current_runid] = current_run_dF_Fs[
                j]
            all_cell_list[j]['F_train'][current_runid] = current_run_Fs[j]
        # Then, we generate Condition Reaction Train for each cell and each condition.
        if current_Stim_Frame_Align == None:
            for j in range(all_cell_num):
                all_cell_list[j]['CR_trains'][current_runid] = None
                all_cell_list[j]['Raw_CR_trains'][current_runid] = None
        else:
            for j in range(all_cell_num):
                all_cell_list[j]['CR_trains'][current_runid], all_cell_list[j][
                    'Raw_CR_trains'][
                        current_runid] = Single_Condition_Train_Generator(
                            current_run_Fs[j], current_Stim_Frame_Align,
                            response_head_extend, response_tail_extend,
                            base_frame, filter_para)
    # Till now, all cell data of all runs is saved in 'all_cell_list'.
    # Last part, saving files. All cells in one file, dtype = dic.
    all_cell_dic = {}
    for i in range(all_cell_num):
        all_cell_dic[all_cell_list[i]['Name']] = all_cell_list[i]
    ot.Save_Variable(save_folder,
                     '_' + animal_name + '_' + str(date) + location + '_All_Cells',
                     all_cell_dic, '.ac')
    return True
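A short usage sketch; every value is illustrative, and the day folder is assumed to already contain the '_All_Stim_Frame_Infos.sfa' file plus aligned frames and a cell file.

# Usage sketch; all values are illustrative.
Standard_Cell_Processor(
    animal_name='L76',
    date='201111',
    day_folder=r'G:\Test_Data\2P\201111_L76_LM',
    cell_file_path=r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Cells\Finded_Cells.cell',
    run_id_lists=[2, 3, 9])  # run id format depends on lt.Run_Name_Producer_2P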
Code Example #20
import My_Wheels.List_Operation_Kit as List_Tools
import My_Wheels.Graph_Operation_Kit as Graph_Tools
import My_Wheels.OS_Tools_Kit as OS_Tools
import cv2
data_folder = [r'E:\Test_Data\2P\201222_L76_2P']
run_list = [
    '1-001',  # Spon
    '1-008',  # OD
    '1-010',  # G8
    '1-011',  # RGLum4
    '1-014'  # Spon After
]
all_runs = List_Tools.List_Annex(data_folder, run_list)
#%% Add 3 pixel columns to run01 frames to fit the ROI change.
run_1 = all_runs[0]
run1_all_tif = OS_Tools.Get_File_Name(run_1)
save_path = run_1 + r'\shape_extended'
OS_Tools.mkdir(save_path)
for i in range(len(run1_all_tif)):
    current_graph = cv2.imread(run1_all_tif[i], -1)
    extended_graph = Graph_Tools.Boulder_Extend(
        current_graph, [0, 0, 0, 3])  # 3 pix on the right.
    current_graph_name = run1_all_tif[i].split('\\')[-1]
    Graph_Tools.Show_Graph(extended_graph,
                           current_graph_name,
                           save_path,
                           show_time=0)
#%% Then align Run01_Spon.
from My_Wheels.Translation_Align_Function import Translation_Alignment
Translation_Alignment([all_runs[0] + r'\shape_extended'],
                      graph_shape=(325, 324))
Code Example #21
def Video_From_File(data_folder,
                    plot_range=(0, 9999),
                    graph_size=(472, 472),
                    file_type='.tif',
                    fps=15,
                    gain=20,
                    LP_Gaussian=([5, 5], 1.5),
                    frame_annotate=True,
                    cut_boulder=[20, 20, 20, 20]):
    '''
    Write all files in a folder as a video.

    Parameters
    ----------
    data_folder : (str)
        Frame folder. All frames in this folder will be written into the video. Dtype shall be u2 or there will be a problem.
    plot_range : (2-element-tuple), optional
        Range of frame IDs to write. The default is (0, 9999).
    graph_size : (2-element-tuple), optional
        Frame size AFTER cut. The default is (472,472).
    file_type : (str), optional
        Data type of graph file. The default is '.tif'.
    fps : (int), optional
        Frames per second. The default is 15.
    gain : (int), optional
        Show gain. The default is 20.
    LP_Gaussian : (tuple), optional
        LP Gaussian filter parameter. Only low pass is done. The default is ([5,5],1.5).
    frame_annotate : (bool), optional
        Whether we annotate the frame number on the video. The default is True.
    cut_boulder : (list), optional
        Boulder cut of graphs, UDLR. The default is [20,20,20,20].


    Returns
    -------
    bool
        True if function processed.

    '''

    all_tif_name = OS_Tools.Get_File_Name(path=data_folder,
                                          file_type=file_type)
    start_frame = plot_range[0]
    end_frame = min(plot_range[1], len(all_tif_name))
    all_tif_name = all_tif_name[start_frame:end_frame]
    graph_num = len(all_tif_name)
    video_writer = cv2.VideoWriter(data_folder + r'\\Video.mp4',
                                   cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'),
                                   fps, graph_size, 0)
    #video_writer = cv2.VideoWriter(data_folder+r'\\Video.avi',-1,fps,graph_size,0)
    for i in range(graph_num):
        raw_graph = cv2.imread(all_tif_name[i], -1).astype('f8')
        # Cut graph boulder.
        raw_graph = Graph_Tools.Graph_Cut(raw_graph, cut_boulder)
        # Do gain then
        gained_graph = np.clip(raw_graph.astype('f8') * gain / 256, 0,
                               255).astype('u1')
        # Then do filter, then
        if LP_Gaussian != False:
            u1_writable_graph = Filters.Filter_2D(gained_graph, LP_Gaussian,
                                                  False)
        else:
            u1_writable_graph = gained_graph
        if frame_annotate == True:
            cv2.putText(u1_writable_graph, 'Frame ID = ' + str(i), (250, 30),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255), 1)
        video_writer.write(u1_writable_graph)
    del video_writer
    return True
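A short usage sketch, writing the first 1000 aligned frames of a run into a video; the folder is illustrative.

# Usage sketch; the folder is illustrative. The video is written inside the frame folder.
Video_From_File(r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Aligned_Frames',
                plot_range=(0, 1000),
                fps=15,
                gain=20)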
Code Example #22
def Affine_Aligner_Gaussian(data_folder,
                            base_graph,
                            window_size=1,
                            max_point=50000,
                            good_match_prop=0.3,
                            dist_lim=120,
                            match_checker=1,
                            sector_num=4,
                            write_file=False,
                            save_folder='Default'):
    if save_folder == 'Default':
        save_folder = data_folder + r'\Results'
    aligned_tif_folder = save_folder + r'\Affined_Frames'
    OS_Tools.mkdir(save_folder)
    OS_Tools.mkdir(aligned_tif_folder)

    all_tif_name = OS_Tools.Get_File_Name(data_folder)
    graph_num = len(all_tif_name)
    graph_shape = cv2.imread(all_tif_name[0], -1).shape
    height, width = graph_shape
    origin_tif_matrix = np.zeros(shape=graph_shape + (graph_num, ), dtype='u2')
    # Read in all tif frames.
    for i in range(graph_num):
        origin_tif_matrix[:, :, i] = cv2.imread(all_tif_name[i], -1)
    # Then get sliding-window averaged graphs.
    if window_size == 1:
        slipped_average_matrix = origin_tif_matrix
    else:
        slipped_average_matrix = Filters.Window_Average(
            origin_tif_matrix, window_size=window_size)
    # Use slip average to get deformation parameters.
    aligned_tif_matrix = np.zeros(shape=origin_tif_matrix.shape, dtype='u2')
    h_dic = {}  # Deformation parameters
    for i in range(graph_num):
        target = slipped_average_matrix[:, :, i]
        _, current_h = Affine_Core_Point_Equal(target,
                                               base_graph,
                                               max_point=max_point,
                                               good_match_prop=good_match_prop,
                                               sector_num=sector_num,
                                               dist_lim=dist_lim,
                                               match_checker=match_checker)
        h_dic[i] = current_h
        current_deformed_graph = cv2.warpPerspective(
            origin_tif_matrix[:, :, i], current_h, (width, height))
        Graph_Tools.Show_Graph(current_deformed_graph,
                               all_tif_name[i].split('\\')[-1],
                               aligned_tif_folder,
                               show_time=0,
                               graph_formation='')
        aligned_tif_matrix[:, :, i] = current_deformed_graph
    OS_Tools.Save_Variable(save_folder, 'Deform_H', h_dic)
    if write_file == True:
        OS_Tools.Save_Variable(save_folder, 'Affine_Aligned_Graphs',
                               aligned_tif_matrix)
    # At last, generate average graphs
    graph_before_align = origin_tif_matrix.mean(axis=2).astype('u2')
    graph_after_align = aligned_tif_matrix.mean(axis=2).astype('u2')
    graph_before_align = Graph_Tools.Clip_And_Normalize(graph_before_align,
                                                        clip_std=5)
    graph_after_align = Graph_Tools.Clip_And_Normalize(graph_after_align,
                                                       clip_std=5)
    Graph_Tools.Show_Graph(graph_before_align, 'Graph_Before_Affine',
                           save_folder)
    Graph_Tools.Show_Graph(graph_after_align, 'Graph_After_Affine',
                           save_folder)
    return True
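A short usage sketch; paths are illustrative, and any 2D reference frame (for example a previously saved run average) can serve as base_graph.

# Usage sketch; paths are illustrative.
base_graph = cv2.imread(
    r'G:\Test_Data\2P\201111_L76_LM\1-002\Results\Run_Average_After_Align.tif', -1)
Affine_Aligner_Gaussian(r'G:\Test_Data\2P\201111_L76_LM\1-002', base_graph,
                        window_size=1)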
Code Example #23
def Standard_Stim_Processor(data_folder,
                            stim_folder,
                            sub_dic,
                            alinged_sub_folder=r'\Results\Aligned_Frames',
                            show_clip=3,
                            tuning_graph=False,
                            cell_method='Default',
                            filter_method='Gaussian',
                            LP_Para=((5, 5), 1.5),
                            HP_Para=False,
                            spike_train_path='Default',
                            spike_train_filter_para=(False, False),
                            spike_train_filter_method=False):
    '''
    Generate subtraction graphs, cell graphs and tuning graphs if required.

    Parameters
    ----------
    data_folder : (str)
        Run folder.
    stim_folder : (str)
        Stim file folder or Frame_Stim_Align File folder. Pre align is advised.
    sub_dic : (Dic)
        Subtraction dictionary. This can be generated from My_Wheels.Standard_Parameters
    show_clip : (float), optional
        Clip of graph show. The default is 3.
    tuning_graph : (bool), optional
        Whether we generate tuning graphs of each cell. The default is False.
    cell_method : (str), optional
        Cell find method. You can input a cell file path here. The default is 'Default'.
    filter_method : (str), optional
        False to skip filter. Kernel function of graph filtering. The default is 'Gaussian'.
    LP_Para : (tuple), optional
        False to skip. Low pass filter of graph. The default is ((5,5),1.5).
    HP_Para : (tuple), optional
        False to skip. High pass filter of graph. A big HP can be very slow! The default is False.
    spike_train_path : (str), optional
        Path of spike train. 'Default' will generate the spike train directly. The default is 'Default'.
    spike_train_filter_para : (tuple), optional
        Signal filter bandpass proportion of spike train. Please be sure whether you need this. The default is (False,False).
    spike_train_filter_method : (str), optional
        False to skip. Method of signal filtering. The default is False.

    Returns
    -------
    None.

    '''
    # Path Cycle.
    from Cell_Find_From_Graph import On_Off_Cell_Finder
    work_folder = data_folder + r'\Results'
    OS_Tools.mkdir(work_folder)
    aligned_frame_folder = data_folder + alinged_sub_folder
    OS_Tools.mkdir(aligned_frame_folder)

    # Step1, align graphs. If already aligned, just read
    if not os.listdir(aligned_frame_folder):  # if this is a new folder
        print('Aligned data not found. Aligning here..')
        Translation_Alignment([data_folder])
    aligned_all_tif_name = np.array(
        OS_Tools.Get_File_Name(aligned_frame_folder)
    )  # Use numpy array, this is easier for slice.

    # Step2, get stim frame align matrix. If already aligned, just read in the aligned dictionary.
    file_detector = len(stim_folder.split('.'))
    if file_detector == 1:  # Which means input is a folder
        print('Frame Stim not Aligned, aligning here...')
        from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
        _, Frame_Stim_Dic = Stim_Frame_Align(stim_folder)
    else:  # Input is a file
        Frame_Stim_Dic = OS_Tools.Load_Variable(stim_folder)

    # Step3, get cell information
    if cell_method == 'Default':  # meaning we will use On-Off graph to find cell.
        print('Cell information not found. Finding here..')
        cell_dic = On_Off_Cell_Finder(aligned_all_tif_name,
                                      Frame_Stim_Dic,
                                      filter_method=filter_method,
                                      LP_Para=LP_Para,
                                      HP_Para=HP_Para)
    else:
        cell_dic = OS_Tools.Load_Variable(cell_method)

    # Step4, calculate spike_train.
    if spike_train_path != 'Default':
        dF_F_train = OS_Tools.Load_Variable(spike_train_path)
    else:  # meaning we need to calculate spike train from the very beginning.

        _, dF_F_train = Spike_Train_Generator(
            aligned_all_tif_name,
            cell_dic['All_Cell_Information'],
            Base_F_type='nearest_0',
            stim_train=Frame_Stim_Dic['Original_Stim_Train'],
            LP_Para=LP_Para,
            HP_Para=HP_Para,
            filter_method=filter_method)
    #Step5, filter spike trains.
    if spike_train_filter_method != False:  # Meaning we need to do train filter.
        for i in range(len(dF_F_train)):
            dF_F_train[i] = My_Filter.Signal_Filter(dF_F_train[i],
                                                    spike_train_filter_method,
                                                    spike_train_filter_para)
    # Step6, get each frame graph and cell graph.
    all_graph_keys = list(sub_dic.keys())
    for i in range(len(sub_dic)):
        output_folder = work_folder + r'\Subtraction_Graphs'
        current_key = all_graph_keys[i]
        current_sub_list = sub_dic[current_key]
        A_conds = current_sub_list[0]  # condition of A graph
        B_conds = current_sub_list[1]  # condition of B graph
        A_IDs = []
        B_IDs = []
        for j in range(len(A_conds)):
            A_IDs.extend(Frame_Stim_Dic[A_conds[j]])
        for j in range(len(B_conds)):
            B_IDs.extend(Frame_Stim_Dic[B_conds[j]])
        # Get frame maps.
        current_sub_graph, current_t_graph, current_F_info = Single_Subgraph_Generator(
            aligned_all_tif_name, A_IDs, B_IDs, filter_method, LP_Para,
            HP_Para)

        current_sub_graph = Graph_Tools.Clip_And_Normalize(
            current_sub_graph, show_clip)
        Graph_Tools.Show_Graph(current_sub_graph, current_key + '_SubGraph',
                               output_folder)
        current_t_graph = Graph_Tools.Clip_And_Normalize(
            current_t_graph, show_clip)
        Graph_Tools.Show_Graph(current_t_graph, current_key + '_T_Graph',
                               output_folder)
        OS_Tools.Save_Variable(output_folder,
                               current_key + '_Sub_Info',
                               current_F_info,
                               extend_name='.info')
        # Get cell maps
        cell_info = cell_dic['All_Cell_Information']
        current_cell_sub_graph, current_cell_t_graph, current_cell_info = Single_Cellgraph_Generator(
            dF_F_train, cell_info, show_clip, A_IDs, B_IDs)
        Graph_Tools.Show_Graph(current_cell_sub_graph,
                               current_key + '_Cell_SubGraph', output_folder)
        Graph_Tools.Show_Graph(current_cell_t_graph,
                               current_key + '_Cell_T_Graph', output_folder)
        OS_Tools.Save_Variable(output_folder,
                               current_key + '_Cell_Info',
                               current_cell_info,
                               extend_name='.info')
    #Step7, calculate cell tuning graph.
    if tuning_graph == True:
        print('Not finished yet.')
Code Example #24
File: RFOnline.py  Project: adolescent/2P_Analysis
import My_Wheels.Graph_Operation_Kit as Graph_Tools
import My_Wheels.OS_Tools_Kit as OS_Tools
from My_Wheels.Translation_Align_Function import Translation_Alignment
from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
from My_Wheels.Cell_Find_From_Graph import Cell_Find_And_Plot
import numpy as np
import cv2
#%% First, read in config file. 
# All Read in shall be in this part to avoid bugs = =
f = open('Config.punch','r')
config_info = f.readlines()
del f
frame_folder = config_info[3][:-1]# Remove '\n'
stim_folder = config_info[6][:-1]# Remove '\n'
cap_freq = float(config_info[9])
frame_thres = float(config_info[12])
#%% Second do graph align.
save_folder = frame_folder+r'\Results'
aligned_tif_folder = save_folder+r'\Aligned_Frames'
all_tif_name = OS_Tools.Get_File_Name(frame_folder)
graph_size = np.shape(cv2.imread(all_tif_name[0],-1))
Translation_Alignment([frame_folder],align_range = 10,align_boulder = 40,big_memory_mode=True,graph_shape = graph_size)
aligned_all_tif_name = np.array(OS_Tools.Get_File_Name(aligned_tif_folder))
#%% Third, Stim Frame Align
jmp_step = int(5000//cap_freq)
_,Frame_Stim_Dic = Stim_Frame_Align(stim_folder,frame_thres = frame_thres,jmp_step = jmp_step)
#%% Forth, generate Morpho graph and find cell.
cell_Dic = Cell_Find_And_Plot(save_folder, 'Run_Average_After_Align.tif', 'Morpho_Cell')
cell_mask = (cell_Dic['Cell_Graph'][:,:,0])>0
#%% Fifth, calculate RF reaction.
RF_Data = np.zeros(shape = (5,5,2),dtype = 'f8')# use 5*5 matrix, set 0 are frames, set 1 are cells
loc_ids = np.array([1,26,51,76,101,126,151,176,201,226,251,276])
for i in range(5):# i as vector1
    for j in range(5):# j as vector2
        start_id = i*5+j
Code Example #25
                           Frame_Stim_Dic)
#%%Cell Find from Run01 Morphology graph.
from My_Wheels.Cell_Find_From_Graph import Cell_Find_And_Plot
Cell_Find_And_Plot(r'G:\Test_Data\2P\201121_L76_LM\1-001\Results',
                   'Run_Average_After_Align.tif',
                   'Morpho',
                   find_thres=1.5)
#%% Calculate Spike Train of Run01 Morpho cell into each run.
from My_Wheels.Spike_Train_Generator import Spike_Train_Generator
all_run_folder = [
    r'G:\Test_Data\2P\201121_L76_LM\1-002',
    r'G:\Test_Data\2P\201121_L76_LM\1-003',
    r'G:\Test_Data\2P\201121_L76_LM\1-004'
]
for i in range(3):
    all_tif_name = OS_Tools.Get_File_Name(all_run_folder[i] +
                                          r'\Results\Aligned_Frames')
    cell_information = OS_Tools.Load_Variable(
        all_run_folder[i] +
        r'\Results\Morpho\Morpho.cell')['All_Cell_Information']
    stim_train = OS_Tools.Load_Variable(
        all_run_folder[i] +
        r'\Results\Stim_Fram_Align.pkl')['Original_Stim_Train']
    F_train, dF_F_train = Spike_Train_Generator(all_tif_name,
                                                cell_information,
                                                Base_F_type='nearest_0',
                                                stim_train=stim_train)
    OS_Tools.Save_Variable(all_run_folder[i] + r'\Results\Morpho', 'F_train',
                           F_train)
    OS_Tools.Save_Variable(all_run_folder[i] + r'\Results\Morpho',
                           'dF_F_train', dF_F_train)
#%% Then Get graph of each run.