    def __init__(self, day_folder,
                 runname,
                 average_graph=None,
                 series_mode='F',
                 filter_para=(0.02, False)):
        '''
        Some basic information for calculation
        
        Parameters
        ----------
        day_folder : (str)
            Day folder of runs. All variables shall be in this folder.
        runname : (str)
            Run name, formatted as 'Run001'.
        average_graph : (2D Array), optional
            Average graph of the day, used for cell annotation. The default is None.
        series_mode : ('F' or 'dF'), optional
            Which series to use, raw F series or dF series. F is recommended. The default is 'F'.
        filter_para : (2-element tuple), optional
            HP and LP filter parameters. Details in Filters. The default is (0.02,False), ~0.013Hz HP.

        '''
        # Read in variables
        print('Make sure cell data and SFA data are in the day folder.')
        cell_data_path = ot.Get_File_Name(day_folder,'.ac')[0]
        all_stim_dic_path = ot.Get_File_Name(day_folder,'.sfa')[0]
        self.all_cell_dic = ot.Load_Variable(cell_data_path)
        self.average_graph = average_graph
        self.all_stim_dic = ot.Load_Variable(all_stim_dic_path)
        self.stim_frame_align = self.all_stim_dic[runname]
        # Then get each cell spike train.
        cell_num = len(self.all_cell_dic)
        self.all_cells_train = {}
        self.all_cell_names = list(self.all_cell_dic.keys())
        for i in range(cell_num):
            current_name = self.all_cell_names[i]
            if series_mode == 'F':
                cell_series = self.all_cell_dic[current_name]['F_train'][runname]
            elif series_mode == 'dF':
                cell_series = self.all_cell_dic[current_name]['dF_F_train'][runname]
            else:
                raise IOError('Invalid input mode, please check.')
            cell_series = Filters.Signal_Filter(cell_series, filter_para=filter_para)  # Then filter the cell train.
            self.all_cells_train[current_name] = cell_series
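
# Usage sketch: the enclosing class is not shown above, so 'Cell_Processor' below is a
# hypothetical stand-in for it, and the paths are illustrative.
#     day_folder = r'D:\Data\200101'            # day folder holding the .ac / .sfa files
#     runs = Cell_Processor(day_folder, 'Run001', series_mode='F')
#     cell0_train = runs.all_cells_train[runs.all_cell_names[0]]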
def Average_From_File(Name_List,
                      LP_Para=False,
                      HP_Para=False,
                      filter_method=False):
    '''
    Average graphs from files. Filtering is allowed.

    Parameters
    ----------
    Name_List : (list)
        File names of all input graphs.
    LP_Para : (tuple), optional
        Use False to skip. Low pass parameter. The default is False.
    HP_Para : (tuple), optional
        Use False to skip. High pass parameter. The default is False.
    filter_method : (str), optional
        Use False to skip. Filter method. The default is False.

    Returns
    -------
    averaged_graph : (2D Array)
        Averaged graph, same dtype as the input graphs.

    '''
    graph_num = len(Name_List)
    template_graph = cv2.imread(Name_List[0], -1)
    origin_type = template_graph.dtype
    averaged_graph = np.zeros(shape=template_graph.shape, dtype='f8')
    for i in range(graph_num):
        current_graph = cv2.imread(Name_List[i], -1).astype(
            'f8')  # Read in graph as origin depth, and change into f8
        if filter_method != False:  # Meaning we need to filter graph.
            current_graph = Filter.Filter_2D(current_graph, LP_Para, HP_Para,
                                             filter_method)

        averaged_graph += current_graph / graph_num
    averaged_graph = averaged_graph.astype(origin_type)
    return averaged_graph
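
# Usage sketch: average all tif frames of a run, plain and with a Gaussian low-pass.
# The folder path is illustrative; only cv2/numpy (used by this module) plus stdlib glob appear here.
if __name__ == '__main__':
    import glob
    import os
    tif_names = sorted(glob.glob(os.path.join(r'D:\Data\Run001', '*.tif')))  # hypothetical folder
    plain_avg = Average_From_File(tif_names)
    filtered_avg = Average_From_File(tif_names, LP_Para=((5, 5), 1.5),
                                     HP_Para=False, filter_method='Gaussian')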
def Single_Condition_Train_Generator(F_train,
                                     Stim_Frame_Align,
                                     response_head_extend=3,
                                     response_tail_extend=3,
                                     base_frame=[0, 1, 2],
                                     filter_para=(0.02, False)):
    '''
    This function produces single-cell & single-run condition matrices.

    Parameters
    ----------
    F_train : (Array)
        Original F value train.
    Stim_Frame_Align : (Dic)
        Stim Frame Align dictionary. All conditions (except -1) will be calculated later.
    response_head_extend : (int)
        Number of frames appended before stim onset.
    response_tail_extend : (int)
        Number of frames appended after stim offset.
    base_frame : (list), optional
        Frame indices inside each condition clip used as baseline. The default is [0, 1, 2].
    filter_para : (2-element tuple), optional
        Filter parameters. Check Filters for detail. The default is (0.02,False), ~0.013Hz HP.

    Returns
    -------
    sc_dic : (dic)
        Matrix of all condition dF/F values.
    raw_sc_dic : (dic)
        Matrix of raw (baseline-uncorrected) F values for each condition.

    '''
    condition_frames = SDT.Condition_Response_Frames(Stim_Frame_Align,
                                                     response_head_extend,
                                                     response_tail_extend)
    # Extend and filter F_train to avoid indexing errors.
    F_train = np.append(
        F_train,
        F_train[0:response_tail_extend])  # Append head frames to the tail to avoid out-of-range indexing.
    F_train_filted = My_Filter.Signal_Filter(F_train, filter_para=filter_para)
    sc_dic = {}
    raw_sc_dic = {}
    # Make every condition the same length (the shortest one).
    all_conditions = list(condition_frames.keys())
    condition_length = min(
        len(condition_frames[c][0]) for c in all_conditions)
    for i in range(len(all_conditions)):  # Cut conditions that are too long.
        current_condition = condition_frames[all_conditions[i]]
        if len(current_condition[0]) > condition_length:
            for j in range(len(current_condition)):
                current_condition[j] = current_condition[j][:condition_length]
    # Get raw_sc dic & processed sc dic
    for i in range(len(all_conditions)):
        c_condition = all_conditions[i]
        c_frame_lists = condition_frames[c_condition]
        c_F_matrix = np.zeros(shape=(len(c_frame_lists),
                                     len(c_frame_lists[0])),
                              dtype='f8')
        c_raw_F_matrix = np.zeros(shape=(len(c_frame_lists),
                                         len(c_frame_lists[0])),
                                  dtype='f8')
        for j in range(len(c_frame_lists)):
            cs_cond = c_frame_lists[j]
            c_raw_F_matrix[j, :] = F_train_filted[cs_cond]
            c_F_base = F_train_filted[np.array(cs_cond)[base_frame]].mean()
            c_F_matrix[j, :] = np.nan_to_num(
                ((F_train_filted[cs_cond] - c_F_base) / c_F_base))
        raw_sc_dic[c_condition] = c_raw_F_matrix
        # Filter the F matrix: if a trial has over 1/3 of its frames beyond
        # 3.5 std of the condition mean, drop that trial.
        h_thres = c_F_matrix.mean(0) + 3.5 * c_F_matrix.std(0)
        l_thres = c_F_matrix.mean(0) - 3.5 * c_F_matrix.std(0)
        for k in range(c_F_matrix.shape[0] - 1, -1, -1):
            check_series = c_F_matrix[k, :]
            if ((check_series > h_thres).sum() +
                (check_series < l_thres).sum()) > (c_F_matrix.shape[1] / 3):
                c_F_matrix = np.delete(c_F_matrix, k, axis=0)
        sc_dic[c_condition] = c_F_matrix  # Assign once, after trial screening.
    return sc_dic, raw_sc_dic
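
# Usage sketch: build per-condition response matrices for a single cell. The inputs here
# are placeholders: F_train would be one cell's trace (e.g. from Spike_Train_Generator)
# and Stim_Frame_Align the run's frame-align dictionary.
#     F_train = one_cell_F_trace                       # 1D array, hypothetical name
#     Stim_Frame_Align = run_stim_frame_align_dic      # hypothetical name
#     sc_dic, raw_sc_dic = Single_Condition_Train_Generator(
#         F_train, Stim_Frame_Align,
#         response_head_extend=3, response_tail_extend=3,
#         base_frame=[0, 1, 2], filter_para=(0.02, False))
#     cond1_matrix = sc_dic[1]                         # trials x frames dF/F of condition 1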
def Spike_Train_Generator(all_tif_name,
                          cell_information,
                          Base_F_type='most_unactive',
                          stim_train=None,
                          ignore_ISI_frame=1,
                          unactive_prop=0.1,
                          LP_Para=False,
                          HP_Para=False,
                          filter_method=False):
    """
    
    Generate spike trains from graphs. Multiple baseline-finding methods are provided.
    The filter here is a 2D spatial filter; no time-course filter is provided here.

    Parameters
    ----------
    all_tif_name : (list)
        List of all tif graphs.
    cell_information : (list)
        Skimage-generated cell information lists.
    Base_F_type : ('global','most_unactive','before_ISI','begining_ISI','all_ISI','nearest_0','all_0'), optional
        Base F finding method. Description below:
            'global' : Use the average of all frames.
            'most_unactive': Use the least active frames of each cell.
            'before_ISI': Use the ISI immediately before each stim onset as base.
            'begining_ISI': Use the ISI at the beginning of the run as a global base.
            'all_ISI': Use the average of all ISIs as base. Each ISI will be cut based on ignore_ISI_frame.
            'nearest_0': Use the nearest stim id 0 block as base.
            'all_0': Use the average of all id 0 data as base.
        The default is 'most_unactive'.
    stim_train : (list), optional
        Stim id train. If the base type uses stim information, this must be given. The default is None.
    ignore_ISI_frame : (int), optional
        For modes 'before_ISI'/'all_ISI'/'begining_ISI'. How many ISI frames will be ignored. The default is 1.
    unactive_prop : (float), optional
        For mode 'most_unactive'. Proportion of least active frames used. The default is 0.1.
    LP_Para : (tuple), optional
        Use False to skip. Low pass 2D filter parameter. The default is False.
    HP_Para : (tuple), optional
        Use False to skip. High pass 2D filter parameter. The default is False.
    filter_method : (str), optional
        Use False to skip. 2D filter method. The default is False.

    Returns
    -------
    F_value_Dictionary : (Dictionary)
        Original F value dictionary.
    dF_F_trains : (Dictionary)
        Spike train of every cell. Note this is only the spike train; submaps will be processed later.

    """
    # Initialization
    Cell_Num = len(cell_information)
    Frame_Num = len(all_tif_name)
    F_value_Dictionary = {}
    height, width = np.shape(cv2.imread(all_tif_name[0], -1))
    all_graph_matrix = np.zeros(shape=(height, width, Frame_Num), dtype='u2')
    # Step 1, read in all graphs. Do filter if required.
    for i in range(Frame_Num):
        current_graph = cv2.imread(all_tif_name[i], -1)
        if filter_method != False:  # Meaning we need filter here.
            current_graph = My_Filter.Filter_2D(current_graph, LP_Para,
                                                HP_Para, filter_method)
        all_graph_matrix[:, :, i] = current_graph

    # Step 2, generate origin F value list first.
    for i in range(Cell_Num):  # cycle cell
        cell_location = cell_information[i].coords
        cell_area = len(cell_location)
        current_cell_train = all_graph_matrix[
            cell_location[:, 0], cell_location[:, 1], :].astype('f8')
        current_cell_F_train = np.sum(current_cell_train, axis=0) / cell_area
        F_value_Dictionary[i] = current_cell_F_train
    del all_graph_matrix
    # Step3, after getting F Dictionary, it's time to calculate dF/F matrix.
    dF_F_trains = {}
    all_keys = list(F_value_Dictionary.keys())

    if Base_F_type == 'global':
        for i in range(len(all_keys)):
            current_cell_F_train = F_value_Dictionary[all_keys[i]]
            base_F = current_cell_F_train.mean()
            current_spike_train = np.nan_to_num(
                (current_cell_F_train - base_F) / base_F)
            dF_F_trains[all_keys[i]] = current_spike_train

    elif Base_F_type == 'most_unactive':
        for i in range(len(all_keys)):
            current_cell_F_train = F_value_Dictionary[all_keys[i]]
            # Base is avr. of most unactive frames.
            sorted_list = sorted(current_cell_F_train)  # Use this to get mean.
            unactive_frame_num = round(len(sorted_list) * unactive_prop)
            sorted_list = sorted_list[:unactive_frame_num]
            base_F = np.mean(sorted_list)
            current_spike_train = np.nan_to_num(
                (current_cell_F_train - base_F) / base_F)
            dF_F_trains[all_keys[i]] = current_spike_train

    elif Base_F_type == 'before_ISI':  # Use ISI Before stim onset as base.
        if stim_train is None:
            raise IOError('Please input stim train!')
        stim_train = np.asarray(stim_train)
        all_keys = list(F_value_Dictionary.keys())
        cutted_stim_train = list(
            mit.split_when(stim_train, lambda x, y: (x - y) > 0))
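        # split_when starts a new chunk wherever the stim id drops (stim back to -1),
        # so each chunk is one ISI-plus-stim cycle; its ISI frames give the local base.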
        for i in range(len(all_keys)):
            current_cell_train = F_value_Dictionary[all_keys[i]]
            frame_counter = 0
            current_cell_dF_train = []
            for j in range(len(cutted_stim_train)):
                current_stim_train = np.asarray(cutted_stim_train[j])
                current_F_train = np.asarray(current_cell_train[frame_counter:(
                    frame_counter + len(current_stim_train))])
                null_id = np.where(current_stim_train == -1)[0]
                if len(null_id) > 1:
                    null_id = null_id[ignore_ISI_frame:]
                else:
                    warnings.warn("ISI frame less than 2, use all ISIs",
                                  UserWarning)
                current_base = current_F_train[null_id].mean()
                current_dF_train = np.nan_to_num(
                    (current_F_train - current_base) / current_base)
                current_cell_dF_train.extend(current_dF_train)
                # Then add frame counter at last.
                frame_counter = frame_counter + len(cutted_stim_train[j])
            dF_F_trains[all_keys[i]] = np.asarray(current_cell_dF_train)

    elif Base_F_type == 'begining_ISI':  # Use First ISI as global base.
        if stim_train is None:
            raise IOError('Please input stim train!')
        first_stim_id = np.where(np.asarray(stim_train) > 0)[0][0]
        all_keys = list(F_value_Dictionary.keys())
        for i in range(len(all_keys)):
            current_F_series = F_value_Dictionary[all_keys[i]]
            base_F_series = current_F_series[ignore_ISI_frame:first_stim_id]
            base_F = base_F_series.mean()
            current_spike_train = np.nan_to_num(
                (current_F_series - base_F) / base_F)
            dF_F_trains[all_keys[i]] = current_spike_train

    elif Base_F_type == 'all_ISI':
        if stim_train is None:
            raise IOError('Please input stim train!')
        stim_train = np.asarray(stim_train)
        all_ISI_frame_loc = np.where(stim_train == -1)[0]
        cutted_ISI_frame_loc = list(
            mit.split_when(all_ISI_frame_loc, lambda x, y: (y - x) > 1))
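        # split_when cuts the ISI frame indices into contiguous blocks (a gap > 1 frame
        # starts a new block); the first ignore_ISI_frame frames of each block are dropped.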
        used_ISI_id = []
        for i in range(len(cutted_ISI_frame_loc)):
            used_ISI_id.extend(cutted_ISI_frame_loc[i][ignore_ISI_frame:])
        all_keys = list(F_value_Dictionary.keys())
        for i in range(len(all_keys)):
            current_cell_F_train = F_value_Dictionary[all_keys[i]]
            current_base_F = current_cell_F_train[used_ISI_id]
            base_F = current_base_F.mean()
            current_dF_train = np.nan_to_num(
                (current_cell_F_train - base_F) / base_F)
            dF_F_trains[all_keys[i]] = current_dF_train

    elif Base_F_type == 'nearest_0':
        if stim_train is None:
            raise IOError('Please input stim train!')
        stim_train = np.asarray(stim_train)
        blank_location = np.where(stim_train == 0)[0]
        cutted_blank_location = list(
            mit.split_when(blank_location, lambda x, y: (y - x) > 1))
        all_blank_start_frame = []  # This is the start frame of every blank.
        for i in range(len(cutted_blank_location)):
            all_blank_start_frame.append(cutted_blank_location[i][0])
        #%% Get base F of every blank.
        all_keys = list(F_value_Dictionary.keys())
        for i in range(len(all_keys)):
            current_key = all_keys[i]
            current_cell_F_train = F_value_Dictionary[current_key]
            # First, get base F of every blank.
            all_blank_base_F = []  # base F of every blank.
            for j in range(len(cutted_blank_location)):
                all_blank_base_F.append(
                    current_cell_F_train[cutted_blank_location[j]].mean())
            # Then, generate dF train.
            current_dF_train = []
            for j in range(len(current_cell_F_train)):
                current_F = current_cell_F_train[j]
                _, current_base_loc = List_Tools.Find_Nearest(
                    all_blank_start_frame, j)
                current_base = all_blank_base_F[current_base_loc]
                current_dF_F = np.nan_to_num(
                    (current_F - current_base) / current_base)
                current_dF_train.append(current_dF_F)
            dF_F_trains[all_keys[i]] = np.asarray(current_dF_train)

    elif Base_F_type == 'all_0':
        if stim_train is None:
            raise IOError('Please input stim train!')
        stim_train = np.asarray(stim_train)
        all_blank_frame_id = np.where(stim_train == 0)[0]
        all_keys = list(F_value_Dictionary.keys())
        for i in range(len(all_keys)):
            current_cell_F_train = F_value_Dictionary[all_keys[i]]
            current_base = current_cell_F_train[all_blank_frame_id].mean()
            current_dF_train = np.nan_to_num(
                (current_cell_F_train - current_base) / current_base)
            dF_F_trains[all_keys[i]] = current_dF_train
    else:
        raise IOError('Invalid Base_F_type, please check.')

    return F_value_Dictionary, dF_F_trains
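
# Usage sketch: compute dF/F trains from aligned frames with the default 'most_unactive'
# baseline. Paths are illustrative; the cell dictionary with its 'All_Cell_Information'
# key is assumed saved by the cell-finding step, and OS_Tools is the loader alias used
# elsewhere in this codebase.
if __name__ == '__main__':
    import glob
    import os
    tif_names = sorted(glob.glob(os.path.join(
        r'D:\Data\Run001\Results\Aligned_Frames', '*.tif')))          # hypothetical path
    cell_dic = OS_Tools.Load_Variable(r'D:\Data\Run001\Cells.cell')   # hypothetical file
    F_dic, dF_F_dic = Spike_Train_Generator(tif_names,
                                            cell_dic['All_Cell_Information'],
                                            Base_F_type='most_unactive',
                                            unactive_prop=0.1)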
def Video_From_File(data_folder,
                    plot_range=(0, 9999),
                    graph_size=(472, 472),
                    file_type='.tif',
                    fps=15,
                    gain=20,
                    LP_Gaussian=([5, 5], 1.5),
                    frame_annotate=True,
                    cut_boulder=[20, 20, 20, 20]):
    '''
    Write all files in a folder as a video.

    Parameters
    ----------
    data_folder : (str)
        Frame folder. All frames in this folder will be written into the video. Dtype shall be u2 or there will be a problem.
    plot_range : (2-element tuple), optional
        (start, end) range of frames to write. The default is (0,9999).
    graph_size : (2-element tuple), optional
        Frame size AFTER cut. The default is (472,472).
    file_type : (str), optional
        Data type of graph files. The default is '.tif'.
    fps : (int), optional
        Frames per second. The default is 15.
    gain : (int), optional
        Show gain. The default is 20.
    LP_Gaussian : (tuple), optional
        LP Gaussian filter parameter. Only low pass is done. The default is ([5,5],1.5).
    frame_annotate : (bool), optional
        Whether we annotate the frame number on each frame. The default is True.
    cut_boulder : (list), optional
        Border cut of graphs, in UDLR order. The default is [20,20,20,20].


    Returns
    -------
    bool
        True if function processed.

    '''

    all_tif_name = OS_Tools.Get_File_Name(path=data_folder,
                                          file_type=file_type)
    start_frame = plot_range[0]
    end_frame = min(plot_range[1], len(all_tif_name))
    all_tif_name = all_tif_name[start_frame:end_frame]
    graph_num = len(all_tif_name)
    video_writer = cv2.VideoWriter(data_folder + r'\Video.mp4',
                                   cv2.VideoWriter_fourcc(*'mp4v'),  # 'mp4v' matches the .mp4 container; XVID is an AVI codec.
                                   fps, graph_size, 0)
    for i in range(graph_num):
        raw_graph = cv2.imread(all_tif_name[i], -1).astype('f8')
        # Cut graph boulder.
        raw_graph = Graph_Tools.Graph_Cut(raw_graph, cut_boulder)
        # Do gain then
        gained_graph = np.clip(raw_graph.astype('f8') * gain / 256, 0,
                               255).astype('u1')
        # Then do filter.
        if LP_Gaussian != False:
            u1_writable_graph = Filters.Filter_2D(gained_graph, LP_Gaussian,
                                                  False)
        else:
            u1_writable_graph = gained_graph
        if frame_annotate == True:
            cv2.putText(u1_writable_graph, 'Frame = ' + str(i), (250, 30),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255), 1)
        video_writer.write(u1_writable_graph)
    video_writer.release()
    return True
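
# Usage sketch: write the first 600 aligned frames to a video at 15 fps with a mild
# Gaussian low-pass (folder path is illustrative).
if __name__ == '__main__':
    Video_From_File(r'D:\Data\Run001\Results\Aligned_Frames',   # hypothetical folder
                    plot_range=(0, 600),
                    fps=15,
                    gain=20,
                    LP_Gaussian=([5, 5], 1.5),
                    frame_annotate=True)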
def Standard_Stim_Processor(data_folder,
                            stim_folder,
                            sub_dic,
                            alinged_sub_folder=r'\Results\Aligned_Frames',
                            show_clip=3,
                            tuning_graph=False,
                            cell_method='Default',
                            filter_method='Gaussian',
                            LP_Para=((5, 5), 1.5),
                            HP_Para=False,
                            spike_train_path='Default',
                            spike_train_filter_para=(False, False),
                            spike_train_filter_method=False):
    '''
    Generate subtraction graphs, cell graphs and tuning graphs if required.

    Parameters
    ----------
    data_folder : (str)
        Run folder.
    stim_folder : (str)
        Stim file folder or Frame_Stim_Align file folder. Pre-alignment is advised.
    sub_dic : (Dic)
        Subtraction dictionary. This can be generated from My_Wheels.Standard_Parameters.
    alinged_sub_folder : (str), optional
        Subfolder of aligned frames. The default is r'\Results\Aligned_Frames'.
    show_clip : (float), optional
        Clip of graph show. The default is 3.
    tuning_graph : (bool), optional
        Whether we generate tuning graphs of each cell. The default is False.
    cell_method : (str), optional
        Cell finding method. You can input a cell file path here. The default is 'Default'.
    filter_method : (str), optional
        False to skip filter. Kernel function of graph filtering. The default is 'Gaussian'.
    LP_Para : (tuple), optional
        False to skip. Low pass filter of graph. The default is ((5,5),1.5).
    HP_Para : (tuple), optional
        False to skip. High pass filter of graph. A big HP can be very slow! The default is False.
    spike_train_path : (str), optional
        Path of spike train. 'Default' will generate the spike train directly. The default is 'Default'.
    spike_train_filter_para : (tuple), optional
        Signal filter bandpass proportion of spike train. Make sure you really need this. The default is (False,False).
    spike_train_filter_method : (str), optional
        False to skip. Method of signal filtering. The default is False.

    Returns
    -------
    None.

    '''
    # Path Cycle.
    from Cell_Find_From_Graph import On_Off_Cell_Finder
    work_folder = data_folder + r'\Results'
    OS_Tools.mkdir(work_folder)
    aligned_frame_folder = data_folder + alinged_sub_folder
    OS_Tools.mkdir(aligned_frame_folder)

    # Step1, align graphs. If already aligned, just read
    if not os.listdir(aligned_frame_folder):  # if this is a new folder
        print('Aligned data not found. Aligning here..')
        Translation_Alignment([data_folder])
    aligned_all_tif_name = np.array(
        OS_Tools.Get_File_Name(aligned_frame_folder)
    )  # Use numpy array, this is easier for slice.

    # Step2, get stim frame align matrix. If already aligned, just read in the aligned dictionary.
    file_detector = len(stim_folder.split('.'))
    if file_detector == 1:  # Which means input is a folder
        print('Frame Stim not Aligned, aligning here...')
        from My_Wheels.Stim_Frame_Align import Stim_Frame_Align
        _, Frame_Stim_Dic = Stim_Frame_Align(stim_folder)
    else:  # Input is a file
        Frame_Stim_Dic = OS_Tools.Load_Variable(stim_folder)

    # Step3, get cell information
    if cell_method == 'Default':  # meaning we will use On-Off graph to find cell.
        print('Cell information not found. Finding here..')
        cell_dic = On_Off_Cell_Finder(aligned_all_tif_name,
                                      Frame_Stim_Dic,
                                      filter_method=filter_method,
                                      LP_Para=LP_Para,
                                      HP_Para=HP_Para)
    else:
        cell_dic = OS_Tools.Load_Variable(cell_method)

    # Step4, calculate spike_train.
    if spike_train_path != 'Default':
        dF_F_train = OS_Tools.Load_Variable(spike_train_path)
    else:  # meaning we need to calculate spike train from the very begining.

        _, dF_F_train = Spike_Train_Generator(
            aligned_all_tif_name,
            cell_dic['All_Cell_Information'],
            Base_F_type='nearest_0',
            stim_train=Frame_Stim_Dic['Original_Stim_Train'],
            LP_Para=LP_Para,
            HP_Para=HP_Para,
            filter_method=filter_method)
    # Step5, filter spike trains.
    if spike_train_filter_method != False:  # Meaning we need to do train filter.
        for i in range(len(dF_F_train)):
            dF_F_train[i] = My_Filter.Signal_Filter(dF_F_train[i],
                                                    spike_train_filter_method,
                                                    spike_train_filter_para)
    # Step6, get each frame graph and cell graph.
    all_graph_keys = list(sub_dic.keys())
    for i in range(len(sub_dic)):
        output_folder = work_folder + r'\Subtraction_Graphs'
        current_key = all_graph_keys[i]
        current_sub_list = sub_dic[current_key]
        A_conds = current_sub_list[0]  # condition of A graph
        B_conds = current_sub_list[1]  # condition of B graph
        A_IDs = []
        B_IDs = []
        for j in range(len(A_conds)):
            A_IDs.extend(Frame_Stim_Dic[A_conds[j]])
        for j in range(len(B_conds)):
            B_IDs.extend(Frame_Stim_Dic[B_conds[j]])
        # Get frame maps.
        current_sub_graph, current_t_graph, current_F_info = Single_Subgraph_Generator(
            aligned_all_tif_name, A_IDs, B_IDs, filter_method, LP_Para,
            HP_Para)

        current_sub_graph = Graph_Tools.Clip_And_Normalize(
            current_sub_graph, show_clip)
        Graph_Tools.Show_Graph(current_sub_graph, current_key + '_SubGraph',
                               output_folder)
        current_t_graph = Graph_Tools.Clip_And_Normalize(
            current_t_graph, show_clip)
        Graph_Tools.Show_Graph(current_t_graph, current_key + '_T_Graph',
                               output_folder)
        OS_Tools.Save_Variable(output_folder,
                               current_key + '_Sub_Info',
                               current_F_info,
                               extend_name='.info')
        # Get cell maps
        cell_info = cell_dic['All_Cell_Information']
        current_cell_sub_graph, current_cell_t_graph, current_cell_info = Single_Cellgraph_Generator(
            dF_F_train, cell_info, show_clip, A_IDs, B_IDs)
        Graph_Tools.Show_Graph(current_cell_sub_graph,
                               current_key + '_Cell_SubGraph', output_folder)
        Graph_Tools.Show_Graph(current_cell_t_graph,
                               current_key + '_Cell_T_Graph', output_folder)
        OS_Tools.Save_Variable(output_folder,
                               current_key + '_Cell_Info',
                               current_cell_info,
                               extend_name='.info')
    # Step7, calculate cell tuning graph.
    if tuning_graph == True:
        print('Not finished yet.')
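
# Usage sketch: sub_dic maps a map name to ([A condition ids], [B condition ids]); the
# pairing below is hypothetical, real dictionaries come from My_Wheels.Standard_Parameters.
# Condition ids index into the Frame_Stim_Align dictionary.
if __name__ == '__main__':
    sub_dic = {'OD': ([1, 3], [2, 4])}                    # hypothetical A-B pairing
    Standard_Stim_Processor(r'D:\Data\Run001',            # run folder (illustrative)
                            r'D:\Data\Run001\Frame_Stim_Align.sfa',  # pre-aligned stim file
                            sub_dic,
                            tuning_graph=False)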
def Single_Subgraph_Generator(all_tif_name,
                              A_IDs,
                              B_IDs,
                              filter_method='Gaussian',
                              LP_Para=((5, 5), 1.5),
                              HP_Para=False,
                              t_map=True,
                              t_sig=1):
    '''
    Generate single subtraction map of 2P data. A-B graph is generated.

    Parameters
    ----------
    all_tif_name : (list or nparray)
        All graph names. Usually aligned tif names.
    A_IDs : (list)
        List of A IDs.
    B_IDs : (list)
        List of B IDs.
    filter_method : (str), optional
        Can be set False to skip. Filter used before and after subtraction. The default is 'Gaussian'.
    LP_Para : (tuple), optional
        Can be set False to skip. Low pass filter parameter. The default is ((5,5),1.5).
    HP_Para : (tuple), optional
        Can be set False to skip. High pass filter parameter. The default is False.
    t_map : (bool), optional
        Whether a t map is generated. The default is True.
    t_sig : (0~1), optional
        p-value threshold of the significant t map. The default is 1.

    Returns
    -------
    sub_graph : (2D array)
        Subtraction dF/F graph. Raw data; clip and normalize before plotting.
    t_graph : (2D array)
        t-test graph, masked by significance. If t_map == False, this will be None.
    F_info_Dics : (Dic)
        Information dictionary, including origin F & dF/F information of the input graphs.

    '''
    warnings.filterwarnings('ignore')
    F_info_Dics = {}
    all_tif_name = np.array(all_tif_name)  # Change into nparray to slice.
    A_Set_Graph_names = all_tif_name[A_IDs]
    B_Set_Graph_names = all_tif_name[B_IDs]
    # Calculate sub graph.
    F_info_Dics['Graph_Shape'] = np.shape(cv2.imread(all_tif_name[0], -1))
    F_info_Dics['Origin_Data_Type'] = str((cv2.imread(all_tif_name[0],
                                                      -1)).dtype)
    F_info_Dics['Average_A_Graph'] = Graph_Tools.Average_From_File(
        A_Set_Graph_names, LP_Para, HP_Para, filter_method)
    F_info_Dics['Average_B_Graph'] = Graph_Tools.Average_From_File(
        B_Set_Graph_names, LP_Para, HP_Para, filter_method)
    F_info_Dics['dF_Map'] = (F_info_Dics['Average_A_Graph'].astype('f8') -
                             F_info_Dics['Average_B_Graph'].astype('f8'))
    F_info_Dics['Average_dF_value'] = abs(
        F_info_Dics['dF_Map']).mean()  # Average dF value.
    F_info_Dics['Average_dF/F_value'] = F_info_Dics['Average_dF_value'] / (
        F_info_Dics['Average_B_Graph'].mean())
    F_info_Dics['dF/F_Graph'] = np.nan_to_num(
        F_info_Dics['dF_Map'] / F_info_Dics['Average_B_Graph'].astype('f8'))
    sub_graph = F_info_Dics['dF_Map']

    # Then calculate the t value graph.
    if t_map == False:
        F_info_Dics['t_value_map'] = None
        F_info_Dics['p_value_map'] = None
        t_graph = None
    else:
        import random
        sample_size = min(len(A_Set_Graph_names), len(B_Set_Graph_names))
        selected_A_name = np.array(
            random.sample(list(A_Set_Graph_names), sample_size))
        selected_B_name = np.array(
            random.sample(list(B_Set_Graph_names), sample_size))
        A_graph_arrays = np.zeros(shape=(F_info_Dics['Graph_Shape'] +
                                         (sample_size, )),
                                  dtype='f8')
        B_graph_arrays = np.zeros(shape=(F_info_Dics['Graph_Shape'] +
                                         (sample_size, )),
                                  dtype='f8')
        # Then we will fill the (optionally filtered) data into the graph arrays.
        # First, read A and B graphs in together.
        for i in range(sample_size):
            current_a_graph = cv2.imread(selected_A_name[i], -1)
            current_b_graph = cv2.imread(selected_B_name[i], -1)
            if filter_method != False:
                A_graph_arrays[:, :, i] = My_Filter.Filter_2D(
                    current_a_graph, LP_Para, HP_Para, filter_method)
                B_graph_arrays[:, :, i] = My_Filter.Filter_2D(
                    current_b_graph, LP_Para, HP_Para, filter_method)
            else:  # No filter: use raw graphs, otherwise the arrays would stay all zeros.
                A_graph_arrays[:, :, i] = current_a_graph
                B_graph_arrays[:, :, i] = current_b_graph
        # After that, we calculate t and p value pix by pix.
        t_value_graph = np.zeros(shape=F_info_Dics['Graph_Shape'], dtype='f8')
        p_value_graph = np.zeros(shape=F_info_Dics['Graph_Shape'], dtype='f8')
        from scipy.stats import ttest_rel
        for i in range(F_info_Dics['Graph_Shape'][0]):
            for j in range(F_info_Dics['Graph_Shape'][1]):
                t_value_graph[i, j], p_value_graph[i, j] = ttest_rel(
                    A_graph_arrays[i, j, :], B_graph_arrays[i, j, :])
        # avoid nan
        t_graph_origin = np.nan_to_num(t_value_graph)
        p_value_graph = np.nan_to_num(p_value_graph)
        F_info_Dics['t_graph_origin'] = t_graph_origin
        F_info_Dics['p_value_of_t_test'] = p_value_graph
        t_graph = t_graph_origin * (p_value_graph < t_sig)
        F_info_Dics['t_graph'] = t_graph

    return sub_graph, t_graph, F_info_Dics
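
# Usage sketch: build an A-minus-B subtraction map and its t map from aligned frames.
# The frame id lists are illustrative; they normally come from the Frame_Stim_Align
# dictionary, as in Standard_Stim_Processor above.
if __name__ == '__main__':
    import glob
    import os
    aligned_names = sorted(glob.glob(os.path.join(
        r'D:\Data\Run001\Results\Aligned_Frames', '*.tif')))   # hypothetical path
    sub_graph, t_graph, F_info = Single_Subgraph_Generator(
        aligned_names, A_IDs=[10, 11, 12], B_IDs=[20, 21, 22],
        filter_method='Gaussian', LP_Para=((5, 5), 1.5), HP_Para=False)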
def Affine_Aligner_Gaussian(data_folder,
                            base_graph,
                            window_size=1,
                            max_point=50000,
                            good_match_prop=0.3,
                            dist_lim=120,
                            match_checker=1,
                            sector_num=4,
                            write_file=False,
                            save_folder='Default'):
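    '''
    Affine-align every graph in data_folder onto base_graph. Deformation is estimated
    on sliding-window averaged frames (window_size=1 uses raw frames); match parameters
    are passed through to Affine_Core_Point_Equal. Aligned frames, the deformation
    dictionary ('Deform_H') and before/after average graphs are written into save_folder.
    '''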
    if save_folder == 'Default':
        save_folder = data_folder + r'\Results'
    aligned_tif_folder = save_folder + r'\Affined_Frames'
    OS_Tools.mkdir(save_folder)
    OS_Tools.mkdir(aligned_tif_folder)

    all_tif_name = OS_Tools.Get_File_Name(data_folder)
    graph_num = len(all_tif_name)
    graph_shape = cv2.imread(all_tif_name[0], -1).shape
    height, width = graph_shape
    origin_tif_matrix = np.zeros(shape=graph_shape + (graph_num, ), dtype='u2')
    # Read in all tif graphs.
    for i in range(graph_num):
        origin_tif_matrix[:, :, i] = cv2.imread(all_tif_name[i], -1)
    # Then get the sliding-window averaged graphs.
    if window_size == 1:
        slipped_average_matrix = origin_tif_matrix
    else:
        slipped_average_matrix = Filters.Window_Average(
            origin_tif_matrix, window_size=window_size)
    # Use the sliding average to get deformation parameters.
    aligned_tif_matrix = np.zeros(shape=origin_tif_matrix.shape, dtype='u2')
    h_dic = {}  # Deformation parameters
    for i in range(graph_num):
        target = slipped_average_matrix[:, :, i]
        _, current_h = Affine_Core_Point_Equal(target,
                                               base_graph,
                                               max_point=max_point,
                                               good_match_prop=good_match_prop,
                                               sector_num=sector_num,
                                               dist_lim=dist_lim,
                                               match_checker=match_checker)
        h_dic[i] = current_h
        current_deformed_graph = cv2.warpPerspective(
            origin_tif_matrix[:, :, i], current_h, (width, height))
        Graph_Tools.Show_Graph(current_deformed_graph,
                               all_tif_name[i].split('\\')[-1],
                               aligned_tif_folder,
                               show_time=0,
                               graph_formation='')
        aligned_tif_matrix[:, :, i] = current_deformed_graph
    OS_Tools.Save_Variable(save_folder, 'Deform_H', h_dic)
    if write_file == True:
        OS_Tools.Save_Variable(save_folder, 'Affine_Aligned_Graphs',
                               aligned_tif_matrix)
    # At last, generate average graphs
    graph_before_align = origin_tif_matrix.mean(axis=2).astype('u2')
    graph_after_align = aligned_tif_matrix.mean(axis=2).astype('u2')
    graph_before_align = Graph_Tools.Clip_And_Normalize(graph_before_align,
                                                        clip_std=5)
    graph_after_align = Graph_Tools.Clip_And_Normalize(graph_after_align,
                                                       clip_std=5)
    Graph_Tools.Show_Graph(graph_before_align, 'Graph_Before_Affine',
                           save_folder)
    Graph_Tools.Show_Graph(graph_after_align, 'Graph_After_Affine',
                           save_folder)
    return True
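
# Usage sketch: affine-align one run against a chosen base frame (paths illustrative).
if __name__ == '__main__':
    base = cv2.imread(r'D:\Data\Run001\base.tif', -1)     # hypothetical base graph, u1/u2
    Affine_Aligner_Gaussian(r'D:\Data\Run001', base, window_size=1, dist_lim=120)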
def Affine_Core_Point_Equal(target,
                            base,
                            targ_gain=20,
                            max_point=50000,
                            good_match_prop=0.3,
                            sector_num=4,
                            dist_lim=200,
                            Filter=True,
                            match_checker=1):
    '''
    Core function of affine align; selects an equal number of spatial points in each vertical sector.

    Parameters
    ----------
    target : (2D Array, dtype = u1/u2)
        The graph to be aligned.
    base : (2D Array, dtype = u1/u2)
        Base graph. Target will be aligned to this.
    targ_gain : (int), optional
        Gain used to do the align. The default is 20.
    max_point : (int), optional
        Max number of feature points. The default is 50000.
    good_match_prop : (float), optional
        Proportion of good matches among all matches. The default is 0.3.
    sector_num : (int), optional
        Cut the graph vertically into several sectors; each sector gets an equal number of points. The default is 4.
    dist_lim : (int), optional
        Distance limit between 2 matches; matches above this will be ignored. The default is 200.
    Filter : (bool), optional
        Whether we apply a spatial filter. The default is True.
    match_checker : (float), optional
        A checker for the h matrix. A bigger checker tolerates more graph deformation. The default is 1.

    Returns
    -------
    matched_graph : (2D Array)
        Deformed graph. Shape will be the same as the base graph.
    h : (3x3 Array)
        Homography matrix used for the perspective warp.

    '''
    height, width = base.shape
    # Check data type.
    if base.dtype == np.dtype('u2'):
        base = (base / 256).astype('u1')
    elif base.dtype != np.dtype('u1'):
        raise IOError('Base graph dtype shall be u1 or u2.')
    if target.dtype == np.dtype('u1'):
        target = target.astype('u2') * 256
    elif target.dtype != np.dtype('u2'):
        raise IOError('Target graph dtype shall be u1 or u2!')
    # Change graph data type. Only 8bit 1channel graph is allowed.
    if Filter == True:
        target_filted = Filters.Filter_2D(target, HP_Para=False)
    else:
        target_filted = target
    target_8bit = np.clip((target_filted.astype('f8') * targ_gain / 256), 0,
                          255).astype('u1')
    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(max_point)
    keypoints1, descriptors1 = orb.detectAndCompute(target_8bit, None)
    keypoints2, descriptors2 = orb.detectAndCompute(base, None)
    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    # Then eliminate match with bigger dist.
    matches.sort(key=lambda x: x.distance, reverse=False)
    while matches and matches[-1].distance > dist_lim:
        matches.pop(-1)
    # Then get num of good matches and distribute them into quaters.
    good_match_num = round(len(matches) * good_match_prop)
    max_point_per_sector = good_match_num // sector_num
    sector_height = height // sector_num
    sector_counter = np.zeros(sector_num)
    used_matches = []
    for i in range(len(matches)):
        current_y = keypoints2[
            matches[i].trainIdx].pt[1]  # Use y location in the base graph as the indicator.
        # Clamp so points in the bottom remainder rows still land in the last sector.
        current_sector = min(int(current_y // sector_height), sector_num - 1)
        if sector_counter[current_sector] < max_point_per_sector:
            used_matches.append(matches[i])
            sector_counter[current_sector] += 1
    # Extract location of good matches
    points1 = np.zeros((len(used_matches), 2), dtype=np.float32)
    points2 = np.zeros((len(used_matches), 2), dtype=np.float32)
    for i, match in enumerate(used_matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt
    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    if h is None:
        raise IOError('Homography estimation failed, too few good matches.')
    # h check here to avoid bad mistakes. This part can be revised and discussed.
    if abs(h[0, 1]) > match_checker:
        warnings.warn('Bad match, please check parameters.', UserWarning)
    height, width = base.shape
    matched_graph = cv2.warpPerspective(target, h, (width, height))
    return matched_graph, h
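
# Usage sketch: align a single frame to a base graph and inspect the homography
# (both graphs are assumed u1/u2 arrays of the same field of view; paths illustrative).
if __name__ == '__main__':
    base = cv2.imread(r'D:\Data\base.tif', -1)
    target = cv2.imread(r'D:\Data\frame0001.tif', -1)
    matched, h = Affine_Core_Point_Equal(target, base, dist_lim=200, match_checker=1)
    print(h)  # the 3x3 homography fed to cv2.warpPerspective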