Code Example #1
def Single_Run_Fvalue_Frame(day_folder, runname):
    '''
    Get a single-run data frame from the .ac file. F values ONLY.

    Parameters
    ----------
    day_folder : (str)
        Day folder of data. Cell data needs to be generated first.
    runname : (str)
        Which run to read, e.g. 'Run001'.

    Returns
    -------
    cell_frame : (pd DataFrame)
        Pandas DataFrame of cell data; rows are cells, columns are frames.

    '''
    ac_fn = ot.Get_File_Name(day_folder, '.ac')[0]
    ac_dic = ot.Load_Variable(ac_fn)
    acn = list(ac_dic.keys())
    cell_frame = pd.DataFrame()
    for i in range(len(acn)):
        c_cn = acn[i]
        tc = ac_dic[c_cn]
        if tc['In_Run'][runname]:
            tc_train = tc[runname]['F_train']
            cell_frame[c_cn] = tc_train
    # Transpose so that rows are cells and columns are frames.
    cell_frame = cell_frame.T
    return cell_frame
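A minimal usage sketch of the function above. The day folder path is borrowed from the later examples and is only illustrative; the .ac cell file must already exist inside it.

run001_frame = Single_Run_Fvalue_Frame(r'K:\Test_Data\2P\210721_L76_2P', 'Run001')
print(run001_frame.shape)  # (cell_num, frame_num): rows are cells, columns are frames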
Code Example #2
def Multi_Run_Fvalue_Cat(day_folder,
                         runlists,
                         rest_time=(600, 600),
                         fps=1.301):

    ac_fn = ot.Get_File_Name(day_folder, '.ac')[0]
    ac_dic = ot.Load_Variable(ac_fn)
    acn = list(ac_dic.keys())
    All_Data_Frame = pd.DataFrame()
    # Get all runs one by one.
    for i in range(len(runlists)):
        c_runname = runlists[i]
        c_run_frame = pd.DataFrame()
        for j in range(len(acn)):
            c_cn = acn[j]
            tc = ac_dic[c_cn]
            if tc['In_Run'][c_runname]:
                tc_train = tc[c_runname]['F_train']
                # Pad the rest period after this run with the cell's last F value.
                if i < (len(runlists) - 1):
                    blank_frames = int(rest_time[i] * fps)
                    blank_array = np.ones(blank_frames) * tc_train[-1]
                    tc_train = np.concatenate((tc_train, blank_array), axis=0)
                c_run_frame[c_cn] = tc_train
        # Transpose so that rows are cells and columns are frames.
        c_run_frame = c_run_frame.T
        All_Data_Frame = pd.concat([All_Data_Frame, c_run_frame], axis=1)
    # Drop cells with missing frames (cells absent from some runs).
    All_Data_Frame = All_Data_Frame.dropna(axis=0, how='any')
    return All_Data_Frame
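A usage sketch matching the call in Code Example #21 below. With three runs and rest_time=(600, 600), each of the first two runs is padded with int(600 * fps) rest frames before concatenation.

cat_frame = Multi_Run_Fvalue_Cat(r'K:\Test_Data\2P\210629_L76_2P',
                                 ['Run001', 'Run002', 'Run003'],
                                 rest_time=(600, 600), fps=1.301)
# int(600 * 1.301) = 780 padded frames follow Run001 and Run002.
print(cat_frame.shape)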
Code Example #3
    def Single_Cell_Plotter(self, cell_name, mode='circle', show_time=5000):
        '''
        Generate a single-cell location map.

        Parameters
        ----------
        cell_name : (str)
            Name of the cell to plot; 'self.all_cell_names' can help.
        mode : ('circle' or 'fill'), optional
            Plotting method. The default is 'circle'.
        show_time : (int), optional
            Display time passed to gt.Show_Graph. The default is 5000.

        '''
        cell_save_path = self.save_folder + r'\Single_Cells'
        ot.mkdir(cell_save_path)
        if self.average_graph is None:
            raise IOError('Average graph is needed to generate the cell location map.')
        c_cell_dic = self.all_cell_dic[cell_name]
        c_cell_info = c_cell_dic['Cell_Info']
        # Then plot cells, in mode we want.
        if mode == 'fill':  # fill the cell area with green.
            base_graph = cv2.cvtColor(self.average_graph,
                                      cv2.COLOR_GRAY2RGB) * 0.7
            cell_y, cell_x = c_cell_info.coords[:, 0], c_cell_info.coords[:, 1]
            base_graph[cell_y, cell_x, 1] += 32768
            base_graph = np.clip(base_graph, 0, 65535).astype('u2')
        elif mode == 'circle':
            base_graph = cv2.cvtColor(self.average_graph, cv2.COLOR_GRAY2RGB)
            loc_y, loc_x = c_cell_info.centroid
            base_graph = cv2.circle(base_graph, (int(loc_x), int(loc_y)),
                                    radius=10,
                                    color=(0, 0, 65535),
                                    thickness=2)
        else:
            raise IOError('Wrong mode, check please.')
        # After plot, annotate cell name.
        base_graph = gt.Graph_Depth_Change(base_graph, 'u1')
        from PIL import Image, ImageDraw, ImageFont
        font = ImageFont.truetype('arial.ttf', 15)
        im = Image.fromarray(base_graph)
        y, x = c_cell_info.centroid
        draw = ImageDraw.Draw(im)
        draw.text((x + 10, y + 10),
                  cell_name, (0, 255, 100),
                  font=font,
                  align='center')
        annotated_graph = np.array(im)
        #base_graph = gt.Clip_And_Normalize(base_graph,5)
        gt.Show_Graph(annotated_graph,
                      cell_name + '_Annotate',
                      cell_save_path,
                      show_time=show_time)
        return True
Code Example #4
    def F_Value_Disp(self):
        F_folder = self.save_folder + r'\_All_F_disps'
        ot.mkdir(F_folder)
        all_cell_F_disp = []
        for i in range(len(self.all_cellnames)):
            c_cell = self.All_Cells[self.all_cellnames[i]]
            all_runnames = list(c_cell['In_Run'].keys())
            all_F_list = []
            for j in range(len(all_runnames)):  # runs recorded for this cell
                c_run = all_runnames[j]
                if c_cell['In_Run'][c_run]:
                    c_F_list = list(c_cell[c_run]['F_train'])
                    all_F_list.extend(c_F_list)
                    self.All_Cells[self.all_cellnames[i]][c_run][
                        'Mean_F'] = c_cell[c_run]['F_train'].mean()
                    self.All_Cells[self.all_cellnames[i]][c_run][
                        'STD_F'] = c_cell[c_run]['F_train'].std()

            # after that, we have all F lists, and can plot general graphs.
            total_avi = np.array(all_F_list).mean()
            total_std = np.array(all_F_list).std()
            fig, ax = plt.subplots(figsize=(6, 4))
            ax.set_title(self.all_cellnames[i] + ' F Distribution')
            ax.hist(all_F_list, bins=50)
            ax.annotate('Average:' + str(round(total_avi, 2)),
                        xycoords='figure fraction',
                        xy=(0.7, 0.83))
            ax.annotate('STD:' + str(round(total_std, 2)),
                        xycoords='figure fraction',
                        xy=(0.7, 0.78))
            self.All_Cells[self.all_cellnames[i]]['Average_F'] = total_avi
            self.All_Cells[self.all_cellnames[i]]['Average_F_std'] = total_std
            fig.savefig(F_folder + '\\' + self.all_cellnames[i] + '_F_disps',
                        dpi=120)
            plt.clf()
            plt.close()
            all_cell_F_disp.append(total_avi)
        fig, ax = plt.subplots(figsize=(6, 4))
        ax.set_title('All Cell F Distribution')
        hc_mean = np.array(all_cell_F_disp).mean()
        hc_std = np.array(all_cell_F_disp).std()
        ax.hist(all_cell_F_disp, bins=25)
        ax.annotate('Average:' + str(round(hc_mean, 2)),
                    xycoords='figure fraction',
                    xy=(0.7, 0.83))
        ax.annotate('STD:' + str(round(hc_std, 2)),
                    xycoords='figure fraction',
                    xy=(0.7, 0.78))
        fig.savefig(F_folder + '\\_All_Cells_F_disps', dpi=120)
Code Example #5
    def __init__(self,
                 day_folder,
                 spon_run='Run001',
                 fps=1.301,
                 passed_band=(0.05, 0.5)):
        fp = fps / 2
        if passed_band[0]:
            HP_par = passed_band[0] / fp
        else:
            HP_par = False
        if passed_band[1]:
            LP_par = passed_band[1] / fp
        else:
            LP_par = False
        self.save_folder = day_folder + r'\_All_Results\Spon_Analyze'
        ot.mkdir(self.save_folder)
        self.base_graph = cv2.imread(day_folder + r'\Global_Average.tif', -1)
        self.fps = fps
        cell_file_name = ot.Get_File_Name(day_folder, '.ac')[0]
        cell_dic = ot.Load_Variable(cell_file_name)
        all_cell_name = list(cell_dic.keys())
        spon_train_F = {}
        spon_train_dF = {}
        self.all_cell_info = {}
        for i in range(len(all_cell_name)):
            tc = cell_dic[all_cell_name[i]]
            self.all_cell_info[all_cell_name[i]] = tc['Cell_Info']
            if tc['In_Run'][spon_run]:
                c_name = tc['Name']
                c_train = tc[spon_run]['F_train']
                # Apply the band-pass filter to the raw F train.
                filted_c_train = Filters.Signal_Filter(c_train,
                                                       filter_para=(HP_par,
                                                                    LP_par))
                spon_train_F[c_name] = filted_c_train
                average_F = filted_c_train.mean()
                c_dF_series = (filted_c_train - average_F) / average_F
                c_dF_series = np.clip(
                    c_dF_series,
                    c_dF_series.mean() - c_dF_series.std() * 3,
                    c_dF_series.mean() + c_dF_series.std() * 3)
                spon_train_dF[c_name] = c_dF_series
        self.Spon_Data_Frame_Raw = pd.DataFrame(spon_train_F).T
        self.Spon_Data_Frame_centered = pd.DataFrame(spon_train_dF).T
        self.spon_cellname = list(self.Spon_Data_Frame_centered.index)
        self.frame_num = self.Spon_Data_Frame_centered.shape[1]

        del cell_dic
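A small sketch of the band-pass normalization used in the constructor above: each cutoff in passed_band (Hz) is divided by the Nyquist frequency fps / 2 before being handed to Filters.Signal_Filter.

fps = 1.301
passed_band = (0.05, 0.5)          # (high-pass cutoff, low-pass cutoff) in Hz
nyquist = fps / 2                  # 0.6505 Hz
HP_par = passed_band[0] / nyquist  # ~0.0769, normalized high-pass parameter
LP_par = passed_band[1] / nyquist  # ~0.7687, normalized low-pass parameter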
Code Example #6
 def Generate_Cells(self):
     self.Cell_Struct_Generator()
     self.Firing_Trains()
     self.Condition_Data()
     self.F_Value_Disp()
     ot.Save_Variable(self.save_folder, self.cell_prefix + 'All_Cells',
                      self.All_Cells, '.ac')
Code Example #7
    def Cell_Struct_Generator(self, mask=r'\Location_Mask.tif'):
        self.All_Cells = {}  # output variable, including all cell information.
        for i in range(self.cell_num):
            c_cell_name = self.cell_prefix + ot.Bit_Filler(i, 4)
            self.All_Cells[c_cell_name] = {}  # single cell working space.
            self.All_Cells[c_cell_name]['Name'] = c_cell_name
            self.All_Cells[c_cell_name]['Cell_Info'] = self.cell_infos[i]
            self.All_Cells[c_cell_name]['Cell_Area'] = self.cell_infos[
                i].convex_area
            # Then determine whether this cell appears in each run.
            self.All_Cells[c_cell_name]['In_Run'] = {}
            for j in range(len(self.all_runsubfolders)):
                c_sp = self.all_runsubfolders[j] + '\Results'
                c_mask = cv2.imread(c_sp + mask, -1)
                c_mask = c_mask > (c_mask.max() / 2)  # binarize the mask at half of its max value

                cell_location = self.cell_infos[i].coords
                inmask_area = c_mask[cell_location[:, 0],
                                     cell_location[:, 1]].sum()
                if inmask_area == self.cell_infos[i].area:
                    self.All_Cells[c_cell_name]['In_Run'][
                        self.all_runnames[j]] = True
                    self.All_Cells[c_cell_name][self.all_runnames[j]] = {}
                else:
                    self.All_Cells[c_cell_name]['In_Run'][
                        self.all_runnames[j]] = False
            self.all_cellnames = list(self.All_Cells.keys())
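A toy sketch of the in-mask test above: a cell counts as 'In_Run' only when every pixel of its coords lies inside that run's location mask. The array values here are made up for illustration.

import numpy as np

toy_mask = np.zeros((5, 5), dtype=bool)
toy_mask[1:4, 1:4] = True                         # run's valid imaging area
cell_coords = np.array([[1, 1], [1, 2], [2, 2]])  # (y, x) pixels of one cell
inmask_area = toy_mask[cell_coords[:, 0], cell_coords[:, 1]].sum()
in_run = (inmask_area == len(cell_coords))        # True only if the cell is fully inside the mask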
Code Example #8
    def __init__(self, day_folder,
                 runname,
                 average_graph=None,
                 series_mode='F',
                 filter_para=(0.02, False)):
        '''
        Some basic information for calculation
        
        Parameters
        ----------
        day_folder : (str)
            Day folder of runs. All variables in this folder.
        runname : (str)
            Run name. Format as 'Run001'.
        average_graph : (2D Array), optional
            Average graph in a day. For cell annotate. The default is None.
        series_mode : ('F' or 'dF'), optional
            Which series to use, raw F series or dF series. F is recommended. The default is 'F'.
        filter_para : (2-element tuple), optional
            HP and LP filter parameters. Details in Filters. The default is (0.02, False), ~0.013 Hz HP.

        '''
        # Read in variables
        print('Make sure cell data and SFA data are in the day folder.')
        cell_data_path = ot.Get_File_Name(day_folder,'.ac')[0]
        all_stim_dic_path = ot.Get_File_Name(day_folder,'.sfa')[0]
        self.all_cell_dic = ot.Load_Variable(cell_data_path)
        self.average_graph = average_graph
        self.all_stim_dic = ot.Load_Variable(all_stim_dic_path)
        self.stim_frame_align = self.all_stim_dic[runname]
        # Then get each cell spike train.
        cell_num = len(self.all_cell_dic)
        self.all_cells_train = {}
        self.all_cell_names = list(self.all_cell_dic.keys())
        for i in range(cell_num):
            current_name = self.all_cell_names[i]
            if series_mode == 'F':
                cell_series = self.all_cell_dic[current_name]['F_train'][runname]
            elif series_mode == 'dF':
                cell_series = self.all_cell_dic[current_name]['dF_F_train'][runname]
            else:
                raise IOError('Invalid input mode, please check.')
            cell_series = Filters.Signal_Filter(cell_series, filter_para=filter_para)  # then filter the cell train
            self.all_cells_train[current_name] = cell_series
Code Example #9
def One_Key_Stim_Align(stims_folder):
    '''
    Generate stim-frame alignment for all runs in the stim folder.

    Parameters
    ----------
    stims_folder : (str)
        Folder of all stim files.

    Returns
    -------
    bool
        True when the operation finishes.

    '''
    all_stim_folders = os_tools.Get_Sub_Folders(stims_folder)
    # remove folders that are not stim runs
    for i in range(len(all_stim_folders) - 1, -1, -1):
        if all_stim_folders[i].find('Run') == -1:
            all_stim_folders = np.delete(all_stim_folders, i)
    total_save_path = os_tools.CDdotdot(stims_folder)
    All_Stim_Dic = {}
    # Then generate all align folders.
    for i in range(len(all_stim_folders)):
        current_stim_folder = all_stim_folders[i]
        current_runname = list(
            current_stim_folder[current_stim_folder.
                                find('Run'):current_stim_folder.find('Run') +
                                5])
        current_runname.insert(3, '0')
        current_runname = ''.join(current_runname)
        not_spon = (os_tools.Get_File_Name(current_stim_folder,
                                           file_type='.smr') != [])
        if not_spon:
            _, current_align_dic = Stim_Frame_Align(current_stim_folder)
            os_tools.Save_Variable(current_stim_folder, 'Stim_Frame_Align',
                                   current_align_dic)
            All_Stim_Dic[current_runname] = current_align_dic
        else:
            All_Stim_Dic[current_runname] = None
    os_tools.Save_Variable(total_save_path, '_All_Stim_Frame_Infos',
                           All_Stim_Dic, '.sfa')
    return True
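Usage sketch: One_Key_Stim_Align walks every 'Run' subfolder, aligns runs that contain a '.smr' recording, and writes the combined '.sfa' file one level above the stim folder. The path below is hypothetical.

One_Key_Stim_Align(r'K:\Test_Data\2P\210721_L76_2P\210721_L76_2P_stimuli')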
Code Example #10
    def __init__(self, day_folder, average_graph='Default'):
        '''
        Cell Processor with given inputs.

        Parameters
        ----------
        day_folder : (str)
            Save folder of day data. The All_Cell and All_Stim_Frame_Align files need to be in this folder.
        average_graph : (2D Array,dtype = u16), optional
            Global average graph. Needs to be a 16-bit gray graph. The default is 'Default', meaning the graph is read from the day folder root.

        '''

        print('Cell data processor, make sure you have cell data.')
        cell_data_path = ot.Get_File_Name(day_folder, '.ac')[0]
        all_stim_dic_path = ot.Get_File_Name(day_folder, '.sfa')[0]
        self.all_cell_dic = ot.Load_Variable(cell_data_path)
        if average_graph == 'Default':
            self.average_graph = cv2.imread(
                day_folder + r'\Global_Average.tif', -1)
        else:
            self.average_graph = average_graph
        self.all_stim_dic = ot.Load_Variable(all_stim_dic_path)
        self.all_cell_names = list(self.all_cell_dic.keys())
        self.cell_num = len(self.all_cell_dic)
        self.save_folder = day_folder + r'\_All_Results'
        ot.mkdir(self.save_folder)
Code Example #11
def Cross_Day_Cell_Layout(base_dayfolder, target_dayfolder, base_cellnamelist,
                          target_cellnamelist):
    # Step 1: read global averages and calculate the homography h.
    save_folder = base_dayfolder + r'\_All_Results\Cross_Day'
    ot.mkdir(save_folder)
    base_average_graph = cv2.imread(base_dayfolder + r'\Global_Average.tif',
                                    -1)
    target_average_graph = cv2.imread(
        target_dayfolder + r'\Global_Average.tif', -1)
    merged_graph, _, _, h = Graph_Matcher(base_average_graph,
                                          target_average_graph)
    aligned_graph = cv2.warpPerspective(target_average_graph, h,
                                        base_average_graph.shape)
    gt.Show_Graph(base_average_graph, 'Base_Graph', save_folder)
    gt.Show_Graph(target_average_graph, 'Target_Graph', save_folder)
    gt.Show_Graph(aligned_graph, 'Aligned_Graph', save_folder)
    gt.Show_Graph(merged_graph, 'Merged_Graph', save_folder)
    # Step 2: generate each cell mask separately.
    base_cell_path = ot.Get_File_Name(base_dayfolder, '.ac')[0]
    base_cell = ot.Load_Variable(base_cell_path)
    target_cell_path = ot.Get_File_Name(target_dayfolder, '.ac')[0]
    target_cell = ot.Load_Variable(target_cell_path)
    base_cm = np.zeros(shape=base_average_graph.shape, dtype='u2')
    target_cm = np.zeros(shape=target_average_graph.shape, dtype='u2')
    for i in range(len(base_cellnamelist)):
        bc_name = base_cellnamelist[i]
        bc_cellinfo = base_cell[bc_name]['Cell_Info']
        y_list, x_list = bc_cellinfo.coords[:, 0], bc_cellinfo.coords[:, 1]
        base_cm[y_list, x_list] = 65535
    for i in range(len(target_cellnamelist)):
        tc_name = target_cellnamelist[i]
        tc_cellinfo = target_cell[tc_name]['Cell_Info']
        y_list, x_list = tc_cellinfo.coords[:, 0], tc_cellinfo.coords[:, 1]
        target_cm[y_list, x_list] = 65535
    aligned_target_cm = cv2.warpPerspective(target_cm, h,
                                            base_average_graph.shape)
    gt.Show_Graph(base_cm, 'Base_Cell_Mask', save_folder)
    gt.Show_Graph(target_cm, 'Target_Cell_Mask', save_folder)
    gt.Show_Graph(aligned_target_cm, 'Aligned_Target_Cell_Mask', save_folder)
    # Step 3: merge graphs together
    # Merge
    Layout_Graph = cv2.cvtColor(base_average_graph,
                                cv2.COLOR_GRAY2RGB).astype('f8') * 0.7
    Layout_Graph[:, :, 1] += base_cm
    Layout_Graph[:, :, 2] += aligned_target_cm
    Layout_Graph = np.clip(Layout_Graph, 0, 65535).astype('u2')
    gt.Show_Graph(Layout_Graph, 'Cell_Layout_Merge', save_folder)
    #Base
    Base_Cell_Graph = cv2.cvtColor(base_average_graph,
                                   cv2.COLOR_GRAY2RGB).astype('f8') * 0.7
    Base_Cell_Graph[:, :, 1] += base_cm
    Base_Cell_Graph = np.clip(Base_Cell_Graph, 0, 65535).astype('u2')
    gt.Show_Graph(Base_Cell_Graph, 'Base_Cell_Annotate', save_folder)
    #Target
    Target_Cell_Graph = cv2.cvtColor(base_average_graph,
                                     cv2.COLOR_GRAY2RGB).astype('f8') * 0.7
    Target_Cell_Graph[:, :, 2] += aligned_target_cm
    Target_Cell_Graph = np.clip(Target_Cell_Graph, 0, 65535).astype('u2')
    gt.Show_Graph(Target_Cell_Graph, 'Target_Cell_Annotate', save_folder)
    return h
Code Example #12
 def __init__(self,
              animal_name,
              date,
              day_folder,
              runid_lists,
              location='A',
              Stim_Frame_Align_name='_All_Stim_Frame_Infos.sfa',
              cell_subfolder=r'\_Manual_Cell'):
     print('Align, Cell Find and Stim Frame Align shall be done before this.')
     self.save_folder = day_folder
     self.cell_prefix = animal_name + '_' + date + location + '_'
     self.all_SFA_dic = ot.Load_Variable(day_folder + '\\' +
                                         Stim_Frame_Align_name)
     cell_path = ot.Get_File_Name(day_folder + cell_subfolder, '.cell')[0]
     self.cell_infos = ot.Load_Variable(cell_path)['All_Cell_Information']
     self.cell_num = len(self.cell_infos)
     self.all_runnames = []
     for i in range(len(runid_lists)):
         c_runid = runid_lists[i]
         self.all_runnames.append('Run' + str(ot.Bit_Filler(c_runid, 3)))
     self.all_runsubfolders = lt.List_Annex(
         [day_folder], lt.Run_Name_Producer_2P(runid_lists))
Code Example #13
 def __init__(self, day_folder, average_graph=None):
     print('Make sure cell data and SFA data are in the day folder.')
     cell_data_path = ot.Get_File_Name(day_folder, '.ac')[0]
     all_stim_dic_path = ot.Get_File_Name(day_folder, '.sfa')[0]
     self.all_cell_dic = ot.Load_Variable(cell_data_path)
     self.average_graph = average_graph
     self.all_stim_dic = ot.Load_Variable(all_stim_dic_path)
     self.all_cell_names = list(self.all_cell_dic.keys())
     self.cell_num = len(self.all_cell_dic)
     self.save_folder = day_folder + r'\_All_Results'
     ot.mkdir(self.save_folder)
Code Example #14
 def Firing_Trains(self, align_subfolder=r'\Results\Final_Aligned_Frames'):
     # cycle all runs
     for i in range(len(self.all_runsubfolders)):
         all_tif_name = ot.Get_File_Name(self.all_runsubfolders[i] +
                                         align_subfolder)
         c_F_train, c_dF_F_train = Spike_Train_Generator(
             all_tif_name, self.cell_infos)
         # separate trains into each cell, if the cell is in this run.
         c_runname = self.all_runnames[i]
         for j in range(len(self.all_cellnames)):
             c_cellname = self.all_cellnames[j]
             if c_runname in self.All_Cells[
                     c_cellname]:  # meaning we have this run
                 self.All_Cells[c_cellname][c_runname][
                     'F_train'] = c_F_train[j]
                 self.All_Cells[c_cellname][c_runname][
                     'dF_F_train'] = c_dF_F_train[j]
Code Example #15
def OI_Graph_Cutter(
        OI_Graph_Folder,
        area_mask_path,
        rotate_angle=90,  # clockwise rotation
        OI_Graph_Type='.bmp'):

    all_OI_Map_Name = OS_Tools.Get_File_Name(OI_Graph_Folder,
                                             file_type=OI_Graph_Type)
    OI_Graph_Num = len(all_OI_Map_Name)
    save_folder = OI_Graph_Folder + r'\Cutted_Graph'
    mask_graph = cv2.imread(area_mask_path, 0)  # Read in 8bit gray.
    mask_graph = mask_graph > (mask_graph.max() / 2)
    mask_graph = skimage.morphology.remove_small_objects(mask_graph,
                                                         100,
                                                         connectivity=1)
    for i in range(OI_Graph_Num):
        current_OI_graph = cv2.imread(all_OI_Map_Name[i], -1)
        current_graph_name = all_OI_Map_Name[i].split('\\')[-1][:-4]
        non_zero_loc = np.where(mask_graph > 0)  # unmasked location.
        LU_loc = (non_zero_loc[0].min(), non_zero_loc[1].min())  # left-upper corner
        RD_loc = (non_zero_loc[0].max() + 1, non_zero_loc[1].max() + 1)
        current_masked_OI_graph = current_OI_graph * mask_graph
        cutted_graph = current_masked_OI_graph[LU_loc[0]:RD_loc[0],
                                               LU_loc[1]:RD_loc[1]]
        rotated_graph = Graph_Tools.Graph_Twister(cutted_graph, rotate_angle)
        origin_shape = rotated_graph.shape
        # cv2.resize expects dsize as (width, height), i.e. (columns, rows).
        resized_graph = cv2.resize(rotated_graph,
                                   (origin_shape[1] * 7, origin_shape[0] * 7))
        Graph_Tools.Show_Graph(rotated_graph,
                               current_graph_name,
                               save_folder,
                               show_time=0,
                               graph_formation='.png')
        Graph_Tools.Show_Graph(resized_graph,
                               '_Resized_' + current_graph_name,
                               save_folder,
                               show_time=0,
                               graph_formation='.png')
        # TODO: also add a graph annotating this cut's position in the original image!
    return True
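Usage sketch with hypothetical paths: the OI map folder and the 8-bit area mask are the inputs, and cut plus 7x resized '.png' graphs are written to the 'Cutted_Graph' subfolder.

OI_Graph_Cutter(r'K:\Test_Data\OI\Example_Maps',                # hypothetical OI map folder
                r'K:\Test_Data\OI\Example_Maps\Area_Mask.bmp',  # hypothetical mask path
                rotate_angle=90)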
Code Example #16
    def Condition_Data(self,
                       response_extend=(3, 3),
                       base_frame=[0, 1, 2],
                       filter_para=(0.02, False),
                       ROI_extend=(7, 7),
                       ROI_base_frame=[0, 1, 2, 3, 4],
                       ROI_filter_para=(0.01, False),
                       full_size=(512, 512)):

        # cycle all runs
        for i in range(len(self.all_runnames)):
            c_runname = self.all_runnames[i]
            examp_graph = cv2.imread(
                ot.Get_File_Name(self.all_runsubfolders[i])[0], -1)
            if examp_graph.shape == full_size:
                is_ROI = False
            else:
                is_ROI = True
            # Cycle all cells
            for j in range(len(self.all_cellnames)):
                c_cell_dic = self.All_Cells[self.all_cellnames[j]]
                # This cell is in this run and the run is not spontaneous.
                if (c_runname in c_cell_dic) and (
                        self.all_SFA_dic[c_runname] is not None):
                    t_F_train = c_cell_dic[c_runname]['F_train']
                    if is_ROI:
                        t_CR_Train, t_Raw_CR_Train = Single_Condition_Train_Generator(
                            t_F_train, self.all_SFA_dic[c_runname],
                            ROI_extend[0], ROI_extend[1], ROI_base_frame,
                            ROI_filter_para)
                    else:
                        t_CR_Train, t_Raw_CR_Train = Single_Condition_Train_Generator(
                            t_F_train, self.all_SFA_dic[c_runname],
                            response_extend[0], response_extend[1], base_frame,
                            filter_para)

                    self.All_Cells[self.all_cellnames[j]][c_runname][
                        'CR_Train'] = t_CR_Train
                    self.All_Cells[self.all_cellnames[j]][c_runname][
                        'Raw_CR_Train'] = t_Raw_CR_Train
Code Example #17
 def __init__(self, day_folder, fps, passed_band=(0.05, 0.5)):
     self.day_folder = day_folder
     self.fps = fps
     self.passed_band = passed_band
     ot.mkdir(day_folder + r'\_All_Results')
     self.save_folder = day_folder + r'\_All_Results\Spon_Analyze'
     ot.mkdir(self.save_folder)
     self.base_graph = cv2.imread(day_folder + r'\Global_Average.tif', -1)
     cell_file_name = ot.Get_File_Name(day_folder, '.ac')[0]
     self.cell_dic = ot.Load_Variable(cell_file_name)
     self.all_cell_name = list(self.cell_dic.keys())
     self.all_cell_num = len(self.all_cell_name)
     # get all cell information.
     self.all_cell_info = {}
     for i in range(len(self.all_cell_name)):
         self.all_cell_info[self.all_cell_name[i]] = self.cell_dic[
             self.all_cell_name[i]]['Cell_Info']
Code Example #18
#%% Get Cell From Manual
from Cell_Find_From_Graph import Cell_Find_From_Mannual
All_Cell = Cell_Find_From_Mannual(r'K:\Test_Data\2P\210721_L76_2P\_Manual_Cell\Cell_Mask.png',
                                  average_graph_path=r'K:\Test_Data\2P\210721_L76_2P\_Manual_Cell\Global_Average.tif',boulder = 15)
#%% Get All Cell Dic (except Run03)
from Standard_Cell_Generator import Standard_Cell_Generator
Scg = Standard_Cell_Generator('L76', '210721', r'K:\Test_Data\2P\210721_L76_2P', [1,2,4,5,6,7])
Scg.Generate_Cells()
#%% Get Run03 Cells & Trains
Run03_Cells = Cell_Find_From_Mannual(r'K:\Test_Data\2P\210721_L76_2P\1-003\Results\Cells_Run03\Cell_Mask_For_Run03.png',
                                     average_graph_path=r'K:\Test_Data\2P\210721_L76_2P\1-003\Results\Cells_Run03\Run03_Average.tif')
# Calculate F & dF Trains in specific run.
from Spike_Train_Generator import Spike_Train_Generator
import OS_Tools_Kit as ot
Run03_Cell_Info = Run03_Cells['All_Cell_Information']
all_03_tif_name = ot.Get_File_Name(r'K:\Test_Data\2P\210721_L76_2P\1-003\Results\Aligned_Frames')
all_03_F,all_03_dF = Spike_Train_Generator(all_03_tif_name, Run03_Cell_Info)
# Read in cell compare data.
import csv
csv_path = r'K:\Test_Data\2P\210721_L76_2P\1-003\Results\Cell_Compare.csv'
compare_list = []
with open(csv_path) as f:
    f_tsv = csv.reader(f, delimiter=',')
    headers = next(f_tsv)
    for row in f_tsv:
        compare_list.append(row)
#%% Last, add Run03 data into the original cell dictionary.
new_cell_dic = {}
for i in range(len(compare_list)):
    c_cellname = 'L76_210721A_'+ot.Bit_Filler(i)
    tc = all_cells[c_cellname]
Code Example #19
    def Radar_Maps(self,
                   runname,
                   Radar_Cond,
                   on_frames=[3, 4, 5, 6],
                   bais_angle=0,
                   mode='processed',
                   error_bar=True):
        '''
        Generate radar maps of the given tuning properties. Not every stimulus run supports this plot.

        Parameters
        ----------
        runname : (str)
            Run to use, in format 'Run001'.
        Radar_Cond : (Dic)
            Dictionary of conditions for the radar plot, generated by 'Stim_ID_Combiner'.
        on_frames : (list), optional
            Stim-on frames; the max response over these frames is used as the reaction. ROI runs can differ. The default is [3, 4, 5, 6].
        bais_angle : (float), optional
            Rotation angle of the axes, counter-clockwise. The default is 0.
        mode : ('processed' or 'raw'), optional
            CR or Raw_CR. The default is 'processed'.
        error_bar : bool, optional
            Whether to plot error bars on the graph. The default is True.

        Raises
        ------
        IOError
            If an invalid mode is given.

        Returns
        -------
        bool
            True when all radar maps are generated.

        '''
        radar_folder = self.save_folder + r'\\' + runname + '_Radar_Maps'
        ot.mkdir(radar_folder)
        for i in range(self.cell_num):  # all cells
            c_cellname = self.all_cell_names[i]
            tc = self.all_cell_dic[c_cellname]
            #Is this cell in run?
            if runname not in tc:
                print('Cell ' + c_cellname + ' Not in ' + runname)
                continue
            # Do we have CR train in this cell?
            if 'CR_Train' not in tc[runname]:
                print('Cell ' + c_cellname + ' has no response data.')
                continue
            # get cr trains & plotable data.
            if mode == 'processed':
                cr_train = tc[runname]['CR_Train']
            elif mode == 'raw':
                cr_train = tc[runname]['Raw_CR_Train']
            else:
                raise IOError('Wrong CR Mode.')
            radar_data = SDT.CR_Train_Combiner(cr_train, Radar_Cond)
            all_radar_names = list(radar_data.keys())
            plotable_data = {}
            plotable_data['Names'] = []
            plotable_data['Values'] = np.zeros(len(all_radar_names),
                                               dtype='f8')
            plotable_data['Stds'] = np.zeros(len(all_radar_names), dtype='f8')
            for j in range(len(all_radar_names)):
                c_name = all_radar_names[j]
                plotable_data['Names'].append(c_name)
                c_conds, c_ses = radar_data[c_name].mean(
                    0), radar_data[c_name].std(0) * 2 / np.sqrt(
                        radar_data[c_name].shape[0])
                cutted_conds, cutted_std = c_conds[on_frames], c_ses[on_frames]
                max_ps = np.where(cutted_conds == cutted_conds.max())[0][0]
                plotable_data['Values'][j] = cutted_conds[max_ps]
                plotable_data['Stds'][j] = cutted_std[max_ps]
            # plot radar maps.
            fig = plt.figure(figsize=(8, 8))
            fig.suptitle(c_cellname + '_Radar Maps', fontsize=22)
            ax = plt.axes(polar=True)
            ax.set_theta_zero_location("N")
            ax_num = len(all_radar_names)
            angle_series = 2 * np.pi / 360 * (
                np.linspace(0, 360, ax_num + 1, dtype='f8') + bais_angle)
            ax.set_xticks(angle_series[:-1])
            ax.set_xticklabels(plotable_data['Names'])
            if error_bar == True:
                ax.errorbar(angle_series,
                            np.append(plotable_data['Values'],
                                      plotable_data['Values'][0]),
                            np.append(plotable_data['Stds'],
                                      plotable_data['Stds'][0]),
                            fmt='bo-',
                            ecolor='r')
            else:
                ax.plot(angle_series,
                        np.append(plotable_data['Values'],
                                  plotable_data['Values'][0]),
                        'bo-')  # Add one to close plots.
            # at last, save graphs.
            ot.Save_Variable(radar_folder, c_cellname + '_Radar_Data',
                             plotable_data)
            fig.savefig(radar_folder + r'\\' + c_cellname + '_Radar.png',
                        dpi=180)
            plt.clf()
            plt.close()
        return True
Code Example #20
 def Radar_Maps(self,
                runname,
                Radar_Cond,
                on_frames=[3, 4, 5, 6],
                bais_angle=0,
                mode='processed',
                error_bar=True):
     radar_folder = self.save_folder + r'\\' + runname + '_Radar_Maps'
     ot.mkdir(radar_folder)
     # Cycle all cells
     for i in range(self.cell_num):
         # get cr train
         c_cellname = self.all_cell_names[i]
         if mode == 'processed':
             if runname in self.all_cell_dic[c_cellname]['CR_trains']:
                 cr_train = self.all_cell_dic[c_cellname]['CR_trains'][
                     runname]
             else:
                 cr_train = None
         elif mode == 'raw':
             if runname in self.all_cell_dic[c_cellname]['Raw_CR_trains']:
                 cr_train = self.all_cell_dic[c_cellname]['Raw_CR_trains'][
                     runname]
             else:
                 cr_train = None
         if cr_train is None:
             continue
         # get radar data and std
         radar_data = SDT.CR_Train_Combiner(cr_train, Radar_Cond)
         all_radar_names = list(radar_data.keys())
         plotable_data = {}
         plotable_data['Names'] = []
         plotable_data['Values'] = np.zeros(len(all_radar_names),
                                            dtype='f8')
         plotable_data['Stds'] = np.zeros(len(all_radar_names), dtype='f8')
         for j in range(len(all_radar_names)):
             c_name = all_radar_names[j]
             plotable_data['Names'].append(c_name)
             c_conds, c_stds = radar_data[c_name].mean(
                 0), radar_data[c_name].std(0)
             cutted_conds, cutted_std = c_conds[on_frames], c_stds[
                 on_frames]
             max_ps = np.where(cutted_conds == cutted_conds.max())[0][0]
             plotable_data['Values'][j] = cutted_conds[max_ps]
             plotable_data['Stds'][j] = cutted_std[max_ps]
         # plot radar maps.
         fig = plt.figure(figsize=(8, 8))
         fig.suptitle(c_cellname + '_Radar Maps', fontsize=22)
         ax = plt.axes(polar=True)
         ax.set_theta_zero_location("N")
         ax_num = len(all_radar_names)
         angle_series = 2 * np.pi / 360 * (
             np.linspace(0, 360, ax_num + 1, dtype='f8') + bais_angle)
         ax.set_xticks(angle_series[:-1])
         ax.set_xticklabels(plotable_data['Names'])
         if error_bar == True:
             ax.errorbar(angle_series,
                         np.append(plotable_data['Values'],
                                   plotable_data['Values'][0]),
                         np.append(plotable_data['Stds'],
                                   plotable_data['Stds'][0]),
                         fmt='bo-',
                         ecolor='r')
         else:
             ax.plot(angle_series,
                     np.append(plotable_data['Values'],
                               plotable_data['Values'][0]),
                     'bo-')  # Add one to close plots.
         # at last, save graphs.
         ot.Save_Variable(radar_folder, c_cellname + '_Radar_Data',
                          plotable_data)
         fig.savefig(radar_folder + r'\\' + c_cellname + '_Radar.png',
                     dpi=180)
         plt.clf()
         plt.close()
     return True
Code Example #21
Sr = SP.Single_Run_Spontaneous_Processor(r'K:\Test_Data\2P\210629_L76_2P',
                                         spon_run='Run001')
PCA_Dic = Sr.Do_PCA(3700, 9999)
Sr.Pairwise_Correlation_Plot(Sr.spon_cellname,
                             3700,
                             9999,
                             'All_Before',
                             cor_range=(-0.2, 0.8))
Mu = SP.Multi_Run_Spontaneous_Processor(r'K:\Test_Data\2P\210629_L76_2P',
                                        1.301)

#%% Evaluate cell fluctuation.
import Cell_2_DataFrame as C2D
import Cell_Train_Analyzer.Cell_Activity_Evaluator as CAE
All_Spon_Before = C2D.Multi_Run_Fvalue_Cat(r'K:\Test_Data\2P\210629_L76_2P',
                                           ['Run001', 'Run002', 'Run003'],
                                           rest_time=(600, 600))
spike_count, Z_count = CAE.Spike_Count(All_Spon_Before)

#%% Get tuning properties of this day's runs.
from Stimulus_Cell_Processor.Tuning_Property_Calculator import Tuning_Property_Calculator
import OS_Tools_Kit as ot

Tuning_0629 = Tuning_Property_Calculator(r'K:\Test_Data\2P\210629_L76_2P',
                                         Orien_para=('Run004', 'G8_2P'),
                                         OD_para=('Run006', 'OD_2P'),
                                         Hue_para=('Run007', 'RGLum', False))

ot.Save_Variable(r'K:\Test_Data\2P\210629_L76_2P', 'All_Cell_Tuning',
                 Tuning_0629, '.tuning')
Code Example #22
    def Cell_Response_Maps(self,
                           runname,
                           Condition_dics,
                           mode='processed',
                           stim_on=(3, 6),
                           error_bar=True,
                           figsize='Default',
                           subshape='Default'):
        graph_folder = self.save_folder + r'\\' + runname
        ot.mkdir(graph_folder)
        for i in range(self.cell_num):  # all cells
            c_cellname = self.all_cell_names[i]
            # get cr trains
            if mode == 'processed':
                if runname in self.all_cell_dic[c_cellname]['CR_trains']:
                    cr_train = self.all_cell_dic[c_cellname]['CR_trains'][
                        runname]
                else:
                    cr_train = None
            elif mode == 'raw':
                if runname in self.all_cell_dic[c_cellname]['Raw_CR_trains']:
                    cr_train = self.all_cell_dic[c_cellname]['Raw_CR_trains'][
                        runname]
                else:
                    cr_train = None

            # generate plotable data.
            if cr_train is None:  # no CR train; check the next cell.
                continue
            else:  # combine conditions to get plotable data
                plotable_data = SDT.CR_Train_Combiner(cr_train, Condition_dics)
            # Plot graphs.
            response_plot_dic = {}
            subgraph_num = len(plotable_data)
            all_subgraph_name = list(plotable_data.keys())
            y_max = 0  # track y-axis limits
            y_min = 65535
            for j in range(subgraph_num):
                current_graph_response = plotable_data[all_subgraph_name[j]]
                average_plot = current_graph_response.mean(0)
                average_std = current_graph_response.std(0)
                response_plot_dic[all_subgraph_name[j]] = (average_plot,
                                                           average_std)
                # renew y min and y max.
                if average_plot.min() < y_min:
                    y_min = average_plot.min()
                if average_plot.max() > y_max:
                    y_max = average_plot.max()
            y_range = [y_min - 0.3, y_max + 0.3]
            # Graph Plotting
            if subshape == 'Default':
                col_num = int(np.ceil(np.sqrt(subgraph_num)))
                row_num = int(np.ceil(subgraph_num / col_num))
            else:
                col_num = subshape[1]
                row_num = subshape[0]
            if figsize == 'Default':
                fig, ax = plt.subplots(row_num, col_num,
                                       figsize=(15, 15))  # Initialize graphs:
            else:
                fig, ax = plt.subplots(row_num, col_num, figsize=figsize)
            fig.suptitle(c_cellname + '_Response Maps', fontsize=30)
            for j in range(subgraph_num):
                current_col = j % col_num
                current_row = j // col_num
                current_graph_name = all_subgraph_name[j]
                current_data = response_plot_dic[current_graph_name]
                frame_num = len(current_data[0])
                # Start plot
                ax[current_row, current_col].hlines(y_range[0] + 0.05,
                                                    stim_on[0],
                                                    stim_on[1],
                                                    color="r")
                ax[current_row, current_col].set_ylim(y_range)
                ax[current_row, current_col].set_xticks(range(frame_num))
                ax[current_row, current_col].set_title(current_graph_name)
                # Whether we plot error bar on graph.
                if error_bar == True:
                    ax[current_row, current_col].errorbar(range(frame_num),
                                                          current_data[0],
                                                          current_data[1],
                                                          fmt='bo-',
                                                          ecolor='g')
                else:
                    ax[current_row, current_col].errorbar(range(frame_num),
                                                          current_data[0],
                                                          fmt='bo-')
            # Save plotted graph.
            ot.Save_Variable(graph_folder, c_cellname + '_Response_Data',
                             plotable_data)
            fig.savefig(graph_folder + r'\\' + c_cellname + '_Response.png',
                        dpi=180)
            plt.clf()
            plt.close()
        return True
Code Example #23
import OS_Tools_Kit as ot
import numpy as np
from Cell_Processor import Cell_Processor
import random
import Statistic_Tools as st
import matplotlib.pyplot as plt
from Spontaneous_Processor import Spontaneous_Processor
from Cross_Day_Cell_Layout import Cross_Day_Cell_Layout
from Spontaneous_Processor import Cross_Run_Pair_Correlation
import seaborn as sns
import pandas as pd
import Graph_Tools as gt  # assumed import; gt.Average_From_File / gt.Show_Graph are used below

work_path = r'D:\ZR\_MyCodes\2P_Analysis\_Projects\210616_Annual_Report'
#%% Graph 1: generate average graphs of different runs.
# Use G8 response as graph base.
graph_names_0604 = ot.Get_File_Name(
    r'K:\Test_Data\2P\210604_L76_2P\1-016\Results\Final_Aligned_Frames')
avr_0604 = gt.Average_From_File(graph_names_0604)
graph_names_0123 = ot.Get_File_Name(
    r'K:\Test_Data\2P\210123_L76_2P\1-011\Results\Aligned_Frames')
avr_0123 = gt.Average_From_File(graph_names_0123)
clipped_0604 = np.clip((avr_0604.astype('f8')) * 30, 0, 65535).astype('u2')
clipped_0123 = np.clip((avr_0123.astype('f8')) * 30, 0, 65535).astype('u2')
gt.Show_Graph(clipped_0604, 'Average_0604', work_path)
gt.Show_Graph(clipped_0123, 'Average_0123', work_path)
#%% Graph2, get cell layout of 210401 and 210413
CP_0401 = Cell_Processor(r'K:\Test_Data\2P\210401_L76_2P')
CP_0413 = Cell_Processor(r'K:\Test_Data\2P\210413_L76_2P')
all_cell_name_0401 = CP_0401.all_cell_names
all_cell_name_0413 = CP_0413.all_cell_names
h = Cross_Day_Cell_Layout(r'K:\Test_Data\2P\210401_L76_2P',
                          r'K:\Test_Data\2P\210413_L76_2P', all_cell_name_0401,
Code Example #24
    def Cell_Response_Maps(self,
                           runname,
                           Condition_dics,
                           mode='processed',
                           stim_on=(3, 6),
                           error_bar=True,
                           figsize='Default',
                           subshape='Default'):
        '''
        Cell Response map generator

        Parameters
        ----------
        runname : (str)
            Run to plot, in format 'Run001'.
        Condition_dics : (Dic)
            Condition-ID combiner. This can be generated from Stim_ID_Combiner.
        mode : 'processed' or 'raw', optional
            Determine whether we use the CR or Raw_CR train. The default is 'processed'.
        stim_on : (tuple), optional
            Range of stim on. The default is (3, 6).
        error_bar : bool, optional
            Whether to plot error bars on the graph. The default is True.
        figsize : (tuple), optional
            Size of the figure. Only needed for many conditions. The default is 'Default'.
        subshape : (tuple), optional
            Shape of the subgraph layout, rows * columns. The default is 'Default'.


        '''
        graph_folder = self.save_folder + r'\\' + runname
        ot.mkdir(graph_folder)

        for i in range(self.cell_num):  # all cells
            c_cellname = self.all_cell_names[i]
            tc = self.all_cell_dic[c_cellname]
            #Is this cell in run?
            if runname not in tc:
                print('Cell ' + c_cellname + ' Not in ' + runname)
                continue
            # Do we have CR train in this cell?
            if 'CR_Train' not in tc[runname]:
                print('Cell ' + c_cellname + ' has no response data.')
                continue
            # get cr trains & plotable data.
            if mode == 'processed':
                cr_train = tc[runname]['CR_Train']
            elif mode == 'raw':
                cr_train = tc[runname]['Raw_CR_Train']
            else:
                raise IOError('Wrong CR Mode.')
            plotable_data = SDT.CR_Train_Combiner(cr_train, Condition_dics)
            # Plot graphs.
            response_plot_dic = {}
            subgraph_num = len(plotable_data)
            all_subgraph_name = list(plotable_data.keys())
            y_max = 0  # track y-axis limits
            y_min = 65535
            for j in range(subgraph_num):
                current_graph_response = plotable_data[all_subgraph_name[j]]
                average_plot = current_graph_response.mean(0)
                se_2 = current_graph_response.std(0) / np.sqrt(
                    current_graph_response.shape[0]) * 2
                response_plot_dic[all_subgraph_name[j]] = (average_plot, se_2)
                # renew y min and y max.
                if average_plot.min() < y_min:
                    y_min = average_plot.min()
                if average_plot.max() > y_max:
                    y_max = average_plot.max()
            y_range = [y_min - 0.3, y_max + 0.3]
            # Graph Plotting
            if subshape == 'Default':
                col_num = int(np.ceil(np.sqrt(subgraph_num)))
                row_num = int(np.ceil(subgraph_num / col_num))
            else:
                col_num = subshape[1]
                row_num = subshape[0]
            if figsize == 'Default':
                fig, ax = plt.subplots(row_num, col_num,
                                       figsize=(15, 15))  # Initialize graphs:
            else:
                fig, ax = plt.subplots(row_num, col_num, figsize=figsize)
            fig.suptitle(c_cellname + '_Response Maps', fontsize=30)
            for j in range(subgraph_num):
                current_col = j % col_num
                current_row = j // col_num
                current_graph_name = all_subgraph_name[j]
                current_data = response_plot_dic[current_graph_name]
                frame_num = len(current_data[0])
                # Start plot
                ax[current_row, current_col].hlines(y_range[0] + 0.05,
                                                    stim_on[0],
                                                    stim_on[1],
                                                    color="r")
                ax[current_row, current_col].set_ylim(y_range)
                ax[current_row, current_col].set_xticks(range(frame_num))
                ax[current_row, current_col].set_title(current_graph_name)
                # Whether we plot error bar on graph.
                if error_bar == True:
                    ax[current_row, current_col].errorbar(range(frame_num),
                                                          current_data[0],
                                                          current_data[1],
                                                          fmt='bo-',
                                                          ecolor='g')
                else:
                    ax[current_row, current_col].errorbar(range(frame_num),
                                                          current_data[0],
                                                          fmt='bo-')
            # Save plotted graph.
            ot.Save_Variable(graph_folder, c_cellname + '_Response_Data',
                             plotable_data)
            fig.savefig(graph_folder + r'\\' + c_cellname + '_Response.png',
                        dpi=180)
            plt.clf()
            plt.close()
        return True
Code Example #25
G16_Dic = Stim_ID_Combiner('G16_Dirs')
CP.Cell_Response_Maps('Run013', G16_Dic,subshape = (3,8))
G16_Rad = Stim_ID_Combiner('G16_Radar')
CP.Radar_Maps('Run013', G16_Rad)
OD_Dic = Stim_ID_Combiner('OD_2P')
CP.Cell_Response_Maps('Run010', OD_Dic,subshape = (3,5))
OD_Rad = Stim_ID_Combiner('OD_2P_Radar')
CP.Radar_Maps('Run010', OD_Rad,bais_angle=22.5)
S3D8_Dic = Stim_ID_Combiner('Shape3Dir8_Single')
CP.Cell_Response_Maps('Run015', S3D8_Dic,subshape = (4,8))
S3D8_General = Stim_ID_Combiner('Shape3Dir8_General')
CP.Cell_Response_Maps('Run015', S3D8_General)
H7O4_SC = Stim_ID_Combiner('HueNOrien4_SC',{'Hue':['Red','Yellow','Green','Cyan','Blue','Purple','White']})
CP.Cell_Response_Maps('Run016', H7O4_SC,subshape = (6,7),figsize = (20,20))
All_Black_Cells = CP.Black_Cell_Identifier(['Run010','Run013','Run015','Run016'])
ot.Save_Variable(r'K:\Test_Data\2P\210504_L76_2P', '_All_Black', All_Black_Cells)
#%% Then collect statistics for all single-condition black cells.
for i in range(CP.cell_num):
    CP.Single_Cell_Plotter(CP.all_cell_names[i],show_time = 0)
black_cell_num = len(All_Black_Cells)
all_black_cell_name = list(All_Black_Cells.keys())
OD_neg_cell = []
G16_neg_cell = []
Hue_neg_cell= []
Shape_neg_cell = []
for i in range(black_cell_num):
    c_cell_name = all_black_cell_name[i]
    if 'Run010' in All_Black_Cells[c_cell_name]:
        OD_neg_cell.append(c_cell_name)
    if 'Run013' in All_Black_Cells[c_cell_name]:
        G16_neg_cell.append(c_cell_name)
Code Example #26
    def Do_PCA(self,
               start_time=0,
               end_time=99999,
               plot=True,
               mode='processed'):
        '''
        Do PCA analysis on the spontaneous series of the given time window.

        Parameters
        ----------
        start_time : int, optional
            Second at which the used series starts. The default is 0.
        end_time : int, optional
            Second at which the used series ends. The default is 99999.
        plot : bool, optional
            Whether to save PC maps and the variance curve. The default is True.
        mode : 'processed' or 'raw', optional
            Which series the PCA is run on. The default is 'processed'.

        Returns
        -------
        PCA_Dic : Dic
            Dictionary of PCA information.

        '''
        print('Do PCA for spontaneous cells')
        PCA_Dic = {}
        data_use = self.Series_Select(start_time, end_time, mode)
        data_for_pca = np.array(data_use).T
        pca = decomposition.PCA()
        pca.fit(data_for_pca)

        PCA_Dic['All_Components'] = pca.components_
        PCA_Dic['Variance_Ratio'] = pca.explained_variance_ratio_
        PCA_Dic['Variance'] = pca.explained_variance_
        PCA_Dic['Cell_Name_List'] = self.spon_cellname
        # plot the accumulated variance curve of the PCA results.
        accumulated_ratio = np.zeros(len(PCA_Dic['Variance_Ratio']),
                                     dtype='f8')
        accumulated_variance = np.zeros(len(PCA_Dic['Variance']), dtype='f8')
        random_ratio = np.zeros(len(PCA_Dic['Variance_Ratio']), dtype='f8')
        for i in range(len(accumulated_ratio) - 1):
            accumulated_ratio[
                i + 1] = accumulated_ratio[i] + PCA_Dic['Variance_Ratio'][i]
            accumulated_variance[
                i + 1] = accumulated_variance[i] + PCA_Dic['Variance'][i]
            random_ratio[i + 1] = (i + 1) / len(accumulated_ratio)
        PCA_Dic['Accumulated_Variance_Ratio'] = accumulated_ratio
        PCA_Dic['Accumulated_Variance'] = accumulated_variance

        if plot == True:
            pca_save_folder = self.save_folder + r'\PC_Graphs'
            ot.mkdir(pca_save_folder)
            for i in range(len(pca.components_[:, 0])):
                visual_data, folded_map, gray_graph = self.Component_Visualize(
                    PCA_Dic['All_Components'][i, :])
                fig = plt.figure(figsize=(15, 15))
                plt.title('PC' + str(i + 1), fontsize=36)
                fig = sns.heatmap(visual_data,
                                  square=True,
                                  yticklabels=False,
                                  xticklabels=False,
                                  center=0)
                fig.figure.savefig(pca_save_folder + '\PC' + str(i + 1) +
                                   '.png')
                plt.clf()
                cv2.imwrite(
                    pca_save_folder + '\PC' + str(i + 1) + '_Folded.tif',
                    folded_map)
                cv2.imwrite(pca_save_folder + '\PC' + str(i + 1) + '_Gray.jpg',
                            gray_graph)

            fig, ax = plt.subplots(figsize=(8, 6))
            plt.title('Accumulated Variance')
            plt.plot(range(len(accumulated_ratio)), accumulated_ratio)
            plt.plot(range(len(accumulated_ratio)), random_ratio)
            plt.savefig(pca_save_folder + '\_ROC.png')
            ot.Save_Variable(pca_save_folder, 'PCA_Dic', PCA_Dic)
        return PCA_Dic
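A short sketch of reading the returned PCA_Dic, following the Do_PCA call shown in Code Example #21 (Sr is the Single_Run_Spontaneous_Processor instance constructed there).

PCA_Dic = Sr.Do_PCA(3700, 9999)
# Number of leading components whose summed variance ratio first reaches 80%.
n_pc_80 = np.searchsorted(PCA_Dic['Accumulated_Variance_Ratio'], 0.8)
print('Components for 80% variance:', n_pc_80)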
Code Example #27
def Stim_Frame_Align(
    stim_folder,
    stim_thres=2,
    frame_thres=1,
    jmp_step=3000,
    head_extend=1,
    tail_extend=0,
):
    """
    Get stim belongings of every frame.

    Parameters
    ----------
    stim_folder : (str)
        Stimulus data folder. The '.smr' file and '.txt' file shall be in the same folder.
    stim_thres : (number), optional
        Threshold voltage used to binarize the square wave. The default is 2.
    frame_thres : (number), optional
        Threshold voltage used to binarize the triangle wave. The default is 1.
    jmp_step : (int), optional
        How many points to jump after finding a frame. Usually, 10000 points = 1 s.
    head_extend : (int), optional
        Number of frames regarded as stim-on before the stim. Positive extends frame-on, negative cuts.
    tail_extend : (int), optional
        Number of frames regarded as stim-on after the stim. Positive extends frame-on, negative cuts.

    Returns
    -------
    Frame_Stim_Sequence : (list)
        List type of frame belongings. This can be used if ISI base changes.
    Frame_Stim_Dictionary : (Dictionary)
        Dictionary type. This Dictionary have stim id belonged frames. Can be used directly.
    """
    # Step 1, read in data.
    smr_name = os_tools.Get_File_Name(stim_folder, file_type='.smr')[0]
    frame_train = os_tools.Spike2_Reader(smr_name,
                                         physical_channel=3)['Channel_Data']
    stim_train = os_tools.Spike2_Reader(smr_name,
                                        physical_channel=0)['Channel_Data']
    txt_name = os_tools.Last_Saved_name(stim_folder, file_type='.txt')

    # Step 2, square wave series processing
    binary_stim_train = (stim_train > stim_thres).astype('i4')
    cutted_stim_list = list(
        mit.split_when(binary_stim_train, lambda x, y: (x - y) == -1))
    # If the recording stops at a high voltage level, zero out the last square (it becomes -1 after the offset below).
    last_part_set = np.unique(cutted_stim_list[-1])
    if len(last_part_set) == 1:  # Which means stop at high voltage
        last_part = np.array(cutted_stim_list[-1])
        last_part[:] = 0
        cutted_stim_list[-1] = list(last_part)
    # Combine stimulus lists
    final_stim_list = []
    for i in range(len(cutted_stim_list)):
        current_list = np.dot(cutted_stim_list[i], i + 1) - 1
        final_stim_list.extend(current_list)
    del cutted_stim_list, stim_train, binary_stim_train
    # square wave process done, final_stim_list is stim-time relation.
    # Step3, triangle wave list processing.
    binary_frame_train = (frame_train > frame_thres).astype('i4').ravel()
    dislocation_binary_frame_train = np.append(binary_frame_train[1:], 0)
    frame_time_finder = binary_frame_train - dislocation_binary_frame_train
    stop_point = np.where(frame_time_finder == -1)[
        0]  # Not filtered yet, mis calculation are many.
    # Clean stop points: keep only stops separated by at least jmp_step points.
    all_graph_time = [stop_point[0]]  # Use first stop as first graph.
    last_frame_time = all_graph_time[0]  # First stop
    for i in range(1, len(stop_point)):  # Frame 0 ignored.
        current_time = stop_point[i]
        if (current_time - last_frame_time) > jmp_step:
            all_graph_time.append(current_time)
            last_frame_time = current_time
    all_graph_time = all_graph_time[:-2]  # Last 2 frame may not be saved.
    # Triangle wave process done, all_graph_time is list of every frame time.

    # Step 4: acquire the original frame-stim relation.
    frame_belongings = []
    for i in range(len(all_graph_time)):
        current_graph_time = all_graph_time[i]
        frame_belongings.append(final_stim_list[current_graph_time]
                                [0])  # Frame belong before adjust

    # Step5, Adjust frame stim relation.
    cutted_frame_list = list(
        mit.split_when(frame_belongings, lambda x, y: x != y))
    # Adjust every single part. Extending stim-on means shrinking the neighboring ISI.
    adjusted_frame_list = []
    import My_Wheels.List_Operation_Kit as List_Ops
    # Process head first
    adjusted_frame_list.append(
        List_Ops.List_extend(cutted_frame_list[0], 0, -head_extend))
    # Then Process middle
    for i in range(1,
                   len(cutted_frame_list) -
                   1):  # First and last frame use differently.
        if (i % 2) != 0:  # odd id means stim on.
            adjusted_frame_list.append(
                List_Ops.List_extend(cutted_frame_list[i], head_extend,
                                     tail_extend))
        else:  # even id means ISI.
            adjusted_frame_list.append(
                List_Ops.List_extend(cutted_frame_list[i], -tail_extend,
                                     -head_extend))
    # Process last part then.
    adjusted_frame_list.append(
        List_Ops.List_extend(cutted_frame_list[-1], -tail_extend, 0))
    # After adjustment, combine the lists.
    frame_stim_list = []
    for i in range(len(adjusted_frame_list) -
                   1):  # Ignore last ISI, this might be harmful.
        frame_stim_list.extend(adjusted_frame_list[i])
    # Till now, frame_stim_list is adjusted frame stim relations.

    # Step6, Combine frame with stim id.
    with open(txt_name, 'r') as file:
        data = file.read()
    del file
    stim_sequence = data.split()
    stim_sequence = [int(x) for x in stim_sequence]
    Frame_Stim_Sequence = []
    for i in range(len(frame_stim_list)):
        current_id = frame_stim_list[i]
        if current_id != -1:
            Frame_Stim_Sequence.append(stim_sequence[current_id - 1])
        else:
            Frame_Stim_Sequence.append(-1)
    Frame_Stim_Dictionary = List_Ops.List_To_Dic(Frame_Stim_Sequence)
    Frame_Stim_Dictionary['Original_Stim_Train'] = Frame_Stim_Sequence
    return Frame_Stim_Sequence, Frame_Stim_Dictionary
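Usage sketch with a hypothetical run folder. Stim_Frame_Align returns both the per-frame list and the per-stim-ID dictionary; the dictionary is what One_Key_Stim_Align (Code Example #9) stores into the '.sfa' file.

frame_seq, frame_dic = Stim_Frame_Align(r'K:\Test_Data\2P\210721_L76_2P\Stims\Run001')  # hypothetical path
# Assuming List_To_Dic keys the dictionary by stim id, with -1 marking ISI frames.
isi_frames = frame_dic[-1]
stim1_frames = frame_dic[1]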
Code Example #28
                             })
CP.Cell_Response_Maps('Run016', Hue11_Dic)
Hue_11_SC_Dic = Stim_ID_Combiner('HueNOrien4_SC',
                                 para_dic={
                                     'Hue': [
                                         'Red0.6', 'Red0.5', 'Red0.4',
                                         'Red0.3', 'Red0.2', 'Yellow', 'Green',
                                         'Cyan', 'Blue', 'Purple', 'White'
                                     ]
                                 })
CP.Cell_Response_Maps('Run016',
                      Hue_11_SC_Dic,
                      subshape=(6, 11),
                      figsize=(25, 20))
All_Black = CP.Black_Cell_Identifier(['Run009', 'Run014', 'Run016'])
ot.Save_Variable(r'K:\Test_Data\2P\210423_L76_2P', '_All_Black', All_Black)
#%% Plot all cells
for i in range(CP.cell_num):
    CP.Single_Cell_Plotter(CP.all_cell_names[i], show_time=0)
all_black_cell_name = list(All_Black.keys())
CP.Part_Cell_Plotter(all_black_cell_name)
CP.Part_Cell_Plotter(all_black_cell_name, mode='fill')
black_cell_num = len(All_Black)
#%% Let's look at each run separately.
OD_neg_cell = []
G16_neg_cell = []
Hue_neg_cell = []
for i in range(black_cell_num):
    c_cell_name = all_black_cell_name[i]
    if 'Run009' in All_Black[c_cell_name]:
        OD_neg_cell.append(c_cell_name)