Ejemplo n.º 1
0
 def MiniBach_SparcePCA(self,N_component):
     """Fit MiniBatchSparsePCA on the centered data vectors, save the sparse
     components to disk and render them as cell graphs.

     Parameters
     ----------
     N_component : int
         Number of sparse components to extract.
     """
     calculator = skdecomp.MiniBatchSparsePCA(N_component, batch_size=15,
                                              normalize_components=True)
     self.MiniPCs = calculator.fit(self.vector_centered)
     component_rows = self.MiniPCs.components_  # one component per row
     pp.save_variable(component_rows, save_folder + r'\\MiniPCed_Data.pkl')
     print('MiniBach Sparce PCA done, generating graphs')
     self.cell_graph_plot('MINIPCA', component_rows)
Ejemplo n.º 2
0
 def ICA(self,N_component):
     """Fit FastICA on the centered data vectors, save the independent
     components to disk and render them as cell graphs.

     Parameters
     ----------
     N_component : int
         Number of independent components to extract.
     """
     calculator = skdecomp.FastICA(N_component, max_iter=1500, tol=0.03,
                                   whiten=True)
     self.ICAed_data = calculator.fit(self.vector_centered)
     component_rows = self.ICAed_data.components_  # one component per row
     pp.save_variable(component_rows, save_folder + r'\\ICAed_Data.pkl')
     print('ICA calculation done, generating graphs')
     self.cell_graph_plot('ICA', component_rows)
Ejemplo n.º 3
0
 def NMF(self,N_component):  # Non-Negative Matrix Factorization
     """Fit NMF on the raw (non-centered) data vectors — NMF requires
     non-negative input, hence self.vector rather than vector_centered —
     then save the factor components and render them as cell graphs.

     Parameters
     ----------
     N_component : int
         Number of factors to extract.
     """
     calculator = skdecomp.NMF(N_component, max_iter=1500, tol=0.0001)
     self.NMFed_data = calculator.fit(self.vector)
     component_rows = self.NMFed_data.components_
     pp.save_variable(component_rows, save_folder + r'\\NMFed_Data.pkl')
     print('NMF calculation done, generating graphs')
     self.cell_graph_plot('NMF', component_rows)
Ejemplo n.º 4
0
 def FactorAnalysis(self,N_component):
     """Fit a FactorAnalysis model on the centered data vectors, save the
     factors to disk and render them as cell graphs.

     Parameters
     ----------
     N_component : int
         Number of factors to extract.
     """
     calculator = skdecomp.FactorAnalysis(N_component)
     self.Analysized_Factors = calculator.fit(self.vector_centered)
     factor_rows = self.Analysized_Factors.components_
     pp.save_variable(factor_rows, save_folder + r'\\Factor_Analysis_Data.pkl')
     print('Factor Analysis done, generating graphs')
     self.cell_graph_plot('Analyzed Factors', factor_rows)
Ejemplo n.º 5
0
 def MiniBach_DictionaryLearning(self,N_component):
     """Fit MiniBatchDictionaryLearning on the centered data vectors, save
     the learned dictionary atoms and render them as cell graphs.

     Parameters
     ----------
     N_component : int
         Number of dictionary atoms to learn.
     """
     calculator = skdecomp.MiniBatchDictionaryLearning(N_component,
                                                      batch_size=25)
     self.MiniDLs = calculator.fit(self.vector_centered)
     atom_rows = self.MiniDLs.components_  # one dictionary atom per row
     pp.save_variable(atom_rows, save_folder + r'\\Dictionary_Learning_Data.pkl')
     print('MiniBach Dictionary Learning Done, generating graphs')
     self.cell_graph_plot('Dictionary_Learning', atom_rows)
Ejemplo n.º 6
0
 def MiniBach_KMeans(self,N_component):
     """Cluster the centered data vectors with MiniBatchKMeans, save the
     cluster centers and render them as cell graphs.

     Parameters
     ----------
     N_component : int
         Number of clusters.
     """
     calculator = skcluster.MiniBatchKMeans(N_component, batch_size=50)
     self.MiniKM = calculator.fit(self.vector_centered)
     center_rows = self.MiniKM.cluster_centers_  # one cluster center per row
     pp.save_variable(center_rows, save_folder + r'\\Mini_KMeans_Data.pkl')
     print('Mini bach K-means done, generating graphs')
     self.cell_graph_plot('MiniBach_KMeans', center_rows)
    def Cell_Graph(self):
        """Compute every cell's tuning (mean response under stim set A minus
        stim set B) and render it as a 512x512 cell subtraction map.

        Reads self.spike_train (cell x frame), self.frame_set_A/B (lists of
        frame indices) and self.cell_group; writes <map_name>_Cells.pkl and
        a <map_name>_Cell image into self.map_folder.
        """
        # =============================================================================
        #       Defined at start-up:
        #         self.spike_train = pp.read_variable(save_folder+r'\\spike_train.pkl')
        #         self.cell_group = pp.read_variable(save_folder+r'\\Cell_Group.pkl')
        # =============================================================================
        spike_train = np.asarray(self.spike_train, dtype=np.float64)
        # Vectorized over cells: average each cell's response across the A and
        # B frame sets, then take the difference.  keepdims=True preserves the
        # (N_cell, 1) shape the original per-cell loop produced, so the saved
        # pickle and the indexing below are unchanged.
        mean_A = spike_train[:, self.frame_set_A].mean(axis=1, keepdims=True)
        mean_B = spike_train[:, self.frame_set_B].mean(axis=1, keepdims=True)
        cell_tuning = mean_A - mean_B
        pp.save_variable(cell_tuning, self.map_folder + r'\\' + self.map_name +
                         '_Cells.pkl')  # Persist this stimulus's cell data.
        # cell_tuning is signed; scale by the largest magnitude into [-1, 1].
        norm_cell_tuning = cell_tuning / abs(cell_tuning).max()
        clip_min_cell = norm_cell_tuning.mean() - 3 * norm_cell_tuning.std()
        clip_max_cell = norm_cell_tuning.mean() + 3 * norm_cell_tuning.std()
        cell_clipped = np.clip(norm_cell_tuning, clip_min_cell,
                               clip_max_cell)  # Clip the map at mean +- 3 std.
        # Plot the cell subtraction map: gray (127) background, each cell's
        # pixels set to its clipped tuning mapped into 0..254.
        sub_graph_cell = np.ones(shape=(512, 512), dtype=np.float64) * 127
        for i in range(len(cell_clipped)):
            x_list, y_list = pp.cell_location(self.cell_group[i])
            sub_graph_cell[y_list, x_list] = (cell_clipped[i] + 1) * 127
        pp.save_graph(self.map_name + '_Cell', sub_graph_cell, self.map_folder,
                      '.png', 8, 1)
    def Sub_Map(self):
        """Build the A-minus-B average-frame subtraction map and save the raw
        normalized map plus rendered (and Gaussian-filtered) images into
        self.map_folder.
        """
        aligned_tif_name = pp.read_variable(save_folder +
                                            r'\\aligned_tif_name.pkl')
        average_frame_A = np.zeros(shape=(512, 512), dtype=np.float64)
        average_frame_B = np.zeros(shape=(512, 512), dtype=np.float64)
        for i in range(0, len(self.frame_set_A)):  # Average graph of set A.
            temp_frame = np.float64(
                cv2.imread(aligned_tif_name[self.frame_set_A[i]], -1))
            average_frame_A = average_frame_A + temp_frame / len(
                self.frame_set_A)
        for i in range(0, len(self.frame_set_B)):  # Average graph of set B.
            temp_frame = np.float64(
                cv2.imread(aligned_tif_name[self.frame_set_B[i]], -1))
            average_frame_B = average_frame_B + temp_frame / len(
                self.frame_set_B)
        # Subtraction map, clipped at mean +- 3 std.
        sub_graph = average_frame_A - average_frame_B
        clip_min = sub_graph.mean() - 3 * sub_graph.std()
        clip_max = sub_graph.mean() + 3 * sub_graph.std()
        sub_graph_clipped = np.clip(sub_graph, clip_min,
                                    clip_max)
        norm_sub_graph = (sub_graph_clipped - sub_graph_clipped.min()) / (
            sub_graph_clipped.max() - sub_graph_clipped.min())
        # Save the clipped + normalized map itself.
        pp.save_variable(
            norm_sub_graph,
            self.map_folder + r'\\' + self.map_name + '_Graph.pkl')
        real_sub_map = np.uint8(np.clip(norm_sub_graph * 255, 0, 255))
        # FIX: use self.map_name (as in the rest of this class) instead of a
        # bare global 'map_name', which may be undefined or stale.
        pp.save_graph(self.map_name, real_sub_map, self.map_folder, '.png', 8,
                      1)
        # Also save a Gaussian-filtered version of the rendered map.
        sub_map_filtered = scimg.filters.gaussian_filter(real_sub_map, 1)
        pp.save_graph(self.map_name + '_Filtered', sub_map_filtered,
                      self.map_folder, '.png', 8, 1)
Ejemplo n.º 9
0
    def shuffle_in_frame(self, shuffle_count):
        """Shuffle cell identities within every frame while keeping the frame
        series order unchanged, then save the result as
        spike_train_shuffle_in_frame_<shuffle_count>.pkl.

        Parameters
        ----------
        shuffle_count : int
            Index of this shuffle repetition, used in the output file name.
        """
        shuffled = np.zeros(shape=(self.N_cell, self.N_frame),
                            dtype=np.float64)
        for frame_idx in range(self.N_frame):
            # Draw a full-length permutation of this frame's column.
            column = list(self.spike_train[:, frame_idx])
            shuffled[:, frame_idx] = random.sample(column, self.N_cell)
        pp.save_variable(
            shuffled, self.shuffled_folder +
            r'\\spike_train_shuffle_in_frame_' + str(shuffle_count) + '.pkl')
Ejemplo n.º 10
0
    def shuffle_cross_frames(self, shuffle_count):
        """Shuffle every cell's spike train across frames while keeping the
        cell's identity (row) unchanged, then save the result as
        spike_train_shuffle_cross_frames_<shuffle_count>.pkl.

        Parameters
        ----------
        shuffle_count : int
            Index of this shuffle repetition, used in the output file name.
        """
        shuffled = np.zeros(shape=(self.N_cell, self.N_frame),
                            dtype=np.float64)
        for cell_idx in range(self.N_cell):
            # Draw a full-length permutation of this cell's trace.
            trace = list(self.spike_train[cell_idx, :])
            shuffled[cell_idx, :] = random.sample(trace, self.N_frame)
        pp.save_variable(
            shuffled,
            self.shuffled_folder + r'\\spike_train_shuffle_cross_frames_' +
            str(shuffle_count) + '.pkl')
Ejemplo n.º 11
0
    def Stim_Dict_Generation(self):
        """Build self.Frame_Dics, a dict mapping frame number -> stimulus id
        (-1 for 'Stim_Off' frames), from Frame_Stim_Check.pkl, and save it as
        Frame_ID_Charts.pkl in self.save_folder.
        """
        Frame_Stim_Check = pp.read_variable(self.save_folder +
                                            r'\\Frame_Stim_Check.pkl')
        self.Frame_Dics = {}
        for stim_key, frame_list in Frame_Stim_Check.items():
            for frame in frame_list:
                if stim_key == 'Stim_Off':  # Off frames are tagged -1.
                    self.Frame_Dics[frame] = -1
                else:  # Otherwise tag with the numeric stimulus id.
                    self.Frame_Dics[frame] = int(stim_key)
        pp.save_variable(self.Frame_Dics,
                         self.save_folder + r'\Frame_ID_Charts.pkl')
Ejemplo n.º 12
0
 def PCA(self,N_component,whiten_flag):
     """Run PCA on the centered data vectors, save and plot the components,
     and write an explained-variance report.

     Parameters
     ----------
     N_component : int or None
         Number of components to keep; None keeps all components (one per
         feature dimension).
     whiten_flag : bool
         Passed straight to sklearn PCA's 'whiten' option.
     """
     PCA_calculator = skdecomp.PCA(n_components=N_component,
                                   whiten=whiten_flag)
     self.PCAed_data = PCA_calculator.fit(self.vector_centered)
     # .components_ is (n_components, n_features); each row is a unit-norm
     # principal component, so every row can be rendered as one graph.
     all_PCs = self.PCAed_data.components_
     PC_variance_ratio = self.PCAed_data.explained_variance_ratio_
     pp.save_variable(all_PCs, save_folder + r'\\PCAed_Data.pkl')
     print('PCA calculation done, generating graphs')
     self.cell_graph_plot('PCA', all_PCs)
     # Write the variance report; 'with' guarantees the handle is closed even
     # if a write fails.  Fixes the 'expianed'/missing-space typos in the
     # report text.
     with open(self.save_folder + r'\PCA\Frame_Count.txt', 'w') as report:
         for idx, ratio in enumerate(PC_variance_ratio, start=1):
             report.write('PC:' + str(idx) + ' explained ' + str(ratio) +
                          ' of all variance.\n')
 def cell_plot(self):
     """Render all cells as white pixels on a 512x512 black canvas, then for
     every folder in self.save_folder write the graph, a 2x-resized copy, an
     annotated copy (cell numbers drawn on), and the cell-group pickle.
     """
     thres_graph = np.zeros(shape=(512, 512), dtype=np.uint8)
     for cell in self.cell_group:
         x_list, y_list = pp.cell_location(cell)
         thres_graph[y_list, x_list] = 255
     RGB_graph = cv2.cvtColor(thres_graph, cv2.COLOR_GRAY2BGR)  # Gray -> RGB.
     for folder in self.save_folder:
         base_path = folder + r'\\' + self.save_name
         cv2.imwrite(base_path + '.tif', RGB_graph)
         cv2.imwrite(base_path + '_resized.tif',
                     cv2.resize(RGB_graph, (1024, 1024)))
         # Annotate each cell with its number on the resized graph.
         pp.show_cell(base_path + '_resized.tif', self.cell_group)
         pp.save_variable(self.cell_group, base_path + '_Groups.pkl')
Ejemplo n.º 14
0
    def main_calculation(self):
        """For every stimulus graph, build a (3, Frame_Num) band holding
        mean - 2.5*std, mean, and mean + 2.5*std of the shuffled correlation
        for each frame, collect the bands in self.Shuffle_Dictionary and save
        the dictionary to disk.
        """
        self.Shuffle_Dictionary = {}
        # Iterate the stim graphs directly instead of re-listing keys/values
        # on every pass.
        for current_name, current_stim_graph in self.stim_graph_set.items():
            band = np.zeros(shape=[3, self.Frame_Num],
                            dtype=np.float64)  # 3-row series: mean +- 2.5 std.
            for frame_idx in range(self.Frame_Num):
                mean, std = self.single_frame_calculation(
                    current_stim_graph, frame_idx)
                band[0, frame_idx] = mean - 2.5 * std
                band[1, frame_idx] = mean
                band[2, frame_idx] = mean + 2.5 * std
            self.Shuffle_Dictionary[current_name] = band
        pp.save_variable(self.Shuffle_Dictionary,
                         save_folder + r'\\Stim_Shuffle_Correlation.pkl')
 def after_align(self):  # Average the frames after alignment.
     """Average aligned frames per run and globally.

     For every run: read that run's aligned tif names, save them, compute and
     save the run-average graph.  Then compute a global average across all
     runs and save it into every run's result folder.  (Also fixes the
     'Generationg' typo in the progress message.)
     """
     print('Generating averaged graphs...')
     self.aligned_tif_name = []
     global_aligned_tif_name = []
     for run_idx in range(len(self.aligned_frame_folder)):
         # Store each run's tif names in that run's own folder, and collect
         # them all for the global average below.
         run_tif_names = pp.tif_name(self.aligned_frame_folder[run_idx])
         self.aligned_tif_name.append(run_tif_names)
         global_aligned_tif_name.extend(run_tif_names)
         pp.save_variable(run_tif_names,
                          self.save_folder[run_idx] + r'\\aligned_tif_name.pkl')
         run_average = self.frame_average(run_tif_names)
         pp.save_variable(run_average,
                          self.save_folder[run_idx] + r'\\Run_Average_graph.pkl')
         self.save_graph('After_Align_Run' + self.run_lists[run_idx],
                         run_average, self.save_folder[run_idx])
     # Save the global average into every run's folder as well.
     global_average_graph = self.frame_average(global_aligned_tif_name)
     for folder in self.save_folder:
         self.save_graph('After_Align_Global', global_average_graph, folder)
         pp.save_variable(global_average_graph,
                          folder + r'\\Global_Average_graph.pkl')
Ejemplo n.º 16
0
 def correlation_calculation(self):
     """Compute the correlation value for every row of the clustered data and
     save them as R_Values_<correlation_name>.pkl, then plot the distribution.

     NOTE(review): 'clustered_data' is read from enclosing/global scope, not
     from self — verify it is defined before this method runs.
     """
     row_count = np.shape(clustered_data)[0]
     self.r_value = np.zeros(row_count, dtype=np.float64)
     for row_idx in range(row_count):
         self.r_value[row_idx] = self.calculation_unit(row_idx)
     pp.save_variable(self.r_value, self.correlation_folder +
                      r'\\R_Values_' + self.correlation_name + '.pkl')
Ejemplo n.º 17
0
        if 'pool' in self_dict:
            del self_dict['pool']
        return self_dict

    def __setstate__(self, state):
        # Restore the pickled attributes directly into the instance dict.
        # The 'pool' attribute stripped out in __getstate__ is not restored
        # here — presumably it is rebuilt via pool_set(); verify with callers.
        self.__dict__.update(state)

    def main(self):
        """Run the full pipeline: condition the spikes, define the plot axes,
        then plot every cell's graph in parallel via the worker pool."""
        self.condition_spikes()
        self.Axis_Define()
        self.pool_set()
        # One Graph_plot task per row of cell_condition_data.
        self.pool.map(self.Graph_plot,
                      range(np.shape(self.cell_condition_data)[0]))
        self.pool.close()
        self.pool.join()


if __name__ == '__main__':

    start_time = time.time()
    save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-004\results'
    have_blank = True
    cell_type = 'Morphology'
    # Load the spike train produced for this cell-finding method.
    spike_train = pp.read_variable(save_folder + r'\spike_train_' + cell_type +
                                   '.pkl')
    rm = Radar_Map(save_folder, spike_train, have_blank, cell_type)
    rm.main()
    # Persist the per-cell condition tunings computed by Radar_Map.main().
    pp.save_variable(rm.cell_condition_data,
                     save_folder + r'\Cell_Tunings_' + cell_type + r'.pkl')
    finish_time = time.time()
    print('Plot Done, time cost :' + str(finish_time - start_time) + 's')
Ejemplo n.º 18
0
低通滤波器可能会导致相位出现一个延迟,延迟的时间和低通的频率有关。
因此,在处理时我们选择高通滤波。
"""

import General_Functions.my_tools as pp
from scipy import signal
import numpy as np

#%% Parameter settings: the per-experiment filter configuration.
critical_freq = 0.01  # Cutoff frequency: signal above this frequency passes.
save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-004\results'

#%% Default parameters; rarely changed, but worth checking.
order = 10  # Filter order.
capture_rate = 1.301  # Sampling rate: 1.301 for GA; for RG it depends on the bin.
spike_train = pp.read_variable(save_folder + r'\\spike_train_Morphology.pkl')

#%% Filtering core.
# The filter depends only on the parameters above, so design it ONCE instead
# of rebuilding the identical filter inside the per-cell loop.
sos = signal.butter(order,
                    critical_freq,
                    'highpass',
                    fs=capture_rate,
                    output='sos')
# sosfilt filters along the last axis, i.e. each cell's trace independently —
# this replaces the explicit per-row loop with one vectorized call.
spike_train_filtered = signal.sosfilt(sos, spike_train, axis=-1)

pp.save_variable(spike_train_filtered,
                 save_folder + r'\\spike_train_Morphology_filtered.pkl')
        
        
#%%        
if __name__ == '__main__':

    # Attention! Only 1 txt file in stim folder is acceptable.
    start_time = time.time()
    save_folder = r'E:\ZR\Data_Temp\191026_L69_LM\1-010\results'
    #save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\190412_L74_stimuli\Run02_2P_G8\test'
    stim_folder = r'E:\ZR\Data_Temp\191026_L69_LM\191026_L69_Virus_Check\Run10_2P_G8'
    pre_stim_frame = 0  # Frames to drop: square wave already started but stimulus not yet shown.
    after_stim_drift = 0  # Frames after the sawtooth disappears that still count as stimulus.
    sf= Stim_Frame_Align(stim_folder,pre_stim_frame,after_stim_drift)
    sf.stim_file_name()
    sf.spike2_series_extract()
    # Pull the extracted spike2 series out for inspection.
    smr_data_2p = sf.twoP
    smr_data_Stim = sf.Stimuli
    stim_id = sf.stim_id
    sf.Stim_Time_Align()
    sf.Frame_Time_Align()
    Stim_ID_Time = sf.Stim_ID_Time
    Frame_time = sf.frame_time
    print('Stim Frame Aligning...\n')
    sf.run_Stim_Frame_find()
    sf.Stim_Off_Get()
    sf.Frame_adjust()
    # Save the frame -> stimulus check table for downstream steps.
    pp.save_variable(sf.Frame_Stim_Check,save_folder+r'\Frame_Stim_Check.pkl')

    finish_time = time.time()
    print('Task Time Cost:'+str(finish_time-start_time)+'s')
    
Ejemplo n.º 20
0
        self.pool.join()
        print('Calculation Done, Plotting...\n')
        self.plot_initialize()
        self.pool_set()
        self.pool.map(self.plot_spike_train, range(0, self.cell_Num))
        self.pool.close()
        self.pool.join()

    #%%


if __name__ == '__main__':

    save_folder = r'E:\ZR\Data_Temp\191026_L69_LM\1-010\results'
    start_time = time.time()
    print('Spike_Train Calculating...\n')
    cell_group = pp.read_variable(save_folder +
                                  r'\\Cell_Groups_Morphology.pkl')  # Source cells to draw.
    aligned_frame_name = pp.tif_name(save_folder + r'\\Aligned_Frames')
    graph_after_align = pp.read_variable(
        save_folder + r'\Run_Average_graph.pkl')  # NOTE: baseline frame for dF/F.
    cell_find_type = 'Morphology'
    st = Spike_Train(cell_group, aligned_frame_name, graph_after_align,
                     save_folder, 5, cell_find_type)
    st.main()
    spike_train = st.spike_train
    # Persist the spike train, keyed by the cell-finding method.
    pp.save_variable(spike_train,
                     save_folder + r'\spike_train_' + cell_find_type + '.pkl')
    print('Calculation Done!\n')
    finish_time = time.time()
    print('Task Time Cost:' + str(finish_time - start_time) + 's')
Ejemplo n.º 21
0
        base_graph_path = self.save_folder + '\\' + self.save_name
        cv2.imwrite(base_graph_path + '.tif', RGB_graph)
        cv2.imwrite(base_graph_path + '_resized.tif',
                    cv2.resize(RGB_graph, (1024, 1024)))  #把细胞图放大一倍并保存起来
        # pp.show_cell(base_graph_path+'.tif',self.cell_group)# 在细胞图上标上细胞的编号。
        pp.show_cell(base_graph_path + '_resized.tif',
                     self.cell_group)  # 在细胞图上标上细胞的编号。

    def main(self):  # Entry point: run the whole cell-finding pipeline once.
        self.Gauss_generation()
        self.graph_binary()
        self.cell_wash()
        self.show_cell()


if __name__ == '__main__':
    start_time = time.time()  # Task start time.
    show_gain = 32
    save_folder = r'D:\datatemp\190412_L74\test_data\results'
    model_frame_name = 'After_Align_Global.tif'
    save_name = 'Cell_Graph_On_Off'
    cf = Cell_Found(show_gain, save_folder, 1.5, model_frame_name,
                    save_name)  # These two variables can be read from the previous step.
    cf.main()
    cell_group = cf.cell_group
    pp.save_variable(cell_group, save_name + '.pkl')
    #    variable_name = 'Step2_Variable.pkl'
    #    dill.dump_session(variable_name)
    finish_time = time.time()
    print('Task Time Cost:' + str(finish_time - start_time) + 's')