def __init__(self, stim_set_A, stim_set_B, map_name, save_folder, cell_find_type, spike_train, cell_group):
    """Set up a stim-map job: record the stim groups and load inputs.

    stim_set_A / stim_set_B are the two stimulus-condition groups that
    will be subtracted; spike_train and cell_group are paths to pickled
    variables deserialized through pp.
    """
    # Plain configuration first.
    self.stim_set_A = stim_set_A
    self.stim_set_B = stim_set_B
    self.map_name = map_name
    # Output folder for this cell-finding flavour, created up front.
    self.map_folder = save_folder + r'\\Stim_Graphs_' + cell_find_type
    pp.mkdir(self.map_folder)
    # Heavy inputs are deserialized once, at construction time.
    self.spike_train = pp.read_variable(spike_train)
    self.cell_group = pp.read_variable(cell_group)
def Sub_Map(self):
    """Build the A-minus-B subtraction map, clip at ±3σ, normalize, save.

    Averages the aligned frames of set A and set B, subtracts B from A,
    clips the result to mean ± 3*std, normalizes to [0, 1], then writes:
    the raw map (.pkl), an 8-bit .png graph, and a gaussian-filtered .png,
    all into self.map_folder.
    """
    # NOTE(review): `save_folder` is a module-level global here — it is not
    # stored on self by __init__; confirm the intended source of this path.
    aligned_tif_name = pp.read_variable(save_folder + r'\\aligned_tif_name.pkl')
    # Mean image of every frame belonging to each stimulus set.
    average_frame_A = self._average_frames(aligned_tif_name, self.frame_set_A)
    average_frame_B = self._average_frames(aligned_tif_name, self.frame_set_B)
    # Subtraction map, clipped at mean ± 3 std to suppress outliers.
    sub_graph = average_frame_A - average_frame_B
    clip_min = sub_graph.mean() - 3 * sub_graph.std()
    clip_max = sub_graph.mean() + 3 * sub_graph.std()
    sub_graph_clipped = np.clip(sub_graph, clip_min, clip_max)
    # Normalize the clipped map into [0, 1].
    norm_sub_graph = (sub_graph_clipped - sub_graph_clipped.min()) / (
        sub_graph_clipped.max() - sub_graph_clipped.min())
    # Save the raw (clipped + normalized) map.
    pp.save_variable(norm_sub_graph,
                     self.map_folder + r'\\' + self.map_name + '_Graph.pkl')
    # 8-bit rendering for plotting. BUGFIX: the save_graph calls used a bare
    # `map_name` (undefined in this scope, only working via a module global);
    # use self.map_name, consistent with the .pkl path above.
    real_sub_map = np.uint8(np.clip(norm_sub_graph * 255, 0, 255))
    pp.save_graph(self.map_name, real_sub_map, self.map_folder, '.png', 8, 1)
    # Gaussian-filtered rendering of the same map.
    sub_map_filtered = scimg.filters.gaussian_filter(real_sub_map, 1)
    pp.save_graph(self.map_name + '_Filtered', sub_map_filtered,
                  self.map_folder, '.png', 8, 1)

def _average_frames(self, aligned_tif_name, frame_set):
    """Return the float64 mean (512x512) of the listed aligned frames."""
    average_frame = np.zeros(shape=(512, 512), dtype=np.float64)
    for frame_id in frame_set:
        # -1 = read the tif unchanged (keep original bit depth).
        temp_frame = np.float64(cv2.imread(aligned_tif_name[frame_id], -1))
        average_frame = average_frame + temp_frame / len(frame_set)
    return average_frame
def stim_graph_cycle(self):
    """Read every stim-graph .pkl in graph_folder into self.all_stim_graph.

    Keys are file names stripped of their trailing 10 characters (the
    '_Cells.pkl' suffix); values are column 0 of each pickled array.
    """
    graph_paths = pp.file_name(self.graph_folder, '.pkl')
    self.all_stim_graph = {
        path.split('\\')[-1][:-10]: pp.read_variable(path)[:, 0]
        for path in graph_paths
    }
def __init__(self, save_folder, spike_train, have_blank, cell_type):
    """Store the run configuration and load the Frame_Stim_Check chart."""
    # Plain configuration.
    self.save_folder = save_folder
    self.spike_train = spike_train
    self.have_blank = have_blank
    self.cell_type = cell_type
    # Frame/stimulus bookkeeping produced by an earlier processing step.
    self.Frame_Stim_Check = pp.read_variable(save_folder +
                                             r'\Frame_Stim_Check.pkl')
def ID_Configuration(self):
    """Resolve the stim ids in stim_set_A/B into flat frame-id lists.

    Fills self.frame_set_A and self.frame_set_B with every frame number
    belonging to the listed stimulus conditions.

    NOTE(review): `save_folder` is a module-level global here, not an
    attribute of self — confirm that is intended.
    """
    Frame_Stim_Check = pp.read_variable(save_folder +
                                        r'\\Frame_Stim_Check.pkl')
    self.frame_set_A = []
    for stim_id in self.stim_set_A:
        self.frame_set_A.extend(Frame_Stim_Check[stim_id])
    self.frame_set_B = []
    for stim_id in self.stim_set_B:
        self.frame_set_B.extend(Frame_Stim_Check[stim_id])
def __init__(self, save_folder, graph_folder, spike_train, shuffle_flag):
    """Hold paths plus the spontaneous dF/F spike train.

    When shuffle_flag is True, the pre-computed shuffle-correlation
    dictionary is loaded as well.
    """
    self.save_folder = save_folder
    self.graph_folder = graph_folder
    # The spike train of spontaneous dF/F; shape is (cells, frames).
    self.spike_train = spike_train
    self.N_Cell, self.N_frame = np.shape(spike_train)
    self.shuffle_flag = shuffle_flag
    # Explicit `== True` kept from the original to preserve behavior.
    if shuffle_flag == True:
        self.Shuffled_Dictionary = pp.read_variable(
            save_folder + r'\\Stim_Shuffle_Correlation.pkl')
def __init__(self, save_folder, spike_train_name, shuffle_times, shuffle_type):
    """Load the spike train and prepare a folder for shuffled copies.

    shuffle_times: how many shuffled trains to generate.
    shuffle_type: 'in_frame' or 'cross_frames'.
    """
    self.save_folder = save_folder
    self.shuffle_times = shuffle_times
    self.shuffle_type = shuffle_type
    # Deserialize the train once and record its dimensions.
    self.spike_train = pp.read_variable(save_folder + r'\\' + spike_train_name)
    self.N_cell, self.N_frame = np.shape(self.spike_train)
    # All shuffled trains are written under this sub-folder.
    self.shuffled_folder = save_folder + r'\\Shuffled_trains'
    pp.mkdir(self.shuffled_folder)
def read_in(self):
    """Load every stim graph and every shuffled train from disk.

    Fills self.stim_graph_set (name -> pickled graph) and stacks all
    shuffled trains into self.shuffle_matrix with shape
    (shuffle_times, Cell_Num, Frame_Num).
    """
    # Stim graphs: key is the file name minus its 10-char suffix.
    self.stim_graph_set = {
        path.split('\\')[-1][0:-10]: pp.read_variable(path)
        for path in pp.file_name(self.graph_folder, '.pkl')
    }
    # NOTE(review): `save_folder` is a module-level global here, not
    # self.save_folder — confirm this is intended.
    shuffled_folder = save_folder + r'\Shuffled_trains'
    all_shuffled_train_name = pp.file_name(shuffled_folder, '.pkl')
    self.shuffle_times = len(all_shuffled_train_name)
    # Use train 0 to size the stacked matrix.
    first_train = pp.read_variable(all_shuffled_train_name[0])
    self.Cell_Num, self.Frame_Num = np.shape(first_train)
    self.shuffle_matrix = np.zeros(
        shape=(self.shuffle_times, self.Cell_Num, self.Frame_Num),
        dtype=np.float64)
    for idx, train_path in enumerate(all_shuffled_train_name):
        self.shuffle_matrix[idx, :, :] = pp.read_variable(train_path)
def Stim_Dict_Generation(self):
    """Invert Frame_Stim_Check into a frame -> stim-id chart and save it.

    Every frame maps to the int id of the stimulus it belongs to; frames
    listed under 'Stim_Off' map to -1. The result is stored in
    self.Frame_Dics and pickled as Frame_ID_Charts.pkl.
    """
    Frame_Stim_Check = pp.read_variable(self.save_folder +
                                        r'\\Frame_Stim_Check.pkl')
    self.Frame_Dics = {}
    for stim_key, frame_list in Frame_Stim_Check.items():
        for frame in frame_list:
            # Stim-off frames are flagged -1; others get their numeric id.
            self.Frame_Dics[frame] = (-1 if stim_key == 'Stim_Off'
                                      else int(stim_key))
    pp.save_variable(self.Frame_Dics,
                     self.save_folder + r'\Frame_ID_Charts.pkl')
def __init__(self, save_folder, spike_train_name, cell_group_name):
    """Load the spike train and cell group; keep the train transposed.

    self.vector holds the transposed spike train (frames x cells).
    """
    self.save_folder = save_folder
    spike_train = pp.read_variable(self.save_folder + r'\\' + spike_train_name)
    self.cell_group = pp.read_variable(self.save_folder + r'\\' + cell_group_name)
    # Transpose so rows are frames ("vector" form).
    self.vector = spike_train.T
# --- Tail of a plotting method; its `def` line and the for-loop that
# defines `i` are outside this chunk, so the original loop indentation is
# lost — the statements are reproduced flat. ---
# Shaded band between up_lim and down_lim marks the shuffle envelope.
plt.fill_between(range(self.N_frame), up_lim, 0, color='#dfdfdf')
plt.fill_between(range(self.N_frame), down_lim, 0, color='#dfdfdf')
plt.plot(shuffle_avr, color='#8d8d8d')
# Correlation trace for graph `i` (loop variable from the missing loop).
current_plot = self.correlation_calculator(
    self.all_stim_graph[plot_graphs[i]])
plt.plot(current_plot, color=all_colors[i], label=plot_graphs[i])
plt.legend()
plt.savefig(correlation_folder + r'\\Target_Plots.png')
plt.close('all')

if __name__ == '__main__':
    save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-001\results'
    spike_train = pp.read_variable(save_folder +
                                   r'\spike_train_Morphology.pkl')
    graph_folder = r'E:\ZR\Data_Temp\190412_L74_LM\All-Stim-Maps\Run02'
    shuffle_flag = True  # if true, annotate shuffle lines in graph.
    CF = Correlation_Functions(save_folder, graph_folder, spike_train,
                               shuffle_flag)
    CF.stim_graph_cycle()
    CF.plot_correlation_single()
    CF.plot_all()
    all_stim_graph = CF.all_stim_graph
    #%% Manual plot for a targeted comparison of four orientation maps.
    CF.plot_target(graph_1='Orien0-0', graph_2='Orien90-0',
                   graph_3='Orien135-0', graph_4='Orien45-0')
# --- Tail of a Spike_Train method (likely `main`; its `def` line is
# outside this chunk): finish the calculation pool, then plot every
# cell's spike train in parallel. ---
self.pool.join()
print('Calculation Done, Plotting...\n')
self.plot_initialize()
self.pool_set()
self.pool.map(self.plot_spike_train, range(0, self.cell_Num))
self.pool.close()
self.pool.join()

#%%
if __name__ == '__main__':
    save_folder = r'E:\ZR\Data_Temp\191026_L69_LM\1-010\results'
    start_time = time.time()
    print('Spike_Train Calculating...\n')
    # Source of the cells to plot.
    cell_group = pp.read_variable(save_folder +
                                  r'\\Cell_Groups_Morphology.pkl')
    aligned_frame_name = pp.tif_name(save_folder + r'\\Aligned_Frames')
    # Note: this is the baseline frame used for dF/F.
    graph_after_align = pp.read_variable(
        save_folder + r'\Run_Average_graph.pkl')
    cell_find_type = 'Morphology'
    st = Spike_Train(cell_group, aligned_frame_name, graph_after_align,
                     save_folder, 5, cell_find_type)
    st.main()
    spike_train = st.spike_train
    pp.save_variable(spike_train,
                     save_folder + r'\spike_train_' + cell_find_type + '.pkl')
    print('Calculation Done!\n')
    finish_time = time.time()
    print('Task Time Cost:' + str(finish_time - start_time) + 's')
#Then normalize all data,keep 0 as 0 unchanged. self.sub_series = self.sub_series / abs(self.sub_series).max() #Recover cell data to frame sub_video_context = np.zeros(shape=(self.frame_Num, 512, 512), dtype=np.uint8) for i in range(self.frame_Num): current_frame = np.ones(shape=(512, 512), dtype=np.uint8) * 127 for j in range(len(self.cell_group)): x_list, y_list = pp.cell_location(self.cell_group[j]) current_frame[y_list, x_list] = np.uint8( (self.sub_series[i, j] + 1) * 127) sub_video_context[i, :, :] = current_frame self.video_plot('Sub_Video_Cell.avi', sub_video_context) if __name__ == '__main__': save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-002\results' cell_group = pp.read_variable(save_folder + r'\\Cell_Groups_Morphology.pkl') show_gain = 32 start_frame = 0 stop_frame = 2220 CV = Cell_Video(cell_group, save_folder, show_gain, start_frame, stop_frame) CV.frame_sets_generation() CV.F_calculation() CV.F_video() #CV.sub_video() b = CV.F_series
# --- Tail of a Spike_Train method (likely `main`; its `def` line is
# outside this chunk): finish the calculation pool, then plot every
# cell's spike train in parallel. Near-duplicate of the 1-010 runner
# script, here for run 1-002. ---
self.pool.join()
print('Calculation Done, Plotting...\n')
self.plot_initialize()
self.pool_set()
self.pool.map(self.plot_spike_train, range(0, self.cell_Num))
self.pool.close()
self.pool.join()

#%%
if __name__ == '__main__':
    save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-002\results'
    start_time = time.time()
    print('Spike_Train Calculating...\n')
    cell_group = pp.read_variable(save_folder + r'\\Cell_Group.pkl')
    aligned_frame_name = pp.tif_name(save_folder + r'\\Aligned_Frames')
    graph_after_align = pp.read_variable(save_folder +
                                         r'\Run_Average_graph.pkl')
    cell_find_type = 'Morphology'
    st = Spike_Train(cell_group, aligned_frame_name, graph_after_align,
                     save_folder, 5, cell_find_type)
    st.main()
    spike_train = st.spike_train
    pp.save_variable(spike_train,
                     save_folder + r'\spike_train_' + cell_find_type + '.pkl')
    print('Calculation Done!\n')
    finish_time = time.time()
    print('Task Time Cost:' + str(finish_time - start_time) + 's')
# --- Tail of a Correlation_Description method (likely
# `correlation_discription`; the `def` line plus `f`, `temp_std`,
# `temp_mean` and `similar_ids` are defined outside this chunk). Writes
# the correlation summary to an open text file. ---
f.write('Correlations have std = ' + str(temp_std) + ', mean = ' +
        str(temp_mean) + '\n')
f.write('Max correlation = ' + str(self.r_value.max()) + ', Minimum = ' +
        str(self.r_value.min()) + '\n')
f.write('Correlation components above 2std are as below:\n')
for i in range(len(similar_ids)):
    # Component numbers are reported 1-based.
    f.write('Component ' + str(similar_ids[i] + 1) + ', Pearson r = ' +
            str(self.r_value[similar_ids[i]]) + '\n')
f.close()

if __name__ == '__main__':  # written in batch-processing form
    save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-002\results'
    graph_folder = r'E:\ZR\Data_Temp\190412_L74_LM\All-Stim-Maps\Run02'
    clustered_data = pp.read_variable(save_folder + r'\PCAed_Data.pkl')
    all_graph_name = pp.file_name(graph_folder, '.pkl')
    cluster_type = 'PCA_cross_frame_shuffle'
    # Compare the clustered components against every stim graph in turn.
    for i in range(len(all_graph_name)):
        correlation_name = cluster_type + '_vs_' + \
            all_graph_name[i].split('\\')[-1][:-4]
        target_graph = pp.read_variable(all_graph_name[i])
        CD = Correlation_Description(save_folder, target_graph,
                                     clustered_data, correlation_name)
        CD.correlation_calculation()
        CD.correlation_discription()
# =============================================================================
# target_graph = pp.read_variable(r'E:\ZR\Data_Temp\190412_L74_LM\All-Stim-Maps\Run02\Orien90-0_Cells.pkl')
# clustered_data = pp.read_variable(save_folder+r'\ICAed_Data.pkl')
# correlation_name = 'PCA_vs_Orien90-0'
# CD = Correlation_Description(save_folder,target_graph,clustered_data,correlation_name)
# --- Tail of __getstate__ (its `def` line is outside this chunk): drop
# the multiprocessing pool, which cannot be pickled, before pickling. ---
if 'pool' in self_dict:
    del self_dict['pool']
return self_dict

# Methods of the enclosing class (likely Radar_Map, judging by the runner
# below); the `class` line is outside this chunk.
def __setstate__(self, state):
    """Restore pickled state; the pool is rebuilt later via pool_set()."""
    self.__dict__.update(state)

def main(self):
    """Condition the spikes, define axes, then plot every cell in parallel."""
    self.condition_spikes()
    self.Axis_Define()
    self.pool_set()
    # One task per row of cell_condition_data.
    self.pool.map(self.Graph_plot,
                  range(np.shape(self.cell_condition_data)[0]))
    self.pool.close()
    self.pool.join()

if __name__ == '__main__':
    start_time = time.time()
    save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-004\results'
    have_blank = True
    cell_type = 'Morphology'
    spike_train = pp.read_variable(save_folder + r'\spike_train_' +
                                   cell_type + '.pkl')
    rm = Radar_Map(save_folder, spike_train, have_blank, cell_type)
    rm.main()
    pp.save_variable(rm.cell_condition_data,
                     save_folder + r'\Cell_Tunings_' + cell_type + r'.pkl')
    finish_time = time.time()
    print('Plot Done, time cost :' + str(finish_time - start_time) + 's')
@author: ZR
"""
# (The two lines above are the tail of the module docstring; its opening
# triple-quote is outside this chunk.)

#%% Test a high-pass Butterworth filter on one cell's spike train.
import General_Functions.my_tools as pp
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from scipy.stats import pearsonr

spike_train = pp.read_variable(
    r'E:\ZR\Data_Temp\190412_L74_LM\1-001\results\spike_train.pkl')
plot_sample = spike_train[53, :]  # one example cell
#plt.plot(plot_sample)
freq = np.fft.fft(plot_sample)
# Build the filter in second-order-sections form: arguments are order,
# critical frequency/frequencies, type, sampling frequency; output 'sos'.
sos = signal.butter(10, 0.0000001, 'highpass', fs=1.301, output='sos')
filtered = signal.sosfilt(sos, plot_sample)
freq_filtered = abs(np.fft.fft(filtered))
plt.plot(filtered[0:200])
pearsonr(plot_sample, filtered)[0]  # similarity of raw vs. filtered trace

#%% Test band cut: zero selected FFT bins, then invert the transform.
# NOTE(review): `freq_cut = freq` is a reference, not a copy — the
# in-place edits below also mutate `freq`. Confirm this is intended.
freq_cut = freq
freq_cut[0:100] = 0
freq_cut[1481:1581] = 0
test = np.fft.ifft(freq_cut)
def add_element(self, current_path):
    """Load one pickled graph and register its first column.

    The key is the file's base name (extension removed) minus its last 6
    characters; the value is column 0 as a plain Python list.
    """
    # Keep only the first column, as a 1-D list — mind the data format.
    column = pp.read_variable(current_path)[:, 0]
    base_name = current_path.split('\\')[-1].split('.')[0]
    self.all_variable[base_name[:-6]] = list(column)
# -*- coding: utf-8 -*- """ Created on Mon Jul 8 15:17:32 2019 @author: ZR """ import General_Functions.my_tools as pp import numpy as np import matplotlib.pyplot as plt save_folder = r'E:\ZR\Data_Temp\190412_L74_LM\1-002\results' spike_data = pp.read_variable(save_folder+r'\\spike_train_Morphology.pkl') spike_data_filtered = pp.read_variable(save_folder+r'\\spike_train_Morphology_filtered.pkl') #%% example_cell = spike_data[135,:] example_cell_filtered = spike_data_filtered[135,:] #%% from scipy import signal critical_freq = 0.005 order = 10#滤波阶数 capture_rate = 1.301 sos = signal.butter(order,critical_freq,'lowpass',fs = capture_rate,output = 'sos') filtered = signal.sosfilt(sos, example_cell) plt.plot(filtered) #%% def Test(a,**kwargs): test1 = a