def trial_averaged_tuning_curve_errorbar(sheet, folder, stimulus, parameter, start, end, xlabel="", ylabel="", color="black", percentile=False, useXlog=False, useYlog=False, ylim=[0., 100.], xlim=False, opposite=False, box=None, radius=None, addon="", data=None, data_curve=True):
    """
    Plot the trial-averaged firing-rate tuning curve (population mean) against
    'parameter' for neurons of 'sheet' recorded under 'stimulus', reading the
    pickled datastore in 'folder' and saving the figure there as an .svg.

    Neurons are optionally restricted to those within 'radius' of the sheet
    center, and always restricted to those whose LGN afferent orientation
    preference is within 0.1 rad of 0.

    NOTE(review): only sheet, folder, stimulus, parameter, radius and addon are
    used below; the remaining parameters (start, end, xlabel, ylabel, color,
    percentile, useXlog, useYlog, ylim, xlim, opposite, box, data, data_curve)
    are accepted but ignored by this implementation. 'ylim' is also a mutable
    default argument — harmless here because it is never mutated or read.
    """
    print inspect.stack()[0][3]  # log the name of this function
    print "folder: ", folder
    print "sheet: ", sheet
    # Re-open the previously pickled simulation datastore.
    data_store = PickledDataStore(load=True, parameters=ParameterSet({'root_directory': folder, 'store_stimuli': False}), replace=True)
    data_store.print_content(full_recordings=False)
    neurons = []
    # All neurons with stored spike trains for this sheet/stimulus combination.
    neurons = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus).get_segments()[0].get_stored_spike_train_ids()
    print "Recorded neurons:", len(neurons)
    if radius:
        # Restrict to neurons lying within 'radius' (sheet coordinates).
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet, neuron_ids=neurons)
        positions = data_store.get_neuron_postions()[sheet]
        if radius:  # NOTE(review): redundant — already inside 'if radius'
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        neurons = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)
    # Annotate per-neuron values, then keep only neurons whose LGN afferent
    # orientation preference is within 0.1 rad of orientation 0.
    NeuronAnnotationsToPerNeuronValues(data_store, ParameterSet({})).analyse()
    l4_exc_or = data_store.get_analysis_result(identifier='PerNeuronValue', value_name='LGNAfferentOrientation', sheet_name=sheet)[0]
    l4_exc_or_many = numpy.array(neurons)[numpy.nonzero(numpy.array([circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi) for i in neurons]) < .1)[0]]
    neurons = list(l4_exc_or_many)
    print "Selected neurons:", len(neurons)  # , neurons
    if len(neurons) < 1:
        return
    # Compute trial-averaged firing rates for the selected neurons ...
    TrialAveragedFiringRate(param_filter_query(data_store, sheet_name=sheet, st_name=stimulus), ParameterSet({'neurons': list(neurons)})).analyse()
    # ... and plot the resulting tuning curve (mean over the population).
    PlotTuningCurve(
        param_filter_query(data_store, st_name=stimulus, analysis_algorithm=['TrialAveragedFiringRate']),
        ParameterSet({
            'polar': False,
            'pool': False,
            'centered': False,
            'percent': False,
            'mean': True,
            'parameter_name': parameter,
            'neurons': list(neurons),
            'sheet_name': sheet
        }),
        fig_param={'dpi': 200},
        plot_file_name=folder + "/TrialAveragedSensitivityNew_" + stimulus + "_" + parameter + "_" + str(sheet) + "_" + addon + "_mean.svg"
    ).plot({
        # '*.y_lim':(0,30),
        # '*.x_lim':(-10,100),
        # '*.x_scale':'log', '*.x_scale_base':10,
        '*.fontsize': 17
    })
    return
def trial_averaged_LFP_rate(sheet, folder, stimulus, parameter, start, end, xlabel="", ylabel="", color="black", ylim=[0., 100.], radius=None, addon=""):
    """
    Plot a population-rate proxy of the LFP over time, one figure per stimulus
    condition, from the pickled datastore in 'folder'.

    Pipeline: bin spikes in 5 ms windows (SpikeCount), average over trials
    (TrialMean), average the resulting analog signals over cells, then plot
    one time course per stimulus condition and save it as an .svg in 'folder'.

    NOTE(review): start, end, xlabel, ylabel and ylim are accepted but not used
    by this implementation; 'ylim' is a mutable default argument (never
    mutated, so harmless). Axis labels say "uV"/"us" but the plotted quantity
    is a trial-averaged spike count per bin — presumably used as an LFP proxy.
    """
    print inspect.stack()[0][3]  # log the name of this function
    print "folder: ", folder
    print "sheet: ", sheet
    # Re-open the previously pickled simulation datastore.
    data_store = PickledDataStore(load=True, parameters=ParameterSet({'root_directory': folder, 'store_stimuli': False}), replace=True)
    data_store.print_content(full_recordings=False)
    neurons = []
    # All neurons with stored spike trains for this sheet/stimulus combination.
    neurons = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus).get_segments()[0].get_stored_spike_train_ids()
    print "Recorded neurons:", len(neurons)
    ### cascading requirements
    if radius:
        # Restrict to neurons lying within 'radius' (sheet coordinates).
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet, neuron_ids=neurons)
        positions = data_store.get_neuron_postions()[sheet]
        if radius:  # NOTE(review): redundant — already inside 'if radius'
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        neurons = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)
    ####
    # if orientation:
    #     NeuronAnnotationsToPerNeuronValues(data_store,ParameterSet({})).analyse()
    #     l4_or = data_store.get_analysis_result(identifier='PerNeuronValue',value_name='LGNAfferentOrientation', sheet_name=sheet)
    #     l4_phase = data_store.get_analysis_result(identifier='PerNeuronValue',value_name='LGNAfferentPhase', sheet_name=sheet)
    #     # print "l4_phase", l4_phase
    #     neurons = numpy.array([neurons[numpy.argmin([circular_dist(o,numpy.pi/2,numpy.pi) for (o,p) in zip(l4_or[0].get_value_by_id(neurons),l4_phase[0].get_value_by_id(neurons))])] ])
    print "Selected neurons:", len(neurons)  # , neurons
    if len(neurons) < 1:
        return
    # Bin spikes of the selected neurons in 5 ms windows.
    SpikeCount(
        param_filter_query(data_store, sheet_name=sheet, st_name=stimulus),
        ParameterSet({'bin_length': 5, 'neurons': list(neurons), 'null': False})
        # ParameterSet({'bin_length':bin, 'neurons':list(neurons), 'null':False})
    ).analyse()
    # datastore.save()
    # Average the binned counts over trials.
    TrialMean(
        param_filter_query(data_store, name='AnalogSignalList', analysis_algorithm='SpikeCount'),
        ParameterSet({'vm': False, 'cond_exc': False, 'cond_inh': False})).analyse()
    dsvTM = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus, analysis_algorithm='TrialMean')
    # dsvTM.print_content(full_recordings=False)
    pnvsTM = [dsvTM.get_analysis_result()]
    # print pnvsTM
    # get stimuli from PerNeuronValues
    st = [MozaikParametrized.idd(s.stimulus_id) for s in pnvsTM[-1]]
    asl_id = numpy.array([z.get_asl_by_id(neurons) for z in pnvsTM[-1]])
    print asl_id.shape
    # Example:
    # (8, 133, 1029)
    # 8 stimuli
    # 133 cells
    # 1029 bins
    # Group signals by the varied stimulus parameter, sorted by its value.
    dic = colapse_to_dictionary([z.get_asl_by_id(neurons) for z in pnvsTM[-1]], st, parameter)
    for k in dic:
        (b, a) = dic[k]
        par, val = zip(*sorted(zip(b, numpy.array(a))))
        dic[k] = (par, numpy.array(val))
    stimuli = dic.values()[0][0]  # NOTE(review): computed but not used below
    means = asl_id.mean(axis=1)  # mean of cells, per stimulus: shape (stimuli, bins)
    print means.shape
    # print "means", means, "stimuli", stimuli
    # plot the LFP for each stimulus
    for s in range(0, len(means)):
        # for each stimulus plot the average conductance per cell over time
        matplotlib.rcParams.update({'font.size': 22})
        fig, ax = plt.subplots()
        ax.plot(range(0, len(means[s])), means[s], color=color, linewidth=3)
        # ax.set_ylim([lfp.min(), lfp.max()])
        # ax.set_ylim(ylim)
        ax.set_ylabel("LFP (uV)")
        ax.set_xlabel("Time (us)")
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        # text
        plt.tight_layout()
        plt.savefig(folder + "/TimecourseLFPrate_" + sheet + "_" + parameter + "_" + str(s) + "_" + addon + ".svg", dpi=200, transparent=True)
        fig.clf()
        plt.close()
        # garbage
        gc.collect()
def trial_averaged_raster(sheet, folder, stimulus, parameter, opposite=False, box=None, radius=None, addon=""): print inspect.stack()[0][3] print "folder: ", folder print "sheet: ", sheet data_store = PickledDataStore(load=True, parameters=ParameterSet({ 'root_directory': folder, 'store_stimuli': False }), replace=True) data_store.print_content(full_recordings=False) spike_ids = param_filter_query( data_store, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids() if spike_ids == None: print "No spikes recorded.\n" return print "Recorded neurons:", len(spike_ids) if sheet == 'V1_Exc_L4' or sheet == 'V1_Inh_L4': NeuronAnnotationsToPerNeuronValues(data_store, ParameterSet({})).analyse() l4_exc_or = data_store.get_analysis_result( identifier='PerNeuronValue', value_name='LGNAfferentOrientation', sheet_name=sheet)[0] if opposite: addon = addon + "_opposite" l4_exc_or_many = numpy.array(spike_ids)[numpy.nonzero( numpy.array([ circular_dist(l4_exc_or.get_value_by_id(i), numpy.pi / 2, numpy.pi) for i in spike_ids ]) < .1)[0]] else: addon = addon + "_same" l4_exc_or_many = numpy.array(spike_ids)[numpy.nonzero( numpy.array([ circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi) for i in spike_ids ]) < .1)[0]] spike_ids = list(l4_exc_or_many) if radius or box: sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet, neuron_ids=spike_ids) positions = data_store.get_neuron_postions()[sheet] if box: ids1 = select_ids_by_position(positions, sheet_ids, box=box) if radius: ids1 = select_ids_by_position(positions, sheet_ids, radius=radius) spike_ids = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1) print "Selected neurons:", len(spike_ids) if len(spike_ids) < 1: return dsv = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus) dist = box if not radius else radius # Raster + Histogram RasterPlot(dsv, ParameterSet({ 'sheet_name': sheet, 'neurons': list(spike_ids), 'trial_averaged_histogram': True, 'spontaneous': True }), fig_param={ 'dpi': 100, 
'figsize': (100, 50) }, plot_file_name=folder + "/HistRaster_" + parameter + "_" + str(sheet) + "_radius" + str(dist) + "_" + addon + ".svg").plot( {'SpikeRasterPlot.group_trials': True})
def trial_averaged_Vm(sheet, folder, stimulus, parameter, opposite=False, box=None, radius=None, addon=""): print inspect.stack()[0][3] print "folder: ", folder print "sheet: ", sheet data_store = PickledDataStore(load=True, parameters=ParameterSet({ 'root_directory': folder, 'store_stimuli': False }), replace=True) data_store.print_content(full_recordings=False) analog_ids = param_filter_query( data_store, sheet_name=sheet).get_segments()[0].get_stored_vm_ids() if analog_ids == None: print "No Vm recorded.\n" return print "Recorded neurons:", len(analog_ids) if sheet == 'V1_Exc_L4' or sheet == 'V1_Inh_L4': NeuronAnnotationsToPerNeuronValues(data_store, ParameterSet({})).analyse() l4_exc_or = data_store.get_analysis_result( identifier='PerNeuronValue', value_name='LGNAfferentOrientation', sheet_name=sheet)[0] if opposite: addon = addon + "_opposite" l4_exc_or_many = numpy.array(analog_ids)[numpy.nonzero( numpy.array([ circular_dist(l4_exc_or.get_value_by_id(i), numpy.pi / 2, numpy.pi) for i in analog_ids ]) < .1)[0]] else: addon = addon + "_same" l4_exc_or_many = numpy.array(analog_ids)[numpy.nonzero( numpy.array([ circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi) for i in analog_ids ]) < .1)[0]] analog_ids = list(l4_exc_or_many) if radius or box: sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet, neuron_ids=analog_ids) positions = data_store.get_neuron_postions()[sheet] if box: ids1 = select_ids_by_position(positions, sheet_ids, box=box) if radius: ids1 = select_ids_by_position(positions, sheet_ids, radius=radius) analog_ids = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1) print "Selected neurons:", len(analog_ids) if len(analog_ids) < 1: return dsv = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus) dist = box if not radius else radius for n in analog_ids: VmPlot( dsv, ParameterSet({ 'neuron': n, 'sheet_name': sheet, 'spontaneous': True, }), fig_param={ 'dpi': 300, 'figsize': (40, 5) }, # 
plot_file_name=folder+"/Vm_"+parameter+"_"+str(sheet)+"_"+str(dist)+"_"+str(n)+"_"+addon+".png" plot_file_name=folder + "/Vm_" + parameter + "_" + str(sheet) + "_radius" + str(dist) + "_" + str(n) + "_" + addon + ".svg" ).plot({ # '*.y_lim':(0,60), # '*.x_scale':'log', '*.x_scale_base':2, # '*.y_ticks':[5, 10, 25, 50, 60], # # '*.y_scale':'linear', # '*.y_scale':'log', '*.y_scale_base':2, # '*.fontsize':24 })
def perform_comparison_size_tuning(sheet, reference_position, step, sizes, folder_full, folder_inactive, reverse=False, Ismaller=[2, 3], Iequal=[4, 5], Ilarger=[6, 8], box=[], csvfile=None):
    """
    Compare size-tuning responses of 'sheet' neurons between a full (control)
    and an inactivated simulation run, grouping neurons by annular distance
    from 'reference_position' in steps of 'step'.

    For each annulus: selects the common recorded neurons of both runs, keeps
    only units whose responses actually change (beyond an absolute tolerance
    of 4), computes the percentage response change for stimuli smaller than /
    equal to / larger than each cell's optimum, and plots a population bar
    chart plus per-cell tuning curves. The figure is saved as a .png in
    'folder_inactive'; if 'csvfile' is given, the (smaller, equal, larger)
    averages are appended to it.

    NOTE(review): this function name is redefined later in this file with a
    different signature — at import time the later definition shadows this
    one. Also: Ismaller and Iequal are accepted but unused (the automatic
    peak/minimum search is used instead), and smaller_pvalue/equal_pvalue/
    larger_pvalue stay at 0. (the t-test code is commented out), so all three
    bars are always recolored by the significance checks below.
    """
    print folder_full
    # Control (full) datastore.
    data_store_full = PickledDataStore(load=True, parameters=ParameterSet({'root_directory': folder_full, 'store_stimuli': False}), replace=True)
    data_store_full.print_content(full_recordings=False)
    print folder_inactive
    # Inactivated datastore.
    data_store_inac = PickledDataStore(load=True, parameters=ParameterSet({'root_directory': folder_inactive, 'store_stimuli': False}), replace=True)
    data_store_inac.print_content(full_recordings=False)
    print "Checking data..."
    # Full
    dsv1 = queries.param_filter_query(data_store_full, identifier='PerNeuronValue', sheet_name=sheet)
    # dsv1.print_content(full_recordings=False)
    pnvs1 = [dsv1.get_analysis_result()]
    # get stimuli
    st1 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs1[-1]]
    # print st1
    # Inactivated
    dsv2 = queries.param_filter_query(data_store_inac, identifier='PerNeuronValue', sheet_name=sheet)
    pnvs2 = [dsv2.get_analysis_result()]
    # get stimuli
    st2 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs2[-1]]
    # rings analysis
    neurons_full = []
    neurons_inac = []
    rowplots = 0
    max_size = 0.6
    # GET RECORDINGS BY POSITION (either step or box. In case of using box, inefficiently repetition of box-ing step times!)
    slice_ranges = numpy.arange(step, max_size + step, step)
    print "slice_ranges:", slice_ranges
    for col, cur_range in enumerate(slice_ranges):
        radius = [cur_range - step, cur_range]  # annulus [inner, outer] for this row
        print col
        # get the list of all recorded neurons in X_ON
        # Full
        spike_ids1 = param_filter_query(data_store_full, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions1 = data_store_full.get_neuron_postions()[sheet]
        # print numpy.min(positions1), numpy.max(positions1)
        sheet_ids1 = data_store_full.get_sheet_indexes(sheet_name=sheet, neuron_ids=spike_ids1)
        radius_ids1 = select_ids_by_position(reference_position, radius, sheet_ids1, positions1, reverse, box)
        neurons1 = data_store_full.get_sheet_ids(sheet_name=sheet, indexes=radius_ids1)
        # Inactivated
        spike_ids2 = param_filter_query(data_store_inac, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions2 = data_store_inac.get_neuron_postions()[sheet]
        sheet_ids2 = data_store_inac.get_sheet_indexes(sheet_name=sheet, neuron_ids=spike_ids2)
        radius_ids2 = select_ids_by_position(reference_position, radius, sheet_ids2, positions2, reverse, box)
        neurons2 = data_store_inac.get_sheet_ids(sheet_name=sheet, indexes=radius_ids2)
        print neurons1
        print neurons2
        # Keep only neurons recorded in BOTH runs so responses can be paired.
        if not set(neurons1) == set(neurons2):
            neurons1 = numpy.intersect1d(neurons1, neurons2)
            neurons2 = neurons1
        if len(neurons1) > rowplots:
            rowplots = len(neurons1)
        neurons_full.append(neurons1)
        neurons_inac.append(neurons2)
        print "radius_ids", radius_ids2
        print "neurons_full:", len(neurons_full[col]), neurons_full[col]
        print "neurons_inac:", len(neurons_inac[col]), neurons_inac[col]
        assert len(neurons_full[col]) > 0, "ERROR: the number of recorded neurons is 0"
    # subplot figure creation
    plotOnlyPop = False
    print 'rowplots', rowplots
    print "Starting plotting ..."
    print "slice_ranges:", len(slice_ranges), slice_ranges
    if len(slice_ranges) > 1:
        # one row per annulus: column 0 = population bars, columns 1.. = per-cell curves
        fig, axes = plt.subplots(nrows=len(slice_ranges), ncols=rowplots + 1, figsize=(3 * rowplots, 3 * len(slice_ranges)), sharey=False)
    else:
        fig, axes = plt.subplots(nrows=2, ncols=2, sharey=False)
        plotOnlyPop = True  # too few rows: skip the per-cell tuning-curve subplots
    print axes.shape
    p_significance = .02
    for col, cur_range in enumerate(slice_ranges):
        radius = [cur_range - step, cur_range]
        print col
        interval = str(radius[0]) + " - " + str(radius[1]) + " deg radius"
        print interval
        axes[col, 0].set_ylabel(interval + "\n\nResponse change (%)")
        print "range:", col
        if len(neurons_full[col]) < 1:
            continue
        tc_dict1 = []
        tc_dict2 = []
        # Full
        # group values: (sorted radii, responses) per remaining stimulus combination
        dic = colapse_to_dictionary([z.get_value_by_id(neurons_full[col]) for z in pnvs1[-1]], st1, 'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip(*sorted(zip(b, numpy.array(a))))
            dic[k] = (par, numpy.array(val))
        tc_dict1.append(dic)
        # Inactivated
        # group values
        dic = colapse_to_dictionary([z.get_value_by_id(neurons_inac[col]) for z in pnvs2[-1]], st2, 'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip(*sorted(zip(b, numpy.array(a))))
            dic[k] = (par, numpy.array(val))
        tc_dict2.append(dic)
        print "(stimulus conditions, cells):", tc_dict1[0].values()[0][1].shape  # ex. (10, 32) firing rate for each stimulus condition (10) and each cell (32)
        # Population histogram
        diff_full_inac = []
        sem_full_inac = []
        num_cells = tc_dict1[0].values()[0][1].shape[1]
        smaller_pvalue = 0.
        equal_pvalue = 0.
        larger_pvalue = 0.
        # 1. SELECT ONLY CHANGING UNITS
        all_open_values = tc_dict2[0].values()[0][1]    # inactivated ("open") responses
        all_closed_values = tc_dict1[0].values()[0][1]  # control ("closed") responses
        # 1.1 Search for the units that are NOT changing (within a certain absolute tolerance)
        unchanged_units = numpy.isclose(all_closed_values, all_open_values, rtol=0., atol=4.)
        # print unchanged_units.shape
        # 1.2 Reverse them into those that are changing
        changed_units = numpy.invert(unchanged_units)
        # print numpy.nonzero(changed_units)
        # 1.3 Get the indexes of all units that are changing
        changing_idxs = []
        for i in numpy.nonzero(changed_units)[0]:
            for j in numpy.nonzero(changed_units)[1]:
                if j not in changing_idxs:
                    changing_idxs.append(j)
        # print sorted(changing_idxs)
        # 1.4 Get the changing units
        open_values = [x[changing_idxs] for x in all_open_values]
        open_values = numpy.array(open_values)
        closed_values = [x[changing_idxs] for x in all_closed_values]
        closed_values = numpy.array(closed_values)
        print "chosen open units:", open_values.shape
        print "chosen closed units:", closed_values.shape
        num_cells = closed_values.shape[1]
        # 2. AUTOMATIC SEARCH FOR INTERVALS
        # peak = max(numpy.argmax(closed_values, axis=0 ))
        peaks = numpy.argmax(closed_values, axis=0)  # per-cell optimal stimulus index
        # peak = int( numpy.argmax( closed_values ) / closed_values.shape[1] ) # the returned single value is from the flattened array
        # print "numpy.argmax( closed_values ):", numpy.argmax( closed_values )
        print "peaks:", peaks
        # minimum = int( numpy.argmin( closed_values ) / closed_values.shape[1] )
        # minimum = min(numpy.argmin(closed_values, axis=0 ))
        minimums = numpy.argmin(closed_values, axis=0) + 1  # +N to get the response out of the smallest
        # print "numpy.argmin( closed_values ):", numpy.argmin( closed_values )
        print "minimums:", minimums
        # -------------------------------------
        # DIFFERENCE BETWEEN INACTIVATED AND CONTROL
        # We want to have a summary measure of the population of cells with and without inactivation.
        # Our null-hypothesis is that the inactivation does not change the activity of cells.
        # A different result will tell us that the inactivation DOES something.
        # Therefore our null-hypothesis is the result obtained in the intact system.
        # Procedure:
        # We have several stimulus sizes
        # We want to group them in three: smaller than optimal, optimal, larger than optimal
        # We do the mean response for each cell for the grouped stimuli
        # i.e. sum the responses for each cell across stimuli in the group, divided by the number of stimuli in the group
        # We repeat for each group
        # average of all trial-averaged response for each cell for grouped stimulus size
        # we want the difference / normalized by the highest value * expressed as percentage
        # USING PROVIDED INTERVALS
        # diff_smaller = ((numpy.sum(open_values[Ismaller[0]:Ismaller[1]], axis=0) - numpy.sum(closed_values[Ismaller[0]:Ismaller[1]], axis=0)) / numpy.sum(closed_values[Ismaller[0]:Ismaller[1]], axis=0)) * 100
        # diff_equal = ((numpy.sum(open_values[Iequal[0]:Iequal[1]], axis=0) - numpy.sum(closed_values[Iequal[0]:Iequal[1]], axis=0)) / numpy.sum(closed_values[Iequal[0]:Iequal[1]], axis=0)) * 100
        # diff_larger = ((numpy.sum(open_values[Ilarger[0]:Ilarger[1]], axis=0) - numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) / numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) * 100
        # USING AUTOMATIC SEARCH
        # NOTE(review): fancy-indexing with the per-cell 'minimums'/'peaks'
        # index arrays selects one row per cell before summing over axis 0.
        diff_smaller = ((numpy.sum(open_values[minimums], axis=0) - numpy.sum(closed_values[minimums], axis=0)) / numpy.sum(closed_values[minimums], axis=0)) * 100
        diff_equal = ((numpy.sum(open_values[peaks], axis=0) - numpy.sum(closed_values[peaks], axis=0)) / numpy.sum(closed_values[peaks], axis=0)) * 100
        diff_larger = ((numpy.sum(open_values[Ilarger[0]:Ilarger[1]], axis=0) - numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) / numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) * 100
        # print "diff_smaller", diff_smaller
        # print "diff_equal", diff_smaller
        # print "diff_larger", diff_smaller
        # average of all cells
        smaller = sum(diff_smaller) / num_cells
        equal = sum(diff_equal) / num_cells
        larger = sum(diff_larger) / num_cells
        print "smaller", smaller
        print "equal", equal
        print "larger", larger
        if csvfile:
            csvfile.write("(" + str(smaller) + ", " + str(equal) + ", " + str(larger) + "), ")
        # 0/0
        # Check using scipy
        # and we want to compare the responses of full and inactivated
        # smaller, smaller_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][0:3], axis=0)/3, numpy.sum(tc_dict1[0].values()[0][1][0:3], axis=0)/3 )
        # equal, equal_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2, numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2 )
        # larger, larger_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5, numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5 )
        # print "smaller, smaller_pvalue:", smaller, smaller_pvalue
        # print "equal, equal_pvalue:", equal, equal_pvalue
        # print "larger, larger_pvalue:", larger, larger_pvalue
        diff_full_inac.append(smaller)
        diff_full_inac.append(equal)
        diff_full_inac.append(larger)
        # -------------------------------------
        # Standard Error Mean calculated on the full sequence
        sem_full_inac.append(scipy.stats.sem(diff_smaller))
        sem_full_inac.append(scipy.stats.sem(diff_equal))
        sem_full_inac.append(scipy.stats.sem(diff_larger))
        # print diff_full_inac
        # print sem_full_inac
        barlist = axes[col, 0].bar([0.5, 1.5, 2.5], diff_full_inac, yerr=sem_full_inac, width=0.8)
        axes[col, 0].plot([0, 4], [0, 0], 'k-')  # horizontal 0 line
        for ba in barlist:
            ba.set_color('white')
        # NOTE(review): the pvalues are fixed at 0. above, so these always fire.
        if smaller_pvalue < p_significance:
            barlist[0].set_color('brown')
        if equal_pvalue < p_significance:
            barlist[1].set_color('darkgreen')
        if larger_pvalue < p_significance:
            barlist[2].set_color('blue')
        # Plotting tuning curves
        x_full = tc_dict1[0].values()[0][0]
        x_inac = tc_dict2[0].values()[0][0]
        # each cell couple
        axes[col, 1].set_ylabel("Response (spikes/sec)", fontsize=10)
        for j, nid in enumerate(neurons_full[col][changing_idxs]):
            # print col,j,nid
            if len(neurons_full[col][changing_idxs]) > 1:  # case with just one neuron in the group
                y_full = closed_values[:, j]
                y_inac = open_values[:, j]
            else:
                y_full = closed_values
                y_inac = open_values
            if not plotOnlyPop:
                axes[col, j + 1].plot(x_full, y_full, linewidth=2, color='b')
                axes[col, j + 1].plot(x_inac, y_inac, linewidth=2, color='r')
                axes[col, j + 1].set_title(str(nid), fontsize=10)
                axes[col, j + 1].set_xscale("log")
    fig.subplots_adjust(hspace=0.4)
    # fig.suptitle("All recorded cells grouped by circular distance", size='xx-large')
    fig.text(0.5, 0.04, 'cells', ha='center', va='center')
    fig.text(0.06, 0.5, 'ranges', ha='center', va='center', rotation='vertical')
    for ax in axes.flatten():
        ax.set_ylim([0, 60])
        ax.set_xticks(sizes)
        ax.set_xticklabels([0.1, '', '', '', '', 1, '', 2, 4, 6])
        # ax.set_xticklabels([0.1, '', '', '', '', '', '', '', '', '', '', 1, '', '', 2, '', '', '', 4, '', 6])
    for col, _ in enumerate(slice_ranges):
        # axes[col,0].set_ylim([-.8,.8])
        axes[col, 0].set_ylim([-60, 60])
        axes[col, 0].set_yticks([-60, -40, -20, 0., 20, 40, 60])
        axes[col, 0].set_yticklabels([-60, -40, -20, 0, 20, 40, 60])
        axes[col, 0].set_xlim([0, 4])
        axes[col, 0].set_xticks([.9, 1.9, 2.9])
        axes[col, 0].set_xticklabels(['small', 'equal', 'larger'])
        axes[col, 0].spines['right'].set_visible(False)
        axes[col, 0].spines['top'].set_visible(False)
        axes[col, 0].spines['bottom'].set_visible(False)
    # plt.show()
    plt.savefig(folder_inactive + "/TrialAveragedSizeTuningComparison_" + sheet + "_step" + str(step) + "_box" + str(box) + ".png", dpi=100)
    # plt.savefig( folder_full+"/TrialAveragedSizeTuningComparison_"+sheet+"_"+interval+".png", dpi=100 )
    fig.clf()
    plt.close()
    # garbage
    gc.collect()
def perform_comparison_size_tuning( sheet, reference_position, step, sizes, folder_full, folder_inactive, reverse=False, Ssmaller=3, Sequal=4, SequalStop=5, Slarger=6, box=[] ):
    """
    Compare size-tuning responses of 'sheet' neurons between a full (control)
    and an inactivated run, grouping neurons by annular distance from
    'reference_position' in steps of 'step'.

    Unlike the earlier variant, this version uses the caller-supplied slice
    boundaries (Ssmaller/Sequal/SequalStop/Slarger) to group stimulus sizes
    into smaller/equal/larger-than-optimal, and plots every selected cell
    (no changing-unit filtering). The figure is saved as a .png in
    'folder_inactive'.

    NOTE(review): this redefines a function of the same name declared earlier
    in this file — at import time this later definition shadows the earlier
    one. Also: the three *_pvalue variables stay at 0. (the t-test code is
    commented out), so all three bars are always recolored below.
    """
    print folder_full
    # Control (full) datastore.
    data_store_full = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_full, 'store_stimuli' : False}),replace=True)
    data_store_full.print_content(full_recordings=False)
    print folder_inactive
    # Inactivated datastore.
    data_store_inac = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_inactive, 'store_stimuli' : False}),replace=True)
    data_store_inac.print_content(full_recordings=False)
    print "Checking data..."
    # Full
    dsv1 = queries.param_filter_query( data_store_full, identifier='PerNeuronValue', sheet_name=sheet )
    # dsv1.print_content(full_recordings=False)
    pnvs1 = [ dsv1.get_analysis_result() ]
    # get stimuli
    st1 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs1[-1]]
    # print st1
    # Inactivated
    dsv2 = queries.param_filter_query( data_store_inac, identifier='PerNeuronValue', sheet_name=sheet )
    pnvs2 = [ dsv2.get_analysis_result() ]
    # get stimuli
    st2 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs2[-1]]
    # rings analysis
    neurons_full = []
    neurons_inac = []
    rowplots = 0
    max_size = 0.6
    slice_ranges = numpy.arange(step, max_size+step, step)
    for col,cur_range in enumerate(slice_ranges):
        radius = [cur_range-step,cur_range]  # annulus [inner, outer] for this row
        print col
        # get the list of all recorded neurons in X_ON
        # Full
        spike_ids1 = param_filter_query(data_store_full, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions1 = data_store_full.get_neuron_postions()[sheet]
        # print numpy.min(positions1), numpy.max(positions1)
        sheet_ids1 = data_store_full.get_sheet_indexes(sheet_name=sheet,neuron_ids=spike_ids1)
        radius_ids1 = select_ids_by_position(reference_position, radius, sheet_ids1, positions1, reverse, box)
        # 0/0
        neurons1 = data_store_full.get_sheet_ids(sheet_name=sheet, indexes=radius_ids1)
        if len(neurons1) > rowplots: rowplots = len(neurons1)
        neurons_full.append(neurons1)
        # Inactivated
        spike_ids2 = param_filter_query(data_store_inac, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions2 = data_store_inac.get_neuron_postions()[sheet]
        sheet_ids2 = data_store_inac.get_sheet_indexes(sheet_name=sheet,neuron_ids=spike_ids2)
        radius_ids2 = select_ids_by_position(reference_position, radius, sheet_ids2, positions2, reverse, box)
        neurons2 = data_store_inac.get_sheet_ids(sheet_name=sheet, indexes=radius_ids2)
        neurons_inac.append(neurons2)
        print "radius_ids", radius_ids2
        print "neurons_full", neurons_full
        print "neurons_inac", neurons_inac
        # Both runs must have recorded exactly the same neurons in this annulus.
        assert len(neurons_full[col]) == len(neurons_inac[col]) , "ERROR: the number of recorded neurons is different"
        assert set(neurons_full[col]) == set(neurons_inac[col]) , "ERROR: the neurons in the two arrays are not the same"
    # to analyse old simulation it is necessary to choose corresponding ids,
    # do it by hand, running this script several times and noting them down here:
    # neurons_full = [numpy.array([2912, 3205, 1867, 2731, 2248])]
    # neurons_inac = [numpy.array([2912, 3205, 1867, 2731, 2248])]
    # neurons_full =[numpy.array([10921, 10024, 13851, 9855, 11648, 13277])]
    # neurons_inac =[numpy.array([10921, 10024, 13851, 9855, 11648, 13277])]
    # subplot figure creation
    print 'rowplots', rowplots
    print "Starting plotting ..."
    print len(slice_ranges), slice_ranges
    # one row per annulus: column 0 = population bars, columns 1.. = per-cell curves
    fig, axes = plt.subplots(nrows=len(slice_ranges), ncols=rowplots+1, figsize=(3*rowplots, 3*len(slice_ranges)), sharey=False)
    # fig, axes = plt.subplots(nrows=2, ncols=rowplots+1, figsize=(3*rowplots, 3*len(slice_ranges)), sharey=False)
    print axes.shape
    p_significance = .02
    for col,cur_range in enumerate(slice_ranges):
        radius = [cur_range-step,cur_range]
        print col
        interval = str(radius[0]) +" - "+ str(radius[1]) +" deg radius"
        print interval
        axes[col,0].set_ylabel(interval+"\n\nResponse change (%)")
        print "range:",col
        if len(neurons_full[col]) < 1: continue
        print "neurons_full:", len(neurons_full[col]), neurons_full[col]
        print "neurons_inac:", len(neurons_inac[col]), neurons_inac[col]
        tc_dict1 = []
        tc_dict2 = []
        # Full
        # group values: (sorted radii, responses) per remaining stimulus combination
        dic = colapse_to_dictionary([z.get_value_by_id(neurons_full[col]) for z in pnvs1[-1]], st1, 'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip( *sorted( zip(b, numpy.array(a)) ) )
            dic[k] = (par,numpy.array(val))
        tc_dict1.append(dic)
        # Inactivated
        # group values
        dic = colapse_to_dictionary([z.get_value_by_id(neurons_inac[col]) for z in pnvs2[-1]], st2, 'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip( *sorted( zip(b, numpy.array(a)) ) )
            dic[k] = (par,numpy.array(val))
        tc_dict2.append(dic)
        # Plotting tuning curves
        x_full = tc_dict1[0].values()[0][0]
        x_inac = tc_dict2[0].values()[0][0]
        # each cell couple
        print "(stimulus conditions, cells):", tc_dict1[0].values()[0][1].shape # ex. (10, 32) firing rate for each stimulus condition (10) and each cell (32)
        axes[col,1].set_ylabel("Response (spikes/sec)", fontsize=10)
        for j,nid in enumerate(neurons_full[col]):
            # print col,j,nid
            if len(neurons_full[col])>1: # case with just one neuron in the group
                y_full = tc_dict1[0].values()[0][1][:,j]
                y_inac = tc_dict2[0].values()[0][1][:,j]
            else:
                y_full = tc_dict1[0].values()[0][1]
                y_inac = tc_dict2[0].values()[0][1]
            axes[col,j+1].plot(x_full, y_full, linewidth=2, color='b')
            axes[col,j+1].plot(x_inac, y_inac, linewidth=2, color='r')
            axes[col,j+1].set_title(str(nid), fontsize=10)
            axes[col,j+1].set_xscale("log")
        # Population histogram
        diff_full_inac = []
        sem_full_inac = []
        num_cells = tc_dict1[0].values()[0][1].shape[1]
        smaller_pvalue = 0.
        equal_pvalue = 0.
        larger_pvalue = 0.
        # -------------------------------------
        # NON-PARAMETRIC TWO-TAILED TEST ON THE DIFFERENCE BETWEEN INACTIVATED AND CONTROL
        # We want to have a summary measure of the population of cells with and without inactivation.
        # Our null-hypothesis is that the inactivation does not change the activity of cells.
        # A different result will tell us that the inactivation DOES something.
        # Therefore our null-hypothesis is the result obtained in the intact system.
        # Procedure:
        # We have several stimulus sizes
        # We want to group them in three: smaller than optimal, optimal, larger than optimal
        # We do the mean response for each cell for the grouped stimuli
        # i.e. sum the responses for each cell across stimuli in the group, divided by the number of stimuli in the group
        # We repeat for each group
        # average of all trial-averaged response for each cell for grouped stimulus size
        # we want the difference / normalized by the highest value * expressed as percentage
        # diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][1:3], axis=0)/2 - numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)/2) / (numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)/2)) * 100
        # diff_equal = ((numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2 - numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2) / (numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2)) * 100
        # diff_larger = ((numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5 - numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5) / (numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5)) * 100
        # diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][1:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)) * 100
        # Percentage change (inactivated vs control) per stimulus-size group.
        diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][Ssmaller:Sequal], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Ssmaller:Sequal], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Ssmaller:Sequal], axis=0)) * 100
        diff_equal = ((numpy.sum(tc_dict2[0].values()[0][1][Sequal:SequalStop], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Sequal:SequalStop], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Sequal:SequalStop], axis=0)) * 100
        diff_larger = ((numpy.sum(tc_dict2[0].values()[0][1][Slarger:], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Slarger:], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Slarger:], axis=0)) * 100
        # print "diff_smaller", diff_smaller
        # average of all cells
        smaller = sum(diff_smaller) / num_cells
        equal = sum(diff_equal) / num_cells
        larger = sum(diff_larger) / num_cells
        # Check using scipy
        # and we want to compare the responses of full and inactivated
        # smaller, smaller_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][0:3], axis=0)/3, numpy.sum(tc_dict1[0].values()[0][1][0:3], axis=0)/3 )
        # equal, equal_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2, numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2 )
        # larger, larger_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5, numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5 )
        # print "smaller, smaller_pvalue:", smaller, smaller_pvalue
        # print "equal, equal_pvalue:", equal, equal_pvalue
        # print "larger, larger_pvalue:", larger, larger_pvalue
        diff_full_inac.append( smaller )
        diff_full_inac.append( equal )
        diff_full_inac.append( larger )
        # -------------------------------------
        # Standard Error Mean calculated on the full sequence
        sem_full_inac.append( scipy.stats.sem(diff_smaller) )
        sem_full_inac.append( scipy.stats.sem(diff_equal) )
        sem_full_inac.append( scipy.stats.sem(diff_larger) )
        # print diff_full_inac
        # print sem_full_inac
        barlist = axes[col,0].bar([0.5,1.5,2.5], diff_full_inac, width=0.8)
        axes[col,0].plot([0,4], [0,0], 'k-')  # horizontal 0 line
        for ba in barlist:
            ba.set_color('white')
        # NOTE(review): the pvalues are fixed at 0. above, so these always fire.
        if smaller_pvalue < p_significance: barlist[0].set_color('brown')
        if equal_pvalue < p_significance: barlist[1].set_color('darkgreen')
        if larger_pvalue < p_significance: barlist[2].set_color('blue')
    colors = ['brown', 'darkgreen', 'blue']  # NOTE(review): assigned but unused
    # for patch, color in zip(bp['boxes'], colors):
    #     patch.set_facecolor(color)
    fig.subplots_adjust(hspace=0.4)
    # fig.suptitle("All recorded cells grouped by circular distance", size='xx-large')
    fig.text(0.5, 0.04, 'cells', ha='center', va='center')
    fig.text(0.06, 0.5, 'ranges', ha='center', va='center', rotation='vertical')
    for ax in axes.flatten():
        ax.set_ylim([0,60])
        ax.set_xticks(sizes)
        # ax.set_xticklabels([0.1, '', '', '', '', 1, '', 2, 4, 6])
        ax.set_xticklabels([0.1, '', '', '', '', '', '', '', '', '', '', 1, '', '', 2, '', '', '', 4, '', 6])
    for col,_ in enumerate(slice_ranges):
        # axes[col,0].set_ylim([-.8,.8])
        axes[col,0].set_ylim([-60,60])
        axes[col,0].set_yticks([-60, -40, -20, 0., 20, 40, 60])
        axes[col,0].set_yticklabels([-60, -40, -20, 0, 20, 40, 60])
        axes[col,0].set_xlim([0,4])
        axes[col,0].set_xticks([.9,1.9,2.9])
        axes[col,0].set_xticklabels(['small', 'equal', 'larger'])
        axes[col,0].spines['right'].set_visible(False)
        axes[col,0].spines['top'].set_visible(False)
        axes[col,0].spines['bottom'].set_visible(False)
    # plt.show()
    plt.savefig( folder_inactive+"/TrialAveragedSizeTuningComparison_"+sheet+"_step"+str(step)+"_box"+str(box)+".png", dpi=100 )
    # plt.savefig( folder_full+"/TrialAveragedSizeTuningComparison_"+sheet+"_"+interval+".png", dpi=100 )
    fig.clf()
    plt.close()
    # garbage
    gc.collect()