def run(): global results # allow inspection from console n_types = params['n_types'] Wab = np.array([[float(str(wab_table.item(i, j).text())) for j in range(n_types)] for i in range(n_types)]) with pg.BusyCursor(): results = run_expt(Wab, n_cells=params['n_cells'], n_expts=params['n_expts'], n_trials=params['n_trials']) cprobs = results['conn'].sum(axis=1) / results['probed'].sum(axis=1) rprobs = results['recip'].sum(axis=1) / results['probed'].sum(axis=1) ex_rprobs = cprobs**2 ratios = rprobs / ex_rprobs y = pg.pseudoScatter(cprobs) c_plt.plot(cprobs, y, clear=True, pen=None, symbol='o') c_plt.addLine(x=Wab.mean()) y = pg.pseudoScatter(rprobs) r_plt.plot(rprobs, y, clear=True, pen=None, symbol='o') y = pg.pseudoScatter(ratios) ratio_plt.plot(ratios, y, clear=True, pen=None, symbol='o') ratio_plt.addLine(x=1) rr_plt.plot(ex_rprobs, rprobs, clear=True, pen=None, symbol='o') l = pg.InfiniteLine(angle=45) rr_plt.addItem(l)
def run():
    """Run one experiment with the weight matrix from the GUI table and redraw plots."""
    global results  # allow inspection from console
    ntyp = params['n_types']
    # Pull the ntyp x ntyp connection-probability matrix out of the table widget.
    table_rows = []
    for i in range(ntyp):
        table_rows.append([float(str(wab_table.item(i, j).text())) for j in range(ntyp)])
    weight_matrix = np.array(table_rows)
    with pg.BusyCursor():
        results = run_expt(weight_matrix, n_cells=params['n_cells'],
                           n_expts=params['n_expts'], n_trials=params['n_trials'])
    # Per-experiment connection / reciprocal-connection probabilities.
    probed_total = results['probed'].sum(axis=1)
    conn_prob = results['conn'].sum(axis=1) / probed_total
    recip_prob = results['recip'].sum(axis=1) / probed_total
    # Reciprocal probability expected under independence, and the observed ratio.
    expected_recip = conn_prob ** 2
    recip_ratio = recip_prob / expected_recip
    # Jittered one-dimensional scatters of each distribution.
    c_plt.plot(conn_prob, pg.pseudoScatter(conn_prob), clear=True, pen=None, symbol='o')
    c_plt.addLine(x=weight_matrix.mean())
    r_plt.plot(recip_prob, pg.pseudoScatter(recip_prob), clear=True, pen=None, symbol='o')
    ratio_plt.plot(recip_ratio, pg.pseudoScatter(recip_ratio), clear=True, pen=None, symbol='o')
    ratio_plt.addLine(x=1)
    # Observed vs expected reciprocity; the 45-degree line marks equality.
    rr_plt.plot(expected_recip, recip_prob, clear=True, pen=None, symbol='o')
    rr_plt.addItem(pg.InfiniteLine(angle=45))
def plot_element_data(self, pre_class, post_class, element, field_name, color='g', trace_plt=None):
    """Build a mean line and a jittered scatter of per-pair values for one matrix element.

    Returns (line, scatter): an InfiniteLine at the mean of element[field_name]
    and a ScatterPlotItem of the individual (non-NaN) values.
    """
    # NOTE(review): trace plotting is forcibly disabled here by overriding the
    # parameter; the trace branches below are dead while this line remains.
    trace_plt = None
    val = element[field_name].mean()
    line = pg.InfiniteLine(val, pen={'color': color, 'width': 2}, movable=False)
    scatter = None
    baseline_window = int(db.default_sample_rate * 5e-3)
    values = []
    traces = []
    point_data = []
    for pair, value in element[field_name].iteritems():
        if np.isnan(value):
            continue
        # BUG FIX: 'traces' was re-initialized to [] on every iteration,
        # discarding previously accumulated traces so the grand average below
        # could only ever contain the last pair's trace.
        if trace_plt is not None:
            # NOTE(review): 'cs' is not defined in this scope; this branch would
            # raise NameError if re-enabled — confirm the intended source object.
            trace = cs.ic_average_response if field_name.startswith('ic') else cs.vc_average_response
            x_offset = cs.ic_fit_latency if field_name.startswith('ic') else cs.vc_fit_latency
            trace = format_trace(trace, baseline_window, x_offset, align='psp')
            trace_plt.plot(trace.time_values, trace.data)
            traces.append(trace)
        values.append(value)
    y_values = pg.pseudoScatter(np.asarray(values, dtype=float), spacing=1)
    # color is expected to be an RGB tuple here; (color + (150,)) appends alpha.
    scatter = pg.ScatterPlotItem(symbol='o', brush=(color + (150,)), pen='w', size=12)
    scatter.setData(values, y_values + 10.)
    if trace_plt is not None:
        grand_trace = TraceList(traces).mean()
        trace_plt.plot(grand_trace.time_values, grand_trace.data, pen={'color': color, 'width': 3})
        units = 'V' if field_name.startswith('ic') else 'A'
        trace_plt.setXRange(0, 20e-3)
        trace_plt.setLabels(left=('', units), bottom=('Time from stimulus', 's'))
    return line, scatter
def plot_example(name, means, stds):
    """Return a PlotItem showing four jittered sample clusters with error bars."""
    # create a plot canvas
    canvas = pg.PlotItem()
    # sample some normal-distributed data clusters (rows after transpose)
    samples = np.random.normal(size=(20, 4), loc=means, scale=stds).T
    # add scatter plots on top; symbolBrush cycles through colors
    for idx in range(4):
        column = samples[idx]
        jitter = pg.pseudoScatter(column, spacing=0.4, bidir=True) * 0.2
        brush = pg.intColor(idx, 6, maxValue=128)
        canvas.plot(x=jitter + idx, y=column, pen=None, symbol='o', symbolBrush=brush)
    # show some error bars (mean +/- std per cluster)
    bars = pg.ErrorBarItem(x=np.arange(4), y=samples.mean(axis=1),
                           height=samples.std(axis=1), beam=0.5,
                           pen={'width': 2, 'color': 'k'})
    canvas.addItem(bars)
    # important: return the PlotItem (or a list of them), instead of showing it
    return canvas
def summary_plot_pulse(grand_trace, feature_list, feature_mean, labels, titles, i, plot=None, color=None, name=None):
    # Plot one or more scalar features (left column: jittered scatter + mean)
    # next to the grand average trace (right column) in a PlotGrid.
    # grand_trace: object with .time_values and .data arrays.
    # feature_list: tuple of per-feature value sequences, or a single sequence.
    # i: x position (column index) at which this dataset is drawn.
    # Returns the PlotGrid so repeated calls can accumulate datasets.
    if type(feature_list) is tuple:
        n_features = len(feature_list)
    else:
        n_features = 1
    if plot is None:
        # First call: build the grid and label the trace column once.
        plot = PlotGrid()
        plot.set_shape(n_features, 2)
        plot.show()
        for g in range(n_features):
            plot[g, 1].addLegend()
            plot[g, 1].setLabels(left=('Vm', 'V'))
            plot[g, 1].setLabels(bottom=('t', 's'))
    for feature in range(n_features):
        if n_features > 1:
            features = feature_list[feature]
            mean = feature_mean[feature]
            label = labels[feature]
            title = titles[feature]
        else:
            features = feature_list
            mean = feature_mean
            label = labels
            title = titles
        plot[feature, 0].setLabels(left=(label[0], label[1]))
        plot[feature, 0].hideAxis('bottom')
        plot[feature, 0].setTitle(title)
        plot[feature, 1].plot(grand_trace.time_values, grand_trace.data, pen=color, name=name)
        # Jitter points horizontally so overlapping values are visible.
        dx = pg.pseudoScatter(np.array(features).astype(float), 0.3, bidir=True)
        # NOTE(review): dx.max() == 0 (e.g. a single point) would divide by zero here — confirm inputs.
        plot[feature, 0].plot((0.3 * dx / dx.max()) + i, features, pen=None, symbol='x', symbolSize=5, symbolBrush=color, symbolPen=None)
        # Larger filled circle marks the pre-computed feature mean.
        plot[feature, 0].plot([i], [mean], pen=None, symbol='o', symbolBrush=color, symbolPen='w', symbolSize=10)
    return plot
def swarm(groups, width=0.7, spacing=1.0, shared_scale=True):
    """Helper function for generating swarm plots.

    Given groups of y values to be shown in a swarm plot, return appropriate
    x values.

    Parameters
    ----------
    groups : list
        List of y-value groups; each group is a list of y values that will
        appear together in a swarm.
    width : float
        The fraction of the total x-axis width to fill
    spacing : float
        The x-axis distance between adjacent groups
    shared_scale : bool
        If True, then the x values in all groups are scaled by the same
        amount. If False, then each group is scaled independently such that
        all groups attempt to fill their alloted width.
        NOTE: shared scaling is not yet implemented; every group is currently
        scaled independently regardless of this flag (TODO).
    """
    from pyqtgraph import pseudoScatter
    x_grps = []
    for i, y_grp in enumerate(groups):
        y_grp = np.asarray(y_grp)
        mask = np.isfinite(y_grp)
        x = np.empty(y_grp.shape)
        x[mask] = pseudoScatter(y_grp[mask], method='histogram', bidir=True)
        # BUG FIX: the scale factor was previously computed as
        # np.abs(x).max() over the WHOLE np.empty array, which includes
        # uninitialized garbage at the ~mask positions and could dominate the
        # max; it also divided by zero when a group had fewer than two finite
        # points. Use only the finite entries and skip scaling when the
        # spread is zero.
        xmax = np.abs(x[mask]).max() if mask.any() else 0.0
        if xmax > 0:
            x[mask] = x[mask] * (0.5 * width * spacing / xmax)
        # Shift this group to its own column on the x axis.
        x[mask] += spacing * i
        x[~mask] = np.nan
        x_grps.append(x)
    return x_grps
def violinPlotScatter(ax, data, symbolColor='k', symbolSize=4, symbol='o'):
    """ Plot data as violin plot with scatter and error bar

    Parameters
    ----------
    ax : pyqtgraph plot instance
        is the axis to plot into
    data : dict
        dictionary containing {pos1: data1, pos2: data2}, where pos is the x
        position for the data in data. Each data set is plotted as a separate
        column
    symbolColor : string, optional
        color of the symbols, defaults to 'k' (black)
    symbolSize : int, optional
        Size of the symbols in the scatter plot, points, defaults to 4
    symbol : string, optional
        The symbol to use, defaults to 'o' (circle)
    """
    y = []
    x = []
    # One integer x position per dictionary key (column).
    xb = np.arange(0, len(data.keys()), 1)
    ybm = [0] * len(data.keys())  # per-column means; np.zeros(len(sdat.keys()))
    ybs = [0] * len(data.keys())  # per-column std devs; np.zeros(len(sdat.keys()))
    for i, k in enumerate(data.keys()):
        yvals = np.array(data[k])
        # Horizontal jitter so overlapping values remain visible.
        xvals = pg.pseudoScatter(yvals, spacing=0.4, bidir=True) * 0.2
        ax.plot(x=xvals + i, y=yvals, pen=None, symbol=symbol,
                symbolSize=symbolSize, symbolBrush=pg.mkBrush(symbolColor))
        y.append(yvals)
        x.append([i] * len(yvals))
        ybm[i] = np.nanmean(yvals)
        ybs[i] = np.nanstd(yvals)
        # Short horizontal bar marking the column mean.
        mbar = pg.PlotDataItem(x=np.array([xb[i] - 0.2, xb[i] + 0.2]),
                               y=np.array([ybm[i], ybm[i]]),
                               pen={'color': 'k', 'width': 0.75})
        ax.addItem(mbar)
    # Error bars show +/- one standard deviation around each mean.
    bar = pg.ErrorBarItem(x=xb, y=np.array(ybm), height=np.array(ybs), beam=0.2,
                          pen={'color': 'k', 'width': 0.75})
    # violin_plot is an external helper defined elsewhere in the project.
    violin_plot(ax, y, xb, bp=False)
    ax.addItem(bar)
    # Label each column position with its dictionary key.
    ticks = [[(v, k) for v, k in enumerate(data.keys())], []]
    ax.getAxis('bottom').setTicks(ticks)
def plot_element_data(self, pre_class, post_class, element, field_name, color='g', trace_plt=None):
    # Build a mean line and a jittered scatter of per-pair values for one
    # matrix element; returns (line, scatter).
    # NOTE(review): trace plotting is forcibly disabled by overriding the
    # parameter on the next line, so every `trace_plt is not None` branch
    # below is dead code — confirm whether this disable is still intended.
    trace_plt = None
    val = element[field_name].mean()
    line = pg.InfiniteLine(val, pen={
        'color': color,
        'width': 2
    }, movable=False)
    scatter = None
    baseline_window = int(db.default_sample_rate * 5e-3)
    values = []
    traces = []
    point_data = []
    for pair, value in element[field_name].iteritems():
        if np.isnan(value):
            continue
        # NOTE(review): resetting 'traces' every iteration discards all
        # previously accumulated traces; with the trace branch disabled this
        # is currently harmless, but it looks like a bug if re-enabled.
        traces = []
        if trace_plt is not None:
            # NOTE(review): 'cs' is not defined in this scope — this branch
            # would raise NameError if re-enabled.
            trace = cs.ic_average_response if field_name.startswith(
                'ic') else cs.vc_average_response
            x_offset = cs.ic_fit_latency if field_name.startswith(
                'ic') else cs.vc_fit_latency
            trace = format_trace(trace, baseline_window, x_offset, align='psp')
            trace_plt.plot(trace.time_values, trace.data)
            traces.append(trace)
        values.append(value)
    # Jitter equal values along y so overlapping points are visible.
    y_values = pg.pseudoScatter(np.asarray(values, dtype=float), spacing=1)
    # color is presumably an RGB tuple here ((color + (150,)) appends alpha) — TODO confirm.
    scatter = pg.ScatterPlotItem(symbol='o', brush=(color + (150, )), pen='w', size=12)
    scatter.setData(values, y_values + 10.)
    if trace_plt is not None:
        grand_trace = TraceList(traces).mean()
        trace_plt.plot(grand_trace.time_values, grand_trace.data, pen={
            'color': color,
            'width': 3
        })
        units = 'V' if field_name.startswith('ic') else 'A'
        trace_plt.setXRange(0, 20e-3)
        trace_plt.setLabels(left=('', units), bottom=('Time from stimulus', 's'))
    return line, scatter
def redraw(self):
    """Refresh every statistics plot from the current world state."""
    self.food_curve.setData(self.food_history)
    self.animals_curve.setData(self.animals_history)
    self.animals_deaths_plot.plot(
        [record[1] for record in self.world.animal_deaths], clear=True)
    self.new_animals_energy_plot.plot(
        [record[1] for record in self.world.new_animal_avg_energy], clear=True)
    # Shared symbol styling for both gene-distribution scatters.
    marker_style = dict(pen=None, symbol='o', symbolSize=5,
                        symbolPen=(255, 255, 255, 200),
                        symbolBrush=(0, 0, 255, 150), clear=True)
    # Jittered scatter of each animal's energy_for_birth gene.
    birth_energy = np.array([a.energy_for_birth for a in self.world.animals])
    birth_jitter = pyqtgraph.pseudoScatter(birth_energy, spacing=0.15)
    self.energy_for_birth_plot.plot(birth_energy, birth_jitter, **marker_style)
    # Jittered scatter of each animal's useless_param gene.
    useless = np.array([a.useless_param for a in self.world.animals])
    useless_jitter = pyqtgraph.pseudoScatter(useless, spacing=0.15)
    self.useless_param_plot.plot(useless, useless_jitter, **marker_style)
def plot_element_data(self, pre_class, post_class, element, field_name, color='g', trace_plt=None):
    # Plot per-pair values for one connectivity-matrix element: a mean line,
    # a clickable jittered scatter, and (in trace_plt) the individual response
    # traces plus their grand average. Returns (line, scatter).
    # NOTE(review): 'fn' is computed but never used below — confirm before removing.
    fn = field_name.split('_all')[0] if field_name.endswith('all') else field_name.split('_first_pulse')[0]
    val = element[field_name].mean()
    line = pg.InfiniteLine(val, pen={'color': color, 'width': 2}, movable=False)
    scatter = None
    baseline_window = int(db.default_sample_rate * 5e-3)
    values = []
    traces = []
    point_data = []
    # Python 2 idiom (.iteritems); element[field_name] is presumably a pandas
    # Series keyed by pair — TODO confirm.
    for pair, value in element[field_name].iteritems():
        # Only plot confirmed synapses with a finite value.
        if pair.synapse is not True:
            continue
        if np.isnan(value):
            continue
        if field_name.endswith('all'):
            # Average-response fields come from the connection_strength record.
            cs = pair.connection_strength
            trace = cs.ic_average_response if field_name.startswith('ic') else cs.vc_average_response
            x_offset = cs.ic_fit_xoffset if field_name.startswith('ic') else cs.vc_fit_xoffset
        elif field_name.endswith('first_pulse'):
            # First-pulse fields come from the avg_first_pulse_fit record.
            fpf = pair.avg_first_pulse_fit
            if fpf is None:
                continue
            trace = fpf.ic_avg_psp_data if field_name.startswith('ic') else fpf.vc_avg_psp_data
            x_offset = fpf.ic_latency if field_name.startswith('ic') else fpf.vc_latency
        if trace is None:
            continue
        values.append(value)
        trace = format_trace(trace, baseline_window, x_offset, align='psp')
        trace_item = trace_plt.plot(trace.time_values, trace.data)
        point_data.append(pair)
        # Make each individual trace clickable and track it per pair id.
        trace_item.pair = pair
        trace_item.curve.setClickable(True)
        trace_item.sigClicked.connect(self.trace_plot_clicked)
        traces.append(trace)
        self.pair_items[pair.id] = [trace_item]
    # Jitter equal values along y so overlapping points are visible.
    y_values = pg.pseudoScatter(np.asarray(values, dtype=float), spacing=1)
    # color is presumably an RGB tuple ((color + (150,)) appends alpha) — TODO confirm.
    scatter = pg.ScatterPlotItem(symbol='o', brush=(color + (150,)), pen='w', size=12)
    scatter.setData(values, y_values + 10., data=point_data)
    # Associate each scatter point with its pair so clicks can be resolved.
    for point in scatter.points():
        pair_id = point.data().id
        self.pair_items[pair_id].append(point)
    scatter.sigClicked.connect(self.scatter_plot_clicked)
    # Grand average of all plotted traces, labeled with the element and n.
    grand_trace = TraceList(traces).mean()
    name = ('%s->%s, n=%d' % (pre_class, post_class, len(traces)))
    trace_plt.plot(grand_trace.time_values, grand_trace.data, pen={'color': color, 'width': 3}, name=name)
    units = 'V' if field_name.startswith('ic') else 'A'
    trace_plt.setXRange(0, 20e-3)
    trace_plt.setLabels(left=('', units), bottom=('Time from stimulus', 's'))
    return line, scatter
def plot_energy_for_birth(self, world, column):
    """Add a jittered scatter of each animal's energy_for_birth gene to the grid."""
    widget = pyqtgraph.PlotWidget()
    # Fix the x axis to the allowed gene range; let y autoscale.
    widget.setXRange(
        world.constants.ENERGY_FOR_BIRTH_MIN,
        world.constants.ENERGY_FOR_BIRTH_MAX,
    )
    widget.enableAutoRange('y')
    self.grid_layout.addWidget(widget, self.row, column)
    gene_values = np.array([a.energy_for_birth for a in world.animals])
    # pseudoScatter spreads identical values vertically for visibility.
    spread = pyqtgraph.pseudoScatter(gene_values, spacing=0.15)
    widget.plot(
        gene_values,
        spread,
        pen=None,
        symbol='o',
        symbolSize=5,
        symbolPen=(255, 255, 255, 200),
        symbolBrush=(0, 0, 255, 150),
        clear=True,
    )
def summary_plot_pulse(feature_list, labels, titles, i, median=False, grand_trace=None, plot=None, color=None, name=None):
    """Plot one or more scalar features as jittered scatter columns with their
    mean (or median), optionally alongside a grand average trace.

    Parameters: feature_list is a tuple of per-feature value sequences (or a
    single sequence); labels/titles follow the same tuple-vs-single shape;
    i is the x column index for this dataset; median selects nanmedian over
    nanmean for multi-feature input. Returns the PlotGrid so repeated calls
    can accumulate datasets.
    """
    if type(feature_list) is tuple:
        n_features = len(feature_list)
    else:
        n_features = 1
    if plot is None:
        # First call: build the grid and add a legend to each trace panel.
        plot = PlotGrid()
        plot.set_shape(n_features, 2)
        plot.show()
        for g in range(n_features):
            plot[g, 1].addLegend()
    for feature in range(n_features):
        if n_features > 1:
            current_feature = feature_list[feature]
            if median is True:
                mean = np.nanmedian(current_feature)
            else:
                mean = np.nanmean(current_feature)
            label = labels[feature]
            title = titles[feature]
        else:
            current_feature = feature_list
            mean = np.nanmean(current_feature)
            label = labels
            title = titles
        plot[feature, 0].setLabels(left=(label[0], label[1]))
        plot[feature, 0].hideAxis('bottom')
        plot[feature, 0].setTitle(title)
        if grand_trace is not None:
            plot[feature, 1].plot(grand_trace.time_values, grand_trace.data, pen=color, name=name)
        if len(current_feature) > 1:
            # Horizontal jitter so overlapping values are visible.
            dx = pg.pseudoScatter(np.array(current_feature).astype(float), 0.7, bidir=True)
            # Large filled circle marks the mean/median.
            plot[feature, 0].plot([i], [mean], symbol='o', symbolSize=20, symbolPen='k', symbolBrush=color)
            # BUG FIX: dx.max() can be 0 (e.g. symmetric two-point scatter),
            # which previously produced NaN/inf x positions from the division.
            dx_max = dx.max()
            if dx_max != 0:
                x_jitter = (0.3 * dx / dx_max) + i
            else:
                x_jitter = np.full(len(dx), float(i))
            plot[feature, 0].plot(x_jitter, current_feature, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=(color[0], color[1], color[2], 100))
        else:
            plot[feature, 0].plot([i], current_feature, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=color)
    return plot
def plot_energy_for_birth(self, world, column):
    # Create a jittered scatter of each animal's energy_for_birth gene and
    # add it to the dashboard grid at (self.row, column).
    energy_for_birth_plot = pyqtgraph.PlotWidget()
    # Fixed x range spans the allowed gene range; y autoscales.
    energy_for_birth_plot.setXRange(world.constants.ENERGY_FOR_BIRTH_MIN, world.constants.ENERGY_FOR_BIRTH_MAX)
    energy_for_birth_plot.enableAutoRange('y')
    self.grid_layout.addWidget(energy_for_birth_plot, self.row, column)
    vals = np.array([animal.energy_for_birth for animal in world.animals])
    # pseudoScatter spreads identical values vertically for visibility.
    y = pyqtgraph.pseudoScatter(vals, spacing=0.15)
    energy_for_birth_plot.plot(vals, y, pen=None, symbol='o', symbolSize=5, symbolPen=(255, 255, 255, 200), symbolBrush=(0, 0, 255, 150), clear=True)
def violinPlotScatter(ax, data, symbolColor='k', symbolSize=4, symbol='o'):
    """ Plot data as violin plot with scatter and error bar

    Parameters
    ----------
    ax : pyqtgraph plot instance
        is the axis to plot into
    data : dict
        dictionary containing {pos1: data1, pos2: data2}, where pos is the x
        position for the data in data. Each data set is plotted as a
        separate column
    symbolColor : string, optional
        color of the symbols, defaults to 'k' (black)
    symbolSize : int, optional
        Size of the symbols in the scatter plot, points, defaults to 4
    symbol : string, optional
        The symbol to use, defaults to 'o' (circle)
    """
    column_values = []
    column_positions = []
    n_cols = len(data.keys())
    positions = np.arange(0, n_cols, 1)
    col_means = [0] * n_cols  # np.zeros(len(sdat.keys()))
    col_stds = [0] * n_cols   # np.zeros(len(sdat.keys()))
    for idx, key in enumerate(data.keys()):
        vals = np.array(data[key])
        # Horizontal jitter keeps overlapping points visible.
        jitter = pg.pseudoScatter(vals, spacing=0.4, bidir=True) * 0.2
        ax.plot(x=jitter + idx, y=vals, pen=None, symbol=symbol,
                symbolSize=symbolSize, symbolBrush=pg.mkBrush(symbolColor))
        column_values.append(vals)
        column_positions.append([idx] * len(vals))
        col_means[idx] = np.nanmean(vals)
        col_stds[idx] = np.nanstd(vals)
        # Short horizontal bar at the column mean.
        mean_bar = pg.PlotDataItem(
            x=np.array([positions[idx] - 0.2, positions[idx] + 0.2]),
            y=np.array([col_means[idx], col_means[idx]]),
            pen={'color': 'k', 'width': 0.75})
        ax.addItem(mean_bar)
    # Error bars show +/- one standard deviation around each mean.
    err_bars = pg.ErrorBarItem(x=positions, y=np.array(col_means),
                               height=np.array(col_stds), beam=0.2,
                               pen={'color': 'k', 'width': 0.75})
    violin_plot(ax, column_values, positions, bp=False)
    ax.addItem(err_bars)
    # Label each column position with its dictionary key.
    tick_list = [[(pos, key) for pos, key in enumerate(data.keys())], []]
    ax.getAxis('bottom').setTicks(tick_list)
def plot_element_data(self, pre_class, post_class, element, field_name, color='g', trace_plt=None):
    # Build a mean line and a clickable jittered scatter of per-pair values
    # for one matrix element; returns (line, scatter).
    # NOTE(review): trace plotting is forcibly disabled by overriding the
    # parameter on the next line; every `trace_plt is not None` branch below
    # is dead while this remains.
    trace_plt = None
    val = element[field_name].mean()
    line = pg.InfiniteLine(val, pen={'color': color, 'width': 2}, movable=False)
    scatter = None
    baseline_window = int(db.default_sample_rate * 5e-3)
    values = []
    traces = []
    point_data = []
    for pair, value in element[field_name].iteritems():
        if np.isnan(value):
            continue
        # NOTE(review): resetting 'traces' each iteration discards earlier
        # traces; harmless while the trace branch is disabled, but looks like
        # a bug if it is ever re-enabled.
        traces = []
        if trace_plt is not None:
            # NOTE(review): 'rsf' is not defined anywhere in this scope —
            # re-enabling this branch would raise NameError. Presumably it
            # should be pair.resting_state_fit; confirm against the newer
            # revision of this method.
            if rsf is not None:
                trace = rsf.ic_avg_data
                start_time = rsf.ic_avg_data_start_time
                latency = pair.synapse.latency
                if latency is not None and start_time is not None:
                    xoffset = start_time - latency
                    trace = format_trace(trace, baseline_window, x_offset=xoffset, align='psp')
                    trace_plt.plot(trace.time_values, trace.data)
                    traces.append(trace)
        values.append(value)
        point_data.append(pair)
    # Jitter equal values along y so overlapping points are visible.
    y_values = pg.pseudoScatter(np.asarray(values, dtype=float), spacing=1)
    # color is presumably an RGB tuple ((color + (150,)) appends alpha) — TODO confirm.
    scatter = pg.ScatterPlotItem(symbol='o', brush=(color + (150,)), pen='w', size=12)
    scatter.setData(values, y_values + 10., data=point_data)
    # Track scatter points by pair id so clicks can be resolved later.
    for point in scatter.points():
        pair_id = point.data().id
        self.pair_items[pair_id] = [point, color]
    scatter.sigClicked.connect(self.scatter_plot_clicked)
    if len(traces) > 0:
        # Grand average of the individual traces (only reachable if the
        # trace branch above is re-enabled).
        grand_trace = TSeriesList(traces).mean()
        trace_plt.plot(grand_trace.time_values, grand_trace.data, pen={'color': color, 'width': 3})
        units = 'V' if field_name.startswith('ic') else 'A'
        trace_plt.setXRange(0, 20e-3)
        trace_plt.setLabels(left=('', units), bottom=('Time from stimulus', 's'))
    return line, scatter
# plot the dose-response curve for this cell plt.plot(power, tot, pen=color) # beeswarm plot of maximum connection strength for each cell # (seems to be the best way to determine relative connection strength) plt = pg.plot(title="Response strengths at maximum stimulus intensity", labels={ 'left': ('evoked synaptic strength', 'V'), 'bottom': 'connection type' }) plt.getAxis('bottom').setTicks([[(i, 'type %d' % i) for i in range(len(connection_types))]]) for i, color in enumerate(type_colors): y = np.array(max_strength[i]) x = pg.pseudoScatter(y, bidir=True) x = 0.2 * x / x.max() plt.plot(x + i, y, pen=None, symbol='o', symbolBrush=color, symbolPen=None) x = np.array([0, 1]) y = np.array(max_strength).mean(axis=1) h = np.array(max_strength).std(axis=1) err = pg.ErrorBarItem(x=x, y=y, height=h, width=0.1) plt.addItem(err) # Plot the slope at the end of each dose-response curve versus max connection strength # plt = pg.plot( labels={ 'left': 'final normalized dose-response slope', 'bottom': 'evoked synaptic strength' })
def plot_element_data(self, pre_class, post_class, element, field_name, color='g', trace_plt=None):
    # Plot per-pair values for one matrix element: a mean line, a clickable
    # jittered scatter, and the per-pair resting-state-fit traces in a pair of
    # trace panels (trace_plt[0]: voltage clamp, trace_plt[1]: selected mode).
    # Returns (line, scatter).
    val = element[field_name].mean()
    line = pg.InfiniteLine(val, pen={'color': color, 'width': 2}, movable=False)
    scatter = None
    baseline_window = int(db.default_sample_rate * 5e-3)
    values = []
    tracesA = []  # traces for the primary panel trace_plt[1]
    tracesB = []  # VC traces for trace_plt[0] (Latency view only)
    point_data = []
    for pair, value in element[field_name].iteritems():
        latency = self.results.loc[pair]['Latency']
        trace_itemA = None
        trace_itemB = None
        # Only confirmed synapses with a finite value are plotted.
        if pair.has_synapse is not True:
            continue
        if np.isnan(value):
            continue
        # NOTE(review): syn_typ and nrmse are assigned but only used by the
        # commented-out quality filter below — confirm before removing.
        syn_typ = pair.synapse.synapse_type
        rsf = pair.resting_state_fit
        if rsf is not None:
            nrmse = rsf.vc_nrmse if field_name.startswith('PSC') else rsf.ic_nrmse
            # if nrmse is None or nrmse > 0.8:
            #     continue
            # PSC* fields use voltage-clamp data; everything else current clamp.
            data = rsf.vc_avg_data if field_name.startswith('PSC') else rsf.ic_avg_data
            traceA = TSeries(data=data, sample_rate=db.default_sample_rate)
            if field_name.startswith('PSC'):
                traceA = bessel_filter(traceA, 5000, btype='low', bidir=True)
                # NOTE(review): this second call's return value is discarded —
                # bessel_filter presumably returns a new series, making this a
                # no-op duplicate of the line above; confirm and remove.
                bessel_filter(traceA, 5000, btype='low', bidir=True)
            start_time = rsf.vc_avg_data_start_time if field_name.startswith('PSC') else rsf.ic_avg_data_start_time
            if latency is not None and start_time is not None:
                # Latency view aligns to the presynaptic spike; other views
                # align to response onset.
                if field_name == 'Latency':
                    xoffset = start_time + latency
                else:
                    xoffset = start_time - latency
                baseline_window = [abs(xoffset)-1e-3, abs(xoffset)]
                traceA = format_trace(traceA, baseline_window, x_offset=xoffset, align='psp')
                trace_itemA = trace_plt[1].plot(traceA.time_values, traceA.data)
                trace_itemA.pair = pair
                trace_itemA.curve.setClickable(True)
                trace_itemA.sigClicked.connect(self.trace_plot_clicked)
                tracesA.append(traceA)
                # In the Latency view, additionally show the VC trace in panel 0.
                if field_name == 'Latency' and rsf.vc_nrmse is not None: #and rsf.vc_nrmse < 0.8:
                    traceB = TSeries(data=rsf.vc_avg_data, sample_rate=db.default_sample_rate)
                    traceB = bessel_filter(traceB, 5000, btype='low', bidir=True)
                    start_time = rsf.vc_avg_data_start_time
                    if latency is not None and start_time is not None:
                        xoffset = start_time + latency
                        baseline_window = [abs(xoffset)-1e-3, abs(xoffset)]
                        traceB = format_trace(traceB, baseline_window, x_offset=xoffset, align='psp')
                        trace_itemB = trace_plt[0].plot(traceB.time_values, traceB.data)
                        trace_itemB.pair = pair
                        trace_itemB.curve.setClickable(True)
                        trace_itemB.sigClicked.connect(self.trace_plot_clicked)
                        tracesB.append(traceB)
                self.pair_items[pair.id] = [trace_itemA, trace_itemB]
        # Only pairs that actually produced a trace contribute to the scatter.
        if trace_itemA is not None:
            values.append(value)
            point_data.append(pair)
    # Jitter equal values along y so overlapping points are visible.
    y_values = pg.pseudoScatter(np.asarray(values, dtype=float), spacing=1)
    # color is presumably an RGB tuple ((color + (150,)) appends alpha) — TODO confirm.
    scatter = pg.ScatterPlotItem(symbol='o', brush=(color + (150,)), pen='w', size=12)
    scatter.setData(values, y_values + 10., data=point_data)
    for point in scatter.points():
        pair_id = point.data().id
        self.pair_items[pair_id].extend([point, color])
    scatter.sigClicked.connect(self.scatter_plot_clicked)
    if len(tracesA) > 0:
        if field_name == 'Latency':
            # Dotted line marks the presynaptic spike time.
            spike_line = pg.InfiniteLine(0, pen={'color': 'w', 'width': 1, 'style': pg.QtCore.Qt.DotLine}, movable=False)
            trace_plt[0].addItem(spike_line)
            x_label = 'Time from presynaptic spike'
        else:
            x_label = 'Response Onset'
        grand_trace = TSeriesList(tracesA).mean()
        name = ('%s->%s, n=%d' % (pre_class, post_class, len(tracesA)))
        trace_plt[1].plot(grand_trace.time_values, grand_trace.data, pen={'color': color, 'width': 3}, name=name)
        units = 'A' if field_name.startswith('PSC') else 'V'
        title = 'Voltage Clamp' if field_name.startswith('PSC') else 'Current Clamp'
        trace_plt[1].setXRange(-5e-3, 20e-3)
        trace_plt[1].setLabels(left=('', units), bottom=(x_label, 's'))
        trace_plt[1].setTitle(title)
    if len(tracesB) > 0:
        # NOTE(review): 'units' is defined only in the tracesA block above;
        # this relies on tracesB being non-empty only when tracesA is too.
        trace_plt[1].setLabels(right=('', units))
        trace_plt[1].hideAxis('left')
        spike_line = pg.InfiniteLine(0, pen={'color': 'w', 'width': 1, 'style': pg.QtCore.Qt.DotLine}, movable=False)
        trace_plt[0].addItem(spike_line)
        grand_trace = TSeriesList(tracesB).mean()
        trace_plt[0].plot(grand_trace.time_values, grand_trace.data, pen={'color': color, 'width': 3})
        trace_plt[0].setXRange(-5e-3, 20e-3)
        trace_plt[0].setLabels(left=('', 'A'), bottom=('Time from presynaptic spike', 's'))
        trace_plt[0].setTitle('Voltage Clamp')
    return line, scatter
def updatePlot(self):
    # Rebuild the scatter plot from the current data, filter, color map and
    # field selection. Handles 1-field (column scatter) and 2-field modes,
    # converts enum/string fields to numeric positions, masks NaNs, and
    # applies beeswarm jitter to enum axes.
    self.plot.clear()
    if self.data is None or len(self.data) == 0:
        return
    # Lazily (re)compute the filtered view of the data.
    if self.filtered is None:
        mask = self.filter.generateMask(self.data)
        self.filtered = self.data[mask]
        self.filteredIndices = self.indices[mask]
    data = self.filtered
    if len(data) == 0:
        return

    colors = np.array([pg.mkBrush(*x) for x in self.colorMap.map(data)])
    style = self.style.map(data)

    ## Look up selected columns and units
    sel = list([str(item.text()) for item in self.fieldList.selectedItems()])
    units = list([item.opts.get('units', '') for item in self.fieldList.selectedItems()])
    if len(sel) == 0:
        self.plot.setTitle('')
        return

    if len(sel) == 1:
        # One field selected: values on x, pseudo-scattered counts on y.
        self.plot.setLabels(left=('N', ''), bottom=(sel[0], units[0]), title='')
        if len(data) == 0:
            return
        #x = data[sel[0]]
        #y = None
        xy = [data[sel[0]], None]
    elif len(sel) == 2:
        # Two fields selected: conventional x/y scatter.
        self.plot.setLabels(left=(sel[1],units[1]), bottom=(sel[0],units[0]))
        if len(data) == 0:
            return
        xy = [data[sel[0]], data[sel[1]]]
        #xydata = []
        #for ax in [0,1]:
            #d = data[sel[ax]]
            ### scatter catecorical values just a bit so they show up better in the scatter plot.
            ##if sel[ax] in ['MorphologyBSMean', 'MorphologyTDMean', 'FIType']:
                ##d += np.random.normal(size=len(cells), scale=0.1)
            #xydata.append(d)
        #x,y = xydata

    ## convert enum-type fields to float, set axis labels
    enum = [False, False]
    for i in [0,1]:
        axis = self.plot.getAxis(['bottom', 'left'][i])
        if xy[i] is not None and (self.fields[sel[i]].get('mode', None) == 'enum' or xy[i].dtype.kind in ('S', 'O')):
            # Map each categorical value to its index; unknown values map past the end.
            vals = self.fields[sel[i]].get('values', list(set(xy[i])))
            xy[i] = np.array([vals.index(x) if x in vals else len(vals) for x in xy[i]], dtype=float)
            axis.setTicks([list(enumerate(vals))])
            enum[i] = True
        else:
            axis.setTicks(None)  # reset to automatic ticking

    ## mask out any nan values
    mask = np.ones(len(xy[0]), dtype=bool)
    if xy[0].dtype.kind == 'f':
        mask &= np.isfinite(xy[0])
    if xy[1] is not None and xy[1].dtype.kind == 'f':
        mask &= np.isfinite(xy[1])
    xy[0] = xy[0][mask]
    # Apply the same mask to every per-point style array.
    for k in style.keys():
        if style[k] is None:
            continue
        style[k] = style[k][mask]
    style['symbolBrush'] = colors[mask]
    data = data[mask]
    indices = self.filteredIndices[mask]

    ## Scatter y-values for a histogram-like appearance
    if xy[1] is None:
        ## column scatter plot
        xy[1] = pg.pseudoScatter(xy[0])
    else:
        xy[1] = xy[1][mask]

    ## beeswarm plots
    for ax in [0,1]:
        if not enum[ax]:
            continue
        # Jitter points within each enum category along the other axis.
        imax = int(xy[ax].max()) if len(xy[ax]) > 0 else 0
        for i in range(imax+1):
            keymask = xy[ax] == i
            scatter = pg.pseudoScatter(xy[1-ax][keymask], bidir=True)
            if len(scatter) == 0:
                continue
            smax = np.abs(scatter).max()
            if smax != 0:
                scatter *= 0.2 / smax
            xy[ax][keymask] += scatter

    # Disconnect the old click handler before replacing the scatter item.
    if self.scatterPlot is not None:
        try:
            self.scatterPlot.sigPointsClicked.disconnect(self.plotClicked)
        except:
            pass

    # Cache what is currently displayed for click handling / selection.
    self._visibleXY = xy
    self._visibleData = data
    self._visibleIndices = indices
    self._indexMap = None
    self.scatterPlot = self.plot.plot(xy[0], xy[1], data=data, **style)
    self.scatterPlot.sigPointsClicked.connect(self.plotClicked)
    self.updateSelected()
def distance_plot(connected, distance, plots=None, color=(100, 100, 255), size=10, window=40e-6, name=None, fill_alpha=30):
    """Draw connectivity vs distance profiles with confidence intervals.

    Parameters
    ----------
    connected : boolean array
        Whether a synaptic connection was found for each probe
    distance : array
        Distance between cells for each probe
    plots : list of PlotWidget | PlotItem (optional)
        Two plots used to display distance profile and scatter plot.
    color : tuple
        (R, G, B) color values for line and confidence interval. The confidence interval
        will be drawn with reduced opacity (see *fill_alpha*)
    size : int
        size of scatter plot symbol
    window : float
        Width of distance window over which proportions
        are calculated for each point on the profile line.
    fill_alpha : int
        Opacity of confidence interval fill (0-255)

    Note: using a spacing value that is smaller than the window size may cause
    an otherwise smooth decrease over distance to instead look more like a
    series of downward steps.
    """
    color = pg.colorTuple(pg.mkColor(color))[:3]
    connected = np.array(connected).astype(float)
    distance = np.array(distance)

    # scatter plot connections probed
    if plots is None:
        # Build a two-row grid: profile below, scatter strip above.
        grid = PlotGrid()
        grid.set_shape(2, 1)
        grid.grid.ci.layout.setRowStretchFactor(0, 5)
        grid.grid.ci.layout.setRowStretchFactor(1, 10)
        plots = (grid[1, 0], grid[0, 0])
        plots[0].grid = grid
        plots[0].addLegend()
        grid.show()
    plots[0].setLabels(bottom=('distance', 'm'), left='connection probability')
    if plots[1] is not None:
        # scatter points a bit: connected probes are pushed below zero,
        # unconnected probes above, each with pseudoScatter jitter.
        pts = np.vstack([distance, connected]).T
        conn = pts[:, 1] == 1
        unconn = pts[:, 1] == 0
        if np.any(conn):
            cscat = pg.pseudoScatter(pts[:, 0][conn], spacing=10e-6, bidir=False)
            mx = abs(cscat).max()
            if mx != 0:
                cscat = cscat * 0.2  # / mx
            pts[:, 1][conn] = -5e-5 - cscat
        if np.any(unconn):
            uscat = pg.pseudoScatter(pts[:, 0][unconn], spacing=10e-6, bidir=False)
            mx = abs(uscat).max()
            if mx != 0:
                uscat = uscat * 0.2  # / mx
            pts[:, 1][unconn] = uscat
        plots[1].setXLink(plots[0])
        plots[1].hideAxis('bottom')
        plots[1].hideAxis('left')
        color2 = color + (100, )
        scatter = plots[1].plot(pts[:, 0], pts[:, 1], pen=None, symbol='o', labels={'bottom': ('distance', 'm')}, size=size, symbolBrush=color2, symbolPen=None, name=name)
        # Additive composition makes overlapping points brighter.
        scatter.scatter.opts['compositionMode'] = pg.QtGui.QPainter.CompositionMode_Plus

    # use a sliding window to plot the proportion of connections found along with a 95% confidence interval
    # for connection probability
    bin_edges = np.arange(0, 500e-6, window)
    xvals, prop, lower, upper = connectivity_profile(connected, distance, bin_edges)

    # plot connection probability and confidence intervals
    color2 = [c / 3.0 for c in color]
    # Bin centers from bin edges.
    xvals = (xvals[:-1] + xvals[1:]) * 0.5
    mid_curve = plots[0].plot(xvals, prop, pen={'color': color, 'width': 3}, antialias=True, name=name)
    # Invisible boundary curves exist only to anchor the CI fill below.
    upper_curve = plots[0].plot(xvals, upper, pen=(0, 0, 0, 0), antialias=True)
    lower_curve = plots[0].plot(xvals, lower, pen=(0, 0, 0, 0), antialias=True)
    upper_curve.setVisible(False)
    lower_curve.setVisible(False)
    color2 = color + (fill_alpha, )
    fill = pg.FillBetweenItem(upper_curve, lower_curve, brush=color2)
    fill.setZValue(-10)
    plots[0].addItem(fill, ignoreBounds=True)
    return plots, xvals, prop, upper, lower
def update_display(self):
    # Refresh the induction/recovery plots: group event amplitudes into
    # per-sweep trains keyed by induction frequency and recovery delay, show
    # per-pulse amplitude swarms with the model's expected amplitudes, and
    # plot adjacent-pulse amplitude correlations normalized by the model.
    ModelResultView.update_display(self)
    result = self._parent.result
    spikes = result['result']['spike_time']
    amps = result['result']['amplitude']
    meta = result['event_meta']
    self.corr_plot.clear()

    # generate a list of all trains sorted by stimulus
    trains = {}  # {ind_f: {rec_d: [[a1, a2, ..a12], [b1, b2, ..b12], ...], ...}, ...}
    current_sweep = None
    skip_sweep = False
    current_train = []
    for i in range(len(amps)):
        sweep_id = meta['sync_rec_ext_id'][i]
        ind_f = meta['induction_frequency'][i]
        rec_d = meta['recovery_delay'][i]
        if sweep_id != current_sweep:
            # New sweep: start a fresh train and register it under its stimulus.
            skip_sweep = False
            current_sweep = sweep_id
            current_train = []
            ind_trains = trains.setdefault(ind_f, {})
            rec_trains = ind_trains.setdefault(rec_d, [])
            rec_trains.append(current_train)
        if skip_sweep:
            continue
        # Any non-finite amplitude or spike time invalidates the rest of the sweep.
        if not np.isfinite(amps[i]) or not np.isfinite(spikes[i]):
            skip_sweep = True
            continue
        current_train.append(amps[i])

    # scatter plots of event amplitudes sorted by pulse number
    for ind_i, ind_f in enumerate([20, 50, 100]):
        ind_trains = trains.get(ind_f, {})

        # collect all induction events by pulse number
        ind_pulses = [[] for i in range(12)]
        for rec_d, rec_trains in ind_trains.items():
            for train in rec_trains:
                for i, amp in enumerate(train):
                    ind_pulses[i].append(amp)

        x = []
        y = []
        for i in range(12):
            if len(ind_pulses[i]) == 0:
                continue
            y.extend(ind_pulses[i])
            # Horizontal jitter within each pulse-number column.
            xs = pg.pseudoScatter(np.array(ind_pulses[i]), bidir=True, shuffle=True)
            xs /= np.abs(xs).max() * 4
            x.extend(xs + i)
        self.ind_plots[ind_i].clear()
        self.ind_plots[ind_i].plot(x, y, pen=None, symbol='o')

        # re-model based on mean amplitudes
        # NOTE(review): np.arange(12) / ind_f floor-divides under Python 2
        # semantics (both operands are ints) — confirm intended division.
        mean_times = np.arange(12) / ind_f
        # Pulses 9-12 are the recovery pulses, offset by 250 ms.
        mean_times[8:] += 0.25
        model = result['model']
        params = result['params'].copy()
        params.update(result['optimized_params'])
        mean_result = model.measure_likelihood(mean_times, amplitudes=None, params=params)
        expected_amps = mean_result['result']['expected_amplitude']
        self.ind_plots[ind_i].plot(expected_amps, pen='w', symbol='d', symbolBrush='y')

        # normalize events by model prediction
        x = []
        y = []
        for rec_d, rec_trains in ind_trains.items():
            for train in rec_trains:
                # Residuals relative to the model's expected amplitudes.
                train = [t - expected_amps[i] for i, t in enumerate(train)]
                # Pair each residual with the previous pulse's residual.
                for i in range(1, len(train)):
                    x.append(train[i - 1])
                    y.append(train[i])
        x = np.array(x)
        y = np.array(y)
        # Split by the sign of the preceding residual.
        y1 = y[x < 0]
        y2 = y[x > 0]
        x1 = pg.pseudoScatter(y1, bidir=True)
        x2 = pg.pseudoScatter(y2, bidir=True)
        # NOTE(review): x1.max()/x2.max() may be 0 for tiny groups — possible NaN here.
        x1 = 0.25 * x1 / x1.max()
        x2 = 0.25 * x2 / x2.max()
        self.corr_plot.plot(x1, y1, pen=None, symbol='o')
        self.corr_plot.plot(x2 + 1, y2, pen=None, symbol='o')
def plot_element_data(self, pre_class, post_class, element, field_name, color='g', trace_plt=None):
    """Plot per-pair values of *field_name* for one connectivity-matrix element.

    Draws a mean line and a clickable beeswarm scatter of the values; when
    *trace_plt* is given, also plots each pair's average response trace and
    the grand average.

    Parameters
    ----------
    pre_class, post_class :
        Cell classes for this matrix element (used only for the legend name).
    element :
        Mapping of field name -> per-pair series (pandas-style; iterated
        with ``iteritems``).
    field_name : str
        Field to plot; its prefix ('ic'/'vc') selects clamp mode and its
        suffix ('all'/'first_pulse') selects the data source per pair.
    color : pyqtgraph color
        Color for the mean line / scatter brush.
    trace_plt : PlotItem | None
        Optional plot for the response traces.

    Returns
    -------
    (line, scatter) : (pg.InfiniteLine, pg.ScatterPlotItem)
    """
    val = element[field_name].mean()
    line = pg.InfiniteLine(val, pen={'color': color, 'width': 2}, movable=False)
    scatter = None
    baseline_window = int(db.default_sample_rate * 5e-3)
    values = []
    traces = []
    point_data = []
    for pair, value in element[field_name].iteritems():
        # only include confirmed synapses with a finite value
        if pair.synapse is not True:
            continue
        if np.isnan(value):
            continue
        if field_name.endswith('all'):
            cs = pair.connection_strength
            trace = cs.ic_average_response if field_name.startswith('ic') else cs.vc_average_response
            x_offset = cs.ic_fit_xoffset if field_name.startswith('ic') else cs.vc_fit_xoffset
        elif field_name.endswith('first_pulse'):
            fpf = pair.avg_first_pulse_fit
            if fpf is None:
                continue
            trace = fpf.ic_avg_psp_data if field_name.startswith('ic') else fpf.vc_avg_psp_data
            x_offset = fpf.ic_latency if field_name.startswith('ic') else fpf.vc_latency
        else:
            # FIX: previously fell through with `trace` undefined (NameError);
            # unknown field naming conventions are now skipped explicitly.
            continue
        if trace is None:
            continue
        values.append(value)
        point_data.append(pair)
        # make sure a (possibly empty) item list exists for this pair so the
        # scatter-point registration below never hits a missing key
        self.pair_items[pair.id] = []
        # FIX: trace plotting is now guarded -- trace_plt defaults to None
        # and was previously dereferenced unconditionally.
        if trace_plt is not None:
            trace = format_trace(trace, baseline_window, x_offset, align='psp')
            trace_item = trace_plt.plot(trace.time_values, trace.data)
            trace_item.pair = pair
            trace_item.curve.setClickable(True)
            trace_item.sigClicked.connect(self.trace_plot_clicked)
            traces.append(trace)
            self.pair_items[pair.id] = [trace_item]

    # beeswarm of values: pseudoScatter picks non-overlapping y offsets
    y_values = pg.pseudoScatter(np.asarray(values, dtype=float), spacing=1)
    scatter = pg.ScatterPlotItem(symbol='o', brush=(color + (150,)), pen='w', size=12)
    scatter.setData(values, y_values + 10., data=point_data)
    # register each scatter point under its pair for cross-highlighting
    for point in scatter.points():
        pair_id = point.data().id
        self.pair_items[pair_id].append(point)
    scatter.sigClicked.connect(self.scatter_plot_clicked)

    if trace_plt is not None:
        grand_trace = TraceList(traces).mean()
        name = ('%s->%s, n=%d' % (pre_class, post_class, len(traces)))
        trace_plt.plot(grand_trace.time_values, grand_trace.data, pen={'color': color, 'width': 3}, name=name)
        units = 'V' if field_name.startswith('ic') else 'A'
        trace_plt.setXRange(0, 20e-3)
        trace_plt.setLabels(left=('', units), bottom=('Time from stimulus', 's'))
    return line, scatter
plt2 = win.addPlot() ## make interesting distribution of values vals = np.hstack( [np.random.normal(size=500), np.random.normal(size=260, loc=4)]) ## compute standard histogram y, x = np.histogram(vals, bins=np.linspace(-3, 8, 40)) ## Using stepMode=True causes the plot to draw two lines for each sample. ## notice that len(x) == len(y)+1 plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150)) ## Now draw all points as a nicely-spaced scatter plot y = pg.pseudoScatter(vals, spacing=0.15) #plt2.plot(vals, y, pen=None, symbol='o', symbolSize=5) plt2.plot(vals, y, pen=None, symbol='+', symbolSize=5, symbolPen=(255, 255, 255, 200), symbolBrush=(0, 0, 255, 150)) win.show() ## Start Qt event loop unless running in interactive mode or using pyside. if __name__ == '__main__': import sys if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
import pyqtgraph as pg from pyqtgraph.Qt import QtCore, QtGui import numpy as np win = pg.GraphicsLayoutWidget(show=True) win.resize(800,350) win.setWindowTitle('pyqtgraph example: Histogram') plt1 = win.addPlot() plt2 = win.addPlot() ## make interesting distribution of values vals = np.hstack([np.random.normal(size=500), np.random.normal(size=260, loc=4)]) ## compute standard histogram y,x = np.histogram(vals, bins=np.linspace(-3, 8, 40)) ## Using stepMode=True causes the plot to draw two lines for each sample. ## notice that len(x) == len(y)+1 plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150)) ## Now draw all points as a nicely-spaced scatter plot y = pg.pseudoScatter(vals, spacing=0.15) #plt2.plot(vals, y, pen=None, symbol='o', symbolSize=5) plt2.plot(vals, y, pen=None, symbol='o', symbolSize=5, symbolPen=(255,255,255,200), symbolBrush=(0,0,255,150)) ## Start Qt event loop unless running in interactive mode or using pyside. if __name__ == '__main__': import sys if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): QtGui.QApplication.instance().exec_()
def distance_plot(connected, distance, plots=None, color=(100, 100, 255), size=10, window=40e-6, spacing=None, name=None, fill_alpha=30):
    """Draw connectivity vs distance profiles with confidence intervals.

    Parameters
    ----------
    connected : boolean array
        Whether a synaptic connection was found for each probe
    distance : array
        Distance between cells for each probe
    plots : list of PlotWidget | PlotItem (optional)
        Two plots used to display distance profile and scatter plot.
    color : tuple
        (R, G, B) color values for line and confidence interval. The
        confidence interval will be drawn with alpha=100
    size : int
        size of scatter plot symbol
    window : float
        Width of distance window over which proportions are calculated for
        each point on the profile line.
    spacing : float
        Distance spacing between points on the profile line
    name : str (optional)
        Legend name for the plotted curves.
    fill_alpha : int
        Alpha used for the confidence-interval fill.

    Returns
    -------
    tuple
        (plots, ci_xvals, prop, upper, lower)

    Note: using a spacing value that is smaller than the window size may
    cause an otherwise smooth decrease over distance to instead look more
    like a series of downward steps.
    """
    color = pg.colorTuple(pg.mkColor(color))[:3]
    connected = np.array(connected).astype(float)
    distance = np.array(distance)
    pts = np.vstack([distance, connected]).T

    # scatter points a bit: connected probes are jittered below -5e-5 and
    # unconnected probes above 0 so the two populations stay separated
    conn = pts[:, 1] == 1
    unconn = pts[:, 1] == 0
    if np.any(conn):
        cscat = pg.pseudoScatter(pts[:, 0][conn], spacing=10e-6, bidir=False)
        mx = abs(cscat).max()
        if mx != 0:
            cscat = cscat * 0.2  # / mx  (normalization deliberately disabled)
        pts[:, 1][conn] = -5e-5 - cscat
    if np.any(unconn):
        uscat = pg.pseudoScatter(pts[:, 0][unconn], spacing=10e-6, bidir=False)
        mx = abs(uscat).max()
        if mx != 0:
            uscat = uscat * 0.2  # / mx  (normalization deliberately disabled)
        pts[:, 1][unconn] = uscat

    # scatter plot connections probed; build a default 2x1 grid if none given
    if plots is None:
        grid = PlotGrid()
        grid.set_shape(2, 1)
        grid.grid.ci.layout.setRowStretchFactor(0, 5)
        grid.grid.ci.layout.setRowStretchFactor(1, 10)
        plots = (grid[1, 0], grid[0, 0])
        plots[0].grid = grid
        plots[0].addLegend()
        grid.show()
    plots[0].setLabels(bottom=('distance', 'm'), left='connection probability')
    if plots[1] is not None:
        plots[1].setXLink(plots[0])
        plots[1].hideAxis('bottom')
        plots[1].hideAxis('left')
        color2 = color + (100,)
        scatter = plots[1].plot(pts[:, 0], pts[:, 1], pen=None, symbol='o', labels={'bottom': ('distance', 'm')}, size=size, symbolBrush=color2, symbolPen=None, name=name)
        # additive composition makes overlapping points appear brighter
        scatter.scatter.opts['compositionMode'] = pg.QtGui.QPainter.CompositionMode_Plus

    # use a sliding window to plot the proportion of connections found along
    # with a 95% confidence interval for connection probability
    if spacing is None:
        spacing = window / 4.0
    xvals = np.arange(window / 2.0, 500e-6, spacing)
    upper = []
    lower = []
    prop = []
    ci_xvals = []
    for x in xvals:
        minx = x - window / 2.0
        maxx = x + window / 2.0
        # select points inside this window
        mask = (distance >= minx) & (distance <= maxx)
        pts_in_window = connected[mask]
        # compute stats for window
        n_probed = pts_in_window.shape[0]
        n_conn = pts_in_window.sum()
        if n_probed == 0:
            prop.append(np.nan)
        else:
            prop.append(n_conn / n_probed)
            # 'beta' = Clopper-Pearson exact binomial interval
            ci = proportion_confint(n_conn, n_probed, method='beta')
            lower.append(ci[0])
            upper.append(ci[1])
            ci_xvals.append(x)

    # plot connection probability and confidence intervals
    color2 = [c / 3.0 for c in color]  # NOTE(review): overwritten below; appears unused
    mid_curve = plots[0].plot(xvals, prop, pen={'color': color, 'width': 3}, antialias=True, name=name)
    # invisible bounding curves carry the CI data for the fill item
    upper_curve = plots[0].plot(ci_xvals, upper, pen=(0, 0, 0, 0), antialias=True)
    lower_curve = plots[0].plot(ci_xvals, lower, pen=(0, 0, 0, 0), antialias=True)
    upper_curve.setVisible(False)
    lower_curve.setVisible(False)
    color2 = color + (fill_alpha,)
    fill = pg.FillBetweenItem(upper_curve, lower_curve, brush=color2)
    fill.setZValue(-10)
    plots[0].addItem(fill, ignoreBounds=True)
    return plots, ci_xvals, prop, upper, lower
def summary_plot_pulse(feature_list, labels, titles, i, median=False, grand_trace=None, plot=None, color=None, name=None):
    """ Plots features of single-pulse responses such as amplitude, latency,
    etc. for group analysis. Can be used for one group but is ideal for
    comparing across many groups in the feature_list.

    Parameters
    ----------
    feature_list : list of lists of floats
        single-pulse features such as amplitude. Can be multiple features
        each a list themselves
    labels : list of pyqtgraph.LabelItem
        axis labels, must be a list of same length as feature_list
    titles : list of strings
        plot title, must be a list of same length as feature_list
    i : integer
        iterator to place groups along x-axis
    median : boolean
        to calculate median (True) vs mean (False), default is False
    grand_trace : neuroanalysis.data.TraceView object
        option to plot response trace alongside scatter plot, default is None
    plot : pyqtgraph.PlotItem
        If not None, plot the data on the referenced pyqtgraph object.
    color : tuple
        plot color
    name : pyqtgraph.LegendItem

    Returns
    -------
    plot : pyqtgraph.PlotItem
        2 x n plot with scatter plot and optional trace response plot for
        each feature (n)
    """
    # a tuple signals multiple features; anything else is a single feature
    if type(feature_list) is tuple:
        n_features = len(feature_list)
    else:
        n_features = 1
    if plot is None:
        plot = PlotGrid()
        plot.set_shape(n_features, 2)
        plot.show()
        for g in range(n_features):
            plot[g, 1].addLegend()
    for feature in range(n_features):
        if n_features > 1:
            current_feature = feature_list[feature]
            if median is True:
                mean = np.nanmedian(current_feature)
            else:
                mean = np.nanmean(current_feature)
            label = labels[feature]
            title = titles[feature]
        else:
            current_feature = feature_list
            # NOTE(review): the median flag is ignored in this single-feature
            # branch (always the mean) -- confirm whether that is intended
            mean = np.nanmean(current_feature)
            label = labels
            title = titles
        plot[feature, 0].setLabels(left=(label[0], label[1]))
        plot[feature, 0].hideAxis('bottom')
        plot[feature, 0].setTitle(title)
        if grand_trace is not None:
            plot[feature, 1].plot(grand_trace.time_values, grand_trace.data, pen=color, name=name)
        if len(current_feature) > 1:
            # beeswarm jitter around x position i for the individual points
            dx = pg.pseudoScatter(np.array(current_feature).astype(float), 0.7, bidir=True)
            plot[feature, 0].plot([i], [mean], symbol='o', symbolSize=20, symbolPen='k', symbolBrush=color)
            sem = stats.sem(current_feature, nan_policy='omit')  # NOTE(review): computed but never used
            if len(color) != 3:
                # convert non-RGB colors (e.g. color strings) to 0-255 RGB
                new_color = pg.glColor(color)
                color = (new_color[0]*255, new_color[1]*255, new_color[2]*255)
            plot[feature, 0].plot((0.3 * dx / dx.max()) + i, current_feature, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=(color[0], color[1], color[2], 100))
        else:
            plot[feature, 0].plot([i], current_feature, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=color)
    return plot
def __init__(self, parent=None):
    """Set up the main window: asset list, action buttons and demo plots."""
    # Preliminaries: initialize the QMainWindow and load the Designer UI.
    QtWidgets.QMainWindow.__init__(self, parent)
    self.ui = gui.Ui_MainWindow()
    self.ui.setupUi(self)
    # Show a text at the bottom of the window (status bar).
    # For now this is just the project's name.
    self.ui.statusBar.showMessage("PSC Finance Expérimentale")
    # Fill the list with the asset names:
    self.ui.listWidget.addItem("Starbucks")
    self.ui.listWidget.addItem("ExxonMobil")
    self.ui.listWidget.addItem("Berkshire Hathaway")
    self.ui.listWidget.addItem("Facebook")
    # Clicking an item in the list calls the 'on_item_changed' method.
    # For now selecting an asset has no effect on the window, it only prints
    # the asset name to the Python console. Eventually this should open a
    # sub-window containing the asset's price history.
    self.ui.listWidget.currentItemChanged.connect(self.on_item_changed)
    # Wire the "compute portfolio" buttons:
    # each click calls the corresponding "action_bouton" method (see below).
    self.ui.pushButton.clicked.connect(self.action_bouton)
    self.ui.pushButton_2.clicked.connect(self.action_bouton2)
    self.ui.pushButton_2.hide()
    self.ui.lineEdit_7.hide()
    # Charts (temporary: this is a test) in the second tab.
    ## Distribution of values:
    vals = np.hstack([np.random.normal(size=500), np.random.normal(size=260, loc=4)])
    ## Histogram:
    y, x = np.histogram(vals, bins=np.linspace(-3, 8, 40))
    ## Scatter cloud (pseudoScatter spreads equal values apart):
    z = pg.pseudoScatter(vals, spacing=0.15)
    ## Draw the charts:
    self.ui.graphicsView.plot(x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
    self.ui.graphicsView_2.plot(vals, z, pen=None, symbol='o', symbolSize=5, symbolPen=(255, 255, 255, 200), symbolBrush=(0, 0, 255, 150))
    # True efficient frontier:
    Std, E = GenererFrontiereReelle(Mu, Sigma)
    self.ui.graphicsView_3.plot(Std, E)
    # Hidden at first; the stock price curves are shown in its place.
    self.ui.graphicsView_3.hide()
    # Plot the stock price histories:
    for colour in range(4):
        X, Y = CoursAction(100)
        self.ui.graphicsView_4.plot(X, Y)
def summary_plot_pulse(feature_list, labels, titles, i, median=False, grand_trace=None, plot=None, color=None, name=None):
    """ Plots features of single-pulse responses such as amplitude, latency,
    etc. for group analysis. Can be used for one group but is ideal for
    comparing across many groups in the feature_list.

    Parameters
    ----------
    feature_list : list of lists of floats
        single-pulse features such as amplitude. Can be multiple features
        each a list themselves
    labels : list of pyqtgraph.LabelItem
        axis labels, must be a list of same length as feature_list
    titles : list of strings
        plot title, must be a list of same length as feature_list
    i : integer
        iterator to place groups along x-axis
    median : boolean
        to calculate median (True) vs mean (False), default is False
    grand_trace : neuroanalysis.data.TSeriesView object
        option to plot response trace alongside scatter plot, default is None
    plot : pyqtgraph.PlotItem
        If not None, plot the data on the referenced pyqtgraph object.
    color : tuple
        plot color
    name : pyqtgraph.LegendItem

    Returns
    -------
    plot : pyqtgraph.PlotItem
        2 x n plot with scatter plot and optional trace response plot for
        each feature (n)
    """
    # a tuple signals multiple features; anything else is a single feature
    if type(feature_list) is tuple:
        n_features = len(feature_list)
    else:
        n_features = 1
    if plot is None:
        plot = PlotGrid()
        plot.set_shape(n_features, 2)
        plot.show()
        for g in range(n_features):
            plot[g, 1].addLegend()
    for feature in range(n_features):
        if n_features > 1:
            current_feature = feature_list[feature]
            label = labels[feature]
            title = titles[feature]
        else:
            current_feature = feature_list
            label = labels
            title = titles
        # FIX: the median flag was previously ignored in the single-feature
        # case (always the mean, contradicting the docstring); it is now
        # applied uniformly.
        if median is True:
            mean = np.nanmedian(current_feature)
        else:
            mean = np.nanmean(current_feature)
        plot[feature, 0].setLabels(left=(label[0], label[1]))
        plot[feature, 0].hideAxis('bottom')
        plot[feature, 0].setTitle(title)
        if grand_trace is not None:
            plot[feature, 1].plot(grand_trace.time_values, grand_trace.data, pen=color, name=name)
        if len(current_feature) > 1:
            # beeswarm jitter around x position i for the individual points
            # (an unused stats.sem(...) computation was removed here)
            dx = pg.pseudoScatter(np.array(current_feature).astype(float), 0.7, bidir=True)
            plot[feature, 0].plot([i], [mean], symbol='o', symbolSize=20, symbolPen='k', symbolBrush=color)
            if len(color) != 3:
                # convert non-RGB colors (e.g. color strings) to 0-255 RGB
                new_color = pg.glColor(color)
                color = (new_color[0] * 255, new_color[1] * 255, new_color[2] * 255)
            plot[feature, 0].plot((0.3 * dx / dx.max()) + i, current_feature, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=(color[0], color[1], color[2], 100))
        else:
            plot[feature, 0].plot([i], current_feature, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=color)
    return plot
i = 0 amp_plot = pg.plot() amp_plot2 = pg.plot() label = [] avg_amps = [a[1] for a in abs_sort_amp] amps_sem = [] bar = pg.BarGraphItem(x=range(len(avg_amps)), height=avg_amps, width=0.7) amp_plot.addItem(bar) for key, avg_amp in abs_sort_amp: color= get_color (key[0], key[1]) connection = ('%s->%s' % (key[0], key[1])) label.append((i, connection)) amps = [abs(a) for a in features['Amplitudes'][key]] amps_sem.append(stats.sem(amps)) dx = pg.pseudoScatter(np.array(amps).astype(float), 0.3, bidir=True) amp_plot.plot((0.3 * dx / dx.max()) + i, amps, pen=None, symbol='o', symbolSize=8, symbolBrush=color, symbolPen='w') amp_plot2.plot((0.3 * dx / dx.max()) + i, amps, pen=None, symbol='x', symbolSize=8, symbolBrush=color, symbolPen=None) amp_plot2.plot([i], [avg_amp], pen=None, symbol='o', symbolBrush=color, symbolPen='w', symbolSize=15) i += 1 amp_plot.setLabels(left=('Abs Amplitude', 'V')) amp_plot.getAxis('bottom').setTicks([label]) amp_plot.setYRange(0, 3e-3) err = pg.ErrorBarItem(x=np.array(range(len(amps_sem))), y=np.array(avg_amps), height=np.array(amps_sem), beam=0.3) amp_plot.addItem(err) amp_plot2.setLabels(left=('Abs Amplitude', 'V')) amp_plot2.getAxis('bottom').setTicks([label]) amp_plot2.setYRange(0, 3e-3)
import numpy as np win = pg.plot() win.setWindowTitle('pyqtgraph example: beeswarm') data = np.random.normal(size=(4,20)) data[0] += 5 data[1] += 7 data[2] += 5 data[3] = 10 + data[3] * 2 ## Make bar graph #bar = pg.BarGraphItem(x=range(4), height=data.mean(axis=1), width=0.5, brush=0.4) #win.addItem(bar) ## add scatter plots on top for i in range(4): xvals = pg.pseudoScatter(data[i], spacing=0.4, bidir=True) * 0.2 win.plot(x=xvals+i, y=data[i], pen=None, symbol='o', symbolBrush=pg.intColor(i,6,maxValue=128)) ## Make error bars err = pg.ErrorBarItem(x=np.arange(4), y=data.mean(axis=1), height=data.std(axis=1), beam=0.5, pen={'color':'w', 'width':2}) win.addItem(err) ## Start Qt event loop unless running in interactive mode or using pyside. if __name__ == '__main__': import sys if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): QtGui.QApplication.instance().exec_()
i = 0 amp_plot = pg.plot() amp_plot2 = pg.plot() label = [] avg_amps = [a[1] for a in abs_sort_amp] amps_sem = [] bar = pg.BarGraphItem(x=range(len(avg_amps)), height=avg_amps, width=0.7) amp_plot.addItem(bar) for key, avg_amp in abs_sort_amp: color = get_color(key[0], key[1]) connection = ('%s->%s' % (key[0], key[1])) label.append((i, connection)) amps = [abs(a) for a in features['Amplitudes'][key]] amps_sem.append(stats.sem(amps)) dx = pg.pseudoScatter(np.array(amps).astype(float), 0.3, bidir=True) amp_plot.plot((0.3 * dx / dx.max()) + i, amps, pen=None, symbol='o', symbolSize=8, symbolBrush=color, symbolPen='w') amp_plot2.plot((0.3 * dx / dx.max()) + i, amps, pen=None, symbol='x', symbolSize=8, symbolBrush=color, symbolPen=None) amp_plot2.plot([i], [avg_amp],
def distance_plot(connected, distance, plots=None, color=(100, 100, 255), window=40e-6, spacing=None, name=None, fill_alpha=30):
    """Draw connectivity vs distance profiles with confidence intervals.

    Parameters
    ----------
    connected : boolean array
        Whether a synaptic connection was found for each probe
    distance : array
        Distance between cells for each probe
    plots : list of PlotWidget | PlotItem (optional)
        Two plots used to display distance profile and scatter plot.
    color : tuple
        (R, G, B) color values for line and confidence interval. The
        confidence interval will be drawn with alpha=100
    window : float
        Width of distance window over which proportions are calculated for
        each point on the profile line.
    spacing : float
        Distance spacing between points on the profile line
    name : str (optional)
        Legend name for the plotted curves.
    fill_alpha : int
        Alpha used for the confidence-interval fill.

    Returns
    -------
    plots : tuple
        The (profile, scatter) plot items used.

    Note: using a spacing value that is smaller than the window size may
    cause an otherwise smooth decrease over distance to instead look more
    like a series of downward steps.
    """
    color = pg.colorTuple(pg.mkColor(color))[:3]
    connected = np.array(connected).astype(float)
    distance = np.array(distance)
    pts = np.vstack([distance, connected]).T

    # scatter points a bit: connected probes are jittered below -2e-5 and
    # unconnected probes above 0 so the two populations stay separated
    conn = pts[:, 1] == 1
    unconn = pts[:, 1] == 0
    if np.any(conn):
        cscat = pg.pseudoScatter(pts[:, 0][conn], spacing=10e-6, bidir=False)
        mx = abs(cscat).max()
        if mx != 0:
            cscat = cscat * 0.2  # / mx  (normalization deliberately disabled)
        pts[:, 1][conn] = -2e-5 - cscat
    if np.any(unconn):
        uscat = pg.pseudoScatter(pts[:, 0][unconn], spacing=10e-6, bidir=False)
        mx = abs(uscat).max()
        if mx != 0:
            uscat = uscat * 0.2  # / mx  (normalization deliberately disabled)
        pts[:, 1][unconn] = uscat

    # scatter plot connections probed; build a default 2x1 grid if none given
    if plots is None:
        grid = PlotGrid()
        grid.set_shape(2, 1)
        grid.grid.ci.layout.setRowStretchFactor(0, 5)
        grid.grid.ci.layout.setRowStretchFactor(1, 10)
        plots = (grid[1, 0], grid[0, 0])
        plots[0].grid = grid
        plots[0].addLegend()
        grid.show()
    plots[0].setLabels(bottom=('distance', 'm'), left='connection probability')
    if plots[1] is not None:
        plots[1].setXLink(plots[0])
        plots[1].hideAxis('bottom')
        plots[1].hideAxis('left')
        color2 = color + (100,)
        scatter = plots[1].plot(pts[:, 0], pts[:, 1], pen=None, symbol='o', labels={'bottom': ('distance', 'm')}, symbolBrush=color2, symbolPen=None, name=name)
        # additive composition makes overlapping points appear brighter
        scatter.scatter.opts['compositionMode'] = pg.QtGui.QPainter.CompositionMode_Plus

    # use a sliding window to plot the proportion of connections found along
    # with a 95% confidence interval for connection probability
    if spacing is None:
        spacing = window / 4.0
    xvals = np.arange(window / 2.0, 500e-6, spacing)
    upper = []
    lower = []
    prop = []
    ci_xvals = []
    for x in xvals:
        minx = x - window / 2.0
        maxx = x + window / 2.0
        # select points inside this window
        mask = (distance >= minx) & (distance <= maxx)
        pts_in_window = connected[mask]
        # compute stats for window
        n_probed = pts_in_window.shape[0]
        n_conn = pts_in_window.sum()
        if n_probed == 0:
            prop.append(np.nan)
        else:
            prop.append(n_conn / n_probed)
            ci = binomial_ci(n_conn, n_probed)
            lower.append(ci[0])
            upper.append(ci[1])
            ci_xvals.append(x)

    # plot connection probability and confidence intervals
    color2 = [c / 3.0 for c in color]  # NOTE(review): overwritten below; appears unused
    mid_curve = plots[0].plot(xvals, prop, pen={'color': color, 'width': 3}, antialias=True, name=name)
    # invisible bounding curves carry the CI data for the fill item
    upper_curve = plots[0].plot(ci_xvals, upper, pen=(0, 0, 0, 0), antialias=True)
    lower_curve = plots[0].plot(ci_xvals, lower, pen=(0, 0, 0, 0), antialias=True)
    upper_curve.setVisible(False)
    lower_curve.setVisible(False)
    color2 = color + (fill_alpha,)
    fill = pg.FillBetweenItem(upper_curve, lower_curve, brush=color2)
    fill.setZValue(-10)
    plots[0].addItem(fill, ignoreBounds=True)
    return plots
def summary_plot_pulse(feature_list, feature_mean, labels, titles, i, grand_trace=None, plot=None, color=None, name=None): if type(feature_list) is tuple: n_features = len(feature_list) else: n_features = 1 if plot is None: plot = PlotGrid() plot.set_shape(n_features, 2) plot.show() for g in range(n_features): plot[g, 1].addLegend() plot[g, 1].setLabels(left=('Vm', 'V')) plot[g, 1].setLabels(bottom=('t', 's')) for feature in range(n_features): if n_features > 1: features = feature_list[feature] mean = feature_mean[feature] label = labels[feature] title = titles[feature] else: features = feature_list mean = feature_mean label = labels title = titles plot[feature, 0].setLabels(left=(label[0], label[1])) plot[feature, 0].hideAxis('bottom') plot[feature, 0].setTitle(title) if grand_trace is not None: plot[feature, 1].plot(grand_trace.time_values, grand_trace.data, pen=color, name=name) if len(features) > 1: dx = pg.pseudoScatter(np.array(features).astype(float), 0.3, bidir=True) bar = pg.BarGraphItem(x=[i], height=mean, width=0.7, brush='w', pen={ 'color': color, 'width': 2 }) plot[feature, 0].addItem(bar) sem = stats.sem(features) err = pg.ErrorBarItem(x=np.asarray([i]), y=np.asarray([mean]), height=sem, beam=0.3) plot[feature, 0].addItem(err) plot[feature, 0].plot((0.3 * dx / dx.max()) + i, features, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=color) else: plot[feature, 0].plot([i], features, pen=None, symbol='o', symbolSize=10, symbolPen='w', symbolBrush=color) return plot
def updatePlot(self):
    """Redraw the scatter plot from the current filtered data and the
    one or two fields selected in the field list.

    One selected field produces a column-scatter (pseudo-histogram);
    two fields produce an x/y scatter with beeswarm spreading applied to
    enum-valued axes.
    """
    self.plot.clear()
    if self.data is None:
        return
    if self.filtered is None:
        # lazily apply the data filter; cached until the filter changes
        self.filtered = self.filter.filterData(self.data)
    data = self.filtered
    if len(data) == 0:
        return

    # one brush per record from the color map
    colors = np.array([fn.mkBrush(*x) for x in self.colorMap.map(data)])

    style = self.style.copy()

    ## Look up selected columns and units
    sel = list([str(item.text()) for item in self.fieldList.selectedItems()])
    units = list([item.opts.get("units", "") for item in self.fieldList.selectedItems()])
    if len(sel) == 0:
        self.plot.setTitle("")
        return

    if len(sel) == 1:
        self.plot.setLabels(left=("N", ""), bottom=(sel[0], units[0]), title="")
        if len(data) == 0:
            return
        # x = data[sel[0]]
        # y = None
        xy = [data[sel[0]], None]
    elif len(sel) == 2:
        self.plot.setLabels(left=(sel[1], units[1]), bottom=(sel[0], units[0]))
        if len(data) == 0:
            return
        xy = [data[sel[0]], data[sel[1]]]
        # xydata = []
        # for ax in [0,1]:
        #     d = data[sel[ax]]
        #     ## scatter catecorical values just a bit so they show up better in the scatter plot.
        #     ##if sel[ax] in ['MorphologyBSMean', 'MorphologyTDMean', 'FIType']:
        #     ##d += np.random.normal(size=len(cells), scale=0.1)
        #     xydata.append(d)
        # x,y = xydata

    ## convert enum-type fields to float, set axis labels
    enum = [False, False]
    for i in [0, 1]:
        axis = self.plot.getAxis(["bottom", "left"][i])
        if xy[i] is not None and (
            self.fields[sel[i]].get("mode", None) == "enum" or
            xy[i].dtype.kind in ("S", "O")
        ):
            # map each enum/string value to its index; unknown values map
            # to len(vals) (one past the last tick)
            vals = self.fields[sel[i]].get("values", list(set(xy[i])))
            xy[i] = np.array([vals.index(x) if x in vals else len(vals) for x in xy[i]], dtype=float)
            axis.setTicks([list(enumerate(vals))])
            enum[i] = True
        else:
            axis.setTicks(None)  # reset to automatic ticking

    ## mask out any nan values
    mask = np.ones(len(xy[0]), dtype=bool)
    if xy[0].dtype.kind == "f":
        mask &= ~np.isnan(xy[0])
    if xy[1] is not None and xy[1].dtype.kind == "f":
        mask &= ~np.isnan(xy[1])
    xy[0] = xy[0][mask]
    style["symbolBrush"] = colors[mask]

    ## Scatter y-values for a histogram-like appearance
    if xy[1] is None:
        ## column scatter plot
        xy[1] = fn.pseudoScatter(xy[0])
    else:
        ## beeswarm plots
        xy[1] = xy[1][mask]
        for ax in [0, 1]:
            if not enum[ax]:
                continue
            # spread the points of each enum category along the enum axis
            imax = int(xy[ax].max()) if len(xy[ax]) > 0 else 0
            for i in range(imax + 1):
                keymask = xy[ax] == i
                scatter = pg.pseudoScatter(xy[1 - ax][keymask], bidir=True)
                if len(scatter) == 0:
                    continue
                smax = np.abs(scatter).max()
                if smax != 0:
                    scatter *= 0.2 / smax
                xy[ax][keymask] += scatter

    # disconnect the old click handler before replacing the plot item;
    # the broad except tolerates a handler that was never connected
    if self.scatterPlot is not None:
        try:
            self.scatterPlot.sigPointsClicked.disconnect(self.plotClicked)
        except:
            pass
    self.scatterPlot = self.plot.plot(xy[0], xy[1], data=data[mask], **style)
    self.scatterPlot.sigPointsClicked.connect(self.plotClicked)
def update_display(self):
    """Refresh the induction-frequency plots for the current model result.

    For each induction frequency, draws a beeswarm of measured event
    amplitudes per pulse number (jittered left), a re-simulated model
    trial (jittered right), the per-pulse averages of both, and the
    model's expected amplitudes.
    """
    ModelResultView.update_display(self)
    # NOTE(review): `self._parent` here vs `self.parent` below -- looks
    # inconsistent; confirm which attribute the class actually defines.
    result = self._parent.result
    spikes = result.result['spike_time']
    amps = result.result['amplitude']
    meta = result.event_meta

    # re-simulate one random trial
    model_result = self.parent.random_model_result()
    model_amps = model_result.result['amplitude']

    # generate a list of all trains sorted by stimulus
    trains = result.events_by_stimulus()

    # scatter plots of event amplitudes sorted by pulse number
    for ind_i, ind_f in enumerate(self.induction_freqs):
        ind_trains = trains.get(ind_f, {})

        # collect all induction events by pulse number
        ind_pulses = [[] for i in range(12)]
        for rec_d, rec_trains in ind_trains.items():
            for train in rec_trains:
                for i, ev_ind in enumerate(train):
                    ind_pulses[i].append(ev_ind)

        real_x = []
        real_y = []
        real_avg_y = []
        model_x = []
        model_y = []
        model_avg_y = []
        for i in range(12):
            if len(ind_pulses[i]) == 0:
                # no events for this pulse: blank out the average curve
                real_avg_y = [np.nan] * 12
                continue
            inds = np.array(ind_pulses[i])
            # build measured (sign=-1, jitter left) and simulated
            # (sign=+1, jitter right) point sets in one pass
            for this_amp, x, avg_y, y, sign in ((amps, real_x, real_avg_y, real_y, -1), (model_amps, model_x, model_avg_y, model_y, 1)):
                amp = this_amp[inds]
                y.extend(amp)
                if len(amp) == 0:
                    avg_y.append(np.nan)
                else:
                    avg_y.append(amp.mean())
                xs = pg.pseudoScatter(np.array(amp), bidir=False, shuffle=True)
                xs /= np.abs(xs).max() * 4
                x.extend(i + xs * sign)

        self.ind_plots[ind_i].clear()
        self.ind_plots[ind_i].plot(real_x, real_y, pen=None, symbol='o', symbolPen=None, symbolBrush=data_color+(200,), symbolSize=5)
        self.ind_plots[ind_i].plot(np.arange(12), real_avg_y, pen=data_color+(100,), symbol='d', symbolPen=None, symbolBrush=data_color+(100,), symbolSize=5)
        # simulated points are shifted +0.1 and drawn behind the data
        self.ind_plots[ind_i].plot(np.array(model_x)+0.1, model_y, pen=None, symbol='o', symbolPen=None, symbolBrush=model_color+(200,), symbolSize=5, zValue=-1)

        # re-model based on mean amplitudes
        mean_times = np.arange(12) / ind_f
        mean_times[8:] += 0.25  # recovery pulses follow the induction train by 250 ms
        model = result.model
        params = result.all_params
        mean_result = model.run_model(mean_times, amplitudes='expected', params=params)

        # plot model distribution expectation values
        expected_amps = mean_result.result['expected_amplitude']
        self.ind_plots[ind_i].plot(expected_amps, pen=model_color+(100,), symbol='d', symbolBrush=model_color+(100,), symbolPen=None, zValue=-10)