def measure_limit(pair, session, classifier):
    """Estimate the minimum detectable synaptic amplitude for *pair*.

    Simulated synaptic events of increasing amplitude are superimposed on
    baseline (background) recordings and run through *classifier*; the
    amplitude at which the mean classifier confidence crosses
    ``classifier.prob_threshold`` is estimated by linear interpolation and
    stored as a ``DetectionLimit`` row committed to *session*.

    Parameters
    ----------
    pair :
        Cell pair record (provides ``experiment``, ``pre_cell``, ``post_cell``).
    session :
        Database session used for queries and for committing the result.
    classifier :
        Trained classifier exposing ``prob_threshold``; passed through to
        ``strength_analysis.simulate_connection``.
    """
    pair_id = pair.experiment.acq_timestamp, pair.pre_cell.ext_id, pair.post_cell.ext_id
    print(pair_id)

    # Response amplitudes are needed only to select the matching baseline
    # records below.  (A foreground response query previously here was dead
    # code: its result was overwritten by `fg_recs = bg_recs` before use.)
    amps = strength_analysis.get_amps(session, pair)
    base_amps = strength_analysis.get_baseline_amps(session, pair, amps=amps, clamp_mode='ic')

    q = strength_analysis.baseline_query(session)
    q = q.join(strength_analysis.BaselineResponseStrength)
    q = q.filter(strength_analysis.BaselineResponseStrength.id.in_(base_amps['id']))
    bg_recs = q.all()

    # measure background connection strength
    bg_results = [strength_analysis.analyze_response_strength(rec, 'baseline') for rec in bg_recs]
    bg_results = strength_analysis.str_analysis_result_table(bg_results, bg_recs)

    # for this example, we use background data to simulate foreground
    # (but this will be biased due to lack of crosstalk in background data)
    fg_recs = bg_recs

    # now measure foreground simulated under different conditions:
    # amplitudes 0, 4uV, 8uV, ... doubling up to 512uV; fixed 2 ms rise time
    amps = 2e-6 * 2**np.arange(9)
    amps[0] = 0
    rtime = 2e-3
    print("  Simulating synaptic events..")
    limit_entry = DetectionLimit(pair=pair)

    results = []
    avg_conf = []
    limit = None
    for i, amp in enumerate(amps):
        print("----- %d/%d  %0.3g      \r" % (i, len(amps), amp),)
        result = strength_analysis.simulate_connection(fg_recs, bg_results, classifier, amp, rtime)
        results.append({'amp': amp, 'rise_time': rtime, 'predictions': list(result['predictions']), 'confidence': list(result['confidence'])})
        avg_conf.append(result['confidence'].mean())
        print(results[-1])
        # if we crossed threshold, interpolate to estimate the minimum amplitude
        # and terminate the loop early
        if limit is None and i > 0 and avg_conf[-1] > classifier.prob_threshold:
            a1 = amps[i-1]
            a2 = amp
            c1, c2 = avg_conf[-2:]
            # linear interpolation between the last two confidence samples
            s = (classifier.prob_threshold - c1) / (c2 - c1)
            limit = a1 + s * (a2 - a1)
            break

    # limit stays None if confidence never crossed threshold.
    limit_entry.simulation_results = results
    limit_entry.minimum_amplitude = limit

    session.add(limit_entry)
    session.commit()
def measure_limit(pair, session, classifier):
    """Estimate the minimum detectable synaptic amplitude for *pair*.

    Simulated synaptic events of increasing amplitude are superimposed on
    baseline (background) recordings and classified; the amplitude at which
    the mean classifier confidence crosses ``classifier.prob_threshold`` is
    estimated by linear interpolation and stored as a ``DetectionLimit`` row
    committed to *session*.

    Parameters
    ----------
    pair :
        Cell pair record (provides ``experiment``, ``pre_cell``, ``post_cell``).
    session :
        Database session used for queries and for committing the result.
    classifier :
        Trained classifier exposing ``prob_threshold``; passed through to
        ``strength_analysis.simulate_connection``.
    """
    pair_id = (pair.experiment.acq_timestamp,
               pair.pre_cell.ext_id,
               pair.post_cell.ext_id)
    print(pair_id)

    # Response amplitudes are needed only to select the matching baseline
    # records below.  (A foreground response query previously here was dead
    # code: its result was overwritten by `fg_recs = bg_recs` before use.)
    amps = strength_analysis.get_amps(session, pair)
    base_amps = strength_analysis.get_baseline_amps(session,
                                                    pair,
                                                    amps=amps,
                                                    clamp_mode='ic')

    q = strength_analysis.baseline_query(session)
    q = q.join(strength_analysis.BaselineResponseStrength)
    q = q.filter(
        strength_analysis.BaselineResponseStrength.id.in_(base_amps['id']))
    bg_recs = q.all()

    # measure background connection strength
    bg_results = [
        strength_analysis.analyze_response_strength(rec, 'baseline')
        for rec in bg_recs
    ]
    bg_results = strength_analysis.str_analysis_result_table(
        bg_results, bg_recs)

    # for this example, we use background data to simulate foreground
    # (but this will be biased due to lack of crosstalk in background data)
    fg_recs = bg_recs

    # now measure foreground simulated under different conditions:
    # amplitudes 0, 4uV, 8uV, ... doubling up to 512uV; fixed 2 ms rise time
    amps = 2e-6 * 2**np.arange(9)
    amps[0] = 0
    rtime = 2e-3
    print("  Simulating synaptic events..")
    limit_entry = DetectionLimit(pair=pair)

    results = []
    avg_conf = []
    limit = None
    for i, amp in enumerate(amps):
        print("----- %d/%d  %0.3g      \r" % (i, len(amps), amp), )
        result = strength_analysis.simulate_connection(fg_recs, bg_results,
                                                       classifier, amp, rtime)
        results.append({
            'amp': amp,
            'rise_time': rtime,
            'predictions': list(result['predictions']),
            'confidence': list(result['confidence'])
        })
        avg_conf.append(result['confidence'].mean())
        print(results[-1])
        # if we crossed threshold, interpolate to estimate the minimum amplitude
        # and terminate the loop early
        if limit is None and i > 0 and avg_conf[-1] > classifier.prob_threshold:
            a1 = amps[i - 1]
            a2 = amp
            c1, c2 = avg_conf[-2:]
            # linear interpolation between the last two confidence samples
            s = (classifier.prob_threshold - c1) / (c2 - c1)
            limit = a1 + s * (a2 - a1)
            break

    # limit stays None if confidence never crossed threshold.
    limit_entry.simulation_results = results
    limit_entry.minimum_amplitude = limit

    session.add(limit_entry)
    session.commit()
    def add_connection_plots(i, name, timestamp, pre_id, post_id):
        """Add one row of plots for a single connection: example traces,
        deconvolved traces, amplitude histograms, and a simulated
        detectability analysis.

        Args:
            i: row index in the global graphics window ``win``.
            name: label text drawn on the trace plot.
            timestamp: experiment acquisition timestamp used to locate the
                connection in the global ``filtered`` record array.
            pre_id: presynaptic cell ext_id.
            post_id: postsynaptic cell ext_id.

        NOTE(review): also depends on module-level names not visible in this
        chunk (trace_plots, deconv_plots, hist_plots, scatter_plot,
        background, signal, db, strength_analysis, Psp, Trace, TraceList,
        stats) -- assumed to be defined at module scope.
        """
        global session, win, filtered
        # Profiler is constructed disabled, so the p() calls below are inert
        # timing checkpoints unless re-enabled.
        p = pg.debug.Profiler(disabled=True, delayed=False)
        # One row of four panels: raw traces, deconvolved traces, histogram,
        # and detection-limit curves (log-log axes).
        trace_plot = win.addPlot(i, 1)
        trace_plots.append(trace_plot)
        deconv_plot = win.addPlot(i, 2)
        deconv_plots.append(deconv_plot)
        hist_plot = win.addPlot(i, 3)
        hist_plots.append(hist_plot)
        limit_plot = win.addPlot(i, 4)
        limit_plot.addLegend()
        limit_plot.setLogMode(True, True)
        # Find this connection in the pair list
        idx = np.argwhere((abs(filtered['acq_timestamp'] - timestamp) < 1) & (filtered['pre_cell_id'] == pre_id) & (filtered['post_cell_id'] == post_id))
        if idx.size == 0:
            print("not in filtered connections")
            return
        idx = idx[0,0]
        p()

        # Mark the point in scatter plot
        scatter_plot.plot([background[idx]], [signal[idx]], pen='k', symbol='o', size=10, symbolBrush='r', symbolPen=None)
            
        # Plot example traces and histograms
        for plts in [trace_plots, deconv_plots]:
            plt = plts[-1]
            # Link axes to the first row so all rows share pan/zoom.
            plt.setXLink(plts[0])
            plt.setYLink(plts[0])
            plt.setXRange(-10e-3, 17e-3, padding=0)
            plt.hideAxis('left')
            plt.hideAxis('bottom')
            plt.addLine(x=0)
            plt.setDownsampling(auto=True, mode='peak')
            plt.setClipToView(True)
            # Scale bars: 2 ms horizontal, 100 uV vertical.
            hbar = pg.QtGui.QGraphicsLineItem(0, 0, 2e-3, 0)
            hbar.setPen(pg.mkPen(color='k', width=5))
            plt.addItem(hbar)
            vbar = pg.QtGui.QGraphicsLineItem(0, 0, 0, 100e-6)
            vbar.setPen(pg.mkPen(color='k', width=5))
            plt.addItem(vbar)


        hist_plot.setXLink(hist_plots[0])
        
        pair = session.query(db.Pair).filter(db.Pair.id==filtered[idx]['pair_id']).all()[0]
        p()
        amps = strength_analysis.get_amps(session, pair)
        p()
        base_amps = strength_analysis.get_baseline_amps(session, pair)
        p()
        
        q = strength_analysis.response_query(session)
        p()
        q = q.join(strength_analysis.PulseResponseStrength)
        q = q.filter(strength_analysis.PulseResponseStrength.id.in_(amps['id']))
        q = q.join(db.Recording, db.Recording.id==db.PulseResponse.recording_id).join(db.PatchClampRecording).join(db.MultiPatchProbe)
        q = q.filter(db.MultiPatchProbe.induction_frequency < 100)
        # pre_cell = db.aliased(db.Cell)
        # post_cell = db.aliased(db.Cell)
        # q = q.join(db.Pair).join(db.Experiment).join(pre_cell, db.Pair.pre_cell_id==pre_cell.id).join(post_cell, db.Pair.post_cell_id==post_cell.id)
        # q = q.filter(db.Experiment.id==filtered[idx]['experiment_id'])
        # q = q.filter(pre_cell.ext_id==pre_id)
        # q = q.filter(post_cell.ext_id==post_id)

        fg_recs = q.all()
        p()

        traces = []
        deconvs = []
        # Plot up to 100 individual responses, aligned so the spike is at
        # t=0 and baseline-subtracted over the +/-0.5 ms window around it.
        for rec in fg_recs[:100]:
            result = strength_analysis.analyze_response_strength(rec, source='pulse_response', lpf=True, lowpass=2000,
                                                remove_artifacts=False, bsub=True)
            trace = result['raw_trace']
            trace.t0 = -result['spike_time']
            trace = trace - np.median(trace.time_slice(-0.5e-3, 0.5e-3).data)
            traces.append(trace)            
            trace_plot.plot(trace.time_values, trace.data, pen=(0, 0, 0, 20))

            trace = result['dec_trace']
            trace.t0 = -result['spike_time']
            trace = trace - np.median(trace.time_slice(-0.5e-3, 0.5e-3).data)
            deconvs.append(trace)            
            deconv_plot.plot(trace.time_values, trace.data, pen=(0, 0, 0, 20))

        # plot average trace
        mean = TraceList(traces).mean()
        trace_plot.plot(mean.time_values, mean.data, pen={'color':'g', 'width': 2}, shadowPen={'color':'k', 'width': 3}, antialias=True)
        mean = TraceList(deconvs).mean()
        deconv_plot.plot(mean.time_values, mean.data, pen={'color':'g', 'width': 2}, shadowPen={'color':'k', 'width': 3}, antialias=True)

        # add label
        label = pg.LabelItem(name)
        label.setParentItem(trace_plot)


        p("analyze_response_strength")

        # bins = np.arange(-0.0005, 0.002, 0.0001) 
        # field = 'pos_amp'
        bins = np.arange(-0.001, 0.015, 0.0005) 
        field = 'pos_dec_amp'
        # Overlay background and foreground amplitude histograms, truncated
        # to equal sample counts so the distributions are comparable.
        n = min(len(amps), len(base_amps))
        hist_y, hist_bins = np.histogram(base_amps[:n][field], bins=bins)
        hist_plot.plot(hist_bins, hist_y, stepMode=True, pen=None, brush=(200, 0, 0, 150), fillLevel=0)
        hist_y, hist_bins = np.histogram(amps[:n][field], bins=bins)
        hist_plot.plot(hist_bins, hist_y, stepMode=True, pen='k', brush=(0, 150, 150, 100), fillLevel=0)
        p()

        pg.QtGui.QApplication.processEvents()


        # Plot detectability analysis
        q = strength_analysis.baseline_query(session)
        q = q.join(strength_analysis.BaselineResponseStrength)
        q = q.filter(strength_analysis.BaselineResponseStrength.id.in_(base_amps['id']))
        # q = q.limit(100)
        bg_recs = q.all()

        def clicked(sp, pts):
            # Scatter-point click handler: plot the simulated traces behind
            # the clicked point, baseline-subtracted, plus their mean.
            traces = pts[0].data()['traces']
            print([t.amp for t in traces])
            plt = pg.plot()
            bsub = [t.copy(data=t.data - np.median(t.time_slice(0, 1e-3).data)) for t in traces]
            for t in bsub:
                plt.plot(t.time_values, t.data, pen=(0, 0, 0, 50))
            mean = TraceList(bsub).mean()
            plt.plot(mean.time_values, mean.data, pen='g')

        # first measure background a few times
        N = len(fg_recs)
        N = 50  # temporary for testing
        print("Testing %d trials" % N)


        bg_results = []
        M = 500
        print("  Grinding on %d background trials" % len(bg_recs))
        # Background metric: median amplitude / latency stdev over N randomly
        # chosen baseline trials, repeated M times.
        for i in range(M):
            amps = base_amps.copy()
            np.random.shuffle(amps)
            bg_results.append(np.median(amps[:N]['pos_dec_amp']) / np.std(amps[:N]['pos_dec_latency']))
            print("    %d/%d      \r" % (i, M),)
        print("    done.            ")
        print("    ", bg_results)


        # now measure foreground simulated under different conditions
        amps = 5e-6 * 2**np.arange(6)
        amps[0] = 0
        rtimes = 1e-3 * 1.71**np.arange(4)
        dt = 1 / db.default_sample_rate
        results = np.empty((len(amps), len(rtimes)), dtype=[('pos_dec_amp', float), ('latency_stdev', float), ('result', float), ('percentile', float), ('traces', object)])
        print("  Simulating synaptic events..")
        for j,rtime in enumerate(rtimes):
            for i,amp in enumerate(amps):
                trial_results = []
                t = np.arange(0, 15e-3, dt)
                # Unit-amplitude PSP template; scaled per-trial below.
                template = Psp.psp_func(t, xoffset=0, yoffset=0, rise_time=rtime, decay_tau=15e-3, amp=1, rise_power=2)

                for l in range(20):
                    print("    %d/%d  %d/%d      \r" % (i,len(amps),j,len(rtimes)),)
                    # Randomize simulated event amplitude and latency per trial.
                    r_amps = amp * 2**np.random.normal(size=N, scale=0.5)
                    r_latency = np.random.normal(size=N, scale=600e-6, loc=12.5e-3)
                    fg_results = []
                    traces = []
                    np.random.shuffle(bg_recs)
                    for k,rec in enumerate(bg_recs[:N]):
                        data = rec.data.copy()
                        start = int(r_latency[k] / dt)
                        length = len(rec.data) - start
                        # Superimpose the scaled template onto the baseline
                        # recording in place; restored from `data` below.
                        rec.data[start:] += template[:length] * r_amps[k]

                        fg_result = strength_analysis.analyze_response_strength(rec, 'baseline')
                        fg_results.append((fg_result['pos_dec_amp'], fg_result['pos_dec_latency']))

                        traces.append(Trace(rec.data.copy(), dt=dt))
                        traces[-1].amp = r_amps[k]
                        rec.data[:] = data  # can't modify rec, so we have to muck with the array (and clean up afterward) instead
                    
                    fg_amp = np.array([r[0] for r in fg_results])
                    fg_latency = np.array([r[1] for r in fg_results])
                    trial_results.append(np.median(fg_amp) / np.std(fg_latency))
                results[i,j]['result'] = np.median(trial_results) / np.median(bg_results)
                results[i,j]['percentile'] = stats.percentileofscore(bg_results, results[i,j]['result'])
                results[i,j]['traces'] = traces

            # NOTE(review): 'pos_dec_amp' is never written into `results`, so
            # this asserts over uninitialized np.empty memory -- confirm intent.
            assert all(np.isfinite(results[i]['pos_dec_amp']))
            print(i, results[i]['result'])
            print(i, results[i]['percentile'])
            

            # c = limit_plot.plot(rtimes, results[i]['result'], pen=(i, len(amps)*1.3), symbol='o', antialias=True, name="%duV"%(amp*1e6), data=results[i], symbolSize=4)
            # c.scatter.sigClicked.connect(clicked)
            # pg.QtGui.QApplication.processEvents()
            c = limit_plot.plot(amps, results[:,j]['result'], pen=(j, len(rtimes)*1.3), symbol='o', antialias=True, name="%dus"%(rtime*1e6), data=results[:,j], symbolSize=4)
            c.scatter.sigClicked.connect(clicked)
            pg.QtGui.QApplication.processEvents()

                
        pg.QtGui.QApplication.processEvents()
Example #4
0
    def add_connection_plots(i, name, timestamp, pre_id, post_id):
        """Add one row of figure panels for a single connection: example
        traces, amplitude histograms, and classifier-based detection-limit
        curves.  Each plotted dataset is also exported via ``write_csv``.

        Args:
            i: row index in the global graphics window ``win``.
            name: label drawn on the trace plot and used in CSV captions.
            timestamp: experiment acquisition timestamp used to locate the
                connection in the global ``filtered`` record array.
            pre_id: presynaptic cell ext_id.
            post_id: postsynaptic cell ext_id.

        NOTE(review): also depends on module-level names not visible in this
        chunk (trace_plots, hist_plots, scatter_plot, background, signal,
        db, strength_analysis, classifier, csv_file, write_csv, TSeriesList,
        os, pickle) -- assumed to be defined at module scope.
        """
        global session, win, filtered
        # Profiler is constructed disabled; p() calls are inert checkpoints.
        p = pg.debug.Profiler(disabled=True, delayed=False)
        trace_plot = win.addPlot(i, 1)
        trace_plots.append(trace_plot)
        trace_plot.setYRange(-1.4e-3, 2.1e-3)
        # deconv_plot = win.addPlot(i, 2)
        # deconv_plots.append(deconv_plot)
        # deconv_plot.hide()

        hist_plot = win.addPlot(i, 2)
        hist_plots.append(hist_plot)
        limit_plot = win.addPlot(i, 3)
        limit_plot.addLegend()
        # Log-x only; horizontal line marks the classifier decision threshold.
        limit_plot.setLogMode(True, False)
        limit_plot.addLine(y=classifier.prob_threshold)

        # Find this connection in the pair list
        idx = np.argwhere((abs(filtered['acq_timestamp'] - timestamp) < 1)
                          & (filtered['pre_cell_id'] == pre_id)
                          & (filtered['post_cell_id'] == post_id))
        if idx.size == 0:
            print("not in filtered connections")
            return
        idx = idx[0, 0]
        p()

        # Mark the point in scatter plot
        scatter_plot.plot([background[idx]], [signal[idx]],
                          pen='k',
                          symbol='o',
                          size=10,
                          symbolBrush='r',
                          symbolPen=None)

        # Plot example traces and histograms
        for plts in [trace_plots]:  #, deconv_plots]:
            plt = plts[-1]
            # Link axes to the first row so all rows share pan/zoom.
            plt.setXLink(plts[0])
            plt.setYLink(plts[0])
            plt.setXRange(-10e-3, 17e-3, padding=0)
            plt.hideAxis('left')
            plt.hideAxis('bottom')
            plt.addLine(x=0)
            plt.setDownsampling(auto=True, mode='peak')
            plt.setClipToView(True)
            # Scale bars: 2 ms horizontal, 100 uV vertical.
            hbar = pg.QtGui.QGraphicsLineItem(0, 0, 2e-3, 0)
            hbar.setPen(pg.mkPen(color='k', width=5))
            plt.addItem(hbar)
            vbar = pg.QtGui.QGraphicsLineItem(0, 0, 0, 100e-6)
            vbar.setPen(pg.mkPen(color='k', width=5))
            plt.addItem(vbar)

        hist_plot.setXLink(hist_plots[0])

        pair = session.query(
            db.Pair).filter(db.Pair.id == filtered[idx]['pair_id']).all()[0]
        p()
        amps = strength_analysis.get_amps(session, pair)
        p()
        base_amps = strength_analysis.get_baseline_amps(session,
                                                        pair,
                                                        amps=amps,
                                                        clamp_mode='ic')
        p()

        q = strength_analysis.response_query(session)
        p()
        q = q.join(strength_analysis.PulseResponseStrength)
        q = q.filter(strength_analysis.PulseResponseStrength.id.in_(
            amps['id']))
        q = q.join(db.MultiPatchProbe)
        q = q.filter(db.MultiPatchProbe.induction_frequency < 100)
        # pre_cell = db.aliased(db.Cell)
        # post_cell = db.aliased(db.Cell)
        # q = q.join(db.Pair).join(db.Experiment).join(pre_cell, db.Pair.pre_cell_id==pre_cell.id).join(post_cell, db.Pair.post_cell_id==post_cell.id)
        # q = q.filter(db.Experiment.id==filtered[idx]['experiment_id'])
        # q = q.filter(pre_cell.ext_id==pre_id)
        # q = q.filter(post_cell.ext_id==post_id)

        fg_recs = q.all()
        p()

        traces = []
        deconvs = []
        # NOTE(review): this `i` shadows the row-index parameter `i` for the
        # remainder of the function -- confirm the parameter is not needed
        # after this point.
        for i, rec in enumerate(fg_recs[:100]):
            result = strength_analysis.analyze_response_strength(
                rec,
                source='pulse_response',
                lpf=True,
                lowpass=2000,
                remove_artifacts=False,
                bsub=True)
            # Align so the spike is at t=0 and subtract the median over the
            # +/-0.5 ms window around it.
            trace = result['raw_trace']
            trace.t0 = -result['spike_time']
            trace = trace - np.median(trace.time_slice(-0.5e-3, 0.5e-3).data)
            traces.append(trace)
            trace_plot.plot(trace.time_values, trace.data, pen=(0, 0, 0, 20))
            write_csv(
                csv_file, trace,
                "Figure 3B; {name}; trace {trace_n}".format(name=name,
                                                            trace_n=i))

            # trace = result['dec_trace']
            # trace.t0 = -result['spike_time']
            # trace = trace - np.median(trace.time_slice(-0.5e-3, 0.5e-3).data)
            # deconvs.append(trace)
            # # deconv_plot.plot(trace.time_values, trace.data, pen=(0, 0, 0, 20))

        # plot average trace
        mean = TSeriesList(traces).mean()
        trace_plot.plot(mean.time_values,
                        mean.data,
                        pen={
                            'color': 'g',
                            'width': 2
                        },
                        shadowPen={
                            'color': 'k',
                            'width': 3
                        },
                        antialias=True)
        write_csv(csv_file, mean,
                  "Figure 3B; {name}; average".format(name=name))
        # mean = TSeriesList(deconvs).mean()
        # # deconv_plot.plot(mean.time_values, mean.data, pen={'color':'g', 'width': 2}, shadowPen={'color':'k', 'width': 3}, antialias=True)

        # add label
        label = pg.LabelItem(name)
        label.setParentItem(trace_plot)

        p("analyze_response_strength")

        # bins = np.arange(-0.0005, 0.002, 0.0001)
        # field = 'pos_amp'
        bins = np.arange(-0.001, 0.015, 0.0005)
        field = 'pos_dec_amp'
        # Overlay background and foreground amplitude histograms, truncated
        # to equal sample counts so the distributions are comparable.
        n = min(len(amps), len(base_amps))
        hist_y, hist_bins = np.histogram(base_amps[:n][field], bins=bins)
        hist_plot.plot(hist_bins,
                       hist_y,
                       stepMode=True,
                       pen=None,
                       brush=(200, 0, 0, 150),
                       fillLevel=0)
        write_csv(
            csv_file, hist_bins,
            "Figure 3C; {name}; background noise amplitude distribution bin edges (V)"
            .format(name=name))
        write_csv(
            csv_file, hist_y,
            "Figure 3C; {name}; background noise amplitude distribution counts per bin"
            .format(name=name))

        hist_y, hist_bins = np.histogram(amps[:n][field], bins=bins)
        hist_plot.plot(hist_bins,
                       hist_y,
                       stepMode=True,
                       pen='k',
                       brush=(0, 150, 150, 100),
                       fillLevel=0)
        write_csv(
            csv_file, hist_bins,
            "Figure 3C; {name}; PSP amplitude distribution bin edges (V)".
            format(name=name))
        write_csv(
            csv_file, hist_y,
            "Figure 3C; {name}; PSP amplitude distribution counts per bin".
            format(name=name))
        p()

        pg.QtGui.QApplication.processEvents()

        # Plot detectability analysis
        q = strength_analysis.baseline_query(session)
        q = q.join(strength_analysis.BaselineResponseStrength)
        q = q.filter(
            strength_analysis.BaselineResponseStrength.id.in_(base_amps['id']))
        # q = q.limit(100)
        bg_recs = q.all()

        def clicked(sp, pts):
            # Scatter-point click handler: dump classifier features for the
            # clicked simulation and plot its traces plus their mean.
            data = pts[0].data()
            print("-----------------------\nclicked:", data['rise_time'],
                  data['amp'], data['prediction'], data['confidence'])
            for r in data['results']:
                print({k: r[k] for k in classifier.features})
            traces = data['traces']
            plt = pg.plot()
            bsub = [
                t.copy(data=t.data - np.median(t.time_slice(0, 1e-3).data))
                for t in traces
            ]
            for t in bsub:
                plt.plot(t.time_values, t.data, pen=(0, 0, 0, 50))
            mean = TSeriesList(bsub).mean()
            plt.plot(mean.time_values, mean.data, pen='g')

        # def analyze_response_strength(recs, source, dtype):
        #     results = []
        #     for i,rec in enumerate(recs):
        #         result = strength_analysis.analyze_response_strength(rec, source)
        #         results.append(result)
        #     return str_analysis_result_table(results)

        # measure background connection strength
        bg_results = [
            strength_analysis.analyze_response_strength(rec, 'baseline')
            for rec in bg_recs
        ]
        bg_results = strength_analysis.str_analysis_result_table(
            bg_results, bg_recs)

        # for this example, we use background data to simulate foreground
        # (but this will be biased due to lack of crosstalk in background data)
        fg_recs = bg_recs

        # now measure foreground simulated under different conditions
        amps = 2e-6 * 2**np.arange(9)
        amps[0] = 0
        rtimes = [1e-3, 2e-3, 4e-3, 6e-3]
        dt = 1 / db.default_sample_rate
        results = np.empty((len(amps), len(rtimes)),
                           dtype=[('results', object), ('predictions', object),
                                  ('confidence', object), ('traces', object),
                                  ('rise_time', float), ('amp', float)])
        print("  Simulating synaptic events..")

        # Simulation results are cached on disk per (timestamp, pre, post)
        # pair so repeated runs skip already-computed (rtime, amp) points.
        cachefile = 'fig_3_cache.pkl'
        if os.path.exists(cachefile):
            cache = pickle.load(open(cachefile, 'rb'))
        else:
            cache = {}
        pair_key = (timestamp, pre_id, post_id)
        pair_cache = cache.setdefault(pair_key, {})

        for j, rtime in enumerate(rtimes):
            new_results = False
            for i, amp in enumerate(amps):
                print(
                    "---------------------------------------    %d/%d  %d/%d      \r"
                    % (i, len(amps), j, len(rtimes)), )
                result = pair_cache.get((rtime, amp))
                if result is None:
                    result = strength_analysis.simulate_connection(
                        fg_recs, bg_results, classifier, amp, rtime)
                    pair_cache[rtime, amp] = result
                    new_results = True

                for k, v in result.items():
                    results[i, j][k] = v

            # One curve per rise time: mean classifier confidence vs amplitude.
            x, y = amps, [np.mean(x) for x in results[:, j]['confidence']]
            c = limit_plot.plot(x,
                                y,
                                pen=pg.intColor(j,
                                                len(rtimes) * 1.3,
                                                maxValue=150),
                                symbol='o',
                                antialias=True,
                                name="%dus" % (rtime * 1e6),
                                data=results[:, j],
                                symbolSize=4)
            write_csv(
                csv_file, x,
                "Figure 3D; {name}; {rise_time:0.3g} ms rise time; simulated PSP amplitude (V)"
                .format(name=name, rise_time=rtime * 1000))
            write_csv(
                csv_file, y,
                "Figure 3D; {name}; {rise_time:0.3g} ms rise time; classifier decision probability"
                .format(name=name, rise_time=rtime * 1000))
            c.scatter.sigClicked.connect(clicked)
            pg.QtGui.QApplication.processEvents()

            # Persist the cache only when something new was computed.
            if new_results:
                pickle.dump(cache, open(cachefile, 'wb'))

        pg.QtGui.QApplication.processEvents()
    def add_connection_plots(i, name, timestamp, pre_id, post_id):
        global session, win, filtered
        p = pg.debug.Profiler(disabled=True, delayed=False)
        trace_plot = win.addPlot(i, 1)
        trace_plots.append(trace_plot)
        trace_plot.setYRange(-1.4e-3, 2.1e-3)
        # deconv_plot = win.addPlot(i, 2)
        # deconv_plots.append(deconv_plot)
        # deconv_plot.hide()
        
        hist_plot = win.addPlot(i, 2)
        hist_plots.append(hist_plot)
        limit_plot = win.addPlot(i, 3)
        limit_plot.addLegend()
        limit_plot.setLogMode(True, False)
        limit_plot.addLine(y=classifier.prob_threshold)

        # Find this connection in the pair list
        idx = np.argwhere((abs(filtered['acq_timestamp'] - timestamp) < 1) & (filtered['pre_cell_id'] == pre_id) & (filtered['post_cell_id'] == post_id))
        if idx.size == 0:
            print("not in filtered connections")
            return
        idx = idx[0,0]
        p()

        # Mark the point in scatter plot
        scatter_plot.plot([background[idx]], [signal[idx]], pen='k', symbol='o', size=10, symbolBrush='r', symbolPen=None)
        
        # Plot example traces and histograms
        for plts in [trace_plots]:#, deconv_plots]:
            plt = plts[-1]
            plt.setXLink(plts[0])
            plt.setYLink(plts[0])
            plt.setXRange(-10e-3, 17e-3, padding=0)
            plt.hideAxis('left')
            plt.hideAxis('bottom')
            plt.addLine(x=0)
            plt.setDownsampling(auto=True, mode='peak')
            plt.setClipToView(True)
            hbar = pg.QtGui.QGraphicsLineItem(0, 0, 2e-3, 0)
            hbar.setPen(pg.mkPen(color='k', width=5))
            plt.addItem(hbar)
            vbar = pg.QtGui.QGraphicsLineItem(0, 0, 0, 100e-6)
            vbar.setPen(pg.mkPen(color='k', width=5))
            plt.addItem(vbar)


        hist_plot.setXLink(hist_plots[0])
        
        # Look up the Pair record for this connection, then fetch per-pulse
        # response amplitudes and baseline (no-stimulus) amplitudes to compare.
        pair = session.query(db.Pair).filter(db.Pair.id==filtered[idx]['pair_id']).all()[0]
        p()  # NOTE(review): p() appears to be a timing/debug checkpoint defined outside this view -- confirm
        amps = strength_analysis.get_amps(session, pair)
        p()
        base_amps = strength_analysis.get_baseline_amps(session, pair, amps=amps, clamp_mode='ic')
        p()
        
        # Build the foreground query: pulse responses whose strength records are
        # in `amps`, restricted to induction frequency < 100 Hz.
        q = strength_analysis.response_query(session)
        p()
        q = q.join(strength_analysis.PulseResponseStrength)
        q = q.filter(strength_analysis.PulseResponseStrength.id.in_(amps['id']))
        q = q.join(db.MultiPatchProbe)
        q = q.filter(db.MultiPatchProbe.induction_frequency < 100)
        # pre_cell = db.aliased(db.Cell)
        # post_cell = db.aliased(db.Cell)
        # q = q.join(db.Pair).join(db.Experiment).join(pre_cell, db.Pair.pre_cell_id==pre_cell.id).join(post_cell, db.Pair.post_cell_id==post_cell.id)
        # q = q.filter(db.Experiment.id==filtered[idx]['experiment_id'])
        # q = q.filter(pre_cell.ext_id==pre_id)
        # q = q.filter(post_cell.ext_id==post_id)

        fg_recs = q.all()
        p()

        traces = []
        deconvs = []
        # Plot up to 100 individual spike-aligned responses.
        # FIX: loop variable renamed from `i` to `trace_n` so it no longer
        # clobbers the enclosing `i` used as the plot row index (L694/L696).
        for trace_n, rec in enumerate(fg_recs[:100]):
            result = strength_analysis.analyze_response_strength(rec, source='pulse_response', lpf=True, lowpass=2000,
                                                remove_artifacts=False, bsub=True)
            trace = result['raw_trace']
            # Align t=0 to the presynaptic spike time.
            trace.t0 = -result['spike_time']
            # Baseline-subtract using the median in a +/-0.5 ms window around the spike.
            trace = trace - np.median(trace.time_slice(-0.5e-3, 0.5e-3).data)
            traces.append(trace)
            trace_plot.plot(trace.time_values, trace.data, pen=(0, 0, 0, 20))
            write_csv(csv_file, trace, "Figure 3B; {name}; trace {trace_n}".format(name=name, trace_n=trace_n))

            # trace = result['dec_trace']
            # trace.t0 = -result['spike_time']
            # trace = trace - np.median(trace.time_slice(-0.5e-3, 0.5e-3).data)
            # deconvs.append(trace)            
            # # deconv_plot.plot(trace.time_values, trace.data, pen=(0, 0, 0, 20))

        # plot average trace
        mean = TraceList(traces).mean()
        trace_plot.plot(mean.time_values, mean.data, pen={'color':'g', 'width': 2}, shadowPen={'color':'k', 'width': 3}, antialias=True)
        write_csv(csv_file, mean, "Figure 3B; {name}; average".format(name=name))
        # mean = TraceList(deconvs).mean()
        # # deconv_plot.plot(mean.time_values, mean.data, pen={'color':'g', 'width': 2}, shadowPen={'color':'k', 'width': 3}, antialias=True)

        # add label
        label = pg.LabelItem(name)
        label.setParentItem(trace_plot)


        p("analyze_response_strength")

        # bins = np.arange(-0.0005, 0.002, 0.0001) 
        # field = 'pos_amp'
        bins = np.arange(-0.001, 0.015, 0.0005) 
        field = 'pos_dec_amp'
        n = min(len(amps), len(base_amps))
        hist_y, hist_bins = np.histogram(base_amps[:n][field], bins=bins)
        hist_plot.plot(hist_bins, hist_y, stepMode=True, pen=None, brush=(200, 0, 0, 150), fillLevel=0)
        write_csv(csv_file, hist_bins, "Figure 3C; {name}; background noise amplitude distribution bin edges (V)".format(name=name))
        write_csv(csv_file, hist_y, "Figure 3C; {name}; background noise amplitude distribution counts per bin".format(name=name))
        
        hist_y, hist_bins = np.histogram(amps[:n][field], bins=bins)
        hist_plot.plot(hist_bins, hist_y, stepMode=True, pen='k', brush=(0, 150, 150, 100), fillLevel=0)
        write_csv(csv_file, hist_bins, "Figure 3C; {name}; PSP amplitude distribution bin edges (V)".format(name=name))
        write_csv(csv_file, hist_y, "Figure 3C; {name}; PSP amplitude distribution counts per bin".format(name=name))
        p()

        pg.QtGui.QApplication.processEvents()


        # Plot detectability analysis
        # Baseline (no-stimulus) records serve as the noise population for the
        # simulated-connection detectability analysis below.
        q = strength_analysis.baseline_query(session)
        q = q.join(strength_analysis.BaselineResponseStrength)
        q = q.filter(strength_analysis.BaselineResponseStrength.id.in_(base_amps['id']))
        # q = q.limit(100)
        bg_recs = q.all()

        def clicked(sp, pts):
            """Scatter-click callback: dump the point's classifier features and
            plot its simulated traces (baseline-subtracted) in a new window."""
            point_data = pts[0].data()
            print("-----------------------\nclicked:", point_data['rise_time'], point_data['amp'], point_data['prediction'], point_data['confidence'])
            # Print the classifier feature values for every simulation result.
            for result in point_data['results']:
                print({feature: result[feature] for feature in classifier.features})
            # Subtract each trace's 0-1 ms median as a baseline estimate.
            baseline_subtracted = []
            for t in point_data['traces']:
                offset = np.median(t.time_slice(0, 1e-3).data)
                baseline_subtracted.append(t.copy(data=t.data - offset))
            view = pg.plot()
            for t in baseline_subtracted:
                view.plot(t.time_values, t.data, pen=(0, 0, 0, 50))
            avg = TraceList(baseline_subtracted).mean()
            view.plot(avg.time_values, avg.data, pen='g')


        # def analyze_response_strength(recs, source, dtype):
        #     results = []
        #     for i,rec in enumerate(recs):
        #         result = strength_analysis.analyze_response_strength(rec, source)
        #         results.append(result)
        #     return str_analysis_result_table(results)



        # measure background connection strength
        bg_results = [strength_analysis.analyze_response_strength(rec, 'baseline') for rec in bg_recs]
        bg_results = strength_analysis.str_analysis_result_table(bg_results, bg_recs)

        # for this example, we use background data to simulate foreground
        # (but this will be biased due to lack of crosstalk in background data)
        fg_recs = bg_recs

        # now measure foreground simulated under different conditions
        # Amplitudes: 0, then 2 uV doubling up to 512 uV; rise times 1-6 ms.
        amps = 2e-6 * 2**np.arange(9)
        amps[0] = 0
        rtimes = [1e-3, 2e-3, 4e-3, 6e-3]
        dt = 1 / db.default_sample_rate
        results = np.empty((len(amps), len(rtimes)), dtype=[('results', object), ('predictions', object), ('confidence', object), ('traces', object), ('rise_time', float), ('amp', float)])
        print("  Simulating synaptic events..")

        # Simulation results are cached on disk, keyed per pair.
        cachefile = 'fig_3_cache.pkl'
        if os.path.exists(cachefile):
            # FIX: use a context manager so the file handle is closed promptly
            # (the original `pickle.load(open(...))` leaked the handle).
            with open(cachefile, 'rb') as cache_fh:
                cache = pickle.load(cache_fh)
        else:
            cache = {}
        pair_key = (timestamp, pre_id, post_id)
        pair_cache = cache.setdefault(pair_key, {})

        # Sweep rise time x amplitude; classify each simulated connection and
        # plot mean classifier confidence vs amplitude, one curve per rise time.
        for j, rtime in enumerate(rtimes):
            new_results = False
            # FIX: loop variable renamed from `i` to `ai` so it no longer
            # clobbers the enclosing `i` used as the plot row index (L694/L696).
            for ai, amp in enumerate(amps):
                print("---------------------------------------    %d/%d  %d/%d      \r" % (ai, len(amps), j, len(rtimes)),)
                result = pair_cache.get((rtime, amp))
                if result is None:
                    # Cache miss: run the (expensive) simulation and mark the
                    # cache dirty so it gets persisted below.
                    result = strength_analysis.simulate_connection(fg_recs, bg_results, classifier, amp, rtime)
                    pair_cache[rtime, amp] = result
                    new_results = True

                for k, v in result.items():
                    results[ai, j][k] = v

            x = amps
            # Mean classifier confidence at each amplitude for this rise time.
            # (Renamed the comprehension variable so it no longer shadows `x`.)
            y = [np.mean(conf) for conf in results[:, j]['confidence']]
            c = limit_plot.plot(x, y, pen=pg.intColor(j, len(rtimes)*1.3, maxValue=150), symbol='o', antialias=True, name="%dus"%(rtime*1e6), data=results[:,j], symbolSize=4)
            write_csv(csv_file, x, "Figure 3D; {name}; {rise_time:0.3g} ms rise time; simulated PSP amplitude (V)".format(name=name, rise_time=rtime*1000))
            write_csv(csv_file, y, "Figure 3D; {name}; {rise_time:0.3g} ms rise time; classifier decision probability".format(name=name, rise_time=rtime*1000))
            c.scatter.sigClicked.connect(clicked)
            pg.QtGui.QApplication.processEvents()

            if new_results:
                # FIX: context manager guarantees the cache file is flushed and
                # closed (the original `pickle.dump(cache, open(...))` leaked it).
                with open(cachefile, 'wb') as cache_fh:
                    pickle.dump(cache, cache_fh)

        pg.QtGui.QApplication.processEvents()