def responses(expt, pre, post):
    """Return (avg_est, avg_amp, n_sweeps) for one pre->post connection.

    Results are memoized in a pickle file (``result_cache_file``) keyed by
    (nwb_file, pre, post).  A cached empty dict marks a pair that was already
    analyzed and had no usable sweeps.

    Parameters
    ----------
    expt : experiment object exposing ``nwb_file`` (project type)
    pre, post : pre- and post-synaptic cell IDs

    Returns
    -------
    (avg_est, avg_amp, n_sweeps) where ``avg_amp`` is a Trace, or
    (None, None, None) / (None, None, 0) when no sweeps were found.
    """
    key = (expt.nwb_file, pre, post)
    result_cache = load_cache(result_cache_file)
    if key in result_cache:
        res = result_cache[key]
        if 'avg_est' not in res:
            # Empty cache entry: previously analyzed, nothing usable.
            return None, None, None
        avg_est = res['avg_est']
        # Rehydrate the averaged trace from its raw data + sample interval.
        avg_amp = Trace(data=res['data'], dt=res['dt'])
        n_sweeps = res['n_sweeps']
        return avg_est, avg_amp, n_sweeps

    analyzer = DynamicsAnalyzer(expt, pre, post, align_to='spike')
    avg_est, _, avg_amp, _, n_sweeps = analyzer.estimate_amplitude(plot=False)
    if n_sweeps == 0:
        # Cache the negative result so we don't re-analyze this pair.
        result_cache[key] = {}
        ret = None, None, n_sweeps
    else:
        result_cache[key] = {
            'avg_est': avg_est,
            'data': avg_amp.data,
            'dt': avg_amp.dt,
            'n_sweeps': n_sweeps
        }
        ret = avg_est, avg_amp, n_sweeps

    # Fix: close the cache file deterministically; the original
    # ``open(...).write(...)`` leaked the file handle.
    data = pickle.dumps(result_cache)
    with open(result_cache_file, 'wb') as f:
        f.write(data)
    print(key)
    return ret
# Esempio n. 2
def get_response(expt, pre, post, type='pulse'):
    """Return (response, artifact) for one pre->post connection.

    Parameters
    ----------
    expt : experiment object exposing ``source_id`` (project type)
    pre, post : pre- and post-synaptic cell IDs
    type : str
        'pulse' -> ``analyzer.pulse_responses``;
        'train' -> dict with 'responses' and 'pulse_offsets'.

    Returns
    -------
    (response, artifact) where ``artifact = analyzer.cross_talk()``, or
    (response, None) when no suitable data is found.

    Raises
    ------
    ValueError
        If ``type`` is neither 'pulse' nor 'train'.
    """
    analyzer = DynamicsAnalyzer(expt,
                                pre,
                                post,
                                method='deconv',
                                align_to='spike')
    if type == 'pulse':
        response = analyzer.pulse_responses
    elif type == 'train':
        responses = analyzer.train_responses
        pulse_offset = analyzer.pulse_offsets
        response = {'responses': responses, 'pulse_offsets': pulse_offset}
    else:
        # Fix: the original had a bare string here (a no-op statement), which
        # left `response` unbound and caused a confusing NameError below.
        raise ValueError(
            "Must select either pulse responses or train responses")
    if len(response) == 0:
        # print() with one argument parses identically in Python 2 and 3;
        # the original Py2-only print statement broke Py3 compatibility.
        print("No suitable data found for cell %d -> cell %d in expt %s" %
              (pre, post, expt.source_id))
        return response, None
    artifact = analyzer.cross_talk()
    return response, artifact
def responses(expt, pre, post, thresh, filter=None):
    """Return (avg_amp, avg_trace, n_sweeps) for one pre->post connection.

    Results are memoized in a pickle file (``result_cache_file``) keyed by
    (nwb_file, pre, post); a cached empty dict marks a pair with no usable
    data or with cross-talk artifact above ``thresh``.

    Parameters
    ----------
    expt : experiment object exposing ``nwb_file`` (project type)
    pre, post : pre- and post-synaptic cell IDs
    thresh : float
        Maximum acceptable cross-talk artifact.
    filter : tuple or None
        When given, ``(freq_range, holding_range)`` used to subset pulse
        responses via ``response_filter``.

    Returns
    -------
    (avg_amp, avg_trace, n_sweeps) or (None, None, None)/(None, None, 0).
    """
    key = (expt.nwb_file, pre, post)
    result_cache = load_cache(result_cache_file)
    if key in result_cache:
        res = result_cache[key]
        if 'avg_amp' not in res:
            # Empty cache entry: previously analyzed, nothing usable.
            return None, None, None
        avg_amp = res['avg_amp']
        avg_trace = Trace(data=res['data'], dt=res['dt'])
        n_sweeps = res['n_sweeps']
        return avg_amp, avg_trace, n_sweeps

    if filter is not None:
        responses, artifact = get_response(expt, pre, post, type='pulse')
        response_subset = response_filter(responses, freq_range=filter[0], holding_range=filter[1], pulse=True)
        if len(response_subset) > 0:
            avg_trace, avg_amp, _, _ = get_amplitude(response_subset)
        n_sweeps = len(response_subset)
    else:
        analyzer = DynamicsAnalyzer(expt, pre, post, align_to='spike')
        avg_amp, _, avg_trace, _, n_sweeps = analyzer.estimate_amplitude(plot=False)
        artifact = analyzer.cross_talk()
    # Fix: guard against artifact being None (get_response returns a None
    # artifact when no data is found).  Under Python 3 ``None > thresh``
    # raises TypeError; under Python 2 it silently compared False, which is
    # the behavior this guard preserves.
    if n_sweeps == 0 or (artifact is not None and artifact > thresh):
        result_cache[key] = {}
        ret = None, None, n_sweeps
    else:
        result_cache[key] = {'avg_amp': avg_amp, 'data': avg_trace.data, 'dt': avg_trace.dt, 'n_sweeps': n_sweeps}
        ret = avg_amp, avg_trace, n_sweeps

    # Fix: close the cache file deterministically; the original
    # ``open(...).write(...)`` leaked the file handle.
    data = pickle.dumps(result_cache)
    with open(result_cache_file, 'wb') as f:
        f.write(data)
    print(key)
    return ret
def train_response_plot(expt_list, name=None, summary_plots=[None, None], color=None):
    # Plot averaged 50 Hz train responses (induction + recovery) for every
    # connected pre->post pair in `expt_list` whose cre types match the
    # module-level `cre_type` pair, then plot the grand means and summary plots.
    #
    # Parameters:
    #   expt_list     -- iterable of experiment objects (project type) with
    #                    `.connections` and `.cells`
    #   name          -- label used for the grand-mean legend entry and printout
    #   summary_plots -- two-element list of plot targets for summary_plot_train
    #                    (NOTE(review): mutable default argument; benign here
    #                    since it is only indexed, never mutated)
    #   color         -- pen color passed through to summary_plot_train
    #
    # Returns (train_plots, train_plots2, train_amps) on success, None when no
    # qualifying traces were found.  NOTE(review): if traces were collected but
    # `ind_grand_mean is None`, the function falls through and implicitly
    # returns None as well.
    #
    # Depends on module globals not visible in this chunk: cre_type, app,
    # legend, train_amp, summary_plot_train, pg, np, TraceList, float_mode,
    # bessel_filter, exp_deconvolve, DynamicsAnalyzer.
    ind_base_subtract = []
    rec_base_subtract = []
    train_plots = pg.plot()
    train_plots.setLabels(left=('Vm', 'V'))
    tau =15e-3   # deconvolution time constant (s)
    lp = 1000    # low-pass cutoff for bessel_filter
    for expt in expt_list:
        for pre, post in expt.connections:
            # Only analyze pairs matching the globally-selected cre-type pair.
            if expt.cells[pre].cre_type == cre_type[0] and expt.cells[post].cre_type == cre_type[1]:
                print ('Processing experiment: %s' % (expt.nwb_file))
                ind = []
                rec = []
                analyzer = DynamicsAnalyzer(expt, pre, post)
                train_responses = analyzer.train_responses
                artifact = analyzer.cross_talk()
                # Skip pairs with cross-talk artifact above 0.03 mV.
                if artifact > 0.03e-3:
                    continue
                for i, stim_params in enumerate(train_responses.keys()):
                     # stim_params[1] is presumably the recovery delay in
                     # seconds; round to the nearest 10 ms. TODO confirm.
                     rec_t = int(np.round(stim_params[1] * 1e3, -1))
                     # Keep only 50 Hz induction with 250 ms recovery delay.
                     if stim_params[0] == 50 and rec_t == 250:
                        # NOTE(review): pulse_offsets is only assigned inside
                        # this branch; if no stimulus matches, the train_amp
                        # call below would raise NameError.
                        pulse_offsets = analyzer.pulse_offsets
                        if len(train_responses[stim_params][0]) != 0:
                            # [0] = induction responses, [1] = recovery responses.
                            ind_group = train_responses[stim_params][0]
                            rec_group = train_responses[stim_params][1]
                            for j in range(len(ind_group)):
                                ind.append(ind_group.responses[j])
                                rec.append(rec_group.responses[j])
                # Require more than 5 induction sweeps before averaging.
                if len(ind) > 5:
                    ind_avg = TraceList(ind).mean()
                    rec_avg = TraceList(rec).mean()
                    rec_avg.t0 = 0.3  # align recovery trace to 300 ms
                    # Baseline: mode of the first 10 ms of the induction average.
                    base = float_mode(ind_avg.data[:int(10e-3 / ind_avg.dt)])
                    ind_base_subtract.append(ind_avg.copy(data=ind_avg.data - base))
                    rec_base_subtract.append(rec_avg.copy(data=rec_avg.data - base))
                    train_plots.plot(ind_avg.time_values, ind_avg.data - base)
                    train_plots.plot(rec_avg.time_values, rec_avg.data - base)
                    # Keep the GUI responsive while iterating experiments.
                    app.processEvents()
    if len(ind_base_subtract) != 0:
        print (name + ' n = %d' % len(ind_base_subtract))
        ind_grand_mean = TraceList(ind_base_subtract).mean()
        rec_grand_mean = TraceList(rec_base_subtract).mean()
        # Deconvolve then low-pass filter the induction grand mean.
        ind_grand_mean_dec = bessel_filter(exp_deconvolve(ind_grand_mean, tau), lp)
        train_plots.addLegend()
        train_plots.plot(ind_grand_mean.time_values, ind_grand_mean.data, pen={'color': 'g', 'width': 3}, name=name)
        train_plots.plot(rec_grand_mean.time_values, rec_grand_mean.data, pen={'color': 'g', 'width': 3}, name=name)
        #train_plots.plot(ind_grand_mean_dec.time_values, ind_grand_mean_dec.data, pen={'color': 'g', 'dash': [1,5,3,2]})
        train_amps = train_amp([ind_base_subtract, rec_base_subtract], pulse_offsets, '+')
        if ind_grand_mean is not None:
            # NOTE(review): `legend` is not defined anywhere in this chunk —
            # it looks like it should be `name`; verify against the rest of
            # the module before relying on this path.
            train_plots = summary_plot_train(ind_grand_mean, plot=summary_plots[0], color=color,
                                             name=(legend + ' 50 Hz induction'))
            train_plots = summary_plot_train(rec_grand_mean, plot=summary_plots[0], color=color)
            train_plots2 = summary_plot_train(ind_grand_mean_dec, plot=summary_plots[1], color=color,
                                              name=(legend + ' 50 Hz induction'))
            return train_plots, train_plots2, train_amps
    else:
        print ("No Traces")
        return None
def get_response(expt, pre, post, analysis_type='pulse'):
    """Return (response, artifact) for one pre->post connection.

    Parameters
    ----------
    expt : experiment object exposing ``source_id`` (project type)
    pre, post : pre- and post-synaptic cell IDs
    analysis_type : str
        'pulse' -> ``analyzer.pulse_responses``;
        'train' -> dict with 'responses' and 'pulse_offsets'.

    Returns
    -------
    (response, artifact) where ``artifact = analyzer.cross_talk()``, or
    (response, None) when no suitable data is found.

    Raises
    ------
    ValueError
        If ``analysis_type`` is neither 'pulse' nor 'train'.
    """
    analyzer = DynamicsAnalyzer(expt, pre, post, method='deconv', align_to='spike')
    if analysis_type == 'pulse':
        response = analyzer.pulse_responses
    elif analysis_type == 'train':
        responses = analyzer.train_responses
        pulse_offset = analyzer.pulse_offsets
        response = {'responses': responses, 'pulse_offsets': pulse_offset}
    else:
        # Fix: the original had a bare string here (a no-op statement), which
        # left `response` unbound and caused a confusing NameError below.
        raise ValueError("Must select either pulse responses or train responses")
    if len(response) == 0:
        # print() with one argument parses identically in Python 2 and 3;
        # the original Py2-only print statement broke Py3 compatibility.
        print("No suitable data found for cell %d -> cell %d in expt %s" % (pre, post, expt.source_id))
        return response, None
    artifact = analyzer.cross_talk()
    return response, artifact
    def selection_changed(self):
        """Respond to a new selection in the synapse tree.

        Looks up (or lazily creates and caches) a DynamicsAnalyzer for the
        selected pre/post cell pair, then redraws the train-response plots.

        Raises an Exception when the analyzer finds no pulse responses for
        the selected pair.
        """
        with pg.BusyCursor():
            selected = self.syn_tree.selectedItems()[0]
            experiment = selected.expt

            self.expt_info.set_experiment(experiment)

            pre_cell, post_cell = (c.cell_id for c in selected.cells[:2])

            # One analyzer per (experiment, pre, post) triple, built on demand.
            cache_key = (experiment, pre_cell, post_cell)
            analyzer = self.analyzers.get(cache_key)
            if analyzer is None:
                analyzer = DynamicsAnalyzer(*cache_key)
                self.analyzers[cache_key] = analyzer

            if not analyzer.pulse_responses:
                raise Exception(
                    "No suitable data found for cell %d -> cell %d in expt %s"
                    % (pre_cell, post_cell, experiment))

            # Plot all individual and averaged train responses for all sets of stimulus parameters
            self.train_plots.clear()
            analyzer.plot_train_responses(plot_grid=self.train_plots)
# Esempio n. 7
from multipatch_analysis.synaptic_dynamics import DynamicsAnalyzer

if __name__ == '__main__':
    app = pg.mkQApp()
    pg.dbg()

    expt_ind = sys.argv[1]
    all_expts = ExperimentList(cache='expts_cache.pkl')
    expt = all_expts[expt_ind]

    pre_cell = int(sys.argv[2])
    post_cell = int(sys.argv[3])

    method = 'fit' if '--no-deconv' in sys.argv else 'deconv'

    analyzer = DynamicsAnalyzer(expt, pre_cell, post_cell, method=method)
    if len(analyzer.pulse_responses) == 0:
        raise Exception(
            "No suitable data found for cell %d -> cell %d in expt %s" %
            (pre_cell, post_cell, expt_ind))

    # Plot all individual and averaged train responses for all sets of stimulus parameters
    train_plots = analyzer.plot_train_responses()

    if '--no-fit' in sys.argv:
        sys.exit(0)  # user requested no fitting; bail out early

    if '--no-deconv' in sys.argv:
        # Estimate PSP amplitude
        amp_est, amp_sign, avg_amp, amp_plot, n_sweeps = analyzer.estimate_amplitude(
            plot=True)