def make_time_frequency_plot(dtup, event_name, Tpre, Tpost, freqs,
                             baseline_interval):
    """Plot mean wavelet time-frequency power around an event, pooled over channels.

    Parameters:
        dtup: dataset identifier tuple; first two entries select the session
            (events are fetched with dtup[:2]).
        event_name: column of the events frame whose non-NaN times to align on.
        Tpre: time before each event to include (units of the LFP index).
        Tpost: time after each event to include.
        freqs: frequencies at which to evaluate the wavelet transform.
        baseline_interval: (start, stop) pair; mean power over this slice
            normalizes each frequency band.

    Returns:
        Matplotlib figure with the normalized mean time-frequency image.

    Relies on module-level names: dbname, dbio, physutils, reduce.
    """
    # get lfp data (censored, z-scored per channel)
    print("Fetching data: " + str(dtup))
    lfp = dbio.fetch_all_such_LFP(dbname, *dtup).censor().zscore()

    # get events; events are stored per session, hence dtup[:2]
    evt = dbio.fetch(dbname, 'events', *dtup[:2])
    times = evt[event_name].dropna()

    # horrible kludge to drop pathological channels
    bad_channel_list = [(16, 2, 22), (18, 1, 32)]

    all_wavs = []
    for channel in lfp.columns:
        # skip channels known to be pathological for this dataset
        if dtup + (channel,) in bad_channel_list:
            continue
        print("Channel " + str(channel))
        wav_normed, im = lfp.avg_time_frequency(channel, times, Tpre, Tpost,
                                                method='wav', doplot=False,
                                                normfun=None, freqs=freqs)
        all_wavs.append(wav_normed)

    # guard the degenerate case: previously an empty list crashed inside
    # reduce() with an opaque TypeError
    if not all_wavs:
        raise ValueError("No usable channels for dataset " + str(dtup))

    # mean power across channels; fill_value=0 treats cells missing from a
    # channel as zero power so frames of unequal extent still combine
    all_wav_mean = reduce(lambda x, y: x.add(y, fill_value=0),
                          all_wavs) / len(all_wavs)

    # normalize each frequency by its mean power over the baseline interval
    def normfun(frame):
        return frame / frame[slice(*baseline_interval)].mean()

    fig = physutils.tf.plot_time_frequency(normfun(all_wav_mean))
    return fig
def make_time_frequency_plot(dtup, event_names, Tpre, Tpost, freqs,
                             baseline_interval, thresh):
    """Plot the ratio of mean wavelet power between two event conditions.

    Parameters:
        dtup: dataset identifier tuple; dtup[:2] selects the session's events.
        event_names: sequence of two event-column names; power aligned to the
            first is divided by power aligned to the second.
        Tpre, Tpost: peri-event window bounds.
        freqs: frequencies for the wavelet transform.
        baseline_interval, thresh: accepted for interface compatibility with
            the other plotting variants but not used by this one.

    Returns:
        One-element tuple containing the ratio figure.

    Relies on module-level names: dbname, dbio, physutils, reduce.
    """
    # get lfp data (censored, z-scored per channel)
    print("Fetching data: " + str(dtup))
    lfp = dbio.fetch_all_such_LFP(dbname, *dtup).censor().zscore()

    # event times for the two conditions being contrasted
    evt = dbio.fetch(dbname, 'events', *dtup[:2])
    times_list = [evt[name].dropna() for name in event_names[:2]]

    # horrible kludge to drop pathological channels
    bad_channel_list = [(16, 2, 22), (18, 1, 32)]

    # one accumulator list per condition; previously this whole per-channel
    # pipeline was duplicated verbatim for the two conditions
    all_wavs = [[], []]
    for channel in lfp.columns:
        if dtup + (channel,) in bad_channel_list:
            continue
        print("Channel " + str(channel))
        for cond, times in enumerate(times_list):
            wav_normed, im = lfp.avg_time_frequency(channel, times, Tpre,
                                                    Tpost, method='wav',
                                                    doplot=False, normfun=None,
                                                    freqs=freqs)
            all_wavs[cond].append(wav_normed)

    if not all_wavs[0]:
        raise ValueError("No usable channels for dataset " + str(dtup))

    # mean power across channels for each condition; fill_value=0 treats
    # cells missing from a channel as zero power
    means = [reduce(lambda x, y: x.add(y, fill_value=0), wavs) / len(wavs)
             for wavs in all_wavs]

    # plot the power ratio (condition 0 over condition 1)
    fig1 = physutils.tf.plot_time_frequency(means[0] / means[1])
    return (fig1,)
def get_traces_split(dtup, event, bands):
    """Return smoothed, z-scored median power traces around `event`,
    median-split into low- and high-inflate-time trial groups.

    Parameters:
        dtup: dataset identifier tuple used to fetch LFP and events.
        event: column of the events frame whose non-NaN times to align on.
        bands: frequency bands passed to bandlimit().

    Returns:
        LFPset with two columns ('Low value', 'High Value'), one median
        trace per trial group.

    Relies on module-level names: Tpre, Tpost, smwid, dbio, physutils.
    """
    # load data
    os.chdir(os.path.expanduser('~/code/hephys/bartc'))
    dbname = os.path.expanduser('~/data/bartc/plexdata/bartc.hdf5')

    # preprocessing pipeline: fetch -> band-limit -> decimate to 100 Hz
    # effective sampling -> instantaneous power -> drop censored regions
    print("Fetching Data...")
    lfp = dbio.fetch_all_such_LFP(dbname, *dtup)
    print("Filtering...")
    lfp = lfp.bandlimit(bands)
    print("Decimating...")
    lfp = lfp.decimate(5)
    print("Calculating Power...")
    lfp = lfp.instpwr()
    print("Censoring...")
    lfp = lfp.censor()

    # event times for this session
    evt = dbio.fetch(dbname, 'events', *dtup)
    evtseries = evt[event].dropna()

    # median split of trials by inflate time
    infl_times = evt['inflate_time']
    med_inflate_time = np.median(infl_times)
    low_grp = evtseries[infl_times <= med_inflate_time]
    high_grp = evtseries[infl_times > med_inflate_time]
    grpnames = ['Low value', 'High Value']

    subsets = []
    for grp in (low_grp, high_grp):
        # split lfp around the group's events, then collapse: median over
        # trials at each (time, channel), then median across channels
        split_frame = lfp.evtsplit(grp, Tpre, Tpost)
        per_channel = split_frame.groupby(level=1).median()
        subsets.append(per_channel.median(axis=1))

    combined = pd.concat(subsets, axis=1)
    combined.columns = grpnames

    # make peri-stop frame
    df = physutils.LFPset(combined, meta=lfp.meta.copy()).zscore()
    return df.smooth(smwid)
def get_traces(dtup, event, bands):
    """Return smoothed, z-scored median power traces around `event`,
    one column per trial type (non-control types 1, 2, 3).

    Parameters:
        dtup: dataset identifier tuple used to fetch LFP and events.
        event: column of the events frame whose non-NaN times to align on.
        bands: frequency bands passed to bandlimit().

    Returns:
        LFPset indexed by peri-event time with one integer trial-type column
        per type.

    Relies on module-level names: Tpre, Tpost, smwid, dbio, physutils.
    """
    # load data
    os.chdir(os.path.expanduser('~/code/hephys/bartc'))
    dbname = os.path.expanduser('~/data/bartc/plexdata/bartc.hdf5')
    lfp = dbio.fetch_all_such_LFP(dbname, *dtup)
    # bandpass filter
    lfp = lfp.bandlimit(bands)
    # decimate to 100 Hz
    lfp = lfp.decimate(5)
    # instantaneous power
    lfp = lfp.instpwr()
    # censor
    lfp = lfp.censor()
    # zscore to facilitate cross-channel comparisons
    lfp = lfp.zscore()

    # get events, restricted to non-control trials with a non-NA outcome
    evt = dbio.fetch(dbname, 'events', *dtup)
    evt = evt[evt.trial_type.isin([1, 2, 3])]
    evtseries = evt[[event, 'trial_type']].dropna()

    # split lfp around stops
    lfp_split = lfp.evtsplit(evtseries[event], Tpre, Tpost)

    # groupby accepts a list of functions; each receives an index tuple
    # of (trial_number, time)
    def group_by_trial_type(idx_tuple):
        # map the trial number back to that trial's type
        return evtseries.iloc[idx_tuple[0]].trial_type

    def group_by_time(idx_tuple):
        return idx_tuple[1]

    grouped = lfp_split.groupby([group_by_trial_type, group_by_time])

    # median across trials for each (type, time, channel), then across channels
    med_by_type = grouped.median()
    med_across_chans = med_by_type.median(axis=1)

    # make trial type a column
    medians = med_across_chans.unstack(level=0)
    medians.index.name = 'time'
    # list(...) so this is a real sequence, not a lazy map object, under Py3
    medians.columns = list(map(int, medians.columns))

    # make peri-stop frame
    df = physutils.LFPset(medians, meta=lfp.meta.copy()).zscore()
    df = df.smooth(smwid)
    return df
def make_time_frequency_plot(dbname, dtup, event_name, Tpre, Tpost, freqs,
                             baseline_interval):
    """Plot baseline-normalized average wavelet power for a single channel.

    Parameters:
        dbname: path to the HDF5 database.
        dtup: identifier tuple; dtup[:2] selects the session's events and
            dtup[2] is the channel to analyze.
        event_name: column of the events frame whose non-NaN times to align on.
        Tpre, Tpost: peri-event window bounds.
        freqs: NOTE(review) — accepted but never forwarded to
            avg_time_frequency, which therefore uses its own default
            frequencies; confirm whether that is intended.
        baseline_interval: interval whose mean power normalizes each band.

    Returns:
        Matplotlib figure produced by avg_time_frequency.
    """
    # fetch and censor the session's LFP data
    print("Fetching data: " + str(dtup))
    lfp = dbio.fetch_all_such_LFP(dbname, *dtup).censor()

    # event times for this session
    event_frame = dbio.fetch(dbname, 'events', *dtup[:2])
    event_times = event_frame[event_name].dropna()

    # normalize by mean power over the baseline interval
    norm = physutils.norm_by_mean(baseline_interval)
    wav_normed, fig = lfp.avg_time_frequency(dtup[2], event_times, Tpre,
                                             Tpost, method='wav',
                                             normfun=norm)
    return fig
def make_time_frequency_plot(dtup, event_names, Tpre, Tpost, freqs,
                             baseline_interval, thresh):
    """Plot contrast and significance maps between two event conditions
    for a single channel.

    Parameters:
        dtup: identifier tuple; dtup[:2] selects the session's events and
            dtup[2] is the channel to analyze.
        event_names: two event-column names defining the conditions.
        Tpre, Tpost: peri-event window bounds.
        freqs: frequencies for the wavelet transform.
        baseline_interval: interval whose mean power normalizes each band.
        thresh: significance threshold passed to significant_time_frequency.

    Returns:
        (contrast figure, significance figure) pair.

    Relies on module-level names: dbname, dbio, physutils.
    """
    # fetch and censor the session's LFP data
    print("Fetching data: " + str(dtup))
    lfp = dbio.fetch_all_such_LFP(dbname, *dtup).censor()

    # event times for each condition
    event_frame = dbio.fetch(dbname, 'events', *dtup[:2])
    condition_times = [event_frame[event_names[0]].dropna(),
                       event_frame[event_names[1]].dropna()]

    # normalize by mean power over the baseline interval
    nf = physutils.norm_by_mean(baseline_interval)

    # contrast map between the two conditions
    contr_tf, fig1 = lfp.contrast_time_frequency(dtup[2], condition_times,
                                                 Tpre, Tpost, method='wav',
                                                 normfun=nf, doplot=True,
                                                 freqs=freqs)
    # permutation-style significance map (1000 iterations)
    mcontr, fig2 = lfp.significant_time_frequency(dtup[2], condition_times,
                                                  Tpre, Tpost, thresh=thresh,
                                                  niter=1000, method='wav',
                                                  doplot=True, normfun=nf,
                                                  freqs=freqs)
    return fig1, fig2
def get_traces(dtup, event, bands):
    """Return a smoothed, z-scored peri-event frame of per-channel median
    power traces around `event`.

    Parameters:
        dtup: dataset identifier tuple used to fetch LFP and events.
        event: column of the events frame whose non-NaN times to align on.
        bands: frequency bands passed to bandlimit().

    Returns:
        LFPset indexed by peri-event time, one column per channel.

    Relies on module-level names: Tpre, Tpost, smwid, dbio, physutils.
    """
    # load data
    os.chdir(os.path.expanduser('~/code/hephys/bartc'))
    dbname = os.path.expanduser('~/data/bartc/plexdata/bartc.hdf5')

    # preprocessing pipeline: fetch -> band-limit -> decimate to 100 Hz
    # effective sampling -> instantaneous power -> drop censored regions
    print("Fetching Data...")
    lfp = dbio.fetch_all_such_LFP(dbname, *dtup)
    print("Filtering...")
    lfp = lfp.bandlimit(bands)
    print("Decimating...")
    lfp = lfp.decimate(5)
    print("Calculating Power...")
    lfp = lfp.instpwr()
    print("Censoring...")
    lfp = lfp.censor()

    # event times for this session
    evt = dbio.fetch(dbname, 'events', *dtup)
    evtseries = evt[event].dropna()

    # split lfp around stops, then take the per-channel median across
    # trials at each peri-event time
    split_frame = lfp.evtsplit(evtseries, Tpre, Tpost)
    medians = split_frame.groupby(level=1).median()

    # make peri-stop frame
    df = physutils.LFPset(medians, meta=lfp.meta.copy()).zscore()
    return df.smooth(smwid)
if __name__ == "__main__": # set a random seed np.random.seed(12345) # name of database to use dbname = os.path.expanduser("~/data/bartc/plexdata/bartc.hdf5") # first, get a list of lfp channels setlist = pd.read_hdf(dbname, "/meta/spklist") for idx, row in setlist.iterrows(): dtup = tuple(row) print dtup spks = dbio.load_spikes(dbname, dtup) evt = dbio.fetch(dbname, "events", *dtup[0:2]) regressors = make_regressor_frame(spks, evt) # make spikes the first column in dataframe df = pd.concat([spks, regressors], axis=1) # write out outdir = os.path.expanduser("~/data/bartc/") outfile = outdir + ".".join(map(str, dtup)) + ".spkglmdata.csv" df.to_csv(outfile)
# append to whole dataset allchans.append(banded.dataframe) # concatenate data from all channels print 'Merging channels...' groupdata = pd.concat(allchans, axis=1) groupdata = physutils.LFPset(groupdata, banded.meta) # specify peri-event times dt = 1. / np.array(banded.meta['sr']).round(3) # dt in ms Tpre = 2 # time relative to event to start Tpost = 1.5 # time following event to exclude # grab events (successful stops = true positives for training) print 'Fetching events (true positives)...' evt = dbio.fetch(dbname, 'events', *dtup[:2])['banked'].dropna() evt = np.around(evt / dt) * dt # round to nearest dt # extend with nearby times truepos = (pd.DataFrame(evt.values, columns=['time'])) truepos['outcome'] = 1 # grab random timepoints (true negatives in training set) print 'Generating true negatives...' maxT = np.max(groupdata.index.values) # make some candidate random times Nrand = truepos.shape[0] # number to keep Ncand = Nrand * 10 # number of candidates to generate candidates = np.random.rand(Ncand) * (maxT - Tpre) + Tpre candidates = np.around(candidates / dt) * dt # round to nearest dt candidates = np.unique(candidates) np.random.shuffle(candidates)