def get_concat_split_psths(spike_trials_by_stim, psth_lens, bin_size):
    """ Takes an array of arrays of spike times, splits each into even
        and odd trials, and computes the PSTH for the even and odd trials.

        Returns (concat_even_psths, concat_odd_psths): two 1D arrays of
        length psth_lens.sum(), the per-stimulus PSTHs laid end-to-end.

        NOTE(review): a second, identical definition of this function exists
        later in the file and shadows this one at import time.
    """
    N = psth_lens.sum()
    concat_even_psths = np.zeros([N])
    concat_odd_psths = np.zeros([N])
    offset = 0
    for m, spike_trials in enumerate(spike_trials_by_stim):
        # k % 2 != 0 selects indices 1,3,5,... i.e. the even-numbered trials
        # when trials are counted from 1
        even_trials = [ti for k, ti in enumerate(spike_trials) if k % 2]
        odd_trials = [ti for k, ti in enumerate(spike_trials) if not k % 2]
        duration = psth_lens[m] * bin_size
        # compute_psth returns (time_axis, psth) — see its use in get_full_data;
        # the original stored the whole tuple into the slice, which cannot
        # broadcast into a 1D segment. Keep only the PSTH values.
        _, even_psth = compute_psth(even_trials, duration, bin_size=bin_size)
        _, odd_psth = compute_psth(odd_trials, duration, bin_size=bin_size)
        e = offset + psth_lens[m]
        concat_even_psths[offset:e] = even_psth
        concat_odd_psths[offset:e] = odd_psth
        offset = e
    return concat_even_psths, concat_odd_psths
def get_concat_split_psths(spike_trials_by_stim, psth_lens, bin_size):
    """ Takes an array of arrays of spike times, splits each into even
        and odd trials, and computes the PSTH for the even and odd trials.

        Returns (concat_even_psths, concat_odd_psths): two 1D arrays of
        length psth_lens.sum(), the per-stimulus PSTHs laid end-to-end.
    """
    N = psth_lens.sum()
    concat_even_psths = np.zeros([N])
    concat_odd_psths = np.zeros([N])
    offset = 0
    for m, spike_trials in enumerate(spike_trials_by_stim):
        # k % 2 != 0 selects indices 1,3,5,... i.e. the even-numbered trials
        # when trials are counted from 1
        even_trials = [ti for k, ti in enumerate(spike_trials) if k % 2]
        odd_trials = [ti for k, ti in enumerate(spike_trials) if not k % 2]
        duration = psth_lens[m] * bin_size
        # compute_psth returns (time_axis, psth) — see its use in get_full_data;
        # the original stored the whole tuple into the slice, which cannot
        # broadcast into a 1D segment. Keep only the PSTH values.
        _, even_psth = compute_psth(even_trials, duration, bin_size=bin_size)
        _, odd_psth = compute_psth(odd_trials, duration, bin_size=bin_size)
        e = offset + psth_lens[m]
        concat_even_psths[offset:e] = even_psth
        concat_odd_psths[offset:e] = odd_psth
        offset = e
    return concat_even_psths, concat_odd_psths
def get_concat_psth(spike_trials_by_stim, psth_lens, bin_size):
    """ Takes a bunch of spike trials, separated by stimulus, creates a
        PSTH per stimulus, and concatenates each PSTH into a long array.

        Returns a 1D array of length sum(psth_lens).

        NOTE(review): a second, identical definition of this function exists
        later in the file and shadows this one at import time.
    """
    N = np.sum(psth_lens)
    concat_psths = np.zeros([N])
    offset = 0
    for k, spike_trials in enumerate(spike_trials_by_stim):
        duration = psth_lens[k] * bin_size
        # compute_psth returns (time_axis, psth) — see its use in get_full_data;
        # the original stored the whole tuple into the slice, which cannot
        # broadcast into a 1D segment. Keep only the PSTH values.
        _, psth = compute_psth(spike_trials, duration, bin_size=bin_size)
        e = offset + psth_lens[k]
        concat_psths[offset:e] = psth
        offset = e
    return concat_psths
def get_concat_psth(spike_trials_by_stim, psth_lens, bin_size):
    """ Takes a bunch of spike trials, separated by stimulus, creates a
        PSTH per stimulus, and concatenates each PSTH into a long array.

        Returns a 1D array of length sum(psth_lens).
    """
    N = np.sum(psth_lens)
    concat_psths = np.zeros([N])
    offset = 0
    for k, spike_trials in enumerate(spike_trials_by_stim):
        duration = psth_lens[k] * bin_size
        # compute_psth returns (time_axis, psth) — see its use in get_full_data;
        # the original stored the whole tuple into the slice, which cannot
        # broadcast into a 1D segment. Keep only the PSTH values.
        _, psth = compute_psth(spike_trials, duration, bin_size=bin_size)
        e = offset + psth_lens[k]
        concat_psths[offset:e] = psth
        offset = e
    return concat_psths
def get_full_data(bird, block, segment, hemi, stim_id, data_dir='/auto/tdrive/mschachter/data'):
    """ Load and align all data associated with one stimulus: the stimulus
        spectrogram, z-scored raw LFP, per-trial spike times, per-cell PSTHs,
        and per-syllable acoustic features plus LFP power spectra.

        Returns a dict with keys: 'stim_id', 'spec_t', 'spec_freq', 'spec',
        'lfp', 'spikes', 'lfp_sample_rate', 'psth', 'syllable_props',
        'electrode_order', 'psd_freq', 'cell_index2electrode', 'aprops'.
    """

    bdir = os.path.join(data_dir, bird)
    tdir = os.path.join(bdir, 'transforms')

    aprops = USED_ACOUSTIC_PROPS

    # load the BioSound
    bs_file = os.path.join(tdir, 'BiosoundTransform_%s.h5' % bird)
    bs = BiosoundTransform.load(bs_file)

    # load the StimEvent transform
    se_file = os.path.join(tdir, 'StimEvent_%s_%s_%s_%s.h5' % (bird,block,segment,hemi))
    print 'Loading %s...' % se_file
    se = StimEventTransform.load(se_file, rep_types_to_load=['raw'])
    se.zscore('raw')
    se.segment_stims_from_biosound(bs_file)

    # load the pairwise CF transform
    pcf_file = os.path.join(tdir, 'PairwiseCF_%s_%s_%s_%s_raw.h5' % (bird,block,segment,hemi))
    print 'Loading %s...' % pcf_file
    pcf = PairwiseCFTransform.load(pcf_file)

    def log_transform(x, dbnoise=100.):
        # In-place conversion of power values to a normalized dB-like scale:
        # peak-normalize, convert positive entries to dB with a noise-floor
        # offset, clip negatives to zero, then peak-normalize again.
        x /= x.max()
        zi = x > 0
        x[zi] = 20*np.log10(x[zi]) + dbnoise
        x[x < 0] = 0
        x /= x.max()

    # log-transform and then z-score the LFP power spectra across all rows
    # (deepcopy so pcf.psds itself is not modified in place)
    all_lfp_psds = deepcopy(pcf.psds)
    log_transform(all_lfp_psds)
    all_lfp_psds -= all_lfp_psds.mean(axis=0)
    all_lfp_psds /= all_lfp_psds.std(axis=0, ddof=1)

    # get overall biosound stats: per-acoustic-property mean/std across all
    # stims, used below to z-score each syllable's features
    bs_stats = dict()
    for aprop in aprops:
        amean = bs.stim_df[aprop].mean()
        astd = bs.stim_df[aprop].std(ddof=1)
        bs_stats[aprop] = (amean, astd)

    # diagnostic listing of available (stim_id, stim_type) pairs
    # NOTE(review): the group variables are not used beyond this printout, so
    # the loop body is presumably just the print — confirm against the
    # original (unflattened) source, since the work below uses the stim_id
    # argument, not stim_id2.
    for (stim_id2,stim_type2),gdf in se.segment_df.groupby(['stim_id', 'stim_type']):
        print '%d: %s' % (stim_id2, stim_type2)

    # get the spectrogram, truncated at the end time of the last segment
    # belonging to this stimulus
    i = se.segment_df.stim_id == stim_id
    last_end_time = se.segment_df.end_time[i].max()

    spec_freq = se.spec_freq
    stim_spec = se.spec_by_stim[stim_id]
    # spectrogram time axis is sampled at the LFP sample rate
    spec_t = np.arange(stim_spec.shape[1]) / se.lfp_sample_rate
    speci = np.min(np.where(spec_t > last_end_time)[0])
    spec_t = spec_t[:speci]
    stim_spec = stim_spec[:, :speci]
    stim_dur = spec_t.max() - spec_t.min()

    # get the raw LFP, clipped to the stimulus window (skipping the
    # pre-stimulus period)
    si = int(se.pre_stim_time*se.lfp_sample_rate)
    ei = int(stim_dur*se.lfp_sample_rate) + si
    lfp = se.lfp_reps_by_stim['raw'][stim_id][:, :, si:ei]
    ntrials,nelectrodes,nt = lfp.shape

    # get the raw spikes, spike_mat is ragged array of shape (num_trials, num_cells, num_spikes)
    spike_mat = se.spikes_by_stim[stim_id]
    assert ntrials == len(spike_mat)
    ncells = len(se.cell_df)
    print 'ncells=%d' % ncells
    # redundant with the LFP-derived ntrials (asserted equal above)
    ntrials = len(spike_mat)

    # compute the PSTH: one row per cell, binned at the LFP sample rate
    psth = list()
    for n in range(ncells):
        # get the spikes across all trials for neuron n
        spikes = [spike_mat[k][n] for k in range(ntrials)]
        # make a PSTH
        _psth_t,_psth = compute_psth(spikes, stim_dur, bin_size=1.0/se.lfp_sample_rate)
        psth.append(_psth)
    psth = np.array(psth)

    # electrode ordering (rostral-to-caudal) depends on recorded hemisphere
    if hemi == 'L':
        electrode_order = ROSTRAL_CAUDAL_ELECTRODES_LEFT
    else:
        electrode_order = ROSTRAL_CAUDAL_ELECTRODES_RIGHT

    # get acoustic props and LFP/spike power spectra for each syllable
    syllable_props = list()
    i = bs.stim_df.stim_id == stim_id
    orders = sorted(bs.stim_df.order[i].values)
    # never populated here; returned as None in the result dict
    cell_index2electrode = None
    for o in orders:
        # exactly one row per (stim_id, syllable order)
        i = (bs.stim_df.stim_id == stim_id) & (bs.stim_df.order == o)
        assert i.sum() == 1
        d = dict()
        d['start_time'] = bs.stim_df.start_time[i].values[0]
        d['end_time'] = bs.stim_df.end_time[i].values[0]
        d['order'] = o
        # z-score each acoustic property using the across-stim stats
        for aprop in aprops:
            amean,astd = bs_stats[aprop]
            d[aprop] = (bs.stim_df[aprop][i].values[0] - amean) / astd

        # get the LFP power spectra: for each electrode, the 'full' decomp
        # row where electrode1 == electrode2 (i.e. the auto-spectrum)
        lfp_psd = list()
        for k,e in enumerate(electrode_order):
            i = (pcf.df.stim_id == stim_id) & (pcf.df.order == o) & (pcf.df.decomp == 'full') & \
                (pcf.df.electrode1 == e) & (pcf.df.electrode2 == e)
            assert i.sum() == 1, "i.sum()=%d" % i.sum()
            index = pcf.df[i]['index'].values[0]
            lfp_psd.append(all_lfp_psds[index, :])
        d['lfp_psd'] = np.array(lfp_psd)
        syllable_props.append(d)

    return {'stim_id':stim_id, 'spec_t':spec_t, 'spec_freq':spec_freq, 'spec':stim_spec,
            'lfp':lfp, 'spikes':spike_mat, 'lfp_sample_rate':se.lfp_sample_rate, 'psth':psth,
            'syllable_props':syllable_props, 'electrode_order':electrode_order,
            'psd_freq':pcf.freqs, 'cell_index2electrode':cell_index2electrode, 'aprops':aprops}