def corr_freezing_sleep(fpath, spath, frecs, srecs, trials, sleep_stats=0, istate=1,
                        tstart=0, tend=-1, ma_thr=20, min_dur=0, pplot=True):
    """
    Correlate freezing behavior with sleep.
    The percentage of freezing can be correlated with any quantity calculated by sleepy.sleep_stats

    :param fpath: base folder of "fear" sessions
    :param spath: base folder of sleep recordings
    :param frecs: list of fear sessions
           NOTE: the order of mice in @frecs must be the same as in @srecs!
    :param srecs: list of sleep recordings
    :param trials: list of trial numbers (counting starts with 1)
    :param sleep_stats: measured sleep variable (statistics):
           0 - percentage, 1 - episode duration, 2 - episode frequency,
           3 - latency to first occurrence of state $istate
    :param istate: 1 - REM, 2 - Wake, 3 - NREM
    :param tstart: float, quantification of sleep starts at $tstart s
    :param tend: float, quantification of sleep ends at $tend s
    :param pplot: if True, plot figure showing scatter plot of freezing vs sleep
    :param ma_thr: float, wake periods shorter than $ma_thr are considered as microarousals
           and further converted to NREM
    :param min_dur: only used for sleep_stats == 3, minimal duration of state $istate to be counted
    :return: r value, p value of linear fit
    """
    states = {1: 'REM', 2: 'Wake', 3: 'NREM'}
    stats_label = {0: '(%)', 1: 'Duration (s)', 2: 'Freq. (1/h)', 3: 'Onset latency (min)'}
    trials = [t - 1 for t in trials]

    fmouse_order = []
    for rec in frecs:
        idf = re.split('_', rec)[0]
        if idf not in fmouse_order:
            fmouse_order.append(idf)

    smouse_order = []
    for rec in srecs:
        idf = re.split('_', rec)[0]
        if idf not in smouse_order:
            smouse_order.append(idf)

    if not smouse_order == fmouse_order:
        print("ERROR: sleep and fear recordings do not match!")
        print("...stopping")
        return

    if sleep_stats <= 2:
        perc = sleepy.sleep_stats(spath, srecs, pplot=False, tstart=tstart, tend=tend,
                                  ma_thr=ma_thr)[sleep_stats][:, istate - 1]
    else:
        perc = sleepy.state_onset(spath, srecs, istate, min_dur, tstart=tstart, tend=tend, pplot=False)

    freezing = tone_freezing_mice(fpath, frecs, pplot=False)[1]
    freezing = np.mean(freezing[:, trials], axis=1)

    xmin = min(perc)
    xmax = max(perc)
    x = np.arange(xmin - (xmax - xmin) / 10., xmax + (xmax - xmin) / 10., .1)
    slope, intercept, r_value, p_value, _ = stats.linregress(perc, freezing)
    print("linear regression results: r value: %.3f, p value: %.3f" % (r_value, p_value))

    if pplot:
        # get as many different colors as there are mice
        clrs = sns.color_palette("husl", len(fmouse_order))
        plt.ion()
        plt.figure()
        ax = plt.subplot(111)
        for i in range(len(fmouse_order)):
            plt.scatter(perc[i], freezing[i], color=clrs[i])
            plt.text(perc[i], freezing[i] + 1, fmouse_order[i], fontsize=11)
        plt.plot(x, x * slope + intercept, '--', color='gray')
        sleepy.box_off(ax)
        plt.xlabel(states[istate] + ' ' + stats_label[sleep_stats])
        plt.ylabel('Freezing (%)')
        plt.show()

    return r_value, p_value
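
# Usage sketch for corr_freezing_sleep. The base folders and recording names below are
# hypothetical placeholders, not values shipped with this module:
#
#     # correlate tone-evoked freezing (trials 1-3) with REM percentage across mice
#     r, p = corr_freezing_sleep('/data/Fear', '/data/Sleep',
#                                frecs=['M1_fear', 'M2_fear'],
#                                srecs=['M1_sleep', 'M2_sleep'],
#                                trials=[1, 2, 3], sleep_stats=0, istate=1)
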
def behavioral_spectrogram(ppath, name, beh_file, twin=3, fmax=30, mu=[10, 100], pplot=True):
    """
    Calculate the EEG spectrogram and EMG amplitude for each symbol specified in the given
    annotation file ($beh_file), generated by video_processor_stack.py

    :param ppath: base folder
    :param name: recording
    :param beh_file: annotation file generated by video_processor_stack.py
    :param twin: time window for power spectrum calculation
    :param fmax: maximum frequency for EEG spectrogram
    :param mu: tuple, lower and upper limit of the frequency range used to calculate the EMG amplitude
    :param pplot: if True, generate plot
    :return: dict: symbols -> EEG spectrogram,
             frequency axis,
             dict: symbols -> EMG amplitude
    """
    sr = sleepy.get_snr(ppath, name)
    nwin = np.round(twin * sr)

    ann, _ = vypro.load_behann_file(os.path.join(ppath, name, beh_file))
    time = list(ann.keys())
    time.sort()
    symbols = list(set(ann.values()))

    # load EEG and EMG
    P = so.loadmat(os.path.join(ppath, name, 'EEG.mat'), squeeze_me=True)
    EEG = P['EEG'] * 1000
    EMG = so.loadmat(os.path.join(ppath, name, 'EMG.mat'), squeeze_me=True)['EMG'] * 1000

    # save for each symbol a list of indices at which the symbol occurs
    sym2idx = {s: [] for s in symbols}
    for sym in symbols:
        i = 0
        idx = []
        for t in time:
            if ann[t] == sym:
                idx.append(i)
            i += 1
        sym2idx[sym] = np.array(idx)

    sym2pow = dict()
    sym2emg = dict()
    for sym in symbols:
        idx = sym2idx[sym]
        seq = sleepy.get_sequences(idx)
        pow_list = []
        emg_list = []
        for s in seq:
            d = time[s[-1]] - time[s[0]]
            if d >= twin:
                i = time[s[0]]
                j = time[s[-1]]
                i = int(np.round(i * sr))
                j = int(np.round(j * sr))
                pow_eeg, f = sleepy.power_spectrum(EEG[i:j], nwin, 1.0 / sr)
                pow_list.append(pow_eeg)
                pow_emg, f = sleepy.power_spectrum(EMG[i:j], nwin, 1.0 / sr)
                emg_list.append(pow_emg)
        sym2pow[sym] = np.array(pow_list).mean(axis=0)
        sym2emg[sym] = np.array(emg_list).mean(axis=0)

    ifreq = np.where(f <= fmax)[0]
    df = f[1] - f[0]
    imu = np.where((f >= mu[0]) & (f <= mu[1]))[0]
    for sym in symbols:
        sym2emg[sym] = np.sqrt(np.sum(sym2emg[sym][imu]) * df)
        sym2pow[sym] = sym2pow[sym][ifreq]

    if pplot:
        clrs = sns.color_palette("husl", len(symbols))
        plt.ion()
        plt.figure(figsize=(8, 4))
        ax = plt.subplot(121)
        i = 0
        for sym in symbols:
            plt.plot(f[ifreq], sym2pow[sym], label=sym, color=clrs[i])
            i += 1
        sleepy.box_off(ax)

        ax = plt.subplot(122)
        i = 0
        idx = []
        for sym in symbols:
            plt.bar(i, sym2emg[sym], color=clrs[i])
            idx.append(i)
            i += 1
        sleepy.box_off(ax)
        plt.xticks(idx)
        ax.set_xticklabels(symbols)
        plt.show()

    return sym2pow, f[ifreq], sym2emg
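
# Usage sketch for behavioral_spectrogram. The recording and annotation file names are
# hypothetical placeholders:
#
#     sym2pow, freq, sym2emg = behavioral_spectrogram('/data/Video', 'M1_030221n1',
#                                                     'beh_annotation.txt', twin=3, fmax=30)
#     # sym2pow maps each annotation symbol to its mean EEG power spectrum (up to fmax);
#     # sym2emg maps each symbol to a single EMG amplitude (square root of the power
#     # integrated over the mu frequency range)
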
def behavioral_spectrogram_recordings(ppath, recordings, beh_file, twin=3, fmax=30, mu=[10, 100]):
    """
    Plot the EEG spectrogram and EMG amplitude across the given list of recordings for each
    symbol in annotation file $beh_file.
    I assume that each recording's annotation file has the same file name ($beh_file).
    Averages are calculated over recordings (not mice).

    :param ppath: base folder
    :param recordings: list of recordings
    :param beh_file: annotation file generated by video_processor_stack.py
    :param twin: time window used for power spectrum calculation; the longer the window,
           the finer the frequency scale, but the noisier the power estimate.
    :param fmax: float, maximum EEG frequency shown in EEG spectrogram
    :param mu: tuple, lower and upper limit of frequency range used to calculate EMG amplitude
    :return: dict: symbols -> np.array(recordings x frequencies),
             vector(frequency axis),
             dict: symbols -> vector(EMG amplitude for each recording)
    """
    rec2pow = {}
    rec2emg = {}
    for rec in recordings:
        rec2pow[rec], f, rec2emg[rec] = behavioral_spectrogram(ppath, rec, beh_file, twin=twin,
                                                               fmax=fmax, mu=mu, pplot=False)

    symbols = list(rec2pow[list(rec2pow.keys())[0]].keys())
    symbols.sort()
    sym2pow = {s: [] for s in symbols}
    sym2emg = {s: [] for s in symbols}
    for sym in symbols:
        for rec in recordings:
            sym2pow[sym].append(rec2pow[rec][sym])
            sym2emg[sym].append(rec2emg[rec][sym])
    for sym in symbols:
        sym2pow[sym] = np.array(sym2pow[sym])
        sym2emg[sym] = np.array(sym2emg[sym])

    clrs = sns.color_palette("husl", len(symbols))
    labels = []
    for s in symbols:
        if len(s) > 0:
            labels.append(s)
        else:
            labels.append('other')

    plt.ion()
    plt.figure(figsize=(10, 5))
    ax = plt.subplot(121)
    i = 0
    for sym in symbols:
        tmp = sym2pow[sym].mean(axis=0)
        std = sym2pow[sym].std(axis=0)
        plt.fill_between(f, tmp - std, tmp + std, label=labels[i], color=clrs[i], alpha=0.5)
        i += 1
    sleepy.box_off(ax)
    plt.legend()
    plt.xlabel('Freq. (Hz)')
    plt.ylabel(r'Power ($\mu V^2$)')

    ax = plt.subplot(122)
    i = 0
    for sym in symbols:
        plt.bar(i, sym2emg[sym].mean(axis=0), edgecolor=clrs[i], fill=False)
        for j in range(len(recordings)):
            plt.plot(i, sym2emg[sym][j], 'o', color=clrs[i])
        i += 1
    sleepy.box_off(ax)
    plt.ylabel(r'EMG Ampl. ($\mu V$)')
    plt.xticks(range(len(symbols)))
    ax.set_xticklabels(labels)
    plt.show()

    return sym2pow, f, sym2emg
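
# Usage sketch for behavioral_spectrogram_recordings. The recording list and annotation
# file name are hypothetical placeholders:
#
#     recs = ['M1_030221n1', 'M2_030221n1']
#     sym2pow, freq, sym2emg = behavioral_spectrogram_recordings('/data/Video', recs,
#                                                                'beh_annotation.txt')
#     # sym2pow[sym] is a (recordings x frequencies) array; the averages in the figure
#     # are taken across recordings, not mice
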
def tone_freezing_mice(ppath, mice, pplot=True, psingle_mouse=False, pstd=True, alpha=5.0, csv_file=''):
    """
    Calculate and plot the duration/percentage of freezing during each tone across mice

    :param ppath: base folder
    :param mice: list of recordings
    :param pplot: if True, plot figure
    :param psingle_mouse: if True, plot each single mouse
    :param pstd: if True, plot STD, otherwise plot percentiles
    :param alpha: if pstd == False, plot the 1-alpha confidence interval
           (i.e. the lower and upper ends of the errorbars correspond to the
           $alpha/2 and 1-$alpha/2 percentiles)
    :param csv_file: string, if non-empty, write the data into the given csv file
           (file ending ".csv"); $csv_file can be a single file name or a full path
           including the file name
    :return: two np.arrays (number of mice x number of trials) with the duration and
             percentage of freezing during each tone, and the same data as a pd.DataFrame
             with mice as index and two levels of columns.
             To get the percentage of freezing of all mice during trial 2, type
             df['percentage']['trial2']
    """
    duration = []
    percentage = []
    mouse_order = []
    for name in mice:
        dur, perc = tone_freezing(ppath, name)
        duration.append(dur)
        percentage.append(perc)
        idf = re.split('_', name)[0]
        mouse_order.append(idf)

    duration = np.array(duration)
    percentage = np.array(percentage)
    ntrials = duration.shape[1]

    if pplot:
        clrs = sns.color_palette("husl", len(mouse_order))
        plt.ion()
        plt.figure()
        ax = plt.axes([0.2, 0.1, 0.5, 0.8])
        if psingle_mouse:
            for i in range(len(mouse_order)):
                plt.plot(range(1, ntrials + 1), percentage[i, :], label=mouse_order[i], color=clrs[i])
            ax.legend(mouse_order, bbox_to_anchor=(0., 1.0, 1., .102), loc=3,
                      mode='expand', ncol=len(mouse_order), frameon=False)
        else:
            if pstd:
                ax.errorbar(range(1, ntrials + 1), percentage.mean(axis=0),
                            yerr=percentage.std(axis=0), fmt='', marker='o',
                            color='gray', linewidth=2, elinewidth=1.5)
            else:
                # use percentiles for the errorbars
                err = np.vstack((percentage.mean(axis=0) - np.percentile(percentage, alpha / 2.0, axis=0),
                                 np.percentile(percentage, (100 - alpha / 2.0), axis=0) - percentage.mean(axis=0)))
                ax.errorbar(range(1, ntrials + 1), percentage.mean(axis=0), yerr=err,
                            fmt='', marker='o', color='gray', linewidth=2, elinewidth=1.5)
        plt.xticks(range(1, ntrials + 1))
        sleepy.box_off(ax)
        plt.xlabel('Trial No.')
        plt.ylabel('% Freezing')
        plt.ylim((0, 100))
        plt.show()

    # put duration and percentage into a data frame
    trials = ['trial' + str(i) for i in range(1, ntrials + 1)]
    cols = pd.MultiIndex.from_product([['duration', 'percentage'], trials], names=['stats', 'trials'])
    df = pd.DataFrame(index=mouse_order, columns=cols, data=np.hstack((duration, percentage)))
    if len(csv_file) > 0:
        df.to_csv(os.path.join(ppath, csv_file))

    return duration, percentage, df
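
# Usage sketch for tone_freezing_mice. The recording names and csv file are hypothetical
# placeholders:
#
#     dur, perc, df = tone_freezing_mice('/data/Fear', ['M1_fear', 'M2_fear'],
#                                        pplot=True, csv_file='freezing.csv')
#     # percentage of freezing of all mice during trial 2:
#     trial2 = df['percentage']['trial2']
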
def tone_freezing(ppath, name, freeze_file='', pplot=False):
    """
    Calculate the duration/percentage of freezing during each tone

    :param ppath: base folder
    :param name: recording
    :param freeze_file: annotation file for freezing
    :param pplot: if True, plot percentage of freezing for each tone/trial
    :return: two vectors with the total duration of freezing during each tone
             and the percentage of freezing
    """
    sr = sleepy.get_snr(ppath, name)
    dt = 1 / sr
    if len(freeze_file) == 0:
        freeze_file = 'vip_f.txt'
    ann, K = vypro.load_behann_file(os.path.join(ppath, name, freeze_file))
    time = list(ann.keys())
    time.sort()
    dt_beh = time[1] - time[0]
    time_arr = np.array(time)

    tone = so.loadmat(os.path.join(ppath, name, 'tone.mat'), squeeze_me=True)['tone']
    idxs, idxe = sleepy.laser_start_end(tone)
    idxs = [s * dt for s in idxs]
    idxe = [s * dt for s in idxe]

    freezing = []
    for t in time:
        freezing.append(ann[t])

    resp_dur = []
    resp_per = []
    for (i, j) in zip(idxs, idxe):
        idx = np.where((time_arr >= i) & (time_arr <= j))[0]
        beh = []
        for f in idx:
            state = ann[time[f]]
            if state == 'f':
                beh.append(1)
            else:
                beh.append(0)
        beh = np.array(beh)
        dur_freezing = np.sum(beh) * dt_beh
        per_freezing = 100 * np.sum(beh) / (1.0 * len(idx))
        resp_dur.append(dur_freezing)
        resp_per.append(per_freezing)

    if pplot:
        plt.ion()
        plt.figure()
        ax = plt.axes([0.2, 0.1, 0.5, 0.8])
        plt.bar(range(1, len(idxs) + 1), resp_per, color='gray')
        plt.xticks(range(1, len(idxs) + 1))
        sleepy.box_off(ax)
        plt.xlabel('Trial No.')
        plt.ylabel('% Freezing')
        plt.ylim((0, 100))
        plt.show()

    return np.array(resp_dur), np.array(resp_per)
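
# Usage sketch for tone_freezing. The recording name is a hypothetical placeholder;
# the annotation file defaults to 'vip_f.txt' inside the recording folder:
#
#     dur, perc = tone_freezing('/data/Fear', 'M1_fear', pplot=True)
#     # dur[i] / perc[i]: seconds / percentage of freezing during tone i+1
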
def opto_video(ppath, name, ts, te, fmax=20, emg_legend=1000, vm=2.0, time_legend=10, ffmpeg_path='ffmpeg'):
    """
    Generate video for an optogenetic sleep recording.
    The function requires that ffmpeg is installed on your system (http://ffmpeg.org).
    Windows users: specify the full path to the ffmpeg program.
    The resulting video has 1 Hz resolution and will be saved in folder $ppath/$name

    :param ppath: base folder
    :param name: name of recording
    :param ts: start time in seconds
    :param te: end time in seconds
    :param fmax: maximum frequency on EEG spectrogram
    :param emg_legend: EMG legend in micro Volts
    :param vm: controls saturation of EEG spectrogram; a value in the range from 1 to 2 should work best
    :param time_legend: time legend in seconds
    :param ffmpeg_path: full, absolute path to the ffmpeg program; important to set on Windows
    :return: n/a
    """
    # helper function ######################
    def closest_neighbor(vec, x):
        d = np.abs(vec - x)
        el = np.min(d)
        idx = np.argmin(d)
        return el, idx
    ########################################

    # setup figure arrangement
    sleepy.set_fontsize(12)
    plt.ion()
    plt.figure()
    plt.figure(figsize=(8, 6))
    ax_video = plt.axes([0.1, 0.52, 0.8, 0.45])
    ax_laser = plt.axes([0.1, 0.45, 0.8, 0.03])
    ax_eeg = plt.axes([0.1, 0.28, 0.8, 0.15])
    ax_emg = plt.axes([0.1, 0.11, 0.8, 0.15])
    ax_bs = plt.axes([0.1, 0.05, 0.8, 0.05])
    ax_time = plt.axes([0.1, 0.01, 0.8, 0.031])

    movie_stack = os.path.join(ppath, name, 'MStack')
    if not os.path.isdir(movie_stack):
        os.mkdir(movie_stack)

    sr = sleepy.get_snr(ppath, name)
    M = sleepy.load_stateidx(ppath, name)[0]
    dt = 1.0 / sr
    nbins = int(np.round(sr) * 5.0 / 2)
    Mup = spyke.upsample_mx(M, nbins)

    EEG = so.loadmat(os.path.join(ppath, name, 'EEG.mat'), squeeze_me=True)['EEG']
    EMG = so.loadmat(os.path.join(ppath, name, 'EMG.mat'), squeeze_me=True)['EMG']
    vid_time = so.loadmat(os.path.join(ppath, name, 'video_timing.mat'), squeeze_me=True)['onset']
    len_eeg = EEG.shape[0]
    t = np.arange(0, len_eeg) * dt

    its = closest_neighbor(t, ts)[1]
    ite = closest_neighbor(t, te)[1]
    data_eeg = EEG[its:ite]
    states = sleepy.downsample_states(Mup[its:ite], int(np.round(sr)))
    state_map = [[0, 1, 1], [0.5, 0, 1], [0.6, 0.6, 0.6]]

    # load laser
    laser_map = [[1, 1, 1], [0, 0.3, 1]]
    laser = sleepy.load_laser(ppath, name)
    laser = laser[its:ite]
    idxs, idxe = sleepy.laser_start_end(laser, SR=sr)
    npers = int(np.round(sr))
    idxs = [int(i / npers) for i in idxs]
    idxe = [int(i / npers) for i in idxe]

    # setup axis for video
    ax_video.get_xaxis().set_visible(False)
    ax_video.get_yaxis().set_visible(False)
    ax_video.spines["top"].set_visible(False)
    ax_video.spines["right"].set_visible(False)
    ax_video.spines["bottom"].set_visible(False)
    ax_video.spines["left"].set_visible(False)

    # setup axes for EEG spectrogram
    sleepy.box_off(ax_eeg)
    ax_eeg.set_xticks([])
    plt.gcf().text(0.11, 0.38, 'EEG', color='white')

    # setup axes for EMG
    ax_emg.get_xaxis().set_visible(False)
    ax_emg.get_yaxis().set_visible(False)
    ax_emg.spines["top"].set_visible(False)
    ax_emg.spines["right"].set_visible(False)
    ax_emg.spines["bottom"].set_visible(False)
    ax_emg.spines["left"].set_visible(False)
    emg_max = np.max(np.abs(EMG[its:ite]))
    emg_max = emg_max + emg_max * 0.1
    # write "EMG" and label the EMG legend
    plt.gcf().text(0.905, 0.21, "%.1f mV" % (emg_legend / 1000.0), rotation=90)
    plt.gcf().text(0.11, 0.25, 'EMG')

    # setup axes for brainstate patches
    ax_bs.get_xaxis().set_visible(False)
    ax_bs.get_yaxis().set_visible(False)
    ax_bs.spines["top"].set_visible(False)
    ax_bs.spines["right"].set_visible(False)
    ax_bs.spines["bottom"].set_visible(False)
    ax_bs.spines["left"].set_visible(False)

    # calculate spectrogram
    fspec, tspec, Sxx = scipy.signal.spectrogram(data_eeg, fs=sr, nperseg=2 * npers, noverlap=npers)
    ifreq = np.where(fspec <= fmax)[0]
    med = np.median(Sxx.max(axis=0))
    nspec = len(tspec)
    laser = np.zeros((nspec,))
    for (i, j) in zip(idxs, idxe):
        laser[i:j + 1] = 1

    # setup axis for laser
    ax_laser.get_xaxis().set_visible(False)
    ax_laser.get_yaxis().set_visible(False)
    ax_laser.spines["top"].set_visible(False)
    ax_laser.spines["right"].set_visible(False)
    ax_laser.spines["bottom"].set_visible(False)
    ax_laser.spines["left"].set_visible(False)
    # write "Laser"
    plt.gcf().text(0.11, 0.46, 'Laser', color=laser_map[1])

    # legend for brain states
    plt.gcf().text(0.7, 0.01, 'REM', color=state_map[0])
    plt.gcf().text(0.77, 0.01, 'Wake', color=state_map[1])
    plt.gcf().text(0.84, 0.01, 'NREM', color=state_map[2])

    # setup time legend (below the brainstate panel)
    ax_time.plot((tspec[0], tspec[0] + time_legend), [1, 1], color='black', linewidth=2)
    ax_time.set_xlim((tspec[0], tspec[-1]))
    ax_time.set_ylim((-1, 1.1))
    ax_time.get_xaxis().set_visible(False)
    ax_time.get_yaxis().set_visible(False)
    ax_time.spines["top"].set_visible(False)
    ax_time.spines["right"].set_visible(False)
    ax_time.spines["bottom"].set_visible(False)
    ax_time.spines["left"].set_visible(False)
    ax_time.text(tspec[0], -1, '%s s' % str(time_legend))

    tstart = t[its]
    for i in range(2, len(tspec)):
        curr_t = tstart + tspec[i]

        # plot EEG spectrogram up to the current time point
        ax_eeg.cla()
        ax_eeg.pcolor(tspec[:i], fspec[ifreq], Sxx[ifreq, :i], vmin=0, vmax=med * vm)
        ax_eeg.set_xlim((tspec[0], tspec[-1]))
        ax_eeg.set_xticks([])
        ax_eeg.set_ylabel('Freq. (Hz)')

        # display current movie frame
        ax_video.cla()
        iframe = closest_neighbor(vid_time, curr_t)[1]
        img = cv2.imread(os.path.join(ppath, name, 'Stack', 'fig%d.jpg' % (iframe + 1)))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        ax_video.imshow(img)

        # show EMG
        emg = EMG[its:closest_neighbor(t, curr_t)[1]]
        ax_emg.cla()
        temg = np.arange(0, len(emg)) * dt
        ax_emg.plot(temg, emg, color='black', linewidth=0.5)
        ax_emg.set_xlim((tspec[0], tspec[-1]))
        ax_emg.set_ylim((-emg_max, emg_max))
        # plot EMG legend
        ax_emg.plot(([tspec[-1] - 1, tspec[-1] - 1]), (-emg_legend / 2.0, emg_legend / 2.0),
                    color='black', linewidth=2)

        # plot brain state patches
        j = i - 1
        ax_bs.add_patch(patches.Rectangle((tspec[j - 1], 0), tspec[j] - tspec[j - 1], 1,
                                          facecolor=state_map[int(states[j]) - 1],
                                          edgecolor=state_map[int(states[j]) - 1]))
        ax_bs.set_xlim((tspec[0], tspec[-1]))
        ax_bs.set_ylim((0, 1))

        # plot laser
        ax_laser.add_patch(patches.Rectangle((tspec[j - 1], 0), tspec[j] - tspec[j - 1], 1,
                                             facecolor=laser_map[int(laser[j])],
                                             edgecolor=laser_map[int(laser[j])]))
        ax_laser.set_ylim((0, 1))
        ax_laser.set_xlim((tspec[0], tspec[-1]))

        if i % 10 == 0:
            print("done with frame %d out of %d frames" % (i, len(tspec)))
        plt.savefig(os.path.join(movie_stack, 'fig%d.png' % i))

    encode_video(ppath, name, stack='MStack', ending='.png', fr=5,
                 outpath=os.path.join(ppath, name), ffmpeg_path=ffmpeg_path, vidname='movie_opto_')
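
# Usage sketch for opto_video. The base folder and recording name are hypothetical
# placeholders; ffmpeg must be installed, and on Windows ffmpeg_path should point to
# the ffmpeg executable:
#
#     opto_video('/data/Sleep', 'M1_laser_n1', ts=1000, te=1300,
#                fmax=20, vm=2.0, ffmpeg_path='ffmpeg')
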
def fibpho_video(ppath, name, ts, te, fmax=20, emg_legend=1000, vm=2.0, time_legend=10,
                 dff_legend=10, ffmpeg_path='ffmpeg'):
    """
    Generate video for a fiber photometry recording.
    The function requires that ffmpeg is installed on your system (http://ffmpeg.org).
    Windows users: specify the full path to the ffmpeg program.
    The resulting video has 1 Hz resolution and will be saved in folder $ppath/$name

    :param ppath: base folder
    :param name: name of recording
    :param ts: start time in seconds
    :param te: end time in seconds
    :param fmax: maximum frequency on EEG spectrogram
    :param emg_legend: EMG legend in micro Volts
    :param vm: controls saturation of EEG spectrogram; a value in the range from 1 to 2 should work best
    :param time_legend: time legend in seconds
    :param dff_legend: DF/F legend in %
    :param ffmpeg_path: full, absolute path to the ffmpeg program; important to set on Windows
    :return: n/a
    """
    # helper function ######################
    def closest_neighbor(vec, x):
        d = np.abs(vec - x)
        el = np.min(d)
        idx = np.argmin(d)
        return el, idx
    ########################################

    # setup figure arrangement
    sleepy.set_fontsize(12)
    plt.ion()
    plt.figure()
    plt.figure(figsize=(8, 6))
    ax_video = plt.axes([0.1, 0.55, 0.8, 0.43])
    ax_eeg = plt.axes([0.1, 0.38, 0.8, 0.15])
    ax_emg = plt.axes([0.1, 0.25, 0.8, 0.1])
    ax_bs = plt.axes([0.1, 0.22, 0.8, 0.02])
    ax_bs_legend = plt.axes([0.1, 0.24, 0.2, 0.02])
    ax_dff = plt.axes([0.1, 0.05, 0.8, 0.15])
    ax_dff_legend = plt.axes([0.05, 0.05, 0.05, 0.15])
    ax_time = plt.axes([0.1, 0.001, 0.8, 0.03])

    movie_stack = os.path.join(ppath, name, 'MStack')
    if not os.path.isdir(movie_stack):
        os.mkdir(movie_stack)

    sr = sleepy.get_snr(ppath, name)
    M = sleepy.load_stateidx(ppath, name)[0]
    dt = 1.0 / sr
    nbins = int(np.round(sr) * 5.0 / 2)
    Mup = spyke.upsample_mx(M, nbins)

    EEG = so.loadmat(os.path.join(ppath, name, 'EEG.mat'), squeeze_me=True)['EEG']
    EMG = so.loadmat(os.path.join(ppath, name, 'EMG.mat'), squeeze_me=True)['EMG']
    vid_time = so.loadmat(os.path.join(ppath, name, 'video_timing.mat'), squeeze_me=True)['onset']
    len_eeg = EEG.shape[0]
    t = np.arange(0, len_eeg) * dt

    its = closest_neighbor(t, ts)[1]
    ite = closest_neighbor(t, te)[1]
    data_eeg = EEG[its:ite]
    states = sleepy.downsample_states(Mup[its:ite], int(np.round(sr)))
    state_map = [[0, 1, 1], [0.5, 0, 1], [0.6, 0.6, 0.6]]

    # load and resample DF/F
    dff = so.loadmat(os.path.join(ppath, name, 'DFF.mat'), squeeze_me=True)['dff'] * 100
    dff = spyke.downsample_vec(dff[its:ite], int(np.round(sr)))
    dff_max = np.max(dff)
    dff_max = dff_max + 0.2 * dff_max
    dff_min = np.min(dff)
    dff_min = dff_min - 0.1 * dff_min

    # setup axis for video
    ax_video.get_xaxis().set_visible(False)
    ax_video.get_yaxis().set_visible(False)
    ax_video.spines["top"].set_visible(False)
    ax_video.spines["right"].set_visible(False)
    ax_video.spines["bottom"].set_visible(False)
    ax_video.spines["left"].set_visible(False)

    # setup axes for EEG spectrogram
    sleepy.box_off(ax_eeg)
    ax_eeg.set_xticks([])
    plt.gcf().text(0.11, 0.49, 'EEG', color='white')
    plt.gcf().text(0.11, 0.18, r'$\mathrm{\Delta F/F}$', color='blue')

    # setup axes for EMG
    ax_emg.get_xaxis().set_visible(False)
    ax_emg.get_yaxis().set_visible(False)
    ax_emg.spines["top"].set_visible(False)
    ax_emg.spines["right"].set_visible(False)
    ax_emg.spines["bottom"].set_visible(False)
    ax_emg.spines["left"].set_visible(False)
    emg_max = np.max(np.abs(EMG[its:ite]))
    emg_max = emg_max + emg_max * 0.1
    # write "EMG" and label the EMG legend
    plt.gcf().text(0.905, 0.31, "%.1f mV" % (emg_legend / 1000.0), rotation=90)
    plt.gcf().text(0.11, 0.35, 'EMG')

    # setup axes for DF/F
    ax_dff.get_xaxis().set_visible(False)
    ax_dff.get_yaxis().set_visible(False)
    ax_dff.spines["top"].set_visible(False)
    ax_dff.spines["right"].set_visible(False)
    ax_dff.spines["bottom"].set_visible(False)
    ax_dff.spines["left"].set_visible(False)

    # setup axes for brainstate patches
    ax_bs.get_xaxis().set_visible(False)
    ax_bs.get_yaxis().set_visible(False)
    ax_bs.spines["top"].set_visible(False)
    ax_bs.spines["right"].set_visible(False)
    ax_bs.spines["bottom"].set_visible(False)
    ax_bs.spines["left"].set_visible(False)

    # calculate spectrogram
    fspec, tspec, Sxx = scipy.signal.spectrogram(data_eeg, fs=sr, nperseg=int(2 * np.round(sr)),
                                                 noverlap=int(np.round(sr)))
    ifreq = np.where(fspec <= fmax)[0]
    med = np.median(Sxx.max(axis=0))

    # setup time legend (below DF/F panel)
    ax_time.plot((tspec[0], tspec[0] + time_legend), [1, 1], color='black', linewidth=2)
    ax_time.set_xlim((tspec[0], tspec[-1]))
    ax_time.set_ylim((-1, 1.1))
    ax_time.get_xaxis().set_visible(False)
    ax_time.get_yaxis().set_visible(False)
    ax_time.spines["top"].set_visible(False)
    ax_time.spines["right"].set_visible(False)
    ax_time.spines["bottom"].set_visible(False)
    ax_time.spines["left"].set_visible(False)
    ax_time.text(tspec[0], -1, '%s s' % str(time_legend))

    # setup legend for DF/F
    ax_dff_legend.set_xlim((tspec[0], tspec[-1]))
    ax_dff_legend.set_ylim((-1, 1))
    ax_dff_legend.get_xaxis().set_visible(False)
    ax_dff_legend.get_yaxis().set_visible(False)
    ax_dff_legend.spines["top"].set_visible(False)
    ax_dff_legend.spines["right"].set_visible(False)
    ax_dff_legend.spines["bottom"].set_visible(False)
    ax_dff_legend.spines["left"].set_visible(False)
    ax_dff_legend.set_ylim((dff_min, dff_max))
    ax_dff_legend.set_xlim((0, 1))
    ax_dff_legend.plot((0.5, 0.5), (dff_min, dff_min + dff_legend), color='black', linewidth=2)
    ax_dff_legend.text(0, dff_min + dff_legend / 2.0, str(dff_legend) + '%', rotation=90)

    # legend for brain states
    ax_bs_legend.set_ylim((0, 2))
    ax_bs_legend.set_xlim((0, 10))
    ax_bs_legend.text(0.5, 0.5, 'REM', color=state_map[0])
    ax_bs_legend.text(3.3, 0.5, 'NREM', color=state_map[2])
    ax_bs_legend.text(7, 0.5, 'Wake', color=state_map[1])
    ax_bs_legend.get_xaxis().set_visible(False)
    ax_bs_legend.get_yaxis().set_visible(False)
    ax_bs_legend.spines["top"].set_visible(False)
    ax_bs_legend.spines["right"].set_visible(False)
    ax_bs_legend.spines["bottom"].set_visible(False)
    ax_bs_legend.spines["left"].set_visible(False)

    tstart = t[its]
    for i in range(2, len(tspec)):
        curr_t = tstart + tspec[i]

        # plot EEG spectrogram up to the current time point
        ax_eeg.cla()
        ax_eeg.pcolor(tspec[:i], fspec[ifreq], Sxx[ifreq, :i], vmin=0, vmax=med * vm)
        ax_eeg.set_xlim((tspec[0], tspec[-1]))
        ax_eeg.set_xticks([])
        ax_eeg.set_ylabel('Freq. (Hz)')

        # display current movie frame
        ax_video.cla()
        iframe = closest_neighbor(vid_time, curr_t)[1]
        img = cv2.imread(os.path.join(ppath, name, 'Stack', 'fig%d.jpg' % (iframe + 1)))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        ax_video.imshow(cv2.transpose(img))

        # show EMG
        emg = EMG[its:closest_neighbor(t, curr_t)[1]]
        ax_emg.cla()
        temg = np.arange(0, len(emg)) * dt
        ax_emg.plot(temg, emg, color='black', linewidth=0.5)
        ax_emg.set_xlim((tspec[0], tspec[-1]))
        ax_emg.set_ylim((-emg_max, emg_max))
        # plot EMG legend
        ax_emg.plot(([tspec[-1] - 1, tspec[-1] - 1]), (-emg_legend / 2.0, emg_legend / 2.0),
                    color='black', linewidth=2)

        # plot brain state patches
        j = i - 1
        ax_bs.add_patch(patches.Rectangle((tspec[j], 0), tspec[j + 1] - tspec[j], 1,
                                          facecolor=state_map[int(states[j]) - 1],
                                          edgecolor=state_map[int(states[j]) - 1]))
        ax_bs.set_xlim((tspec[0], tspec[-1]))
        ax_bs.set_ylim((0, 1))

        # plot DF/F up to the current time point
        ax_dff.cla()
        ax_dff.plot(tspec[:i], dff[:i], color='blue')
        ax_dff.set_xlim((tspec[0], tspec[-1]))
        ax_dff.set_ylim((dff_min, dff_max))

        plt.savefig(os.path.join(movie_stack, 'fig%d.png' % i))

    encode_video(ppath, name, stack='MStack', ending='.png', fr=10,
                 outpath=os.path.join(ppath, name), ffmpeg_path=ffmpeg_path, vidname='movie_fibpho_')
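
# Usage sketch for fibpho_video. The base folder and recording name are hypothetical
# placeholders; the recording folder must contain DFF.mat, video_timing.mat, and a
# 'Stack' folder with the extracted video frames:
#
#     fibpho_video('/data/FibPho', 'M1_dff_n1', ts=2000, te=2300,
#                  fmax=20, dff_legend=10, ffmpeg_path='ffmpeg')
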
    ampl_mx = np.zeros((len(mice), n))
    spec_mx = np.zeros((len(ifreq), n, len(mice)))
    for (i, idf) in zip(range(n), mice):
        ampl_mx[i, :] = np.array(ampl_mice[idf]).mean(axis=0)
        spec_mx[:, :, i] = np.array(spec_mice[idf]).mean(axis=0)

    # plot figure
    ##################
    ## sns.set()
    plt.figure()
    t = np.arange(-ipre, ipost) * dt
    ax = plt.axes([0.1, 0.4, 0.8, 0.5])
    ax.pcolor(t, freq[ifreq], spec_mx.mean(axis=2), cmap='jet')
    sleepy.box_off(ax)
    ax.set_xticklabels('')
    plt.ylabel('Freq (Hz)')

    amp_data = list(np.reshape(ampl_mx, (len(mice) * len(t),)))
    amp_time = list(t) * len(mice)
    amp_idf = reduce(lambda x, y: x + y, [[b] * len(t) for b in mice])
    data = [[a, b, c] for (a, b, c) in zip(amp_idf, amp_time, amp_data)]
    df = pd.DataFrame(columns=['Idf', 'Time', 'Ampl'], data=data)

    ax2 = plt.axes([0.1, 0.1, 0.8, 0.2])
    sns.lineplot(data=df, x='Time', y='Ampl', ci='sd')
    plt.plot(t, ampl_mx.mean(axis=0), color='r')
    plt.ylim([0, 30])
    plt.xlabel('Time (s)')