def plot_sample_of_signal(
        load_loc, out_dir=None, name=None, offseta=0,
        length=50, filt_params=(False, None, None)):
    """
    Plot a small filtered sample of the LFP signal in the given band.

    offseta and length are times in seconds.
    """
    in_dir = os.path.dirname(load_loc)
    lfp = NLfp()
    lfp.load(load_loc)

    if out_dir is None:
        out_loc = "nc_signal"
        out_dir = os.path.join(in_dir, out_loc)

    if name is None:
        name = "full_signal_filt.png"

    make_dir_if_not_exists(out_dir)
    out_name = os.path.join(out_dir, name)
    fs = lfp.get_sampling_rate()
    filt, lower, upper = filt_params

    lfp_to_plot = lfp
    if filt:
        lfp_to_plot = deepcopy(lfp)
        lfp_samples = lfp.get_samples()
        lfp_samples = butter_filter(
            lfp_samples, fs, 10, lower, upper, 'bandpass')
        lfp_to_plot._set_samples(lfp_samples)

    plot_long_lfp(
        lfp_to_plot, out_name, nsplits=1, ylim=(-0.325, 0.325),
        figsize=(20, 2), offset=lfp.get_sampling_rate() * offseta,
        nsamples=lfp.get_sampling_rate() * length)
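
# A minimal usage sketch for plot_sample_of_signal, assuming an Axona .eeg
# recording is available; the path and output name are hypothetical
# placeholders. filt_params follows the (should_filter, lower_hz, upper_hz)
# convention used by the function above.
def _example_plot_sample_of_signal():
    recording = r"C:\data\example_session\example.eeg"  # hypothetical path
    plot_sample_of_signal(
        recording,
        out_dir=None,               # defaults to <recording dir>/nc_signal
        name="theta_sample.png",
        offseta=30,                 # start 30 s into the recording
        length=50,                  # plot 50 s of signal
        filt_params=(True, 5, 11),  # band-pass 5-11 Hz (theta band)
    )
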
def main(in_dir, tetrode):
    container = NDataContainer(load_on_fly=True)
    regex = ".*objects.*"
    container.add_axona_files_from_dir(
        in_dir, True, False, tetrode_list=[tetrode], re_filter=regex)
    container.setup()
    out_dir = os.path.join(in_dir, "nc_results")
    make_dir_if_not_exists(out_dir)

    out_dict = OrderedDict()
    headers = []
    base_list = ["NW", "NE", "SW", "SE"]
    for ap in ["Spikes", "Rate", "Norm_Spikes", "Norm_Rate"]:
        mod_list = [b + "_" + ap for b in base_list]
        headers = headers + mod_list
    out_dict["File"] = headers
    out_vec = OrderedDict()
    out_vec, out_dict = calculate_directional_stats(
        container, out_vec, out_dict)

    out_dict["Summary Stats Rate"] = [
        "Rot_Dist", "Rot_U_Dist", "Shuf_Dist", "Shuf_U_Dist", "Shuf_UR_Dist"]
    out_dict["Summary Stats Rate"] += ["Rate" + b for b in base_list]
    out_dict["Summary Stats Rate"] += ["Rot Rate" + b for b in base_list]
    out_dict["Summary Stats Rate"] += ["Undo Rot Rate" + b for b in base_list]
    out_dict["Summary Stats Rate"] += ["Shuf Rate" + b for b in base_list]
    out_dict["Summary Stats Rate"] += ["Undo Shuf Rate" + b for b in base_list]
    out_dict["Summary Stats Rate"] += ["Undo ShufR Rate" + b for b in base_list]

    for key, vec in out_vec.items():
        res, p_vecs, pu_vecs, ur = distance_between(
            vec, key=key, measure=euc_dist)
        out_dict["Rate Unit " + str(key)] = np.concatenate(
            [res, p_vecs[0], p_vecs[1], pu_vecs[1], p_vecs[2], pu_vecs[2], ur])

        fig, ax = plt.subplots()
        heat_arr = np.zeros(shape=(2, 12))
        heat_arr[:, :2] = p_vecs[0].reshape(2, 2)
        heat_arr[:, 2:4] = p_vecs[1].reshape(2, 2)
        heat_arr[:, 4:6] = pu_vecs[1].reshape(2, 2)
        heat_arr[:, 6:8] = p_vecs[2].reshape(2, 2)
        heat_arr[:, 8:10] = pu_vecs[2].reshape(2, 2)
        heat_arr[:, 10:] = ur.reshape(2, 2)
        sns.heatmap(
            heat_arr, ax=ax, annot=True, square=True,
            center=0.25, cmap="Blues")
        ax.invert_yaxis()
        ax.set_ylim(2, 0)
        ax.set_xlim(0, 12)
        ax.vlines([k for k in range(2, 12, 2)], 2, 0, colors="r")
        fig.savefig(
            os.path.join(out_dir, str(key) + "_heatmap.png"))

    out_dict["Summary Stats Rank"] = [
        "Rot_Dist", "Rot_U_Dist", "Shuf_Dist", "Shuf_U_Dist", "Shuf_UR_Dist"]
    out_dict["Summary Stats Rank"] += ["Rank" + b for b in base_list]
    out_dict["Summary Stats Rank"] += ["Rot Rank" + b for b in base_list]
    out_dict["Summary Stats Rank"] += ["Undo Rot Rank" + b for b in base_list]
    out_dict["Summary Stats Rank"] += ["Shuf Rank" + b for b in base_list]
    out_dict["Summary Stats Rank"] += ["Undo Shuf Rank" + b for b in base_list]
    out_dict["Summary Stats Rank"] += ["Undo ShufR Rank" + b for b in base_list]

    out_vec = to_rank(out_vec)
    for key, vec in out_vec.items():
        res, p_vecs, pu_vecs, ur = distance_between(
            vec, key=key, measure=euc_dist)
        out_dict["Rank Unit " + str(key)] = np.concatenate(
            [res, p_vecs[0], p_vecs[1], pu_vecs[1], p_vecs[2], pu_vecs[2], ur])

    print("Saving results to", os.path.join(
        out_dir, str(tetrode) + "_obj.csv"))
    save_mixed_dict_to_csv(out_dict, out_dir, str(tetrode) + "_obj.csv")
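
# A minimal usage sketch for the directional-stats main above. The recording
# directory and tetrode numbers are hypothetical placeholders; the function
# writes one <tetrode>_obj.csv plus per-unit heatmaps into <in_dir>/nc_results.
def _example_run_directional_stats():
    in_dir = r"C:\data\object_recordings"  # hypothetical Axona data directory
    for tetrode in (1, 2):                 # hypothetical tetrode numbers
        main(in_dir, tetrode)
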
def main(cfg, args, **kwargs):
    in_dir = cfg.get("Setup", "in_dir")
    out_dir = cfg.get("Output", "out_dirname")
    plot_dir = cfg.get("Output", "plot_dirname")
    re_filter = cfg.get("Setup", "regex_filter")
    s_filt = cfg.getboolean("LFP", "should_filter")
    filter_range = json.loads(cfg.get("LFP", "filter_range"))
    re_filter = None if re_filter == "None" else re_filter
    analysis_flags = json.loads(cfg.get("Setup", "analysis_flags"))
    res_name = kwargs.get("res_name", "")

    channel_dict_vc = cfg["VC"]
    channel_dict_cla = cfg["CLA"]
    channels = {}
    for key in channel_dict_vc.keys():
        channels[key] = [channel_dict_vc[key], channel_dict_cla[key]]

    setup_logging(in_dir)

    filenames = get_all_files_in_dir(
        in_dir, ext=".set", recursive=True, verbose=True,
        re_filter=re_filter, case_sensitive_ext=True,
    )
    filenames = [fname[:-4] for fname in filenames]
    if len(filenames) == 0:
        print("No set files found for analysis!")
        exit(-1)

    # Plot signal on each loaded channel
    if analysis_flags[0]:
        for fname in filenames:
            lfp_odict = LfpODict(
                fname, channels="all", filt_params=(s_filt, *filter_range)
            )
            o_dir = os.path.join(in_dir, out_dir, os.path.basename(fname))
            r = json.loads(cfg.get("LFP", "plot_time"))
            seg_len = float(cfg.get("LFP", "plot_seg_length"))
            make_dir_if_not_exists(o_dir)
            plot_lfp(
                o_dir, lfp_odict.get_filt_signal(),
                in_range=r, segment_length=seg_len, dpi=100,
            )

    if analysis_flags[1]:
        # t_out_dir = os.path.join(in_dir, plot_dir)
        # make_dir_if_not_exists(t_out_dir)
        res_dict = OrderedDict()
        headers = [
            "Low freq chan", "High freq chan",
            "MVL", "MVL 95", "Z-score", "P-val",
        ]
        res_dict["Name"] = headers
        for fname in filenames:
            for key, val in channels.items():
                if key in fname:
                    chan_list = val[::-1]
                    break
            else:
                raise ValueError(
                    "No key in {}, keys {}".format(fname, channels.keys()))
            print("Computing MVL for {}, {}".format(fname, chan_list))
            res_dict = compute_mvl(fname, chan_list, res_dict)
        save_mixed_dict_to_csv(res_dict, out_dir, "no_mp_norm.csv")

    if analysis_flags[2]:
        res_dict = OrderedDict()
        theta_delta_dict = OrderedDict()
        theta_delta_dict["Name"] = [
            "delta_peak_D", "theta_peak_D", "delta_avg_D", "theta_avg_D",
            "delta_peak_L1", "theta_peak_L1", "delta_avg_L1", "theta_avg_L1",
            "delta_peak_L2", "theta_peak_L2", "delta_avg_L2", "theta_avg_L2",
        ]
        make_dir_if_not_exists(os.path.join(out_dir, plot_dir, "coherence"))
        for fname in filenames:
            for key, val in channels.items():
                if key in fname:
                    chan_list = val
                    break
            else:
                raise ValueError(
                    "No key in {}, keys {}".format(fname, channels.keys()))
            if "green" in fname:
                continue
            out_basename = "{}_{}.png".format(
                os.path.basename(fname), chan_list)
            out_name = os.path.join(
                out_dir, plot_dir, "coherence", out_basename)
            print("Saving coherence to {}".format(out_name))
            lfp_odict = LfpODict(fname, chan_list, (False, 0, 80))
            f, Cxy = calc_coherence(
                lfp_odict.get_filt_signal(0), lfp_odict.get_filt_signal(1)
            )
            if "Name" not in res_dict:
                res_dict["Name"] = f
            res_dict[fname] = Cxy
            plot_coherence(f, Cxy, out_name, dpi=200)
            close("all")

            fname_without_end = "-".join(fname.split("-")[:-1])
            if fname_without_end not in theta_delta_dict:
                theta_delta_dict[fname_without_end] = []
            delta_bit = np.nonzero(np.logical_and(f >= 1.5, f <= 4.0))
            theta_bit = np.nonzero(np.logical_and(f >= 5.0, f <= 11.0))
            v1 = np.max(Cxy[delta_bit])
            v2 = np.max(Cxy[theta_bit])
            v3 = np.mean(Cxy[delta_bit])
            v4 = np.mean(Cxy[theta_bit])
            for val in [v1, v2, v3, v4]:
                theta_delta_dict[fname_without_end].append(val)

        save_mixed_dict_to_csv(
            res_dict, os.path.join(out_dir, plot_dir),
            f"Coherence_{res_name}.csv"
        )
        save_mixed_dict_to_csv(
            theta_delta_dict, os.path.join(out_dir, plot_dir),
            f"Coherence_avg_{res_name}.csv",
        )

    if analysis_flags[3]:
        import neurochat.nc_plot as nc_plot
        from lfp_plot import plot_long_lfp

        make_dir_if_not_exists(os.path.join(out_dir, plot_dir))
        for fname in filenames:
            for key, val in channels.items():
                if key in fname:
                    chan_list = val
                    break
            else:
                raise ValueError(
                    "No key in {}, keys {}".format(fname, channels.keys()))
            lfp_odict = LfpODict(fname, chan_list, (True, 1, 90))
            # Green was corrupted by 50 Hz mains noise in the LFP
            if "green" in fname:
                lfp_odict.notch_filter(channels=["1", "2"])
            for chan in chan_list:
                out_basepart = os.path.join(
                    out_dir, plot_dir, os.path.basename(fname), chan
                )
                make_path_if_not_exists(out_basepart)
                print("Saving plot results to {}".format(out_basepart))
                out_name = out_basepart + "_full_signal_filt.png"
                plot_long_lfp(lfp_odict.get_filt_signal(chan), out_name)
                graph_data = lfp_odict.get_filt_signal(chan).spectrum(
                    fmax=90, db=False, tr=False, prefilt=False,
                    filtset=(10, 1.5, 90, "bandpass"),
                )
                fig = nc_plot.lfp_spectrum(graph_data)
                fig.savefig(out_basepart + "_spec.png")
                graph_data = lfp_odict.get_filt_signal(chan).spectrum(
                    fmax=90, db=True, tr=True, prefilt=False,
                    filtset=(10, 1.5, 90, "bandpass"),
                )
                fig = nc_plot.lfp_spectrum_tr(graph_data)
                fig.savefig(out_basepart + "_tr_spec.png")
                close("all")

    if analysis_flags[4]:
        out_dirname = os.path.join(out_dir, plot_dir)
        print(
            "Calculating power results to save to {}".format(
                os.path.join(out_dirname, f"power_res_{res_name}.csv")
            )
        )
        results = OrderedDict()
        results["Names"] = [
            "VC Chan", "Delta VC", "Theta VC", "Beta VC",
            "Gamma VC", "Total VC",
            "CLA Chan", "Delta CLA", "Theta CLA", "Beta CLA",
            "Gamma CLA", "Total CLA",
        ]
        for fname in filenames:
            for key, val in channels.items():
                if key in fname:
                    chan_list = val
                    break
            else:
                raise ValueError(
                    "No key in {}, keys {}".format(fname, channels.keys()))
            lfp_odict = LfpODict(fname, chan_list, (True, 1, 90))
            if "green" in fname:
                lfp_odict.notch_filter(channels=["1", "2"])
            o_arr = np.zeros(12)
            if "green" in fname:
                o_arr[:6] = None
            for i, chan in enumerate(chan_list):
                if "green" in fname and i == 0:
                    continue
                start_idx = i * 6
                o_arr[start_idx] = chan
                window_sec = 1.3
                lfp = lfp_odict.get_filt_signal(chan)
                delta_power = lfp.bandpower(
                    band=[1.5, 4], window_sec=window_sec)["bandpower"]
                theta_power = lfp.bandpower(
                    band=[5, 11], window_sec=window_sec)["bandpower"]
                beta_power = lfp.bandpower(
                    band=[12, 30], window_sec=window_sec)["bandpower"]
                h_gamma_power = lfp.bandpower(
                    band=[30, 90], window_sec=window_sec)["bandpower"]
                o_arr[start_idx + 1:start_idx + 5] = [
                    delta_power, theta_power, beta_power, h_gamma_power,
                ]
                total_power = lfp.bandpower(
                    band=[1, 90], window_sec=window_sec)["bandpower"]
                o_arr[start_idx + 5] = total_power
            results[os.path.basename(fname)] = o_arr
        save_mixed_dict_to_csv(
            results, out_dirname, f"power_res_{res_name}.csv")
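
# A minimal configuration sketch for the cfg-driven main above, assuming a
# standard configparser.ConfigParser is passed in. Section and key names
# mirror the cfg.get(...) calls in the function; every value shown (paths,
# channel numbers, frequency ranges, flags) is a hypothetical placeholder.
def _example_run_from_config():
    import configparser
    import textwrap

    cfg_text = textwrap.dedent("""
        [Setup]
        in_dir = C:/data/cla_vc_recordings
        regex_filter = None
        analysis_flags = [1, 0, 1, 0, 1]

        [Output]
        out_dirname = nc_results
        plot_dirname = nc_plots

        [LFP]
        should_filter = True
        filter_range = [1.5, 90]
        plot_time = [0, 600]
        plot_seg_length = 60

        [VC]
        rat1 = 1

        [CLA]
        rat1 = 2
    """)
    cfg = configparser.ConfigParser()
    cfg.read_string(cfg_text)
    # args is unused by main, so None is passed; res_name tags the output CSVs
    main(cfg, None, res_name="example")
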
def single_main(parsed):
    """
    Main control function.

    An LFP signal's power is analysed across multiple split-up time windows.
    The times can be split to evaluate relationships over the course of change.
    The distribution of the LFP signal is also calculated over the channels.

    Proceeds as follows:
    1. Parse out the information from command line args.
    2. From this, set up the correct splits to analyse over.
    3. Plot a part of the given signal number to show the effect of filtering.
    4. Calculate measures on the signal in each split:
       total LFP power and entropy.
    5. Calculate these measures for each channel in the recording.
    6. Calculate theta and delta power for each channel in the recording.

    Args:
        parsed (SimpleNamespace): A namespace controlling the behaviour.

    Returns:
        tuple(dict, np.ndarray, np.ndarray):
            (power and entropy summary values,
             raw power for each channel and each split,
             shape is (chans, splits),
             bandpowers for each channel, shape is (6, chans, splits))
    """

    def setup_splits(every_min, split_s):
        """
        Determine the length of times to split recordings into.

        This is specifically set up for a 30 minute long recording.
        The full recording is always included in this.
        """
        if every_min:
            splits = [(60 * i, 60 * (i + 1)) for i in range(recording_dur)]
            splits.append((0, 600))
            splits.append((600, 1200))
            splits.append((1200, 1800))
        else:
            splits = []
            for i in range(len(split_s) // 2):
                splits.append((split_s[i * 2], split_s[i * 2 + 1]))
        splits.append((0, recording_dur * 60))
        return splits

    # Extract parsed args
    loc = parsed.loc
    if not loc:
        print("Please pass a file in through CLI")
        exit(-1)

    max_lfp = parsed.max_freq
    filt = not parsed.nofilt
    eeg_num = parsed.eeg_num
    split_s = parsed.splits
    out_loc = parsed.out_loc
    every_min = parsed.every_min
    recording_dur = parsed.recording_dur
    get_entropy = parsed.get_entropy
    return_all = True

    in_dir = os.path.dirname(loc)
    out_dir = os.path.join(in_dir, out_loc)
    print("Saving results to {}".format(out_dir))
    make_dir_if_not_exists(out_dir)

    splits = setup_splits(every_min, split_s)

    # Load the data
    # TODO only load certain channels here
    lfp_odict = LfpODict(loc, filt_params=(filt, 1.5, max_lfp))

    # Plot signals
    out_name = os.path.join(out_dir, "full_signal.png")
    plot_long_lfp(lfp_odict.get_signal(eeg_num), out_name)
    out_name = os.path.join(in_dir, out_dir, "full_signal_filt.png")
    plot_long_lfp(lfp_odict.get_filt_signal(eeg_num), out_name)

    graph_data = lfp_odict.get_signal(eeg_num).spectrum(
        fmax=90, db=False, tr=False, prefilt=True,
        filtset=(10, 1.5, 90, "bandpass"))
    fig = nc_plot.lfp_spectrum(graph_data)
    fig.savefig(os.path.join(out_dir, "spec.png"))
    graph_data = lfp_odict.get_signal(eeg_num).spectrum(
        fmax=90, db=True, tr=True, prefilt=True,
        filtset=(10, 1.5, 90, "bandpass"))
    fig = nc_plot.lfp_spectrum_tr(graph_data)
    fig.savefig(os.path.join(out_dir, "tr_spec.png"))
    plt.close("all")

    # Calculate power on this lfp channel
    lfp_to_use = (
        lfp_odict.get_filt_signal(eeg_num) if filt
        else lfp_odict.get_signal(eeg_num))
    p_results = raw_lfp_power(lfp_to_use, splits)

    # Calculate measures over the channel distribution
    d_result = lfp_distribution_measures(
        lfp_odict, out_dir, splits[-4:], prefilt=filt,
        get_entropy=get_entropy, return_all=return_all)

    if get_entropy:
        # Calculate entropy on this lfp channel
        e_results = lfp_entropy(lfp_to_use)
        results = {
            "power": p_results,
            "entropy": e_results,
            "avg_power": d_result[0],
            "avg_entropy": d_result[1],
        }
    else:
        results = {"power": p_results, "avg_power": d_result[0]}

    save_mixed_dict_to_csv(results, out_dir)
    t_results = lfp_theta_dist(lfp_odict, splits, filt, 1.5, max_lfp)

    return results, d_result[-1], t_results
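
# A minimal sketch of the SimpleNamespace that single_main expects, built with
# the same attribute names the function reads. All values here are
# hypothetical placeholders; in the real script they would normally come from
# argparse.
def _example_single_main():
    from types import SimpleNamespace

    parsed = SimpleNamespace(
        loc=r"C:\data\example_session\example.eeg",  # hypothetical recording
        max_freq=90,          # upper filter edge in Hz
        nofilt=False,         # filtering is applied when this is False
        eeg_num="1",          # channel key, assumed string as elsewhere here
        splits=[0, 600, 600, 1200],  # start/end pairs in seconds
        out_loc="nc_results",
        every_min=False,
        recording_dur=30,     # minutes
        get_entropy=True,
    )
    results, per_chan_power, theta_delta = single_main(parsed)
    return results, per_chan_power, theta_delta
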
def main(fname, analysis_flags, o_main_dir=None, alignment=None):
    '''
    Parameters
    ----------
    fname : str
        Filename to be analysed.
    analysis_flags : list of bool
        Sets which analyses are run.
        0 - plot periodograms and ptrs in separate plots for each tetrode
        1 - plot graphs from all tetrodes in one .png
    alignment : int, optional. Defaults to None.
        #TODO write alignment function
        Sets alignment points to be used.
        0 - Align to reward
        1 - Align to pellet drop
        2 - Align to FI
        3 - Align to Tone
    o_main_dir : dir, optional. Defaults to None.
        None - Saves plots in a LFP folder where .eeg was found
        Else - Saves plots in a LFP folder of the given drive
    '''
    # Setup region info for eeg
    # Axona single screw drive settings
    chans = [i for i in range(1, 17 * 2 - 1)]
    regions = ["CLA"] * 28 + ["ACC"] * 2 + ["RSC"] * 2

    # # Single hemisphere multisite drive settings
    # chans = [i for i in range(1, 17)]
    # regions = ["CLA"] * 8 + ["ACC"] * 4 + ["RSC"] * 4

    gm = plot_org.GroupManager(regions)

    # Change filter values here. Default order 10.
    filt_btm = 1.0
    filt_top = 50

    lfp_list = []
    for chan_chunk in chunks(chans, 16):
        lfp_odict = LfpODict(
            fname, channels=chan_chunk,
            filt_params=(True, filt_btm, filt_top))
        lfp_list.append(lfp_odict)

    if o_main_dir is None:
        o_dir = os.path.join(os.path.dirname(fname), "!LFP")
    else:
        o_dir = os.path.join(o_main_dir, "!LFP")
    make_dir_if_not_exists(o_dir)

    if analysis_flags[0]:
        # Plot periodograms and ptr for each tetrode separately
        for p, lfp_odict in enumerate(lfp_list):
            # # Plot periodogram for each eeg
            # for i, (key, lfp) in enumerate(
            #         lfp_odict.get_filt_signal().items()):
            #     graph_data = lfp.spectrum(
            #         ptype='psd', prefilt=False, db=False, tr=False)
            #     fig = nc_plot.lfp_spectrum(graph_data)
            #     plt.ylim(0, 0.01)
            #     # plt.xlim(0, 40)
            #     out_name = os.path.join(o_dir, "p", key + "p.png")
            #     make_path_if_not_exists(out_name)
            #     fig.suptitle("T" + key + " " + regions[i] + " Periodogram")
            #     fig.savefig(out_name)
            #     plt.close()

            # Setup summary grid
            rows, cols = [4, 4]
            gf = plot_org.GridFig(
                rows, cols, wspace=0.3, hspace=0.3, tight_layout=False)

            # Plot individual periodograms on 1 image
            for i, (key, lfp) in enumerate(
                    lfp_odict.get_filt_signal().items()):
                graph_data = lfp.spectrum(
                    ptype='psd', prefilt=False, db=False, tr=False)
                ax = gf.get_next(along_rows=False)
                color = gm.get_next_color()
                nc_plot.lfp_spectrum(graph_data, ax, color)
                plt.ylim(0, 0.015)
                # plt.xlim(0, 40)
                if i % 4 == 0:
                    ax.text(
                        0.49, 1.08, regions[i + p * 16], fontsize=20,
                        horizontalalignment='center',
                        verticalalignment='center', transform=ax.transAxes)

            if p:
                gf.fig.suptitle(
                    (fname.split("\\")[-1][4:] + " Periodogram " + str(p)),
                    fontsize=30)
                out_name = os.path.join(
                    o_dir,
                    fname.split("\\")[-1] + "_p_sum_" + str(p) + ".png")
            else:
                gf.fig.suptitle(
                    (fname.split("\\")[-1][4:] + " Periodogram"), fontsize=30)
                out_name = os.path.join(
                    o_dir, fname.split("\\")[-1] + "_p_sum.png")
            make_path_if_not_exists(out_name)
            print("Saving result to {}".format(out_name))
            gf.fig.savefig(out_name)
            plt.close()

            # Plot spectrogram for each eeg
            for i, (key, lfp) in enumerate(
                    lfp_odict.get_filt_signal().items()):
                graph_data = lfp.spectrum(
                    ptype='psd', prefilt=False, db=True, tr=True)
                if graph_data['t'][-1] > 305:
                    block_size = 305
                    rows, cols = [6, 1]
                    gf = plot_org.GridFig(
                        rows, cols, wspace=0.3, hspace=0.3,
                        size_multiplier_x=40, tight_layout=False)
                    for j in range(0, block_size * 6, block_size):
                        tone_ts = range(j + 5, j + 305, 300)
                        ax = gf.get_next(along_rows=True)
                        new_lfp = lfp.subsample(
                            sample_range=(j, j + block_size))
                        graph_data = new_lfp.spectrum(
                            ptype='psd', prefilt=False, db=True, tr=True)
                        nc_plot.lfp_spectrum_tr(graph_data, ax)
                        if j == 0:
                            plt.title(
                                "T" + key + " " + regions[i + p * 16]
                                + " Spectrogram", fontsize=40)
                        plt.ylim(0, filt_top)
                        for ts in tone_ts:
                            ax.axvline(
                                ts, linestyle='-', color='r', linewidth=1)
                    fig = gf.get_fig()
                else:
                    fig, ax = plt.subplots(figsize=(20, 5))
                    nc_plot.lfp_spectrum_tr(graph_data, ax)
                    plt.ylim(0, filt_top)
                    fig.suptitle(
                        "T" + key + " " + regions[i + p * 16]
                        + " Spectrogram")
                out_name = os.path.join(o_dir, "ptr", key + "ptr.png")
                make_path_if_not_exists(out_name)
                print("Saving result to {}".format(out_name))
                fig.savefig(out_name)
                plt.close()

            # plot_lfp(o_dir, lfp_odict.get_filt_signal(), segment_length=60)
            # Plot raw LFP for all tetrodes in segments

    if analysis_flags[1]:
        # Compile graphs per session into a single .png
        # Plot all periodograms on 1 plot
        fig, ax = plt.subplots(figsize=(20, 20))
        legend = []
        for p, lfp_odict in enumerate(lfp_list):
            for i, (key, lfp) in enumerate(
                    lfp_odict.get_filt_signal().items()):
                graph_data = lfp.spectrum(
                    ptype='psd', prefilt=False, db=False, tr=False)
                color = gm.get_next_color()
                nc_plot.lfp_spectrum(graph_data, ax, color)
                legend.append(regions[i + p * 16] + " T" + key)
        plt.ylim(0, 0.015)
        plt.xlim(0, filt_top)
        plt.legend(legend)
        plt.title(
            fname.split("\\")[-1][4:] + " Compiled Periodogram", fontsize=25)
        out_name = os.path.join(o_dir, fname.split("\\")[-1] + "_p.png")
        make_path_if_not_exists(out_name)
        fig.savefig(out_name)
        plt.close()

        for p, lfp_odict in enumerate(lfp_list):
            # Plot spectrograms in sets of 16
            rows, cols = [4, 4]
            gf = plot_org.GridFig(rows, cols, wspace=0.5, hspace=0.5)
            for i, (key, lfp) in enumerate(
                    lfp_odict.get_filt_signal().items()):
                graph_data = lfp.spectrum(
                    ptype='psd', prefilt=True, db=True, tr=True)
                ax = gf.get_next(along_rows=False)
                nc_plot.lfp_spectrum_tr(graph_data, ax)
                plt.ylim(0, 40)
                # plt.xlim(0, 40)
                color = gm.get_next_color()
                ax.text(
                    0.49, 1.08, regions[i + p * 16], fontsize=20, color=color,
                    horizontalalignment='center',
                    verticalalignment='center', transform=ax.transAxes)
            if p:
                gf.fig.suptitle(
                    (fname.split("\\")[-1][4:] + " Spectrogram " + str(p)),
                    fontsize=30)
                out_name = os.path.join(
                    o_dir, "Sum_ptr",
                    fname.split("\\")[-1] + "_ptr_sum_" + str(p) + ".png")
            else:
                gf.fig.suptitle(
                    (fname.split("\\")[-1][4:] + " Spectrogram"), fontsize=30)
                out_name = os.path.join(
                    o_dir, "Sum_ptr",
                    fname.split("\\")[-1] + "_ptr_sum.png")
            make_path_if_not_exists(out_name)
            print("Saving result to {}".format(out_name))
            gf.fig.savefig(out_name)
            plt.close()

    # Compile graphs per session into a single .png aligned to given points
    if analysis_flags[3]:
        for p, lfp_odict in enumerate(lfp_list):
            # Setup summary grid
            rows, cols = [4, 4]
            gf = plot_org.GridFig(
                rows, cols, wspace=0.3, hspace=0.3, tight_layout=False)

            # Plot summary periodogram
            for i, (key, lfp) in enumerate(lfp_odict.get_signal().items()):
                graph_data = lfp.spectrum(
                    ptype='psd', prefilt=False, db=False, tr=False,
                    filtset=[10, 1.0, 40, 'bandpass'])
                ax = gf.get_next(along_rows=False)
                nc_plot.lfp_spectrum(graph_data, ax)
                plt.ylim(0, 0.015)
                plt.xlim(0, 40)
                if i % 4 == 0:
                    ax.text(
                        0.49, 1.08, regions[i + p * 16], fontsize=20,
                        horizontalalignment='center',
                        verticalalignment='center', transform=ax.transAxes)
            gf.fig.suptitle(
                (fname.split("\\")[-1][4:] + " Periodogram"), fontsize=30)
            if p:
                out_name = os.path.join(
                    o_dir, "Sum_p",
                    fname.split("\\")[-1] + "_p_sum_" + str(p) + ".png")
            else:
                out_name = os.path.join(
                    o_dir, "Sum_p", fname.split("\\")[-1] + "_p_sum.png")
            make_path_if_not_exists(out_name)
            print("Saving result to {}".format(out_name))
            gf.fig.savefig(out_name)
            plt.close()

            # Plot summary periodogram tr
            gf = plot_org.GridFig(rows, cols, wspace=0.5, hspace=0.5)
            for i, (key, lfp) in enumerate(lfp_odict.get_signal().items()):
                graph_data = lfp.spectrum(
                    ptype='psd', prefilt=True, db=True, tr=True,
                    filtset=[10, 1.0, 40, 'bandpass'])
                ax = gf.get_next(along_rows=False)
                nc_plot.lfp_spectrum_tr(graph_data, ax)
                plt.ylim(0, 40)
                # plt.xlim(0, 40)
                if i % 4 == 0:
                    ax.text(
                        0.49, 1.08, regions[i + p * 16], fontsize=20,
                        horizontalalignment='center',
                        verticalalignment='center', transform=ax.transAxes)
            gf.fig.suptitle(
                (fname.split("\\")[-1][4:] + " Time Resolved Periodogram"),
                fontsize=30)
            if p:
                out_name = os.path.join(
                    o_dir, "Sum_ptr",
                    fname.split("\\")[-1] + "_ptr" + str(p) + "_sum.png")
            else:
                out_name = os.path.join(
                    o_dir, "Sum_ptr",
                    fname.split("\\")[-1] + "_ptr_sum.png")
            make_path_if_not_exists(out_name)
            print("Saving result to {}".format(out_name))
            gf.fig.savefig(out_name)
            plt.close()
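
# A minimal usage sketch for the multi-drive main above. The recording base
# name (an Axona .set/.eeg file without extension) is a hypothetical
# placeholder; analysis_flags is indexed as described in the docstring, with
# index 3 driving the alignment-based summary plots.
def _example_run_drive_analysis():
    fname = r"C:\data\CLA_drive\01_example_session"  # hypothetical base name
    analysis_flags = [True, True, False, False]  # periodograms + compiled plot
    main(fname, analysis_flags, o_main_dir=None)
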
def main(fname):
    chans = [i for i in range(1, 17)]
    lfp_odict = LfpODict(
        fname, channels=chans, filt_params=(True, 1.5, 90))
    o_dir = os.path.join(os.path.dirname(fname), "LFP")
    make_dir_if_not_exists(o_dir)

    # Plot periodogram for each eeg
    for i, (key, lfp) in enumerate(lfp_odict.get_signal().items()):
        graph_data = lfp.spectrum(
            ptype='psd', prefilt=False, db=False, tr=False,
            filtset=[10, 1.0, 40, 'bandpass'])
        fig = nc_plot.lfp_spectrum(graph_data)
        plt.ylim(0, 0.01)
        plt.xlim(0, 40)
        out_name = os.path.join(o_dir, "p", key + "p.png")
        make_path_if_not_exists(out_name)
        fig.savefig(out_name)
        plt.close()

        graph_data = lfp.spectrum(
            ptype='psd', prefilt=False, db=True, tr=True,
            filtset=[10, 1.0, 40, 'bandpass'])
        fig = nc_plot.lfp_spectrum_tr(graph_data)
        # plt.ylim(0, 0.01)
        # plt.xlim(0, 40)
        out_name = os.path.join(o_dir, "ptr", key + "ptr.png")
        make_path_if_not_exists(out_name)
        print("Saving result to {}".format(out_name))
        fig.savefig(out_name)
        plt.close()

    plot_lfp(o_dir, lfp_odict.get_filt_signal(), segment_length=60)

    # Summary plots
    # Region info for eeg
    cla_idx = list(range(1, 9))
    acc_idx = list(range(9, 13))
    rsc_idx = list(range(13, 17))
    names = ["CLA"] * 8 + ["ACC"] * 4 + ["RSC"] * 4

    # Setup summary grid
    rows, cols = [4, 4]
    gf = plot_org.GridFig(rows, cols, wspace=0.5, hspace=0.5)

    # Plot summary periodogram
    for i, (key, lfp) in enumerate(lfp_odict.get_signal().items()):
        graph_data = lfp.spectrum(
            ptype='psd', prefilt=False, db=False, tr=False,
            filtset=[10, 1.0, 40, 'bandpass'])
        ax = gf.get_next(along_rows=False)
        nc_plot.lfp_spectrum(graph_data, ax)
        plt.ylim(0, 0.01)
        plt.xlim(0, 40)
        if i % 4 == 0:
            ax.text(
                0.49, 1.08, names[i], fontsize=20,
                horizontalalignment='center', verticalalignment='center',
                transform=ax.transAxes)
    out_name = os.path.join(
        o_dir, "Sum", fname.split("\\")[-1] + "_p_sum.png")
    make_path_if_not_exists(out_name)
    print("Saving result to {}".format(out_name))
    gf.fig.savefig(out_name)
    plt.close()

    # Plot summary periodogram tr
    gf = plot_org.GridFig(rows, cols, wspace=0.5, hspace=0.5)
    for i, (key, lfp) in enumerate(lfp_odict.get_signal().items()):
        graph_data = lfp.spectrum(
            ptype='psd', prefilt=True, db=True, tr=True,
            filtset=[10, 1.0, 40, 'bandpass'])
        ax = gf.get_next(along_rows=False)
        nc_plot.lfp_spectrum_tr(graph_data, ax)
        plt.ylim(0, 40)
        # plt.xlim(0, 40)
        if i % 4 == 0:
            ax.text(
                0.49, 1.08, names[i], fontsize=20,
                horizontalalignment='center', verticalalignment='center',
                transform=ax.transAxes)
    out_name = os.path.join(
        o_dir, "Sum", fname.split("\\")[-1] + "_ptr_sum.png")
    make_path_if_not_exists(out_name)
    print("Saving result to {}".format(out_name))
    gf.fig.savefig(out_name)
    plt.close()
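
# A minimal batch-run sketch for the 16-channel main above, using only the
# standard library to find recordings. The directory is a hypothetical
# placeholder; each .set file is passed by its name without the extension,
# following the convention used by the other scripts in this repository.
def _example_batch_run():
    import glob

    in_dir = r"C:\data\single_drive_recordings"  # hypothetical directory
    for set_file in glob.glob(os.path.join(in_dir, "*.set")):
        main(set_file[:-4])  # strip the .set extension
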