def __init__(self, exp_counter, low_freq=0.1, hi_freq=3, pick_channels=['Cz'],
             signal_tmin=-3, signal_tmax=5, noise_tmin=3, noise_tmax=11,
             generate_report=False):
    self.exp_counter = exp_counter
    self.pick_channels = pick_channels
    self.data_loader = DataLoader(exp_counter=self.exp_counter)
    self.data_loader.init_task_dependent_variables()
    self.data_loader.load_data()
    self.exp_name = self.data_loader.exp_name
    self.channel_dict = self.data_loader.channel_dict
    self.fs = self.data_loader.fs
    self.low_freq = low_freq
    self.hi_freq = hi_freq
    self.signal_tmin = signal_tmin
    self.signal_tmax = signal_tmax
    self.noise_tmin = noise_tmin
    self.noise_tmax = noise_tmax
    self.report = mne.Report(verbose=True)
    self.generate_report = generate_report
def report_erps(self, evoked: mne.Evoked, erp_name: str):
    # set report and condition name
    name_info = erp_name.split('_')
    report_name = name_info[-1]
    report_name = self.folder_tracker(['erp', self.header],
                                      f'report_{report_name}.h5')
    cnd_name = '_'.join(map(str, name_info[:-1]))

    # check whether report exists
    if os.path.isfile(report_name):
        with mne.open_report(report_name) as report:
            # if section exists delete it first
            report.remove(title=cnd_name)
            report.add_evokeds(evokeds=evoked, titles=cnd_name,
                               n_time_points=21)
            report.save(report_name.rsplit(".", 1)[0] + '.html',
                        overwrite=True)
    else:
        report = mne.Report(title='Single subject evoked overview')
        report.add_evokeds(evokeds=evoked, titles=cnd_name, n_time_points=21)
        report.save(report_name)
        report.save(report_name.rsplit(".", 1)[0] + '.html')
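# A hedged call-site sketch for the method above. The class name
# `ErpAnalysis`, its constructor arguments, and the epochs object are
# assumptions for illustration; only `report_erps` itself comes from the
# source.
analysis = ErpAnalysis(sj=1, epochs=epochs)  # hypothetical constructor
evoked = epochs['target'].average()
# The first call creates report_erp.h5 (plus .html); later calls with the
# same erp_name replace the matching section instead of duplicating it.
analysis.report_erps(evoked, erp_name='target_lateral_erp')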
def open(self, **kwargs):
    """
    :param kwargs:
    :return:
    """
    self._update_from_kwargs(**kwargs)
    self._isOpen = False
    # logger.info("report path (stage): {}".format(self.fullname))

    if self.overwrite:
        try:
            for fext in [self.h5_extention, self.html_extention]:
                fname = self.fullname + fext
                if os.path.isfile(fname):
                    os.remove(fname)
        except Exception:
            logger.exception("ERROR: cannot overwrite report file: {}".format(self.fullname))
            return False

    # https://mne.tools/dev/auto_tutorials/misc/plot_report.html
    try:
        if os.path.isfile(self.hdf_name):
            self._MNE_REPORT = mne.open_report(self.hdf_name)
        else:
            self._MNE_REPORT = mne.Report(info_fname=self.info_name,
                                          title=self.title,
                                          image_format=self.image_format,
                                          raw_psd=self.raw_psd,
                                          verbose=self.verbose)
        logger.info("open Report (h5): \n -> {}".format(self._MNE_REPORT))
        self._isOpen = True
    except Exception:
        logger.exception("ERROR: cannot open or create MNE Report {}".format(self.hdf_name))

    return self._isOpen
def _generate_report(raw, report_fname, quit_on_error):
    from .._mnefun import _set_static
    from .._sss import _maxbad, _load_meg_bads
    from .._report import (report_context, _report_good_hpi,
                           _report_chpi_snr, _report_head_movement,
                           _report_raw_segments, _report_events,
                           _report_raw_psd)
    report = mne.Report(verbose=False)
    raw.load_data()
    with report_context():
        import matplotlib.pyplot as plt
        p = mne.utils.Bunch(
            mf_badlimit=7, mf_autobad_type='python', hp_type='python',
            tmpdir=mne.utils._TempDir(), coil_dist_limit=0.01,
            coil_t_window='auto', coil_gof_limit=0.95, coil_t_step_min=0.01,
            lp_trans=10, lp_cut=40, movecomp=True,
            coil_bad_count_duration_limit=np.inf, sss_origin='auto')
        maxbad_file = op.join(p.tmpdir, 'maxbad.txt')
        _set_static(p)
        _maxbad(p, raw, maxbad_file)
        # Maxbads
        _load_meg_bads(raw, maxbad_file, disp=False)
        section = 'MF Autobad'
        htmls = _HTML_TEMPLATE.format(
            title='%d bad channel%s detected'
                  % (len(raw.info['bads']), mne.utils._pl(raw.info['bads'])),
            text=', '.join(raw.info['bads']))
        report.add_htmls_to_section(htmls, section, section)
        # HPI count, SNR, head position
        funcs = (
            [_report_good_hpi, 'Good HPI count'],
            [_report_chpi_snr, 'cHPI SNR'],
            [_report_head_movement, 'Head movement'],
            [_report_events, 'Events'],
        )
        if raw.info['dev_head_t'] is None:  # don't even try the first three
            funcs = funcs[3:]
        for func, section in funcs:
            try:
                func(report, [raw], p=p)
            except Exception as exp:
                if quit_on_error:
                    raise
                htmls = _HTML_TEMPLATE.format(title='Error', text=str(exp))
                report.add_htmls_to_section(htmls, section, section)
        # Raw segments (ignoring warnings about dev_head_t)
        with mne.utils.use_log_level('error'):
            _report_raw_segments(report, raw, lowpass=p.lp_cut)
        # Raw PSD
        _report_raw_psd(report, raw, p=p)
        os.makedirs(op.dirname(report_fname), exist_ok=True)
        report.save(report_fname, open_browser=False)
        plt.close('all')
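# The function above relies on an `_HTML_TEMPLATE` defined elsewhere in
# mnefun. A minimal stand-in with the same two placeholders might look like
# this; it is an assumption for illustration, not mnefun's actual template.
_HTML_TEMPLATE = """
<div class="row">
  <h4>{title}</h4>
  <p>{text}</p>
</div>
"""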
def _gen_empty_report(*, cfg: SimpleNamespace, subject: str,
                      session: Optional[str]) -> mne.Report:
    title = f'sub-{subject}'
    if session is not None:
        title += f', ses-{session}'
    if cfg.task is not None:
        title += f', task-{cfg.task}'

    report = mne.Report(title=title, raw_psd=True)
    return report
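# A brief usage sketch, assuming a `cfg` namespace that carries only the
# attribute this helper reads (`task`); the subject/session values are
# illustrative.
from types import SimpleNamespace

cfg = SimpleNamespace(task='audvis')
report = _gen_empty_report(cfg=cfg, subject='01', session='01')
print(report.title)  # 'sub-01, ses-01, task-audvis'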
def run_report(subject): print("Processing %s" % subject) meg_subject_dir = op.join(config.meg_dir, subject) extension = '-ave' fname_ave = op.join(meg_subject_dir, config.base_fname.format(**locals())) fname_trans = op.join(meg_subject_dir, config.base_fname_trans.format(**locals())) subjects_dir = config.subjects_dir if not op.exists(fname_trans): subject = None subjects_dir = None rep = mne.Report(info_fname=fname_ave, subject=subject, subjects_dir=subjects_dir) rep.parse_folder(meg_subject_dir) evokeds = mne.read_evokeds(fname_ave) figs = list() captions = list() for evoked in evokeds: fig = evoked.plot(spatial_colors=True, show=False, gfp=True) figs.append(fig) captions.append(evoked.comment) if op.exists(fname_trans): mne.viz.plot_alignment(evoked.info, fname_trans, subject=subject, subjects_dir=config.subjects_dir, meg=True, dig=True, eeg=True) fig = mlab.gcf() figs.append(fig) captions.append('Coregistration') rep.add_figs_to_section(figs, captions) for evoked in evokeds: fname = op.join(meg_subject_dir, 'mne_dSPM_inverse-%s' % evoked.comment) stc = mne.read_source_estimate(fname, subject) brain = stc.plot(views=['ven'], hemi='both') brain.set_data_time_index(112) fig = mlab.gcf() rep._add_figs_to_section(fig, evoked.condition) rep.save(fname=op.join(meg_subject_dir, 'report_%s.html' % subject), open_browser=False, overwrite=True)
def run_report(subject): print("processing %s" % subject) meg_subject_dir = op.join(config.meg_dir, subject) ave_fname = op.join(meg_subject_dir, "%s-ave.fif" % subject) rep = mne.Report(info_fname=ave_fname, subject=subject, subjects_dir=config.subjects_dir) rep.parse_folder(meg_subject_dir) evokeds = mne.read_evokeds(ave_fname) figs = list() captions = list() for evoked in evokeds: fig = evoked.plot(spatial_colors=True, show=False, gfp=True) figs.append(fig) captions.append(evoked.condition) fname_trans = op.join(config.meg_subject_dir, '%s-trans.fif' % subject) mne.viz.plot_trans(evoked.info, fname_trans, subject=subject, subjects_dir=config.subjects_dir, meg_sensors=True, eeg_sensors=True) fig = mlab.gcf() figs.append(fig) captions.append('Coregistration') # rep.add_figs_to_section(figs, captions) # for evoked in evokeds: # fname = op.join(meg_path, 'mne_dSPM_inverse-%s' # % evoked.condition) # stc = mne.read_source_estimate(fname, subject) # brain = stc.plot(views=['ven'], hemi='both') # brain.set_data_time_index(112) # fig = mlab.gcf() # rep._add_figs_to_section(fig, cond) rep.save(fname=op.join(config.meg_subject_dir, 'report_%s.html' % subject), open_browser=False, overwrite=True)
def main():
    """Make reports."""
    parallel, run_func, _ = parallel_func(run_report, n_jobs=config.N_JOBS)
    parallel(run_func(subject, session) for subject, session in
             itertools.product(config.subjects_list, config.sessions))

    # Group report
    evoked_fname = op.join(config.bids_root, 'derivatives',
                           config.PIPELINE_NAME,
                           '%s_grand_average-ave.fif' % config.study_name)
    rep = mne.Report(info_fname=evoked_fname, subject='fsaverage',
                     subjects_dir=config.subjects_dir)
    evokeds = mne.read_evokeds(evoked_fname)
    for evoked, condition in zip(evokeds, config.conditions):
        rep.add_figs_to_section(evoked.plot(spatial_colors=True, gfp=True,
                                            show=False),
                                'Average %s' % condition)

        stc_fname = op.join(config.bids_root, 'derivatives',
                            config.PIPELINE_NAME,
                            'average_dSPM-%s' % condition)
        if op.exists(stc_fname + "-lh.stc"):
            stc = mne.read_source_estimate(stc_fname, subject='fsaverage')
            brain = stc.plot(views=['lat'], hemi='both',
                             subject='fsaverage',
                             subjects_dir=config.subjects_dir)
            brain.set_data_time_index(165)
            fig = mlab.gcf()
            rep.add_figs_to_section(fig, 'Average %s' % condition)

    rep.save(fname=op.join(config.bids_root, 'derivatives',
                           config.PIPELINE_NAME, 'report_average.html'),
             open_browser=False, overwrite=True)
def run_report(subject, session=None): bids_path = BIDSPath(subject=subject, session=session, task=config.get_task(), acquisition=config.acq, run=None, recording=config.rec, space=config.space, extension='.fif', datatype=config.get_datatype(), root=config.deriv_root, check=False) fname_ave = bids_path.copy().update(suffix='ave') fname_trans = bids_path.copy().update(suffix='trans') fname_epo = bids_path.copy().update(suffix='epo') fname_trans = bids_path.copy().update(suffix='trans') fname_ica = bids_path.copy().update(suffix='ica') fname_decoding = fname_epo.copy().update(suffix='decoding', extension='.mat') fs_subject = config.get_fs_subject(subject) fs_subjects_dir = config.get_fs_subjects_dir() params: Dict[str, Any] = dict(info_fname=fname_ave, raw_psd=True) if op.exists(fname_trans): params['subject'] = fs_subject params['subjects_dir'] = fs_subjects_dir rep = mne.Report(**params) rep_kwargs: Dict[str, Any] = dict(data_path=fname_ave.fpath.parent, verbose=False) if not op.exists(fname_trans): rep_kwargs['render_bem'] = False task = config.get_task() if task is not None: rep_kwargs['pattern'] = f'*_task-{task}*' if mne.viz.get_3d_backend() is not None: with mne.viz.use_3d_backend('pyvista'): rep.parse_folder(**rep_kwargs) else: rep.parse_folder(**rep_kwargs) # Visualize automated noisy channel detection. if config.find_noisy_channels_meg: figs, captions = plot_auto_scores(subject=subject, session=session) rep.add_figs_to_section(figs=figs, captions=captions, section='Data Quality') # Visualize events. events_fig = plot_events(subject=subject, session=session) rep.add_figs_to_section(figs=events_fig, captions='Events in filtered continuous data', section='Events') ########################################################################### # # Visualize effect of ICA artifact rejection. # if config.use_ica: epochs = mne.read_epochs(fname_epo) ica = mne.preprocessing.read_ica(fname_ica) fig = ica.plot_overlay(epochs.average(), show=False) rep.add_figs_to_section(fig, captions='Evoked response (across all epochs) ' 'before and after ICA', section='ICA') ########################################################################### # # Visualize evoked responses. # conditions: List[Condition_T] = list(config.conditions) conditions.extend(config.contrasts) evokeds = mne.read_evokeds(fname_ave) if config.analyze_channels: for evoked in evokeds: evoked.pick(config.analyze_channels) for condition, evoked in zip(conditions, evokeds): if condition in config.conditions: caption = f'Condition: {condition}' section = 'Evoked' else: # It's a contrast of two conditions. caption = f'Contrast: {condition[0]} – {condition[1]}' section = 'Contrast' fig = evoked.plot(spatial_colors=True, gfp=True, show=False) rep.add_figs_to_section(figs=fig, captions=caption, comments=evoked.comment, section=section) ########################################################################### # # Visualize decoding results. # if config.decode: epochs = mne.read_epochs(fname_epo) for contrast in config.contrasts: cond_1, cond_2 = contrast a_vs_b = f'{cond_1}-{cond_2}'.replace(op.sep, '') processing = f'{a_vs_b}+{config.decoding_metric}' processing = processing.replace('_', '-').replace('-', '') fname_decoding_ = (fname_decoding.copy().update( processing=processing)) decoding_data = loadmat(fname_decoding_) del fname_decoding_, processing, a_vs_b fig = plot_decoding_scores( times=epochs.times, cross_val_scores=decoding_data['scores'], metric=config.decoding_metric) caption = f'Time-by-time Decoding: {cond_1} ./. 
{cond_2}' comment = (f'{len(epochs[cond_1])} × {cond_1} ./. ' f'{len(epochs[cond_2])} × {cond_2}') rep.add_figs_to_section(figs=fig, captions=caption, comments=comment, section='Decoding') del decoding_data, cond_1, cond_2, caption, comment del epochs ########################################################################### # # Visualize the coregistration & inverse solutions. # evokeds = mne.read_evokeds(fname_ave) if op.exists(fname_trans): # We can only plot the coregistration if we have a valid 3d backend. if mne.viz.get_3d_backend() is not None: fig = mne.viz.plot_alignment(evoked.info, fname_trans, subject=fs_subject, subjects_dir=fs_subjects_dir, meg=True, dig=True, eeg=True) rep.add_figs_to_section(figs=fig, captions='Coregistration', section='Coregistration') else: msg = ('Cannot render sensor alignment (coregistration) because ' 'no usable 3d backend was found.') logger.warning( gen_log_message(message=msg, step=99, subject=subject, session=session)) for condition, evoked in zip(conditions, evokeds): msg = f'Rendering inverse solution for {evoked.comment} …' logger.info( gen_log_message(message=msg, step=99, subject=subject, session=session)) if condition in config.conditions: full_condition = config.sanitize_cond_name(evoked.comment) caption = f'Condition: {full_condition}' del full_condition else: # It's a contrast of two conditions. # XXX Will change once we process contrasts here too continue method = config.inverse_method cond_str = config.sanitize_cond_name(condition) inverse_str = method hemi_str = 'hemi' # MNE will auto-append '-lh' and '-rh'. fname_stc = bids_path.copy().update( suffix=f'{cond_str}+{inverse_str}+{hemi_str}', extension=None) if op.exists(str(fname_stc) + "-lh.stc"): stc = mne.read_source_estimate(fname_stc, subject=fs_subject) _, peak_time = stc.get_peak() # Plot using 3d backend if available, and use Matplotlib # otherwise. import matplotlib.pyplot as plt if mne.viz.get_3d_backend() is not None: brain = stc.plot(views=['lat'], hemi='split', initial_time=peak_time, backend='pyvista', time_viewer=True, subjects_dir=fs_subjects_dir) brain.toggle_interface() brain._renderer.plotter.reset_camera() brain._renderer.plotter.subplot(0, 0) brain._renderer.plotter.reset_camera() figs, ax = plt.subplots(figsize=(15, 10)) ax.imshow(brain.screenshot(time_viewer=True)) ax.axis('off') comments = evoked.comment captions = caption else: fig_lh = plt.figure() fig_rh = plt.figure() brain_lh = stc.plot(views='lat', hemi='lh', initial_time=peak_time, backend='matplotlib', subjects_dir=fs_subjects_dir, figure=fig_lh) brain_rh = stc.plot(views='lat', hemi='rh', initial_time=peak_time, subjects_dir=fs_subjects_dir, backend='matplotlib', figure=fig_rh) figs = [brain_lh, brain_rh] comments = [ f'{evoked.comment} - left hemisphere', f'{evoked.comment} - right hemisphere' ] captions = [f'{caption} - left', f'{caption} - right'] rep.add_figs_to_section(figs=figs, captions=captions, comments=comments, section='Sources') del peak_time if config.process_er: fig_er_psd = plot_er_psd(subject=subject, session=session) rep.add_figs_to_section(figs=fig_er_psd, captions='Empty-Room Power Spectral Density ' '(after filtering)', section='Empty-Room') fname_report = bids_path.copy().update(suffix='report', extension='.html') rep.save(fname=fname_report, open_browser=False, overwrite=True) import matplotlib.pyplot as plt # nested import to help joblib plt.close('all') # close all figures to save memory
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Raw data can be added via the :meth:`mne.Report.add_raw` method. It can
# operate with a path to a raw file and `~mne.io.Raw` objects, and will
# produce – among other output – a slider that allows you to scrub through 10
# equally-spaced 1-second segments of the data:
#
# .. warning::
#    In the following example, we crop the raw data to 60 seconds merely to
#    speed up processing; this is not usually recommended!

raw_path = sample_dir / 'sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw(raw_path)
raw.pick_types(eeg=True, eog=True, stim=True).crop(tmax=60).load_data()

report = mne.Report(title='Raw example')
# This method also accepts a path, e.g., raw=raw_path
report.add_raw(raw=raw, title='Raw', psd=False)  # omit PSD plot
report.save('report_raw.html', overwrite=True)

# %%
# Adding events
# ^^^^^^^^^^^^^
#
# Events can be added via :meth:`mne.Report.add_events`. You also need to
# supply the sampling frequency used during the recording; this information
# is used to generate a meaningful time axis.

events_path = sample_dir / 'sample_audvis_filt-0-40_raw-eve.fif'
events = mne.find_events(raw=raw)
sfreq = raw.info['sfreq']
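# The snippet stops just before the events are rendered. Judging from the
# :meth:`mne.Report.add_events` signature, the tutorial presumably continues
# roughly like this (report and section titles here are illustrative):

report = mne.Report(title='Events example')
# add_events accepts either a path to an events file or an events array;
# sfreq is needed to convert sample indices into seconds on the time axis.
report.add_events(events=events_path, title='Events from a path', sfreq=sfreq)
report.add_events(events=events, title='Events from an array', sfreq=sfreq)
report.save('report_events.html', overwrite=True)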
def create_report(self): if self.subject == None: raise Exception fif = opj(self.spikes, self.subject + "-epo.fif") fif = mne.read_epochs(fif) now = str(datetime.now()) aquisition_date = self._get_aquisition_date(fif) # logger logfile = opj(self.freport, "Reporter.log") logging.basicConfig(filename=logfile, filemode="w", format="\n%(levelname)s --> %(message)s") rootlog = logging.getLogger() rootlog.setLevel(logging.INFO) rootlog.info( f"Now creating report for: {self.subject.split('sub-')[-1]} \n \ MEG aquisition date --> {aquisition_date}\n \ Report created --> {now}") try: title = (self.subject + ' _MEG_vom_' + aquisition_date + '_Befund-' + now) h5title = (self.subject + ' _MEG_vom_' + aquisition_date + '_Befund') except NameError as ne: rootlog.warning(f"Title setting with aquisition date failed: {ne}") title = (self.subject + ' _MEG_Befund-' + now) h5title = (self.subject + ' _MEG_Befund-' + now) report = mne.Report(subject=self.subject, subjects_dir=self.fanat, title=title, verbose=True) # Add title image try: cover_file = opj(self.extras_dir, "MEG_title.png") cover_title = self.subject + " MEG Befund" report.add_images_to_section(cover_file, section=cover_title, captions=cover_title) except FileNotFoundError as fnfe: rootlog.warning(f"MEG title page not found: {fnfe}") # Event selection --> omit events by renaming the folder/ adding a . in front desired_events = glob.glob(opj(self.spikes, "*")) rootlog.info( f"The following event-folders were found:\n{desired_events}") # Add topomaps epo_filename = opj(self.spikes, str(self.subject) + "-epo.fif") concat_epochs = mne.read_epochs(epo_filename) noise_cov_file = opj(self.spikes, "Spikes_noise_covariance.pkl") times = linspace(-0.02, 0.01, 6) with open(noise_cov_file, 'rb') as f: noise_cov = load(f) for e in desired_events: event = os.path.basename(e) matplotlib.rcParams["figure.facecolor"] = "white" if self._is_desired_event(event): viz_eve = concat_epochs[event].average().crop(-0.15, 0.1) fig = viz_eve.plot_joint(times=times, show=False) title = str(event + " - Topomap") report.add_figure(fig, title=title) # add stcs modalities = ["eLORETA"] # later also: "dSPM"? rootlog.info( f"For event \"{event}\" the following stc-modalities were included: {modalities}." 
) for modality in modalities: try: stc_file = self._return_stc(event=e, modality=modality) title = str(event + " - " + modality) report.add_stc(stc=stc_file, title=title, subject=self.subject, subjects_dir=self.fanat, n_time_points=100) except Exception as ex: rootlog.warning( f"Couldn't include {modality} - stc to report because of: {ex}" ) # add ECD pics rootlog.info(f"Now generating ECD-plot for {event}...") generic_pics_folder = os.path.join(self.spikes, event, "generic_pics") drei = sorted( glob.glob(generic_pics_folder + "/img_3d_ecd*.png")) T1 = sorted(glob.glob(generic_pics_folder + "/img_ecd_*.png")) matplotlib.rcParams["figure.facecolor"] = "black" if drei != [] and T1 != []: ECD_fig = self._plot_ECD_table(T1=T1, drei=drei, event=event) caption = str(event + " - ECD") report.add_figure(ECD_fig, title=caption, caption=caption) # add custom pics and custom time series custom_pics_folder = os.path.join(self.spikes, event, "custom_pics") custom_pics = glob.glob(custom_pics_folder + "/*.png") custom_ts_folder = os.path.join(self.spikes, e, "custom_time_series") custom_ts = glob.glob(custom_ts_folder + "/*.png") if custom_pics is not []: rootlog.info(f"Now adding custom pics for {event}...") for cst in custom_pics: cst_title = cst.split('/')[-1] cst_title = cst_title.split('.')[0] caption = event + ' - ' + cst_title report.add_image(cst, title=cst_title, caption=caption) if custom_ts is not []: rootlog.info( f"Now adding custom time series for {event}...") for _ in custom_ts: caption = event + ' - Time course' fig = plt.figure(figsize=(30, 30), dpi=150, facecolor="k") fig = self._plot_time_course(event=e) plt.tight_layout() report.add_figure(fig, title=caption, caption=caption) break # add frequency distribution #freq_file = opj(self.freq, self.subject + "_Freqs-stc-psd-MNE.pkl") # --> would be nice, but doesn't work, freq = time-index #with open(freq_file, "rb") as f: # stc_freqs = load(f) #title = str(self.subject.split("sub-")[-1] + " - Frequency distribution") #report.add_stc(stc=stc_freqs, title=title, # tags=("Frequency distribution"), # subject=self.subject, subjects_dir=self.fanat) freq_bands = ["delta", "theta", "alpha", "beta", "gamma" ] #the frequency bands of interest for the analysis rootlog.info(f"Now adding frequency distribution...") for freq in freq_bands: try: fig = self._plot_frequencies(freq) title = str( self.subject.split("sub-")[-1] + " - Frequency distribution - " + freq) report.add_figure(fig, title=title) except Exception as ex: rootlog.warning( f"Something went wrong trying to add freqs: {ex}") # BEM try: rootlog.info(f"Now adding BEM.") report.add_bem_to_section(self.subject, decim=4, subjects_dir=self.fanat, section='BEM') except ValueError as ve: rootlog.info( "Could not add BEM to report, maybe a spherical model was used? Error was: {ve}" ) # Add disclaimer image try: rootlog.info(f"Now adding Disclaimer.") disclaimer_file = opj(self.extras_dir, 'MEG_disclaimer.png') report.add_images_to_section(disclaimer_file, section='disclaimer', captions='End notes') except FileNotFoundError as fnfe: rootlog.warning(f"Disclaimer file not found - {e}") # Save all try: rootlog.info(f"Saving...") title = (self.subject + " _MEG_Befund.html") save_name_html = os.path.join(self.freport, title) save_name_h5 = os.path.join(self.freport, (h5title + '.h5')) report.save(save_name_html, overwrite=True) #report.save(save_name_h5) rootlog.info(f"Saving complete.") except Exception as ex: rootlog.warning(f"Saving failed because of: {ex}")
def create_ica_report(ica, epochs, filename, ncomponents=None): try: import nice_ext layout, outlines = nice_ext.equipments.prepare_layout( epochs.info['description'], epochs.info) except ImportError: layout = mne.channels.make_eeg_layout(epochs.info) outlines = 'head' topomap_args = {'outlines': outlines, 'layout': layout} json_fname = filename.replace('.fif', '.json') if ncomponents is None or ncomponents == -1: ncomponents = ica.n_components_ else: ncomponents = min(ncomponents, ica.n_components_) report = mne.Report(title='ICA Components') style = u""" <style type="text/css"> div.ica_menu, form.ica_select { text-align: center; } div#ica_selected { font-size: 20px; margin-bottom: 5px; } form.ica_select input { margin-left: 10px; margin-right: 10px; } </style> <script type="text/javascript"> $('document').ready(function(){ $('.report_Details').each( function() { $(this).css('width', '50%').css('float', 'left').css('height', '800px'); } ) }); function ica_summarize() { var to_reject = $('input[value="reject"]:checked').map( function(o, i) {return parseInt(i.name.replace('ica_', ''));}); var to_render = '<h3> Components to reject</h3><br />' for (var ica_comp = 0; ica_comp < to_reject.length; ica_comp ++) { if (ica_comp > 0) { to_render += ' - '; } to_render += to_reject[ica_comp]; } $('#ica_selected').html(to_render); } function ica_json() { var to_reject = $('input[value="reject"]:checked').map( function(o, i) {return parseInt(i.name.replace('ica_', ''));}); var txtFile = $('#json_fname').val(); var data = {reject: to_reject.get()} var str = JSON.stringify(data); download(str, txtFile, 'text/plain'); } function download(strData, strFileName, strMimeType) { var D = document, A = arguments, a = D.createElement("a"), d = A[0], n = A[1], t = A[2] || "text/plain"; //build download link: a.href = "data:" + strMimeType + "charset=utf-8," + escape(strData); if (window.MSBlobBuilder) { // IE10 var bb = new MSBlobBuilder(); bb.append(strData); return navigator.msSaveBlob(bb, strFileName); } /* end if(window.MSBlobBuilder) */ if ('download' in a) { //FF20, CH19 a.setAttribute("download", n); a.innerHTML = "downloading..."; D.body.appendChild(a); setTimeout(function() { var e = D.createEvent("MouseEvents"); e.initMouseEvent("click", true, false, window, 0, 0, 0, 0, 0, false, false, false, false, 0, null); a.dispatchEvent(e); D.body.removeChild(a); }, 66); return true; }; /* end if('download' in a) */ //do iframe dataURL download: (older W3) var f = D.createElement("iframe"); D.body.appendChild(f); f.src = "data:" + (A[2] ? A[2] : "application/octet-stream") + (window.btoa ? ";base64" : "") + "," + (window.btoa ? 
window.btoa : escape)(strData); setTimeout(function() { D.body.removeChild(f); }, 333); return true; } </script>""" report.include += style fig_comps = ica.plot_components(inst=epochs, outlines=outlines, layout=layout, picks=range(ncomponents)) overall_comment = u""" <div class="ica_menu"> <input id="ica_check" type="button" value="Summarize" onclick="ica_summarize();" /> <div id="ica_selected"></div> <input id="ica_save" type="button" value="Save to JSON" onclick="ica_json();" /> <input type="hidden" id="json_fname" value="{0}"> </div>""" report.add_figs_to_section(figs=[fig_comps], captions=['Topographies'], section='Overall', comments=[overall_comment.format(json_fname)]) plt.close(fig_comps) figs_props = ica.plot_properties(epochs, picks=range(ncomponents), topomap_args=topomap_args) figs_ts = [] sources = ica.get_sources(epochs).get_data() n_sources = sources.shape[0] n_random = 5 n_epochs = 5 for i_comp in range(ncomponents): logger.info('Plotting component {} of {}'.format( i_comp + 1, ncomponents)) idx = np.random.randint(n_sources - n_epochs, size=n_random) fig, axes = plt.subplots(n_random, 1, figsize=(7, 4)) for i, ax in zip(idx, axes): data = sources[i:i + n_epochs, i_comp, :] ax.plot(np.hstack(data), lw=0.5, color='k') [ ax.axvline(data.shape[1] * x, ls='--', lw=0.2, color='k') for x in range(n_epochs) ] figs_ts.append(fig) eptype = list(epochs.event_id.keys())[0].split('/')[-1] props_captions = ['{} - {}'.format(eptype, x) for x in range(ncomponents)] captions = [ elt for sublist in zip(props_captions, props_captions) for elt in sublist ] ts_comments = [''] * len(figs_ts) prop_comment = u""" <form action="" class="ica_select"> <input type="radio" name="ica_{0}" value="accept" checked>Accept <input type="radio" name="ica_{0}" value="reject">Reject </form>""" comments = [prop_comment.format(x) for x in range(ncomponents)] comments = [ elt for sublist in zip(comments, ts_comments) for elt in sublist ] figs = [elt for sublist in zip(figs_props, figs_ts) for elt in sublist] report.add_figs_to_section(figs=figs, captions=captions, comments=comments, section='Details') [plt.close(x) for x in figs_props] return report
def _generate_report(raw_before_preprocessing, raw_after_preprocessing, auto_scores, auto_noisy_chs, auto_flat_chs, data_file_before, report_cross_talk_file, report_calibration_file, report_head_pos_file, param_h_freq, param_origin, param_return_scores, param_limit, param_duration, param_min_count, param_int_order, param_ext_order, param_coord_frame, param_regularize, param_ignore_ref, param_bad_condition, param_skip_by_annotation, param_mag_scale, param_extended_proj): # Generate a report # Create instance of mne.Report # report = mne.Report(title='Results identification of bad channels', verbose=True) ## Give some info about the file before preprocessing ## bad_channels = raw_before_preprocessing.info['bads'] sampling_frequency = raw_before_preprocessing.info['sfreq'] highpass = raw_before_preprocessing.info['highpass'] lowpass = raw_before_preprocessing.info['lowpass'] # Put this info in html format # # Info on data html_text_info = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Input file: {data_file_before}</td> </tr> <tr> <td>Bad channels before automated detection: {bad_channels}</td> </tr> <tr> <td>Sampling frequency: {sampling_frequency}Hz</td> </tr> <tr> <td>Highpass: {highpass}Hz</td> </tr> <tr> <td>Lowpass: {lowpass}Hz</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_info, captions='MEG recording features', section='Data info', replace=False) ## Plot diagnostic figures ## # Scores for automated noisy channels detection # # Only select the data for gradiometer channels ch_type = 'grad' ch_subset = auto_scores['ch_types'] == ch_type ch_names = auto_scores['ch_names'][ch_subset] scores = auto_scores['scores_noisy'][ch_subset] limits = auto_scores['limits_noisy'][ch_subset] bins = auto_scores['bins'] # The windows that were evaluated # We will label each segment by its start and stop time, with up to 3 # digits before and 3 digits after the decimal place (1 ms precision). bin_labels = [f'{start:3.3f} – {stop:3.3f}' for start, stop in bins] # Store the data in a Pandas DataFrame. The seaborn heatmap function # we will call below will then be able to automatically assign the correct # labels to all axes. 
data_to_plot = pd.DataFrame(data=scores, columns=pd.Index(bin_labels, name='Time (s)'), index=pd.Index(ch_names, name='Channel')) # Plot the "raw" scores fig_noisy, ax = plt.subplots(1, 2, figsize=(18, 16)) sns.heatmap(data=data_to_plot, cmap='Reds', cbar_kws=dict(label='Score'), ax=ax[0]) [ax[0].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray') for x in range(1, len(bins))] ax[0].set_title('All Scores', fontweight='bold') # Adjust the color range to highlight segments that exceeded the limit sns.heatmap(data=data_to_plot, vmin=np.nanmin(limits), # bads in input data have NaN limits cmap='Reds', cbar_kws=dict(label='Score'), ax=ax[1]) [ax[1].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray') for x in range(1, len(bins))] ax[1].set_title('Scores > Limit', fontweight='bold') # Add figures to report report.add_figs_to_section(fig_noisy, captions=f'Automated noisy channel detection: {ch_type}', comments=f'Noisy channels detected (grad and mag): {auto_noisy_chs}', section='Diagnostic figures') # Scores for automated flat channels detection # # Only select the data for gradiometer channels scores = auto_scores['scores_flat'][ch_subset] limits = auto_scores['limits_flat'][ch_subset] # Store the data in a Pandas DataFrame data_to_plot = pd.DataFrame(data=scores, columns=pd.Index(bin_labels, name='Time (s)'), index=pd.Index(ch_names, name='Channel')) # Plot the "raw" scores fig_flat, ax = plt.subplots(1, 2, figsize=(18, 16)) sns.heatmap(data=data_to_plot, cmap='Reds', cbar_kws=dict(label='Score'), ax=ax[0]) [ax[0].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray') for x in range(1, len(bins))] ax[0].set_title('All Scores', fontweight='bold') # Adjust the color range to highlight segments that are below the limit sns.heatmap(data=data_to_plot, vmax=np.nanmax(limits), # bads in input data have NaN limits cmap='Reds_r', cbar_kws=dict(label='Score'), ax=ax[1]) [ax[1].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray') for x in range(1, len(bins))] ax[1].set_title('Scores < Limit', fontweight='bold') # Add figures to report report.add_figs_to_section(fig_flat, captions=f'Automated flat channel detection: {ch_type}', comments=f'Flat channels detected (grad and mag): {auto_flat_chs}', section='Diagnostic figures') ## Plot PSD ## # Select only meg signals # raw_before_preprocessing.pick_types(meg=True) raw_after_preprocessing.pick_types(meg=True) ## Plot PSD before and after flat channels detection ## # Select good channels channels = raw_after_preprocessing.info['ch_names'] # Define list of good and flat channels good_channels_and_flats = auto_flat_chs + channels raw_flat_channels_before_preprocessing = raw_before_preprocessing.copy() raw_flat_channels_before_preprocessing = raw_flat_channels_before_preprocessing.pick(picks=good_channels_and_flats) # Plot PSD of gradiometers # # Select only gradiometers for data before preprocessing raw_select_grad_before_preprocessing = raw_flat_channels_before_preprocessing.copy() raw_grad_before_preprocessing = raw_select_grad_before_preprocessing.pick(picks='grad') grad_channels = raw_grad_before_preprocessing.info['ch_names'] # Select only gradiometers for data after preprocessing raw_select_grad_after_preprocessing = raw_after_preprocessing.copy() raw_grad_after_preprocessing = raw_select_grad_after_preprocessing.pick(picks='grad') # Plot PSD for grad + flat grad fig_raw_psd_all_before_grad = mne.viz.plot_raw_psd(raw_grad_before_preprocessing, picks=grad_channels, show=False) # Add figures to report 
captions_fig_raw_psd_all_before_grad = f'Power spectral density of MEG signals including the automated ' \ f'detected flat channels (Gradiometers)' report.add_figs_to_section(figs=fig_raw_psd_all_before_grad, captions=captions_fig_raw_psd_all_before_grad, comments='Noisy channels are not included', section='Power Spectral Density for Gradiometers') ## Plot PSD before and after noisy channels detection ## # Define list of good and flat channels good_channels_and_noisy = auto_noisy_chs + channels raw_noisy_channels_before_preprocessing = raw_before_preprocessing.copy() raw_noisy_channels_before_preprocessing = raw_noisy_channels_before_preprocessing.pick(picks=good_channels_and_noisy) # Plot PSD of gradiometers # # Select only gradiometers for data before preprocessing raw_select_grad_before_preprocessing = raw_noisy_channels_before_preprocessing.copy() raw_grad_before_preprocessing = raw_select_grad_before_preprocessing.pick(picks='grad') grad_channels = raw_grad_before_preprocessing.info['ch_names'] # Select only gradiometers for data after preprocessing raw_grad_after_preprocessing = raw_select_grad_after_preprocessing.pick(picks='grad') # Plot PSD for grad + flat grad fig_raw_psd_all_before_grad = mne.viz.plot_raw_psd(raw_grad_before_preprocessing, picks=grad_channels, show=False) # Add figures to report captions_fig_raw_psd_all_before_grad = f'Power spectral density of MEG signals including the automated ' \ f'detected noisy channels (Gradiometers)' report.add_figs_to_section(figs=fig_raw_psd_all_before_grad, captions=captions_fig_raw_psd_all_before_grad, comments='Flat channels are not included', section='Power Spectral Density for Gradiometers') # Plot PSD of grad excluding flat grads # fig_raw_psd_all_after_grad = mne.viz.plot_raw_psd(raw_grad_after_preprocessing, picks='meg', show=False) captions_fig_raw_psd_all_after_grad = f'Power spectral density of MEG signals without the automated ' \ f'detected noisy and flat channels (Gradiometers)' report.add_figs_to_section(figs=fig_raw_psd_all_after_grad, captions=captions_fig_raw_psd_all_after_grad, section='Power Spectral Density for Gradiometers') ## Plot PSD before and after flat channels detection ## # Plot PSD of magnetometers # # Select only magnetometers for data before preprocessing raw_select_mag_before_preprocessing = raw_flat_channels_before_preprocessing.copy() raw_mag_before_preprocessing = raw_select_mag_before_preprocessing.pick(picks='mag') mag_channels = raw_mag_before_preprocessing.info['ch_names'] # Select only gradiometers for data after preprocessing raw_select_mag_after_preprocessing = raw_after_preprocessing.copy() raw_mag_after_preprocessing = raw_select_mag_after_preprocessing.pick(picks='mag') # Plot PSD for mag + flat grad fig_raw_psd_all_before_mag = mne.viz.plot_raw_psd(raw_mag_before_preprocessing, picks=mag_channels, show=False) # Add figures to report captions_fig_raw_psd_all_before_mag = f'Power spectral density of MEG signals including the automated ' \ f'detected flat channels (Magnetometers)' report.add_figs_to_section(figs=fig_raw_psd_all_before_mag, captions=captions_fig_raw_psd_all_before_mag, comments='Noisy channels are not included', section='Power Spectral Density for Magnetometers') ## Plot PSD before and after noisy channels detection ## # Plot PSD of magnetometers # # Select only magnetometers for data before preprocessing raw_select_mag_before_preprocessing = raw_noisy_channels_before_preprocessing.copy() raw_mag_before_preprocessing = raw_select_mag_before_preprocessing.pick(picks='mag') 
mag_channels = raw_mag_before_preprocessing.info['ch_names'] # Select only magnetometers for data after preprocessing raw_mag_after_preprocessing = raw_select_mag_after_preprocessing.pick(picks='mag') # Plot PSD for mag + noisy mag fig_raw_psd_all_before_mag = mne.viz.plot_raw_psd(raw_mag_before_preprocessing, picks=mag_channels, show=False) # Add figures to report captions_fig_raw_psd_all_before_grad = f'Power spectral density of MEG signals including the automated ' \ f'detected noisy channels (Magnetometers)' report.add_figs_to_section(figs=fig_raw_psd_all_before_mag, captions=captions_fig_raw_psd_all_before_mag, comments='Flat channels are not included', section='Power Spectral Density for Magnetometers') # Plot PSD of mag excluding noisy mag # fig_raw_psd_all_after_mag = mne.viz.plot_raw_psd(raw_mag_after_preprocessing, picks='meg', show=False) captions_fig_raw_psd_all_after_mag = f'Power spectral density of MEGsignals without the automated ' \ f'detected noisy and flat channels (Magnetometers)' report.add_figs_to_section(figs=fig_raw_psd_all_after_mag, captions=captions_fig_raw_psd_all_after_mag, section='Power Spectral Density for Magnetometers') # Delete useless copies del raw_select_grad_before_preprocessing del raw_select_grad_after_preprocessing del raw_grad_before_preprocessing del raw_grad_after_preprocessing del raw_select_mag_before_preprocessing del raw_select_mag_after_preprocessing del raw_mag_before_preprocessing del raw_mag_after_preprocessing ## If they exist, plot bad channels in time domain ## # Noisy channels if auto_noisy_chs: # Select random grad channels to plot including the noisy ones ch_to_plot = random.sample(ch_names.tolist(), 49) ch_to_plot += auto_noisy_chs raw_ch_to_plot = raw_after_preprocessing.copy() # Plot channels in time domain raw_ch_to_plot.pick_channels(ch_to_plot) fig_raw_noisy_channels = raw_ch_to_plot.plot(duration=20, n_channels=50, scalings='auto', butterfly=False, show_scrollbars=False) del raw_ch_to_plot report.add_figs_to_section(fig_raw_noisy_channels, captions=f'MEG signals including automated ' f'detected noisy channels', comments='The noisy channels are in gray.', section='Time domain') # Flat channels if auto_flat_chs: # Select random grad channels to plot including the flat ones ch_to_plot = random.sample(ch_names.tolist(), 49) ch_to_plot += auto_flat_chs raw_ch_to_plot = raw_after_preprocessing.copy() # Plot channels in time domain raw_ch_to_plot.pick_channels(ch_to_plot) fig_raw_flat_channels = raw_ch_to_plot.plot(duration=20, n_channels=50, scalings='auto', butterfly=False, show_scrollbars=False) del raw_ch_to_plot report.add_figs_to_section(fig_raw_flat_channels, captions=f'MEG signals including automated ' f'detected flat channels', comments='The flat channels are in gray.', section='Time domain') ## Values of the parameters of the App ## mne_version = mne.__version__ # Put this info in html format # html_text_parameters = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Cross-talk file: {report_cross_talk_file}</td> </tr> <tr> <td>Calibration file: {report_calibration_file}</td> </tr> <tr> <td>Headshape file: {report_head_pos_file}</td> </tr> <tr> <td>Origin: {param_origin}</td> </tr> <tr> <td>Limit: {param_limit} noisy segments</td> </tr> <tr> <td>Duration: {param_duration}s</td> </tr> <tr>out_dir_name <td>Min count: 
{param_min_count} times</td> </tr> <tr> <td>Order of internal component of sherical expansion: {param_int_order}</td> </tr> <tr> <td>Order of external component of sherical expansion: {param_ext_order}</td> </tr> <tr> <td>Coordinate frame: {param_coord_frame}</td> </tr> <tr> <td>Regularize: {param_regularize}</td> </tr> <tr> <td>Ignore reference channel: {param_ignore_ref}</td> </tr> <tr> <td>Bad condition: {param_bad_condition}</td> </tr> <tr> <td>Magnetomer scale-factor: {param_mag_scale}</td> </tr> <tr> <td>Skip by annotation: {param_skip_by_annotation}</td> </tr> <tr> <td>Cutoff frequency of the low-pass filter: {param_h_freq}Hz</td> </tr> <tr> <td>Empty-room projection vectors: {param_extended_proj}</td> </tr> <tr> <td>MNE version used: {mne_version}</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_parameters, captions='Values of the parameters of the App', section='Parameters of the App', replace=False) # Save report report.save('out_dir_report/report_bad_channels.html', overwrite=True)
#
# On successful creation of the report, the :meth:`~mne.Report.save` method
# will open the HTML in a new tab in the browser. To disable this, use the
# ``open_browser=False`` parameter of :meth:`~mne.Report.save`.
#
# For our first example, we'll generate a barebones report for all the
# :file:`.fif` files containing raw data in the sample dataset, by passing the
# pattern ``*raw.fif`` to :meth:`~mne.Report.parse_folder`. We'll omit the
# ``subject`` and ``subjects_dir`` parameters from the :class:`~mne.Report`
# constructor, but we'll also pass ``render_bem=False`` to the
# :meth:`~mne.Report.parse_folder` method — otherwise we would get a warning
# about not being able to render MRI and ``trans`` files without knowing the
# subject.

path = mne.datasets.sample.data_path(verbose=False)
report = mne.Report(verbose=True)
report.parse_folder(path, pattern='*raw.fif', render_bem=False)
report.save('report_basic.html')

###############################################################################
# This report yields a textual summary of the :class:`~mne.io.Raw` files
# selected by the pattern. For a slightly more useful report, we'll ask for the
# power spectral density of the :class:`~mne.io.Raw` files, by passing
# ``raw_psd=True`` to the :class:`~mne.Report` constructor. Let's also refine
# our pattern to select only the filtered raw recording (omitting the
# unfiltered data and the empty-room noise recordings):

pattern = 'sample_audvis_filt-0-40_raw.fif'
report = mne.Report(verbose=True, raw_psd=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_raw_psd.html')
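###############################################################################
# Several snippets in this collection update reports incrementally by saving
# them to HDF5 and reopening them later. A brief sketch of that round trip
# (file names here are illustrative):

report = mne.Report(verbose=True, raw_psd=True)
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_raw_psd.h5', overwrite=True)  # editable on-disk state

# Later, possibly in another script: open_report acts as a context manager
# that re-saves the .h5 on exit, so additions made inside the block persist.
with mne.open_report('report_raw_psd.h5') as saved_report:
    # ... add further sections here ...
    saved_report.save('report_raw_psd.html', overwrite=True,
                      open_browser=False)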
# modified from 70_report.py
import os
import matplotlib.pyplot as plt
import mne
import sys

path = mne.datasets.sample.data_path(verbose=False)
subjects_dir = os.path.join(path, 'subjects')
report = mne.Report(subject='sample', subjects_dir=subjects_dir,
                    raw_psd=True, projs=True, verbose=True)
report.parse_folder(path, render_bem=True)

fname_stc = os.path.join(path, 'MEG', 'sample', 'sample_audvis-meg')
stc = mne.read_source_estimate(fname_stc, subject='sample')
figs = list()
kwargs = dict(subjects_dir=subjects_dir, initial_time=0.13,
              clim=dict(kind='value', lims=[3, 6, 9]))
for hemi in ('lh', 'rh'):
    brain = stc.plot(hemi=hemi, **kwargs)
    brain.toggle_interface(False)
    figs.append(brain.screenshot(time_viewer=True))
    brain.close()

# add the stc plot to the report:
report.add_slider_to_section(figs)
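# `add_slider_to_section` only exists in older MNE releases. Under the current
# Report API the same list of screenshots could presumably be added with
# `Report.add_figure`, which accepts a sequence of figures/images and renders
# them with a slider. A sketch, not a drop-in replacement for the line above:
report.add_figure(fig=figs, title='Source estimate',
                  caption=['Left hemisphere', 'Right hemisphere'],
                  tags=('source-estimate',))
report.save('report_stc.html', overwrite=True)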
def main():
    """Make reports."""
    msg = 'Running Step 99: Create reports'
    logger.info(gen_log_message(step=99, message=msg))

    parallel, run_func, _ = parallel_func(run_report, n_jobs=config.N_JOBS)
    parallel(run_func(subject, session) for subject, session in
             itertools.product(config.get_subjects(), config.get_sessions()))

    # Group report
    subject = 'average'
    # XXX to fix
    if config.get_sessions():
        session = config.get_sessions()[0]
    else:
        session = None

    evoked_fname = BIDSPath(subject=subject, session=session,
                            task=config.get_task(), acquisition=config.acq,
                            run=None, recording=config.rec,
                            space=config.space, suffix='ave',
                            extension='.fif', datatype=config.get_datatype(),
                            root=config.deriv_root, check=False)

    rep = mne.Report(info_fname=evoked_fname, subject='fsaverage',
                     subjects_dir=config.get_fs_subjects_dir())
    evokeds = mne.read_evokeds(evoked_fname)
    subjects_dir = config.get_fs_subjects_dir()

    method = config.inverse_method
    inverse_str = method
    hemi_str = 'hemi'  # MNE will auto-append '-lh' and '-rh'.
    morph_str = 'morph2fsaverage'

    conditions = config.conditions.copy()
    conditions.extend(config.contrasts)

    ###########################################################################
    #
    # Visualize evoked responses.
    #
    for condition, evoked in zip(conditions, evokeds):
        if condition in config.conditions:
            caption = f'Average: {condition}'
            section = 'Evoked'
        else:  # It's a contrast of two conditions.
            caption = f'Average Contrast: {condition[0]} – {condition[1]}'
            section = 'Contrast'

        fig = evoked.plot(spatial_colors=True, gfp=True, show=False)
        rep.add_figs_to_section(figs=fig, captions=caption,
                                comments=evoked.comment, section=section)

    ###########################################################################
    #
    # Visualize inverse solutions.
    #
    for condition, evoked in zip(conditions, evokeds):
        if condition in config.conditions:
            caption = f'Average: {condition}'
            cond_str = condition.replace(op.sep, '').replace('_', '')
        else:  # It's a contrast of two conditions.
            # XXX Will change once we process contrasts here too
            continue

        section = 'Source'
        fname_stc_avg = evoked_fname.copy().update(
            suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}',
            extension=None)

        if op.exists(str(fname_stc_avg) + "-lh.stc"):
            stc = mne.read_source_estimate(fname_stc_avg,
                                           subject='fsaverage')
            _, peak_time = stc.get_peak()

            # Plot using 3d backend if available, and use Matplotlib
            # otherwise.
            if mne.viz.get_3d_backend() is not None:
                brain = stc.plot(views=['lat'], hemi='both',
                                 initial_time=peak_time, backend='mayavi',
                                 subjects_dir=subjects_dir)
                figs = brain._figures[0]
                captions = caption
            else:
                import matplotlib.pyplot as plt
                fig_lh = plt.figure()
                fig_rh = plt.figure()
                brain_lh = stc.plot(views='lat', hemi='lh',
                                    initial_time=peak_time,
                                    backend='matplotlib', figure=fig_lh,
                                    subjects_dir=subjects_dir)
                brain_rh = stc.plot(views='lat', hemi='rh',
                                    initial_time=peak_time,
                                    backend='matplotlib', figure=fig_rh,
                                    subjects_dir=subjects_dir)
                figs = [brain_lh, brain_rh]
                captions = [f'{caption} - left', f'{caption} - right']

            rep.add_figs_to_section(figs=figs, captions=captions,
                                    section='Sources')
            del peak_time

    fname_report = evoked_fname.copy().update(task=config.get_task(),
                                              suffix='report',
                                              extension='.html')
    rep.save(fname=fname_report, open_browser=False, overwrite=True)

    msg = 'Completed Step 99: Create reports'
    logger.info(gen_log_message(step=99, message=msg))
def create_mxne_summary( subjects, p, morph_subject=None, n_display=None, pattern_in='', pattern_out='_mxne', path_out='./', title='%s Dipoles', ): '''Create a report and spreadsheet about mixed-norm dipoles.''' src_file = os.path.join(p.subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif') src_fsavg = mne.read_source_spaces(src_file) src_file = os.path.join(p.subjects_dir, '14mo_surr', 'bem', '14mo_surr-oct-6-src.fif') src_14mo = mne.read_source_spaces(src_file) mni_labels = mne.read_labels_from_annot('fsaverage', 'HCPMMP1', subjects_dir=p.subjects_dir) labels = mne.read_labels_from_annot('14mo_surr', use_parc, subjects_dir=p.subjects_dir) for subject in subjects: # Load STCs and other saved data # cond = p.stc_params['condition'] stc_path = os.path.join(p.work_dir, subject, p.stc_dir) stc_stem = subject + '_' + cond + pattern_in stc_file = os.path.join(stc_path, stc_stem) if not os.path.isfile(stc_file + '-lh.stc'): print(f'** STC file matching {stc_stem} not found ********.\n') continue stc_mxne = mne.read_source_estimate(stc_file) n_dipoles, n_times = stc_mxne.data.shape meta_data = np.load(stc_file + '.npy', allow_pickle=True) gof_mxne = meta_data[0] residual_mxne = meta_data[1] evk_path = os.path.join(p.work_dir, subject, p.inverse_dir) evk_file = f'Locations_40-sss_eq_{subject}-ave.fif' evk_file = os.path.join(evk_path, evk_file) evoked = mne.read_evokeds(evk_file, condition=cond, kind='average') evoked.pick_types(meg=True) cov_path = os.path.join(p.work_dir, subject, p.cov_dir) cov_file = f'{subject}-40-sss-cov.fif' cov_file = os.path.join(cov_path, cov_file) cov = mne.read_cov(cov_file) trans_path = os.path.join(p.work_dir, subject, p.trans_dir) trans_file = f'{subject}-trans.fif' trans_file = os.path.join(trans_path, trans_file) trans = mne.read_trans(trans_file, verbose=False) fwd_path = os.path.join(p.work_dir, subject, p.forward_dir) fwd_file = f'{subject}-sss-fwd.fif' fwd_file = os.path.join(fwd_path, fwd_file) fwd = mne.read_forward_solution(fwd_file, verbose=False) assert fwd['src'][0]['nuse'] == src_14mo[0]['nuse'] assert fwd['src'][1]['nuse'] == src_14mo[1]['nuse'] # Run analysis on the dipoles, then sort then by goodness-of-fit # results = analyze_dipoles(stc_mxne, gof_mxne, evoked, cov, p.stc_params['gof_t_range']) results, sort_idx = sort_dipoles(results) # stc still unsorted gof_results, amp_results = results assert len(gof_results) == n_dipoles # Collect info for the top dipoles, in order # n_show = n_dipoles if n_display: n_show = min(n_display, n_show) n_left = len(stc_mxne.vertices[0]) # .data stacked lh then rh postop, mnitop, wavtop = [], [], [] for i in range(n_dipoles): di = sort_idx[i] hemid = int(di >= n_left) vidx = di - hemid * n_left vert = stc_mxne.vertices[hemid][vidx] pos = fwd['src'][hemid]['rr'][vert] postop.append(pos) mni = mne.vertex_to_mni(vert, hemid, subject, subjects_dir=p.subjects_dir) mnitop.append(mni) wav = stc_mxne.data[di, :] wavtop.append(wav) assert wav[amp_results[i].pidx] == amp_results[i].peak # check last # Make various figures # figure_list, figure_info, figure_comment = [], [], [] # 1) Top dipoles in one set of surface maps. if morph_subject: src_subject = morph_subject caption = 'Surface Plots | ' + morph_subject else: src_subject = subject caption = 'Surface Plots | Coreg.' 
fig_surface = make_surfaceplots(stc_mxne, src_subject, p.subjects_dir, sort_idx, parc=use_parc) figure_list.append(fig_surface) figure_info.append([caption, 'Surface Plots']) figure_comment.append(color_comment) # 2) Top dipoles in 3D slices (non-morphed and MNI). mri_file = os.path.join(p.subjects_dir, subject, 'mri', 'T1.mgz') postop_mri = mne.head_to_mri(postop, mri_head_t=trans, subject=subject, subjects_dir=p.subjects_dir) postop_mni = mne.head_to_mni(postop, mri_head_t=trans, subject=subject, subjects_dir=p.subjects_dir) assert_allclose(mnitop[0], postop_mni[0], atol=0.01) assert_allclose(mnitop[-1], postop_mni[-1], atol=0.01) fig_orthog1 = make_orthogplots(mri_file, postop_mri[:n_show]) fig_orthog2 = make_orthogplots(mni_template, postop_mni[:n_show]) figure_list.append(fig_orthog1) figure_info.append(['Orthogonal Plots | Coreg.', 'Orthogonal Plots']) figure_comment.append(None) figure_list.append(fig_orthog2) figure_info.append(['Orthogonal Plots | MNI)', 'Orthogonal Plots']) figure_comment.append(f'Top {n_show} of {n_dipoles} dipoles ' 'displayed.') # 3) Top dipoles' time waveforms. fig_wav = make_sourcewavs(wavtop, stc_mxne.times, p.stc_params['gof_t_range']) figure_list.append(fig_wav) figure_info.append(['STC Time Course', 'Temporal Waveforms']) figure_comment.append(None) # 4) Evoked and residual waveforms (averages across sensors) fig_sensor = make_sensorwavs(evoked, residual_mxne) figure_list.append(fig_sensor) figure_info.append(['Sensor Time Course', 'Temporal Waveforms']) figure_comment.append(None) # Determine 14-mo surrogate "aparc" label for each dipole # labels_stc = [] # note these are not gof-ordered for hh, hemi in enumerate(('lh', 'rh')): for vert in stc_mxne.vertices[hh]: label = which_label(vert, hemi, labels) if label: labels_stc.append(label.name) else: labels_stc.append('no_label') # Expand the sparse STC so it can be morphed (X='expanded') # # v_lh = fwd['src'][0]['vertno'] # the full source space # v_rh = fwd['src'][1]['vertno'] # n_vtotal = vertices = len(v_lh) + len(v_rh) # data_mxneX = np.zeros((n_vtotal, n_times)) # idx_vrts = np.isin(v_lh, stc_mxne.vertices[0]) # idx_vrts = np.where(idx_vrts)[0] # data_mxneX[idx_vrts, :] = stc_mxne.data[:n_left, :] # idx_vrts = np.isin(v_rh, stc_mxne.vertices[1]) # idx_vrts = np.where(idx_vrts)[0] # data_mxneX[idx_vrts, :] = stc_mxne.data[n_left:, :] # stc_mxneX = mne.SourceEstimate(data_mxneX, [v_lh, v_rh], # tmin=stc_mxne.tmin, tstep=stc_mxne.tstep, # subject=stc_mxne.subject) # Determine fsaverage "HCPMMP1" labels for each dipole # # Note: 'sparse' doesn't give a 1-to-1 mapping. 
morph_fcn = mne.compute_source_morph(stc_mxne, src_to=src_fsavg, smooth='nearest', spacing=None, warn=False, subjects_dir=p.subjects_dir, niter_sdr=(), sparse=True, subject_from=subject, subject_to='fsaverage') mlabels_stc = [] # like above, but now for fsaverage:HCPMMP1 verts_mni = [] for di in range(n_dipoles): stc_temp = stc_mxne.copy() # zero all but dipole of interest stc_temp.data = np.zeros((n_dipoles, n_times)) stc_temp.data[di, :] = stc_mxne.data[di, :] mstc_temp = morph_fcn.apply(stc_temp) vidx = np.where(mstc_temp.data[:, 0] > 0)[0] vidx_lh = [i for i in vidx if i < n_left] # don't assume hemi vidx_rh = [i - n_left for i in vidx if i >= n_left] verts_byhemi = [None, None] verts_byhemi[0] = mstc_temp.vertices[0][vidx_lh] verts_byhemi[1] = mstc_temp.vertices[1][vidx_rh] verts_mni.append(verts_byhemi) cnt = 0 for verts, hemi, prefix in zip(verts_byhemi, ['lh', 'rh'], ['L_', 'R_']): if not verts: continue vert = verts[0] # should only be one with sparse arg. lbl = which_label(vert, hemi, mni_labels) if lbl: lbl = lbl.name else: lbl = 'no_label' lbl = re.sub(rf"^{prefix}", "", lbl) lbl = re.sub(r"_ROI", "", lbl) cnt += 1 assert cnt == 1 # only one hemisphere should be valid mlabels_stc.append(lbl) # Create formatted tables for a report section # # SMB: Saving as string objects in case they can be added to report. strobj1 = StringIO() # TABLE 1: sorted gof and amplitude info sprint = lambda *x: print(*x, file=strobj1, end='') ff = '<8.2f' # format: center on 8-char field, 2 decimal places sprint(f'{"Dip #":^6} {"Peak/Mean Amp":<16} ' f'{"Peak/Mean GOF":<16} {"GOF Time":<8}\n') for i in range(n_dipoles): amp_m = 1e9 * amp_results[i].mean amp_p = 1e9 * amp_results[i].peak gof_m = gof_results[i].mean gof_p = gof_results[i].peak time_p = evoked.times[gof_results[i].pidx] sprint(f'{i:^6} {amp_p:{ff}}{amp_m:{ff}} ' f'{gof_p:{ff}}{gof_m:{ff}} ' f'{time_p:{"<8.3f"}}\n') sprint('\n') strobj2 = StringIO() # TABLE 2: coordinate and label info sprint = lambda *x: print(*x, file=strobj2, end='') ff = '<20' sprint(f'{"Dip #":^6} {"14mo Coord":{ff}} {"MNI Coord":{ff}} ' f'{"14mo Aparc | Fsavg HCPMMP1":{ff}}\n') for i in range(n_dipoles): di = sort_idx[i] hemid = int(di >= n_left) # hemi = 'rh' if hemid else 'lh' vidx = di - hemid * n_left vert = stc_mxne.vertices[hemid][vidx] coord = src_14mo[hemid]['rr'][vert] * 1000 coord_str = ' '.join([f'{x:.1f}' for x in coord]) vert = verts_mni[di][hemid][0] # just the first one coord = src_fsavg[hemid]['rr'][vert] * 1000 mcoord_str = ' '.join([f'{x:.1f}' for x in coord]) sprint(f'{i:^6} {coord_str:{ff}} {mcoord_str:{ff}} ' f'{labels_stc[di]:{ff}}\n {"":<47} ' f'{mlabels_stc[di]:{ff}}\n') # Print out the tables # print(f'\nGOF-sorted dipole info for {subject}:') strobj1.seek(0) print(strobj1.read()) strobj1.close() print(f'\nGOF-sorted position info for {subject}:') strobj2.seek(0) print(strobj2.read()) strobj2.close() # Compile all figures into a report # print(f'Compiling report for {subject}.') if not os.path.exists(path_out): os.mkdir(path_out) if '%s ' in title: title_use = title.replace('%s ', 'Group') else: title_use = title report = mne.Report(title=title_use, image_format='png') for fig, info, cstr in zip(figure_list, figure_info, figure_comment): report.add_figs_to_section(fig, captions=info[0], scale=1.0, section=info[1], comments=cstr) report_file = os.path.join(path_out, subject + pattern_out + '.html') report.save(report_file, open_browser=False, overwrite=True)
def run_report(*, cfg, subject, session=None): bids_path = BIDSPath(subject=subject, session=session, task=cfg.task, acquisition=cfg.acq, run=None, recording=cfg.rec, space=cfg.space, extension='.fif', datatype=cfg.datatype, root=cfg.deriv_root, check=False) fname_ave = bids_path.copy().update(suffix='ave') fname_epo = bids_path.copy().update(suffix='epo') if cfg.use_template_mri: fname_trans = 'fsaverage' has_trans = True else: fname_trans = bids_path.copy().update(suffix='trans') has_trans = op.exists(fname_trans) fname_epo = bids_path.copy().update(processing='clean', suffix='epo') fname_ica = bids_path.copy().update(suffix='ica') fname_decoding = fname_epo.copy().update(processing=None, suffix='decoding', extension='.mat') fname_tfr_pow = bids_path.copy().update(suffix='power+condition+tfr', extension='.h5') title = f'sub-{subject}' if session is not None: title += f', ses-{session}' if cfg.task is not None: title += f', task-{cfg.task}' params: Dict[str, Any] = dict(info_fname=fname_epo, raw_psd=True, subject=cfg.fs_subject, title=title) if has_trans: params['subjects_dir'] = cfg.fs_subjects_dir rep = mne.Report(**params) rep_kwargs: Dict[str, Any] = dict(data_path=fname_ave.fpath.parent, verbose=False) if not has_trans: rep_kwargs['render_bem'] = False if cfg.task is not None: rep_kwargs['pattern'] = f'*_task-{cfg.task}*' if mne.viz.get_3d_backend() is not None: with mne.viz.use_3d_backend('pyvistaqt'): rep.parse_folder(**rep_kwargs) else: rep.parse_folder(**rep_kwargs) # Visualize automated noisy channel detection. if cfg.find_noisy_channels_meg: figs, captions = plot_auto_scores(cfg=cfg, subject=subject, session=session) rep.add_figs_to_section(figs=figs, captions=captions, section='Data Quality') # Visualize events. if cfg.task.lower() != 'rest': events_fig = plot_events(cfg=cfg, subject=subject, session=session) rep.add_figs_to_section(figs=events_fig, captions='Events in filtered continuous data', section='Events') ########################################################################### # # Visualize effect of ICA artifact rejection. # if cfg.spatial_filter == 'ica': epochs = mne.read_epochs(fname_epo) ica = mne.preprocessing.read_ica(fname_ica) fig = ica.plot_overlay(epochs.average(), show=False) rep.add_figs_to_section( fig, captions=f'Evoked response (across all epochs) ' f'before and after ICA ' f'({len(ica.exclude)} ICs removed)', section='ICA') ########################################################################### # # Visualize TFR as topography. # if cfg.time_frequency_conditions is None: conditions = [] elif isinstance(cfg.time_frequency_conditions, dict): conditions = list(cfg.time_frequency_conditions.keys()) else: conditions = cfg.time_frequency_conditions.copy() for condition in conditions: cond = config.sanitize_cond_name(condition) fname_tfr_pow_cond = str(fname_tfr_pow.copy()).replace( "+condition+", f"+{cond}+") power = mne.time_frequency.read_tfrs(fname_tfr_pow_cond) fig = power[0].plot_topo(show=False, fig_facecolor='w', font_color='k', border='k') rep.add_figs_to_section(figs=fig, captions=f"TFR Power: {condition}", section="TFR") ########################################################################### # # Visualize evoked responses. 
# if cfg.conditions is None: conditions = [] elif isinstance(cfg.conditions, dict): conditions = list(cfg.conditions.keys()) else: conditions = cfg.conditions.copy() conditions.extend(cfg.contrasts) if conditions: evokeds = mne.read_evokeds(fname_ave) else: evokeds = [] for condition, evoked in zip(conditions, evokeds): if cfg.analyze_channels: evoked.pick(cfg.analyze_channels) if condition in cfg.conditions: caption = f'Condition: {condition}' section = 'Evoked' else: # It's a contrast of two conditions. caption = f'Contrast: {condition[0]} – {condition[1]}' section = 'Contrast' fig = evoked.plot(spatial_colors=True, gfp=True, show=False) rep.add_figs_to_section(figs=fig, captions=caption, comments=evoked.comment, section=section) ########################################################################### # # Visualize decoding results. # if cfg.decode: epochs = mne.read_epochs(fname_epo) for contrast in cfg.contrasts: cond_1, cond_2 = contrast a_vs_b = f'{cond_1}+{cond_2}'.replace(op.sep, '') processing = f'{a_vs_b}+{cfg.decoding_metric}' processing = processing.replace('_', '-').replace('-', '') fname_decoding_ = (fname_decoding.copy().update( processing=processing)) decoding_data = loadmat(fname_decoding_) del fname_decoding_, processing, a_vs_b fig = plot_decoding_scores( times=epochs.times, cross_val_scores=decoding_data['scores'], metric=cfg.decoding_metric) caption = f'Time-by-time Decoding: {cond_1} ./. {cond_2}' comment = (f'{len(epochs[cond_1])} × {cond_1} ./. ' f'{len(epochs[cond_2])} × {cond_2}') rep.add_figs_to_section(figs=fig, captions=caption, comments=comment, section='Decoding') del decoding_data, cond_1, cond_2, caption, comment del epochs ########################################################################### # # Visualize the coregistration & inverse solutions. # if has_trans: evokeds = mne.read_evokeds(fname_ave) # Omit our custom coreg plot here – this is now handled through # parse_folder() automatically. Keep the following code around for # future reference. # # # We can only plot the coregistration if we have a valid 3d backend. # if mne.viz.get_3d_backend() is not None: # fig = mne.viz.plot_alignment(evoked.info, fname_trans, # subject=cfg.fs_subject, # subjects_dir=cfg.fs_subjects_dir, # meg=True, dig=True, eeg=True) # rep.add_figs_to_section(figs=fig, captions='Coregistration', # section='Coregistration') # else: # msg = ('Cannot render sensor alignment (coregistration) because ' # 'no usable 3d backend was found.') # logger.warning(gen_log_message(message=msg, # subject=subject, session=session)) for condition, evoked in zip(conditions, evokeds): msg = f'Rendering inverse solution for {evoked.comment} …' logger.info(**gen_log_kwargs( message=msg, subject=subject, session=session)) if condition in cfg.conditions: full_condition = config.sanitize_cond_name(evoked.comment) caption = f'Condition: {full_condition}' del full_condition else: # It's a contrast of two conditions. # XXX Will change once we process contrasts here too continue method = cfg.inverse_method cond_str = config.sanitize_cond_name(condition) inverse_str = method hemi_str = 'hemi' # MNE will auto-append '-lh' and '-rh'. fname_stc = bids_path.copy().update( suffix=f'{cond_str}+{inverse_str}+{hemi_str}', extension=None) if op.exists(str(fname_stc) + "-lh.stc"): stc = mne.read_source_estimate(fname_stc, subject=cfg.fs_subject) _, peak_time = stc.get_peak() # Plot using 3d backend if available, and use Matplotlib # otherwise. 
import matplotlib.pyplot as plt if mne.viz.get_3d_backend() is not None: brain = stc.plot(views=['lat'], hemi='split', initial_time=peak_time, backend='pyvistaqt', time_viewer=True, subjects_dir=cfg.fs_subjects_dir) brain.toggle_interface() brain._renderer.plotter.reset_camera() brain._renderer.plotter.subplot(0, 0) brain._renderer.plotter.reset_camera() figs, ax = plt.subplots(figsize=(15, 10)) ax.imshow(brain.screenshot(time_viewer=True)) ax.axis('off') comments = evoked.comment captions = caption else: fig_lh = plt.figure() fig_rh = plt.figure() brain_lh = stc.plot(views='lat', hemi='lh', initial_time=peak_time, backend='matplotlib', subjects_dir=cfg.fs_subjects_dir, figure=fig_lh) brain_rh = stc.plot(views='lat', hemi='rh', initial_time=peak_time, subjects_dir=cfg.fs_subjects_dir, backend='matplotlib', figure=fig_rh) figs = [brain_lh, brain_rh] comments = [ f'{evoked.comment} - left hemisphere', f'{evoked.comment} - right hemisphere' ] captions = [f'{caption} - left', f'{caption} - right'] rep.add_figs_to_section(figs=figs, captions=captions, comments=comments, section='Sources') del peak_time if cfg.process_er: fig_er_psd = plot_er_psd(cfg=cfg, subject=subject, session=session) rep.add_figs_to_section(figs=fig_er_psd, captions='Empty-Room Power Spectral Density ' '(after filtering)', section='Empty-Room') fname_report = bids_path.copy().update(suffix='report', extension='.html') rep.save(fname=fname_report, open_browser=False, overwrite=True) import matplotlib.pyplot as plt # nested import to help joblib plt.close('all') # close all figures to save memory
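# run_report() builds the report for one subject/session; a hypothetical driver sketch using
# joblib (cfg, N_JOBS and subject_session_pairs are placeholder names, not part of the pipeline
# code above):
from joblib import Parallel, delayed

Parallel(n_jobs=N_JOBS)(
    delayed(run_report)(cfg=cfg, subject=subject, session=session)
    for subject, session in subject_session_pairs)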
from EEG_pareidolia_utils import * import mne from mne.datasets.brainstorm import bst_raw from mne.time_frequency import tfr_morlet, psd_multitaper, psd_welch import numpy as np import matplotlib.pyplot as plt from scipy import stats import scipy.io as sio from scipy.io import savemat, loadmat from param import * RUN_LIST = {'pareidolia': ['1', '2', '3', '4'], 'RS': ['1', '2']} SUBJ_LIST = ['01', '02'] TASK_LIST = ['pareidolia'] COMPLEX_LIST = ['Spectral Entropy', 'SVD', 'Hurst'] report = mne.Report(verbose=True) def p_values_boolean(p_values): p_values_boolean = p_values.copy() for e in range(p_values.shape[1]): for c in range(p_values.shape[0]): if p_values[c, e] < 0.05: p_values_boolean[c, e] = True else: p_values_boolean[c, e] = False p_values_boolean = np.array(p_values_boolean, dtype='bool') return (p_values_boolean) def comp2array(cp):
def eeg_emg_alignment(eeg_fName, emg_df, sfreq_final, emg_freq, report_fName=None, start_marker=True, fir=[1, None], PREP=True, montage='standard_1020'): """ This function takes the .set format for EEG and a dataframe (read from a .txt file) for EMG. Parameters ----------- eeg_fName : string eeg_fName should be in .set form emg_df : pandas.DataFrame EMG data read from the .txt recording, one column per EMG channel sfreq_final : float sampling frequency the EMG channels are resampled to before being merged with the EEG emg_freq : float original sampling frequency of the EMG recording report_fName : string optional, defaults to None, in which case no report is generated for the preprocessing result. When report_fName is specified, two reports are generated at the suggested directory: one in HTML and one in .h5 format. The .h5 format is an editable report. montage : string the default montage is set to the standard 1020 montage start_marker : boolean if start_marker is True, the segment before the first marker is cropped, defaults to True fir : list the lower and upper boundary of the filter. If a boundary is set to None, the filter becomes a high-pass or low-pass filter. PREP : boolean whether to run the PREP EEG preprocessing pipeline, defaults to True. It can be deactivated by setting it to False. Returns ------- mne raw object containing aligned eeg and emg data Examples -------- >>>eeg_fName = r'D:\Data\RuiJinFirstStroke11Jan\EEG\subj1_healthy_session1.set' >>>emg_fName = r'D:\Data\RuiJinFirstStroke11Jan\EMG\subj1_healthy_session1.txt' >>>emg_df = pd.read_csv(emg_fName, header = None, skiprows=3, sep = ' ',engine = 'python') >>>eeg_emg_alignment(eeg_fName,emg_df,emg_freq=1000,sfreq_final=500,report_fName=None,PREP=False) See Also -------- Warnings -------- Notes -------- Make sure the EEG recording is no shorter than the EMG recording. The current version of this function crops the EMG signal with respect to the EEG in order to keep the two data streams aligned. 
""" import mne, os import pandas as pd import numpy as np from pyprep.prep_pipeline import PrepPipeline # numpy and PrepPipeline are used below raw_eeg = mne.io.read_raw_eeglab(eeg_fName) raw_eeg.set_montage(montage) if start_marker: eeg_onset = mne.events_from_annotations(raw_eeg)[0][0, 0] raw_eeg.crop(tmin=eeg_onset / raw_eeg.info["sfreq"]) if report_fName is not None: #### report raw properties ### report = mne.Report(verbose=True) report_fname_editable = report_fName + '.h5' # append file extensions to the report stem report_fname_2check = report_fName + '.html' fig_raw = raw_eeg.plot(scalings=4e-5, n_channels=32, duration=8) fig_raw_psd = raw_eeg.plot_psd() report.add_figs_to_section(fig_raw, captions='raw_data', section='raw') report.add_figs_to_section(fig_raw_psd, captions='raw_psd', section='raw') raw_eeg.filter(fir[0], h_freq=fir[1]) if PREP: #### prep steps, require pyprep eeg_index = mne.pick_types(raw_eeg.info, eeg=True, eog=False, meg=False, emg=False) ch_names = raw_eeg.info["ch_names"] ch_names_eeg = list(np.asarray(ch_names)[eeg_index]) sample_rate = raw_eeg.info["sfreq"] montage_kind = "standard_1020" montage = mne.channels.make_standard_montage(montage_kind) raw_eeg_copy = raw_eeg.copy() # Fit prep prep_params = { 'ref_chs': ch_names_eeg, 'reref_chs': ch_names_eeg, 'line_freqs': np.arange(50, sample_rate / 2, 50) } prep = PrepPipeline(raw_eeg_copy, prep_params, montage) prep.fit() raw_eeg = prep.raw if report_fName is not None: fig_raw = raw_eeg.plot(scalings=4e-5, n_channels=32, duration=8) fig_raw_psd = raw_eeg.plot_psd() report.add_figs_to_section(fig_raw, captions='prep signal space', section='prep') report.add_figs_to_section(fig_raw_psd, captions='prep_psd', section='prep') report.add_htmls_to_section(htmls=[ str(prep.interpolated_channels), str(prep.noisy_channels_original['bad_all']), str(prep.still_noisy_channels) ], captions=[ 'interpolated channels', 'bad channels detected', "still noisy channels" ], section='prep', replace=False) report.save(report_fname_2check, overwrite=True) report.save(report_fname_editable, overwrite=True) eeg_len = raw_eeg._data.shape[1] emg_df = emg_df.iloc[0:round(emg_freq / raw_eeg.info["sfreq"]) * eeg_len] ch_types = ['emg'] * len(emg_df.columns) ch_names = [] for i in range(len(emg_df.columns)): ch_names.append('emg' + str(i + 1)) info = mne.create_info(ch_names=ch_names, sfreq=emg_freq, ch_types=ch_types) raw_emg = mne.io.RawArray(emg_df.T, info) raw_emg.resample(sfreq=sfreq_final) raw_emg.info['highpass'] = fir[0] raw_hybrid = raw_eeg.copy().add_channels([raw_emg]) return raw_hybrid
def run_ica1(method, t_start, t_end, fit_params=None): #def run_ica(method, my_epoch, fit_params=None): #raw = my_epoch #??? chn=['AF3','F7','F3','FC5','T7','P7','O1','O2','P8','T8','FC6','F4','F8','AF4'] ica = ICA(n_components=14, method=method, fit_params=fit_params, random_state=75) filt_raw = raw.copy() # Set the time window filt_raw.crop(tmin=t_start, tmax=t_end) filt_raw.load_data().filter(l_freq=2.5, h_freq=None) t0 = time() ica.fit(filt_raw) fit_time = time() - t0 title = ('ICA decomposition using %s (took %.1fs)' % (method, fit_time)) #ica.plot_sources(raw) ica.plot_sources(filt_raw).savefig('my_sources.png') #print("A list------------------------------------------------") ica.plot_components(title=title) #print("------------------------------------------------") # plt.savefig(ica.plot_components(title=title), 'comp.png') # doesn't work :'( #plt.savefig('fig1.png') # blank image ica.exclude = [] # find which ICs match the EOG pattern #eog_indices, eog_scores = ica.find_bads_eog(raw,ch_name='AF4') eog_indices, eog_scores = ica.find_bads_eog(filt_raw, ch_name='AF4') ica.exclude = eog_indices # barplot of ICA component "EOG match" scores ica.plot_scores(eog_scores) print(eog_indices) inpre = input("do you agree with these indices? (y/n): ") if inpre == 'y': print('ok, continuing') else: eog_indices = [] n = int(input("choose the number of indices to include: ")) for i in range(0, n): print("enter the index ") eog_indices.append(int(input(": "))) #ica.plot_properties(raw, picks=eog_indices) #ica.plot_properties(filt_raw, picks=eog_indices) # ERROR, why? ica.exclude = eog_indices # indices chosen based on various plots above # plot ICs applied to raw data, with EOG matches highlighted #ica.plot_sources(raw) ica.plot_sources(filt_raw) # plot ICs applied to the averaged EOG epochs, with EOG matches highlighted #ica.plot_overlay(raw,exclude=eog_indices) ica.plot_overlay(filt_raw, exclude=eog_indices) # ica.apply() changes the Raw object in-place, so let's make a copy first: #print('copying the original') #reconst_raw = raw.copy() #print('applying ICA') #ica.apply(reconst_raw) #raw.plot() filt_raw.plot() #print('showing the reconstructed data') #reconst_raw.plot() #plt.show() # Save fif file #filt_raw.save('filt_raw.fif') path = mne.datasets.sample.data_path(verbose=False) report = mne.Report(verbose=True) report.parse_folder(path, pattern='*raw.fif', render_bem=False) report.save('report_basic.html')
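# run_ica1() saves the sources figure to a PNG and then builds an unrelated parse_folder report;
# a minimal sketch of collecting the ICA figures into one mne.Report instead (the helper name and
# output file are hypothetical; with the 14 components used above, plot_components() returns a
# single figure):
def add_ica_to_report(ica, filt_raw, title, out_html='report_ica.html'):
    """Collect ICA source and component figures into a single mne.Report."""
    ica_report = mne.Report(verbose=True)
    fig_sources = ica.plot_sources(filt_raw, show=False)
    fig_components = ica.plot_components(title=title, show=False)
    ica_report.add_figs_to_section(fig_sources, captions='ICA sources', section='ICA')
    ica_report.add_figs_to_section(fig_components, captions='ICA components', section='ICA')
    ica_report.save(out_html, open_browser=False, overwrite=True)
    return ica_report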
def camcan_preproc(subject): data_folder = '/Users/work/camcan' data_file = os.path.join(data_folder, subject, '_ses-rest_task-rest_proc-raw.fif') #data_file = os.path.join(data_folder, # 'sub-CC _ses-rest_task-rest_proc-raw_sss.fif') report = mne.Report(verbose=True) report.save('report_basic.html', overwrite=True) # read file raw = mne.io.read_raw_fif(data_file, preload=True) print(raw.info) fig = raw.plot_psd(fmax=50, show=False) report.add_figs_to_section(fig, captions='Raw', section='PSD') meg_channels = mne.pick_types(raw.info, meg=True, stim=False, ref_meg=False) eog1 = mne.pick_channels(raw.ch_names, ['EOG061']) eog2 = mne.pick_channels(raw.ch_names, ['EOG062']) ecg = mne.pick_channels(raw.ch_names, ['ECG063']) mag_channels = mne.pick_types(raw.info, meg='mag') # Bandpass between 1 and 45 Hz, bandstop between 50 and 100 raw.filter(1, 45) freqs = (50, 100) raw.notch_filter(freqs=freqs) fig = raw.plot_psd(fmax=50, show=False) report.add_figs_to_section(fig, captions='After bandpass and notch filters', section='PSD') # 2s epochs events = mne.make_fixed_length_events(raw, start=0, stop=None, duration=2.0, overlap=0) reject = dict(grad=4000e-13, mag=4e-12, eog=250e-6) #from mne tutorial epochs = mne.Epochs(raw, events, baseline=None, preload=True, reject=reject) # Topoplot before ICA fig = epochs.plot_psd_topomap(ch_type='mag', normalize=True, show=False) report.add_figs_to_section(fig, captions='Topoplot before ICA', section='Topoplots') # ICA to remove artefacts from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs ica = ICA(n_components=0.95, method='fastica').fit(epochs) fig = ica.plot_components(show=False) ica.exclude = [] report.add_figs_to_section(fig, captions=('ICA components', ' '), section='ICA') # Find ECG artefacts ecg_epochs = create_ecg_epochs(raw) ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto') fig = ica.plot_components(ecg_inds, show=False) ica.exclude += ecg_inds report.add_figs_to_section(fig, captions='ECG components', section='ICA') # Find EOG artefacts eog_epochs = create_eog_epochs(raw, tmin=-.5, tmax=.5) eog_inds, scores = ica.find_bads_eog(eog_epochs) fig = ica.plot_components(eog_inds, show=False) ica.exclude += eog_inds report.add_figs_to_section(fig, captions='EOG components', section='ICA') # Apply ICA cleaned = epochs.copy() ica.apply(cleaned) fig = cleaned.plot_psd_topomap(ch_type='mag', normalize=True, show=False) report.add_figs_to_section(fig, captions='Topoplot after artefact rejection', section='Preprocessed') report.save('report.html', overwrite=True) # save fif file clean_file = os.path.join(data_folder, subject, '_cleaned.fif') cleaned.save(clean_file, overwrite=True)
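# camcan_preproc() handles a single subject and always writes 'report.html' and 'report_basic.html'
# to the working directory; a hypothetical driver loop (the subject IDs below are placeholders):
for subject in ['sub-CC110033', 'sub-CC120065']:
    camcan_preproc(subject)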
def _generate_report(data_file_before, raw_before_preprocessing, raw_after_preprocessing, bad_channels, report_cross_talk_file, report_calibration_file, report_head_pos_file, report_destination_file, report_param_destination, param_st_duration, param_st_correlation, param_origin, param_int_order, param_ext_order, param_coord_frame, param_regularize, param_ignore_ref, param_bad_condition, param_st_fixed, param_st_only, param_skip_by_annotation, param_mag_scale, param_extended_proj): # Generate a report # Create instance of mne.Report report = mne.Report(title='Results Maxwell filter', verbose=True) ## Give some info about the file before preprocessing ## sampling_frequency = raw_before_preprocessing.info['sfreq'] highpass = raw_before_preprocessing.info['highpass'] lowpass = raw_before_preprocessing.info['lowpass'] # Put this info in html format # # Info on data html_text_info = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Input file: {data_file_before}</td> </tr> <tr> <td>Bad channels: {bad_channels}</td> </tr> <tr> <td>Sampling frequency: {sampling_frequency}Hz</td> </tr> <tr> <td>Highpass: {highpass}Hz</td> </tr> <tr> <td>Lowpass: {lowpass}Hz</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_info, captions='MEG recording features', section='Data info', replace=False) # Plot MEG signals in temporal domain fig_raw = raw_before_preprocessing.pick(['meg'], exclude='bads').plot( duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False, show=False) fig_raw_maxfilter = raw_after_preprocessing.pick( ['meg'], exclude='bads').plot(duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False, show=False) # Plot power spectral density fig_raw_psd = raw_before_preprocessing.plot_psd(show=False) fig_raw_maxfilter_psd = raw_after_preprocessing.plot_psd(show=False) # Add figures to report report.add_figs_to_section(fig_raw, captions='MEG signals before Maxwell Filter', section='Temporal domain') report.add_figs_to_section(fig_raw_maxfilter, captions='MEG signals after Maxwell Filter', section='Temporal domain') report.add_figs_to_section( fig_raw_psd, captions='Power spectral density before Maxwell Filter', section='Frequency domain') report.add_figs_to_section( fig_raw_maxfilter_psd, captions='Power spectral density after Maxwell Filter', section='Frequency domain') # Info on SNR # html_text_snr = f"""<html> # <head> # <style type="text/css"> # table {{ border-collapse: collapse;}} # td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} # </style> # </head> # <body> # <table width="50%" height="80%" border="2px"> # <tr> # <td>SNR before MaxFilter: {snr_before}</td> # </tr> # <tr> # <td>SNR after MaxFilter: {snr_after}</td> # </tr> # </table> # </body> # </html>""" # report.add_htmls_to_section(html_text_snr, captions='Signal to noise ratio', section='Signal to noise ratio', # replace=False) ## Values of the parameters of the App ## mne_version = mne.__version__ # Put this info in html format # html_text_parameters = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> 
<td>Cross-talk file: {report_cross_talk_file}</td> </tr> <tr> <td>Calibration file: {report_calibration_file}</td> </tr> <tr> <td>Head position file: {report_head_pos_file}</td> </tr> <tr> <td>Destination file: {report_destination_file}</td> </tr> <tr> <td>Destination (if no destination file provided): {report_param_destination}</td> </tr> <tr> <td>Origin: {param_origin}</td> </tr> <tr> <td>Order of internal component of spherical expansion: {param_int_order}</td> </tr> <tr> <td>Order of external component of spherical expansion: {param_ext_order}</td> </tr> <tr> <td>Buffer duration: {param_st_duration} (in seconds)</td> </tr> <tr> <td>Correlation limit between inner and outer subspaces: {param_st_correlation}</td> </tr> <tr> <td>Coordinate frame: {param_coord_frame}</td> </tr> <tr> <td>Regularize: {param_regularize}</td> </tr> <tr> <td>Ignore reference channel: {param_ignore_ref}</td> </tr> <tr> <td>Bad condition: {param_bad_condition}</td> </tr> <tr> <td>Apply tSSS using the median head position: {param_st_fixed}</td> </tr> <tr> <td>Only tSSS projection of MEG data: {param_st_only}</td> </tr> <tr> <td>Magnetometer scale-factor: {param_mag_scale}</td> </tr> <tr> <td>Skip by annotation: {param_skip_by_annotation}</td> </tr> <tr> <td>Empty-room projection vectors: {param_extended_proj}</td> </tr> <tr> <td>MNE version used: {mne_version}</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_parameters, captions='Values of the parameters of the App', section='Parameters of the App', replace=False) # Save report report.save('out_dir_report/report_maxwell_filter.html', overwrite=True)
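# The parameter table above repeats the same <table> boilerplate for every row; a small helper
# sketch that renders a {label: value} dict instead (hypothetical helper, same centered/dashed
# styling as the tables above):
def add_param_table(report, params, captions, section):
    """Render a dict of label/value pairs as an HTML table in an mne.Report."""
    style = ('<style type="text/css">table { border-collapse: collapse; } '
             'td { text-align: center; border: 1px dashed #000000; font-size: 15px; }</style>')
    rows = ''.join(f'<tr><td>{label}: {value}</td></tr>' for label, value in params.items())
    html = f'<html><head>{style}</head><body><table width="50%" border="2px">{rows}</table></body></html>'
    report.add_htmls_to_section(html, captions=captions, section=section, replace=False)

# e.g. add_param_table(report, {'Origin': param_origin, 'Coordinate frame': param_coord_frame},
#                      captions='Values of the parameters of the App', section='Parameters of the App')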
def _generate_report(data_file_before, raw_before_preprocessing, raw_after_preprocessing, bad_channels, snr_before, snr_after): # Generate a report # Instance of mne.Report report = mne.Report(title='Results Maxfilter', verbose=True) # Plot MEG signals in temporal domain fig_raw = raw_before_preprocessing.pick(['meg'], exclude='bads').plot( duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False) fig_raw_maxfilter = raw_after_preprocessing.pick( ['meg'], exclude='bads').plot(duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False) # Plot power spectral density fig_raw_psd = raw_before_preprocessing.plot_psd() fig_raw_maxfilter_psd = raw_after_preprocessing.plot_psd() # Add figures to report # Add figures to report report.add_figs_to_section(fig_raw, captions='MEG signals before MaxFilter', section='Temporal domain') report.add_figs_to_section(fig_raw_maxfilter, captions='MEG signals after MaxFilter', section='Temporal domain') report.add_figs_to_section( fig_raw_psd, captions='Power spectral density before MaxFilter', section='Frequency domain') report.add_figs_to_section( fig_raw_maxfilter_psd, captions='Power spectral density after MaxFilter', section='Frequency domain') # Put this info in html format # Give some info about the file before preprocessing sampling_frequency = raw_before_preprocessing.info['sfreq'] highpass = raw_before_preprocessing.info['highpass'] lowpass = raw_before_preprocessing.info['lowpass'] # Put this info in html format # Info on data html_text_info = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Input file: {data_file_before}</td> </tr> <tr> <td>Bad channels: {bad_channels}</td> </tr> <tr> <td>Sampling frequency: {sampling_frequency}Hz</td> </tr> <tr> <td>Highpass: {highpass}Hz</td> </tr> <tr> <td>Lowpass: {lowpass}Hz</td> </tr> </table> </body> </html>""" # Info on SNR html_text_snr = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>SNR before MaxFilter: {snr_before}</td> </tr> <tr> <td>SNR after MaxFilter: {snr_after}</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_info, captions='MEG recording features', section='Info', replace=False) report.add_htmls_to_section(html_text_snr, captions='Signal to noise ratio', section='Signal to noise ratio', replace=False) # Save report report.save('out_dir_report/report_maxfilter.html', overwrite=True)
import mne import mne_bids import numpy as np from mayavi import mlab report = mne.Report() # Paths bids_root = mne.datasets.somato.data_path() raw_fname = bids_root + '/sub-01/meg/sub-01_task-somato_meg.fif' fwd_fname = bids_root + '/derivatives/sub-01/sub-01_task-somato-fwd.fif' subjects_dir = bids_root + '/derivatives/freesurfer/subjects' subject_id = '01' # Load raw data (tSSS already applied) raw = mne_bids.read_raw_bids(raw_fname, bids_root) raw.load_data() report.add_figs_to_section(raw.plot_psd(), 'PSD of unfiltered raw', 'Raw', replace=True) # Filter the data raw_filtered = raw.copy().filter(1, 40) report.add_figs_to_section(raw_filtered.plot_psd(), 'PSD of bandpass filtered raw', 'Bandpass filter', replace=True) # Fit ICA to the continuous data ica = mne.preprocessing.ICA(n_components=0.999).fit(raw_filtered)
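# The snippet stops right after fitting the ICA; a minimal sketch of one possible next step --
# flagging cardiac components and logging the result in the same report (this continuation is an
# assumption, not part of the original script; with no ECG channel in the recording,
# create_ecg_epochs builds a synthetic ECG trace from the MEG channels):
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw_filtered)
ecg_inds, ecg_scores = ica.find_bads_ecg(ecg_epochs)
ica.exclude = ecg_inds
report.add_figs_to_section(ica.plot_scores(ecg_scores, show=False),
                           'ICA component scores vs. ECG', 'ICA', replace=True)
report.add_figs_to_section(ica.plot_overlay(raw_filtered, exclude=ecg_inds, show=False),
                           'Raw signal before/after removing ECG components', 'ICA', replace=True)
report.save('report_preproc.html', open_browser=False, overwrite=True)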
def _generate_report(data_file_before, data_before_preprocessing, data_after_preprocessing, bad_channels, comments_about_filtering, param_epoched_data, param_l_freq, param_h_freq, param_picks_by_channel_types_or_names, param_filter_length, param_picks_by_channel_indices, param_l_trans_bandwidth, param_h_trans_bandwidth, param_n_jobs, param_method, param_iir_params, param_phase, param_fir_window, param_fir_design, param_skip_by_annotation, param_raw_pad, param_epoch_pad): # Generate a report # Instance of mne.Report report = mne.Report(title='Results of filtering ', verbose=True) # Put this info in html format # # Check if MaxFilter was already applied on the data # if data_before_preprocessing.info['proc_history']: sss_info = data_before_preprocessing.info['proc_history'][0][ 'max_info']['sss_info'] tsss_info = data_before_preprocessing.info['proc_history'][0][ 'max_info']['max_st'] if bool(sss_info) or bool(tsss_info) is True: message_channels = f'Bad channels have been interpolated during MaxFilter' else: message_channels = bad_channels else: message_channels = bad_channels # Give some info about the file before preprocessing sampling_frequency = data_before_preprocessing.info['sfreq'] highpass = data_before_preprocessing.info['highpass'] lowpass = data_before_preprocessing.info['lowpass'] # Info on data # html_text_info = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Input file: {data_file_before}</td> </tr> <tr> <td>Epoched data: {param_epoched_data}</td> </tr> <tr> <td>Bad channels: {message_channels}</td> </tr> <tr> <td>Sampling frequency: {sampling_frequency}Hz</td> </tr> <tr> <td>Highpass before preprocessing: {highpass}Hz</td> </tr> <tr> <td>Lowpass before preprocessing: {lowpass}Hz</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_info, captions='Data recording features', section='Data info', replace=False) # Define param_picks if param_picks_by_channel_types_or_names is None and param_picks_by_channel_indices is not None: param_picks = param_picks_by_channel_indices elif param_picks_by_channel_types_or_names is not None and param_picks_by_channel_indices is None: param_picks = param_picks_by_channel_types_or_names else: param_picks = None ## Plot figures for raw data ## if param_epoched_data is False: # Plot MEG signals in temporal domain fig_raw = data_before_preprocessing.pick( param_picks, exclude='bads').plot(duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False, show=False) fig_raw_filtered = data_after_preprocessing.pick( param_picks, exclude='bads').plot(duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False, show=False) # Plot power spectral density fig_raw_psd = data_before_preprocessing.plot_psd(picks=param_picks, show=False) fig_raw_filtered_psd = data_after_preprocessing.plot_psd( picks=param_picks, show=False) # Add figures to report report.add_figs_to_section(fig_raw, captions='Signals before filtering', section='Temporal domain') report.add_figs_to_section(fig_raw_filtered, captions='Signals after filtering', comments=comments_about_filtering, section='Temporal domain') report.add_figs_to_section( fig_raw_psd, captions='Power spectral density before filtering', section='Frequency domain') report.add_figs_to_section( fig_raw_filtered_psd, captions='Power 
spectral density after filtering', comments=comments_about_filtering, section='Frequency domain') param_pad = param_raw_pad ## Plot figures for epoched data ## else: # Plot MEG signals in temporal domain fig_epoch = data_before_preprocessing.plot(picks=param_picks, scalings="auto", butterfly=False, show_scrollbars=False, show=False) fig_epoch_filtered = data_after_preprocessing.plot( picks=param_picks, scalings="auto", butterfly=False, show_scrollbars=False, show=False) # Plot power spectral density fig_epoch_psd = data_before_preprocessing.plot_psd(picks=param_picks, show=False) fig_epoch_filtered_psd = data_after_preprocessing.plot_psd( picks=param_picks, show=False) # Add figures to report report.add_figs_to_section(fig_epoch, captions='Signals before filtering', section='Temporal domain') report.add_figs_to_section(fig_epoch_filtered, captions='Signals after filtering', comments=comments_about_filtering, section='Temporal domain') report.add_figs_to_section( fig_epoch_psd, captions='Power spectral density before filtering', section='Frequency domain') report.add_figs_to_section( fig_epoch_filtered_psd, captions='Power spectral density after filtering', comments=comments_about_filtering, section='Frequency domain') param_pad = param_epoch_pad # # Info on SNR # html_text_snr = f"""<html> # <head> # <style type="text/css"> # table {{ border-collapse: collapse;}} # td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} # </style> # </head> # <body> # <table width="50%" height="80%" border="2px"> # <tr> # <td>SNR before filtering: {snr_before}</td> # </tr> # <tr> # <td>SNR after filtering: {snr_after}</td> # </tr> # </table> # </body> # </html>""" ## Values of the parameters of the App ## mne_version = mne.__version__ # Put this info in html format # html_text_parameters = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Temporal filtering: {comments_about_filtering}</td> </tr> <tr> <td>Types or names of channels to include: {param_picks_by_channel_types_or_names}</td> </tr> <tr> <td>Indices of channels to include: {param_picks_by_channel_indices}</td> </tr> <tr> <td>Filter length: {param_filter_length}</td> </tr> <tr> <td>Width of the transition band at the low cut-off frequency: {param_l_trans_bandwidth}</td> </tr> <tr> <td>Width of the transition band at the high cut-off frequency: {param_h_trans_bandwidth}</td> </tr> <tr> <td>Number of jobs to run in parallel: {param_n_jobs}</td> </tr> <tr> <td>Method: {param_method}</td> </tr> <tr> <td>IIR parameters: {param_iir_params}</td> </tr> <tr> <td>Phase: {param_phase}</td> </tr> <tr> <td>FIR window: {param_fir_window}</td> </tr> <tr> <td>FIR design: {param_fir_design}</td> </tr> <tr> <td>Skip by annotation: {param_skip_by_annotation}</td> </tr> <tr> <td>Type of padding: {param_pad}</td> </tr> <tr> <td>MNE version used: {mne_version}</td> </tr> </table> </body> </html>""" # Add html to report report.add_htmls_to_section(html_text_parameters, captions='Summary of the filtering applied', section='Filtering info', replace=False) # report.add_htmls_to_section(html_text_snr, captions='Signal to noise ratio', section='Signal to noise ratio', # replace=False) # Save report report.save('out_dir_report/report_filtering.html', overwrite=True)
def run_report_average(*, cfg, subject: str, session: str) -> None: # Group report import matplotlib.pyplot as plt # nested import to help joblib evoked_fname = BIDSPath(subject=subject, session=session, task=cfg.task, acquisition=cfg.acq, run=None, recording=cfg.rec, space=cfg.space, suffix='ave', extension='.fif', datatype=cfg.datatype, root=cfg.deriv_root, check=False) title = f'sub-{subject}' if session is not None: title += f', ses-{session}' if cfg.task is not None: title += f', task-{cfg.task}' rep = mne.Report(info_fname=evoked_fname, subject='fsaverage', subjects_dir=cfg.fs_subjects_dir, title=title) evokeds = mne.read_evokeds(evoked_fname) if cfg.analyze_channels: for evoked in evokeds: evoked.pick(cfg.analyze_channels) method = cfg.inverse_method inverse_str = method hemi_str = 'hemi' # MNE will auto-append '-lh' and '-rh'. morph_str = 'morph2fsaverage' if isinstance(cfg.conditions, dict): conditions = list(cfg.conditions.keys()) else: conditions = cfg.conditions.copy() conditions.extend(cfg.contrasts) ####################################################################### # # Add events end epochs drop log stats. # add_event_counts(cfg=cfg, report=rep, session=session) ####################################################################### # # Visualize evoked responses. # for condition, evoked in zip(conditions, evokeds): if condition in cfg.conditions: caption = f'Average: {condition}' section = 'Evoked' else: # It's a contrast of two conditions. caption = f'Average Contrast: {condition[0]} – {condition[1]}' section = 'Contrast' fig = evoked.plot(spatial_colors=True, gfp=True, show=False) rep.add_figs_to_section(figs=fig, captions=caption, comments=evoked.comment, section=section) ####################################################################### # # Visualize decoding results. # if cfg.decode: for contrast in cfg.contrasts: cond_1, cond_2 = contrast a_vs_b = f'{cond_1}+{cond_2}'.replace(op.sep, '') processing = f'{a_vs_b}+{cfg.decoding_metric}' processing = processing.replace('_', '-').replace('-', '') fname_decoding_ = evoked_fname.copy().update(processing=processing, suffix='decoding', extension='.mat') decoding_data = loadmat(fname_decoding_) del fname_decoding_, processing, a_vs_b fig = plot_decoding_scores_gavg(cfg=cfg, decoding_data=decoding_data) caption = f'Time-by-time Decoding: {cond_1} ./. {cond_2}' comment = (f'Based on N={decoding_data["N"].squeeze()} ' f'subjects. Standard error and confidence interval ' f'of the mean were bootstrapped with {cfg.n_boot} ' f'resamples.') rep.add_figs_to_section(figs=fig, captions=caption, comments=comment, section='Decoding') del decoding_data, cond_1, cond_2, caption, comment ####################################################################### # # Visualize inverse solutions. # for condition, evoked in zip(conditions, evokeds): if condition in cfg.conditions: caption = f'Average: {condition}' cond_str = config.sanitize_cond_name(condition) else: # It's a contrast of two conditions. # XXX Will change once we process contrasts here too continue section = 'Source' fname_stc_avg = evoked_fname.copy().update( suffix=f'{cond_str}+{inverse_str}+{morph_str}+{hemi_str}', extension=None) if op.exists(str(fname_stc_avg) + "-lh.stc"): stc = mne.read_source_estimate(fname_stc_avg, subject='fsaverage') _, peak_time = stc.get_peak() # Plot using 3d backend if available, and use Matplotlib # otherwise. 
if mne.viz.get_3d_backend() is not None: brain = stc.plot(views=['lat'], hemi='both', initial_time=peak_time, backend='pyvistaqt', time_viewer=True, show_traces=True, subjects_dir=cfg.fs_subjects_dir) brain.toggle_interface() figs = brain._renderer.figure captions = caption else: fig_lh = plt.figure() fig_rh = plt.figure() brain_lh = stc.plot(views='lat', hemi='lh', initial_time=peak_time, backend='matplotlib', figure=fig_lh, subjects_dir=cfg.fs_subjects_dir) brain_rh = stc.plot(views='lat', hemi='rh', initial_time=peak_time, backend='matplotlib', figure=fig_rh, subjects_dir=cfg.fs_subjects_dir) figs = [brain_lh, brain_rh] captions = [f'{caption} - left', f'{caption} - right'] rep.add_figs_to_section(figs=figs, captions=captions, section='Sources') del peak_time fname_report = evoked_fname.copy().update(task=cfg.task, suffix='report', extension='.html') rep.save(fname=fname_report, open_browser=False, overwrite=True) plt.close('all') # close all figures to save memory
def _generate_report(data_file_before, raw_before_preprocessing, raw_after_preprocessing, bad_channels, comments_notch, param_freqs_specific_or_start, param_freqs_end, param_freqs_step, param_picks_by_channel_types_or_names, param_picks_by_channel_indices, param_filter_length, param_notch_widths, param_trans_bandwidth, param_n_jobs, param_method, param_iir_parameters, param_mt_bandwidth, param_p_value, param_phase, param_fir_window, param_fir_design, param_pad): # Generate a report # Instance of mne.Report report = mne.Report(title='Results of filtering ', verbose=True) ## Give some info about the file before preprocessing ## # Check if MaxFilter was already applied on the data # if raw_before_preprocessing.info['proc_history']: sss_info = raw_before_preprocessing.info['proc_history'][0][ 'max_info']['sss_info'] tsss_info = raw_before_preprocessing.info['proc_history'][0][ 'max_info']['max_st'] if bool(sss_info) or bool(tsss_info) is True: message_channels = f'Bad channels have been interpolated during MaxFilter' else: message_channels = bad_channels else: message_channels = bad_channels # Give some info about the file before preprocessing sampling_frequency = raw_before_preprocessing.info['sfreq'] highpass = raw_before_preprocessing.info['highpass'] lowpass = raw_before_preprocessing.info['lowpass'] # Put this info in html format # html_text_info = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Input file: {data_file_before}</td> </tr> <tr> <td>Bad channels: {message_channels}</td> </tr> <tr> <td>Sampling frequency before preprocessing: {sampling_frequency}Hz</td> </tr> <tr> <td>Highpass before preprocessing: {highpass}Hz</td> </tr> <tr> <td>Lowpass before preprocessing: {lowpass}Hz</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_info, captions='Data recording features', section='Data info', replace=False) # Define param_picks if param_picks_by_channel_types_or_names is None and param_picks_by_channel_indices is not None: param_picks = param_picks_by_channel_indices elif param_picks_by_channel_types_or_names is not None and param_picks_by_channel_indices is None: param_picks = param_picks_by_channel_types_or_names else: param_picks = None fig_raw = raw_before_preprocessing.pick(param_picks, exclude='bads').plot( duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False, show=False) fig_raw_maxfilter = raw_after_preprocessing.pick(param_picks, exclude='bads').plot( duration=10, scalings='auto', butterfly=False, show_scrollbars=False, proj=False, show=False) # Plot power spectral density fig_raw_psd = raw_before_preprocessing.plot_psd(picks=param_picks, show=False) fig_raw_maxfilter_psd = raw_after_preprocessing.plot_psd(picks=param_picks, show=False) # Add figures to report report.add_figs_to_section(fig_raw, captions='Signals before notch filtering', section='Temporal domain') report.add_figs_to_section(fig_raw_maxfilter, captions='Signals after notch filtering', comments=f'Notch Filter: {comments_notch}', section='Temporal domain') report.add_figs_to_section( fig_raw_psd, captions='Power spectral density before notch filtering', section='Frequency domain') report.add_figs_to_section( fig_raw_maxfilter_psd, captions='Power spectral density after notch filtering', comments=f'Notch Filter: {comments_notch}', 
section='Frequency domain') # Info on SNR # html_text_snr = f"""<html> # <head> # <style type="text/css"> # table {{ border-collapse: collapse;}} # td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} # </style> # </head> # <body> # <table width="50%" height="80%" border="2px"> # <tr> # <td>SNR before filtering: {snr_before}</td> # </tr> # <tr> # <td>SNR after filtering: {snr_after}</td> # </tr> # </table> # </body> # </html>""" # report.add_htmls_to_section(html_text_snr, captions='Signal to noise ratio', section='Signal to noise ratio', # replace=False) ## Values of the parameters of the App ## mne_version = mne.__version__ # Put this info in html format # html_text_parameters = f"""<html> <head> <style type="text/css"> table {{ border-collapse: collapse;}} td {{ text-align: center; border: 1px solid #000000; border-style: dashed; font-size: 15px; }} </style> </head> <body> <table width="50%" height="80%" border="2px"> <tr> <td>Frequency to notch filter: {param_freqs_specific_or_start} Hz</td> </tr> <tr> <td>End of the interval of frequencies to filter out: {param_freqs_end} Hz</td> </tr> <tr> <td>Step to filter out specific frequency between the two previous frequencies: {param_freqs_step} Hz</td> </tr> <tr> <td>Types or names of channels to include: {param_picks_by_channel_types_or_names}</td> </tr> <tr> <td>Indices of channels to include: {param_picks_by_channel_indices}</td> </tr> <tr> <td>Length of the FIR filter {param_filter_length}</td> </tr> <tr> <td>Widths of the stop band: {param_notch_widths} Hz</td> </tr> <tr> <td>Width of the transition band: {param_trans_bandwidth}</td> </tr> <tr> <td>Number of jobs to run in parallel: {param_n_jobs}</td> </tr> <tr> <td>Filtering method: {param_method}</td> </tr> <tr> <td>IIR parameters: {param_iir_parameters}</td> </tr> <tr> <td>Bandwidth of the multitaper windowing: {param_mt_bandwidth} Hz</td> </tr> <tr> <td>p-value: {param_p_value}</td> </tr> <tr> <td>Phase of the filter: {param_phase}</td> </tr> <tr> <td>FIR window: {param_fir_window}</td> </tr> <tr> <td>FIR design: {param_fir_design}</td> </tr> <tr> <td>Type of padding: {param_pad}</td> </tr> <tr> <td>MNE version used: {mne_version}</td> </tr> </table> </body> </html>""" # Add html to reports report.add_htmls_to_section(html_text_parameters, captions='Values of the parameters of the App', section='Parameters of the App', replace=False) # Save report report.save('out_dir_report/report_filtering.html', overwrite=True)
# # On successful creation of the report, the :meth:`~mne.Report.save` method # will open the HTML in a new tab in the browser. To disable this, use the # ``open_browser=False`` parameter of :meth:`~mne.Report.save`. # # For our first example, we'll generate a barebones report for all the # :file:`.fif` files containing raw data in the sample dataset, by passing the # pattern ``*raw.fif`` to :meth:`~mne.Report.parse_folder`. We'll omit the # ``subject`` and ``subjects_dir`` parameters from the :class:`~mne.Report` # constructor, but we'll also pass ``render_bem=False`` to the # :meth:`~mne.Report.parse_folder` method — otherwise we would get a warning # about not being able to render MRI and ``trans`` files without knowing the # subject. path = mne.datasets.sample.data_path(verbose=False) report = mne.Report(verbose=True) report.parse_folder(path, pattern='*raw.fif', render_bem=False) report.save('report_basic.html', overwrite=True) ############################################################################### # This report yields a textual summary of the :class:`~mne.io.Raw` files # selected by the pattern. For a slightly more useful report, we'll ask for the # power spectral density of the :class:`~mne.io.Raw` files, by passing # ``raw_psd=True`` to the :class:`~mne.Report` constructor. We'll also # visualize the SSP projectors stored in the raw data's `~mne.Info` dictionary # by setting ``projs=True``. Lastly, let's also refine our pattern to select # only the filtered raw recording (omitting the unfiltered data and the # empty-room noise recordings): pattern = 'sample_audvis_filt-0-40_raw.fif' report = mne.Report(raw_psd=True, projs=True, verbose=True)
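# Continuing the example with the refined pattern (a sketch of the usual next step; the output
# file name is arbitrary):
report.parse_folder(path, pattern=pattern, render_bem=False)
report.save('report_raw_psd.html', overwrite=True)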
def saflow_preproc(filepath, savepath, reportpath): report = mne.Report(verbose=True) raw = read_raw_ctf(filepath, preload=True) raw_data = raw.copy().apply_gradient_compensation( grade=3) #required for source reconstruction picks = mne.pick_types(raw_data.info, meg=True, eog=True, exclude='bads') fig = raw_data.plot(show=False) report.add_figs_to_section(fig, captions='Time series', section='Raw data') close(fig) fig = raw_data.plot_psd(average=False, picks=picks, show=False) report.add_figs_to_section(fig, captions='PSD', section='Raw data') close(fig) ## Filtering high_cutoff = 200 low_cutoff = 0.5 raw_data.filter(low_cutoff, high_cutoff, fir_design="firwin") raw_data.notch_filter(np.arange(60, high_cutoff + 1, 60), picks=picks, filter_length='auto', phase='zero', fir_design="firwin") fig = raw_data.plot_psd(average=False, picks=picks, fmax=120, show=False) report.add_figs_to_section(fig, captions='PSD', section='Filtered data') close(fig) ## ICA ica = ICA(n_components=20, random_state=0).fit(raw_data, decim=3) fig = ica.plot_sources(raw_data, show=False) report.add_figs_to_section(fig, captions='Independent Components', section='ICA') close(fig) ## FIND ECG COMPONENTS ecg_threshold = 0.50 ecg_epochs = create_ecg_epochs(raw_data, ch_name='EEG059') ecg_inds, ecg_scores = ica.find_bads_ecg(ecg_epochs, ch_name='EEG059', method='ctps', threshold=ecg_threshold) fig = ica.plot_scores(ecg_scores, ecg_inds, show=False) report.add_figs_to_section(fig, captions='Correlation with ECG (EEG059)', section='ICA - ECG') close(fig) fig = list() try: fig = ica.plot_properties(ecg_epochs, picks=ecg_inds, image_args={'sigma': 1.}, show=False) for i, figure in enumerate(fig): report.add_figs_to_section(figure, captions='Detected component ' + str(i), section='ICA - ECG') close(figure) except: print('No component to remove') ## FIND EOG COMPONENTS eog_threshold = 4 eog_epochs = create_eog_epochs(raw_data, ch_name='EEG057') eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='EEG057', threshold=eog_threshold) fig = ica.plot_scores(eog_scores, eog_inds, show=False) report.add_figs_to_section(fig, captions='Correlation with EOG (EEG057)', section='ICA - EOG') close(fig) fig = list() try: fig = ica.plot_properties(eog_epochs, picks=eog_inds, image_args={'sigma': 1.}, show=False) for i, figure in enumerate(fig): report.add_figs_to_section(figure, captions='Detected component ' + str(i), section='ICA - EOG') close(figure) except: print('No component to remove') ## EXCLUDE COMPONENTS ica.exclude = ecg_inds ica.apply(raw_data) ica.exclude = eog_inds ica.apply(raw_data) fig = raw_data.plot(show=False) # Plot the clean signal. report.add_figs_to_section(fig, captions='After filtering + ICA', section='Raw data') close(fig) ## SAVE PREPROCESSED FILE report.save(reportpath, open_browser=False, overwrite=True) raw_data.save(savepath, overwrite=False) return raw_data
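# A minimal usage sketch for saflow_preproc(); the file paths below are hypothetical placeholders,
# not from the original project:
if __name__ == '__main__':
    filepath = '/path/to/meg/subject01.ds'
    savepath = '/path/to/derivatives/subject01_preproc_raw.fif'
    reportpath = '/path/to/reports/subject01_report.html'
    saflow_preproc(filepath, savepath, reportpath)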