def test_render_non_fiff(tmp_path):
    """Test rendering non-FIFF files for mne report."""
    out_dir = str(tmp_path)
    # Copy each non-FIFF fixture into the temp dir under a *_raw name so
    # parse_folder picks it up as a raw file.
    copied = []
    for src in (bdf_fname, edf_fname):
        stem, ext = op.splitext(op.basename(src))
        dst_name = f'{stem}_raw{ext}'
        shutil.copyfile(src, op.join(out_dir, dst_name))
        copied.append(dst_name)

    report = Report()
    report.parse_folder(data_path=out_dir, render_bem=False,
                        on_error='raise', raw_butterfly=False)

    # Each copied file must have produced exactly one content element.
    rendered = [op.basename(el.name) for el in report._content]
    for dst_name in copied:
        assert op.basename(dst_name) in rendered
    assert len(report._content) == len(copied)

    report.data_path = out_dir
    html_fname = op.join(out_dir, 'report.html')
    report.save(fname=html_fname, open_browser=False)
    html = Path(html_fname).read_text(encoding='utf-8')

    assert 'test_raw.bdf' in html
    assert 'test_raw.edf' in html
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    report = Report(info_fname=raw_fname)
    # Capture warnings emitted while parsing; exactly one is expected here.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=base_dir)
    assert_true(len(w) == 1)

    # Check correct paths and filenames
    assert_true(raw_fname in report.fnames)
    assert_true(event_name in report.fnames)
    assert_true(report.data_path == base_dir)

    # Check if all files were rendered in the report
    fnames = glob.glob(op.join(base_dir, '*.fif'))
    bad_name = 'test_ctf_comp_raw-eve.fif'
    # NOTE(review): `decrement` is a bool used as 0/1 below — presumably the
    # stray CTF event file bumps `initial_id` by one when present; confirm.
    decrement = any(fname.endswith(bad_name) for fname in fnames)
    fnames = [fname for fname in fnames
              if fname.endswith(('-eve.fif', '-ave.fif', '-cov.fif',
                                 '-sol.fif', '-fwd.fif', '-inv.fif',
                                 '-src.fif', '-trans.fif', 'raw.fif',
                                 'sss.fif', '-epo.fif')) and
              not fname.endswith(bad_name)]
    # last file above gets created by another test, and it shouldn't be there

    for fname in fnames:
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    evoked1 = read_evokeds(evoked1_fname)
    evoked2 = read_evokeds(evoked2_fname)
    # Each evoked file contributes len(evokeds) ids but only one fname entry,
    # hence the "- 2" correction (one per evoked file).
    assert_equal(len(report.fnames) + len(evoked1) + len(evoked2) - 2,
                 report.initial_id - decrement)

    # Check saving functionality
    # NOTE(review): `tempdir` is not defined in this function — it appears to
    # come from module scope; confirm where it is created.
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check add_section functionality
    fig = evoked1[0].plot(show=False)
    report.add_section(figs=fig,  # test non-list input
                       captions=['evoked response'])
    assert_equal(len(report.html), len(fnames) + 1)
    assert_equal(len(report.html), len(report.fnames))
    # Mismatched figs/captions lengths must raise.
    assert_raises(ValueError, report.add_section, figs=[fig, fig],
                  captions='H')

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))
def test_render_non_fiff(tmpdir):
    """Test rendering non-FIFF files for mne report."""
    out_dir = str(tmpdir)
    # Copy the BDF/EDF fixtures under *_raw names so they parse as raw files.
    copied = []
    for src in (bdf_fname, edf_fname):
        stem, ext = op.splitext(op.basename(src))
        dst_name = f'{stem}_raw{ext}'
        shutil.copyfile(src, op.join(out_dir, dst_name))
        copied.append(dst_name)

    report = Report()
    report.parse_folder(data_path=out_dir, render_bem=False,
                        on_error='raise')

    # Every copied file should appear in the parsed filenames, one-to-one.
    parsed = [op.basename(x) for x in report.fnames]
    for dst_name in copied:
        assert op.basename(dst_name) in parsed
    assert len(report.fnames) == len(copied)
    assert len(report.html) == len(report.fnames)
    assert len(report.fnames) == len(report)

    report.data_path = out_dir
    html_fname = op.join(out_dir, 'report.html')
    report.save(fname=html_fname, open_browser=False)
    with open(html_fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '<h4>Raw: test_raw.bdf</h4>' in html
    assert '<h4>Raw: test_raw.edf</h4>' in html
def test_render_report_extra(renderer_pyvistaqt, tmp_path, invisible_fig):
    """Test SVG and projector rendering separately."""
    # ... otherwise things are very slow
    out_dir = str(tmp_path)
    raw_copy = op.join(out_dir, 'temp_raw.fif')
    shutil.copyfile(raw_fname, raw_copy)

    report = Report(info_fname=raw_copy, subjects_dir=subjects_dir,
                    projs=True, image_format='svg')
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=out_dir, on_error='raise',
                            n_time_points_evokeds=2, raw_butterfly=False,
                            stc_plot_kwargs=stc_plot_kwargs,
                            topomap_kwargs=topomap_kwargs)
    assert repr(report)

    report.data_path = out_dir
    html_fname = op.join(out_dir, 'report.html')
    report.save(fname=html_fname, open_browser=False)
    assert op.isfile(html_fname)
    html = Path(html_fname).read_text(encoding='utf-8')
    # Projectors in Raw.info
    assert 'SSP Projectors' in html
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    # Copy every fixture into an isolated temp dir so parse_folder only
    # sees the files this test created.
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    raw = Raw(raw_fname_new)
    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
    epochs.save(epochs_fname)
    epochs.average().save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    # At least one warning is expected while parsing (e.g. missing MRI).
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir)
    assert_true(len(w) >= 1)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(op.basename(fname) in
                    [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Saving must not change the rendered content.
    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    # NOTE(review): this is a near-duplicate of another version of the same
    # test in this file — only assertion formatting differs.
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    raw = Raw(raw_fname_new)
    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
    epochs.save(epochs_fname)
    epochs.average().save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    # At least one warning is expected while parsing (e.g. missing MRI).
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir)
    assert_true(len(w) >= 1)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Saving must not change the rendered content.
    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))
def setup_provenance(script, results_dir, config=None, use_agg=True,
                     run_id=None):
    """Setup provenance tracking

    Parameters
    ----------
    script : str
        The script that was executed.
    results_dir : str
        The results directory.
    config : None | str
        The name of the config file. By default, the function expects the
        config to be under `__script__/' named `config.py`. It can also be
        another kind of textfile, e.g. .json.
    use_agg : bool
        Whether to use the 'Agg' backend for matplotlib or not.
    run_id : None | str
        An existing run id to reuse. If None, a new one is generated via
        ``create_run_id()``.

    Returns
    -------
    report : mne.report.Report
        The mne report.

    Side-effects
    ------------
    - make results dir if it does not exist
    - sets log file for stderr output
    - writes log file with runtime information
    """
    if use_agg is True:
        import matplotlib
        matplotlib.use('Agg')

    if not op.isfile(script):
        raise ValueError('sorry, this is not a script!')
    if not op.isdir(results_dir):
        # Fall back to a results dir next to the script's parent directory.
        results_dir = op.join(op.dirname(op.dirname(script)), results_dir)

    # One sub-directory per processing step, named after the script file.
    step = op.splitext(op.split(script)[1])[0]
    if not op.isabs(results_dir):
        results_dir = op.abspath(results_dir)
    start_path = op.dirname(results_dir)
    results_dir = op.join(results_dir, step)
    if not op.exists(results_dir):
        logger.info('generating results dir')
        # NOTE(review): helper name contains a typo ('_forec_'); it is
        # defined elsewhere, so it is kept as-is here.
        _forec_create_dir(results_dir, start=start_path)

    if run_id is None:
        run_id = create_run_id()
        logger.info('generated run id: %s' % run_id)
    else:
        logger.info('using existing run id: %s' % run_id)

    logger.info('preparing logging:')
    logging_dir = op.join(results_dir, run_id)
    if not op.exists(logging_dir):
        logger.info('... making logging directory: %s' % logging_dir)
        os.mkdir(logging_dir)
    else:
        logger.info('... using logging directory: %s' % logging_dir)

    # Record the versions of loaded modules for reproducibility.
    modules = get_versions(sys)
    runtime_log = op.join(logging_dir, 'run_time.json')
    with open(runtime_log, 'w') as fid:
        json.dump(modules, fid)
    logger.info('... writing runtime info to: %s' % runtime_log)

    # Snapshot the calling script's source next to the logs (only once).
    script_code = op.join(logging_dir, 'script.py')
    if not op.isfile(script_code):
        with open(script_code, 'w') as fid:
            with open(script) as script_fid:
                source_code = script_fid.read()
            fid.write(source_code)
        logger.info('... logging source code of calling script')

    if config is None:
        config = 'config.py'
    if op.isabs(config):
        config_fname = config
    else:
        # Relative config paths are resolved against the script's directory.
        config_fname = op.join(op.dirname(script), config)

    config_code = op.join(  # weird behavior of join if last arg is path
        results_dir, run_id, op.split(config_fname)[-1])
    if not op.isfile(config_fname):
        logger.info('... No config found. Logging nothing.')
    elif op.isfile(config_code):
        logger.info('... Config already written. I assume that you are using'
                    ' the same run_id for different runs of your script.')
    else:
        with open(config_code, 'w') as fid:
            with open(config_fname) as config_fid:
                source_code = config_fid.read()
            fid.write(source_code)
        logger.info('... logging source code of "%s".' % config_fname)

    logger.info('... preparing Report')
    report = Report(title=step)
    report.data_path = logging_dir
    # Redirect subsequent MNE logging into this run's log file.
    std_logfile = op.join(logging_dir, 'run_output.log')
    logger.info('... setting logfile: %s' % std_logfile)
    set_log_file(std_logfile)

    return report, run_id, results_dir, logger
def setup_provenance(script, results_dir, config=None, use_agg=True,
                     run_id=None):
    """Setup provenance tracking

    Parameters
    ----------
    script : str | callable
        The script that was executed, or a callable whose source file is
        used instead (its ``__name__`` names the step).
    results_dir : str
        The results directory.
    config : None | str
        The name of the config file. By default, the function expects the
        config to be under `__script__/' named `config.py`. It can also be
        another kind of textfile, e.g. .json.
    use_agg : bool
        Whether to use the 'Agg' backend for matplotlib or not.
    run_id : None | str
        An existing run id to reuse. If None, a new one is generated via
        ``create_run_id()``.

    Returns
    -------
    report : mne.report.Report
        The mne report.

    Side-effects
    ------------
    - make results dir if it does not exist
    - sets log file for stderr output
    - writes log file with runtime information
    """
    if use_agg is True:
        import matplotlib
        matplotlib.use('Agg')

    if not callable(script):
        if not op.isfile(script):
            raise ValueError('sorry, this is not a script!')
    if not op.isdir(results_dir) and not callable(script):
        results_dir = op.join(op.dirname(op.dirname(script)), results_dir)
    else:
        # NOTE(review): this branch also runs when results_dir exists and
        # script is not callable, re-rooting it at the cwd — confirm intended.
        results_dir = op.join(op.curdir, results_dir)

    # Step name: script file stem, or the callable's __name__.
    if not callable(script):
        step = op.splitext(op.split(script)[1])[0]
    else:
        step = script.__name__
    if not op.isabs(results_dir):
        results_dir = op.abspath(results_dir)
    start_path = op.dirname(results_dir)
    results_dir = op.join(results_dir, step)
    if not op.exists(results_dir):
        logger.info('generating results dir')
        # NOTE(review): helper name contains a typo ('_forec_'); it is
        # defined elsewhere, so it is kept as-is here.
        _forec_create_dir(results_dir, start=start_path)

    if run_id is None:
        run_id = create_run_id()
        logger.info('generated run id: %s' % run_id)
    else:
        logger.info('using existing run id: %s' % run_id)

    logger.info('preparing logging:')
    logging_dir = op.join(results_dir, run_id)
    if not op.exists(logging_dir):
        logger.info('... making logging directory: %s' % logging_dir)
        os.mkdir(logging_dir)
    else:
        logger.info('... using logging directory: %s' % logging_dir)

    # Record the versions of loaded modules for reproducibility.
    modules = get_versions(sys)
    runtime_log = op.join(logging_dir, 'run_time.json')
    with open(runtime_log, 'w') as fid:
        json.dump(modules, fid)
    logger.info('... writing runtime info to: %s' % runtime_log)

    # Snapshot the source of the calling script (or callable) once.
    script_code_out = op.join(logging_dir, 'script.py')
    if callable(script):
        script_code_in = inspect.getsourcefile(script)
    else:
        script_code_in = script
    if not op.isfile(script_code_out):
        with open(script_code_out, 'w') as fid:
            with open(script_code_in) as script_fid:
                source_code = script_fid.read()
            fid.write(source_code)
        logger.info('... logging source code of calling script')

    if config is None:
        config = 'config.py'
    if op.isabs(config):
        config_fname = config
    else:
        # NOTE(review): relative config paths are effectively disabled here
        # ('' never names a file) — presumably because a callable `script`
        # has no directory to resolve against; confirm this is intended.
        config_fname = ''

    config_code = op.join(  # weird behavior of join if last arg is path
        results_dir, run_id, op.split(config_fname)[-1])
    if not op.isfile(config_fname):
        logger.info('... No config found. Logging nothing.')
    elif op.isfile(config_code):
        logger.info('... Config already written. I assume that you are using'
                    ' the same run_id for different runs of your script.')
    else:
        with open(config_code, 'w') as fid:
            with open(config_fname) as config_fid:
                source_code = config_fid.read()
            fid.write(source_code)
        logger.info('... logging source code of "%s".' % config_fname)

    logger.info('... preparing Report')
    report = Report(title=step)
    report.data_path = logging_dir
    # Redirect subsequent MNE logging into this run's log file.
    std_logfile = op.join(logging_dir, 'run_output.log')
    logger.info('... setting logfile: %s' % std_logfile)
    set_log_file(std_logfile)

    return report, run_id, results_dir, logger
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    # Copy fixtures into an isolated temp dir so parse_folder sees only them.
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121'])
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    epochs.average().crop(0.1, 0.2).save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert_true(len(w) >= 1)
    assert_true(repr(report))

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(op.basename(fname) in
                    [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert_true(len(w) >= 1)
    assert_true(repr(report))
    # BUGFIX: previously '*.raw' was globbed twice, which matched no files
    # (everything here is *.fif), so the loop below was vacuous. Glob the
    # patterns that were actually parsed and guard against emptiness.
    fnames = [fname for pat in pattern
              for fname in glob.glob(op.join(tempdir, pat))]
    assert_true(len(fnames) > 0)
    for fname in fnames:
        assert_true(op.basename(fname) in
                    [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_raises(ValueError, Report, image_format='foo')
    assert_raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    # Copy fixtures into an isolated temp dir so parse_folder sees only them.
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new], [cov_fname, cov_fname_new],
                 [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121'])
    raw.del_proj()
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    epochs.average().crop(0.1, 0.2).save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))
    # BUGFIX: previously '*.raw' was globbed twice, which matched no files
    # (everything here is *.fif), so the loop below was vacuous. Glob the
    # patterns that were actually parsed and guard against emptiness.
    fnames = [fname for pat in pattern
              for fname in glob.glob(op.join(tempdir, pat))]
    assert len(fnames) > 0
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
    with pytest.raises(TypeError, match='Each fig must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')
    with pytest.raises(TypeError, match='Each fig must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    # Copy fixtures into an isolated temp dir so parse_folder sees only them.
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    raw = read_raw_fif(raw_fname_new, add_eeg_ref=False)
    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks,
                    add_eeg_ref=False)
    epochs.save(epochs_fname)
    epochs.average().save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    if sys.version.startswith('3.5'):  # XXX Some strange MPL/3.5 error...
        raise SkipTest('Python 3.5 and mpl have unresolved issues')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert_true(len(w) >= 1)
    assert_true(repr(report))

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)

    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert_true(op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert_true(op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert_true(len(w) >= 1)
    assert_true(repr(report))
    # BUGFIX: previously '*.raw' was globbed twice, which matched no files
    # (everything here is *.fif), so the loop below was vacuous. Glob the
    # patterns that were actually parsed and guard against emptiness.
    fnames = [fname for pat in pattern
              for fname in glob.glob(op.join(tempdir, pat))]
    assert_true(len(fnames) > 0)
    for fname in fnames:
        assert_true(
            op.basename(fname) in [op.basename(x) for x in report.fnames])
        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
def test_render_report(renderer_pyvistaqt, tmp_path, invisible_fig):
    """Test rendering *.fif files for mne report."""
    tempdir = str(tmp_path)
    # Copy fixtures into an isolated temp dir so parse_folder sees only them.
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    nirs_fname_new = op.join(tempdir, 'temp_raw-nirs.snirf')
    for a, b in [[raw_fname, raw_fname_new],
                 [raw_fname, raw_fname_new_bids],
                 [ms_fname, ms_fname_new],
                 [events_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [ecg_proj_fname, proj_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new],
                 [nirs_fname, nirs_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new)
    raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
    raw.del_proj()
    raw.set_eeg_reference(projection=True).load_data()
    epochs = Epochs(raw, read_events(events_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever, so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    evoked = epochs.average()
    with pytest.warns(RuntimeWarning, match='tmax is not in Evoked'):
        evoked.crop(0.1, 0.2)
    evoked.save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    projs=False, image_format='png')
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise',
                            n_time_points_evokeds=2, raw_butterfly=False,
                            stc_plot_kwargs=stc_plot_kwargs,
                            topomap_kwargs=topomap_kwargs)
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    fnames.extend(glob.glob(op.join(tempdir, '*.snirf')))
    titles = [op.basename(x) for x in fnames if not x.endswith('-ave.fif')]
    titles.append(f'{op.basename(evoked_fname)}: {evoked.comment}')
    content_names = [element.name for element in report._content]
    for title in titles:
        assert title in content_names
        assert (''.join(report.html).find(title) != -1)
    assert len(report._content) == len(fnames)

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    html = Path(fname).read_text(encoding='utf-8')
    # Evoked in `evoked_fname`
    assert f'{op.basename(evoked_fname)}: {evoked.comment}' in html
    assert 'Topographies' in html
    assert 'Global field power' in html

    assert len(report._content) == len(fnames)

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*proj.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern,
                            raw_butterfly=False)
    assert (repr(report))
    # BUGFIX: previously '*.raw' was globbed twice, which matched no files
    # (everything here is *.fif/*.snirf), so the loop below was vacuous.
    # Glob the patterns that were actually parsed and guard against
    # emptiness.
    fnames = [fname for pat in pattern
              for fname in glob.glob(op.join(tempdir, pat))]
    assert len(fnames) > 0
    content_names = [element.name for element in report._content]
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in content_names])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    with pytest.raises(ValueError, match='Invalid value'):
        Report(image_format='foo')
    with pytest.raises(ValueError, match='Invalid value'):
        Report(image_format=None)

    # ndarray support smoke test
    report.add_figure(fig=np.zeros((2, 3, 3)), title='title')
    with pytest.raises(TypeError, match='It seems you passed a path'):
        report.add_figure(fig='foo', title='title')
def test_render_report(renderer, tmpdir):
    """Test rendering *.fif files for mne report."""
    tempdir = str(tmpdir)
    # Copy fixtures into an isolated temp dir so parse_folder sees only them.
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    nirs_fname_new = op.join(tempdir, 'temp_raw-nirs.snirf')
    for a, b in [[raw_fname, raw_fname_new],
                 [raw_fname, raw_fname_new_bids],
                 [ms_fname, ms_fname_new], [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new], [proj_fname, proj_fname_new],
                 [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new],
                 [nirs_fname, nirs_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
    raw.del_proj()
    raw.set_eeg_reference(projection=True)
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever, so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    evoked = epochs.average()
    with pytest.warns(RuntimeWarning, match='tmax is not in Evoked'):
        evoked.crop(0.1, 0.2)
    evoked.save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    projs=True)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    fnames.extend(glob.glob(op.join(tempdir, '*.snirf')))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)
    assert len(report.fnames) == len(fnames)
    assert len(report.html) == len(report.fnames)
    assert len(report.fnames) == len(report)

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html
    # Projectors in Raw.info
    assert '<h4>SSP Projectors</h4>' in html
    # Projectors in `proj_fname_new`
    assert f'SSP Projectors: {op.basename(proj_fname_new)}' in html
    # Evoked in `evoked_fname`
    assert f'Evoked: {op.basename(evoked_fname)} ({evoked.comment})' in html
    assert 'Topomap (ch_type =' in html
    assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html

    assert len(report.html) == len(fnames)
    assert len(report.html) == len(report.fnames)

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))
    # BUGFIX: previously '*.raw' was globbed twice, which matched no files
    # (everything here is *.fif/*.snirf), so the loop below was vacuous.
    # Glob the patterns that were actually parsed and guard against
    # emptiness.
    fnames = [fname for pat in pattern
              for fname in glob.glob(op.join(tempdir, pat))]
    assert len(fnames) > 0
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)

    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    tempdir = pathlib.Path(tempdir)  # test using pathlib.Path
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')
    with pytest.raises(TypeError, match='figure must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
def test_render_report():
    """Test rendering ``-*.fif`` files for mne report.

    Copies raw/eve/cov/fwd/inv fixtures into a temp dir, derives -epo.fif
    and -ave.fif files from them, then checks that ``Report.parse_folder``
    picks every file up and that saving/overwriting/pattern-matching work.
    """
    # XXX Some strange MPL error is unresolved on Python 3.5; skip early,
    # before any expensive file copying or epoching is done.
    # NOTE: sys.version.startswith('3.5') would also match e.g. '3.51';
    # compare the parsed version tuple instead.
    if sys.version_info[:2] == (3, 5):
        raise SkipTest('Python 3.5 and mpl have unresolved issues')

    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    raw = read_raw_fif(raw_fname_new)
    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
    epochs.save(epochs_fname)
    epochs.average().save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert len(w) >= 1
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert op.basename(fname) in [op.basename(x) for x in report.fnames]
        assert ''.join(report.html).find(op.basename(fname)) != -1

    assert len(report.fnames) == len(fnames)
    assert len(report.html) == len(report.fnames)
    assert len(report.fnames) == len(report)

    # Check saving functionality
    report.data_path = tempdir
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
    assert op.isfile(op.join(tempdir, 'report.html'))

    assert len(report.html) == len(fnames)
    assert len(report.html) == len(report.fnames)

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert op.isfile(op.join(tempdir, 'report2.html'))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert op.isfile(op.join(tempdir, 'report.html'))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert len(w) >= 1
    assert repr(report)

    fnames = glob.glob(op.join(tempdir, '*.raw'))
    for fname in fnames:
        assert op.basename(fname) in [op.basename(x) for x in report.fnames]
        assert ''.join(report.html).find(op.basename(fname)) != -1
def test_render_report():
    """Test rendering ``-*.fif`` files for mne report.

    Copies raw (including a MaxShield recording), eve, cov, fwd and inv
    fixtures into a temp dir, derives small -epo.fif / -ave.fif files, and
    checks parsing, HTML output content, saving/overwriting, pattern
    matching, image-format validation and SVG rendering.
    """
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new], [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new], [cov_fname, cov_fname_new],
                 [fwd_fname, fwd_fname_new], [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)

    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121'])
    raw.del_proj()
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    epochs.average().crop(0.1, 0.2).save(evoked_fname)

    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)

    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert op.basename(fname) in [op.basename(x) for x in report.fnames]
        assert ''.join(report.html).find(op.basename(fname)) != -1

    assert len(report.fnames) == len(fnames)
    assert len(report.html) == len(report.fnames)
    assert len(report.fnames) == len(report)

    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert op.isfile(fname)
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    # The MaxShield raw must have been flagged as such in the rendered HTML
    assert '(MaxShield on)' in html

    assert len(report.html) == len(fnames)
    assert len(report.html) == len(report.fnames)

    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert op.isfile(op.join(tempdir, 'report2.html'))

    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert op.isfile(op.join(tempdir, 'report.html'))

    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert repr(report)

    # NOTE: the original globbed '*.raw' twice, which only duplicated
    # every entry; a single glob covers the same files.
    fnames = glob.glob(op.join(tempdir, '*.raw'))
    for fname in fnames:
        assert op.basename(fname) in [op.basename(x) for x in report.fnames]
        assert ''.join(report.html).find(op.basename(fname)) != -1

    # Invalid image formats must be rejected at construction time
    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)

    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')

    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
def setup_provenance(script, results_dir, use_agg=True):
    """Set up provenance tracking for a script run.

    Parameters
    ----------
    script : str
        Path to the script that is being executed.
    results_dir : str
        The root results directory; a per-step subdirectory is created
        inside it.
    use_agg : bool
        Whether to use the 'Agg' backend for matplotlib or not.

    Returns
    -------
    report : mne.report.Report
        The mne report, with ``data_path`` set to the run's logging dir.
    run_id : str
        The generated run identifier.
    results_dir : str
        The per-step results directory (``<results_dir>/<script stem>``).
    logger : logging.Logger
        The logger, now writing to the run's output log file.

    Side effects
    ------------
    - makes the results dir (and any missing parents) if it does not exist
    - sets the log file for stderr output
    - writes a log file with runtime information
    """
    if use_agg:
        # Must be selected before any pyplot import elsewhere takes effect.
        import matplotlib
        matplotlib.use('Agg')

    if not op.isfile(script):
        raise ValueError('sorry, this is not a script!')

    # Results for this script go into a subdirectory named after the
    # script's basename without extension.
    step = op.splitext(op.split(script)[1])[0]
    results_dir = op.join(results_dir, step)
    if not op.exists(results_dir):
        logger.info('generating results dir')
        # makedirs (not mkdir) so a missing parent directory is not fatal
        os.makedirs(results_dir)

    run_id = create_run_id()
    logger.info('generated run id: %s' % run_id)

    logger.info('preparing logging:')
    logging_dir = op.join(results_dir, run_id)
    logger.info('... making logging directory: %s' % logging_dir)
    os.mkdir(logging_dir)

    # Record the versions of all loaded modules for reproducibility.
    modules = get_versions(sys)
    runtime_log = op.join(logging_dir, 'run_time.json')
    with open(runtime_log, 'w') as fid:
        json.dump(modules, fid)
    logger.info('... writing runtime info to: %s' % runtime_log)

    # Snapshot the script source alongside its outputs.
    std_logfile = op.join(logging_dir, 'run_output.log')
    script_code = op.join(logging_dir, 'script.py')
    with open(script_code, 'w') as fid:
        with open(script) as script_fid:
            fid.write(script_fid.read())
    logger.info('... logging source code')

    logger.info('... preparing Report')
    report = Report(title=step)
    report.data_path = logging_dir
    logger.info('... setting logfile: %s' % std_logfile)
    set_log_file(std_logfile)

    return report, run_id, results_dir, logger