def test_setup_provenance():
    """Check that provenance setup creates the run directory and artifacts.

    Verifies that ``setup_provenance`` creates a per-run logging directory
    containing the run metadata, the captured log, and a byte-identical copy
    of the calling script, and that the returned report is titled after the
    script and rooted at the logging directory.
    """
    tmp_dir = _TempDir()
    report, run_id, results_dir, logger = setup_provenance(
        script=__file__, results_dir=tmp_dir)
    logging_dir = op.join(results_dir, run_id)

    # The run directory and its three standard artifacts must exist.
    assert_true(op.isdir(logging_dir))
    for artifact in ('run_time.json', 'run_output.log', 'script.py'):
        assert_true(op.isfile(op.join(logging_dir, artifact)))

    # The archived script must be a byte-identical copy of this file.
    with open(__file__) as src:
        expected_code = src.read()
    with open(op.join(logging_dir, 'script.py')) as dst:
        copied_code = dst.read()
    assert_equals(expected_code, copied_code)

    # run_time.json records the module versions in use.
    with open(op.join(logging_dir, 'run_time.json')) as fid:
        modules = json.load(fid)
    assert_true('meeg_preprocessing' in modules)

    # The report is named after the script and rooted at the run directory.
    assert_equals(report.title, op.splitext(op.split(__file__)[1])[0])
    assert_equals(report.data_path, logging_dir)
def test_setup_provenance():
    """Test provenance tracking for each way of passing a config file.

    Three cases are exercised:

    - ``'default'``: a ``config.py`` placed next to this test module and
      picked up implicitly (``config=None``);
    - ``'abs_py'``: an absolute path to a ``.py`` config;
    - ``'other'``: an absolute path to a non-Python config file.

    In every case the config file must be copied verbatim into the run
    directory, alongside the standard provenance artifacts.
    """
    for config_opt in ('abs_py', 'default', 'other'):
        tmp_dir = _TempDir()
        if config_opt == 'default':
            # Written into the package dir so it is discovered implicitly.
            config_fname = op.join(op.dirname(__file__), 'config.py')
            config_content = 'import this'
            config_param = None
        elif config_opt == 'abs_py':
            config_fname = op.join(tmp_dir, 'config.py')
            config_content = 'import antigravity'
            config_param = config_fname
        else:  # 'other': non-Python config file
            config_fname = op.join(tmp_dir, 'config.txt')
            config_content = 'my_config :: 42'
            config_param = config_fname
        with open(config_fname, 'w') as fid_config:
            fid_config.write(config_content)

        try:
            report, run_id, results_dir, logger = setup_provenance(
                script=__file__, results_dir=tmp_dir, config=config_param)
            logging_dir = op.join(results_dir, run_id)

            # Run directory and standard artifacts must exist.
            assert_true(op.isdir(logging_dir))
            for artifact in ('run_time.json', 'run_output.log', 'script.py'):
                assert_true(op.isfile(op.join(logging_dir, artifact)))

            # The config file is copied verbatim under its own basename.
            config_basename = op.split(config_fname)[-1]
            with open(op.join(logging_dir, config_basename)) as config_fid:
                config_code = config_fid.read()
            assert_equal(config_code, config_content)

            # The archived script is a byte-identical copy of this file.
            with open(__file__) as fid:
                this_file_code = fid.read()
            with open(op.join(logging_dir, 'script.py')) as fid:
                other_file_code = fid.read()
            assert_equals(this_file_code, other_file_code)

            # run_time.json records the module versions in use.
            with open(op.join(logging_dir, 'run_time.json')) as fid:
                modules = json.load(fid)
            assert_true('meeg_preprocessing' in modules)

            assert_equals(report.title,
                          op.splitext(op.split(__file__)[1])[0])
            assert_equals(report.data_path, logging_dir)
        finally:
            # BUG FIX: previously only removed on full success, so a failing
            # assertion leaked a stray config.py into the package directory.
            if config_opt == 'default':
                os.remove(config_fname)
def _setup_provenance(self):
    """Initialize provenance tracking for this run.

    Ensures the results directory exists, snapshots the script source into
    ``self.pyscript``, resolves the Agg-backend switch, and populates
    ``self.report``, ``self.run_id``, ``self.results_dir`` and
    ``self.logger`` via ``meeg_preprocessing.utils.setup_provenance``.
    """
    import os
    from meeg_preprocessing.utils import setup_provenance

    # Make sure there is somewhere to write results.
    if not os.path.isdir(self.results_dir):
        os.mkdir(self.results_dir)

    # Keep a raw-bytes snapshot of the driving script for provenance.
    with open(self.script, 'rb') as fid:
        self.pyscript = fid.read()

    # Fall back to the environment when the caller did not decide;
    # getenv returns a string, so normalize any truthy value to True.
    if self.use_agg is None:
        self.use_agg = os.getenv('use_agg')
        if self.use_agg:
            self.use_agg = True

    self.report, self.run_id, self.results_dir, self.logger = \
        setup_provenance(self.script, self.results_dir,
                         use_agg=self.use_agg)
from toolbox.utils import find_in_df, build_contrast, save_to_dict
from utils import get_data
from config import (
    data_path,
    pyoutput_path,
    subjects,
    paths,  # BUG FIX: was `paths('report')` — a call expression is a
            # SyntaxError inside an import list; import the function and
            # call it where the result is needed (below).
    contrasts,
    open_browser,
    chan_types,
)

report, run_id, _, logger = setup_provenance(script=__file__,
                                             results_dir=paths('report'))

mne.set_log_level('INFO')

# Force separation of magnetometers and gradiometers: replace a combined
# 'meg' entry with explicit 'mag' and 'grad' entries, keeping the others.
if 'meg' in [i['name'] for i in chan_types]:
    chan_types = ([dict(name='mag'), dict(name='grad')] +
                  [dict(name=i['name']) for i in chan_types
                   if i['name'] != 'meg'])

for subject in subjects:
    # Build per-subject file names and extract events from the mat file.
    meg_fname = op.join(data_path, subject, 'preprocessed',
                        subject + '_preprocessed')
    bhv_fname = op.join(data_path, subject, 'behavior',
                        subject + '_fixed.mat')
    epochs, events = get_data(meg_fname, bhv_fname)
# Author: Denis A. Engemann <*****@*****.**>
# License: BSD (3-clause)

import os.path as op

from meeg_preprocessing import check_apply_filter
from meeg_preprocessing.utils import setup_provenance

from mne import io

from config import (
    subject,
    fname,
    filter_params,
    notch_filter_params,
    plot_fmin,
    plot_fmax,
    n_jobs,
)

# Set up provenance tracking: report, run id, output dir and logger.
report, run_id, results_dir, logger = setup_provenance(
    script=__file__, results_dir='results')

# Load the raw data into memory so filtering can run in place.
raw = io.Raw(fname, preload=True)

# Apply band-pass and notch filters, collecting diagnostic figures.
fig, report = check_apply_filter(
    raw,
    subject=subject,
    filter_params=filter_params,
    notch_filter_params=notch_filter_params,
    plot_fmin=plot_fmin,
    plot_fmax=plot_fmax,
    n_jobs=n_jobs)

# Persist the HTML report into this run's provenance directory.
report.save(op.join(results_dir, run_id, 'report.html'))