def plot_auto_scores(cfg, subject, session):
    """Plot the automated bad-channel detection scores for every run.

    Loads the per-run ``*_scores.json`` files written during bad-channel
    detection and renders them via ``config.plot_auto_scores``.

    Returns
    -------
    tuple of (list, list)
        All generated figures and a matching list of captions
        (one ``'Run <run>'`` caption per figure).
    """
    import json_tricks

    scores_path = BIDSPath(subject=subject,
                           session=session,
                           task=cfg.task,
                           acquisition=cfg.acq,
                           run=None,
                           processing=cfg.proc,
                           recording=cfg.rec,
                           space=cfg.space,
                           suffix='scores',
                           extension='.json',
                           datatype=cfg.datatype,
                           root=cfg.deriv_root,
                           check=False)

    figs_out = []
    captions_out = []
    for run in cfg.runs:
        scores_path.update(run=run)
        with open(scores_path, 'r') as f:
            auto_scores = json_tricks.load(f)

        run_figs = config.plot_auto_scores(auto_scores)
        # One run may yield several figures, e.g. "grad" and "mag".
        figs_out.extend(run_figs)
        captions_out.extend(f'Run {run}' for _ in run_figs)

    return figs_out, captions_out
# NOTE(review): this function has the SAME name as the cfg-based
# `plot_auto_scores(cfg, subject, session)` defined earlier in this file —
# whichever definition comes later shadows the other. One of the two is
# presumably a leftover from a refactor; confirm which variant callers use
# and remove the other.
def plot_auto_scores(subject, session):
    """Plot automated bad channel detection scores.

    Legacy variant that pulls all settings from the module-level ``config``
    object instead of a ``cfg`` parameter. Returns ``(all_figs,
    all_captions)`` with one ``'Run <run>'`` caption per figure.
    """
    import json_tricks

    deriv_path = config.get_subject_deriv_path(subject=subject,
                                               session=session,
                                               kind=config.get_kind())
    # NOTE(review): `kind=` and `prefix=` look like an older mne-bids
    # BIDSPath API (modern versions use `suffix=`/`root=`) — verify this
    # still works against the pinned mne-bids version.
    fname_scores = BIDSPath(subject=subject,
                            session=session,
                            task=config.get_task(),
                            acquisition=config.acq,
                            run=None,
                            processing=config.proc,
                            recording=config.rec,
                            space=config.space,
                            kind='scores',
                            extension='.json',
                            prefix=deriv_path,
                            check=False)

    all_figs = []
    all_captions = []
    for run in config.get_runs():
        # Re-point the path at this run's scores file before opening it.
        with open(fname_scores.update(run=run), 'r') as f:
            auto_scores = json_tricks.load(f)

        figs = config.plot_auto_scores(auto_scores)
        all_figs.extend(figs)  # Could be more than 1 fig, e.g. "grad" and "mag"
        captions = [f'Run {run}'] * len(figs)
        all_captions.extend(captions)

    return all_figs, all_captions
def find_bad_channels(raw, subject, session, task, run):
    """Detect flat and/or noisy MEG channels and mark them as bad in-place.

    Runs Maxwell-filter-based detection, extends ``raw.info['bads']`` with
    the findings (according to the ``config.find_flat_channels_meg`` /
    ``config.find_noisy_channels_meg`` switches), writes the detection
    scores to a ``*_scores.json`` derivative, and records all bad channels
    with their reasons in a ``*_bads.tsv`` derivative.

    Side effects: mutates ``raw.info['bads']``; writes JSON and TSV files
    under ``config.deriv_root``.
    """
    # Log which detection mode is active, based on the two config flags.
    if (config.find_flat_channels_meg and
            not config.find_noisy_channels_meg):
        msg = 'Finding flat channels.'
    elif (config.find_noisy_channels_meg and
            not config.find_flat_channels_meg):
        msg = 'Finding noisy channels using Maxwell filtering.'
    else:
        msg = ('Finding flat channels, and noisy channels using '
               'Maxwell filtering.')
    logger.info(
        gen_log_message(message=msg, step=1, subject=subject,
                        session=session))

    # Base derivatives path; per-output suffix/extension are set via
    # .copy().update(...) below.
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=task,
                         run=run,
                         acquisition=config.acq,
                         processing=config.proc,
                         recording=config.rec,
                         space=config.space,
                         suffix=config.get_datatype(),
                         datatype=config.get_datatype(),
                         root=config.deriv_root)

    # Detection always computes both noisy and flat channels (plus scores);
    # the config flags below only decide which results are actually applied.
    auto_noisy_chs, auto_flat_chs, auto_scores = find_bad_channels_maxwell(
        raw=raw,
        calibration=get_mf_cal_fname(subject, session),
        cross_talk=get_mf_ctc_fname(subject, session),
        return_scores=True)

    # Keep a copy of the bads that were set before auto-detection so the
    # TSV below can distinguish their origin.
    preexisting_bads = raw.info['bads'].copy()
    bads = preexisting_bads.copy()

    if config.find_flat_channels_meg:
        msg = f'Found {len(auto_flat_chs)} flat channels.'
        logger.info(
            gen_log_message(message=msg, step=1, subject=subject,
                            session=session))
        bads.extend(auto_flat_chs)

    if config.find_noisy_channels_meg:
        msg = f'Found {len(auto_noisy_chs)} noisy channels.'
        logger.info(
            gen_log_message(message=msg, step=1, subject=subject,
                            session=session))
        bads.extend(auto_noisy_chs)

    # De-duplicate and sort before storing on the Raw object.
    bads = sorted(set(bads))
    raw.info['bads'] = bads
    msg = f'Marked {len(raw.info["bads"])} channels as bad.'
    logger.info(
        gen_log_message(message=msg, step=1, subject=subject,
                        session=session))

    if config.find_noisy_channels_meg:
        auto_scores_fname = bids_path.copy().update(
            suffix='scores', extension='.json', check=False)
        # NOTE(review): `json_tricks` is used here without a local import
        # (unlike the plot_auto_scores functions) — presumably imported at
        # module level; confirm.
        with open(auto_scores_fname, 'w') as f:
            json_tricks.dump(auto_scores, fp=f, allow_nan=True,
                             sort_keys=False)

        if config.interactive:
            import matplotlib.pyplot as plt
            config.plot_auto_scores(auto_scores)
            plt.show()

    # Write the bad channels to disk.
    bads_tsv_fname = bids_path.copy().update(suffix='bads',
                                             extension='.tsv',
                                             check=False)
    bads_for_tsv = []
    reasons = []

    if config.find_flat_channels_meg:
        bads_for_tsv.extend(auto_flat_chs)
        reasons.extend(['auto-flat'] * len(auto_flat_chs))
        # A channel re-detected automatically is reported with the auto
        # reason, not as pre-existing.
        preexisting_bads = set(preexisting_bads) - set(auto_flat_chs)

    if config.find_noisy_channels_meg:
        bads_for_tsv.extend(auto_noisy_chs)
        reasons.extend(['auto-noisy'] * len(auto_noisy_chs))
        preexisting_bads = set(preexisting_bads) - set(auto_noisy_chs)

    preexisting_bads = list(preexisting_bads)
    if preexisting_bads:
        bads_for_tsv.extend(preexisting_bads)
        reasons.extend(['pre-existing (before mne-study-template was run)'] *
                       len(preexisting_bads))

    tsv_data = pd.DataFrame(dict(name=bads_for_tsv, reason=reasons))
    tsv_data = tsv_data.sort_values(by='name')
    tsv_data.to_csv(bads_tsv_fname, sep='\t', index=False)