def test_line_freq_estimation():
    """Test estimating line frequency.

    When neither the raw file nor the sidecar JSON carries a power-line
    frequency, ``read_raw_bids`` should estimate it from the data's PSD:
    60 Hz for the US ``sample`` dataset, 50 Hz for the EU ``somato``
    dataset. When no usable picks exist, ``line_freq`` stays ``None``.
    """
    bids_root = _TempDir()

    # read in USA dataset, so PSD estimation should find 60 Hz
    # (NOTE: an earlier comment said 50 Hz, but US mains is 60 Hz and the
    # assertion below checks for 60)
    raw = mne.io.read_raw_fif(raw_fname)
    kind = "meg"

    # assert that we get the same line frequency set
    bids_fname = bids_basename.copy().update(suffix=f'{kind}.fif')

    # find sidecar JSON fname
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    sidecar_fname = _find_matching_sidecar(bids_fname, bids_root,
                                           '{}.json'.format(kind),
                                           allow_fail=True)

    # 1. when nothing is set, default to use PSD estimation -> should be 60
    # for `sample` dataset
    raw.info['line_freq'] = None
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    _update_sidecar(sidecar_fname, "PowerLineFrequency", "n/a")
    with pytest.warns(RuntimeWarning, match="No line frequency found"):
        raw = read_raw_bids(bids_basename=bids_basename,
                            bids_root=bids_root, kind=kind)
    assert raw.info['line_freq'] == 60

    # test that `somato` dataset finds 50 Hz (EU dataset)
    somato_raw = mne.io.read_raw_fif(somato_raw_fname)
    somato_raw.info['line_freq'] = None
    write_raw_bids(somato_raw, bids_basename, bids_root, overwrite=True)
    sidecar_fname = _find_matching_sidecar(bids_fname, bids_root,
                                           '{}.json'.format(kind),
                                           allow_fail=True)
    _update_sidecar(sidecar_fname, "PowerLineFrequency", "n/a")
    with pytest.warns(RuntimeWarning, match="No line frequency found"):
        somato_raw = read_raw_bids(bids_basename=bids_basename,
                                   bids_root=bids_root, kind=kind)
    assert somato_raw.info['line_freq'] == 50

    # assert that line_freq should be None when
    # all picks are not meg/eeg/ecog/seeg
    somato_raw.info['line_freq'] = None
    # retype every channel to 'bio' so no estimation-eligible picks remain
    somato_raw.set_channel_types({somato_raw.ch_names[i]: 'bio'
                                  for i in range(len(somato_raw.ch_names))})
    somato_raw = _handle_info_reading(sidecar_fname, somato_raw,
                                      verbose=True)
    assert somato_raw.info['line_freq'] is None
def test_bdf(_bids_validate):
    """Test write_raw_bids conversion for Biosemi data.

    Checks: (1) writing a BDF file warns about the missing line frequency
    and produces a valid BIDS tree; (2) channel types recorded in
    ``channels.tsv`` override the types in the raw file on read-back;
    (3) writing a cropped raw raises.
    """
    output_path = _TempDir()
    data_path = op.join(base_path, 'edf', 'tests', 'data')
    raw_fname = op.join(data_path, 'test.bdf')

    raw = mne.io.read_raw_bdf(raw_fname)
    # BDF carries no power-line frequency, so writing must warn about it
    with pytest.warns(UserWarning, match='No line frequency found'):
        write_raw_bids(raw, bids_basename, output_path, overwrite=False)
    # _bids_validate presumably runs the BIDS validator on the tree
    _bids_validate(output_path)

    # Test also the reading of channel types from channels.tsv
    # the first channel in the raw data is not MISC right now
    test_ch_idx = 0
    assert coil_type(raw.info, test_ch_idx) != 'misc'

    # we will change the channel type to MISC and overwrite the channels file
    bids_fname = bids_basename + '_eeg.bdf'
    channels_fname = _find_matching_sidecar(bids_fname, output_path,
                                            'channels.tsv')
    channels_dict = _from_tsv(channels_fname)
    channels_dict['type'][test_ch_idx] = 'MISC'
    _to_tsv(channels_dict, channels_fname)

    # Now read the raw data back from BIDS, with the tampered TSV, to show
    # that the channels.tsv truly influences how read_raw_bids sets ch_types
    # in the raw data object
    raw = read_raw_bids(bids_fname, output_path)
    assert coil_type(raw.info, test_ch_idx) == 'misc'

    # Test cropped assertion error: writing must refuse partial recordings
    raw = mne.io.read_raw_bdf(raw_fname)
    raw.crop(0, raw.times[-2])
    with pytest.raises(AssertionError, match='cropped'):
        write_raw_bids(raw, bids_basename, output_path)
def test_handle_info_reading():
    """Test reading information from a BIDS sidecar.json file.

    Verifies the precedence rules for ``PowerLineFrequency``:
    raw value round-trips; sidecar value fills in a missing raw value;
    a sidecar copy under ``derivatives/`` is ignored; a raw value
    survives an ``n/a`` sidecar; and a conflicting sidecar raises.
    """
    bids_root = _TempDir()

    # read in a dataset and force a line frequency of 60 Hz
    # (NOTE: an earlier comment claimed "USA ... 50 Hz"; the code writes
    # and asserts 60 Hz)
    raw = mne.io.read_raw_fif(raw_fname)
    raw.info['line_freq'] = 60  # write copy of raw with line freq of 60

    # bids basename and fname
    bids_basename = make_bids_basename(subject='01', session='01',
                                       task='audiovisual', run='01')
    kind = "meg"
    bids_fname = bids_basename + '_{}.fif'.format(kind)
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)

    # find sidecar JSON fname
    sidecar_fname = _find_matching_sidecar(bids_fname, bids_root,
                                           '{}.json'.format(kind),
                                           allow_fail=True)

    # 1. assert that we get the same line frequency set
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 60

    # 2. if line frequency is not set in raw file, then default to sidecar
    raw.info['line_freq'] = None
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    _update_sidecar(sidecar_fname, "PowerLineFrequency", 55)
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 55

    # make a copy of the sidecar in "derivatives/"
    # to check that we make sure we always get the right sidecar
    # in addition, it should not break the sidecar reading
    # in `read_raw_bids`
    deriv_dir = op.join(bids_root, "derivatives")
    sidecar_copy = op.join(deriv_dir, op.basename(sidecar_fname))
    os.mkdir(deriv_dir)
    with open(sidecar_fname, "r") as fin:
        sidecar_json = json.load(fin)
    sidecar_json["PowerLineFrequency"] = 45
    _write_json(sidecar_copy, sidecar_json)
    # the derivatives copy (45 Hz) must NOT win over the real sidecar
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 55

    # 3. if line frequency is set in raw file, but not sidecar
    raw.info['line_freq'] = 60
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    _update_sidecar(sidecar_fname, "PowerLineFrequency", "n/a")
    raw = mne_bids.read_raw_bids(bids_fname, bids_root)
    assert raw.info['line_freq'] == 60

    # 4. assert that we get an error when sidecar json doesn't match
    _update_sidecar(sidecar_fname, "PowerLineFrequency", 55)
    with pytest.raises(ValueError, match="Line frequency in sidecar json"):
        raw = mne_bids.read_raw_bids(bids_fname, bids_root)
def test_find_matching_sidecar(return_bids_test_dir):
    """Test finding a sidecar file from a BIDS dir."""
    bids_root = return_bids_test_dir

    # A unique coordsystem.json must be located for the test basename.
    found = _find_matching_sidecar(bids_basename, bids_root,
                                   'coordsystem.json')
    expected_file = op.join('sub-01', 'ses-01', 'meg',
                            'sub-01_ses-01_coordsystem.json')
    assert found.endswith(expected_file)

    # Planting a second, equally-scoring candidate must trigger an error.
    with pytest.raises(RuntimeError, match='Expected to find a single'):
        duplicate = found.replace('coordsystem.json', '2coordsystem.json')
        open(duplicate, 'w').close()
        _find_matching_sidecar(bids_basename, bids_root, 'coordsystem.json')

    # With allow_fail=True a miss only warns (and yields None).
    with pytest.warns(UserWarning, match='Did not find any'):
        _find_matching_sidecar(bids_basename, bids_root, 'foo.bogus', True)
def _populate_summary_dict(summ_dict, filenames, bids_root):
    """Accumulate sidecar JSON fields of ``filenames`` into ``summ_dict``.

    For each data file, the matching JSON sidecar is located and loaded.
    ``summ_dict`` maps each sidecar key to a list of values, one entry per
    file in which the key occurred, in ``filenames`` order.

    Parameters
    ----------
    summ_dict : dict
        Mutated in place; keys from the first file's sidecar overwrite any
        pre-existing entries.
    filenames : iterable of str
        Data file names; the extension is stripped before sidecar lookup.
    bids_root : str
        Root of the BIDS folder to search for sidecars.
    """
    for j, filename in enumerate(filenames):
        # sidecar lookup is done on the extension-less basename
        bids_fname, _ = os.path.splitext(filename)
        sidecar_fpath = _find_matching_sidecar(bids_fname=bids_fname,
                                               suffix="json",
                                               bids_root=bids_root)
        sidecar_dict = file_to_dict(sidecar_fpath)
        if j == 0:
            # first file: start a fresh one-element list per key
            summ_dict.update({key: [value]
                              for key, value in sidecar_dict.items()})
        else:
            for key, value in sidecar_dict.items():
                try:
                    summ_dict[key].append(value)
                except KeyError:
                    # Key first appears in a later file: store it as a
                    # list to keep the invariant. (BUGFIX: the original
                    # stored the bare value here, which made subsequent
                    # `.append` calls fail for that key.)
                    summ_dict[key] = [value]
def test_handle_coords_reading():
    """Test reading coordinates from BIDS files.

    Writes an ECoG recording with a random montage, reads it back and
    checks that digitization and channel info round-trip; then corrupts
    ``electrodes.tsv`` to verify the mismatch error.
    """
    bids_root = _TempDir()
    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')
    raw = mne.io.read_raw_edf(raw_fname)

    # ensure we are writing 'ecog'/'ieeg' data
    raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})

    # set a `random` montage
    ch_names = raw.ch_names
    elec_locs = np.random.random((len(ch_names), 3)).astype(float)
    ch_pos = dict(zip(ch_names, elec_locs))
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="mri")
    raw.set_montage(montage)
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)

    # read in the data and assert montage is the same
    bids_fname = bids_basename + "_ieeg.edf"
    raw_test = read_raw_bids(bids_fname, bids_root)

    # obtain the sensor positions (dig[1] is one representative point)
    orig_locs = raw.info['dig'][1]
    test_locs = raw_test.info['dig'][1]
    assert orig_locs == test_locs
    assert not object_diff(raw.info['chs'], raw_test.info['chs'])

    # test error message if electrodes don't match
    electrodes_fname = _find_matching_sidecar(bids_fname, bids_root,
                                              "electrodes.tsv",
                                              allow_fail=True)
    electrodes_dict = _from_tsv(electrodes_fname)
    # pop off 5 channels to create a raw/TSV mismatch
    for key in electrodes_dict.keys():
        for i in range(5):
            electrodes_dict[key].pop()
    _to_tsv(electrodes_dict, electrodes_fname)
    with pytest.raises(RuntimeError, match='Channels do not correspond'):
        raw_test = read_raw_bids(bids_fname, bids_root)
def test_write_anat(_bids_validate):
    """Test writing anatomical data.

    Covers write_anat happy path (T1w + sidecar with landmark voxel
    coordinates), all argument-validation failures, the deface options,
    and writing with explicit landmarks in voxel/MRI/head coordinates.

    NOTE(review): a second ``test_write_anat`` definition appears later
    in this module and shadows this one, so the landmark-specific checks
    at the end of this version never run — consider merging or renaming.
    """
    # Get the MNE testing sample data
    import nibabel as nib
    output_path = _TempDir()
    data_path = testing.data_path()
    raw_fname = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc_raw.fif')

    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
                'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')

    raw = mne.io.read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=False)

    # Write some MRI data and supply a `trans`
    trans_fname = raw_fname.replace('_raw.fif', '-trans.fif')
    trans = mne.read_trans(trans_fname)

    # Get the T1 weighted MRI data file
    # Needs to be converted to Nifti because we only have mgh in our test base
    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')

    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                          raw=raw, trans=trans, deface=True, verbose=True,
                          overwrite=True)
    _bids_validate(output_path)

    # Validate that files are as expected
    t1w_json_path = op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.json')
    assert op.exists(t1w_json_path)
    assert op.exists(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    with open(t1w_json_path, 'r') as f:
        t1w_json = json.load(f)
    print(t1w_json)

    # We only should have AnatomicalLandmarkCoordinates as key
    np.testing.assert_array_equal(list(t1w_json.keys()),
                                  ['AnatomicalLandmarkCoordinates'])
    # And within AnatomicalLandmarkCoordinates only LPA, NAS, RPA in that
    # order
    anat_dict = t1w_json['AnatomicalLandmarkCoordinates']
    point_list = ['LPA', 'NAS', 'RPA']
    np.testing.assert_array_equal(list(anat_dict.keys()), point_list)

    # test the actual values of the voxels (no floating points)
    for i, point in enumerate([(66, 51, 46), (41, 32, 74), (17, 53, 47)]):
        coords = anat_dict[point_list[i]]
        np.testing.assert_array_equal(np.asarray(coords, dtype=int), point)

    # BONUS: test also that we can find the matching sidecar
    side_fname = _find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
                                        output_path, 'T1w.json')
    assert op.split(side_fname)[-1] == 'sub-01_ses-01_acq-01_T1w.json'

    # Now try some anat writing that will fail
    # We already have some MRI data there
    with pytest.raises(IOError, match='`overwrite` is set to False'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                   raw=raw, trans=trans, verbose=True, deface=False,
                   overwrite=False)

    # pass some invalid type as T1 MRI
    with pytest.raises(ValueError, match='must be a path to a T1 weighted'):
        write_anat(output_path, subject_id, 9999999999999, session_id,
                   raw=raw, trans=trans, verbose=True, deface=False,
                   overwrite=True)

    # Return without writing sidecar
    sh.rmtree(anat_dir)
    write_anat(output_path, subject_id, t1w_mgh, session_id)
    # Assert that we truly cannot find a sidecar
    with pytest.raises(RuntimeError, match='Did not find any'):
        _find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
                               output_path, 'T1w.json')

    # trans has a wrong type
    wrong_type = 1
    match = 'transform type {} not known, must be'.format(type(wrong_type))
    with pytest.raises(ValueError, match=match):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=wrong_type, verbose=True, deface=False,
                   overwrite=True)

    # trans is a str, but file does not exist
    wrong_fname = 'not_a_trans'
    match = 'trans file "{}" not found'.format(wrong_fname)
    with pytest.raises(IOError, match=match):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=wrong_fname, verbose=True, overwrite=True)

    # However, reading trans if it is a string pointing to trans is fine
    write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
               trans=trans_fname, verbose=True, deface=False,
               overwrite=True)

    # Writing without a session does NOT yield "ses-None" anywhere
    anat_dir2 = write_anat(output_path, subject_id, t1w_mgh, None)
    assert 'ses-None' not in anat_dir2
    assert op.exists(op.join(anat_dir2, 'sub-01_T1w.nii.gz'))

    # specify trans but not raw
    with pytest.raises(ValueError, match='must be specified if `trans`'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=None,
                   trans=trans, verbose=True, deface=False, overwrite=True)

    # test deface: defacing zeroes out face voxels, so the voxel sum drops
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                          raw=raw, trans=trans_fname, verbose=True,
                          deface=True, overwrite=True)
    t1w = nib.load(op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum = t1w.get_data().sum()

    anat_dir2 = write_anat(output_path, subject_id, t1w_mgh, session_id,
                           raw=raw, trans=trans_fname, verbose=True,
                           deface=dict(inset=25.), overwrite=True)
    t1w2 = nib.load(op.join(anat_dir2, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum2 = t1w2.get_data().sum()
    # a larger inset removes more voxels than the default deface
    assert vox_sum > vox_sum2

    anat_dir3 = write_anat(output_path, subject_id, t1w_mgh, session_id,
                           raw=raw, trans=trans_fname, verbose=True,
                           deface=dict(theta=25), overwrite=True)
    t1w3 = nib.load(op.join(anat_dir3, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum3 = t1w3.get_data().sum()
    assert vox_sum > vox_sum3

    with pytest.raises(ValueError,
                       match='The raw object, trans and raw or the '
                             'landmarks'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=None, verbose=True, deface=True, overwrite=True)

    with pytest.raises(ValueError, match='inset must be numeric'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(inset='small'),
                   overwrite=True)

    with pytest.raises(ValueError, match='inset should be positive'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(inset=-2.),
                   overwrite=True)

    with pytest.raises(ValueError, match='theta must be numeric'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(theta='big'),
                   overwrite=True)

    with pytest.raises(ValueError,
                       match='theta should be between 0 and 90 degrees'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(theta=100),
                   overwrite=True)

    # Write some MRI data and supply `landmarks`
    mri_voxel_landmarks = mne.channels.make_dig_montage(
        lpa=[66.08580, 51.33362, 46.52982],
        nasion=[41.87363, 32.24694, 74.55314],
        rpa=[17.23812, 53.08294, 47.01789],
        coord_frame='mri_voxel')
    mri_landmarks = mne.channels.make_dig_montage(
        lpa=[-0.07629625, -0.00062556, -0.00776012],
        nasion=[0.00267222, 0.09362256, 0.03224791],
        rpa=[0.07635873, -0.00258065, -0.01212903],
        coord_frame='mri')
    meg_landmarks = mne.channels.make_dig_montage(
        lpa=[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09],
        nasion=[3.72529030e-09, 1.02605611e-01, 4.19095159e-09],
        rpa=[7.52676800e-02, 0.00000000e+00, 5.58793545e-09],
        coord_frame='head')

    # test mri voxel landmarks
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                          deface=True, landmarks=mri_voxel_landmarks,
                          verbose=True, overwrite=True)
    _bids_validate(output_path)
    t1w1 = nib.load(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    vox1 = t1w1.get_data()

    # test mri landmarks
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                          acq, deface=True, landmarks=mri_landmarks,
                          verbose=True, overwrite=True)
    _bids_validate(output_path)
    t1w2 = nib.load(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    vox2 = t1w2.get_data()

    # because of significant rounding errors the voxels are fairly different
    # but the deface works in all three cases and was checked
    assert abs(vox1 - vox2).sum() / abs(vox1).sum() < 0.2

    # crash for raw also
    with pytest.raises(ValueError, match='Please use either `landmarks`'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, raw=raw, trans=trans, deface=True,
                              landmarks=mri_landmarks, verbose=True,
                              overwrite=True)

    # crash for trans also
    with pytest.raises(ValueError, match='`trans` was provided'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, trans=trans, deface=True,
                              landmarks=mri_landmarks, verbose=True,
                              overwrite=True)

    # test meg landmarks
    tmp_dir = _TempDir()
    meg_landmarks.save(op.join(tmp_dir, 'meg_landmarks.fif'))
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                          acq, deface=True, trans=trans,
                          landmarks=op.join(tmp_dir, 'meg_landmarks.fif'),
                          verbose=True, overwrite=True)
    _bids_validate(output_path)
    t1w3 = nib.load(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    vox3 = t1w3.get_data()
    assert abs(vox1 - vox3).sum() / abs(vox1).sum() < 0.2

    # test raise error on meg_landmarks with no trans
    with pytest.raises(ValueError, match='Head space landmarks provided'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, deface=True, landmarks=meg_landmarks,
                              verbose=True, overwrite=True)

    # test unsupported (any coord_frame other than head and mri) coord_frame
    fail_landmarks = meg_landmarks.copy()
    fail_landmarks.dig[0]['coord_frame'] = 3
    fail_landmarks.dig[1]['coord_frame'] = 3
    fail_landmarks.dig[2]['coord_frame'] = 3

    with pytest.raises(ValueError, match='Coordinate frame not recognized'):
        anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                              acq, deface=True, landmarks=fail_landmarks,
                              verbose=True, overwrite=True)
def test_handle_ieeg_coords_reading(bids_basename):
    """Test reading iEEG coordinates from BIDS files.

    Checks that coordinate frames round-trip, that BIDS units
    ('m'/'cm'/'mm') are rescaled to meters, that non-BIDS units and
    coordinate systems produce warnings, that 'acpc' maps to RAS, and
    that 'n/a' coordinates become NaN without marking the channel bad.
    """
    bids_root = _TempDir()

    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')
    bids_fname = bids_basename.copy().update(suffix='ieeg.edf')

    raw = mne.io.read_raw_edf(raw_fname)

    # ensure we are writing 'ecog'/'ieeg' data
    raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})

    # coordinate frames in mne-python should all map correctly
    # set a `random` montage
    ch_names = raw.ch_names
    elec_locs = np.random.random((len(ch_names), 3)).astype(float)
    ch_pos = dict(zip(ch_names, elec_locs))
    coordinate_frames = ['mri', 'ras']
    for coord_frame in coordinate_frames:
        # XXX: mne-bids doesn't support multiple electrodes.tsv files
        sh.rmtree(bids_root)
        montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                                coord_frame=coord_frame)
        raw.set_montage(montage)
        write_raw_bids(raw, bids_basename, bids_root,
                       overwrite=True, verbose=False)
        # read in raw file w/ updated coordinate frame
        # and make sure all digpoints are correct coordinate frames
        raw_test = read_raw_bids(bids_basename=bids_basename,
                                 bids_root=bids_root, verbose=False)
        coord_frame_int = MNE_STR_TO_FRAME[coord_frame]
        for digpoint in raw_test.info['dig']:
            assert digpoint['coord_frame'] == coord_frame_int

    # start w/ new bids root
    sh.rmtree(bids_root)
    write_raw_bids(raw, bids_basename, bids_root,
                   overwrite=True, verbose=False)

    # obtain the sensor positions and assert ch_coords are same
    raw_test = read_raw_bids(bids_basename=bids_basename,
                             bids_root=bids_root, verbose=False)
    orig_locs = raw.info['dig'][1]
    test_locs = raw_test.info['dig'][1]
    assert orig_locs == test_locs
    assert not object_diff(raw.info['chs'], raw_test.info['chs'])

    # read in the data and assert montage is the same
    # regardless of 'm', 'cm', 'mm', or 'pixel'
    scalings = {'m': 1, 'cm': 100, 'mm': 1000}
    coordsystem_fname = _find_matching_sidecar(bids_fname, bids_root,
                                               suffix='coordsystem.json',
                                               allow_fail=True)
    electrodes_fname = _find_matching_sidecar(bids_fname, bids_root,
                                              "electrodes.tsv",
                                              allow_fail=True)
    orig_electrodes_dict = _from_tsv(electrodes_fname,
                                     [str, float, float, float, str])

    # not BIDS specified should not be read
    coord_unit = 'km'
    scaling = 0.001
    _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit)
    electrodes_dict = _from_tsv(electrodes_fname,
                                [str, float, float, float, str])
    for axis in ['x', 'y', 'z']:
        electrodes_dict[axis] = \
            np.multiply(orig_electrodes_dict[axis], scaling)
    _to_tsv(electrodes_dict, electrodes_fname)
    with pytest.warns(RuntimeWarning, match='Coordinate unit is not '
                                            'an accepted BIDS unit'):
        raw_test = read_raw_bids(bids_basename=bids_basename,
                                 bids_root=bids_root, verbose=False)

    # correct BIDS units should scale to meters properly
    for coord_unit, scaling in scalings.items():
        # update coordinate SI units
        _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit)
        electrodes_dict = _from_tsv(electrodes_fname,
                                    [str, float, float, float, str])
        for axis in ['x', 'y', 'z']:
            electrodes_dict[axis] = \
                np.multiply(orig_electrodes_dict[axis], scaling)
        _to_tsv(electrodes_dict, electrodes_fname)

        # read in raw file w/ updated montage
        raw_test = read_raw_bids(bids_basename=bids_basename,
                                 bids_root=bids_root, verbose=False)

        # obtain the sensor positions and make sure they're the same
        assert_dig_allclose(raw.info, raw_test.info)

    # XXX: Improve by changing names to 'unknown' coordframe (needs mne PR)
    # check that coordinate systems other coordinate systems should be named
    # in the file and not the CoordinateSystem, which is reserved for keywords
    coordinate_frames = ['lia', 'ria', 'lip', 'rip', 'las']
    for coord_frame in coordinate_frames:
        # update coordinate units
        _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem',
                        coord_frame)
        # read in raw file w/ updated coordinate frame
        # and make sure all digpoints are MRI coordinate frame
        with pytest.warns(RuntimeWarning, match="iEEG Coordinate frame is "
                                                "not accepted BIDS keyword"):
            raw_test = read_raw_bids(bids_basename=bids_basename,
                                     bids_root=bids_root, verbose=False)
            assert raw_test.info['dig'] is None

    # ACPC should be read in as RAS for iEEG
    _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', 'acpc')
    raw_test = read_raw_bids(bids_basename=bids_basename,
                             bids_root=bids_root, verbose=False)
    coord_frame_int = MNE_STR_TO_FRAME['ras']
    for digpoint in raw_test.info['dig']:
        assert digpoint['coord_frame'] == coord_frame_int

    # test error message if electrodes don't match
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)
    electrodes_dict = _from_tsv(electrodes_fname)
    # pop off 5 channels to create a raw/TSV mismatch
    for key in electrodes_dict.keys():
        for i in range(5):
            electrodes_dict[key].pop()
    _to_tsv(electrodes_dict, electrodes_fname)
    with pytest.raises(RuntimeError, match='Channels do not correspond'):
        raw_test = read_raw_bids(bids_basename=bids_basename,
                                 bids_root=bids_root, verbose=False)

    # make sure montage is set if there are coordinates w/ 'n/a'
    raw.info['bads'] = []
    write_raw_bids(raw, bids_basename, bids_root,
                   overwrite=True, verbose=False)
    electrodes_dict = _from_tsv(electrodes_fname)
    for axis in ['x', 'y', 'z']:
        electrodes_dict[axis][0] = 'n/a'
        electrodes_dict[axis][3] = 'n/a'
    _to_tsv(electrodes_dict, electrodes_fname)

    # test if montage is correctly set via mne-bids
    # electrode coordinates should be nan
    # when coordinate is 'n/a'
    nan_chs = [electrodes_dict['name'][i] for i in [0, 3]]
    with pytest.warns(RuntimeWarning, match='There are channels '
                                            'without locations'):
        raw = read_raw_bids(bids_basename=bids_basename,
                            bids_root=bids_root, verbose=False)
        for idx, ch in enumerate(raw.info['chs']):
            if ch['ch_name'] in nan_chs:
                assert all(np.isnan(ch['loc'][:3]))
            else:
                assert not any(np.isnan(ch['loc'][:3]))
            # channels without locations must not be auto-marked as bad
            assert ch['ch_name'] not in raw.info['bads']
def test_handle_eeg_coords_reading():
    """Test reading EEG coordinates from BIDS files.

    (Docstring previously said "iEEG"; this test writes and reads EEG.)
    Checks that an 'unknown'-frame montage skips electrodes.tsv, that a
    head-frame montage without landmarks warns, that a complete montage
    round-trips, and that a non-CapTrak coordinate system warns and
    drops the digitization.
    """
    bids_root = _TempDir()

    data_path = op.join(testing.data_path(), 'EDF')
    raw_fname = op.join(data_path, 'test_reduced.edf')
    raw = mne.io.read_raw_edf(raw_fname)

    # ensure we are writing 'eeg' data
    raw.set_channel_types({ch: 'eeg' for ch in raw.ch_names})

    # set a `random` montage
    ch_names = raw.ch_names
    elec_locs = np.random.random((len(ch_names), 3)).astype(float)
    ch_pos = dict(zip(ch_names, elec_locs))

    # # create montage in 'unknown' coordinate frame
    # # and assert coordsystem/electrodes sidecar tsv don't exist
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="unknown")
    raw.set_montage(montage)
    with pytest.warns(RuntimeWarning, match="Skipping EEG electrodes.tsv"):
        write_raw_bids(raw, bids_basename, bids_root, overwrite=True)

    coordsystem_fname = _find_matching_sidecar(bids_basename, bids_root,
                                               suffix='coordsystem.json',
                                               allow_fail=True)
    electrodes_fname = _find_matching_sidecar(bids_basename, bids_root,
                                              suffix="electrodes.tsv",
                                              allow_fail=True)
    assert coordsystem_fname is None
    assert electrodes_fname is None

    # create montage in head frame and set should result in
    # warning if landmarks not set
    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="head")
    raw.set_montage(montage)
    with pytest.warns(RuntimeWarning, match='Setting montage not possible '
                                            'if anatomical landmarks'):
        write_raw_bids(raw, bids_basename, bids_root, overwrite=True)

    montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
                                            coord_frame="head",
                                            nasion=[1, 0, 0],
                                            lpa=[0, 1, 0],
                                            rpa=[0, 0, 1])
    raw.set_montage(montage)
    write_raw_bids(raw, bids_basename, bids_root, overwrite=True)

    # obtain the sensor positions and assert ch_coords are same
    raw_test = read_raw_bids(bids_basename, bids_root, verbose=True)
    assert not object_diff(raw.info['chs'], raw_test.info['chs'])

    # modify coordinate frame to not-captrak
    coordsystem_fname = _find_matching_sidecar(bids_basename, bids_root,
                                               suffix='coordsystem.json',
                                               allow_fail=True)
    _update_sidecar(coordsystem_fname, 'EEGCoordinateSystem', 'besa')
    with pytest.warns(RuntimeWarning, match='EEG Coordinate frame is not '
                                            'accepted BIDS keyword'):
        raw_test = read_raw_bids(bids_basename, bids_root)
        assert raw_test.info['dig'] is None
def test_write_anat(_bids_validate):
    """Test writing anatomical data.

    Covers write_anat happy path (T1w + sidecar with landmark voxel
    coordinates), all argument-validation failures, and the deface
    options.

    NOTE(review): this redefines ``test_write_anat`` from earlier in the
    module and shadows it (the earlier copy additionally tested explicit
    landmarks) — consider merging or renaming one of the two.
    """
    # Get the MNE testing sample data
    import nibabel as nib
    output_path = _TempDir()
    data_path = testing.data_path()
    raw_fname = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc_raw.fif')

    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
                'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
    events_fname = op.join(data_path, 'MEG', 'sample',
                           'sample_audvis_trunc_raw-eve.fif')

    raw = mne.io.read_raw_fif(raw_fname)
    write_raw_bids(raw, bids_basename, output_path, events_data=events_fname,
                   event_id=event_id, overwrite=False)

    # Write some MRI data and supply a `trans`
    trans_fname = raw_fname.replace('_raw.fif', '-trans.fif')
    trans = mne.read_trans(trans_fname)

    # Get the T1 weighted MRI data file
    # Needs to be converted to Nifti because we only have mgh in our test base
    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')

    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                          raw=raw, trans=trans, deface=True, verbose=True,
                          overwrite=True)
    _bids_validate(output_path)

    # Validate that files are as expected
    t1w_json_path = op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.json')
    assert op.exists(t1w_json_path)
    assert op.exists(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
    with open(t1w_json_path, 'r') as f:
        t1w_json = json.load(f)
    print(t1w_json)

    # We only should have AnatomicalLandmarkCoordinates as key
    np.testing.assert_array_equal(list(t1w_json.keys()),
                                  ['AnatomicalLandmarkCoordinates'])
    # And within AnatomicalLandmarkCoordinates only LPA, NAS, RPA in that
    # order
    anat_dict = t1w_json['AnatomicalLandmarkCoordinates']
    point_list = ['LPA', 'NAS', 'RPA']
    np.testing.assert_array_equal(list(anat_dict.keys()), point_list)

    # test the actual values of the voxels (no floating points)
    for i, point in enumerate([(66, 51, 46), (41, 32, 74), (17, 53, 47)]):
        coords = anat_dict[point_list[i]]
        np.testing.assert_array_equal(np.asarray(coords, dtype=int), point)

    # BONUS: test also that we can find the matching sidecar
    side_fname = _find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
                                        output_path, 'T1w.json')
    assert op.split(side_fname)[-1] == 'sub-01_ses-01_acq-01_T1w.json'

    # Now try some anat writing that will fail
    # We already have some MRI data there
    with pytest.raises(IOError, match='`overwrite` is set to False'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, acq,
                   raw=raw, trans=trans, verbose=True, deface=False,
                   overwrite=False)

    # pass some invalid type as T1 MRI
    with pytest.raises(ValueError, match='must be a path to a T1 weighted'):
        write_anat(output_path, subject_id, 9999999999999, session_id,
                   raw=raw, trans=trans, verbose=True, deface=False,
                   overwrite=True)

    # Return without writing sidecar
    sh.rmtree(anat_dir)
    write_anat(output_path, subject_id, t1w_mgh, session_id)
    # Assert that we truly cannot find a sidecar
    with pytest.raises(RuntimeError, match='Did not find any'):
        _find_matching_sidecar('sub-01_ses-01_acq-01_T1w.nii.gz',
                               output_path, 'T1w.json')

    # trans has a wrong type
    wrong_type = 1
    match = 'transform type {} not known, must be'.format(type(wrong_type))
    with pytest.raises(ValueError, match=match):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=wrong_type, verbose=True, deface=False,
                   overwrite=True)

    # trans is a str, but file does not exist
    wrong_fname = 'not_a_trans'
    match = 'trans file "{}" not found'.format(wrong_fname)
    with pytest.raises(IOError, match=match):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=wrong_fname, verbose=True, overwrite=True)

    # However, reading trans if it is a string pointing to trans is fine
    write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
               trans=trans_fname, verbose=True, deface=False,
               overwrite=True)

    # Writing without a session does NOT yield "ses-None" anywhere
    anat_dir2 = write_anat(output_path, subject_id, t1w_mgh, None)
    assert 'ses-None' not in anat_dir2
    assert op.exists(op.join(anat_dir2, 'sub-01_T1w.nii.gz'))

    # specify trans but not raw
    with pytest.raises(ValueError, match='must be specified if `trans`'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=None,
                   trans=trans, verbose=True, deface=False, overwrite=True)

    # test deface: defacing zeroes out face voxels, so the voxel sum drops
    anat_dir = write_anat(output_path, subject_id, t1w_mgh, session_id,
                          raw=raw, trans=trans_fname, verbose=True,
                          deface=True, overwrite=True)
    t1w = nib.load(op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum = t1w.get_data().sum()

    anat_dir2 = write_anat(output_path, subject_id, t1w_mgh, session_id,
                           raw=raw, trans=trans_fname, verbose=True,
                           deface=dict(inset=25.), overwrite=True)
    t1w2 = nib.load(op.join(anat_dir2, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum2 = t1w2.get_data().sum()
    # a larger inset removes more voxels than the default deface
    assert vox_sum > vox_sum2

    anat_dir3 = write_anat(output_path, subject_id, t1w_mgh, session_id,
                           raw=raw, trans=trans_fname, verbose=True,
                           deface=dict(theta=25), overwrite=True)
    t1w3 = nib.load(op.join(anat_dir3, 'sub-01_ses-01_T1w.nii.gz'))
    vox_sum3 = t1w3.get_data().sum()
    assert vox_sum > vox_sum3

    with pytest.raises(ValueError,
                       match='The raw object, trans and raw must be '
                             'provided'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=None, verbose=True, deface=True, overwrite=True)

    with pytest.raises(ValueError, match='inset must be numeric'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(inset='small'),
                   overwrite=True)

    with pytest.raises(ValueError, match='inset should be positive'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(inset=-2.),
                   overwrite=True)

    with pytest.raises(ValueError, match='theta must be numeric'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(theta='big'),
                   overwrite=True)

    with pytest.raises(ValueError,
                       match='theta should be between 0 and 90 degrees'):
        write_anat(output_path, subject_id, t1w_mgh, session_id, raw=raw,
                   trans=trans, verbose=True, deface=dict(theta=100),
                   overwrite=True)
def get_head_mri_trans(bids_basename, bids_root):
    """Produce transformation matrix from MEG and MRI landmark points.

    Will attempt to read the landmarks of Nasion, LPA, and RPA from the
    sidecar files of (i) the MEG and (ii) the T1 weighted MRI data. The two
    sets of points will then be used to calculate a transformation matrix
    from head coordinates to MRI coordinates.

    Parameters
    ----------
    bids_basename : str | BIDSPath
        The base filename of the BIDS-compatible file. Typically, this can
        be generated using :func:`mne_bids.make_bids_basename`.
    bids_root : str | pathlib.Path
        Path to root of the BIDS folder

    Returns
    -------
    trans : instance of mne.transforms.Transform
        The data transformation matrix from head to MRI coordinates

    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    # convert to BIDS Path
    if isinstance(bids_basename, str):
        params = _parse_bids_filename(bids_basename, False)
        bids_basename = BIDSPath(subject=params.get('sub'),
                                 session=params.get('ses'),
                                 recording=params.get('rec'),
                                 acquisition=params.get('acq'),
                                 processing=params.get('proc'),
                                 space=params.get('space'),
                                 run=params.get('run'),
                                 task=params.get('task'))

    # Get the sidecar file for MRI landmarks
    bids_fname = bids_basename.get_bids_fname(kind='meg',
                                              bids_root=bids_root)
    t1w_json_path = _find_matching_sidecar(bids_fname, bids_root, 'T1w.json')

    # Get MRI landmarks from the JSON sidecar
    with open(t1w_json_path, 'r') as f:
        t1w_json = json.load(f)
    mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())
    # Order as (LPA, NAS, RPA); missing keys become NaN and trigger the
    # RuntimeError below.
    mri_landmarks = np.asarray(
        (mri_coords_dict.get('LPA', np.nan),
         mri_coords_dict.get('NAS', np.nan),
         mri_coords_dict.get('RPA', np.nan)))
    if np.isnan(mri_landmarks).any():
        raise RuntimeError(
            'Could not parse T1w sidecar file: "{}"\n\n'
            'The sidecar file MUST contain a key '
            '"AnatomicalLandmarkCoordinates" pointing to a '
            'dict with keys "LPA", "NAS", "RPA". '
            'Yet, the following structure was found:\n\n"{}"'.format(
                t1w_json_path, t1w_json))

    # The MRI landmarks are in "voxels". We need to convert them to the
    # neuromag RAS coordinate system in order to compare them with MEG
    # landmarks
    # see also: `mne_bids.write.write_anat`
    t1w_path = t1w_json_path.replace('.json', '.nii')
    if not op.exists(t1w_path):
        t1w_path += '.gz'  # perhaps it is .nii.gz? ... else raise an error
    if not op.exists(t1w_path):
        raise RuntimeError(
            'Could not find the T1 weighted MRI associated '
            'with "{}". Tried: "{}" but it does not exist.'.format(
                t1w_json_path, t1w_path))
    t1_nifti = nib.load(t1w_path)
    # Convert to MGH format to access vox2ras method
    t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)

    # now extract transformation matrix and put back to RAS coordinates of MRI
    vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()
    mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
    # convert from mm (scanner units) to m (MNE convention)
    mri_landmarks = mri_landmarks * 1e-3

    # Get MEG landmarks from the raw file
    _, ext = _parse_ext(bids_basename)
    extra_params = None
    if ext == '.fif':
        extra_params = dict(allow_maxshield=True)

    raw = read_raw_bids(bids_basename=bids_basename, bids_root=bids_root,
                        extra_params=extra_params, kind='meg')
    meg_coords_dict = _extract_landmarks(raw.info['dig'])
    meg_landmarks = np.asarray((meg_coords_dict['LPA'],
                                meg_coords_dict['NAS'],
                                meg_coords_dict['RPA']))

    # Given the two sets of points, fit the transform
    trans_fitted = fit_matched_points(src_pts=meg_landmarks,
                                      tgt_pts=mri_landmarks)
    trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
    return trans
def read_raw_bids(bids_basename, bids_root, kind=None, extra_params=None,
                  verbose=True):
    """Read BIDS compatible data.

    Will attempt to read associated events.tsv and channels.tsv files
    to populate the returned raw object with raw.annotations and
    raw.info['bads'].

    Parameters
    ----------
    bids_basename : str | BIDSPath
        The base filename of the BIDS compatible files. Typically, this can
        be generated using :func:`mne_bids.make_bids_basename`.
    bids_root : str | pathlib.Path
        Path to root of the BIDS folder
    kind : str | None
        The kind of recording to read. If ``None`` and only one kind (e.g.,
        only EEG or only MEG data) is present in the dataset, it will be
        selected automatically.
    extra_params : None | dict
        Extra parameters to be passed to MNE read_raw_* functions.
        If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.
    verbose : bool
        The verbosity level.

    Returns
    -------
    raw : instance of Raw
        The data as MNE-Python Raw object.

    Raises
    ------
    RuntimeError
        If multiple recording kinds are present in the dataset, but
        ``kind=None``.
    RuntimeError
        If more than one data files exist for the specified recording.
    RuntimeError
        If no data file in a supported format can be located.
    ValueError
        If the specified ``kind`` cannot be found in the dataset.

    """
    # convert to BIDS Path
    if isinstance(bids_basename, str):
        params = _parse_bids_filename(bids_basename, verbose)
        bids_basename = BIDSPath(subject=params.get('sub'),
                                 session=params.get('ses'),
                                 recording=params.get('rec'),
                                 acquisition=params.get('acq'),
                                 processing=params.get('proc'),
                                 space=params.get('space'),
                                 run=params.get('run'),
                                 task=params.get('task'))

    sub = bids_basename.subject
    ses = bids_basename.session
    acq = bids_basename.acquisition

    if kind is None:
        # guess the kind from what is present on disk (raises if ambiguous)
        kind = _infer_kind(bids_basename=bids_basename, bids_root=bids_root,
                           sub=sub, ses=ses)

    data_dir = make_bids_folders(subject=sub, session=ses, kind=kind,
                                 make_dir=False)
    bids_fname = bids_basename.get_bids_fname(kind=kind,
                                              bids_root=bids_root)

    if op.splitext(bids_fname)[1] == '.pdf':
        # BTi/4D data is stored in a folder; locate the raw file + config
        bids_raw_folder = op.join(bids_root, data_dir,
                                  f'{bids_basename}_{kind}')
        bids_fpath = glob.glob(op.join(bids_raw_folder, 'c,rf*'))[0]
        config = op.join(bids_raw_folder, 'config')
    else:
        bids_fpath = op.join(bids_root, data_dir, bids_fname)
        config = None

    if extra_params is None:
        extra_params = dict()
    raw = _read_raw(bids_fpath, electrode=None, hsp=None, hpi=None,
                    config=config, verbose=None, **extra_params)

    # Try to find an associated events.tsv to get information about the
    # events in the recorded data
    events_fname = _find_matching_sidecar(bids_fname, bids_root,
                                          'events.tsv', allow_fail=True)
    if events_fname is not None:
        raw = _handle_events_reading(events_fname, raw)

    # Try to find an associated channels.tsv to get information about the
    # status and type of present channels
    channels_fname = _find_matching_sidecar(bids_fname, bids_root,
                                            'channels.tsv', allow_fail=True)
    if channels_fname is not None:
        raw = _handle_channels_reading(channels_fname, bids_fname, raw)

    # Try to find an associated electrodes.tsv and coordsystem.json
    # to get information about the status and type of present channels
    search_modifier = f'acq-{acq}' if acq else ''
    elec_suffix = f'{search_modifier}*_electrodes.tsv'
    coord_suffix = f'{search_modifier}*_coordsystem.json'
    electrodes_fname = _find_matching_sidecar(bids_fname, bids_root,
                                              suffix=elec_suffix,
                                              allow_fail=True)
    coordsystem_fname = _find_matching_sidecar(bids_fname, bids_root,
                                               suffix=coord_suffix,
                                               allow_fail=True)
    if electrodes_fname is not None:
        if coordsystem_fname is None:
            # FIX: added missing space before the basename ("for{}" -> "for {}")
            raise RuntimeError("BIDS mandates that the coordsystem.json "
                               "should exist if electrodes.tsv does. "
                               "Please create coordsystem.json for "
                               "{}".format(bids_basename))
        if kind in ['meg', 'eeg', 'ieeg']:
            raw = _read_dig_bids(electrodes_fname, coordsystem_fname,
                                 raw, kind, verbose)

    # Try to find an associated sidecar.json to get information about the
    # recording snapshot
    sidecar_fname = _find_matching_sidecar(bids_fname, bids_root,
                                           '{}.json'.format(kind),
                                           allow_fail=True)
    if sidecar_fname is not None:
        raw = _handle_info_reading(sidecar_fname, raw, verbose=verbose)

    # read in associated subject info from participants.tsv
    participants_tsv_fpath = op.join(bids_root, 'participants.tsv')
    subject = f"sub-{bids_basename.subject}"
    if op.exists(participants_tsv_fpath):
        raw = _handle_participants_reading(participants_tsv_fpath, raw,
                                           subject, verbose=verbose)
    else:
        # FIX: corrected typo "particpants.tsv" -> "participants.tsv"
        warn("Participants file not found for {}... Not reading "
             "in any participants.tsv data.".format(bids_fname))

    return raw
def read_raw_bids(bids_fname, bids_root, extra_params=None, verbose=True):
    """Read BIDS compatible data.

    Will attempt to read associated events.tsv and channels.tsv files
    to populate the returned raw object with raw.annotations and
    raw.info['bads'].

    Parameters
    ----------
    bids_fname : str
        Full name of the data file
    bids_root : str
        Path to root of the BIDS folder
    extra_params : None | dict
        Extra parameters to be passed to MNE read_raw_* functions.
        If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.
    verbose : bool
        The verbosity level

    Returns
    -------
    raw : instance of Raw
        The data as MNE-Python Raw object.

    """
    # Full path to data file is needed so that mne-bids knows
    # what is the modality -- meg, eeg, ieeg to read
    bids_fname = op.basename(bids_fname)
    bids_basename = '_'.join(bids_fname.split('_')[:-1])
    kind = bids_fname.split('_')[-1].split('.')[0]
    _, ext = _parse_ext(bids_fname)

    # Get the BIDS parameters (=entities)
    params = _parse_bids_filename(bids_basename, verbose)

    # Construct the path to the "kind" where the data is stored
    # Subject is mandatory ...
    kind_dir = op.join(bids_root, 'sub-{}'.format(params['sub']))
    # Session is optional ...
    if params['ses'] is not None:
        kind_dir = op.join(kind_dir, 'ses-{}'.format(params['ses']))
    # Kind is mandatory
    kind_dir = op.join(kind_dir, kind)

    config = None
    if ext in ('.fif', '.ds', '.vhdr', '.edf', '.bdf', '.set', '.sqd',
               '.con'):
        bids_fpath = op.join(kind_dir,
                             bids_basename + '_{}{}'.format(kind, ext))
    elif ext == '.pdf':
        # BTi/4D data lives in a folder; find the raw file + config inside
        bids_raw_folder = op.join(kind_dir,
                                  bids_basename + '_{}'.format(kind))
        bids_fpath = glob.glob(op.join(bids_raw_folder, 'c,rf*'))[0]
        config = op.join(bids_raw_folder, 'config')
    else:
        # FIX: previously an unrecognized extension fell through and the
        # _read_raw call below failed with UnboundLocalError on bids_fpath.
        raise ValueError('Unsupported file extension "{}" for file '
                         '"{}".'.format(ext, bids_fname))

    if extra_params is None:
        extra_params = dict()
    raw = _read_raw(bids_fpath, electrode=None, hsp=None, hpi=None,
                    config=config, verbose=None, **extra_params)

    # Try to find an associated events.tsv to get information about the
    # events in the recorded data
    events_fname = _find_matching_sidecar(bids_fname, bids_root,
                                          'events.tsv', allow_fail=True)
    if events_fname is not None:
        raw = _handle_events_reading(events_fname, raw)

    # Try to find an associated channels.tsv to get information about the
    # status and type of present channels
    channels_fname = _find_matching_sidecar(bids_fname, bids_root,
                                            'channels.tsv', allow_fail=True)
    if channels_fname is not None:
        raw = _handle_channels_reading(channels_fname, bids_fname, raw)

    # Try to find an associated electrodes.tsv and coordsystem.json
    # to get information about the status and type of present channels
    electrodes_fname = _find_matching_sidecar(bids_fname, bids_root,
                                              'electrodes.tsv',
                                              allow_fail=True)
    coordsystem_fname = _find_matching_sidecar(bids_fname, bids_root,
                                               'coordsystem.json',
                                               allow_fail=True)
    if electrodes_fname is not None:
        if coordsystem_fname is None:
            # FIX: added missing space before the basename ("for{}" -> "for {}")
            raise RuntimeError("BIDS mandates that the coordsystem.json "
                               "should exist if electrodes.tsv does. "
                               "Please create coordsystem.json for "
                               "{}".format(bids_basename))

        # Get MRI landmarks from the JSON sidecar
        with open(coordsystem_fname, 'r') as fin:
            coordsystem_json = json.load(fin)

        # Get coordinate frames that electrode coordinates are in
        if kind == "meg":
            coord_frame = coordsystem_json['MEGCoordinateSystem']
        elif kind == "ieeg":
            coord_frame = coordsystem_json['iEEGCoordinateSystem']
        else:  # noqa
            raise RuntimeError("Kind {} not supported yet for "
                               "coordsystem.json and "
                               "electrodes.tsv.".format(kind))
        # read in electrode coordinates and attach to raw
        raw = _handle_electrodes_reading(electrodes_fname, coord_frame,
                                         raw, verbose)

    # Try to find an associated sidecar.json to get information about the
    # recording snapshot
    sidecar_fname = _find_matching_sidecar(bids_fname, bids_root,
                                           '{}.json'.format(kind),
                                           allow_fail=True)
    if sidecar_fname is not None:
        raw = _handle_info_reading(sidecar_fname, raw, verbose=verbose)

    return raw
def read_raw_bids(bids_fname, bids_root, extra_params=None, verbose=True):
    """Read BIDS compatible data.

    Will attempt to read associated events.tsv and channels.tsv files
    to populate the returned raw object with raw.annotations and
    raw.info['bads'].

    Parameters
    ----------
    bids_fname : str
        Full name of the data file
    bids_root : str
        Path to root of the BIDS folder
    extra_params : None | dict
        Extra parameters to be passed to MNE read_raw_* functions.
        If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.
    verbose : bool
        The verbosity level

    Returns
    -------
    raw : instance of Raw
        The data as MNE-Python Raw object.

    """
    # Full path to data file is needed so that mne-bids knows
    # what is the modality -- meg, eeg, ieeg to read
    bids_fname = op.basename(bids_fname)
    bids_basename = '_'.join(bids_fname.split('_')[:-1])
    kind = bids_fname.split('_')[-1].split('.')[0]
    _, ext = _parse_ext(bids_fname)

    # Get the BIDS parameters (=entities)
    params = _parse_bids_filename(bids_basename, verbose)

    # Construct the path to the "kind" where the data is stored
    # Subject is mandatory ...
    kind_dir = op.join(bids_root, 'sub-{}'.format(params['sub']))
    # Session is optional ...
    if params['ses'] is not None:
        kind_dir = op.join(kind_dir, 'ses-{}'.format(params['ses']))
    # Kind is mandatory
    kind_dir = op.join(kind_dir, kind)

    config = None
    if ext in ('.fif', '.ds', '.vhdr', '.edf', '.bdf', '.set', '.sqd',
               '.con'):
        bids_fpath = op.join(kind_dir,
                             bids_basename + '_{}{}'.format(kind, ext))
    elif ext == '.pdf':
        # BTi/4D data lives in a folder; find the raw file + config inside
        bids_raw_folder = op.join(kind_dir,
                                  bids_basename + '_{}'.format(kind))
        bids_fpath = glob.glob(op.join(bids_raw_folder, 'c,rf*'))[0]
        config = op.join(bids_raw_folder, 'config')
    else:
        # FIX: previously an unrecognized extension fell through and the
        # _read_raw call below failed with UnboundLocalError on bids_fpath.
        raise ValueError('Unsupported file extension "{}" for file '
                         '"{}".'.format(ext, bids_fname))

    if extra_params is None:
        extra_params = dict()
    raw = _read_raw(bids_fpath, electrode=None, hsp=None, hpi=None,
                    config=config, verbose=None, **extra_params)

    # Try to find an associated events.tsv to get information about the
    # events in the recorded data
    events_fname = _find_matching_sidecar(bids_fname, bids_root,
                                          'events.tsv', allow_fail=True)
    if events_fname is not None:
        raw = _handle_events_reading(events_fname, raw)

    # Try to find an associated channels.tsv to get information about the
    # status and type of present channels
    channels_fname = _find_matching_sidecar(bids_fname, bids_root,
                                            'channels.tsv', allow_fail=True)
    if channels_fname is not None:
        raw = _handle_channels_reading(channels_fname, bids_fname, raw)

    return raw
def read_raw_bids(bids_fname, bids_root, allow_maxshield=False, verbose=True): """Read BIDS compatible data. Will attempt to read associated events.tsv and channels.tsv files to populate the returned raw object with raw.annotations and raw.info['bads']. Parameters ---------- bids_fname : str Full name of the data file bids_root : str Path to root of the BIDS folder allow_maxshield : bool | str (default False) If True, allow loading of data that has been recorded with internal active compensation (MaxShield). Data recorded with MaxShield should generally not be loaded directly, but should first be processed using SSS/tSSS to remove the compensation signals that may also affect brain activity. Can also be “yes” to load without eliciting a warning. verbose : bool The verbosity level Returns ------- raw : instance of Raw The data as MNE-Python Raw object. """ # Full path to data file is needed so that mne-bids knows # what is the modality -- meg, eeg, ieeg to read bids_fname = op.basename(bids_fname) bids_basename = '_'.join(bids_fname.split('_')[:-1]) kind = bids_fname.split('_')[-1].split('.')[0] _, ext = _parse_ext(bids_fname) # Get the BIDS parameters (=entities) params = _parse_bids_filename(bids_basename, verbose) # Construct the path to the "kind" where the data is stored # Subject is mandatory ... kind_dir = op.join(bids_root, 'sub-{}'.format(params['sub'])) # Session is optional ... 
if params['ses'] is not None: kind_dir = op.join(kind_dir, 'ses-{}'.format(params['ses'])) # Kind is mandatory kind_dir = op.join(kind_dir, kind) config = None if ext in ('.fif', '.ds', '.vhdr', '.edf', '.bdf', '.set', '.sqd', '.con'): bids_fpath = op.join(kind_dir, bids_basename + '_{}{}'.format(kind, ext)) elif ext == '.pdf': bids_raw_folder = op.join(kind_dir, bids_basename + '_{}'.format(kind)) bids_fpath = glob.glob(op.join(bids_raw_folder, 'c,rf*'))[0] config = op.join(bids_raw_folder, 'config') raw = _read_raw(bids_fpath, electrode=None, hsp=None, hpi=None, config=config, allow_maxshield=allow_maxshield, verbose=None) # Try to find an associated events.tsv to get information about the # events in the recorded data events_fname = _find_matching_sidecar(bids_fname, bids_root, 'events.tsv', allow_fail=True) if events_fname is not None: raw = _handle_events_reading(events_fname, raw) # Try to find an associated channels.tsv to get information about the # status and type of present channels channels_fname = _find_matching_sidecar(bids_fname, bids_root, 'channels.tsv', allow_fail=True) if channels_fname is not None: raw = _handle_channels_reading(channels_fname, bids_fname, raw) return raw