import numpy as np
import pytest

from mne.channels import make_standard_montage
from mne.transforms import (_angle_between_quats, _get_trans,
                            compute_native_head_t, rot_to_quat)


@pytest.mark.parametrize('kind', ('octamon', 'brite23'))
def test_set_montage_artinis_fsaverage(kind):
    """Test that artinis montages match fsaverage's head<->MRI transform."""
    # Compare OctaMon and Brite23 to fsaverage
    trans_fs, _ = _get_trans('fsaverage')
    montage = make_standard_montage(f'artinis-{kind}')
    trans = compute_native_head_t(montage)
    assert trans['to'] == trans_fs['to']
    assert trans['from'] == trans_fs['from']
    translation = 1000 * np.linalg.norm(trans['trans'][:3, 3] -
                                        trans_fs['trans'][:3, 3])
    assert 0 < translation < 1  # mm
    rotation = np.rad2deg(
        _angle_between_quats(rot_to_quat(trans['trans'][:3, :3]),
                             rot_to_quat(trans_fs['trans'][:3, :3])))
    assert 0 < rotation < 1  # degrees
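
# A minimal usage sketch of the API exercised by the test above, in the
# ``# >>>`` doctest-comment style used elsewhere in this codebase. Here
# ``raw`` is a hypothetical Raw object whose fNIRS channel names match the
# Artinis template; it is not part of the test suite:
#
# >>> montage = make_standard_montage('artinis-octamon')
# >>> raw.set_montage(montage)
# >>> trans = compute_native_head_t(montage)  # native (MRI) -> head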
# >>> img = nibabel.load(fname_T1)
# >>> vox2mri_t = img.header.get_vox2ras_tkr()  # voxel -> mri trans
# >>> pos_mri = mne.transforms.apply_trans(vox2mri_t, pos_vox)
# >>> pos_mri /= 1000.  # mm -> m
#
# You can also verify that these are correct (or manually convert voxels
# to MRI coords) by looking at the points in Freeview or tkmedit.

dig_montage = read_custom_montage(fname_mon, head_size=None, coord_frame='mri')
dig_montage.plot()

##############################################################################
# We can then get our transformation from the MRI coordinate frame (where our
# points are defined) to the head coordinate frame from the object.

trans = compute_native_head_t(dig_montage)
print(trans)  # should be mri->head, as the "native" space here is MRI

##############################################################################
# Let's apply this digitization to our dataset, and in the process
# automatically convert our locations to the head coordinate frame, as
# shown by :meth:`~mne.io.Raw.plot_sensors`.

raw = mne.io.read_raw_fif(fname_raw)
raw.pick_types(meg=False, eeg=True, stim=True, exclude=()).load_data()
raw.set_montage(dig_montage)
raw.plot_sensors(show_names=True)

##############################################################################
# Now we can do standard sensor-space operations like making joint plots of
# evoked data.
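
# A minimal sketch of such a joint plot; the epoching parameters below are
# illustrative assumptions, not values taken from this dataset:

events = mne.find_events(raw)  # uses the stim channel(s) kept above
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, baseline=(None, 0))
evoked = epochs.average()  # average across all event types
evoked.plot_joint()  # butterfly traces with topographies at peak times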