Example #1
def detect_movement(self, thr_mov=.01, plot=True, overwrite=False,
                    save=True):
    from mne.transforms import read_trans
    fname = self.subject + '_' + self.experiment + '_mov.txt'
    out_csv_f = op.join(self.out_annot, fname)
    fname_t = self.subject + '_' + self.experiment + '_dev2head-trans.fif'
    out_csv_f_t = op.join(self.out_annot, fname_t)
    if op.exists(out_csv_f) and not overwrite:
        mov_annot = read_annotations(out_csv_f)
        print('Reading from file, mov segments are:', mov_annot)
        print('Reading from file, dev to head transformation')
        dev_head_t = read_trans(out_csv_f_t)
    else:
        print('Calculating head pos')
        pos = mne.chpi._calculate_head_pos_ctf(self.raw, gof_limit=-1)
        mov_annot, hpi_disp, dev_head_t = annotate_motion(self.raw, pos,
                                                          thr=thr_mov)
        if plot:
            plt.figure()
            plt.plot(hpi_disp)
            plt.axhline(y=thr_mov, color='r')
            plt.show(block=True)
        if save:
            mov_annot.save(out_csv_f)
            dev_head_t.save(out_csv_f_t)
        # fig.savefig(out_csv_f[:-4] + '.png')
    old_annot = self.raw.annotations  # orig_time can't be combined with None
    self.raw.set_annotations(mov_annot)
    self.raw.set_annotations(self.raw.annotations + old_annot)
    self.raw.info['dev_head_t_old'] = self.raw.info['dev_head_t']
    self.raw.info['dev_head_t'] = dev_head_t
    self.annot_movement = mov_annot
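The method above leans on the private helper mne.chpi._calculate_head_pos_ctf and a project-specific annotate_motion function. A rough sketch of the same idea using only public MNE functions might look like this (assuming raw is a CTF recording with continuous HPI; the 1 cm limit mirrors thr_mov above, and these function names come from the current public API, not from this repository):

import mne
import matplotlib.pyplot as plt

# Continuous head position from the cHPI coils (public API, CTF systems)
chpi_locs = mne.chpi.extract_chpi_locs_ctf(raw)
pos = mne.chpi.compute_head_pos(raw.info, chpi_locs)

# Annotate segments where the head drifts more than ~1 cm from the mean position
annot, disp = mne.preprocessing.annotate_movement(raw, pos,
                                                  mean_distance_limit=0.01)
raw.set_annotations(raw.annotations + annot)

# Average device-to-head transform that could replace raw.info['dev_head_t']
dev_head_t = mne.preprocessing.compute_average_dev_head_t(raw, pos)

plt.plot(disp)
plt.axhline(y=0.01, color='r')
plt.show()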
Example #2
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
    """Compute dipole depth"""
    trans = read_trans(fname_trans)
    trans = _get_mri_head_t(trans)[0]
    bem = read_bem_solution(fname_bem)
    surf = _bem_find_surface(bem, 'inner_skull')
    points = surf['rr']
    points = apply_trans(trans['trans'], points)
    depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
    return np.ravel(depth)
Example #4
def compute_trans(pos, trans):
    pos = pos.copy()
    if isinstance(trans, str):
        if trans.endswith('fif'):
            trans = read_trans(trans)['trans']
        else:
            with open(trans, 'r') as matfile:
                lines = matfile.read().strip().split("\n")
                trans = [l.split() for l in lines]
                trans = np.array(trans).astype(float)

    pos = apply_affine(trans, pos)
    return pos
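A brief usage sketch for compute_trans (the point array and file names are made up; apply_affine is presumably nibabel.affines.apply_affine and numpy is assumed to be imported as np in the original module):

import numpy as np

points = np.array([[0.01, 0.02, 0.03],
                   [0.00, 0.05, 0.04]])                   # positions in meters
points_fif = compute_trans(points, 'sample-trans.fif')    # 4x4 read from a FIF file
points_txt = compute_trans(points, 'affine.txt')          # whitespace-separated 4x4 matrix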
Example #5
def dip_depth(dip, fname_trans, subject, subjects_dir):
    trans = read_trans(fname_trans)
    trans = _get_trans(trans)[0]
    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
    fname = os.path.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
    points, faces = read_surface(fname)
    points = apply_trans(trans['trans'], points * 1e-3)

    pos = dip.pos
    ori = dip.ori

    from sklearn.neighbors import NearestNeighbors
    nn = NearestNeighbors()
    nn.fit(points)
    depth, idx = nn.kneighbors(pos, 1, return_distance=True)
    idx = np.ravel(idx)

    direction = pos - points[idx]
    direction /= np.sqrt(np.sum(direction**2, axis=1))[:, None]
    ori /= np.sqrt(np.sum(ori**2, axis=1))[:, None]

    radiality = np.abs(np.sum(ori * direction, axis=1))
    return np.ravel(depth), radiality
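A hedged usage sketch for dip_depth: fit a dipole with the public mne.fit_dipole API and pass it in. Here evoked, noise_cov, the file names and the 'sample' subject are placeholders, not values from the original code:

import mne

dip, _ = mne.fit_dipole(evoked, noise_cov, fname_bem, fname_trans)
depth, radiality = dip_depth(dip, fname_trans, 'sample', subjects_dir)
# depth: distance (m) from each dipole to the nearest inner-skull vertex
# radiality: |cos| of the angle between the dipole orientation and the
#            direction from that vertex to the dipole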
Example #6
def test_coregistration(scale_mode, ref_scale, grow_hair, fiducials,
                        fid_match):
    """Test automated coregistration."""
    subject = 'sample'
    if fiducials is None:
        fiducials, coord_frame = read_fiducials(fid_fname)
        assert coord_frame == FIFF.FIFFV_COORD_MRI
    info = read_info(raw_fname)
    for d in info['dig']:
        d['r'] = d['r'] * ref_scale
    trans = read_trans(trans_fname)
    coreg = Coregistration(info,
                           subject=subject,
                           subjects_dir=subjects_dir,
                           fiducials=fiducials)
    assert np.allclose(coreg._last_parameters, coreg._parameters)
    coreg.set_fid_match(fid_match)
    default_params = list(coreg._default_parameters)
    coreg.set_rotation(default_params[:3])
    coreg.set_translation(default_params[3:6])
    coreg.set_scale(default_params[6:9])
    coreg.set_grow_hair(grow_hair)
    coreg.set_scale_mode(scale_mode)
    # Identity transform
    errs_id = coreg.compute_dig_mri_distances()
    is_scaled = ref_scale != [1., 1., 1.]
    id_max = 0.03 if is_scaled and scale_mode == '3-axis' else 0.02
    assert 0.005 < np.median(errs_id) < id_max
    # Fiducial transform + scale
    coreg.fit_fiducials(verbose=True)
    assert coreg._extra_points_filter is None
    coreg.omit_head_shape_points(distance=0.02)
    assert coreg._extra_points_filter is not None
    errs_fid = coreg.compute_dig_mri_distances()
    assert_array_less(0, errs_fid)
    if is_scaled or scale_mode is not None:
        fid_max = 0.05
        fid_med = 0.02
    else:
        fid_max = 0.03
        fid_med = 0.01
    assert_array_less(errs_fid, fid_max)
    assert 0.001 < np.median(errs_fid) < fid_med
    assert not np.allclose(coreg._parameters, default_params)
    coreg.omit_head_shape_points(distance=-1)
    coreg.omit_head_shape_points(distance=5. / 1000)
    assert coreg._extra_points_filter is not None
    # ICP transform + scale
    coreg.fit_icp(verbose=True)
    assert isinstance(coreg.trans, Transform)
    errs_icp = coreg.compute_dig_mri_distances()
    assert_array_less(0, errs_icp)
    if is_scaled or scale_mode == '3-axis':
        icp_max = 0.015
    else:
        icp_max = 0.01
    assert_array_less(errs_icp, icp_max)
    assert 0.001 < np.median(errs_icp) < 0.004
    assert np.rad2deg(
        _angle_between_quats(rot_to_quat(coreg.trans['trans'][:3, :3]),
                             rot_to_quat(trans['trans'][:3, :3]))) < 13
    if scale_mode is None:
        atol = 1e-7
    else:
        atol = 0.35
    assert_allclose(coreg._scale, ref_scale, atol=atol)
    coreg.reset()
    assert_allclose(coreg._parameters, default_params)
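For context, a minimal sketch of the coregistration workflow this test exercises, built from the public Coregistration methods used above (raw_fname and subjects_dir are placeholders in the style of the MNE sample dataset):

import mne
from mne.coreg import Coregistration

info = mne.io.read_info(raw_fname)
coreg = Coregistration(info, subject='sample', subjects_dir=subjects_dir,
                       fiducials='estimated')
coreg.fit_fiducials(verbose=True)                  # coarse fit on LPA/nasion/RPA
coreg.omit_head_shape_points(distance=5. / 1000)   # drop head-shape points > 5 mm away
coreg.fit_icp(n_iterations=20, verbose=True)       # refine with ICP
errs = coreg.compute_dig_mri_distances()           # residual digitization errors (m)
trans = coreg.trans                                # fitted head<->MRI Transform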
Example #7
def select_vertices_in_sensor_range(inst,
                                    dist,
                                    info=None,
                                    picks=None,
                                    trans=None,
                                    indices=False,
                                    verbose=None):
    """Find vertices within given distance to a sensor.

    Parameters
    ----------
    inst : instance of Forward | instance of SourceSpaces
        The object to select vertices from.
    dist : float
        The maximum allowed distance between a vertex and the nearest sensor.
        All vertices for which the distance to the nearest sensor exceeds this
        limit are discarded.
    info : instance of Info | None
        The info structure that contains information about the channels. Only
        needs to be specified if the object to select vertices from is an
        instance of SourceSpaces.
    picks : array-like of int | None
        Indices of sensors to include in the search for the nearest sensor. If
        ``None``, the default, only MEG channels are used.
    trans : str | instance of Transform | None
        Either the full path to the head<->MRI transform ``*-trans.fif`` file
        produced during coregistration, or the Transformation itself. If trans
        is None, an identity matrix is assumed. Only needed when ``inst`` is a
        source space in MRI coordinates.
    indices : bool
        If ``True``, return vertex indices instead of vertex numbers. Defaults
        to ``False``.
    verbose : bool | str | int | None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    vertices : pair of lists | list of int
        Either a list of vertex numbers for the left and right hemisphere (if
        ``indices==False``) or a single list with vertex indices.

    See Also
    --------
    restrict_forward_to_vertices : restrict Forward to the given vertices
    restrict_src_to_vertices : restrict SourceSpaces to the given vertices
    """

    if isinstance(inst, Forward):
        info = inst['info']
        src = inst['src']
    elif isinstance(inst, SourceSpaces):
        src = inst
        if info is None:
            raise ValueError('You need to specify an Info object with '
                             'information about the channels.')

    # Load the head<->MRI transform if necessary
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        if trans is None:
            raise ValueError('Source space is in MRI coordinates, but no '
                             'head<->MRI transform was given. Please specify '
                             'the full path to the appropriate *-trans.fif '
                             'file as the "trans" parameter.')
        if isinstance(trans, string_types):
            candidates = read_trans(trans, return_all=True)
            err = None
            for candidate in candidates:  # we got at least 1
                try:
                    trans = _ensure_trans(candidate, 'head', 'mri')
                except Exception as exp:
                    err = exp  # keep a reference; 'exp' is cleared after the except block
                else:
                    break
            else:
                raise err

        src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri'))
        print('Transform!')
    else:
        src_trans = Transform('head', 'head')  # Identity transform

    dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head')

    if picks is None:
        picks = pick_types(info, meg=True)
        if len(picks) > 0:
            logger.info('Using MEG channels')
        else:
            logger.info('Using EEG channels')
            picks = pick_types(info, eeg=True)

    src_pos = np.vstack([
        apply_trans(src_trans, s['rr'][s['inuse'].astype(bool)])
        for s in src
    ])

    sensor_pos = []
    for ch in picks:
        # MEG channels are in device coordinates, translate them to head
        if channel_type(info, ch) in ['mag', 'grad']:
            sensor_pos.append(
                apply_trans(dev_to_head, info['chs'][ch]['loc'][:3]))
        else:
            sensor_pos.append(info['chs'][ch]['loc'][:3])
    sensor_pos = np.array(sensor_pos)

    # Find vertices that are within range of a sensor. We use a KD-tree for
    # speed.
    logger.info('Finding vertices within sensor range...')
    tree = cKDTree(sensor_pos)
    distances, _ = tree.query(src_pos, distance_upper_bound=dist)

    # Vertices out of range are flagged as np.inf
    src_sel = np.isfinite(distances)
    logger.info('[done]')

    if indices:
        return np.flatnonzero(src_sel)
    else:
        n_lh_verts = src[0]['nuse']
        lh_sel, rh_sel = src_sel[:n_lh_verts], src_sel[n_lh_verts:]
        vert_lh = src[0]['vertno'][lh_sel]
        vert_rh = src[1]['vertno'][rh_sel]
        return [vert_lh, vert_rh]
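A short usage sketch for the function above; fwd, src, info and the trans file name are placeholders:

# From a Forward solution (sensor info travels with it)
vert_lh, vert_rh = select_vertices_in_sensor_range(fwd, dist=0.05)
sel_idx = select_vertices_in_sensor_range(fwd, dist=0.05, indices=True)

# From a SourceSpaces object in MRI coordinates
verts = select_vertices_in_sensor_range(src, dist=0.05, info=info,
                                        trans='sample-trans.fif')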
Example #8
import os
import os.path as op
import sys

from mne.io import read_raw_ctf, read_info, read_fiducials
from mne.coreg import _fiducial_coords, fit_matched_points
from mne.transforms import read_trans, write_trans, Transform

if len(sys.argv) != 2:
    print("usage: {} subject".format(sys.argv[0]))
    sys.exit(1)

subject = sys.argv[1]

try:
    FShome = os.environ['FREESURFER_HOME']
except KeyError:
    print("You must set the FREESURFER_HOME environment variable!")
    sys.exit(1)

try:
    Subjdir = os.environ['SUBJECTS_DIR']
except KeyError:
    Subjdir = op.join(FShome, "subjects")
    print("Note: Using the default SUBJECTS_DIR:", Subjdir)

name = op.join(Subjdir, subject, "bem", "{}-fiducials.fif".format(subject))
pts, cframe = read_fiducials(name)
fids = _fiducial_coords(pts)
print(fids)

name = op.join(Subjdir, subject, "bem", "{}-trans.fif".format(subject))
t = read_trans(name)
print(t)
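The script imports _fiducial_coords and fit_matched_points but the portion shown never uses them. A hedged sketch of their usual role, estimating a head->MRI transform from matched fiducials, could look like this (raw_fname and the output file name are placeholders):

info = read_info(raw_fname)
head_fids = _fiducial_coords(info['dig'])     # LPA, nasion, RPA in head coordinates
mri_fids = fids                               # the MRI fiducials printed above

# Least-squares rigid fit returning a 4x4 head->MRI affine
trans_mat = fit_matched_points(src_pts=head_fids, tgt_pts=mri_fids, out='trans')
head_mri_t = Transform('head', 'mri', trans_mat)
write_trans(op.join(Subjdir, subject, "bem",
                    "{}-fit-trans.fif".format(subject)), head_mri_t)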
def get_sensor_pos_from_fwd(inst, info=None, picks=None, trans=None):
    import numpy as np
    from mne import SourceSpaces, Forward
    from mne.io.constants import FIFF
    from six import string_types
    from mne.transforms import (read_trans, _ensure_trans, invert_transform,
                                Transform, apply_trans)
    from mne.io.pick import channel_type, pick_types

    if isinstance(inst, Forward):
        info = inst['info']
        src = inst['src']
    elif isinstance(inst, SourceSpaces):
        src = inst
        if info is None:
            raise ValueError('You need to specify an Info object with '
                             'information about the channels.')

    # Load the head<->MRI transform if necessary
    if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        if trans is None:
            raise ValueError('Source space is in MRI coordinates, but no '
                             'head<->MRI transform was given. Please specify '
                             'the full path to the appropriate *-trans.fif '
                             'file as the "trans" parameter.')
        if isinstance(trans, string_types):
            candidates = read_trans(trans, return_all=True)
            err = None
            for candidate in candidates:  # we got at least 1
                try:
                    trans = _ensure_trans(candidate, 'head', 'mri')
                except Exception as exp:
                    err = exp  # keep a reference; 'exp' is cleared after the except block
                else:
                    break
            else:
                raise err

        src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri'))
        print('Transform!')
    else:
        src_trans = Transform('head', 'head')  # Identity transform

    dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head')

    if picks is None:
        picks = pick_types(info, meg=True)
        if len(picks) > 0:
            print('Using MEG channels')
        else:
            print('Using EEG channels')
            picks = pick_types(info, eeg=True)

    sensor_pos = []
    for ch in picks:
        # MEG channels are in device coordinates, translate them to head
        if channel_type(info, ch) in ['mag', 'grad']:
            sensor_pos.append(
                apply_trans(dev_to_head, info['chs'][ch]['loc'][:3]))
        else:
            sensor_pos.append(info['chs'][ch]['loc'][:3])
    sensor_pos = np.array(sensor_pos)

    return sensor_pos
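Usage sketch for get_sensor_pos_from_fwd (fwd, src, info and the trans path are placeholders):

# From a Forward object, which already carries its measurement info
sensor_pos = get_sensor_pos_from_fwd(fwd)

# From a SourceSpaces in MRI coordinates, an Info and a trans file are required
sensor_pos = get_sensor_pos_from_fwd(src, info=info, trans='sample-trans.fif')
print(sensor_pos.shape)   # (n_sensors, 3), positions in head coordinates (m)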
    # Read model inverse to get template for cov, mri_head_t
    model_inv = read_inverse_operator(fname_load_inv)
    ###########################################################
    # Create spherical BEM
    sph_bem = make_sphere_model(r0='auto', head_radius='auto', info=subj_info)

    ###########################################################
    # Create forward solution

    # Read source space and trans (mri->head) for forward computation
    src = read_source_spaces(fname_load_src)

    if generic_sph is True:
        # Use this trans if using generic fsaverage head model
        trans = read_trans(fname_load_trans)
    else:
        # Use this trans if using individual's head model
        trans = model_inv['mri_head_t']

    fwd = make_forward_solution(info=subj_info, trans=trans, src=src,
                                bem=sph_bem, fname=fname_save_fwd, meg=False,
                                eeg=True, overwrite=True, n_jobs=n_jobs)

    # Fix orientation
    convert_forward_solution(fwd, surf_ori=True, force_fixed=True, copy=False)

    ###########################################################
    # Create and save inverse solution
    noise_cov = model_inv['noise_cov']
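The fragment above uses an older make_forward_solution signature; in current MNE releases the fname and overwrite arguments are gone and the solution is written separately. A hedged sketch of the equivalent steps with today's public API, reusing the fragment's variable names (fname_save_inv is a placeholder):

from mne import (make_forward_solution, convert_forward_solution,
                 write_forward_solution)
from mne.minimum_norm import make_inverse_operator, write_inverse_operator

fwd = make_forward_solution(info=subj_info, trans=trans, src=src, bem=sph_bem,
                            meg=False, eeg=True, n_jobs=n_jobs)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True)
write_forward_solution(fname_save_fwd, fwd, overwrite=True)

inv = make_inverse_operator(subj_info, fwd, noise_cov)
write_inverse_operator(fname_save_inv, inv)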