Example #1
    def fit(self, epochs):
        self.picks = _handle_picks(info=epochs.info, picks=self.picks)
        _check_data(epochs, picks=self.picks,
                    ch_constraint='single_channel_type', verbose=self.verbose)
        self.ch_type = _get_channel_type(epochs, self.picks)
        n_epochs = len(epochs)
        self.ch_subsets_ = self._get_random_subsets(epochs.info)
        self.mappings_ = self._get_mappings(epochs)

        n_jobs = check_n_jobs(self.n_jobs)
        parallel = Parallel(n_jobs, verbose=10)
        my_iterator = delayed(_iterate_epochs)
        if self.verbose is not False and self.n_jobs > 1:
            print('Iterating epochs ...')
        verbose = False if self.n_jobs > 1 else self.verbose
        corrs = parallel(
            my_iterator(self, epochs, idxs, verbose)
            for idxs in np.array_split(np.arange(n_epochs), n_jobs))
        self.corr_ = np.concatenate(corrs)
        if self.verbose is not False and self.n_jobs > 1:
            print('[Done]')

        # count in how many windows each sensor was flagged RANSAC-bad
        self.bad_log = np.zeros_like(self.corr_)
        self.bad_log[self.corr_ < self.min_corr] = 1
        bad_log = self.bad_log.sum(axis=0)

        bad_idx = np.where(bad_log > self.unbroken_time * n_epochs)[0]
        if len(bad_idx) > 0:
            self.bad_chs_ = [
                epochs.info['ch_names'][self.picks[p]] for p in bad_idx]
        else:
            self.bad_chs_ = []
        return self
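The pattern in this example generalizes: split the epoch indices with np.array_split, score each chunk in a joblib worker, then np.concatenate the per-chunk results back in their original order. A minimal self-contained sketch of that pattern; the helper _score_chunk and the toy data are hypothetical stand-ins for _iterate_epochs and real epochs:

import numpy as np
from joblib import Parallel, delayed

def _score_chunk(data, idxs):
    # hypothetical stand-in for _iterate_epochs: one score per epoch
    return data[idxs].mean(axis=(1, 2))

data = np.random.RandomState(0).randn(20, 4, 100)  # epochs x channels x times
n_jobs = 2
chunks = np.array_split(np.arange(len(data)), n_jobs)
scores = Parallel(n_jobs=n_jobs)(
    delayed(_score_chunk)(data, idxs) for idxs in chunks)
scores = np.concatenate(scores)  # same order as the original epochs
assert scores.shape == (20,)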
Example #2
    def fit(self, epochs):
        _check_data(epochs)
        self.ch_type = _get_channel_type(epochs)
        n_epochs = len(epochs)
        self.ch_subsets_ = self._get_random_subsets(epochs.info)
        self.mappings_ = self._get_mappings(epochs)

        n_jobs = check_n_jobs(self.n_jobs)
        parallel = Parallel(n_jobs, verbose=10)
        my_iterator = delayed(_iterate_epochs)
        if self.verbose is not False and self.n_jobs > 1:
            print('Iterating epochs ...')
        verbose = False if self.n_jobs > 1 else self.verbose
        corrs = parallel(
            my_iterator(self, epochs, idxs, verbose)
            for idxs in np.array_split(np.arange(n_epochs), n_jobs))
        self.corr_ = np.concatenate(corrs)
        if self.verbose is not False and self.n_jobs > 1:
            print('[Done]')

        # count in how many windows each sensor was flagged RANSAC-bad
        self.bad_log = np.zeros_like(self.corr_)
        self.bad_log[self.corr_ < self.min_corr] = 1
        bad_log = self.bad_log.sum(axis=0)

        bad_idx = np.where(bad_log > self.unbroken_time * n_epochs)[0]
        if len(bad_idx) > 0:
            self.bad_chs_ = [epochs.info['ch_names'][p] for p in bad_idx]
        else:
            self.bad_chs_ = []
        return self
Example #3
    def fit(self,
            raw: mne.io.RawArray,
            start: float = None,
            stop: float = None,
            reject_by_annotation: bool = True,
            gfp: bool = False,
            n_jobs: int = 1,
            verbose=None) -> 'mod_Kmeans':
        """[summary]

        Args:
            raw (mne.io.RawArray): [description]
            start (float, optional): [description]. Defaults to None.
            stop (float, optional): [description]. Defaults to None.
            reject_by_annotation (bool, optional): [description]. Defaults to True.
            gfp (bool, optional): [description]. Defaults to False.
            n_jobs (int, optional): [description]. Defaults to 1.
            verbose ([type], optional): [description]. Defaults to None.

        Returns:
            mod_Kmeans: [description]
        """
        _validate_type(raw, BaseRaw, 'raw', 'Raw')
        reject_by_annotation = 'omit' if reject_by_annotation else None
        start, stop = _check_start_stop(raw, start, stop)
        n_jobs = check_n_jobs(n_jobs)

        if len(raw.info['bads']) != 0:
            warn('Bad channels are present in the recording. '
                 'They will still be used to compute microstate topographies. '
                 'Consider using Raw.pick() or Raw.interpolate_bads()'
                 ' before fitting.')

        data = raw.get_data(start,
                            stop,
                            reject_by_annotation=reject_by_annotation)
        if gfp:
            data = _extract_gfps(data)

        best_gev = 0
        best_maps, best_segmentation = None, None
        if n_jobs == 1:
            for _ in range(self.n_init):
                gev, maps, segmentation = self._run_mod_kmeans(data)
                if gev > best_gev:
                    best_gev, best_maps, best_segmentation = gev, maps, segmentation
        else:
            parallel, p_fun, _ = parallel_func(self._run_mod_kmeans,
                                               total=self.n_init,
                                               n_jobs=n_jobs)
            runs = parallel(p_fun(data) for i in range(self.n_init))
            runs = np.array(runs, dtype=object)
            best_run = np.argmax(runs[:, 0])
            best_gev, best_maps, best_segmentation = runs[best_run]

        self.cluster_centers = best_maps
        self.GEV = best_gev
        self.labels = best_segmentation
        self.current_fit = True
        return self
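For reference, the best-of-n_init selection in this example can also be done without NumPy object arrays: keep the runs as plain tuples and take the max by GEV. A minimal sketch with toy tuples standing in for the real (gev, maps, segmentation) results:

# toy stand-ins for the (gev, maps, segmentation) tuples of each run
runs = [(0.31, 'maps_a', 'seg_a'), (0.74, 'maps_b', 'seg_b')]
best_gev, best_maps, best_segmentation = max(runs, key=lambda run: run[0])
assert best_gev == 0.74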
Example #4
def _parallel_scorer(y_true, y_pred, func, n_jobs=1):
    from mne.parallel import parallel_func, check_n_jobs
    # check dimensionality
    assert y_true.ndim == 1
    assert y_pred.ndim == 2
    # cap n_jobs at the number of prediction columns
    n_jobs = min(y_pred.shape[1], check_n_jobs(n_jobs))
    parallel, p_func, n_jobs = parallel_func(func, n_jobs)
    chunks = np.array_split(y_pred.transpose(), n_jobs)
    # run parallel
    out = parallel(p_func(chunk.T, y_true) for chunk in chunks)
    # gather data
    return np.concatenate(out, axis=0)
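A hedged usage sketch for _parallel_scorer: the scorer below and the toy data are hypothetical, but they match the contract the function expects (func receives a (n_samples, n_columns) chunk plus y_true and returns one score per column):

import numpy as np

def _column_corr(y_pred_chunk, y_true):
    # hypothetical scorer: correlation of each predicted column with y_true
    return np.array([np.corrcoef(col, y_true)[0, 1]
                     for col in y_pred_chunk.T])

rng = np.random.RandomState(42)
y_true = rng.randn(50)
y_pred = y_true[:, np.newaxis] + rng.randn(50, 6)
scores = _parallel_scorer(y_true, y_pred, _column_corr, n_jobs=2)
assert scores.shape == (6,)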
Example #5
    def fit(self, epochs):
        self.picks = _handle_picks(info=epochs.info, picks=self.picks)
        _check_data(epochs,
                    picks=self.picks,
                    ch_constraint='single_channel_type',
                    verbose=self.verbose)
        self.ch_type = _get_channel_type(epochs, self.picks)
        n_epochs = len(epochs)

        n_jobs = check_n_jobs(self.n_jobs)
        parallel = Parallel(n_jobs, verbose=10)
        my_iterator = delayed(_iterate_epochs)
        if self.verbose is not False and self.n_jobs > 1:
            print('Iterating epochs ...')
        verbose = False if self.n_jobs > 1 else self.verbose
        rng = check_random_state(self.random_state)
        base_random_state = rng.randint(np.iinfo(np.int16).max)
        self.ch_subsets_ = [
            self._get_random_subsets(epochs.info,
                                     base_random_state + random_state)
            for random_state in np.arange(0, n_epochs, n_jobs)
        ]
        epoch_idxs = np.array_split(np.arange(n_epochs), n_jobs)
        corrs = parallel(
            my_iterator(self, epochs, idxs, chs, verbose)
            for idxs, chs in zip(epoch_idxs, self.ch_subsets_))
        self.corr_ = np.concatenate(corrs)
        if self.verbose is not False and self.n_jobs > 1:
            print('[Done]')

        # count in how many windows each sensor was flagged RANSAC-bad
        self.bad_log = np.zeros_like(self.corr_)
        self.bad_log[self.corr_ < self.min_corr] = 1
        bad_log = self.bad_log.sum(axis=0)

        bad_idx = np.where(bad_log > self.unbroken_time * n_epochs)[0]
        if len(bad_idx) > 0:
            self.bad_chs_ = [
                epochs.info['ch_names'][self.picks[p]] for p in bad_idx
            ]
        else:
            self.bad_chs_ = []
        return self
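The seeding scheme above can be isolated: draw one base state, then give each parallel chunk a deterministic offset seed, so reruns reproduce the same random subsets. A minimal sketch, with a fixed number of chunks standing in for the np.arange(0, n_epochs, n_jobs) offsets:

import numpy as np
from mne.utils import check_random_state

rng = check_random_state(42)
base_random_state = rng.randint(np.iinfo(np.int16).max)
# one deterministic seed per chunk, all derived from the single base state
chunk_seeds = [base_random_state + offset for offset in range(4)]
streams = [np.random.RandomState(seed) for seed in chunk_seeds]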
Example #6
    def __init__(self, estimator=None, n_jobs=-1):
        from mne.parallel import check_n_jobs
        from sklearn.linear_model import LinearRegression
        self.estimator = LinearRegression() if estimator is None else estimator
        self.n_jobs = n_jobs = check_n_jobs(n_jobs)
Example #7
def make_pert_forward_solution(info,
                               trans,
                               src,
                               bem,
                               perts,
                               meg=True,
                               eeg=True,
                               mindist=0.0,
                               ignore_ref=False,
                               n_jobs=1,
                               verbose=None):
    """Calculate a forward solution for a subject.

    Parameters
    ----------
    info : instance of mne.Info | str
        If str, then it should be a filename to a Raw, Epochs, or Evoked
        file with measurement information. Otherwise it should be an Info
        instance (such as one from Raw, Epochs, or Evoked).
    trans : dict | str | None
        Either a transformation filename (usually made using mne_analyze)
        or a transformation dict (usually read using read_trans()).
        If string, an ending of `.fif` or `.fif.gz` will be assumed to
        be in FIF format, any other ending will be assumed to be a text
        file with a 4x4 transformation matrix (like the `--trans` MNE-C
        option). Can be None to use the identity transform.
    src : str | instance of SourceSpaces
        If string, should be a source space filename. Can also be an
        instance of loaded or generated SourceSpaces.
    bem : dict | str
        Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif") to
        use, or a loaded sphere model (dict).
    perts : dict
        A dictionary containing perturbation parameters for gradiometer
        imbalance, sensor miscalibration, and misalignment.
    meg : bool
        If True (Default), include MEG computations.
    eeg : bool
        If True (Default), include EEG computations.
    mindist : float
        Minimum distance of sources from inner skull surface (in mm).
    ignore_ref : bool
        If True, do not include reference channels in compensation. This
        option should be True for KIT files, since forward computation
        with reference channels is not currently supported.
    n_jobs : int
        Number of jobs to run in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    fwd : instance of Forward
        The forward solution.

    See Also
    --------
    convert_forward_solution

    Notes
    -----
    The ``--grad`` option from MNE-C (to compute gradients) is not implemented
    here.

    To create a fixed-orientation forward solution, use this function
    followed by :func:`mne.convert_forward_solution`.
    """
    # Currently not (sup)ported:
    # 1. --grad option (gradients of the field, not used much)
    # 2. --fixed option (can be computed post-hoc)
    # 3. --mricoord option (probably not necessary)

    # read the transformation from MRI to HEAD coordinates
    # (could also be HEAD to MRI)
    mri_head_t, trans = _get_trans(trans)
    if isinstance(bem, ConductorModel):
        bem_extra = 'instance of ConductorModel'
    else:
        bem_extra = bem
    if not isinstance(info, (Info, string_types)):
        raise TypeError('info should be an instance of Info or string')
    if isinstance(info, string_types):
        info_extra = op.split(info)[1]
        info = read_info(info, verbose=False)
    else:
        info_extra = 'instance of Info'
    n_jobs = check_n_jobs(n_jobs)

    # Report the setup
    logger.info('Source space          : %s' % src)
    logger.info('MRI -> head transform : %s' % trans)
    logger.info('Measurement data      : %s' % info_extra)
    if isinstance(bem, ConductorModel) and bem['is_sphere']:
        logger.info('Sphere model      : origin at %s mm' % (bem['r0'], ))
        logger.info('Standard field computations')
    else:
        logger.info('Conductor model   : %s' % bem_extra)
        logger.info('Accurate field computations')
    logger.info('Do computations in %s coordinates',
                _coord_frame_name(FIFF.FIFFV_COORD_HEAD))
    logger.info('Free source orientations')

    megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
        update_kwargs, bem = _prepare_for_forward(
            src, mri_head_t, info, bem, mindist, n_jobs, perts, bem_extra, trans,
            info_extra, meg, eeg, ignore_ref)
    del (src, mri_head_t, trans, info_extra, bem_extra, mindist, meg, eeg,
         ignore_ref)

    # Time to do the heavy lifting: MEG first, then EEG
    coil_types = ['meg', 'eeg']
    coils = [megcoils, eegels]
    ccoils = [compcoils, None]
    infos = [meg_info, None]
    megfwd, eegfwd = _compute_forwards(rr, bem, coils, ccoils, infos,
                                       coil_types, n_jobs)

    # merge forwards
    fwd = _merge_meg_eeg_fwds(_to_forward_dict(megfwd, megnames),
                              _to_forward_dict(eegfwd, eegnames),
                              verbose=False)
    logger.info('')

    # Don't transform the source spaces back into MRI coordinates (which is
    # done in the C code) because mne-python assumes forward solution source
    # spaces are in head coords.
    fwd.update(**update_kwargs)
    logger.info('Finished.')
    return fwd
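A hedged usage sketch, assuming the standard MNE sample dataset; the empty perts dict is only a hypothetical placeholder, since the expected structure of perts is defined by this module rather than by MNE itself:

import mne

data_path = mne.datasets.sample.data_path()
info = mne.io.read_info(f'{data_path}/MEG/sample/sample_audvis_raw.fif')
trans = f'{data_path}/MEG/sample/sample_audvis_raw-trans.fif'
src = f'{data_path}/subjects/sample/bem/sample-oct-6-src.fif'
bem = f'{data_path}/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
# perts={} is a hypothetical no-perturbation placeholder
fwd = make_pert_forward_solution(info, trans=trans, src=src, bem=bem,
                                 perts={}, meg=True, eeg=True, n_jobs=1)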
Example #9
def _permutation_cluster_test_AT(X, threshold, tail, n_permutations,
                                 connectivity, n_jobs, seed, max_step, exclude,
                                 step_down_p, t_power, out_type,
                                 check_disjoint, buffer_size):
    """Aux function.

    Note: X is required to be a list. Depending on the length of X,
    either a one-sample t-test or an F-test / multi-sample permutation
    scheme is elicited.
    """
    n_jobs = check_n_jobs(n_jobs)
    if out_type not in ['mask', 'indices']:
        raise ValueError('out_type must be either \'mask\' or \'indices\'')
    if not isinstance(threshold, dict) and (tail < 0 and threshold > 0
                                            or tail > 0 and threshold < 0
                                            or tail == 0 and threshold < 0):
        raise ValueError(
            'incompatible tail and threshold signs, got %s and %s' %
            (tail, threshold))

    # check dimensions for each group in X (a list at this stage).
    X = [x[:, np.newaxis] if x.ndim == 1 else x for x in X]
    n_times = X[0].shape[0]

    sample_shape = X[0].shape[1:]
    for x in X:
        if x.shape[1:] != sample_shape:
            raise ValueError('All samples must have the same size')

#    # flatten the last dimensions in case the data is high dimensional
#    X = [np.reshape(x, (x.shape[0], -1)) for x in X]
    n_tests = X[0].shape[1]

    if connectivity is not None and connectivity is not False:
        connectivity = cluster_level._setup_connectivity(
            connectivity, n_tests, n_times)

    if (exclude is not None) and not exclude.size == n_tests:
        raise ValueError('exclude must be the same shape as X[0]')

    # determine if connectivity itself can be separated into disjoint sets
    if check_disjoint is True and (connectivity is not None
                                   and connectivity is not False):
        partitions = cluster_level._get_partitions_from_connectivity(
            connectivity, n_times)
    else:
        partitions = None
    max_clu_lens = np.zeros(n_permutations)
    for i in range(n_permutations):
        # logger.info('Running initial clustering')
        include = None
        out = cluster_level._find_clusters(X[i][0],
                                           threshold,
                                           tail,
                                           connectivity,
                                           max_step=max_step,
                                           include=include,
                                           partitions=partitions,
                                           t_power=t_power,
                                           show_info=True)
        clusters, cluster_stats = out

        logger.info('Found %d clusters' % len(clusters))

        # convert clusters to old format
        if connectivity is not None and connectivity is not False:
            # our algorithms output lists of indices by default
            if out_type == 'mask':
                clusters = cluster_level._cluster_indices_to_mask(
                    clusters, n_tests)
        else:
            # ndimage outputs slices or boolean masks by default
            if out_type == 'indices':
                clusters = cluster_level._cluster_mask_to_indices(clusters)

        # The clusters should have the same shape as the samples
        clusters = cluster_level._reshape_clusters(clusters, sample_shape)
        max_clu_len = 0
        for clu in clusters:
            max_clu_len = max(max_clu_len, len(clu[0]))
        logger.info('Max cluster length %d' % max_clu_len)
        max_clu_lens[i] = max_clu_len
    return max_clu_lens, clusters
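The tail/threshold sign check near the top of this function can be restated in isolation. A minimal sketch; the helper name is hypothetical:

def _tail_threshold_ok(tail, threshold):
    # a negative tail needs a non-positive threshold, a positive tail a
    # non-negative one, and a two-tailed test a non-negative threshold
    if tail < 0:
        return threshold <= 0
    if tail > 0:
        return threshold >= 0
    return threshold >= 0

assert _tail_threshold_ok(-1, -2.0)
assert not _tail_threshold_ok(1, -2.0)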