Example #1
def _mne_to_dict_evoked(eeg):
    # Try loading mne
    try:
        import mne
    except ImportError:
        raise ImportError(
            "NeuroKit error: _mne_to_dict_evoked(): the 'mne' module is required for this "
            "function to run. Please install it first (`pip install mne`)."
        )

    if not isinstance(eeg, list):
        eeg = [eeg]

    data = {}

    old_verbosity_level = mne.set_log_level(verbose="WARNING", return_old_level=True)
    for i, evoked in enumerate(eeg):
        df = evoked.to_data_frame()
        df.index = evoked.times

        # Add info
        info = pd.DataFrame({"Label": [i] * len(df)})
        info["Condition"] = evoked.comment
        info["Time"] = evoked.times
        info.index = evoked.times

        data[i] = pd.concat([info, df], axis=1)

    mne.set_log_level(old_verbosity_level)
    return data
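Example #1 relies on MNE handing back the previous log level so it can be restored afterwards. A minimal sketch of that save/restore idiom (not taken from the project; it assumes only that mne is installed):

import mne

# Silence MNE temporarily and remember the previous verbosity.
old_level = mne.set_log_level(verbose="WARNING", return_old_level=True)
try:
    pass  # ... run noisy MNE calls here ...
finally:
    # Restore whatever verbosity was active before.
    mne.set_log_level(old_level)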
Example #2
def _mne_to_dict_epochs(eeg):
    # Try loading mne
    try:
        import mne
    except ImportError:
        raise ImportError(
            "NeuroKit error: _mne_to_dict_epochs(): the 'mne' module is required for this "
            "function to run. Please install it first (`pip install mne`)."
        )

    data = {}

    old_verbosity_level = mne.set_log_level(verbose="WARNING", return_old_level=True)
    for i, dat in enumerate(eeg.get_data()):

        df = pd.DataFrame(dat.T)
        df.columns = eeg[i].ch_names
        df.index = eeg[i].times

        # Add info
        info = pd.DataFrame({"Label": [i] * len(df)})
        info["Condition"] = list(eeg[i].event_id.keys())[0]
        info["Time"] = eeg[i].times
        info.index = eeg[i].times

        data[i] = pd.concat([info, df], axis=1)

    mne.set_log_level(old_verbosity_level)
    return data
Example #3
    def setUp(self) -> None:
        mne.set_log_level(False)
        self.dataset = create_dummy_dataset()
        self.dataset.return_person_id = True
        self.dataset.return_session_id = True
        self.transform = ZScore()
        self.dataset.add_transform(self.transform)
Example #4
def source_distributed(evo_crop, fwd, cov, methods, dir_base):
    param_loose = np.arange(0.1, 1.1, 0.1)
    param_depth = np.arange(0.1, 1.1, 0.1)
    param_snr = np.arange(1, 5, 1)
    mne.set_log_level('ERROR')  # Only for final run
    print('Computing source estimates - ', methods)
    for method in tqdm(methods, position=0, desc='methods'):
        for loose in tqdm(param_loose, position=1, desc='loose'):
            for depth in tqdm(param_depth, position=2, desc='depth'):
                # anatomical source space
                inv = m_inv_op(evo_crop.info,
                               fwd,
                               cov,
                               loose=loose,
                               depth=depth)

                for snr in param_snr:
                    lambda2 = 1. / snr**2
                    stc = a_inv_op(evo_crop,
                                   inv,
                                   lambda2,
                                   method=method,
                                   pick_ori=None)

                    fname_stc = op.join(
                        dir_base, 'results', 'source_loc', method,
                        '%s__%s_l%0.1f_d%0.1f_s%0.1f-stc' %
                        (evo_crop.info['description'], method, loose, depth,
                         snr))
                    stc.save(fname_stc)
Example #5
def eeg_rereference_mne(eeg, reference="average", robust=False, **kwargs):

    eeg = eeg.copy()
    if reference == "average" and robust is True:
        eeg._data = eeg_rereference_array(eeg._data,
                                          reference=reference,
                                          robust=robust)
        eeg.info["custom_ref_applied"] = True
    elif reference in ["lap", "csd"]:
        try:
            import mne
            if mne.__version__ < '0.20':
                raise ImportError
        except ImportError:
            raise ImportError(
                "NeuroKit error: eeg_rereference(): the 'mne' module (version > 0.20) is required "
                "for this function to run. Please install it first (`pip install mne`).",
            )
        old_verbosity_level = mne.set_log_level(verbose="WARNING",
                                                return_old_level=True)
        eeg = mne.preprocessing.compute_current_source_density(eeg)
        mne.set_log_level(old_verbosity_level)
    else:
        eeg = eeg.set_eeg_reference(reference, verbose=False, **kwargs)

    return eeg
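Note that mne.__version__ < '0.20' above compares version strings lexicographically, so a release such as '0.9' would not be flagged as older than '0.20'. A hedged sketch of a more robust check using the third-party packaging module (an assumption, not part of the original function):

from packaging.version import Version
import mne

# Compare parsed versions instead of raw strings.
if Version(mne.__version__) < Version("0.20"):
    raise ImportError("mne >= 0.20 is required for CSD re-referencing.")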
Example #6
def interpolate_bads(inst, picks, dots=None, reset_bads=True, mode='accurate'):
    """Interpolate bad MEG and EEG channels."""
    import mne
    # to prevent cobyla printf error
    # XXX putting to critical for now unless better solution
    # emerges
    verbose = mne.set_log_level('CRITICAL', return_old_level=True)

    eeg_picks = set(pick_types(inst.info, meg=False, eeg=True, exclude=[]))
    eeg_picks_interp = [p for p in picks if p in eeg_picks]
    if len(eeg_picks_interp) > 0:
        _interpolate_bads_eeg(inst, picks=eeg_picks_interp)

    meg_picks = set(pick_types(inst.info, meg=True, eeg=False, exclude=[]))
    meg_picks_interp = [p for p in picks if p in meg_picks]
    if len(meg_picks_interp) > 0:
        _interpolate_bads_meg_fast(inst,
                                   picks=meg_picks_interp,
                                   dots=dots,
                                   mode=mode)

    if reset_bads is True:
        inst.info['bads'] = []

    mne.set_log_level(verbose)

    return inst
Example #7
def raw_clean(montage):
    """Return an `mne.io.Raw` object with no bad channels for use with tests.

    This fixture downloads and reads in subject 30, run 2 from the Physionet
    BCI2000 (eegbci) open dataset, which contains no bad channels on an initial
    pass of :class:`pyprep.NoisyChannels`. Intended for use with tests where
    channels are made artificially bad.

    File attributes:
    - Channels: 64 EEG
    - Sample rate: 160 Hz
    - Duration: 61 seconds

    This is only run once per session to save time downloading.

    """
    mne.set_log_level("WARNING")

    # Download and read S030R02.edf from the BCI2000 dataset
    edf_fpath = eegbci.load_data(30, 2, update_path=True)[0]
    raw = mne.io.read_raw_edf(edf_fpath, preload=True)
    eegbci.standardize(raw)  # Fix non-standard channel names

    # Set a montage for use with RANSAC
    raw.set_montage(montage)

    return raw
Example #8
def interpolate_bads(inst, picks, dots=None, reset_bads=True, mode='accurate'):
    """Interpolate bad MEG and EEG channels."""
    import mne
    # to prevent cobyla printf error
    # XXX putting to critical for now unless better solution
    # emerges
    verbose = mne.set_log_level('CRITICAL', return_old_level=True)

    eeg_picks = set(pick_types(inst.info, meg=False, eeg=True, exclude=[]))
    eeg_picks_interp = [p for p in picks if p in eeg_picks]
    if len(eeg_picks_interp) > 0:
        _interpolate_bads_eeg(inst, picks=eeg_picks_interp)

    meg_picks = set(pick_types(inst.info, meg=True, eeg=False, exclude=[]))
    meg_picks_interp = [p for p in picks if p in meg_picks]
    if len(meg_picks_interp) > 0:
        _interpolate_bads_meg_fast(inst, picks=meg_picks_interp,
                                   dots=dots, mode=mode)

    if reset_bads is True:
        inst.info['bads'] = []

    mne.set_log_level(verbose)

    return inst
Example #9
def write_mnefiff(data, filename):
    """Export data to MNE using FIFF format.

    Parameters
    ----------
    data : instance of ChanTime
        data with only one trial
    filename : path to file
        file to export to (include '.fif')

    Notes
    -----
    It cannot store data larger than 2 GB.
    The data is assumed to have only EEG electrodes.
    It overwrites a file if it exists.
    """
    from mne import create_info, set_log_level
    from mne.io import RawArray

    set_log_level('WARNING')

    TRIAL = 0
    info = create_info(list(data.axis['chan'][TRIAL]), data.s_freq, ['eeg', ] *
                       data.number_of('chan')[TRIAL])

    UNITS = 1e-6  # convert from uV to V (MNE expects volts)
    fiff = RawArray(data.data[0] * UNITS, info)

    if data.attr['chan']:
        fiff.set_channel_positions(data.attr['chan'].return_xyz(),
                                   data.attr['chan'].return_label())

    fiff.save(filename, overwrite=True)
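The UNITS factor above scales microvolt data to volts because MNE expects signals in SI units. A self-contained sketch of the same create_info/RawArray pattern, using synthetic data and hypothetical channel names:

import numpy as np
import mne

mne.set_log_level("WARNING")

# Three fake EEG channels, 10 seconds at 256 Hz, values in microvolts.
info = mne.create_info(["Fz", "Cz", "Pz"], sfreq=256.0, ch_types="eeg")
data_uv = np.random.randn(3, 256 * 10)
raw = mne.io.RawArray(data_uv * 1e-6, info)  # scale to volts for MNE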
Example #10
    def transform(self, X):
        from mne.beamformer import apply_lcmv_epochs
        mne.set_log_level('WARNING')
        epochs = mne.EpochsArray(X, self.info, verbose=False)
        epochs.filter(self.filter_specs['lp'],
                      self.filter_specs['hp'],
                      fir_design='firwin',
                      n_jobs=self.n_jobs)
        stcs = apply_lcmv_epochs(epochs,
                                 self.filters,
                                 return_generator=True,
                                 max_ori_out='signed',
                                 verbose=False)
        stcs_mat = np.ones((X.shape[0], self.fwd['nsource'], X.shape[2]))
        for trial in range(X.shape[0]):
            stcs_mat[trial, :, :] = next(stcs).data

        # make an epoch
        # epochs_stcs = source2epoch(stcs_mat, self.fwd['nsource'],
        #                            self.info['sfreq'])
        # epochs_stcs.filter(self.filter_specs['lp'], self.filter_specs['hp'],
        #                    n_jobs=self.n_jobs)

        if self.power_win is None:
            self.power_win = self.t_win
        time_idx = epochs.time_as_index(self.power_win)

        # stcs_mat is [trials, grid points, time points]
        return np.sum(stcs_mat[:, :, time_idx[0]:time_idx[1]]**2, axis=2)
Example #11
def get_signals(path):
    """
	1) Reading edf file using MNE
	2) Band pass filtering
	3) Downsampling

	:param path: str, filepath to the EDF files (ending in '/')
	:return: dataframe of EEG, EEG2, and EMG signals
	"""
    mne.set_log_level('WARNING')

    raw_edf = mne.io.read_raw_edf(path, preload=True)
    channels = raw_edf.ch_names
    if 'EEG 2' in channels:
        raw_edf.rename_channels({'EEG 2': 'EEG2'})
    elif 'EEG(sec)' in channels:
        raw_edf.rename_channels({'EEG(sec)': 'EEG2'})
    elif 'EEG(SEC)' in channels:
        raw_edf.rename_channels({'EEG(SEC)': 'EEG2'})
    raw_edf.pick_channels(['EEG', 'EEG2', 'EMG'])  # Select channels
    raw_edf.filter(2, 30., fir_design='firwin')  # Band filter
    raw_edf.resample(C.FINAL_SAMPLING_FREQ,
                     npad='auto')  # Downsampling to 62 Hz

    return raw_edf.to_data_frame()
Example #12
def from_raw_2_times(night=None, r_events=None, dir_path=None):
    if r_events is None:
        if dir_path is None:
            dir_path = "../data/raw/EEG"
        pattern = r'Nathalie.+\.mff'
        files = [f for f in os.listdir(dir_path) if re.match(pattern, f)]
        mne.set_log_level(verbose='CRITICAL')
        for nf, f in enumerate(files):
            fields = re.findall(pattern=r'\d+', string=f)
            nonight = int(fields[0])
            if nonight == night:
                break
        path_to_file = os.path.join(dir_path, files[nf])
        raw = mne.io.read_raw_egi(path_to_file,
                                  montage='GSN-HydroCel-256',
                                  preload=False)
        events = mne.find_events(raw)
    else:
        raw = r_events['raw']
        events = r_events['events']
    print(datetime.datetime.fromtimestamp(raw.info['meas_date'][0]))
    s = [t for t, n, e in events if e == raw.event_id['DIN1']]
    word_events = []
    fs = raw.info['sfreq']
    for i, inception in enumerate(s):
        if inception / fs + 55 <= raw.times.max():
            timing = datetime.datetime.fromtimestamp(raw.info['meas_date'][0] +
                                                     inception / fs)
            word_events.append({"time": timing, "index": i})
    return word_events
Example #13
def test_morphing():
    mne.set_log_level('warning')
    data_dir = mne.datasets.sample.data_path()
    subjects_dir = os.path.join(data_dir, 'subjects')

    sss = datasets._mne_source_space('fsaverage', 'ico-4', subjects_dir)
    vertices_to = [sss[0]['vertno'], sss[1]['vertno']]
    ds = datasets.get_mne_sample(-0.1,
                                 0.1,
                                 src='ico',
                                 sub='index==0',
                                 stc=True)
    stc = ds['stc', 0]
    morph_mat = mne.compute_morph_matrix('sample', 'fsaverage', stc.vertices,
                                         vertices_to, None, subjects_dir)
    ndvar = ds['src']

    morphed_ndvar = morph_source_space(ndvar, 'fsaverage')
    morphed_stc = mne.morph_data_precomputed('sample', 'fsaverage', stc,
                                             vertices_to, morph_mat)
    assert_array_equal(morphed_ndvar.x[0], morphed_stc.data)
    morphed_stc_ndvar = load.fiff.stc_ndvar([morphed_stc],
                                            'fsaverage',
                                            'ico-4',
                                            subjects_dir,
                                            'dSPM',
                                            False,
                                            'src',
                                            parc=None)
    assert_dataobj_equal(morphed_ndvar, morphed_stc_ndvar)
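compute_morph_matrix and morph_data_precomputed belong to older MNE releases; current versions expose morphing through mne.compute_source_morph instead. A rough sketch of the modern equivalent, assuming stc and subjects_dir as defined in the test above:

import mne

# A SourceMorph object replaces the precomputed morph-matrix workflow.
morph = mne.compute_source_morph(stc, subject_from="sample",
                                 subject_to="fsaverage",
                                 subjects_dir=subjects_dir)
morphed_stc = morph.apply(stc)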
Example #14
def ica_all():
    """Filter all of the EEG data in a directory and save.

    Parameters
    ----------
    l_freq : float
        Low-pass frequency (Hz).
    h_freq : float
        High-pass frequency (Hz).
    read_dir : str
        Directory from which to read the data.
    save_dir : str
        Directory in which to save the filtered data.

    """
    parser = argparse.ArgumentParser(prog='1_filter_all.py',
                                     description=__doc__)
    parser.add_argument('-i', '--input', type=str, required=True,
                        help="Directory of files to be filtered.")
    parser.add_argument('-o', '--output', type=str, required=True,
                        help="Directory in which to save filtered files.")
    parser.add_argument('-m', '--method', type=str, default='extended-infomax',
                        help='ICA method to use.')
    parser.add_argument('-v', '--verbose', type=str, default='error')
    args = parser.parse_args()

    input_dir = op.abspath(args.input)
    output_dir = op.abspath(args.output)
    ica_method = args.method

    if not op.exists(input_dir):
        sys.exit("Input directory not found.")
    if not op.exists(output_dir):
        sys.exit("Output directory not found.")

    set_log_level(verbose=args.verbose)

    input_fnames = op.join(input_dir, '*.fif')
    input_fnames = glob(input_fnames)
    n_files = len(input_fnames)

    print("Preparing to ICA {n} files".format(n=n_files))
    # Initialize a progress bar.
    progress = ProgressBar(n_files, mesg='Performing ICA')
    progress.update_with_increment_value(0)
    for fname in input_fnames:
        # Open file.
        raw = io.read_raw_fif(fname, preload=True, add_eeg_ref=False)
        # Perform ICA.
        ica = ICA(method=ica_method).fit(raw)
        # Save file.
        save_fname = op.splitext(op.split(fname)[-1])[0]
        save_fname += '-ica'
        save_fname = op.join(output_dir, save_fname)
        ica.save(save_fname + '.fif')
        # Update progress bar.
        progress.update_with_increment_value(1)

    print("")  # Get onto new line once progressbar completes.
Example #15
def run():
    """Run command."""
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__)

    parser.add_option('--input', dest='input_fname',
                      help='Input data file name', metavar='filename')
    parser.add_option('--mrk', dest='mrk_fname',
                      help='MEG Marker file name', metavar='filename')
    parser.add_option('--elp', dest='elp_fname',
                      help='Headshape points file name', metavar='filename')
    parser.add_option('--hsp', dest='hsp_fname',
                      help='Headshape file name', metavar='filename')
    parser.add_option('--stim', dest='stim',
                      help='Colon Separated Stimulus Trigger Channels',
                      metavar='chs')
    parser.add_option('--slope', dest='slope', help='Slope direction',
                      metavar='slope')
    parser.add_option('--stimthresh', dest='stimthresh', default=1,
                      help='Threshold value for trigger channels',
                      metavar='value')
    parser.add_option('--output', dest='out_fname',
                      help='Name of the resulting fiff file',
                      metavar='filename')
    parser.add_option('--debug', dest='debug', action='store_true',
                      default=False,
                      help='Set logging level for terminal output to debug')

    options, args = parser.parse_args()

    if options.debug:
        mne.set_log_level('debug')

    input_fname = options.input_fname
    if input_fname is None:
        with ETSContext():
            mne.gui.kit2fiff()
        sys.exit(0)

    hsp_fname = options.hsp_fname
    elp_fname = options.elp_fname
    mrk_fname = options.mrk_fname
    stim = options.stim
    slope = options.slope
    stimthresh = options.stimthresh
    out_fname = options.out_fname

    if isinstance(stim, str):
        stim = map(int, stim.split(':'))

    raw = read_raw_kit(input_fname=input_fname, mrk=mrk_fname, elp=elp_fname,
                       hsp=hsp_fname, stim=stim, slope=slope,
                       stimthresh=stimthresh)

    raw.save(out_fname)
    raw.close()
    sys.exit(0)
Example #16
def container_process(conf):
    print('\trun tfr container...')
    path_home = conf.path_home
    kind = conf.kind
    train = conf.train
    frequency = conf.frequency
    spec = conf.spec
    data_path = conf.data_path
    verbose = conf.verbose

    donor = mne.Evoked(f'{path_home}donor-ave.fif', verbose = 'ERROR')
    fpath_events = conf.path_mio + '/mio_out_{}/{}_run{}_mio_corrected_{}{}.txt'
    plot_spectrogram = conf.plot_spectrogram
    single_trial = conf.single_trial

    #get rid of runs, leave frequency data for pos and neg feedback for time course plotting 
    for i in range(len(kind)):
        for subject in conf.subjects:
            data = []
            processing_done = False
            if not plot_spectrogram:
                out_file = conf.path_container + "{}_{}{}_{}{}-ave.fif".format(subject, spec, kind[i], frequency, train)
            else:
                out_file = conf.path_container + "{0}_{1}{2}_{3}{4}-50ms-tfr.h5".format(subject, spec, kind[i], frequency, train)
            for run in conf.runs:
                print('\t\t', kind[i], run, subject)
                path_events = fpath_events.format(kind[i], subject, run, kind[i], train)
                if pathlib.Path(path_events).exists():
                    if verbose:
                        print('This file is being processed: ', path_events)

                    freq_file = conf.path_tfr + data_path.format(subject, run, spec, frequency, kind[i], train)
                    old_level = mne.set_log_level(verbose='ERROR', return_old_level=True)
                    freq_data = mne.time_frequency.read_tfrs(freq_file)[0]
                    mne.set_log_level(verbose=old_level)
                    data.append(freq_data.data)

                    processing_done = True

                if run == conf.runs[-1] and processing_done:
                    container_results(plot_spectrogram, single_trial, freq_data, data, donor, out_file, verbose)

    if plot_spectrogram:
        sdata = []
        for subject in conf.subjects:
            out_file = conf.path_container + "{0}_{1}{2}_{3}{4}-50ms-tfr.h5".format(subject, spec, kind[0], frequency, train)
            if pathlib.Path(out_file).exists():
                freq_subject_data = mne.time_frequency.read_tfrs(out_file)[0]
                sdata.append(freq_subject_data)

        freq_spec_data = mne.grand_average(sdata)
        title = f'Spectrogram ~{conf.L_freq}-{conf.H_freq} Hz TFR in {kind[0]}'
        PM = freq_spec_data.plot(picks='meg', fmin=conf.L_freq, fmax=conf.H_freq, vmin=-0.15, vmax=0.15, title=title)
        os.chdir('/home/asmyasnikova83/')
        PM.savefig('output.png')
        print('\tSpectrogram completed')

    print('\ttfr container completed')
Example #17
def run_lcmv_epochs(epochs, fwd, data_cov, reg, noise_cov=None,
                    pick_ori='max-power', weight_norm='nai', verbose=False):
    """Run LCMV on epochs.

    Run weight-normalized LCMV beamformer on epoch data, will return matrix of
    trials or stc object.

    Parameters
    ----------
    epochs : MNE epochs
        epochs to source reconstruct.
    fwd : MNE forward model
        forward model.
    data_cov : MNE covariance estimate
        data covariance matrix
    reg : float
        regularization parameter
    noise_cov : MNE covariance estimate
        noise covariance matrix, optional
    verbose : bool
        overrides default verbose level, defaults to False, i.e., no logger
        info.

    Returns
    -------
    stcs_mat : numpy array
        matrix with all source trials
    stc : MNE stc
        single trial stc object (first trial)
    filters : dict
        spatial filter used in computation
    """
    filters = make_lcmv(epochs.info, fwd, data_cov=data_cov,
                        noise_cov=noise_cov, pick_ori=pick_ori, reg=reg,
                        weight_norm=weight_norm, verbose=verbose)

    # apply that filter to epochs
    stcs = apply_lcmv_epochs(epochs, filters, return_generator=True,
                             max_ori_out='signed', verbose=verbose)

    # preallocate matrix
    stcs_mat = np.ones((epochs._data.shape[0], fwd['nsource'],
                        len(epochs.times)))

    if verbose is False:
        mne.set_log_level('WARNING')

    # resolve generator
    for trial in range(epochs._data.shape[0]):
        # first trial: also keep the stc object
        if trial == 0:
            stc = next(stcs)
            stcs_mat[trial, :, :] = stc.data
        else:
            stcs_mat[trial, :, :] = next(stcs).data

    return stcs_mat, stc, filters
Example #18
def run():
    """Run command."""
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__)

    parser.add_option('--input', dest='input_fname',
                      help='Input data file name', metavar='filename')
    parser.add_option('--mrk', dest='mrk_fname',
                      help='MEG Marker file name', metavar='filename')
    parser.add_option('--elp', dest='elp_fname',
                      help='Headshape points file name', metavar='filename')
    parser.add_option('--hsp', dest='hsp_fname',
                      help='Headshape file name', metavar='filename')
    parser.add_option('--stim', dest='stim',
                      help='Colon Separated Stimulus Trigger Channels',
                      metavar='chs')
    parser.add_option('--slope', dest='slope', help='Slope direction',
                      metavar='slope')
    parser.add_option('--stimthresh', dest='stimthresh', default=1,
                      help='Threshold value for trigger channels',
                      metavar='value')
    parser.add_option('--output', dest='out_fname',
                      help='Name of the resulting fiff file',
                      metavar='filename')
    parser.add_option('--debug', dest='debug', action='store_true',
                      default=False,
                      help='Set logging level for terminal output to debug')

    options, args = parser.parse_args()

    if options.debug:
        mne.set_log_level('debug')

    input_fname = options.input_fname
    if input_fname is None:
        with ETSContext():
            mne.gui.kit2fiff()
        sys.exit(0)

    hsp_fname = options.hsp_fname
    elp_fname = options.elp_fname
    mrk_fname = options.mrk_fname
    stim = options.stim
    slope = options.slope
    stimthresh = options.stimthresh
    out_fname = options.out_fname

    if isinstance(stim, str):
        stim = map(int, stim.split(':'))

    raw = read_raw_kit(input_fname=input_fname, mrk=mrk_fname, elp=elp_fname,
                       hsp=hsp_fname, stim=stim, slope=slope,
                       stimthresh=stimthresh)

    raw.save(out_fname)
    raw.close()
Example #19
def _fast_map_meg_channels(info, pick_from, pick_to,
                           mode='fast'):
    from mne.io.pick import pick_info
    from mne.forward._field_interpolation import _setup_dots
    from mne.forward._field_interpolation import _compute_mapping_matrix
    from mne.forward._make_forward import _create_meg_coils, _read_coil_defs
    from mne.forward._lead_dots import _do_self_dots, _do_cross_dots
    from mne.bem import _check_origin

    miss = 1e-4  # Smoothing criterion for MEG

    # XXX: hack to silence _compute_mapping_matrix
    verbose = mne.get_config('MNE_LOGGING_LEVEL', 'INFO')
    mne.set_log_level('WARNING')

    def _compute_dots(info, mode='fast'):
        """Compute all-to-all dots."""
        templates = _read_coil_defs()
        coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'],
                                  templates)
        my_origin = _check_origin((0., 0., 0.04), info_from)
        int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils, 'meg')
        self_dots = _do_self_dots(int_rad, False, coils, my_origin, 'meg',
                                  lut_fun, n_fact, n_jobs=1)
        cross_dots = _do_cross_dots(int_rad, False, coils, coils,
                                    my_origin, 'meg', lut_fun, n_fact).T
        return self_dots, cross_dots

    _compute_fast_dots = mem.cache(_compute_dots, verbose=0)
    info['bads'] = []  # if bads is different, hash will be different

    info_from = pick_info(info, pick_from, copy=True)
    templates = _read_coil_defs()
    coils_from = _create_meg_coils(info_from['chs'], 'normal',
                                   info_from['dev_head_t'], templates)
    my_origin = _check_origin((0., 0., 0.04), info_from)
    int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from, 'meg')

    # This function needs a clean input. It hates the presence of other
    # channels than MEG channels. Make sure all is picked.
    self_dots, cross_dots = _compute_fast_dots(
        info, mode=mode)

    cross_dots = cross_dots[pick_to, :][:, pick_from]
    self_dots = self_dots[pick_from, :][:, pick_from]

    ch_names = [c['ch_name'] for c in info_from['chs']]
    fmd = dict(kind='meg', ch_names=ch_names,
               origin=my_origin, noise=noise, self_dots=self_dots,
               surface_dots=cross_dots, int_rad=int_rad, miss=miss)

    fmd['data'] = _compute_mapping_matrix(fmd, info_from)
    mne.set_log_level(verbose)

    return fmd['data']
Example #20
def raw():
    """Fixture for physionet EEG subject 4, dataset 1."""
    mne.set_log_level("WARNING")
    # load in subject 4, run 1 dataset
    edf_fpath = eegbci.load_data(4, 1, update_path=True)[0]

    # using sample EEG data (https://physionet.org/content/eegmmidb/1.0.0/)
    raw = mne.io.read_raw_edf(edf_fpath, preload=True)

    # The eegbci data has non-standard channel names. We need to rename them:
    eegbci.standardize(raw)

    return raw
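This looks like a pytest fixture (the @pytest.fixture decorator is presumably applied in the source but not shown in the excerpt); a test consumes it by naming it as an argument. A hypothetical usage sketch, assuming the eegbci recording has the usual 64 EEG channels:

def test_raw_has_expected_channels(raw):
    # The eegbci recordings contain 64 EEG channels.
    assert len(raw.ch_names) == 64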
Example #21
def raw():
    """Fixture for physionet EEG subject 4, dataset 1."""
    mne.set_log_level("WARNING")
    # load in subject 4, run 1 dataset
    edf_fpath = eegbci.load_data(4, 1, update_path=True)[0]

    # using sample EEG data (https://physionet.org/content/eegmmidb/1.0.0/)
    raw = mne.io.read_raw_edf(edf_fpath, preload=True)
    raw.rename_channels(lambda s: s.strip("."))
    raw.rename_channels(
        lambda s: s.replace("c", "C").replace("o", "O").replace("f", "F").
        replace("t", "T").replace("Tp", "TP").replace("Cp", "CP"))
    return raw
Example #22
def main(in_pkl, kind):
    logging.basicConfig(level=logging.DEBUG)
    mne.set_log_level(logging.ERROR)

    in_df = pd.read_pickle(in_pkl)
    output_path = os.path.join(
        LABELED_ROOT, os.path.splitext(os.path.split(in_pkl)[1])[0]) + \
        '_sigmas.pkl'
    logging.info(f'Creating sigmas for file {in_pkl} in file {output_path}')

    create_sigma_pkl(in_df, kind, output_path)

    logging.info('Finished procedure.')
Example #23
def set_log_level(verbose='info'):
    """Set lot level.

    Set the general log level. level can be 'info', 'debug' or 'warning'
    """
    mne.set_log_level(False)

    level = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING
    }

    coloredlogs.install(level=level.get(verbose, logging.INFO))
Example #24
def load_brainvision(path):
    # Import the BrainVision data into an MNE Raw object
    mne.set_log_level("WARNING")
    print('reading raw file...')
    raw = mne.io.read_raw_brainvision(path,
                                      preload=True,
                                      eog=('EOG1_1', 'EOG2_1'),
                                      misc=('EMG1_1', 'EMG2_1'),
                                      verbose=True)
    raw.rename_channels(lambda s: s.strip("."))

    # Mark these as EMG channels (channel type)
    raw.set_channel_types({'EMG1_1': 'emg', 'EMG2_1': 'emg'})
    print('Done!')
    return raw
Example #25
def interpolate_bads(inst, picks, reset_bads=True, mode='accurate'):
    """Interpolate bad MEG and EEG channels."""
    import mne
    from mne.channels.interpolation import _interpolate_bads_eeg

    mne.set_log_level('WARNING')  # to prevent cobyla printf error

    # this needs picks, assume our instance is complete and intact
    _interpolate_bads_eeg(inst)
    _interpolate_bads_meg_fast(inst, picks=picks, mode=mode)

    if reset_bads is True:
        inst.info['bads'] = []

    return inst
Example #26
def main(kind, file):
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)
    mne.set_log_level(logging.ERROR)

    if file != '':
        _, ext = os.path.splitext(file)
        file = files_builder(ext=ext).single_file(file)
        interactive_plot(file)
        return

    logger.info(f'Plotting EEG signals of kind {kind}.')

    for file in files_builder(kind):
        interactive_plot(file)
Example #27
def ReadEdf(edffilename):
    mne.set_log_level("WARNING")
    raw = mne.io.read_raw_edf(edffilename, preload=False)
    #raw.plot()
    start, stop = raw.time_as_index([100, 115])  # 100 s to 115 s data segment

    picks = mne.pick_types(raw.info, include=(raw.ch_names))
    n_channels = len(raw.ch_names)
    data, times = raw[picks[:n_channels], start:stop]
    ch_names = [raw.info['ch_names'][p] for p in picks[:n_channels]]
    edfPanda1 = pd.DataFrame(data[0], times)
    edfPanda2 = pd.DataFrame(data[1], times)
    edfPanda3 = pd.DataFrame(data[2], times)
    edfPanda4 = pd.DataFrame(data[3], times)
    edfPanda5 = pd.DataFrame(data[14], times)
    return (edfPanda1, edfPanda2, edfPanda3, edfPanda4, edfPanda5, ch_names)
Example #28
def main(fname, kind, ww, ws, minl, maxl):
    logging.basicConfig(level=logging.DEBUG)
    mne.set_log_level(logging.ERROR)

    window = Window(ww, ws) if ww > 0 else None
    existing_df = None

    output_path = os.path.join(LABELED_ROOT, kind)
    assert os.path.exists(output_path), output_path
    fpath = os.path.join(output_path, fname)
    if os.path.isfile(fpath):
        logging.warning(f'File {fpath} exists. We will try to extend it.')
        existing_df = pd.read_pickle(fpath)
    create_training_data(fpath, DataKind(kind), window, minl, maxl,
                         existing_df)
    logging.info('Finished procedure.')
Example #29
File: utils.py Project: mmagnuski/borsar
def silent_mne(full_silence=False):
    '''
    Context manager that silences warnings from mne-python.
    '''
    import mne

    log_level = mne.set_log_level('error', return_old_level=True)

    if full_silence:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            yield
    else:
        yield

    mne.set_log_level(log_level)
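Since silent_mne uses yield, it is presumably wrapped with contextlib.contextmanager in the source (the decorator is not shown in this excerpt). A usage sketch under that assumption, with a hypothetical file name:

import mne

# Assumes silent_mne is decorated with @contextlib.contextmanager in the source.
with silent_mne(full_silence=True):
    raw = mne.io.read_raw_fif("sample-raw.fif", preload=True)  # hypothetical file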
Example #30
def set_log_level(level="INFO"):
    """Set lot level.

    Set the general log level.
    Use one of the levels supported by python logging, i.e.:
        DEBUG, INFO, WARNING, ERROR, CRITICAL
    """
    VALID_LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
    level = level.upper()
    if level not in VALID_LEVELS:
        raise ValueError(
            f"Invalid level {level}. Choose one of {VALID_LEVELS}.")
    mne.set_log_level(False)
    logging.basicConfig(
        level=level,
        format="%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s",
    )
Example #31
def test_morphing():
    mne.set_log_level('warning')
    sss = datasets._mne_source_space('fsaverage', 'ico-4', subjects_dir)
    vertices_to = [sss[0]['vertno'], sss[1]['vertno']]
    ds = datasets.get_mne_sample(-0.1, 0.1, src='ico', sub='index==0', stc=True)
    stc = ds['stc', 0]
    morph_mat = mne.compute_morph_matrix('sample', 'fsaverage', stc.vertno,
                                         vertices_to, None, subjects_dir)
    ndvar = ds['src']

    morphed_ndvar = morph_source_space(ndvar, 'fsaverage')
    morphed_stc = mne.morph_data_precomputed('sample', 'fsaverage', stc,
                                             vertices_to, morph_mat)
    assert_array_equal(morphed_ndvar.x[0], morphed_stc.data)
    morphed_stc_ndvar = load.fiff.stc_ndvar([morphed_stc], 'fsaverage', 'ico-4',
                                            subjects_dir, 'src', parc=None)
    assert_dataobj_equal(morphed_ndvar, morphed_stc_ndvar)
Example #32
def interpolate_bads(inst, reset_bads=True, mode='accurate'):
    """Interpolate bad MEG and EEG channels.
    """
    import mne
    from mne.channels.interpolation import _interpolate_bads_eeg
    mne.set_log_level('WARNING')  # to prevent cobyla printf error

    if getattr(inst, 'preload', None) is False:
        raise ValueError('Data must be preloaded.')

    _interpolate_bads_eeg(inst)
    _interpolate_bads_meg_fast(inst, mode=mode)

    if reset_bads is True:
        inst.info['bads'] = []

    return inst
Example #33
def generate_mmi(seconds, targets):
    mne.set_log_level(False)

    for target in targets:
        perf = _mmi_performance(target, 'saved_models/MMI/{}s/fine_tuned-{}/'.format(seconds, target), alignment=True,
                                tlen=seconds)
        perf.to_csv('analysis/mmi_{}s_{}_ft.csv'.format(seconds, target))
        perf = _mmi_performance(target, 'saved_models/MMI/{}s/fine_tuned-{}/'.format(seconds, target), alignment=False,
                                tlen=seconds)
        perf.to_csv('analysis/mmi_{}s_{}_ft_no_ea.csv'.format(seconds, target))

        perf = _mmi_performance(target, 'saved_models/MMI/{}s/targetted_mdl_{}/'.format(seconds, target), tlen=seconds,
                                alignment=True, subjects=64)
        perf.to_csv('analysis/mmi_{}s_{}_mdl.csv'.format(seconds, target))
        perf = _mmi_performance(target, 'saved_models/MMI/{}s/targetted_mdl_{}/'.format(seconds, target), tlen=seconds,
                                alignment=False, subjects=64)
        perf.to_csv('analysis/mmi_{}s_{}_mdl_no_ea.csv'.format(seconds, target))
Example #34
def newdatalabels(filename):
    raw_data = mne.io.read_raw_eeglab(filename, preload=True)
    mne.set_log_level("WARNING")
    raw_data.resample(128, npad='auto')  #resampling
    #band-pass filtering in the range 4 Hz - 38 Hz
    raw_data.filter(4, 38, method="iir")
    #    data=raw_data.get_data()
    #    chs = raw_data.pick_channels(['FC6','FCZ','FC5','FC3','FC4','CP3','CP4','C5','C3','C1','CZ','C2','C4','C6'])

    chs = raw_data.pick_channels([
        'FCZ', 'FC5', 'FC1', 'FC2', 'FC6', 'FC3', 'FC4', 'C5', 'C3', 'C1',
        'CZ', 'C2', 'C4', 'C6', 'CP3', 'CP1', 'CP2', 'CP4'
    ])
    data = chs.get_data()
    #    data = data[122:130,:]

    # electrode-wise exponential moving standardization
    m = np.mean(data[:, 0:1000],
                axis=1)  # m is the mean of the first 1000 data points
    v = np.var(data[:, 0:1000],
               axis=1)  # v is the variance of the first 1000 data points
    sd = np.zeros((data.shape[0], data.shape[1]))  # standardized data

    for i in range(1000, data.shape[1]):
        m = 0.001 * data[:, i] + 0.999 * m
        v = 0.001 * ((data[:, i] - m)**2) + 0.999 * v
        sd[:, i] = (data[:, i] - m) / np.sqrt(v)

    events_from_annot, event_dict = mne.events_from_annotations(raw_data)
    # new standardized data with labels
    newsd = []
    labels = []
    print(filename)
    for i in range(len(events_from_annot)):
        if events_from_annot[i, 2] == 1 or events_from_annot[i, 2] == 2:
            st = events_from_annot[i, 0] - 256  #start time
            et = events_from_annot[i, 0] + 768  #end time
            labels.append(events_from_annot[i, 2])
            #newsd.append(sd[:,st:et])
            tmp = sd[:, st:et]
            if tmp.shape[1] != 0:
                newsd.append(tmp)

    newsd = np.stack([arr for arr in newsd], axis=0)
    labels = np.array(labels)
    return (newsd, labels)
Example #35
def interpolate_bads(inst, reset_bads=True, mode='accurate'):
    """Interpolate bad MEG and EEG channels.
    """
    import mne
    from mne.channels.interpolation import _interpolate_bads_eeg
    mne.set_log_level('WARNING')  # to prevent cobyla printf error

    if getattr(inst, 'preload', None) is False:
        raise ValueError('Data must be preloaded.')

    _interpolate_bads_eeg(inst)
    _interpolate_bads_meg_fast(inst, mode=mode)

    if reset_bads is True:
        inst.info['bads'] = []

    return inst
Example #36
def create_3Dmatrix(epochs_dim, ch_type, input_filename, input_filename_trial, output_filename=None):

    mne.set_log_level('WARNING')
    raw = mne.io.read_raw_fif(input_filename)

    with open(input_filename_trial, 'rb') as f:
        datatrial = pickle.load(f)
    trigger_times = datatrial['trigger_times']
    trigger_decimal = datatrial['trigger_decimal']
    coi = datatrial['coi']

    print()
    print("Get the indexes of just the MEG channels")
    picks = mne.pick_types(raw.info, meg=ch_type['meg'], eeg=ch_type['eeg'],
                           stim=ch_type['stim'], exclude=ch_type['exclude'])  # only MEG channels

    events = np.vstack([trigger_times, np.zeros(len(trigger_times), dtype=int), trigger_decimal]).T

    print("Extracting Epochs for each condition for the contrast.")
    baseline = (None, 0)  # means from the first instant to t = 0
    reject = {}
    tmin = epochs_dim[0]
    tmax = epochs_dim[1]
    epochs = mne.Epochs(raw, events, event_id=None, tmin=tmin, tmax=tmax, proj=True,
                        picks=picks, baseline=baseline, preload=False, reject=reject)

    X = epochs.get_data()
    y = epochs.events[:, 2]
    label = np.zeros(len(y))
    for i, yi in enumerate(y):
        if np.sum(yi == coi[0]):
            label[i] = 1

    if output_filename is None:
        filename_save = input_filename_trial.split('.')[0] + '_3D.pickle'
    else:
        filename_save = os.path.abspath(output_filename)

    print("Saving to:", filename_save)
    with open(filename_save, 'wb') as f:
        pickle.dump({'X': X,
                     'y': label,
                     'tmin': tmin,
                     'sfreq': raw.info['sfreq']},
                    f, protocol=pickle.HIGHEST_PROTOCOL)

    return filename_save
Example #37
def _fast_map_meg_channels(info, pick_from, pick_to,
                           dots=None, mode='fast'):
    from mne.io.pick import pick_info
    from mne.forward._field_interpolation import _setup_dots
    from mne.forward._field_interpolation import _compute_mapping_matrix
    from mne.forward._make_forward import _create_meg_coils, _read_coil_defs
    from mne.bem import _check_origin

    miss = 1e-4  # Smoothing criterion for MEG

    # XXX: hack to silence _compute_mapping_matrix
    verbose = mne.get_config('MNE_LOGGING_LEVEL', 'INFO')
    mne.set_log_level('WARNING')

    info_from = pick_info(info, pick_from, copy=True)
    templates = _read_coil_defs()
    coils_from = _create_meg_coils(info_from['chs'], 'normal',
                                   info_from['dev_head_t'], templates)
    my_origin = _check_origin((0., 0., 0.04), info_from)
    int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from, 'meg')

    # This function needs a clean input. It hates the presence of other
    # channels than MEG channels. Make sure all is picked.
    if dots is None:
        dots = self_dots, cross_dots = _compute_dots(info, mode=mode)
    else:
        self_dots, cross_dots = dots

    self_dots, cross_dots = _pick_dots(dots, pick_from, pick_to)

    ch_names = [c['ch_name'] for c in info_from['chs']]
    fmd = dict(kind='meg', ch_names=ch_names,
               origin=my_origin, noise=noise, self_dots=self_dots,
               surface_dots=cross_dots, int_rad=int_rad, miss=miss)

    fmd['data'] = _compute_mapping_matrix(fmd, info_from)
    mne.set_log_level(verbose)

    return fmd['data']
Example #38
    def __init__(self, subject, settings=dict()):
        self.subject = subject
        self.settings = settings

        if 'debug' in settings:
            configure_custom(settings['debug'])
        else:
            configure_custom(debug=True)

        if 'mne_log_level' in settings:
            mne.set_log_level(settings['mne_log_level'])
        else:
            mne.set_log_level('INFO')

        if 'sfreq' in settings:
            self.downsample_sfreq = settings['sfreq']
        else:
            self.downsample_sfreq = 64

        if 'layout' in settings:
            self.layout = settings['layout']
        else:
            self.layout = mne.channels.read_layout('biosemi.lay')

        if 'data_root' in settings:
            self.data_root = settings['data_root']
        else:
            self.data_root = os.path.join(deepthought.DATA_PATH, 'OpenMIIR')

        # load stimuli metadata version
        self.stimuli_version = get_stimuli_version(subject)

        # initial state
        self.raw = None
        self.ica = None

        self.filtered = False
        self.downsampled = False
Example #39
def plot_group_fourierICA(fn_groupICA_obj,
                          stim_id=1, stim_delay=0,
                          resp_id=None,
                          corr_event_picking=None,
                          global_scaling=True,
                          subjects_dir=None,
                          bar_plot=False):

    """
    Interface to plot the results from group FourierICA

        Parameters
        ----------
        fn_groupICA_obj: filename of the group ICA object
        stim_id: Id of the event of interest to be considered in
            the stimulus channel. Only of interest if 'stim_name'
            is set
            default: event_id=1
        stim_delay: stimulus delay in milliseconds
            default: stim_delay=0
        resp_id: Response IDs for correct event estimation. Note:
            Must be in the order corresponding to the 'event_id'
            default: resp_id=None
        corr_event_picking: string
            if set should contain the complete python path and
            name of the function used to identify only the correct events
            default: corr_event_picking=None
        subjects_dir: string
            If the subjects directory does not conform to the system
            variable 'SUBJECTS_DIR', this parameter should be set
            default: subjects_dir=None
        bar_plot: boolean
            If set the results of the time-frequency analysis
            are shown as bar plot. This option is recommended
            when FourierICA was applied to resting-state data
            default: bar_plot=False
    """


    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    from jumeg.decompose.fourier_ica_plot import plot_results_src_space
    from mne import set_log_level
    from os.path import exists
    from pickle import dump, load

    # set log level to 'WARNING'
    set_log_level('WARNING')


    # ------------------------------------------
    # read in group FourierICA object
    # ------------------------------------------
    with open(fn_groupICA_obj, "rb") as filehandler:
        groupICA_obj = load(filehandler)

    icasso_obj = groupICA_obj['icasso_obj']
    win_length_sec = icasso_obj.tmax_win - icasso_obj.tmin_win
    temp_profile_names = ["Event-ID %i" % i for i in groupICA_obj['icasso_obj'].event_id]

    # ------------------------------------------
    # check if time-courses already exist
    # ------------------------------------------
    fn_temporal_envelope = fn_groupICA_obj[:-4] + '_temporal_envelope.obj'
    # generate time courses if they do not exist
    if not exists(fn_temporal_envelope):
        # import necessary modules
        from jumeg.decompose.group_ica import get_group_fourierICA_time_courses

        # generate time courses
        temporal_envelope, src_loc, vert, sfreq = \
            get_group_fourierICA_time_courses(groupICA_obj, event_id=stim_id,
                                              stim_delay=stim_delay, resp_id=resp_id,
                                              corr_event_picking=corr_event_picking,
                                              unfiltered=False, baseline=(None, 0))

        # save data
        temp_env_obj = {'temporal_envelope': temporal_envelope,
                        'src_loc': src_loc, 'vert': vert, 'sfreq': sfreq}
        with open(fn_temporal_envelope, "wb") as filehandler:
            dump(temp_env_obj, filehandler)

    # when data are stored read them in
    else:
        # read data in
        with open(fn_temporal_envelope, "rb") as filehandler:
            temp_env_obj = load(filehandler)

        # get data
        temporal_envelope = temp_env_obj['temporal_envelope']
        src_loc = temp_env_obj['src_loc']
        vert = temp_env_obj['vert']


    # ------------------------------------------
    # check if classification already exists
    # ------------------------------------------
    if 'classification' in groupICA_obj and\
            'mni_coords' in groupICA_obj and\
            'labels' in groupICA_obj:
        classification = groupICA_obj['classification']
        mni_coords = groupICA_obj['mni_coords']
        labels = groupICA_obj['labels']
    else:
        classification = {}
        mni_coords = []
        labels = None


    # ------------------------------------------
    # plot "group" results
    # ------------------------------------------
    fnout_src_fourier_ica = fn_groupICA_obj[:fn_groupICA_obj.rfind('.obj')] + \
                            img_src_group_ica

    mni_coords, classification, labels =\
        plot_results_src_space(groupICA_obj['fourier_ica_obj'],
                               groupICA_obj['W_orig'], groupICA_obj['A_orig'],
                               src_loc_data=src_loc, vertno=vert,
                               subjects_dir=subjects_dir,
                               tpre=icasso_obj.tmin_win,
                               win_length_sec=win_length_sec,
                               flow=icasso_obj.flow, fhigh=icasso_obj.fhigh,
                               fnout=fnout_src_fourier_ica,
                               tICA=icasso_obj.tICA,
                               global_scaling=global_scaling,
                               temporal_envelope=temporal_envelope,
                               temp_profile_names=temp_profile_names,
                               classification=classification,
                               mni_coords=mni_coords, labels=labels,
                               bar_plot=bar_plot)


    # ------------------------------------------
    # adjust groupICA_obj with the new
    # parameters if they didn't exist before
    # ------------------------------------------
    if 'classification' not in groupICA_obj and\
            'mni_coords' not in groupICA_obj and\
            'labels' not in groupICA_obj:
        groupICA_obj['classification'] = classification
        groupICA_obj['mni_coords'] = mni_coords
        groupICA_obj['labels'] = labels

        # write new groupICA_obj back to disk
        with open(fn_groupICA_obj, "wb") as filehandler:
            dump(groupICA_obj, filehandler)
Example #40
def group_fourierICA_src_space(fname_raw,
                               ica_method='fourierica',              # parameter for ICA method
                               nrep=50,                              # parameter for ICASSO
                               src_loc_method='dSPM', snr=1.0,       # parameter for inverse estimation
                               inv_pattern='-src-meg-fspace-inv.fif',
                               stim_name='STI 014', stim_id=1,       # parameter for epoch generation
                               corr_event_picking=None,
                               stim_delay=0.0,
                               tmin=-0.2, tmax=0.8,
                               average=False,
                               flow=4., fhigh=34.,                   # parameter for Fourier transformation
                               remove_outliers=False,
                               hamming_data=False,
                               dim_reduction='MDL',
                               pca_dim=None, cost_function='g2',     # parameter for complex ICA estimation
                               lrate=0.2, complex_mixing=True,
                               conv_eps=1e-9, max_iter=5000,
                               envelopeICA=False,
                               interpolate_bads=True,
                               decim_epochs=None,
                               fnout=None,                           # parameter for saving the results
                               verbose=True):


    """
    Module to perform group FourierICA (if wished in combination with
    ICASSO --> if 'nrep'=1 only FourierICA is performed, if 'nrep'>1
    FourierICA is performed in combination with ICASSO).

        Parameters
        ----------
        fname_raw: list of strings
            filename(s) of the pre-processed raw file(s)
        ica_method: string
            which ICA method should be used for the group ICA?
            You can chose between 'extended-infomax', 'fastica',
            'fourierica' and 'infomax'
            default: ica_method='fourierica'
        nrep: integer
            number of repetitions ICA, i.e. ICASSO, should be performed
            default: nrep=50
        src_loc_method: string
            method used for source localization.
            default: src_loc_method='dSPM'
        snr: float
            signal-to-noise ratio for performing source
            localization --> for single epochs an snr of 1.0 is recommended,
            if keyword 'average' is set one should use snr=3.0
            default: snr=1.0
        inv_pattern: string
            String containing the ending pattern of the inverse
            solution. Note, here fspace is used if the inverse
            solution is estimated in the Fourier space for later
            applying Fourier (i.e., complex) ICA
            default: inv_pattern='-src-meg-fspace-inv.fif'
        stim_name: string
            name of the stimulus channel. Note, for
            applying FourierCIA data are chopped around stimulus
            onset. If not set data are chopped in overlapping
            windows
            default: stim_name='STI 014'
        stim_id: integer or list of integers
            list containing the event IDs
            default: stim_id=1
        corr_event_picking: string
            if set should contain the complete python path and
            name of the function used to identify only the correct events
            default: corr_event_picking=None
        stim_delay: float
            Stimulus delay in seconds
            default: stim_delay=0.0
        tmin: float
            time of interest prior to stimulus onset for epoch
            generation (in seconds)
            default: tmin=-0.2
        tmax: float
            time of interest after the stimulus onset for epoch
            generation (in seconds)
            default: tmax=0.8
        average: bool
            should data be averaged across subjects before
            FourierICA application? Note, averaged data require
            less memory!
            default: average=False
        flow: float
            lower frequency border for estimating the optimal
            de-mixing matrix using FourierICA
            default: flow=4.0
        fhigh: float
            upper frequency border for estimating the optimal
            de-mixing matrix using FourierICA
            default: fhigh=34.0
            Note: here default flow and fhigh are chosen to
            contain:
                - theta (4-7Hz)
                - low (7.5-9.5Hz) and high alpha (10-12Hz),
                - low (13-23Hz) and high beta (24-34Hz)
        remove_outliers: If set outliers are removed from the Fourier
            transformed data.
            Outliers are defined as windows with large log-average power (LAP)

                 LAP_{c,t} = \log \sum_{f} |X_{c,tf}|^2

            where c, t and f are channels, window time-onsets and frequencies,
            respectively. The threshold is defined as |mean(LAP)+3 std(LAP)|.
            This process can be bypassed or replaced by specifying a function
            handle as an optional parameter.
            default: remove_outliers=False
        hamming_data: boolean
            if set a hamming window is applied to each
            epoch prior to Fourier transformation
            default: hamming_data=False
        dim_reduction: string {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
            Method for dimension selection. For further information about
            the methods please check the script 'dimension_selection.py'.
            default: dim_reduction='MDL'
        pca_dim: Integer
            The number of components used for PCA decomposition.
            default: pca_dim=None
        cost_function: string
            which cost-function should be used in the complex
            ICA algorithm
            'g1': g_1(y) = 1 / (2 * np.sqrt(lrate + y))
            'g2': g_2(y) = 1 / (lrate + y)
            'g3': g_3(y) = y
            default: cost_function='g2'
        lrate: float
            learning rate which should be used in the applied
            ICA algorithm
            default: lrate=0.2
        complex_mixing: bool
            if mixing matrix should be real or complex
            default: complex_mixing=True
        conv_eps: float
            iteration stop when weight changes are smaller
            then this number
            default: conv_eps = 1e-9
        max_iter: integer
            maximum number of iterations used in FourierICA
            default: max_iter=5000
        envelopeICA: if set ICA is estimated on the envelope
            of the Fourier transformed input data, i.e., the
            mixing model is |x|=As
            default: envelopeICA=False
        interpolate_bads: bool
            if set bad channels are interpolated (using the
            mne routine raw.interpolate_bads()).
            default: interpolate_bads=True
        decim_epochs: integer
            if set the number of epochs will be reduced (per
            subject) to that number for the estimation of the demixing matrix.
            Note: the epochs were chosen randomly from the complete set of
            epochs.
            default: decim_epochs=None
        fnout: string
            output filename of the result structure. If not set the filename
            is generated automatically.
            default: fnout=None
        verbose: bool, str, int, or None
            If not None, override default verbose level
            (see mne.verbose).
            default: verbose=True


        Return
        ------
        groupICA_obj: dictionary
            Group ICA information stored in a dictionary. The dictionary
            has following keys:
            'fn_list': List of filenames which where used to estimate the
                group ICA
            'W_orig': estimated de-mixing matrix
            'A_orig': estimated mixing matrix
            'quality': quality index of the clustering between
                components belonging to one cluster
                (between 0 and 1; 1 refers to small clusters,
                i.e., components in one cluster have a highly similar)
            'icasso_obj': ICASSO object. For further information
                please have a look into the ICASSO routine
            'fourier_ica_obj': FourierICA object. For further information
                please have a look into the FourierICA routine
        fnout: string
            filename where the 'groupICA_obj' is stored
    """

    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    from jumeg.decompose.icasso import JuMEG_icasso
    from mne import set_log_level
    import numpy as np
    from os.path import dirname, join
    from pickle import dump

    # set log level to 'WARNING'
    set_log_level('WARNING')


    # ------------------------------------------
    # check input parameter
    # ------------------------------------------
    # filenames
    if isinstance(fname_raw, list):
        fn_list = fname_raw
    else:
        fn_list = [fname_raw]


    # -------------------------------------------
    # set some path parameter
    # -------------------------------------------
    fn_inv = []
    for fn_raw in fn_list:
        fn_inv.append(fn_raw[:fn_raw.rfind('-raw.fif')] + inv_pattern)


    # ------------------------------------------
    # apply FourierICA combined with ICASSO
    # ------------------------------------------
    icasso_obj = JuMEG_icasso(nrep=nrep, fn_inv=fn_inv,
                              src_loc_method=src_loc_method,
                              morph2fsaverage=True,
                              ica_method=ica_method,
                              cost_function=cost_function,
                              dim_reduction=dim_reduction,
                              decim_epochs=decim_epochs,
                              tICA=False, snr=snr, lrate=lrate)

    W_orig, A_orig, quality, fourier_ica_obj \
        = icasso_obj.fit(fn_list, average=average,
                         stim_name=stim_name,
                         event_id=stim_id, pca_dim=pca_dim,
                         stim_delay=stim_delay,
                         tmin_win=tmin, tmax_win=tmax,
                         flow=flow, fhigh=fhigh,
                         max_iter=max_iter, conv_eps=conv_eps,
                         complex_mixing=complex_mixing,
                         envelopeICA=envelopeICA,
                         hamming_data=hamming_data,
                         remove_outliers=remove_outliers,
                         cost_function=cost_function,
                         interpolate_bads=interpolate_bads,
                         corr_event_picking=corr_event_picking,
                         verbose=verbose)


    # ------------------------------------------
    # save results to disk
    # ------------------------------------------
    # generate dictionary to save results
    groupICA_obj = {'fn_list': fn_list,
                    'W_orig': W_orig,
                    'A_orig': A_orig,
                    'quality': quality,
                    'icasso_obj': icasso_obj,
                    'fourier_ica_obj': fourier_ica_obj}


    # check if the output filename is already set
    if not fnout:
        # generate filename for output structure
        if isinstance(stim_id, (list, tuple)):
            fn_base = "group_FourierICA_combined"
            for id in np.sort(stim_id)[::-1]:
                fn_base += "_%ddB" % id
        elif isinstance(stim_id, (int, long)):
            fn_base = "group_FourierICA_%ddB" % stim_id
        else:
            fn_base = "group_ICA_resting_state"

        # write file to disk
        fnout = join(dirname(dirname(fn_list[0])), fn_base + ".obj")

    with open(fnout, "wb") as filehandler:
        dump(groupICA_obj, filehandler)

    # return dictionary
    return groupICA_obj, fnout
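# --------------------------------------------------------------------------
# Usage sketch (added, not part of the original): the function name
# 'group_fourierICA' and the raw filenames are assumed for illustration only.
# It shows how the group FourierICA routine documented above might be called
# for a list of pre-processed raw files.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    fn_list = ['subj01,bp1-45Hz-raw.fif', 'subj02,bp1-45Hz-raw.fif']
    groupICA_obj, fnout = group_fourierICA(fn_list, stim_name='STI 014',
                                           stim_id=1, flow=4., fhigh=34.,
                                           dim_reduction='MDL', nrep=50)
    print('Group FourierICA results written to: %s' % fnout)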
示例#41
0
def test_source_estimate():
    "Test SourceSpace dimension"
    mne.set_log_level('warning')
    ds = datasets.get_mne_sample(src='ico')
    dsa = ds.aggregate('side')

    # test auto-conversion
    asndvar('epochs', ds=ds)
    asndvar('epochs', ds=dsa)
    asndvar(dsa['epochs'][0])

    # source space clustering
    res = testnd.ttest_ind('src', 'side', ds=ds, samples=0, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10)
    assert_equal(res.clusters.n_cases, 52)

    # test disconnecting parc
    src = ds['src']
    source = src.source
    parc = source.parc
    orig_conn = set(map(tuple, source.connectivity()))
    disc_conn = set(map(tuple, source.connectivity(True)))
    assert_true(len(disc_conn) < len(orig_conn))
    for pair in orig_conn:
        s, d = pair
        if pair in disc_conn:
            assert_equal(parc[s], parc[d])
        else:
            assert_not_equal(parc[s], parc[d])

    # threshold-based test with parc
    srcl = src.sub(source='lh')
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10,
                           parc='source')
    assert_equal(res._cdist.dist.shape[1], len(srcl.source.parc.cells))
    label = 'superiortemporal-lh'
    c_all = res._clusters(maps=True)
    c_label = res._clusters(maps=True, source=label)
    assert_array_equal(c_label['location'], label)
    for case in c_label.itercases():
        id_ = case['id']
        idx = c_all['id'].index(id_)[0]
        assert_equal(case['v'], c_all[idx, 'v'])
        assert_equal(case['tstart'], c_all[idx, 'tstart'])
        assert_equal(case['tstop'], c_all[idx, 'tstop'])
        assert_less_equal(case['p'], c_all[idx, 'p'])
        assert_dataobj_equal(case['cluster'],
                             c_all[idx, 'cluster'].sub(source=label))

    # threshold-free test with parc
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, tstart=0.05,
                           parc='source')
    cl = res._clusters(0.05)
    assert_equal(cl.eval("p.min()"), res.p.min())
    mp = res.masked_parameter_map()
    assert_in(mp.min(), (0, res.t.min()))
    assert_in(mp.max(), (0, res.t.max()))

    # indexing source space
    s_sub = src.sub(source='fusiform-lh')
    idx = source.index_for_label('fusiform-lh')
    s_idx = src[idx]
    assert_dataobj_equal(s_sub, s_idx)
示例#42
0
import cProfile
from optparse import OptionParser
import pstats

import mne
from eelbrain import *
import eelbrain

mne.set_log_level("warning")
eelbrain._stats.testnd.multiprocessing = False


# option parser
parser = OptionParser()
parser.add_option("-m", "--make", dest="make", metavar="KIND", help="Make a new profile of kind mne or uts")
parser.add_option("-f", "--file", dest="file_ext", metavar="NAME", help="Use a different file for this profile")
parser.add_option(
    "-s", "--sort", dest="sort", metavar="CRITERION", help="Sort the profile entries according to CRITERION"
)
parser.add_option("-n", dest="number", metavar="NUMBER", help="Display NUMBER entries from the profile.")
(options, args) = parser.parse_args()


# process options
if options.file_ext is None:
    fname = "profile_of_anova.profile"
else:
    fname = "profile_of_anova_%s.profile" % options.file_ext

sort = options.sort
if options.number is None:
示例#43
0
        for case in range(rt.n_cases):
            w = rt[case]['Word']
            i = word_map[w]
            index.append(i)
        rt['index'] = Var(index)
        rt.sort('index')  # align rows of log with rt
        log.update(rt, replace=False, info=False)  # combine

        log = log[log['RealWord'] == 1]  # event file currently does not contain nonwords

        log.sort('AbsTime')  # make sure log file is sorted by time

        return log

# don't print a bunch of info
set_log_level('warning')

# create instance upon importing module
if socket.gethostname() == 'silversurfer.linguistics.fas.nyu.edu':
    e = SufAmb('/Volumes/BackUp/sufAmb')  # data is stored on silversurfer
else:
    print "Trying to connect to data drive."
    e = SufAmb('/Volumes/BackUp/sufAmb')

# Number of good trials per condition
# total: table.frequencies(Y='accept',ds=e.load_selected_events(reject='keep'))
# by condition: # print table.frequencies(Y='Condition',ds=e.load_selected_events(reject='keep').sub("accept==True"))

#e.set_inv(ori, depth, reg, snr, method, pick_normal)
#e.load_inv(fiff)
==============================
Generate simulated evoked data
==============================
compare the regression results between my method and MNE, using the wrapped version
"""

import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as la

import mne
from mne.viz import plot_evoked, plot_sparse_source_estimates
from mne.simulation import generate_stc
import matplotlib.pyplot as plt
from mne.inverse_sparse.mxne_optim import _Phi, _PhiT
mne.set_log_level('warning')

 
import os,sys,inspect
# laptop
#os.chdir('/home/yingyang/Dropbox/MEG_source_loc_proj/stft_tree_group_lasso/')
# desktop
os.chdir('/home/ying/Dropbox/MEG_source_loc_proj/STFT_R_git_repo/')
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir+"/Simulation") 
sys.path.insert(0,parentdir + "/Spline_Regression") 
sys.path.insert(0,parentdir + "/MNE_stft")
sys.path.insert(0,currentdir)
from mne.inverse_sparse.mxne_optim import *
from Simulation_real_scale import *
'''

import scipy.io
import numpy as np
import os
import sys
import fftw3
from itertools import islice
from collections import deque
from PyQt4 import QtGui, QtCore
import pyqtgraph as pg
import file_handler
import objgraph
import mne

mne.set_log_level('DEBUG')

class Cine(object):
  def __init__(self):
    self.filehandler = file_handler.FileHandler()
    self.app = QtGui.QApplication(sys.argv)
    self.widget = QtGui.QWidget()
    self.start_btn = QtGui.QPushButton('Start')
    self.stop_btn = QtGui.QPushButton('Stop')
    self.file_in = QtGui.QInputDialog()
    self.layout = QtGui.QGridLayout()
    self.layout.addWidget(self.start_btn, 0, 0)
    self.layout.addWidget(self.stop_btn, 0, 1)
    self.widget.setLayout(self.layout)
    self.start_btn.clicked.connect(self.start)
    self.stop_btn.clicked.connect(self.stop)
示例#46
0
    parser.add_argument('--reject', help=help_reject, action='store_true')
    parser.add_argument('--nharm', type=int, default=default_nharm,
                        choices=[0, 1, 2, 3, 4], help=help_nharm)
    parser.add_argument('--epoch_start', type=float, default=None,
                        help=help_epoch_start)
    parser.add_argument('--epoch_end', type=float, default=None,
                        help=help_epoch_end)
    parser.add_argument('--plot_snr', help=help_plot_snr, action='store_true')
    parser.add_argument('--stim_channel', help=help_stim_channel, type=str,
                        default=None)
    parser.add_argument('--stim_mask', type=int, default=None,
                        help=help_sti_mask)

    args = parser.parse_args()

    mne.set_log_level('ERROR')  # reduce mne output

    if args.epochs_file:
        fnbase = os.path.basename(os.path.splitext(args.epochs_file)[0])
    else:
        fnbase = os.path.basename(os.path.splitext(args.snr_file)[0])
    verbose = False

    """ Load cHPI SNR file. It is typically not maxfiltered, so ignore
    MaxShield warnings. """
    raw_chpi = mne.io.Raw(args.snr_file, allow_maxshield='yes',
                          verbose=verbose)
    picks = mne.pick_types(raw_chpi.info, meg=True)

    """ If using a separate file for the actual data epochs, load it. """
    if args.epochs_file:
示例#47
0
def test_logging():
    """Test logging (to file)
    """
    old_log_file = open(fname_log, 'r')
    old_lines = clean_lines(old_log_file.readlines())
    old_log_file.close()
    old_log_file_2 = open(fname_log_2, 'r')
    old_lines_2 = clean_lines(old_log_file_2.readlines())
    old_log_file_2.close()

    if op.isfile(test_name):
        os.remove(test_name)
    # test it one way (printing default off)
    set_log_file(test_name)
    set_log_level('WARNING')
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1)
    assert_true(open(test_name).readlines() == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose=False)
    assert_true(open(test_name).readlines() == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
    assert_true(open(test_name).readlines() == [])
    # SHOULD print
    evoked = Evoked(fname_evoked, setno=1, verbose=True)
    new_log_file = open(test_name, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines)

    # now go the other way (printing default on)
    os.remove(test_name)
    set_log_file(test_name)
    set_log_level('INFO')
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
    assert_true(open(test_name).readlines() == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose=False)
    assert_true(open(test_name).readlines() == [])
    # SHOULD print
    evoked = Evoked(fname_evoked, setno=1)
    new_log_file = open(test_name, 'r')
    old_log_file = open(fname_log, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines)
    # check to make sure appending works (and as default, raises a warning)
    with warnings.catch_warnings(True) as w:
        set_log_file(test_name, overwrite=False)
        assert len(w) == 0
        set_log_file(test_name)
        assert len(w) == 1
    evoked = Evoked(fname_evoked, setno=1)
    new_log_file = open(test_name, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines_2)

    # make sure overwriting works
    set_log_file(test_name, overwrite=True)
    # this line needs to be called to actually do some logging
    evoked = Evoked(fname_evoked, setno=1)
    new_log_file = open(test_name, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines)
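# --------------------------------------------------------------------------
# A brief sketch (not part of the original test) of the logging-to-file
# pattern exercised above, using only mne.set_log_file and mne.set_log_level:
# route MNE log output to a file, then point it back to the console.
# --------------------------------------------------------------------------
import mne
mne.set_log_file('mne_output.log', overwrite=True)  # log output goes to file
mne.set_log_level('INFO')                           # verbose messages are written there
mne.set_log_file(None)                              # back to printing to the console
mne.set_log_level('WARNING')                        # and quieten the output again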
示例#48
0
def select_trial_label(event_id, trigger_channels, coi, input_filename, output_filename=None):    
    
    mne.set_log_level('WARNING')
    print "loading dataset " + input_filename
    raw = mne.fiff.Raw(input_filename)
    print
    print "Extract events from channels",
    print trigger_channels
    channels_num = len(trigger_channels)
    triggers = []
    for trigger_channel in trigger_channels:
        print trigger_channel,
        tmp = mne.find_events(raw, stim_channel=trigger_channel)[:,0] 
        triggers.append(tmp)

    trigger_times = np.unique(np.concatenate(triggers))  # concatenate trigger events and order them without repetitions
    threshold_faulty_hw = 5
    print "Fix trigger times occurring <"+str(threshold_faulty_hw)+" timesteps from the previous trigger (due to faulty hardware)"
    idx_triggers_to_fix = np.where(np.diff(trigger_times) < threshold_faulty_hw)[0]
    fix = dict(zip(trigger_times[idx_triggers_to_fix+1], trigger_times[idx_triggers_to_fix]))
    print "Actual trigger times to fix due to faulty HW:", fix
    
    trigger_times = np.concatenate([[trigger_times[0]], trigger_times[1:][np.diff(trigger_times) >= threshold_faulty_hw]])

    for ttf in fix.keys():
        for trigger in triggers:
            trigger[trigger==ttf] = fix[ttf] # this is the actual fix

    # some checks:
    tmp = np.unique(np.concatenate(triggers))
    assert((tmp == trigger_times).all())

    print "Binary to decimal conversion of triggers."
    trigger_decimal = np.zeros(trigger_times.size, dtype=np.int)
    for i, t in enumerate(trigger_times):
        for bit, trigger in enumerate(triggers):
            if (trigger == t).any():
                trigger_decimal[i] += 2**(channels_num - bit - 1)  # first channel is the MSB, last channel is the LSB
                
    
    trigger_decimal_unique = np.unique(trigger_decimal)
    summary = [(trigger_decimal==v).sum() for v in trigger_decimal_unique]
    print "Summary:"
    print trigger_decimal_unique
    print summary

    triggers_of_interest = np.concatenate(coi)
    print "triggers_of_interest:", triggers_of_interest
    print "corresponding to:"
    for toi in triggers_of_interest:
        print '\t', event_id[toi]

    triggers_mask = np.logical_or.reduce([trigger_decimal==v for v in triggers_of_interest])
    
    if output_filename is None:
        filename_save = input_filename.split('.')[0]+'_trial_time.pickle'
    else:
        filename_save = os.path.abspath(output_filename)
    
    print "Saving to:", filename_save
    pickle.dump({'trigger_times': trigger_times[triggers_mask],
                 'trigger_decimal': trigger_decimal[triggers_mask],
                 'coi': coi,
                 }, open(filename_save, 'wb'),
                protocol=pickle.HIGHEST_PROTOCOL)
    
    return filename_save
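# --------------------------------------------------------------------------
# Minimal illustration (not part of the original) of the binary-to-decimal
# trigger conversion above: with three trigger channels the first channel
# carries the most significant bit, so an event seen on channels 0 and 2 at
# the same time step is coded as 2**2 + 2**0 = 5.
# --------------------------------------------------------------------------
import numpy as np

def _triggers_to_decimal(triggers, trigger_times, channels_num):
    """Toy re-implementation of the bit-weighting loop used above."""
    trigger_decimal = np.zeros(len(trigger_times), dtype=int)
    for i, t in enumerate(trigger_times):
        for bit, trigger in enumerate(triggers):
            if (np.asarray(trigger) == t).any():
                trigger_decimal[i] += 2 ** (channels_num - bit - 1)
    return trigger_decimal

# channel 0 fires at samples 10 and 30, channel 1 at 20, channel 2 at 10
assert list(_triggers_to_decimal([[10, 30], [20], [10]], [10, 20, 30], 3)) == [5, 2, 4]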
示例#49
0
# of filenames for various things we'll be using.
import os.path as op
import numpy as np
from scipy.signal import welch, coherence
from mayavi import mlab
from matplotlib import pyplot as plt

import mne
from mne.simulation import simulate_raw
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd

# Suppress irrelevant output
mne.set_log_level('ERROR')

# We use the MEG and MRI setup from the MNE-sample dataset
data_path = sample.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
mri_path = op.join(subjects_dir, 'sample')

# Filenames for various files we'll be using
meg_path = op.join(data_path, 'MEG', 'sample')
raw_fname = op.join(meg_path, 'sample_audvis_raw.fif')
trans_fname = op.join(meg_path, 'sample_audvis_raw-trans.fif')
src_fname = op.join(mri_path, 'bem/sample-oct-6-src.fif')
bem_fname = op.join(mri_path, 'bem/sample-5120-5120-5120-bem-sol.fif')
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
cov_fname = op.join(meg_path, 'sample_audvis-cov.fif')
from utils import get_data

from config import (
    data_path,
    pyoutput_path,
    subjects,
    paths,
    contrasts,
    open_browser,
    chan_types,
)

report, run_id, _, logger = setup_provenance(
    script=__file__, results_dir=paths('report'))

mne.set_log_level('INFO')

# force separation of magnetometers and gradiometers
if 'meg' in [i['name'] for i in chan_types]:
    chan_types = [dict(name='mag'), dict(name='grad')] + \
                 [dict(name=i['name']) for i in chan_types
                                           if i['name'] != 'meg']
for subject in subjects:

    # Extract events from mat file
    meg_fname = op.join(data_path, subject, 'preprocessed', subject + '_preprocessed')
    bhv_fname = op.join(data_path, subject, 'behavior', subject + '_fixed.mat')
    epochs, events = get_data(meg_fname, bhv_fname)

    # Apply each contrast
    all_epochs = [[]] * len(contrasts)
示例#51
0
def group_fourierICA_src_space(fname_raw, average=False, stim_name=None,
                               stim_id=1, stim_delay=None, pca_dim=None,
                               ica_method='fourierica',
                               src_loc_method="dSPM", snr=3., nrep=1,
                               tmin=0, tmax=1.0, flow=4., fhigh=34.,
                               hamming_data=True, remove_outliers=True,
                               complex_mixing=True, max_iter=2000,
                               conv_eps=1e-9, lrate=0.2, cost_function='g2',
                               envelopeICA=False, verbose=True):


    """
    Module to perform group FourierICA.

        Parameters
        ----------
        fname_raw: filename(s) of the pre-processed raw file(s)
        average: Should data be averaged across subjects before
            FourierICA application? Note, averaged data require
            less memory!
            default: average=False
        stim_name: name of the stimulus channel. Note, for
            applying FourierICA data are chopped around stimulus
            onset. If not set data are chopped in overlapping
            windows
            default: stim_name=None
        stim_id: Id of the event of interest to be considered in
            the stimulus channel. Only of interest if 'stim_name'
            is set
            default: stim_id=1
        stim_delay: stimulus delay in milliseconds
            default: stim_delay=0
        pca_dim: the number of PCA components used to apply FourierICA.
            If pca_dim > 1 this refers to the exact number of components.
            If between 0 and 1 pca_dim refers to the variance which
            should be explained by the chosen components
            If pca_dim == None the minimum description length (MDL)
            (Wax and Kailath, 1985) criterion is used to estimate
            the number of components
            default: pca_dim=None
        ica_method: which ICA method should be used for the group ICA?
            You can choose between 'extended-infomax', 'fastica',
            'fourierica' and 'infomax'
            default: ica_method='fourierica'
        src_loc_method: method used for source localization.
            default: src_loc_method='dSPM'
        snr: signal-to-noise ratio for performing source
            localization
            default: snr=3.0
        nrep: number of repetitions ICA, i.e. ICASSO, should be performed
            default: nrep=1
        tmin: time of interest prior to stimulus onset.
            Important for generating epochs to apply FourierICA
            default = 0.0
        tmax: time of interest after stimulus onset.
            Important for generating epochs to apply FourierICA
            default = 1.0
        flow: lower frequency border for estimating the optimal
            de-mixing matrix using FourierICA
            default: flow=4.0
        fhigh: upper frequency border for estimating the optimal
            de-mixing matrix using FourierICA
            default: fhigh=34.0
            Note: here default flow and fhigh are chosen to
            contain:
                - theta (4-7Hz)
                - low (7.5-9.5Hz) and high alpha (10-12Hz),
                - low (13-23Hz) and high beta (24-34Hz)
        hamming_data: if set a hamming window is applied to each
            epoch prior to Fourier transformation
            default: hamming_data=True
        remove_outliers: If set outliers are removed from the Fourier
            transformed data.
            Outliers are defined as windows with large log-average power (LAP)

                 LAP_{c,t} = log \sum_{f} |X_{c,tf}|^2

            where c, t and f are channels, window time-onsets and frequencies,
            respectively. The threshold is defined as |mean(LAP)+3 std(LAP)|.
            This process can be bypassed or replaced by specifying a function
            handle as an optional parameter.
            default: remove_outliers=True
        complex_mixing: if the mixing matrix should be real or complex
            default: complex_mixing=True
        max_iter: maximum number of iterations used in FourierICA
            default: max_iter=2000
        conv_eps: iteration stops when weight changes are smaller
            than this number
            default: conv_eps=1e-9
        lrate: initial learning rate
            default: lrate=0.2
        cost_function: which cost-function should be used in the
            complex ICA algorithm
                'g1': g_1(y) = 1 / (2 * np.sqrt(lrate + y))
                'g2': g_2(y) = 1 / (lrate + y)
                'g3': g_3(y) = y
            default: cost_function = 'g2'
        envelopeICA: if set ICA is estimated on the envelope
            of the Fourier transformed input data, i.e., the
            mixing model is |x|=As
            default: envelopeICA=False
        verbose: bool, str, int, or None
            If not None, override default verbose level
            (see mne.verbose).
            default: verbose=True
    """

    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    from jumeg.decompose.icasso import JuMEG_icasso
    from mne import set_log_level
    from os.path import dirname, join
    from pickle import dump

    # set log level to 'WARNING'
    set_log_level('WARNING')


    # ------------------------------------------
    # check input parameter
    # ------------------------------------------
    # filenames
    if isinstance(fname_raw, list):
        fn_list = fname_raw
    else:
        fn_list = [fname_raw]


    # -------------------------------------------
    # set some path parameter
    # -------------------------------------------
    fn_inv = []
    for fn_raw in fn_list:
        fn_inv.append(fn_raw[:fn_raw.rfind('-raw.fif')] + inv_pattern)


    # ------------------------------------------
    # apply FourierICA combined with ICASSO
    # ------------------------------------------
    icasso_obj = JuMEG_icasso(nrep=nrep, fn_inv=fn_inv,
                              src_loc_method=src_loc_method,
                              morph2fsaverage=True,
                              envelopeICA=envelopeICA,
                              ica_method=ica_method,
                              tICA=False,
                              snr=snr, lrate=lrate)

    W_orig, A_orig, quality, fourier_ica_obj \
        = icasso_obj.fit(fn_list, average=average,
                         stim_name=stim_name,
                         event_id=stim_id, pca_dim=pca_dim,
                         stim_delay=stim_delay,
                         tmin_win=tmin, tmax_win=tmax,
                         flow=flow, fhigh=fhigh,
                         max_iter=max_iter, conv_eps=conv_eps,
                         complex_mixing=complex_mixing,
                         hamming_data=hamming_data,
                         remove_outliers=remove_outliers,
                         cost_function=cost_function,
                         verbose=verbose)


    # ------------------------------------------
    # save results to disk
    # ------------------------------------------
    groupICA_obj = {'fn_list': fn_list,
                    'W_orig': W_orig,
                    'A_orig': A_orig,
                    'quality': quality,
                    'icasso_obj': icasso_obj,
                    'fourier_ica_obj': fourier_ica_obj}


    if stim_id:
        fn_base = "group_ICA_%02ddB.obj" % (stim_id)
    else:
        fn_base = "group_ICA_resting_state.obj"

    fnout = join(dirname(dirname(fn_list[0])), fn_base)
    with open(fnout, "wb") as filehandler:
        dump(groupICA_obj, filehandler)

    # return dictionary
    return groupICA_obj, fnout
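# --------------------------------------------------------------------------
# Illustrative call (not part of the original): the raw filenames are
# placeholders and the module-level 'inv_pattern' variable used inside
# group_fourierICA_src_space() is assumed to be defined elsewhere in the
# module; this only sketches a plausible invocation of the function above.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    fn_raw_files = ['subj01-raw.fif', 'subj02-raw.fif']
    groupICA_obj, fnout = group_fourierICA_src_space(
        fn_raw_files, stim_name='STI 014', stim_id=1,
        flow=4., fhigh=34., src_loc_method='dSPM', nrep=50)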
示例#52
0
###############################################################################
#
# .. _tut_logging:
#
# Logging
# =======
# Configurations also include the default logging level for the functions. This
# field is called "MNE_LOGGING_LEVEL".
mne.set_config('MNE_LOGGING_LEVEL', 'INFO')
print(mne.get_config(key='MNE_LOGGING_LEVEL'))

###############################################################################
# The default value is now set to INFO. This level will now be used by default
# every time we call a function in MNE. We can set the global logging level for
# only this session by calling the :func:`mne.set_log_level` function.
mne.set_log_level('WARNING')
print(mne.get_config(key='MNE_LOGGING_LEVEL'))

###############################################################################
# Notice how the value in the config file was not changed. Logging level of
# WARNING only applies for this session. Let's see what logging level of
# WARNING prints for :func:`mne.compute_raw_covariance`.
cov_raw = mne.compute_raw_covariance(raw)

###############################################################################
# Nothing. This means that no warnings were emitted during the computation. If
# you look at the documentation of :func:`mne.compute_raw_covariance`, you
# notice the ``verbose`` keyword. Setting this parameter does not touch the
# configurations, but sets the logging level for just this one function call.
# Let's see what happens with logging level of INFO.
cov = mne.compute_raw_covariance(raw, verbose=True)
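###############################################################################
# A brief additional sketch (not from the original tutorial):
# :func:`mne.set_log_level` can also return the level that was active before
# the call, which makes it easy to silence a noisy block of code and then
# restore the previous verbosity.
old_level = mne.set_log_level('ERROR', return_old_level=True)
cov_quiet = mne.compute_raw_covariance(raw)  # runs without log output
mne.set_log_level(old_level)  # restore the previous logging level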
示例#53
0
    def fit(self, fn_raw, stim_name=None, event_id=1,
            tmin_stim=0.0, tmax_stim=1.0, flow=4.0, fhigh=34.0,
            pca_dim=0.90, max_iter=10000, conv_eps=1e-16,
            verbose=True):

        """
        Perform ICASSO estimation. ICASSO is based on running ICA
        multiple times with slightly different conditions and
        clustering the obtained components. Note, here FourierICA
        is applied


            Parameters
            ----------
            fn_raw: filename of the input data (expects a fif-file).
            stim_name: name of the stimulus channel. Note, for
                applying FourierICA data are chopped around stimulus
                onset. If not set data are chopped in overlapping
                windows
                default: stim_name=None
            event_id: Id of the event of interest to be considered in
                the stimulus channel. Only of interest if 'stim_name'
                is set
                default: event_id=1
            tmin_stim: time of interest prior to stimulus onset.
                Important for generating epochs to apply FourierICA
                default = 0.0
            tmax_stim: time of interest after stimulus onset.
                Important for generating epochs to apply FourierICA
                default = 1.0
            flow: lower frequency border for estimating the optimal
                de-mixing matrix using FourierICA
                default: flow=4.0
            fhigh: upper frequency border for estimating the optimal
                de-mixing matrix using FourierICA
                default: fhigh=34.0
                Note: here default flow and fhigh are chosen to
                   contain:
                   - theta (4-7Hz)
                   - low (7.5-9.5Hz) and high alpha (10-12Hz),
                   - low (13-23Hz) and high beta (24-34Hz)
            pca_dim: The number of PCA components used to apply FourierICA.
                If pca_dim > 1 this refers to the exact number of components.
                If between 0 and 1 pca_dim refers to the variance which
                should be explained by the chosen components
                default: pca_dim=0.9
            max_iter: maximum number of iterations used in FourierICA
                default: max_iter=10000
            conv_eps: iteration stops when weight changes are smaller
                than this number
                default: conv_eps = 1e-16
            verbose: bool, str, int, or None
                If not None, override default verbose level
                (see mne.verbose).
                default: verbose=True

            Returns
            -------
            W: estimated optimal de-mixing matrix
            A: estimated mixing matrix
            Iq: quality index of the clustering between
                components belonging to one cluster
                (between 0 and 1; 1 refers to small clusters,
                i.e., components in one cluster are highly similar)
            fourier_ica_obj: FourierICA object. For further information
                please have a look into the FourierICA routine
        """

        # ------------------------------------------
        # import necessary module
        # ------------------------------------------
        from fourier_ica import JuMEG_fourier_ica
        from mne import find_events, pick_types, set_log_level
        from mne.io import Raw


        # set log level to 'WARNING'
        set_log_level('WARNING')

        # ------------------------------------------
        # prepare data to apply FourierICA
        # ------------------------------------------
        meg_raw = Raw(fn_raw, preload=True)
        meg_channels = pick_types(meg_raw.info, meg=True, eeg=False,
                                  eog=False, stim=False, exclude='bads')
        meg_data = meg_raw._data[meg_channels, :]

        if stim_name:
            events = find_events(meg_raw, stim_channel=stim_name, consecutive=True)
            events = events[events[:, 2] == event_id, 0]
        else:
            events = []


        # ------------------------------------------
        # generate FourierICA object
        # ------------------------------------------
        if verbose:
            print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
            print ">>>      Performing FourierICA estimation      <<<"
            print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"

        win_length_sec = tmax_stim - tmin_stim
        fourier_ica_obj = JuMEG_fourier_ica(events=events, tpre=tmin_stim,
                                            flow=flow, fhigh=fhigh,
                                            win_length_sec=win_length_sec,
                                            remove_outliers=True,
                                            hamming_data=True,
                                            complex_mixing=True,
                                            pca_dim=pca_dim,
                                            max_iter=max_iter,
                                            conv_eps=conv_eps)


        # ------------------------------------------
        # perform ICASSO ICA
        # ------------------------------------------
        for irep in range(self.nrep):
            # apply FourierICA
            W_orig, A_orig, _, _, _, whitenMat, dewhitenMat = fourier_ica_obj.fit(meg_data.copy(), verbose=False)

            if irep == 0:
                self.whitenMat = whitenMat
                self.dewhitenMat = dewhitenMat

            # save results in structure
            self.W_est.append(W_orig)
            self.A_est.append(A_orig)

            # print out some information
            if verbose:
                print ">>> Running FourierICA number %d of %d done" % (irep+1, self.nrep)

                if irep == 0:
                    str_hamming_window = "True" if fourier_ica_obj.hamming_data else "False"
                    str_complex_mixing = "True" if fourier_ica_obj.complex_mixing else "False"
                    print "..... Fourier ICA parameter:"
                    print "....."
                    print "..... Sampling frequency set to: %d" % fourier_ica_obj.sfreq
                    print "..... Start of frequency band set to: %d" % fourier_ica_obj.flow
                    print "..... End of frequency band set to: %d" % fourier_ica_obj.fhigh
                    print "..... Using hamming window: %s" % str_hamming_window
                    print "..... Assume complex mixing: %s" % str_complex_mixing
                    print "..... Number of independent components: %d" % fourier_ica_obj.ica_dim
                    print "....."


        # ------------------------------------------
        # perform cluster analysis
        # ------------------------------------------
        if verbose:
            print ">>>"""
            print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
            print ">>>        Performing cluster analysis         <<<"
            print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"

        Z, order, partitions, indexR, dis, sim = self.cluster()
        proj = self.projection(dis)
        A, W, Iq = self.get_results(partitions, sim)


        # ------------------------------------------
        # return results
        # ------------------------------------------
        return W, A, Iq, fourier_ica_obj
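# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original): a plausible call to the
# fit() method documented above; the raw filename is a placeholder.
# --------------------------------------------------------------------------
# >>> icasso_obj = JuMEG_icasso(nrep=50)
# >>> W, A, Iq, fourier_ica_obj = icasso_obj.fit(
# ...     'subj01-raw.fif', stim_name='STI 014', event_id=1,
# ...     tmin_stim=0.0, tmax_stim=1.0, flow=4.0, fhigh=34.0,
# ...     pca_dim=0.90, max_iter=10000, conv_eps=1e-16)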
示例#54
0
import mne
import numpy as np

mne.set_log_level(False)

pids = range(1001, 1022)
blocks = ["PV0", "PV1", "WM0", "WM1"]
conditions = ["NEU", "NEG"]

n_trials = 24
n_channels = 72
n_participants = len(pids)
n_blocks = len(blocks)
n_conditions = len(conditions)
n_times = 4097

# construct output file of ranges
fout = open("ranges.txt", "w")
fout.write("pid block valence trial channel range\n")

# spacing between channels (DC offset)
scale = 1e-5

# loop through the participants
for p, pid in enumerate(pids):
    # loop through the blocks
    for b, block in enumerate(blocks):
        # loop through the conditions
        for c, condition in enumerate(conditions):

            # construct the filename
from __future__ import absolute_import
import mne

from ..datasets import segment as sg
from . import feature_extractor

mne.set_log_level(verbose='WARNING')
from mne.time_frequency.tfr import cwt_morlet
import random
import sys
import numpy as np
from itertools import chain


class EpochShim(object):
    """A wrapper for our segments which mimics the interface of mne.Epoch, for the band_wavelet_synchrony function."""
    def __init__(self, segment, window_size):
        self.segment = segment
        self.window_size = window_size
        # The epoch needs a dictionary attribute with the key 'freq'
        self.info = dict(sfreq=segment.get_sampling_frequency())

    def __iter__(self):
        for window in self.segment.get_windowed(self.window_size):
            yield window.transpose()


def epochs_from_segment(segment, window_size=5.0):
    """
    Creates an MNE Epochs object from a Segment object
示例#56
0
def plot_group_fourierICA(fn_groupICA_obj, stim_name=None,
                          stim_id=1, stim_delay=0,
                          subjects_dir=None):

    """
    Interface to plot the results from group FourierICA

        Parameters
        ----------
        fn_groupICA_obj: filename of the group ICA object
        stim_name: name of the stimulus channel. Note, for
            applying FourierICA data are chopped around stimulus
            onset. If not set data are chopped in overlapping
            windows
            default: stim_name=None
        stim_id: Id of the event of interest to be considered in
            the stimulus channel. Only of interest if 'stim_name'
            is set
            default: stim_id=1
        stim_delay: stimulus delay in milliseconds
            default: stim_delay=0
        subjects_dir: If the subjects directory does not conform with
            the system variable 'SUBJECTS_DIR' this parameter should be set
            default: subjects_dir=None
    """


    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    from jumeg.decompose.fourier_ica_plot import plot_results_src_space
    from mne import set_log_level
    from pickle import load

    # set log level to 'WARNING'
    set_log_level('WARNING')


    # ------------------------------------------
    # read in group FourierICA object
    # ------------------------------------------
    with open(fn_groupICA_obj, "rb") as filehandler:
        groupICA_obj = load(filehandler)

    icasso_obj = groupICA_obj['icasso_obj']
    win_length_sec = icasso_obj.tmax_win - icasso_obj.tmin_win


    # ------------------------------------------
    # get temporal envelope
    # ------------------------------------------
    temporal_envelope, src_loc, vert, sfreq = \
        get_group_fourierICA_time_courses(groupICA_obj)


    # ------------------------------------------
    # plot "group" results
    # ------------------------------------------
    if 'classification' in groupICA_obj:
        classification = groupICA_obj['classification']
    else:
        classification = []


    fnout_src_fourier_ica = fn_groupICA_obj[:fn_groupICA_obj.rfind('.obj')] + \
                            img_src_group_ica

    plot_results_src_space(groupICA_obj['fourier_ica_obj'],
                           groupICA_obj['W_orig'], groupICA_obj['A_orig'],
                           src_loc, vert, subjects_dir=subjects_dir,
                           tpre=icasso_obj.tmin_win,
                           win_length_sec=win_length_sec, sfreq=sfreq,
                           flow=icasso_obj.flow, fhigh=icasso_obj.fhigh,
                           fnout=fnout_src_fourier_ica,
                           tICA=icasso_obj.tICA,
                           morph2fsaverage=icasso_obj.morph2fsaverage,
                           temporal_envelope=temporal_envelope,
                           classification=classification,
                           show=False)
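# --------------------------------------------------------------------------
# Example invocation (added, not part of the original): the group-ICA object
# filename and subjects directory are placeholders; it shows how
# plot_group_fourierICA() defined above might be called once a
# 'group_ICA_*.obj' file has been written to disk.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    plot_group_fourierICA('group_ICA_60dB.obj', stim_name='STI 014',
                          stim_id=1, stim_delay=0,
                          subjects_dir='/data/freesurfer/subjects')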
示例#57
0
def apply_ICASSO_fourierICA(fn_raw, nrep=50, stim_name='STI 014', event_id=1,
                            tmin=-0.2, tmax=0.8, flow=4.0, fhigh=34.0,
                            pca_dim=None, max_iter=10000, conv_eps=1e-10,
                            lrate=None, complex_mixing=True, hamming_data=False,
                            envelopeICA=False, remove_outliers=False,
                            cost_function=None, verbose=True,
                            plot_dir=None, plot_res=True):

    # ------------------------------------------
    # import FourierICA module
    # ------------------------------------------
    from jumeg.decompose.icasso import JuMEG_icasso
    from mne import set_log_level


    # set log level to 'WARNING'
    set_log_level('WARNING')


    # ------------------------------------------
    # apply FourierICA combined with ICASSO
    # ------------------------------------------
    icasso_obj = JuMEG_icasso(nrep=nrep, envelopeICA=envelopeICA, lrate=lrate)
    W_orig, A_orig, quality, fourier_ica_obj, _, _ = icasso_obj.fit(fn_raw,
                                                                    stim_name=stim_name, event_id=event_id,
                                                                    tmin_stim=tmin, tmax_stim=tmax,
                                                                    flow=flow, fhigh=fhigh,
                                                                    pca_dim=pca_dim,
                                                                    max_iter=max_iter, conv_eps=conv_eps,
                                                                    complex_mixing=complex_mixing,
                                                                    hamming_data=hamming_data,
                                                                    remove_outliers=remove_outliers,
                                                                    cost_function=cost_function,
                                                                    verbose=verbose)

    # ------------------------------------------
    # plot results
    # ------------------------------------------
    if plot_res:
        # ------------------------------------------
        # import FourierICA module
        # ------------------------------------------
        from fourier_ica_plot import plot_results
        from mne import pick_types
        from mne.io import Raw
        from os import makedirs
        from os.path import basename, dirname, exists, join

        if plot_dir is None:
            # check if directory for result plots exist
            fn_dir = dirname(fn_raw)
            plot_dir = join(fn_dir, 'plots')
            if not exists(plot_dir):
                makedirs(plot_dir)

        # prepare data for plotting
        meg_raw = Raw(fn_raw, preload=True)
        meg_channels = pick_types(meg_raw.info, meg=True, eeg=False,
                                  eog=False, stim=False, exclude='bads')
        meg_data = meg_raw._data[meg_channels, :]

        # plot data
        fn_fourier_ica = join(plot_dir, basename(fn_raw[:fn_raw.rfind('-raw.fif')] + ',fourierICA'))
        pk_values = plot_results(fourier_ica_obj, meg_data, W_orig, A_orig, meg_raw.info,
                                 meg_channels, cluster_quality=quality, fnout=fn_fourier_ica,
                                 show=False)
    else:
        pk_values = []


    return W_orig, A_orig, quality, fourier_ica_obj, pk_values
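# --------------------------------------------------------------------------
# Sketched call (not part of the original): the raw filename is a
# placeholder; it shows a plausible use of apply_ICASSO_fourierICA() defined
# above, with result plots written to a 'plots' folder next to the raw file.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    W, A, quality, fica_obj, pk_values = apply_ICASSO_fourierICA(
        'subj01,bp1-45Hz-raw.fif', nrep=50, stim_name='STI 014',
        event_id=1, tmin=-0.2, tmax=0.8, flow=4.0, fhigh=34.0,
        plot_res=True)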