Example #1
def events_save_events(self, evt=None, condition=None, postfix="evt",
                       picks=None, reject=None, proj=False,
                       save_condition={"events": True, "epochs": True, "evoked": True}):

    from jumeg.jumeg_4raw_data_plot import jumeg_4raw_data_plot as jplt
    jplt.verbose = self.verbose

    ep, bc = self.events_apply_epochs_and_baseline(self.raw, evt=evt, reject=reject, proj=proj, picks=picks)

    postfix += '_' + condition
    if bc:
        postfix += '_bc'

    # --- save events to txt file
    if save_condition["events"]:
        fname = jumeg_base.get_fif_name(raw=self.raw, postfix=postfix, extention=".eve", update_raw_fname=False)
        mne.event.write_events(fname, evt['events'])
        print(" ---> done jumeg epocher save events as => EVENTS :" + fname)

    # --- save epoch data
    if save_condition["epochs"]:
        fname = jumeg_base.get_fif_name(raw=self.raw, postfix=postfix, extention="-epo.fif", update_raw_fname=False)
        ep.save(fname)
        print(" ---> done jumeg epocher save events as => EPOCHS :" + fname)

    # --- save averaged data
    if save_condition["evoked"]:
        fname = jumeg_base.get_fif_name(raw=self.raw, postfix=postfix, extention="-ave.fif", update_raw_fname=False)
        mne.write_evokeds(fname, ep.average())
        print(" ---> done jumeg epocher save events as => EVOKED (averaged) :" + fname)
        fname = jumeg_base.get_fif_name(raw=self.raw, postfix=postfix, extention="-ave", update_raw_fname=False)
        # --- plot evoked
        fname = jplt.plot_evoked(ep, fname=fname, condition=condition, show_plot=False, save_plot=True, plot_dir='plots')
        print(" ---> done jumeg epocher plot evoked (averaged) :" + fname)
Example #2
def test_evoked_standard_error():
    """Test calculation and read/write of standard error
    """
    raw, events, picks = _get_data()
    tempdir = _TempDir()
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    evoked = [epochs.average(), epochs.standard_error()]
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
    evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
               read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
                            kind='standard_error')]
    for evoked_new in [evoked2, evoked3]:
        assert_true(evoked_new[0]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_AVERAGE)
        assert_true(evoked_new[0].kind == 'average')
        assert_true(evoked_new[1]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_STD_ERR)
        assert_true(evoked_new[1].kind == 'standard_error')
        for ave, ave2 in zip(evoked, evoked_new):
            assert_array_almost_equal(ave.data, ave2.data)
            assert_array_almost_equal(ave.times, ave2.times)
            assert_equal(ave.nave, ave2.nave)
            assert_equal(ave._aspect_kind, ave2._aspect_kind)
            assert_equal(ave.kind, ave2.kind)
            assert_equal(ave.last, ave2.last)
            assert_equal(ave.first, ave2.first)
Example #3
def analyze_rest(subject, args, hcp_params, run_index=0, calc_rest_from_raw=False, calc_rest_from_epochs=True):
    flags = {}
    if not op.isfile(meg.RAW):
        raw = read_raw_data(run_index, hcp_params, 1, 60)
        raw.save(meg.RAW)
    else:
        raw = mne.io.read_raw_fif(meg.RAW)
    meg.COR = op.join(HCP_DIR, 'hcp-meg', subject, '{}-head_mri-trans.fif'.format(subject))
    epo_fname = meg.EPO.format(cond='all')
    evo_fname = meg.EVO.format(cond='all')
    if not op.isfile(epo_fname) or not op.isfile(evo_fname):
        epochs = hcp.read_epochs(run_index=run_index, **hcp_params)
        evoked = epochs.average()
        epochs.save(epo_fname)
        mne.write_evokeds(evo_fname, evoked)
    else:
        epochs = mne.read_epochs(epo_fname)
    meg.calc_fwd_inv_wrapper(subject, args)
    args.snr = 1.0  # use smaller SNR for raw data
    # args.overwrite_labels_data = True
    # args.n_jobs = 1
    if calc_rest_from_raw:
        meg.calc_labels_avg_for_rest_wrapper(args, raw)
    elif calc_rest_from_epochs:
        args.single_trial_stc = True
        flags, stcs_conds, stcs_num = meg.calc_stc_per_condition_wrapper(
            subject, None, args.inverse_method, args, flags, None, epochs)
        flags = meg.calc_labels_avg_per_condition_wrapper(
            subject, None, args.atlas, args.inverse_method, stcs_conds, args, flags, stcs_num, raw, epochs)

    print('sdf')
Example #4
def split_baseline(fif_fnames, clips_length=6, shift=6, overwrite=False):
    output_fol = op.join(utils.get_parent_fol(fif_fnames[0]), 'new_baselines')
    if not overwrite and op.isdir(output_fol) and len(
            glob.glob(op.join(output_fol, '*.fif'))) > 0:
        return glob.glob(op.join(output_fol, '*.fif'))
    utils.make_dir(output_fol)
    data = []
    for fif_fname in fif_fnames:
        clip = mne.read_evokeds(fif_fname)[0]
        freq = clip.info['sfreq']
        step = int(freq * clips_length)
        start_t, end_t = 0, len(clip.times)
        while start_t + clips_length * freq < end_t:
            new_clip = mne.EvokedArray(clip.data[:, start_t:start_t + step],
                                       clip.info,
                                       comment='baseline')
            data.append(new_clip.data[0, :10])
            mne.write_evokeds(
                op.join(
                    output_fol, '{}_{}.fif'.format(utils.namebase(fif_fname),
                                                   int(start_t / freq))),
                new_clip)
            start_t += int(freq * shift)
    data = np.array(data)
    return glob.glob(op.join(output_fol, '*.fif'))
Example #5
def test_evoked_standard_error():
    """Test calculation and read/write of standard error
    """
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0))
    evoked = [epochs.average(), epochs.standard_error()]
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
    evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
    evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
               read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
                            kind='standard_error')]
    for evoked_new in [evoked2, evoked3]:
        assert_true(evoked_new[0]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_AVERAGE)
        assert_true(evoked_new[0].kind == 'average')
        assert_true(evoked_new[1]._aspect_kind ==
                    FIFF.FIFFV_ASPECT_STD_ERR)
        assert_true(evoked_new[1].kind == 'standard_error')
        for ave, ave2 in zip(evoked, evoked_new):
            assert_array_almost_equal(ave.data, ave2.data)
            assert_array_almost_equal(ave.times, ave2.times)
            assert_equal(ave.nave, ave2.nave)
            assert_equal(ave._aspect_kind, ave2._aspect_kind)
            assert_equal(ave.kind, ave2.kind)
            assert_equal(ave.last, ave2.last)
            assert_equal(ave.first, ave2.first)
Example #6
def test_evoked_resample():
    """Test for resampling of evoked data
    """
    tempdir = _TempDir()
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    # compare it to the original
    ave_normal = read_evokeds(fname, 0)

    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_new.resample(sfreq_normal)

    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)

    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
Example #7
def analyze_task(subject, args, hcp_params):
    flags = {}
    events = dict(face=1, tools=2)
    baseline = (-0.5, 0)

    all_events = collect_events(hcp_params)
    files_exist = all([
        op.isfile(meg.get_cond_fname(meg.EPO, event))
        and op.isfile(meg.get_cond_fname(meg.EVO, event))
        for event in events.keys()
    ])
    if not files_exist:
        epochs_hcp, evokeds_from_epochs_hcp = using_preprocessed_epochs(
            all_events, events, baseline, hcp_params)
        for event in events.keys():
            epochs_hcp[event].save(meg.get_cond_fname(meg.EPO, event))
            mne.write_evokeds(meg.get_cond_fname(meg.EVO, event),
                              evokeds_from_epochs_hcp[event])

    flags = meg.calc_fwd_inv_wrapper(subject, args, events, flags)
    flags, stcs_conds, _ = meg.calc_stc_per_condition_wrapper(
        subject, events, args.inverse_method, args, flags)
    flags = meg.calc_labels_avg_per_condition_wrapper(subject, events,
                                                      args.atlas,
                                                      args.inverse_method,
                                                      stcs_conds, args, flags)
Example #8
def test_evoked_resample():
    """Test for resampling of evoked data"""
    tempdir = _TempDir()
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal, npad=100)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    # compare it to the original
    ave_normal = read_evokeds(fname, 0)

    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_new.resample(sfreq_normal, npad=100)

    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)

    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
    assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
Example #9
def run_evoked(subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         recording=config.rec,
                         space=config.space,
                         extension='.fif',
                         datatype=config.get_datatype(),
                         root=config.deriv_root)

    processing = None
    if config.use_ica or config.use_ssp:
        processing = 'clean'

    fname_in = bids_path.copy().update(processing=processing, suffix='epo',
                                       check=False)
    fname_out = bids_path.copy().update(suffix='ave', check=False)

    msg = f'Input: {fname_in}, Output: {fname_out}'
    logger.info(gen_log_message(message=msg, step=6, subject=subject,
                                session=session))

    epochs = mne.read_epochs(fname_in, preload=True)

    msg = 'Creating evoked data based on experimental conditions …'
    logger.info(gen_log_message(message=msg, step=6, subject=subject,
                                session=session))
    all_evoked = dict()

    if isinstance(config.conditions, dict):
        for new_cond_name, orig_cond_name in config.conditions.items():
            evoked = epochs[orig_cond_name].average()
            evoked.comment = evoked.comment.replace(orig_cond_name,
                                                    new_cond_name)
            all_evoked[new_cond_name] = evoked
    else:
        for condition in config.conditions:
            evoked = epochs[condition].average()
            all_evoked[condition] = evoked

    if config.contrasts:
        msg = 'Contrasting evoked responses …'
        logger.info(gen_log_message(message=msg, step=6, subject=subject,
                                    session=session))

        for contrast in config.contrasts:
            cond_1, cond_2 = contrast
            evoked_diff = mne.combine_evoked([all_evoked[cond_1],
                                              all_evoked[cond_2]],
                                             weights=[1, -1])
            all_evoked[contrast] = evoked_diff

    evokeds = list(all_evoked.values())
    mne.write_evokeds(fname_out, evokeds)

    if config.interactive:
        for evoked in evokeds:
            evoked.plot()
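mne.write_evokeds stores each Evoked under its comment, so a single condition written by a pipeline like the one above can be read back by name; a short sketch (path and condition name are hypothetical):

import mne

# sketch only: read one named condition back from an -ave.fif file
evoked = mne.read_evokeds('sub-01_task-x_ave.fif', condition='face', verbose=False)
print(evoked.comment, evoked.nave)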
Example #10
def plot_average_sensors(subject, windows, condition, modality, bad_channels):
    import mne
    import matplotlib.pyplot as plt
    if modality == 'meeg':
        return
    evokes = []
    info = None
    title = '{}-{}-{}-windows'.format(subject, modality, condition)
    root_dir = op.join(EEG_DIR if modality == 'eeg' else MEG_DIR, subject)
    figures_fol = utils.make_dir(
        op.join(MMVT_DIR, subject, 'epilepsy-figures', 'average-sensors'))
    fig_fname = op.join(figures_fol, '{}.jpg'.format(title))
    meg_evoked_fname = op.join(root_dir, '{}.fif'.format(title))
    for window_fname in windows:
        evoked = mne.read_evokeds(window_fname)[0]
        evoked = evoked.pick_types(meg=modality == 'meg',
                                   eeg=modality == 'eeg',
                                   exclude=bad_channels)
        if info is None:
            info = evoked.info
        evokes.append(evoked.data)
    evokes = np.array(evokes).mean(0)
    evoked_object = mne.EvokedArray(evokes, info, comment=title)
    fig = evoked_object.plot(window_title=title,
                             spatial_colors=True,
                             show=False)
    fig.tight_layout()
    plt.savefig(fig_fname, dpi=300)
    plt.close()
    mne.write_evokeds(meg_evoked_fname, evoked_object)
Example #11
def _calc_evoked(params):
    subject, events_id, epochs = params
    evoked = {}
    for cond_name in events_id.keys():
        evoked[cond_name] = epochs[cond_name].average()
        evo = op.join(LOCAL_ROOT_DIR, 'evo', '{}_ecr_{}-ave.fif'.format(subject, cond_name))
        mne.write_evokeds(evo, evoked[cond_name])
    return evoked
Example #12
def save_content(self):
    """Save the mne.Evoked objects to a fif file in the evoked directory."""
    try:
        mne.write_evokeds(self._path, list(self.content.values()))
    except Exception as exc:
        raise Exception("Writing evokeds failed. Please check that the "
                        "entire experiment folder has write permissions.") from exc
Example #13
def average_evokeds(session):
    # Container for all conditions:
    all_evokeds = defaultdict(list)

    for subject in config.get_subjects():
        fname_in = BIDSPath(subject=subject,
                            session=session,
                            task=config.get_task(),
                            acquisition=config.acq,
                            run=None,
                            recording=config.rec,
                            space=config.space,
                            suffix='ave',
                            extension='.fif',
                            datatype=config.get_datatype(),
                            root=config.deriv_root,
                            check=False)

        msg = f'Input: {fname_in}'
        logger.info(
            gen_log_message(message=msg,
                            step=9,
                            subject=subject,
                            session=session))

        evokeds = mne.read_evokeds(fname_in)
        for idx, evoked in enumerate(evokeds):
            all_evokeds[idx].append(evoked)  # Insert into the container

    for idx, evokeds in all_evokeds.items():
        all_evokeds[idx] = mne.grand_average(
            evokeds, interpolate_bads=config.interpolate_bads_grand_average
        )  # Combine subjects

    subject = 'average'
    fname_out = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         processing=config.proc,
                         recording=config.rec,
                         space=config.space,
                         suffix='ave',
                         extension='.fif',
                         datatype=config.get_datatype(),
                         root=config.deriv_root,
                         check=False)

    if not fname_out.fpath.parent.exists():
        os.makedirs(fname_out.fpath.parent)

    msg = f'Saving grand-averaged evoked sensor data: {fname_out}'
    logger.info(
        gen_log_message(message=msg, step=9, subject=subject, session=session))
    mne.write_evokeds(fname_out, list(all_evokeds.values()))
    return list(all_evokeds.values())
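Reading the file written above returns the grand averages as a list of Evoked objects; a minimal sketch (path hypothetical):

import mne

# sketch only: load every Evoked stored in the grand-average file
evokeds = mne.read_evokeds('sub-average_ave.fif', verbose=False)
for ev in evokeds:
    print(ev.comment, ev.nave)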
Example #14
def run_evoked(subject, session=None):
    bids_path = BIDSPath(subject=subject,
                         session=session,
                         task=config.get_task(),
                         acquisition=config.acq,
                         run=None,
                         recording=config.rec,
                         space=config.space,
                         extension='.fif',
                         datatype=config.get_datatype(),
                         root=config.deriv_root)

    processing = None
    if config.use_ica or config.use_ssp:
        processing = 'clean'

    fname_in = bids_path.copy().update(processing=processing,
                                       suffix='epo',
                                       check=False)
    fname_out = bids_path.copy().update(suffix='ave', check=False)

    msg = f'Input: {fname_in}, Output: {fname_out}'
    logger.info(
        gen_log_message(message=msg, step=6, subject=subject, session=session))

    epochs = mne.read_epochs(fname_in, preload=True)

    msg = 'Creating evoked data based on experimental conditions …'
    logger.info(
        gen_log_message(message=msg, step=6, subject=subject, session=session))
    evokeds = []
    for condition in config.conditions:
        evoked = epochs[condition].average()
        evokeds.append(evoked)

    if config.contrasts:
        msg = 'Contrasting evoked responses …'
        logger.info(
            gen_log_message(message=msg,
                            step=6,
                            subject=subject,
                            session=session))

        for contrast in config.contrasts:
            cond_1, cond_2 = contrast
            evoked_1 = epochs[cond_1].average()
            evoked_2 = epochs[cond_2].average()
            evoked_diff = mne.combine_evoked([evoked_1, evoked_2],
                                             weights=[1, -1])
            evokeds.append(evoked_diff)

    mne.write_evokeds(fname_out, evokeds)

    if config.interactive:
        for evoked in evokeds:
            evoked.plot()
Example #15
def compute_epochs_cov_evokeds(subject):
    """Epoch, compute noise covariance and average.

    params:
    subject : str
        the subject id to be loaded
    """
    raw = Raw(save_folder + "%s_filtered_ica_mc_raw_tsss.fif" % subject,
              preload=True)
    # Select events to extract epochs from.
    event_id = {'ent_left': 1,
                'ent_right': 2,
                'ctl_left': 4,
                'ctl_right': 8}

    #   Setup for reading the raw data
    events = mne.find_events(raw, min_duration=0.01)

    picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=False,
                           include=include, exclude='bads')
    # Read epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), reject=reject,
                        preload=True)

    epochs.save(epochs_folder + "%s_filtered_ica_mc_tsss-epo.fif" % subject)

    # Plot epochs.
    # epochs.plot(trellis=False)

    # Look at channels that caused dropped events, showing that the subject's
    # blinks were likely to blame for most epochs being dropped
    epochs.drop_bad_epochs()
    fig = epochs.plot_drop_log(subject=subject, show=False)
    fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)

    # Make noise cov
    cov = compute_covariance(epochs, tmin=None, tmax=0, method="auto")
    mne.write_cov(epochs_folder + "%s-cov.fif" % subject, cov)

    # Average epochs and get evoked data corresponding to the left stimulation
    ###########################################################################
    # Save evoked responses for different conditions to disk

    # average epochs and get Evoked datasets
    evokeds = [epochs[cond].average() for cond in ['ent_left', 'ent_right',
                                                   'ctl_left', 'ctl_right']]

    evokeds = [epochs[cond].average() for cond in epochs.event_id.keys()]

    # save evoked data to disk
    mne.write_evokeds(epochs_folder +
                      '%s_filtered_ica_mc_raw_tsss-ave.fif' % subject, evokeds)

    plt.close("all")
Example #16
def calc_evoked(indices, epochs_fname, overwrite_epochs=False, overwrite_evoked=False):
    epochs = mne.read_epochs(epochs_fname, preload=False)
    print(epochs.events.shape)
    for event_name, event_indices in indices.items():
        evoked_event_fname = meg.get_cond_fname(meg.EVO, event_name)
        epochs_event_fname = meg.get_cond_fname(meg.EPO, event_name)
        if not op.isfile(epochs_event_fname) or overwrite_epochs:
            print('Saving {} epochs to {}, events num: {}'.format(event_name, epochs_event_fname, len(event_indices)))
            event_epochs = epochs[event_indices]
            event_epochs.save(epochs_event_fname)
        if not op.isfile(evoked_event_fname) or overwrite_evoked:
            print('Saving {} evoked to {}'.format(event_name, evoked_event_fname))
            mne.write_evokeds(evoked_event_fname, event_epochs.average())
Example #18
def test_shift_time_evoked():
    """Test for shifting of time scale."""
    tempdir = _TempDir()
    # Shift backward
    ave = read_evokeds(fname, 0).shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)

    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)

    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)

    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    assert_allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=1e-3)
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)

    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)

    # Absolute time shift
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)

    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    assert_allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=1e-3)
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
Example #19
def meg_calc_evokeds():
    subj_files = glob.glob(out_dir + '*epochs*.fif')
    subj_files.sort()
    for n, fiff_file in enumerate(subj_files):

        subj_id = os.path.split(fiff_file)[1][:5]
        fname = out_dir + subj_id + 'cond-ave.fif'

        epo = mne.read_epochs(fiff_file)
        ev = [epo[x].average() for x in ['hap', 'sad', 'neu', 'rest']]
        new = ev[0].copy()
        new.comment = 'cond_av'
        new.data = np.mean([ev[0].data, ev[1].data, ev[2].data], axis=0)
        ev.append(new)
        mne.write_evokeds(fname, ev)
Example #20
def run_evoked(subject, autoreject=True):
    if autoreject:
        epo_fname = op.join(meg_dir, subject,
                            f'{subject}_audvis-filt-sss-ar-epo.fif')
    else:
        epo_fname = op.join(meg_dir, subject,
                            f'{subject}_audvis-filt-sss-ar-epo.fif')
    evoked_fname = op.join(meg_dir, subject,
                           f'{subject}_audvis-filt-sss-ave.fif')
    epochs = mne.read_epochs(epo_fname, preload=True)

    # define evokeds
    evoked_al = epochs['Auditory/Left'].average()
    evoked_ar = epochs['Auditory/Right'].average()
    evoked_vl = epochs['Visual/Left'].average()
    evoked_vr = epochs['Visual/Right'].average()

    # define contrasts
    contrast_aud = mne.combine_evoked([evoked_al, -evoked_ar], 'equal')
    contrast_vis = mne.combine_evoked([evoked_vl, -evoked_vr], 'equal')

    # let's make trial-count-normalized ones for group statistics
    epochs_eq = epochs.copy().equalize_event_counts(
        ['Auditory/Left', 'Auditory/Right', 'Visual/Left', 'Visual/Right'])[0]
    evoked_al_eq = epochs_eq['Auditory/Left'].average()
    evoked_ar_eq = epochs_eq['Auditory/Right'].average()
    evoked_vl_eq = epochs_eq['Visual/Left'].average()
    evoked_vr_eq = epochs_eq['Visual/Right'].average()
    assert evoked_al_eq.nave == evoked_ar_eq.nave == evoked_vl_eq.nave
    assert evoked_al_eq.nave == evoked_vr_eq.nave

    # simplify comment
    evoked_al.comment = 'aud_left'
    evoked_ar.comment = 'aud_right'
    evoked_vl.comment = 'vis_left'
    evoked_vr.comment = 'vis_right'
    contrast_aud.comment = 'aud_left_minus_right'
    contrast_vis.comment = 'vis_left_minus_right'
    evoked_al_eq.comment = 'aud_left_eq'
    evoked_ar_eq.comment = 'aud_right_eq'
    evoked_vl_eq.comment = 'vis_left_eq'
    evoked_vr_eq.comment = 'vis_right_eq'

    mne.write_evokeds(evoked_fname, [
        evoked_al, evoked_ar, evoked_vl, evoked_vr, contrast_aud, contrast_vis,
        evoked_al_eq, evoked_ar_eq, evoked_vl_eq, evoked_vr_eq
    ])
    print(f'Created Evoked for {subject}')
Example #21
def combine_evokes(subject, cond, sessions, filter_raw_data,
                   raw_data_filter_freqs):
    freqs_str = '-{}-{}'.format(
        raw_data_filter_freqs[0],
        raw_data_filter_freqs[1]) if filter_raw_data else ''
    combined_evoked_fname = op.join(MEG_DIR, subject,
                                    '{}{}-ave.fif'.format(cond, freqs_str))
    if not op.isfile(combined_evoked_fname):
        all_evokes = []
        for session in sessions:
            evo_fname = op.join(
                MEG_DIR, subject,
                '{}-session{}{}-ave.fif'.format(cond, session, freqs_str))
            evoked = mne.read_evokeds(evo_fname)[0]
            evoked.apply_baseline()
            all_evokes.append(evoked)
        combined_evoked = mne.combine_evoked(all_evokes, 'nave')
        combined_evoked.comment = cond
        mne.write_evokeds(combined_evoked_fname, combined_evoked)
Example #22
def container_results(evoked, evoked_ave, donor, out_file, verbose):
    new_evoked = donor.copy()
    new_evoked.info = evoked.info
    new_evoked.nave = 98  #all
    new_evoked.kind = "average"
    new_evoked.times = evoked.times
    new_evoked.first = 0
    new_evoked.last = evoked.times.shape[0] - 1
    ev_data = np.asarray(evoked_ave)
    ev_data = ev_data[:, np.newaxis]
    if verbose:
        print('ev_data shape', ev_data.shape)
    #mean across runs
    ev_data = ev_data.mean(axis=0).mean(axis=0)
    if verbose:
        print('shape', ev_data.shape)
    new_evoked.data = ev_data
    if verbose:
        print(out_file)
    mne.write_evokeds(out_file, new_evoked)
Example #23
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args"""
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)

    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]

    # This not being assert_array_equal due to windows rounding
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert_true(repr(ave))

    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))

    # test str access
    condition = 'Left Auditory'
    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    assert_raises(ValueError,
                  read_evokeds,
                  fname,
                  condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)

    # test read_evokeds and write_evokeds
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fname2 = op.join(tempdir, 'test-bad-name.fif')
        write_evokeds(fname2, ave)
        read_evokeds(fname2)
    assert_naming(w, 'test_evoked.py', 2)

    # constructor
    assert_raises(TypeError, Evoked, fname)
Example #24
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args
    """
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)

    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]

    # This not being assert_array_equal due to windows rounding
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert_true(repr(ave))

    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))

    # test str access
    condition = 'Left Auditory'
    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    assert_raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)

    # test read_evokeds and write_evokeds
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fname2 = op.join(tempdir, 'test-bad-name.fif')
        write_evokeds(fname2, ave)
        read_evokeds(fname2)
    assert_naming(w, 'test_evoked.py', 2)

    # constructor
    assert_raises(TypeError, Evoked, fname)
Example #25
def test_evoked_resample(tmp_path):
    """Test resampling evoked data."""
    tempdir = str(tmp_path)
    # upsample, write it out, read it in
    ave = read_evokeds(fname, 0)
    orig_lp = ave.info['lowpass']
    sfreq_normal = ave.info['sfreq']
    ave.resample(2 * sfreq_normal, npad=100)
    assert ave.info['lowpass'] == orig_lp
    fname_temp = op.join(tempdir, 'evoked-ave.fif')
    write_evokeds(fname_temp, ave)
    ave_up = read_evokeds(fname_temp, 0)

    # compare it to the original
    ave_normal = read_evokeds(fname, 0)

    # and compare the original to the downsampled upsampled version
    ave_new = read_evokeds(fname_temp, 0)
    ave_new.resample(sfreq_normal, npad=100)
    assert ave.info['lowpass'] == orig_lp

    assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
    assert_array_almost_equal(ave_normal.times, ave_new.times)
    assert_equal(ave_normal.nave, ave_new.nave)
    assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
    assert_equal(ave_normal.kind, ave_new.kind)
    assert_equal(ave_normal.last, ave_new.last)
    assert_equal(ave_normal.first, ave_new.first)

    # for the above to work, the upsampling just about had to, but
    # we'll add a couple extra checks anyway
    assert (len(ave_up.times) == 2 * len(ave_normal.times))
    assert (ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])

    ave_new.resample(50)
    assert ave_new.info['sfreq'] == 50.
    assert ave_new.info['lowpass'] == 25.
Example #26
def plot_erp(file_path_erp):
    data_dict = dict()
    count = 0
    erp_files = _get_file_list(file_path_erp, 'sub', '-ave.fif')
    for sub_id in erp_files.keys():
        file_name = erp_files[sub_id]
        evoked_list = mne.read_evokeds(file_name)
        for event in evoked_list:
            comment = event.comment
            data = event.data
            norm_data = normalize(
                data, axis=1)  # better normalize here for a prettier plot
            if comment not in data_dict.keys():
                data_dict[comment] = norm_data
            else:
                data_dict[comment] = data_dict[comment] + norm_data
        count += 1

    for comment in data_dict.keys():
        data_dict[comment] = data_dict[comment] / (10000 * count)

    # FAKE ONE
    sub_id = list(erp_files.keys())[0]
    file_name = erp_files[sub_id]
    evoked_list = mne.read_evokeds(file_name)
    for event in evoked_list:
        comment = event.comment
        event.data = data_dict[comment]
        event.plot_joint(
            title=comment)  # maybe you don't want to plot right away

    # save the faked one
    file_name = file_path_erp + 'faked_summary-ave.fif'
    mne.write_evokeds(file_name, evoked_list)
    msg = '====finish faked_summary erp'
    print(msg)
Example #27
def test_shift_time_evoked():
    """ Test for shifting of time scale
    """
    tempdir = _TempDir()
    # Shift backward
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)

    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)

    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)

    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    assert_true(np.allclose(ave_normal.data, ave_relative.data,
                            atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)

    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)

    # Absolute time shift
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)

    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    assert_true(np.allclose(ave_normal.data, ave_absolute.data,
                            atol=1e-16, rtol=1e-3))
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
Example #28
    def save_average(self):
        if self.is_mmn():
            deviant = self.epochs["Deviant"].average()
            dfile = self.average_output_path("deviant")
            logging.info(f"Saved evoked averages of deviant events to {dfile}")
            mne.write_evokeds(dfile, deviant)

            standard = self.epochs["Standard"].average()
            sfile = self.average_output_path("standard")
            mne.write_evokeds(sfile, standard)
            logging.info(
                f"Saved evoked averages of standard events to {sfile}")

        average = self.epochs.average()
        afile = self.average_output_path("all")
        mne.write_evokeds(afile, average)
        logging.info(f"Saved evoked averages to {afile}")
Example #29
def test_dipole_fitting():
    """Test dipole fitting."""
    amp = 100e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False,
                                   force_fixed=True,
                                   use_cps=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [
        np.sort(rng.permutation(s['vertno'])[:n_per_hemi]) for s in fwd['src']
    ]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd,
                             stc,
                             evoked.info,
                             cov,
                             nave=evoked.nave,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(
        np.concatenate([
            pick_types(evoked.info, meg=True, eeg=False)[::2],
            pick_types(evoked.info, meg=False, eeg=True)[::2]
        ]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit',
        '--meas',
        fname_sim,
        '--meg',
        '--eeg',
        '--noise',
        fname_cov,
        '--dip',
        fname_dtemp,
        '--mri',
        fname_fwd,
        '--reg',
        '0',
        '--tmin',
        '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    with pytest.warns(RuntimeWarning, match='projection'):
        dip, residuals = fit_dipole(evoked, cov, sphere, fname_fwd)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data**2, axis=0))
    resi_rms = np.sqrt(np.sum(residuals**2, axis=0))
    assert (data_rms > resi_rms * 0.95).all(), \
        '%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    assert_equal(fwd['src'][0]['coord_frame'], FIFF.FIFFV_COORD_HEAD)
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    out = dip.crop(dip_c.times[0], dip_c.times[-1])
    assert (dip is out)
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did about as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [
            180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori, axis=1)))
        ]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude)**2))]
        gofs += [np.mean(d.gof)]
    if os.getenv('TRAVIS', 'false').lower() == 'true' and \
            'OPENBLAS_NUM_THREADS' in os.environ:
        # XXX possibly some OpenBLAS numerical differences make
        # things slightly worse for us
        factor = 0.7
    else:
        factor = 0.8
    assert dists[0] / factor >= dists[1], 'dists: %s' % dists
    assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
    assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
        'gc-dists (ori): %s' % gc_dists
    assert amp_errs[0] / factor >= amp_errs[1],\
        'amplitude errors: %s' % amp_errs
    # This one is weird because our cov/sim/picking is weird
    assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs
Example #30
vis_r = epochs["visual", "right"].average()

all_evokeds = [aud_l, aud_r, vis_l, vis_r]
print(all_evokeds)

###############################################################################
# This can be simplified with a Python list comprehension:
all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]
print(all_evokeds)

# Then, we construct and plot an unweighted average of left vs. right trials
# this way, too:
mne.combine_evoked(all_evokeds, weights=(0.25, -0.25, 0.25, -0.25)).plot_joint()

###############################################################################
# Often, it makes sense to store Evoked objects in a dictionary or a list -
# either different conditions, or different subjects.

# If they are stored in a list, they can be easily averaged, for example,
# for a grand average across subjects (or conditions).
grand_average = mne.grand_average(all_evokeds)
mne.write_evokeds("/tmp/tmp-ave.fif", all_evokeds)

# If Evokeds objects are stored in a dictionary, they can be retrieved by name.
all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)
print(all_evokeds["left/auditory"])

# Besides for explicit access, this can be used for example to set titles.
for cond in all_evokeds:
    all_evokeds[cond].plot_joint(title=cond)
Example #31
def test_dipole_fitting():
    """Test dipole fitting."""
    amp = 10e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False, force_fixed=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
                for s in fwd['src']]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd, stc, evoked.info, cov, snr=20,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(np.concatenate([
        pick_types(evoked.info, meg=True, eeg=False)[::2],
        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
        '--noise', fname_cov, '--dip', fname_dtemp,
        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
    factor = 1.
    # XXX weird, inexplicable difference for 3.5 build we'll assume is due to
    # Anaconda bug for now...
    if os.getenv('TRAVIS', 'false') == 'true' and \
            sys.version[:3] in ('3.5', '2.7'):
        factor = 0.8
    assert_true((data_rms > factor * resi_rms).all(),
                msg='%s (factor: %s)' % ((data_rms / resi_rms).min(), factor))

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    dip.crop(dip_c.times[0], dip_c.times[-1])
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did at least as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
                                                     axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    assert_true(dists[0] >= dists[1] * factor, 'dists: %s' % dists)
    assert_true(corrs[0] <= corrs[1] / factor, 'corrs: %s' % corrs)
    assert_true(gc_dists[0] >= gc_dists[1] * factor,
                'gc-dists (ori): %s' % gc_dists)
    assert_true(amp_errs[0] >= amp_errs[1] * factor,
                'amplitude errors: %s' % amp_errs)
    assert_true(gofs[0] <= gofs[1] / factor, 'gof: %s' % gofs)
Example #32
all_evokeds = [aud_l, aud_r, vis_l, vis_r]
print(all_evokeds)

###############################################################################
# This can be simplified with a Python list comprehension:
all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]
print(all_evokeds)

# Then, we construct and plot an unweighted average of left vs. right trials
# this way, too:
mne.combine_evoked(all_evokeds,
                   weights=(0.25, -0.25, 0.25, -0.25)).plot_joint()

###############################################################################
# Often, it makes sense to store Evoked objects in a dictionary or a list -
# either different conditions, or different subjects.

# If they are stored in a list, they can be easily averaged, for example,
# for a grand average across subjects (or conditions).
grand_average = mne.grand_average(all_evokeds)
mne.write_evokeds('/tmp/tmp-ave.fif', all_evokeds)

# If Evokeds objects are stored in a dictionary, they can be retrieved by name.
all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)
print(all_evokeds['left/auditory'])

# Besides for explicit access, this can be used for example to set titles.
for cond in all_evokeds:
    all_evokeds[cond].plot_joint(title=cond)
Example #33
def test_dipole_fitting():
    """Test dipole fitting"""
    amp = 10e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, "test.dip")
    fname_sim = op.join(tempdir, "test-ave.fif")
    fwd = convert_forward_solution(read_forward_solution(fname_fwd), surf_ori=False, force_fixed=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [np.sort(rng.permutation(s["vertno"])[:n_per_hemi]) for s in fwd["src"]]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd, stc, evoked.info, cov, snr=20, random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(
        np.concatenate(
            [pick_types(evoked.info, meg=True, eeg=False)[::2], pick_types(evoked.info, meg=False, eeg=True)[::2]]
        )
    )
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess(
        [
            "mne_dipole_fit",
            "--meas",
            fname_sim,
            "--meg",
            "--eeg",
            "--noise",
            fname_cov,
            "--dip",
            fname_dtemp,
            "--mri",
            fname_fwd,
            "--reg",
            "0",
            "--tmin",
            "0",
        ]
    )
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
    factor = 1.0
    # XXX weird, inexplicable difference for 3.5 build we'll assume is due to
    # Anaconda bug for now...
    if os.getenv("TRAVIS", "false") == "true" and sys.version[:3] in ("3.5", "2.7"):
        factor = 0.8
    assert_true((data_rms > factor * resi_rms).all(), msg="%s (factor: %s)" % ((data_rms / resi_rms).min(), factor))

    # Compare to original points
    transform_surface_to(fwd["src"][0], "head", fwd["mri_head_t"])
    transform_surface_to(fwd["src"][1], "head", fwd["mri_head_t"])
    src_rr = np.concatenate([s["rr"][v] for s, v in zip(fwd["src"], vertices)], axis=0)
    src_nn = np.concatenate([s["nn"][v] for s, v in zip(fwd["src"], vertices)], axis=0)

    # MNE-C skips the last "time" point :(
    dip.crop(dip_c.times[0], dip_c.times[-1])
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did at least as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori, axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    assert_true(dists[0] >= dists[1] * factor, "dists: %s" % dists)
    assert_true(corrs[0] <= corrs[1] / factor, "corrs: %s" % corrs)
    assert_true(gc_dists[0] >= gc_dists[1] * factor, "gc-dists (ori): %s" % gc_dists)
    assert_true(amp_errs[0] >= amp_errs[1] * factor, "amplitude errors: %s" % amp_errs)
    assert_true(gofs[0] <= gofs[1] / factor, "gof: %s" % gofs)
Example #34
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args."""
    tempdir = _TempDir()
    ave = read_evokeds(fname, 0)

    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]

    # This not being assert_array_equal due to windows rounding
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert (repr(ave))

    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))

    # test str access
    condition = 'Left Auditory'
    pytest.raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    pytest.raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)

    # test read_evokeds and write_evokeds
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)

    # test warnings on bad filenames
    fname2 = op.join(tempdir, 'test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        write_evokeds(fname2, ave)
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        read_evokeds(fname2)

    # constructor
    pytest.raises(TypeError, Evoked, fname)

    # MaxShield
    fname_ms = op.join(tempdir, 'test-ave.fif')
    assert (ave.info['maxshield'] is False)
    ave.info['maxshield'] = True
    ave.save(fname_ms)
    pytest.raises(ValueError, read_evokeds, fname_ms)
    with pytest.warns(RuntimeWarning, match='Elekta'):
        aves = read_evokeds(fname_ms, allow_maxshield=True)
    assert all(ave.info['maxshield'] is True for ave in aves)
    aves = read_evokeds(fname_ms, allow_maxshield='yes')
    assert (all(ave.info['maxshield'] is True for ave in aves))
Example #35
def test_io_evoked(tmpdir):
    """Test IO for evoked data (fif + gz) with integer and str args."""
    ave = read_evokeds(fname, 0)
    ave_double = ave.copy()
    ave_double.comment = ave.comment + ' doubled nave'
    ave_double.nave = ave.nave * 2

    write_evokeds(tmpdir.join('evoked-ave.fif'), [ave, ave_double])
    ave2, ave_double = read_evokeds(op.join(tmpdir, 'evoked-ave.fif'))
    assert ave2.nave * 2 == ave_double.nave

    # This not being assert_array_equal due to windows rounding
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)
    assert (repr(ave))

    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert (np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))

    # test str access
    condition = 'Left Auditory'
    pytest.raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    pytest.raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)

    # test read_evokeds and write_evokeds
    aves1 = read_evokeds(fname)[1::2]
    aves2 = read_evokeds(fname, [1, 3])
    aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual'])
    write_evokeds(tmpdir.join('evoked-ave.fif'), aves1)
    aves4 = read_evokeds(tmpdir.join('evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)

    # test saving and reading complex numbers in evokeds
    ave_complex = ave.copy()
    ave_complex._data = 1j * ave_complex.data
    fname_temp = str(tmpdir.join('complex-ave.fif'))
    ave_complex.save(fname_temp)
    ave_complex = read_evokeds(fname_temp)[0]
    assert_allclose(ave.data, ave_complex.data.imag)

    # test warnings on bad filenames
    fname2 = tmpdir.join('test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        write_evokeds(fname2, ave)
    with pytest.warns(RuntimeWarning, match='-ave.fif'):
        read_evokeds(fname2)

    # test writing when order of bads doesn't match
    fname3 = tmpdir.join('test-bad-order-ave.fif')
    condition = 'Left Auditory'
    ave4 = read_evokeds(fname, condition)
    ave4.info['bads'] = ave4.ch_names[:3]
    ave5 = ave4.copy()
    ave5.info['bads'] = ave4.info['bads'][::-1]
    write_evokeds(fname3, [ave4, ave5])

    # constructor
    pytest.raises(TypeError, Evoked, fname)

    # MaxShield
    fname_ms = tmpdir.join('test-ave.fif')
    assert (ave.info['maxshield'] is False)
    ave.info['maxshield'] = True
    ave.save(fname_ms)
    pytest.raises(ValueError, read_evokeds, fname_ms)
    with pytest.warns(RuntimeWarning, match='Elekta'):
        aves = read_evokeds(fname_ms, allow_maxshield=True)
    assert all(ave.info['maxshield'] is True for ave in aves)
    aves = read_evokeds(fname_ms, allow_maxshield='yes')
    assert (all(ave.info['maxshield'] is True for ave in aves))
Beispiel #36
0
old_data = epochs_tmp.get_data()
data = np.empty_like(old_data)
for ii, e in enumerate(old_data):
    data[ii] = np.dot(projector, e)
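# (The loop above applies the SSP projection matrix to each epoch by hand.
#  For projectors already stored in epochs_tmp.info, epochs_tmp.apply_proj()
#  would be the usual shortcut; the explicit dot product is only needed for a
#  custom projector matrix like the one used here.)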
# rejection threshold taken from the Brainstorm tutorial
# (http://neuroimage.usc.edu/brainstorm/Tutorials/TutRawAvg): 2000 fT
epochs_clean = mne.epochs.EpochsArray(data, info, epochs_tmp.events,
                                      reject=dict(mag=2e-12),
                                      event_id=epochs_tmp.event_id,
                                      tmin=np.min(epochs_tmp.times))

# and check how different the averages get
evokeds_clean = [epochs_clean[name].average() for name in conds[:3]]
title = 'Averaged data after EOG and bad trials removal'
mne.viz.plot_topo(evokeds_clean, color=colors, title=title)

# save evoked responses for all 5 conditions
print 'Saving evoked data...'
evokeds_clean = [epochs_clean[name].average() for name in conds]
mne.write_evokeds(evoked_dir + subj + '_stop_BP1-35_DS120-ave.fif', evokeds_clean)

# output number of averages
for ev in evokeds_clean:
    print ev.comment, ': %d averages' % ev.nave

# For the data tracker
print '=== Add to data tracker ==='
print 'Y\t%s\t%d\t%d\t%d' % (comps2use, evokeds_clean[1].nave,
                             evokeds_clean[4].nave, evokeds_clean[0].nave)
print '==============='

# now that we know which epochs to drop, copy the log to the actual epochs structure and drop them
bad_epochs = [i for i, j in enumerate(epochs_clean.drop_log) if len(j) > 0]
epochs.drop_epochs(bad_epochs)
# save epochs with (optional) EOG projection
print 'Saving epochs with optional SSP operators...'
    if num in {88, 89, 92, 100}:
        continue
    for run in runs:
        raw = load_subject(num, run)
        fix_channels(raw)
        add_montage(raw)
        raw.pick_channels(ch_names)

        # Band-pass filter to capture the relevant signal (alpha, beta,
        # and mu ranges). Butterworth filter is implied by method='iir'
        # with iir_params=None or left out.
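        # Rough sketch (not in the original script): with iir_params=None the
        # filter typically resolves to a 4th-order Butterworth, i.e. roughly
        #     raw.filter(7.0, 30.0, method='iir',
        #                iir_params=dict(order=4, ftype='butter'),
        #                n_jobs=n_cores)
        # see mne.filter.construct_iir_filter for the exact defaults.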

        raw.filter(7.0, 30.0, method='iir', n_jobs=n_cores)
        ica = ICA(n_components=0.95, random_state=random_state)
        ica.fit(raw, decim=3)
        ica.apply(raw)
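        # Note: ica.apply removes only the components listed in ica.exclude
        # (or passed via its exclude argument). Nothing is marked here, so no
        # artifact components are actually subtracted; typically one would
        # first populate ica.exclude, e.g. via ica.find_bads_eog.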
        events = find_events(raw, consecutive=False)
        epochs = Epochs(raw,
                        events,
                        event_id,
                        tmin,
                        tmax,
                        baseline=baseline,
                        preload=True,
                        proj=False)
        evoked_avg = [epochs[cond].average() for cond in ['left_fist',
                                                          'right_fist']]
        filename = splitext(raw.info['filename'])[0]
        epochs.save(filename + '-epo.fif')
        write_evokeds(filename + '-ave.fif', evoked_avg)
Beispiel #38
0
        if saveEpochs:
            fname_epochs = fstem + '_' + condstem + '-epo.fif'
            epochs.save(respath + fname_epochs)

    # Now save overall onset N100
    epochs = mne.Epochs(raw,
                        eves,
                        event_id=condlists,
                        tmin=-0.4,
                        proj=True,
                        tmax=1.0,
                        baseline=(-0.2, 0.0),
                        reject=dict(grad=5000e-13, mag=5e-12))
    evokeds += [
        epochs.average(),
    ]
    if saveEpochs:
        fname_epochs = fstem + '_onset-epo.fif'
        epochs.save(respath + fname_epochs)

    if saveAve:
        avename = subj + ssstag + '_' + para + '_collapse-ave.fif'
        mne.write_evokeds(respath + avename, evokeds)

    if saveCov:
        # Compute covariance
        cov = compute_covariance(epochs, tmin=-0.2, tmax=0.0)
        covname = subj + ssstag + '_' + para + '_collapse-cov.fif'
        cov.save(respath + covname)
else:
    subj = 'ABUTRIKQ'

# open the matched BP1-35Hz file and borrow its event labels
epochs_fname = epochs_dir + subj + '_stop_parsed_matched_BP1-35_DS120-epo.fif.gz'
epochs35 = mne.read_epochs(epochs_fname, proj=True)
epochs_fname = epochs_dir + subj + '_stop_parsed_BP1-100_DS300-epo.fif.gz'
epochs = mne.read_epochs(epochs_fname, proj=True)
# we need to drop events that were not matched
if len(epochs35.events) < len(epochs.events):
    drop_me = np.arange(len(epochs.events))
    drop_me[drop_me < len(epochs35.events)] = 0
    drop_me = np.nonzero(drop_me)[0]
    epochs.drop_epochs(drop_me)
epochs.event_id = epochs35.event_id
epochs.events = epochs35.events

# open the cleaned epochs file and grab its SSP operators and clean log
epochs_fname = epochs_dir + subj + '_stop_parsed_matched_clean_BP1-35_DS120-epo.fif.gz'
epochs35 = mne.read_epochs(epochs_fname, proj=True)
bad_epochs = [i for i, j in enumerate(epochs35.drop_log) if len(j) > 0]
epochs.drop_epochs(bad_epochs)
epochs.info['projs'] = epochs35.info['projs']

# make averaged file and save final result
print 'Saving epochs and evoked data with optional SSP operators...'
evokeds = [epochs[name].average() for name in conds]
mne.write_evokeds(evoked_dir + subj + '_stop_parsed_matched_BP1-100_DS300-ave.fif', evokeds)
new_fname = epochs_dir + subj + '_stop_parsed_matched_clean_BP1-100_DS300-epo.fif.gz'
epochs.save(new_fname)
Beispiel #40
0
def test_io_evoked():
    """Test IO for evoked data (fif + gz) with integer and str args
    """
    ave = read_evokeds(fname, 0)

    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]

    # Not assert_array_equal because of Windows rounding differences
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
    assert_array_almost_equal(ave.times, ave2.times)
    assert_equal(ave.nave, ave2.nave)
    assert_equal(ave._aspect_kind, ave2._aspect_kind)
    assert_equal(ave.kind, ave2.kind)
    assert_equal(ave.last, ave2.last)
    assert_equal(ave.first, ave2.first)

    # test compressed i/o
    ave2 = read_evokeds(fname_gz, 0)
    assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))

    # test str access
    condition = 'Left Auditory'
    assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
    assert_raises(ValueError, read_evokeds, fname, condition,
                  kind='standard_error')
    ave3 = read_evokeds(fname, condition)
    assert_array_almost_equal(ave.data, ave3.data, 19)

    # test deprecation warning for read_evoked and write_evoked
    # XXX should be deleted for 0.9 release
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ave = read_evoked(fname, setno=0)
        assert_true(w[0].category == DeprecationWarning)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        write_evoked(op.join(tempdir, 'evoked-ave.fif'), ave)
        assert_true(w[0].category == DeprecationWarning)

    # test read_evokeds and write_evokeds
    types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
    aves1 = read_evokeds(fname)
    aves2 = read_evokeds(fname, [0, 1, 2, 3])
    aves3 = read_evokeds(fname, types)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
    aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
    for aves in [aves2, aves3, aves4]:
        for [av1, av2] in zip(aves1, aves):
            assert_array_almost_equal(av1.data, av2.data)
            assert_array_almost_equal(av1.times, av2.times)
            assert_equal(av1.nave, av2.nave)
            assert_equal(av1.kind, av2.kind)
            assert_equal(av1._aspect_kind, av2._aspect_kind)
            assert_equal(av1.last, av2.last)
            assert_equal(av1.first, av2.first)
            assert_equal(av1.comment, av2.comment)

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fname2 = op.join(tempdir, 'test-bad-name.fif')
        write_evokeds(fname2, ave)
        read_evokeds(fname2)
    assert_true(len(w) == 2)
        n_ok = len(eps_ok)
        n_dropped = len(drop_log) - n_ok
        print('%d epochs ok, %d epochs rejected' % (n_ok, n_dropped))
        if n_ok == 0:
            print('Skipping category')
            continue
        print('Computing weighted average...')
        w_snr_ok = w_snr[eps_ok]
        weigh_epochs(data_epochs, w_snr_ok)
        evoked = data_epochs.average()
        evoked.comment = cat['comment']
        evokeds.append(evoked)

        if args.plot_snr and eps_ok:
            plt.figure()
            plt.plot(eps_ok, w_snr_ok)
            plt.title('Per-epoch SNR for category: ' + cat['comment'])
            plt.xlabel('Epoch n (good epochs only)')
            plt.ylabel('Epoch weight (unnormalized)')
            figs = True

    """ Write all resulting evoked objects to a fiff file. """
    if evokeds:
        fn = fnbase + '_chpi_weighted-ave.fif'
        print('Saving', fn)
        mne.write_evokeds(fn, evokeds)

    if args.plot_snr and figs:
        plt.show()

Beispiel #42
0
def save_epochs(p, subjects, in_names, in_numbers, analyses, out_names,
                out_numbers, must_match, decim, run_indices):
    """Generate epochs from raw data based on events

    Can only complete after preprocessing is complete.

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    in_names : list of str
        Names of input events.
    in_numbers : list of list of int
        Event numbers (in scored event files) associated with each name.
    analyses : list of str
        Lists of analyses of interest.
    out_names : list of list of str
        Event types to make out of old ones.
    out_numbers : list of list of int
        Event numbers to convert to (e.g., [[1, 1, 2, 3, 3], ...] would create
        three event types, where the first two and last two event types from
        the original list get collapsed over).
    must_match : list of int
        Indices from the original in_names that must match in event counts
        before collapsing. Should eventually be expanded to allow for
        ratio-based collapsing.
    decim : int | list of int
        Amount to decimate.
    run_indices : array-like | None
        Run indices to include.
    """
    in_names = np.asanyarray(in_names)
    old_dict = dict()
    for n, e in zip(in_names, in_numbers):
        old_dict[n] = e

    # let's do some sanity checks
    if len(in_names) != len(in_numbers):
        raise RuntimeError('in_names (%d) must have same length as '
                           'in_numbers (%d)' %
                           (len(in_names), len(in_numbers)))
    if np.any(np.array(in_numbers) <= 0):
        raise ValueError('in_numbers must all be > 0')
    if len(out_names) != len(out_numbers):
        raise RuntimeError('out_names must have same length as out_numbers')
    for name, num in zip(out_names, out_numbers):
        num = np.array(num)
        if len(name) != len(np.unique(num[num > 0])):
            raise RuntimeError('each entry in out_names must have length '
                               'equal to the number of unique elements in the '
                               'corresponding entry in out_numbers:\n%s\n%s' %
                               (name, np.unique(num[num > 0])))
        if len(num) != len(in_names):
            raise RuntimeError('each entry in out_numbers must have the same '
                               'length as in_names')
        if (np.array(num) == 0).any():
            raise ValueError('no element of out_numbers can be zero')

    ch_namess = list()
    drop_logs = list()
    sfreqs = set()
    for si, subj in enumerate(subjects):
        if p.disp_files:
            print('  Loading raw files for subject %s.' % subj)
        epochs_dir = op.join(p.work_dir, subj, p.epochs_dir)
        if not op.isdir(epochs_dir):
            os.mkdir(epochs_dir)
        evoked_dir = op.join(p.work_dir, subj, p.inverse_dir)
        if not op.isdir(evoked_dir):
            os.mkdir(evoked_dir)
        # read in raw files
        raw_names = get_raw_fnames(p, subj, 'pca', False, False,
                                   run_indices[si])
        first_samps = []
        last_samps = []
        for raw_fname in raw_names:
            raw = read_raw_fif(raw_fname, preload=False)
            first_samps.append(raw._first_samps[0])
            last_samps.append(raw._last_samps[-1])
        raw = [read_raw_fif(fname, preload=False) for fname in raw_names]
        _fix_raw_eog_cals(raw)  # EOG epoch scales might be bad!
        raw = concatenate_raws(raw)
        # read in events
        events = _read_events(p, subj, run_indices[si], raw)
        this_decim = _handle_decim(decim[si], raw.info['sfreq'])
        new_sfreq = raw.info['sfreq'] / this_decim
        if p.disp_files:
            print('    Epoching data (decim=%s -> sfreq=%0.1f Hz).' %
                  (this_decim, new_sfreq))
        if new_sfreq not in sfreqs:
            if len(sfreqs) > 0:
                warnings.warn('resulting new sampling frequency %s not equal '
                              'to previous values %s' % (new_sfreq, sfreqs))
            sfreqs.add(new_sfreq)
        epochs_fnames, evoked_fnames = get_epochs_evokeds_fnames(
            p, subj, analyses)
        mat_file, fif_file = epochs_fnames
        if p.autoreject_thresholds:
            assert len(p.autoreject_types) > 0
            assert all(a in ('mag', 'grad', 'eeg', 'ecg', 'eog')
                       for a in p.autoreject_types)
            from autoreject import get_rejection_threshold
            print('    Computing autoreject thresholds', end='')
            rtmin = p.reject_tmin if p.reject_tmin is not None else p.tmin
            rtmax = p.reject_tmax if p.reject_tmax is not None else p.tmax
            temp_epochs = Epochs(raw,
                                 events,
                                 event_id=None,
                                 tmin=rtmin,
                                 tmax=rtmax,
                                 baseline=_get_baseline(p),
                                 proj=True,
                                 reject=None,
                                 flat=None,
                                 preload=True,
                                 decim=this_decim,
                                 reject_by_annotation=p.reject_epochs_by_annot)
            kwargs = dict()
            if 'verbose' in get_args(get_rejection_threshold):
                kwargs['verbose'] = False
            new_dict = get_rejection_threshold(temp_epochs, **kwargs)
            use_reject = dict()
            msgs = list()
            for k in p.autoreject_types:
                msgs.append('%s=%d %s' % (k, DEFAULTS['scalings'][k] *
                                          new_dict[k], DEFAULTS['units'][k]))
                use_reject[k] = new_dict[k]
            print(': ' + ', '.join(msgs))
            hdf5_file = fif_file.replace('-epo.fif', '-reject.h5')
            assert hdf5_file.endswith('.h5')
            write_hdf5(hdf5_file, use_reject, overwrite=True)
        else:
            use_reject = _handle_dict(p.reject, subj)
        # create epochs
        flat = _handle_dict(p.flat, subj)
        use_reject, use_flat = _restrict_reject_flat(use_reject, flat, raw)
        epochs = Epochs(raw,
                        events,
                        event_id=old_dict,
                        tmin=p.tmin,
                        tmax=p.tmax,
                        baseline=_get_baseline(p),
                        reject=use_reject,
                        flat=use_flat,
                        proj=p.epochs_proj,
                        preload=True,
                        decim=this_decim,
                        on_missing=p.on_missing,
                        reject_tmin=p.reject_tmin,
                        reject_tmax=p.reject_tmax,
                        reject_by_annotation=p.reject_epochs_by_annot)
        del raw
        if epochs.events.shape[0] < 1:
            epochs.plot_drop_log()
            raise ValueError('No valid epochs')
        drop_logs.append(epochs.drop_log)
        ch_namess.append(epochs.ch_names)
        # at this point only trials that were not dropped remain
        sfreq = epochs.info['sfreq']
        # now deal with conditions to save evoked
        if p.disp_files:
            print('    Matching trial counts and saving data to disk.')
        for var, name in ((out_names, 'out_names'), (out_numbers,
                                                     'out_numbers'),
                          (must_match, 'must_match'), (evoked_fnames,
                                                       'evoked_fnames')):
            if len(var) != len(analyses):
                raise ValueError('len(%s) (%s) != len(analyses) (%s)' %
                                 (name, len(var), len(analyses)))
        for analysis, names, numbers, match, fn in zip(analyses, out_names,
                                                       out_numbers, must_match,
                                                       evoked_fnames):
            # do matching
            numbers = np.asanyarray(numbers)
            nn = numbers[numbers >= 0]
            new_numbers = []
            for num in numbers:
                if num > 0 and num not in new_numbers:
                    # Eventually we could relax this requirement, but not
                    # having it in place is likely to cause people pain...
                    if any(num < n for n in new_numbers):
                        raise RuntimeError('each list of new_numbers must be '
                                           'monotonically increasing')
                    new_numbers.append(num)
            new_numbers = np.array(new_numbers)
            in_names_match = in_names[match]
            # use some variables to allow safe name re-use
            offset = max(epochs.events[:, 2].max(), new_numbers.max()) + 1
            safety_str = '__mnefun_copy__'
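            # The offset/safety_str pair allows event codes to be re-used
            # safely below: collapsed conditions are first written as
            # (num + offset) under a temporary '<name>__mnefun_copy__' id so
            # they cannot clash with any existing code, and are then shifted
            # back to num and renamed to the final out_name.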
            assert len(new_numbers) == len(names)  # checked above
            if p.match_fun is None:
                # first, equalize trial counts (this will make a copy)
                e = epochs[list(in_names[numbers > 0])]
                if len(in_names_match) > 1:
                    e.equalize_event_counts(in_names_match)

                # second, collapse relevant types
                for num, name in zip(new_numbers, names):
                    collapse = [
                        x for x in in_names[num == numbers] if x in e.event_id
                    ]
                    combine_event_ids(e,
                                      collapse,
                                      {name + safety_str: num + offset},
                                      copy=False)
                for num, name in zip(new_numbers, names):
                    e.events[e.events[:, 2] == num + offset, 2] -= offset
                    e.event_id[name] = num
                    del e.event_id[name + safety_str]
            else:  # custom matching
                e = p.match_fun(epochs.copy(), analysis, nn, in_names_match,
                                names)

            # now make evoked for each out type
            evokeds = list()
            n_standard = 0
            kinds = ['standard']
            if p.every_other:
                kinds += ['even', 'odd']
            for kind in kinds:
                for name in names:
                    this_e = e[name]
                    if kind == 'even':
                        this_e = this_e[::2]
                    elif kind == 'odd':
                        this_e = this_e[1::2]
                    else:
                        assert kind == 'standard'
                    if len(this_e) > 0:
                        ave = this_e.average(picks='all')
                        stde = this_e.standard_error(picks='all')
                        if kind != 'standard':
                            ave.comment += ' %s' % (kind, )
                            stde.comment += ' %s' % (kind, )
                        evokeds.append(ave)
                        evokeds.append(stde)
                        if kind == 'standard':
                            n_standard += 2
            write_evokeds(fn, evokeds)
            naves = [
                str(n) for n in sorted(
                    set([evoked.nave for evoked in evokeds[:n_standard]]))
            ]
            naves = ', '.join(naves)
            if p.disp_files:
                print('      Analysis "%s": %s epochs / condition' %
                      (analysis, naves))

        if p.disp_files:
            print('    Saving epochs to disk.')
        if 'mat' in p.epochs_type:
            spio.savemat(mat_file,
                         dict(epochs=epochs.get_data(),
                              events=epochs.events,
                              sfreq=sfreq,
                              drop_log=epochs.drop_log),
                         do_compression=True,
                         oned_as='column')
        if 'fif' in p.epochs_type:
            epochs.save(fif_file, **_get_epo_kwargs())

    if p.plot_drop_logs:
        for subj, drop_log in zip(subjects, drop_logs):
            plot_drop_log(drop_log, threshold=p.drop_thresh, subject=subj)
    subj, cnt, len(behavior), len(event_order))

# saving changes to Epoch
epochs.event_id = new_event_ids
epochs.events = new_events

print 'Downsampling %s...' % subj
epochs.resample(300)

# let's save a version without dropping based on SSPs
red_epochs = epochs.copy()
no_interest = np.nonzero(red_epochs.events[:, 2] < 13)[0]
red_epochs.drop_epochs(no_interest)
print 'Saving epochs and evoked data...'
evokeds = [red_epochs[name].average() for name in ['STI-correct', 'STI-incorrect']]
mne.write_evokeds(dir_out + subj + '_stop_parsed_matched_BP1-100_DS300-ave.fif', evokeds)
new_fname = dir_out + subj + '_stop_parsed_matched_BP1-100_DS300-epo.fif.gz'
red_epochs.save(new_fname)

# now we save a version after cleaning with SSPs
# grab SSP vectors from previous cleanup sessions
epochs_fname = clean_dir + subj + '_stop_parsed_matched_clean_BP1-35_DS120-epo.fif.gz'
epochs35 = mne.read_epochs(epochs_fname, proj=True)
bad_epochs = [i for i, j in enumerate(epochs35.drop_log) if len(j) > 0]
epochs.drop_epochs(bad_epochs)
epochs.info['projs'] = epochs35.info['projs']

# removing the epochs we don't want, need to do it again because indices
# changed after removing bad epochs based on SSP
no_interest = np.nonzero(epochs.events[:, 2] < 13)[0]
epochs.drop_epochs(no_interest)
                    picks=picks,
                    baseline=(None, 0),
                    reject=dict(eeg=80e-6, eog=150e-6),
                    preload=True)

epochs.plot()

# Look at channels that caused dropped events, showing that the subject's
# blinks were likely to blame for most epochs being dropped
epochs.drop_bad_epochs()
epochs.plot_drop_log(subject='sample')

# Average epochs and get evoked data corresponding to the left stimulation
evoked = epochs['Left'].average()

evoked.save('sample_audvis_eeg-ave.fif')  # save evoked data to disk

###############################################################################
# View evoked response

evoked.plot()

###############################################################################
# Save evoked responses for different conditions to disk

# average epochs and get Evoked datasets
evokeds = [epochs[cond].average() for cond in ['Left', 'Right']]

# save evoked data to disk
mne.write_evokeds('sample_auditory_and_visual_eeg-ave.fif', evokeds)
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
                       include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
# Let's equalize the trial counts in each condition
epochs.equalize_event_counts(['AudL', 'AudR', 'VisL', 'VisR'], copy=False)
# Now let's combine some conditions
combine_event_ids(epochs, ['AudL', 'AudR'], {'Auditory': 12}, copy=False)
combine_event_ids(epochs, ['VisL', 'VisR'], {'Visual': 34}, copy=False)
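# (the new event codes 12 and 34 are arbitrary labels for the merged
#  conditions; any integer not already used as an event code would do)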

# average epochs and get Evoked datasets
evokeds = [epochs[cond].average() for cond in ['Auditory', 'Visual']]

# save evoked data to disk
mne.write_evokeds('sample_auditory_and_visual_eeg-ave.fif', evokeds)

###############################################################################
# View evoked response
import matplotlib.pyplot as plt
plt.clf()
ax = plt.subplot(2, 1, 1)
evokeds[0].plot(axes=ax)
plt.title('EEG evoked potential, auditory trials')
plt.ylabel('Potential (uV)')
ax = plt.subplot(2, 1, 2)
evokeds[1].plot(axes=ax)
plt.title('EEG evoked potential, visual trials')
plt.ylabel('Potential (uV)')
plt.show()
def test_dipole_fitting():
    """Test dipole fitting"""
    amp = 10e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False, force_fixed=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
                for s in fwd['src']]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
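    # amp * np.eye(nv) activates each simulated source at exactly one time
    # sample, so every time point of the simulated evoked corresponds to a
    # single dipole of amplitude amp.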
    with warnings.catch_warnings(record=True):  # semi-def cov
        evoked = generate_evoked(fwd, stc, evoked, cov, snr=20,
                                 random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(np.concatenate([
        pick_types(evoked.info, meg=True, eeg=False)[::2],
        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
        '--noise', fname_cov, '--dip', fname_dtemp,
        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
    assert_true((data_rms > resi_rms).all())

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    dip.crop(dip_c.times[0], dip_c.times[-1])
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did at least as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
                                                     axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    assert_true(dists[0] >= dists[1], 'dists: %s' % dists)
    assert_true(corrs[0] <= corrs[1], 'corrs: %s' % corrs)
    assert_true(gc_dists[0] >= gc_dists[1], 'gc-dists (ori): %s' % gc_dists)
    assert_true(amp_errs[0] >= amp_errs[1], 'amplitude errors: %s' % amp_errs)
Beispiel #47
0
            print('  PSDs')
            psds = np.abs(
                np.fft.rfft(epochs.get_data()[:, picks], axis=-1, n=n_fft))
            freqs = np.fft.rfftfreq(n_fft, 1. / epochs.info['sfreq'])
            # psds, freqs = mne.time_frequency.psd_multitaper(
            #     epochs, fmin, fmax, picks=picks, n_jobs=12)
            info = mne.pick_info(epochs.info, picks)
            info['sfreq'] = 1. / np.diff(freqs[:2])[0]
            psds = np.mean(psds, axis=0)  # average of an average is okay here
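            # The EvokedArray below is only a container for the spectrum: the
            # frequency axis takes the place of the time axis (tmin is the
            # first frequency and 'sfreq' was set to the frequency resolution
            # above), so the standard Evoked I/O and plotting machinery can
            # be reused.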
            evoked = mne.EvokedArray(psds,
                                     info,
                                     tmin=freqs[0],
                                     nave=len(epochs),
                                     comment=kind)
            evokeds.append(evoked)
        mne.write_evokeds(op.join(psd_dir, subject + '_psd-ave.fif'), evokeds)

flim = [1, 50]
fnames = sorted(glob.glob(op.join(psd_dir, 'f2f_*_OTP_psd-ave.fif')))
evokeds = list()
for kind in kinds:
    evoked = [mne.read_evokeds(fname, kind) for fname in fnames]
    use_names = set(evoked[0].ch_names)
    for e in evoked[1:]:
        use_names = use_names & set(e.ch_names)
    use_names = list(use_names)
    for e in evoked:
        e.pick_channels(use_names)
    evoked = mne.combine_evoked(evoked, 'nave')
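    # weights='nave' makes combine_evoked form an nave-weighted average of
    # the per-file spectra, so recordings with more epochs contribute more.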
    evoked.crop(*flim)
    evoked.comment = kind.capitalize()
Beispiel #48
0
def test_shift_time_evoked():
    """Test for shifting of time scale."""
    tempdir = _TempDir()
    # Shift backward
    ave = read_evokeds(fname, 0).shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)

    # Shift forward twice the amount
    ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_bshift.shift_time(0.2, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)

    # Shift backward again
    ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    ave_fshift.shift_time(-0.1, relative=True)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)

    ave_normal = read_evokeds(fname, 0)
    ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    assert_allclose(ave_normal.data, ave_relative.data, atol=1e-16, rtol=1e-3)
    assert_array_almost_equal(ave_normal.times, ave_relative.times, 8)

    assert_equal(ave_normal.last, ave_relative.last)
    assert_equal(ave_normal.first, ave_relative.first)

    # Absolute time shift
    ave = read_evokeds(fname, 0)
    ave.shift_time(-0.3, relative=False)
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)

    ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)

    assert_allclose(ave_normal.data, ave_absolute.data, atol=1e-16, rtol=1e-3)
    assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))

    # subsample shift
    shift = 1e-6  # 1 µs, should be well below 1/sfreq
    ave = read_evokeds(fname, 0)
    times = ave.times
    ave.shift_time(shift)
    assert_allclose(times + shift, ave.times, atol=1e-16, rtol=1e-12)

    # test handling of Evoked.first, Evoked.last
    ave = read_evokeds(fname, 0)
    first_last = np.array([ave.first, ave.last])
    # should shift by 0 samples
    ave.shift_time(1e-6)
    assert_array_equal(first_last, np.array([ave.first, ave.last]))
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_loaded = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_array_almost_equal(ave.times, ave_loaded.times, 8)
    # should shift by 57 samples
    ave.shift_time(57. / ave.info['sfreq'])
    assert_array_equal(first_last + 57, np.array([ave.first, ave.last]))
    write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
    ave_loaded = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
    assert_array_almost_equal(ave.times, ave_loaded.times, 8)
Beispiel #49
0
all_evokeds = [aud_l, aud_r, vis_l, vis_r]
print(all_evokeds)

###############################################################################
# This can be simplified with a Python list comprehension:
all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]
print(all_evokeds)

# Then, we construct and plot an unweighted average of left vs. right trials
# this way, too:
mne.combine_evoked(
    all_evokeds, weights=(0.25, -0.25, 0.25, -0.25)).plot_joint(**joint_kwargs)

###############################################################################
# Often, it makes sense to store Evoked objects in a dictionary or a list -
# either different conditions, or different subjects.

# If they are stored in a list, they can be easily averaged, for example,
# for a grand average across subjects (or conditions).
grand_average = mne.grand_average(all_evokeds)
mne.write_evokeds('/tmp/tmp-ave.fif', all_evokeds)
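# Minimal read-back sketch (not part of the original tutorial): the file
# written above can be loaded again as a list, or by condition comment.
evokeds_back = mne.read_evokeds('/tmp/tmp-ave.fif')
# e.g., assuming 'left/auditory' is one of the stored comments:
# aud_l_back = mne.read_evokeds('/tmp/tmp-ave.fif', condition='left/auditory')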

# If Evokeds objects are stored in a dictionary, they can be retrieved by name.
all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)
print(all_evokeds['left/auditory'])

# Besides for explicit access, this can be used for example to set titles.
for cond in all_evokeds:
    all_evokeds[cond].plot_joint(title=cond, **joint_kwargs)
#   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = []  # or stim channels ['STI 014']
# raw.info['bads'] += ['EEG 053']  # bads + 1 more

# pick EEG and MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                       include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=reject,
                    preload=True)

# Plot epochs.
epochs.plot(trellis=False, title='Auditory left/right')

# Look at channels that caused dropped events, showing that the subject's
# blinks were likely to blame for most epochs being dropped
epochs.drop_bad_epochs()
epochs.plot_drop_log(subject='sample')

# Average epochs and get evoked data corresponding to the left stimulation
###############################################################################
# Save evoked responses for different conditions to disk

# average epochs and get Evoked datasets
evokeds = [epochs[cond].average() for cond in ['ent_left', 'ent_right',
                                               'ctl_left', 'ctl_right']]

# save evoked data to disk
mne.write_evokeds('0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif', evokeds)
Beispiel #51
0
def test_dipole_fitting():
    """Test dipole fitting."""
    amp = 100e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False, force_fixed=True,
                                   use_cps=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
                for s in fwd['src']]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(np.concatenate([
        pick_types(evoked.info, meg=True, eeg=False)[::2],
        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
        '--noise', fname_cov, '--dip', fname_dtemp,
        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    with pytest.warns(RuntimeWarning, match='projection'):
        dip, residual = fit_dipole(evoked, cov, sphere, fname_fwd)
    assert isinstance(residual, Evoked)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
    resi_rms = np.sqrt(np.sum(residual.data ** 2, axis=0))
    assert (data_rms > resi_rms * 0.95).all(), \
        '%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    assert_equal(fwd['src'][0]['coord_frame'], FIFF.FIFFV_COORD_HEAD)
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    out = dip.crop(dip_c.times[0], dip_c.times[-1])
    assert (dip is out)
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did about as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
                                                     axis=1)))]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
        gofs += [np.mean(d.gof)]
    # XXX possibly some OpenBLAS numerical differences make
    # things slightly worse for us
    factor = 0.7
    assert dists[0] / factor >= dists[1], 'dists: %s' % dists
    assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
    assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
        'gc-dists (ori): %s' % gc_dists
    assert amp_errs[0] / factor >= amp_errs[1],\
        'amplitude errors: %s' % amp_errs
    # This one is weird because our cov/sim/picking is weird
    assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs