Example no. 1
def process_hcp_runs():
    # Assumes module-level names defined elsewhere: subject, data_folder,
    # epoch_size, the features and labels lists, and a spectral_features()
    # helper (sketched after this example).

    for run_index in range(3):
        raw = hcp.read_raw(subject=subject,
                           data_type='rest',
                           hcp_path=data_folder,
                           run_index=run_index)

        # duration in seconds
        duration = int(raw.n_times / raw.info['sfreq'])

        start: float = 0.0
        for end in range(epoch_size, min(duration, 400), epoch_size):
            raw.crop(tmin=start, tmax=end).load_data()
            features.append(spectral_features(raw))
            labels.append("{}-{}-{}".format(subject, run_index,
                                            int(end / epoch_size)))

            start = end
            # re-read the signal - faster than copying in-memory version
            raw = hcp.read_raw(subject=subject,
                               data_type='rest',
                               hcp_path=data_folder,
                               run_index=run_index)
            # progress...
            print('.', end='', flush=True)
        print('|', end='', flush=True)
    return labels, features
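The spectral_features helper used in Example no. 1 is not shown. A minimal, hypothetical sketch of what it might look like, assuming MNE >= 1.2 (Raw.compute_psd) and band edges chosen only for illustration:

import numpy as np

def spectral_features(raw, bands=((1, 4), (4, 8), (8, 13), (13, 30))):
    # Welch PSD per channel, then average log-power within each band.
    spectrum = raw.compute_psd(method='welch', fmin=1., fmax=40.)
    psds, freqs = spectrum.get_data(return_freqs=True)
    feats = [np.log10(psds[:, (freqs >= lo) & (freqs < hi)].mean(axis=1))
             for lo, hi in bands]
    return np.concatenate(feats)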
Example no. 2
def compute_noise_cov(subject, hcp_path, noise_cov_fname=''):
    if noise_cov_fname == '':
        noise_cov_fname = meg.NOISE_COV.format(cond='empty_room')
    if op.isfile(noise_cov_fname):
        noise_cov = mne.read_cov(noise_cov_fname)
        return noise_cov
    utils.make_dir(utils.get_parent_fol(noise_cov_fname))

    raw_noise = hcp.read_raw(subject=subject,
                             hcp_path=hcp_path,
                             data_type='noise_empty_room')
    raw_noise.load_data()
    # apply ref channel correction and drop ref channels
    preproc.apply_ref_correction(raw_noise)
    raw_noise.filter(0.50,
                     None,
                     method='iir',
                     iir_params=dict(order=4, ftype='butter'),
                     n_jobs=1)
    raw_noise.filter(None,
                     60,
                     method='iir',
                     iir_params=dict(order=4, ftype='butter'),
                     n_jobs=1)
    ##############################################################################
    # Note that using the empty-room noise covariance will inflate the SNR of
    # the evoked response and render comparisons to `baseline` rather uninformative.
    noise_cov = mne.compute_raw_covariance(raw_noise, method='empirical')
    noise_cov.save(noise_cov_fname)
    return noise_cov
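A minimal usage sketch for compute_noise_cov; the subject ID, HCP path, and output file name below are placeholders, not values from the original code:

noise_cov = compute_noise_cov('100307', hcp_path='/path/to/HCP',
                              noise_cov_fname='empty_room-cov.fif')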
Example no. 3
def read_raw_data(run_index, hcp_params, lfreq=0.5, hfreq=60):
    raw = hcp.read_raw(run_index=run_index, **hcp_params)
    raw.load_data()
    # apply ref channel correction and drop ref channels
    # preproc.apply_ref_correction(raw)

    annots = hcp.read_annot(run_index=run_index, **hcp_params)
    # construct MNE annotations
    bad_seg = (annots['segments']['all']) / raw.info['sfreq']
    annotations = mne.Annotations(
        bad_seg[:, 0], (bad_seg[:, 1] - bad_seg[:, 0]),
        description='bad')

    raw.annotations = annotations
    raw.info['bads'].extend(annots['channels']['all'])
    raw.pick_types(meg=True, ref_meg=False)

    raw.filter(lfreq, None, method='iir',
               iir_params=dict(order=4, ftype='butter'), n_jobs=1)
    raw.filter(None, hfreq, method='iir',
               iir_params=dict(order=4, ftype='butter'), n_jobs=1)

    # read ICA and remove EOG ECG
    # note that the HCP ICA assumes that bad channels have already been removed
    ica_mat = hcp.read_ica(run_index=run_index, **hcp_params)

    # Exclude the ECG/EOG components, keeping only the brain ICs
    exclude = annots['ica']['ecg_eog_ic']
    preproc.apply_ica_hcp(raw, ica_mat=ica_mat, exclude=exclude)
    return raw
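read_raw_data unpacks a shared hcp_params dict into hcp.read_raw, hcp.read_annot, and hcp.read_ica. A hypothetical sketch of how that dict might be built (subject ID and path are placeholders):

hcp_params = dict(subject='100307',
                  hcp_path='/path/to/HCP',
                  data_type='task_working_memory')
raw = read_raw_data(run_index=0, hcp_params=hcp_params)

Example no. 4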
def test_apply_ica():
    """Test ICA application."""
    raw = hcp.read_raw(data_type='rest', verbose='error', **hcp_params)
    annots = hcp.read_annot(data_type='rest', **hcp_params)
    # construct MNE annotations
    bad_seg = (annots['segments']['all']) / raw.info['sfreq']
    annotations = mne.Annotations(bad_seg[:, 0],
                                  (bad_seg[:, 1] - bad_seg[:, 0]),
                                  description='bad')

    raw.annotations = annotations
    raw.info['bads'].extend(annots['channels']['all'])
    ica_mat = hcp.read_ica(data_type='rest', **hcp_params)
    exclude = [
        ii for ii in range(annots['ica']['total_ic_number'][0])
        if ii not in annots['ica']['brain_ic_vs']
    ]
    assert_raises(RuntimeError,
                  apply_ica_hcp,
                  raw,
                  ica_mat=ica_mat,
                  exclude=exclude)
    # XXX right now this is just a smoke test, should really check some
    # values...
    with warnings.catch_warnings(record=True):
        raw.crop(0, 1).load_data()
    apply_ica_hcp(raw, ica_mat=ica_mat, exclude=exclude)
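Example no. 5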
def test_set_eog_ecg_channels():
    """Test setting of EOG and ECG channels."""
    raw = hcp.read_raw(data_type='rest', **hcp_params)
    raw.crop(0, 1).load_data()
    assert_equal(len(mne.pick_types(raw.info, meg=False, eog=True)), 0)
    assert_equal(len(mne.pick_types(raw.info, meg=False, ecg=True)), 13)
    set_eog_ecg_channels(raw)
    # XXX Probably shouldn't still have 8 ECG channels!
    assert_equal(len(mne.pick_types(raw.info, meg=False, eog=True)), 2)
    assert_equal(len(mne.pick_types(raw.info, meg=False, ecg=True)), 8)
Example no. 6
def test_set_eog_ecg_channels():
    """Test setting of EOG and ECG channels."""
    raw = hcp.read_raw(data_type='rest', **hcp_params)
    raw.crop(0, 1).load_data()
    assert_equal(len(mne.pick_types(raw.info, meg=False, eog=True)), 0)
    assert_equal(len(mne.pick_types(raw.info, meg=False, ecg=True)), 13)
    set_eog_ecg_channels(raw)
    # XXX Probably shouldn't still have 8 ECG channels!
    assert_equal(len(mne.pick_types(raw.info, meg=False, eog=True)), 2)
    assert_equal(len(mne.pick_types(raw.info, meg=False, ecg=True)), 8)
Example no. 7
def read_hcp(subject: str, data_folder: str, run_index: int) -> mne.io.Raw:
    """
    Read a data file from the HCP dataset, return a Raw instance
    """

    raw = hcp.read_raw(subject=subject,
                       data_type='rest',
                       hcp_path=data_folder,
                       run_index=run_index)

    return raw
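A usage sketch for read_hcp; the subject ID and data folder are placeholders. Cropping before load_data keeps only part of the run in memory, as the other examples here do:

raw = read_hcp('100307', '/path/to/HCP', run_index=0)
raw.crop(tmin=0, tmax=60).load_data()  # keep only the first minute in memory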
Example no. 8
def test_read_raw_noise():
    """Test reading raw for empty room noise"""
    for run_index in tconf.run_inds[:tconf.max_runs][:2]:
        for data_type in tconf.noise_types:
            if run_index == 1:
                assert_raises(
                    ValueError, hcp.read_raw,
                    data_type=data_type, run_index=run_index, **hcp_params)
                continue
            raw = hcp.read_raw(
                data_type=data_type, run_index=run_index, **hcp_params)
            _basic_raw_checks(raw=raw)
Example no. 9
def test_read_raw_task():
    """Test reading raw for tasks"""
    for run_index in tconf.run_inds[:tconf.max_runs]:
        for data_type in tconf.task_types:
            if run_index == 2:
                assert_raises(
                    ValueError, hcp.read_raw,
                    data_type=data_type, run_index=run_index, **hcp_params)
                continue
            raw = hcp.read_raw(
                data_type=data_type, run_index=run_index, **hcp_params)
            _basic_raw_checks(raw=raw)
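Example no. 10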
def test_apply_ref_correction():
    """Test reference correction."""
    raw = hcp.read_raw(data_type='rest', run_index=0, **hcp_params)
    # raw.crop(0, 10).load_data()
    raw.load_data()
    # XXX terrible hack to have more samples.
    # The test files are too short.
    raw.append(raw.copy())
    meg_picks = mne.pick_types(raw.info, meg=True, ref_meg=False)
    orig = raw[meg_picks[0]][0][0]
    apply_ref_correction(raw)
    proc = raw[meg_picks[0]][0][0]
    assert_true(np.linalg.norm(orig) > np.linalg.norm(proc))
Example no. 11
def test_apply_ref_correction():
    """Test reference correction."""
    raw = hcp.read_raw(data_type='rest', run_index=0, **hcp_params)
    # raw.crop(0, 10).load_data()
    raw.load_data()
    # XXX terrible hack to have more samples.
    # The test files are too short.
    raw.append(raw.copy())
    meg_picks = mne.pick_types(raw.info, meg=True, ref_meg=False)
    orig = raw[meg_picks[0]][0][0]
    apply_ref_correction(raw)
    proc = raw[meg_picks[0]][0][0]
    assert_true(np.linalg.norm(orig) > np.linalg.norm(proc))
Example no. 12
def test_interpolate_missing():
    """Test interpolation of missing channels."""
    data_type = 'task_working_memory'
    raw = hcp.read_raw(data_type='task_working_memory', run_index=0,
                       **hcp_params)
    raw.load_data()
    n_chan = len(raw.ch_names)
    raw.drop_channels(['A1'])
    assert_equal(len(raw.ch_names), n_chan - 1)
    raw = interpolate_missing(raw, data_type=data_type, **hcp_params)
    assert_equal(len(raw.ch_names), n_chan)

    evoked = hcp.read_evokeds(data_type=data_type, **hcp_params)[0]
    assert_equal(len(evoked.ch_names), 243)
    evoked_int = interpolate_missing(evoked, data_type=data_type, **hcp_params)
    assert_equal(len(evoked_int.ch_names), 248)
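Example no. 13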
def test_interpolate_missing():
    """Test interpolation of missing channels."""
    data_type = 'task_working_memory'
    raw = hcp.read_raw(data_type='task_working_memory',
                       run_index=0,
                       **hcp_params)
    raw.load_data()
    n_chan = len(raw.ch_names)
    raw.drop_channels(['A1'])
    assert_equal(len(raw.ch_names), n_chan - 1)
    raw = interpolate_missing(raw, data_type=data_type, **hcp_params)
    assert_equal(len(raw.ch_names), n_chan)

    evoked = hcp.read_evokeds(data_type=data_type, **hcp_params)[0]
    assert_equal(len(evoked.ch_names), 243)
    evoked_int = interpolate_missing(evoked, data_type=data_type, **hcp_params)
    assert_equal(len(evoked_int.ch_names), 248)
Example no. 14
def test_apply_ica():
    """Test ICA application."""
    raw = hcp.read_raw(data_type='rest', verbose='error', **hcp_params)
    annots = hcp.read_annot(data_type='rest', **hcp_params)
    # construct MNE annotations
    bad_seg = (annots['segments']['all']) / raw.info['sfreq']
    annotations = mne.Annotations(
        bad_seg[:, 0], (bad_seg[:, 1] - bad_seg[:, 0]), description='bad')

    raw.annotations = annotations
    raw.info['bads'].extend(annots['channels']['all'])
    ica_mat = hcp.read_ica(data_type='rest', **hcp_params)
    exclude = [ii for ii in range(annots['ica']['total_ic_number'][0])
               if ii not in annots['ica']['brain_ic_vs']]
    assert_raises(RuntimeError, apply_ica_hcp, raw, ica_mat=ica_mat,
                  exclude=exclude)
    # XXX right now this is just a smoke test, should really check some
    # values...
    with warnings.catch_warnings(record=True):
        raw.crop(0, 1).load_data()
    apply_ica_hcp(raw, ica_mat=ica_mat, exclude=exclude)
Example no. 15
def load_one_record(data_type,
                    subject,
                    run_index,
                    sfreq=300,
                    epoch=None,
                    filter_params=[5., None],
                    n_jobs=1):
    # Load the record and correct the sensor space to get proper visualization
    print(f"subject={subject}, data_type={data_type}, run_index={run_index}, "
          f"hcp_path={HCP_DIR}")
    raw = hcp.read_raw(subject,
                       data_type=data_type,
                       run_index=run_index,
                       hcp_path=HCP_DIR,
                       verbose=0)
    raw.load_data()
    hcp.preprocessing.map_ch_coords_to_mne(raw)
    raw.pick_types(meg='mag', eog=False, stim=True)

    # filter the electrical and low frequency components
    raw.notch_filter([60, 120], n_jobs=n_jobs)
    raw.filter(*filter_params, n_jobs=n_jobs)

    # Resample to the requested sfreq
    if sfreq is not None:
        raw.resample(sfreq=sfreq, n_jobs=n_jobs)

    events = mne.find_events(raw)
    raw.pick_types(meg='mag', stim=False)
    events[:, 0] -= raw.first_samp

    # Deep copy before modifying info to avoid issues when saving EvokedArray
    info = deepcopy(raw.info)
    info['events'] = events
    info['event_id'] = np.unique(events[:, 2])

    # Return the data
    return raw.get_data(), info
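A usage sketch for load_one_record; the data type, subject, and filter settings are placeholders, and HCP_DIR is assumed to point at a local HCP MEG download:

X, info = load_one_record('task_motor', subject='100307', run_index=0,
                          sfreq=300, filter_params=[2., 45.])

Example no. 16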
src_outputs = hcp.anatomy.compute_forward_stack(
    subject=subject, subjects_dir=subjects_dir,
    hcp_path=hcp_path, recordings_path=recordings_path,
    # speed up computations here. Setting `add_dist` to True may improve the
    # accuracy.
    src_params=dict(add_dist=False),
    info_from=dict(data_type=data_type, run_index=run_index))

fwd = src_outputs['fwd']

##############################################################################
# Now we can compute the noise covariance. For this purpose we will apply
# the same filtering as was used for the computations of the ERF in the first
# place. See also :ref:`tut_reproduce_erf`.

raw_noise = hcp.read_raw(subject=subject, hcp_path=hcp_path,
                         data_type='noise_empty_room')
raw_noise.load_data()

# apply ref channel correction and drop ref channels
preproc.apply_ref_correction(raw_noise)

# Note: MNE complains on Python 2.7
raw_noise.filter(0.50, None, method='iir',
                 iir_params=dict(order=4, ftype='butter'), n_jobs=1)
raw_noise.filter(None, 60, method='iir',
                 iir_params=dict(order=4, ftype='butter'), n_jobs=1)

##############################################################################
# Note that using the empty-room noise covariance will inflate the SNR of the
# evoked response and render comparisons to `baseline` rather uninformative.
noise_cov = mne.compute_raw_covariance(raw_noise, method='empirical')
Example no. 17
def test_make_layout():
    """Test making a layout."""
    raw = hcp.read_raw(data_type='rest', **hcp_params).crop(0, 1).load_data()
    raw.pick_types()
    lout = make_hcp_bti_layout(raw.info)
    assert_equal(lout.names, raw.info['ch_names'])
Example no. 18
        trial_info['stim']['codes'][:, 6] - 1,  # time sample
        np.zeros(len(trial_info['stim']['codes'])),
        trial_info['stim']['codes'][:, 3]  # event codes
    ].astype(int)
    events = events[np.argsort(events[:, 0])]  # chronological order
    # for some reason in the HCP data the time events may not always be unique
    unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
    events = events[unique_subset]  # use diff to find first unique events

    all_events.append(events)

# now we can go ahead
evokeds = list()
for run_index, events in zip([0, 1], all_events):

    raw = hcp.read_raw(run_index=run_index, **hcp_params)
    raw.load_data()
    # apply ref channel correction and drop ref channels
    # preproc.apply_ref_correction(raw)

    annots = hcp.read_annot(run_index=run_index, **hcp_params)
    # construct MNE annotations
    bad_seg = (annots['segments']['all']) / raw.info['sfreq']
    annotations = mne.Annotations(
        bad_seg[:, 0], (bad_seg[:, 1] - bad_seg[:, 0]),
        description='bad')

    raw.annotations = annotations
    raw.info['bads'].extend(annots['channels']['all'])
    raw.pick_types(meg=True, ref_meg=False)
Example no. 19
def main(filename=None, subjid=None, trans=None, info=None, line_freq=None, 
         emptyroom_filename=None, subjects_dir=None):
    
    raw = hcp.read_raw(subjid, 'rest', hcp_path='/data/EnigmaMeg/HCP/HCP_MEG')
    raw.load_data()
    
    eraw = hcp.read_raw(subjid, 'noise_empty_room', hcp_path='/data/EnigmaMeg/HCP/HCP_MEG')
    eraw.load_data()
    
    hcp.preprocessing.apply_ref_correction(raw)
    hcp.preprocessing.apply_ref_correction(eraw)
    #Below may be useful for testing ICA components
    #ica_mat = hcp.read_ica(subjid, 'rest')
    #annotations_dict=hcp.read_annot(subjid, 'rest')
    #hcp.preprocessing.apply_ica_hcp(raw, ica_mat, annotations_dict['ica']['ecg_eog_ic'])
    
    ## Load and prefilter continuous data
    #raw=load_data(filename)
    #eraw=load_data(emptyroom_filename)
    
    if isinstance(raw, mne.io.ctf.ctf.RawCTF):
        raw.apply_gradient_compensation(3)
    
    ## Test SSS bad channel detection for non-Elekta data
    # !!!!!!!!!!!  Currently no finecal or crosstalk used  !!!!!!!!!!!!!!!
    # if filename[-3:]=='fif':
    #     raw_bads_dict = assess_bads(filename)
    #     eraw_bads_dict = assess_bads(emptyroom_filename, is_eroom=True)
        
    #     raw.info['bads']=raw_bads_dict['noisy'] + raw_bads_dict['flat']
    #     eraw.info['bads']=eraw_bads_dict['noisy'] + eraw_bads_dict['flat']
    
    resample_freq=300
    
    raw.resample(resample_freq)
    eraw.resample(resample_freq)
    
    raw.filter(0.5, 140)
    eraw.filter(0.5, 140)
    
    if line_freq is None:
        try:
            line_freq = raw.info['line_freq']  # this isn't present in all files
        except Exception:
            raise ValueError('Could not determine line frequency')
    notch_freqs = np.arange(line_freq, 
                            resample_freq/2, 
                            line_freq)
    raw.notch_filter(notch_freqs)
    
    
    ## Create Epochs and covariance 
    epochs = mne.make_fixed_length_epochs(raw, duration=4.0, preload=True)
    epochs.apply_baseline(baseline=(0,None))
    cov = mne.compute_covariance(epochs)
    
    er_epochs=mne.make_fixed_length_epochs(eraw, duration=4.0, preload=True)
    er_epochs.apply_baseline(baseline=(0,None))
    er_cov = mne.compute_covariance(er_epochs)
    
    os.environ['SUBJECTS_DIR']=subjects_dir
    src = mne.read_source_spaces(info.src_filename)
    bem = mne.read_bem_solution(info.bem_sol_filename)
    fwd = mne.make_forward_solution(epochs.info, trans, src, bem)
    
    data_info = epochs.info
    
    from mne.beamformer import make_lcmv, apply_lcmv_epochs
    filters = make_lcmv(epochs.info, fwd, cov, reg=0.01,
                        noise_cov=er_cov, pick_ori='max-power',
                        weight_norm='unit-noise-gain', rank=None)
    
    labels_lh=mne.read_labels_from_annot(subjid, parc='aparc_sub',
                                        subjects_dir=subjects_dir, hemi='lh') 
    labels_rh=mne.read_labels_from_annot(subjid, parc='aparc_sub',
                                        subjects_dir=subjects_dir, hemi='rh') 
    labels=labels_lh + labels_rh 
    
    results_stcs = apply_lcmv_epochs(epochs, filters, return_generator=True)#, max_ori_out='max_power')
    
    #Monkey patch of mne.source_estimate to perform 15 component SVD
    label_ts = mod_source_estimate.extract_label_time_course(results_stcs, 
                                                             labels, 
                                                             fwd['src'],
                                                             mode='pca15_multitaper')
    
    #Convert list of numpy arrays to ndarray (Epoch/Label/Sample)
    label_stack = np.stack(label_ts)

    # HACK: hard-coded frequency bins -- FIX
    freq_bins = np.linspace(1, 45, 177)

    #Initialize 
    label_power = np.zeros([len(labels), len(freq_bins)])  
    alpha_peak = np.zeros(len(labels))
    
    #Create PSD for each label
    for label_idx in range(len(labels)):
        print(str(label_idx))
        current_psd = label_stack[:,label_idx, :].mean(axis=0) 
        label_power[label_idx,:] = current_psd
        
        spectral_image_path = os.path.join(info.outfolder, 'Spectra_'+
                                            labels[label_idx].name + '.png')

        try:
            tmp_fmodel = calc_spec_peak(freq_bins, current_psd, 
                            out_image_path=spectral_image_path)
            
            #FIX FOR MULTIPLE ALPHA PEAKS
            potential_alpha_idx = np.where((8.0 <= tmp_fmodel.peak_params[:,0] ) & \
                                    (tmp_fmodel.peak_params[:,0] <= 12.0 ) )[0]
            if len(potential_alpha_idx) != 1:
                alpha_peak[label_idx] = np.nan  # FIX: handle multiple alpha peaks
            else:
                alpha_peak[label_idx] = tmp_fmodel.peak_params[potential_alpha_idx[0]][0]
        except:
            alpha_peak[label_idx] = np.nan  # FIX
        
    #Save the label spectrum to assemble the relative power
    freq_bin_names = [str(binval) for binval in freq_bins]
    label_spectra_dframe = pd.DataFrame(label_power, columns=freq_bin_names)
    label_spectra_dframe.to_csv(os.path.join(info.outfolder, 'label_spectra.csv'), index=False)
    # with open(os.path.join(info.outfolder, 'label_spectra.npy'), 'wb') as f:
    #     np.save(f, label_power)
    
    relative_power = label_power / label_power.sum(axis=1, keepdims=True)

    #Define bands
    bands = [[1,3], [3,6], [8,12], [13,35], [35,55]]
    band_idxs = get_freq_idx(bands, freq_bins)

    #initialize output
    band_means = np.zeros([len(labels), len(bands)]) 
    # Loop over all bands, select the indices associated with each band and average
    for mean_band, band_idx in enumerate(band_idxs):
        band_means[:, mean_band] = relative_power[:, band_idx].mean(axis=1) 
    
    output_filename = os.path.join(info.outfolder, 'Band_rel_power.csv')
    

    bands_str = [str(i) for i in bands]
    label_names = [i.name for i in labels]
    
    output_dframe = pd.DataFrame(band_means, columns=bands_str, 
                                 index=label_names)
    output_dframe['AlphaPeak'] = alpha_peak
    output_dframe.to_csv(output_filename, sep='\t')    
Example no. 20
def test_read_raw_rest():
    """Test reading raw for resting state"""
    for run_index in tconf.run_inds[:tconf.max_runs]:
        raw = hcp.read_raw(data_type='rest', run_index=run_index,
                           **hcp_params)
        _basic_raw_checks(raw=raw)
Example no. 21
    psd = psd[freqs > 0]
    plt.plot(np.log10(freqs),
             10 * np.log10(psd.ravel()),
             label=label,
             color=color)


###############################################################################
# Now we read in the data
#
# Then we plot the power spectrum of the MEG and reference channels,
# apply the reference correction and add the resulting cleaned MEG channels
# to our comparison.

raw = hcp.read_raw(subject=subject,
                   hcp_path=hcp_path,
                   run_index=run_index,
                   data_type=data_type)
raw.load_data()

# get meg and ref channels
meg_picks = mne.pick_types(raw.info, meg=True, ref_meg=False)
ref_picks = mne.pick_types(raw.info, ref_meg=True, meg=False)

# put single channel aside for comparison later
chan1 = raw[meg_picks[0]][0]

# add some plotting parameters
decim_fit = 100  # we learn a purely spatial model, we don't need all samples
decim_show = 10  # we can make plotting faster
n_fft = 2**15  # let's use long windows to see low frequencies
Example no. 22
def reprocess_the_data_from_scratch(all_events, event_id, tmin, tmax, baseline,
                                    decim):
    # now we can go ahead
    evokeds = list()
    all_epochs = list()
    for run_index, events in zip([0, 1], all_events):

        raw = hcp.read_raw(run_index=run_index, **hcp_params)
        raw.load_data()
        # apply ref channel correction and drop ref channels
        # preproc.apply_ref_correction(raw)

        annots = hcp.read_annot(run_index=run_index, **hcp_params)
        # construct MNE annotations
        bad_seg = (annots['segments']['all']) / raw.info['sfreq']
        annotations = mne.Annotations(bad_seg[:, 0],
                                      (bad_seg[:, 1] - bad_seg[:, 0]),
                                      description='bad')

        raw.annotations = annotations
        raw.info['bads'].extend(annots['channels']['all'])
        raw.pick_types(meg=True, ref_meg=False)

        #  Note: MNE complains on Python 2.7
        raw.filter(0.50,
                   None,
                   method='iir',
                   iir_params=dict(order=4, ftype='butter'),
                   n_jobs=1)
        raw.filter(None,
                   60,
                   method='iir',
                   iir_params=dict(order=4, ftype='butter'),
                   n_jobs=1)

        # read ICA and remove EOG ECG
        # note that the HCP ICA assumes that bad channels have already been removed
        ica_mat = hcp.read_ica(run_index=run_index, **hcp_params)

        # Exclude the ECG/EOG components, keeping only the brain ICs
        exclude = annots['ica']['ecg_eog_ic']
        preproc.apply_ica_hcp(raw, ica_mat=ica_mat, exclude=exclude)

        # now we can epoch
        events = np.sort(events, 0)
        epochs = mne.Epochs(raw,
                            events=events[events[:, 2] == 1],
                            event_id=event_id,
                            tmin=tmin,
                            tmax=tmax,
                            reject=None,
                            baseline=baseline,
                            decim=decim,
                            preload=True)

        all_epochs.append(epochs)
        evoked = epochs.average()
        # now we need to add back the missing channels for comparison across runs.
        evoked = preproc.interpolate_missing(evoked, **hcp_params)
        evokeds.append(evoked)
    return all_epochs, evokeds
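A usage sketch for reprocess_the_data_from_scratch; all_events comes from the trial-info parsing shown in Example no. 18, and the epoching parameters below are placeholders rather than values from the original pipeline:

all_epochs, evokeds = reprocess_the_data_from_scratch(
    all_events, event_id=dict(stim=1), tmin=-0.5, tmax=1.0,
    baseline=(-0.5, 0), decim=4)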
Example no. 23
def test_make_layout():
    """Test making a layout."""
    raw = hcp.read_raw(data_type='rest', **hcp_params).crop(0, 1).load_data()
    raw.pick_types()
    lout = make_hcp_bti_layout(raw.info)
    assert_equal(lout.names, raw.info['ch_names'])
Example no. 24
                       noverlap=int(NFFT * 0.8))
    freqs = freqs[freqs > 0]
    psd = psd[freqs > 0]
    plt.plot(np.log10(freqs), 10 * np.log10(psd.ravel()), label=label,
             color=color)


###############################################################################
# Now we read in the data
#
# Then we plot the power spectrum of the MEG and reference channels,
# apply the reference correction and add the resulting cleaned MEG channels
# to our comparison.


raw = hcp.read_raw(subject=subject, hcp_path=hcp_path,
                   run_index=run_index, data_type=data_type)
raw.load_data()

# get meg and ref channels
meg_picks = mne.pick_types(raw.info, meg=True, ref_meg=False)
ref_picks = mne.pick_types(raw.info, ref_meg=True, meg=False)


# put single channel aside for comparison later
chan1 = raw[meg_picks[0]][0]

# add some plotting parameters
decim_fit = 100  # we learn a purely spatial model, we don't need all samples
decim_show = 10  # we can make plotting faster
n_fft = 2 ** 15  # let's use long windows to see low frequencies
Example no. 25
        trial_info['stim']['codes'][:, 6] - 1,  # time sample
        np.zeros(len(trial_info['stim']['codes'])),
        trial_info['stim']['codes'][:, 3]  # event codes
    ].astype(int)
    events = events[np.argsort(events[:, 0])]  # chronological order
    # for some reason in the HCP data the time events may not always be unique
    unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
    events = events[unique_subset]  # use diff to find first unique events

    all_events.append(events)

# now we can go ahead
evokeds = list()
for run_index, events in zip([0, 1], all_events):

    raw = hcp.read_raw(run_index=run_index, **hcp_params)
    raw.load_data()
    # apply ref channel correction and drop ref channels
    # preproc.apply_ref_correction(raw)

    annots = hcp.read_annot(run_index=run_index, **hcp_params)
    # construct MNE annotations
    bad_seg = (annots['segments']['all']) / raw.info['sfreq']
    annotations = mne.Annotations(
        bad_seg[:, 0], (bad_seg[:, 1] - bad_seg[:, 0]),
        description='bad')

    raw.annotations = annotations
    raw.info['bads'].extend(annots['channels']['all'])
    raw.pick_types(meg=True, ref_meg=False)
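Example no. 26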
#     # speed up computations here. Setting `add_dist` to True may improve the
#     # accuracy.
#     src_params=dict(add_dist=False),
#     info_from=dict(data_type=task, run_index=run_index))
#
# fwd = src_outputs['fwd']

fwd = mne.read_forward_solution(fwd_fname)

##############################################################################
# Now we can compute the noise covariance. For this purpose we will apply
# the same filtering as was used for the computations of the ERF in the first
# place. See also :ref:`tut_reproduce_erf`.

if not op.isfile(noise_cov_fname):
    raw_noise = hcp.read_raw(subject=subject, hcp_path=HCP_DIR,
                             data_type='noise_empty_room')
    raw_noise.load_data()

    # apply ref channel correction and drop ref channels
    hcp.preprocessing.apply_ref_correction(raw_noise)

    # Note: MNE complains on Python 2.7
    raw_noise.filter(0.50, None, method='iir',
                     iir_params=dict(order=4, ftype='butter'), n_jobs=n_jobs)
    raw_noise.filter(None, 60, method='iir',
                     iir_params=dict(order=4, ftype='butter'), n_jobs=n_jobs)

    ##############################################################################
    # Note that using the empty-room noise covariance will inflate the SNR of the
    # evoked response and render comparisons to `baseline` rather uninformative.
    noise_cov = mne.compute_raw_covariance(raw_noise, method='empirical')
Example no. 27
        hcp_path=hcp_path,
        recordings_path=recordings_path,
        # speed up computations here. Setting add_dist to True may improve the accuracy
        src_params=dict(add_dist=False),
        info_from=dict(data_type=data_type, run_index=run_index))
    fwd = src_outputs['fwd']
    del src_outputs
    #=================================================================
    # just using baseline to compute the noise cov
    # after this, using empty room noise
    #noise_cov = mne.compute_covariance(hcp_epochs, tmax=-0.5,method=['shrunk', 'empirical'])
    #=================================================================
    # Now we can compute the noise covariance. For this purpose we will apply
    # the same filtering as was used for the computations of the ERF
    raw_noise = hcp.read_raw(subject=subject,
                             hcp_path=hcp_path,
                             data_type='noise_empty_room')
    raw_noise.load_data()
    # apply ref channel correction and drop ref channels
    preproc.apply_ref_correction(raw_noise)

    # Note: MNE complains on Python 2.7
    raw_noise.filter(0.50,
                     None,
                     method='iir',
                     iir_params=dict(order=4, ftype='butter'),
                     n_jobs=1)
    raw_noise.filter(None,
                     60,
                     method='iir',
                     iir_params=dict(order=4, ftype='butter'),
Example no. 28
def compute_src_label_ts(subject,
                         crop_to=[0, 250],
                         resample_to=100.,
                         bads=None,
                         mag_reject=5e-12,
                         win_len=2000,
                         n_wins=11,
                         verbose=None,
                         lambda2=1. / 9.,
                         inv_method='dSPM',
                         extract_ts_mode='mean_flip'):
    """
    Compute source label time series
    """
    """
    Compute anatomy
    """

    hcp.make_mne_anatomy(subject,
                         subjects_dir=subjects_dir,
                         hcp_path=hcp_path,
                         recordings_path=hcp_path)
    """
    Read surface labels
    """
    labels = read_labels_from_annot(subject,
                                    parc='aparc',
                                    subjects_dir=subjects_dir)
    labels_fsav = read_labels_from_annot('fsaverage',
                                         parc='aparc',
                                         subjects_dir=subjects_dir)
    """
    Read raw data
    """

    raw = hcp.read_raw(subject=subject,
                       data_type=data_type,
                       hcp_path=hcp_path,
                       run_index=run_index)

    raw.load_data()

    raw.crop(crop_to[0], crop_to[1])

    raw.resample(resample_to)

    raw.info['bads'] = bads if bads is not None else []

    hcp.preprocessing.set_eog_ecg_channels(raw)

    hcp.preprocessing.apply_ref_correction(raw)

    info = raw.info.copy()

    raw.info['projs'] = []

    ecg_ave = create_ecg_epochs(raw).average()

    eog_ave = create_eog_epochs(raw).average()

    ssp_eog, _ = compute_proj_eog(raw,
                                  n_grad=1,
                                  n_mag=1,
                                  average=True,
                                  reject=dict(mag=mag_reject))
    raw.add_proj(ssp_eog, remove_existing=True)

    n_fft = next_fast_len(int(round(4 * raw.info['sfreq'])))

    sfreq = raw.info['sfreq']
    """
    Compute forward model
    """
    src_outputs = hcp.anatomy.compute_forward_stack(
        subject=subject,
        subjects_dir=subjects_dir,
        hcp_path=hcp_path,
        recordings_path=hcp_path,
        src_params=dict(add_dist=False),
        info_from=dict(data_type=data_type, run_index=run_index))
    fwd = src_outputs['fwd']
    """
    Compute noise covariance
    """
    raw_noise = hcp.read_raw(subject=subject,
                             hcp_path=hcp_path,
                             data_type='noise_empty_room')
    raw_noise.load_data()
    hcp.preprocessing.apply_ref_correction(raw_noise)
    raw_noise.add_proj(ssp_eog)
    noise_cov = compute_raw_covariance(raw_noise, method='oas')
    """
    Compute inverse operator
    """

    raw.info = info
    inv_op = make_inverse_operator(raw.info,
                                   forward=fwd,
                                   noise_cov=noise_cov,
                                   verbose=verbose)
    """
    Compute source activity
    """

    wins = [[0, win_len]]
    for i in range(n_wins):
        new_wins = [
            wins[0][0] + (win_len * (i + 1)), wins[0][1] + (win_len * (i + 1))
        ]
        wins.append(new_wins)

    raw_srcs = []
    for win in wins:
        res = apply_inverse_raw(raw,
                                inv_op,
                                lambda2=lambda2,
                                method=inv_method,
                                label=None,
                                start=win[0],
                                stop=win[1],
                                nave=1,
                                time_func=None,
                                pick_ori=None,
                                buffer_size=None,
                                prepared=False,
                                method_params=None,
                                verbose=verbose)
        raw_srcs.append(res)
    """
    Compute source label time series
    """
    src = inv_op['src']
    label_ts = extract_label_time_course(raw_srcs,
                                         labels,
                                         src,
                                         mode=extract_ts_mode,
                                         return_generator=False)

    return label_ts, sfreq
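A usage sketch for compute_src_label_ts; the subject ID is a placeholder, and subjects_dir, hcp_path, data_type, and run_index are assumed to be module-level globals, as the function body implies:

label_ts, sfreq = compute_src_label_ts('100307', crop_to=[0, 60],
                                       resample_to=100., bads=[])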
Example no. 29
for subj_fold in dirs:
    hcp_params['subject'] = subj_fold

    # Construct folder for saving epochs
    epo_fold = op.join(hcp_path, subj_fold, 'epochs')
    check_and_create_dir(epo_fold)

    ###########################################################################
    # Construct epochs

    for run_index in rest_params['runs']:

        hcp_params['run_index'] = run_index

        raw = hcp.read_raw(**hcp_params)
        raw.load_data()

        # Make 1 second events; assign event code from params, shouldn't
        #     overlap with the motor task 'fixate' events
        events = make_events(raw, id=rest_params['event_id']['rest'],
                             duration=1.)

        #################
        # Preprocess data
        raw, annots = preproc_annot_filter(subj_fold, raw, hcp_params,
                                           apply_ref_correction=rest_params['ref_correction'])

        #################
        # Epoch data
Example no. 30
        mne.write_source_spaces(op.join(src_fold, 'subject-src.fif'),
                                src_outputs['src_subject'])
        mne.write_source_spaces(op.join(src_fold, 'fsaverage-src.fif'),
                                src_outputs['src_fsaverage'])
        mne.write_forward_solution(op.join(hcp_path, subj_fold, 'forward',
                                           '%s-meg-fwd.fif' % subj_fold),
                                   src_outputs['fwd'], overwrite=True)

    if True:
        # Construct SSP projectors using all raw objects
        raw_list = []
        hcp_params = dict(hcp_path=hcp_path, subject=subj_fold)

        for run_i in rest_params['runs']:
            hcp_params.update(dict(data_type='rest', run_index=run_i))
            raw = hcp.read_raw(**hcp_params)
            raw.load_data()

            annots = hcp.read_annot(**hcp_params)
            bad_seg = (annots['segments']['all']) / raw.info['sfreq']
            annotations = mne.Annotations(
                bad_seg[:, 0], (bad_seg[:, 1] - bad_seg[:, 0]),
                description='bad')

            raw.annotations = annotations

            raw_list.append(raw)

        for run_i in motor_params['runs']:
            hcp_params.update(dict(data_type='task_motor', run_index=run_i))
            raw = hcp.read_raw(**hcp_params)