Example #1
def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
              event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
    """Aux function for testing GAT viz."""
    with warnings.catch_warnings(record=True):  # deprecated
        gat = GeneralizationAcrossTime()
    raw = read_raw_fif(raw_fname)
    raw.add_proj([], remove_existing=True)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    decim = 30
    # Test on time generalization within one condition
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        preload=True, decim=decim)
    epochs_list = [epochs[k] for k in event_id]
    equalize_epoch_counts(epochs_list)
    epochs = concatenate_epochs(epochs_list)

    # Test default running
    with warnings.catch_warnings(record=True):  # deprecated
        gat = GeneralizationAcrossTime(test_times=test_times)
    gat.fit(epochs)
    gat.score(epochs)
    return gat
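All of the examples on this page revolve around the same recipe: split an Epochs object by condition, call equalize_epoch_counts so every condition contributes the same number of trials, then recombine with concatenate_epochs. Below is a minimal, self-contained sketch of that recipe; the file path, stim channel, and event IDs follow MNE's sample-data conventions and are placeholders, not part of the example above.

import mne
from mne.epochs import equalize_epoch_counts

# Placeholder path, stim channel, and event IDs (MNE sample-data style).
raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
events = mne.find_events(raw, stim_channel='STI 014')
epochs = mne.Epochs(raw, events, event_id=dict(aud_l=1, vis_l=3),
                    tmin=-0.2, tmax=0.5, preload=True)

# Split by condition, equalize trial counts in place, then recombine.
epochs_list = [epochs[k] for k in epochs.event_id]
equalize_epoch_counts(epochs_list)
print([len(e) for e in epochs_list])  # counts are now equal
epochs_eq = mne.concatenate_epochs(epochs_list)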
Example #2
    def prepare_data(self, dataset, subjects):
        """Prepare data for classification."""
        if len(dataset.event_id) < 3:
            # this paradigm requires at least three classes
            raise ValueError("Dataset %s contains fewer than three classes"
                             % dataset.name)

        event_id = dataset.event_id
        epochs = self._epochs(dataset, subjects, event_id)
        groups = []
        full_epochs = []

        for ii, epoch in enumerate(epochs):
            epochs_list = [epoch[k] for k in event_id]
            # equalize for accuracy
            equalize_epoch_counts(epochs_list)
            ep = concatenate_epochs(epochs_list)
            groups.extend([ii] * len(ep))
            full_epochs.append(ep)

        epochs = concatenate_epochs(full_epochs)
        X = epochs.get_data() * 1e6
        y = epochs.events[:, -1]
        groups = np.asarray(groups)
        return X, y, groups
Example #3
def get_Xy_balanced(epochs, conditions, n_sample=100):
    """Return a balanced (X, y) pair for two conditions."""
    epochs1 = epochs[conditions[0]]
    epochs2 = epochs[conditions[1]]
    equalize_epoch_counts([epochs1, epochs2], method='truncate')
    X1 = epochs1.get_data()[:n_sample, ...]
    X2 = epochs2.get_data()[:n_sample, ...]
    y1 = [0] * len(X1)
    y2 = [1] * len(X2)
    return np.vstack([X1, X2]), np.asarray(y1 + y2)
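A hypothetical call to the helper above might look like the following; `epochs` and the condition names are placeholders for whatever data the surrounding project loads.

# Hypothetical usage of get_Xy_balanced; names are placeholders.
X, y = get_Xy_balanced(epochs, ['aud_l', 'vis_l'], n_sample=50)
print(X.shape, y.shape)  # up to 2 * n_sample trials, labels 0 and 1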
Exemplo n.º 4
0
def test_epoch_eq():
    """Test epoch count equalization and condition combining
    """
    # equalizing epochs objects
    epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
    equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
    assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
    epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
    assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
    assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])

    # equalizing conditions
    epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
                    tmin, tmax, picks=picks)
    old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    epochs.equalize_event_counts(['a', 'b'], copy=False)
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    # now with two conditions collapsed
    old_shapes = new_shapes
    epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])

    # now let's combine conditions
    old_shapes = new_shapes
    epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
    assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
                  {'ab': 1})

    combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
    caught = 0
    for key in ['a', 'b']:
        try:
            epochs[key]
        except KeyError:
            caught += 1
    assert_true(caught == 2)
    assert_true(not np.any(epochs.events[:, 2] == 1))
    assert_true(not np.any(epochs.events[:, 2] == 2))
    epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
    assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
                                     epochs.events[:, 2] == 34)))
    assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
    assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
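The test above exercises the older equalize_event_counts API, which took copy=False and returned a tuple. In recent MNE releases the method modifies the Epochs object in place and returns it together with the indices of the dropped epochs; a sketch of the modern call, assuming `epochs` contains conditions 'a' and 'b':

# Modern MNE: equalize within one Epochs object; returns (epochs, dropped_indices).
epochs, dropped = epochs.equalize_event_counts(['a', 'b'])
assert len(epochs['a']) == len(epochs['b'])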
Example #5
def get_Xy_balanced(epochs, conditions, n_sample):  # with shuffling
    epochs1 = epochs[conditions[0]]
    epochs2 = epochs[conditions[1]]
    equalize_epoch_counts([epochs1, epochs2], method='truncate')
    if n_sample is None:
        X1 = epochs1.get_data()
        X2 = epochs2.get_data()
    else:
        X1 = epochs1.get_data()
        X2 = epochs2.get_data()
        np.random.shuffle(X1)  # shuffle along the epoch axis before subsampling
        np.random.shuffle(X2)
        X1 = X1[:n_sample, ...]
        X2 = X2[:n_sample, ...]
    y1 = [0] * len(X1)
    y2 = [1] * len(X2)
    return np.vstack([X1, X2]), np.asarray(y1 + y2)
Example #6
    def extract_data_from_cont(self, ep_list, event_id):
        skip = False
        # build an independent list per key ([[]] * n would share one list)
        event_epochs = {key: [] for key in event_id}
        for epoch in ep_list:
            for key in event_id:
                if key in epoch.event_id:
                    event_epochs[key].append(epoch[key])
        all_events = []
        for key in event_id:
            if len(event_epochs[key]) > 0:
                all_events.append(concatenate_epochs(event_epochs[key]))
        # equalize for accuracy
        if len(all_events) > 1:
            equalize_epoch_counts(all_events)
        ep = concatenate_epochs(all_events)
        # previously multiplied data by 1e6
        X, y = ep.get_data(), ep.events[:, -1]
        return X, y
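The original version of this example built the per-condition dictionary with dict(zip(event_id.keys(), [[]] * len(event_id))), which makes every key share a single list object; the version above gives each key its own list. A quick demonstration of the pitfall:

shared = dict(zip(['a', 'b'], [[]] * 2))
shared['a'].append(1)
print(shared['b'])  # [1] -- both keys reference the same list

independent = {k: [] for k in ['a', 'b']}
independent['a'].append(1)
print(independent['b'])  # [] -- each key owns its own list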
Example #7
    def prepare_data(self, subjects):
        """Prepare data for classification."""
        event_id = dict(rest=1, left_hand=2, right_hand=3)
        epochs = self._epochs(subjects, event_id)
        # since we are using accuracy, we have to equalize the number of
        # events
        groups = []
        full_epochs = []
        for ii, epoch in enumerate(epochs):
            epochs_list = [epoch[k] for k in event_id]
            equalize_epoch_counts(epochs_list)
            ep = concatenate_epochs(epochs_list)
            groups.extend([ii] * len(ep))
            full_epochs.append(ep)
        epochs = concatenate_epochs(full_epochs)
        X = epochs.get_data() * 1e6
        y = epochs.events[:, -1]
        return X, y, groups
Example #8
    def prepare_data(self, dataset, subjects):
        """Prepare data for classification."""

        event_id = dataset.event_id
        epochs = self._epochs(dataset, subjects, event_id)
        groups = []
        full_epochs = []

        for ii, epoch in enumerate(epochs):
            epochs_list = [epoch[k] for k in event_id]
            # equalize for accuracy
            equalize_epoch_counts(epochs_list)
            ep = concatenate_epochs(epochs_list)
            groups.extend([ii] * len(ep))
            full_epochs.append(ep)

        epochs = concatenate_epochs(full_epochs)
        # X = epochs.get_data() * 1e6  # microvolt scaling disabled here
        X = epochs.get_data()
        y = epochs.events[:, -1]
        groups = np.asarray(groups)
        return X, y, groups
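Several of the prepare_data variants on this page multiply the data by 1e6. MNE returns signals in SI units (volts for EEG), so the factor rescales to microvolts, which keeps downstream covariance estimates away from numerically tiny values; Example #8 deliberately leaves the data in volts. The choice is a convention, not a requirement:

X_volts = epochs.get_data()   # SI units (volts for EEG channels)
X_uv = X_volts * 1e6          # microvolts, if the classifier expects them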
Example #9
def _get_data(tmin=-0.2,
              tmax=0.5,
              event_id=dict(aud_l=1, vis_l=3),
              event_id_gen=dict(aud_l=2, vis_l=4),
              test_times=None):
    """Aux function for testing GAT viz"""
    gat = GeneralizationAcrossTime()
    raw = read_raw_fif(raw_fname, preload=False, add_eeg_ref=False)
    raw.add_proj([], remove_existing=True)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg='mag',
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    picks = picks[1:13:3]
    decim = 30
    # Test on time generalization within one condition
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw,
                        events,
                        event_id,
                        tmin,
                        tmax,
                        picks=picks,
                        baseline=(None, 0),
                        preload=True,
                        decim=decim,
                        add_eeg_ref=False)
    epochs_list = [epochs[k] for k in event_id]
    equalize_epoch_counts(epochs_list)
    epochs = concatenate_epochs(epochs_list)

    # Test default running
    gat = GeneralizationAcrossTime(test_times=test_times)
    gat.fit(epochs)
    gat.score(epochs)
    return gat
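Example #9 passes add_eeg_ref=False, an argument that was removed from read_raw_fif and Epochs in later MNE releases. The modern way to control the average EEG reference is an explicit call, sketched here:

# Modern replacement for add_eeg_ref (argument removed in later MNE releases):
raw.set_eeg_reference('average', projection=True)  # add as projector; omit to skip referencing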
Example #10
def test_epoch_eq():
    """Test epoch count equalization and condition combining
    """
    # equalizing epochs objects
    epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    epochs_1.drop_bad_epochs()  # make sure drops are logged
    assert_true(
        len([l for l in epochs_1.drop_log if not l]) == len(epochs_1.events))
    drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l
                 for l in epochs_1.drop_log]
    assert_true(drop_log1 == drop_log2)
    assert_true(
        len([l for l in epochs_1.drop_log if not l]) == len(epochs_1.events))
    assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
    equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
    assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
    epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
    assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
    assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])

    # equalizing conditions
    epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
                    tmin, tmax, picks=picks, reject=reject)
    epochs.drop_bad_epochs()  # make sure drops are logged
    assert_true(
        len([l for l in epochs.drop_log if not l]) == len(epochs.events))
    drop_log1 = deepcopy(epochs.drop_log)
    old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    epochs.equalize_event_counts(['a', 'b'], copy=False)
    # undo the eq logging
    drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l
                 for l in epochs.drop_log]
    assert_true(drop_log1 == drop_log2)

    assert_true(
        len([l for l in epochs.drop_log if not l]) == len(epochs.events))
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    # now with two conditions collapsed
    old_shapes = new_shapes
    epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])

    # now let's combine conditions
    old_shapes = new_shapes
    epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
    new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
    assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
    assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'], {'ab': 1})

    combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
    caught = 0
    for key in ['a', 'b']:
        try:
            epochs[key]
        except KeyError:
            caught += 1
    assert_true(caught == 2)
    assert_true(not np.any(epochs.events[:, 2] == 1))
    assert_true(not np.any(epochs.events[:, 2] == 2))
    epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
    assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
                                     epochs.events[:, 2] == 34)))
    assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
    assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
Example #11
def SN_functional_connectivity_bands(i, method):
    s = time.time()
    meg = subjects[i]
    sub_to = MRI_sub[i][1:15]
    stc_SD_file_name = (os.path.expanduser('~') +
                        '/my_semnet/json_files/connectivity/stc_' + method +
                        '200_equalized_bands_SD_sub' + str(i) + '.json')
    stc_LD_file_name = (os.path.expanduser('~') +
                        '/my_semnet/json_files/connectivity/stc_' + method +
                        '200_equalized_bands_LD_sub' + str(i) + '.json')
    # stc_SD_file_name = os.path.expanduser('~') + '/my_semnet/json_files/connectivity/stc_' + method + 'bl_bands_SD_sub' + str(i) + '.json'
    # stc_LD_file_name = os.path.expanduser('~') + '/my_semnet/json_files/connectivity/stc_' + method + 'bl_bands_LD_sub' + str(i) + '.json'

    morphed_labels = mne.morph_labels(SN_ROI, subject_to=data_path + sub_to,
                                      subject_from='fsaverage',
                                      subjects_dir=data_path)

    # Reading epochs
    epo_name_SD = data_path + meg + 'block_SD_words_epochs-epo.fif'
    epo_name_LD = data_path + meg + 'block_LD_words_epochs-epo.fif'

    epochs_sd = mne.read_epochs(epo_name_SD, preload=True)
    epochs_ld = mne.read_epochs(epo_name_LD, preload=True)

    epochs_SD = epochs_sd['words'].copy().resample(500)
    epochs_LD = epochs_ld['words'].copy().resample(500)

    equalize_epoch_counts([epochs_SD, epochs_LD])
    # Reading inverse operator
    inv_fname_SD = data_path + meg + 'InvOp_SD_EMEG-inv.fif'
    inv_fname_LD = data_path + meg + 'InvOp_LD_EMEG-inv.fif'

    inv_op_SD = read_inverse_operator(inv_fname_SD)
    inv_op_LD = read_inverse_operator(inv_fname_LD)

    stc_sd = apply_inverse_epochs(epochs_SD,
                                  inv_op_SD,
                                  lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
    stc_ld = apply_inverse_epochs(epochs_LD,
                                  inv_op_LD,
                                  lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
    src_SD = inv_op_SD['src']
    src_LD = inv_op_LD['src']
    # Construct indices to estimate connectivity between the label time course
    # and all source space time courses
    vertices_SD = [src_SD[j]['vertno'] for j in range(2)]
    n_signals_tot = 1 + len(vertices_SD[0]) + len(vertices_SD[1])
    indices = seed_target_indices([0], np.arange(1, n_signals_tot))

    morph_SD = mne.compute_source_morph(src=inv_op_SD['src'],
                                        subject_from=sub_to,
                                        subject_to=C.subject_to,
                                        spacing=C.spacing_morph,
                                        subjects_dir=C.data_path)

    morph_LD = mne.compute_source_morph(src=inv_op_LD['src'],
                                        subject_from=sub_to,
                                        subject_to=C.subject_to,
                                        spacing=C.spacing_morph,
                                        subjects_dir=C.data_path)

    for win in np.arange(0, len(C.con_time_window) - 1):
        print('[i,win]: ', i, win)

        t_min = C.con_time_window[win]
        t_max = C.con_time_window[win + 1]
        stc_SD = []
        stc_LD = []

        for n in np.arange(0, len(stc_sd)):
            stc_SD.append(stc_sd[n].copy().crop(t_min * 1e-3, t_max * 1e-3))

        for n in np.arange(0, len(stc_ld)):
            stc_LD.append(stc_ld[n].copy().crop(t_min * 1e-3, t_max * 1e-3))

        for k in np.arange(0, 6):
            print('[i,win,k]: ', i, win, k)
            morphed_labels[k].name = C.rois_labels[k]

            seed_ts_sd = mne.extract_label_time_course(
                stc_SD, morphed_labels[k], src_SD, mode='mean_flip',
                return_generator=False)
            seed_ts_ld = mne.extract_label_time_course(
                stc_LD, morphed_labels[k], src_LD, mode='mean_flip',
                return_generator=False)

            for f in np.arange(0, len(C.con_freq_band) - 1):
                print('[i,win,k,f]: ', i, win, k, f)
                f_min = C.con_freq_band[f]
                f_max = C.con_freq_band[f + 1]
                print(f_min, f_max)

                comb_ts_sd = zip(seed_ts_sd, stc_SD)
                comb_ts_ld = zip(seed_ts_ld, stc_LD)

                con_SD, freqs, times, n_epochs, n_tapers = spectral_connectivity(
                    comb_ts_sd,
                    method=method,
                    mode='fourier',
                    indices=indices,
                    sfreq=500,
                    fmin=f_min,
                    fmax=f_max,
                    faverage=True,
                    n_jobs=10)

                con_LD, freqs, times, n_epochs, n_tapers = spectral_connectivity(
                    comb_ts_ld,
                    method=method,
                    mode='fourier',
                    indices=indices,
                    sfreq=500,
                    fmin=f_min,
                    fmax=f_max,
                    faverage=True,
                    n_jobs=10)

                con_stc_SD = mne.SourceEstimate(con_SD, vertices=vertices_SD,
                                                tmin=t_min * 1e-3,
                                                tstep=2e-3, subject=sub_to)

                con_stc_LD = mne.SourceEstimate(con_LD, vertices=vertices_SD,
                                                tmin=t_min * 1e-3,
                                                tstep=2e-3, subject=sub_to)

                stc_total_SD[win][k][f] = morph_SD.apply(con_stc_SD)
                stc_total_LD[win][k][f] = morph_LD.apply(con_stc_LD)

    with open(stc_SD_file_name, "wb") as fp:  # pickling
        pickle.dump(stc_total_SD, fp)

    with open(stc_LD_file_name, "wb") as fp:  # pickling
        pickle.dump(stc_total_LD, fp)
    e = time.time()
    print(e - s)
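The seed-to-whole-brain indexing used above is easy to check in isolation: seed_target_indices simply pairs every seed with every target. (Newer MNE versions move this helper and spectral_connectivity to the separate mne_connectivity package.)

import numpy as np
from mne.connectivity import seed_target_indices  # mne_connectivity in newer releases

indices = seed_target_indices([0], np.arange(1, 5))  # one seed, four targets
print(indices)  # (array([0, 0, 0, 0]), array([1, 2, 3, 4]))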
Example #12
def SN_functional_connectivity_betweenROIs(i, method):
    s = time.time()
    meg = subjects[i]
    sub_to = MRI_sub[i][1:15]
    con_SD_file_name = (os.path.expanduser('~') +
                        '/my_semnet/json_files/connectivity/con_labels_' +
                        method + '_bands_SD_sub' + str(i) + '.json')
    con_LD_file_name = (os.path.expanduser('~') +
                        '/my_semnet/json_files/connectivity/con_labels_' +
                        method + '_bands_LD_sub' + str(i) + '.json')

    morphed_labels = mne.morph_labels(SN_ROI, subject_to=data_path + sub_to,
                                      subject_from='fsaverage',
                                      subjects_dir=data_path)

    # Reading epochs
    epo_name_SD = data_path + meg + 'block_SD_words_epochs-epo.fif'
    epo_name_LD = data_path + meg + 'block_LD_words_epochs-epo.fif'

    epochs_sd = mne.read_epochs(epo_name_SD, preload=True)
    epochs_ld = mne.read_epochs(epo_name_LD, preload=True)

    epochs_SD = epochs_sd['words'].copy().resample(500)
    epochs_LD = epochs_ld['words'].copy().resample(500)

    # Equalize trial counts to eliminate bias
    equalize_epoch_counts([epochs_SD, epochs_LD])

    # Reading inverse operator
    inv_fname_SD = data_path + meg + 'InvOp_SD_EMEG-inv.fif'
    inv_fname_LD = data_path + meg + 'InvOp_LD_EMEG-inv.fif'

    inv_op_SD = read_inverse_operator(inv_fname_SD)
    inv_op_LD = read_inverse_operator(inv_fname_LD)

    stc_sd = apply_inverse_epochs(epochs_SD,
                                  inv_op_SD,
                                  lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
    stc_ld = apply_inverse_epochs(epochs_LD,
                                  inv_op_LD,
                                  lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
    times = epochs_SD.times
    stc_SD_t = []
    stc_LD_t = []

    src_SD = inv_op_SD['src']
    src_LD = inv_op_LD['src']

    for n in np.arange(0, len(stc_sd)):
        stc_SD_t.append(stc_baseline_correction(stc_sd[n], times))
        stc_LD_t.append(stc_baseline_correction(stc_ld[n], times))

    for win in np.arange(0, len(C.con_time_window) - 1):
        print('[i,win]: ', i, win)

        t_min = C.con_time_window[win]
        t_max = C.con_time_window[win + 1]
        stc_SD = []
        stc_LD = []
        for n in np.arange(0, len(stc_sd)):
            stc_SD.append(stc_SD_t[n].copy().crop(t_min * 1e-3, t_max * 1e-3))
            stc_LD.append(stc_LD_t[n].copy().crop(t_min * 1e-3, t_max * 1e-3))

        for k in np.arange(0, 6):
            # print('[i,win,k]: ',i,win,k)
            morphed_labels[k].name = C.rois_labels[k]

        labels_ts_sd = mne.extract_label_time_course(
            stc_SD, morphed_labels, src_SD, mode='mean_flip',
            return_generator=False)
        labels_ts_ld = mne.extract_label_time_course(
            stc_LD, morphed_labels, src_LD, mode='mean_flip',
            return_generator=False)

        for f in np.arange(0, len(C.con_freq_band) - 1):
            print('[i,win,f]: ', i, win, f)
            f_min = C.con_freq_band[f]
            f_max = C.con_freq_band[f + 1]
            print(f_min, f_max)

            con_SD, freqs, times, n_epochs, n_tapers = spectral_connectivity(
                labels_ts_sd,
                method=method,
                mode='fourier',
                sfreq=500,
                fmin=f_min,
                fmax=f_max,
                faverage=True,
                n_jobs=10)

            con_LD, freqs, times, n_epochs, n_tapers = spectral_connectivity(
                labels_ts_ld,
                method=method,
                mode='fourier',
                sfreq=500,
                fmin=f_min,
                fmax=f_max,
                faverage=True,
                n_jobs=10)

            con_labels_SD[win][f] = con_SD.reshape(6, 6)
            con_labels_LD[win][f] = con_LD.reshape(6, 6)

    with open(con_SD_file_name, "wb") as fp:  # pickling
        pickle.dump(con_labels_SD, fp)

    with open(con_LD_file_name, "wb") as fp:  # pickling
        pickle.dump(con_labels_LD, fp)
    e = time.time()
    print(e - s)
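The reshape(6, 6) at the end relies on how spectral_connectivity returns dense results: with no indices argument it fills the lower triangle of an (n_signals, n_signals, n_bands) array, and with faverage=True over a single band the last axis has length 1. A shape check echoing the call in the loop above, under those assumptions:

con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
    labels_ts_sd, method=method, mode='fourier', sfreq=500,
    fmin=f_min, fmax=f_max, faverage=True, n_jobs=1)
print(con.shape)  # (6, 6, 1): dense lower-triangular output, one averaged band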
Example #13
    morphed_labels = mne.morph_labels(SN_ROI, subject_to=data_path + sub_to,
                                      subject_from='fsaverage',
                                      subjects_dir=data_path)

    # Reading epochs
    epo_name_SD = data_path + meg + 'block_SD_words_epochs-epo.fif'
    epo_name_LD = data_path + meg + 'block_LD_words_epochs-epo.fif'

    epochs_sd = mne.read_epochs(epo_name_SD, preload=True)
    epochs_ld = mne.read_epochs(epo_name_LD, preload=True)

    epochs_SD = epochs_sd['words'].copy().resample(500)
    epochs_LD = epochs_ld['words'].copy().resample(500)

    # Equalize trial counts to eliminate bias (which would otherwise be
    # introduced by the abs() performed below)
    equalize_epoch_counts([epochs_SD, epochs_LD])

    # Reading inverse operator
    inv_fname_SD = data_path + meg + 'InvOp_SD_EMEG-inv.fif'
    inv_fname_LD = data_path + meg + 'InvOp_LD_EMEG-inv.fif'

    inv_op_SD = read_inverse_operator(inv_fname_SD)
    inv_op_LD = read_inverse_operator(inv_fname_LD)

    stc_sd = apply_inverse_epochs(epochs_SD,
                                  inv_op_SD,
                                  lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
    stc_ld = apply_inverse_epochs(epochs_LD,
                                  inv_op_LD,
                                  lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
Example #14
            del epochs
            # Take the response events as the reference events
            events_, lag = define_target_events(levents, res_id, tri_id, sfreq,
                                                rstmin, rstmax)
            epochs = mne.Epochs(raw,
                                events_,
                                res_id,
                                rtmin,
                                rtmax,
                                picks=picks,
                                baseline=None,
                                reject=dict(mag=4e-12))
            # Baseline corrected for response events
            #epochs.load_data()
            if len(events_) != 1:
                equalize_epoch_counts([epo_base, epochs])
            line_base = epo_base.get_data().mean(axis=-1, keepdims=True)
            epochs._data = epochs.get_data() - line_base
            res_con = conditions[i][:2] + 'rt'
            fn_epo = (fn_raw[:fn_raw.rfind('-raw.fif')] +
                      ',evt_%s_bc-epo.fif' % res_con)
            epochs.save(fn_epo)
            del epochs
            i = i + 1

###################################
# Crop the unfiltered data
# ---------------------------------
if do_unfil_epo:
    # compute_epoch_per_chop = True   # compute and save epochs per chop per subject
    # combine_chopped_epochs = False  # combine the epoch chops
    pass  # chop-wise epoching not shown in this excerpt
Example #15
def SN_effective_connectivity_bands(i, method):
    s = time.time()  # needed by the timing print at the end of the function
    n_subjects = len(subjects)
    meg = subjects[i]
    sub_to = MRI_sub[i][1:15]

    print('Participant : ', i)
    stc_SD_file_name = (os.path.expanduser('~') +
                        '/my_semnet/json_files/connectivity/stc_' + method +
                        '200_bands_SD_sub' + str(i) + '.json')
    stc_LD_file_name = (os.path.expanduser('~') +
                        '/my_semnet/json_files/connectivity/stc_' + method +
                        '200_bands_LD_sub' + str(i) + '.json')
    morphed_labels = mne.morph_labels(SN_ROI, subject_to=data_path + sub_to,
                                      subject_from='fsaverage',
                                      subjects_dir=data_path)

    # Reading epochs
    epo_name_SD = data_path + meg + 'block_SD_words_epochs-epo.fif'
    epo_name_LD = data_path + meg + 'block_LD_words_epochs-epo.fif'

    epochs_sd = mne.read_epochs(epo_name_SD, preload=True)
    epochs_ld = mne.read_epochs(epo_name_LD, preload=True)

    epochs_SD = epochs_sd['words'].copy().resample(500)
    epochs_LD = epochs_ld['words'].copy().resample(500)

    # Equalize trial counts to eliminate bias (which would otherwise be
    # introduced by the abs() performed below)
    equalize_epoch_counts([epochs_SD, epochs_LD])

    # Reading inverse operator
    inv_fname_SD = data_path + meg + 'InvOp_SD_EMEG-inv.fif'
    inv_fname_LD = data_path + meg + 'InvOp_LD_EMEG-inv.fif'

    inv_op_SD = read_inverse_operator(inv_fname_SD)
    inv_op_LD = read_inverse_operator(inv_fname_LD)

    stc_sd = apply_inverse_epochs(epochs_SD,
                                  inv_op_SD,
                                  C.lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
    stc_ld = apply_inverse_epochs(epochs_LD,
                                  inv_op_LD,
                                  C.lambda2,
                                  method='MNE',
                                  pick_ori="normal",
                                  return_generator=False)
    times = epochs_SD.times
    stc_SD_t = []
    stc_LD_t = []

    src_SD = inv_op_SD['src']
    src_LD = inv_op_LD['src']
    # Construct indices to estimate connectivity between the label time course
    # and all source space time courses
    vertices_SD = [src_SD[j]['vertno'] for j in range(2)]
    n_signals_tot = 1 + len(vertices_SD[0]) + len(vertices_SD[1])
    indices = seed_target_indices([0], np.arange(1, n_signals_tot))

    morph_SD = mne.compute_source_morph(src=inv_op_SD['src'],
                                        subject_from=sub_to,
                                        subject_to=C.subject_to,
                                        spacing=C.spacing_morph,
                                        subjects_dir=C.data_path)

    morph_LD = mne.compute_source_morph(src=inv_op_LD['src'],
                                        subject_from=sub_to,
                                        subject_to=C.subject_to,
                                        spacing=C.spacing_morph,
                                        subjects_dir=C.data_path)

    for n in np.arange(0, len(stc_sd)):
        stc_SD_t.append(stc_baseline_correction(stc_sd[n], times))
        stc_LD_t.append(stc_baseline_correction(stc_ld[n], times))

    for win in np.arange(0, len(C.con_time_window)):
        t_min = C.con_time_window[win]
        t_max = C.con_time_window[win] + C.con_time_window_len
        stc_SD = []
        stc_LD = []
        for n in np.arange(0, len(stc_sd)):
            stc_SD.append(stc_SD_t[n].copy().crop(t_min * 1e-3, t_max * 1e-3))
            stc_LD.append(stc_LD_t[n].copy().crop(t_min * 1e-3, t_max * 1e-3))

        for k in np.arange(0, 6):
            print('Participant : ', i, '/ ROI: ', k)
            morphed_labels[k].name = C.rois_labels[k]

            seed_ts_sd = mne.extract_label_time_course(
                stc_SD, morphed_labels[k], src_SD, mode='mean_flip',
                return_generator=False)
            seed_ts_ld = mne.extract_label_time_course(
                stc_LD, morphed_labels[k], src_LD, mode='mean_flip',
                return_generator=False)

            for f in np.arange(0, len(C.con_freq_band_psi) - 1):
                f_min = C.con_freq_band_psi[f]
                f_max = C.con_freq_band_psi[f + 1]
                print('Participant : ', i, '/ ROI: ', k, ' win:', win,
                      ' freq: ', f)
                comb_ts_sd = zip(seed_ts_sd, stc_SD)
                comb_ts_ld = zip(seed_ts_ld, stc_LD)

                psi_SD, freqs, times, n_epochs, _ = phase_slope_index(
                    comb_ts_sd,
                    mode='fourier',
                    indices=indices,
                    sfreq=500,
                    fmin=f_min,
                    fmax=f_max,
                    n_jobs=15)

                psi_LD, freqs, times, n_epochs, _ = phase_slope_index(
                    comb_ts_ld,
                    mode='fourier',
                    indices=indices,
                    sfreq=500,
                    fmin=f_min,
                    fmax=f_max,
                    n_jobs=15)


                psi_stc_SD = mne.SourceEstimate(psi_SD, vertices=vertices_SD,
                                                tmin=t_min * 1e-3,
                                                tstep=2e-3, subject=sub_to)

                psi_stc_LD = mne.SourceEstimate(psi_LD, vertices=vertices_SD,
                                                tmin=t_min * 1e-3,
                                                tstep=2e-3, subject=sub_to)

                stc_total_SD[win][k][f] = morph_SD.apply(psi_stc_SD)
                stc_total_LD[win][k][f] = morph_LD.apply(psi_stc_LD)

    with open(stc_SD_file_name, "wb") as fp:  # pickling
        pickle.dump(stc_total_SD, fp)

    with open(stc_LD_file_name, "wb") as fp:  # pickling
        pickle.dump(stc_total_LD, fp)
    e = time.time()
    print(e - s)
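Unlike the coherence-style metrics in the earlier examples, phase_slope_index returns signed values: under MNE's convention a positive PSI suggests the seed signal leads the target in the tested band, and a negative value suggests it lags. A small interpretive sketch, assuming the psi_SD array from the loop above with one frequency band:

import numpy as np

# Hypothetical post-hoc check: which targets does the seed appear to lead?
leading_targets = np.where(psi_SD[:, 0] > 0)[0]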
Example #16
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
event_id = 1  # L auditory
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=reject, preload=True)

event_id = 3  # L visual
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=reject, preload=True)

#    Equalize trial counts to eliminate bias (which would otherwise be
#    introduced by the abs() performed below)
equalize_epoch_counts([epochs1, epochs2])

###############################################################################
# Transform to source space
# -------------------------

fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
sample_vertices = [s['vertno'] for s in inverse_operator['src']]

#    Let's average and compute inverse, resampling to speed things up
evoked1 = epochs1.average()
evoked1.resample(50, npad='auto')
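The fragment above stops right after resampling the first evoked response. In the original MNE tutorial the next step projects each evoked response to source space with the inverse operator; a sketch of that continuation, using the variables already defined above:

from mne.minimum_norm import apply_inverse

# Continue the fragment: dSPM source estimate for the first condition.
condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)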
Example #17
def test_epoch_eq():
    """Test epoch count equalization and condition combining
    """
    # equalizing epochs objects
    epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    epochs_1.drop_bad_epochs()  # make sure drops are logged
    assert_true(len([l for l in epochs_1.drop_log if not l]) == len(epochs_1.events))
    drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
    drop_log2 = [[] if l == ["EQUALIZED_COUNT"] else l for l in epochs_1.drop_log]
    assert_true(drop_log1 == drop_log2)
    assert_true(len([l for l in epochs_1.drop_log if not l]) == len(epochs_1.events))
    assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
    equalize_epoch_counts([epochs_1, epochs_2], method="mintime")
    assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
    epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
    equalize_epoch_counts([epochs_3, epochs_4], method="truncate")
    assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
    assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])

    # equalizing conditions
    epochs = Epochs(raw, events, {"a": 1, "b": 2, "c": 3, "d": 4}, tmin, tmax, picks=picks, reject=reject)
    epochs.drop_bad_epochs()  # make sure drops are logged
    assert_true(len([l for l in epochs.drop_log if not l]) == len(epochs.events))
    drop_log1 = deepcopy(epochs.drop_log)
    old_shapes = [epochs[key].events.shape[0] for key in ["a", "b", "c", "d"]]
    epochs.equalize_event_counts(["a", "b"], copy=False)
    # undo the eq logging
    drop_log2 = [[] if l == ["EQUALIZED_COUNT"] else l for l in epochs.drop_log]
    assert_true(drop_log1 == drop_log2)

    assert_true(len([l for l in epochs.drop_log if not l]) == len(epochs.events))
    new_shapes = [epochs[key].events.shape[0] for key in ["a", "b", "c", "d"]]
    assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    # now with two conditions collapsed
    old_shapes = new_shapes
    epochs.equalize_event_counts([["a", "b"], "c"], copy=False)
    new_shapes = [epochs[key].events.shape[0] for key in ["a", "b", "c", "d"]]
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
    assert_raises(KeyError, epochs.equalize_event_counts, [1, "a"])

    # now let's combine conditions
    old_shapes = new_shapes
    epochs = epochs.equalize_event_counts([["a", "b"], ["c", "d"]])[0]
    new_shapes = [epochs[key].events.shape[0] for key in ["a", "b", "c", "d"]]
    assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
    assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
    assert_raises(ValueError, combine_event_ids, epochs, ["a", "b"], {"ab": 1})

    combine_event_ids(epochs, ["a", "b"], {"ab": 12}, copy=False)
    caught = 0
    for key in ["a", "b"]:
        try:
            epochs[key]
        except KeyError:
            caught += 1
    assert_true(caught == 2)
    assert_true(not np.any(epochs.events[:, 2] == 1))
    assert_true(not np.any(epochs.events[:, 2] == 2))
    epochs = combine_event_ids(epochs, ["c", "d"], {"cd": 34})
    assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12, epochs.events[:, 2] == 34)))
    assert_true(epochs["ab"].events.shape[0] == old_shapes[0] + old_shapes[1])
    assert_true(epochs["ab"].events.shape[0] == epochs["cd"].events.shape[0])
Example #18
    input_file = fname.output(subject=subj,
                              processing_step='cue_epochs',
                              file_type='epo.fif')
    cue_epo = read_epochs(input_file, preload=True)

    # extract epochs relevant for analysis apply baseline correction
    a_epo = cue_epo['Correct A']
    a_epo.apply_baseline(baseline=baseline).crop(tmin=-0.3, tmax=2.45)
    a_epochs_info = a_epo.info

    b_epo = cue_epo['Correct B']
    b_epo.apply_baseline(baseline=baseline).crop(tmin=-0.3, tmax=2.45)
    b_epochs_info = b_epo.info

    # number of epochs should be equal between conditions
    equalize_epoch_counts([a_epo, b_epo])

    # compute covariance for analysis time window
    # cue A epochs
    data_cov_a = compute_covariance(a_epo,
                                    tmin=0.01, tmax=2.45,
                                    method='shrunk')
    # cue B epochs
    data_cov_b = compute_covariance(b_epo,
                                    tmin=0.01, tmax=2.45,
                                    method='shrunk')

    # compute covariance for the baseline period (i.e., noise)
    # cue A epochs
    noise_cov_a = compute_covariance(a_epo, tmin=-0.3, tmax=-0.05,
                                     method='shrunk')
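The page ends mid-snippet; covariance pairs like data_cov_a and noise_cov_a are typically fed to an LCMV beamformer next. A hedged sketch of that downstream step, assuming a forward solution fwd exists for this subject:

from mne.beamformer import make_lcmv, apply_lcmv

# Hypothetical continuation: spatial filter for cue A from the covariances above.
filters_a = make_lcmv(a_epochs_info, fwd, data_cov_a, reg=0.05,
                      noise_cov=noise_cov_a, pick_ori='max-power')
stc_a = apply_lcmv(a_epo.average(), filters_a)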