Example #1
    def prepare_data(self, dataset, subjects):
        """Prepare data for classification."""
        if len(dataset.event_id) < 3:
            # this paradigm requires at least three classes
            raise ValueError("Dataset %s contains fewer than three classes" %
                             dataset.name)

        event_id = dataset.event_id
        epochs = self._epochs(dataset, subjects, event_id)
        groups = []
        full_epochs = []

        for ii, epoch in enumerate(epochs):
            epochs_list = [epoch[k] for k in event_id]
            # equalize trial counts across classes so accuracy is unbiased
            equalize_epoch_counts(epochs_list)
            ep = concatenate_epochs(epochs_list)
            groups.extend([ii] * len(ep))
            full_epochs.append(ep)

        epochs = concatenate_epochs(full_epochs)
        X = epochs.get_data() * 1e6  # scale from volts to microvolts
        y = epochs.events[:, -1]
        groups = np.asarray(groups)
        return X, y, groups
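The (X, y, groups) triplet is designed for group-aware cross-validation, where groups keeps all epochs of one subject together. A minimal sketch of one way to consume it; the scikit-learn pipeline below is illustrative, not part of the original code:

# Hypothetical downstream usage of prepare_data's output: subject-aware
# cross-validation, so no subject appears in both train and test folds.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GroupKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

def evaluate(X, y, groups):
    X2d = X.reshape(len(X), -1)  # flatten (epochs, channels, times)
    clf = make_pipeline(StandardScaler(), LogisticRegression())
    cv = GroupKFold(n_splits=5)
    scores = cross_val_score(clf, X2d, y, groups=groups, cv=cv)
    return np.mean(scores)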
Example #2
def load_subject_series_epochs(data_path,
                               subject_range,
                               series_range,
                               tmin=-0.2,
                               tmax=0.5,
                               baseline=(None, 0),
                               stim_channel=None):
    """Epoch each subject/series CSV and concatenate across recordings."""
    all_epochs_list = []

    for subject in subject_range:
        subject_fname = data_path + '/subj{0}'.format(subject)

        for series in series_range:
            subject_series_fname = subject_fname + '_series{0}_data.csv'.format(
                series)
            subject_series = create_mne_raw_object(subject_series_fname)
            subject_series_events = find_events(subject_series,
                                                stim_channel=stim_channel,
                                                verbose=False)
            epochs = Epochs(subject_series,
                            subject_series_events,
                            tmin=tmin,
                            tmax=tmax,
                            baseline=baseline,
                            verbose=False)
            all_epochs_list.append(epochs)

    all_epochs = concatenate_epochs(all_epochs_list)
    return all_epochs
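A minimal usage sketch; the data path, subject/series ranges and stim channel below are hypothetical:

# Hypothetical call: epochs for subjects 1-2 across series 1-8.
epochs = load_subject_series_epochs('data/train',
                                    subject_range=range(1, 3),
                                    series_range=range(1, 9),
                                    stim_channel='HandStart')
print(epochs)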
Example #3
def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
              event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
    """Aux function for testing GAT viz."""
    with warnings.catch_warnings(record=True):  # deprecated
        gat = GeneralizationAcrossTime()
    raw = read_raw_fif(raw_fname)
    raw.add_proj([], remove_existing=True)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]
    decim = 30
    # Test on time generalization within one condition
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        preload=True, decim=decim)
    epochs_list = [epochs[k] for k in event_id]
    equalize_epoch_counts(epochs_list)
    epochs = concatenate_epochs(epochs_list)

    # Test default running
    with warnings.catch_warnings(record=True):  # deprecated
        gat = GeneralizationAcrossTime(test_times=test_times)
    gat.fit(epochs)
    gat.score(epochs)
    return gat
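Since this helper exists to exercise the GAT visualization tests, a plotting follow-up is the natural next step. A sketch, assuming the long-deprecated GeneralizationAcrossTime plotting API is still available:

# Sketch, assuming the deprecated GAT plotting methods exist in this MNE version.
gat = _get_data()
with warnings.catch_warnings(record=True):  # deprecated API
    gat.plot()           # full temporal generalization matrix
    gat.plot_diagonal()  # scores along the diagonal (decoding over time)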
Example #4
 def extract_data_from_cont(self, ep_list, event_id):
     # one independent list per event type ([[]] * n would alias one list)
     event_epochs = {key: [] for key in event_id}
     for epoch in ep_list:
         for key in event_id.keys():
             if key in epoch.event_id.keys():
                 event_epochs[key].append(epoch[key])
     all_events = []
     for key in event_id.keys():
         if len(event_epochs[key]) > 0:
             all_events.append(concatenate_epochs(event_epochs[key]))
     # equalize for accuracy
     if len(all_events) > 1:
         equalize_epoch_counts(all_events)
     ep = concatenate_epochs(all_events)
     # note: earlier versions multiplied the data by 1e6 (volts to microvolts)
     X, y = (ep.get_data(), ep.events[:, -1])
     return X, y
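For reference, the dict comprehension above avoids a classic pitfall: [[]] * n repeats one list object, so every key would share (and mutate) the same list:

# Demonstration of the aliasing bug avoided above.
shared = dict(zip(['a', 'b'], [[]] * 2))
shared['a'].append(1)
print(shared['b'])        # [1] -- both keys reference the same list
independent = {k: [] for k in ['a', 'b']}
independent['a'].append(1)
print(independent['b'])   # []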
Example #5
 def prepare_data(self, subjects):
     """Prepare data for classification."""
     event_id = dict(rest=1, left_hand=2, right_hand=3)
     epochs = self._epochs(subjects, event_id)
     # since we are using accuracy, we have to equalize the number of
     # events
     groups = []
     full_epochs = []
     for ii, epoch in enumerate(epochs):
         epochs_list = [epoch[k] for k in event_id]
         equalize_epoch_counts(epochs_list)
         ep = concatenate_epochs(epochs_list)
         groups.extend([ii] * len(ep))
         full_epochs.append(ep)
     epochs = concatenate_epochs(full_epochs)
      X = epochs.get_data() * 1e6  # volts to microvolts
     y = epochs.events[:, -1]
     return X, y, groups
Example #6
 def prepare_data(self, subjects):
     """Prepare data for classification."""
     event_id = dict(left_hand=2, right_hand=3)
     epochs = self._epochs(subjects, event_id)
     groups = []
     for ii, ep in enumerate(epochs):
         groups.extend([ii] * len(ep))
     epochs = concatenate_epochs(epochs)
      X = epochs.get_data() * 1e6  # volts to microvolts
      y = epochs.events[:, -1] - 2  # remap event codes {2, 3} to labels {0, 1}
     return X, y, groups
Example #7
    def prepare_data(self, dataset, subjects):
        """Prepare data for classification."""

        event_id = dataset.event_id
        epochs = self._epochs(dataset, subjects, event_id)
        groups = []
        full_epochs = []

        for ii, epoch in enumerate(epochs):
            epochs_list = [epoch[k] for k in event_id]
            # equalize trial counts across classes so accuracy is unbiased
            equalize_epoch_counts(epochs_list)
            ep = concatenate_epochs(epochs_list)
            groups.extend([ii] * len(ep))
            full_epochs.append(ep)

        epochs = concatenate_epochs(full_epochs)
        # X = epochs.get_data() * 1e6  # microvolt scaling disabled here
        X = epochs.get_data()
        y = epochs.events[:, -1]
        groups = np.asarray(groups)
        return X, y, groups
Example #8
def csp_training(raw, picks, nfilters):
    """Train a CSP spatial filter.

    :param raw: Raw data
    :param picks: Indices of the channels to use
    :param nfilters: Number of CSP components to keep
    :return: The fitted CSP object
    """

    epochs_tot = []
    y = []

    # get event positions corresponding to HandStart
    events = find_events(raw, stim_channel='HandStart', verbose=False)
    # epoch the signal from 0 to 2 seconds after the event (movement period)
    epochs = Epochs(raw, events, {'during': 1}, 0, 2, proj=False,
                    picks=picks, baseline=None, preload=True,
                    add_eeg_ref=False, verbose=False)

    epochs_tot.append(epochs)
    y.extend([1] * len(epochs))

    # epoch the signal from 2 seconds before the event to its onset; this
    # corresponds to the rest period.
    epochs_rest = Epochs(raw, events, {'before': 1}, -2, 0, proj=False,
                         picks=picks, baseline=None, preload=True,
                         add_eeg_ref=False, verbose=False)

    # Workaround to be able to concatenate epochs with MNE
    epochs_rest.times = epochs.times

    y.extend([-1] * len(epochs_rest))
    epochs_tot.append(epochs_rest)

    # Concatenate all epochs
    epochs = concatenate_epochs(epochs_tot)

    # get data
    X = epochs.get_data()
    y = np.array(y)

    # train CSP
    csp = CSP(n_components=nfilters, reg='lws')
    csp.fit(X, y)

    return csp
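A sketch of applying the fitted CSP to the continuous signal, mirroring the feature extraction in the later examples; raw and picks are assumed to be the same objects passed to csp_training:

# Hypothetical follow-up: project the raw signal through the learned filters.
csp = csp_training(raw, picks, nfilters=4)
feat = np.dot(csp.filters_[:4], raw._data[picks]) ** 2  # rectified components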
Example #9
    def prepare_data(self, dataset, subjects):
        """Prepare data for classification."""

        if len(dataset.event_id) > 2:
            # this paradigm only handles binary classification
            raise ValueError("Dataset %s contains more than two classes" %
                             dataset.name)

        event_id = dataset.event_id
        epochs = self._epochs(dataset, subjects, event_id)
        groups = []
        for ii, ep in enumerate(epochs):
            groups.extend([ii] * len(ep))
        epochs = concatenate_epochs(epochs)
        X = epochs.get_data() * 1e6
        y = epochs.events[:, -1]
        # binarize: 1 for the larger event code, 0 for the other
        y = np.asarray(y == np.max(y), dtype=np.int32)

        groups = np.asarray(groups)
        return X, y, groups
Example #10
def _get_data(tmin=-0.2,
              tmax=0.5,
              event_id=dict(aud_l=1, vis_l=3),
              event_id_gen=dict(aud_l=2, vis_l=4),
              test_times=None):
    """Aux function for testing GAT viz"""
    gat = GeneralizationAcrossTime()
    raw = read_raw_fif(raw_fname, preload=False, add_eeg_ref=False)
    raw.add_proj([], remove_existing=True)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg='mag',
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    picks = picks[1:13:3]
    decim = 30
    # Test on time generalization within one condition
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw,
                        events,
                        event_id,
                        tmin,
                        tmax,
                        picks=picks,
                        baseline=(None, 0),
                        preload=True,
                        decim=decim,
                        add_eeg_ref=False)
    epochs_list = [epochs[k] for k in event_id]
    equalize_epoch_counts(epochs_list)
    epochs = concatenate_epochs(epochs_list)

    # Test default running
    gat = GeneralizationAcrossTime(test_times=test_times)
    gat.fit(epochs)
    gat.score(epochs)
    return gat
Example #11
def compute_transform(subj_id, nfilters=4):
    # 5th-order Butterworth band-pass, 7-30 Hz (Nyquist 250 Hz, i.e. 500 Hz rate)
    freqs = [7, 30]
    b, a = butter(5, np.array(freqs) / 250.0, btype='bandpass')
    epochs_tot = []
    y = []
    fnames = glob('data/train/subj%d_series*_data.csv' % (subj_id))

    train_raw = concatenate_raws(
        [create_mne_raw_object(fname) for fname in fnames])

    picks = pick_types(train_raw.info, eeg=True)

    train_raw._data[picks] = lfilter(b, a, train_raw._data[picks])

    events = find_events(train_raw, stim_channel='Replace', verbose=False)
    epochs = Epochs(train_raw,
                    events, {'during': 1},
                    -2,
                    -0.5,
                    proj=False,
                    picks=picks,
                    baseline=None,
                    preload=True,
                    add_eeg_ref=False,
                    verbose=False)

    epochs_tot.append(epochs)
    y.extend([1] * len(epochs))

    epochs_rest = Epochs(train_raw,
                         events, {'after': 1},
                         0.5,
                         2,
                         proj=False,
                         picks=picks,
                         baseline=None,
                         preload=True,
                         add_eeg_ref=False,
                         verbose=False)

    epochs_rest.times = epochs.times

    y.extend([-1] * len(epochs_rest))
    epochs_tot.append(epochs_rest)

    # Concatenate all epochs
    epochs = concatenate_epochs(epochs_tot)

    # get data
    X = epochs.get_data()
    y = np.array(y)

    # train CSP
    csp = CSP(n_components=nfilters, reg='lws')
    csp.fit(X, y)

    #from pyriemann.spatialfilters import Xdawn
    #xdawn = Xdawn(nfilter=nfilters / 2)
    #xdawn.fit(X, y)

    #return xdawn.V
    return csp.filters_[:nfilters]
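A hypothetical call; projecting data through the returned filters follows the same pattern as the CSP features elsewhere in these examples (train_raw and picks would need to be rebuilt as in the function body):

# Sketch: compute spatial filters for subject 1, then project band-passed EEG.
filters = compute_transform(subj_id=1, nfilters=4)
features = np.dot(filters, train_raw._data[picks]) ** 2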
Example #12
for event_type in event_types:
    events_behavior_type = fix_triggers(events_meg, events_behavior,
                                        event_type='trigg' + event_type)

    # Epoch raw data
    epochs_list = list()
    for run in range(1, n_runs):
        fname_raw = op.join(path_data, subject, 'run%02i.fif' % run)
        raw = Raw(fname_raw, preload=True)
        raw.filter(.75, h_freq=30.0)
        sel = events_behavior_type['meg_file'] == run
        time_sample = events_behavior_type['meg_event_tsample'][sel]
        trigger_value = events_behavior_type['meg_event_value'][sel]
        events_meg = np.vstack((time_sample.astype(int),
                                np.zeros_like(time_sample, int),
                                trigger_value.astype(int))).T
        event_id = {'ttl_%i' % ii: ii for ii in np.unique(events_meg[:, 2])}
        epochs = Epochs(raw, events_meg, event_id=event_id,
                        tmin=-1.0, tmax=.500, preload=True)
        # epochs.resample(128)  # XXX BUG MNE when concatenate afterwards
        epochs_list.append(epochs)
    epochs = concatenate_epochs(epochs_list)
    epochs.resample(128)

    # Save data
    fname = op.join(path_data, subject, 'epochs_%s.fif' % event_type)
    epochs.save(fname)
    fname = op.join(path_data, subject, 'behavior_%s.pkl' % event_type)
    with open(fname, 'wb') as f:
        pickle.dump(events_behavior_type, f)
    ######################################################################
    # Decoding
    # set up a classifier based on a regularized Logistic Regression
    clf = LogisticRegression(C=1)
    # force the classifier to output a probabilistic prediction
    clf = force_predict(clf, axis=1)
    # insert a z-score normalization step before the classification
    clf = make_pipeline(StandardScaler(), clf)
    # initialize the GAT object
    gat = GeneralizationAcrossTime(clf=clf, scorer=scorer_auc, n_jobs=-1,
                                   cv=10)

    # select the trials where a target is presented
    for contrast in ['HL', 'EU', 'PR']:
        epochs_ = concatenate_epochs((epochs[contrast[0]],
                                      epochs[contrast[1]]))
        y = np.hstack((np.zeros(len(epochs[contrast[0]])),
                       np.ones(len(epochs[contrast[1]]))))
        gat.fit(epochs_, y=y)
        fname = op.join(data_path, 's%i_%s_fit.pkl' % (subject, contrast))
        with open(fname, 'wb') as f:
            pickle.dump(gat, f)
        # TODO: should save y_pred separately

        # predict + score
        scores = gat.score(epochs_, y=y)
        fname = op.join(data_path,
                        's%i_%s_scores.npy' % (subject, contrast))
        np.save(fname, np.array(scores))
        all_scores[contrast].append(np.array(scores))
        # plot
Example #14
                         0,
                         proj=False,
                         picks=picks,
                         baseline=None,
                         preload=True,
                         add_eeg_ref=False,
                         verbose=False)

    # Workaround to be able to concatenate epochs with MNE
    epochs_rest.times = epochs.times

    y.extend([-1] * len(epochs_rest))
    epochs_tot.append(epochs_rest)

    # Concatenate all epochs
    epochs = concatenate_epochs(epochs_tot)

    # get data
    X = epochs.get_data()
    y = np.array(y)

    # train CSP
    csp = CSP(n_components=nfilters, reg='lws')
    csp.fit(X, y)

    ################ Create Training Features #################################
    # apply csp filters and rectify signal
    feat = np.dot(csp.filters_[0:nfilters], raw._data[picks])**2

    # smoothing by convolution with a rectangle window
    feattr = np.array(
Example #15
 y.extend([1] * len(epochs))
 
 # epoch the signal for 1.5 seconds after the movement; this corresponds
 # to the rest period.
 epochs_rest = Epochs(raw, events, {'after': 1}, 0.5, 2, proj=False,
                 picks=picks, baseline=None, preload=True,
                 add_eeg_ref=False, verbose=False)
 
 # Workaround to be able to concatenate epochs with MNE
 epochs_rest.times = epochs.times
 
 y.extend([-1] * len(epochs_rest))
 epochs_tot.append(epochs_rest)
     
 # Concatenate all epochs
 epochs = concatenate_epochs(epochs_tot)
 
 # get data 
 X = epochs.get_data()
 y = np.array(y)
 
 # train CSP
 csp = CSP(n_components=nfilters, reg='lws')
 csp.fit(X, y)
 
 ################ Create Training Features #################################
 # apply CSP filters and rectify the signal
 feat = np.dot(csp.filters_[0:nfilters], raw._data[picks]) ** 2

 # smoothing by convolution with a rectangular window
 feattr = np.array(
     Parallel(n_jobs=-1)(delayed(convolve)(feat[i], boxcar(nwin), 'full')
                         for i in range(nfilters)))
Example #16
def run(subjectsNum, classifier, subFile):
    print('hi as we go')

    subjects = range(1, subjectsNum)  # subjects 1 .. subjectsNum - 1
    ids_tot = []
    pred_tot = []
    true = np.empty((0, 6))

    # design a butterworth bandpass filter
    freqs = [7, 30]
    b, a = butter(5, np.array(freqs) / 250.0, btype='bandpass')

    # CSP parameters
    # Number of spatial filter to use
    nfilters = 4

    # convolution
    # window for smoothing features
    nwin = 250

    # training subsample
    subsample = 10

    # submission file
    submission_file = subFile
    cols = [
        'HandStart', 'FirstDigitTouch', 'BothStartLoadPhase', 'LiftOff',
        'Replace', 'BothReleased'
    ]

    for subject in subjects:
        epochs_tot = []
        y = []

        ################ READ DATA ################################################
        fnames = glob(
            '/Users/camasa/Documents/University/Grad/DM_Pro/train1/subj%d_series*_data.csv'
            % (subject))

        # read and concatenate all the files
        raw = concatenate_raws(
            [creat_mne_raw_object(fname) for fname in fnames])

        # pick eeg signal
        picks = pick_types(raw.info, eeg=True)

        # Filter the data in the alpha and beta bands.
        # Note that MNE implements zero-phase (filtfilt) filtering, which is
        # incompatible with the no-future-data rule, so a causal (left)
        # filter is used instead. The filtering is parallelized to speed up
        # the script.
        raw._data[picks] = np.array(
            Parallel(n_jobs=-1)(delayed(lfilter)(b, a, raw._data[i])
                                for i in picks))

        ################ CSP Filters training #####################################
        # get event positions corresponding to HandStart
        events = find_events(raw, stim_channel='HandStart', verbose=False)
        # epoch the signal for 2 seconds after the event
        epochs = Epochs(raw,
                        events, {'during': 1},
                        0,
                        2,
                        proj=False,
                        picks=picks,
                        baseline=None,
                        preload=True,
                        verbose=False)

        epochs_tot.append(epochs)
        y.extend([1] * len(epochs))

        # epoch the signal for 2 seconds before the event; this corresponds
        # to the rest period.
        epochs_rest = Epochs(raw,
                             events, {'before': 1},
                             -2,
                             0,
                             proj=False,
                             picks=picks,
                             baseline=None,
                             preload=True,
                             verbose=False)

        # Workaround to be able to concatenate epochs with MNE
        epochs_rest.times = epochs.times

        y.extend([-1] * len(epochs_rest))
        epochs_tot.append(epochs_rest)

        # Concatenate all epochs
        epochs = concatenate_epochs(epochs_tot)

        # get data
        X = epochs.get_data()
        y = np.array(y)

        # train CSP
        csp = CSP(n_components=nfilters, reg='ledoit_wolf')
        csp.fit(X, y)

        ################ Create Training Features #################################
        # apply csp filters and rectify signal
        feat = np.dot(csp.filters_[0:nfilters], raw._data[picks])**2

        # smoothing by convolution with a rectangle window
        feattr = np.array(
            Parallel(n_jobs=-1)(
                delayed(convolve)(feat[i], boxcar(nwin), 'full')
                for i in range(nfilters)))
        feattr = np.log(feattr[:, 0:feat.shape[1]])

        # training labels
        # they are stored in the 6 last channels of the MNE raw object
        labels = raw._data[32:]

        ################ Create test Features #####################################
        # read test data
        fnames = glob(
            '/Users/camasa/Documents/University/Grad/DM_Pro/test1val/subj%d_series*_data.csv'
            % (subject))
        raw = concatenate_raws(
            [creat_mne_raw_object(fname) for fname in fnames])
        raw._data[picks] = np.array(
            Parallel(n_jobs=-1)(delayed(lfilter)(b, a, raw._data[i])
                                for i in picks))

        # read ids
        ids = np.concatenate(
            [np.array(pd.read_csv(fname)['id']) for fname in fnames])
        ids_tot.append(ids)

        # apply preprocessing on test data
        feat = np.dot(csp.filters_[0:nfilters], raw._data[picks])**2
        featte = np.array(
            Parallel(n_jobs=-1)(
                delayed(convolve)(feat[i], boxcar(nwin), 'full')
                for i in range(nfilters)))
        featte = np.log(featte[:, 0:feat.shape[1]])

        tru = raw._data[32:].T

        #    feattrNorm = normalize(feattr)
        #    featteNorm = normalize(featte)
        #
        #    ctr, rtr = feattr.shape
        #    ctt, rtt = featte.shape
        #    print ctr, rtr
        #    print ctt, rtt
        ############## Train classifiers ########################################
        if classifier == 'LR':
            lr = LogisticRegression()
            pred = np.empty((len(ids), 6))
            for i in range(6):
                print('LR Train subject %d, class %s' % (subject, cols[i]))
                lr.fit(feattr[:, ::subsample].T, labels[i, ::subsample])
                pred[:, i] = lr.predict_proba(featte.T)[:, 1]

#            pred_tot.append(pred)
#            true = np.r_[true,tru]
        elif classifier == 'MLP':
            mlp = MLPClassifier(solver='sgd',
                                learning_rate='adaptive',
                                hidden_layer_sizes=(1000, ),
                                learning_rate_init=0.0005,
                                max_iter=1000000,
                                shuffle=False)
            pred = np.empty((len(ids), 6))
            for i in range(6):
                print('MLP Train subject %d, class %s' % (subject, cols[i]))
                mlp.fit(feattr[:, ::subsample].T, labels[i, ::subsample])
                pred[:, i] = mlp.predict_proba(featte.T)[:, 1]


#            pred_tot.append(pred)
#            true = np.r_[true,tru]

#        elif classifier == 'SVC':
#            svr = SVC()
#            pred = np.empty((len(ids),6))
#            for i in range(6):
#                print('SVC Train subject %d, class %s' % (subject, cols[i]))
#                svr.fit(feattr[:,::subsample].T,labels[i,::subsample])
#                pred[:,i] = svr.predict(featte.T)[:,1]
        elif classifier == 'RF':
            rf = RandomForestClassifier(n_estimators=100,
                                        max_features='auto',
                                        n_jobs=-1)
            pred = np.empty((len(ids), 6))
            for i in range(6):
                print('RF Train subject %d, class %s' % (subject, cols[i]))
                rf.fit(feattr[:, ::subsample].T, labels[i, ::subsample])
                pred[:, i] = rf.predict_proba(featte.T)[:, 1]
        elif classifier == 'SVC':
            svc = LinearSVC()
            pred = np.empty((len(ids), 6))
            for i in range(6):
                print('SVC Train subject %d, class %s' % (subject, cols[i]))
                svc.fit(feattr[:, ::subsample].T, labels[i, ::subsample])
                pred[:, i] = svc.predict(featte.T)  #[:,1]

        pred_tot.append(pred)
        true = np.r_[true, tru]

    data = np.concatenate(pred_tot)
    #    score = mlp.score(true,data)
    #    print score
    #    auc = roc_auc_score(true,data)
    #    print auc
    #    fpr = dict()
    #    tpr = dict()
    #    thresholds = dict()
    #    roc_auc = dict()
    #
    #    for i in range(6):
    #        fpr, tpr, _ = roc_curve(true[:,i], data[:,i])
    #        roc_auc[i] = auc(fpr[i], tpr[i])
    #    for j in range(6):
    #        plt.figure()
    #        lw = i
    #        plt.plot(fpr[j], tpr[j], color='darkorange',
    #                 lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[j])
    #        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    #        plt.xlim([0.0, 1.0])
    #        plt.ylim([0.0, 1.05])
    #        plt.xlabel('False Positive Rate')
    #        plt.ylabel('True Positive Rate')
    #        plt.title('Receiver operating characteristic example')
    #        plt.legend(loc="lower right")
    #        plt.savefig('/Users/camasa/Desktop/roc%d.eps' % (j),format='eps')
    #        plt.show()

    # create pandas object for the submission
    submission = pd.DataFrame(index=np.concatenate(ids_tot),
                              columns=cols,
                              data=np.concatenate(pred_tot))

    # write file
    submission.to_csv(submission_file, index_label='id', float_format='%.5f')

    trueLabels = pd.DataFrame(index=np.concatenate(ids_tot),
                              columns=cols,
                              data=true)
    trueLabels.to_csv('TrueLabels.csv', index_label='id', float_format='%.5f')
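A hypothetical entry point; with subjectsNum=13 the loop covers training subjects 1 through 12 (the classifier key and file name are illustrative):

if __name__ == '__main__':
    # Train logistic regression per subject and write the submission CSV.
    run(13, 'LR', 'submission_lr.csv')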
Example #17
def separateXY(epochs):
    """Split (a list of) Epochs into a data matrix X and a label vector y."""
    if isinstance(epochs, list):
        epochs = concatenate_epochs(epochs)

    labels = epochs.events[:, -1]
    return (epochs.get_data() * 1000, labels)
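A minimal usage sketch (epochs_list is a hypothetical list of Epochs objects):

# Hypothetical: merge per-run epochs, then split into features and labels.
X, y = separateXY(epochs_list)
print(X.shape, y.shape)  # (n_epochs, n_channels, n_times), (n_epochs,)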
Example #18
def make_cue_epoch_tf(subject):
    """Create cue epochs during the WM task."""
    fname_raw = op.join(path_data, subject)
    # Read behavioral file
    fname_bhv = list()
    files = os.listdir(op.join(path_data, subject, 'behavdata'))
    fname_bhv.extend(([
        op.join(fname_raw + '/behavdata/') + f for f in files if 'WorkMem' in f
    ]))
    for fname_behavior in fname_bhv:
        events_behavior = get_events_from_mat(fname_behavior)
    # Read raw MEG data and extract event triggers
    runs = list()
    files = os.listdir(fname_raw)
    runs.extend(([op.join(fname_raw + '/') + f for f in files if '.ds' in f]))
    events_meg = list()
    for run_number, this_run in enumerate(runs):
        fname_raw = op.join(path_data, subject, this_run)
        print(fname_raw)
        raw = read_raw_ctf(fname_raw, preload=True, system_clock='ignore')
        channel_trigger = np.where(np.array(raw.ch_names) == 'USPT001')[0][0]
        # replace 255 values with 0
        trigger_baseline = np.where(raw._data[channel_trigger, :] == 255)[0]
        raw._data[channel_trigger, trigger_baseline] = 0.
        # find triggers
        events_meg_ = mne.find_events(raw)
        # Add 48 ms to the trigger events (to account for the photodiode delay)
        events_meg_ = np.array(events_meg_, float)
        events_meg_[:, 0] += round(.048 * raw.info['sfreq'])
        events_meg_ = np.array(events_meg_, int)
        # to keep the run from which the event was found
        events_meg_[:, 1] = run_number
        events_meg.append(events_meg_)
    # concatenate all meg events
    events_meg = np.vstack(events_meg)
    # add trigger index to meg_events
    events_target = range(1, 126)
    events_cue = range(126, 130)
    events_probe = range(130, 141)
    # label each trigger as target (1), cue (2) or probe (3)
    triggidx_array = []
    for trigg in events_meg[:, 2]:
        if trigg in events_target:
            triggidx = 1
        elif trigg in events_cue:
            triggidx = 2
        elif trigg in events_probe:
            triggidx = 3
        triggidx_array.append(triggidx)
    events_meg = np.insert(events_meg, 3, triggidx_array, axis=1)
    # Compare MEG and bhv triggers and save events_behavior for each event
    event_type = 'Cue'
    events_behavior_type = []
    events_behavior_type = fix_triggers(events_meg,
                                        events_behavior,
                                        event_type='trigg' + event_type)
    epochs_list = list()
    # Read raw MEG, filter and epochs
    for run_number, this_run in enumerate(runs):
        fname_raw = op.join(path_data, subject, this_run)
        print(fname_raw)
        raw = read_raw_ctf(fname_raw, preload=True, system_clock='ignore')
        events_meg_run = make_events_run(events_behavior_type, run_number)
        event_id = {
            'ttl_%i' % ii: ii
            for ii in np.unique(events_meg_run[:, 2])
        }
        tmin = -.200
        tmax = 1.500
        epochs = Epochs(raw,
                        events_meg_run,
                        event_id=event_id,
                        tmin=tmin,
                        tmax=tmax,
                        preload=True,
                        baseline=None,
                        decim=10)
        # Copy dev_head_t of the first run to others run
        if run_number == 0:
            dev_head_t = epochs.info['dev_head_t']
        else:
            epochs.info['dev_head_t'] = dev_head_t
        # Apply baseline from beginning of epoch to t0
        epochs.apply_baseline((-0.2, 0.))
        epochs_list.append(epochs)
    epochs = concatenate_epochs(epochs_list)
    epochs.pick_types(meg=True, ref_meg=False)
    events = events_behavior_type
    events = complete_behavior(events)
    return epochs, events
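A hypothetical invocation; the subject folder name is illustrative:

# Sketch: build cue-locked epochs for one subject, then reuse them downstream.
epochs, events = make_cue_epoch_tf('subject_01')
print(epochs)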
Example #19
                                         event_id=event_id_bsl,
                                         tmin=-.200,
                                         tmax=0.,
                                         preload=True,
                                         baseline=None,
                                         decim=10)
                # Apply baseline of Target
                bsl_channels = pick_types(epochs.info, meg=True)
                bsl_data = epochs_baseline.get_data()[:, bsl_channels, :]
                bsl_data = np.mean(bsl_data, axis=2)
                epochs._data[:, bsl_channels, :] -= bsl_data[:, :, np.newaxis]
            else:
                # Apply baseline from beginning of epoch to t0
                epochs.apply_baseline((-0.2, 0.))
            epochs_list.append(epochs)
        epochs = concatenate_epochs(epochs_list)
        # Save epochs and hdf5 behavior
        suffix = '' if target_baseline else '_bsl'
        session = '_2' if subject[-1:] == '2' else '_1'
        fname = op.join(path_data, subject,
                        'behavior_%s%s.hdf5' % (event_type, session))
        write_hdf5(fname, events_behavior_type, overwrite=True)
        fname = op.join(path_data, subject,
                        'epochs_%s%s%s.fif' % (event_type, suffix, session))
        epochs.save(fname)

# concatenate the two sessions when a second one exists
subject = sys.argv[1]
suffix = '' if target_baseline else '_bsl'
for event_type in event_types:
    subject_2 = subject + '_2'