Example #1
def get_epoch(epoch, recording):
    # Load, concatenate and channel-pick the epochs of one recording, subtract
    # the pre-stimulus baseline and compute the data covariance.
    filenames = get_filenames(epoch, recording)
    epochs = preprocessing.load_epochs([str(f) for f in filenames])
    epochs = preprocessing.concatenate_epochs(epochs, None)
    # Keep only MEG channels (names starting with 'M')
    epochs = epochs.pick_channels(
        [x for x in epochs.ch_names if x.startswith("M")])
    epochs._data -= get_baseline(recording)
    data_cov = get_cov(epochs, tmin=0, tmax=2)
    return data_cov, epochs
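The data covariance is computed over the post-stimulus window (tmin=0 to tmax=2 s). As a minimal, self-contained sketch of what a windowed data covariance looks like in plain numpy (all shapes and values invented; the get_cov helper itself is not reproduced here):

import numpy as np

# Simulated epochs: 10 epochs x 5 channels x 600 samples, t from -1 s to 1 s at 300 Hz
rng = np.random.default_rng(0)
data = rng.standard_normal((10, 5, 600))
times = np.arange(600) / 300.0 - 1.0

# Restrict to the covariance window (here 0 to 2 s, clipped by the epoch length)
mask = (0 <= times) & (times <= 2)
windowed = data[:, :, mask]                          # epochs x channels x samples

# Channel-by-channel covariance, pooled over epochs and window samples
stacked = windowed.transpose(1, 0, 2).reshape(5, -1)
data_cov = np.cov(stacked)
print(data_cov.shape)                                # (5, 5)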
Example #2
def get_baseline(recording):
    # Per-epoch, per-channel mean over the -0.5 s to -0.25 s pre-stimulus window.
    filenames = get_filenames("stimulus", recording)
    print(filenames)
    epochs = preprocessing.load_epochs([str(f) for f in filenames])
    epochs = preprocessing.concatenate_epochs(epochs, None)
    epochs = epochs.pick_channels(
        [x for x in epochs.ch_names if x.startswith("M")])
    id_time = (-0.5 <= epochs.times) & (epochs.times <= -0.250)
    # Keep a trailing singleton time axis so the baseline broadcasts on subtraction.
    return epochs._data[:, :, id_time].mean(-1)[:, :, np.newaxis]
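The trailing np.newaxis keeps an explicit time axis of length one, so the returned baseline broadcasts against the full data array when get_epoch subtracts it. A self-contained numpy sketch of that broadcasting step (shapes invented for illustration):

import numpy as np

rng = np.random.default_rng(1)
data = rng.standard_normal((8, 4, 300))              # epochs x channels x samples
times = np.linspace(-1.0, 0.5, 300)

id_time = (-0.5 <= times) & (times <= -0.25)
baseline = data[:, :, id_time].mean(-1)[:, :, np.newaxis]   # epochs x channels x 1

corrected = data - baseline                          # broadcasts over the time axis
print(baseline.shape, corrected.shape)               # (8, 4, 1) (8, 4, 300)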
def get_response_epoch(subject, session, recording):
    stimulus = glob(get_filenames(subject, session, recording)[0])
    response = glob(get_filenames(subject, session, recording)[1])

    stimulus = preprocessing.load_epochs(stimulus)
    stimulus = preprocessing.concatenate_epochs(stimulus, None)
    stimulus = stimulus.pick_channels(
        [x for x in stimulus.ch_names if x.startswith('M')])
    response = preprocessing.load_epochs(response)
    response = preprocessing.concatenate_epochs(response, None)
    response = response.pick_channels(
        [x for x in response.ch_names if x.startswith('M')])

    # Baseline: mean over the 250 ms before stimulus onset, subtracted from
    # both the stimulus- and the response-locked data.
    id_time = (-0.25 <= stimulus.times) & (stimulus.times <= 0)
    means = stimulus._data[:, :, id_time].mean(-1)
    stimulus._data = stimulus._data - means[:, :, np.newaxis]
    response._data = response._data - means[:, :, np.newaxis]
    data_cov = lcmv.get_cov(stimulus, tmin=0, tmax=3)
    return data_cov, response
def get_stim_epoch(subject, session, recording):
    filenames = glob(get_filenames(subject, session, recording)[0])
    epochs = preprocessing.load_epochs(filenames)
    epochs = preprocessing.concatenate_epochs(epochs, None)
    epochs = epochs.pick_channels(
        [x for x in epochs.ch_names if x.startswith('M')])

    id_time = (-0.25 <= epochs.times) & (epochs.times <= 0)
    means = epochs._data[:, :, id_time].mean(-1)
    epochs._data -= means[:, :, np.newaxis]
    data_cov = lcmv.get_cov(epochs, tmin=0, tmax=3)
    return data_cov, epochs, filenames
def get_ref_head_pos(subject, session, trans, N=-1):
    from mne.transforms import apply_trans
    data = metadata.get_epoch_filename(subject, session, 0, 'stimulus', 'fif')
    data = pymegprepr.load_epochs([data])[0]
    # Average the fiducial coil positions (nasion, LPA, RPA) across the
    # head-position estimates.
    cc = head_loc(data.decimate(10))
    nasion = np.stack([c[0] for c in cc[:N]]).mean(0)
    lpa = np.stack([c[1] for c in cc[:N]]).mean(0)
    rpa = np.stack([c[2] for c in cc[:N]]).mean(0)
    nasion, lpa, rpa = nasion.mean(-1), lpa.mean(-1), rpa.mean(-1)

    return {
        'nasion': apply_trans(trans['t_ctf_dev_dev'], np.array(nasion)),
        'lpa': apply_trans(trans['t_ctf_dev_dev'], np.array(lpa)),
        'rpa': apply_trans(trans['t_ctf_dev_dev'], np.array(rpa))
    }
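apply_trans maps 3D points through a 4x4 affine; here it moves the averaged fiducials with the CTF device transform. A minimal sketch using an invented translation-only transform rather than the one used above:

import numpy as np
from mne.transforms import apply_trans

# Invented 4x4 affine: translate points by 1 cm along x
trans = np.eye(4)
trans[0, 3] = 0.01

nasion = np.array([0.0, 0.09, 0.0])                  # made-up fiducial position in meters
print(apply_trans(trans, nasion))                    # [0.01 0.09 0.  ]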
def get_epochs_for_subject(snum, epoch, sessions=None):
    from itertools import product

    if sessions is None:
        sessions = list(range(4))
    data = []
    meta = get_meta_for_subject(snum, epoch, sessions=sessions)
    for session, block in product(ensure_iter(sessions), list(range(5))):
        filename = metadata.get_epoch_filename(snum, session, block, epoch,
                                               'fif')
        if os.path.isfile(filename):
            data.append(filename)
    #assert len(data) == len(list(ensure_iter(sessions)))*5
    data = pymegprepr.load_epochs(data)
    event_ids = reduce(lambda x, y: x + y,
                       [list(d.event_id.values()) for d in data])
    meta = meta.reset_index().set_index('hash')
    meta = meta.loc[event_ids, :]
    assert len(meta) == sum([d._data.shape[0] for d in data])
    return pymegprepr.concatenate_epochs(data, [meta])
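The key step above is aligning the metadata table with the loaded epochs: the per-file event ids are concatenated and used to reindex the metadata on its hash column. A self-contained sketch of that alignment with invented toy data:

from functools import reduce

import pandas as pd

# Invented per-file event_id mappings (label -> hash) and a toy metadata table
event_id_per_file = [{'a': 11, 'b': 12}, {'c': 13}]
meta = pd.DataFrame({'hash': [13, 11, 12, 99], 'rt': [0.4, 0.5, 0.6, 0.7]})

event_ids = reduce(lambda x, y: x + y,
                   [list(d.values()) for d in event_id_per_file])
meta = meta.set_index('hash').loc[event_ids, :]      # rows now follow the epoch order
print(meta.index.tolist())                           # [11, 12, 13]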
def extract_reconstruct_tfr_block(subject,
                                  session,
                                  block,
                                  epoch,
                                  signal_type='BB',
                                  BEM='three_layer',
                                  debug=False,
                                  chunks=50,
                                  njobs=4):

    # Check that this block exists for the subject/session and look up its recording
    subject_int = int(subject[1:])
    fname = path + '/filenames_sub%i.pickle' % (subject_int)
    with open(fname, 'rb') as f:
        data = pickle.load(f)
    df = pd.DataFrame.from_dict(data)
    selection = df[(df.subject == subject_int) & (df.session == session)]
    blocks = np.array(selection.block)
    if block in blocks:
        recording = selection[selection.block == block].trans_matrix.iloc[0]
    else:
        print('block does not exist')
        sys.exit(0)

    if epoch == 'stimulus':
        fname = get_filenames_block(subject, session, block, recording)[0][0]
    else:
        fname = get_filenames_block(subject, session, block, recording)[1][0]

    # fname_aux='filter_sub%i_SESS%i_recording%i_epoch%s.pickle'%( subject_int,session,recording,epoch)
    # filename = join(path+'/extra', fname_aux)
    # f=open(filename,'rb')
    # filters=pickle.load(f)
    # f.close()

    filters = extract_filter(subject,
                             session,
                             recording,
                             epoch,
                             signal_type=signal_type)
    fois_h = np.arange(36, 162, 4)
    fois_l = np.arange(2, 36, 1)
    tfr_params = {
        'HF': {
            'foi': fois_h,
            'cycles': fois_h * 0.25,
            'time_bandwidth': 2 + 1,
            'n_jobs': njobs,
            'est_val': fois_h,
            'est_key': 'HF',
            'sf': 600,
            'decim': 10
        },
        'LF': {
            'foi': fois_l,
            'cycles': fois_l * 0.4,
            'time_bandwidth': 1 + 1,
            'n_jobs': njobs,
            'est_val': fois_l,
            'est_key': 'LF',
            'sf': 600,
            'decim': 10
        }
    }

    print(fname)
    print('loading data')
    epochs = preprocessing.load_epochs([fname])
    print('concatenating data')
    epochs = preprocessing.concatenate_epochs(epochs, None)
    print('Picking channels')
    epochs = epochs.pick_channels(
        [x for x in epochs.ch_names if x.startswith('M')])

    events = epochs.events[:, 2]

    for i in range(0, len(events), chunks):
        print('chunk:', i)
        filename = lcmvfilename_block(subject,
                                      session,
                                      block,
                                      signal_type,
                                      recording,
                                      epoch,
                                      chunk=i)
        if os.path.isfile(filename):
            continue
        if signal_type == 'BB':
            logging.info('Starting reconstruction of BB signal')
            M = lcmv.reconstruct_broadband(filters,
                                           epochs.info,
                                           epochs._data[i:i + chunks],
                                           events[i:i + chunks],
                                           epochs.times,
                                           njobs=1)
        else:
            logging.info('Starting reconstruction of TFR signal')
            M = lcmv.reconstruct_tfr(filters,
                                     epochs.info,
                                     epochs._data[i:i + chunks],
                                     events[i:i + chunks],
                                     epochs.times,
                                     est_args=tfr_params[signal_type],
                                     njobs=1)
        M.to_hdf(filename, 'epochs')
        del M
    del epochs
    print('done')
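The per-chunk loop writes one output file per chunk and skips chunks whose file already exists, so an interrupted reconstruction can be resumed. A generic, self-contained sketch of that resume pattern (the file name and the processing step are invented):

import os

import numpy as np

data = np.arange(230)                    # stand-in for the epochs axis
chunks = 50

for i in range(0, len(data), chunks):
    out = 'chunk_%04i.npy' % i           # invented output name
    if os.path.isfile(out):              # resume: skip chunks that are already done
        continue
    result = data[i:i + chunks] ** 2     # stand-in for the reconstruction step
    np.save(out, result)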
def extract_reconstruct_tfr(subject,
                            session,
                            recording,
                            epoch,
                            signal_type='BB',
                            BEM='three_layer',
                            debug=False,
                            chunks=50,
                            njobs=4):

    if epoch == 'stimulus':
        filenames = glob(get_filenames(subject, session, recording)[0])
    else:
        filenames = glob(get_filenames(subject, session, recording)[1])

    subject_int = int(subject[1:])
    fname = 'filter_sub%i_SESS%i_recording%i_epoch%s.pickle' % (
        subject_int, session, recording, epoch)
    filename = join(path + '/extra', fname)
    with open(filename, 'rb') as f:
        filters = pickle.load(f)
    fois_h = np.arange(36, 162, 4)
    fois_l = np.arange(2, 36, 1)
    tfr_params = {
        'HF': {
            'foi': fois_h,
            'cycles': fois_h * 0.25,
            'time_bandwidth': 2 + 1,
            'n_jobs': njobs,
            'est_val': fois_h,
            'est_key': 'HF',
            'sf': 600,
            'decim': 10
        },
        'LF': {
            'foi': fois_l,
            'cycles': fois_l * 0.4,
            'time_bandwidth': 1 + 1,
            'n_jobs': njobs,
            'est_val': fois_l,
            'est_key': 'LF',
            'sf': 600,
            'decim': 10
        }
    }

    print('filters done')
    for ifname, fname in enumerate(filenames):
        print(fname)
        epochs = preprocessing.load_epochs([fname])
        epochs = preprocessing.concatenate_epochs(epochs, None)
        epochs = epochs.pick_channels(
            [x for x in epochs.ch_names if x.startswith('M')])
        events = epochs.events[:, 2]

        for i in range(0, len(events), chunks):
            print('chunk:', i)
            filename = lcmvfilename(subject,
                                    session,
                                    signal_type,
                                    recording,
                                    epoch + str(ifname),
                                    chunk=i)
            if os.path.isfile(filename):
                continue
            if signal_type == 'BB':
                logging.info('Starting reconstruction of BB signal')
                M = lcmv.reconstruct_broadband(filters,
                                               epochs.info,
                                               epochs._data[i:i + chunks],
                                               events[i:i + chunks],
                                               epochs.times,
                                               njobs=1)
            else:
                logging.info('Starting reconstruction of TFR signal')
                M = lcmv.reconstruct_tfr(filters,
                                         epochs.info,
                                         epochs._data[i:i + chunks],
                                         events[i:i + chunks],
                                         epochs.times,
                                         est_args=tfr_params[signal_type],
                                         njobs=1)
            M.to_hdf(filename, 'epochs')
            del M
        del epochs
        print('done')
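In both parameter sets the number of cycles grows linearly with frequency, which fixes the effective analysis window per band (window = cycles / frequency): 0.25 s for the high-frequency set and 0.4 s for the low-frequency set. A small self-contained check of that relationship:

import numpy as np

fois_h = np.arange(36, 162, 4)
fois_l = np.arange(2, 36, 1)

# Effective window length per frequency: T = n_cycles / freq
T_h = (fois_h * 0.25) / fois_h           # high-frequency band
T_l = (fois_l * 0.4) / fois_l            # low-frequency band
print(np.allclose(T_h, 0.25), np.allclose(T_l, 0.4))   # True True

If time_bandwidth follows the usual multitaper convention (spectral bandwidth roughly time_bandwidth / window), this corresponds to about 12 Hz of smoothing for the HF set and 5 Hz for the LF set.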