Example #1
def preprocess_raw(recording):
    # Assumes `mne`, `numpy as np`, `pickle`, and `os.path.join` are imported,
    # and that `inpath`, `get_blocks`, `get_hash`, `get_epoch`, and
    # `filenames` are project-specific helpers defined elsewhere.
    raw = mne.io.read_raw_ctf(join(inpath, recording.filename))
    blocks = get_blocks(raw)
    min_start, max_end = np.min(raw.times), np.max(raw.times)

    for i, block in blocks.items():
        # Cut into blocks. The original cropping code is elided here; `r`,
        # `block_meta`, and `block_timing` are assumed to be produced by it.
        r = block  # assumption: each entry of `blocks` is a cropped Raw

        print('Notch filtering')
        midx = np.where([x.startswith('M') for x in r.ch_names])[0]
        r.load_data()
        # Remove 50 Hz line noise and its harmonics up to 250 Hz (MEG channels)
        r.notch_filter(np.arange(50, 251, 50), picks=midx)

        # Get unique trial indices so epochs can be matched to metadata
        index = get_hash(recording.subject,
                         recording.session,
                         recording.block[i],
                         np.arange(1, len(block_meta) + 1))
        block_meta.loc[:, 'trial'] = index
        
        def uniquify(x):
            # Keep only the first element of each list-like entry in x
            data = []
            for value in x.values:
                try:
                    data.append(value[0])
                except (IndexError, TypeError):
                    data.append(value)
            return data

        # Sometimes subjects press twice, keep only first event
        block_meta.loc[:, 'response'] = uniquify(block_meta.loc[:, 'response'])
        block_timing.loc[:, 'response_time'] = uniquify(block_timing.loc[:, 'response_time'])
        block_meta.loc[:, 'confidence'] = uniquify(block_meta.loc[:, 'confidence'])
        block_timing.loc[:, 'confidence_time'] = uniquify(block_timing.loc[:, 'confidence_time'])
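        # e.g., uniquify(pd.Series([[3, 5], 7])) -> [3, 7]: a doubled press
        # stored as a list is reduced to its first element (hypothetical input)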
        # Cut into epochs
        for epoch, event, (tmin, tmax), (rmin, rmax) in zip(
                ['stimulus', 'response', 'confidence'],
                ['coherence_on_time', 'response_time', 'confidence_time'],
                [(-1, 2.5), (-1.5, 1.5), (-1.5, 1.5)],
                [(-0.5, 1.5), (-.75, 0.75), (-1, 0.5)]):

            m, s = get_epoch(r, block_meta, block_timing,
                             event=event,
                             epoch_time=(tmin, tmax),
                             reject_time=(rmin, rmax),
                             epoch_label='trial')
            # `artdef` is assumed to come from the elided artifact-detection
            # step; record the epoch drop log alongside it
            artdef['drop_log'] = s.drop_log
            artdef['drop_log_stats'] = s.drop_log_stats()
            if len(s) == 0:
                continue
            epofname, mfname, afname = filenames(recording.subject, epoch,
                                                 recording.session,
                                                 recording.block[i])
            s = s.resample(600, npad='auto')  # downsample epochs to 600 Hz
            s.save(epofname)
            m.to_hdf(mfname, 'meta')
            with open(afname, 'wb') as f:
                pickle.dump(artdef, f, protocol=2)
Example #2
def one_block(snum, session, block_in_raw, block_in_experiment):
    '''
    Preprocess a single block and save the results.

    Parameters
    ----------
    snum, session : int
        Subject number and session number.
    block_in_raw, block_in_experiment : int
        Each successful session consists of five blocks, yet a session's MEG
        data file sometimes contains more. This happens, for example, when a
        block is restarted. 'block_in_raw' refers to the actual block in the
        raw file, and 'block_in_experiment' to the block in the metadata that
        block_in_raw should be mapped to. block_in_experiment is used for
        saving.
    '''

    try:

        art_fname = metadata.get_epoch_filename(snum, session,
                                                block_in_experiment, None,
                                                'artifacts')

        data = empirical.load_data()
        data = empirical.data_cleanup(data)

        filename = metadata.get_raw_filename(snum, session)
        raw = mne.io.read_raw_ctf(filename, system_clock='ignore')
        trials = blocks(raw)
        if block_in_raw not in np.unique(trials['block']):
            err_msg = 'Error when processing %i, %i, %i, %i, data file = %s' % (
                snum, session, block_in_raw, block_in_experiment, filename)
            raise RuntimeError(err_msg)

        # Load data and preprocess it.
        logging.info('Loading block of data: %s; block: %i' %
                     (filename, block_in_experiment))
        r, r_id = load_block(raw, trials, block_in_raw)
        r_id['filename'] = filename
        print('Working on:', filename, block_in_experiment, block_in_raw)
        logging.info('Starting artifact detection')

        r, ants, artdefs = pymegprepr.preprocess_block(r)
        print('Notch filtering')
        midx = np.where([x.startswith('M') for x in r.ch_names])[0]
        # Remove 50 Hz line noise and its harmonics up to 250 Hz (MEG channels)
        r.notch_filter(np.arange(50, 251, 50), picks=midx)
        logging.info('Aligning meta data')
        meta, timing = get_meta(data, r, snum, block_in_experiment, filename)
        # Drop trials without a response
        idx = np.isnan(meta.response.values)
        meta = meta.loc[~idx, :]
        timing = timing.loc[~idx, :]
        artdefs['id'] = r_id
        filenames = []
        for epoch, event, (tmin, tmax), (rmin, rmax) in zip(
                ['stimulus', 'response', 'feedback'],
                ['stim_onset_t', 'button_t', 'meg_feedback_t'],
                [(-.75, 1.5), (-1.5, 1), (-1, 1)],
                [(0, 1), (-1, 0.5), (-0.5, 0.5)]):

            logging.info('Processing epoch: %s' % epoch)
            try:
                m, s = pymegprepr.get_epoch(
                    r,
                    meta,
                    timing,
                    event=event,
                    epoch_time=(tmin, tmax),
                    reject_time=(rmin, rmax),
                )
            except RuntimeError as e:
                print(e)
                continue

            if len(s) > 0:
                epo_fname = metadata.get_epoch_filename(
                    snum, session, block_in_experiment, epoch, 'fif')
                epo_metaname = metadata.get_epoch_filename(
                    snum, session, block_in_experiment, epoch, 'meta')
                s = s.resample(600, npad='auto')
                s.save(epo_fname)
                m.to_hdf(epo_metaname, 'meta')
                r_id[epoch] = len(s)
                filenames.append(epo_fname)
        with open(art_fname, 'wb') as f:
            pickle.dump(artdefs, f, protocol=2)

    except MemoryError:
        print(snum, session, block_in_raw, block_in_experiment)
        raise RuntimeError(
            'MemoryError caught in one_block %i %i %i %i' %
            (snum, session, block_in_raw, block_in_experiment))
    return 'Finished', snum, session, block_in_experiment, filenames
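
A minimal driver sketch for this function; the subject and session ranges and
the `block_map` (mapping blocks in the raw file to blocks in the experiment,
e.g. after a restarted block shifts the numbering) are hypothetical:

    # Hypothetical mapping: block in the raw file -> block in the experiment
    block_map = {0: 0, 1: 1, 3: 2, 4: 3, 5: 4}  # raw block 2 was restarted

    for snum in range(1, 16):         # hypothetical subject numbers
        for session in range(4):      # hypothetical session numbers
            for block_in_raw, block_in_experiment in block_map.items():
                one_block(snum, session, block_in_raw, block_in_experiment)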
Example #3
def preprocess(subject, session):
    columns_meta = [
        "baseline_start",
        "decision_start",
        "dot_onset",
        "feedback",
        "noise",
        "response",
        "rest_delay",
        "trial_end",
        "trial_num",
        "trial_start",
        "wait_fix",
        "session_number",
        "block_start",
    ]

    columns_timing = [
        "baseline_start_time",
        "decision_start_time",
        "dot_onset_time",
        "feedback_time",
        "noise_time",
        "response_time",
        "rest_delay_time",
        "trial_end_time",
        "trial_start_time",
        "wait_fix_time",
    ]

    path = "/mnt/homes/home024/gortega/megdata/"
    path_cluster = "/home/gortega/preprocessed_megdata/sensor_space"
    path_megdata = "/home/gortega/megdata/"

    for file_idx, filename in enumerate(
            glob.glob(
                os.path.join(path_megdata,
                             "*S%i-%i_Att*" % (subject, session)))):
        date = filename[-14:-6]  # date stamp embedded in the file name
        raw = mne.io.read_raw_ctf(filename)

        # Trigger pins and their mapping to event names
        other_pins = {100: "session_number", 101: "block_start"}
        trial_pins = {150: "trial_num"}
        mapping = {
            ("noise", 0): 111,
            ("noise", 1): 112,
            ("noise", 2): 113,
            ("start_button", 0): 89,
            ("start_button", 1): 91,
            ("trial_start", 0): 150,
            ("trial_end", 0): 151,
            ("wait_fix", 0): 30,
            ("baseline_start", 0): 40,
            ("dot_onset", 0): 50,
            ("decision_start", 0): 60,
            ("response", -1): 61,
            ("response", 1): 62,
            ("no_decisions", 0): 68,
            ("feedback", 0): 70,
            ("rest_delay", 0): 80,
        }
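        # Invert the mapping so a trigger code looks up its (event, value)
        # pair, e.g. 111 -> ("noise", 0)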
        mapping = dict((v, k) for k, v in mapping.items())

        # Get metadata and event timing from the raw data
        meta, timing = preprocessing.get_meta(raw, mapping, trial_pins, 150,
                                              151, other_pins)
        index = meta.block_start
        block_times, block_idx = get_blocks(meta, timing)

        for bnum in block_times.keys():
            print(
                "******************************** SESSION",
                session,
                "BLOCK",
                bnum,
                "******************************** ",
            )

            mb2 = meta.loc[block_idx[bnum][0]:block_idx[bnum][1]]
            tb2 = timing.loc[block_idx[bnum][0]:block_idx[bnum][1]]

            tb2 = tb2.dropna(subset=["dot_onset_time"])
            mb2 = mb2.dropna(subset=["dot_onset"])
            # Keep only trials with a complete sequence of 10 dots and record
            # the onset of the first dot
            index = []
            for idx in tb2.index:
                try:
                    if len(tb2.loc[idx, "dot_onset_time"]) == 10:
                        index.append(idx)
                        tb2.loc[idx, "first_dot_time"] = tb2.loc[
                            idx, "dot_onset_time"][0]
                        mb2.loc[idx, "first_dot"] = mb2.loc[idx,
                                                            "dot_onset"][0]
                except TypeError:
                    pass
            tb2 = tb2.loc[index]
            mb2 = mb2.loc[index]

            r = raw.copy()  # work on a copy so the original raw stays intact
            # Crop to this block's window; block_times is in samples, divided
            # by the (assumed) 1200 Hz sampling rate to get seconds
            r.crop(tmin=block_times[bnum][0] / 1200,
                   tmax=block_times[bnum][1] / 1200)
            r = interpolate_bad_channels(subject, session, r)
            mb, tb = meg.preprocessing.get_meta(
                r, mapping, trial_pins, 150, 151,
                other_pins)  # get new metadata for each block

            mb = eliminate_spurious_columns(
                mb, columns_meta)  # sometimes we need to drop extra columns
            tb = eliminate_spurious_columns(tb, columns_timing)
            tb = tb.dropna()
            mb = mb.dropna()
            r, ants, artdef = meg.preprocessing.preprocess_block(
                r, blinks=True)  # artifact detection (including blinks)

            print("Notch filtering")
            midx = np.where([x.startswith("M") for x in r.ch_names])[0]
            r.load_data()
            r.notch_filter(np.arange(50, 251, 50), picks=midx)

            # Interpolate any remaining bad channels, then clear the bads list
            bad_channels = r.info["bads"]
            if len(bad_channels) > 0:
                r.load_data()
                r.interpolate_bads(reset_bads=False)
                r.info["bads"] = []

            trial_repeat = []
            # `hash` here is assumed to be a project helper (not the builtin)
            # that builds a unique per-trial identifier
            mb.loc[:, "hash"] = hash(subject, mb.session_number, bnum,
                                     mb.trial_num)

            # Create a column for the onset of the first dot
            tb.loc[:, 'first_dot_time'] = np.array(
                [x[0] for x in tb.dot_onset_time])
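            # Epoch around the first dot. epoch_time is the window cut around
            # the event; reject_time is assumed to be the narrower window in
            # which artifact rejection is applied.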
            stimmeta, stimlock = preprocessing.get_epoch(
                r,
                mb.dropna(),
                tb.dropna(),
                event="first_dot_time",
                epoch_time=(-1, 3.5),
                epoch_label="hash",
                reject_time=(0, 2),
            )

            if len(stimmeta) > 0:
                stim_filename = join(
                    path_cluster,
                    "down_sample_stim_meta_sub%i_sess%i_block%i_offset%i" %
                    (subject, session, bnum, file_idx),
                )
                stimlock.resample(600, npad="auto")  # in-place downsample to 600 Hz
                stimmeta.to_hdf(stim_filename + ".hdf", "meta")
                stimlock.save(stim_filename + "-epo.fif.gz")

            rlmeta, resplock = preprocessing.get_epoch(
                r,
                mb.dropna(),
                tb.dropna(),
                event="response_time",
                epoch_time=(-2.5, 0.5),
                epoch_label="hash",
                reject_time=(-1, 0.4),
            )
            if len(rlmeta) > 0:
                resp_filename = join(
                    path_cluster,
                    "down_sample_resp_meta_sub%i_sess%i_block%i_offset%i" %
                    (subject, session, bnum, file_idx),
                )
                resplock.resample(600, npad="auto")  # in-place downsample to 600 Hz
                rlmeta.to_hdf(resp_filename + ".hdf", "meta")
                resplock.save(resp_filename + "-epo.fif.gz")

    print("done")
Example #4
            # Build a unique per-trial hash; `hash4` is assumed to return the
            # hash plus a flag marking repeated trials
            mb.loc[:, 'hash'] = 1
            for j in mb.index:
                mb.loc[j, 'hash'], aux = hash4([
                    subject,
                    int(mb.session_number[j]),
                    int(bnum),
                    int(mb.trial_num[j]),
                    int(date)
                ])
                trial_repeat.append(aux)
            # Now compute the epochs: one epoch per trial

            stimmeta, stimlock = preprocessing.get_epoch(
                r,
                mb.dropna(),
                tb.dropna(),
                event='first_dot_time',
                epoch_time=(-1, 3.5),
                epoch_label='hash',
                reject_time=(0, 2))
            session_number = int(mb.session_number[mb.index[0]])
            block_number = int(mb.block_start[mb.index[0]])
            if len(stimmeta) > 0:

                stim_filename = join(
                    path_cluster,
                    'down_sample_stim_meta_sub%i_sess%i_block%i_offset%i' %
                    (subject, session_number, block_number, file_idx))
                stimlock.resample(600, npad="auto")
                stimmeta.to_hdf(stim_filename + '.hdf', 'meta')
                stimlock.save(stim_filename + '-epo.fif.gz')
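
A short sketch of how the saved files could be loaded back for analysis,
using standard MNE and pandas calls (the file name is hypothetical):

    import mne
    import pandas as pd

    fname = 'down_sample_stim_meta_sub1_sess1_block1_offset0'  # hypothetical
    epochs = mne.read_epochs(fname + '-epo.fif.gz')  # the saved Epochs
    meta = pd.read_hdf(fname + '.hdf', 'meta')       # matching metadata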