Exemplo n.º 1
0
def load_VTC_data(BIDS_PATH, LOGS_DIR, SUBJ_LIST, BLOCS_LIST):
    """Load the VTC value of every clean epoch for each subject and run.

    Parameters
    ----------
    BIDS_PATH : str
        Root of the BIDS dataset.
    LOGS_DIR : str
        Directory containing the behavioral logfiles (trailing separator
        expected, since it is concatenated with the filename below).
    SUBJ_LIST : iterable
        Subject identifiers.
    BLOCS_LIST : iterable
        Run/bloc identifiers.

    Returns
    -------
    list
        One entry per subject; each entry is a list (one per run) of
        1-D numpy arrays holding the VTC value of each retained epoch.
    """
    VTC_alldata = []
    for subj in SUBJ_LIST:
        all_subj = []  ## all the data of one subject
        for run in BLOCS_LIST:
            # get events from epochs file (artifact-rejected, so no resp event)
            epo_path, epo_filename = get_SAflow_bids(
                BIDS_PATH, subj, run, "epo", cond=None
            )
            events_epoched = mne.read_events(epo_filename, verbose=False)
            # get events from original file (only 599 events)
            events_fname, events_fpath = get_SAflow_bids(
                BIDS_PATH, subj, run, stage="preproc_raw", cond=None
            )
            raw = read_raw_fif(
                events_fpath, preload=False, verbose=False
            )  # , min_duration=2/epochs.info['sfreq'])
            all_events = mne.find_events(
                raw, min_duration=2 / raw.info["sfreq"], verbose=False
            )
            # keep only stimulus triggers (codes 21 and 31)
            stim_idx = [i for i, ev in enumerate(all_events) if ev[2] in (21, 31)]
            all_events = all_events[stim_idx]
            # compute VTC for all trials
            log_file = find_logfile(subj, run, os.listdir(LOGS_DIR))
            VTC, INbounds, OUTbounds, INzone, OUTzone = get_VTC_from_file(
                LOGS_DIR + log_file, lobound=None, hibound=None
            )
            # Map each epoched event back to its position among the stim
            # events. Hoist the sample list out of the loop — the original
            # rebuilt it per lookup, making this O(n^2). A missing sample
            # still raises ValueError, as before.
            stim_samples = list(all_events[:, 0])
            epochs_VTC = [
                VTC[stim_samples.index(event_time)]
                for event_time in events_epoched[:, 0]
            ]
            all_subj.append(np.array(epochs_VTC))
        VTC_alldata.append(all_subj)
    return VTC_alldata
Exemplo n.º 2
0
def annotate_precursor_events(BIDS_PATH, subj, bloc):
    """Return epochs whose events are the trials immediately preceding a
    rare event (code 310), restricted to precursor codes 2110/2111.

    Parameters
    ----------
    BIDS_PATH : str
        Root of the BIDS dataset.
    subj, bloc
        Subject and run identifiers forwarded to `get_SAflow_bids`.

    Returns
    -------
    mne.Epochs
        The loaded epochs object with `events` replaced by the precursor
        events and `event_id` set from the annotated event set.

    Notes
    -----
    Relies on the module-level ``LOGS_DIR`` constant.
    """
    _, epopath = get_SAflow_bids(BIDS_PATH,
                                 subj,
                                 bloc,
                                 stage="-epo4001200",
                                 cond=None)
    epochs = mne.read_epochs(epopath)
    # find events
    events = epochs.events
    files_list = os.listdir(LOGS_DIR)
    logfile = LOGS_DIR + find_logfile(subj, bloc, files_list)
    (
        IN_idx,
        OUT_idx,
        VTC_raw,
        VTC_filtered,
        IN_mask,
        OUT_mask,
        performance_dict,
        df_response,
        RT_to_VTC,
    ) = get_VTC_from_file(subj, bloc, files_list, inout_bounds=[50, 50])
    events = annotate_events(logfile, events, inout_idx=[IN_idx, OUT_idx])
    event_id = get_present_events(events)

    # Collect the event immediately preceding each rare (310) event, when
    # that predecessor is a 2110/2111 trial. The `idx > 0` guard fixes a
    # negative-index bug: the original wrapped to events[-1] when the very
    # first event had code 310.
    events_precursor = []
    for idx, ev in enumerate(events):
        if idx > 0 and ev[2] == 310 and events[idx - 1][2] in (2111, 2110):
            events_precursor.append(events[idx - 1])
    events_precursor = np.array(events_precursor)

    epochs.events = events_precursor
    epochs.event_id = event_id
    return epochs
Exemplo n.º 3
0
def split_PSD_data(
    BIDS_PATH,
    SUBJ_LIST,
    BLOCS_LIST,
    by="VTC",
    lobound=None,
    hibound=None,
    stage="PSD",
    filt_order=3,
    filt_cutoff=0.1,
):
    """
    This func splits the PSD data into two conditions. It returns a list of 2 (cond1 and cond2), each containing a list of n_subject matrices of shape n_freqs X n_channels X n_trials

    Parameters
    ----------
    by : str
        "VTC" splits into IN/OUT attention-zone trials; "odd" splits into
        frequent (code 21) vs rare (code 31) trials.
    lobound, hibound, filt_order, filt_cutoff
        Forwarded to `get_VTC_epochs` when ``by == "VTC"``.
    stage : str
        BIDS stage passed to `load_PSD_data`.

    Raises
    ------
    ValueError
        If ``by`` is neither "VTC" nor "odd" (previously this crashed
        later with UnboundLocalError).
    """
    PSD_alldata = load_PSD_data(
        BIDS_PATH, SUBJ_LIST, BLOCS_LIST, time_avg=True, stage=stage
    )
    PSD_cond1 = []
    PSD_cond2 = []
    for subj_idx, subj in enumerate(SUBJ_LIST):
        subj_cond1 = []
        subj_cond2 = []
        for bloc_idx, bloc in enumerate(BLOCS_LIST):
            print("Splitting sub-{}_run-{}".format(subj, bloc))

            # Obtain indices of the two conditions
            if by == "VTC":
                # Pass BIDS_PATH explicitly: the original call omitted it,
                # shifting every positional argument of get_VTC_epochs
                # (whose signature is BIDS_PATH, LOGS_DIR, subj, run, ...).
                INidx, OUTidx, VTC_epochs, idx_trimmed = get_VTC_epochs(
                    BIDS_PATH,
                    LOGS_DIR,
                    subj,
                    bloc,
                    lobound=lobound,
                    hibound=hibound,
                    save_epochs=False,
                    filt_order=filt_order,
                    filt_cutoff=filt_cutoff,
                )
                cond1_idx = INidx
                cond2_idx = OUTidx
            elif by == "odd":
                # Get indices of freq and rare events
                ev_fname, ev_fpath = get_SAflow_bids(BIDS_PATH, subj, bloc, stage="epo")
                events_artrej = mne.read_events(ev_fpath)
                log_file = LOGS_DIR + find_logfile(subj, bloc, os.listdir(LOGS_DIR))
                events_fname, events_fpath = get_SAflow_bids(
                    BIDS_PATH, subj, bloc, stage="preproc_raw", cond=None
                )
                raw = read_raw_fif(
                    events_fpath, preload=False, verbose=False
                )  # , min_duration=2/epochs.info['sfreq'])
                try:
                    events = mne.find_events(
                        raw, min_duration=1 / raw.info["sfreq"], verbose=False
                    )
                except ValueError:
                    events = mne.find_events(
                        raw, min_duration=2 / raw.info["sfreq"], verbose=False
                    )

                events_noerr, events_comerr, events_omerr = remove_errors(
                    log_file, events
                )
                events_trimmed, idx_trimmed = trim_events(events_noerr, events_artrej)
                cond1_idx = []
                cond2_idx = []
                for idx, ev in enumerate(events_trimmed):
                    if ev[2] == 21:  # Frequent events
                        cond1_idx.append(idx)
                    if ev[2] == 31:  # Rare events
                        cond2_idx.append(idx)
                cond1_idx = np.array(cond1_idx)
                cond2_idx = np.array(cond2_idx)
                # Add this to keep the same number of trials in both conditions
                # NOTE(review): random.choices samples WITH replacement, so
                # frequent trials may be duplicated — confirm this is intended
                # (random.sample would draw without replacement).
                random.seed(0)
                cond1_idx = random.choices(cond1_idx, k=len(cond2_idx))
                print(
                    "N trials retained for each condition : {}".format(len(cond2_idx))
                )
            else:
                raise ValueError("Unknown split criterion: {}".format(by))
            # Pick the data of each condition
            if bloc_idx == 0:  # if first bloc, init ndarray size using the first matrix
                subj_cond1 = PSD_alldata[subj_idx][bloc_idx][:, :, cond1_idx]
                subj_cond2 = PSD_alldata[subj_idx][bloc_idx][:, :, cond2_idx]
            else:  # if not first bloc, just concatenate along the trials dimension
                subj_cond1 = np.concatenate(
                    (subj_cond1, PSD_alldata[subj_idx][bloc_idx][:, :, cond1_idx]),
                    axis=2,
                )
                subj_cond2 = np.concatenate(
                    (subj_cond2, PSD_alldata[subj_idx][bloc_idx][:, :, cond2_idx]),
                    axis=2,
                )
        PSD_cond1.append(subj_cond1)
        PSD_cond2.append(subj_cond2)
    splitted_PSD = [PSD_cond1, PSD_cond2]
    return splitted_PSD
Exemplo n.º 4
0
def get_VTC_epochs(
    BIDS_PATH,
    LOGS_DIR,
    subj,
    run,
    stage="-epo",
    lobound=None,
    hibound=None,
    save_epochs=False,
    filt_order=3,
    filt_cutoff=0.1,
):
    """Split the epochs of the epo.fif file using the behavioral logfile.

    Compares the timestamps of IN/OUT events against the timestamps of the
    events kept in the epochs file, and re-expresses the IN/OUT indices in
    terms of clean epochs. The returned indices are the ones consumed by
    `split_PSD_data`.

    Returns
    -------
    tuple
        (INidx, OUTidx, VTC_epo, idx_trimmed) where VTC_epo is the raw VTC
        value of every clean, correct epoch.
    """
    # Events that survived artifact rejection (epochs file has no resp event)
    _, epo_fpath = get_SAflow_bids(BIDS_PATH, subj, run, stage=stage, cond=None)
    events_artrej = mne.read_events(epo_fpath, verbose=False)

    # Locate the logfile and compute the VTC from it
    behav_files = os.listdir(LOGS_DIR)
    log_file = LOGS_DIR + find_logfile(subj, run, behav_files)

    vtc_out = get_VTC_from_file(
        subj, run, behav_files, inout_bounds=[lobound, hibound], filt_cutoff=filt_cutoff
    )
    # Only the indices and the raw VTC trace are needed below
    INidx, OUTidx, VTC_raw = vtc_out[0], vtc_out[1], vtc_out[2]

    # Original events from the preprocessed raw file
    _, raw_fpath = get_SAflow_bids(BIDS_PATH, subj, run, stage="preproc_raw", cond=None)
    raw = read_raw_fif(raw_fpath, preload=False, verbose=False)
    try:
        events = mne.find_events(raw, min_duration=1 / raw.info["sfreq"], verbose=False)
    except ValueError:
        # Fall back to a longer minimum duration when 1 sample is rejected
        events = mne.find_events(raw, min_duration=2 / raw.info["sfreq"], verbose=False)

    # Behavioral classification of each event; only correct commissions kept
    removed = remove_errors(log_file, events)
    events_comcorr = removed[3]
    # Keep only events that are both correct and clean
    events_trimmed, idx_trimmed = trim_events(events_comcorr, events_artrej)
    # Re-express INidx and OUTidx as indices into the clean events
    INidx, OUTidx = trim_INOUT_idx(INidx, OUTidx, events_trimmed, events)

    VTC_epo = np.array([VTC_raw[i] for i in idx_trimmed])

    return INidx, OUTidx, VTC_epo, idx_trimmed
Exemplo n.º 5
0
def get_odd_epochs(BIDS_PATH, LOGS_DIR, subj, bloc, stage="-epo"):
    """
    Returns an array of indices of Freqs and Rares epochs. Retains only clean epochs.

    Returns
    -------
    tuple of numpy.ndarray
        (freqs_hits_idx, freqs_miss_idx, rares_hits_idx, rares_miss_idx):
        indices of clean frequent hits (code 21), omission errors,
        rare hits (code 31), and commission errors, respectively.
    """
    # Events remaining after artifact rejection (no resp event in epo file)
    _, epo_fpath = get_SAflow_bids(BIDS_PATH, subj, bloc, stage=stage, cond=None)
    events_artrej = mne.read_events(epo_fpath, verbose=False)

    # Original events from the raw file, to compare with the epoched ones
    _, raw_fpath = get_SAflow_bids(BIDS_PATH, subj, bloc, stage="preproc_raw", cond=None)
    raw = read_raw_fif(raw_fpath, preload=False, verbose=False)
    try:
        events = mne.find_events(raw, min_duration=1 / raw.info["sfreq"], verbose=False)
    except ValueError:
        # Retry with a longer minimum duration if the first attempt fails
        events = mne.find_events(raw, min_duration=2 / raw.info["sfreq"], verbose=False)

    # Classify events as hits / commission errors / omission errors
    log_file = LOGS_DIR + find_logfile(subj, bloc, os.listdir(LOGS_DIR))
    events_noerr, events_comerr, events_omerr, _comcorr, _omcorr = remove_errors(
        log_file, events
    )

    # Correct responses: keep the clean ones and split by trigger code
    hits_trimmed, hits_idx = trim_events(events_noerr, events_artrej)
    freqs_hits_idx = np.array(
        [hits_idx[i] for i, ev in enumerate(hits_trimmed) if ev[2] == 21]
    )
    rares_hits_idx = np.array(
        [hits_idx[i] for i, ev in enumerate(hits_trimmed) if ev[2] == 31]
    )

    # Commission errors count as missed rares
    if events_comerr.size > 0:
        _, comerr_idx = trim_events(events_comerr, events_artrej)
        rares_miss_idx = np.array(comerr_idx)
    else:
        rares_miss_idx = np.array([])

    # Omission errors count as missed frequents
    if events_omerr.size > 0:
        _, omerr_idx = trim_events(events_omerr, events_artrej)
        freqs_miss_idx = np.array(omerr_idx)
    else:
        freqs_miss_idx = np.array([])

    return freqs_hits_idx, freqs_miss_idx, rares_hits_idx, rares_miss_idx